diff --git a/vendor/github.com/elastic/beats/.github/CODEOWNERS b/vendor/github.com/elastic/beats/.github/CODEOWNERS index 3dd2dc7d..9d041bef 100644 --- a/vendor/github.com/elastic/beats/.github/CODEOWNERS +++ b/vendor/github.com/elastic/beats/.github/CODEOWNERS @@ -21,20 +21,20 @@ /x-pack/packetbeat/ @elastic/siem # Filebeat -/filebeat/module/ @elastic/infrastructure +/filebeat/module/ @elastic/integrations /filebeat/module/elasticsearch/ @elastic/stack-monitoring /filebeat/module/kibana/ @elastic/stack-monitoring /filebeat/module/logstash/ @elastic/stack-monitoring -/x-pack/filebeat/module/ @elastic/infrastructure +/x-pack/filebeat/module/ @elastic/integrations /x-pack/filebeat/module/suricata/ @elastic/secops # Metricbeat -/metricbeat/module/ @elastic/infrastructure +/metricbeat/module/ @elastic/integrations /metricbeat/module/elasticsearch/ @elastic/stack-monitoring /metricbeat/module/kibana/ @elastic/stack-monitoring /metricbeat/module/logstash/ @elastic/stack-monitoring /metricbeat/module/beat/ @elastic/stack-monitoring -/x-pack/metricbeat/module/ @elastic/infrastructure +/x-pack/metricbeat/module/ @elastic/integrations # Heartbeat /heartbeat/ @elastic/uptime diff --git a/vendor/github.com/elastic/beats/.go-version b/vendor/github.com/elastic/beats/.go-version index 166a50ff..656fd0d7 100644 --- a/vendor/github.com/elastic/beats/.go-version +++ b/vendor/github.com/elastic/beats/.go-version @@ -1 +1 @@ -1.12.9 +1.12.12 diff --git a/vendor/github.com/elastic/beats/.travis.yml b/vendor/github.com/elastic/beats/.travis.yml index de629120..13f55f57 100644 --- a/vendor/github.com/elastic/beats/.travis.yml +++ b/vendor/github.com/elastic/beats/.travis.yml @@ -142,11 +142,11 @@ jobs: # Generators - os: linux - env: TARGETS="-C generator/metricbeat test" + env: TARGETS="-C generator/metricbeat test test-package" go: $TRAVIS_GO_VERSION stage: test - os: linux - env: TARGETS="-C generator/beat test" + env: TARGETS="-C generator/beat test test-package" go: $TRAVIS_GO_VERSION stage: test @@ -166,12 +166,6 @@ jobs: stage: test # Kubernetes - - os: linux - install: deploy/kubernetes/.travis/setup.sh - env: - - TARGETS="-C deploy/kubernetes test" - - TRAVIS_K8S_VERSION=v1.8.0 - stage: test - os: linux install: deploy/kubernetes/.travis/setup.sh env: @@ -184,6 +178,24 @@ jobs: - TARGETS="-C deploy/kubernetes test" - TRAVIS_K8S_VERSION=v1.10.0 stage: test + - os: linux + dist: xenial + install: deploy/kubernetes/.travis/setup.sh + env: + - TARGETS="-C deploy/kubernetes test" + - TRAVIS_K8S_VERSION=v1.15.3 + - TRAVIS_MINIKUBE_VERSION=v1.3.1 + stage: test + addons: + apt: + update: true + packages: + - python-virtualenv + - libpcap-dev + - xsltproc + - libxml2-utils + - librpm-dev + # TODO include 1.11 once minikube supports it #- os: linux # install: deploy/kubernetes/.travis/setup.sh diff --git a/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc index 61ef2017..15efed8e 100644 --- a/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG-developer.asciidoc @@ -12,11 +12,26 @@ other Beats should be migrated. Note: This changelog was only started after the 6.3 release. +=== Beats version 7.5.1 +https://github.com/elastic/beats/compare/v7.5.0..v7.5.1[Check the HEAD diff] + +=== Beats version 7.5.0 +https://github.com/elastic/beats/compare/v7.4.1..v7.5.0[Check the HEAD diff] + +==== Breaking changes + +- Build docker and kubernetes features only on supported platforms. 
{pull}13509[13509] +- Need to register new processors to be used in the JS processor in their `init` functions. {pull}13509[13509] + +==== Added + +- Compare event by event in `testadata` framework to avoid sorting problems {pull}13747[13747] + === Beats version 7.4.1 -https://github.com/elastic/beats/compare/v7.4.0..v7.4.1[Check the HEAD diff +https://github.com/elastic/beats/compare/v7.4.0..v7.4.1[Check the HEAD diff] === Beats version 7.4.0 -https://github.com/elastic/beats/compare/v7.3.1..v7.4.0[Check the HEAD diff +https://github.com/elastic/beats/compare/v7.3.1..v7.4.0[Check the HEAD diff] ==== Breaking changes @@ -70,7 +85,6 @@ https://github.com/elastic/beats/compare/v7.1.1..v7.2.0[Check the HEAD diff] - Add new option `IgnoreAllErrors` to `libbeat.common.schema` for skipping fields that failed while converting. {pull}12089[12089] - Deprecate setup cmds for `template` and `ilm-policy`. Add new setup cmd for `index-management`. {pull}12132[12132] - === Beats version 7.1.1 https://github.com/elastic/beats/compare/v7.1.0..v7.1.1[Check the HEAD diff] diff --git a/vendor/github.com/elastic/beats/CHANGELOG-developer.next.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG-developer.next.asciidoc index 043ac9b6..6e271910 100644 --- a/vendor/github.com/elastic/beats/CHANGELOG-developer.next.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG-developer.next.asciidoc @@ -21,10 +21,11 @@ The list below covers the major changes between 7.0.0-rc2 and master only. ==== Breaking changes - Move Fields from package libbeat/common to libbeat/mapping. {pull}11198[11198] +- For "metricbeat style" generated custom beats, the mage target `GoTestIntegration` has changed to `GoIntegTest` and `GoTestUnit` has changed to `GoUnitTest`. {pull}13341[13341] ==== Bugfixes -- Stop using `mage:import` in community beats. This was ignoring the vendorized beats directory for some mage targets, using the code available in GOPATH, this causes inconsistencies and compilation problems if the version of the code in the GOPATH is different to the vendored one. Use of `mage:import` will continue to be unsupported in custom beats till beats is migrated to go modules, or mage supports vendored dependencies. {issue}13998[13998] {pull}[] +- Stop using `mage:import` in community beats. This was ignoring the vendorized beats directory for some mage targets, using the code available in GOPATH, this causes inconsistencies and compilation problems if the version of the code in the GOPATH is different to the vendored one. Use of `mage:import` will continue to be unsupported in custom beats till beats is migrated to go modules, or mage supports vendored dependencies. {issue}13998[13998] {pull}14162[14162] ==== Added @@ -45,4 +46,10 @@ The list below covers the major changes between 7.0.0-rc2 and master only. - Use the go-lookslike library for testing in heartbeat. Eventually the mapval package will be replaced with it. {pull}12540[12540] - New ReporterV2 interfaces that can receive a context on `Fetch(ctx, reporter)`, or `Run(ctx, reporter)`. {pull}11981[11981] - Generate configuration from `mage` for all Beats. {pull}12618[12618] +- Add ClientFactory to TCP input source to add SplitFunc/NetworkFuncs per client. {pull}8543[8543] +- Introduce beat.OutputChooses publisher mode. {pull}12996[12996] +- Ensure that beat.Processor, beat.ProcessorList, and processors.ProcessorList are compatible and can be composed more easily. {pull}12996[12996] +- Add support to close beat.Client via beat.CloseRef (a subset of context.Context). 
{pull}13031[13031] +- Add checks for types and formats used in fields definitions in `fields.yml` files. {pull}13188[13188] +- Makefile included in generator copies files from beats repository using `git archive` instead of cp. {pull}13193[13193] - Strip debug symbols from binaries to reduce binary sizes. {issue}12768[12768] diff --git a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc index dede1c01..342abf8c 100644 --- a/vendor/github.com/elastic/beats/CHANGELOG.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG.asciidoc @@ -3,10 +3,220 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-7.5.1]] +=== Beats version 7.5.1 +https://github.com/elastic/beats/compare/v7.5.0...v7.5.1[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Fix `proxy_url` option in Elasticsearch output. {pull}14950[14950] +- Fix bug with potential concurrent reads and writes from event.Meta map by Kafka output. {issue}14542[14542] {pull}14568[14568] + +*Filebeat* + +- Change iis url path grok pattern from URIPATH to NOTSPACE. {issue}12710[12710] {pull}13225[13225] {issue}7951[7951] {pull}13378[13378] {pull}14754[14754] +- Fix azure filesets test files. {issue}14185[14185] {pull}14235[14235] +- Update Logstash module's Grok patterns to support Logstash 7.4 logs. {pull}14743[14743] + +*Metricbeat* + +- Fix perfmon expanding counter path/adding counter to query when OS language is not english. {issue}14684[14684] {pull}14800[14800] +- Add extra check on `ignore_non_existent_counters` flag if the PdhExpandWildCardPathW returns no errors but does not expand the counter path successfully in windows/perfmon metricset. {pull}14797[14797] +- Fix rds metricset from reporting same values for different instances. {pull}14702[14702] +- Closing handler after verifying the registry key in diskio metricset. {issue}14683[14683] {pull}14759[14759] +- Fix docker network stats when multiple interfaces are configured. {issue}14586[14586] {pull}14825[14825] +- Fix ListMetrics pagination in aws module. {issue}14926[14926] {pull}14942[14942] +- Fix CPU count in docker/cpu in cases where no `online_cpus` are reported {pull}15070[15070] + +[[release-notes-7.5.0]] +=== Beats version 7.5.0 +https://github.com/elastic/beats/compare/v7.4.1...v7.5.0[View commits] + +==== Breaking changes + +*Affecting all Beats* + +- By default, all Beats-created files and folders will have a umask of 0027 (on POSIX systems). {pull}14119[14119] + +*Filebeat* + +*Heartbeat* + +- JSON/Regex checks against HTTP bodies will only consider the first 100MiB of the HTTP body to prevent excessive memory usage. {pull}14223[14223] + +*Metricbeat* + +==== Bugfixes + +*Affecting all Beats* + +- Disable `add_kubernetes_metadata` if no matchers found. {pull}13709[13709] +- Better wording for xpack beats when the _xpack endpoint is not reachable. {pull}13771[13771] +- Kubernetes watcher at `add_kubernetes_metadata` fails with StatefulSets {pull}13905[13905] +- Fix panics that could result from invalid TLS certificates. This can affect Beats that connect over TLS or Beats that accept connections over TLS and validate client certificates. {pull}14146[14146] +- Fix memory leak in kubernetes autodiscover provider and add_kubernetes_metadata processor happening when pods are terminated without sending a delete event. 
{pull}14259[14259] +- Fix kubernetes `metaGenerator.ResourceMetadata` when parent reference controller is nil {issue}14320[14320] {pull}14329[14329] + +*Auditbeat* + +- Socket dataset: Fix start errors when IPv6 is disabled on the kernel. {issue}13953[13953] {pull}13966[13966] + +*Filebeat* + +- Fix a denial of service flaw when parsing malformed DSA public keys in Go. +If {filebeat} is configured to accept incoming TLS connections with client +authentication enabled, a remote attacker could cause the Beat to stop +processing events. (CVE-2019-17596) See https://www.elastic.co/community/security/ +- Fix timezone parsing of rabbitmq module ingest pipelines. {pull}13879[13879] +- Fix conditions and error checking of date processors in ingest pipelines that use `event.timezone` to parse dates. {pull}13883[13883] +- Fix timezone parsing of Cisco module ingest pipelines. {pull}13893[13893] +- Fix timezone parsing of logstash module ingest pipelines. {pull}13890[13890] +- Fix timezone parsing of iptables, mssql and panw module ingest pipelines. {pull}13926[13926] +- Fixed increased memory usage with large files when multiline pattern does not match. {issue}14068[14068] +- Fix azure fields names. {pull}14098[14098] {pull}14132[14132] +- Fix calculation of `network.bytes` and `network.packets` for bi-directional netflow events. {pull}14111[14111] +- Accept '-' as http.response.body.bytes in apache module. {pull}14137[14137] +- Fix timezone parsing of MySQL module ingest pipelines. {pull}14130[14130] +- Improve error message in s3 input when handleSQSMessage failed. {pull}14113[14113] +- Fix race condition in S3 input plugin. {pull}14359[14359] + +*Heartbeat* + +- Fix storage of HTTP bodies to work when JSON/Regex body checks are enabled. {pull}14223[14223] + +*Metricbeat* + +- Fix a denial of service flaw when parsing malformed DSA public keys in Go. +If {metricbeat} is configured to accept incoming TLS connections with client +authentication enabled, a remote attacker could cause the Beat to stop +processing events. (CVE-2019-17596) See https://www.elastic.co/community/security/ +- PdhExpandWildCardPathW will not expand counter paths in 32 bit windows systems, workaround will use a different function. {issue}12590[12590] {pull}12622[12622] +- Fix `docker.cpu.system.pct` calculation by using the reported number online cpus instead of the number of metrics per cpu. {pull}13691[13691] +- Change kubernetes.event.message to text {pull}13964[13964] +- Fix performance counter values for windows/perfmon metricset.{issue}14036[14036] {pull}14039[14039] {pull}14108[14108] +- Add FailOnRequired when applying schema and fix metric names in mongodb metrics metricset. {pull}14143[14143] +- Convert indexed ms-since-epoch timestamp fields in `elasticsearch/ml_job` metricset to ints from float64s. {issue}14220[14220] {pull}14222[14222] +- Fix ARN parsing function to work for ELB ARNs. {pull}14316[14316] +- Update azure configuration example. {issue}14224[14224] +- Limit some of the error messages to the logs only {issue}14317[14317] {pull}14327[14327] +- Fix cloudwatch metricset with names and dimensions in config. {issue}14376[14376] {pull}14391[14391] +- Fix marshaling of ms-since-epoch values in `elasticsearch/cluster_stats` metricset. {pull}14378[14378] + +*Packetbeat* + +- Fix parsing of the HTTP host header when it contains a port or an IPv6 address. {pull}14215[14215] + + +==== Added + +*Affecting all Beats* + +- Fail with error when autodiscover providers have no defined configs. 
{pull}13078[13078] +- Add autodetection mode for add_docker_metadata and enable it by default in included configuration files{pull}13374[13374] +- Add autodetection mode for add_kubernetes_metadata and enable it by default in included configuration files. {pull}13473[13473] +- Use less restrictive API to check if template exists. {pull}13847[13847] +- Do not check for alias when setup.ilm.check_exists is false. {pull}13848[13848] +- Add support for numeric time zone offsets in timestamp processor. {pull}13902[13902] +- Add condition to the config file template for add_kubernetes_metadata {pull}14056[14056] +- Marking Central Management deprecated. {pull}14018[14018] +- Add `keep_null` setting to allow Beats to publish null values in events. {issue}5522[5522] {pull}13928[13928] +- Add shared_credential_file option in aws related config for specifying credential file directory. {issue}14157[14157] {pull}14178[14178] +- Ensure that init containers are no longer tailed after they stop. {pull}14394[14394] +- Libbeat HTTP's Server can listen to a unix socket using the `unix:///tmp/hello.sock` syntax. {pull}13655[13655] +- Libbeat HTTP's Server can listen to a Windows named pipe using the `npipe:///hello` syntax. {pull}13655[13655] +- Adding new `Enterprise` license type to the licenser. {issue}14246[14246] + +*Auditbeat* + +- Socket: Add DNS enrichment. {pull}14004[14004] + +*Filebeat* + +- Add support for virtual host in Apache access logs {pull}12778[12778] +- Update CoreDNS module to populate ECS DNS fields. {issue}13320[13320] {pull}13505[13505] +- Parse query steps in PostgreSQL slowlogs. {issue}13496[13496] {pull}13701[13701] +- Add filebeat azure module with activitylogs, auditlogs, signinlogs filesets. {pull}13776[13776] +- Add support to set the document id in the json reader. {pull}5844[5844] +- Add input httpjson. {issue}13545[13545] {pull}13546[13546] +- Filebeat Netflow input: Remove beta label. {pull}13858[13858] +- Remove `event.timezone` from events that don't need it in some modules that support log formats with and without timezones. {pull}13918[13918] +- Add ExpandEventListFromField config option in the kafka input. {pull}13965[13965] +- Add ELB fileset to AWS module. {pull}14020[14020] +- Add module for MISP (Malware Information Sharing Platform). {pull}13805[13805] +- Add filebeat azure module with activitylogs, auditlogs, signinlogs filesets. {pull}13776[13776] {pull}14033[14033] {pull}14107[14107] +- Add support for all the ObjectCreated events in S3 input. {pull}14077[14077] +- Add `source.bytes` and `source.packets` for uni-directional netflow events. {pull}14111[14111] +- Add Kibana Dashboard for MISP module. {pull}14147[14147] +- Add support for gzipped files in S3 input {pull}13980[13980] +- Add Filebeat Azure Dashboards {pull}14127[14127] + + +*Heartbeat* +- Add non-privileged icmp on linux and darwin(mac). {pull}13795[13795] {issue}11498[11498] +- Allow `hosts` to be used to configure http monitors {pull}13703[13703] + +*Metricbeat* + +- Add refresh list of perf counters at every fetch {issue}13091[13091] +- Add proc/vmstat data to the system/memory metricset on linux {pull}13322[13322] +- Add support for NATS version 2. {pull}13601[13601] +- Add `docker.cpu.*.norm.pct` metrics for `cpu` metricset of Docker Metricbeat module. {pull}13695[13695] +- Add `instance` label by default when using Prometheus collector. {pull}13737[13737] +- Add azure module. 
{pull}13196[13196] {pull}13859[13859] {pull}13988[13988] +- Add Apache Tomcat module {pull}13491[13491] +- Add ECS `container.id` and `container.runtime` to kubernetes `state_container` metricset. {pull}13884[13884] +- Add `job` label by default when using Prometheus collector. {pull}13878[13878] +- Add `state_resourcequota` metricset for Kubernetes module. {pull}13693[13693] +- Add tags filter in ec2 metricset. {pull}13872[13872] {issue}13145[13145] +- Add cloud.account.id and cloud.account.name into events from aws module. {issue}13551[13551] {pull}13558[13558] +- Add `metrics_path` as known hint for autodiscovery {pull}13996[13996] +- Leverage KUBECONFIG when creating k8s client. {pull}13916[13916] +- Add ability to filter by tags for cloudwatch metricset. {pull}13758[13758] {issue}13145[13145] +- Release cloudwatch, s3_daily_storage, s3_request, sqs and rds metricset as GA. {pull}14114[14114] {issue}14059[14059] +- Add `elasticsearch/enrich` metricset. {pull}14243[14243] {issue}14221[14221] +- Add new dashboards for Azure vms, vm guest metrics, vm scale sets {pull}14000[14000] + +*Functionbeat* + +- Make `bulk_max_size` configurable in outputs. {pull}13493[13493] + +*Winlogbeat* + +- Fill `event.provider`. {pull}13937[13937] +- Add support for user management events to the Security module. {pull}13530[13530] + +==== Deprecated + +*Metricbeat* + +- `kubernetes.container.id` field for `state_container` is deprecated in favour of ECS `container.id` and `container.runtime`. {pull}13884[13884] + [[release-notes-7.4.1]] === Beats version 7.4.1 https://github.com/elastic/beats/compare/v7.4.0...v7.4.1[View commits] +==== Breaking changes + +*Affecting all Beats* + +*Auditbeat* + +*Filebeat* + +*Heartbeat* + +*Journalbeat* + +*Metricbeat* + +*Packetbeat* + +*Winlogbeat* + +*Functionbeat* + ==== Bugfixes *Affecting all Beats* @@ -16,8 +226,6 @@ https://github.com/elastic/beats/compare/v7.4.0...v7.4.1[View commits] *Auditbeat* -- Socket dataset: Fix start errors when IPv6 is disabled on the kernel. {issue}13953[13953] {pull}13966[13966] - *Filebeat* - Fixed early expiration of templates (Netflow v9 and IPFIX). {pull}13821[13821] @@ -27,12 +235,63 @@ https://github.com/elastic/beats/compare/v7.4.0...v7.4.1[View commits] - Fix delay in enforcing close_renamed and close_removed options. {issue}13488[13488] {pull}13907[13907] - Fix missing netflow fields in index template. {issue}13768[13768] {pull}13914[13914] - Fix cisco module's asa and ftd filesets parsing of domain names where an IP address is expected. {issue}14034[14034] -- Fixed increased memory usage with large files when multiline pattern does not match. {issue}14068[14068] + +*Heartbeat* + +*Journalbeat* *Metricbeat* - Mark Kibana usage stats as collected only if API call succeeds. 
{pull}13881[13881] +*Packetbeat* + +*Winlogbeat* + +*Functionbeat* + +==== Added + +*Affecting all Beats* + +*Auditbeat* + +*Filebeat* + +*Heartbeat* + +*Journalbeat* + +*Metricbeat* + +*Packetbeat* + +*Functionbeat* + +*Winlogbeat* + +==== Deprecated + +*Affecting all Beats* + +*Filebeat* + +*Heartbeat* + +*Journalbeat* + +*Metricbeat* + +*Packetbeat* + +*Winlogbeat* + +*Functionbeat* + +==== Known Issue + +*Journalbeat* + [[release-notes-7.4.0]] === Beats version 7.4.0 https://github.com/elastic/beats/compare/v7.3.1...v7.4.0[View commits] diff --git a/vendor/github.com/elastic/beats/CHANGELOG.next.asciidoc b/vendor/github.com/elastic/beats/CHANGELOG.next.asciidoc index 3d54e64b..20b30a38 100644 --- a/vendor/github.com/elastic/beats/CHANGELOG.next.asciidoc +++ b/vendor/github.com/elastic/beats/CHANGELOG.next.asciidoc @@ -11,19 +11,21 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* - Update to Golang 1.12.1. {pull}11330[11330] -- Disable Alibaba Cloud and Tencent Cloud metadata providers by default. {pull}13812[12812] *Auditbeat* *Filebeat* +- Fix parsing of Elasticsearch node name by `elasticsearch/slowlog` fileset. {pull}14547[14547] *Heartbeat* *Journalbeat* +- Remove broken dashboard. {pull}15288[15288] + *Metricbeat* - kubernetes.container.cpu.limit.cores and kubernetes.container.cpu.requests.cores are now floats. {issue}11975[11975] @@ -35,28 +37,46 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Functionbeat* + ==== Bugfixes *Affecting all Beats* - Fix a race condition with the Kafka pipeline client, it is possible that `Close()` get called before `Connect()` . {issue}11945[11945] +- Allow users to configure only `cluster_uuid` setting under `monitoring` namespace. {pull}14338[14338] *Auditbeat* *Filebeat* -- panw module: Use geo.name instead of geo.country_iso_code for free-form location. {issue}13272[13272] +- cisco/asa fileset: Fix parsing of 302021 message code. {pull}14519[14519] +- Fix filebeat azure dashboards, event category should be `Alert`. {pull}14668[14668] +- Check content-type when creating new reader in s3 input. {pull}15252[15252] {issue}15225[15225] +- Fix session reset detection and a crash in Netflow input. {pull}14904[14904] +- netflow: Allow for options templates without scope fields. {pull}15449[15449] +- netflow: Fix bytes/packets counters on some devices (NSEL and Netstream). {pull}15449[15449] +- netflow: Fix compatibility with some Cisco devices by changing the field `class_id` from short to long. {pull}15449[15449] +- Fixed dashboard for Cisco ASA Firewall. {issue}15420[15420] {pull}15553[15553] *Heartbeat* +- Fix recording of SSL cert metadata for Expired/Unvalidated x509 certs. {pull}13687[13687] *Journalbeat* *Metricbeat* -- Ignore prometheus untyped metrics with NaN value. {issue}13750[13750] {pull}13790[13790] +- Fix checking tagsFilter using length in cloudwatch metricset. {pull}14525[14525] +- Fixed bug with `elasticsearch/cluster_stats` metricset not recording license expiration date correctly. {issue}14541[14541] {pull}14591[14591] +- Log bulk failures from bulk API requests to monitoring cluster. {issue}14303[14303] {pull}14356[14356] +- Fix regular expression to detect instance name in perfmon metricset. {issue}14273[14273] {pull}14666[14666] +- Fixed bug with `elasticsearch/cluster_stats` metricset not recording license ID in the correct field. 
{pull}14592[14592] +- Fix `docker.container.size` fields values {issue}14979[14979] {pull}15224[15224] +- Make `kibana` module more resilient to Kibana unavailability. {issue}15258[15258] {pull}15270[15270] +- Fix panic exception with some unicode strings in perfmon metricset. {issue}15264[15264] +- Make `logstash` module more resilient to Logstash unavailability. {issue}15276[15276] {pull}15306[15306] *Packetbeat* @@ -71,9 +91,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Affecting all Beats* -- Decouple Debug logging from fail_on_error logic for rename, copy, truncate processors {pull}12451[12451] -- Allow a beat to ship monitoring data directly to an Elasticsearch monitoring cluster. {pull}9260[9260] -- Add `providers` setting to `add_cloud_metadata` processor. {pull}13812[13812] +- Add a friendly log message when a request to docker has exceeded the deadline. {pull}15336[15336] *Auditbeat* @@ -81,7 +99,8 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Filebeat* - `container` and `docker` inputs now support reading of labels and env vars written by docker JSON file logging driver. {issue}8358[8358] -- Use correct OS path separator in `add_kubernetes_metadata` to support Windows nodes. {pull}9205[9205] +- Add `index` option to all inputs to directly set a per-input index value. {pull}14010[14010] +- Include log.source.address for unparseable syslog messages. {issue}13268[13268] {pull}15453[15453] *Heartbeat* @@ -93,6 +112,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Packetbeat* + *Functionbeat* @@ -112,6 +132,7 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d *Metricbeat* + *Packetbeat* *Winlogbeat* @@ -121,3 +142,4 @@ https://github.com/elastic/beats/compare/v7.0.0-alpha2...master[Check the HEAD d ==== Known Issue *Journalbeat* + diff --git a/vendor/github.com/elastic/beats/Makefile b/vendor/github.com/elastic/beats/Makefile index 15580723..a75c772d 100644 --- a/vendor/github.com/elastic/beats/Makefile +++ b/vendor/github.com/elastic/beats/Makefile @@ -98,16 +98,12 @@ check: python-env @git diff-index --exit-code HEAD -- .PHONY: check-headers -check-headers: - @go get -u github.com/elastic/go-licenser - @go-licenser -d -exclude x-pack - @go-licenser -d -license Elastic x-pack +check-headers: mage + @mage checkLicenseHeaders .PHONY: add-headers -add-headers: - @go get github.com/elastic/go-licenser - @go-licenser -exclude x-pack - @go-licenser -license Elastic x-pack +add-headers: mage + @mage addLicenseHeaders # Corrects spelling errors .PHONY: misspell diff --git a/vendor/github.com/elastic/beats/NOTICE.txt b/vendor/github.com/elastic/beats/NOTICE.txt index 46798974..d7ad0ffb 100644 --- a/vendor/github.com/elastic/beats/NOTICE.txt +++ b/vendor/github.com/elastic/beats/NOTICE.txt @@ -1,5 +1,5 @@ Elastic Beats -Copyright 2014-2019 Elasticsearch BV +Copyright 2014-2020 Elasticsearch BV This product includes software developed by The Apache Software Foundation (http://www.apache.org/). 
@@ -79,6 +79,15 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: contrib.go.opencensus.io/exporter/ocagent +Revision: 8110e6c0236bb231b19119275a6be6ec666d05c8 +License type (autodetected): Apache-2.0 +./vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/aerospike/aerospike-client-go Revision: 0f3b54da6bdc2c31c505f9afbc5f434dd2089658 @@ -227,10 +236,6 @@ License type (autodetected): Apache-2.0 -------------------------------------------------------------------- Apache License 2.0 --------NOTICE.txt----- -AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. -Copyright 2014-2015 Stripe, Inc. -------------------------------------------------------------------- Dependency: github.com/awslabs/goformation @@ -244,6 +249,57 @@ Apache License 2.0 GoFormation Copyright 2011-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. +-------------------------------------------------------------------- +Dependency: github.com/Azure/azure-sdk-for-go +Revision: 7a9d2769e4a581b0b1bc609c71b59af043e05c98 +License type (autodetected): Apache-2.0 +./vendor/github.com/Azure/azure-sdk-for-go/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). + +-------------------------------------------------------------------- +Dependency: github.com/Azure/go-autorest +Revision: ba1147dc57f993013ef255c128ca1cac8a557409 +License type (autodetected): Apache-2.0 +./vendor/github.com/Azure/go-autorest/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/beorn7/perks +Revision: 37c8de3658fcb183f997c4e13e8337516ab753e6 +License type (autodetected): MIT +./vendor/github.com/beorn7/perks/LICENSE: +-------------------------------------------------------------------- +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ -------------------------------------------------------------------- Dependency: github.com/bsm/sarama-cluster Revision: 7e67d87a6b3f83fe08c096fd084691bd9dca112f @@ -273,6 +329,55 @@ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/census-instrumentation/opencensus-proto +Revision: 26aa36c099c2041b432cf3cc8a26c5fb858d218b +License type (autodetected): Apache-2.0 +./vendor/github.com/census-instrumentation/opencensus-proto/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/cloudflare/cfssl +Revision: b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b +License type (autodetected): BSD-2-Clause +./vendor/github.com/cloudflare/cfssl/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2014 CloudFlare Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +Dependency: github.com/containerd/continuity +Revision: 75bee3e2ccb6402e3a986ab8bd3b17003fc0fdec +License type (autodetected): Apache-2.0 +./vendor/github.com/containerd/continuity/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/coreos/bbolt Revision: af9db2027c98c61ecd8e17caa5bd265792b9b9a2 @@ -300,6 +405,21 @@ COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------- +Dependency: github.com/coreos/etcd +Revision: 4d210173ae0d59d4d746735fdd26839513aadaf1 +License type (autodetected): Apache-2.0 +./vendor/github.com/coreos/etcd/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). + -------------------------------------------------------------------- Dependency: github.com/coreos/go-systemd Version: v18 @@ -441,6 +561,21 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/dgrijalva/jwt-go +Revision: 5e25c22bd5d6de03265bbe5462dcd162f85046f6 +License type (autodetected): MIT +./vendor/github.com/dgrijalva/jwt-go/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + -------------------------------------------------------------------- Dependency: github.com/digitalocean/go-libvirt Revision: 59d541f19311883ad82708651353009fb207d8a9 @@ -450,6 +585,15 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/dimchansky/utfbom +Revision: d2133a1ce379ef6fa992b0514a77146c60db9d1c +License type (autodetected): Apache-2.0 +./vendor/github.com/dimchansky/utfbom/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/dlclark/regexp2 Revision: 7632a260cbaf5e7594fc1544a503456ecd0827f1 @@ -489,7 +633,8 @@ Apache License 2.0 -------------------------------------------------------------------- Dependency: github.com/docker/docker -Revision: 1009e6a40b295187e038b67e184e9c0384d95538 +Version: v19.03.2 +Revision: ed20165a37b40ff1cfbe55e218344c5e89f30ee2 License type (autodetected): Apache-2.0 ./vendor/github.com/docker/docker/LICENSE: -------------------------------------------------------------------- @@ -526,6 +671,41 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/docker/go-events +Revision: e31b211e4f1cd09aa76fe4ac244571fab96ae47f +License type (autodetected): Apache-2.0 +./vendor/github.com/docker/go-events/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/docker/go-metrics +Revision: b619b3592b65de4f087d9f16863a7e6ff905973c +License type (autodetected): Apache-2.0 +./vendor/github.com/docker/go-metrics/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
+ -------------------------------------------------------------------- Dependency: github.com/docker/go-units Revision: 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 @@ -535,6 +715,24 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/docker/libkv +Revision: 458977154600b9f23984d9f4b82e79570b5ae12b +License type (autodetected): Apache-2.0 +./vendor/github.com/docker/libkv/LICENSE.code: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/docker/libnetwork +Revision: 92d1fbe1eb0883cf11d283cea8e658275146411d +License type (autodetected): Apache-2.0 +./vendor/github.com/docker/libnetwork/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/docker/libtrust Revision: aabc10ec26b754e797f9028f4589c5b7bd90dc20 @@ -544,6 +742,15 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/docker/swarmkit +Revision: 958d149179db019aef3a065f23b35455b2dd54ca +License type (autodetected): Apache-2.0 +./vendor/github.com/docker/swarmkit/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/dop251/goja Revision: dd2ac4456e2073f116d6b88741d513addabe0326 @@ -707,21 +914,6 @@ License type (autodetected): Apache-2.0 -------------------------------------------------------------------- Apache License 2.0 --------NOTICE.txt----- -Elastic Common Schema -Copyright 2018 Elasticsearch B.V. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -------------------------------------------------------------------- Dependency: github.com/elastic/go-libaudit @@ -732,17 +924,27 @@ License type (autodetected): Apache-2.0 -------------------------------------------------------------------- Apache License 2.0 --------NOTICE.txt----- -Elastic go-libaudit -Copyright 2017-2018 Elasticsearch B.V. + +-------------------------------------------------------------------- +Dependency: github.com/elastic/go-licenser +Version: 0.2.0 +Revision: 2b2abd4ee9b58025ebd0630d7621cfd7619f58ac +License type (autodetected): Apache-2.0 +./vendor/github.com/elastic/go-licenser/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Elastic go-licenser +Copyright 2018 Elasticsearch B.V. This product includes software developed at Elasticsearch, B.V. (https://www.elastic.co/). 
-------------------------------------------------------------------- Dependency: github.com/elastic/go-lookslike -Version: v0.2.0 -Revision: 807124eb9fc6684949aa99744577175fd6bac4fd +Version: =v0.3.0 +Revision: 747dc7db1c961662d8e225a42af6c3859a1a0f1d License type (autodetected): Apache-2.0 ./vendor/github.com/elastic/go-lookslike/LICENSE: -------------------------------------------------------------------- @@ -801,12 +1003,6 @@ License type (autodetected): Apache-2.0 -------------------------------------------------------------------- Apache License 2.0 --------NOTICE.txt----- -Elastic go-seccomp-bpf -Copyright 2018 Elasticsearch B.V. - -This product includes software developed at -Elasticsearch, B.V. (https://www.elastic.co/). -------------------------------------------------------------------- Dependency: github.com/elastic/go-structform @@ -820,19 +1016,13 @@ Apache License 2.0 -------------------------------------------------------------------- Dependency: github.com/elastic/go-sysinfo -Version: v1.0.2 -Revision: 06c1f463545498d8f4b378d4dcf3171794c28537 +Version: v1.1.0 +Revision: 51d9d1362d77a4792dfb39a7a19f056cdf1b9840 License type (autodetected): Apache-2.0 ./vendor/github.com/elastic/go-sysinfo/LICENSE.txt: -------------------------------------------------------------------- Apache License 2.0 --------NOTICE.txt----- -Elastic go-sysinfo -Copyright 2017-2019 Elasticsearch B.V. - -This product includes software developed at -Elasticsearch, B.V. (https://www.elastic.co/). -------------------------------------------------------------------- Dependency: github.com/elastic/go-txfile @@ -863,12 +1053,6 @@ License type (autodetected): Apache-2.0 -------------------------------------------------------------------- Apache License 2.0 --------NOTICE.txt----- -Elastic go-windows -Copyright 2017-2019 Elasticsearch B.V. - -This product includes software developed at -Elasticsearch, B.V. (https://www.elastic.co/). -------------------------------------------------------------------- Dependency: github.com/elastic/gosigar @@ -1062,17 +1246,31 @@ Apache License 2.0 -------------------------------------------------------------------- Dependency: github.com/go-ole/go-ole -Revision: de8695c8edbf8236f30d6e1376e20b198a028d42 +Revision: 14974a1cf6477f616180232977d8ab4791ea8820 License type (autodetected): MIT ./vendor/github.com/go-ole/go-ole/LICENSE: -------------------------------------------------------------------- -Copyright © 2013-2018 Yasuhiro Matsumoto, http://mattn.kaoriya.net +The MIT License (MIT) -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Copyright © 2013-2017 Yasuhiro Matsumoto, -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/go-sourcemap/sourcemap @@ -1544,7 +1742,7 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/gogo/protobuf -Revision: 65acae22fc9d1fe290b33faa2bd64cdc20a463a0 +Revision: 4c00d2f19fb91be5fecd8681fa83450a2a979e69 License type (autodetected): BSD-3-Clause ./vendor/github.com/gogo/protobuf/LICENSE: -------------------------------------------------------------------- @@ -1627,10 +1825,18 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/golang/glog +Revision: 23def4e6c14b4da8ac2ed8007337bc5eb5007998 +License type (autodetected): Apache-2.0 +./vendor/github.com/golang/glog/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/golang/protobuf -Version: v1.3.1 -Revision: b5d812f8a3706043e23a9cd5babf2e5423744d30 +Revision: 6c65a5562fc06764971b7c5d05c76c75e84bdbf7 License type (autodetected): BSD-3-Clause ./vendor/github.com/golang/protobuf/LICENSE: -------------------------------------------------------------------- @@ -1735,6 +1941,15 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------- +Dependency: github.com/google/certificate-transparency-go +Revision: 2c006aff63ed2c60653701dfb7b53424339382b1 +License type (autodetected): Apache-2.0 +./vendor/github.com/google/certificate-transparency-go/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/google/flatbuffers Revision: 7a6b2bf521e95097a92ec848001531b2dcf0f3fa @@ -1787,6 +2002,41 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/google/gopacket +Revision: 0ad7f2610e344e58c1c95e2adda5c3258da8e97b +License type (autodetected): BSD-3-Clause +./vendor/github.com/google/gopacket/LICENSE: +-------------------------------------------------------------------- +Copyright (c) 2012 Google, Inc. All rights reserved. +Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Andreas Krennmair, Google, nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + -------------------------------------------------------------------- Dependency: github.com/google/uuid Revision: 281f560d28af7174109514e936f94c2ab2cb2823 @@ -1874,6 +2124,789 @@ License type (autodetected): Apache-2.0 Apache License 2.0 +-------------------------------------------------------------------- +Dependency: github.com/grpc-ecosystem/go-grpc-prometheus +Revision: ae0d8660c5f2108ca70a3776dbe0fb53cf79f1da +License type (autodetected): Apache-2.0 +./vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/grpc-ecosystem/grpc-gateway +Revision: d63917fcb0d53f39184485b9b6a0893af18a5668 +License type (autodetected): BSD-3-Clause +./vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt: +-------------------------------------------------------------------- +Copyright (c) 2015, Gengo, Inc. +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +-------------------------------------------------------------------- +Dependency: github.com/hashicorp/go-immutable-radix +Revision: 0146a9aba1948ded4ed290cfd3fded2c15313f63 +License type (autodetected): MPL-2.0 +./vendor/github.com/hashicorp/go-immutable-radix/LICENSE: +-------------------------------------------------------------------- +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. 
any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. 
Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. 
Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + + +-------------------------------------------------------------------- +Dependency: github.com/hashicorp/go-memdb +Revision: 5500ca0de0dab231b02aedabac095d43a59f31d2 +License type (autodetected): MPL-2.0 +./vendor/github.com/hashicorp/go-memdb/LICENSE: +-------------------------------------------------------------------- +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. 
For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + + -------------------------------------------------------------------- Dependency: github.com/hashicorp/go-uuid Revision: 4f571afc59f3043a65f8fe6bf46d887b10a01d43 @@ -2693,6 +3726,15 @@ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------- +Dependency: github.com/ishidawataru/sctp +Revision: 7c296d48a2b553e41cc06904a1e6317a20694dc0 +License type (autodetected): Apache-2.0 +./vendor/github.com/ishidawataru/sctp/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: github.com/jcmturner/gofork Revision: dc7c13fece037a4a36e2b3c69db4991498d30692 @@ -3013,6 +4055,17 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/matttproud/golang_protobuf_extensions +Revision: c182affec369e30f25d3eb8cd8a478dee585ae7d +License type (autodetected): Apache-2.0 +./vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) + -------------------------------------------------------------------- Dependency: github.com/matttproud/golang_protobuf_extensions Revision: c12348ce28de40eed0136aa2b644d0ee0650e56c @@ -3026,7 +4079,8 @@ Copyright 2012 Matt T. Proud (matt.proud@gmail.com) -------------------------------------------------------------------- Dependency: github.com/Microsoft/go-winio -Revision: f533f7a102197536779ea3a8cb881d639e21ec5a +Version: v0.4.14 +Revision: 6c72808b55902eae4c5943626030429ff20f3b63 License type (autodetected): MIT ./vendor/github.com/Microsoft/go-winio/LICENSE: -------------------------------------------------------------------- @@ -3053,6 +4107,33 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/Microsoft/hcsshim +Revision: 84b0c364e1e3bb91e43b85bf20d72e7948666817 +License type (autodetected): MIT +./vendor/github.com/Microsoft/hcsshim/LICENSE: +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/miekg/dns Version: v1.1.15 @@ -3093,6 +4174,34 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/mitchellh/go-homedir +Revision: af06845cf3004701891bf4fdb884bfe4920b3727 +License type (autodetected): MIT +./vendor/github.com/mitchellh/go-homedir/LICENSE: +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + -------------------------------------------------------------------- Dependency: github.com/mitchellh/hashstructure Revision: ab25296c0f51f1022f01cd99dfb45f1775de8799 @@ -3369,6 +4478,53 @@ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/prometheus/client_golang +Revision: 35ef65db672a76effef5f0808decd0484a636f3f +License type (autodetected): Apache-2.0 +./vendor/github.com/prometheus/client_golang/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. 
Proud +Licensed under the Apache License, Version 2.0 + +-------------------------------------------------------------------- +Dependency: github.com/prometheus/client_model +License type (autodetected): Apache-2.0 +./vendor/github.com/prometheus/client_model/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + -------------------------------------------------------------------- Dependency: github.com/prometheus/client_model Revision: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c @@ -3384,6 +4540,21 @@ Copyright 2012-2015 The Prometheus Authors This product includes software developed at SoundCloud Ltd. (http://soundcloud.com/). +-------------------------------------------------------------------- +Dependency: github.com/prometheus/common +Revision: 637d7c34db122e2d1a25d061423098663758d2d3 +License type (autodetected): Apache-2.0 +./vendor/github.com/prometheus/common/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + +-------NOTICE----- +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + -------------------------------------------------------------------- Dependency: github.com/prometheus/common Revision: 89604d197083d4781071d3c65855d24ecfb0a563 @@ -3692,8 +4863,8 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------- Dependency: github.com/Shopify/sarama -Version: v1.23.1 -Revision: 46c83074a05474240f9620fb7c70fb0d80ca401a +Version: v1.23.1-elastic +Revision: 71dcfe72351b8daf910276a46540ca0b7bbe0a2b License type (autodetected): MIT ./vendor/github.com/Shopify/sarama/LICENSE: -------------------------------------------------------------------- @@ -4007,6 +5178,33 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------- +Dependency: github.com/weppos/publicsuffix-go +Revision: 120738c23213637160ef8bdcfae4b10bf42bfffc +License type (autodetected): MIT +./vendor/github.com/weppos/publicsuffix-go/LICENSE.txt: +-------------------------------------------------------------------- +The MIT License (MIT) + +Copyright (c) 2016-2018 Simone Carletti + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -------------------------------------------------------------------- Dependency: github.com/xdg/scram Revision: 7eeb5667e42c09cb51bf7b7c28aea8c56767da90 @@ -4053,6 +5251,24 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------- +Dependency: github.com/zmap/zcrypto +Revision: 9051775e6a2e3a89ec27977077b09f4496febecf +License type (autodetected): Apache-2.0 +./vendor/github.com/zmap/zcrypto/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + +-------------------------------------------------------------------- +Dependency: github.com/zmap/zlint +Revision: 5dcecad773158b82b5e52064ee2782d1b8a79314 +License type (autodetected): Apache-2.0 +./vendor/github.com/zmap/zlint/LICENSE: +-------------------------------------------------------------------- +Apache License 2.0 + + -------------------------------------------------------------------- Dependency: go.opencensus.io Version: v0.22.0 diff --git a/vendor/github.com/elastic/beats/auditbeat/Dockerfile b/vendor/github.com/elastic/beats/auditbeat/Dockerfile index c10786ee..4624e4c9 100644 --- a/vendor/github.com/elastic/beats/auditbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/auditbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12.9 +FROM golang:1.12.12 RUN \ apt-get update \ diff --git a/vendor/github.com/elastic/beats/auditbeat/auditbeat.docker.yml b/vendor/github.com/elastic/beats/auditbeat/auditbeat.docker.yml index 3178297b..a012bbb6 100644 --- a/vendor/github.com/elastic/beats/auditbeat/auditbeat.docker.yml +++ b/vendor/github.com/elastic/beats/auditbeat/auditbeat.docker.yml @@ -14,6 +14,7 @@ auditbeat.modules: - /etc processors: - add_cloud_metadata: ~ +- add_docker_metadata: ~ output.elasticsearch: hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' diff --git a/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml b/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml index 224b0a12..ac863cfe 100644 --- a/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml +++ b/vendor/github.com/elastic/beats/auditbeat/auditbeat.reference.yml @@ -38,6 +38,10 @@ auditbeat.modules: rate_limit: 0 include_raw_message: false include_warnings: false + + # Set to true to publish fields with null values in events. + #keep_null: false + # Load audit rules from separate files. Same format as audit.rules(7). audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ] audit_rules: | @@ -110,6 +114,9 @@ auditbeat.modules: # Detect changes to files included in subdirectories. Disabled by default. recursive: false + # Set to true to publish fields with null values in events. + #keep_null: false + #================================ General ====================================== @@ -1070,7 +1077,7 @@ setup.template.settings: #setup.ilm.enabled: auto # Set the prefix used in the index lifecycle write alias name. The default alias -# name is 'auditbeat-%{[agent.version]}'. +# name is 'auditbeat-%{[agent.version]}'. #setup.ilm.rollover_alias: "auditbeat" # Set the rollover index pattern. The default is "%{now/d}-000001". 
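The commented ILM defaults above correspond to three settings that work together. A minimal sketch, assuming the default alias and pattern values shown in the comments (adjust both for your own indices):

[source,yaml]
----
# Sketch only: ILM settings mirroring the commented defaults above.
setup.ilm.enabled: auto
# Prefix of the index lifecycle write alias; the effective default alias
# is 'auditbeat-%{[agent.version]}'.
setup.ilm.rollover_alias: "auditbeat"
# Rollover index pattern appended after the alias; default shown above.
setup.ilm.pattern: "{now/d}-000001"
----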
@@ -1333,12 +1340,21 @@ logging.files:
 
 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false
 
-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost
 
 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066
 
+# Define which user should own the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe. Use the Security
+# Descriptor Definition Language (SDDL) to define the permissions. This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
 #============================= Process Security ================================
 
 # Enable or disable seccomp system call filtering on Linux. Default is enabled.
diff --git a/vendor/github.com/elastic/beats/auditbeat/auditbeat.yml b/vendor/github.com/elastic/beats/auditbeat/auditbeat.yml
index 1e9e4c85..d3db66e6 100644
--- a/vendor/github.com/elastic/beats/auditbeat/auditbeat.yml
+++ b/vendor/github.com/elastic/beats/auditbeat/auditbeat.yml
@@ -148,6 +148,7 @@ output.elasticsearch:
 processors:
   - add_host_metadata: ~
   - add_cloud_metadata: ~
+  - add_docker_metadata: ~
 
 #================================ Logging =====================================
diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-filtering.asciidoc
index 617660b6..c5dfdd55 100644
--- a/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-filtering.asciidoc
+++ b/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-filtering.asciidoc
@@ -1,6 +1,6 @@
 [[filtering-and-enhancing-data]]
 == Filter and enhance the exported data
 
-include::{libbeat-dir}/docs/processors.asciidoc[]
+include::{libbeat-dir}/processors.asciidoc[]
 
-include::{libbeat-dir}/docs/processors-using.asciidoc[]
+include::{libbeat-dir}/processors-using.asciidoc[]
diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-general-options.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-general-options.asciidoc
index df2e6655..6fb7ba16 100644
--- a/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-general-options.asciidoc
+++ b/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-general-options.asciidoc
@@ -4,4 +4,4 @@
 You can specify settings in the +{beatname_lc}.yml+ config file to control the
 general behavior of {beatname_uc}.
 
-include::{libbeat-dir}/docs/generalconfig.asciidoc[]
+include::{libbeat-dir}/generalconfig.asciidoc[]
diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-options.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-options.asciidoc
new file mode 100644
index 00000000..8233f79c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/auditbeat/docs/auditbeat-options.asciidoc
@@ -0,0 +1,56 @@
+//////////////////////////////////////////////////////////////////////////
+//// This content is shared by all Auditbeat modules. Make sure you keep the
+//// descriptions generic enough to work for all modules. To include
+//// this file, use:
+////
+//// include::{docdir}/auditbeat-options.asciidoc[]
+////
+//////////////////////////////////////////////////////////////////////////
+
+[id="module-standard-options-{modulename}"]
+[float]
+==== Standard configuration options
+
+You can specify the following options for any {beatname_uc} module.
+
+*`module`*:: The name of the module to run.
+
+ifeval::["{modulename}"=="system"]
+*`datasets`*:: A list of datasets to execute.
+endif::[]
+
+*`enabled`*:: A Boolean value that specifies whether the module is enabled.
+
+ifeval::["{modulename}"=="system"]
+*`period`*:: The frequency at which the datasets check for changes. If a system
+is not reachable, {beatname_uc} returns an error for each period. This setting
+is required. For most datasets, especially `process` and `socket`, a shorter
+period is recommended.
+endif::[]
+
+*`fields`*:: A dictionary of fields that will be sent with the dataset event. This setting
+is optional.
+
+*`tags`*:: A list of tags that will be sent with the dataset event. This setting is
+optional.
+
+*`processors`*:: A list of processors to apply to the data generated by the dataset.
++
+See <<filtering-and-enhancing-data>> for information about specifying
+processors in your config.
+
+*`index`*:: If present, this formatted string overrides the index for events from this
+module (for elasticsearch outputs), or sets the `raw_index` field of the event's
+metadata (for other outputs). This string can only refer to the agent name and
+version and the event timestamp; for access to dynamic fields, use
+`output.elasticsearch.index` or a processor.
++
+Example value: `"%{[agent.name]}-myindex-%{+yyyy.MM.dd}"` might
+expand to +"{beatname_lc}-myindex-2019.12.13"+.
+
+*`keep_null`*:: If this option is set to true, fields with `null` values will be published in
+the output document. By default, `keep_null` is set to `false`.
+
+*`service.name`*:: A name given by the user to the service the data is collected from. It can be
+used, for example, to identify information collected from nodes of different
+clusters with the same `service.type`.
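The standard options described in this shared file compose into a single module block. A minimal sketch against the `file_integrity` module follows; the paths, tag, field, and index values are illustrative assumptions, not defaults:

[source,yaml]
----
auditbeat.modules:
- module: file_integrity
  paths: [/bin, /usr/sbin]    # illustrative watch list
  enabled: true               # standard option: toggle the module
  tags: ["fim"]               # sent with every dataset event
  fields:                     # optional static fields
    env: production
  keep_null: true             # publish fields whose value is null
  processors:                 # per-module processors
    - drop_fields:
        fields: ["event.original"]
  # May only reference agent name/version and the event timestamp:
  index: "%{[agent.name]}-fim-%{+yyyy.MM.dd}"
----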
diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc index 0f6fd289..007a0670 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/configuring-howto.asciidoc @@ -51,38 +51,38 @@ include::./auditbeat-general-options.asciidoc[] include::./reload-configuration.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] -include::{libbeat-dir}/docs/shared-ilm.asciidoc[] +include::{libbeat-dir}/shared-ilm.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./auditbeat-filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/regexp.asciidoc[] +include::{libbeat-dir}/regexp.asciidoc[] -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/faq.asciidoc index a5e7511c..d0f4fbe8 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/faq.asciidoc @@ -7,6 +7,6 @@ https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} discussion forum] include::./faq-ulimit.asciidoc[] -include::{libbeat-dir}/docs/faq-limit-bandwidth.asciidoc[] +include::{libbeat-dir}/faq-limit-bandwidth.asciidoc[] -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc index 4b0ee73d..493160de 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/getting-started.asciidoc @@ -1,7 +1,7 @@ [id="{beatname_lc}-getting-started"] == Getting started with {beatname_uc} -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <<{beatname_lc}-installation>> * <<{beatname_lc}-configuration>> @@ -16,14 +16,14 @@ include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] Install {beatname_uc} on all the servers you want to monitor. 
-include::{libbeat-dir}/docs/shared-download-and-install.asciidoc[] +include::{libbeat-dir}/shared-download-and-install.asciidoc[] [[deb]] *deb:* ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -42,7 +42,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -61,7 +61,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -75,14 +75,14 @@ tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz endif::[] -include::{libbeat-dir}/docs/shared-brew-install.asciidoc[] +include::{libbeat-dir}/shared-brew-install.asciidoc[] [[linux]] *linux:* ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -101,7 +101,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -122,7 +122,7 @@ See <> for deploying Docker containers. ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -162,7 +162,7 @@ For more information about these options, see [id="{beatname_lc}-configuration"] === Step 2: Configure {beatname_uc} -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] To configure {beatname_uc}: @@ -192,25 +192,25 @@ If you accept the default configuration without specifying additional modules, + See <> for more details about configuring modules. -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] -include::{libbeat-dir}/docs/step-look-at-config.asciidoc[] +include::{libbeat-dir}/step-look-at-config.asciidoc[] [id="{beatname_lc}-template"] === Step 3: Load the index template in {es} -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [[load-kibana-dashboards]] === Step 4: Set up the {kib} dashboards -include::{libbeat-dir}/docs/dashboards.asciidoc[] +include::{libbeat-dir}/dashboards.asciidoc[] [id="{beatname_lc}-starting"] === Step 5: Start {beatname_uc} @@ -246,7 +246,7 @@ in the _Beats Platform Reference_. If you see a warning about too many open files, you need to increase the `ulimit`. See the <> for more details. -include::{libbeat-dir}/docs/shared-brew-run.asciidoc[] +include::{libbeat-dir}/shared-brew-run.asciidoc[] *win:* @@ -280,7 +280,7 @@ To make it easier for you to start auditing the activities of users and processes on your system, we have created example {beatname_uc} dashboards. 
You loaded the dashboards earlier when you ran the `setup` command. -include::{libbeat-dir}/docs/opendashboards.asciidoc[] +include::{libbeat-dir}/opendashboards.asciidoc[] The dashboards are provided as examples. We recommend that you {kibana-ref}/dashboard.html[customize] them to meet your needs. diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc index c052439a..6de3c9db 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/index.asciidoc @@ -1,12 +1,13 @@ = Auditbeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: auditbeat :beatname_uc: Auditbeat :beatname_pkg: {beatname_lc} @@ -19,14 +20,18 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :docker_platform: :win_os: :linux_os: +:no_decode_cef_processor: +:no_decode_csv_fields_processor: +:no_script_processor: +:no_timestamp_processor: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] include::./getting-started.asciidoc[] -include::{libbeat-dir}/docs/repositories.asciidoc[] +include::{libbeat-dir}/repositories.asciidoc[] include::./setting-up-running.asciidoc[] @@ -38,13 +43,13 @@ include::./modules.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/modules/auditd.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/modules/auditd.asciidoc index 8868fc75..c419c66d 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/modules/auditd.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/modules/auditd.asciidoc @@ -218,6 +218,9 @@ time. - `none`: No backpressure mitigation measures are enabled. -- +*`keep_null`*:: If this option is set to true, fields with `null` values will be +published in the output document. By default, `keep_null` is set to `false`. + [float] === Audit rules diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/modules/file_integrity.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/modules/file_integrity.asciidoc index df896a7d..c420818c 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/modules/file_integrity.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/modules/file_integrity.asciidoc @@ -122,6 +122,9 @@ of this directories are watched. If `recursive` is set to `true`, the `file_integrity` module will watch for changes on this directories and all their subdirectories. +*`keep_null`*:: If this option is set to true, fields with `null` values will be +published in the output document. By default, `keep_null` is set to `false`. 
+ [float] === Example configuration diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/overview.asciidoc index 241e491f..951b9059 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/overview.asciidoc @@ -12,4 +12,4 @@ Audit Framework. You can also use {beatname_uc} to detect changes to critical files, like binaries and configuration files, and identify potential security policy violations. -include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/running-on-docker.asciidoc index 216e1abf..74007cde 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/running-on-docker.asciidoc @@ -1,4 +1,4 @@ -include::{libbeat-dir}/docs/shared-docker.asciidoc[] +include::{libbeat-dir}/shared-docker.asciidoc[] ==== Special requirements diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/running-on-kubernetes.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/running-on-kubernetes.asciidoc index 950487d4..6a3cf204 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/running-on-kubernetes.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/running-on-kubernetes.asciidoc @@ -6,7 +6,7 @@ check files integrity. ifeval::["{release-state}"=="unreleased"] -However, version {stack-version} of {beatname_uc} has not yet been +However, version {version} of {beatname_uc} has not yet been released, so no Docker image is currently available for this version. endif::[] diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc index 724c003f..05e68bb9 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/setting-up-running.asciidoc @@ -27,16 +27,16 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. 
-include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[] +include::{libbeat-dir}/shared-directory-layout.asciidoc[] -include::{libbeat-dir}/docs/keystore.asciidoc[] +include::{libbeat-dir}/keystore.asciidoc[] -include::{libbeat-dir}/docs/command-reference.asciidoc[] +include::{libbeat-dir}/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] include::./running-on-kubernetes.asciidoc[] -include::{libbeat-dir}/docs/shared-systemd.asciidoc[] +include::{libbeat-dir}/shared-systemd.asciidoc[] -include::{libbeat-dir}/docs/shared-shutdown.asciidoc[] +include::{libbeat-dir}/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/auditbeat/docs/troubleshooting.asciidoc index 4a7c4d8b..463f4123 100644 --- a/vendor/github.com/elastic/beats/auditbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/docs/troubleshooting.asciidoc @@ -17,14 +17,14 @@ following tips: [[getting-help]] == Get Help -include::{libbeat-dir}/docs/getting-help.asciidoc[] +include::{libbeat-dir}/getting-help.asciidoc[] //sets block macro for debugging.asciidoc included in next section [id="enable-{beatname_lc}-debugging"] == Debug -include::{libbeat-dir}/docs/debugging.asciidoc[] +include::{libbeat-dir}/debugging.asciidoc[] diff --git a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tmpl b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tmpl index fae5d6a0..e1050773 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tmpl +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/config.yml.tmpl @@ -11,6 +11,10 @@ rate_limit: 0 include_raw_message: false include_warnings: false + + # Set to true to publish fields with null values in events. + #keep_null: false + {{ end -}} # Load audit rules from separate files. Same format as audit.rules(7). audit_rule_files: [ '${path.config}/audit.rules.d/*.conf' ] diff --git a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/docs.asciidoc index 930ef4c9..45e3e3de 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/module/auditd/_meta/docs.asciidoc @@ -213,6 +213,9 @@ time. - `none`: No backpressure mitigation measures are enabled. -- +*`keep_null`*:: If this option is set to true, fields with `null` values will be +published in the output document. By default, `keep_null` is set to `false`. + [float] === Audit rules diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/config.yml.tmpl b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/config.yml.tmpl index b8902d70..21384235 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/config.yml.tmpl +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/config.yml.tmpl @@ -75,4 +75,7 @@ # Detect changes to files included in subdirectories. Disabled by default. recursive: false + + # Set to true to publish fields with null values in events. 
+ #keep_null: false {{ end }} diff --git a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/docs.asciidoc index 74b6a193..9282b289 100644 --- a/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/auditbeat/module/file_integrity/_meta/docs.asciidoc @@ -116,3 +116,6 @@ The supported hash types are `blake2b_256`, `blake2b_384`, `blake2b_512`, `md5`, of this directories are watched. If `recursive` is set to `true`, the `file_integrity` module will watch for changes on this directories and all their subdirectories. + +*`keep_null`*:: If this option is set to true, fields with `null` values will be +published in the output document. By default, `keep_null` is set to `false`. diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh b/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh index 41ba277c..56d563c5 100755 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/.travis/setup.sh @@ -9,6 +9,8 @@ export CHANGE_MINIKUBE_NONE_USER=true curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$TRAVIS_K8S_VERSION/bin/linux/amd64/kubectl && \ chmod +x kubectl && sudo mv kubectl /usr/local/bin/ curl -Lo minikube https://storage.googleapis.com/minikube/releases/$TRAVIS_MINIKUBE_VERSION/minikube-linux-amd64 && chmod +x minikube && sudo mv minikube /usr/local/bin/ +mkdir -p $HOME/.kube $HOME/.minikube +touch $HOME/.kube/config sudo minikube start --vm-driver=none --kubernetes-version=$TRAVIS_K8S_VERSION --logtostderr sudo minikube update-context JSONPATH='{range .items[*]}{@.metadata.name}:{range @.status.conditions[*]}{@.type}={@.status};{end}{end}'; \ diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat-kubernetes.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat-kubernetes.yaml index 2228a885..12d39760 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat-kubernetes.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat-kubernetes.yaml @@ -52,7 +52,7 @@ data: recursive: true --- # Deploy a auditbeat instance per node for node metrics retrieval -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: auditbeat @@ -60,6 +60,9 @@ metadata: labels: k8s-app: auditbeat spec: + selector: + matchLabels: + k8s-app: auditbeat template: metadata: labels: @@ -71,7 +74,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: auditbeat - image: docker.elastic.co/beats/auditbeat:7.4.1 + image: docker.elastic.co/beats/auditbeat:7.5.1 args: [ "-c", "/etc/auditbeat.yml" ] @@ -148,7 +151,7 @@ spec: path: /var/lib/auditbeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: auditbeat @@ -161,7 +164,7 @@ roleRef: name: auditbeat apiGroup: rbac.authorization.k8s.io --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: auditbeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml index 2a3f19aa..bdd38cd3 100644 --- 
a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-daemonset.yaml @@ -1,5 +1,5 @@ # Deploy a auditbeat instance per node for node metrics retrieval -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: auditbeat @@ -7,6 +7,9 @@ metadata: labels: k8s-app: auditbeat spec: + selector: + matchLabels: + k8s-app: auditbeat template: metadata: labels: diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role-binding.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role-binding.yaml index dec98a5f..56252e61 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role-binding.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role-binding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: auditbeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role.yaml index ae6d32f4..181e11bd 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/auditbeat/auditbeat-role.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: auditbeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml index a7adf81f..82613a94 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat-kubernetes.yaml @@ -42,7 +42,7 @@ data: username: ${ELASTICSEARCH_USERNAME} password: ${ELASTICSEARCH_PASSWORD} --- -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: filebeat @@ -50,6 +50,9 @@ metadata: labels: k8s-app: filebeat spec: + selector: + matchLabels: + k8s-app: filebeat template: metadata: labels: @@ -61,7 +64,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: filebeat - image: docker.elastic.co/beats/filebeat:7.4.1 + image: docker.elastic.co/beats/filebeat:7.5.1 args: [ "-c", "/etc/filebeat.yml", "-e", @@ -123,7 +126,7 @@ spec: path: /var/lib/filebeat-data type: DirectoryOrCreate --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: filebeat @@ -136,7 +139,7 @@ roleRef: name: filebeat apiGroup: rbac.authorization.k8s.io --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: filebeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml index 9979a9fa..20c742d5 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-daemonset.yaml @@ -1,4 +1,4 @@ -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: filebeat @@ -6,6 +6,9 @@ metadata: labels: k8s-app: filebeat spec: + selector: + matchLabels: + k8s-app: filebeat template: 
metadata: labels: diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role-binding.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role-binding.yaml index f24259ff..5c634e69 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role-binding.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role-binding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: filebeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role.yaml index 160bb904..c5cc8171 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/filebeat/filebeat-role.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: filebeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml index 3b7eb0d3..232db96a 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat-kubernetes.yaml @@ -91,7 +91,7 @@ data: hosts: ["localhost:10249"] --- # Deploy a Metricbeat instance per node for node metrics retrieval -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: metricbeat @@ -99,6 +99,9 @@ metadata: labels: k8s-app: metricbeat spec: + selector: + matchLabels: + k8s-app: metricbeat template: metadata: labels: @@ -110,7 +113,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:7.4.1 + image: docker.elastic.co/beats/metricbeat:7.5.1 args: [ "-c", "/etc/metricbeat.yml", "-e", @@ -223,6 +226,8 @@ data: - state_replicaset - state_pod - state_container + - state_cronjob + - state_resourcequota # Uncomment this to get k8s events: #- event period: 10s @@ -230,7 +235,7 @@ data: hosts: ["kube-state-metrics:8080"] --- # Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: metricbeat @@ -238,6 +243,9 @@ metadata: labels: k8s-app: metricbeat spec: + selector: + matchLabels: + k8s-app: metricbeat template: metadata: labels: @@ -248,7 +256,7 @@ spec: dnsPolicy: ClusterFirstWithHostNet containers: - name: metricbeat - image: docker.elastic.co/beats/metricbeat:7.4.1 + image: docker.elastic.co/beats/metricbeat:7.5.1 args: [ "-c", "/etc/metricbeat.yml", "-e", @@ -296,7 +304,7 @@ spec: defaultMode: 0600 name: metricbeat-deployment-modules --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: metricbeat @@ -309,7 +317,7 @@ roleRef: name: metricbeat apiGroup: rbac.authorization.k8s.io --- -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: metricbeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml index 6335a73f..2fe3a7a1 100644 --- 
a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-daemonset.yaml @@ -1,5 +1,5 @@ # Deploy a Metricbeat instance per node for node metrics retrieval -apiVersion: extensions/v1beta1 +apiVersion: apps/v1 kind: DaemonSet metadata: name: metricbeat @@ -7,6 +7,9 @@ metadata: labels: k8s-app: metricbeat spec: + selector: + matchLabels: + k8s-app: metricbeat template: metadata: labels: diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml index 62cbd79f..4a3cf4e2 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment-configmap.yaml @@ -41,6 +41,8 @@ data: - state_replicaset - state_pod - state_container + - state_cronjob + - state_resourcequota # Uncomment this to get k8s events: #- event period: 10s diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml index fec95a83..8b0c5351 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-deployment.yaml @@ -1,5 +1,5 @@ # Deploy singleton instance in the whole cluster for some unique data sources, like kube-state-metrics -apiVersion: apps/v1beta1 +apiVersion: apps/v1 kind: Deployment metadata: name: metricbeat @@ -7,6 +7,9 @@ metadata: labels: k8s-app: metricbeat spec: + selector: + matchLabels: + k8s-app: metricbeat template: metadata: labels: diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role-binding.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role-binding.yaml index 8a74c8f8..3f6f7b62 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role-binding.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role-binding.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: name: metricbeat diff --git a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role.yaml b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role.yaml index b4533e7b..72580abc 100644 --- a/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role.yaml +++ b/vendor/github.com/elastic/beats/deploy/kubernetes/metricbeat/metricbeat-role.yaml @@ -1,4 +1,4 @@ -apiVersion: rbac.authorization.k8s.io/v1beta1 +apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: name: metricbeat diff --git a/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr b/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr index 735c33e5..a16ca38e 100755 --- a/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr +++ b/vendor/github.com/elastic/beats/dev-tools/cherrypick_pr @@ -28,7 +28,9 @@ This script does the following: remote * if the --create_pr flag is used, it uses the GitHub API to create the PR for you. 
Note that this requires you to have a Github token with the - public_repo scope in the `~/.elastic/github.token` file + public_repo scope in the `~/.elastic/github.token` file. This token + should also be authorized for the Elastic organization so that it works with SAML single sign-on. + (see https://help.github.com/en/articles/authorizing-a-personal-access-token-for-use-with-saml-single-sign-on) Note that you need to take the commit hashes from `git log` on the from_branch, copying the IDs from Github doesn't work in case we squashed the diff --git a/vendor/github.com/elastic/beats/dev-tools/generate_notice.py b/vendor/github.com/elastic/beats/dev-tools/generate_notice.py index 15446981..0ccae1fd 100644 --- a/vendor/github.com/elastic/beats/dev-tools/generate_notice.py +++ b/vendor/github.com/elastic/beats/dev-tools/generate_notice.py @@ -228,8 +228,10 @@ def create_notice(filename, beat, copyright, vendor_dirs, csvfile, overrides=Non APACHE2_LICENSE_TITLES = [ + "Apache License 2.0", "Apache License Version 2.0", "Apache License, Version 2.0", + "licensed under the Apache 2.0 license", # github.com/zmap/zcrypto re.sub(r"\s+", " ", """Apache License ============== diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/check.go b/vendor/github.com/elastic/beats/dev-tools/mage/check.go index 86f0a28e..6c30bd1d 100644 --- a/vendor/github.com/elastic/beats/dev-tools/mage/check.go +++ b/vendor/github.com/elastic/beats/dev-tools/mage/check.go @@ -35,6 +35,7 @@ import ( "github.com/magefile/mage/sh" "github.com/pkg/errors" + "github.com/elastic/beats/dev-tools/mage/gotool" "github.com/elastic/beats/libbeat/processors/dissect" ) @@ -189,6 +190,26 @@ func GoVet() error { return errors.Wrap(err, "failed running go vet, please fix the issues reported") } +// CheckLicenseHeaders checks license headers in .go files.
+func CheckLicenseHeaders() error { + fmt.Println(">> fmt - go-licenser: Checking for missing headers") + + mg.Deps(InstallGoLicenser) + + var license string + switch BeatLicense { + case "ASL2", "ASL 2.0": + license = "ASL2" + case "Elastic", "Elastic License": + license = "Elastic" + default: + return errors.Errorf("unknown license type %v", BeatLicense) + } + + licenser := gotool.Licenser + return licenser(licenser.Check(), licenser.License(license)) +} + // CheckDashboardsFormat checks the format of dashboards func CheckDashboardsFormat() error { dashboardSubDir := "/_meta/kibana/" diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/config.go b/vendor/github.com/elastic/beats/dev-tools/mage/config.go index 9ae73597..724970d8 100644 --- a/vendor/github.com/elastic/beats/dev-tools/mage/config.go +++ b/vendor/github.com/elastic/beats/dev-tools/mage/config.go @@ -105,17 +105,19 @@ func Config(types ConfigFileType, args ConfigFileParams, targetDir string) error } params := map[string]interface{}{ - "GOOS": EnvOr("DEV_OS", "linux"), - "GOARCH": EnvOr("DEV_ARCH", "amd64"), - "Reference": false, - "Docker": false, - "ExcludeConsole": false, - "ExcludeFileOutput": false, - "ExcludeKafka": false, - "ExcludeLogstash": false, - "ExcludeRedis": false, - "UseObserverProcessor": false, - "ExcludeDashboards": false, + "GOOS": EnvOr("DEV_OS", "linux"), + "GOARCH": EnvOr("DEV_ARCH", "amd64"), + "Reference": false, + "Docker": false, + "ExcludeConsole": false, + "ExcludeFileOutput": false, + "ExcludeKafka": false, + "ExcludeLogstash": false, + "ExcludeRedis": false, + "UseObserverProcessor": false, + "UseDockerMetadataProcessor": true, + "UseKubernetesMetadataProcessor": false, + "ExcludeDashboards": false, } for k, v := range args.ExtraVars { params[k] = v diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/fmt.go b/vendor/github.com/elastic/beats/dev-tools/mage/fmt.go index e2671015..8851f676 100644 --- a/vendor/github.com/elastic/beats/dev-tools/mage/fmt.go +++ b/vendor/github.com/elastic/beats/dev-tools/mage/fmt.go @@ -26,6 +26,8 @@ import ( "github.com/magefile/mage/mg" "github.com/magefile/mage/sh" "github.com/pkg/errors" + + "github.com/elastic/beats/dev-tools/mage/gotool" ) var ( @@ -35,9 +37,6 @@ var ( // GoImportsLocalPrefix is a string prefix matching imports that should be // grouped after third-party packages. GoImportsLocalPrefix = "github.com/elastic" - - // GoLicenserImportPath controls the import path used to install go-licenser. - GoLicenserImportPath = "github.com/elastic/go-licenser" ) // Format adds license headers, formats .go files with goimports, and formats @@ -120,9 +119,7 @@ func AddLicenseHeaders() error { fmt.Println(">> fmt - go-licenser: Adding missing headers") - if err := sh.Run("go", "get", GoLicenserImportPath); err != nil { - return err - } + mg.Deps(InstallGoLicenser) var license string switch BeatLicense { @@ -134,5 +131,6 @@ func AddLicenseHeaders() error { return errors.Errorf("unknown license type %v", BeatLicense) } - return sh.RunV("go-licenser", "-license", license) + licenser := gotool.Licenser + return licenser(licenser.License(license)) } diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/gotool/get.go b/vendor/github.com/elastic/beats/dev-tools/mage/gotool/get.go new file mode 100644 index 00000000..6a147ca9 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/mage/gotool/get.go @@ -0,0 +1,31 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +type goGet func(opts ...ArgOpt) error + +// Get runs `go get` and provides optionals for adding command line arguments. +var Get goGet = runGoGet + +func runGoGet(opts ...ArgOpt) error { + args := buildArgs(opts) + return runVGo("get", args) +} + +func (goGet) Update() ArgOpt { return flagBoolIf("-u", true) } +func (goGet) Package(pkg string) ArgOpt { return posArg(pkg) } diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/gotool/go.go b/vendor/github.com/elastic/beats/dev-tools/mage/gotool/go.go new file mode 100644 index 00000000..def2d5d5 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/mage/gotool/go.go @@ -0,0 +1,257 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +import ( + "os" + "strings" + + "github.com/magefile/mage/mg" + "github.com/magefile/mage/sh" +) + +// Args holds parameters, environment variables and flag information used to +// pass to the go tool. +type Args struct { + extra map[string]string // extra flags one can pass to the command + env map[string]string + flags map[string][]string + pos []string +} + +// ArgOpt is a functional option adding info to Args once executed. +type ArgOpt func(args *Args) + +type goTest func(opts ...ArgOpt) error + +// Test runs `go test` and provides optionals for adding command line arguments. +var Test goTest = runGoTest + +// ListProjectPackages lists all packages in the current project +func ListProjectPackages() ([]string, error) { + return ListPackages("./...") +} + +// ListPackages calls `go list` for every package spec given. +func ListPackages(pkgs ...string) ([]string, error) { + return getLines(callGo(nil, "list", pkgs...)) +} + +// ListTestFiles lists all go and cgo test files available in a package. +func ListTestFiles(pkg string) ([]string, error) { + const tmpl = `{{ range .TestGoFiles }}{{ printf "%s\n" . }}{{ end }}` + + `{{ range .XTestGoFiles }}{{ printf "%s\n" . }}{{ end }}` + + return getLines(callGo(nil, "list", "-f", tmpl, pkg)) +} + +// HasTests returns true if the given package contains test files. 
+func HasTests(pkg string) (bool, error) { + files, err := ListTestFiles(pkg) + if err != nil { + return false, err + } + return len(files) > 0, nil +} + +func (goTest) WithCoverage(to string) ArgOpt { + return combine(flagArg("-cover", ""), flagArgIf("-test.coverprofile", to)) +} +func (goTest) Short(b bool) ArgOpt { return flagBoolIf("-test.short", b) } +func (goTest) Use(bin string) ArgOpt { return extraArgIf("use", bin) } +func (goTest) OS(os string) ArgOpt { return envArgIf("GOOS", os) } +func (goTest) ARCH(arch string) ArgOpt { return envArgIf("GOARCH", arch) } +func (goTest) Create() ArgOpt { return flagArg("-c", "") } +func (goTest) Out(path string) ArgOpt { return flagArg("-o", path) } +func (goTest) Package(path string) ArgOpt { return posArg(path) } +func (goTest) Verbose() ArgOpt { return flagArg("-test.v", "") } +func runGoTest(opts ...ArgOpt) error { + args := buildArgs(opts) + if bin := args.Val("use"); bin != "" { + flags := map[string][]string{} + for k, v := range args.flags { + if strings.HasPrefix(k, "-test.") { + flags[k] = v + } + } + + useArgs := &Args{} + *useArgs = *args + useArgs.flags = flags + + _, err := sh.Exec(useArgs.env, os.Stdout, os.Stderr, bin, useArgs.build()...) + return err + } + + return runVGo("test", args) +} + +func getLines(out string, err error) ([]string, error) { + if err != nil { + return nil, err + } + + lines := strings.Split(out, "\n") + res := lines[:0] + for _, line := range lines { + line = strings.TrimSpace(line) + if len(line) > 0 { + res = append(res, line) + } + } + + return res, nil +} + +func callGo(env map[string]string, cmd string, opts ...string) (string, error) { + args := []string{cmd} + args = append(args, opts...) + return sh.OutputWith(env, mg.GoCmd(), args...) +} + +func runVGo(cmd string, args *Args) error { + return execGoWith(func(env map[string]string, cmd string, args ...string) error { + _, err := sh.Exec(env, os.Stdout, os.Stderr, cmd, args...) + return err + }, cmd, args) +} + +func runGo(cmd string, args *Args) error { + return execGoWith(sh.RunWith, cmd, args) +} + +func execGoWith( + fn func(map[string]string, string, ...string) error, + cmd string, args *Args, +) error { + cliArgs := []string{cmd} + cliArgs = append(cliArgs, args.build()...) + return fn(args.env, mg.GoCmd(), cliArgs...) +} + +func posArg(value string) ArgOpt { + return func(a *Args) { a.Add(value) } +} + +func extraArg(k, v string) ArgOpt { + return func(a *Args) { a.Extra(k, v) } +} + +func extraArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return extraArg(k, v) +} + +func envArg(k, v string) ArgOpt { + return func(a *Args) { a.Env(k, v) } +} + +func envArgIf(k, v string) ArgOpt { + if v == "" { + return nil + } + return envArg(k, v) +} + +func flagArg(flag, value string) ArgOpt { + return func(a *Args) { a.Flag(flag, value) } +} + +func flagArgIf(flag, value string) ArgOpt { + if value == "" { + return nil + } + return flagArg(flag, value) +} + +func flagBoolIf(flag string, b bool) ArgOpt { + if b { + return flagArg(flag, "") + } + return nil +} + +func combine(opts ...ArgOpt) ArgOpt { + return func(a *Args) { + for _, opt := range opts { + if opt != nil { + opt(a) + } + } + } +} + +func buildArgs(opts []ArgOpt) *Args { + a := &Args{} + combine(opts...)(a) + return a +} + +// Extra sets a special k/v pair to be interpreted by the execution function. +func (a *Args) Extra(k, v string) { + if a.extra == nil { + a.extra = map[string]string{} + } + a.extra[k] = v +} + +// Val returns a special functions value for a given key. 
+func (a *Args) Val(k string) string { + if a.extra == nil { + return "" + } + return a.extra[k] +} + +// Env sets an environment variable to be passed to the child process on exec. +func (a *Args) Env(k, v string) { + if a.env == nil { + a.env = map[string]string{} + } + a.env[k] = v +} + +// Flag adds a flag to be passed to the child process on exec. +func (a *Args) Flag(flag, value string) { + if a.flags == nil { + a.flags = map[string][]string{} + } + a.flags[flag] = append(a.flags[flag], value) +} + +// Add adds a positional argument to be passed to the child process on exec. +func (a *Args) Add(p string) { + a.pos = append(a.pos, p) +} + +func (a *Args) build() []string { + args := make([]string, 0, 2*len(a.flags)+len(a.pos)) + for k, values := range a.flags { + for _, v := range values { + args = append(args, k) + if v != "" { + args = append(args, v) + } + } + } + + args = append(args, a.pos...) + return args +} diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/gotool/licenser.go b/vendor/github.com/elastic/beats/dev-tools/mage/gotool/licenser.go new file mode 100644 index 00000000..1d7c6d52 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/mage/gotool/licenser.go @@ -0,0 +1,37 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package gotool + +import ( + "github.com/magefile/mage/sh" +) + +type goLicenser func(opts ...ArgOpt) error + +// Licenser runs `go-licenser` and provides optionals for adding command line arguments. +var Licenser goLicenser = runGoLicenser + +func runGoLicenser(opts ...ArgOpt) error { + args := buildArgs(opts).build() + return sh.RunV("go-licenser", args...) +} + +func (goLicenser) Check() ArgOpt { return flagBoolIf("-d", true) } +func (goLicenser) License(license string) ArgOpt { return flagArgIf("-license", license) } +func (goLicenser) Exclude(path string) ArgOpt { return flagArgIf("-exclude", path) } +func (goLicenser) Path(path string) ArgOpt { return posArg(path) } diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/install.go b/vendor/github.com/elastic/beats/dev-tools/mage/install.go new file mode 100644 index 00000000..9b2c7795 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/mage/install.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package mage + +import ( + "path/filepath" + + "github.com/pkg/errors" + + "github.com/elastic/beats/dev-tools/mage/gotool" +) + +var ( + // GoLicenserImportPath controls the import path used to install go-licenser. + GoLicenserImportPath = "github.com/elastic/go-licenser" +) + +// InstallVendored uses go get to install a command from its vendored source +func InstallVendored(importPath string) error { + beatDir, err := ElasticBeatsDir() + if err != nil { + return errors.Wrap(err, "failed to obtain beats repository path") + } + + get := gotool.Get + return get( + get.Package(filepath.Join(beatDir, "vendor", importPath)), + ) +} + +// InstallGoLicenser target installs go-licenser +func InstallGoLicenser() error { + return InstallVendored(GoLicenserImportPath) +} diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/target/collectors/collect.go b/vendor/github.com/elastic/beats/dev-tools/mage/target/collectors/collect.go new file mode 100644 index 00000000..eb24360f --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/mage/target/collectors/collect.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package collectors + +import ( + metricbeat "github.com/elastic/beats/metricbeat/scripts/mage" +) + +//CollectDocs creates the documentation under docs/ +func CollectDocs() error { + return metricbeat.CollectDocs() +} diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/target/common/check.go b/vendor/github.com/elastic/beats/dev-tools/mage/target/common/check.go index 16ad9568..d5038c99 100644 --- a/vendor/github.com/elastic/beats/dev-tools/mage/target/common/check.go +++ b/vendor/github.com/elastic/beats/dev-tools/mage/target/common/check.go @@ -39,3 +39,8 @@ func Check() { deps = append(deps, devtools.Check) mg.SerialDeps(deps...) 
} + +// CheckLicenseHeaders checks license headers +func CheckLicenseHeaders() { + mg.Deps(devtools.CheckLicenseHeaders) +} diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/target/common/fmt.go b/vendor/github.com/elastic/beats/dev-tools/mage/target/common/fmt.go index 69f45fd4..9405d69c 100644 --- a/vendor/github.com/elastic/beats/dev-tools/mage/target/common/fmt.go +++ b/vendor/github.com/elastic/beats/dev-tools/mage/target/common/fmt.go @@ -27,3 +27,8 @@ import ( func Fmt() { mg.Deps(devtools.Format) } + +// AddLicenseHeaders adds license headers +func AddLicenseHeaders() { + mg.Deps(devtools.AddLicenseHeaders) +} diff --git a/vendor/github.com/elastic/beats/dev-tools/mage/target/update/update.go b/vendor/github.com/elastic/beats/dev-tools/mage/target/update/update.go new file mode 100644 index 00000000..dbb4df67 --- /dev/null +++ b/vendor/github.com/elastic/beats/dev-tools/mage/target/update/update.go @@ -0,0 +1,25 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package update + +import "github.com/magefile/mage/sh" + +// Update updates the generated files (aka make update). +func Update() error { + return sh.Run("make", "update") +} diff --git a/vendor/github.com/elastic/beats/dev-tools/promote_docs b/vendor/github.com/elastic/beats/dev-tools/promote_docs old mode 100755 new mode 100644 diff --git a/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc index c5648020..fb69862e 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/contributing.asciidoc @@ -89,7 +89,7 @@ recommend that you install it. [[update-scripts]] === Update scripts -The Beats use a variety of scripts based on Python to generate configuration files +The Beats use a variety of scripts based on Python, make and mage to generate configuration files and documentation. The primary command used for this is: [source,shell] @@ -108,6 +108,7 @@ These commands have the following dependencies: * Python >= {python} * https://virtualenv.pypa.io/en/latest/[virtualenv] for Python +* https://github.com/magefile/mage[Mage] Virtualenv can be installed with the command `easy_install virtualenv` or `pip install virtualenv`. 
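Tying the new mage pieces in these hunks together: the shared `update` and license-header targets above are meant to be consumed from a beat's own magefile, and the `gotool` package provides the functional options they build on. A minimal sketch of such a magefile, assuming the vendored beats layout shown in this diff (the target names and coverage path are illustrative, not part of the change):

[source,go]
----
// +build mage

package main

import (
	"github.com/elastic/beats/dev-tools/mage/gotool"
	"github.com/elastic/beats/dev-tools/mage/target/update"
)

// Update is exposed as `mage update` and delegates to `make update`,
// which is all the shared update target does.
func Update() error { return update.Update() }

// UnitTest demonstrates the gotool functional-option style: every method
// on gotool.Test returns an ArgOpt that contributes one flag, environment
// variable, or positional argument to the final `go test` invocation.
func UnitTest() error {
	return gotool.Test(
		gotool.Test.Verbose(),
		gotool.Test.WithCoverage("coverage.out"), // hypothetical output path
		gotool.Test.Package("./..."),
	)
}
----

Because the options are plain values, targets can assemble argument lists conditionally without string concatenation; nil options (for example, from `flagArgIf` with an empty value) are simply skipped by `combine`.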
More details can be found diff --git a/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc index b2a33bf2..a4d6fe87 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/index.asciidoc @@ -1,9 +1,11 @@ [[beats-reference]] = Beats Developer Guide -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] :dev-guide: true :beatname_lc: beatname @@ -11,13 +13,13 @@ include::{libbeat-dir}/docs/version.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./pull-request-guidelines.asciidoc[] include::./contributing.asciidoc[] -include::{libbeat-dir}/docs/communitybeats.asciidoc[] +include::{libbeat-dir}/communitybeats.asciidoc[] include::./newbeat.asciidoc[] diff --git a/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc b/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc index bc775a46..aa65ad01 100644 --- a/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc +++ b/vendor/github.com/elastic/beats/docs/devguide/newbeat.asciidoc @@ -31,13 +31,10 @@ The following topics describe how to build a new Beat: All Beats are written in http://golang.org/[Go], so having Go installed and knowing the basics are prerequisites for understanding this guide. -But don't worry if you aren't a Go expert. Go is a relatively new -language, and very few people are experts in it. In fact, several -people learned Go by contributing to Packetbeat and libbeat, including the -original Packetbeat authors. *Before you begin:* Set up your Go environment as described under -<> in <>. +<> in <>. The minimum required +Go version is {go-version}. To build your Beat on a specific version of libbeat, check out the specific branch ({branch} in the example below): diff --git a/vendor/github.com/elastic/beats/filebeat/Dockerfile b/vendor/github.com/elastic/beats/filebeat/Dockerfile index ea8515f9..f81198e3 100644 --- a/vendor/github.com/elastic/beats/filebeat/Dockerfile +++ b/vendor/github.com/elastic/beats/filebeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12.9 +FROM golang:1.12.12 RUN \ apt-get update \ diff --git a/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.inputs.yml b/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.inputs.yml index 78340b25..b7de598b 100644 --- a/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.inputs.yml +++ b/vendor/github.com/elastic/beats/filebeat/_meta/common.reference.inputs.yml @@ -62,6 +62,9 @@ filebeat.inputs: # fields. #fields_under_root: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Ignore files which were modified more then the defined timespan in the past. # ignore_older is disabled by default, so no files are ignored by setting it to 0. # Time strings like 2h (2 hours), 5m (5 minutes) can be used. 
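The `keep_null` flag added across these input and module configs ends up in Go as a plain struct field unpacked by libbeat's config package (see `inputOutletConfig` in the connector changes below). A minimal, self-contained sketch of that unpacking, using an illustrative struct rather than the real filebeat type:

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/common"
)

// settings stands in for the input config struct; only the new
// `keep_null` option is mirrored here.
type settings struct {
	KeepNull bool `config:"keep_null"`
}

func main() {
	// Equivalent to `keep_null: true` in an input's YAML block.
	cfg, err := common.NewConfigFrom(map[string]interface{}{"keep_null": true})
	if err != nil {
		panic(err)
	}

	var s settings
	if err := cfg.Unpack(&s); err != nil {
		panic(err)
	}

	// When true, fields with null values survive into published events
	// instead of being dropped during event normalization.
	fmt.Println(s.KeepNull) // true
}
----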
diff --git a/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go b/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go index c30203de..76e7e8b1 100644 --- a/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go +++ b/vendor/github.com/elastic/beats/filebeat/beater/filebeat.go @@ -326,7 +326,7 @@ func (fb *Filebeat) Run(b *beat.Beat) error { outDone := make(chan struct{}) // outDone closes down all active pipeline connections crawler, err := crawler.New( - channel.NewOutletFactory(outDone, wgEvents).Create, + channel.NewOutletFactory(outDone, wgEvents, b.Info).Create, config.Inputs, b.Info.Version, fb.done, diff --git a/vendor/github.com/elastic/beats/filebeat/channel/connector.go b/vendor/github.com/elastic/beats/filebeat/channel/connector.go index af1cf33c..ebd5983a 100644 --- a/vendor/github.com/elastic/beats/filebeat/channel/connector.go +++ b/vendor/github.com/elastic/beats/filebeat/channel/connector.go @@ -18,8 +18,11 @@ package channel import ( + "fmt" + "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/fmtstr" "github.com/elastic/beats/libbeat/processors" ) @@ -31,6 +34,14 @@ type pipelineConnector struct { pipeline beat.Pipeline } +// addFormattedIndex is a Processor to set an event's "raw_index" metadata field +// with a given TimestampFormatString. The elasticsearch output interprets +// that field as specifying the (raw string) index the event should be sent to; +// in other outputs it is just included in the metadata. +type addFormattedIndex struct { + formatString *fmtstr.TimestampFormatString +} + // Connect passes the cfg and the zero value of beat.ClientConfig to the underlying function. func (fn ConnectorFunc) Connect(cfg *common.Config) (Outleter, error) { return fn(cfg, beat.ClientConfig{}) @@ -51,24 +62,11 @@ func (c *pipelineConnector) ConnectWith(cfg *common.Config, clientCfg beat.Clien return nil, err } - var err error - var userProcessors beat.ProcessorList - - userProcessors, err = processors.New(config.Processors) + procs, err := processorsForConfig(c.parent.beatInfo, config, clientCfg) if err != nil { return nil, err } - if lst := clientCfg.Processing.Processor; lst != nil { - if len(userProcessors.All()) == 0 { - userProcessors = lst - } else if orig := lst.All(); len(orig) > 0 { - newLst := processors.NewList(nil) - newLst.List = append(newLst.List, lst, userProcessors) - userProcessors = newLst - } - } - setOptional := func(to common.MapStr, key string, value string) { if value != "" { to.Put(key, value) @@ -105,7 +103,8 @@ func (c *pipelineConnector) ConnectWith(cfg *common.Config, clientCfg beat.Clien clientCfg.Processing.EventMetadata = config.EventMetadata clientCfg.Processing.Meta = meta clientCfg.Processing.Fields = fields - clientCfg.Processing.Processor = userProcessors + clientCfg.Processing.Processor = procs + clientCfg.Processing.KeepNull = config.KeepNull client, err := c.pipeline.ConnectWith(clientCfg) if err != nil { return nil, err @@ -117,3 +116,64 @@ func (c *pipelineConnector) ConnectWith(cfg *common.Config, clientCfg beat.Clien } return outlet, nil } + +// processorsForConfig assembles the Processors for a pipelineConnector. +func processorsForConfig( + beatInfo beat.Info, config inputOutletConfig, clientCfg beat.ClientConfig, +) (*processors.Processors, error) { + procs := processors.NewList(nil) + + // Processor ordering is important: + // 1. 
Index configuration + if !config.Index.IsEmpty() { + staticFields := fmtstr.FieldsForBeat(beatInfo.Beat, beatInfo.Version) + timestampFormat, err := + fmtstr.NewTimestampFormatString(&config.Index, staticFields) + if err != nil { + return nil, err + } + indexProcessor := &addFormattedIndex{timestampFormat} + procs.List = append(procs.List, indexProcessor) + } + + // 2. ClientConfig processors + if lst := clientCfg.Processing.Processor; lst != nil { + procs.List = append(procs.List, lst) + } + + // 3. User processors + userProcessors, err := processors.New(config.Processors) + if err != nil { + return nil, err + } + // Subtlety: it is important here that we append the individual elements of + // userProcessors, rather than userProcessors itself, even though + // userProcessors implements the processors.Processor interface. This is + // because the contents of what we return are later pulled out into a + // processing.group rather than a processors.Processors, and the two have + // different error semantics: processors.Processors aborts processing on + // any error, whereas processing.group only aborts on fatal errors. The + // latter is the most common behavior, and the one we are preserving here for + // backwards compatibility. + // We are unhappy about this and have plans to fix this inconsistency at a + // higher level, but for now we need to respect the existing semantics. + procs.List = append(procs.List, userProcessors.List...) + return procs, nil +} + +func (p *addFormattedIndex) Run(event *beat.Event) (*beat.Event, error) { + index, err := p.formatString.Run(event.Timestamp) + if err != nil { + return nil, err + } + + if event.Meta == nil { + event.Meta = common.MapStr{} + } + event.Meta["raw_index"] = index + return event, nil +} + +func (p *addFormattedIndex) String() string { + return fmt.Sprintf("add_index_pattern=%v", p.formatString) +} diff --git a/vendor/github.com/elastic/beats/filebeat/channel/connector_test.go b/vendor/github.com/elastic/beats/filebeat/channel/connector_test.go new file mode 100644 index 00000000..4708a7e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/channel/connector_test.go @@ -0,0 +1,213 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package channel + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/processors" + "github.com/elastic/beats/libbeat/processors/actions" +) + +func TestProcessorsForConfig(t *testing.T) { + testCases := map[string]struct { + beatInfo beat.Info + configStr string + clientCfg beat.ClientConfig + event beat.Event + expectedFields map[string]string + }{ + "Simple static index": { + configStr: "index: 'test'", + expectedFields: map[string]string{ + "@metadata.raw_index": "test", + }, + }, + "Index with agent info + timestamp": { + beatInfo: beat.Info{Beat: "TestBeat", Version: "3.9.27"}, + configStr: "index: 'beat-%{[agent.name]}-%{[agent.version]}-%{+yyyy.MM.dd}'", + event: beat.Event{Timestamp: time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC)}, + expectedFields: map[string]string{ + "@metadata.raw_index": "beat-TestBeat-3.9.27-1999.12.31", + }, + }, + "Set index in ClientConfig": { + clientCfg: beat.ClientConfig{ + Processing: beat.ProcessingConfig{ + Processor: makeProcessors(&setRawIndex{"clientCfgIndex"}), + }, + }, + expectedFields: map[string]string{ + "@metadata.raw_index": "clientCfgIndex", + }, + }, + "ClientConfig processor runs after beat input Index": { + configStr: "index: 'test'", + clientCfg: beat.ClientConfig{ + Processing: beat.ProcessingConfig{ + Processor: makeProcessors(&setRawIndex{"clientCfgIndex"}), + }, + }, + expectedFields: map[string]string{ + "@metadata.raw_index": "clientCfgIndex", + }, + }, + "Set field in input config": { + configStr: `processors: [add_fields: {fields: {testField: inputConfig}}]`, + expectedFields: map[string]string{ + "fields.testField": "inputConfig", + }, + }, + "Set field in ClientConfig": { + clientCfg: beat.ClientConfig{ + Processing: beat.ProcessingConfig{ + Processor: makeProcessors(actions.NewAddFields(common.MapStr{ + "fields": common.MapStr{"testField": "clientConfig"}, + }, false)), + }, + }, + expectedFields: map[string]string{ + "fields.testField": "clientConfig", + }, + }, + "Input config processors run after ClientConfig": { + configStr: `processors: [add_fields: {fields: {testField: inputConfig}}]`, + clientCfg: beat.ClientConfig{ + Processing: beat.ProcessingConfig{ + Processor: makeProcessors(actions.NewAddFields(common.MapStr{ + "fields": common.MapStr{"testField": "clientConfig"}, + }, false)), + }, + }, + expectedFields: map[string]string{ + "fields.testField": "inputConfig", + }, + }, + } + for description, test := range testCases { + if test.event.Fields == nil { + test.event.Fields = common.MapStr{} + } + config, err := outletConfigFromString(test.configStr) + if err != nil { + t.Errorf("[%s] %v", description, err) + continue + } + processors, err := processorsForConfig(test.beatInfo, config, test.clientCfg) + if err != nil { + t.Errorf("[%s] %v", description, err) + continue + } + processedEvent, err := processors.Run(&test.event) + // We don't check if err != nil, because we are testing the final outcome + // of running the processors, including when some of them fail. 
+ if processedEvent == nil { + t.Errorf("[%s] Unexpected fatal error running processors: %v\n", + description, err) + } + for key, value := range test.expectedFields { + field, err := processedEvent.GetValue(key) + if err != nil { + t.Errorf("[%s] Couldn't get field %s from event: %v", description, key, err) + continue + } + assert.Equal(t, field, value) + fieldStr, ok := field.(string) + if !ok { + // Note that requiring a string here is just to simplify the test setup, + // not a requirement of the underlying api. + t.Errorf("[%s] Field [%s] should be a string", description, key) + continue + } + if fieldStr != value { + t.Errorf("[%s] Event field [%s]: expected [%s], got [%s]", description, key, value, fieldStr) + } + } + } +} + +func TestProcessorsForConfigIsFlat(t *testing.T) { + // This test is regrettable, and exists because of inconsistencies in + // processor handling between processors.Processors and processing.group + // (which implements beat.ProcessorList) -- see processorsForConfig for + // details. The upshot is that, for now, if the input configuration specifies + // processors, they must be returned as direct children of the resulting + // processors.Processors (rather than being collected in additional tree + // structure). + // This test should be removed once we have a more consistent mechanism for + // collecting and running processors. + configStr := `processors: +- add_fields: {fields: {testField: value}} +- add_fields: {fields: {testField2: stuff}}` + config, err := outletConfigFromString(configStr) + if err != nil { + t.Fatal(err) + } + processors, err := processorsForConfig( + beat.Info{}, config, beat.ClientConfig{}) + if err != nil { + t.Fatal(err) + } + assert.Equal(t, 2, len(processors.List)) +} + +// setRawIndex is a bare-bones processor to set the raw_index field to a +// constant string in the event metadata. It is used to test order of operations +// for processorsForConfig. +type setRawIndex struct { + indexStr string +} + +func (p *setRawIndex) Run(event *beat.Event) (*beat.Event, error) { + if event.Meta == nil { + event.Meta = common.MapStr{} + } + event.Meta["raw_index"] = p.indexStr + return event, nil +} + +func (p *setRawIndex) String() string { + return fmt.Sprintf("set_raw_index=%v", p.indexStr) +} + +// Helper function to convert from YML input string to an unpacked +// inputOutletConfig +func outletConfigFromString(s string) (inputOutletConfig, error) { + config := inputOutletConfig{} + cfg, err := common.NewConfigFrom(s) + if err != nil { + return config, err + } + if err := cfg.Unpack(&config); err != nil { + return config, err + } + return config, nil +} + +// makeProcessors wraps one or more bare Processor objects in Processors. 
+func makeProcessors(procs ...processors.Processor) *processors.Processors { + procList := processors.NewList(nil) + procList.List = procs + return procList +} diff --git a/vendor/github.com/elastic/beats/filebeat/channel/factory.go b/vendor/github.com/elastic/beats/filebeat/channel/factory.go index e31c3f52..d4373bee 100644 --- a/vendor/github.com/elastic/beats/filebeat/channel/factory.go +++ b/vendor/github.com/elastic/beats/filebeat/channel/factory.go @@ -20,6 +20,7 @@ package channel import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/fmtstr" "github.com/elastic/beats/libbeat/processors" ) @@ -28,6 +29,7 @@ type OutletFactory struct { eventer beat.ClientEventer wgEvents eventCounter + beatInfo beat.Info } type eventCounter interface { @@ -46,6 +48,7 @@ type inputOutletConfig struct { // event processing common.EventMetadata `config:",inline"` // Fields and tags to add to events. Processors processors.PluginConfig `config:"processors"` + KeepNull bool `config:"keep_null"` // implicit event fields Type string `config:"type"` // input.type @@ -56,8 +59,8 @@ type inputOutletConfig struct { Fileset string `config:"_fileset_name"` // hidden setting // Output meta data settings - Pipeline string `config:"pipeline"` // ES Ingest pipeline name - + Pipeline string `config:"pipeline"` // ES Ingest pipeline name + Index fmtstr.EventFormatString `config:"index"` // ES output index pattern } // NewOutletFactory creates a new outlet factory for @@ -65,10 +68,12 @@ type inputOutletConfig struct { func NewOutletFactory( done <-chan struct{}, wgEvents eventCounter, + beatInfo beat.Info, ) *OutletFactory { o := &OutletFactory{ done: done, wgEvents: wgEvents, + beatInfo: beatInfo, } if wgEvents != nil { diff --git a/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc index f0c71771..08f61a4f 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/configuring-howto.asciidoc @@ -17,8 +17,6 @@ The {beatname_uc} configuration file uses http://yaml.org/[YAML] for its syntax. See the {beats-ref}/config-file-format.html[Config File Format] section of the _Beats Platform Reference_ for more about the structure of the config file. 
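The per-input `index` option wired through `processorsForConfig` above resolves its pattern against static beat fields plus the event timestamp, and `addFormattedIndex` stores the result under `@metadata.raw_index` for the elasticsearch output. A sketch of that resolution using the same fmtstr calls as the connector; the pattern and beat name are made up, and `MustCompileEvent` is assumed to be the standard fmtstr compile helper:

[source,go]
----
package main

import (
	"fmt"
	"time"

	"github.com/elastic/beats/libbeat/common/fmtstr"
)

func main() {
	// Static fields the pattern may reference, built the same way
	// processorsForConfig builds them from beat.Info.
	static := fmtstr.FieldsForBeat("filebeat", "7.5.1")

	pattern := fmtstr.MustCompileEvent("beat-%{[agent.name]}-%{+yyyy.MM.dd}")
	tsFmt, err := fmtstr.NewTimestampFormatString(pattern, static)
	if err != nil {
		panic(err)
	}

	// The connector runs this per event and stores the result in
	// event.Meta["raw_index"].
	idx, err := tsFmt.Run(time.Date(1999, time.December, 31, 23, 0, 0, 0, time.UTC))
	if err != nil {
		panic(err)
	}
	fmt.Println(idx) // beat-filebeat-1999.12.31
}
----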
-include::../../libbeat/docs/shared-cm-tip.asciidoc[] - The following topics describe how to configure Filebeat: * <> @@ -58,44 +56,44 @@ include::./filebeat-general-options.asciidoc[] include::./reload-configuration.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] include::../../libbeat/docs/shared-ilm.asciidoc[] include::./load-balancing.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./filebeat-filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: :autodiscoverJolokia: :autodiscoverHints: -include::{libbeat-dir}/docs/shared-autodiscover.asciidoc[] +include::{libbeat-dir}/shared-autodiscover.asciidoc[] :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/regexp.asciidoc[] +include::{libbeat-dir}/regexp.asciidoc[] -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc index 4fc93ac8..76c6478a 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/faq.asciidoc @@ -122,6 +122,6 @@ reached EOF or not. Note that this option can lead to data loss if the file is deleted before {beatname_uc} reaches the end of the file. -include::{libbeat-dir}/docs/faq-limit-bandwidth.asciidoc[] +include::{libbeat-dir}/faq-limit-bandwidth.asciidoc[] -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc index bd732678..770aaf0e 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/fields.asciidoc @@ -15,6 +15,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -38,6 +39,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -1004,6 +1006,252 @@ Fields from AWS logs. +[float] +=== elb + +Fields for AWS ELB logs. + + + +*`aws.elb.name`*:: ++ +-- +The name of the load balancer. + + +type: keyword + +-- + +*`aws.elb.type`*:: ++ +-- +The type of the load balancer for v2 Load Balancers. + + +type: keyword + +-- + +*`aws.elb.target_group.arn`*:: ++ +-- +The ARN of the target group handling the request. 
+ + +type: keyword + +-- + +*`aws.elb.listener`*:: ++ +-- +The ELB listener that received the connection. + + +type: keyword + +-- + +*`aws.elb.protocol`*:: ++ +-- +The protocol of the load balancer (http or tcp). + + +type: keyword + +-- + +*`aws.elb.request_processing_time.sec`*:: ++ +-- +The total time in seconds since the connection or request is received until it is sent to a registered backend. + + +type: float + +-- + +*`aws.elb.backend_processing_time.sec`*:: ++ +-- +The total time in seconds since the connection is sent to the backend till the backend starts responding. + + +type: float + +-- + +*`aws.elb.response_processing_time.sec`*:: ++ +-- +The total time in seconds since the response is received from the backend till it is sent to the client. + + +type: float + +-- + +*`aws.elb.connection_time.ms`*:: ++ +-- +The total time of the connection in milliseconds, since it is opened till it is closed. + + +type: long + +-- + +*`aws.elb.tls_handshake_time.ms`*:: ++ +-- +The total time for the TLS handshake to complete in milliseconds once the connection has been established. + + +type: long + +-- + +*`aws.elb.backend.ip`*:: ++ +-- +The IP address of the backend processing this connection. + + +type: keyword + +-- + +*`aws.elb.backend.port`*:: ++ +-- +The port in the backend processing this connection. + + +type: keyword + +-- + +*`aws.elb.backend.http.response.status_code`*:: ++ +-- +The status code from the backend (status code sent to the client from ELB is stored in `http.response.status_code` + + +type: keyword + +-- + +*`aws.elb.ssl_cipher`*:: ++ +-- +The SSL cipher used in TLS/SSL connections. + + +type: keyword + +-- + +*`aws.elb.ssl_protocol`*:: ++ +-- +The SSL protocol used in TLS/SSL connections. + + +type: keyword + +-- + +*`aws.elb.chosen_cert.arn`*:: ++ +-- +The ARN of the chosen certificate presented to the client in TLS/SSL connections. + + +type: keyword + +-- + +*`aws.elb.chosen_cert.serial`*:: ++ +-- +The serial number of the chosen certificate presented to the client in TLS/SSL connections. + + +type: keyword + +-- + +*`aws.elb.incoming_tls_alert`*:: ++ +-- +The integer value of TLS alerts received by the load balancer from the client, if present. + + +type: keyword + +-- + +*`aws.elb.tls_named_group`*:: ++ +-- +The TLS named group. + + +type: keyword + +-- + +*`aws.elb.trace_id`*:: ++ +-- +The contents of the `X-Amzn-Trace-Id` header. + + +type: keyword + +-- + +*`aws.elb.matched_rule_priority`*:: ++ +-- +The priority value of the rule that matched the request, if a rule matched. + + +type: keyword + +-- + +*`aws.elb.action_executed`*:: ++ +-- +The action executed when processing the request (forward, fixed-response, authenticate...). It can contain several values. + + +type: keyword + +-- + +*`aws.elb.redirect_url`*:: ++ +-- +The URL used if a redirection action was executed. + + +type: keyword + +-- + +*`aws.elb.error.reason`*:: ++ +-- +The error reason if the executed action failed. + +type: keyword + +-- + [float] === s3access @@ -1237,6 +1485,1065 @@ type: keyword The Transport Layer Security (TLS) version negotiated by the client. 
+type: keyword + +-- + +[[exported-fields-azure]] +== Azure fields + +Azure Module + + + +[float] +=== azure + + + + +*`azure.subscription_id`*:: ++ +-- +Azure subscription ID + + +type: keyword + +-- + +*`azure.correlation_id`*:: ++ +-- +Correlation ID + + +type: keyword + +-- + +*`azure.tenant_id`*:: ++ +-- +tenant ID + + +type: keyword + +-- + +[float] +=== resource + +Resource + + + +*`azure.resource.id`*:: ++ +-- +Resource ID + + +type: keyword + +-- + +*`azure.resource.group`*:: ++ +-- +Resource group + + +type: keyword + +-- + +*`azure.resource.provider`*:: ++ +-- +Resource type/namespace + + +type: keyword + +-- + +*`azure.resource.namespace`*:: ++ +-- +Resource type/namespace + + +type: keyword + +-- + +*`azure.resource.name`*:: ++ +-- +Name + + +type: keyword + +-- + +*`azure.resource.authorization_rule`*:: ++ +-- +Authorization rule + + +type: keyword + +-- + +[float] +=== activitylogs + +Fields for Azure activity logs. + + + +[float] +=== identity + +Identity + + + +[float] +=== claims_initiated_by_user + +Claims initiated by user + + + +*`azure.activitylogs.identity.claims_initiated_by_user.name`*:: ++ +-- +Name + + +type: keyword + +-- + +*`azure.activitylogs.identity.claims_initiated_by_user.givenname`*:: ++ +-- +Givenname + + +type: keyword + +-- + +*`azure.activitylogs.identity.claims_initiated_by_user.surname`*:: ++ +-- +Surname + + +type: keyword + +-- + +*`azure.activitylogs.identity.claims_initiated_by_user.fullname`*:: ++ +-- +Fullname + + +type: keyword + +-- + +*`azure.activitylogs.identity.claims_initiated_by_user.schema`*:: ++ +-- +Schema + + +type: keyword + +-- + +*`azure.activitylogs.identity.claims.*`*:: ++ +-- +Claims + + +type: object + +-- + +[float] +=== authorization + +Authorization + + + +*`azure.activitylogs.identity.authorization.scope`*:: ++ +-- +Scope + + +type: keyword + +-- + +*`azure.activitylogs.identity.authorization.action`*:: ++ +-- +Action + + +type: keyword + +-- + +[float] +=== evidence + +Evidence + + + +*`azure.activitylogs.identity.authorization.evidence.role_assignment_scope`*:: ++ +-- +Role assignment scope + + +type: keyword + +-- + +*`azure.activitylogs.identity.authorization.evidence.role_definition_id`*:: ++ +-- +Role definition ID + + +type: keyword + +-- + +*`azure.activitylogs.identity.authorization.evidence.role`*:: ++ +-- +Role + + +type: keyword + +-- + +*`azure.activitylogs.identity.authorization.evidence.role_assignment_id`*:: ++ +-- +Role assignment ID + + +type: keyword + +-- + +*`azure.activitylogs.identity.authorization.evidence.principal_id`*:: ++ +-- +Principal ID + + +type: keyword + +-- + +*`azure.activitylogs.identity.authorization.evidence.principal_type`*:: ++ +-- +Principal type + + +type: keyword + +-- + +*`azure.activitylogs.operation_name`*:: ++ +-- +Operation name + + +type: keyword + +-- + +*`azure.activitylogs.result_signature`*:: ++ +-- +Result signature + + +type: keyword + +-- + +*`azure.activitylogs.category`*:: ++ +-- +Category + + +type: keyword + +-- + +[float] +=== properties + +Properties + + + +*`azure.activitylogs.properties.service_request_id`*:: ++ +-- +Service Request Id + + +type: keyword + +-- + +*`azure.activitylogs.properties.status_code`*:: ++ +-- +Status code + + +type: keyword + +-- + +[float] +=== auditlogs + +Fields for Azure audit logs. 
+ + + +*`azure.auditlogs.operation_name`*:: ++ +-- +The operation name + + +type: keyword + +-- + +*`azure.auditlogs.operation_version`*:: ++ +-- +The operation version + + +type: keyword + +-- + +*`azure.auditlogs.identity`*:: ++ +-- +Identity + + +type: keyword + +-- + +*`azure.auditlogs.tenant_id`*:: ++ +-- +Tenant ID + + +type: keyword + +-- + +*`azure.auditlogs.result_signature`*:: ++ +-- +Result signature + + +type: keyword + +-- + +[float] +=== properties + +The audit log properties + + + +*`azure.auditlogs.properties.result`*:: ++ +-- +Log result + + +type: keyword + +-- + +*`azure.auditlogs.properties.activity_display_name`*:: ++ +-- +Activity display name + + +type: keyword + +-- + +*`azure.auditlogs.properties.result_reason`*:: ++ +-- +Reason for the log result + + +type: keyword + +-- + +*`azure.auditlogs.properties.correlation_id`*:: ++ +-- +Correlation ID + + +type: keyword + +-- + +*`azure.auditlogs.properties.logged_by_service`*:: ++ +-- +Logged by service + + +type: keyword + +-- + +*`azure.auditlogs.properties.operation_type`*:: ++ +-- +Operation type + + +type: keyword + +-- + +*`azure.auditlogs.properties.id`*:: ++ +-- +ID + + +type: keyword + +-- + +*`azure.auditlogs.properties.activity_datetime`*:: ++ +-- +Activity timestamp + + +type: date + +-- + +*`azure.auditlogs.properties.category`*:: ++ +-- +category + + +type: keyword + +-- + +[float] +=== target_resources.* + +Target resources + + + +*`azure.auditlogs.properties.target_resources.*.display_name`*:: ++ +-- +Display name + + +type: keyword + +-- + +*`azure.auditlogs.properties.target_resources.*.id`*:: ++ +-- +ID + + +type: keyword + +-- + +*`azure.auditlogs.properties.target_resources.*.type`*:: ++ +-- +Type + + +type: keyword + +-- + +*`azure.auditlogs.properties.target_resources.*.ip_address`*:: ++ +-- +ip Address + + +type: keyword + +-- + +*`azure.auditlogs.properties.target_resources.*.user_principal_name`*:: ++ +-- +User principal name + + +type: keyword + +-- + +[float] +=== modified_properties.* + +Modified properties + + + +*`azure.auditlogs.properties.target_resources.*.modified_properties.*.new_value`*:: ++ +-- +New value + + +type: keyword + +-- + +*`azure.auditlogs.properties.target_resources.*.modified_properties.*.display_name`*:: ++ +-- +Display value + + +type: keyword + +-- + +*`azure.auditlogs.properties.target_resources.*.modified_properties.*.old_value`*:: ++ +-- +Old value + + +type: keyword + +-- + +[float] +=== initiated_by + +Information regarding the initiator + + + +[float] +=== app + +App + + + +*`azure.auditlogs.properties.initiated_by.app.servicePrincipalName`*:: ++ +-- +Service principal name + + +type: keyword + +-- + +*`azure.auditlogs.properties.initiated_by.app.displayName`*:: ++ +-- +Display name + + +type: keyword + +-- + +*`azure.auditlogs.properties.initiated_by.app.appId`*:: ++ +-- +App ID + + +type: keyword + +-- + +*`azure.auditlogs.properties.initiated_by.app.servicePrincipalId`*:: ++ +-- +Service principal ID + + +type: keyword + +-- + +[float] +=== user + +User + + + +*`azure.auditlogs.properties.initiated_by.user.userPrincipalName`*:: ++ +-- +User principal name + + +type: keyword + +-- + +*`azure.auditlogs.properties.initiated_by.user.displayName`*:: ++ +-- +Display name + + +type: keyword + +-- + +*`azure.auditlogs.properties.initiated_by.user.id`*:: ++ +-- +ID + + +type: keyword + +-- + +*`azure.auditlogs.properties.initiated_by.user.ipAddress`*:: ++ +-- +ip Address + + +type: keyword + +-- + +[float] +=== signinlogs + +Fields for Azure sign-in logs. 
+ + + +*`azure.signinlogs.operation_name`*:: ++ +-- +The operation name + + +type: keyword + +-- + +*`azure.signinlogs.operation_version`*:: ++ +-- +The operation version + + +type: keyword + +-- + +*`azure.signinlogs.tenant_id`*:: ++ +-- +Tenant ID + + +type: keyword + +-- + +*`azure.signinlogs.result_signature`*:: ++ +-- +Result signature + + +type: keyword + +-- + +*`azure.signinlogs.result_description`*:: ++ +-- +Result description + + +type: keyword + +-- + +*`azure.signinlogs.identity`*:: ++ +-- +Identity + + +type: keyword + +-- + +[float] +=== properties + +The signin log properties + + + +*`azure.signinlogs.properties.id`*:: ++ +-- +ID + + +type: keyword + +-- + +*`azure.signinlogs.properties.created_at`*:: ++ +-- +Created date time + + +type: date + +-- + +*`azure.signinlogs.properties.user_display_name`*:: ++ +-- +User display name + + +type: keyword + +-- + +*`azure.signinlogs.properties.correlation_id`*:: ++ +-- +Correlation ID + + +type: keyword + +-- + +*`azure.signinlogs.properties.user_principal_name`*:: ++ +-- +User principal name + + +type: keyword + +-- + +*`azure.signinlogs.properties.user_id`*:: ++ +-- +User ID + + +type: keyword + +-- + +*`azure.signinlogs.properties.app_id`*:: ++ +-- +App ID + + +type: keyword + +-- + +*`azure.signinlogs.properties.app_display_name`*:: ++ +-- +App display name + + +type: keyword + +-- + +*`azure.signinlogs.properties.ip_address`*:: ++ +-- +IP address + + +type: keyword + +-- + +*`azure.signinlogs.properties.client_app_used`*:: ++ +-- +Client app used + + +type: keyword + +-- + +*`azure.signinlogs.properties.conditional_access_status`*:: ++ +-- +Conditional access status + + +type: keyword + +-- + +*`azure.signinlogs.properties.original_request_id`*:: ++ +-- +Original request ID + + +type: keyword + +-- + +*`azure.signinlogs.properties.is_interactive`*:: ++ +-- +Is interactive + + +type: keyword + +-- + +*`azure.signinlogs.properties.token_issuer_name`*:: ++ +-- +Token issuer name + + +type: keyword + +-- + +*`azure.signinlogs.properties.token_issuer_type`*:: ++ +-- +Token issuer type + + +type: keyword + +-- + +*`azure.signinlogs.properties.processing_time_ms`*:: ++ +-- +Processing time in milliseconds + + +type: float + +-- + +*`azure.signinlogs.properties.risk_detail`*:: ++ +-- +Risk detail + + +type: keyword + +-- + +*`azure.signinlogs.properties.risk_level_aggregated`*:: ++ +-- +Risk level aggregated + + +type: keyword + +-- + +*`azure.signinlogs.properties.risk_level_during_signin`*:: ++ +-- +Risk level during sign-in + + +type: keyword + +-- + +*`azure.signinlogs.properties.risk_state`*:: ++ +-- +Risk state + + +type: keyword + +-- + +*`azure.signinlogs.properties.resource_display_name`*:: ++ +-- +Resource display name + + +type: keyword + +-- + +[float] +=== status + +Status + + + +*`azure.signinlogs.properties.status.error_code`*:: ++ +-- +Error code + + +type: keyword + +-- + +[float] +=== device_detail + +Device detail + + + +*`azure.signinlogs.properties.device_detail.device_id`*:: ++ +-- +Device ID + + +type: keyword + +-- + +*`azure.signinlogs.properties.device_detail.operating_system`*:: ++ +-- +Operating system + + +type: keyword + +-- + +*`azure.signinlogs.properties.device_detail.browser`*:: ++ +-- +Browser + + +type: keyword + +-- + +*`azure.signinlogs.properties.device_detail.display_name`*:: ++ +-- +Display name + + +type: keyword + +-- + +*`azure.signinlogs.properties.device_detail.trust_type`*:: ++ +-- +Trust type + + +type: keyword + +-- + +*`azure.signinlogs.properties.service_principal_id`*:: ++ +-- +Service principal ID + + 
type: keyword -- @@ -9199,6 +10506,18 @@ type: object -- +*`logstash.log.pipeline_id`*:: ++ +-- +The ID of the pipeline. + + +type: keyword + +example: main + +-- + *`logstash.log.message`*:: + -- @@ -9343,6 +10662,1048 @@ alias to: event.duration -- +[[exported-fields-misp]] +== MISP fields + +Module for handling threat information from MISP. + + + +[float] +=== misp + +Fields from MISP threat information. + + + +[float] +=== attack_pattern + +Fields provide support for specifying information about attack patterns. + + + +*`misp.attack_pattern.id`*:: ++ +-- +Identifier of the threat indicator. + + +type: keyword + +-- + +*`misp.attack_pattern.name`*:: ++ +-- +Name of the attack pattern. + + +type: keyword + +-- + +*`misp.attack_pattern.description`*:: ++ +-- +Description of the attack pattern. + + +type: text + +-- + +*`misp.attack_pattern.kill_chain_phases`*:: ++ +-- +The kill chain phase(s) to which this attack pattern corresponds. + + +type: keyword + +-- + +[float] +=== campaign + +Fields provide support for specifying information about campaigns. + + + +*`misp.campaign.id`*:: ++ +-- +Identifier of the campaign. + + +type: keyword + +-- + +*`misp.campaign.name`*:: ++ +-- +Name of the campaign. + + +type: keyword + +-- + +*`misp.campaign.description`*:: ++ +-- +Description of the campaign. + + +type: text + +-- + +*`misp.campaign.aliases`*:: ++ +-- +Alternative names used to identify this campaign. + + +type: text + +-- + +*`misp.campaign.first_seen`*:: ++ +-- +The time that this Campaign was first seen, in RFC3339 format. + + +type: date + +-- + +*`misp.campaign.last_seen`*:: ++ +-- +The time that this Campaign was last seen, in RFC3339 format. + + +type: date + +-- + +*`misp.campaign.objective`*:: ++ +-- +This field defines the Campaign's primary goal, objective, desired outcome, or intended effect. + + +type: keyword + +-- + +[float] +=== course_of_action + +A Course of Action is an action taken either to prevent an attack or to respond to an attack that is in progress. + + + +*`misp.course_of_action.id`*:: ++ +-- +Identifier of the Course of Action. + + +type: keyword + +-- + +*`misp.course_of_action.name`*:: ++ +-- +The name used to identify the Course of Action. + + +type: keyword + +-- + +*`misp.course_of_action.description`*:: ++ +-- +Description of the Course of Action. + + +type: text + +-- + +[float] +=== identity + +Identity can represent actual individuals, organizations, or groups, as well as classes of individuals, organizations, or groups. + + + +*`misp.identity.id`*:: ++ +-- +Identifier of the Identity. + + +type: keyword + +-- + +*`misp.identity.name`*:: ++ +-- +The name used to identify the Identity. + + +type: keyword + +-- + +*`misp.identity.description`*:: ++ +-- +Description of the Identity. + + +type: text + +-- + +*`misp.identity.identity_class`*:: ++ +-- +The type of entity that this Identity describes, e.g., an individual or organization. Open Vocab - identity-class-ov + + +type: keyword + +-- + +*`misp.identity.labels`*:: ++ +-- +The list of roles that this Identity performs. + + +type: keyword + +example: CEO + + +-- + +*`misp.identity.sectors`*:: ++ +-- +The list of sectors that this Identity belongs to. Open Vocab - industry-sector-ov + + +type: keyword + +-- + +*`misp.identity.contact_information`*:: ++ +-- +The contact information (e-mail, phone number, etc.) for this Identity. 
+ + +type: text + +-- + +[float] +=== intrusion_set + +An Intrusion Set is a grouped set of adversary behavior and resources with common properties that is believed to be orchestrated by a single organization. + + + +*`misp.intrusion_set.id`*:: ++ +-- +Identifier of the Intrusion Set. + + +type: keyword + +-- + +*`misp.intrusion_set.name`*:: ++ +-- +The name used to identify the Intrusion Set. + + +type: keyword + +-- + +*`misp.intrusion_set.description`*:: ++ +-- +Description of the Intrusion Set. + + +type: text + +-- + +*`misp.intrusion_set.aliases`*:: ++ +-- +Alternative names used to identify the Intrusion Set. + + +type: text + +-- + +*`misp.intrusion_set.first_seen`*:: ++ +-- +The time that this Intrusion Set was first seen, in RFC3339 format. + + +type: date + +-- + +*`misp.intrusion_set.last_seen`*:: ++ +-- +The time that this Intrusion Set was last seen, in RFC3339 format. + + +type: date + +-- + +*`misp.intrusion_set.goals`*:: ++ +-- +The high level goals of this Intrusion Set, namely, what are they trying to do. + + +type: text + +-- + +*`misp.intrusion_set.resource_level`*:: ++ +-- +This defines the organizational level at which this Intrusion Set typically works. Open Vocab - attack-resource-level-ov + + +type: text + +-- + +*`misp.intrusion_set.primary_motivation`*:: ++ +-- +The primary reason, motivation, or purpose behind this Intrusion Set. Open Vocab - attack-motivation-ov + + +type: text + +-- + +*`misp.intrusion_set.secondary_motivations`*:: ++ +-- +The secondary reasons, motivations, or purposes behind this Intrusion Set. Open Vocab - attack-motivation-ov + + +type: text + +-- + +[float] +=== malware + +Malware is a type of TTP that is also known as malicious code and malicious software, refers to a program that is inserted into a system, usually covertly, with the intent of compromising the confidentiality, integrity, or availability of the victim's data, applications, or operating system (OS) or of otherwise annoying or disrupting the victim. + + + +*`misp.malware.id`*:: ++ +-- +Identifier of the Malware. + + +type: keyword + +-- + +*`misp.malware.name`*:: ++ +-- +The name used to identify the Malware. + + +type: keyword + +-- + +*`misp.malware.description`*:: ++ +-- +Description of the Malware. + + +type: text + +-- + +*`misp.malware.labels`*:: ++ +-- +The type of malware being described. Open Vocab - malware-label-ov. adware,backdoor,bot,ddos,dropper,exploit-kit,keylogger,ransomware, remote-access-trojan,resource-exploitation,rogue-security-software,rootkit, screen-capture,spyware,trojan,virus,worm + + +type: keyword + +-- + +*`misp.malware.kill_chain_phases`*:: ++ +-- +The list of kill chain phases for which this Malware instance can be used. + + +type: keyword + +format: string + +-- + +[float] +=== note + +A Note is a comment or note containing informative text to help explain the context of one or more STIX Objects (SDOs or SROs) or to provide additional analysis that is not contained in the original object. + + + +*`misp.note.id`*:: ++ +-- +Identifier of the Note. + + +type: keyword + +-- + +*`misp.note.summary`*:: ++ +-- +A brief description used as a summary of the Note. + + +type: keyword + +-- + +*`misp.note.description`*:: ++ +-- +The content of the Note. + + +type: text + +-- + +*`misp.note.authors`*:: ++ +-- +The name of the author(s) of this Note. + + +type: keyword + +-- + +*`misp.note.object_refs`*:: ++ +-- +The STIX Objects (SDOs and SROs) that the note is being applied to. 
+ + +type: keyword + +-- + +[float] +=== threat_indicator + +Fields provide support for specifying information about threat indicators, and related matching patterns. + + + +*`misp.threat_indicator.labels`*:: ++ +-- +list of type open-vocab that specifies the type of indicator. + + +type: keyword + +example: Domain Watchlist + + +-- + +*`misp.threat_indicator.id`*:: ++ +-- +Identifier of the threat indicator. + + +type: keyword + +-- + +*`misp.threat_indicator.version`*:: ++ +-- +Version of the threat indicator. + + +type: keyword + +-- + +*`misp.threat_indicator.type`*:: ++ +-- +Type of the threat indicator. + + +type: keyword + +-- + +*`misp.threat_indicator.description`*:: ++ +-- +Description of the threat indicator. + + +type: text + +-- + +*`misp.threat_indicator.feed`*:: ++ +-- +Name of the threat feed. + + +type: text + +-- + +*`misp.threat_indicator.valid_from`*:: ++ +-- +The time from which this Indicator should be considered valuable intelligence, in RFC3339 format. + + +type: date + +-- + +*`misp.threat_indicator.valid_until`*:: ++ +-- +The time at which this Indicator should no longer be considered valuable intelligence. If the valid_until property is omitted, then there is no constraint on the latest time for which the indicator should be used, in RFC3339 format. + + +type: date + +-- + +*`misp.threat_indicator.severity`*:: ++ +-- +Threat severity to which this indicator corresponds. + + +type: keyword + +example: high + +format: string + +-- + +*`misp.threat_indicator.confidence`*:: ++ +-- +Confidence level to which this indicator corresponds. + + +type: keyword + +example: high + +-- + +*`misp.threat_indicator.kill_chain_phases`*:: ++ +-- +The kill chain phase(s) to which this indicator corresponds. + + +type: keyword + +format: string + +-- + +*`misp.threat_indicator.mitre_tactic`*:: ++ +-- +MITRE tactics to which this indicator corresponds. + + +type: keyword + +example: Initial Access + +format: string + +-- + +*`misp.threat_indicator.mitre_technique`*:: ++ +-- +MITRE techniques to which this indicator corresponds. + + +type: keyword + +example: Drive-by Compromise + +format: string + +-- + +*`misp.threat_indicator.attack_pattern`*:: ++ +-- +The attack_pattern for this indicator is a STIX Pattern as specified in STIX Version 2.0 Part 5 - STIX Patterning. + + +type: keyword + +example: [source.ip = '198.51.100.1/32'] + + +-- + +*`misp.threat_indicator.negate`*:: ++ +-- +When set to true, it specifies the absence of the attack_pattern. + + +type: boolean + +-- + +*`misp.threat_indicator.intrusion_set`*:: ++ +-- +Name of the intrusion set if known. + + +type: keyword + +-- + +*`misp.threat_indicator.campaign`*:: ++ +-- +Name of the attack campaign if known. + + +type: keyword + +-- + +*`misp.threat_indicator.threat_actor`*:: ++ +-- +Name of the threat actor if known. + + +type: keyword + +-- + +[float] +=== observed_data + +Observed data conveys information that was observed on systems and networks, such as log data or network traffic, using the Cyber Observable specification. + + + +*`misp.observed_data.id`*:: ++ +-- +Identifier of the Observed Data. + + +type: keyword + +-- + +*`misp.observed_data.first_observed`*:: ++ +-- +The beginning of the time window that the data was observed, in RFC3339 format. + + +type: date + +-- + +*`misp.observed_data.last_observed`*:: ++ +-- +The end of the time window that the data was observed, in RFC3339 format. 
+ + +type: date + +-- + +*`misp.observed_data.number_observed`*:: ++ +-- +The number of times the data represented in the objects property was observed. This MUST be an integer between 1 and 999,999,999 inclusive. + + +type: integer + +-- + +*`misp.observed_data.objects`*:: ++ +-- +A dictionary of Cyber Observable Objects that describes the single fact that was observed. + + +type: keyword + +-- + +[float] +=== report + +Reports are collections of threat intelligence focused on one or more topics, such as a description of a threat actor, malware, or attack technique, including context and related details. + + + +*`misp.report.id`*:: ++ +-- +Identifier of the Report. + + +type: keyword + +-- + +*`misp.report.labels`*:: ++ +-- +This field is an Open Vocabulary that specifies the primary subject of this report. Open Vocab - report-label-ov. threat-report,attack-pattern,campaign,identity,indicator,malware,observed-data,threat-actor,tool,vulnerability + + +type: keyword + +-- + +*`misp.report.name`*:: ++ +-- +The name used to identify the Report. + + +type: keyword + +-- + +*`misp.report.description`*:: ++ +-- +A description that provides more details and context about Report. + + +type: text + +-- + +*`misp.report.published`*:: ++ +-- +The date that this report object was officially published by the creator of this report, in RFC3339 format. + + +type: date + +-- + +*`misp.report.object_refs`*:: ++ +-- +Specifies the STIX Objects that are referred to by this Report. + + +type: text + +-- + +[float] +=== threat_actor + +Threat Actors are actual individuals, groups, or organizations believed to be operating with malicious intent. + + + +*`misp.threat_actor.id`*:: ++ +-- +Identifier of the Threat Actor. + + +type: keyword + +-- + +*`misp.threat_actor.labels`*:: ++ +-- +This field specifies the type of threat actor. Open Vocab - threat-actor-label-ov. activist,competitor,crime-syndicate,criminal,hacker,insider-accidental,insider-disgruntled,nation-state,sensationalist,spy,terrorist + + +type: keyword + +-- + +*`misp.threat_actor.name`*:: ++ +-- +The name used to identify this Threat Actor or Threat Actor group. + + +type: keyword + +-- + +*`misp.threat_actor.description`*:: ++ +-- +A description that provides more details and context about the Threat Actor. + + +type: text + +-- + +*`misp.threat_actor.aliases`*:: ++ +-- +A list of other names that this Threat Actor is believed to use. + + +type: text + +-- + +*`misp.threat_actor.roles`*:: ++ +-- +This is a list of roles the Threat Actor plays. Open Vocab - threat-actor-role-ov. agent,director,independent,sponsor,infrastructure-operator,infrastructure-architect,malware-author + + +type: text + +-- + +*`misp.threat_actor.goals`*:: ++ +-- +The high level goals of this Threat Actor, namely, what are they trying to do. + + +type: text + +-- + +*`misp.threat_actor.sophistication`*:: ++ +-- +The skill, specific knowledge, special training, or expertise a Threat Actor must have to perform the attack. Open Vocab - threat-actor-sophistication-ov. none,minimal,intermediate,advanced,strategic,expert,innovator + + +type: text + +-- + +*`misp.threat_actor.resource_level`*:: ++ +-- +This defines the organizational level at which this Threat Actor typically works. Open Vocab - attack-resource-level-ov. individual,club,contest,team,organization,government + + +type: text + +-- + +*`misp.threat_actor.primary_motivation`*:: ++ +-- +The primary reason, motivation, or purpose behind this Threat Actor. Open Vocab - attack-motivation-ov. 
accidental,coercion,dominance,ideology,notoriety,organizational-gain,personal-gain,personal-satisfaction,revenge,unpredictable + + +type: text + +-- + +*`misp.threat_actor.secondary_motivations`*:: ++ +-- +The secondary reasons, motivations, or purposes behind this Threat Actor. Open Vocab - attack-motivation-ov. accidental,coercion,dominance,ideology,notoriety,organizational-gain,personal-gain,personal-satisfaction,revenge,unpredictable + + +type: text + +-- + +*`misp.threat_actor.personal_motivations`*:: ++ +-- +The personal reasons, motivations, or purposes of the Threat Actor regardless of organizational goals. Open Vocab - attack-motivation-ov. accidental,coercion,dominance,ideology,notoriety,organizational-gain,personal-gain,personal-satisfaction,revenge,unpredictable + + +type: text + +-- + +[float] +=== tool + +Tools are legitimate software that can be used by threat actors to perform attacks. + + + +*`misp.tool.id`*:: ++ +-- +Identifier of the Tool. + + +type: keyword + +-- + +*`misp.tool.labels`*:: ++ +-- +The kind(s) of tool(s) being described. Open Vocab - tool-label-ov. denial-of-service,exploitation,information-gathering,network-capture,credential-exploitation,remote-access,vulnerability-scanning + + +type: keyword + +-- + +*`misp.tool.name`*:: ++ +-- +The name used to identify the Tool. + + +type: keyword + +-- + +*`misp.tool.description`*:: ++ +-- +A description that provides more details and context about the Tool. + + +type: text + +-- + +*`misp.tool.tool_version`*:: ++ +-- +The version identifier associated with the Tool. + + +type: keyword + +-- + +*`misp.tool.kill_chain_phases`*:: ++ +-- +The list of kill chain phases for which this Tool instance can be used. + + +type: text + +-- + +[float] +=== vulnerability + +A Vulnerability is a mistake in software that can be directly used by a hacker to gain access to a system or network. + + + +*`misp.vulnerability.id`*:: ++ +-- +Identifier of the Vulnerability. + + +type: keyword + +-- + +*`misp.vulnerability.name`*:: ++ +-- +The name used to identify the Vulnerability. + + +type: keyword + +-- + +*`misp.vulnerability.description`*:: ++ +-- +A description that provides more details and context about the Vulnerability. + + +type: text + +-- + [[exported-fields-mongodb]] == mongodb fields @@ -10537,7 +12898,7 @@ type: long *`netflow.class_id`*:: + -- -type: short +type: long -- @@ -13937,6 +16298,7 @@ type: long -- Name of database + example: mydb -- @@ -13946,10 +16308,31 @@ example: mydb -- Query statement. + example: SELECT * FROM users; -- +*`postgresql.log.query_step`*:: ++ +-- +Statement step when using extended query protocol (one of statement, parse, bind or execute) + + +example: parse + +-- + +*`postgresql.log.query_name`*:: ++ +-- +Name given to a query when using extended query protocol. If it is "", or not present, this field is ignored. + + +example: pdo_stmt_00000001 + +-- + *`postgresql.log.error.code`*:: + -- diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc index bbeb8382..ddfa8fd4 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-filtering.asciidoc @@ -22,7 +22,7 @@ global processing across all data exported by {beatname_uc}. 
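As a minimal sketch of such global processing, the following drops a field from every event {beatname_uc} exports (`drop_fields` is a standard libbeat processor; the field name is only an example):

[source,yaml]
----
processors:
  - drop_fields:                       # runs on every exported event
      fields: ["agent.ephemeral_id"]   # example field; substitute your own
----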
[[using-processors]] === Processors -include::{libbeat-dir}/docs/processors.asciidoc[] +include::{libbeat-dir}/processors.asciidoc[] [float] [[drop-event-example]] === Drop event example @@ -103,4 +103,4 @@ The resulting output looks something like this: } ----------------------------------------------------- -include::{libbeat-dir}/docs/processors-using.asciidoc[] +include::{libbeat-dir}/processors-using.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc index 9de97f29..45767813 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-general-options.asciidoc @@ -42,13 +42,15 @@ NOTE: The content stored in filebeat/data.json is compatible with the old registry The permissions mask to apply on the registry data file. The default value is 0600. The permissions option must be a valid Unix-style file permissions mask expressed in octal notation. In Go, numbers in octal notation must start with 0. +The most permissive mask allowed is 0640. If a higher permissions mask is +specified via this setting, it will be subject to a umask of 0027. + This option is not supported on Windows. Examples: - 0644: give read and write access to the file owner, and read access to all others. + 0640: give read and write access to the file owner, and read access to members of the group associated with the file. 0600: give read and write access to the file owner, and no access to all others. - 0664: give read and write access to the file owner and members of the group associated with the file, as well as read access to all other users. [source,yaml] ------------------------------------------------------------------------------------- @@ -141,4 +143,4 @@ Example configuration: filebeat.shutdown_timeout: 5s ------------------------------------------------------------------------------------- -include::{libbeat-dir}/docs/generalconfig.asciidoc[] +include::{libbeat-dir}/generalconfig.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc index 7cdf1cf2..6dde1797 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/filebeat-modules-options.asciidoc @@ -18,7 +18,7 @@ implement and deploy a log monitoring solution. 
* <> * <> -include::{libbeat-dir}/docs/shared-note-file-permissions.asciidoc[] +include::{libbeat-dir}/shared-note-file-permissions.asciidoc[] When you enable modules, you can also <> to change the default diff --git a/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc index e009a753..fe20fdea 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/getting-started.asciidoc @@ -1,7 +1,7 @@ [[filebeat-getting-started]] == Getting Started With Filebeat -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <> * <> @@ -15,7 +15,7 @@ include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] [[filebeat-installation]] === Step 1: Install Filebeat -include::{libbeat-dir}/docs/shared-download-and-install.asciidoc[] +include::{libbeat-dir}/shared-download-and-install.asciidoc[] [[deb]] *deb:* @@ -74,7 +74,7 @@ tar xzvf filebeat-{version}-darwin-x86_64.tar.gz endif::[] -include::{libbeat-dir}/docs/shared-brew-install.asciidoc[] +include::{libbeat-dir}/shared-brew-install.asciidoc[] [[linux]] *linux:* @@ -146,7 +146,7 @@ started experience for common log formats. If you are using Filebeat modules, skip this section, including the remaining getting started steps, and go directly to <>. -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] Here is a sample of the `filebeat` section of the `filebeat.yml` file. Filebeat uses predefined default values for most configuration options. @@ -185,27 +185,25 @@ To fetch all files from a predefined level of subdirectories, the following pattern can be used: `/var/log/*/*.log`. This fetches all `.log` files from the subfolders of `/var/log`, but does not fetch log files from the `/var/log` folder itself. Currently it is not possible to recursively fetch all files in all subdirectories of a directory. -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] -include::{libbeat-dir}/docs/step-look-at-config.asciidoc[] - -include::../../libbeat/docs/shared-cm-tip.asciidoc[] +include::{libbeat-dir}/step-look-at-config.asciidoc[] [[filebeat-template]] === Step 3: Load the index template in Elasticsearch -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [[load-kibana-dashboards]] === Step 4: Set up the Kibana dashboards -include::{libbeat-dir}/docs/dashboards.asciidoc[] +include::{libbeat-dir}/dashboards.asciidoc[] [[filebeat-starting]] === Step 5: Start Filebeat @@ -242,7 +240,7 @@ specified. See {beats-ref}/config-file-permissions.html[Config File Ownership and Permissions] in the _Beats Platform Reference_. -include::{libbeat-dir}/docs/shared-brew-run.asciidoc[] +include::{libbeat-dir}/shared-brew-run.asciidoc[] *win:* @@ -263,7 +261,7 @@ To make it easier for you to explore Filebeat data in Kibana, we've created example {beatname_uc} dashboards. You loaded the dashboards earlier when you ran the `setup` command. 
-include::{libbeat-dir}/docs/opendashboards.asciidoc[] +include::{libbeat-dir}/opendashboards.asciidoc[] These dashboards are designed to work out-of-the box when you use <>. However, you can also use them diff --git a/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat-azure-overview.png b/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat-azure-overview.png new file mode 100644 index 00000000..32c5a720 Binary files /dev/null and b/vendor/github.com/elastic/beats/filebeat/docs/images/filebeat-azure-overview.png differ diff --git a/vendor/github.com/elastic/beats/filebeat/docs/images/kibana-misp.png b/vendor/github.com/elastic/beats/filebeat/docs/images/kibana-misp.png new file mode 100644 index 00000000..a02068dd Binary files /dev/null and b/vendor/github.com/elastic/beats/filebeat/docs/images/kibana-misp.png differ diff --git a/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc index e42901da..132b4757 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/index.asciidoc @@ -1,12 +1,13 @@ = Filebeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: filebeat :beatname_uc: Filebeat :beatname_pkg: {beatname_lc} @@ -18,10 +19,6 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :has_solutions: :ignores_max_retries: :has_docker_label_ex: -:has_decode_cef_processor: -:has_decode_csv_fields_processor: -:has_script_processor: -:has_timestamp_processor: :has_modules_command: :has_registry: :deb_os: @@ -31,7 +28,7 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :docker_platform: :win_os: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] @@ -39,7 +36,7 @@ include::./getting-started.asciidoc[] include::./modules-getting-started.asciidoc[] -include::{libbeat-dir}/docs/repositories.asciidoc[] +include::{libbeat-dir}/repositories.asciidoc[] include::./setting-up-running.asciidoc[] @@ -49,19 +46,19 @@ include::./how-filebeat-works.asciidoc[] include::./configuring-howto.asciidoc[] -include::{libbeat-dir}/docs/shared-central-management.asciidoc[] +include::{libbeat-dir}/shared-central-management.asciidoc[] include::./modules.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc index 8f161006..c7f053df 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc +++ 
b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-harvester-options.asciidoc @@ -191,6 +191,10 @@ must be at the top level in the JSON object and the value associated with the key must be a string, otherwise no filtering or multiline aggregation will occur. + +*`document_id`*:: An optional configuration setting that specifies the JSON key to +set the document ID. If configured, the field will be removed from the original +JSON document and stored in `@metadata.id`. + +*`ignore_decoding_error`*:: An optional configuration setting that specifies if JSON decoding errors should be logged or not. If set to true, errors will not be logged. The default is false. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc index 53d745e0..8e08a807 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-common-options.asciidoc @@ -64,7 +64,7 @@ If this option is set to true, the custom <<{beatname_lc}-input-{type}-fields,fields>> are stored as top-level fields in the output document instead of being grouped under a `fields` sub-dictionary. If the custom field names conflict with other field names added by {beatname_uc}, -then the custom fields overwrite the other fields. +then the custom fields overwrite the other fields. [float] ===== `processors` @@ -84,3 +84,20 @@ this option usually results in simpler configuration files. If the pipeline is configured both in the input and output, the option from the input is used. +[float] +===== `keep_null` + +If this option is set to true, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. + +[float] +===== `index` + +If present, this formatted string overrides the index for events from this input +(for Elasticsearch outputs), or sets the `raw_index` field of the event's +metadata (for other outputs). This string can only refer to the agent name and +version and the event timestamp; for access to dynamic fields, use +`output.elasticsearch.index` or a processor. + +Example value: `"%{[agent.name]}-myindex-%{+yyyy.MM.dd}"` might +expand to `"filebeat-myindex-2019.11.01"`. diff --git a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-kafka.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-kafka.asciidoc index a801dc9f..2c265fd9 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-kafka.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/inputs/input-kafka.asciidoc @@ -9,7 +9,7 @@ Use the `kafka` input to read from topics in a Kafka cluster. -To configure this input, specify a list of one or more <> in the +To configure this input, specify a list of one or more <> in the cluster to bootstrap the connection with, a list of <> to track, and a <> for the connection. 
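For instance, a minimal `kafka` input might look like the following sketch (the broker address, topic, and group ID are placeholders):

[source,yaml]
----
filebeat.inputs:
- type: kafka
  hosts: ["kafka-broker:9092"]   # placeholder broker to bootstrap from
  topics: ["filebeat"]           # placeholder topic list to track
  group_id: "filebeat"           # placeholder consumer group ID
----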
@@ -27,6 +27,26 @@ Example configuration: ---- + +The following example shows how to use the `kafka` input to ingest data from +Microsoft Azure Event Hubs that have Kafka compatibility enabled: + +["source","yaml",subs="attributes"] +---- +{beatname_lc}.inputs: +- type: kafka + hosts: [".servicebus.windows.net:9093"] + topics: [""] + group_id: "" + + username: "$ConnectionString" + password: "" + ssl.enabled: true + +---- + +For more details on the mapping between Kafka and Event Hubs configuration +parameters, see the +link:https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-for-kafka-ecosystem-overview[Azure documentation]. [id="{beatname_lc}-input-{type}-options"] ==== Configuration options @@ -107,6 +127,17 @@ Kafka fetch settings: *`max`*:: The maximum number of bytes to read per request. Defaults to 0 (no limit). +===== `expand_event_list_from_field` + +If the fileset using this input expects to receive multiple messages bundled under a specific field, the config option `expand_event_list_from_field` can be set to the name of that field. +For example, in the case of the Azure filesets, the events are found under the JSON object "records": +``` +{ +"records": [ {event1}, {event2}] +} +``` +This setting splits the messages under the group value (`records`) into separate events. + ===== `rebalance` Kafka rebalance settings: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc index 4cf94c6f..0bbb5cf2 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules-getting-started.asciidoc @@ -40,9 +40,9 @@ To set up and run {beatname_uc} modules: installation. By default, {beatname_uc} assumes {es} is running locally on port 9200. + -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] . Enable the modules you want to run. For example, the following command enables the system, nginx, and mysql modules: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/apache.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/apache.asciidoc index e674e0ad..7a15fc61 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules/apache.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/apache.asciidoc @@ -73,6 +73,19 @@ include::../include/var-paths.asciidoc[] include::../include/timezone-support.asciidoc[] +[float] +=== Virtual Host + +See the CustomLog documentation: https://httpd.apache.org/docs/2.4/en/mod/mod_log_config.html. +Add the `%v` (virtual host) directive to the log format in `httpd.conf`: +["source","sh",subs="attributes"] +----- + # Replace + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + # With + LogFormat "%v %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined +----- + :has-dashboards!: :fileset_ex!: diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/aws.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/aws.asciidoc index fe435a10..79c59245 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules/aws.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/aws.asciidoc @@ -14,8 +14,9 @@ beta[] This is a module for aws logs. 
It uses the filebeat s3 input to get log files from AWS S3 buckets with SQS notification. This module supports reading s3 server -access logs with `s3access` fileset. Server access logging provides detailed -records for the requests that are made to a bucket. +access logs with the `s3access` fileset and ELB access logs with the `elb` fileset. +Access logs contain detailed information about the requests made to these +services. [float] === Example dashboard diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/azure.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/azure.asciidoc new file mode 100644 index 00000000..4b0113f8 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/azure.asciidoc @@ -0,0 +1,117 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[filebeat-module-azure]] +[role="xpack"] + +:modulename: azure +:has-dashboards: false + +== azure module + +beta[] + +This is the azure module. + +The azure module concentrates on retrieving different types of log data from Azure. +There are several requirements to satisfy before using the module, since the logs are actually read from Azure event hubs: + + - the event hubs the azure module reads logs from must have the Kafka option enabled. + - the logs have to be exported first to the event hubs; see https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create-kafka-enabled + - to export activity logs to event hubs, users can follow the steps here: https://docs.microsoft.com/en-us/azure/azure-monitor/platform/activity-log-export + - to export audit and sign-in logs to event hubs, users can follow the steps here: https://docs.microsoft.com/en-us/azure/active-directory/reports-monitoring/tutorial-azure-monitor-stream-logs-to-event-hub + +The module contains the following filesets: + +`activitylogs` :: +Will retrieve Azure activity logs: control-plane events on Azure Resource Manager resources. Activity logs provide insight into the operations that were performed on resources in your subscription. + +`signinlogs` :: +Will retrieve Azure Active Directory sign-in logs. The sign-ins report provides information about the usage of managed applications and user sign-in activities. + +`auditlogs` :: +Will retrieve Azure Active Directory audit logs. The audit logs provide traceability through logs for all changes done by various features within Azure AD. Examples of audit logs include changes made to any resources within Azure AD, like adding or removing users, apps, groups, roles, and policies. + +[float] +=== Dashboards + +The azure module comes with several predefined dashboards for a general cloud overview, user activity, and alerts. For example: + +image::./images/filebeat-azure-overview.png[] + + +[float] +=== Module configuration + +``` +- module: azure + activitylogs: + enabled: true + var: + namespace: "obseventhubs.servicebus.windows.net:9093" + eventhub: ["insights-operational-logs"] + consumer_group: "$Default" + connection_string: "" + auditlogs: + enabled: true + var: + namespace: "" + eventhub: ["insights-logs-auditlogs"] + consumer_group: "$Default" + connection_string: "" + + signinlogs: + enabled: true + var: + namespace: "" + eventhub: ["insights-logs-signinlogs"] + consumer_group: "$Default" + connection_string: "" + +``` + + +The options below map side by side between Kafka and Event Hubs notation; we follow the Azure notation in this case:
+ + +`namespace` :: +_string_ +An Event Hubs namespace provides a unique scoping container, referenced by its fully qualified domain name, in which users can create one or more event hubs or Kafka topics. + +`eventhub` :: + _[]string_ +The event hub (or Kafka topic): a fully managed, real-time data ingestion service. +Default value: `insights-operational-logs` + +`consumer_group` :: +_string_ + The publish/subscribe mechanism of Event Hubs is enabled through consumer groups. A consumer group is a view (state, position, or offset) of an entire event hub. Consumer groups enable multiple consuming applications to each have a separate view of the event stream, and to read the stream independently at their own pace and with their own offsets. +Default value: `$Default` + +`connection_string` :: +_string_ +The connection string required to communicate with Event Hubs; for the steps to obtain it, see https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string. + + + +include::../include/what-happens.asciidoc[] + +[float] +=== Compatibility + +TODO: document which versions of the software this has been tested with + + + + + + + + +[float] +=== Fields + +For a description of each field in the module, see the +<> section. + diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc index 05525dfc..f9f62106 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/kafka.asciidoc @@ -33,7 +33,7 @@ include::../include/configuring-intro.asciidoc[] The following example shows how to set paths in the +modules.d/{modulename}.yml+ file to override the default paths for logs: -["source","yaml",subs="attributes"] +[source,yaml] ----- - module: kafka log: @@ -48,7 +48,7 @@ file to override the default paths for logs: To specify the same settings at the command line, you use: -["source","sh",subs="attributes"] +[source,yaml] ----- -M "kafka.log.var.paths=[/path/to/logs/controller.log*, /path/to/logs/server.log*, /path/to/logs/state-change.log*, /path/to/logs/kafka-*.log*]" ----- @@ -62,6 +62,19 @@ include::../include/config-option-intro.asciidoc[] [float] ==== `log` fileset settings +*`var.kafka_home`*:: + +The path to your Kafka installation. The default is `/opt`. For example: ++ +[source,yaml] +---- +- module: kafka + log: + enabled: true + var.kafka_home: /usr/share/kafka_2.12-2.4.0 + ... +---- + include::../include/var-paths.asciidoc[] include::../include/timezone-support.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/misp.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/misp.asciidoc new file mode 100644 index 00000000..4460a443 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/misp.asciidoc @@ -0,0 +1,41 @@ +//// +This file is generated! See scripts/docs_collector.py +//// + +[[filebeat-module-misp]] +[role="xpack"] + +:modulename: misp +:has-dashboards: false + +== MISP module + +beta[] + +This is a filebeat module for reading threat intel information from the MISP platform (https://www.circl.lu/doc/misp/). It uses the httpjson input to access the MISP REST API. + +The configuration in the config.yml file uses the following format: + + * var.api_key: specifies the API key to access MISP. + * var.json_objects_array: specifies the array object in the MISP response, e.g., "response.Attribute". 
+
+[float]
+=== Example dashboard
+
+This module comes with a sample dashboard. For example:
+
+[role="screenshot"]
+image::./images/kibana-misp.png[]
+
+:has-dashboards!:
+
+:modulename!:
+
+
+[float]
+=== Fields
+
+For a description of each field in the module, see the
+<> section.
+
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/netflow.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/netflow.asciidoc
index c0d698ff..ce28607a 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/modules/netflow.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/netflow.asciidoc
@@ -59,6 +59,10 @@ expiration.
 processing. Use this setting to avoid packet-loss when dealing with occasional
 bursts of traffic.
 
+`var.detect_sequence_reset`:: Flag controlling whether {beatname_uc} should
+monitor sequence numbers in the Netflow packets to detect an Exporting Process
+reset. See <> for details.
+
 :has-dashboards!:
 
 :fileset_ex!:
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules/panw.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules/panw.asciidoc
index 139300cb..ca96711f 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/modules/panw.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/modules/panw.asciidoc
@@ -109,8 +109,8 @@ in ECS that are added under the `panw.panos` prefix:
 | Category | | panw.panos.url.category
 | Severity | log.level |
 | Direction | network.direction |
-| Source Location | source.geo.name |
-| Destination Location | destination.geo.name |
+| Source Location | source.geo.country_iso_code |
+| Destination Location | destination.geo.country_iso_code |
 | PCAP_id | | panw.panos.network.pcap_id
 | Filedigest | | panw.panos.file.hash
 | User Agent | user_agent.original |
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc
index ffee384c..c4a92858 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/modules_list.asciidoc
@@ -6,6 +6,7 @@ This file is generated! See scripts/docs_collector.py
  * <>
  * <>
  * <>
+ * <>
  * <>
  * <>
  * <>
@@ -20,6 +21,7 @@ This file is generated! See scripts/docs_collector.py
  * <>
  * <>
  * <>
+ * <>
  * <>
  * <>
  * <>
@@ -44,6 +46,7 @@ include::modules-overview.asciidoc[]
 include::modules/apache.asciidoc[]
 include::modules/auditd.asciidoc[]
 include::modules/aws.asciidoc[]
+include::modules/azure.asciidoc[]
 include::modules/cef.asciidoc[]
 include::modules/cisco.asciidoc[]
 include::modules/coredns.asciidoc[]
@@ -58,6 +61,7 @@ include::modules/iptables.asciidoc[]
 include::modules/kafka.asciidoc[]
 include::modules/kibana.asciidoc[]
 include::modules/logstash.asciidoc[]
+include::modules/misp.asciidoc[]
 include::modules/mongodb.asciidoc[]
 include::modules/mssql.asciidoc[]
 include::modules/mysql.asciidoc[]
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc
index 5aae2001..4f3a9474 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/overview.asciidoc
@@ -23,4 +23,4 @@ image::./images/filebeat.png[Beats design]
 
 For more information about inputs and harvesters, see <>.
-include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc index 7936254e..52eb800d 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/reload-configuration.asciidoc @@ -6,7 +6,7 @@ allowing you to separate your configuration into multiple smaller configuration files. See the <> and the <> sections for details. -include::{libbeat-dir}/docs/shared-note-file-permissions.asciidoc[] +include::{libbeat-dir}/shared-note-file-permissions.asciidoc[] [float] [[load-input-config]] @@ -129,4 +129,4 @@ set the `period` to less than 1s because the modification time of files is often stored in seconds. Setting the `period` to less than 1s will result in unnecessary overhead. -include::{libbeat-dir}/docs/shared-note-file-permissions.asciidoc[] +include::{libbeat-dir}/shared-note-file-permissions.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/running-on-docker.asciidoc index d9306b55..dbfcce5b 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/running-on-docker.asciidoc @@ -1 +1 @@ -include::{libbeat-dir}/docs/shared-docker.asciidoc[] +include::{libbeat-dir}/shared-docker.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc index 28b4114d..ccf69f57 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/running-on-kubernetes.asciidoc @@ -6,7 +6,7 @@ retrieve and ship container logs. ifeval::["{release-state}"=="unreleased"] -However, version {stack-version} of {beatname_uc} has not yet been +However, version {version} of {beatname_uc} has not yet been released, so no Docker image is currently available for this version. endif::[] diff --git a/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc index 724c003f..05e68bb9 100644 --- a/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/docs/setting-up-running.asciidoc @@ -27,16 +27,16 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. 
-include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[]
+include::{libbeat-dir}/shared-directory-layout.asciidoc[]
 
-include::{libbeat-dir}/docs/keystore.asciidoc[]
+include::{libbeat-dir}/keystore.asciidoc[]
 
-include::{libbeat-dir}/docs/command-reference.asciidoc[]
+include::{libbeat-dir}/command-reference.asciidoc[]
 
 include::./running-on-docker.asciidoc[]
 
 include::./running-on-kubernetes.asciidoc[]
 
-include::{libbeat-dir}/docs/shared-systemd.asciidoc[]
+include::{libbeat-dir}/shared-systemd.asciidoc[]
 
-include::{libbeat-dir}/docs/shared-shutdown.asciidoc[]
+include::{libbeat-dir}/shared-shutdown.asciidoc[]
diff --git a/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc
index b093c5d7..766c7a37 100644
--- a/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc
+++ b/vendor/github.com/elastic/beats/filebeat/docs/troubleshooting.asciidoc
@@ -17,14 +17,14 @@ following tips:
 [[getting-help]]
 == Get help
 
-include::{libbeat-dir}/docs/getting-help.asciidoc[]
+include::{libbeat-dir}/getting-help.asciidoc[]
 
 //sets block macro for debugging.asciidoc included in next section
 
 [[enable-filebeat-debugging]]
 == Debug
 
-include::{libbeat-dir}/docs/debugging.asciidoc[]
+include::{libbeat-dir}/debugging.asciidoc[]
diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.docker.yml b/vendor/github.com/elastic/beats/filebeat/filebeat.docker.yml
index 99cf52e1..983e8c5c 100644
--- a/vendor/github.com/elastic/beats/filebeat/filebeat.docker.yml
+++ b/vendor/github.com/elastic/beats/filebeat/filebeat.docker.yml
@@ -5,6 +5,7 @@ filebeat.config:
 
 processors:
 - add_cloud_metadata: ~
+- add_docker_metadata: ~
 
 output.elasticsearch:
   hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml b/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml
index f58ac33e..28126553 100644
--- a/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml
+++ b/vendor/github.com/elastic/beats/filebeat/filebeat.reference.yml
@@ -439,6 +439,9 @@ filebeat.inputs:
   # fields.
   #fields_under_root: false
 
+  # Set to true to publish fields with null values in events.
+  #keep_null: false
+
   # Ignore files which were modified more then the defined timespan in the past.
   # ignore_older is disabled by default, so no files are ignored by setting it to 0.
   # Time strings like 2h (2 hours), 5m (5 minutes) can be used.
@@ -1771,7 +1774,7 @@ setup.template.settings:
 #setup.ilm.enabled: auto
 
 # Set the prefix used in the index lifecycle write alias name. The default alias
-# name is 'filebeat-%{[agent.version]}'. 
+# name is 'filebeat-%{[agent.version]}'.
 #setup.ilm.rollover_alias: "filebeat"
 
 # Set the rollover index pattern. The default is "%{now/d}-000001".
@@ -2034,12 +2037,21 @@ logging.files:
 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false
 
-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost
 
 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066
 
+# Define which user should own the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe, using the
+# Security Descriptor Definition Language (SDDL). This option cannot be used
+# with `http.user`.
+#http.named_pipe.security_descriptor:
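+# For example, an illustrative sketch (the SDDL string below is an assumption;
+# it grants full access to the built-in Administrators group only):
+#http.named_pipe.security_descriptor: "D:P(A;;GA;;;BA)"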
+
 #============================= Process Security ================================
 
 # Enable or disable seccomp system call filtering on Linux. Default is enabled.
diff --git a/vendor/github.com/elastic/beats/filebeat/filebeat.yml b/vendor/github.com/elastic/beats/filebeat/filebeat.yml
index b56530ff..d02b4d16 100644
--- a/vendor/github.com/elastic/beats/filebeat/filebeat.yml
+++ b/vendor/github.com/elastic/beats/filebeat/filebeat.yml
@@ -176,6 +176,8 @@ output.elasticsearch:
 processors:
   - add_host_metadata: ~
   - add_cloud_metadata: ~
+  - add_docker_metadata: ~
+  - add_kubernetes_metadata: ~
 
 #================================ Logging =====================================
diff --git a/vendor/github.com/elastic/beats/filebeat/input/input.go b/vendor/github.com/elastic/beats/filebeat/input/input.go
index 98eea51d..e1931d4e 100644
--- a/vendor/github.com/elastic/beats/filebeat/input/input.go
+++ b/vendor/github.com/elastic/beats/filebeat/input/input.go
@@ -60,7 +60,7 @@ type Runner struct {
 // New instantiates a new Runner
 func New(
 	conf *common.Config,
-	outlet channel.Connector,
+	connector channel.Connector,
 	beatDone chan struct{},
 	states []file.State,
 	dynFields *common.MapStrPointer,
@@ -99,7 +99,7 @@ func New(
 		Meta: nil,
 	}
 	var ipt Input
-	ipt, err = f(conf, outlet, context)
+	ipt, err = f(conf, connector, context)
 	if err != nil {
 		return input, err
 	}
diff --git a/vendor/github.com/elastic/beats/filebeat/input/kafka/config.go b/vendor/github.com/elastic/beats/filebeat/input/kafka/config.go
index ddc505bf..6fb14730 100644
--- a/vendor/github.com/elastic/beats/filebeat/input/kafka/config.go
+++ b/vendor/github.com/elastic/beats/filebeat/input/kafka/config.go
@@ -34,22 +34,23 @@ import (
 
 type kafkaInputConfig struct {
 	// Kafka hosts with port, e.g.
"localhost:9092" - Hosts []string `config:"hosts" validate:"required"` - Topics []string `config:"topics" validate:"required"` - GroupID string `config:"group_id" validate:"required"` - ClientID string `config:"client_id"` - Version kafka.Version `config:"version"` - InitialOffset initialOffset `config:"initial_offset"` - ConnectBackoff time.Duration `config:"connect_backoff" validate:"min=0"` - ConsumeBackoff time.Duration `config:"consume_backoff" validate:"min=0"` - WaitClose time.Duration `config:"wait_close" validate:"min=0"` - MaxWaitTime time.Duration `config:"max_wait_time"` - IsolationLevel isolationLevel `config:"isolation_level"` - Fetch kafkaFetch `config:"fetch"` - Rebalance kafkaRebalance `config:"rebalance"` - TLS *tlscommon.Config `config:"ssl"` - Username string `config:"username"` - Password string `config:"password"` + Hosts []string `config:"hosts" validate:"required"` + Topics []string `config:"topics" validate:"required"` + GroupID string `config:"group_id" validate:"required"` + ClientID string `config:"client_id"` + Version kafka.Version `config:"version"` + InitialOffset initialOffset `config:"initial_offset"` + ConnectBackoff time.Duration `config:"connect_backoff" validate:"min=0"` + ConsumeBackoff time.Duration `config:"consume_backoff" validate:"min=0"` + WaitClose time.Duration `config:"wait_close" validate:"min=0"` + MaxWaitTime time.Duration `config:"max_wait_time"` + IsolationLevel isolationLevel `config:"isolation_level"` + Fetch kafkaFetch `config:"fetch"` + Rebalance kafkaRebalance `config:"rebalance"` + TLS *tlscommon.Config `config:"ssl"` + Username string `config:"username"` + Password string `config:"password"` + ExpandEventListFromField string `config:"expand_event_list_from_field"` } type kafkaFetch struct { diff --git a/vendor/github.com/elastic/beats/filebeat/input/kafka/input.go b/vendor/github.com/elastic/beats/filebeat/input/kafka/input.go index 98b7f15c..72cb1254 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/kafka/input.go +++ b/vendor/github.com/elastic/beats/filebeat/input/kafka/input.go @@ -19,6 +19,7 @@ package kafka import ( "context" + "encoding/json" "fmt" "strings" "sync" @@ -107,6 +108,9 @@ func (input *kafkaInput) runConsumerGroup( handler := &groupHandler{ version: input.config.Version, outlet: input.outlet, + // expandEventListFromField will be assigned the configuration option expand_event_list_from_field + expandEventListFromField: input.config.ExpandEventListFromField, + log: input.log, } input.saramaWaitGroup.Add(1) @@ -234,6 +238,10 @@ type groupHandler struct { version kafka.Version session sarama.ConsumerGroupSession outlet channel.Outleter + // if the fileset using this input expects to receive multiple messages bundled under a specific field then this value is assigned + // ex. 
in this case are the azure filesets where the events are found under the json object "records"
+	expandEventListFromField string
+	log                      *logp.Logger
 }
 
 // The metadata attached to incoming events so they can be ACKed once they've
@@ -243,11 +251,11 @@ type eventMeta struct {
 	message *sarama.ConsumerMessage
 }
 
-func (h *groupHandler) createEvent(
+func (h *groupHandler) createEvents(
 	sess sarama.ConsumerGroupSession,
 	claim sarama.ConsumerGroupClaim,
 	message *sarama.ConsumerMessage,
-) beat.Event {
+) []beat.Event {
 	timestamp := time.Now()
 	kafkaFields := common.MapStr{
 		"topic": claim.Topic(),
@@ -266,19 +274,31 @@ func (h *groupHandler) createEvent(
 	if versionOk && version.IsAtLeast(sarama.V0_11_0_0) {
 		kafkaFields["headers"] = arrayForKafkaHeaders(message.Headers)
 	}
-	event := beat.Event{
-		Timestamp: timestamp,
-		Fields: common.MapStr{
-			"message": string(message.Value),
-			"kafka": kafkaFields,
-		},
-		Private: eventMeta{
-			handler: h,
-			message: message,
-		},
-	}
-	return event
+	// if expandEventListFromField has been set, check the message for that json object and return multiple events
+	var events []beat.Event
+	var messages []string
+	if h.expandEventListFromField == "" {
+		messages = []string{string(message.Value)}
+	} else {
+		messages = h.parseMultipleMessages(message.Value)
+	}
+	for _, msg := range messages {
+		event := beat.Event{
+			Timestamp: timestamp,
+			Fields: common.MapStr{
+				"message": msg,
+				"kafka": kafkaFields,
+			},
+			Private: eventMeta{
+				handler: h,
+				message: message,
+			},
+		}
+		events = append(events, event)
+
+	}
+	return events
 }
 
 func (h *groupHandler) Setup(session sarama.ConsumerGroupSession) error {
@@ -307,8 +327,32 @@ func (h *groupHandler) ack(message *sarama.ConsumerMessage) {
 func (h *groupHandler) ConsumeClaim(sess sarama.ConsumerGroupSession, claim sarama.ConsumerGroupClaim) error {
 	for msg := range claim.Messages() {
-		event := h.createEvent(sess, claim, msg)
-		h.outlet.OnEvent(event)
+		events := h.createEvents(sess, claim, msg)
+		for _, event := range events {
+			h.outlet.OnEvent(event)
+		}
 	}
 	return nil
 }
+
+// parseMultipleMessages will try to split the message into multiple ones based on the group field provided by the configuration
+func (h *groupHandler) parseMultipleMessages(bMessage []byte) []string {
+	var obj map[string][]interface{}
+	err := json.Unmarshal(bMessage, &obj)
+	if err != nil {
+		h.log.Errorw(fmt.Sprintf("Kafka deserializing multiple messages using the group object %s", h.expandEventListFromField), "error", err)
+		return []string{}
+	}
+	var messages []string
+	if len(obj[h.expandEventListFromField]) > 0 {
+		for _, ms := range obj[h.expandEventListFromField] {
+			js, err := json.Marshal(ms)
+			if err == nil {
+				messages = append(messages, string(js))
+			} else {
+				h.log.Errorw(fmt.Sprintf("Kafka serializing message %s", ms), "error", err)
+			}
+		}
+	}
+	return messages
+}
diff --git a/vendor/github.com/elastic/beats/filebeat/input/kafka/kafka_integration_test.go b/vendor/github.com/elastic/beats/filebeat/input/kafka/kafka_integration_test.go
index 71e44cf6..ca4d03a8 100644
--- a/vendor/github.com/elastic/beats/filebeat/input/kafka/kafka_integration_test.go
+++ b/vendor/github.com/elastic/beats/filebeat/input/kafka/kafka_integration_test.go
@@ -147,16 +147,21 @@ func TestInput(t *testing.T) {
 	input.Run()
 
 	timeout := time.After(30 * time.Second)
-	for _, m := range messages {
 		select {
 		case event := <-events:
-			text, err := event.Fields.GetValue("message")
+			v, err :=
event.Fields.GetValue("message") if err != nil { t.Fatal(err) } - assert.Equal(t, text, m.message) + text, ok := v.(string) + if !ok { + t.Fatal("could not get message text from event") + } + msg := findMessage(t, text, messages) + assert.Equal(t, text, msg.message) - checkMatchingHeaders(t, event, m.headers) + checkMatchingHeaders(t, event, msg.headers) case <-timeout: t.Fatal("timeout waiting for incoming events") } @@ -178,6 +183,92 @@ func TestInput(t *testing.T) { } } +func TestInputWithMultipleEvents(t *testing.T) { + id := strconv.Itoa(rand.New(rand.NewSource(int64(time.Now().Nanosecond()))).Int()) + testTopic := fmt.Sprintf("Filebeat-TestInput-%s", id) + context := input.Context{ + Done: make(chan struct{}), + BeatDone: make(chan struct{}), + } + + // Send test messages to the topic for the input to read. + message := testMessage{ + message: "{\"records\": [{\"val\":\"val1\"}, {\"val\":\"val2\"}]}", + headers: []sarama.RecordHeader{ + recordHeader("X-Test-Header", "test header value"), + }, + } + writeToKafkaTopic(t, testTopic, message.message, message.headers, time.Second*20) + + // Setup the input config + config := common.MustNewConfigFrom(common.MapStr{ + "hosts": getTestKafkaHost(), + "topics": []string{testTopic}, + "group_id": "filebeat", + "wait_close": 0, + "expand_event_list_from_field": "records", + }) + + // Route input events through our capturer instead of sending through ES. + events := make(chan beat.Event, 100) + defer close(events) + capturer := NewEventCapturer(events) + defer capturer.Close() + connector := channel.ConnectorFunc(func(_ *common.Config, _ beat.ClientConfig) (channel.Outleter, error) { + return channel.SubOutlet(capturer), nil + }) + + input, err := NewInput(config, connector, context) + if err != nil { + t.Fatal(err) + } + + // Run the input and wait for finalization + input.Run() + + timeout := time.After(30 * time.Second) + select { + case event := <-events: + text, err := event.Fields.GetValue("message") + if err != nil { + t.Fatal(err) + } + msgs := []string{"{\"val\":\"val1\"}", "{\"val\":\"val2\"}"} + assert.Contains(t, msgs, text) + checkMatchingHeaders(t, event, message.headers) + case <-timeout: + t.Fatal("timeout waiting for incoming events") + } + + // Close the done channel and make sure the beat shuts down in a reasonable + // amount of time. 
+ close(context.Done) + didClose := make(chan struct{}) + go func() { + input.Wait() + close(didClose) + }() + + select { + case <-time.After(30 * time.Second): + t.Fatal("timeout waiting for beat to shut down") + case <-didClose: + } +} + +func findMessage(t *testing.T, text string, msgs []testMessage) *testMessage { + var msg *testMessage + for _, m := range msgs { + if text == m.message { + msg = &m + break + } + } + + assert.NotNil(t, msg) + return msg +} + func checkMatchingHeaders( t *testing.T, event beat.Event, expected []sarama.RecordHeader, ) { diff --git a/vendor/github.com/elastic/beats/filebeat/input/log/harvester.go b/vendor/github.com/elastic/beats/filebeat/input/log/harvester.go index 8abe9fcf..fd5c8c8d 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/log/harvester.go +++ b/vendor/github.com/elastic/beats/filebeat/input/log/harvester.go @@ -360,15 +360,21 @@ func (h *Harvester) onMessage( jsonFields = f.(common.MapStr) } + var meta common.MapStr timestamp := message.Ts - if h.config.JSON != nil && len(jsonFields) > 0 { - ts := readjson.MergeJSONFields(fields, jsonFields, &text, *h.config.JSON) + id, ts := readjson.MergeJSONFields(fields, jsonFields, &text, *h.config.JSON) if !ts.IsZero() { // there was a `@timestamp` key in the event, so overwrite // the resulting timestamp timestamp = ts } + + if id != "" { + meta = common.MapStr{ + "id": id, + } + } } else if &text != nil { if fields == nil { fields = common.MapStr{} @@ -379,6 +385,7 @@ func (h *Harvester) onMessage( err := forwarder.Send(beat.Event{ Timestamp: timestamp, Fields: fields, + Meta: meta, Private: state, }) return err == nil diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go index 10365c6d..71efa8bc 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/input.go @@ -128,24 +128,8 @@ func NewInput( forwarder := harvester.NewForwarder(out) cb := func(data []byte, metadata inputsource.NetworkMetadata) { - ev := newEvent() - Parse(data, ev) - if !ev.IsValid() { - log.Errorw("can't parse event as syslog rfc3164", "message", string(data)) - // On error revert to the raw bytes content, we need a better way to communicate this kind of - // error upstream this should be a global effort. 
- forwarder.Send(beat.Event{ - Timestamp: time.Now(), - Meta: common.MapStr{ - "truncated": metadata.Truncated, - }, - Fields: common.MapStr{ - "message": string(data), - }, - }) - } else { - forwarder.Send(createEvent(ev, metadata, time.Local, log)) - } + ev := parseAndCreateEvent(data, metadata, time.Local, log) + forwarder.Send(ev) } server, err := factory(cb, config.Protocol) @@ -201,11 +185,6 @@ func (p *Input) Wait() { func createEvent(ev *event, metadata inputsource.NetworkMetadata, timezone *time.Location, log *logp.Logger) beat.Event { f := common.MapStr{ "message": strings.TrimRight(ev.Message(), "\n"), - "log": common.MapStr{ - "source": common.MapStr{ - "address": metadata.RemoteAddr.String(), - }, - }, } syslog := common.MapStr{} @@ -254,13 +233,31 @@ func createEvent(ev *event, metadata inputsource.NetworkMetadata, timezone *time f["event.sequence"] = ev.Sequence() } - return beat.Event{ - Timestamp: ev.Timestamp(timezone), + return newBeatEvent(ev.Timestamp(timezone), metadata, f) +} + +func parseAndCreateEvent(data []byte, metadata inputsource.NetworkMetadata, timezone *time.Location, log *logp.Logger) beat.Event { + ev := newEvent() + Parse(data, ev) + if !ev.IsValid() { + log.Errorw("can't parse event as syslog rfc3164", "message", string(data)) + return newBeatEvent(time.Now(), metadata, common.MapStr{ + "message": string(data), + }) + } + return createEvent(ev, metadata, time.Local, log) +} + +func newBeatEvent(timestamp time.Time, metadata inputsource.NetworkMetadata, fields common.MapStr) beat.Event { + event := beat.Event{ + Timestamp: timestamp, Meta: common.MapStr{ "truncated": metadata.Truncated, }, - Fields: f, + Fields: fields, } + event.Fields.Put("log.source.address", metadata.RemoteAddr.String()) + return event } func mapValueToName(v int, m mapper) (string, error) { diff --git a/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go b/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go index 566a9404..f52d235e 100644 --- a/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go +++ b/vendor/github.com/elastic/beats/filebeat/input/syslog/input_test.go @@ -193,6 +193,58 @@ func TestSequence(t *testing.T) { }) } +func TestParseAndCreateEvent(t *testing.T) { + cases := map[string]struct { + data []byte + expected common.MapStr + }{ + "valid data": { + data: []byte("<34>Oct 11 22:14:15 mymachine su[230]: 'su root' failed for lonvick on /dev/pts/8"), + expected: common.MapStr{ + "event": common.MapStr{"severity": 2}, + "hostname": "mymachine", + "log": common.MapStr{ + "source": common.MapStr{ + "address": "127.0.0.1", + }, + }, + "message": "'su root' failed for lonvick on /dev/pts/8", + "process": common.MapStr{"pid": 230, "program": "su"}, + "syslog": common.MapStr{ + "facility": 4, + "facility_label": "security/authorization", + "priority": 34, + "severity_label": "Critical", + }, + }, + }, + + "invalid data": { + data: []byte("invalid"), + expected: common.MapStr{ + "log": common.MapStr{ + "source": common.MapStr{ + "address": "127.0.0.1", + }, + }, + "message": "invalid", + }, + }, + } + + tz := time.Local + log := logp.NewLogger("syslog") + metadata := dummyMetadata() + + for title, c := range cases { + t.Run(title, func(t *testing.T) { + event := parseAndCreateEvent(c.data, metadata, tz, log) + assert.Equal(t, c.expected, event.Fields) + assert.Equal(t, metadata.Truncated, event.Meta["truncated"]) + }) + } +} + func dummyMetadata() inputsource.NetworkMetadata { ip := "127.0.0.1" parsedIP := net.ParseIP(ip) diff 
--git a/vendor/github.com/elastic/beats/filebeat/module/apache/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/module/apache/_meta/docs.asciidoc index ef2f4196..3cfb71c1 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/_meta/docs.asciidoc @@ -68,6 +68,19 @@ include::../include/var-paths.asciidoc[] include::../include/timezone-support.asciidoc[] +[float] +=== Virtual Host + +See customlog documentation https://httpd.apache.org/docs/2.4/en/mod/mod_log_config.html +Add %v config in httpd.conf in log section +["source","sh",subs="attributes"] +----- + # Replace + LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined + # By + LogFormat "%v %h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined +----- + :has-dashboards!: :fileset_ex!: diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/access/ingest/default.json b/vendor/github.com/elastic/beats/filebeat/module/apache/access/ingest/default.json index 5a0b48ca..6edd5b02 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/access/ingest/default.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/access/ingest/default.json @@ -1,99 +1,100 @@ { - "description": "Pipeline for parsing Apache HTTP Server access logs. Requires the geoip and user_agent plugins.", - "processors": [ - { - "grok": { - "field": "message", - "patterns": [ - "%{IPORHOST:source.address} - %{DATA:user.name} \\[%{HTTPDATE:apache.access.time}\\] \"(?:%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}|-)?\" %{NUMBER:http.response.status_code:long} (?:%{NUMBER:http.response.body.bytes:long}|-)( \"%{DATA:http.request.referrer}\")?( \"%{DATA:user_agent.original}\")?", - "%{IPORHOST:source.address} - %{DATA:user.name} \\[%{HTTPDATE:apache.access.time}\\] \"-\" %{NUMBER:http.response.status_code:long} -", - "\\[%{HTTPDATE:apache.access.time}\\] %{IPORHOST:source.address} %{DATA:apache.access.ssl.protocol} %{DATA:apache.access.ssl.cipher} \"%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}\" %{NUMBER:http.response.body.bytes:long}" - ], - "ignore_missing": true - } - }, - { - "remove": { - "field": "message" - } - }, - { - "grok": { - "field": "source.address", - "ignore_missing": true, - "patterns": [ - "^(%{IP:source.ip}|%{HOSTNAME:source.domain})$" - ] - } - }, - { - "rename": { - "field": "@timestamp", - "target_field": "event.created" - } - }, - { - "date": { - "field": "apache.access.time", - "target_field": "@timestamp", - "formats": [ - "dd/MMM/yyyy:H:m:s Z" - ], - "ignore_failure": true - } - }, - { - "remove": { - "field": "apache.access.time", - "ignore_failure": true - } - }, - { - "user_agent": { - "field": "user_agent.original", - "ignore_failure": true - } - }, - { - "geoip": { - "field": "source.ip", - "target_field": "source.geo", - "ignore_missing": true - } - }, - { - "geoip": { - "database_file": "GeoLite2-ASN.mmdb", - "field": "source.ip", - "target_field": "source.as", - "properties": [ - "asn", - "organization_name" - ], - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.asn", - "target_field": "source.as.number", - "ignore_missing": true - } - }, - { - "rename": { - "field": "source.as.organization_name", - "target_field": "source.as.organization.name", - "ignore_missing": true - } - } - ], - "on_failure": [ - { - "set": { - "field": "error.message", - "value": "{{ 
_ingest.on_failure_message }}" - } - } - ] + "description": "Pipeline for parsing Apache HTTP Server access logs. Requires the geoip and user_agent plugins.", + "processors": [ + { + "grok": { + "field": "message", + "patterns": [ + "%{IPORHOST:destination.domain} %{IPORHOST:source.ip} - %{DATA:user.name} \\[%{HTTPDATE:apache.access.time}\\] \"(?:%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}|-)?\" %{NUMBER:http.response.status_code:long} (?:%{NUMBER:http.response.body.bytes:long}|-)( \"%{DATA:http.request.referrer}\")?( \"%{DATA:user_agent.original}\")?", + "%{IPORHOST:source.address} - %{DATA:user.name} \\[%{HTTPDATE:apache.access.time}\\] \"(?:%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}|-)?\" %{NUMBER:http.response.status_code:long} (?:%{NUMBER:http.response.body.bytes:long}|-)( \"%{DATA:http.request.referrer}\")?( \"%{DATA:user_agent.original}\")?", + "%{IPORHOST:source.address} - %{DATA:user.name} \\[%{HTTPDATE:apache.access.time}\\] \"-\" %{NUMBER:http.response.status_code:long} -", + "\\[%{HTTPDATE:apache.access.time}\\] %{IPORHOST:source.address} %{DATA:apache.access.ssl.protocol} %{DATA:apache.access.ssl.cipher} \"%{WORD:http.request.method} %{DATA:url.original} HTTP/%{NUMBER:http.version}\" (-|%{NUMBER:http.response.body.bytes:long})" + ], + "ignore_missing": true + } + }, + { + "remove": { + "field": "message" + } + }, + { + "grok": { + "field": "source.address", + "ignore_missing": true, + "patterns": [ + "^(%{IP:source.ip}|%{HOSTNAME:source.domain})$" + ] + } + }, + { + "rename": { + "field": "@timestamp", + "target_field": "event.created" + } + }, + { + "date": { + "field": "apache.access.time", + "target_field": "@timestamp", + "formats": [ + "dd/MMM/yyyy:H:m:s Z" + ], + "ignore_failure": true + } + }, + { + "remove": { + "field": "apache.access.time", + "ignore_failure": true + } + }, + { + "user_agent": { + "field": "user_agent.original", + "ignore_failure": true + } + }, + { + "geoip": { + "field": "source.ip", + "target_field": "source.geo", + "ignore_missing": true + } + }, + { + "geoip": { + "database_file": "GeoLite2-ASN.mmdb", + "field": "source.ip", + "target_field": "source.as", + "properties": [ + "asn", + "organization_name" + ], + "ignore_missing": true + } + }, + { + "rename": { + "field": "source.as.asn", + "target_field": "source.as.number", + "ignore_missing": true + } + }, + { + "rename": { + "field": "source.as.organization_name", + "target_field": "source.as.organization.name", + "ignore_missing": true + } + } + ], + "on_failure": [ + { + "set": { + "field": "error.message", + "value": "{{ _ingest.on_failure_message }}" + } + } + ] } diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log index 5bc28b27..5b65e323 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log @@ -1 +1,2 @@ [10/Aug/2018:09:45:56 +0200] 172.30.0.119 TLSv1.2 ECDHE-RSA-AES128-GCM-SHA256 "GET /nagiosxi/ajaxhelper.php?cmd=getxicoreajax&opts=%7B%22func%22%3A%22get_admin_tasks_html%22%2C%22args%22%3A%22%22%7D&nsp=b5c7d5d4b6f7d0cf0c92f9cbdf737f6a5c838218425e6ae21 HTTP/1.1" 1375 +[16/Oct/2019:11:53:47 +0200] 11.19.0.217 TLSv1.2 ECDHE-RSA-AES128-GCM-SHA256 "GET 
/appl/ajaxhelper.php?cmd=getxicoreajax&opts=%7B%22func%22%3A%22get_pagetop_alert_content_html%22%2C%22args%22%3A%22%22%7D&nsp=c2700eab9797eda8a9f65a3ab17a6adbceccd60a6cca7708650a5923950d HTTP/1.1" - diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log-expected.json index 17279c34..8d2749b2 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/ssl-request.log-expected.json @@ -15,5 +15,25 @@ "source.address": "172.30.0.119", "source.ip": "172.30.0.119", "url.original": "/nagiosxi/ajaxhelper.php?cmd=getxicoreajax&opts=%7B%22func%22%3A%22get_admin_tasks_html%22%2C%22args%22%3A%22%22%7D&nsp=b5c7d5d4b6f7d0cf0c92f9cbdf737f6a5c838218425e6ae21" + }, + { + "@timestamp": "2019-10-16T09:53:47.000Z", + "apache.access.ssl.cipher": "ECDHE-RSA-AES128-GCM-SHA256", + "apache.access.ssl.protocol": "TLSv1.2", + "event.dataset": "apache.access", + "event.module": "apache", + "fileset.name": "access", + "http.request.method": "GET", + "http.version": "1.1", + "input.type": "log", + "log.offset": 276, + "service.type": "apache", + "source.address": "11.19.0.217", + "source.geo.continent_name": "North America", + "source.geo.country_iso_code": "US", + "source.geo.location.lat": 37.751, + "source.geo.location.lon": -97.822, + "source.ip": "11.19.0.217", + "url.original": "/appl/ajaxhelper.php?cmd=getxicoreajax&opts=%7B%22func%22%3A%22get_pagetop_alert_content_html%22%2C%22args%22%3A%22%22%7D&nsp=c2700eab9797eda8a9f65a3ab17a6adbceccd60a6cca7708650a5923950d" } ] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/test-vhost.log b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/test-vhost.log new file mode 100644 index 00000000..64a432e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/test-vhost.log @@ -0,0 +1 @@ +vhost1.domaine.fr 192.168.33.2 - - [26/Dec/2016:16:22:14 +0000] "GET /hello HTTP/1.1" 404 499 "-" "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0" diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/test-vhost.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/test-vhost.log-expected.json new file mode 100644 index 00000000..5d8d41a8 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/access/test/test-vhost.log-expected.json @@ -0,0 +1,27 @@ +[ + { + "@timestamp": "2016-12-26T16:22:14.000Z", + "destination.domain": "vhost1.domaine.fr", + "event.dataset": "apache.access", + "event.module": "apache", + "fileset.name": "access", + "http.request.method": "GET", + "http.request.referrer": "-", + "http.response.body.bytes": 499, + "http.response.status_code": 404, + "http.version": "1.1", + "input.type": "log", + "log.offset": 0, + "service.type": "apache", + "source.ip": "192.168.33.2", + "url.original": "/hello", + "user.name": "-", + "user_agent.device.name": "Other", + "user_agent.name": "Firefox", + "user_agent.original": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:50.0) Gecko/20100101 Firefox/50.0", + "user_agent.os.full": "Mac OS X 10.12", + "user_agent.os.name": "Mac OS X", + "user_agent.os.version": "10.12", + "user_agent.version": "50.0" + } +] \ No newline at end of file diff --git 
a/vendor/github.com/elastic/beats/filebeat/module/apache/error/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/apache/error/ingest/pipeline.json index 8a126878..89a6624a 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/error/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/error/ingest/pipeline.json @@ -16,13 +16,21 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "apache.error.timestamp", "target_field": "@timestamp", "formats": [ "EEE MMM dd H:m:s yyyy", "EEE MMM dd H:m:s.SSSSSS yyyy" ], - "ignore_failure": true + "on_failure": [ + { + "append": { + "field": "error.message", + "value": "{{ _ingest.on_failure_message }}" + } + } + ] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/darwin-2.4.23.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/darwin-2.4.23.log-expected.json index 3098d1e6..004e23a4 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/darwin-2.4.23.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/darwin-2.4.23.log-expected.json @@ -1,10 +1,10 @@ [ { - "@timestamp": "2016-12-26T16:15:55.103Z", + "@timestamp": "2016-12-26T16:15:55.103-02:00", "apache.error.module": "mpm_prefork", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "notice", @@ -14,11 +14,11 @@ "service.type": "apache" }, { - "@timestamp": "2016-12-26T16:15:55.103Z", + "@timestamp": "2016-12-26T16:15:55.103-02:00", "apache.error.module": "core", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "notice", diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/test.log-expected.json index 598ec3b9..c7b88c46 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/test.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2016-12-26T16:22:08.000Z", + "@timestamp": "2016-12-26T16:22:08.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -14,11 +14,11 @@ "source.ip": "192.168.33.1" }, { - "@timestamp": "2016-12-26T16:15:55.103Z", + "@timestamp": "2016-12-26T16:15:55.103-02:00", "apache.error.module": "core", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "notice", @@ -28,11 +28,11 @@ "service.type": "apache" }, { - "@timestamp": "2011-09-09T10:42:29.902Z", + "@timestamp": "2011-09-09T10:42:29.902-02:00", "apache.error.module": "core", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -54,11 +54,11 @@ "source.ip": "72.15.99.187" }, { - "@timestamp": "2019-06-27T06:58:09.169Z", + "@timestamp": "2019-06-27T06:58:09.169-02:00", "apache.error.module": "include", "event.dataset": "apache.error", 
"event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "warn", diff --git a/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/ubuntu-2.2.22.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/ubuntu-2.2.22.log-expected.json index 70761f7c..6133c89a 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/ubuntu-2.2.22.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/apache/error/test/ubuntu-2.2.22.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2016-12-26T16:17:53.000Z", + "@timestamp": "2016-12-26T16:17:53.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "notice", @@ -12,10 +12,10 @@ "service.type": "apache" }, { - "@timestamp": "2016-12-26T16:22:00.000Z", + "@timestamp": "2016-12-26T16:22:00.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -26,10 +26,10 @@ "source.ip": "192.168.33.1" }, { - "@timestamp": "2016-12-26T16:22:08.000Z", + "@timestamp": "2016-12-26T16:22:08.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -40,10 +40,10 @@ "source.ip": "192.168.33.1" }, { - "@timestamp": "2016-12-26T16:22:08.000Z", + "@timestamp": "2016-12-26T16:22:08.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -54,10 +54,10 @@ "source.ip": "192.168.33.1" }, { - "@timestamp": "2016-12-26T16:22:10.000Z", + "@timestamp": "2016-12-26T16:22:10.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -68,10 +68,10 @@ "source.ip": "192.168.33.1" }, { - "@timestamp": "2016-12-26T16:22:13.000Z", + "@timestamp": "2016-12-26T16:22:13.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -82,10 +82,10 @@ "source.ip": "192.168.33.1" }, { - "@timestamp": "2016-12-26T16:22:17.000Z", + "@timestamp": "2016-12-26T16:22:17.000-02:00", "event.dataset": "apache.error", "event.module": "apache", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/config/audit.yml b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/config/audit.yml index d96242ac..e8c035e3 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/config/audit.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/config/audit.yml @@ -6,4 +6,5 @@ paths: exclude_files: [".gz$"] processors: -- add_locale: ~ +# Locale for timezone is only needed in non-json logs +- add_locale.when.not.regexp.message: "^{" diff --git 
a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/ingest/pipeline-plaintext.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/ingest/pipeline-plaintext.json index 448028cf..345df18b 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/ingest/pipeline-plaintext.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/ingest/pipeline-plaintext.json @@ -54,12 +54,13 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "elasticsearch.audit.@timestamp", "target_field": "@timestamp", "formats": [ "yyyy-MM-dd'T'HH:mm:ss,SSS" ], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-access.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-access.log-expected.json index e24d72d1..63674428 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-access.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-access.log-expected.json @@ -1,11 +1,11 @@ [ { - "@timestamp": "2018-06-19T05:16:15.549Z", + "@timestamp": "2018-06-19T05:16:15.549-02:00", "elasticsearch.audit.event_type": "authentication_failed", "elasticsearch.audit.layer": "rest", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 0, @@ -16,13 +16,13 @@ "user.name": "i030648" }, { - "@timestamp": "2018-06-19T05:07:52.304Z", + "@timestamp": "2018-06-19T05:07:52.304-02:00", "elasticsearch.audit.event_type": "authentication_failed", "elasticsearch.audit.layer": "rest", "elasticsearch.node.name": "v_VJhjV", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 155, @@ -33,7 +33,7 @@ "user.name": "rado" }, { - "@timestamp": "2018-06-19T05:00:15.778Z", + "@timestamp": "2018-06-19T05:00:15.778-02:00", "elasticsearch.audit.action": "indices:data/read/scroll/clear", "elasticsearch.audit.event_type": "access_granted", "elasticsearch.audit.layer": "transport", @@ -41,7 +41,7 @@ "elasticsearch.audit.request.name": "ClearScrollRequest", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 306, @@ -51,13 +51,13 @@ "user.name": "_xpack_security" }, { - "@timestamp": "2018-06-19T05:07:45.544Z", + "@timestamp": "2018-06-19T05:07:45.544-02:00", "elasticsearch.audit.event_type": "anonymous_access_denied", "elasticsearch.audit.layer": "rest", "elasticsearch.node.name": "v_VJhjV", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 519, @@ -67,12 +67,12 @@ "url.original": "/_xpack/security/_authenticate" }, { - "@timestamp": "2018-06-19T05:26:27.268Z", + "@timestamp": "2018-06-19T05:26:27.268-02:00", "elasticsearch.audit.event_type": "authentication_failed", "elasticsearch.audit.layer": "rest", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + 
"event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 654, @@ -83,7 +83,7 @@ "user.name": "N078801" }, { - "@timestamp": "2018-06-19T05:55:26.898Z", + "@timestamp": "2018-06-19T05:55:26.898-02:00", "elasticsearch.audit.action": "cluster:monitor/main", "elasticsearch.audit.event_type": "access_denied", "elasticsearch.audit.layer": "transport", @@ -91,7 +91,7 @@ "elasticsearch.audit.request.name": "MainRequest", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 802, @@ -101,13 +101,13 @@ "user.name": "_anonymous" }, { - "@timestamp": "2018-06-19T05:24:15.190Z", + "@timestamp": "2018-06-19T05:24:15.190-02:00", "elasticsearch.audit.event_type": "authentication_failed", "elasticsearch.audit.layer": "rest", "elasticsearch.node.name": "v_VJhjV", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "http.request.body.content": "body", "input.type": "log", @@ -119,7 +119,7 @@ "user.name": "elastic" }, { - "@timestamp": "2019-01-08T14:15:02.011Z", + "@timestamp": "2019-01-08T14:15:02.011-02:00", "elasticsearch.audit.action": "indices:data/read/search[free_context]", "elasticsearch.audit.event_type": "access_granted", "elasticsearch.audit.indices": [ @@ -142,7 +142,7 @@ "elasticsearch.node.name": "NodeName-0", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "input.type": "log", "log.offset": 1210, @@ -152,7 +152,7 @@ "user.name": "username" }, { - "@timestamp": "2019-01-27T20:04:27.244Z", + "@timestamp": "2019-01-27T20:04:27.244-02:00", "elasticsearch.audit.event_type": "authentication_success", "elasticsearch.audit.layer": "rest", "elasticsearch.audit.realm": "default_file", @@ -160,7 +160,7 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "audit", "http.request.body.content": "{\"metadata\":{\"intelligence\":7},\"full_name\":\"Jack Nicholson\",\"roles\":[\"admin\",\"other_role1\"", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-730.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-730.log-expected.json index 3cbadf62..5c1518d7 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-730.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-730.log-expected.json @@ -18,7 +18,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 0, @@ -48,7 +47,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 423, @@ -78,7 +76,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 846, @@ -107,7 +104,6 @@ "event.action": "access_granted", 
"event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 1269, @@ -136,7 +132,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 1706, @@ -162,7 +157,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 2170, @@ -188,7 +182,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 2576, @@ -217,7 +210,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 2984, @@ -246,7 +238,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 3402, @@ -272,7 +263,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 3823, diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-docker.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-docker.log-expected.json index 0afc7c8e..a2da63f6 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-docker.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit-docker.log-expected.json @@ -8,7 +8,6 @@ "event.action": "anonymous_access_denied", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "http.request.method": "GET", "input.type": "log", @@ -29,7 +28,6 @@ "event.action": "authentication_failed", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "http.request.method": "GET", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit.log-expected.json index 4dd2cf62..4155cfd8 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/audit/test/test-audit.log-expected.json @@ -7,7 +7,6 @@ "event.action": "authentication_failed", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 0, @@ -27,7 +26,6 @@ "event.action": "authentication_failed", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 274, @@ -53,7 +51,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", 
"fileset.name": "audit", "input.type": "log", "log.offset": 558, @@ -78,7 +75,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 941, @@ -103,7 +99,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 1309, @@ -131,7 +126,6 @@ "event.action": "access_granted", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "input.type": "log", "log.offset": 1676, @@ -153,7 +147,6 @@ "event.action": "authentication_success", "event.dataset": "elasticsearch.audit", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "audit", "http.request.body.content": "\n{\n \"query\" : {\n \"term\" : { \"user\" : \"kimchy\" }\n }\n}\n", "http.request.method": "GET", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/config/log.yml b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/config/log.yml index fa541cde..14cdbbe9 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/config/log.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/config/log.yml @@ -10,4 +10,5 @@ multiline: match: after processors: -- add_locale: ~ +# Locale for timezone is only needed in non-json logs +- add_locale.when.not.regexp.message: "^{" diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/ingest/pipeline-plaintext.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/ingest/pipeline-plaintext.json index b3d95fad..d9c4faad 100755 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/ingest/pipeline-plaintext.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/ingest/pipeline-plaintext.json @@ -22,12 +22,13 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "elasticsearch.deprecation.timestamp", "target_field": "@timestamp", "formats": [ "yyyy-MM-dd'T'HH:mm:ss,SSS" ], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/elasticsearch_deprecation.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/elasticsearch_deprecation.log-expected.json index ce4a3133..a1c8699c 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/elasticsearch_deprecation.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/elasticsearch_deprecation.log-expected.json @@ -1,10 +1,10 @@ [ { - "@timestamp": "2018-04-23T16:40:13.737Z", + "@timestamp": "2018-04-23T16:40:13.737-02:00", "elasticsearch.component": "o.e.d.a.a.i.t.p.PutIndexTemplateRequest", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -13,11 +13,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-04-23T16:40:13.862Z", + "@timestamp": "2018-04-23T16:40:13.862-02:00", 
"elasticsearch.component": "o.e.d.a.a.i.t.p.PutIndexTemplateRequest", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -26,11 +26,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-04-23T16:40:14.792Z", + "@timestamp": "2018-04-23T16:40:14.792-02:00", "elasticsearch.component": "o.e.d.a.a.i.t.p.PutIndexTemplateRequest", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -39,11 +39,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-04-23T16:40:15.127Z", + "@timestamp": "2018-04-23T16:40:15.127-02:00", "elasticsearch.component": "o.e.d.a.a.i.t.p.PutIndexTemplateRequest", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/other_elasticsearch_deprecation.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/other_elasticsearch_deprecation.log-expected.json index 9b09a117..79e81424 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/other_elasticsearch_deprecation.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/other_elasticsearch_deprecation.log-expected.json @@ -1,10 +1,10 @@ [ { - "@timestamp": "2017-11-30T13:38:16.911Z", + "@timestamp": "2017-11-30T13:38:16.911-02:00", "elasticsearch.component": "o.e.d.c.ParseField", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -13,11 +13,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T13:38:16.941Z", + "@timestamp": "2017-11-30T13:38:16.941-02:00", "elasticsearch.component": "o.e.d.c.ParseField", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -26,11 +26,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T13:39:28.986Z", + "@timestamp": "2017-11-30T13:39:28.986-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -39,11 +39,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T13:39:36.339Z", + "@timestamp": "2017-11-30T13:39:36.339-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -52,11 +52,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T13:40:49.540Z", + "@timestamp": "2017-11-30T13:40:49.540-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", 
"event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -65,11 +65,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T14:08:37.413Z", + "@timestamp": "2017-11-30T14:08:37.413-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -78,11 +78,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T14:08:37.413Z", + "@timestamp": "2017-11-30T14:08:37.413-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -91,11 +91,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T14:08:46.006Z", + "@timestamp": "2017-11-30T14:08:46.006-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -104,11 +104,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-11-30T14:08:46.006Z", + "@timestamp": "2017-11-30T14:08:46.006-02:00", "elasticsearch.component": "o.e.d.i.m.UidFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -117,11 +117,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-01T14:05:54.017Z", + "@timestamp": "2017-12-01T14:05:54.017-02:00", "elasticsearch.component": "o.e.d.i.m.AllFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -130,11 +130,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-01T14:05:54.019Z", + "@timestamp": "2017-12-01T14:05:54.019-02:00", "elasticsearch.component": "o.e.d.i.m.AllFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -143,11 +143,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-01T14:06:52.059Z", + "@timestamp": "2017-12-01T14:06:52.059-02:00", "elasticsearch.component": "o.e.d.i.m.AllFieldMapper", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -156,11 +156,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-01T14:46:10.428Z", + "@timestamp": "2017-12-01T14:46:10.428-02:00", "elasticsearch.component": "o.e.d.s.a.InternalOrder$Parser", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", 
"log.level": "WARN", @@ -169,11 +169,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-04T16:17:18.271Z", + "@timestamp": "2017-12-04T16:17:18.271-02:00", "elasticsearch.component": "o.e.d.a.a.i.t.p.PutIndexTemplateRequest", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -182,11 +182,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-04T16:17:18.282Z", + "@timestamp": "2017-12-04T16:17:18.282-02:00", "elasticsearch.component": "o.e.d.i.m.MapperService", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -195,11 +195,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2017-12-04T16:20:43.248Z", + "@timestamp": "2017-12-04T16:20:43.248-02:00", "elasticsearch.component": "o.e.d.i.m.MapperService", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/test-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/test-json.log-expected.json index 487c5f83..6f6b1731 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/test-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/deprecation/test/test-json.log-expected.json @@ -8,7 +8,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -25,7 +24,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -42,7 +40,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -59,7 +56,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -76,7 +72,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -93,7 +88,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -110,7 +104,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -127,7 +120,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": 
"elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -144,7 +136,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -161,7 +152,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -178,7 +168,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -195,7 +184,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", @@ -212,7 +200,6 @@ "elasticsearch.node.name": "es1_1", "event.dataset": "elasticsearch.deprecation", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "deprecation", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/config/log.yml b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/config/log.yml index 37c2e585..7d7e969a 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/config/log.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/config/log.yml @@ -10,5 +10,5 @@ multiline: match: after processors: -- add_locale: ~ - +# Locale for timezone is only needed in non-json logs +- add_locale.when.not.regexp.message: "^{" diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/ingest/pipeline-plaintext.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/ingest/pipeline-plaintext.json index 0c9779ac..b1752133 100755 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/ingest/pipeline-plaintext.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/ingest/pipeline-plaintext.json @@ -28,12 +28,13 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "elasticsearch.server.timestamp", "target_field": "@timestamp", "formats": [ "yyyy-MM-dd'T'HH:mm:ss,SSS" ], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/elasticsearch.624.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/elasticsearch.624.log-expected.json index 934ccd6d..436b0229 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/elasticsearch.624.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/elasticsearch.624.log-expected.json @@ -1,11 +1,11 @@ [ { - "@timestamp": "2018-05-17T08:19:35.939Z", + "@timestamp": "2018-05-17T08:19:35.939-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ 
-14,12 +14,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.089Z", + "@timestamp": "2018-05-17T08:19:36.089-02:00", "elasticsearch.component": "o.e.e.NodeEnvironment", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -28,12 +28,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.090Z", + "@timestamp": "2018-05-17T08:19:36.090-02:00", "elasticsearch.component": "o.e.e.NodeEnvironment", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -42,11 +42,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.116Z", + "@timestamp": "2018-05-17T08:19:36.116-02:00", "elasticsearch.component": "o.e.n.Node", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -55,11 +55,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.117Z", + "@timestamp": "2018-05-17T08:19:36.117-02:00", "elasticsearch.component": "o.e.n.Node", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -68,11 +68,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.117Z", + "@timestamp": "2018-05-17T08:19:36.117-02:00", "elasticsearch.component": "o.e.n.Node", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -81,12 +81,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.563Z", + "@timestamp": "2018-05-17T08:19:37.563-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -95,12 +95,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.564Z", + "@timestamp": "2018-05-17T08:19:37.564-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -109,12 +109,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.564Z", + "@timestamp": "2018-05-17T08:19:37.564-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -123,12 +123,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.564Z", + "@timestamp": "2018-05-17T08:19:37.564-02:00", 
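A second detail shared by the pipeline hunks above: `"ignore_failure": true` is replaced by an explicit `on_failure` handler. The same handler, pretty-printed from the patch's one-liner:

    "on_failure": [
      {
        "append": {
          "field": "error.message",
          "value": "{{ _ingest.on_failure_message }}"
        }
      }
    ]

Rather than silently swallowing a timestamp parse failure and leaving `@timestamp` at the harvest time, the ingest error message is now appended to `error.message`, so malformed timestamps become visible on the indexed event.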
"elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -137,12 +137,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.564Z", + "@timestamp": "2018-05-17T08:19:37.564-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -151,12 +151,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.564Z", + "@timestamp": "2018-05-17T08:19:37.564-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -165,12 +165,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.565Z", + "@timestamp": "2018-05-17T08:19:37.565-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -179,12 +179,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.565Z", + "@timestamp": "2018-05-17T08:19:37.565-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -193,12 +193,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.565Z", + "@timestamp": "2018-05-17T08:19:37.565-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -207,12 +207,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.565Z", + "@timestamp": "2018-05-17T08:19:37.565-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -221,12 +221,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.566Z", + "@timestamp": "2018-05-17T08:19:37.566-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -235,12 +235,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.566Z", + "@timestamp": "2018-05-17T08:19:37.566-02:00", 
"elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -249,12 +249,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.566Z", + "@timestamp": "2018-05-17T08:19:37.566-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -263,12 +263,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.566Z", + "@timestamp": "2018-05-17T08:19:37.566-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -277,12 +277,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:37.567Z", + "@timestamp": "2018-05-17T08:19:37.567-02:00", "elasticsearch.component": "o.e.p.PluginsService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -291,12 +291,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:43.741Z", + "@timestamp": "2018-05-17T08:19:43.741-02:00", "elasticsearch.component": "o.e.d.DiscoveryModule", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -305,11 +305,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:45.090Z", + "@timestamp": "2018-05-17T08:19:45.090-02:00", "elasticsearch.component": "o.e.n.Node", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -318,12 +318,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:45.090Z", + "@timestamp": "2018-05-17T08:19:45.090-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -332,12 +332,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:45.482Z", + "@timestamp": "2018-05-17T08:19:45.482-02:00", "elasticsearch.component": "o.e.t.TransportService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -346,12 +346,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:48.816Z", + "@timestamp": "2018-05-17T08:19:48.816-02:00", "elasticsearch.component": "o.e.c.s.MasterService", 
"elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -360,12 +360,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:48.826Z", + "@timestamp": "2018-05-17T08:19:48.826-02:00", "elasticsearch.component": "o.e.c.s.ClusterApplierService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -374,12 +374,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:48.895Z", + "@timestamp": "2018-05-17T08:19:48.895-02:00", "elasticsearch.component": "o.e.h.n.Netty4HttpServerTransport", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -388,12 +388,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:48.895Z", + "@timestamp": "2018-05-17T08:19:48.895-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -402,12 +402,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:49.354Z", + "@timestamp": "2018-05-17T08:19:49.354-02:00", "elasticsearch.component": "o.e.g.GatewayService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -416,12 +416,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:50.077Z", + "@timestamp": "2018-05-17T08:19:50.077-02:00", "elasticsearch.component": "o.e.c.r.a.AllocationService", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -430,12 +430,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:20:18.871Z", + "@timestamp": "2018-05-17T08:20:18.871-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -444,13 +444,13 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:20:19.467Z", + "@timestamp": "2018-05-17T08:20:19.467-02:00", "elasticsearch.component": "o.e.c.m.MetaDataCreateIndexService", "elasticsearch.index.name": "metricbeat-7.0.0-alpha1-2018.05.17", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -459,12 +459,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:20:48.886Z", + 
"@timestamp": "2018-05-17T08:20:48.886-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -473,12 +473,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:21:18.895Z", + "@timestamp": "2018-05-17T08:21:18.895-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -487,12 +487,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:21:48.904Z", + "@timestamp": "2018-05-17T08:21:48.904-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -501,12 +501,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:22:18.911Z", + "@timestamp": "2018-05-17T08:22:18.911-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -515,12 +515,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:22:48.920Z", + "@timestamp": "2018-05-17T08:22:48.920-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -529,12 +529,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:23:18.932Z", + "@timestamp": "2018-05-17T08:23:18.932-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -543,12 +543,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:23:48.941Z", + "@timestamp": "2018-05-17T08:23:48.941-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -557,12 +557,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:24:18.956Z", + "@timestamp": "2018-05-17T08:24:18.956-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -571,12 +571,12 @@ 
"service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:24:48.963Z", + "@timestamp": "2018-05-17T08:24:48.963-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -585,12 +585,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:25:18.976Z", + "@timestamp": "2018-05-17T08:25:18.976-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -599,12 +599,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:25:48.988Z", + "@timestamp": "2018-05-17T08:25:48.988-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -613,12 +613,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:26:18.997Z", + "@timestamp": "2018-05-17T08:26:18.997-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -627,12 +627,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:26:49.009Z", + "@timestamp": "2018-05-17T08:26:49.009-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -641,12 +641,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:27:19.024Z", + "@timestamp": "2018-05-17T08:27:19.024-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -655,12 +655,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:27:49.035Z", + "@timestamp": "2018-05-17T08:27:49.035-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -669,12 +669,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:28:19.048Z", + "@timestamp": "2018-05-17T08:28:19.048-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": 
"server", "input.type": "log", "log.level": "INFO", @@ -683,12 +683,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:28:49.060Z", + "@timestamp": "2018-05-17T08:28:49.060-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -697,13 +697,13 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:09.245Z", + "@timestamp": "2018-05-17T08:29:09.245-02:00", "elasticsearch.component": "o.e.c.m.MetaDataCreateIndexService", "elasticsearch.index.name": "filebeat-test-input", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -712,14 +712,14 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:09.576Z", + "@timestamp": "2018-05-17T08:29:09.576-02:00", "elasticsearch.component": "o.e.c.m.MetaDataMappingService", "elasticsearch.index.id": "aOGgDwbURfCV57AScqbCgw", "elasticsearch.index.name": "filebeat-test-input", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -728,13 +728,13 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:12.177Z", + "@timestamp": "2018-05-17T08:29:12.177-02:00", "elasticsearch.component": "o.e.c.m.MetaDataCreateIndexService", "elasticsearch.index.name": "test-filebeat-modules", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -743,14 +743,14 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:12.660Z", + "@timestamp": "2018-05-17T08:29:12.660-02:00", "elasticsearch.component": "o.e.c.m.MetaDataMappingService", "elasticsearch.index.id": "npNY8YrBQtC7JpFOh1sB0w", "elasticsearch.index.name": "test-filebeat-modules", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -759,12 +759,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:19.114Z", + "@timestamp": "2018-05-17T08:29:19.114-02:00", "elasticsearch.component": "o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -773,12 +773,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:25.418Z", + "@timestamp": "2018-05-17T08:29:25.418-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -787,12 +787,12 @@ 
"service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:25.598Z", + "@timestamp": "2018-05-17T08:29:25.598-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -801,12 +801,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:25.598Z", + "@timestamp": "2018-05-17T08:29:25.598-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -815,12 +815,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:25.612Z", + "@timestamp": "2018-05-17T08:29:25.612-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test-json.log-expected.json index 484d748d..10949d3b 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test-json.log-expected.json @@ -6,7 +6,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -21,7 +20,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -36,7 +34,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -51,7 +48,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -66,7 +62,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -81,7 +76,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "WARN", @@ -96,7 +90,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -111,7 +104,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": 
"INFO", @@ -126,7 +118,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -141,7 +132,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -156,7 +146,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -171,7 +160,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -186,7 +174,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -201,7 +188,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -216,7 +202,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -231,7 +216,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -246,7 +230,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -261,7 +244,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -276,7 +258,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -291,7 +272,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -306,7 +286,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -321,7 +300,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -336,7 +314,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -351,7 +328,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", 
"event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -366,7 +342,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -381,7 +356,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -396,7 +370,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -411,7 +384,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -426,7 +398,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -441,7 +412,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -456,7 +426,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -471,7 +440,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -486,7 +454,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -501,7 +468,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -516,7 +482,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -531,7 +496,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -548,7 +512,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -563,7 +526,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "DEBUG", @@ -578,7 +540,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", 
"log.level": "INFO", @@ -593,7 +554,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -608,7 +568,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -623,7 +582,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -638,7 +596,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "WARN", @@ -653,7 +610,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -668,7 +624,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -685,7 +640,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -702,7 +656,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -719,7 +672,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -736,7 +688,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -753,7 +704,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -770,7 +720,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -787,7 +736,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -804,7 +752,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -821,7 +768,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -838,7 +784,6 @@ "elasticsearch.node.name": "node-0", "event.dataset": "elasticsearch.server", 
"event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -902,7 +847,6 @@ ], "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.flags": [ @@ -920,7 +864,6 @@ "elasticsearch.node.name": "sample-name", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -977,7 +920,6 @@ ], "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "server", "input.type": "log", "log.flags": [ diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test.log-expected.json index 715a5874..548642d1 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/server/test/test.log-expected.json @@ -1,12 +1,12 @@ [ { - "@timestamp": "2018-05-17T08:29:12.177Z", + "@timestamp": "2018-05-17T08:29:12.177-02:00", "elasticsearch.component": "o.e.c.m.MetaDataCreateIndexService", "elasticsearch.index.name": "test-filebeat-modules", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -15,12 +15,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:35.939Z", + "@timestamp": "2018-05-17T08:19:35.939-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -29,12 +29,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.089Z", + "@timestamp": "2018-05-17T08:19:36.089-02:00", "elasticsearch.component": "o.e.e.NodeEnvironment", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -43,12 +43,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.090Z", + "@timestamp": "2018-05-17T08:19:36.090-02:00", "elasticsearch.component": "o.e.e.NodeEnvironment", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -57,11 +57,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:19:36.116Z", + "@timestamp": "2018-05-17T08:19:36.116-02:00", "elasticsearch.component": "o.e.n.Node", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -70,12 +70,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:23:48.941Z", + "@timestamp": "2018-05-17T08:23:48.941-02:00", "elasticsearch.component": 
"o.e.c.r.a.DiskThresholdMonitor", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -84,13 +84,13 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:09.245Z", + "@timestamp": "2018-05-17T08:29:09.245-02:00", "elasticsearch.component": "o.e.c.m.MetaDataCreateIndexService", "elasticsearch.index.name": "filebeat-test-input", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -99,14 +99,14 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:09.576Z", + "@timestamp": "2018-05-17T08:29:09.576-02:00", "elasticsearch.component": "o.e.c.m.MetaDataMappingService", "elasticsearch.index.id": "aOGgDwbURfCV57AScqbCgw", "elasticsearch.index.name": "filebeat-test-input", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -115,14 +115,14 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-09T12:47:33.959Z", + "@timestamp": "2018-07-09T12:47:33.959-02:00", "elasticsearch.component": "o.e.c.m.MetaDataMappingService", "elasticsearch.index.id": "3tWftqb4RLKdyCAga9syGA", "elasticsearch.index.name": ".kibana", "elasticsearch.node.name": "QGY1F5P", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -131,12 +131,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:25.598Z", + "@timestamp": "2018-05-17T08:29:25.598-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -145,12 +145,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-05-17T08:29:25.612Z", + "@timestamp": "2018-05-17T08:29:25.612-02:00", "elasticsearch.component": "o.e.n.Node", "elasticsearch.node.name": "vWNJsZ3", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -159,12 +159,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:45:48.548Z", + "@timestamp": "2018-07-03T11:45:48.548-02:00", "elasticsearch.component": "o.e.d.z.ZenDiscovery", "elasticsearch.node.name": "srvmulpvlsk252_md", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "INFO", @@ -173,12 +173,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:45:48.548Z", + "@timestamp": "2018-07-03T11:45:48.548-02:00", "elasticsearch.component": "o.e.d.z.ZenDiscovery", "elasticsearch.node.name": "srvmulpvlsk252_md", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + 
"event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.flags": [ @@ -190,11 +190,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:45:52.666Z", + "@timestamp": "2018-07-03T11:45:52.666-02:00", "elasticsearch.component": "r.suppressed", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.flags": [ @@ -206,11 +206,11 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:48:02.552Z", + "@timestamp": "2018-07-03T11:48:02.552-02:00", "elasticsearch.component": "r.suppressed", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.flags": [ @@ -222,14 +222,14 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:45:27.896Z", + "@timestamp": "2018-07-03T11:45:27.896-02:00", "elasticsearch.component": "o.e.m.j.JvmGcMonitorService", "elasticsearch.node.name": "srvmulpvlsk252_md", "elasticsearch.server.gc.young.one": "3449979", "elasticsearch.server.gc.young.two": "986594", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.flags": [ @@ -241,7 +241,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:45:45.604Z", + "@timestamp": "2018-07-03T11:45:45.604-02:00", "elasticsearch.component": "o.e.m.j.JvmGcMonitorService", "elasticsearch.node.name": "srvmulpvlsk252_md", "elasticsearch.server.gc.collection_duration.ms": 1600.0, @@ -249,7 +249,7 @@ "elasticsearch.server.gc.overhead_seq": "3449992", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "WARN", @@ -258,12 +258,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T11:48:02.541Z", + "@timestamp": "2018-07-03T11:48:02.541-02:00", "elasticsearch.component": "o.e.a.b.TransportShardBulkAction", "elasticsearch.node.name": "srvmulpvlsk252_md", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.level": "WARN", @@ -272,12 +272,12 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-03T20:10:07.376Z", + "@timestamp": "2018-07-03T20:10:07.376-02:00", "elasticsearch.component": "o.e.x.m.MonitoringService", "elasticsearch.node.name": "srvmulpvlsk252_md", "event.dataset": "elasticsearch.server", "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "server", "input.type": "log", "log.flags": [ diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/config/slowlog.yml b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/config/slowlog.yml index e255eaac..d6a75034 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/config/slowlog.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/config/slowlog.yml @@ -11,4 +11,5 @@ multiline: match: after processors: -- add_locale: ~ +# Locale for timezone is only needed in non-json logs +- add_locale.when.not.regexp.message: "^{" diff --git 
a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/ingest/pipeline-plaintext.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/ingest/pipeline-plaintext.json index e5840763..ae88869d 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/ingest/pipeline-plaintext.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/ingest/pipeline-plaintext.json @@ -9,7 +9,7 @@ "INDEXNAME": "[a-zA-Z0-9_.-]*" }, "patterns": [ - "\\[%{TIMESTAMP_ISO8601:elasticsearch.slowlog.timestamp}\\]\\[%{WORD:log.level}(%{SPACE})?\\]\\[%{DATA:elasticsearch.slowlog.logger}\\]%{SPACE}\\[%{WORD:elasticsearch.node.name}\\](%{SPACE})?(\\[%{INDEXNAME:elasticsearch.index.name}\\]\\[%{NUMBER:elasticsearch.shard.id}\\])?(%{SPACE})?(\\[%{INDEXNAME:elasticsearch.index.name}\\/%{DATA:elasticsearch.index.id}\\])?(%{SPACE})?%{SPACE}(took\\[%{DATA:elasticsearch.slowlog.took}\\],)?%{SPACE}(took_millis\\[%{NUMBER:elasticsearch.slowlog.duration:long}\\],)?%{SPACE}(type\\[%{DATA:elasticsearch.slowlog.type}\\],)?%{SPACE}(id\\[%{DATA:elasticsearch.slowlog.id}\\],)?%{SPACE}(routing\\[%{DATA:elasticsearch.slowlog.routing}\\],)?%{SPACE}(total_hits\\[%{NUMBER:elasticsearch.slowlog.total_hits:int}\\],)?%{SPACE}(types\\[%{DATA:elasticsearch.slowlog.types}\\],)?%{SPACE}(stats\\[%{DATA:elasticsearch.slowlog.stats}\\],)?%{SPACE}(search_type\\[%{DATA:elasticsearch.slowlog.search_type}\\],)?%{SPACE}(total_shards\\[%{NUMBER:elasticsearch.slowlog.total_shards:int}\\],)?%{SPACE}(source\\[%{GREEDYMULTILINE:elasticsearch.slowlog.source_query}\\])?,?%{SPACE}(extra_source\\[%{DATA:elasticsearch.slowlog.extra_source}\\])?,?" + "\\[%{TIMESTAMP_ISO8601:elasticsearch.slowlog.timestamp}\\]\\[%{WORD:log.level}(%{SPACE})?\\]\\[%{DATA:elasticsearch.slowlog.logger}\\]%{SPACE}\\[%{DATA:elasticsearch.node.name}\\](%{SPACE})?(\\[%{INDEXNAME:elasticsearch.index.name}\\]\\[%{NUMBER:elasticsearch.shard.id}\\])?(%{SPACE})?(\\[%{INDEXNAME:elasticsearch.index.name}\\/%{DATA:elasticsearch.index.id}\\])?(%{SPACE})?%{SPACE}(took\\[%{DATA:elasticsearch.slowlog.took}\\],)?%{SPACE}(took_millis\\[%{NUMBER:elasticsearch.slowlog.duration:long}\\],)?%{SPACE}(type\\[%{DATA:elasticsearch.slowlog.type}\\],)?%{SPACE}(id\\[%{DATA:elasticsearch.slowlog.id}\\],)?%{SPACE}(routing\\[%{DATA:elasticsearch.slowlog.routing}\\],)?%{SPACE}(total_hits\\[%{NUMBER:elasticsearch.slowlog.total_hits:int}\\],)?%{SPACE}(types\\[%{DATA:elasticsearch.slowlog.types}\\],)?%{SPACE}(stats\\[%{DATA:elasticsearch.slowlog.stats}\\],)?%{SPACE}(search_type\\[%{DATA:elasticsearch.slowlog.search_type}\\],)?%{SPACE}(total_shards\\[%{NUMBER:elasticsearch.slowlog.total_shards:int}\\],)?%{SPACE}(source\\[%{GREEDYMULTILINE:elasticsearch.slowlog.source_query}\\])?,?%{SPACE}(extra_source\\[%{DATA:elasticsearch.slowlog.extra_source}\\])?,?" 
] } }, @@ -23,12 +23,13 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "elasticsearch.slowlog.timestamp", "target_field": "@timestamp", "formats": [ "yyyy-MM-dd'T'HH:mm:ss,SSS" ], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/auditlog_index_indexing_slowlog.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/auditlog_index_indexing_slowlog.log-expected.json index 1dc37747..9f621a0e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/auditlog_index_indexing_slowlog.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/auditlog_index_indexing_slowlog.log-expected.json @@ -1,6 +1,6 @@ [ { - "@timestamp": "2018-07-04T21:51:29.536Z", + "@timestamp": "2018-07-04T21:51:29.536-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -13,7 +13,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -22,7 +22,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T21:51:29.537Z", + "@timestamp": "2018-07-04T21:51:29.537-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -35,7 +35,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -44,7 +44,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T21:51:29.538Z", + "@timestamp": "2018-07-04T21:51:29.538-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -57,7 +57,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -66,7 +66,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T21:51:30.411Z", + "@timestamp": "2018-07-04T21:51:30.411-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -78,7 +78,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 1000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -87,7 +87,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T21:51:30.963Z", + "@timestamp": "2018-07-04T21:51:30.963-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -100,7 +100,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", 
"log.level": "INFO", @@ -109,7 +109,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T21:51:30.965Z", + "@timestamp": "2018-07-04T21:51:30.965-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -122,7 +122,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_indexing_slowlog-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_indexing_slowlog-json.log-expected.json index 32c7151c..d6c2c575 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_indexing_slowlog-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_indexing_slowlog-json.log-expected.json @@ -16,7 +16,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 3000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -40,7 +39,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 2000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_search_slowlog-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_search_slowlog-json.log-expected.json index 1d52e439..c59127f6 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_search_slowlog-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es74_index_search_slowlog-json.log-expected.json @@ -19,7 +19,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 6000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -47,7 +46,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 17000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -75,7 +73,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 4000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -103,7 +100,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 5000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -131,7 +127,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 5000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -159,7 +154,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 4000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -187,7 +181,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 9000000, "event.module": 
"elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -215,7 +208,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 4000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -241,7 +233,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_indexing_slowlog-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_indexing_slowlog-json.log-expected.json index 77bb95ba..e0991f52 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_indexing_slowlog-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_indexing_slowlog-json.log-expected.json @@ -16,7 +16,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 4000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -41,7 +40,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_search_slowlog-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_search_slowlog-json.log-expected.json index 429e40fd..e7933d23 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_search_slowlog-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/es_index_search_slowlog-json.log-expected.json @@ -12,7 +12,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -33,7 +32,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -54,7 +52,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 9000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/slowlogs-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/slowlogs-json.log-expected.json index 5f48900b..3de1770e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/slowlogs-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/slowlogs-json.log-expected.json @@ -18,7 +18,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 9000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -45,7 +44,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - 
"event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -72,7 +70,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -100,7 +97,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 2000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", @@ -128,7 +124,6 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 0, "event.module": "elasticsearch", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.flags": [ diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log index 3d6d1eba..52cbd374 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log @@ -35,3 +35,4 @@ "name":"Rados-MacBook-Pro.local" } }] +[2019-11-14T21:18:40,269][TRACE][index.search.slowlog.query] [exp-data-elasticsearc-2] [exp_v3_1_current][3] took[516.4ms], took_millis[516], types[encounter], stats[], search_type[QUERY_THEN_FETCH], total_shards[10], source[{"size":1000,"query":{"constant_score":{"filter":{"bool":{"must":[{"bool":{"should":[{"nested":{"query":{"constant_score":{"filter":{"bool":{"must":[{"term":{"diagnosis.dx_rank":{"value":1,"boost":1.0}}}],"disable_coord":false,"adjust_pure_negative":true,"boost":1.0}},"boost":1.0}},"path":"diagnosis","ignore_unmapped":true,"score_mode":"avg","boost":1.0}},{"nested":{"query":{"constant_score":{"filter":{"bool":{"must":[{"term":{"procedure.px_rank":{"value":1,"boost":1.0}}}],"disable_coord":false,"adjust_pure_negative":true,"boost":1.0}},"boost":1.0}},"path":"procedure","ignore_unmapped":true,"score_mode":"avg","boost":1.0}}],"disable_coord":false,"adjust_pure_negative":true,"boost":1.0}}],"must_not":[{"exists":{"field":"primary_px_key","boost":1.0}}],"disable_coord":false,"adjust_pure_negative":true,"boost":1.0}},"boost":1.0}},"version":true,"sort":[{"_doc":{"order":"asc"}}]}] diff --git a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log-expected.json index 99756789..55fb7a6c 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/elasticsearch/slowlog/test/test.log-expected.json @@ -1,6 +1,6 @@ [ { - "@timestamp": "2018-06-29T10:06:14.933Z", + "@timestamp": "2018-06-29T10:06:14.933-02:00", "elasticsearch.index.name": "metricbeat-6.3.0-2018.06.26", "elasticsearch.node.name": "v_VJhjV", "elasticsearch.shard.id": "0", @@ -18,7 +18,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 4000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -27,7 +27,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-06-29T10:06:14.943Z", + "@timestamp": "2018-06-29T10:06:14.943-02:00", "elasticsearch.index.name": "metricbeat-6.3.0-2018.06.26", "elasticsearch.node.name": "v_VJhjV", "elasticsearch.shard.id": "0", @@ -42,7 +42,7 @@ 
"event.dataset": "elasticsearch.slowlog", "event.duration": 10000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -51,7 +51,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-06-29T09:01:01.821Z", + "@timestamp": "2018-06-29T09:01:01.821-02:00", "elasticsearch.index.name": "metricbeat-6.3.0-2018.06.26", "elasticsearch.node.name": "v_VJhjV", "elasticsearch.shard.id": "0", @@ -66,7 +66,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 124000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -75,7 +75,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-06-29T09:01:01.827Z", + "@timestamp": "2018-06-29T09:01:01.827-02:00", "elasticsearch.index.name": "metricbeat-6.3.0-2018.06.26", "elasticsearch.node.name": "v_VJhjV", "elasticsearch.shard.id": "0", @@ -90,7 +90,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 7000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -99,7 +99,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T13:48:07.452Z", + "@timestamp": "2018-07-04T13:48:07.452-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -112,7 +112,7 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 1000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", @@ -121,7 +121,7 @@ "service.type": "elasticsearch" }, { - "@timestamp": "2018-07-04T21:51:30.411Z", + "@timestamp": "2018-07-04T21:51:30.411-02:00", "elasticsearch.index.id": "VLKxBLvUSYuIMKzpacGjRg", "elasticsearch.index.name": "metricbeat-6.3.0-2018.07.04", "elasticsearch.node.name": "v_VJhjV", @@ -133,12 +133,35 @@ "event.dataset": "elasticsearch.slowlog", "event.duration": 1000000, "event.module": "elasticsearch", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", "log.offset": 4766, "message": "[2018-07-04T21:51:30,411][INFO ][index.indexing.slowlog.index] [v_VJhjV] [metricbeat-6.3.0-2018.07.04/VLKxBLvUSYuIMKzpacGjRg] took[1.7ms], took_millis[1], type[doc], id[s01HZ2QBk9jw4gtgaFtn], routing[], source[", "service.type": "elasticsearch" + }, + { + "@timestamp": "2019-11-14T21:18:40.269-02:00", + "elasticsearch.index.name": "exp_v3_1_current", + "elasticsearch.node.name": "exp-data-elasticsearc-2", + "elasticsearch.shard.id": "3", + "elasticsearch.slowlog.logger": "index.search.slowlog.query", + "elasticsearch.slowlog.search_type": "QUERY_THEN_FETCH", + "elasticsearch.slowlog.source_query": 
"{\"size\":1000,\"query\":{\"constant_score\":{\"filter\":{\"bool\":{\"must\":[{\"bool\":{\"should\":[{\"nested\":{\"query\":{\"constant_score\":{\"filter\":{\"bool\":{\"must\":[{\"term\":{\"diagnosis.dx_rank\":{\"value\":1,\"boost\":1.0}}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}},\"boost\":1.0}},\"path\":\"diagnosis\",\"ignore_unmapped\":true,\"score_mode\":\"avg\",\"boost\":1.0}},{\"nested\":{\"query\":{\"constant_score\":{\"filter\":{\"bool\":{\"must\":[{\"term\":{\"procedure.px_rank\":{\"value\":1,\"boost\":1.0}}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}},\"boost\":1.0}},\"path\":\"procedure\",\"ignore_unmapped\":true,\"score_mode\":\"avg\",\"boost\":1.0}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}}],\"must_not\":[{\"exists\":{\"field\":\"primary_px_key\",\"boost\":1.0}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}},\"boost\":1.0}},\"version\":true,\"sort\":[{\"_doc\":{\"order\":\"asc\"}}]}", + "elasticsearch.slowlog.stats": "", + "elasticsearch.slowlog.took": "516.4ms", + "elasticsearch.slowlog.total_shards": 10, + "elasticsearch.slowlog.types": "encounter", + "event.dataset": "elasticsearch.slowlog", + "event.duration": 516000000, + "event.module": "elasticsearch", + "event.timezone": "-02:00", + "fileset.name": "slowlog", + "input.type": "log", + "log.level": "TRACE", + "log.offset": 5638, + "message": "[2019-11-14T21:18:40,269][TRACE][index.search.slowlog.query] [exp-data-elasticsearc-2] [exp_v3_1_current][3] took[516.4ms], took_millis[516], types[encounter], stats[], search_type[QUERY_THEN_FETCH], total_shards[10], source[{\"size\":1000,\"query\":{\"constant_score\":{\"filter\":{\"bool\":{\"must\":[{\"bool\":{\"should\":[{\"nested\":{\"query\":{\"constant_score\":{\"filter\":{\"bool\":{\"must\":[{\"term\":{\"diagnosis.dx_rank\":{\"value\":1,\"boost\":1.0}}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}},\"boost\":1.0}},\"path\":\"diagnosis\",\"ignore_unmapped\":true,\"score_mode\":\"avg\",\"boost\":1.0}},{\"nested\":{\"query\":{\"constant_score\":{\"filter\":{\"bool\":{\"must\":[{\"term\":{\"procedure.px_rank\":{\"value\":1,\"boost\":1.0}}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}},\"boost\":1.0}},\"path\":\"procedure\",\"ignore_unmapped\":true,\"score_mode\":\"avg\",\"boost\":1.0}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}}],\"must_not\":[{\"exists\":{\"field\":\"primary_px_key\",\"boost\":1.0}}],\"disable_coord\":false,\"adjust_pure_negative\":true,\"boost\":1.0}},\"boost\":1.0}},\"version\":true,\"sort\":[{\"_doc\":{\"order\":\"asc\"}}]}]", + "service.type": "elasticsearch" } ] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json b/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json index a7217421..d9031009 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json +++ b/vendor/github.com/elastic/beats/filebeat/module/iis/access/ingest/default.json @@ -6,10 +6,10 @@ "field": "message", "patterns": [ "%{TIMESTAMP_ISO8601:iis.access.time} %{IPORHOST:destination.address} %{WORD:http.request.method} %{NOTSPACE:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} %{NOTSPACE:user_agent.original} %{NOTSPACE:http.request.referrer} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} 
%{NUMBER:iis.access.win32_status:long} %{NUMBER:temp.duration:long}", - "%{TIMESTAMP_ISO8601:iis.access.time} %{NOTSPACE:iis.access.site_name} %{WORD:http.request.method} %{URIPATH:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} %{NOTSPACE:user_agent.original} %{NOTSPACE:iis.access.cookie} %{NOTSPACE:http.request.referrer} %{NOTSPACE:destination.domain} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:http.response.body.bytes:long} %{NUMBER:http.request.body.bytes:long} %{NUMBER:temp.duration:long}", - "%{TIMESTAMP_ISO8601:iis.access.time} %{NOTSPACE:iis.access.site_name} %{NOTSPACE:iis.access.server_name} %{IPORHOST:destination.address} %{WORD:http.request.method} %{URIPATH:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} HTTP/%{NUMBER:http.version} %{NOTSPACE:user_agent.original} %{NOTSPACE:iis.access.cookie} %{NOTSPACE:http.request.referrer} %{NOTSPACE:destination.domain} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:http.response.body.bytes:long} %{NUMBER:http.request.body.bytes:long} %{NUMBER:temp.duration:long}", - "%{TIMESTAMP_ISO8601:iis.access.time} \\[%{IPORHOST:destination.address}\\]\\(http://%{IPORHOST:destination.address}\\) %{WORD:http.request.method} %{URIPATH:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} \\[%{IPORHOST:source.address}\\]\\(http://%{IPORHOST:source.address}\\) %{NOTSPACE:user_agent.original} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:temp.duration:long}", - "%{TIMESTAMP_ISO8601:iis.access.time} %{IPORHOST:destination.address} %{WORD:http.request.method} %{URIPATH:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} %{NOTSPACE:user_agent.original} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:temp.duration:long}" + "%{TIMESTAMP_ISO8601:iis.access.time} %{NOTSPACE:iis.access.site_name} %{WORD:http.request.method} %{NOTSPACE:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} %{NOTSPACE:user_agent.original} %{NOTSPACE:iis.access.cookie} %{NOTSPACE:http.request.referrer} %{NOTSPACE:destination.domain} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:http.response.body.bytes:long} %{NUMBER:http.request.body.bytes:long} %{NUMBER:temp.duration:long}", + "%{TIMESTAMP_ISO8601:iis.access.time} %{NOTSPACE:iis.access.site_name} %{NOTSPACE:iis.access.server_name} %{IPORHOST:destination.address} %{WORD:http.request.method} %{NOTSPACE:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} HTTP/%{NUMBER:http.version} %{NOTSPACE:user_agent.original} %{NOTSPACE:iis.access.cookie} %{NOTSPACE:http.request.referrer} %{NOTSPACE:destination.domain} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:http.response.body.bytes:long} %{NUMBER:http.request.body.bytes:long} %{NUMBER:temp.duration:long}", + "%{TIMESTAMP_ISO8601:iis.access.time} 
\\[%{IPORHOST:destination.address}\\]\\(http://%{IPORHOST:destination.address}\\) %{WORD:http.request.method} %{NOTSPACE:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} \\[%{IPORHOST:source.address}\\]\\(http://%{IPORHOST:source.address}\\) %{NOTSPACE:user_agent.original} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:temp.duration:long}", + "%{TIMESTAMP_ISO8601:iis.access.time} %{IPORHOST:destination.address} %{WORD:http.request.method} %{NOTSPACE:url.path} %{NOTSPACE:url.query} %{NUMBER:destination.port:long} %{NOTSPACE:user.name} %{IPORHOST:source.address} %{NOTSPACE:user_agent.original} %{NUMBER:http.response.status_code:long} %{NUMBER:iis.access.sub_status:long} %{NUMBER:iis.access.win32_status:long} %{NUMBER:temp.duration:long}" ], "ignore_missing": true } diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc index ca0068f8..787bcd8d 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/_meta/docs.asciidoc @@ -28,7 +28,7 @@ include::../include/configuring-intro.asciidoc[] The following example shows how to set paths in the +modules.d/{modulename}.yml+ file to override the default paths for logs: -["source","yaml",subs="attributes"] +[source,yaml] ----- - module: kafka log: @@ -43,7 +43,7 @@ file to override the default paths for logs: To specify the same settings at the command line, you use: -["source","sh",subs="attributes"] +[source,sh] ----- -M "kafka.log.var.paths=[/path/to/logs/controller.log*, /path/to/logs/server.log*, /path/to/logs/state-change.log*, /path/to/logs/kafka-*.log*]" ----- @@ -57,6 +57,19 @@ include::../include/config-option-intro.asciidoc[] [float] ==== `log` fileset settings +*`var.kafka_home`*:: + +The path to your Kafka installation. The default is `/opt`. For example: ++ +[source,yaml] +---- +- module: kafka + log: + enabled: true + var.kafka_home: /usr/share/kafka_2.12-2.4.0 + ...
+---- + include::../include/var-paths.asciidoc[] include::../include/timezone-support.asciidoc[] diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/ingest/pipeline.json index 0c6e44d3..6ba84de7 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/ingest/pipeline.json @@ -59,10 +59,11 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "kafka.log.timestamp", "target_field": "@timestamp", "formats": ["yyyy-MM-dd HH:mm:ss,SSS"], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller-2.0.0.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller-2.0.0.log-expected.json index d82fbd91..40e888a3 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller-2.0.0.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller-2.0.0.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2018-10-31T15:03:32.474Z", + "@timestamp": "2018-10-31T15:03:32.474-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -14,10 +14,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.474Z", + "@timestamp": "2018-10-31T15:03:32.474-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -28,10 +28,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.474Z", + "@timestamp": "2018-10-31T15:03:32.474-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -42,10 +42,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.475Z", + "@timestamp": "2018-10-31T15:03:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -56,10 +56,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.475Z", + "@timestamp": "2018-10-31T15:03:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -70,10 +70,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.475Z", + "@timestamp": "2018-10-31T15:03:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -84,10 +84,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.475Z", + "@timestamp": "2018-10-31T15:03:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - 
"event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -98,10 +98,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:03:32.475Z", + "@timestamp": "2018-10-31T15:03:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -112,10 +112,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -126,10 +126,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -140,10 +140,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -154,10 +154,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -168,10 +168,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -182,10 +182,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -196,10 +196,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -210,10 +210,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:08:32.475Z", + "@timestamp": "2018-10-31T15:08:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -224,10 +224,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:09:30.306Z", + "@timestamp": "2018-10-31T15:09:30.306-02:00", "event.dataset": "kafka.log", 
"event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -238,10 +238,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:09:30.307Z", + "@timestamp": "2018-10-31T15:09:30.307-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -252,10 +252,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:09:30.396Z", + "@timestamp": "2018-10-31T15:09:30.396-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", @@ -266,10 +266,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:09:30.397Z", + "@timestamp": "2018-10-31T15:09:30.397-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", @@ -280,10 +280,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:09:30.396Z", + "@timestamp": "2018-10-31T15:09:30.396-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", @@ -294,10 +294,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-31T15:13:32.475Z", + "@timestamp": "2018-10-31T15:13:32.475-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json index 95aaeef6..46c92262 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/controller.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2017-08-04T10:48:21.048Z", + "@timestamp": "2017-08-04T10:48:21.048-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", @@ -14,10 +14,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.063Z", + "@timestamp": "2017-08-04T10:48:21.063-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -28,10 +28,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.064Z", + "@timestamp": "2017-08-04T10:48:21.064-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -42,10 +42,10 @@ "service.type": "kafka" }, { - "@timestamp": 
"2017-08-04T10:48:21.082Z", + "@timestamp": "2017-08-04T10:48:21.082-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -56,10 +56,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.085Z", + "@timestamp": "2017-08-04T10:48:21.085-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -70,10 +70,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.154Z", + "@timestamp": "2017-08-04T10:48:21.154-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.ReplicaStateMachine", @@ -84,10 +84,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.156Z", + "@timestamp": "2017-08-04T10:48:21.156-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.PartitionStateMachine", @@ -98,10 +98,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.157Z", + "@timestamp": "2017-08-04T10:48:21.157-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -112,10 +112,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.165Z", + "@timestamp": "2017-08-04T10:48:21.165-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.PartitionStateMachine", @@ -126,10 +126,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:22.588Z", + "@timestamp": "2017-08-04T11:44:22.588-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -140,10 +140,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.094Z", + "@timestamp": "2017-08-04T11:44:25.094-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", @@ -154,10 +154,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.095Z", + "@timestamp": "2017-08-04T11:44:25.095-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.ControllerEventManager$ControllerEventThread", @@ -168,10 +168,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.097Z", + "@timestamp": "2017-08-04T11:44:25.097-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": 
"kafka.controller.ControllerEventManager$ControllerEventThread", @@ -182,10 +182,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.099Z", + "@timestamp": "2017-08-04T11:44:25.099-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -196,10 +196,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.100Z", + "@timestamp": "2017-08-04T11:44:25.100-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.KafkaController", @@ -210,10 +210,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.105Z", + "@timestamp": "2017-08-04T11:44:25.105-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.PartitionStateMachine", @@ -224,10 +224,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.111Z", + "@timestamp": "2017-08-04T11:44:25.111-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.ReplicaStateMachine", @@ -238,10 +238,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.112Z", + "@timestamp": "2017-08-04T11:44:25.112-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", @@ -252,10 +252,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.112Z", + "@timestamp": "2017-08-04T11:44:25.112-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", @@ -266,10 +266,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T11:44:25.113Z", + "@timestamp": "2017-08-04T11:44:25.113-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.controller.RequestSendThread", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server-2.0.0.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server-2.0.0.log-expected.json index 7bbb29b8..4402a7d3 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server-2.0.0.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server-2.0.0.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2018-10-17T12:04:41.718Z", + "@timestamp": "2018-10-17T12:04:41.718-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -14,10 +14,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:14:41.719Z", + "@timestamp": "2018-10-17T12:14:41.719-02:00", "event.dataset": "kafka.log", "event.module": 
"kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -28,10 +28,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:24:41.719Z", + "@timestamp": "2018-10-17T12:24:41.719-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -42,10 +42,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:34:41.719Z", + "@timestamp": "2018-10-17T12:34:41.719-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -56,10 +56,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:44:41.719Z", + "@timestamp": "2018-10-17T12:44:41.719-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -70,10 +70,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.313Z", + "@timestamp": "2018-10-17T12:50:23.313-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -84,10 +84,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.314Z", + "@timestamp": "2018-10-17T12:50:23.314-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -98,10 +98,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.321Z", + "@timestamp": "2018-10-17T12:50:23.321-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -112,10 +112,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.322Z", + "@timestamp": "2018-10-17T12:50:23.322-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -126,10 +126,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.322Z", + "@timestamp": "2018-10-17T12:50:23.322-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -140,10 +140,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.323Z", + "@timestamp": "2018-10-17T12:50:23.323-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -154,10 +154,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.323Z", + "@timestamp": 
"2018-10-17T12:50:23.323-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.kafka.clients.FetchSessionHandler", @@ -168,10 +168,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.324Z", + "@timestamp": "2018-10-17T12:50:23.324-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -182,10 +182,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.331Z", + "@timestamp": "2018-10-17T12:50:23.331-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -196,10 +196,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.348Z", + "@timestamp": "2018-10-17T12:50:23.348-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -210,10 +210,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.348Z", + "@timestamp": "2018-10-17T12:50:23.348-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -224,10 +224,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.350Z", + "@timestamp": "2018-10-17T12:50:23.350-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -238,10 +238,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.351Z", + "@timestamp": "2018-10-17T12:50:23.351-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -252,10 +252,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.355Z", + "@timestamp": "2018-10-17T12:50:23.355-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -266,10 +266,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.360Z", + "@timestamp": "2018-10-17T12:50:23.360-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -280,10 +280,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.361Z", + "@timestamp": "2018-10-17T12:50:23.361-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -294,10 +294,10 @@ "service.type": "kafka" }, { - "@timestamp": 
"2018-10-17T12:50:23.421Z", + "@timestamp": "2018-10-17T12:50:23.421-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -308,10 +308,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:23.421Z", + "@timestamp": "2018-10-17T12:50:23.421-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -322,10 +322,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:50:24.508Z", + "@timestamp": "2018-10-17T12:50:24.508-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -336,10 +336,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:51:56.064Z", + "@timestamp": "2018-10-17T12:51:56.064-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -350,10 +350,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:51:56.091Z", + "@timestamp": "2018-10-17T12:51:56.091-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -364,10 +364,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:51:56.098Z", + "@timestamp": "2018-10-17T12:51:56.098-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -378,10 +378,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:51:56.104Z", + "@timestamp": "2018-10-17T12:51:56.104-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -392,10 +392,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.461Z", + "@timestamp": "2018-10-17T12:54:31.461-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -406,10 +406,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.481Z", + "@timestamp": "2018-10-17T12:54:31.481-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -420,10 +420,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.482Z", + "@timestamp": "2018-10-17T12:54:31.482-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -434,10 +434,10 @@ "service.type": "kafka" }, { - "@timestamp": 
"2018-10-17T12:54:31.483Z", + "@timestamp": "2018-10-17T12:54:31.483-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -448,10 +448,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.501Z", + "@timestamp": "2018-10-17T12:54:31.501-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -462,10 +462,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.504Z", + "@timestamp": "2018-10-17T12:54:31.504-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -476,10 +476,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.504Z", + "@timestamp": "2018-10-17T12:54:31.504-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -490,10 +490,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.508Z", + "@timestamp": "2018-10-17T12:54:31.508-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -504,10 +504,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:31.510Z", + "@timestamp": "2018-10-17T12:54:31.510-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -518,10 +518,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:32.043Z", + "@timestamp": "2018-10-17T12:54:32.043-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -532,10 +532,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:32.044Z", + "@timestamp": "2018-10-17T12:54:32.044-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -546,10 +546,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:54:41.719Z", + "@timestamp": "2018-10-17T12:54:41.719-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -560,10 +560,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.790Z", + "@timestamp": "2018-10-17T12:57:17.790-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -574,10 +574,10 @@ "service.type": "kafka" }, { 
- "@timestamp": "2018-10-17T12:57:17.809Z", + "@timestamp": "2018-10-17T12:57:17.809-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -588,10 +588,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.810Z", + "@timestamp": "2018-10-17T12:57:17.810-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -602,10 +602,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.812Z", + "@timestamp": "2018-10-17T12:57:17.812-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.LogManager", @@ -616,10 +616,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.816Z", + "@timestamp": "2018-10-17T12:57:17.816-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -630,10 +630,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.816Z", + "@timestamp": "2018-10-17T12:57:17.816-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Replica", @@ -644,10 +644,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.816Z", + "@timestamp": "2018-10-17T12:57:17.816-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Replica", @@ -658,10 +658,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.816Z", + "@timestamp": "2018-10-17T12:57:17.816-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -672,10 +672,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.817Z", + "@timestamp": "2018-10-17T12:57:17.817-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Replica", @@ -686,10 +686,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.833Z", + "@timestamp": "2018-10-17T12:57:17.833-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -700,10 +700,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.833Z", + "@timestamp": "2018-10-17T12:57:17.833-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -714,10 +714,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.835Z", + "@timestamp": "2018-10-17T12:57:17.835-02:00", "event.dataset": 
"kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.LogManager", @@ -728,10 +728,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.836Z", + "@timestamp": "2018-10-17T12:57:17.836-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Partition", @@ -742,10 +742,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.836Z", + "@timestamp": "2018-10-17T12:57:17.836-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.cluster.Replica", @@ -756,10 +756,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.837Z", + "@timestamp": "2018-10-17T12:57:17.837-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -770,10 +770,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.838Z", + "@timestamp": "2018-10-17T12:57:17.838-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -784,10 +784,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.839Z", + "@timestamp": "2018-10-17T12:57:17.839-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -798,10 +798,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.896Z", + "@timestamp": "2018-10-17T12:57:17.896-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -812,10 +812,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:17.897Z", + "@timestamp": "2018-10-17T12:57:17.897-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.Log", @@ -826,10 +826,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:57:18.400Z", + "@timestamp": "2018-10-17T12:57:18.400-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -845,10 +845,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.490Z", + "@timestamp": "2018-10-17T12:58:47.490-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.kafka.common.utils.LoggingSignalHandler", @@ -859,10 +859,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.492Z", + "@timestamp": "2018-10-17T12:58:47.492-02:00", 
"event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", @@ -873,10 +873,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.494Z", + "@timestamp": "2018-10-17T12:58:47.494-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", @@ -887,10 +887,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.547Z", + "@timestamp": "2018-10-17T12:58:47.547-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -901,10 +901,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.550Z", + "@timestamp": "2018-10-17T12:58:47.550-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -915,10 +915,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.556Z", + "@timestamp": "2018-10-17T12:58:47.556-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -929,10 +929,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.556Z", + "@timestamp": "2018-10-17T12:58:47.556-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -943,10 +943,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.558Z", + "@timestamp": "2018-10-17T12:58:47.558-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -957,10 +957,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.558Z", + "@timestamp": "2018-10-17T12:58:47.558-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -971,10 +971,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.561Z", + "@timestamp": "2018-10-17T12:58:47.561-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -985,10 +985,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.561Z", + "@timestamp": "2018-10-17T12:58:47.561-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -999,10 +999,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.567Z", + 
"@timestamp": "2018-10-17T12:58:47.567-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1013,10 +1013,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.567Z", + "@timestamp": "2018-10-17T12:58:47.567-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1027,10 +1027,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.568Z", + "@timestamp": "2018-10-17T12:58:47.568-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1041,10 +1041,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.568Z", + "@timestamp": "2018-10-17T12:58:47.568-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1055,10 +1055,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.568Z", + "@timestamp": "2018-10-17T12:58:47.568-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -1069,10 +1069,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.577Z", + "@timestamp": "2018-10-17T12:58:47.577-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.kafka.clients.FetchSessionHandler", @@ -1083,10 +1083,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.577Z", + "@timestamp": "2018-10-17T12:58:47.577-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -1097,10 +1097,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.583Z", + "@timestamp": "2018-10-17T12:58:47.583-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -1111,10 +1111,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.585Z", + "@timestamp": "2018-10-17T12:58:47.585-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1125,10 +1125,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.586Z", + "@timestamp": "2018-10-17T12:58:47.586-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1139,10 
+1139,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.594Z", + "@timestamp": "2018-10-17T12:58:47.594-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -1153,10 +1153,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.601Z", + "@timestamp": "2018-10-17T12:58:47.601-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.kafka.clients.FetchSessionHandler", @@ -1167,10 +1167,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.602Z", + "@timestamp": "2018-10-17T12:58:47.602-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -1181,10 +1181,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.602Z", + "@timestamp": "2018-10-17T12:58:47.602-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherThread", @@ -1195,10 +1195,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.604Z", + "@timestamp": "2018-10-17T12:58:47.604-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", @@ -1209,10 +1209,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.605Z", + "@timestamp": "2018-10-17T12:58:47.605-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread", @@ -1223,10 +1223,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.606Z", + "@timestamp": "2018-10-17T12:58:47.606-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread", @@ -1237,10 +1237,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.606Z", + "@timestamp": "2018-10-17T12:58:47.606-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1251,10 +1251,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.606Z", + "@timestamp": "2018-10-17T12:58:47.606-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1265,10 +1265,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.606Z", + "@timestamp": "2018-10-17T12:58:47.606-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + 
"event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.common.ZkNodeChangeNotificationListener$ChangeEventProcessThread", @@ -1279,10 +1279,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.607Z", + "@timestamp": "2018-10-17T12:58:47.607-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.network.SocketServer", @@ -1293,10 +1293,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.608Z", + "@timestamp": "2018-10-17T12:58:47.608-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1307,10 +1307,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.608Z", + "@timestamp": "2018-10-17T12:58:47.608-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1321,10 +1321,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.609Z", + "@timestamp": "2018-10-17T12:58:47.609-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1335,10 +1335,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.609Z", + "@timestamp": "2018-10-17T12:58:47.609-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1349,10 +1349,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.610Z", + "@timestamp": "2018-10-17T12:58:47.610-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1363,10 +1363,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.610Z", + "@timestamp": "2018-10-17T12:58:47.610-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", @@ -1377,10 +1377,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.611Z", + "@timestamp": "2018-10-17T12:58:47.611-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaFetcherManager", @@ -1391,10 +1391,10 @@ "service.type": "kafka" }, { - "@timestamp": "2018-10-17T12:58:47.611Z", + "@timestamp": "2018-10-17T12:58:47.611-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ReplicaAlterLogDirsManager", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json 
b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json index 6c9f0a17..f92b26d2 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/server.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2017-08-04T10:48:20.377Z", + "@timestamp": "2017-08-04T10:48:20.377-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", @@ -14,10 +14,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.379Z", + "@timestamp": "2017-08-04T10:48:20.379-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.KafkaServer", @@ -28,10 +28,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.400Z", + "@timestamp": "2017-08-04T10:48:20.400-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ZooKeeper", @@ -42,10 +42,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.400Z", + "@timestamp": "2017-08-04T10:48:20.400-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ZooKeeper", @@ -56,10 +56,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.401Z", + "@timestamp": "2017-08-04T10:48:20.401-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ZooKeeper", @@ -70,10 +70,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.413Z", + "@timestamp": "2017-08-04T10:48:20.413-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.I0Itec.zkclient.ZkClient", @@ -84,10 +84,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.415Z", + "@timestamp": "2017-08-04T10:48:20.415-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ClientCnxn", @@ -98,10 +98,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.420Z", + "@timestamp": "2017-08-04T10:48:20.420-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ClientCnxn", @@ -112,10 +112,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.457Z", + "@timestamp": "2017-08-04T10:48:20.457-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.apache.zookeeper.ClientCnxn", @@ -126,10 +126,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.458Z", + 
"@timestamp": "2017-08-04T10:48:20.458-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "org.I0Itec.zkclient.ZkClient", @@ -140,10 +140,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.748Z", + "@timestamp": "2017-08-04T10:48:20.748-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.BrokerMetadataCheckpoint", @@ -154,10 +154,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.800Z", + "@timestamp": "2017-08-04T10:48:20.800-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.ClientQuotaManager$ThrottledRequestReaper", @@ -168,10 +168,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.866Z", + "@timestamp": "2017-08-04T10:48:20.866-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.LogManager", @@ -182,10 +182,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:20.873Z", + "@timestamp": "2017-08-04T10:48:20.873-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.log.LogManager", @@ -196,10 +196,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.062Z", + "@timestamp": "2017-08-04T10:48:21.062-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.server.DelayedOperationPurgatory$ExpiredOperationReaper", @@ -210,10 +210,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.063Z", + "@timestamp": "2017-08-04T10:48:21.063-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.utils.ZKCheckedEphemeral", @@ -224,10 +224,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.095Z", + "@timestamp": "2017-08-04T10:48:21.095-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.group.GroupMetadataManager", @@ -238,10 +238,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.127Z", + "@timestamp": "2017-08-04T10:48:21.127-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.transaction.ProducerIdManager", @@ -252,10 +252,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.162Z", + "@timestamp": "2017-08-04T10:48:21.162-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": 
"kafka.coordinator.transaction.TransactionCoordinator", @@ -266,10 +266,10 @@ "service.type": "kafka" }, { - "@timestamp": "2017-08-04T10:48:21.167Z", + "@timestamp": "2017-08-04T10:48:21.167-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "kafka.coordinator.transaction.TransactionMarkerChannelManager", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json index 3ebcec91..e8c9e5d0 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-1.1.0.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2018-07-16T10:17:06.489Z", + "@timestamp": "2018-07-16T10:17:06.489-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "state.change.logger", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json index 2431dff5..db2c5e91 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change-2.0.0.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2018-10-31T15:09:30.451Z", + "@timestamp": "2018-10-31T15:09:30.451-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "state.change.logger", diff --git a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json index 55c784b2..76a5b7a8 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/kafka/log/test/state-change.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2017-08-04T10:48:21.428Z", + "@timestamp": "2017-08-04T10:48:21.428-02:00", "event.dataset": "kafka.log", "event.module": "kafka", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "kafka.log.class": "state.change.logger", diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/fields.go b/vendor/github.com/elastic/beats/filebeat/module/logstash/fields.go index 5c6b4ebd..70fb1886 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/fields.go +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/fields.go @@ -32,5 +32,5 @@ func init() { // AssetLogstash returns asset data. // This is the base64 encoded gzipped contents of module/logstash. 
 func AssetLogstash() string {
-	return "eJzslctu2zwQhfd+ioOsEz+AFtn8+AME6AVouxdoaUSxJjkCL3H09oUky5ZkKoWTNECBcmeOOec7oxnyDntqM2iWPghfb4CggqYMN+PWzQYoyRdONUGxzXC/AXA6gc9cRk0boFKkS5/10TtYYWiWt1uhbSiDdByb404i8zzTIttp7wz6aQJ6il0IrYoN66GXROXYINSEMWnvYDv565JtymfGUkzXgLKn9sCuXMReAOrWj5qOOcEOhRbe41CTox6RnsgGsFNSWRFom0QKtSOxlH0D0qOt2BnRhSF2HEOP4qK1ysqj2oRRs1wjBEzUQeWpgs4c0HO4CI4eLoKTVsn7AiWt8+4nFcvQb5zvqYWwJZ6EjoSSdlHKzrM6V+RsMN0e5L2Q6f4QWgm/iDQi1GunjJJODKjBRUrXgJ5IX6mmWW5T59b0Ri2v+bAYz2tH8DLFv1n7K2Ztfc5eZfubOKCMpgFXva+jJ53U+ZO+Gh2lsnn34/3cfRGGRmeDwEvandA79njbLLQzPNomBn+LB6UDOX+LrzF0O90U/MclFX6l2Zn3ubK5UVqr5V0yMGq28jrA/5+piH2/B2UIFbsJK5TFoEYF23KF61i4Rjhh0livKt334LqhGy7/WQlRsK2UjMP1+PHtOTjNk0/a2167u/u535lRRE8ldu2kEukP8hGvEBKNaYXldAOs6/Y3zLZc+ZYL8V8BAAD//6XJ7J4="
+	return "eJzsVU1v2zAMvedXED23+QE+9LKtQIB9ANvuhmLTMhdJFPSRxP9+sB0ntiN3SNsVGDDdLFp875FP1APssMlAsfRB+HoFECgozOBu2LpbAZToC0c2EJsMHlcAcD4BX7iMClcAFaEqfdZFH8AIjZO87QqNxQyk42hPO4nM00yzbOe9C9HPI6Ln2BXQIli/njpIqBxrCDXCkLRTsB79Ouc25qeHUoxXT2WHzYFdOYs9Q6hdP2s85QR2UCjhPRxqdNhRxD2aAOxIkhEB10lKoXYo5rCvoLQxFTst2jCILcfQUXHRGDLyhDbiqFguMQTQUQXKUwWdKMBjuAoOGq6CI6vkXYGS0nn7C4t56A/Kd9iAMCXshYoIJW6jlK1mulQk3QJLFhUZzOmWPuBRaNv6Wwsyt9tm8xG46lowwF/Ipb2L3guZNq9QJPwsYkWol05pkk707IKLmG4Q7lHdiKZYrlPnlvAGLK/4MJsdt86H6xT/B8E/MQiWh8CLZH8XByijtsPtOmlSSZy/qcuqKMnk7cfbqfsqNJ7nRgfwHHYL9IYeb+wMO4ONsTH4e3giFdD5e/gWQ7vT3oIPXGLhF8zOvMvJ5JqUovks6TkqNvI2gp+OWMTO74E0QsVuxBXIQI+GBZtygdepcFY4odO0XlS6H8G1l65/mSYlhIJNRTL24/H97dkrzZPv7eue4ofHqd6JUIgeS9g2o0qkG/IerxAkjGmE4bQBlnG7CbMuF3o5A/8dAAD//7rhE3k="
 }
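The regenerated `AssetLogstash` blob above is just the module's `fields.yml` content, compressed and base64-encoded at build time, so any change to the fields (here, the `pipeline_id` addition below) rewrites the whole string. A minimal Go sketch of how such an asset can be inspected; note that although the generated comment says "gzipped", the `eJz` prefix decodes to a zlib stream header, hence `compress/zlib` here. Illustration only, not code from the repo:

```go
package main

import (
	"bytes"
	"compress/zlib"
	"encoding/base64"
	"fmt"
	"io/ioutil"
)

// decodeAsset reverses the asset encoding: base64 -> zlib-compressed
// bytes -> the original fields.yml text.
func decodeAsset(blob string) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(blob)
	if err != nil {
		return "", err
	}
	r, err := zlib.NewReader(bytes.NewReader(raw))
	if err != nil {
		return "", err
	}
	defer r.Close()
	out, err := ioutil.ReadAll(r)
	return string(out), err
}

func main() {
	// Paste the real AssetLogstash() return value here to see a fields.yml
	// that now contains the pipeline_id keyword added in the hunk below.
	const blob = "eJw..." // placeholder, not the real blob
	if yml, err := decodeAsset(blob); err != nil {
		fmt.Println("decode failed:", err)
	} else {
		fmt.Println(yml)
	}
}
```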
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/_meta/fields.yml
index 5d8fc352..6ca12ca1 100644
--- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/_meta/fields.yml
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/_meta/fields.yml
@@ -19,6 +19,11 @@
         type: object
         description: >
           key and value debugging information.
+    - name: pipeline_id
+      type: keyword
+      example: main
+      description: >
+        The ID of the pipeline.
 
     - name: message
       type: alias
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/config/log.yml b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/config/log.yml
index 8d2e78e7..d90907fb 100644
--- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/config/log.yml
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/config/log.yml
@@ -13,4 +13,5 @@ multiline:
 {{ end }}
 
 processors:
-- add_locale: ~
+# Locale for timezone is only needed in non-json logs
+- add_locale.when.not.regexp.message: "^{"
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/ingest/pipeline-plain.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/ingest/pipeline-plain.json
index c2c54715..0969a6d7 100644
--- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/ingest/pipeline-plain.json
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/ingest/pipeline-plain.json
@@ -18,6 +18,7 @@
         "GREEDYMULTILINE" : "(.|\n)*"
       },
       "patterns": [
+        "\\[%{TIMESTAMP_ISO8601:logstash.log.timestamp}\\]\\[%{LOGSTASH_LOGLEVEL:log.level}\\s?\\]\\[%{LOGSTASH_CLASS_MODULE:logstash.log.module}\\s*\\]\\[%{WORD:logstash.log.pipeline_id}\\] %{GREEDYMULTILINE:message}",
         "\\[%{TIMESTAMP_ISO8601:logstash.log.timestamp}\\]\\[%{LOGSTASH_LOGLEVEL:log.level}\\s?\\]\\[%{LOGSTASH_CLASS_MODULE:logstash.log.module}\\s*\\] %{GREEDYMULTILINE:message}"
       ]
     }
@@ -30,19 +31,19 @@
   },
   {
     "date": {
+      "if": "ctx.event.timezone == null",
       "field": "logstash.log.timestamp",
       "target_field": "@timestamp",
-      "formats": [
-        "ISO8601"
-      ],
-      "ignore_failure": true
+      "formats": ["yyyy-MM-dd'T'HH:mm:ss,SSS"],
+      "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}]
     }
   },
   {
     "date": {
       "if": "ctx.event.timezone != null",
-      "field": "@timestamp",
-      "formats": ["ISO8601"],
+      "field": "logstash.log.timestamp",
+      "target_field": "@timestamp",
+      "formats": ["yyyy-MM-dd'T'HH:mm:ss,SSS"],
       "timezone": "{{ event.timezone }}",
       "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}]
     }
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-json.log-expected.json
index cac7a78e..5470cb17 100644
--- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-json.log-expected.json
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-json.log-expected.json
@@ -3,7 +3,6 @@
         "@timestamp": "2019-01-07T21:25:21.871Z",
         "event.dataset": "logstash.log",
         "event.module": "logstash",
-        "event.timezone": "+00:00",
         "fileset.name": "log",
         "input.type": "log",
         "log.level": "INFO",
@@ -30,7 +29,6 @@
         "@timestamp": "2019-01-07T21:25:22.538Z",
         "event.dataset": "logstash.log",
         "event.module": "logstash",
-        "event.timezone": "+00:00",
         "fileset.name": "log",
         "input.type": "log",
         "log.level": "INFO",
@@ -46,7 +44,6 @@
         "@timestamp": "2019-01-07T21:25:22.594Z",
         "event.dataset": "logstash.log",
         "event.module": "logstash",
-        "event.timezone": "+00:00",
         "fileset.name": "log",
         "input.type": "log",
         "log.level": "INFO",
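The two `date` processors above are now mutually exclusive on `ctx.event.timezone`: the raw `logstash.log.timestamp` (pattern `yyyy-MM-dd'T'HH:mm:ss,SSS`, no zone indicator) is parsed exactly once, either as-is or anchored to the locale that `add_locale` captured. A rough Go equivalent, for illustration only; the helper name and offset parsing below are mine, not Beats code:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// parseLogstashTimestamp mirrors the two conditional "date" processors:
// with no event.timezone the zone-less timestamp is read as UTC
// (Elasticsearch's default); otherwise the same wall-clock time is
// anchored to the captured offset, e.g. "-02:00".
func parseLogstashTimestamp(raw, eventTimezone string) (time.Time, error) {
	const layout = "2006-01-02T15:04:05.000" // yyyy-MM-dd'T'HH:mm:ss,SSS
	normalized := strings.Replace(raw, ",", ".", 1) // Go uses '.' for millis

	loc := time.UTC
	if eventTimezone != "" {
		var h, m int // eventTimezone looks like "-02:00" or "+05:30"
		if _, err := fmt.Sscanf(eventTimezone, "%3d:%2d", &h, &m); err != nil {
			return time.Time{}, fmt.Errorf("bad timezone %q: %v", eventTimezone, err)
		}
		offset := h * 3600
		if h < 0 {
			offset -= m * 60
		} else {
			offset += m * 60
		}
		loc = time.FixedZone(eventTimezone, offset)
	}
	return time.ParseInLocation(layout, normalized, loc)
}

func main() {
	for _, tz := range []string{"", "-02:00"} {
		t, err := parseLogstashTimestamp("2017-10-23T14:20:12,046", tz)
		if err != nil {
			panic(err)
		}
		fmt.Printf("event.timezone=%-8q @timestamp=%s\n", tz, t.Format("2006-01-02T15:04:05.000Z07:00"))
	}
	// event.timezone=""        @timestamp=2017-10-23T14:20:12.046Z
	// event.timezone="-02:00"  @timestamp=2017-10-23T14:20:12.046-02:00
}
```

The two printed values match the before/after `@timestamp` forms in the fixture updates around this hunk.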
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain-7.4.log b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain-7.4.log
new file mode 100644
index 00000000..b0514986
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain-7.4.log
@@ -0,0 +1 @@
+[2019-11-20T19:04:48,468][WARN ][org.logstash.dissect.Dissector][the_pipeline_id] Dissector mapping, pattern not found {"field"=>"message", "pattern"=>"%{LogLineTimeStamp->}\t%{Healthy}\t%{Fatals}\t%{Errors}\t%{Warnings}\t%{TimeToBuildPatternsCache}\t%{CachedPatternsCount}\t%{MessagesEnqueued}\t%{DropMsgNoSubscribers}\t%{MessagesEnqueued}\t%{TotalDests}\t%{CycleProcTime}\t%{TimeSinceNap}\t%{QUtilPermilAvg}\t%{QUtilPermilMax}\t%{QUtilPermilCount}\t%{NotifierRequests}\t%{NotifierProcessedRequests}\t%{NotifierRequestsChangeDynamicSubs}\t%{NotifierSentRequestsChangeExtDynamicSubs}\t%{NotifierProcessedRequestsDropped}\t%{NotifierBadTargets}\t%{NotifierCycleTimeNetAvg}\t%{NotifierCycleTimeNetCount}\t%{NotifierUtilAvg->}", "event"=>{"fields"=>{"pipeline"=>"mypipeline", "indexprefix"=>"idx", "regid"=>"w", "env"=>"production"}, "beat"=>{"version"=>"6.8.3", "hostname"=>"myhostname", "name"=>"myname"}, "message"=>"msg", "tags"=>["production", "beats_input_codec_plain_applied"], "host"=>{"name"=>"myhostname"}}}
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain-7.4.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain-7.4.log-expected.json
new file mode 100644
index 00000000..e299008a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain-7.4.log-expected.json
@@ -0,0 +1,16 @@
+[
+    {
+        "@timestamp": "2019-11-20T19:04:48.468-02:00",
+        "event.dataset": "logstash.log",
+        "event.module": "logstash",
+        "event.timezone": "-02:00",
+        "fileset.name": "log",
+        "input.type": "log",
+        "log.level": "WARN",
+        "log.offset": 0,
+        "logstash.log.module": "org.logstash.dissect.Dissector",
+        "logstash.log.pipeline_id": "the_pipeline_id",
+        "message": "Dissector mapping, pattern not found {\"field\"=>\"message\", \"pattern\"=>\"%{LogLineTimeStamp->}\\t%{Healthy}\\t%{Fatals}\\t%{Errors}\\t%{Warnings}\\t%{TimeToBuildPatternsCache}\\t%{CachedPatternsCount}\\t%{MessagesEnqueued}\\t%{DropMsgNoSubscribers}\\t%{MessagesEnqueued}\\t%{TotalDests}\\t%{CycleProcTime}\\t%{TimeSinceNap}\\t%{QUtilPermilAvg}\\t%{QUtilPermilMax}\\t%{QUtilPermilCount}\\t%{NotifierRequests}\\t%{NotifierProcessedRequests}\\t%{NotifierRequestsChangeDynamicSubs}\\t%{NotifierSentRequestsChangeExtDynamicSubs}\\t%{NotifierProcessedRequestsDropped}\\t%{NotifierBadTargets}\\t%{NotifierCycleTimeNetAvg}\\t%{NotifierCycleTimeNetCount}\\t%{NotifierUtilAvg->}\", \"event\"=>{\"fields\"=>{\"pipeline\"=>\"mypipeline\", \"indexprefix\"=>\"idx\", \"regid\"=>\"w\", \"env\"=>\"production\"}, \"beat\"=>{\"version\"=>\"6.8.3\", \"hostname\"=>\"myhostname\", \"name\"=>\"myname\"}, \"message\"=>\"msg\", \"tags\"=>[\"production\", \"beats_input_codec_plain_applied\"], \"host\"=>{\"name\"=>\"myhostname\"}}}",
+        "service.type": "logstash"
+    }
+]
\ No newline at end of file
diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json
index b74a1242..31907ba6 100644
--- a/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json
+++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/log/test/logstash-plain.log-expected.json
@@ -1,9 +1,9 @@
 [
     {
-        "@timestamp": "2017-10-23T14:20:12.046Z",
+        "@timestamp": "2017-10-23T14:20:12.046-02:00",
"event.dataset": "logstash.log", "event.module": "logstash", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "log.level": "INFO", @@ -13,10 +13,10 @@ "service.type": "logstash" }, { - "@timestamp": "2017-11-20T03:55:00.318Z", + "@timestamp": "2017-11-20T03:55:00.318-02:00", "event.dataset": "logstash.log", "event.module": "logstash", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "log", "input.type": "log", "log.flags": [ diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/config/slowlog.yml b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/config/slowlog.yml index d96242ac..e8c035e3 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/config/slowlog.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/config/slowlog.yml @@ -6,4 +6,5 @@ paths: exclude_files: [".gz$"] processors: -- add_locale: ~ +# Locale for timezone is only needed in non-json logs +- add_locale.when.not.regexp.message: "^{" diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/ingest/pipeline-plain.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/ingest/pipeline-plain.json index d711dc3d..6e4bd906 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/ingest/pipeline-plain.json +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/ingest/pipeline-plain.json @@ -45,19 +45,19 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "logstash.slowlog.timestamp", "target_field": "@timestamp", - "formats": [ - "ISO8601" - ], - "ignore_failure": true + "formats": ["yyyy-MM-dd'T'HH:mm:ss,SSS"], + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { "date": { "if": "ctx.event.timezone != null", - "field": "@timestamp", - "formats": ["ISO8601"], + "field": "logstash.slowlog.timestamp", + "target_field": "@timestamp", + "formats": ["yyyy-MM-dd'T'HH:mm:ss,SSS"], "timezone": "{{ event.timezone }}", "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-json.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-json.log-expected.json index 8b5870eb..865f8389 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-json.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-json.log-expected.json @@ -4,7 +4,6 @@ "event.dataset": "logstash.slowlog", "event.duration": 5026401704, "event.module": "logstash", - "event.timezone": "+00:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "INFO", diff --git a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json index cc16cdee..1aeac562 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/logstash/slowlog/test/slowlog-plain.log-expected.json @@ -1,10 +1,10 @@ [ { - "@timestamp": "2017-10-30T09:57:58.243Z", + "@timestamp": "2017-10-30T09:57:58.243-02:00", "event.dataset": "logstash.slowlog", "event.duration": 3027675106, "event.module": 
"logstash", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "slowlog", "input.type": "log", "log.level": "WARN", diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml index 0afd1731..6fbeec1b 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/config/error.yml @@ -4,3 +4,12 @@ paths: - {{$path}} {{ end }} exclude_files: [".gz$"] + +multiline: + # Consider lines without timestamp part of the previous message + pattern: '^([0-9]{4}-[0-9]{2}-[0-9]{2}|[0-9]{6})' + negate: true + match: after + +processors: +- add_locale: ~ diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json index 06dac9ce..fed1e138 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/ingest/pipeline.json @@ -4,33 +4,55 @@ "grok": { "field": "message", "patterns": [ - "%{LOCALDATETIME:mysql.error.timestamp} (\\[%{DATA:log.level}\\] )?%{GREEDYDATA:message}", - "%{DATA:mysql.error.timestamp} %{NUMBER:mysql.thread_id:long} \\[%{DATA:log.level}\\] %{GREEDYDATA:message}", + "%{MYSQLDATETIME}%{SPACE}(%{NUMBER:mysql.thread_id:long}%{SPACE})?(\\[%{DATA:log.level}\\]%{SPACE})?%{GREEDYMULTILINE:message}", "%{GREEDYDATA:message}" ], "ignore_missing": true, "pattern_definitions": { - "LOCALDATETIME": "[0-9]+ %{TIME}" + "LOCALDATETIME": "(?:%{YEAR}-%{MONTHNUM}-%{MONTHDAY}|%{NUMBER})%{SPACE}%{TIME}", + "MYSQLDATETIME": "(?:%{LOCALDATETIME:_tmp.local_timestamp}|%{TIMESTAMP_ISO8601:_tmp.timestamp})", + "GREEDYMULTILINE": "(.|\n)+" } } }, { - "rename": { - "field": "@timestamp", - "target_field": "event.created" - } + "rename": { + "field": "@timestamp", + "target_field": "event.created" + } }, { "date": { - "field": "mysql.error.timestamp", - "target_field": "@timestamp", + "if": "ctx._tmp?.local_timestamp != null && ctx.event?.timezone == null", + "field": "_tmp.local_timestamp", "formats": [ - "ISO8601", - "yyMMdd H:m:s" - ], - "ignore_failure": true + "yyMMdd H:m:s", + "yyMMdd H:m:s", + "yyyy-MM-dd H:m:s", + "yyyy-MM-dd H:m:s" + ] + } + }, { + "date": { + "if": "ctx._tmp?.local_timestamp != null && ctx.event?.timezone != null", + "field": "_tmp.local_timestamp", + "timezone": "{{ event.timezone }}", + "formats": [ + "yyMMdd H:m:s", + "yyMMdd H:m:s", + "yyyy-MM-dd H:m:s", + "yyyy-MM-dd H:m:s" + ] + } + }, { + "date": { + "if": "ctx._tmp?.timestamp != null", + "field": "_tmp.timestamp", + "formats": [ + "ISO8601" + ] } }, { "remove":{ - "field": "mysql.error.timestamp", + "field": "_tmp", "ignore_missing": true } }], diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/error.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/error.log-expected.json index eebe6548..3b6c8a09 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/error.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/error.log-expected.json @@ -1,8 +1,9 @@ [ { - "@timestamp": "2016-12-09T13:08:33.000Z", + "@timestamp": "2016-12-09T13:08:33.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", 
"log.offset": 0, @@ -13,6 +14,7 @@ "@timestamp": "2016-12-09T12:08:33.335Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -25,6 +27,7 @@ "@timestamp": "2016-12-09T12:08:33.335Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -37,6 +40,7 @@ "@timestamp": "2016-12-09T12:08:33.336Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -49,6 +53,7 @@ "@timestamp": "2016-12-09T12:08:33.345Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -61,6 +66,7 @@ "@timestamp": "2016-12-09T12:08:33.351Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -73,27 +79,23 @@ "@timestamp": "2016-12-09T12:08:33.784Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 832, - "message": "/usr/local/Cellar/mysql/5.7.10/bin/mysqld: ready for connections.", + "message": "/usr/local/Cellar/mysql/5.7.10/bin/mysqld: ready for connections.\nVersion: '5.7.10' socket: '/tmp/mysql.sock' port: 3306 Homebrew", "mysql.thread_id": 0, "service.type": "mysql" }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 935, - "message": "Version: '5.7.10' socket: '/tmp/mysql.sock' port: 3306 Homebrew", - "service.type": "mysql" - }, { "@timestamp": "2016-12-09T22:21:02.443Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -103,9 +105,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -114,9 +117,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -125,9 +129,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1422, diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mariadb-10.4.8.log b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mariadb-10.4.8.log new file mode 100644 index 00000000..96bcaba1 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mariadb-10.4.8.log @@ -0,0 +1,30 @@ +191015 9:46:45 13 Query SHOW /*!50002 GLOBAL */ STATUS +2019-10-16 17:24:15 0 [Note] InnoDB: Using Linux native AIO +2019-10-16 17:24:15 0 [Note] InnoDB: Mutexes and rw_locks use GCC atomic builtins +2019-10-16 17:24:15 
0 [Note] InnoDB: Uses event mutexes +2019-10-16 17:24:15 0 [Note] InnoDB: Compressed tables use zlib 1.2.7 +2019-10-16 17:24:15 0 [Note] InnoDB: Number of pools: 1 +2019-10-16 17:24:15 0 [Note] InnoDB: Using SSE2 crc32 instructions +2019-10-16 17:24:15 0 [Note] InnoDB: Initializing buffer pool, total size = 128M, instances = 1, chunk size = 128M +2019-10-16 17:24:15 0 [Note] InnoDB: Completed initialization of buffer pool +2019-10-16 17:24:15 0 [Note] InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority(). +2019-10-16 17:24:15 0 [Note] InnoDB: 128 out of 128 rollback segments are active. +2019-10-16 17:24:15 0 [Note] InnoDB: Creating shared tablespace for temporary tables +2019-10-16 17:24:15 0 [Note] InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ... +2019-10-16 17:24:15 0 [Note] InnoDB: File './ibtmp1' size is now 12 MB. +2019-10-16 17:24:15 0 [Note] InnoDB: Waiting for purge to start +2019-10-16 17:24:15 0 [Note] InnoDB: 10.4.8 started; log sequence number 1631101; transaction id 791 +2019-10-16 17:24:15 0 [Note] InnoDB: Loading buffer pool(s) from /data/mysqldata/mysql/ib_buffer_pool +2019-10-16 17:24:15 0 [Note] Plugin 'FEEDBACK' is disabled. +2019-10-16 17:24:15 0 [Note] InnoDB: Buffer pool(s) load completed at 191016 17:24:15 +2019-10-16 17:24:15 0 [Note] Server socket created on IP: '::'. +2019-10-16 17:24:15 0 [Note] Reading of all Master_info entries succeeded +2019-10-16 17:24:15 0 [Note] Added new Master_info '' to hash table +2019-10-16 17:24:15 0 [Note] /usr/sbin/mysqld: ready for connections. +Version: '10.4.8-MariaDB-log' socket: '/data/mysqldata/mysql.sock' port: 3306 MariaDB Server +2019-10-16 17:25:43 11 [Note] Event Scheduler: scheduler thread started with id 11 +2019-10-16 17:25:43 11 [Note] Event Scheduler: Last execution of test.test_error_log. Dropping. 
+2019-10-16 17:25:43 12 [Note] Event Scheduler: Dropping test.test_error_log
+2019-10-16 17:25:43 12 [ERROR] Event Scheduler: [root@localhost][test.test_error_log] hi from the error log
+2019-10-16 17:25:43 12 [Note] Event Scheduler: [root@localhost][test.test_error_log] At line 1 in test.test_error_log
+
diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mariadb-10.4.8.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mariadb-10.4.8.log-expected.json
new file mode 100644
index 00000000..9722b9cf
--- /dev/null
+++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mariadb-10.4.8.log-expected.json
@@ -0,0 +1,371 @@
+[
+    {
+        "@timestamp": "2019-10-15T09:46:45.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.offset": 0,
+        "message": "Query\tSHOW /*!50002 GLOBAL */ STATUS",
+        "mysql.thread_id": 13,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 60,
+        "message": "InnoDB: Using Linux native AIO",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 120,
+        "message": "InnoDB: Mutexes and rw_locks use GCC atomic builtins",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 202,
+        "message": "InnoDB: Uses event mutexes",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 258,
+        "message": "InnoDB: Compressed tables use zlib 1.2.7",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 328,
+        "message": "InnoDB: Number of pools: 1",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 384,
+        "message": "InnoDB: Using SSE2 crc32 instructions",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 451,
+        "message": "InnoDB: Initializing buffer pool, total size = 128M, instances = 1, chunk size = 128M",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 566,
+        "message": "InnoDB: Completed initialization of buffer pool",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 643,
+        "message": "InnoDB: If the mysqld execution user is authorized, page cleaner thread priority can be changed. See the man page of setpriority().",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 804,
+        "message": "InnoDB: 128 out of 128 rollback segments are active.",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 886,
+        "message": "InnoDB: Creating shared tablespace for temporary tables",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 971,
+        "message": "InnoDB: Setting file './ibtmp1' size to 12 MB. Physically writing the file full; Please wait ...",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1097,
+        "message": "InnoDB: File './ibtmp1' size is now 12 MB.",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1169,
+        "message": "InnoDB: Waiting for purge to start",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1233,
+        "message": "InnoDB: 10.4.8 started; log sequence number 1631101; transaction id 791",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1334,
+        "message": "InnoDB: Loading buffer pool(s) from /data/mysqldata/mysql/ib_buffer_pool",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1436,
+        "message": "Plugin 'FEEDBACK' is disabled.",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1496,
+        "message": "InnoDB: Buffer pool(s) load completed at 191016 17:24:15",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1582,
+        "message": "Server socket created on IP: '::'.",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1646,
+        "message": "Reading of all Master_info entries succeeded",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1720,
+        "message": "Added new Master_info '' to hash table",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:24:15.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.flags": [
+            "multiline"
+        ],
+        "log.level": "Note",
+        "log.offset": 1788,
+        "message": "/usr/sbin/mysqld: ready for connections.\nVersion: '10.4.8-MariaDB-log' socket: '/data/mysqldata/mysql.sock' port: 3306 MariaDB Server",
+        "mysql.thread_id": 0,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:25:43.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 1954,
+        "message": "Event Scheduler: scheduler thread started with id 11",
+        "mysql.thread_id": 11,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:25:43.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 2037,
+        "message": "Event Scheduler: Last execution of test.test_error_log. Dropping.",
+        "mysql.thread_id": 11,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:25:43.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "Note",
+        "log.offset": 2133,
+        "message": "Event Scheduler: Dropping test.test_error_log",
+        "mysql.thread_id": 12,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:25:43.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.level": "ERROR",
+        "log.offset": 2209,
+        "message": "Event Scheduler: [root@localhost][test.test_error_log] hi from the error log",
+        "mysql.thread_id": 12,
+        "service.type": "mysql"
+    },
+    {
+        "@timestamp": "2019-10-16T17:25:43.000-02:00",
+        "event.dataset": "mysql.error",
+        "event.module": "mysql",
+        "event.timezone": "-02:00",
+        "fileset.name": "error",
+        "input.type": "log",
+        "log.flags": [
+            "multiline"
+        ],
+        "log.level": "Note",
+        "log.offset": 2317,
+        "message": "Event Scheduler: [root@localhost][test.test_error_log] At line 1 in test.test_error_log\n",
+        "mysql.thread_id": 12,
+        "service.type": "mysql"
+    }
+]
\ No newline at end of file
diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log-expected.json
index d16af366..1bec003f 100644
--- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log-expected.json
+++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-darwin-brew-5.7.10.log-expected.json
@@ -1,8 +1,9 @@
 [
     {
-        "@timestamp": "2016-12-09T13:08:33.000Z",
+        "@timestamp": "2016-12-09T13:08:33.000-02:00",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.offset": 0,
@@ -13,6 +14,7 @@
         "@timestamp": "2016-12-09T12:08:33.335Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Warning",
@@ -25,6 +27,7 @@
         "@timestamp": "2016-12-09T12:08:33.335Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Warning",
@@ -37,6 +40,7 @@
         "@timestamp": "2016-12-09T12:08:33.336Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -49,6 +53,7 @@
         "@timestamp": "2016-12-09T12:08:33.345Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Warning",
@@ -61,6 +66,7 @@
         "@timestamp": "2016-12-09T12:08:33.351Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -73,6 +79,7 @@
         "@timestamp": "2016-12-09T12:08:33.351Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -85,6 +92,7 @@
         "@timestamp": "2016-12-09T12:08:33.351Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -97,6 +105,7 @@
         "@timestamp": "2016-12-09T12:08:33.351Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -109,6 +118,7 @@
         "@timestamp": "2016-12-09T12:08:33.352Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -121,6 +131,7 @@
         "@timestamp": "2016-12-09T12:08:33.354Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -133,6 +144,7 @@
         "@timestamp": "2016-12-09T12:08:33.366Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -145,6 +157,7 @@
         "@timestamp": "2016-12-09T12:08:33.379Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -157,6 +170,7 @@
         "@timestamp": "2016-12-09T12:08:33.401Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -169,6 +183,7 @@
         "@timestamp": "2016-12-09T12:08:33.402Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -181,6 +196,7 @@
         "@timestamp": "2016-12-09T12:08:33.402Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -193,6 +209,7 @@
         "@timestamp": "2016-12-09T12:08:33.402Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -205,6 +222,7 @@
         "@timestamp": "2016-12-09T12:08:33.402Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -217,6 +235,7 @@
         "@timestamp": "2016-12-09T12:08:33.402Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -229,6 +248,7 @@
         "@timestamp": "2016-12-09T12:08:33.549Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -241,6 +261,7 @@
         "@timestamp": "2016-12-09T12:08:33.549Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -253,6 +274,7 @@
         "@timestamp": "2016-12-09T12:08:33.549Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -265,6 +287,7 @@
         "@timestamp": "2016-12-09T12:08:33.585Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -277,6 +300,7 @@
         "@timestamp": "2016-12-09T12:08:33.588Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -289,6 +313,7 @@
         "@timestamp": "2016-12-09T12:08:33.588Z",
         "event.dataset": "mysql.error",
"event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -301,6 +326,7 @@ "@timestamp": "2016-12-09T12:08:33.588Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -313,6 +339,7 @@ "@timestamp": "2016-12-09T12:08:33.641Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -325,6 +352,7 @@ "@timestamp": "2016-12-09T12:08:33.642Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -337,6 +365,7 @@ "@timestamp": "2016-12-09T12:08:33.642Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -349,6 +378,7 @@ "@timestamp": "2016-12-09T12:08:33.643Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -361,6 +391,7 @@ "@timestamp": "2016-12-09T12:08:33.652Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -373,6 +404,7 @@ "@timestamp": "2016-12-09T12:08:33.662Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -385,6 +417,7 @@ "@timestamp": "2016-12-09T12:08:33.662Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -397,6 +430,7 @@ "@timestamp": "2016-12-09T12:08:33.665Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -409,6 +443,7 @@ "@timestamp": "2016-12-09T12:08:33.665Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -421,6 +456,7 @@ "@timestamp": "2016-12-09T12:08:33.698Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -433,6 +469,7 @@ "@timestamp": "2016-12-09T12:08:33.699Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -445,11 +482,12 @@ "@timestamp": "2016-12-09T12:08:33.699Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", "log.offset": 3682, - "message": " - '::' resolves to '::';", + "message": "- '::' resolves to '::';", "mysql.thread_id": 0, "service.type": "mysql" }, @@ -457,6 +495,7 @@ "@timestamp": "2016-12-09T12:08:33.699Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -469,6 +508,7 @@ "@timestamp": "2016-12-09T12:08:33.784Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -481,27 +521,23 @@ "@timestamp": 
"2016-12-09T12:08:33.784Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 3888, - "message": "/usr/local/Cellar/mysql/5.7.10/bin/mysqld: ready for connections.", + "message": "/usr/local/Cellar/mysql/5.7.10/bin/mysqld: ready for connections.\nVersion: '5.7.10' socket: '/tmp/mysql.sock' port: 3306 Homebrew", "mysql.thread_id": 0, "service.type": "mysql" }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 3991, - "message": "Version: '5.7.10' socket: '/tmp/mysql.sock' port: 3306 Homebrew", - "service.type": "mysql" - }, { "@timestamp": "2016-12-09T22:21:02.443Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -514,6 +550,7 @@ "@timestamp": "2016-12-09T22:36:49.017Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -526,6 +563,7 @@ "@timestamp": "2016-12-09T23:37:34.021Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -538,6 +576,7 @@ "@timestamp": "2016-12-10T00:17:54.198Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -550,6 +589,7 @@ "@timestamp": "2016-12-10T01:18:38.017Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -562,6 +602,7 @@ "@timestamp": "2016-12-10T01:39:00.017Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -574,6 +615,7 @@ "@timestamp": "2016-12-10T02:39:45.021Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -586,6 +628,7 @@ "@timestamp": "2016-12-10T02:49:08.015Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -598,6 +641,7 @@ "@timestamp": "2016-12-10T03:24:15.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -610,6 +654,7 @@ "@timestamp": "2016-12-10T04:25:00.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -622,6 +667,7 @@ "@timestamp": "2016-12-10T04:34:24.021Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -634,6 +680,7 @@ "@timestamp": "2016-12-10T04:39:18.022Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -646,6 +693,7 @@ "@timestamp": "2016-12-10T05:40:03.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", 
@@ -658,6 +706,7 @@
         "@timestamp": "2016-12-10T06:40:48.025Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -670,6 +719,7 @@
         "@timestamp": "2016-12-10T06:45:55.018Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -682,6 +732,7 @@
         "@timestamp": "2016-12-10T07:46:40.016Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -694,6 +745,7 @@
         "@timestamp": "2016-12-10T07:56:04.016Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -706,6 +758,7 @@
         "@timestamp": "2016-12-10T08:56:49.390Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -718,6 +771,7 @@
         "@timestamp": "2016-12-10T09:06:11.019Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -730,6 +784,7 @@
         "@timestamp": "2016-12-10T10:06:56.015Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -742,6 +797,7 @@
         "@timestamp": "2016-12-10T10:16:18.022Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -754,6 +810,7 @@
         "@timestamp": "2016-12-10T11:17:02.165Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -766,6 +823,7 @@
         "@timestamp": "2016-12-10T11:30:44.018Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -778,6 +836,7 @@
         "@timestamp": "2016-12-10T12:03:24.017Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -790,6 +849,7 @@
         "@timestamp": "2016-12-10T12:06:40.015Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -802,6 +862,7 @@
         "@timestamp": "2016-12-10T12:24:37.025Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -814,6 +875,7 @@
         "@timestamp": "2016-12-10T13:25:22.017Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -826,6 +888,7 @@
         "@timestamp": "2016-12-10T13:39:05.016Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -838,6 +901,7 @@
         "@timestamp": "2016-12-10T14:39:50.178Z",
         "event.dataset": "mysql.error",
         "event.module": "mysql",
+        "event.timezone": "-02:00",
         "fileset.name": "error",
         "input.type": "log",
         "log.level": "Note",
@@ -850,6 +914,7 @@
         "@timestamp": "2016-12-10T14:49:14.023Z",
         "event.dataset": "mysql.error",
"event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -862,6 +927,7 @@ "@timestamp": "2016-12-10T15:49:59.022Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -874,6 +940,7 @@ "@timestamp": "2016-12-10T15:59:23.014Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -886,6 +953,7 @@ "@timestamp": "2016-12-10T17:00:08.019Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -898,6 +966,7 @@ "@timestamp": "2016-12-10T17:09:30.026Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -910,6 +979,7 @@ "@timestamp": "2016-12-10T17:48:20.017Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -922,6 +992,7 @@ "@timestamp": "2016-12-10T18:00:05.183Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -934,6 +1005,7 @@ "@timestamp": "2016-12-10T18:54:13.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -946,6 +1018,7 @@ "@timestamp": "2016-12-10T20:13:03.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -958,6 +1031,7 @@ "@timestamp": "2016-12-10T20:50:11.201Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -970,6 +1044,7 @@ "@timestamp": "2016-12-10T20:53:54.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -982,6 +1057,7 @@ "@timestamp": "2016-12-10T21:03:18.023Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -994,6 +1070,7 @@ "@timestamp": "2016-12-10T22:04:03.021Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1006,6 +1083,7 @@ "@timestamp": "2016-12-10T22:13:57.015Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1018,6 +1096,7 @@ "@timestamp": "2016-12-10T22:49:59.020Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1030,6 +1109,7 @@ "@timestamp": "2016-12-10T23:12:12.023Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1042,6 +1122,7 @@ "@timestamp": "2016-12-11T00:12:57.015Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", 
"input.type": "log", "log.level": "Note", @@ -1054,6 +1135,7 @@ "@timestamp": "2016-12-11T00:26:41.053Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1066,6 +1148,7 @@ "@timestamp": "2016-12-11T00:47:44.015Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1078,6 +1161,7 @@ "@timestamp": "2016-12-11T00:49:50.017Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1090,6 +1174,7 @@ "@timestamp": "2016-12-11T01:20:40.031Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1102,6 +1187,7 @@ "@timestamp": "2016-12-11T02:21:24.021Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1114,6 +1200,7 @@ "@timestamp": "2016-12-11T02:26:30.015Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1126,6 +1213,7 @@ "@timestamp": "2016-12-11T03:18:55.018Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1138,6 +1226,7 @@ "@timestamp": "2016-12-11T04:15:14.022Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1150,6 +1239,7 @@ "@timestamp": "2016-12-11T04:20:52.016Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1162,6 +1252,7 @@ "@timestamp": "2016-12-11T04:25:56.035Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1174,6 +1265,7 @@ "@timestamp": "2016-12-11T05:26:41.020Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1186,6 +1278,7 @@ "@timestamp": "2016-12-11T05:36:05.024Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -1193,5 +1286,18 @@ "message": "InnoDB: page_cleaner: 1000ms intended loop took 515624ms. The settings might not be optimal. (flushed=0 and evicted=0, during the time.)", "mysql.thread_id": 0, "service.type": "mysql" + }, + { + "@timestamp": "2016-12-11T06:36:50.017Z", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.level": "Note", + "log.offset": 14182, + "message": "InnoDB: page_cleaner: 1000ms intended loop took 3598619ms. The settings might not be optimal. 
(flushed=0 and evicted=0, during the time.)", + "mysql.thread_id": 0, + "service.type": "mysql" } ] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log index 1e9ed334..31aa8efd 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log @@ -97,19 +97,6 @@ Version: '5.5.53-0ubuntu0.12.04.1' socket: '/var/run/mysqld/mysqld.sock' port: 161209 14:37:58 [Note] Event Scheduler: Loaded 0 events 161209 14:37:58 [Note] /usr/sbin/mysqld: ready for connections. Version: '5.5.53-0ubuntu0.12.04.1-log' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu) -vagrant@precise32:~$ cat /var/log/mysql.log | grep phisically -vagrant@precise32:~$ cat /var/log/mysql.log | grep physi -vagrant@precise32:~$ cat /var/log/mysql.log | physically -physically: command not found -vagrant@precise32:~$ cat /var/log/mysql.log | grep physically -vagrant@precise32:~$ less /var/log/mysql. -mysql.err mysql.log -vagrant@precise32:~$ less /var/log/mysql.err -vagrant@precise32:~$ less /var/log/mysql.log -vagrant@precise32:~$ less /var/log/mysql/ -error.log mysql-slow.log -vagrant@precise32:~$ less /var/log/mysql/error.log -vagrant@precise32:~$ cat /var/log/mysql/error.log 161209 14:18:50 [Warning] Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. Please use the full name instead. 161209 14:18:50 [Note] Plugin 'FEDERATED' is disabled. 161209 14:18:50 InnoDB: The InnoDB memory heap is disabled diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log-expected.json index db1c9015..a02eb5f7 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-5.5.53.log-expected.json @@ -1,8 +1,9 @@ [ { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -11,9 +12,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -22,9 +24,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 246, @@ -32,9 +35,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 305, @@ -42,9 +46,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": 
"mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 374, @@ -52,9 +57,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 433, @@ -62,167 +68,77 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:50.000Z", + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.offset": 497, - "message": "InnoDB: Completed initialization of buffer pool", + "message": "InnoDB: Completed initialization of buffer pool\nInnoDB: The first specified data file ./ibdata1 did not exist:\nInnoDB: a new database to be created!", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 561, - "message": "InnoDB: The first specified data file ./ibdata1 did not exist:", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 624, - "message": "InnoDB: a new database to be created!", - "service.type": "mysql" - }, - { - "@timestamp": "2016-12-09T14:18:50.000Z", - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", + "log.flags": [ + "multiline" + ], "log.offset": 662, - "message": " InnoDB: Setting file ./ibdata1 size to 10 MB", + "message": "InnoDB: Setting file ./ibdata1 size to 10 MB\nInnoDB: Database physically writes the file full: wait...", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 724, - "message": "InnoDB: Database physically writes the file full: wait...", - "service.type": "mysql" - }, - { - "@timestamp": "2016-12-09T14:18:50.000Z", - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", + "log.flags": [ + "multiline" + ], "log.offset": 782, - "message": " InnoDB: Log file ./ib_logfile0 did not exist: new to be created", + "message": "InnoDB: Log file ./ib_logfile0 did not exist: new to be created\nInnoDB: Setting log file ./ib_logfile0 size to 5 MB\nInnoDB: Database physically writes the file full: wait...", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 863, - "message": "InnoDB: Setting log file ./ib_logfile0 size to 5 MB", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 915, - "message": "InnoDB: Database physically writes the file full: wait...", - "service.type": "mysql" - }, - { - "@timestamp": "2016-12-09T14:18:50.000Z", - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", + "log.flags": [ + "multiline" + ], "log.offset": 
973, - "message": " InnoDB: Log file ./ib_logfile1 did not exist: new to be created", + "message": "InnoDB: Log file ./ib_logfile1 did not exist: new to be created\nInnoDB: Setting log file ./ib_logfile1 size to 5 MB\nInnoDB: Database physically writes the file full: wait...\nInnoDB: Doublewrite buffer not found: creating new\nInnoDB: Doublewrite buffer created\nInnoDB: 127 rollback segment(s) active.\nInnoDB: Creating foreign key constraint system tables\nInnoDB: Foreign key constraint system tables created", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1054, - "message": "InnoDB: Setting log file ./ib_logfile1 size to 5 MB", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1106, - "message": "InnoDB: Database physically writes the file full: wait...", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1164, - "message": "InnoDB: Doublewrite buffer not found: creating new", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1215, - "message": "InnoDB: Doublewrite buffer created", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1250, - "message": "InnoDB: 127 rollback segment(s) active.", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1290, - "message": "InnoDB: Creating foreign key constraint system tables", - "service.type": "mysql" - }, - { - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", - "log.offset": 1344, - "message": "InnoDB: Foreign key constraint system tables created", - "service.type": "mysql" - }, - { - "@timestamp": "2016-12-09T14:18:50.000Z", - "event.dataset": "mysql.error", - "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1397, - "message": " InnoDB: Waiting for the background threads to start", + "message": "InnoDB: Waiting for the background threads to start", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:51.000Z", + "@timestamp": "2016-12-09T14:18:51.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1466, @@ -230,29 +146,32 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:51.000Z", + "@timestamp": "2016-12-09T14:18:51.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1528, - "message": " InnoDB: Starting shutdown...", + "message": "InnoDB: Starting shutdown...", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1574, - "message": " InnoDB: Shutdown completed; log sequence 
number 1595675", + "message": "InnoDB: Shutdown completed; log sequence number 1595675", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -261,9 +180,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -272,9 +192,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1893, @@ -282,9 +203,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 1952, @@ -292,9 +214,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2021, @@ -302,9 +225,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2080, @@ -312,9 +236,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2144, @@ -322,9 +247,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2208, @@ -332,80 +258,87 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:52.000Z", + "@timestamp": "2016-12-09T14:18:52.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2276, - "message": " InnoDB: Waiting for the background threads to start", + "message": "InnoDB: Waiting for the background threads to start", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.offset": 2345, - "message": "InnoDB: 5.5.53 started; log sequence number 1595675", + "message": "InnoDB: 5.5.53 started; log sequence number 1595675\nERROR: 1064 You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ALTER TABLE user ADD column Show_view_priv enum('N','Y') CHARACTER SET utf8 NOT ' at 
line 1", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 2413, - "message": "ERROR: 1064 You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'ALTER TABLE user ADD column Show_view_priv enum('N','Y') CHARACTER SET utf8 NOT ' at line 1", - "service.type": "mysql" - }, - { - "@timestamp": "2016-12-09T14:18:53.000Z", - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "ERROR", "log.offset": 2653, - "message": "Aborting", + "message": "Aborting\n", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2687, - "message": " InnoDB: Starting shutdown...", + "message": "InnoDB: Starting shutdown...", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 2733, - "message": " InnoDB: Shutdown completed; log sequence number 1595675", + "message": "InnoDB: Shutdown completed; log sequence number 1595675", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 2806, - "message": "/usr/sbin/mysqld: Shutdown complete", + "message": "/usr/sbin/mysqld: Shutdown complete\n", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -414,9 +347,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -425,9 +359,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3112, @@ -435,9 +370,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3171, @@ -445,9 +381,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3240, @@ -455,9 +392,10 @@ "service.type": "mysql" }, { 
- "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3299, @@ -465,9 +403,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3363, @@ -475,9 +414,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3427, @@ -485,19 +425,21 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:53.000Z", + "@timestamp": "2016-12-09T14:18:53.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3495, - "message": " InnoDB: Waiting for the background threads to start", + "message": "InnoDB: Waiting for the background threads to start", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:54.000Z", + "@timestamp": "2016-12-09T14:18:54.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3564, @@ -505,61 +447,73 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:54.000Z", + "@timestamp": "2016-12-09T14:18:54.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3632, - "message": " InnoDB: Starting shutdown...", + "message": "InnoDB: Starting shutdown...", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "ERROR", "log.offset": 3678, - "message": "Aborting", + "message": "Aborting\n", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3712, - "message": " InnoDB: Starting shutdown...", + "message": "InnoDB: Starting shutdown...", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 3758, - "message": " InnoDB: Shutdown completed; log sequence number 1595675", + "message": "InnoDB: Shutdown completed; log sequence number 1595675", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 3831, - "message": "/usr/sbin/mysqld: Shutdown complete", + "message": "/usr/sbin/mysqld: Shutdown complete\n", 
"service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -568,9 +522,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -579,9 +534,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4137, @@ -589,9 +545,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4196, @@ -599,9 +556,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4265, @@ -609,9 +567,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4324, @@ -619,9 +578,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:56.000Z", + "@timestamp": "2016-12-09T14:18:56.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4388, @@ -629,9 +589,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:57.000Z", + "@timestamp": "2016-12-09T14:18:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4452, @@ -639,19 +600,21 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:57.000Z", + "@timestamp": "2016-12-09T14:18:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4520, - "message": " InnoDB: Waiting for the background threads to start", + "message": "InnoDB: Waiting for the background threads to start", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:58.000Z", + "@timestamp": "2016-12-09T14:18:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 4589, @@ -659,9 +622,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:58.000Z", + "@timestamp": "2016-12-09T14:18:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -670,20 +634,22 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:58.000Z", + "@timestamp": "2016-12-09T14:18:58.000-02:00", "event.dataset": "mysql.error", "event.module": 
"mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", "log.offset": 4736, - "message": " - '127.0.0.1' resolves to '127.0.0.1';", + "message": "- '127.0.0.1' resolves to '127.0.0.1';", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:58.000Z", + "@timestamp": "2016-12-09T14:18:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -692,9 +658,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:58.000Z", + "@timestamp": "2016-12-09T14:18:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -703,40 +670,40 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:18:58.000Z", + "@timestamp": "2016-12-09T14:18:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 4921, - "message": "/usr/sbin/mysqld: ready for connections.", + "message": "/usr/sbin/mysqld: ready for connections.\nVersion: '5.5.53-0ubuntu0.12.04.1' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu)", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 4985, - "message": "Version: '5.5.53-0ubuntu0.12.04.1' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu)", - "service.type": "mysql" - }, - { - "@timestamp": "2016-12-09T14:37:57.000Z", - "event.dataset": "mysql.error", - "event.module": "mysql", - "fileset.name": "error", - "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 5081, - "message": "/usr/sbin/mysqld: Normal shutdown", + "message": "/usr/sbin/mysqld: Normal shutdown\n", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -745,40 +712,47 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5207, - "message": " InnoDB: Starting shutdown...", + "message": "InnoDB: Starting shutdown...", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5253, - "message": " InnoDB: Shutdown completed; log sequence number 1595685", + "message": "InnoDB: Shutdown completed; log sequence number 1595685", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", + "log.flags": [ + "multiline" + ], "log.level": "Note", "log.offset": 5326, - "message": "/usr/sbin/mysqld: Shutdown complete", + 
"message": "/usr/sbin/mysqld: Shutdown complete\n", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -787,9 +761,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -798,9 +773,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5632, @@ -808,9 +784,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5691, @@ -818,9 +795,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5760, @@ -828,9 +806,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5819, @@ -838,9 +817,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5883, @@ -848,9 +828,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 5947, @@ -858,19 +839,21 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:57.000Z", + "@timestamp": "2016-12-09T14:37:57.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 6015, - "message": " InnoDB: Waiting for the background threads to start", + "message": "InnoDB: Waiting for the background threads to start", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:58.000Z", + "@timestamp": "2016-12-09T14:37:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.offset": 6084, @@ -878,9 +861,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:58.000Z", + "@timestamp": "2016-12-09T14:37:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -889,20 +873,22 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:58.000Z", + "@timestamp": "2016-12-09T14:37:58.000-02:00", 
"event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", "log.offset": 6231, - "message": " - '127.0.0.1' resolves to '127.0.0.1';", + "message": "- '127.0.0.1' resolves to '127.0.0.1';", "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:58.000Z", + "@timestamp": "2016-12-09T14:37:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -911,9 +897,10 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:58.000Z", + "@timestamp": "2016-12-09T14:37:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", @@ -922,86 +909,276 @@ "service.type": "mysql" }, { - "@timestamp": "2016-12-09T14:37:58.000Z", + "@timestamp": "2016-12-09T14:37:58.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.flags": [ + "multiline" + ], + "log.level": "Note", + "log.offset": 6416, + "message": "/usr/sbin/mysqld: ready for connections.\nVersion: '5.5.53-0ubuntu0.12.04.1-log' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu)", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:50.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.level": "Warning", + "log.offset": 6580, + "message": "Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. 
Please use the full name instead.", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:50.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Note", - "log.offset": 6416, - "message": "/usr/sbin/mysqld: ready for connections.", + "log.offset": 6771, + "message": "Plugin 'FEDERATED' is disabled.", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6480, - "message": "Version: '5.5.53-0ubuntu0.12.04.1-log' socket: '/var/run/mysqld/mysqld.sock' port: 3306 (Ubuntu)", + "log.offset": 6826, + "message": "InnoDB: The InnoDB memory heap is disabled", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6580, - "message": "vagrant@precise32:~$ cat /var/log/mysql.log | grep phisically", + "log.offset": 6885, + "message": "InnoDB: Mutexes and rw_locks use GCC atomic builtins", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6642, - "message": "vagrant@precise32:~$ cat /var/log/mysql.log | grep physi", + "log.offset": 6954, + "message": "InnoDB: Compressed tables use zlib 1.2.3.4", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6699, - "message": "vagrant@precise32:~$ cat /var/log/mysql.log | physically", + "log.offset": 7013, + "message": "InnoDB: Initializing buffer pool, size = 128.0M", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6756, - "message": "physically: command not found", + "log.flags": [ + "multiline" + ], + "log.offset": 7077, + "message": "InnoDB: Completed initialization of buffer pool\nInnoDB: The first specified data file ./ibdata1 did not exist:\nInnoDB: a new database to be created!", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6786, - "message": "vagrant@precise32:~$ cat /var/log/mysql.log | grep physically", + "log.flags": [ + "multiline" + ], + "log.offset": 7242, + "message": "InnoDB: Setting file ./ibdata1 size to 10 MB\nInnoDB: Database physically writes the file full: wait...", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6848, - "message": "vagrant@precise32:~$ less /var/log/mysql.", + "log.flags": [ + "multiline" + ], + "log.offset": 7362, + "message": "InnoDB: Log file ./ib_logfile0 did not exist: new to be created\nInnoDB: Setting log file ./ib_logfile0 size to 5 MB\nInnoDB: Database physically writes the file 
full: wait...", "service.type": "mysql" }, { + "@timestamp": "2016-12-09T14:18:50.000-02:00", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", - "log.offset": 6890, - "message": "mysql.err mysql.log", + "log.flags": [ + "multiline" + ], + "log.offset": 7553, + "message": "InnoDB: Log file ./ib_logfile1 did not exist: new to be created\nInnoDB: Setting log file ./ib_logfile1 size to 5 MB\nInnoDB: Database physically writes the file full: wait...\nInnoDB: Doublewrite buffer not found: creating new\nInnoDB: Doublewrite buffer created\nInnoDB: 127 rollback segment(s) active.\nInnoDB: Creating foreign key constraint system tables\nInnoDB: Foreign key constraint system tables created", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:50.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 7977, + "message": "InnoDB: Waiting for the background threads to start", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:51.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8046, + "message": "InnoDB: 5.5.53 started; log sequence number 0", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:51.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8108, + "message": "InnoDB: Starting shutdown...", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8154, + "message": "InnoDB: Shutdown completed; log sequence number 1595675", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.level": "Warning", + "log.offset": 8227, + "message": "Using unique option prefix myisam-recover instead of myisam-recover-options is deprecated and will be removed in a future release. 
Please use the full name instead.", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.level": "Note", + "log.offset": 8418, + "message": "Plugin 'FEDERATED' is disabled.", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8473, + "message": "InnoDB: The InnoDB memory heap is disabled", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8532, + "message": "InnoDB: Mutexes and rw_locks use GCC atomic builtins", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8601, + "message": "InnoDB: Compressed tables use zlib 1.2.3.4", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8660, + "message": "InnoDB: Initializing buffer pool, size = 128.0M", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8724, + "message": "InnoDB: Completed initialization of buffer pool", + "service.type": "mysql" + }, + { + "@timestamp": "2016-12-09T14:18:52.000-02:00", + "event.dataset": "mysql.error", + "event.module": "mysql", + "event.timezone": "-02:00", + "fileset.name": "error", + "input.type": "log", + "log.offset": 8788, + "message": "InnoDB: highest supported file format is Barracuda.", "service.type": "mysql" } ] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-8.0.15.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-8.0.15.log-expected.json index af36bbfc..148506b7 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-8.0.15.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/mysql/error/test/mysql-ubuntu-8.0.15.log-expected.json @@ -3,6 +3,7 @@ "@timestamp": "2019-03-24T13:44:25.484Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", @@ -15,6 +16,7 @@ "@timestamp": "2019-03-24T13:44:27.924Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -27,6 +29,7 @@ "@timestamp": "2019-03-24T13:44:29.065Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", @@ -39,6 +42,7 @@ "@timestamp": "2019-03-24T13:44:31.085Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", 
"fileset.name": "error", "input.type": "log", "log.level": "System", @@ -51,6 +55,7 @@ "@timestamp": "2019-03-24T13:44:31.533Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -63,6 +68,7 @@ "@timestamp": "2019-03-24T13:44:31.534Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -75,6 +81,7 @@ "@timestamp": "2019-03-24T13:44:31.555Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", @@ -87,6 +94,7 @@ "@timestamp": "2019-03-24T13:44:33.236Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", @@ -99,6 +107,7 @@ "@timestamp": "2019-03-24T13:44:34.072Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", @@ -111,6 +120,7 @@ "@timestamp": "2019-03-24T13:44:34.406Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "Warning", @@ -123,6 +133,7 @@ "@timestamp": "2019-03-24T13:44:34.420Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", @@ -135,6 +146,7 @@ "@timestamp": "2019-03-24T13:44:34.572Z", "event.dataset": "mysql.error", "event.module": "mysql", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "System", diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log-expected.json index c1a305b6..d6d5dd05 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/access.log-expected.json @@ -3,7 +3,7 @@ "@timestamp": "2016-10-25T12:49:33.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -38,7 +38,7 @@ "@timestamp": "2016-10-25T12:49:34.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "http://localhost:8080/", @@ -73,7 +73,7 @@ "@timestamp": "2016-10-25T12:50:44.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -108,7 +108,7 @@ "@timestamp": "2016-12-07T09:34:43.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -143,7 +143,7 @@ "@timestamp": "2016-12-07T09:34:43.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", 
"http.request.referrer": "http://localhost:8080/", @@ -178,7 +178,7 @@ "@timestamp": "2016-12-07T09:43:18.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -213,7 +213,7 @@ "@timestamp": "2016-12-07T09:43:21.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -248,7 +248,7 @@ "@timestamp": "2016-12-07T09:43:23.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -283,7 +283,7 @@ "@timestamp": "2016-12-07T10:04:37.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -312,7 +312,7 @@ "@timestamp": "2016-12-07T10:04:58.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -341,7 +341,7 @@ "@timestamp": "2016-12-07T10:04:59.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -370,7 +370,7 @@ "@timestamp": "2016-12-07T10:05:07.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json index 42463087..c14b385e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/access/test/test.log-expected.json @@ -3,7 +3,7 @@ "@timestamp": "2016-12-07T10:05:07.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -34,7 +34,7 @@ "@timestamp": "2017-05-29T19:02:48.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -61,7 +61,7 @@ "@timestamp": "2016-12-07T10:05:07.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -101,7 +101,7 @@ "@timestamp": "2016-12-07T10:05:07.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -139,7 +139,7 @@ "@timestamp": "2016-01-22T13:18:29.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": 
"-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -176,7 +176,7 @@ "@timestamp": "2016-12-30T06:47:09.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -210,7 +210,7 @@ "@timestamp": "2018-04-12T07:48:40.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.referrer": "-", "http.response.body.bytes": 0, @@ -232,7 +232,7 @@ "@timestamp": "2019-02-26T14:39:42.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.referrer": "-", "http.response.body.bytes": 173, @@ -250,7 +250,7 @@ "@timestamp": "2017-05-29T19:02:48.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", @@ -273,7 +273,7 @@ "@timestamp": "2017-05-29T19:02:48.000Z", "event.dataset": "nginx.access", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "access", "http.request.method": "GET", "http.request.referrer": "-", diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json index 4c89300a..c66ac042 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/ingest/pipeline.json @@ -15,10 +15,11 @@ } }, { "date": { + "if": "ctx.event.timezone == null", "field": "nginx.error.time", "target_field": "@timestamp", "formats": ["yyyy/MM/dd H:m:s"], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { "date": { diff --git a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log-expected.json index c0c43a1c..5b365ae3 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/nginx/error/test/error.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2016-10-25T14:49:34.000Z", + "@timestamp": "2016-10-25T14:49:34.000-02:00", "event.dataset": "nginx.error", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", @@ -15,10 +15,10 @@ "service.type": "nginx" }, { - "@timestamp": "2016-10-25T14:50:44.000Z", + "@timestamp": "2016-10-25T14:50:44.000-02:00", "event.dataset": "nginx.error", "event.module": "nginx", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "error", "input.type": "log", "log.level": "error", diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/fields.go b/vendor/github.com/elastic/beats/filebeat/module/postgresql/fields.go index 0490b489..573e617d 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/fields.go +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/fields.go @@ -32,5 +32,5 @@ func init() { // AssetPostgresql 
returns asset data. // This is the base64 encoded gzipped contents of module/postgresql. func AssetPostgresql() string { - return "eJyck0FrGzEQhe/+FQ+f2kKWQg+FLfQSnFPSNk3uQV7NyqKSRhlpQ7e/vsiOsbPeNd7MccV73+eRfIU/1NeInLIRSs9uAWSbHdVY/tp9fLi/XS4ATakRG7PlUOP7AgDuWHeO0LIgKkk2GOQN4ZCDY4PWOkrVAkgblvzUcGitqZGlowXQWnI61du+KwTlaWBTJveRahjhLr5+GbHZzc22D62wH4hsHcocI4+xjs1R0SnzLPct+dwa9jPUOFbJ1lPKysc3pwUfhRqVSdf4Wn2pPp+cT+qVedzQofpgWvScDVSNujQs9GT1oGy3HsfBzFO4ZiEM2vYkrbJaq0SDEP1VPm5fpO/1enmGd0L7oTyB2/HmPfa5I+knmQ+r29X1Iz7h5vfPO3SJJH2b5XBf6pGyyuQp5MOSR2VIhKVqWA+3cNnCVyWOEodQ7iSQxrrfv8WED7aFCv3H8xLljfzjMK6gnFVpcBJV3tSgl/L7JsLeGlE7yde//gh3I6T01GObBkfhhlKq4knyEmq505nAEqlK+h04Ry/kZvIcm2osdwnPU0rKzL3L8dSQ9z8AAP//UU6bDw==" + return "eJyclEFv2zAMhe/5FQ85tUNrdNhhgDf0UrTAgHZb194DxaJtYbKoSnTX7NcPspsldZwsHk+GhcfvkRR1jp+0yuE5ShUoPtkZIEYs5Zh/738+3N/OZ4CmWATjxbDLcTkDgDvWrSWUHOBViMZVkJqw0cFyhdJYitkMiDUHWRTsSlPlkNDSDCgNWR3zLt85nGpo4CaFrDzlqAK3/vXPiJs+brp8KAM3AyOdhxTbyG2s5Wor0S7zIPct+VAb1jG0sW1FTENRVOPfnCa8D1QoIZ3jY/Yhu9g532svxWNNm9Qbp8meNY6yUS8FB1oYPUjWt8eyq6ZZuOJAGGRbk7QStVSRBiJ6UY3vbmSz0sv5NN5X1RC4HM+9Bj+1FFZ7qQ/Xt9dXj3iHmx/f7tBGCvHTRBf3CYAoSqghJ+ON7lwsotBw6hsrac1oIvthTUVKjV81ObTdstKLkNOkezJ8YOGCLU7YdT376/es2286w9I4DQ6gFypaodMDdaTv/XVoXkRpZHHRx/v/mWplnslBGOq1gH+XluFLCSMwEfPPrUse9eX8LJXkWOADxVTuDk9qE/uFTVJTOQ6kN1McbQOFwCErWA/bcNzqXCc5khyBpA2ONJar9asScWJKKLc6PWwibftvduMWlDUqDk68kjoHPadbukfcmCqo3uTrIz7CrQMpve/Z2A/2gQuKMfM7ymOoaTcnApMkG7mrx+AsPZOdyLNcZWO6Y3gNxaiqqbMcVw15fwIAAP//ui0d+Q==" } diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/_meta/fields.yml b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/_meta/fields.yml index 834c3859..4b4a5b48 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/_meta/fields.yml @@ -13,12 +13,21 @@ Core id - name: database example: "mydb" - description: + description: > Name of database - name: query example: "SELECT * FROM users;" - description: + description: > Query statement. + - name: query_step + example: "parse" + description: > + Statement step when using extended query protocol (one of statement, parse, bind or execute) + - name: query_name + example: "pdo_stmt_00000001" + description: > + Name given to a query when using extended query protocol. If it is "", or not present, + this field is ignored. 
- name: error.code type: long diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json index f698fac4..1bed8277 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/ingest/pipeline.json @@ -6,12 +6,13 @@ "field": "message", "ignore_missing": true, "patterns": [ - "^%{DATETIME:postgresql.log.timestamp} \\[%{NUMBER:process.pid:long}(-%{BASE16FLOAT:postgresql.log.core_id:long})?\\] ((\\[%{USERNAME:user.name}\\]@\\[%{POSTGRESQL_DB_NAME:postgresql.log.database}\\]|%{USERNAME:user.name}@%{POSTGRESQL_DB_NAME:postgresql.log.database}) )?%{WORD:log.level}: (?:%{NUMBER:postgresql.log.error.code:long}|%{SPACE})(duration: %{NUMBER:temp.duration:float} ms statement: %{GREEDYDATA:postgresql.log.query}|: %{GREEDYDATA:message}|%{GREEDYDATA:message})" + "^%{DATETIME:postgresql.log.timestamp} \\[%{NUMBER:process.pid:long}(-%{BASE16FLOAT:postgresql.log.core_id:long})?\\] ((\\[%{USERNAME:user.name}\\]@\\[%{POSTGRESQL_DB_NAME:postgresql.log.database}\\]|%{USERNAME:user.name}@%{POSTGRESQL_DB_NAME:postgresql.log.database}) )?%{WORD:log.level}: (?:%{NUMBER:postgresql.log.error.code:long}|%{SPACE})(duration: %{NUMBER:temp.duration:float} ms %{POSTGRESQL_QUERY_STEP}: %{GREEDYDATA:postgresql.log.query}|: %{GREEDYDATA:message}|%{GREEDYDATA:message})" ], "pattern_definitions": { "DATETIME": "[-0-9]+ %{TIME} %{WORD:event.timezone}", "GREEDYDATA": "(.|\n|\t)*", - "POSTGRESQL_DB_NAME": "[a-zA-Z0-9_]+[a-zA-Z0-9_\\$]*" + "POSTGRESQL_DB_NAME": "[a-zA-Z0-9_]+[a-zA-Z0-9_\\$]*", + "POSTGRESQL_QUERY_STEP": "%{WORD:postgresql.log.query_step}(?: | %{WORD:postgresql.log.query_name})?" 
} } }, diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json index 23f76405..201c50cb 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-debian-with-slowlog.log-expected.json @@ -87,6 +87,7 @@ "message": "2017-07-31 13:36:43.557 CEST [4983] postgres@postgres LOG: duration: 37.118 ms statement: SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", "postgresql.log.database": "postgres", "postgresql.log.query": "SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:36:43.557 CEST", "process.pid": 4983, "service.type": "postgresql", @@ -108,6 +109,7 @@ "message": "2017-07-31 13:36:44.104 CEST [4986] postgres@postgres LOG: duration: 2.895 ms statement: SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", "postgresql.log.database": "postgres", "postgresql.log.query": "SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:36:44.104 CEST", "process.pid": 4986, "service.type": "postgresql", @@ -129,6 +131,7 @@ "message": "2017-07-31 13:36:44.642 CEST [4989] postgres@postgres LOG: duration: 2.809 ms statement: SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", "postgresql.log.database": "postgres", "postgresql.log.query": "SELECT d.datname as \"Name\",\n\t pg_catalog.pg_get_userbyid(d.datdba) as \"Owner\",\n\t pg_catalog.pg_encoding_to_char(d.encoding) as \"Encoding\",\n\t d.datcollate as \"Collate\",\n\t d.datctype as \"Ctype\",\n\t pg_catalog.array_to_string(d.datacl, E'\\n') AS \"Access privileges\"\n\tFROM pg_catalog.pg_database d\n\tORDER BY 1;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:36:44.642 CEST", "process.pid": 4989, 
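
A note on the grok change above, outside the patch itself: the `POSTGRESQL_QUERY_STEP` pattern extends the slowlog expression so that, in addition to plain `statement` entries, the parse/bind/execute steps of the extended query protocol are captured into `postgresql.log.query_step`, with an optional prepared-statement name going into `postgresql.log.query_name` (see the new field docs in `fields.yml` earlier in this diff). One way to sanity-check the pattern is Elasticsearch's `_ingest/pipeline/_simulate` API. The request below is a minimal sketch, not the module's full pipeline: it inlines only the duration/step portion of the pattern, and the two sample documents reuse the lines from the new `postgresql-query-steps-slowlog.log` test file.

POST _ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      {
        "grok": {
          "field": "message",
          "patterns": [
            "duration: %{NUMBER:temp.duration:float} ms %{POSTGRESQL_QUERY_STEP}: %{GREEDYDATA:postgresql.log.query}"
          ],
          "pattern_definitions": {
            "POSTGRESQL_QUERY_STEP": "%{WORD:postgresql.log.query_step}(?: | %{WORD:postgresql.log.query_name})?"
          }
        }
      }
    ]
  },
  "docs": [
    { "_source": { "message": "duration: 12.437 ms parse : select * from table" } },
    { "_source": { "message": "duration: 12.437 ms execute pdo_stmt_00000002: select * from table" } }
  ]
}

The first document should come back with only `postgresql.log.query_step: parse` (the stray space before the colon is consumed by the `(?: | ...)` alternative), while the second yields both `query_step: execute` and `query_name: pdo_stmt_00000002`, matching the expected JSON added further down in this diff.
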
"service.type": "postgresql", @@ -182,6 +185,7 @@ "message": "2017-07-31 13:39:21.025 CEST [5404] postgres@postgres LOG: duration: 37.598 ms statement: SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as \"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", "postgresql.log.database": "postgres", "postgresql.log.query": "SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as \"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:39:21.025 CEST", "process.pid": 5404, "service.type": "postgresql", @@ -200,6 +204,7 @@ "message": "2017-07-31 13:39:31.619 CEST [5502] postgres@clients LOG: duration: 9.482 ms statement: select * from clients;", "postgresql.log.database": "clients", "postgresql.log.query": "select * from clients;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:39:31.619 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -218,6 +223,7 @@ "message": "2017-07-31 13:39:40.147 CEST [5502] postgres@clients LOG: duration: 0.765 ms statement: select id from clients;", "postgresql.log.database": "clients", "postgresql.log.query": "select id from clients;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:39:40.147 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -239,6 +245,7 @@ "message": "2017-07-31 13:40:54.310 CEST [5502] postgres@clients LOG: duration: 26.082 ms statement: SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as \"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", "postgresql.log.database": "clients", "postgresql.log.query": "SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as 
\"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:40:54.310 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -257,6 +264,7 @@ "message": "2017-07-31 13:43:22.645 CEST [5502] postgres@clients LOG: duration: 36.162 ms statement: create table cats(name varchar(50) primary key, toy varchar (50) not null, born timestamp not null);", "postgresql.log.database": "clients", "postgresql.log.query": "create table cats(name varchar(50) primary key, toy varchar (50) not null, born timestamp not null);", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:43:22.645 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -275,6 +283,7 @@ "message": "2017-07-31 13:46:02.670 CEST [5502] postgres@c$lients LOG: duration: 10.540 ms statement: insert into cats(name, toy, born) values('kate', 'ball', now());", "postgresql.log.database": "c$lients", "postgresql.log.query": "insert into cats(name, toy, born) values('kate', 'ball', now());", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:46:02.670 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -293,6 +302,7 @@ "message": "2017-07-31 13:46:23.016 CEST [5502] postgres@_clients$db LOG: duration: 5.156 ms statement: insert into cats(name, toy, born) values('frida', 'horse', now());", "postgresql.log.database": "_clients$db", "postgresql.log.query": "insert into cats(name, toy, born) values('frida', 'horse', now());", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:46:23.016 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -311,6 +321,7 @@ "message": "2017-07-31 13:46:55.637 CEST [5502] postgres@clients_db LOG: duration: 25.871 ms statement: create table dogs(name varchar(50) primary key, owner varchar (50) not null, born timestamp not null);", "postgresql.log.database": "clients_db", "postgresql.log.query": "create table dogs(name varchar(50) primary key, owner varchar (50) not null, born timestamp not null);", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-07-31 13:46:55.637 CEST", "process.pid": 5502, "service.type": "postgresql", @@ -332,6 +343,7 @@ "message": "2019-05-06 19:00:04.511 UTC [913763] elastic@opbeans LOG: duration: 0.753 ms statement: SELECT p.id, p.sku, p.name, p.stock, t.name AS type_name FROM products p LEFT JOIN product_types t ON p.type_id=t.id\n FROM orders JOIN customers ON orders.customer_id=customers.id\n FROM products JOIN product_types ON type_id=product_types.id", "postgresql.log.database": "opbeans", "postgresql.log.query": "SELECT p.id, p.sku, p.name, p.stock, t.name AS type_name FROM products p LEFT JOIN product_types t ON p.type_id=t.id\n FROM orders JOIN customers ON orders.customer_id=customers.id\n FROM products JOIN product_types ON type_id=product_types.id", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2019-05-06 19:00:04.511 UTC", "process.pid": 913763, "service.type": "postgresql", diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json 
b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json index 0a8a5f69..dbd1e12d 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-9.6-multi-core.log-expected.json @@ -50,6 +50,7 @@ "postgresql.log.core_id": 2, "postgresql.log.database": "postgres", "postgresql.log.query": "SELECT n.nspname as \"Schema\",\n\t c.relname as \"Name\",\n\t CASE c.relkind WHEN 'r' THEN 'table' WHEN 'v' THEN 'view' WHEN 'm' THEN 'materialized view' WHEN 'i' THEN 'index' WHEN 'S' THEN 'sequence' WHEN 's' THEN 'special' WHEN 'f' THEN 'foreign table' END as \"Type\",\n\t pg_catalog.pg_get_userbyid(c.relowner) as \"Owner\"\n\tFROM pg_catalog.pg_class c\n\t LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n\tWHERE c.relkind IN ('r','')\n\t AND n.nspname <> 'pg_catalog'\n\t AND n.nspname <> 'information_schema'\n\t AND n.nspname !~ '^pg_toast'\n\t AND pg_catalog.pg_table_is_visible(c.oid)\n\tORDER BY 1,2;", + "postgresql.log.query_step": "statement", "postgresql.log.timestamp": "2017-04-03 22:35:22.389 CEST", "process.pid": 5404, "service.type": "postgresql", diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log new file mode 100644 index 00000000..faba3c3b --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log @@ -0,0 +1,3 @@ +2019-09-04 15:52:38.004 CEST [31136] user@host LOG: duration: 12.437 ms parse : select * from table +2019-09-04 15:52:38.004 CEST [31136] user@host LOG: duration: 12.437 ms execute pdo_stmt_00000002: select * from table + diff --git a/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json new file mode 100644 index 00000000..273499e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/filebeat/module/postgresql/log/test/postgresql-query-steps-slowlog.log-expected.json @@ -0,0 +1,44 @@ +[ + { + "@timestamp": "2019-09-04T13:52:38.004Z", + "event.dataset": "postgresql.log", + "event.duration": 12437000, + "event.module": "postgresql", + "event.timezone": "CEST", + "fileset.name": "log", + "input.type": "log", + "log.level": "LOG", + "log.offset": 0, + "message": "2019-09-04 15:52:38.004 CEST [31136] user@host LOG: duration: 12.437 ms parse : select * from table", + "postgresql.log.database": "host", + "postgresql.log.query": "select * from table", + "postgresql.log.query_step": "parse", + "postgresql.log.timestamp": "2019-09-04 15:52:38.004 CEST", + "process.pid": 31136, + "service.type": "postgresql", + "user.name": "user" + }, + { + "@timestamp": "2019-09-04T13:52:38.004Z", + "event.dataset": "postgresql.log", + "event.duration": 12437000, + "event.module": "postgresql", + "event.timezone": "CEST", + "fileset.name": "log", + "input.type": "log", + "log.flags": [ + "multiline" + ], + "log.level": "LOG", + "log.offset": 111, + "message": "2019-09-04 15:52:38.004 CEST [31136] user@host LOG: duration: 12.437 ms execute pdo_stmt_00000002: select * from table\n", + "postgresql.log.database": "host", + "postgresql.log.query": "select * from table\n", + 
"postgresql.log.query_name": "pdo_stmt_00000002", + "postgresql.log.query_step": "execute", + "postgresql.log.timestamp": "2019-09-04 15:52:38.004 CEST", + "process.pid": 31136, + "service.type": "postgresql", + "user.name": "user" + } +] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/system/auth/ingest/pipeline.json index debb3d06..8df0a77e 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/ingest/pipeline.json @@ -41,6 +41,7 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "system.auth.timestamp", "target_field": "@timestamp", "formats": [ @@ -48,7 +49,7 @@ "MMM dd HH:mm:ss", "ISO8601" ], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json index 627fe04b..a7a3cee0 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/auth-ubuntu1204.log-expected.json @@ -2,7 +2,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -15,7 +15,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -31,7 +31,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -43,7 +43,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -55,7 +55,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -68,7 +68,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -84,7 +84,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -96,7 +96,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -108,7 +108,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -121,7 +121,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", 
"input.type": "log", @@ -137,7 +137,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -149,7 +149,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -161,7 +161,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -174,7 +174,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -190,7 +190,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -202,7 +202,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -214,7 +214,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -227,7 +227,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -243,7 +243,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -255,7 +255,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -267,7 +267,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -280,7 +280,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "input.type": "log", "log.offset": 3355, @@ -292,7 +292,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -308,7 +308,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -320,7 +320,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -332,7 +332,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -345,7 +345,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": 
"+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -361,7 +361,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -373,7 +373,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -385,7 +385,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -398,7 +398,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -414,7 +414,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -426,7 +426,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -438,7 +438,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -451,7 +451,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -467,7 +467,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -479,7 +479,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -491,7 +491,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -504,7 +504,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -520,7 +520,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -532,7 +532,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -544,7 +544,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -557,7 +557,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", 
@@ -573,7 +573,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -585,7 +585,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -597,7 +597,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -610,7 +610,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -626,7 +626,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -638,7 +638,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -650,7 +650,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -663,7 +663,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -679,7 +679,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -691,7 +691,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -703,7 +703,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -716,7 +716,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -732,7 +732,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -744,7 +744,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -756,7 +756,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -769,7 +769,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -785,7 +785,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + 
"event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -797,7 +797,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -809,7 +809,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -825,7 +825,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -837,7 +837,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -849,7 +849,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -861,7 +861,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -874,7 +874,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -887,7 +887,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -903,7 +903,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -915,7 +915,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -927,7 +927,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -940,7 +940,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -956,7 +956,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -968,7 +968,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -980,7 +980,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -993,7 +993,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1009,7 
+1009,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1021,7 +1021,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1033,7 +1033,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1046,7 +1046,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1062,7 +1062,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1074,7 +1074,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1086,7 +1086,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1099,7 +1099,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1115,7 +1115,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1127,7 +1127,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1139,7 +1139,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1152,7 +1152,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1168,7 +1168,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1180,7 +1180,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1192,7 +1192,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1205,7 +1205,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1221,7 +1221,7 @@ { "event.dataset": "system.auth", "event.module": "system", - 
"event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1233,7 +1233,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1245,7 +1245,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1258,7 +1258,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1274,7 +1274,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1286,7 +1286,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1298,7 +1298,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -1311,7 +1311,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/secure-rhel7.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/secure-rhel7.log-expected.json index 48826c5b..331294ad 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/secure-rhel7.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/secure-rhel7.log-expected.json @@ -5,7 +5,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -31,7 +31,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -47,7 +47,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -73,7 +73,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -89,7 +89,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -115,7 +115,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -128,7 +128,7 @@ { "event.dataset": "system.auth", 
"event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -141,7 +141,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -154,7 +154,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -167,7 +167,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -183,7 +183,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -209,7 +209,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -225,7 +225,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -251,7 +251,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -267,7 +267,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -293,7 +293,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -309,7 +309,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -335,7 +335,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -351,7 +351,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -377,7 +377,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -390,7 +390,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -403,7 +403,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": 
"slave22", "input.type": "log", @@ -416,7 +416,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -429,7 +429,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -442,7 +442,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -455,7 +455,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -471,7 +471,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -497,7 +497,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -513,7 +513,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -539,7 +539,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -555,7 +555,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -581,7 +581,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -597,7 +597,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -623,7 +623,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -639,7 +639,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -665,7 +665,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -681,7 +681,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -707,7 +707,7 @@ { "event.dataset": "system.auth", 
"event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -720,7 +720,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -736,7 +736,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -762,7 +762,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -778,7 +778,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -804,7 +804,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -817,7 +817,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -830,7 +830,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -843,7 +843,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -856,7 +856,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -872,7 +872,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -898,7 +898,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -914,7 +914,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -940,7 +940,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -956,7 +956,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -982,7 +982,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": 
"slave22", "input.type": "log", @@ -998,7 +998,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1024,7 +1024,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1040,7 +1040,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1066,7 +1066,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1079,7 +1079,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1092,7 +1092,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1105,7 +1105,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1118,7 +1118,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1134,7 +1134,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1160,7 +1160,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1176,7 +1176,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1202,7 +1202,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1218,7 +1218,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1244,7 +1244,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1260,7 +1260,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1286,7 +1286,7 @@ { 
"event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1302,7 +1302,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1328,7 +1328,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1341,7 +1341,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1354,7 +1354,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1367,7 +1367,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1380,7 +1380,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1396,7 +1396,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1422,7 +1422,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1438,7 +1438,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1464,7 +1464,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1480,7 +1480,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1506,7 +1506,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1519,7 +1519,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1532,7 +1532,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1545,7 +1545,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", 
"host.hostname": "slave22", "input.type": "log", @@ -1561,7 +1561,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1587,7 +1587,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1603,7 +1603,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1629,7 +1629,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1645,7 +1645,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1671,7 +1671,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1687,7 +1687,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1713,7 +1713,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1729,7 +1729,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1755,7 +1755,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1768,7 +1768,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1781,7 +1781,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1794,7 +1794,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1807,7 +1807,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -1823,7 +1823,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -1849,7 
+1849,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json index 6cba8253..974b2c0d 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/test.log-expected.json @@ -5,7 +5,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "success", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_success", "fileset.name": "auth", "host.hostname": "localhost", @@ -27,7 +27,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "success", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_success", "fileset.name": "auth", "host.hostname": "localhost", @@ -48,7 +48,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "localhost", @@ -67,7 +67,7 @@ "event.dataset": "system.auth", "event.module": "system", "event.outcome": "failure", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "event.type": "authentication_failure", "fileset.name": "auth", "host.hostname": "slave22", @@ -93,7 +93,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", "input.type": "log", @@ -109,7 +109,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "slave22", "input.type": "log", @@ -129,7 +129,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", "input.type": "log", @@ -145,7 +145,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "precise32", "input.type": "log", @@ -162,7 +162,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "group.id": "48", "group.name": "apache", @@ -176,7 +176,7 @@ { "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "group.id": "48", "host.hostname": "localhost", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/timestamp.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/timestamp.log-expected.json index 2b4ff36e..2b881dc5 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/timestamp.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/auth/test/timestamp.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "2019-06-14T10:40:20.912Z", + "@timestamp": "2019-06-14T08:40:20.912-02:00", "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + 
"event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", "input.type": "log", @@ -14,10 +14,10 @@ "service.type": "system" }, { - "@timestamp": "2019-06-14T11:31:15.412Z", + "@timestamp": "2019-06-14T09:31:15.412-02:00", "event.dataset": "system.auth", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "auth", "host.hostname": "localhost", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/ingest/pipeline.json b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/ingest/pipeline.json index 1bf54bc0..0c614b8a 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/ingest/pipeline.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/ingest/pipeline.json @@ -29,6 +29,7 @@ }, { "date": { + "if": "ctx.event.timezone == null", "field": "system.syslog.timestamp", "target_field": "@timestamp", "formats": [ @@ -37,7 +38,7 @@ "MMM d HH:mm:ss", "ISO8601" ], - "ignore_failure": true + "on_failure": [{"append": {"field": "error.message", "value": "{{ _ingest.on_failure_message }}"}}] } }, { diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json index 3cc641ce..5b116507 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog-sample.log-expected.json @@ -2,7 +2,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -18,7 +18,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -31,7 +31,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "input.type": "log", "log.offset": 1176, diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json index a1f49e89..fc057403 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/darwin-syslog.log-expected.json @@ -2,7 +2,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -15,7 +15,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -31,7 +31,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -44,7 +44,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + 
"event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -57,7 +57,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -73,7 +73,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -86,7 +86,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -99,7 +99,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -115,7 +115,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -128,7 +128,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -141,7 +141,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -154,7 +154,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -167,7 +167,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -180,7 +180,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -193,7 +193,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -206,7 +206,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -219,7 +219,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -232,7 +232,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -245,7 +245,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -258,7 +258,7 @@ { 
"event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -271,7 +271,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -284,7 +284,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -297,7 +297,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -313,7 +313,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -326,7 +326,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -342,7 +342,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -355,7 +355,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -368,7 +368,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -381,7 +381,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -394,7 +394,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -407,7 +407,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -420,7 +420,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -433,7 +433,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -449,7 +449,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -462,7 +462,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", 
"fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -478,7 +478,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -491,7 +491,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -504,7 +504,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -517,7 +517,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -530,7 +530,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -542,7 +542,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -555,7 +555,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -568,7 +568,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -581,7 +581,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -593,7 +593,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -605,7 +605,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -618,7 +618,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -630,7 +630,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -643,7 +643,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -656,7 +656,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -668,7 +668,7 @@ { "event.dataset": 
"system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -681,7 +681,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -694,7 +694,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -707,7 +707,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -719,7 +719,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -732,7 +732,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -744,7 +744,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -757,7 +757,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -769,7 +769,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -782,7 +782,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -795,7 +795,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -808,7 +808,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -820,7 +820,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -833,7 +833,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -845,7 +845,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -858,7 +858,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", 
"host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -871,7 +871,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -884,7 +884,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -896,7 +896,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -909,7 +909,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -922,7 +922,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -934,7 +934,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -947,7 +947,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -960,7 +960,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -972,7 +972,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -985,7 +985,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -998,7 +998,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1010,7 +1010,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1023,7 +1023,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1035,7 +1035,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1048,7 +1048,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1060,7 +1060,7 @@ { "event.dataset": "system.syslog", 
"event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1073,7 +1073,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1086,7 +1086,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1099,7 +1099,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1112,7 +1112,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1124,7 +1124,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1137,7 +1137,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1150,7 +1150,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1162,7 +1162,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1175,7 +1175,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1188,7 +1188,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1200,7 +1200,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1213,7 +1213,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1225,7 +1225,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1238,7 +1238,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1251,7 +1251,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": 
"syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1264,7 +1264,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1276,7 +1276,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", @@ -1289,7 +1289,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "a-mac-with-esc-key", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/suse-syslog.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/suse-syslog.log-expected.json index 38547d62..0230189f 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/suse-syslog.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/suse-syslog.log-expected.json @@ -2,7 +2,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "linux-sqrz", "input.type": "log", @@ -15,7 +15,7 @@ { "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "linux-sqrz", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/tz-offset.log-expected.json b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/tz-offset.log-expected.json index 873a3ce0..0e9c7d98 100644 --- a/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/tz-offset.log-expected.json +++ b/vendor/github.com/elastic/beats/filebeat/module/system/syslog/test/tz-offset.log-expected.json @@ -1,9 +1,9 @@ [ { - "@timestamp": "1986-04-25T21:23:45.101Z", + "@timestamp": "1986-04-25T19:23:45.101-02:00", "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "rmbkmonitor04", "input.type": "log", @@ -15,10 +15,10 @@ "service.type": "system" }, { - "@timestamp": "1986-04-25T21:23:45.388Z", + "@timestamp": "1986-04-25T19:23:45.388-02:00", "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "rmbkmonitor04", "input.type": "log", @@ -29,10 +29,10 @@ "service.type": "system" }, { - "@timestamp": "2019-06-14T10:40:20.912Z", + "@timestamp": "2019-06-14T08:40:20.912-02:00", "event.dataset": "system.syslog", "event.module": "system", - "event.timezone": "+00:00", + "event.timezone": "-02:00", "fileset.name": "syslog", "host.hostname": "localhost", "input.type": "log", diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/apache.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/apache.yml.disabled index c392cd92..23ca64bd 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/apache.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/apache.yml.disabled @@ -1,5 +1,5 @@ # Module: apache -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-apache.html +# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-apache.html - module: apache # Access logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/auditd.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/auditd.yml.disabled index 06be9137..d93fb523 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/auditd.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/auditd.yml.disabled @@ -1,5 +1,5 @@ # Module: auditd -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-auditd.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-auditd.html - module: auditd log: diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/elasticsearch.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/elasticsearch.yml.disabled index e18d7ef2..eda46df6 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/elasticsearch.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/elasticsearch.yml.disabled @@ -1,5 +1,5 @@ # Module: elasticsearch -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-elasticsearch.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-elasticsearch.html - module: elasticsearch # Server log diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/haproxy.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/haproxy.yml.disabled index 58a0ae0a..f9cb1ad5 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/haproxy.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/haproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: haproxy -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-haproxy.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-haproxy.html - module: haproxy # All logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/icinga.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/icinga.yml.disabled index 96db7ea4..9ab1bc4a 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/icinga.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/icinga.yml.disabled @@ -1,5 +1,5 @@ # Module: icinga -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-icinga.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-icinga.html - module: icinga # Main logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled index 0775c544..c480aef2 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/iis.yml.disabled @@ -1,5 +1,5 @@ # Module: iis -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-iis.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-iis.html - module: iis # Access logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/kafka.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/kafka.yml.disabled index 2c8d991c..ce2b74af 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/kafka.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/kafka.yml.disabled @@ -1,5 +1,5 @@ # Module: kafka -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-kafka.html +# Docs: 
https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-kafka.html - module: kafka # All logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/kibana.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/kibana.yml.disabled index 9fd57f8d..ecff5b47 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/kibana.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/kibana.yml.disabled @@ -1,5 +1,5 @@ # Module: kibana -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-kibana.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-kibana.html - module: kibana # All logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/logstash.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/logstash.yml.disabled index 200ebd3b..8050d725 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/logstash.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/logstash.yml.disabled @@ -1,5 +1,5 @@ # Module: logstash -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-logstash.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-logstash.html - module: logstash # logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled index 71c93c42..5e90d855 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/mongodb.yml.disabled @@ -1,5 +1,5 @@ # Module: mongodb -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-mongodb.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-mongodb.html - module: mongodb # All logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/mysql.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/mysql.yml.disabled index aa7163fb..7e3f8286 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/mysql.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/mysql.yml.disabled @@ -1,5 +1,5 @@ # Module: mysql -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-mysql.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-mysql.html - module: mysql # Error logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/nats.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/nats.yml.disabled index fa6edb34..22cda9a1 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/nats.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/nats.yml.disabled @@ -1,5 +1,5 @@ # Module: nats -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-nats.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-nats.html - module: nats # All logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/nginx.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/nginx.yml.disabled index 72ff09fc..171ac9c1 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/nginx.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/nginx.yml.disabled @@ -1,5 +1,5 @@ # Module: nginx -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-nginx.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-nginx.html - 
module: nginx # Access logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/osquery.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/osquery.yml.disabled index f26cd831..16e20d4f 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/osquery.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/osquery.yml.disabled @@ -1,5 +1,5 @@ # Module: osquery -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-osquery.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-osquery.html - module: osquery result: diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/postgresql.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/postgresql.yml.disabled index f2523c6a..ae0fe19f 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/postgresql.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/postgresql.yml.disabled @@ -1,5 +1,5 @@ # Module: postgresql -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-postgresql.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-postgresql.html - module: postgresql # All logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/redis.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/redis.yml.disabled index f9d52489..21bb8b6d 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/redis.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/redis.yml.disabled @@ -1,5 +1,5 @@ # Module: redis -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-redis.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-redis.html - module: redis # Main logs diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/santa.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/santa.yml.disabled index d59afadc..0597189e 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/santa.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/santa.yml.disabled @@ -1,5 +1,5 @@ # Module: santa -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-santa.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-santa.html - module: santa log: diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/system.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/system.yml.disabled index 09daf609..be3b254d 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/system.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/system.yml.disabled @@ -1,5 +1,5 @@ # Module: system -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-system.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-system.html - module: system # Syslog diff --git a/vendor/github.com/elastic/beats/filebeat/modules.d/traefik.yml.disabled b/vendor/github.com/elastic/beats/filebeat/modules.d/traefik.yml.disabled index d54b23af..c93d062c 100644 --- a/vendor/github.com/elastic/beats/filebeat/modules.d/traefik.yml.disabled +++ b/vendor/github.com/elastic/beats/filebeat/modules.d/traefik.yml.disabled @@ -1,5 +1,5 @@ # Module: traefik -# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.4/filebeat-module-traefik.html +# Docs: https://www.elastic.co/guide/en/beats/filebeat/7.5/filebeat-module-traefik.html - module: traefik # Access logs diff --git 
a/vendor/github.com/elastic/beats/filebeat/scripts/mage/config.go b/vendor/github.com/elastic/beats/filebeat/scripts/mage/config.go index 98ce4391..23abad70 100644 --- a/vendor/github.com/elastic/beats/filebeat/scripts/mage/config.go +++ b/vendor/github.com/elastic/beats/filebeat/scripts/mage/config.go @@ -46,6 +46,9 @@ func configFileParams(moduleDirs ...string) devtools.ConfigFileParams { devtools.OSSBeatDir("_meta/beat.docker.yml"), devtools.LibbeatDir("_meta/config.docker.yml"), }, + ExtraVars: map[string]interface{}{ + "UseKubernetesMetadataProcessor": true, + }, } } diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 b/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 index 938e9e6a..0c48b34e 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/config/filebeat.yml.j2 @@ -76,6 +76,7 @@ filebeat.{{input_config | default("inputs")}}: {% if json.overwrite_keys %}overwrite_keys: true{% endif %} {% if json.add_error_key %}add_error_key: true{% endif %} {% if json.ignore_decoding_error %}ignore_decoding_error: true{% endif %} + {% if json.document_id %}document_id: {{json.document_id}}{% endif %} {% endif %} {% if multiline %} diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py index 0fec0c68..04794e5c 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_json.py @@ -230,6 +230,34 @@ class Test(BaseTest): assert output[2]["error.message"] == \ "type not overwritten (not string)" + def test_id_in_message(self): + """ + Extract the document ID from JSON contents. + """ + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + json=dict( + message_key="msg", + document_id="id", + ), + ) + os.mkdir(self.working_dir + "/log/") + self.copy_files(["logs/json_id.log"], + target_dir="log") + proc = self.start_beat() + self.wait_until( + lambda: self.output_has(lines=3), + max_timeout=10) + proc.check_kill_and_wait() + + output = self.read_output() + + assert len(output) == 3 + for i in xrange(len(output)): + assert("@metadata.id" in output[i]) + assert(output[i]["@metadata.id"] == str(i)) + assert("json.id" not in output[i]) + def test_with_generic_filtering(self): """ It should work fine to combine JSON decoding with diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py index 7785277e..c21a9658 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_modules.py @@ -139,8 +139,12 @@ class Test(BaseTest): output = open(os.path.join(output_path, "output.log"), "ab") output.write(" ".join(cmd) + "\n") + # Use a fixed timezone so results don't vary depending on the environment. + # Don't use UTC, to avoid hiding cases where non-UTC timezones are not converted as needed; + # this can happen because UTC is often the default timezone in date parsers when no other + # timezone is specified.
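+        # For example, with TZ=Etc/GMT+2 (i.e. UTC-02:00) a timestamp that was previously
+        # expected as "2019-06-14T10:40:20.912Z" is now expected as
+        # "2019-06-14T08:40:20.912-02:00" (see the tz-offset.log expectations above).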
local_env = os.environ.copy() - local_env["TZ"] = 'Etc/UTC' + local_env["TZ"] = 'Etc/GMT+2' subprocess.Popen(cmd, env=local_env, @@ -220,11 +224,16 @@ def clean_keys(obj): # ECS versions change for any ECS release, large or small ecs_key = ["ecs.version"] # datasets for which @timestamp is removed due to date missing - remove_timestamp = {"icinga.startup", "redis.log", "haproxy.log", "system.auth", "system.syslog", "cef.log"} + remove_timestamp = {"icinga.startup", "redis.log", "haproxy.log", + "system.auth", "system.syslog", "cef.log", "activemq.audit", "iptables.log", "cisco.asa", "cisco.ios"} # dataset + log file pairs for which @timestamp is kept as an exception from above remove_timestamp_exception = { ('system.syslog', 'tz-offset.log'), - ('system.auth', 'timestamp.log') + ('system.auth', 'timestamp.log'), + ('cisco.asa', 'asa.log'), + ('cisco.asa', 'hostnames.log'), + ('cisco.asa', 'not-ip.log'), + ('cisco.asa', 'sample.log') } # Keep source log filename for exceptions diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_processors.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_processors.py index c6514986..95673cfd 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_processors.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_processors.py @@ -2,6 +2,8 @@ from filebeat import BaseTest import io import os +import unittest +import sys """ Contains tests for filtering. @@ -300,6 +302,42 @@ class Test(BaseTest): ["42", "hello world", "string\twith tabs and \"broken\" quotes"], ]) + def test_javascript_processor_add_host_metadata(self): + """ + Check JS processor with add_host_metadata + """ + + self._test_javascript_processor_with_source("""\'var processor = require("processor"); +var addHostMetadata = new processor.AddHostMetadata({"netinfo.enabled": true}); + +function process(evt) { + addHostMetadata.Run(evt); +}\' +""") + + output = self.read_output() + for evt in output: + assert "host.hostname" in evt + + def _test_javascript_processor_with_source(self, script_source): + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/test.log", + processors=[ + { + "script": { + "lang": "javascript", + "source": script_source, + }, + }, + ] + ) + + self._init_and_read_test_input([ + u"test line 1\n", + u"test line 2\n", + u"test line 3\n", + ]) + def _init_and_read_test_input(self, input_lines): with io.open(self.working_dir + "/test.log", "w", encoding="utf-8") as f: for line in input_lines: diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py index db2ad49f..66043614 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_registrar.py @@ -206,7 +206,7 @@ class Test(BaseTest): self.render_config_template( path=os.path.abspath(self.working_dir) + "/log/*", registry_home=registry_home, - registry_file_permissions=0644, + registry_file_permissions=0640, ) os.mkdir(self.working_dir + "/log/") testfile_path = self.working_dir + "/log/test.log" @@ -223,7 +223,7 @@ class Test(BaseTest): max_timeout=1) filebeat.check_kill_and_wait() - self.assertEqual(self.file_permissions(registry_file), "0644") + self.assertEqual(self.file_permissions(registry_file), "0640") def test_registry_file_update_permissions(self): """ @@ -262,7 +262,7 @@ class Test(BaseTest): self.render_config_template( 
path=os.path.abspath(self.working_dir) + "/log/*", registry_home="a/b/c/registry_x", - registry_file_permissions=0644 + registry_file_permissions=0640 ) filebeat = self.start_beat() @@ -280,7 +280,7 @@ class Test(BaseTest): filebeat.check_kill_and_wait() - self.assertEqual(self.file_permissions(registry_file), "0644") + self.assertEqual(self.file_permissions(registry_file), "0640") def test_rotating_file(self): """ diff --git a/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py b/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py index ac450e76..08a101d8 100644 --- a/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py +++ b/vendor/github.com/elastic/beats/filebeat/tests/system/test_syslog.py @@ -49,6 +49,46 @@ class Test(BaseTest): self.assert_syslog(output[0]) sock.close() + def test_syslog_with_tcp_invalid_message(self): + """ + Test syslog input with invalid events from TCP. + """ + host = "127.0.0.1" + port = 8080 + input_raw = """ +- type: syslog + protocol: + tcp: + host: "{}:{}" +""" + + input_raw = input_raw.format(host, port) + self.render_config_template( + input_raw=input_raw, + inputs=False, + ) + + filebeat = self.start_beat() + + self.wait_until(lambda: self.log_contains("Started listening for TCP connection")) + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # TCP + sock.connect((host, port)) + + for n in range(0, 2): + sock.send("invalid\n") + + self.wait_until(lambda: self.output_count(lambda x: x >= 2)) + + filebeat.check_kill_and_wait() + + output = self.read_output() + + assert len(output) == 2 + assert output[0]["message"] == "invalid" + assert len(output[0]["log.source.address"]) > 0 + sock.close() + def test_syslog_with_udp(self): """ Test syslog input with events from TCP. diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile b/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile index 0c2efb16..3a96d912 100644 --- a/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile +++ b/vendor/github.com/elastic/beats/generator/beat/{beat}/Makefile @@ -24,7 +24,7 @@ pre-setup: copy-vendor git-init # Copy beats into vendor directory .PHONY: copy-vendor -copy-vendor: +copy-vendor: vendor-check mkdir -p vendor/github.com/elastic/beats git archive --remote ${BEAT_GOPATH}/src/github.com/elastic/beats HEAD | tar -x --exclude=x-pack -C vendor/github.com/elastic/beats mkdir -p vendor/github.com/magefile @@ -37,4 +37,9 @@ git-init: .PHONY: git-add git-add: git add -A - git commit -m "Add generated {beat} files" + git commit -q -m "Add generated {beat} files" + + +.PHONY: vendor-check +vendor-check: + @if output=$$(git -C ${BEAT_GOPATH}/src/github.com/elastic/beats status --porcelain) && [ ! 
-z "$${output}" ]; then printf "\033[31mWARNING: elastic/beats has uncommitted changes, these will not be in the vendor directory!\033[0m\n"; fi \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go.tmpl rename to vendor/github.com/elastic/beats/generator/beat/{beat}/beater/{beat}.go diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/cmd/root.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/cmd/root.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/beat/{beat}/cmd/root.go.tmpl rename to vendor/github.com/elastic/beats/generator/beat/{beat}/cmd/root.go diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/config/config.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/config/config.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/beat/{beat}/config/config.go.tmpl rename to vendor/github.com/elastic/beats/generator/beat/{beat}/config/config.go diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/config/config_test.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/config/config_test.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/beat/{beat}/config/config_test.go.tmpl rename to vendor/github.com/elastic/beats/generator/beat/{beat}/config/config_test.go diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/magefile.go b/vendor/github.com/elastic/beats/generator/beat/{beat}/magefile.go index e40ce218..ede1bbf8 100644 --- a/vendor/github.com/elastic/beats/generator/beat/{beat}/magefile.go +++ b/vendor/github.com/elastic/beats/generator/beat/{beat}/magefile.go @@ -1,34 +1,19 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - // +build mage package main import ( - "context" "fmt" "time" "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" devtools "github.com/elastic/beats/dev-tools/mage" + "github.com/elastic/beats/dev-tools/mage/target/build" "github.com/elastic/beats/dev-tools/mage/target/common" + "github.com/elastic/beats/dev-tools/mage/target/pkg" + "github.com/elastic/beats/dev-tools/mage/target/unittest" + "github.com/elastic/beats/dev-tools/mage/target/update" ) func init() { @@ -38,37 +23,6 @@ func init() { devtools.BeatVendor = "{full_name}" } -// Build builds the Beat binary. -func Build() error { - return devtools.Build(devtools.DefaultBuildArgs()) -} - -// GolangCrossBuild build the Beat binary inside of the golang-builder. -// Do not use directly, use crossBuild instead. 
-func GolangCrossBuild() error { - return devtools.GolangCrossBuild(devtools.DefaultGolangCrossBuildArgs()) -} - -// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). -func BuildGoDaemon() error { - return devtools.BuildGoDaemon() -} - -// CrossBuild cross-builds the beat for all target platforms. -func CrossBuild() error { - return devtools.CrossBuild() -} - -// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. -func CrossBuildGoDaemon() error { - return devtools.CrossBuildGoDaemon() -} - -// Clean cleans all generated files and build artifacts. -func Clean() error { - return devtools.Clean() -} - // Package packages the Beat for distribution. // Use SNAPSHOT=true to build snapshots. // Use PLATFORMS to control the target platforms. @@ -78,38 +32,9 @@ func Package() { devtools.UseCommunityBeatPackaging() - mg.Deps(Update) - mg.Deps(CrossBuild, CrossBuildGoDaemon) - mg.SerialDeps(devtools.Package, TestPackages) -} - -// TestPackages tests the generated packages (i.e. file modes, owners, groups). -func TestPackages() error { - return devtools.TestPackages() -} - -// Update updates the generated files (aka make update). -func Update() error { - return sh.Run("make", "update") -} - -// Fields generates a fields.yml for the Beat. -func Fields() error { - return devtools.GenerateFieldsYAML() -} - -// GoTestUnit executes the Go unit tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestUnit(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) -} - -// GoTestIntegration executes the Go integration tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestIntegration(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs()) + mg.Deps(update.Update) + mg.Deps(build.CrossBuild, build.CrossBuildGoDaemon) + mg.SerialDeps(devtools.Package, pkg.PackageTest) } // Config generates both the short/reference/docker configs. @@ -117,6 +42,16 @@ func Config() error { return devtools.Config(devtools.AllConfigTypes, devtools.ConfigFileParams{}, ".") } +// Fields generates a fields.yml for the Beat. +func Fields() error { + return devtools.GenerateFieldsYAML() +} + +// Clean cleans all generated files and build artifacts. +func Clean() error { + return devtools.Clean() +} + // Check formats code, updates generated content, check for common errors, and // checks for any modified files. func Check() { @@ -127,3 +62,29 @@ func Fmt() { common.Fmt() } + +// Test runs all available tests +func Test() { + mg.Deps(unittest.GoUnitTest) +} + +// Build builds the Beat binary. +func Build() error { + return build.Build() +} + +// CrossBuild cross-builds the beat for all target platforms. +func CrossBuild() error { + return build.CrossBuild() +} + +// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). +func BuildGoDaemon() error { + return build.BuildGoDaemon() +} + +// GolangCrossBuild builds the Beat binary inside of the golang-builder. -- Do not use directly, use crossBuild instead.
+func GolangCrossBuild() error { + return build.GolangCrossBuild() +} diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/main.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/main.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/beat/{beat}/main.go.tmpl rename to vendor/github.com/elastic/beats/generator/beat/{beat}/main.go diff --git a/vendor/github.com/elastic/beats/generator/beat/{beat}/main_test.go.tmpl b/vendor/github.com/elastic/beats/generator/beat/{beat}/main_test.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/beat/{beat}/main_test.go.tmpl rename to vendor/github.com/elastic/beats/generator/beat/{beat}/main_test.go diff --git a/vendor/github.com/elastic/beats/generator/common/Makefile b/vendor/github.com/elastic/beats/generator/common/Makefile index e880b412..5cd08aab 100644 --- a/vendor/github.com/elastic/beats/generator/common/Makefile +++ b/vendor/github.com/elastic/beats/generator/common/Makefile @@ -24,6 +24,13 @@ test: prepare-test $(MAKE) || exit 1 ; \ $(MAKE) unit +.PHONY: test-package +test-package: test + cd ${BEAT_PATH} ; \ + export GOPATH=${PWD}/build ; \ + export PATH=$${GOPATH}/bin:$${PATH}; \ + mage package + .PHONY: prepare-test prepare-test:: python-env # Makes sure to use current version of beats for testing diff --git a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile index d1ddcfc0..996964ea 100644 --- a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile +++ b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/Makefile @@ -21,7 +21,7 @@ setup: copy-vendor git-init # Copy beats into vendor directory .PHONY: copy-vendor -copy-vendor: +copy-vendor: vendor-check mkdir -p vendor/github.com/elastic/beats git archive --remote ${BEAT_GOPATH}/src/github.com/elastic/beats HEAD | tar -x --exclude=x-pack -C vendor/github.com/elastic/beats ln -sf ${PWD}/vendor/github.com/elastic/beats/metricbeat/scripts/generate_imports_helper.py ${PWD}/vendor/github.com/elastic/beats/script/generate_imports_helper.py @@ -36,4 +36,8 @@ git-init: .PHONY: git-add git-add: git add -A - git commit -m "Add generated {beat} files" + git commit -q -m "Add generated {beat} files" + +.PHONY: vendor-check +vendor-check: + @if output=$$(git -C ${BEAT_GOPATH}/src/github.com/elastic/beats status --porcelain) && [ ! -z "$${output}" ]; then printf "\033[31mWARNING: elastic/beats has uncommitted changes, these will not be in the vendor directory!\033[0m\n"; fi \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/cmd/root.go.tmpl b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/cmd/root.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/metricbeat/{beat}/cmd/root.go.tmpl rename to vendor/github.com/elastic/beats/generator/metricbeat/{beat}/cmd/root.go diff --git a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/magefile.go b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/magefile.go index 82c96241..649cae97 100644 --- a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/magefile.go +++ b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/magefile.go @@ -1,34 +1,20 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. 
Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - // +build mage package main import ( - "context" "fmt" "time" "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" devtools "github.com/elastic/beats/dev-tools/mage" + "github.com/elastic/beats/dev-tools/mage/target/build" + "github.com/elastic/beats/dev-tools/mage/target/collectors" "github.com/elastic/beats/dev-tools/mage/target/common" + "github.com/elastic/beats/dev-tools/mage/target/pkg" + "github.com/elastic/beats/dev-tools/mage/target/unittest" + "github.com/elastic/beats/dev-tools/mage/target/update" metricbeat "github.com/elastic/beats/metricbeat/scripts/mage" ) @@ -36,42 +22,12 @@ func init() { devtools.SetBuildVariableSources(devtools.DefaultBeatBuildVariableSources) devtools.BeatDescription = "One sentence description of the Beat." + devtools.BeatVendor = "{full_name}" } // CollectAll generates the docs and the fields. func CollectAll() { - mg.Deps(CollectDocs, FieldsDocs) -} - -// Build builds the Beat binary. -func Build() error { - return devtools.Build(devtools.DefaultBuildArgs()) -} - -// GolangCrossBuild build the Beat binary inside of the golang-builder. -// Do not use directly, use crossBuild instead. -func GolangCrossBuild() error { - return devtools.GolangCrossBuild(devtools.DefaultGolangCrossBuildArgs()) -} - -// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). -func BuildGoDaemon() error { - return devtools.BuildGoDaemon() -} - -// CrossBuild cross-builds the beat for all target platforms. -func CrossBuild() error { - return devtools.CrossBuild() -} - -// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. -func CrossBuildGoDaemon() error { - return devtools.CrossBuildGoDaemon() -} - -// Clean cleans all generated files and build artifacts. -func Clean() error { - return devtools.Clean() + mg.Deps(collectors.CollectDocs, FieldsDocs) } // Package packages the Beat for distribution. @@ -83,24 +39,9 @@ func Package() { devtools.UseCommunityBeatPackaging() - mg.Deps(Update) - mg.Deps(CrossBuild, CrossBuildGoDaemon) - mg.SerialDeps(devtools.Package, TestPackages) -} - -// TestPackages tests the generated packages (i.e. file modes, owners, groups). -func TestPackages() error { - return devtools.TestPackages() -} - -// Update updates the generated files (aka make update). -func Update() error { - return sh.Run("make", "update") -} - -// Fields generates a fields.yml for the Beat. -func Fields() error { - return devtools.GenerateFieldsYAML("module") + mg.Deps(update.Update) + mg.Deps(build.CrossBuild, build.CrossBuildGoDaemon) + mg.SerialDeps(devtools.Package, pkg.PackageTest) } // FieldsDocs generates docs/fields.asciidoc containing all fields @@ -116,27 +57,17 @@ func FieldsDocs() error { return devtools.Docs.FieldDocs(output) } -// CollectDocs creates the documentation under docs/ -func CollectDocs() error { - return metricbeat.CollectDocs() -} - -// GoTestUnit executes the Go unit tests. 
-// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestUnit(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) -} - -// GoTestIntegration executes the Go integration tests. -// Use TEST_COVERAGE=true to enable code coverage profiling. -// Use RACE_DETECTOR=true to enable the race detector. -func GoTestIntegration(ctx context.Context) error { - return devtools.GoTest(ctx, devtools.DefaultGoTestIntegrationArgs()) +// Fields generates a fields.yml for the Beat. +func Fields() error { + return devtools.GenerateFieldsYAML("module") } // Config generates both the short/reference/docker configs. -func Config() error { +func Config() { + mg.Deps(configYML, metricbeat.GenerateDirModulesD) +} + +func configYML() error { customDeps := devtools.ConfigFileParams{ ShortParts: []string{"_meta/short.yml", devtools.LibbeatDir("_meta/config.yml.tmpl")}, ReferenceParts: []string{"_meta/reference.yml", devtools.LibbeatDir("_meta/config.reference.yml.tmpl")}, @@ -146,6 +77,11 @@ func Config() error { return devtools.Config(devtools.AllConfigTypes, customDeps, ".") } +// Clean cleans all generated files and build artifacts. +func Clean() error { + return devtools.Clean() +} + // Check formats code, updates generated content, check for common errors, and // checks for any modified files. func Check() { @@ -156,3 +92,34 @@ func Check() { func Fmt() { common.Fmt() } + +// Update updates the generated files (aka make update). +func Update() error { + return update.Update() +} + +// Test runs all available tests +func Test() { + mg.Deps(unittest.GoUnitTest) +} + +// Build builds the Beat binary. +func Build() error { + return build.Build() +} + +// CrossBuild cross-builds the beat for all target platforms. +func CrossBuild() error { + return build.CrossBuild() +} + +// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). +func BuildGoDaemon() error { + return build.BuildGoDaemon() +} + +// GolangCrossBuild build the Beat binary inside of the golang-builder. +// Do not use directly, use crossBuild instead. +func GolangCrossBuild() error { + return build.GolangCrossBuild() +} diff --git a/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/main.go.tmpl b/vendor/github.com/elastic/beats/generator/metricbeat/{beat}/main.go similarity index 100% rename from vendor/github.com/elastic/beats/generator/metricbeat/{beat}/main.go.tmpl rename to vendor/github.com/elastic/beats/generator/metricbeat/{beat}/main.go diff --git a/vendor/github.com/elastic/beats/heartbeat/Dockerfile b/vendor/github.com/elastic/beats/heartbeat/Dockerfile index e71c19fe..aa9b14de 100644 --- a/vendor/github.com/elastic/beats/heartbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/heartbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12.9 +FROM golang:1.12.12 MAINTAINER Nicolas Ruflin RUN set -x && \ diff --git a/vendor/github.com/elastic/beats/heartbeat/_meta/beat.reference.yml b/vendor/github.com/elastic/beats/heartbeat/_meta/beat.reference.yml index 9e163c45..bd59aedd 100644 --- a/vendor/github.com/elastic/beats/heartbeat/_meta/beat.reference.yml +++ b/vendor/github.com/elastic/beats/heartbeat/_meta/beat.reference.yml @@ -74,6 +74,9 @@ heartbeat.monitors: # Interval between file file changed checks. #interval: 5s + # Set to true to publish fields with null values in events. + #keep_null: false + # Define a directory to load monitor definitions from. Definitions take the form # of individual yaml files. 
# heartbeat.config.monitors: @@ -158,6 +161,8 @@ heartbeat.monitors: # Interval between file file changed checks. #interval: 5s + # Set to true to publish fields with null values in events. + #keep_null: false - type: http # monitor type `http`. Connect via HTTP an optionally verify response @@ -246,6 +251,8 @@ heartbeat.monitors: # Interval between file file changed checks. #interval: 5s + # Set to true to publish fields with null values in events. + #keep_null: false heartbeat.scheduler: # Limit number of concurrent tasks executed by heartbeat. The task limit if diff --git a/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/config.go b/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/config.go new file mode 100644 index 00000000..60cc5d6c --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/config.go @@ -0,0 +1,30 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package hints + +type config struct { + Key string `config:"key"` + DefaultSchedule string `config:"defaults.schedule"` +} + +func defaultConfig() *config { + return &config{ + Key: "monitor", + DefaultSchedule: "@every 5s", + } +} diff --git a/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/monitors.go b/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/monitors.go new file mode 100644 index 00000000..bb2a7d16 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/monitors.go @@ -0,0 +1,205 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package hints + +import ( + "fmt" + "sort" + "strconv" + "strings" + + "github.com/elastic/beats/libbeat/autodiscover" + "github.com/elastic/beats/libbeat/autodiscover/builder" + "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/logp" +) + +func init() { + autodiscover.Registry.AddBuilder("hints", NewHeartbeatHints) +} + +const ( + montype = "type" + schedule = "schedule" + hosts = "hosts" + processors = "processors" +) + +type heartbeatHints struct { + config *config + logger *logp.Logger +} + +// NewHeartbeatHints builds a heartbeat hints builder +func NewHeartbeatHints(cfg *common.Config) (autodiscover.Builder, error) { + config := defaultConfig() + err := cfg.Unpack(config) + + if err != nil { + return nil, fmt.Errorf("unable to unpack hints config due to error: %v", err) + } + + return &heartbeatHints{config, logp.NewLogger("hints.builder")}, nil +} + +// CreateConfig generates monitor configs based on the hints in the bus event +func (hb *heartbeatHints) CreateConfig(event bus.Event) []*common.Config { + var hints common.MapStr + hIface, ok := event["hints"] + if ok { + hints, _ = hIface.(common.MapStr) + } + + monitorConfig := hb.getRawConfigs(hints) + + // If explicitly disabled, return nothing + if builder.IsDisabled(hints, hb.config.Key) { + hb.logger.Warnf("heartbeat config disabled by hint: %+v", event) + return []*common.Config{} + } + + port, _ := common.TryToInt(event["port"]) + + host, _ := event["host"].(string) + if host == "" { + return []*common.Config{} + } + + if monitorConfig != nil { + configs := []*common.Config{} + for _, cfg := range monitorConfig { + if config, err := common.NewConfigFrom(cfg); err == nil { + configs = append(configs, config) + } + } + hb.logger.Debugf("generated config %+v", configs) + // Apply information in event to the template to generate the final config + return template.ApplyConfigTemplate(event, configs) + } + + monitors := hb.getMonitors(hints) + + var configs []*common.Config + for _, monitor := range monitors { + // If a monitor doesn't have a schedule associated with it, apply the default one.
+ if _, ok := monitor[schedule]; !ok { + monitor[schedule] = hb.config.DefaultSchedule + } + + if procs := hb.getProcessors(monitor); len(procs) != 0 { + monitor[processors] = procs + } + + h := hb.getHostsWithPort(monitor, port) + monitor[hosts] = h + + config, err := common.NewConfigFrom(monitor) + if err != nil { + hb.logger.Debugf("unable to create config from MapStr %+v: %v", monitor, err) + return []*common.Config{} + } + hb.logger.Debugf("generated config %+v", config) + configs = append(configs, config) + } + + // Apply information in event to the template to generate the final config + return template.ApplyConfigTemplate(event, configs) +} + +func (hb *heartbeatHints) getType(hints common.MapStr) common.MapStr { + return builder.GetHintMapStr(hints, hb.config.Key, montype) +} + +func (hb *heartbeatHints) getSchedule(hints common.MapStr) []string { + return builder.GetHintAsList(hints, hb.config.Key, schedule) +} + +func (hb *heartbeatHints) getRawConfigs(hints common.MapStr) []common.MapStr { + return builder.GetHintAsConfigs(hints, hb.config.Key) +} + +func (hb *heartbeatHints) getMonitors(hints common.MapStr) []common.MapStr { + raw := builder.GetHintMapStr(hints, hb.config.Key, "") + if raw == nil { + return nil + } + + // Integer hint keys define an ordered list of monitors; all remaining keys are + // collected into a single additional monitor config below. + var words, nums []string + + for key := range raw { + if _, err := strconv.Atoi(key); err != nil { + words = append(words, key) + continue + } else { + nums = append(nums, key) + } + } + + // Note: the numeric keys are ordered lexically, as strings. + sort.Strings(nums) + + var configs []common.MapStr + for _, key := range nums { + rawCfg := raw[key] + if config, ok := rawCfg.(common.MapStr); ok { + configs = append(configs, config) + } + } + + defaultMap := common.MapStr{} + for _, word := range words { + defaultMap[word] = raw[word] + } + + if len(defaultMap) != 0 { + configs = append(configs, defaultMap) + } + return configs +} + +func (hb *heartbeatHints) getProcessors(hints common.MapStr) []common.MapStr { + return builder.GetConfigs(hints, "", "processors") +} + +func (hb *heartbeatHints) getHostsWithPort(hints common.MapStr, port int) []string { + var result []string + thosts := builder.GetHintAsList(hints, "", hosts) + // Only pick hosts that reference ${data.port} or the port of the current event. This + // makes sure that an incorrect metadata mapping doesn't happen. + for _, h := range thosts { + if strings.Contains(h, "data.port") || strings.Contains(h, fmt.Sprintf(":%d", port)) || + // When the event has no port, accept hosts that reference ${data.host} (e.g. ${data.host}:9090) + (port == 0 && strings.Contains(h, "data.host")) { + result = append(result, h) + } else if port == 0 && !strings.Contains(h, ":") { + // For ICMP-like use cases, allow a bare host to be passed when there is no port + result = append(result, h) + } else { + hb.logger.Warnf("unable to frame a host from input host: %s", h) + } + } + + if len(thosts) > 0 && len(result) == 0 { + hb.logger.Debugf("no hosts selected for port %d with hints: %+v", port, thosts) + return nil + } + + return result +} diff --git a/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/monitors_test.go b/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/monitors_test.go new file mode 100644 index 00000000..d3d37f99 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/autodiscover/builder/hints/monitors_test.go @@ -0,0 +1,218 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V.
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package hints + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/libbeat/common/bus" + "github.com/elastic/beats/libbeat/logp" +) + +func TestGenerateHints(t *testing.T) { + tests := []struct { + message string + event bus.Event + len int + result common.MapStr + }{ + { + message: "Empty event hints should return empty config", + event: bus.Event{ + "host": "1.2.3.4", + "kubernetes": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + "docker": common.MapStr{ + "container": common.MapStr{ + "name": "foobar", + "id": "abc", + }, + }, + }, + len: 0, + result: common.MapStr{}, + }, + { + message: "Hints without host should return nothing", + event: bus.Event{ + "hints": common.MapStr{ + "monitor": common.MapStr{ + "type": "icmp", + }, + }, + }, + len: 0, + result: common.MapStr{}, + }, + { + message: "Hints without matching port should return nothing in the hosts section", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "monitor": common.MapStr{ + "type": "icmp", + "hosts": "${data.host}:8888", + }, + }, + }, + len: 1, + result: common.MapStr{ + "schedule": "@every 5s", + "type": "icmp", + }, + }, + { + message: "Hints with multiple hosts return only the matching one", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "monitor": common.MapStr{ + "type": "icmp", + "hosts": "${data.host}:8888,${data.host}:9090", + }, + }, + }, + len: 1, + result: common.MapStr{ + "type": "icmp", + "schedule": "@every 5s", + "hosts": []interface{}{"1.2.3.4:9090"}, + }, + }, + { + message: "Hints with multiple hosts return only the one with the template", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "monitor": common.MapStr{ + "type": "icmp", + "hosts": "${data.host}:8888,${data.host}:${data.port}", + }, + }, + }, + len: 1, + result: common.MapStr{ + "type": "icmp", + "schedule": "@every 5s", + "hosts": []interface{}{"1.2.3.4:9090"}, + }, + }, + { + message: "Monitor defined in monitors as a JSON string should return a config", + event: bus.Event{ + "host": "1.2.3.4", + "hints": common.MapStr{ + "monitor": common.MapStr{ + "raw": "{\"enabled\":true,\"type\":\"icmp\",\"schedule\":\"@every 20s\",\"timeout\":\"3s\"}", + }, + }, + }, + len: 1, + result: common.MapStr{ + "type": "icmp", + "timeout": "3s", + "schedule": "@every 20s", + "enabled": true, + }, + }, + { + message: "Monitor with processor config must return an module having the processor defined", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "monitor": common.MapStr{ + "type": "icmp", + "hosts": "${data.host}:9090", + "processors": common.MapStr{ + "add_locale": common.MapStr{ + "abbrevation": "MST", + }, + }, + }, + }, + }, + len: 1, + result: common.MapStr{ + "type": "icmp", + "hosts": 
[]interface{}{"1.2.3.4:9090"}, + "schedule": "@every 5s", + "processors": []interface{}{ + map[string]interface{}{ + "add_locale": map[string]interface{}{ + "abbrevation": "MST", + }, + }, + }, + }, + }, + { + message: "Hints with multiple monitors should return multiple", + event: bus.Event{ + "host": "1.2.3.4", + "port": 9090, + "hints": common.MapStr{ + "monitor": common.MapStr{ + "1": common.MapStr{ + "type": "icmp", + "hosts": "${data.host}:8888,${data.host}:9090", + }, + "2": common.MapStr{ + "type": "icmp", + "hosts": "${data.host}:8888,${data.host}:9090", + }, + }, + }, + }, + len: 2, + result: common.MapStr{ + "type": "icmp", + "schedule": "@every 5s", + "hosts": []interface{}{"1.2.3.4:9090"}, + }, + }, + } + for _, test := range tests { + + m := heartbeatHints{ + config: defaultConfig(), + logger: logp.NewLogger("hints.builder"), + } + cfgs := m.CreateConfig(test.event) + assert.Equal(t, len(cfgs), test.len, test.message) + + if len(cfgs) != 0 { + config := common.MapStr{} + err := cfgs[0].Unpack(&config) + assert.Nil(t, err, test.message) + + assert.Equal(t, test.result, config, test.message) + } + + } +} diff --git a/vendor/github.com/elastic/beats/heartbeat/autodiscover/include.go b/vendor/github.com/elastic/beats/heartbeat/autodiscover/include.go new file mode 100644 index 00000000..cd4bf8bf --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/autodiscover/include.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package autodiscover + +import ( + // include all heartbeat specific builders + _ "github.com/elastic/beats/heartbeat/autodiscover/builder/hints" +) diff --git a/vendor/github.com/elastic/beats/heartbeat/cmd/root.go b/vendor/github.com/elastic/beats/heartbeat/cmd/root.go index 99cd9e5d..d86e4291 100644 --- a/vendor/github.com/elastic/beats/heartbeat/cmd/root.go +++ b/vendor/github.com/elastic/beats/heartbeat/cmd/root.go @@ -20,6 +20,7 @@ package cmd import ( "fmt" // register default heartbeat monitors + _ "github.com/elastic/beats/heartbeat/autodiscover" "github.com/elastic/beats/heartbeat/beater" _ "github.com/elastic/beats/heartbeat/monitors/defaults" cmd "github.com/elastic/beats/libbeat/cmd" diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-hints.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-hints.asciidoc new file mode 100644 index 00000000..3af54481 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-hints.asciidoc @@ -0,0 +1,42 @@ +{beatname_uc} supports autodiscover based on hints from the both Docker and Kubernetes. The hints system looks for +hints in Kubernetes Pod annotations or Docker labels that have the prefix `co.elastic.monitor`. 
As soon as
+the container starts, {beatname_uc} checks whether it contains any hints and launches the proper config for
+it. Hints tell {beatname_uc} how to monitor the given container.
+
+Hints for {beatname_uc} take the form `co.elastic.monitor/MONITOR_INDEX.CONFIG_OPTION=VALUE`, where `MONITOR_INDEX` represents
+the index of the monitor, and `CONFIG_OPTION` is any of the <> options, with `VALUE` being the value
+you'd like to set that option to.
+
+As an example, let's convert the regular-syntax {beatname_uc} monitors below into autodiscover hints.
+
+[source,yml]
+----------------------------------------------------------------------
+heartbeat.monitors:
+- type: tcp
+  hosts: '${data.host}:6379'
+  schedule: '@every 10s'
+- type: icmp
+  hosts: '${data.host}'
+  schedule: '@every 10s'
+----------------------------------------------------------------------
+
+To configure this with autodiscover, add the following lines to your Dockerfile to set the requisite labels.
+
+----------------------------------------------------------------------
+LABEL co.elastic.monitor/1.type=tcp co.elastic.monitor/1.hosts='${data.host}:6379' co.elastic.monitor/1.schedule='@every 10s'
+LABEL co.elastic.monitor/2.type=icmp co.elastic.monitor/2.hosts='${data.host}' co.elastic.monitor/2.schedule='@every 10s'
+----------------------------------------------------------------------
+
+Then, add the following to `heartbeat.yml`:
+
+[source,yml]
+----------------------------------------------------------------------
+heartbeat.autodiscover:
+  providers:
+    - type: docker
+      hints.enabled: true
+----------------------------------------------------------------------
+
+Then, start {beatname_uc}. It should discover the container you built and ping it once via ICMP and once via TCP.
+
+For a full list of variables available, see the provider-specific docs in <>.
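+
+The same hints can also be provided as Kubernetes Pod annotations instead of Docker labels. The
+following is a minimal sketch for illustration only (the Pod name and image are placeholders, not
+part of the official examples):
+
+[source,yml]
+----------------------------------------------------------------------
+apiVersion: v1
+kind: Pod
+metadata:
+  name: redis
+  annotations:
+    co.elastic.monitor/1.type: tcp
+    co.elastic.monitor/1.hosts: '${data.host}:6379'
+    co.elastic.monitor/1.schedule: '@every 10s'
+spec:
+  containers:
+  - name: redis
+    image: redis
+----------------------------------------------------------------------
+
+In that case, enable hints on the `kubernetes` provider in `heartbeat.yml`:
+
+[source,yml]
+----------------------------------------------------------------------
+heartbeat.autodiscover:
+  providers:
+    - type: kubernetes
+      hints.enabled: true
+----------------------------------------------------------------------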
diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-kubernetes-config.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-kubernetes-config.asciidoc index 12b5f20c..f46aa35d 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-kubernetes-config.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/autodiscover-kubernetes-config.asciidoc @@ -12,7 +12,7 @@ heartbeat.autodiscover: kubernetes.annotations.prometheus.io.scrape: "true" config: - type: http - urls: ["${data.host}:${data.port}"] + hosts: ["${data.host}:${data.port}"] schedule: "@every 1s" timeout: 1s ------------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc index e033406c..ef38354d 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/configuring-howto.asciidoc @@ -49,42 +49,44 @@ include::./heartbeat-general-options.asciidoc[] include::./heartbeat-observer-options.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] -include::{libbeat-dir}/docs/shared-ilm.asciidoc[] +include::{libbeat-dir}/shared-ilm.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./heartbeat-filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: +:autodiscoverHints: :autodiscoverAWSELB: -include::{libbeat-dir}/docs/shared-autodiscover.asciidoc[] +include::{libbeat-dir}/shared-autodiscover.asciidoc[] :autodiscoverAWSELB!: +:autodiscoverHints!: :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/regexp.asciidoc[] +include::{libbeat-dir}/regexp.asciidoc[] -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc index 4848f873..bbe89a61 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/faq.asciidoc @@ -5,6 +5,6 @@ This section describes common problems you might encounter with {beatname_uc}. Also check out the https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} discussion forum]. 
-include::{libbeat-dir}/docs/faq-limit-bandwidth.asciidoc[] +include::{libbeat-dir}/faq-limit-bandwidth.asciidoc[] -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc index 0d3427ce..bc702680 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/getting-started.asciidoc @@ -1,7 +1,7 @@ [[heartbeat-getting-started]] == Getting Started With Heartbeat -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <> * <> @@ -22,7 +22,7 @@ monitor are running. //TODO: Add a separate topic that explores deployment scenarios in more detail (like installing on a sub-network where there's a firewall etc. -include::{libbeat-dir}/docs/shared-download-and-install.asciidoc[] +include::{libbeat-dir}/shared-download-and-install.asciidoc[] [[deb]] *deb:* @@ -81,7 +81,7 @@ tar xzvf heartbeat-{version}-darwin-x86_64.tar.gz endif::[] -include::{libbeat-dir}/docs/shared-brew-install.asciidoc[] +include::{libbeat-dir}/shared-brew-install.asciidoc[] [[linux]] *linux:* @@ -151,7 +151,7 @@ options, see <>. [[heartbeat-configuration]] === Step 2: Configure Heartbeat -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] Heartbeat provides monitors to check the status of hosts at set intervals. You configure each monitor individually. Heartbeat currently provides monitors @@ -198,20 +198,20 @@ was started. Heartbeat adds the `@every` keyword to the syntax provided by the See <> for a full description of each configuration option. -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] -include::{libbeat-dir}/docs/step-look-at-config.asciidoc[] +include::{libbeat-dir}/step-look-at-config.asciidoc[] [[heartbeat-template]] === Step 3: Load the index template in Elasticsearch -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [[load-kibana-dashboards]] === Step 4: Set up the Kibana dashboards @@ -249,7 +249,7 @@ configuration file, or run Heartbeat with `--strict.perms=false` specified. See in the _Beats Platform Reference_. :requires-sudo: -include::{libbeat-dir}/docs/shared-brew-run.asciidoc[] +include::{libbeat-dir}/shared-brew-run.asciidoc[] :requires-sudo!: *win:* @@ -272,7 +272,7 @@ created example {beatname_uc} dashboards in the https://github.com/elastic/uptime-contrib[uptime-contrib] github repository. If you loaded them earlier, open them now. -include::{libbeat-dir}/docs/opendashboards.asciidoc[] +include::{libbeat-dir}/opendashboards.asciidoc[] The dashboards are provided as examples. We recommend that you {kibana-ref}/dashboard.html[customize] them to meet your needs. 
diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc index 8dc1fbb5..29545c33 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-filtering.asciidoc @@ -1,6 +1,6 @@ [[filtering-and-enhancing-data]] == Filter and Enhance the exported data -include::{libbeat-dir}/docs/processors.asciidoc[] +include::{libbeat-dir}/processors.asciidoc[] -include::{libbeat-dir}/docs/processors-using.asciidoc[] +include::{libbeat-dir}/processors-using.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-general-options.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-general-options.asciidoc index 60e017c7..2013d310 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-general-options.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-general-options.asciidoc @@ -4,5 +4,5 @@ You can specify settings in the +{beatname_lc}.yml+ config file to control the general behavior of {beatname_uc}. -include::{libbeat-dir}/docs/generalconfig.asciidoc[] +include::{libbeat-dir}/generalconfig.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc index 14a330a9..221af5c0 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/heartbeat-options.asciidoc @@ -33,7 +33,7 @@ heartbeat.monitors: check.receive: "Check" - type: http schedule: '@every 5s' - urls: ["http://localhost:80/service/status"] + hosts: ["http://localhost:80/service/status"] check.response.status: 200 heartbeat.scheduler: limit: 10 @@ -68,7 +68,7 @@ monitor definitions only, e.g. what is normally under the `heartbeat.monitors` s # /path/to/my/monitors.d/localhost_service_check.yml - type: http schedule: '@every 5s' - urls: ["http://localhost:80/service/status"] + hosts: ["http://localhost:80/service/status"] check.response.status: 200 ---------------------------------------------------------------------- @@ -87,7 +87,7 @@ options that are specific to that monitor type. The type of monitor to run. One of: * `icmp`: Uses an ICMP (v4 and v6) Echo Request to ping the configured hosts. -Requires root access. See <>. +Requires special permissions or root access. See <>. * `tcp`: Connects via TCP and optionally verifies the endpoint by sending and/or receiving a custom payload. See <>. * `http`: Connects via HTTP and optionally verifies that the host returns the @@ -211,13 +211,28 @@ A list of processors to apply to the data generated by the monitor. See <> for information about specifying processors in your config. +[float] +[[monitor-keep-null]] +==== `keep_null` + +If this option is set to true, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. + [float] [[monitor-icmp-options]] === ICMP options These options configure {beatname_uc} to use ICMP (v4 and v6) Echo Requests to check the configured hosts. These options are valid when the <> is -`icmp`. +`icmp`. Please note that on most platforms you must execute Heartbeat with elevated permissions +to perform ICMP pings. + +On Linux, regular users may perform pings if the right file capabilities are set. 
Run
+`sudo setcap cap_net_raw+eip /path/to/heartbeat` to grant {beatname_uc} ping capabilities on Linux.
+Alternatively, you can grant ping permissions to the user that {beatname_uc} runs as. To grant ping permissions
+in this way, run `sudo sysctl -w net.ipv4.ping_group_range='myuserid myuserid'`.
+
+Other platforms may require {beatname_uc} to run as root or administrator to execute pings.

 [float]
 [[monitor-icmp-hosts]]
@@ -368,7 +383,7 @@ the host returns the expected response. These options are valid when the

 [float]
 [[monitor-http-urls]]
-==== `urls`
+==== `hosts`

 A list of URLs to ping.

@@ -378,7 +393,7 @@ Example configuration:
 -------------------------------------------------------------------------------
 - type: http
   schedule: '@every 5s'
-  urls: ["http://myhost:80"]
+  hosts: ["http://myhost:80"]
 -------------------------------------------------------------------------------

@@ -419,7 +434,7 @@ Example configuration:
 -------------------------------------------------------------------------------
 - type: http
   schedule: '@every 5s'
-  urls: ["https://myhost:443"]
+  hosts: ["https://myhost:443"]
   ssl:
     certificate_authorities: ['/etc/ca.crt']
     supported_protocols: ["TLSv1.0", "TLSv1.1", "TLSv1.2"]
@@ -446,7 +461,7 @@ Set `response.include_body_max_bytes` to control the maximum size of the stored

 [[monitor-http-check]]
 ==== `check`

-An optional `request` to send to the remote host and the expected `response`. 
+An optional `request` to send to the remote host and the expected `response`.

 Example configuration:

@@ -454,7 +469,7 @@ Example configuration:
 -------------------------------------------------------------------------------
 - type: http
   schedule: '@every 5s'
-  urls: ["http://myhost:80"]
+  hosts: ["http://myhost:80"]
   check.request.method: HEAD
   check.response.status: 200
 -------------------------------------------------------------------------------

@@ -493,7 +508,8 @@ Under `check.response`, specify these options:

 *`status`*:: The expected status code. 4xx and 5xx codes are considered `down` by default. Other codes are considered `up`.
 *`headers`*:: The required response headers.
-*`body`*:: A list of regular expressions to match the the body output. Only a single expression needs to match.
+*`body`*:: A list of regular expressions to match the body output. Only a single expression needs to match. HTTP response
+bodies of up to 100 MiB are supported.

 Example configuration: This monitor examines the
@@ -517,7 +533,8 @@ response body for the strings `saved` or `Saved`
 - saved
 -------------------------------------------------------------------------------

-*`json`*:: A list of <> expressions executed against the body when parsed as JSON.
+*`json`*:: A list of <> expressions executed against the body when parsed as JSON. Body sizes
+must be less than or equal to 100 MiB.
The following configuration shows how to check the response when the body contains JSON: @@ -526,7 +543,7 @@ contains JSON: ------------------------------------------------------------------------------- - type: http schedule: '@every 5s' - urls: ["https://myhost:80"] + hosts: ["https://myhost:80"] check.request: method: GET headers: @@ -547,7 +564,7 @@ patterns: ------------------------------------------------------------------------------- - type: http schedule: '@every 5s' - urls: ["https://myhost:80"] + hosts: ["https://myhost:80"] check.request: method: GET headers: @@ -566,7 +583,7 @@ regex: ------------------------------------------------------------------------------- - type: http schedule: '@every 5s' - urls: ["https://myhost:80"] + hosts: ["https://myhost:80"] check.request: method: GET headers: @@ -669,5 +686,3 @@ the following JSON objects in `dynamic.json`: restarts it with a schedule of 15 seconds between checks. <2> {beatname_uc} starts a new monitor that uses a TLS-based connection with a custom CA certificate. - - diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc index 5f0cbff4..9b356037 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/index.asciidoc @@ -1,12 +1,13 @@ = Heartbeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: heartbeat :beatname_uc: Heartbeat :beatname_pkg: heartbeat-elastic @@ -20,14 +21,18 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :docker_platform: :win_os: :no_dashboards: +:no_decode_cef_processor: +:no_decode_csv_fields_processor: +:no_script_processor: +:no_timestamp_processor: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] include::./getting-started.asciidoc[] -include::{libbeat-dir}/docs/repositories.asciidoc[] +include::{libbeat-dir}/repositories.asciidoc[] include::./setting-up-running.asciidoc[] @@ -35,13 +40,13 @@ include::./configuring-howto.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc index 6d0b78a6..2dd97907 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/overview.asciidoc @@ -37,4 +37,4 @@ specific status code, response header, or content. The `tcp` and `http` monitors both support SSL/TLS and some proxy settings. 
-include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/running-on-docker.asciidoc index d9306b55..dbfcce5b 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/running-on-docker.asciidoc @@ -1 +1 @@ -include::{libbeat-dir}/docs/shared-docker.asciidoc[] +include::{libbeat-dir}/shared-docker.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc index 66e1ead2..8ec02a23 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/setting-up-running.asciidoc @@ -24,14 +24,14 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. -include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[] +include::{libbeat-dir}/shared-directory-layout.asciidoc[] -include::{libbeat-dir}/docs/keystore.asciidoc[] +include::{libbeat-dir}/keystore.asciidoc[] -include::{libbeat-dir}/docs/command-reference.asciidoc[] +include::{libbeat-dir}/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] -include::{libbeat-dir}/docs/shared-systemd.asciidoc[] +include::{libbeat-dir}/shared-systemd.asciidoc[] -include::{libbeat-dir}/docs/shared-shutdown.asciidoc[] +include::{libbeat-dir}/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc index c99b54c1..0fa1379f 100644 --- a/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/heartbeat/docs/troubleshooting.asciidoc @@ -17,14 +17,14 @@ following tips: [[getting-help]] == Get help -include::{libbeat-dir}/docs/getting-help.asciidoc[] +include::{libbeat-dir}/getting-help.asciidoc[] //sets block macro for debugging.asciidoc included in next section [[enable-heartbeat-debugging]] == Debug -include::{libbeat-dir}/docs/debugging.asciidoc[] +include::{libbeat-dir}/debugging.asciidoc[] diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.docker.yml b/vendor/github.com/elastic/beats/heartbeat/heartbeat.docker.yml index 2302283f..59636766 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.docker.yml +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.docker.yml @@ -24,6 +24,7 @@ heartbeat.monitors: processors: - add_cloud_metadata: ~ +- add_docker_metadata: ~ output.elasticsearch: hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' diff --git a/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml b/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml index a9f9de1b..ce1d76f6 100644 --- a/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml +++ b/vendor/github.com/elastic/beats/heartbeat/heartbeat.reference.yml @@ -74,6 +74,9 @@ heartbeat.monitors: # Interval between file file changed checks. #interval: 5s + # Set to true to publish fields with null values in events. + #keep_null: false + # Define a directory to load monitor definitions from. Definitions take the form # of individual yaml files. 
# heartbeat.config.monitors:
@@ -158,6 +161,8 @@ heartbeat.monitors:
   # Interval between file file changed checks.
   #interval: 5s

+  # Set to true to publish fields with null values in events.
+  #keep_null: false

 - type: http # monitor type `http`. Connect via HTTP an optionally verify response
@@ -246,6 +251,8 @@ heartbeat.monitors:
   # Interval between file file changed checks.
   #interval: 5s

+  # Set to true to publish fields with null values in events.
+  #keep_null: false

 heartbeat.scheduler:
   # Limit number of concurrent tasks executed by heartbeat. The task limit if
@@ -1214,7 +1221,7 @@ setup.template.settings:
 #setup.ilm.enabled: auto

 # Set the prefix used in the index lifecycle write alias name. The default alias
-# name is 'heartbeat-%{[agent.version]}'. 
+# name is 'heartbeat-%{[agent.version]}'.
 #setup.ilm.rollover_alias: "heartbeat"

 # Set the rollover index pattern. The default is "%{now/d}-000001".
@@ -1477,12 +1484,21 @@ logging.files:
 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false

-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost

 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066

+# Define which user should own the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe, using the Security
+# Descriptor Definition Language (SDDL). This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
 #============================= Process Security ================================

 # Enable or disable seccomp system call filtering on Linux. Default is enabled.
diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors.d/sample.http.yml.disabled b/vendor/github.com/elastic/beats/heartbeat/monitors.d/sample.http.yml.disabled
index 2270c342..e5c44c1e 100644
--- a/vendor/github.com/elastic/beats/heartbeat/monitors.d/sample.http.yml.disabled
+++ b/vendor/github.com/elastic/beats/heartbeat/monitors.d/sample.http.yml.disabled
@@ -15,7 +15,7 @@
   schedule: '@every 5s' # every 5 seconds from start of beat

   # Configure URLs to ping
-  urls: ["http://localhost:9200"]
+  hosts: ["http://localhost:9200"]

   # Configure IP protocol types to ping on if hostnames are configured.
   # Ping all resolvable IPs if `mode` is `all`, or only one IP if `mode` is `any`.
diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls.go
index c613311f..f7f6de72 100644
--- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls.go
+++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls.go
@@ -62,14 +62,14 @@ func TLSLayer(cfg *transport.TLSConfig, to time.Duration) Layer {
 			timer.stop()
 			event.PutValue("tls.rtt.handshake", look.RTT(timer.duration()))

-			addCertMetdata(event.Fields, tlsConn.ConnectionState().VerifiedChains)
+			addCertMetdata(event.Fields, tlsConn.ConnectionState().PeerCertificates)

 			return conn, nil
 		}), nil
 	}
 }

-func addCertMetdata(fields common.MapStr, chains [][]*x509.Certificate) {
+func addCertMetdata(fields common.MapStr, certs []*x509.Certificate) {
 	// The behavior here might seem strange. We *always* set a notBefore, but only optionally set a notAfter.
 	// Why might we do this?
// The root cause is that the x509.Certificate type uses time.Time for these fields instead of *time.Time @@ -94,15 +94,13 @@ func addCertMetdata(fields common.MapStr, chains [][]*x509.Certificate) { // To do this correctly, we take the maximum NotBefore and the minimum NotAfter. // This *should* always wind up being the terminal cert in the chain, but we should // compute this correctly. - for _, chain := range chains { - for _, cert := range chain { - if chainNotValidBefore.Before(cert.NotBefore) { - chainNotValidBefore = cert.NotBefore - } + for _, cert := range certs { + if chainNotValidBefore.Before(cert.NotBefore) { + chainNotValidBefore = cert.NotBefore + } - if cert.NotAfter != zeroTime && (chainNotValidAfter == nil || chainNotValidAfter.After(cert.NotAfter)) { - chainNotValidAfter = &cert.NotAfter - } + if cert.NotAfter != zeroTime && (chainNotValidAfter == nil || chainNotValidAfter.After(cert.NotAfter)) { + chainNotValidAfter = &cert.NotAfter } } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls_test.go index 043b86dc..b281bf60 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls_test.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/dialchain/tls_test.go @@ -44,6 +44,19 @@ func Test_addCertMetdata(t *testing.T) { BasicConstraintsValid: true, } + expiredNotAfter := time.Now().Add(-time.Hour) + expiredCert := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + Organization: []string{"Acme Co"}, + }, + NotBefore: goodNotBefore, + NotAfter: expiredNotAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + missingNotBeforeCert := x509.Certificate{ SerialNumber: big.NewInt(1), Subject: pkix.Name{ @@ -76,27 +89,35 @@ func Test_addCertMetdata(t *testing.T) { } tests := []struct { name string - chains [][]*x509.Certificate + certs []*x509.Certificate expected expected }{ { "Valid cert", - [][]*x509.Certificate{{&goodCert}}, + []*x509.Certificate{&goodCert}, expected{ notBefore: goodNotBefore, notAfter: &goodNotAfter, }, }, + { + "Expired cert", + []*x509.Certificate{&expiredCert}, + expected{ + notBefore: goodNotBefore, + notAfter: &expiredNotAfter, + }, + }, { "Missing not before", - [][]*x509.Certificate{{&missingNotBeforeCert}}, + []*x509.Certificate{&missingNotBeforeCert}, expected{ notAfter: &goodNotAfter, }, }, { "Missing not after", - [][]*x509.Certificate{{&missingNotAfterCert}}, + []*x509.Certificate{&missingNotAfterCert}, expected{ notBefore: goodNotBefore, }, @@ -105,7 +126,7 @@ func Test_addCertMetdata(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { event := common.MapStr{} - addCertMetdata(event, tt.chains) + addCertMetdata(event, tt.certs) v, err := event.GetValue("tls.certificate_not_valid_before") assert.NoError(t, err) assert.Equal(t, tt.expected.notBefore, v) diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go index db667e3d..8976cd56 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check.go @@ -27,68 +27,82 @@ import ( pkgerrors "github.com/pkg/errors" + "github.com/elastic/beats/heartbeat/reason" 
"github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/jsontransform" "github.com/elastic/beats/libbeat/common/match" "github.com/elastic/beats/libbeat/conditions" ) -type RespCheck func(*http.Response) error +// multiValidator combines multiple validations of each type into a single easy to use object. +type multiValidator struct { + respValidators []respValidator + bodyValidators []bodyValidator +} + +func (rv multiValidator) wantsBody() bool { + return len(rv.bodyValidators) > 0 +} + +func (rv multiValidator) validate(resp *http.Response, body string) reason.Reason { + for _, respValidator := range rv.respValidators { + if err := respValidator(resp); err != nil { + return reason.ValidateFailed(err) + } + } + + for _, bodyValidator := range rv.bodyValidators { + if err := bodyValidator(resp, body); err != nil { + return reason.ValidateFailed(err) + } + } + + return nil +} + +// respValidator is used for validating using only the non-body fields of the *http.Response. +// Accessing the body of the response in such a validator should not be done due, use bodyValidator +// for those purposes instead. +type respValidator func(*http.Response) error + +// bodyValidator lets you validate a stringified version of the body along with other metadata in +// *http.Response. +type bodyValidator func(*http.Response, string) error var ( errBodyMismatch = errors.New("body mismatch") ) -func makeValidateResponse(config *responseParameters) (RespCheck, error) { - var checks []RespCheck +func makeValidateResponse(config *responseParameters) (multiValidator, error) { + var respValidators []respValidator + var bodyValidators []bodyValidator if config.Status > 0 { - checks = append(checks, checkStatus(config.Status)) + respValidators = append(respValidators, checkStatus(config.Status)) } else { - checks = append(checks, checkStatusOK) + respValidators = append(respValidators, checkStatusOK) } if len(config.RecvHeaders) > 0 { - checks = append(checks, checkHeaders(config.RecvHeaders)) + respValidators = append(respValidators, checkHeaders(config.RecvHeaders)) } if len(config.RecvBody) > 0 { - checks = append(checks, checkBody(config.RecvBody)) + bodyValidators = append(bodyValidators, checkBody(config.RecvBody)) } if len(config.RecvJSON) > 0 { jsonChecks, err := checkJSON(config.RecvJSON) if err != nil { - return nil, err + return multiValidator{}, err } - checks = append(checks, jsonChecks) + bodyValidators = append(bodyValidators, jsonChecks) } - return checkAll(checks...), nil + return multiValidator{respValidators, bodyValidators}, nil } -func checkOK(_ *http.Response) error { return nil } - -// TODO: collect all errors into on error message. 
-func checkAll(checks ...RespCheck) RespCheck { - switch len(checks) { - case 0: - return checkOK - case 1: - return checks[0] - } - - return func(r *http.Response) error { - for _, check := range checks { - if err := check(r); err != nil { - return err - } - } - return nil - } -} - -func checkStatus(status uint16) RespCheck { +func checkStatus(status uint16) respValidator { return func(r *http.Response) error { if r.StatusCode == int(status) { return nil @@ -104,7 +118,7 @@ func checkStatusOK(r *http.Response) error { return nil } -func checkHeaders(headers map[string]string) RespCheck { +func checkHeaders(headers map[string]string) respValidator { return func(r *http.Response) error { for k, v := range headers { value := r.Header.Get(k) @@ -116,14 +130,10 @@ func checkHeaders(headers map[string]string) RespCheck { } } -func checkBody(body []match.Matcher) RespCheck { - return func(r *http.Response) error { - content, err := ioutil.ReadAll(r.Body) - if err != nil { - return err - } - for _, m := range body { - if m.Match(content) { +func checkBody(matcher []match.Matcher) bodyValidator { + return func(r *http.Response, body string) error { + for _, m := range matcher { + if m.MatchString(body) { return nil } } @@ -131,7 +141,7 @@ func checkBody(body []match.Matcher) RespCheck { } } -func checkJSON(checks []*jsonResponseCheck) (RespCheck, error) { +func checkJSON(checks []*jsonResponseCheck) (bodyValidator, error) { type compiledCheck struct { description string condition conditions.Condition @@ -147,9 +157,9 @@ func checkJSON(checks []*jsonResponseCheck) (RespCheck, error) { compiledChecks = append(compiledChecks, compiledCheck{check.Description, cond}) } - return func(r *http.Response) error { + return func(r *http.Response, body string) error { decoded := &common.MapStr{} - decoder := json.NewDecoder(r.Body) + decoder := json.NewDecoder(strings.NewReader(body)) decoder.UseNumber() err := decoder.Decode(decoded) diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go index be0b8cc7..a705ca34 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/check_test.go @@ -19,15 +19,15 @@ package http import ( "fmt" + "io/ioutil" "log" "net/http" "net/http/httptest" "testing" - "github.com/elastic/beats/libbeat/common" - "github.com/stretchr/testify/require" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/match" "github.com/elastic/beats/libbeat/conditions" ) @@ -118,7 +118,9 @@ func TestCheckBody(t *testing.T) { for _, pattern := range test.patterns { patterns = append(patterns, match.MustCompile(pattern)) } - check := checkBody(patterns)(res) + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + check := checkBody(patterns)(res, string(body)) if result := (check == nil); result != test.result { if test.result { @@ -183,7 +185,9 @@ func TestCheckJson(t *testing.T) { checker, err := checkJSON([]*jsonResponseCheck{{test.condDesc, test.condConf}}) require.NoError(t, err) - checkRes := checker(res) + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + checkRes := checker(res, string(body)) if result := checkRes == nil; result != test.result { if test.result { @@ -249,7 +253,9 @@ func TestCheckJsonWithIntegerComparison(t *testing.T) { checker, err := checkJSON([]*jsonResponseCheck{{test.condDesc, test.condConf}}) 
require.NoError(t, err) - checkRes := checker(res) + body, err := ioutil.ReadAll(res.Body) + require.NoError(t, err) + checkRes := checker(res, string(body)) if result := checkRes == nil; result != test.result { if test.result { diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go index 6a63c399..fb69c09c 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config.go @@ -19,19 +19,19 @@ package http import ( "fmt" + "net/url" "strings" "time" - "github.com/elastic/beats/libbeat/conditions" - + "github.com/elastic/beats/heartbeat/monitors" "github.com/elastic/beats/libbeat/common/match" "github.com/elastic/beats/libbeat/common/transport/tlscommon" - - "github.com/elastic/beats/heartbeat/monitors" + "github.com/elastic/beats/libbeat/conditions" ) type Config struct { - URLs []string `config:"urls" validate:"required"` + URLs []string `config:"urls"` + Hosts []string `config:"hosts"` ProxyURL string `config:"proxy_url"` Timeout time.Duration `config:"timeout"` MaxRedirects int `config:"max_redirects"` @@ -113,6 +113,7 @@ var defaultConfig = Config{ }, } +// Validate validates of the responseConfig object is valid or not func (r *responseConfig) Validate() error { switch strings.ToLower(r.IncludeBody) { case "always", "on_error", "never": @@ -127,6 +128,7 @@ func (r *responseConfig) Validate() error { return nil } +// Validate validates of the requestParameters object is valid or not func (r *requestParameters) Validate() error { switch strings.ToUpper(r.Method) { case "HEAD", "GET", "POST": @@ -137,6 +139,7 @@ func (r *requestParameters) Validate() error { return nil } +// Validate validates of the compressionConfig object is valid or not func (c *compressionConfig) Validate() error { t := strings.ToLower(c.Type) if t != "" && t != "gzip" { @@ -153,3 +156,37 @@ func (c *compressionConfig) Validate() error { return nil } + +// Validate validates of the Config object is valid or not +func (c *Config) Validate() error { + if len(c.Hosts) == 0 && len(c.URLs) == 0 { + return fmt.Errorf("hosts is a mandatory parameter") + } + + if len(c.URLs) != 0 { + c.Hosts = append(c.Hosts, c.URLs...) + } + + // updateScheme looks at TLS config to decide if http or https should be used to update the host + updateScheme := func(host string) string { + if c.TLS != nil && *c.TLS.Enabled == true { + return fmt.Sprint("https://", host) + } + return fmt.Sprint("http://", host) + } + + // Check if the URL is not parseable. If yes, then append scheme. + // If the url is valid but host or scheme is empty which can occur when someone configures host:port + // then update the scheme there as well. + for i := 0; i < len(c.Hosts); i++ { + host := c.Hosts[i] + u, err := url.ParseRequestURI(host) + if err != nil { + c.Hosts[i] = updateScheme(host) + } else if u.Scheme == "" || u.Host == "" { + c.Hosts[i] = updateScheme(host) + } + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config_test.go new file mode 100644 index 00000000..952ce354 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/config_test.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package http + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +var tests = []struct { + description string + host string + url string + convertedHost string + result bool +}{ + { + "Validate if neither urls nor host specified returns error", + "", + "", + "", + false, + }, + { + "Validate if only urls are present then the config is moved to hosts", + "", + "http://localhost:8080", + "http://localhost:8080", + true, + }, + { + "Validate if only hosts are present then the config is valid", + "http://localhost:8080", + "", + "http://localhost:8080", + true, + }, + { + "Validate if no scheme is present then it is added correctly", + "localhost", + "", + "http://localhost", + true, + }, + { + "Validate if no scheme is present but has a port then it is added correctly", + "localhost:8080", + "", + "http://localhost:8080", + true, + }, + { + "Validate if schemes like unix are honored", + "unix://localhost:8080", + "", + "unix://localhost:8080", + true, + }, +} + +func TestConfigValidate(t *testing.T) { + for _, test := range tests { + t.Run(test.description, func(t *testing.T) { + config := Config{} + if test.host != "" { + config.Hosts = []string{test.host} + } + + if test.url != "" { + config.URLs = []string{test.url} + } + + err := config.Validate() + if test.result { + assert.Nil(t, err) + assert.Equal(t, test.convertedHost, config.Hosts[0]) + } else { + assert.NotNil(t, err) + } + + }) + } +} diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go index b8c9ce51..48c2abde 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http.go @@ -95,8 +95,8 @@ func create( } } - js = make([]jobs.Job, len(config.URLs)) - for i, urlStr := range config.URLs { + js = make([]jobs.Job, len(config.Hosts)) + for i, urlStr := range config.Hosts { u, _ := url.Parse(urlStr) if err != nil { return nil, 0, err @@ -112,7 +112,7 @@ func create( js[i] = wrappers.WithURLField(u, job) } - return js, len(config.URLs), nil + return js, len(config.Hosts), nil } func newRoundTripper(config *Config, tls *transport.TLSConfig) (*http.Transport, error) { diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http_test.go index af6deac5..42633571 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http_test.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/http_test.go @@ -45,18 +45,23 @@ import ( "github.com/elastic/go-lookslike/validator" ) -func testRequest(t *testing.T, testURL string) *beat.Event { - return testTLSRequest(t, testURL, nil) +func testRequest(t 
*testing.T, testURL string, useUrls bool) *beat.Event { + return testTLSRequest(t, testURL, useUrls, nil) } // testTLSRequest tests the given request. certPath is optional, if given // an empty string no cert will be set. -func testTLSRequest(t *testing.T, testURL string, extraConfig map[string]interface{}) *beat.Event { +func testTLSRequest(t *testing.T, testURL string, useUrls bool, extraConfig map[string]interface{}) *beat.Event { configSrc := map[string]interface{}{ - "urls": testURL, "timeout": "1s", } + if useUrls { + configSrc["urls"] = testURL + } else { + configSrc["hosts"] = testURL + } + if extraConfig != nil { for k, v := range extraConfig { configSrc[k] = v @@ -80,10 +85,10 @@ func testTLSRequest(t *testing.T, testURL string, extraConfig map[string]interfa return event } -func checkServer(t *testing.T, handlerFunc http.HandlerFunc) (*httptest.Server, *beat.Event) { +func checkServer(t *testing.T, handlerFunc http.HandlerFunc, useUrls bool) (*httptest.Server, *beat.Event) { server := httptest.NewServer(handlerFunc) defer server.Close() - event := testRequest(t, server.URL) + event := testRequest(t, server.URL, useUrls) return server, event } @@ -102,7 +107,10 @@ func respondingHTTPChecks(url string, statusCode int) validator.Validator { httpBaseChecks(url), lookslike.MustCompile(map[string]interface{}{ "http": map[string]interface{}{ - "response.status_code": statusCode, + "response.status_code": statusCode, + "response.body.hash": isdef.IsString, + // TODO add this isdef to lookslike in a robust way + "response.body.bytes": isdef.IsIntGt(-1), "rtt.content.us": isdef.IsDuration, "rtt.response_header.us": isdef.IsDuration, "rtt.total.us": isdef.IsDuration, @@ -113,10 +121,30 @@ func respondingHTTPChecks(url string, statusCode int) validator.Validator { ) } +func minimalRespondingHTTPChecks(url string, statusCode int) validator.Validator { + return lookslike.Compose( + httpBaseChecks(url), + httpBodyChecks(), + lookslike.MustCompile(map[string]interface{}{ + "http": map[string]interface{}{ + "response.status_code": statusCode, + "rtt.total.us": isdef.IsDuration, + }, + }), + ) +} + +func httpBodyChecks() validator.Validator { + return lookslike.MustCompile(map[string]interface{}{ + "http.response.body.bytes": isdef.IsIntGt(-1), + "http.response.body.hash": isdef.IsString, + }) +} + func respondingHTTPBodyChecks(body string) validator.Validator { return lookslike.MustCompile(map[string]interface{}{ "http.response.body.content": body, - "http.response.body.bytes": int64(len(body)), + "http.response.body.bytes": len(body), }) } @@ -201,7 +229,27 @@ func TestUpStatuses(t *testing.T) { for _, status := range upStatuses { status := status t.Run(fmt.Sprintf("Test OK HTTP status %d", status), func(t *testing.T) { - server, event := checkServer(t, hbtest.HelloWorldHandler(status)) + server, event := checkServer(t, hbtest.HelloWorldHandler(status), false) + + testslike.Test( + t, + lookslike.Strict(lookslike.Compose( + hbtest.BaseChecks("127.0.0.1", "up", "http"), + hbtest.RespondingTCPChecks(), + hbtest.SummaryChecks(1, 0), + respondingHTTPChecks(server.URL, status), + )), + event.Fields, + ) + }) + } +} + +func TestUpStatusesWithUrlsConfig(t *testing.T) { + for _, status := range upStatuses { + status := status + t.Run(fmt.Sprintf("Test OK HTTP status %d", status), func(t *testing.T) { + server, event := checkServer(t, hbtest.HelloWorldHandler(status), true) testslike.Test( t, @@ -221,7 +269,7 @@ func TestDownStatuses(t *testing.T) { for _, status := range downStatuses { status := status 
t.Run(fmt.Sprintf("test down status %d", status), func(t *testing.T) { - server, event := checkServer(t, hbtest.HelloWorldHandler(status)) + server, event := checkServer(t, hbtest.HelloWorldHandler(status), false) testslike.Test( t, @@ -244,7 +292,7 @@ func TestLargeResponse(t *testing.T) { defer server.Close() configSrc := map[string]interface{}{ - "urls": server.URL, + "hosts": server.URL, "timeout": "1s", "check.response.body": "x", } @@ -296,7 +344,7 @@ func runHTTPSServerCheck( // we give it a few attempts to see if the server can come up before we run the real assertions. var event *beat.Event for i := 0; i < 10; i++ { - event = testTLSRequest(t, server.URL, mergedExtraConfig) + event = testTLSRequest(t, server.URL, false, mergedExtraConfig) if v, err := event.GetValue("monitor.status"); err == nil && reflect.DeepEqual(v, "up") { break } @@ -367,7 +415,7 @@ func TestConnRefusedJob(t *testing.T) { url := fmt.Sprintf("http://%s:%d", ip, port) - event := testRequest(t, url) + event := testRequest(t, url, false) testslike.Test( t, @@ -389,7 +437,7 @@ func TestUnreachableJob(t *testing.T) { port := uint16(1234) url := fmt.Sprintf("http://%s:%d", ip, port) - event := testRequest(t, url) + event := testRequest(t, url, false) testslike.Test( t, diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody.go index a783da8b..7990b632 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody.go @@ -20,69 +20,79 @@ package http import ( "crypto/sha256" "encoding/hex" - "fmt" "io" "net/http" "unicode/utf8" - "github.com/elastic/beats/heartbeat/eventext" - "github.com/elastic/beats/libbeat/common" - "github.com/elastic/beats/heartbeat/reason" - "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" ) -func handleRespBody(event *beat.Event, resp *http.Response, responseConfig responseConfig, errReason reason.Reason) error { - defer resp.Body.Close() +// maxBufferBodyBytes sets a hard limit on how much we're willing to buffer for any reason internally. +// since we must buffer the whole body for body validators this is effectively a cap on that. +// 100MiB out to be enough for everybody. +const maxBufferBodyBytes = 100 * 1024 * 1024 - sampleMaxBytes := responseConfig.IncludeBodyMaxBytes - - includeSample := responseConfig.IncludeBody == "always" || (responseConfig.IncludeBody == "on_error" && errReason != nil) - - // No need to return any actual body bytes if we'll discard them anyway. 
This should save on allocation - if !includeSample { - sampleMaxBytes = 0 +func processBody(resp *http.Response, config responseConfig, validator multiValidator) (common.MapStr, reason.Reason) { + // Determine how much of the body to actually buffer in memory + var bufferBodyBytes int + if validator.wantsBody() { + bufferBodyBytes = maxBufferBodyBytes + } else if config.IncludeBody == "always" || config.IncludeBody == "on_error" { + // If the user has asked for bodies to be recorded we only need to buffer that much + bufferBodyBytes = config.IncludeBodyMaxBytes + } else { + // Otherwise, we buffer nothing + bufferBodyBytes = 0 } - sampleStr, bodyBytes, bodyHash, err := readResp(resp, sampleMaxBytes) - if err != nil { - return err + respBody, bodyLenBytes, bodyHash, respErr := readBody(resp, bufferBodyBytes) + // If we encounter an error while reading the body just fail early + if respErr != nil { + return nil, reason.IOFailed(respErr) } - evtBodyMap := common.MapStr{ + // Run any validations + errReason := validator.validate(resp, respBody) + + bodyFields := common.MapStr{ "hash": bodyHash, - "bytes": bodyBytes, + "bytes": bodyLenBytes, } - if includeSample { - evtBodyMap["content"] = sampleStr + if config.IncludeBody == "always" || + (config.IncludeBody == "on_error" && errReason != nil) { + + // Do not store more bytes than the config specifies. We may + // have read extra bytes for the validators + sampleNumBytes := len(respBody) + if bodyLenBytes < sampleNumBytes { + sampleNumBytes = bodyLenBytes + } + if config.IncludeBodyMaxBytes < sampleNumBytes { + sampleNumBytes = config.IncludeBodyMaxBytes + } + + bodyFields["content"] = respBody[0:sampleNumBytes] } - eventext.MergeEventFields(event, common.MapStr{ - "http": common.MapStr{ - "response": common.MapStr{"body": evtBodyMap}, - }, - }) - - return nil + return bodyFields, errReason } -// readResp reads the first sampleSize bytes from the httpResponse, +// readBody reads the first sampleSize bytes from the httpResponse, // then closes the body (which closes the connection). It doesn't return any errors // but does log them. During an error case the return values will be (nil, -1). // The maxBytes params controls how many bytes will be returned in a string, not how many will be read. // We always read the full response here since we want to time downloading the full thing. 
// This may return a nil body if the response is not valid UTF-8 -func readResp(resp *http.Response, maxSampleBytes int) (bodySample string, bodySize int64, hashStr string, err error) { - if resp == nil { - return "", -1, "", fmt.Errorf("cannot readResp of nil HTTP response") - } +func readBody(resp *http.Response, maxSampleBytes int) (bodySample string, bodySize int, hashStr string, err error) { + defer resp.Body.Close() respSize, bodySample, hash, err := readPrefixAndHash(resp.Body, maxSampleBytes) return bodySample, respSize, hash, err } -func readPrefixAndHash(body io.ReadCloser, maxPrefixSize int) (respSize int64, prefix string, hashStr string, err error) { +func readPrefixAndHash(body io.ReadCloser, maxPrefixSize int) (respSize int, prefix string, hashStr string, err error) { hash := sha256.New() // Function to lazily get the body of the response rawBuf := make([]byte, 1024) @@ -94,7 +104,7 @@ func readPrefixAndHash(body io.ReadCloser, maxPrefixSize int) (respSize int64, p for { readSize, readErr := body.Read(rawBuf) - respSize += int64(readSize) + respSize += readSize hash.Write(rawBuf[:readSize]) if prefixRemainingBytes > 0 { diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody_test.go index 0ec80543..1a3b8acc 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody_test.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/respbody_test.go @@ -27,22 +27,25 @@ import ( "strings" "testing" - "github.com/elastic/go-lookslike" - "github.com/elastic/go-lookslike/testslike" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/elastic/beats/heartbeat/reason" - "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common/match" + "github.com/elastic/go-lookslike" + "github.com/elastic/go-lookslike/testslike" ) func Test_handleRespBody(t *testing.T) { + matchingBodyValidator := checkBody([]match.Matcher{match.MustCompile("hello")}) + failingBodyValidator := checkBody([]match.Matcher{match.MustCompile("goodbye")}) + + matchingComboValidator := multiValidator{bodyValidators: []bodyValidator{matchingBodyValidator}} + failingComboValidator := multiValidator{bodyValidators: []bodyValidator{failingBodyValidator}} + type args struct { - event *beat.Event resp *http.Response responseConfig responseConfig - errReason reason.Reason + validator multiValidator } tests := []struct { name string @@ -53,21 +56,19 @@ func Test_handleRespBody(t *testing.T) { { "on_error with error", args{ - &beat.Event{}, simpleHTTPResponse("hello"), responseConfig{IncludeBody: "on_error", IncludeBodyMaxBytes: 3}, - reason.IOFailed(fmt.Errorf("something happened")), + failingComboValidator, }, - false, + true, true, }, { "on_error with success", args{ - &beat.Event{}, simpleHTTPResponse("hello"), responseConfig{IncludeBody: "on_error", IncludeBodyMaxBytes: 3}, - nil, + matchingComboValidator, }, false, false, @@ -75,21 +76,19 @@ func Test_handleRespBody(t *testing.T) { { "always with error", args{ - &beat.Event{}, simpleHTTPResponse("hello"), responseConfig{IncludeBody: "always", IncludeBodyMaxBytes: 3}, - reason.IOFailed(fmt.Errorf("something happened")), + failingComboValidator, }, - false, + true, true, }, { "always with success", args{ - &beat.Event{}, simpleHTTPResponse("hello"), responseConfig{IncludeBody: "always", IncludeBodyMaxBytes: 3}, - nil, + matchingComboValidator, }, false, true, @@ -97,21 
+96,19 @@ func Test_handleRespBody(t *testing.T) { { "never with error", args{ - &beat.Event{}, simpleHTTPResponse("hello"), responseConfig{IncludeBody: "never", IncludeBodyMaxBytes: 3}, - reason.IOFailed(fmt.Errorf("something happened")), + failingComboValidator, }, - false, + true, false, }, { "never with success", args{ - &beat.Event{}, simpleHTTPResponse("hello"), responseConfig{IncludeBody: "never", IncludeBodyMaxBytes: 3}, - nil, + matchingComboValidator, }, false, false, @@ -120,25 +117,20 @@ func Test_handleRespBody(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - event := tt.args.event - if err := handleRespBody(tt.args.event, tt.args.resp, tt.args.responseConfig, tt.args.errReason); (err != nil) != tt.wantErr { + fields, err := processBody(tt.args.resp, tt.args.responseConfig, tt.args.validator) + if (err != nil) != tt.wantErr { t.Errorf("handleRespBody() error = %v, wantErr %v", err, tt.wantErr) } bodyMatch := map[string]interface{}{ "hash": "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", - "bytes": int64(5), + "bytes": 5, } if tt.wantFieldsSet { bodyMatch["content"] = "hel" } - testslike.Test(t, - lookslike.MustCompile( - map[string]interface{}{ - "http.response.body": bodyMatch, - }), - event.Fields) + testslike.Test(t, lookslike.MustCompile(bodyMatch), fields) }) } } @@ -152,7 +144,7 @@ func Test_readResp(t *testing.T) { name string args args wantBodySample string - wantBodySize int64 + wantBodySize int wantHashStr string wantErr bool }{ @@ -167,33 +159,22 @@ func Test_readResp(t *testing.T) { wantHashStr: "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", wantErr: false, }, - { - name: "no resp", - args: args{ - resp: nil, - maxSampleBytes: 3, - }, - wantBodySample: "", - wantBodySize: -1, - wantHashStr: "", - wantErr: true, - }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - gotBodySample, gotBodySize, gotHashStr, err := readResp(tt.args.resp, tt.args.maxSampleBytes) + gotBodySample, gotBodySize, gotHashStr, err := readBody(tt.args.resp, tt.args.maxSampleBytes) if (err != nil) != tt.wantErr { - t.Errorf("readResp() error = %v, wantErr %v", err, tt.wantErr) + t.Errorf("readBody() error = %v, wantErr %v", err, tt.wantErr) return } if gotBodySample != tt.wantBodySample { - t.Errorf("readResp() gotBodySample = %v, want %v", gotBodySample, tt.wantBodySample) + t.Errorf("readBody() gotBodySample = %v, want %v", gotBodySample, tt.wantBodySample) } if gotBodySize != tt.wantBodySize { - t.Errorf("readResp() gotBodySize = %v, want %v", gotBodySize, tt.wantBodySize) + t.Errorf("readBody() gotBodySize = %v, want %v", gotBodySize, tt.wantBodySize) } if gotHashStr != tt.wantHashStr { - t.Errorf("readResp() gotHashStr = %v, want %v", gotHashStr, tt.wantHashStr) + t.Errorf("readBody() gotHashStr = %v, want %v", gotHashStr, tt.wantHashStr) } }) } @@ -255,7 +236,7 @@ func Test_readPrefixAndHash(t *testing.T) { require.Error(t, err) } - assert.Equal(t, int64(len(tt.body)), gotRespSize) + assert.Equal(t, len(tt.body), gotRespSize) if tt.len <= len(tt.body) { assert.Equal(t, tt.body[0:tt.len], gotPrefix) } else { diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go index 36648f3f..92eca0aa 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task.go @@ -46,14 +46,17 @@ func newHTTPMonitorHostJob( transport 
*http.Transport, enc contentEncoder, body []byte, - validator RespCheck, + validator multiValidator, ) (jobs.Job, error) { + // Trace visited URLs when redirects occur + var redirects []string client := &http.Client{ - CheckRedirect: makeCheckRedirect(config.MaxRedirects), + CheckRedirect: makeCheckRedirect(config.MaxRedirects, &redirects), Transport: transport, Timeout: config.Timeout, } + request, err := buildRequest(addr, config, enc) if err != nil { return nil, err @@ -62,7 +65,7 @@ func newHTTPMonitorHostJob( timeout := config.Timeout return jobs.MakeSimpleJob(func(event *beat.Event) error { - _, _, err := execPing(event, client, request, body, timeout, validator, config.Response) + _, _, err := execPing(event, client, request, body, timeout, validator, config.Response, &redirects) return err }), nil } @@ -73,7 +76,7 @@ func newHTTPMonitorIPsJob( tls *transport.TLSConfig, enc contentEncoder, body []byte, - validator RespCheck, + validator multiValidator, ) (jobs.Job, error) { req, err := buildRequest(addr, config, enc) @@ -100,11 +103,11 @@ func createPingFactory( tls *transport.TLSConfig, request *http.Request, body []byte, - validator RespCheck, + validator multiValidator, ) func(*net.IPAddr) jobs.Job { timeout := config.Timeout isTLS := request.URL.Scheme == "https" - checkRedirect := makeCheckRedirect(config.MaxRedirects) + checkRedirect := makeCheckRedirect(config.MaxRedirects, nil) return monitors.MakePingIPFactory(func(event *beat.Event, ip *net.IPAddr) error { addr := net.JoinHostPort(ip.String(), strconv.Itoa(int(port))) @@ -153,7 +156,7 @@ func createPingFactory( }, } - _, end, err := execPing(event, client, request, body, timeout, validator, config.Response) + _, end, err := execPing(event, client, request, body, timeout, validator, config.Response, nil) cbMutex.Lock() defer cbMutex.Unlock() @@ -209,9 +212,10 @@ func execPing( req *http.Request, reqBody []byte, timeout time.Duration, - validator func(*http.Response) error, + validator multiValidator, responseConfig responseConfig, -) (start, end time.Time, errReason reason.Reason) { + redirects *[]string, +) (start, end time.Time, err reason.Reason) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() @@ -219,23 +223,29 @@ func execPing( // Send the HTTP request. We don't immediately return on error since // we may want to add additional fields to contextualize the error. 
- start, resp, errReason := execRequest(client, req, validator) + start, resp, errReason := execRequest(client, req) - // If we have no response object there probably was an IO error, we can skip the rest of the logic + // If we have no response object or an error was set there probably was an IO error, we can skip the rest of the logic // since that logic is for adding metadata relating to completed HTTP transactions that have errored // in other ways - if resp == nil { + if resp == nil || errReason != nil { return start, time.Now(), errReason } - // Add response.status_code - eventext.MergeEventFields(event, common.MapStr{"http": common.MapStr{"response": common.MapStr{"status_code": resp.StatusCode}}}) - // Download the body, close the response body, then attach all fields - err := handleRespBody(event, resp, responseConfig, errReason) - if err != nil { - return start, time.Now(), reason.IOFailed(err) + bodyFields, errReason := processBody(resp, responseConfig, validator) + + responseFields := common.MapStr{ + "status_code": resp.StatusCode, + "body": bodyFields, } + httpFields := common.MapStr{"response": responseFields} + + if redirects != nil && len(*redirects) > 0 { + httpFields["redirects"] = redirects + } + eventext.MergeEventFields(event, common.MapStr{"http": httpFields}) + // Mark the end time as now, since we've finished downloading end = time.Now() @@ -260,7 +270,7 @@ func attachRequestBody(ctx *context.Context, req *http.Request, body []byte) *ht } // execute the request. Note that this does not close the resp body, which should be done by caller -func execRequest(client *http.Client, req *http.Request, validator func(*http.Response) error) (start time.Time, resp *http.Response, errReason reason.Reason) { +func execRequest(client *http.Client, req *http.Request) (start time.Time, resp *http.Response, errReason reason.Reason) { start = time.Now() resp, err := client.Do(req) @@ -268,11 +278,6 @@ func execRequest(client *http.Client, req *http.Request, validator func(*http.Re return start, nil, reason.IOFailed(err) } - err = validator(resp) - if err != nil { - return start, resp, reason.ValidateFailed(err) - } - return start, resp, nil } @@ -298,14 +303,21 @@ func splitHostnamePort(requ *http.Request) (string, uint16, error) { return host, uint16(p), nil } -func makeCheckRedirect(max int) func(*http.Request, []*http.Request) error { +// makeCheckRedirect checks whether the max number of redirects has been exceeded, and also appends to the redirects list if we're tracking those. +// It's kind of ugly to return a result via a pointer argument, but it's the interface the +// golang HTTP client gives us.
+func makeCheckRedirect(max int, redirects *[]string) func(*http.Request, []*http.Request) error { if max == 0 { return func(_ *http.Request, _ []*http.Request) error { return http.ErrUseLastResponse } } - return func(_ *http.Request, via []*http.Request) error { + return func(r *http.Request, via []*http.Request) error { + if redirects != nil { + *redirects = append(*redirects, r.URL.String()) + } + if max == len(via) { return http.ErrUseLastResponse } diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task_test.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task_test.go index 0d758d1e..448bff4b 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task_test.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/task_test.go @@ -131,30 +131,6 @@ func makeTestHTTPRequest(t *testing.T) *http.Request { return req } -func TestZeroMaxRedirectShouldError(t *testing.T) { - checker := makeCheckRedirect(0) - req := makeTestHTTPRequest(t) - - res := checker(req, nil) - assert.Equal(t, http.ErrUseLastResponse, res) -} - -func TestNonZeroRedirect(t *testing.T) { - limit := 5 - checker := makeCheckRedirect(limit) - - var via []*http.Request - // Test requests within the limit - for i := 0; i < limit; i++ { - req := makeTestHTTPRequest(t) - assert.Nil(t, checker(req, via)) - via = append(via, req) - } - - // We are now at the limit, this request should fail - assert.Equal(t, http.ErrUseLastResponse, checker(makeTestHTTPRequest(t), via)) -} - func TestRequestBuildingWithCustomHost(t *testing.T) { var config = Config{} var encoder = nilEncoder{} diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_cert.pem b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_cert.pem index d7851f1e..604db331 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_cert.pem +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_cert.pem @@ -1,14 +1,23 @@ -----BEGIN CERTIFICATE----- -MIICGzCCAXygAwIBAgIRANIf32fETNS13JB39JYszmwwCgYIKoZIzj0EAwQwEjEQ -MA4GA1UEChMHQWNtZSBDbzAeFw0xODExMDgwMzIxNDlaFw0xOTExMDgwMzIxNDla -MBIxEDAOBgNVBAoTB0FjbWUgQ28wgZswEAYHKoZIzj0CAQYFK4EEACMDgYYABAE+ -n/OJoo7jvetm8zR4lAX2s99fxWF/LiOR1/qTPQgLmLYVUZq1yTZB027GtJGWAqph -kY/n0oNdxS4N9d2JPoaXMgHMGZAXl0A85Q3D5k0xKG/jwaEasTIbTe6UKHed2Zgk -CtEqutG9KwmnqAHCtlia14mgcERpO1eT0A7NRcdtNlcjlKNwMG4wDgYDVR0PAQH/ -BAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAPBgNVHRMBAf8E -BTADAQH/MCwGA1UdEQQlMCOCCWxvY2FsaG9zdIEWbm9zdWNoYWRkckBleGFtcGxl -Lm5ldDAKBggqhkjOPQQDBAOBjAAwgYgCQgDvHj4Xt5TMqhR4Uavmfa0uOio0FZxL -vGnk3aLj5koJyrQNynntHBcCZ+sPb14J08FWk0j4GPOGroMVud/XTX1BZgJCAc3k -0p+X1r+lt1hkSGrumTY5NRWIGIvJ0gy1AhuZJzXYoPRRdPgnM04vBWniOLHDhmsX -ExbWSt0EY2IiOJc/1GNO +MIID0TCCArmgAwIBAgIUFjN0Dgop+G0j5ZgvXM+ppqBGYf4wDQYJKoZIhvcNAQEL +BQAweDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk1OMRQwEgYDVQQHDAtNaW5uZWFw +b2xpczEQMA4GA1UECgwHRWxhc3RpYzETMBEGA1UEAwwKZmFrZWNsaWVudDEfMB0G +CSqGSIb3DQEJARYQZmFrZUBleGFtcGxlLm5ldDAeFw0xOTExMTExNzAzMjdaFw0z +OTExMDYxNzAzMjdaMHgxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJNTjEUMBIGA1UE +BwwLTWlubmVhcG9saXMxEDAOBgNVBAoMB0VsYXN0aWMxEzARBgNVBAMMCmZha2Vj +bGllbnQxHzAdBgkqhkiG9w0BCQEWEGZha2VAZXhhbXBsZS5uZXQwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDh+dXMpyghQax90vZNVig+zHx8c/eRSxj+ +C2WiFKmZoe/wIVtVhEqy1SVruKhqhzkKcnHH0Xr/rttN/Um7hvA1eNpVlRk/FIKy +uVFgD5eCvxDsA3GI1Z25Qy0k0pH7zpjgCqHp72PaR9Zt//hDk4dHNPgIffGbddOa 
+KqhLAvEgkRqJZqKYIg/RSdYLdCGpzOafYKr9lcvvPrAMDN8DNrOBXIoSHGBv1FYv +lsasy8rG0m0UhrVHkvtv4Z4dN/0oOaNspPXYtjqhDdH3zb6ypHRlvdJ2oSRMqcfV +AyhUXnJ4tS6jmHW1VLoZ0zV7vJZLx/HY77wDqPSRy2/1esuEZLvfAgMBAAGjUzBR +MB0GA1UdDgQWBBSQ7ndYKax12Jq9AgHkgLJ41aDjyzAfBgNVHSMEGDAWgBSQ7ndY +Kax12Jq9AgHkgLJ41aDjyzAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUA +A4IBAQA85RKhXZslVSjfZIsLOmpbwhnqO26kQ5ksFp3oC6aqw7qj4Q4+yitoiNYZ +VR4Yyz7XOjNjyb4JjQFwGZQY6i+53ccZsH2ZpLgHFKgWxY8Ccc/vYOiKLtOWJsi9 +ktXiNStyktadUe7DekvhKIkOsLXcEIO2eim+grDSNfP8G3hdpw2aK8GgwpwEJapJ +GArm3tpQqSwywS7/5HKLNPQ/3Zpc5Mg+qrYbkVnFtu8ZKsOXrBkmme5TfbjogsO3 +y8LGMFfyeLi0R74tGuO+VDlHxwHFXOax1yRje+7ld7Icq1az3I3BPq9sCpqz4oA3 +PgrftqCrMpMK15vnWJ1er2Rz1bb1 -----END CERTIFICATE----- diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_key.pem b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_key.pem index 28911a4d..3fc6d8c4 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_key.pem +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/client_key.pem @@ -1,7 +1,28 @@ ------BEGIN EC PRIVATE KEY----- -MIHcAgEBBEIB1YnGgQ42OFGz1rOFlmT97JB52b9/2h1dj85QaBLxX6isSNgnS7yC -VQKAQCudJz+UpqiTNZBQK0goqbD/O47lswagBwYFK4EEACOhgYkDgYYABAE+n/OJ -oo7jvetm8zR4lAX2s99fxWF/LiOR1/qTPQgLmLYVUZq1yTZB027GtJGWAqphkY/n -0oNdxS4N9d2JPoaXMgHMGZAXl0A85Q3D5k0xKG/jwaEasTIbTe6UKHed2ZgkCtEq -utG9KwmnqAHCtlia14mgcERpO1eT0A7NRcdtNlcjlA== ------END EC PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDh+dXMpyghQax9 +0vZNVig+zHx8c/eRSxj+C2WiFKmZoe/wIVtVhEqy1SVruKhqhzkKcnHH0Xr/rttN +/Um7hvA1eNpVlRk/FIKyuVFgD5eCvxDsA3GI1Z25Qy0k0pH7zpjgCqHp72PaR9Zt +//hDk4dHNPgIffGbddOaKqhLAvEgkRqJZqKYIg/RSdYLdCGpzOafYKr9lcvvPrAM +DN8DNrOBXIoSHGBv1FYvlsasy8rG0m0UhrVHkvtv4Z4dN/0oOaNspPXYtjqhDdH3 +zb6ypHRlvdJ2oSRMqcfVAyhUXnJ4tS6jmHW1VLoZ0zV7vJZLx/HY77wDqPSRy2/1 +esuEZLvfAgMBAAECggEAVwaXLtyFGOGo40AWeMwgbabkni0u/3ZzZorPFi9s7Wyu +8cR23yU2e06YvbZ8raHr9rkCVpi3DLazYIKWHbEm2wDy3JiyA1gLyXaS/zKh/8lD +ZBEjbTYPKuTTAuZRMC9tMoMFvgjiZJLb4rr8SlILfAHLL8IJBPzIhAu/ijvUztw5 +a1/cLEViN+hFDtdU8sRGGzqI7I+OZ4mwBkH94bzE5lBfE5SjNHRHbH7Tw1+eqVzU +mQRfAv4AIowp4WJQmKbos/GZy4G/BydXOBwlOihlI7ksG4UxOcRcVb/BxKeMJ2Ug +zO5lPcZLrIs1Vsg1lftWQLLVwNa7ydSujMssD6LVkQKBgQD8Rpd51G3UF3/yGOho +FhLSOV1XTVZQj2/u4C5RExtpGNVIdiCAlYcbnB5c3tggpvNx7JlrUlflYj1UsvPd +mc8Mzr9XybUJ5WHMermWPTnvaQV1Vu7ntAkO0OS2mL9Qj/L2oBbNrUqGUsMeystB +P0LwgzvADzcIiA3tGXUDYrmnxwKBgQDlT9muoYGYJWa462lKnHz6thNZ8hypR9lO +yff4sjJzbQdzOsFz5kaTgzAxOYEwnwytA/2O2xors/6dxOb64SNHe3qCd+3IBHb6 +0ESomkeaqWHBDlvXRyH8B62eOejIcCe32IIXPOZfXSPt+iZJBwhvrjM77ibBe3gm +NUtS6RI7KQKBgBCzz8WWJetb3vG1anWxuG9IswBbs3JD/OIw34AoprDvnvdcMsyw +mbBfRrdZXa/Tvdx2ELZFTmJ2fZ8E9hZe35F2gr6aMS1FjZMPMQhcZ2CNcHoTVl/q +0Iq1k97vAAOidDCV6syVQ5DfG+IHBSv9tz3bQnJ1z2v2Lg3bmJ6vw6DzAoGALxFw +0RN9kkk7CIHpntL4JE3zMucrUuPYiv+R0o5hJKgge1p4Et4VZnnrDm+LQPukkm7b +QNNgT9AmVyPvdVd48lc6EkWjnQatlVXigekJAYn68rGL0GhdZWn+DRg928aJoIbk +TPjHIJPxqAtMiHjVFndfNNHxSo2ZX95Yk7AbeeECgYEA96Scm3nHmozoknkmCaAP +hed7qA/9aTyqHVQDFulmRnHS8nXGUOWS+WEIOxxx8eI9j2qYsX0QOphwmz6Ijs9F +zbwL4kutUOF8PdHdCgTht+z5SR1oAjT/Joz5BlekVh1rhkDMLeeZWe/VO7ochabi +ORVoeRbPyJN9mX8JkQ+/7X8= +-----END PRIVATE KEY----- diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/openssl_command.txt b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/openssl_command.txt new file mode 100644 index 00000000..39a0086f --- /dev/null +++ 
b/vendor/github.com/elastic/beats/heartbeat/monitors/active/http/testdata/openssl_command.txt @@ -0,0 +1 @@ +openssl req -x509 -sha256 -nodes -days 7300 -newkey rsa:2048 -keyout privateKey.key -out certificate.crt diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go index 169c0e9e..be90421f 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/icmp.go @@ -47,12 +47,6 @@ func create( return nil, 0, err } - // TODO: check icmp is support by OS + check we've - // got required credentials (implementation uses RAW socket, requires root + - // not supported on all OSes) - // TODO: replace icmp package base reader/sender using raw sockets with - // OS specific solution - ipVersion := config.Mode.Network() if len(config.Hosts) > 0 && ipVersion == "" { err := fmt.Errorf("pinging hosts requires ipv4 or ipv6 mode enabled") @@ -61,13 +55,14 @@ func create( var loopErr error loopInit.Do(func() { - debugf("initialize icmp handler") + debugf("initializing ICMP loop") loop, loopErr = newICMPLoop() }) if loopErr != nil { - debugf("Failed to initialize ICMP loop %v", loopErr) + logp.Warn("Failed to initialize ICMP loop %v", loopErr) return nil, 0, loopErr } + debugf("ICMP loop successfully initialized") if err := loop.checkNetworkMode(ipVersion); err != nil { return nil, 0, err diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go index 4cd97d59..414686c1 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/active/icmp/loop.go @@ -24,14 +24,14 @@ import ( "fmt" "math/rand" "net" + "os" + "runtime" "sync" "time" "golang.org/x/net/icmp" "golang.org/x/net/ipv4" "golang.org/x/net/ipv6" - - "github.com/elastic/beats/libbeat/logp" ) type icmpLoop struct { @@ -85,6 +85,10 @@ var ( loop *icmpLoop ) +func noPingCapabilityError(message string) error { + return fmt.Errorf("Insufficient privileges to perform ICMP ping. %s", message) +} + func newICMPLoop() (*icmpLoop, error) { // Log errors at info level, as the loop is setup globally when ICMP module is loaded // first (not yet configured).
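For context on the newICMPLoop hunk below: when raw ICMP sockets cannot be opened, Linux and macOS offer unprivileged ICMP over datagram-oriented sockets, which is what the "udp4"/"udp6" listeners added here rely on. A minimal standalone sketch of that probe order using golang.org/x/net/icmp, the same package the loop uses (the sketch itself is illustrative and not part of the patch):

package main

import (
	"fmt"

	"golang.org/x/net/icmp"
)

func main() {
	// Privileged raw ICMP socket; requires root or cap_net_raw.
	if conn, err := icmp.ListenPacket("ip4:icmp", "0.0.0.0"); err == nil {
		conn.Close()
		fmt.Println("raw ICMP socket available")
		return
	}
	// Unprivileged datagram-oriented ICMP; on Linux this only works when the
	// process GID falls inside the net.ipv4.ping_group_range sysctl.
	conn, err := icmp.ListenPacket("udp4", "0.0.0.0")
	if err != nil {
		fmt.Println("no ping capability:", err)
		return
	}
	conn.Close()
	fmt.Println("unprivileged ICMP socket available")
}

This mirrors the fallback order in the hunk: raw sockets first, then the unprivileged variant, and only then the error telling the operator which capability or sysctl to configure.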
@@ -92,7 +96,7 @@ func newICMPLoop() (*icmpLoop, error) { // IPv4/IPv6 checking conn4 := createListener("IPv4", "ip4:icmp") conn6 := createListener("IPv6", "ip6:ipv6-icmp") - + unprivilegedPossible := false l := &icmpLoop{ conn4: conn4, conn6: conn6, @@ -100,11 +104,33 @@ func newICMPLoop() (*icmpLoop, error) { requests: map[requestID]*requestContext{}, } - if conn4 != nil { - go l.runICMPRecv(conn4, protocolICMP) + if l.conn4 == nil && l.conn6 == nil { + switch runtime.GOOS { + case "linux", "darwin": + unprivilegedPossible = true + //This is non-privileged ICMP, not udp + l.conn4 = createListener("Unprivileged IPv4", "udp4") + l.conn6 = createListener("Unprivileged IPv6", "udp6") + } } - if conn6 != nil { - go l.runICMPRecv(conn6, protocolIPv6ICMP) + + if l.conn4 != nil { + go l.runICMPRecv(l.conn4, protocolICMP) + } + if l.conn6 != nil { + go l.runICMPRecv(l.conn6, protocolIPv6ICMP) + } + + if l.conn4 == nil && l.conn6 == nil { + if unprivilegedPossible { + var buffer bytes.Buffer + path, _ := os.Executable() + buffer.WriteString("You can run without root by setting cap_net_raw:\n sudo setcap cap_net_raw+eip ") + buffer.WriteString(path + " \n") + buffer.WriteString("Your system allows the use of unprivileged ping by setting net.ipv4.ping_group_range \n sysctl -w net.ipv4.ping_group_range=' ' ") + return nil, noPingCapabilityError(buffer.String()) + } + return nil, noPingCapabilityError("You must provide the appropriate permissions to this executable") } return l, nil @@ -124,10 +150,10 @@ func (l *icmpLoop) checkNetworkMode(mode string) error { } if ip4 && l.conn4 == nil { - return errors.New("failed to initiate IPv4 support") + return errors.New("failed to initiate IPv4 support. Check log details for permission configuration") } if ip6 && l.conn6 == nil { - return errors.New("failed to initiate IPv6 support") + return errors.New("failed to initiate IPv6 support. Check log details for permission configuration") } return nil @@ -272,6 +298,7 @@ func (l *icmpLoop) ping( if !success { return 0, requests, timeoutError{} } + return rtt, requests, nil } @@ -344,7 +371,6 @@ func createListener(name, network string) *icmp.PacketConn { // true, even if error value itself is `nil`. Checking for conn suppresses // misleading log message. if conn == nil && err != nil { - logp.Info("%v ICMP not supported: %v", name, err) return nil } return conn diff --git a/vendor/github.com/elastic/beats/heartbeat/monitors/task.go b/vendor/github.com/elastic/beats/heartbeat/monitors/task.go index dc5e1831..960d0b5f 100644 --- a/vendor/github.com/elastic/beats/heartbeat/monitors/task.go +++ b/vendor/github.com/elastic/beats/heartbeat/monitors/task.go @@ -75,6 +75,9 @@ type jobConfig struct { // Fields and tags to add to monitor. EventMetadata common.EventMetadata `config:",inline"` Processors processors.PluginConfig `config:"processors"` + + // KeepNull determines whether published events will keep null values or omit them. + KeepNull bool `config:"keep_null"` } // ProcessorsError is used to indicate situations when processors could not be loaded. 
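The keep_null option added to jobConfig above is unpacked from YAML through its config struct tag before the Start() hunk below copies it into beat.ProcessingConfig. A hedged, self-contained sketch of that tag-driven unpacking with go-ucfg, the configuration library libbeat builds on (the field set and the monitor name are trimmed/illustrative):

package main

import (
	"fmt"

	"github.com/elastic/go-ucfg/yaml"
)

// jobConfig mirrors the heartbeat struct above, trimmed to the new field.
type jobConfig struct {
	Name     string `config:"name"`
	KeepNull bool   `config:"keep_null"`
}

func main() {
	raw := []byte("name: my-monitor\nkeep_null: true\n")
	cfg, err := yaml.NewConfig(raw)
	if err != nil {
		panic(err)
	}
	var jc jobConfig
	// go-ucfg reads the `config` struct tags, the same mechanism libbeat uses.
	if err := cfg.Unpack(&jc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", jc) // {Name:my-monitor KeepNull:true}
}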
@@ -108,6 +111,7 @@ func (t *configuredJob) Start() { Processing: beat.ProcessingConfig{ EventMetadata: t.config.EventMetadata, Processor: t.processors, + KeepNull: t.config.KeepNull, Fields: fields, }, }) diff --git a/vendor/github.com/elastic/beats/heartbeat/scripts/generate_imports_helper.py b/vendor/github.com/elastic/beats/heartbeat/scripts/generate_imports_helper.py index 9a7e7ecd..be85b8a2 100644 --- a/vendor/github.com/elastic/beats/heartbeat/scripts/generate_imports_helper.py +++ b/vendor/github.com/elastic/beats/heartbeat/scripts/generate_imports_helper.py @@ -23,7 +23,11 @@ def get_importable_lines(go_beat_path, import_line): package = "monitors/{}".format(mode) return [format(package, m) for m in collect_monitors(package)] - return sorted(imports("active") + imports("passive")) + return [{ + "file_suffix": "", + "build_tags": "", + "imported_lines": sorted(imports("active") + imports("passive")) + }] def collect_monitors(package): diff --git a/vendor/github.com/elastic/beats/heartbeat/tests/system/test_icmp.py b/vendor/github.com/elastic/beats/heartbeat/tests/system/test_icmp.py new file mode 100644 index 00000000..5e7fc551 --- /dev/null +++ b/vendor/github.com/elastic/beats/heartbeat/tests/system/test_icmp.py @@ -0,0 +1,52 @@ +import os +import unittest +import platform +import socket +import sys +from heartbeat import BaseTest +from elasticsearch import Elasticsearch +from beat.beat import INTEGRATION_TESTS +import nose.tools +import logging +import subprocess +import time + + +class Test(BaseTest): + def test_base(self): + """ + Basic ICMP test that works with either privileged (root) or unprivileged ICMP. + + """ + + config = { + "monitors": [ + { + "type": "icmp", + "schedule": "*/5 * * * * * *", + "hosts": ["127.0.0.1"], + } + ] + } + + self.render_config_template( + path=os.path.abspath(self.working_dir) + "/log/*", + **config + ) + + proc = self.start_beat() + + def has_started_message(): return self.log_contains("ICMP loop successfully initialized") + + def has_failed_message(): return self.log_contains("Failed to initialize ICMP loop") + + # We don't know whether the system the tests are running on is configured to support ping, but we can at least check that the ICMP loop + # was initialized. In the future we should start up VMs with the correct perms configured and be more specific. In addition to that + # we should run pings on those machines and make sure they work.
+ self.wait_until(lambda: has_started_message() or has_failed_message(), 30) + + if has_failed_message(): + proc.check_kill_and_wait(1) + else: + # Check that documents are moving through + self.wait_until(lambda: self.output_has(lines=1)) diff --git a/vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py b/vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py index 4eecc3c6..3906d552 100644 --- a/vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py +++ b/vendor/github.com/elastic/beats/heartbeat/tests/system/test_monitor.py @@ -18,7 +18,37 @@ class Test(BaseTest): server = self.start_server("hello world", status_code) self.render_http_config( - ["http://localhost:{}".format(server.server_port)]) + ["localhost:{}".format(server.server_port)]) + + proc = self.start_beat() + self.wait_until(lambda: self.log_contains("heartbeat is running")) + + self.wait_until( + lambda: self.output_has(lines=1)) + + proc.check_kill_and_wait() + + server.shutdown() + output = self.read_output() + assert status_code == output[0]["http.response.status_code"] + + if os.name == "nt": + # Currently skipped on Windows as fields.yml not generated + raise SkipTest + self.assert_fields_are_documented(output[0]) + + @parameterized.expand([ + "200", "404" + ]) + def test_http_with_hosts_config(self, status_code): + """ + Test http server + """ + status_code = int(status_code) + server = self.start_server("hello world", status_code) + + self.render_http_config_with_hosts( + ["localhost:{}".format(server.server_port)]) proc = self.start_beat() self.wait_until(lambda: self.log_contains("heartbeat is running")) @@ -94,6 +124,10 @@ class Test(BaseTest): proc.check_kill_and_wait() self.assert_last_status(expected_status) + if expected_status == "down": + nose.tools.eq_(self.last_output_line()["http.response.body.content"], body) + else: + assert not self.last_output_line().has_key("http.response.body.content") finally: server.shutdown() @@ -179,3 +213,11 @@ class Test(BaseTest): "urls": urls, }] ) + + def render_http_config_with_hosts(self, urls): + self.render_config_template( + monitors=[{ + "type": "http", + "hosts": urls, + }] + ) diff --git a/vendor/github.com/elastic/beats/journalbeat/Dockerfile b/vendor/github.com/elastic/beats/journalbeat/Dockerfile index 05b02f74..eb8adfbf 100644 --- a/vendor/github.com/elastic/beats/journalbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/journalbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12.9 +FROM golang:1.12.12 MAINTAINER Noémi Ványi RUN set -x && \ diff --git a/vendor/github.com/elastic/beats/journalbeat/_meta/kibana/7/dashboard/Journalbeat-overview.json b/vendor/github.com/elastic/beats/journalbeat/_meta/kibana/7/dashboard/Journalbeat-overview.json deleted file mode 100644 index eb3a584b..00000000 --- a/vendor/github.com/elastic/beats/journalbeat/_meta/kibana/7/dashboard/Journalbeat-overview.json +++ /dev/null @@ -1,223 +0,0 @@ -{ - "objects": [ - { - "attributes": { - "description": "", - "hits": 0, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "filter": [], - "query": { - "language": "kuery", - "query": "" - } - } - }, - "optionsJSON": { - "hidePanelTitles": false, - "useMargins": true - }, - "panelsJSON": [ - { - "embeddableConfig": {}, - "gridData": { - "h": 15, - "i": "1", - "w": 24, - "x": 0, - "y": 0 - }, - "panelIndex": "1", - "panelRefName": "panel_0", - "version": "7.0.0" - }, - { - "embeddableConfig": {}, - "gridData": { - "h": 15, - "i": "2", - "w": 24, - "x": 24, - "y": 0 - }, - "panelIndex": "2", 
- "panelRefName": "panel_1", - "version": "7.0.0" - }, - { - "embeddableConfig": {}, - "gridData": { - "h": 13, - "i": "3", - "w": 48, - "x": 0, - "y": 15 - }, - "panelIndex": "3", - "panelRefName": "panel_2", - "version": "7.0.0" - } - ], - "timeRestore": false, - "title": "[Journalbeat] Overview ECS", - "version": 1 - }, - "id": "effdfcd0-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "migrationVersion": { - "dashboard": "7.0.0" - }, - "references": [ - { - "id": "25251fc0-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "name": "panel_0", - "type": "search" - }, - { - "id": "52a2a1c0-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "name": "panel_1", - "type": "search" - }, - { - "id": "842a1340-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "name": "panel_2", - "type": "search" - } - ], - "type": "dashboard", - "updated_at": "2019-02-04T12:25:20.413Z", - "version": 1 - }, - { - "attributes": { - "columns": [ - "@timestamp", - "host.hostname", - "journald.kernel.subsystem", - "message" - ], - "description": "", - "hits": 0, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "filter": [], - "highlightAll": true, - "indexRefName": "kibanaSavedObjectMeta.searchSourceJSON.index", - "query": { - "language": "kuery", - "query": "syslog.facility:0 AND syslog.priority<4" - }, - "version": true - } - }, - "sort": [ - "@timestamp", - "desc" - ], - "title": "[Journalbeat] Kernel errors ECS", - "version": 1 - }, - "id": "25251fc0-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "migrationVersion": { - "search": "7.0.0" - }, - "references": [ - { - "id": "33c018b0-2876-11e9-bc7e-af96a8fe0ddd", - "name": "kibanaSavedObjectMeta.searchSourceJSON.index", - "type": "index-pattern" - } - ], - "type": "search", - "updated_at": "2019-02-04T12:19:40.092Z", - "version": 1 - }, - { - "attributes": { - "columns": [ - "@timestamp", - "host.hostname", - "process.name", - "message" - ], - "description": "", - "hits": 0, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "filter": [], - "highlightAll": true, - "indexRefName": "kibanaSavedObjectMeta.searchSourceJSON.index", - "query": { - "language": "kuery", - "query": "syslog.facility:4" - }, - "version": true - } - }, - "sort": [ - "@timestamp", - "desc" - ], - "title": "[Journalbeat] Authorization ECS", - "version": 1 - }, - "id": "52a2a1c0-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "migrationVersion": { - "search": "7.0.0" - }, - "references": [ - { - "id": "33c018b0-2876-11e9-bc7e-af96a8fe0ddd", - "name": "kibanaSavedObjectMeta.searchSourceJSON.index", - "type": "index-pattern" - } - ], - "type": "search", - "updated_at": "2019-02-04T12:20:56.412Z", - "version": 1 - }, - { - "attributes": { - "columns": [ - "@timestamp", - "host.hostname", - "message" - ], - "description": "", - "hits": 0, - "kibanaSavedObjectMeta": { - "searchSourceJSON": { - "filter": [], - "highlightAll": true, - "indexRefName": "kibanaSavedObjectMeta.searchSourceJSON.index", - "query": { - "language": "kuery", - "query": "" - }, - "version": true - } - }, - "sort": [ - "@timestamp", - "desc" - ], - "title": "[Journalbeat] Systemd messages ECS", - "version": 1 - }, - "id": "842a1340-2877-11e9-bc7e-af96a8fe0ddd-ecs", - "migrationVersion": { - "search": "7.0.0" - }, - "references": [ - { - "id": "33c018b0-2876-11e9-bc7e-af96a8fe0ddd", - "name": "kibanaSavedObjectMeta.searchSourceJSON.index", - "type": "index-pattern" - } - ], - "type": "search", - "updated_at": "2019-02-04T12:22:19.508Z", - "version": 1 - } - ], - "version": "7.0.0" -} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/configuring-howto.asciidoc 
b/vendor/github.com/elastic/beats/journalbeat/docs/configuring-howto.asciidoc index 620d0d6e..2a651def 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/configuring-howto.asciidoc @@ -8,7 +8,7 @@ Before modifying configuration settings, make sure you've completed the <<{beatname_lc}-configuration,configuration steps>> in the Getting Started. This section describes some common use cases for changing configuration options. -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] The following topics describe how to configure {beatname_uc}: @@ -37,38 +37,38 @@ include::./config-options.asciidoc[] include::./general-options.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] -include::{libbeat-dir}/docs/shared-ilm.asciidoc[] +include::{libbeat-dir}/shared-ilm.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/regexp.asciidoc[] +include::{libbeat-dir}/regexp.asciidoc[] -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/faq.asciidoc index 4848f873..bbe89a61 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/faq.asciidoc @@ -5,6 +5,6 @@ This section describes common problems you might encounter with {beatname_uc}. Also check out the https://discuss.elastic.co/c/beats/{beatname_lc}[{beatname_uc} discussion forum]. -include::{libbeat-dir}/docs/faq-limit-bandwidth.asciidoc[] +include::{libbeat-dir}/faq-limit-bandwidth.asciidoc[] -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/filtering.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/filtering.asciidoc index 1150e70f..fc7dba74 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/filtering.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/filtering.asciidoc @@ -19,7 +19,7 @@ global processing across all data exported by {beatname_uc}. 
[[using-processors]] === Processors -include::{libbeat-dir}/docs/processors.asciidoc[] +include::{libbeat-dir}/processors.asciidoc[] // You must set the processor-scope attribute to resolve the attribute reference // defined in processors-using.asciidoc. The attribute is used to indicate where @@ -28,5 +28,5 @@ include::{libbeat-dir}/docs/processors.asciidoc[] // to processors-using.asciidoc. :processor-scope: input -include::{libbeat-dir}/docs/processors-using.asciidoc[] +include::{libbeat-dir}/processors-using.asciidoc[] :processor-scope!: diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/general-options.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/general-options.asciidoc index 8596fc7d..5649877f 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/general-options.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/general-options.asciidoc @@ -56,5 +56,5 @@ This option is valid as a global setting under the +{beatname_lc}+ namespace or under `paths`. For a description of this option, see <>. -include::{libbeat-dir}/docs/generalconfig.asciidoc[] +include::{libbeat-dir}/generalconfig.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/getting-started.asciidoc index e8f1b168..dcde1df7 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/getting-started.asciidoc @@ -1,7 +1,7 @@ [id="{beatname_lc}-getting-started"] == Getting started with {beatname_uc} -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <<{beatname_lc}-installation>> * <<{beatname_lc}-configuration>> @@ -13,7 +13,7 @@ include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] [id="{beatname_lc}-installation"] === Step 1: Install {beatname_uc} -include::{libbeat-dir}/docs/shared-download-and-install.asciidoc[] +include::{libbeat-dir}/shared-download-and-install.asciidoc[] [[deb]] *deb:* @@ -77,7 +77,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -94,7 +94,7 @@ Before running {beatname_uc}, you can specify the location of the systemd journal files and configure how you want the files to be read. If you accept the default configuration, {beatname_uc} reads from the local journal. -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] Here is a sample of the +{beatname_lc}+ section of the +{beatname_lc}.yml+ file. {beatname_uc} uses predefined default values for most configuration options. @@ -128,12 +128,12 @@ If no paths are specified, {beatname_uc} reads from the default journal. {beatname_uc} starts reading at the beginning of the file, but continues reading at the last known position after a reload or restart. For more detail about the settings, see the reference docs for the -<>. +<>. . (Optional) Set the <> option to filter entries in journald before collecting any log events. This reduces the number of events that {beatname_uc} needs to process. 
For example, to fetch only -Redis events from a Docker container tagged as `redis`, use: +Redis events from a Docker container tagged as `redis`, use: + ["source","sh",subs="attributes"] ---- @@ -144,21 +144,21 @@ Redis events from a Docker container tagged as `redis`, use: - "_COMM=redis" ---- -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] -include::{libbeat-dir}/docs/step-look-at-config.asciidoc[] +include::{libbeat-dir}/step-look-at-config.asciidoc[] [id="{beatname_lc}-template"] === Step 3: Load the index template in {es} -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [id="{beatname_lc}-starting"] === Step 5: Start {beatname_uc} @@ -196,9 +196,9 @@ in the _Beats Platform Reference_. [[view-kibana-dashboards]] === Step 6: Explore your data in {kib} -To start exploring your data, go to the Discover application in {kib}. From -there, you can submit search queries, filter the search results, and view -document data. +The {beatname_uc} dashboard is currently broken. To start exploring your data, +go to the Discover application in {kib}. From there, you can submit search +queries, filter the search results, and view document data. To learn how to build visualizations and dashboards to view your data, see the _{kibana-ref}/index.html[{kib} User Guide]_. @@ -206,9 +206,12 @@ _{kibana-ref}/index.html[{kib} User Guide]_. [role="xpack"] ==== Want to tail logs in real time? -Use the {infra-guide}/logs-ui-overview.html[Logs UI] in {kib}. The UI shows logs +Use the Logs app in {kib}. +For more details, see the {logs-guide}[Logs Monitoring Guide]. + +The Logs app shows logs from `filebeat-*` indices by default. To show {beatname_uc} indices, configure -the source to include `journalbeat-*`. You can do this in the Logs UI when you +the source to include `journalbeat-*`. You can do this in the Logs app when you configure the source, or you can modify the {kib} configuration. 
For example: [source,yaml] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/index.asciidoc index 28384fed..2fb3b7ad 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/index.asciidoc @@ -1,34 +1,33 @@ = Journalbeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: journalbeat :beatname_uc: Journalbeat :beatname_pkg: {beatname_lc} :github_repo_name: beats :discuss_forum: beats/{beatname_lc} :beat_default_index_prefix: {beatname_lc} -:has_decode_csv_fields_processor: -:has_script_processor: -:has_timestamp_processor: :deb_os: :rpm_os: :linux_os: :docker_platform: :no_dashboards: +:no_decode_cef_processor: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] include::./getting-started.asciidoc[] -include::{libbeat-dir}/docs/repositories.asciidoc[] +include::{libbeat-dir}/repositories.asciidoc[] include::./setting-up-running.asciidoc[] @@ -36,9 +35,9 @@ include::./configuring-howto.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./troubleshooting.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/overview.asciidoc index 815de9bc..332dd624 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/overview.asciidoc @@ -12,4 +12,4 @@ locations that you specify, collects log events, and forwards them to either to https://www.elastic.co/products/elasticsearch[Elasticsearch] or https://www.elastic.co/products/logstash[Logstash]. -include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/running-on-docker.asciidoc index d9306b55..dbfcce5b 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/running-on-docker.asciidoc @@ -1 +1 @@ -include::{libbeat-dir}/docs/shared-docker.asciidoc[] +include::{libbeat-dir}/shared-docker.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/setting-up-running.asciidoc index 8969c149..bf67e650 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/setting-up-running.asciidoc @@ -23,14 +23,14 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. 
-include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[] +include::{libbeat-dir}/shared-directory-layout.asciidoc[] -include::{libbeat-dir}/docs/keystore.asciidoc[] +include::{libbeat-dir}/keystore.asciidoc[] -include::{libbeat-dir}/docs/command-reference.asciidoc[] +include::{libbeat-dir}/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] -include::{libbeat-dir}/docs/shared-systemd.asciidoc[] +include::{libbeat-dir}/shared-systemd.asciidoc[] -include::{libbeat-dir}/docs/shared-shutdown.asciidoc[] +include::{libbeat-dir}/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/journalbeat/docs/troubleshooting.asciidoc index 8d87893b..a1b4b1e7 100644 --- a/vendor/github.com/elastic/beats/journalbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/journalbeat/docs/troubleshooting.asciidoc @@ -18,14 +18,14 @@ following tips: [[getting-help]] == Get help -include::{libbeat-dir}/docs/getting-help.asciidoc[] +include::{libbeat-dir}/getting-help.asciidoc[] //sets block macro for debugging.asciidoc included in next section [id="enable-{beatname_lc}-debugging"] == Debug -include::{libbeat-dir}/docs/debugging.asciidoc[] +include::{libbeat-dir}/debugging.asciidoc[] diff --git a/vendor/github.com/elastic/beats/journalbeat/journalbeat.docker.yml b/vendor/github.com/elastic/beats/journalbeat/journalbeat.docker.yml index a1e67c0d..5e723c7a 100644 --- a/vendor/github.com/elastic/beats/journalbeat/journalbeat.docker.yml +++ b/vendor/github.com/elastic/beats/journalbeat/journalbeat.docker.yml @@ -4,6 +4,7 @@ journalbeat.inputs: processors: - add_cloud_metadata: ~ +- add_docker_metadata: ~ output.elasticsearch: hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' diff --git a/vendor/github.com/elastic/beats/journalbeat/journalbeat.reference.yml b/vendor/github.com/elastic/beats/journalbeat/journalbeat.reference.yml index 0aa4c2c2..a7a768e8 100644 --- a/vendor/github.com/elastic/beats/journalbeat/journalbeat.reference.yml +++ b/vendor/github.com/elastic/beats/journalbeat/journalbeat.reference.yml @@ -1015,7 +1015,7 @@ setup.template.settings: #setup.ilm.enabled: auto # Set the prefix used in the index lifecycle write alias name. The default alias -# name is 'journalbeat-%{[agent.version]}'. +# name is 'journalbeat-%{[agent.version]}'. #setup.ilm.rollover_alias: "journalbeat" # Set the rollover index pattern. The default is "%{now/d}-000001". @@ -1278,12 +1278,21 @@ logging.files: # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# When using IP addresses, it is recommended to only use localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066 +# Define which user should own the named pipe. +#http.named_pipe.user: + +# Define the permissions that should be applied to the named pipe, using the Security +# Descriptor Definition Language (SDDL). This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + #============================= Process Security ================================ # Enable or disable seccomp system call filtering on Linux. Default is enabled.
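The unix socket variant of the HTTP endpoint documented above can be queried with a stock net/http client whose dialer is pointed at the socket. A hedged sketch (the socket path is an assumption standing in for whatever http.host is set to; /stats is one of the endpoint's routes):

package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
)

func main() {
	const socket = "/var/run/journalbeat.sock" // assumes http.host: unix:///var/run/journalbeat.sock
	client := &http.Client{
		Transport: &http.Transport{
			// The URL's host is ignored; every request dials the unix socket.
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				var d net.Dialer
				return d.DialContext(ctx, "unix", socket)
			},
		},
	}
	resp, err := client.Get("http://unix/stats")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body))
}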
diff --git a/vendor/github.com/elastic/beats/journalbeat/journalbeat.yml b/vendor/github.com/elastic/beats/journalbeat/journalbeat.yml index ad4091ab..ee2ce450 100644 --- a/vendor/github.com/elastic/beats/journalbeat/journalbeat.yml +++ b/vendor/github.com/elastic/beats/journalbeat/journalbeat.yml @@ -145,6 +145,7 @@ output.elasticsearch: processors: - add_host_metadata: ~ - add_cloud_metadata: ~ + - add_docker_metadata: ~ #================================ Logging ===================================== diff --git a/vendor/github.com/elastic/beats/libbeat/Makefile b/vendor/github.com/elastic/beats/libbeat/Makefile index f6447b13..654e8ae0 100644 --- a/vendor/github.com/elastic/beats/libbeat/Makefile +++ b/vendor/github.com/elastic/beats/libbeat/Makefile @@ -1,7 +1,6 @@ BEAT_NAME=libbeat TEST_ENVIRONMENT?=true SYSTEM_TESTS=true -GOX_OS=linux darwin windows netbsd openbsd include scripts/Makefile diff --git a/vendor/github.com/elastic/beats/libbeat/_meta/config.docker.yml b/vendor/github.com/elastic/beats/libbeat/_meta/config.docker.yml index 241ec990..6ce79dc1 100644 --- a/vendor/github.com/elastic/beats/libbeat/_meta/config.docker.yml +++ b/vendor/github.com/elastic/beats/libbeat/_meta/config.docker.yml @@ -1,5 +1,6 @@ processors: - add_cloud_metadata: ~ +- add_docker_metadata: ~ output.elasticsearch: hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' diff --git a/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml.tmpl b/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml.tmpl index 70b64a59..af90f66f 100644 --- a/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml.tmpl +++ b/vendor/github.com/elastic/beats/libbeat/_meta/config.reference.yml.tmpl @@ -958,7 +958,7 @@ setup.template.settings: #setup.ilm.enabled: auto # Set the prefix used in the index lifecycle write alias name. The default alias -# name is 'beatname-%{[agent.version]}'. +# name is 'beatname-%{[agent.version]}'. #setup.ilm.rollover_alias: "beat-index-prefix" # Set the rollover index pattern. The default is "%{now/d}-000001". @@ -1221,12 +1221,21 @@ logging.files: # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe. +# When using IP addresses, it is recommended to only use localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066 +# Define which user should own the named pipe. +#http.named_pipe.user: + +# Define the permissions that should be applied to the named pipe, using the Security +# Descriptor Definition Language (SDDL). This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + #============================= Process Security ================================ # Enable or disable seccomp system call filtering on Linux. Default is enabled.
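The config.yml.tmpl hunk that follows gates the metadata processors behind Go text/template conditionals. A small sketch of how those conditionals render, with the template trimmed; the two flag names mirror the fields referenced in the hunk:

package main

import (
	"os"
	"text/template"
)

const cfgTmpl = `processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
{{- if .UseDockerMetadataProcessor }}
  - add_docker_metadata: ~
{{- end }}
{{- if .UseKubernetesMetadataProcessor }}
  - add_kubernetes_metadata: ~
{{- end }}
`

func main() {
	t := template.Must(template.New("cfg").Parse(cfgTmpl))
	flags := struct {
		UseDockerMetadataProcessor     bool
		UseKubernetesMetadataProcessor bool
	}{UseDockerMetadataProcessor: true}
	if err := t.Execute(os.Stdout, flags); err != nil {
		panic(err)
	}
}

With the docker flag set and the kubernetes flag unset, only the add_docker_metadata line is emitted, which is how each generated beat config picks up just the processors its platform supports.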
diff --git a/vendor/github.com/elastic/beats/libbeat/_meta/config.yml.tmpl b/vendor/github.com/elastic/beats/libbeat/_meta/config.yml.tmpl index bdf59928..0e32b2ff 100644 --- a/vendor/github.com/elastic/beats/libbeat/_meta/config.yml.tmpl +++ b/vendor/github.com/elastic/beats/libbeat/_meta/config.yml.tmpl @@ -93,6 +93,13 @@ output.elasticsearch: processors: - add_host_metadata: ~ - add_cloud_metadata: ~ +{{- if .UseDockerMetadataProcessor }} + - add_docker_metadata: ~{{ end }} + +{{- if .UseKubernetesMetadataProcessor }} + - add_kubernetes_metadata: ~ +{{- else -}} +{{ end }} {{else}} processors: - add_observer_metadata: diff --git a/vendor/github.com/elastic/beats/libbeat/api/config.go b/vendor/github.com/elastic/beats/libbeat/api/config.go index 0ba30634..f6e57551 100644 --- a/vendor/github.com/elastic/beats/libbeat/api/config.go +++ b/vendor/github.com/elastic/beats/libbeat/api/config.go @@ -17,16 +17,25 @@ package api +import "os" + +// Config is the configuration for the API endpoint. type Config struct { - Enabled bool - Host string - Port int + Enabled bool `config:"enabled"` + Host string `config:"host"` + Port int `config:"port"` + User string `config:"named_pipe.user"` + SecurityDescriptor string `config:"named_pipe.security_descriptor"` } var ( + // DefaultConfig is the default configuration used by the API endpoint. DefaultConfig = Config{ Enabled: false, Host: "localhost", Port: 5066, } ) + +// File mode for the socket file: the owner of the process can do everything, members of the group can read. +const socketFileMode = os.FileMode(0740) diff --git a/vendor/github.com/elastic/beats/libbeat/api/make_listener_posix.go b/vendor/github.com/elastic/beats/libbeat/api/make_listener_posix.go new file mode 100644 index 00000000..672af5cc --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/api/make_listener_posix.go @@ -0,0 +1,83 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +//+build !windows + +package api + +import ( + "fmt" + "net" + "os" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/api/npipe" +) + +func makeListener(cfg Config) (net.Listener, error) { + if len(cfg.User) > 0 { + return nil, errors.New("specifying a user is not supported under this platform") + } + + if len(cfg.SecurityDescriptor) > 0 { + return nil, errors.New("security_descriptor option for the HTTP endpoint only works on Windows") + } + + if npipe.IsNPipe(cfg.Host) { + return nil, fmt.Errorf( + "cannot use %s as the host, named pipes are only supported on Windows", + cfg.Host, + ) + } + + network, path, err := parse(cfg.Host, cfg.Port) + if err != nil { + return nil, err + } + + if network == "unix" { + if _, err := os.Stat(path); !os.IsNotExist(err) { + if err := os.Remove(path); err != nil { + return nil, errors.Wrapf( + err, + "cannot remove existing unix socket file at location %s", + path, + ) + } + } + } + + l, err := net.Listen(network, path) + if err != nil { + return nil, err + } + + // Ensure file mode + if network == "unix" { + if err := os.Chmod(path, socketFileMode); err != nil { + return nil, errors.Wrapf( + err, + "could not set mode %d for unix socket file at location %s", + socketFileMode, + path, + ) + } + } + + return l, nil +} diff --git a/vendor/github.com/elastic/beats/libbeat/api/make_listener_windows.go b/vendor/github.com/elastic/beats/libbeat/api/make_listener_windows.go new file mode 100644 index 00000000..ef549f46 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/api/make_listener_windows.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
diff --git a/vendor/github.com/elastic/beats/libbeat/api/make_listener_windows.go b/vendor/github.com/elastic/beats/libbeat/api/make_listener_windows.go
new file mode 100644
index 00000000..ef549f46
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/make_listener_windows.go
@@ -0,0 +1,64 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//+build windows
+
+package api
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/pkg/errors"
+
+	"github.com/elastic/beats/libbeat/api/npipe"
+)
+
+func makeListener(cfg Config) (net.Listener, error) {
+	if len(cfg.User) > 0 && len(cfg.SecurityDescriptor) > 0 {
+		return nil, errors.New("user and security_descriptor are mutually exclusive; define only one of them")
+	}
+
+	if npipe.IsNPipe(cfg.Host) {
+		pipe := npipe.TransformString(cfg.Host)
+		var sd string
+		var err error
+		if len(cfg.SecurityDescriptor) == 0 {
+			sd, err = npipe.DefaultSD(cfg.User)
+			if err != nil {
+				return nil, errors.Wrap(err, "cannot generate security descriptor for the named pipe")
+			}
+		} else {
+			sd = cfg.SecurityDescriptor
+		}
+		return npipe.NewListener(pipe, sd)
+	}
+
+	network, path, err := parse(cfg.Host, cfg.Port)
+	if err != nil {
+		return nil, err
+	}
+
+	if network == "unix" {
+		return nil, fmt.Errorf(
+			"cannot use %s as the host, unix sockets are not supported on Windows, use npipe instead",
+			cfg.Host,
+		)
+	}
+
+	return net.Listen(network, path)
+}
diff --git a/vendor/github.com/elastic/beats/libbeat/api/npipe/listener_windows.go b/vendor/github.com/elastic/beats/libbeat/api/npipe/listener_windows.go
new file mode 100644
index 00000000..c3c9db7a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/npipe/listener_windows.go
@@ -0,0 +1,95 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// +build windows
+
+package npipe
+
+import (
+	"context"
+	"net"
+	"os/user"
+	"strings"
+
+	winio "github.com/Microsoft/go-winio"
+	"github.com/pkg/errors"
+)
+
+// NewListener creates a new Listener accepting connections over a named pipe.
+func NewListener(name, sd string) (net.Listener, error) {
+	c := &winio.PipeConfig{
+		SecurityDescriptor: sd,
+	}
+
+	l, err := winio.ListenPipe(name, c)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to listen on the named pipe %s", name)
+	}
+
+	return l, nil
+}
+
+// TransformString takes an input name defined as a URI like `npipe:///hello` and transforms it into
+// `\\.\pipe\hello`.
+func TransformString(name string) string {
+	if strings.HasPrefix(name, "npipe:///") {
+		path := strings.TrimPrefix(name, "npipe:///")
+		return `\\.\pipe\` + path
+	}
+
+	if strings.HasPrefix(name, `\\.\pipe\`) {
+		return name
+	}
+
+	return name
+}
+
+// DialContext creates a dial function to be used with an http.Client to connect to a pipe.
+func DialContext(npipe string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, _, _ string) (net.Conn, error) {
+		return winio.DialPipeContext(ctx, npipe)
+	}
+}
+
+// DefaultSD returns a default SecurityDescriptor with the minimal permissions required to
+// write to the named pipe. The security descriptor is returned in SDDL format.
+//
+// Docs: https://docs.microsoft.com/en-us/windows/win32/secauthz/security-descriptor-string-format
+func DefaultSD(forUser string) (string, error) {
+	var u *user.User
+	var err error
+	// No user configured; fall back to the current user.
+	if len(forUser) == 0 {
+		u, err = user.Current()
+		if err != nil {
+			return "", errors.Wrap(err, "failed to retrieve the current user")
+		}
+	} else {
+		u, err = user.Lookup(forUser)
+		if err != nil {
+			return "", errors.Wrapf(err, "failed to retrieve the user %s", forUser)
+		}
+	}
+
+	// Named pipe security and access rights.
+	// We create the pipe, and only the specified user should be able to write to it.
+	// See docs: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipe-security-and-access-rights
+	// String definition: https://docs.microsoft.com/en-us/windows/win32/secauthz/ace-strings
+	// Give generic all (GA) access to the specified user.
+	descriptor := "D:P(A;;GA;;;" + u.Uid + ")"
+	return descriptor, nil
+}
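Read left to right, the SDDL string built above is a protected DACL (`D:P`) containing a single access-allowed ACE (`A`) granting generic-all rights (`GA`) to the account's SID. A small sketch of producing one for the current account, using only the standard library:

package main

import (
	"fmt"
	"os/user"
)

func main() {
	u, err := user.Current()
	if err != nil {
		panic(err)
	}
	// Same shape as DefaultSD above; on Windows u.Uid is the account SID,
	// e.g. "S-1-5-21-...-1001" (illustrative value).
	sd := "D:P(A;;GA;;;" + u.Uid + ")"
	fmt.Println(sd)
}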
diff --git a/vendor/github.com/elastic/beats/libbeat/api/npipe/listener_windows_test.go b/vendor/github.com/elastic/beats/libbeat/api/npipe/listener_windows_test.go
new file mode 100644
index 00000000..486bb832
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/npipe/listener_windows_test.go
@@ -0,0 +1,73 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// +build windows
+
+package npipe
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestHTTPOverNamedPipe(t *testing.T) {
+	sd, err := DefaultSD("")
+	require.NoError(t, err)
+	npipe := TransformString("npipe:///hello-world")
+	l, err := NewListener(npipe, sd)
+	require.NoError(t, err)
+	defer l.Close()
+
+	mux := http.NewServeMux()
+	mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "ehlo!")
+	})
+
+	go http.Serve(l, mux)
+
+	c := http.Client{
+		Transport: &http.Transport{
+			DialContext: DialContext(npipe),
+		},
+	}
+
+	r, err := c.Get("http://npipe/echo-hello")
+	require.NoError(t, err)
+	body, err := ioutil.ReadAll(r.Body)
+	defer r.Body.Close()
+
+	assert.Equal(t, "ehlo!", string(body))
+}
+
+func TestTransformString(t *testing.T) {
+	t.Run("with npipe:// scheme", func(t *testing.T) {
+		assert.Equal(t, `\\.\pipe\hello`, TransformString("npipe:///hello"))
+	})
+
+	t.Run("with windows pipe syntax", func(t *testing.T) {
+		assert.Equal(t, `\\.\pipe\hello`, TransformString(`\\.\pipe\hello`))
+	})
+
+	t.Run("everything else", func(t *testing.T) {
+		assert.Equal(t, "hello", TransformString("hello"))
+	})
+}
diff --git a/vendor/github.com/elastic/beats/libbeat/api/npipe/listerner_posix.go b/vendor/github.com/elastic/beats/libbeat/api/npipe/listerner_posix.go
new file mode 100644
index 00000000..c8239fd9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/npipe/listerner_posix.go
@@ -0,0 +1,33 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//+build !windows
+
+package npipe
+
+import (
+	"context"
+	"errors"
+	"net"
+)
+
+// DialContext creates a dial function to be used with an http.Client to connect to a pipe.
+func DialContext(npipe string) func(context.Context, string, string) (net.Conn, error) {
+	return func(ctx context.Context, _, _ string) (net.Conn, error) {
+		return nil, errors.New("named pipes are not supported on this platform")
+	}
+}
diff --git a/vendor/github.com/elastic/beats/libbeat/api/npipe/pipe.go b/vendor/github.com/elastic/beats/libbeat/api/npipe/pipe.go
new file mode 100644
index 00000000..d7cbc44e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/npipe/pipe.go
@@ -0,0 +1,25 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package npipe
+
+import "strings"
+
+// IsNPipe returns true if the string has an npipe scheme.
+func IsNPipe(s string) bool {
+	return strings.HasPrefix(s, "npipe:///") || strings.HasPrefix(s, `\\.\pipe\`)
+}
diff --git a/vendor/github.com/elastic/beats/libbeat/api/npipe/pipe_test.go b/vendor/github.com/elastic/beats/libbeat/api/npipe/pipe_test.go
new file mode 100644
index 00000000..f0c3465e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/npipe/pipe_test.go
@@ -0,0 +1,35 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package npipe
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestIsNPipe(t *testing.T) {
+	t.Run("return true on named pipe", func(t *testing.T) {
+		assert.True(t, IsNPipe("npipe:///hello"))
+		assert.True(t, IsNPipe(`\\.\pipe\hello`))
+	})
+
+	t.Run("return false if it's not a named pipe", func(t *testing.T) {
+		assert.False(t, IsNPipe("unix:///tmp/ok.sock"))
+	})
+}
diff --git a/vendor/github.com/elastic/beats/libbeat/api/routes.go b/vendor/github.com/elastic/beats/libbeat/api/routes.go
new file mode 100644
index 00000000..e78dfa16
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/routes.go
@@ -0,0 +1,75 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package api
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/elastic/beats/libbeat/common"
+	"github.com/elastic/beats/libbeat/logp"
+	"github.com/elastic/beats/libbeat/monitoring"
+)
+
+type handlerFunc func(http.ResponseWriter, *http.Request)
+type lookupFunc func(string) *monitoring.Namespace
+
+// NewWithDefaultRoutes creates a new server with default API routes.
+func NewWithDefaultRoutes(log *logp.Logger, config *common.Config, ns lookupFunc) (*Server, error) {
+	mux := http.NewServeMux()
+
+	mux.HandleFunc("/", makeRootAPIHandler(makeAPIHandler(ns("info"))))
+	mux.HandleFunc("/state", makeAPIHandler(ns("state")))
+	mux.HandleFunc("/stats", makeAPIHandler(ns("stats")))
+	mux.HandleFunc("/dataset", makeAPIHandler(ns("dataset")))
+	return New(log, mux, config)
+}
+
+func makeRootAPIHandler(handler handlerFunc) handlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.URL.Path != "/" {
+			http.NotFound(w, r)
+			return
+		}
+		handler(w, r)
+	}
+}
+
+func makeAPIHandler(ns *monitoring.Namespace) handlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+		data := monitoring.CollectStructSnapshot(
+			ns.GetRegistry(),
+			monitoring.Full,
+			false,
+		)
+
+		prettyPrint(w, data, r.URL)
+	}
+}
+
+func prettyPrint(w http.ResponseWriter, data common.MapStr, u *url.URL) {
+	query := u.Query()
+	if _, ok := query["pretty"]; ok {
+		fmt.Fprint(w, data.StringToPrint())
+	} else {
+		fmt.Fprint(w, data.String())
+	}
+}
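In practice a Beat never touches these handlers directly; it hands a namespace lookup and a config to this constructor and starts the result, which is exactly what beat.go does later in this patch. A minimal standalone sketch; the host and port values are illustrative:

package main

import (
	"github.com/elastic/beats/libbeat/api"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/logp"
	"github.com/elastic/beats/libbeat/monitoring"
)

func main() {
	cfg := common.MustNewConfigFrom(map[string]interface{}{
		"enabled": true,
		"host":    "localhost", // or "unix:///..." / "npipe:///..." per platform
		"port":    5066,
	})

	s, err := api.NewWithDefaultRoutes(logp.NewLogger(""), cfg, monitoring.GetNamespace)
	if err != nil {
		panic(err)
	}
	s.Start() // GET http://localhost:5066/stats?pretty now returns an indented snapshot
	defer s.Stop()

	select {} // block forever; a real Beat has its own run loop
}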
diff --git a/vendor/github.com/elastic/beats/libbeat/api/server.go b/vendor/github.com/elastic/beats/libbeat/api/server.go
index 3280cceb..44db0bae 100644
--- a/vendor/github.com/elastic/beats/libbeat/api/server.go
+++ b/vendor/github.com/elastic/beats/libbeat/api/server.go
@@ -19,86 +19,77 @@
 package api

 import (
 	"fmt"
+	"net"
 	"net/http"
 	"net/url"
 	"strconv"

 	"github.com/elastic/beats/libbeat/common"
-	"github.com/elastic/beats/libbeat/common/cfgwarn"
 	"github.com/elastic/beats/libbeat/logp"
-	"github.com/elastic/beats/libbeat/monitoring"
 )

-// Start starts the metrics api endpoint on the configured host and port
-func Start(cfg *common.Config) {
-	cfgwarn.Experimental("Metrics endpoint is enabled.")
-	config := DefaultConfig
-	cfg.Unpack(&config)
-
-	logp.Info("Starting stats endpoint")
-	go func() {
-		mux := http.NewServeMux()
-
-		// register handlers
-		mux.HandleFunc("/", rootHandler())
-		mux.HandleFunc("/state", stateHandler)
-		mux.HandleFunc("/stats", statsHandler)
-		mux.HandleFunc("/dataset", datasetHandler)
-
-		url := config.Host + ":" + strconv.Itoa(config.Port)
-		logp.Info("Metrics endpoint listening on: %s", url)
-		endpoint := http.ListenAndServe(url, mux)
-		logp.Info("finished starting stats endpoint: %v", endpoint)
-	}()
+// Server takes care of correctly starting the HTTP component of the API
+// and will answer all the routes defined in the received ServeMux.
+type Server struct {
+	log    *logp.Logger
+	mux    *http.ServeMux
+	l      net.Listener
+	config Config
 }

-func rootHandler() func(http.ResponseWriter, *http.Request) {
-	return func(w http.ResponseWriter, r *http.Request) {
-		// Return error page
-		if r.URL.Path != "/" {
-			http.NotFound(w, r)
-			return
-		}
+
+// New creates a new API Server.
+func New(log *logp.Logger, mux *http.ServeMux, config *common.Config) (*Server, error) {
+	if log == nil {
+		log = logp.NewLogger("")
+	}

-		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	cfg := DefaultConfig
+	err := config.Unpack(&cfg)
+	if err != nil {
+		return nil, err
+	}

-		data := monitoring.CollectStructSnapshot(monitoring.GetNamespace("info").GetRegistry(), monitoring.Full, false)
+	l, err := makeListener(cfg)
+	if err != nil {
+		return nil, err
+	}

-		print(w, data, r.URL)
-	}
-}
-
-// stateHandler reports state metrics
-func stateHandler(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-
-	data := monitoring.CollectStructSnapshot(monitoring.GetNamespace("state").GetRegistry(), monitoring.Full, false)
-
-	print(w, data, r.URL)
-}
-
-// statsHandler report expvar and all libbeat/monitoring metrics
-func statsHandler(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-
-	data := monitoring.CollectStructSnapshot(monitoring.GetNamespace("stats").GetRegistry(), monitoring.Full, false)
-
-	print(w, data, r.URL)
-}
-
-func datasetHandler(w http.ResponseWriter, r *http.Request) {
-	w.Header().Set("Content-Type", "application/json; charset=utf-8")
-
-	data := monitoring.CollectStructSnapshot(monitoring.GetNamespace("dataset").GetRegistry(), monitoring.Full, false)
-
-	print(w, data, r.URL)
-}
-
-func print(w http.ResponseWriter, data common.MapStr, u *url.URL) {
-	query := u.Query()
-	if _, ok := query["pretty"]; ok {
-		fmt.Fprintf(w, data.StringToPrint())
-	} else {
-		fmt.Fprintf(w, data.String())
+	return &Server{mux: mux, l: l, config: cfg, log: log.Named("api")}, nil
+}
+
+// Start starts the HTTP server and begins accepting new connections.
+func (s *Server) Start() {
+	s.log.Info("Starting stats endpoint")
+	go func(l net.Listener) {
+		s.log.Infof("Metrics endpoint listening on: %s (configured: %s)", l.Addr().String(), s.config.Host)
+		http.Serve(l, s.mux)
+		s.log.Infof("Finished starting stats endpoint: %s", l.Addr().String())
+	}(s.l)
+}
+
+// Stop stops the API server and frees any resources associated with it, such as unix socket files.
+func (s *Server) Stop() error {
+	return s.l.Close()
+}
+
+func parse(host string, port int) (string, string, error) {
+	url, err := url.Parse(host)
+	if err != nil {
+		return "", "", err
+	}
+
+	// When the scheme is not explicitly defined, we fall back to tcp + host.
+	if len(url.Host) == 0 && len(url.Scheme) == 0 {
+		addr := host + ":" + strconv.Itoa(port)
+		return "tcp", addr, nil
+	}
+
+	switch url.Scheme {
+	case "http":
+		return "tcp", url.Host, nil
+	case "unix":
+		return url.Scheme, url.Path, nil
+	default:
+		return "", "", fmt.Errorf("unknown scheme %s for host string %s", url.Scheme, host)
 	}
 }
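The parse helper is what gives all transports a uniform host syntax; npipe hosts never reach it because makeListener handles them first. A test-style sketch of the mapping it implements (the unix path is illustrative):

package api

import "testing"

// TestParseExamples sketches the host-string mapping implemented by parse.
func TestParseExamples(t *testing.T) {
	cases := []struct {
		host                  string
		port                  int
		wantNetwork, wantAddr string
	}{
		// No scheme: tcp with the configured port appended.
		{"localhost", 5066, "tcp", "localhost:5066"},
		// http scheme: tcp, with the port taken from the URL itself.
		{"http://localhost:5066", 0, "tcp", "localhost:5066"},
		// unix scheme: the network is "unix" and the address is the socket path.
		{"unix:///var/run/mybeat.sock", 0, "unix", "/var/run/mybeat.sock"},
	}
	for _, c := range cases {
		network, addr, err := parse(c.host, c.port)
		if err != nil || network != c.wantNetwork || addr != c.wantAddr {
			t.Errorf("parse(%q, %d) = (%q, %q, %v)", c.host, c.port, network, addr, err)
		}
	}
}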
diff --git a/vendor/github.com/elastic/beats/libbeat/api/server_test.go b/vendor/github.com/elastic/beats/libbeat/api/server_test.go
new file mode 100644
index 00000000..c021113c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/server_test.go
@@ -0,0 +1,186 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package api
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"os"
+	"runtime"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	"github.com/elastic/beats/libbeat/common"
+)
+
+func TestConfiguration(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skip("user and security_descriptor are only checked on Windows")
+		return
+	}
+	t.Run("when user is set", func(t *testing.T) {
+		cfg := common.MustNewConfigFrom(map[string]interface{}{
+			"host": "unix:///tmp/ok",
+			"user": "admin",
+		})
+
+		_, err := New(nil, simpleMux(), cfg)
+		assert.Error(t, err)
+	})
+
+	t.Run("when security descriptor is set", func(t *testing.T) {
+		cfg := common.MustNewConfigFrom(map[string]interface{}{
+			"host":                "unix:///tmp/ok",
+			"security_descriptor": "D:P(A;;GA;;;1234)",
+		})
+
+		_, err := New(nil, simpleMux(), cfg)
+		assert.Error(t, err)
+	})
+}
+
+func TestSocket(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("unix sockets are not supported on Windows")
+		return
+	}
+
+	client := func(sockFile string) http.Client {
+		return http.Client{
+			Transport: &http.Transport{
+				DialContext: func(_ context.Context, _, _ string) (net.Conn, error) {
+					return net.Dial("unix", sockFile)
+				},
+			},
+		}
+	}
+
+	t.Run("socket doesn't exist before", func(t *testing.T) {
+		tmpDir, err := ioutil.TempDir("", "testsocket")
+		require.NoError(t, err)
+		defer os.RemoveAll(tmpDir)
+
+		sockFile := tmpDir + "/test.sock"
+
+		cfg := common.MustNewConfigFrom(map[string]interface{}{
+			"host": "unix://" + sockFile,
+		})
+
+		s, err := New(nil, simpleMux(), cfg)
+		require.NoError(t, err)
+		go s.Start()
+		defer func() {
+			s.Stop()
+			// Make sure we clean up after ourselves.
+			_, err := os.Stat(sockFile)
+			require.Error(t, err)
+			require.False(t, os.IsExist(err))
+		}()
+
+		c := client(sockFile)
+
+		r, err := c.Get("http://unix/echo-hello")
+		require.NoError(t, err)
+		defer r.Body.Close()
+
+		body, err := ioutil.ReadAll(r.Body)
+		require.NoError(t, err)
+
+		assert.Equal(t, "ehlo!", string(body))
+		fi, err := os.Stat(sockFile)
+		require.NoError(t, err)
+		assert.Equal(t, socketFileMode, fi.Mode().Perm())
+	})
+
+	t.Run("starting beat and recover a dangling socket file", func(t *testing.T) {
+		tmpDir, err := ioutil.TempDir("", "testsocket")
+		require.NoError(t, err)
+		defer os.RemoveAll(tmpDir)
+
+		sockFile := tmpDir + "/test.sock"
+
+		// Create the socket before the server.
+		f, err := os.Create(sockFile)
+		require.NoError(t, err)
+		f.Close()
+
+		cfg := common.MustNewConfigFrom(map[string]interface{}{
+			"host": "unix://" + sockFile,
+		})
+
+		s, err := New(nil, simpleMux(), cfg)
+		require.NoError(t, err)
+		go s.Start()
+		defer func() {
+			s.Stop()
+			// Make sure we clean up after ourselves.
+			_, err := os.Stat(sockFile)
+			require.Error(t, err)
+			require.False(t, os.IsExist(err))
+		}()
+
+		c := client(sockFile)
+
+		r, err := c.Get("http://unix/echo-hello")
+		require.NoError(t, err)
+		defer r.Body.Close()
+
+		body, err := ioutil.ReadAll(r.Body)
+		require.NoError(t, err)
+
+		assert.Equal(t, "ehlo!", string(body))
+
+		fi, err := os.Stat(sockFile)
+		require.NoError(t, err)
+		assert.Equal(t, socketFileMode, fi.Mode().Perm(), "incorrect mode for file %s", sockFile)
+	})
+}
+
+func TestHTTP(t *testing.T) {
+	// Select a random free port.
+	url := "http://localhost:0"
+
+	cfg := common.MustNewConfigFrom(map[string]interface{}{
+		"host": url,
+	})
+
+	s, err := New(nil, simpleMux(), cfg)
+	require.NoError(t, err)
+	go s.Start()
+	defer s.Stop()
+
+	r, err := http.Get("http://" + s.l.Addr().String() + "/echo-hello")
+	require.NoError(t, err)
+	defer r.Body.Close()
+
+	body, err := ioutil.ReadAll(r.Body)
+	require.NoError(t, err)
+
+	assert.Equal(t, "ehlo!", string(body))
+}
+
+func simpleMux() *http.ServeMux {
+	mux := http.NewServeMux()
+	mux.HandleFunc("/echo-hello", func(w http.ResponseWriter, r *http.Request) {
+		fmt.Fprintf(w, "ehlo!")
+	})
+	return mux
+}
diff --git a/vendor/github.com/elastic/beats/libbeat/api/server_windows_test.go b/vendor/github.com/elastic/beats/libbeat/api/server_windows_test.go
new file mode 100644
index 00000000..0931da86
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/api/server_windows_test.go
@@ -0,0 +1,60 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +//+build windows + +package api + +import ( + "io/ioutil" + "net/http" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/beats/libbeat/api/npipe" + "github.com/elastic/beats/libbeat/common" +) + +func TestNamedPipe(t *testing.T) { + p := "npipe:///hello" + + cfg := common.MustNewConfigFrom(map[string]interface{}{ + "host": p, + }) + + s, err := New(nil, simpleMux(), cfg) + require.NoError(t, err) + go s.Start() + defer s.Stop() + + c := http.Client{ + Transport: &http.Transport{ + DialContext: npipe.DialContext(npipe.TransformString(p)), + }, + } + + r, err := c.Get("http://npipe/echo-hello") + require.NoError(t, err) + defer r.Body.Close() + + body, err := ioutil.ReadAll(r.Body) + require.NoError(t, err) + + assert.Equal(t, "ehlo!", string(body)) +} diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go index 8fdef5dc..86a1c209 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/autodiscover.go @@ -41,7 +41,6 @@ const ( // Adapter must be implemented by the beat in order to provide Autodiscover type Adapter interface { - // CreateConfig generates a valid list of configs from the given event, the received event will have all keys defined by `StartFilter` CreateConfig(bus.Event) ([]*common.Config, error) @@ -202,7 +201,7 @@ func (a *Autodiscover) handleStart(event bus.Event) bool { err = a.adapter.CheckConfig(config) if err != nil { - a.logger.Error(errors.Wrap(err, fmt.Sprintf("Auto discover config check failed for config %v, won't start runner", config))) + a.logger.Error(errors.Wrap(err, fmt.Sprintf("Auto discover config check failed for config '%s', won't start runner", common.DebugString(config, true)))) continue } diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go index d9c1235f..e9c00abb 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper.go @@ -43,7 +43,13 @@ func GetContainerName(container common.MapStr) string { // GetHintString takes a hint and returns its value as a string func GetHintString(hints common.MapStr, key, config string) string { - if iface, err := hints.GetValue(fmt.Sprintf("%s.%s", key, config)); err == nil { + base := config + if base == "" { + base = key + } else if key != "" { + base = fmt.Sprint(key, ".", config) + } + if iface, err := hints.GetValue(base); err == nil { if str, ok := iface.(string); ok { return str } @@ -54,7 +60,13 @@ func GetHintString(hints common.MapStr, key, config string) string { // GetHintMapStr takes a hint and returns a MapStr func GetHintMapStr(hints common.MapStr, key, config string) common.MapStr { - if iface, err := hints.GetValue(fmt.Sprintf("%s.%s", key, config)); err == nil { + base := config + if base == "" { + base = key + } else if key != "" { + base = fmt.Sprint(key, ".", config) + } + if iface, err := hints.GetValue(base); err == nil { if mapstr, ok := iface.(common.MapStr); ok { return mapstr } diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go index 1bee5836..5d36f814 100644 --- 
a/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/builder/helper_test.go @@ -63,6 +63,34 @@ func TestGenerateHints(t *testing.T) { }, }, // Scenarios being tested: + // logs/multiline.pattern must be a nested common.MapStr under hints.logs + // metrics/module must be found in hints.metrics + // not.to.include must not be part of hints + // metrics/metrics_path must be found in hints.metrics + { + annotations: map[string]string{ + "co.elastic.logs/multiline.pattern": "^test", + "co.elastic.metrics/module": "prometheus", + "co.elastic.metrics/period": "10s", + "co.elastic.metrics/metrics_path": "/metrics/prometheus", + "co.elastic.metrics.foobar/period": "15s", + "co.elastic.metrics.foobar1/period": "15s", + "not.to.include": "true", + }, + result: common.MapStr{ + "logs": common.MapStr{ + "multiline": common.MapStr{ + "pattern": "^test", + }, + }, + "metrics": common.MapStr{ + "module": "prometheus", + "period": "15s", + "metrics_path": "/metrics/prometheus", + }, + }, + }, + // Scenarios being tested: // have co.elastic.logs/disable set to false. // logs/multiline.pattern must be a nested common.MapStr under hints.logs // metrics/module must be found in hints.metrics diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go index 6e103b20..cba67993 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/config.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package docker import ( diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go index 4e9b8aec..e0febbdf 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker.go @@ -15,13 +15,16 @@ // specific language governing permissions and limitations // under the License. 
+// +build linux darwin windows + package docker import ( - "errors" + "fmt" "time" "github.com/gofrs/uuid" + "github.com/pkg/errors" "github.com/elastic/beats/libbeat/autodiscover" "github.com/elastic/beats/libbeat/autodiscover/builder" @@ -58,37 +61,45 @@ type Provider struct { // AutodiscoverBuilder builds and returns an autodiscover provider func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { cfgwarn.Beta("The docker autodiscover is beta") + + errWrap := func(err error) error { + return errors.Wrap(err, "error setting up docker autodiscover provider") + } + config := defaultConfig() err := c.Unpack(&config) if err != nil { - return nil, err + return nil, errWrap(err) } watcher, err := docker.NewWatcher(config.Host, config.TLS, false) if err != nil { - return nil, err + return nil, errWrap(err) } mapper, err := template.NewConfigMapper(config.Templates) if err != nil { - return nil, err + return nil, errWrap(err) + } + if len(mapper) == 0 && !config.Hints.Enabled() { + return nil, errWrap(fmt.Errorf("no configs or hints defined for autodiscover provider")) } builders, err := autodiscover.NewBuilders(config.Builders, config.Hints) if err != nil { - return nil, err + return nil, errWrap(err) } appenders, err := autodiscover.NewAppenders(config.Appenders) if err != nil { - return nil, err + return nil, errWrap(err) } start := watcher.ListenStart() stop := watcher.ListenStop() if err := watcher.Start(); err != nil { - return nil, err + return nil, errWrap(err) } return &Provider{ diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go index ccba884d..fd5c4f6f 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/elastic/beats/libbeat/autodiscover/template" + "github.com/gofrs/uuid" "github.com/stretchr/testify/assert" @@ -45,6 +47,9 @@ func TestDockerStart(t *testing.T) { bus := bus.New("test") config := defaultConfig() config.CleanupTimeout = 0 + + s := &template.MapperSettings{nil, nil} + config.Templates = *s provider, err := AutodiscoverBuilder(bus, UUID, common.MustNewConfigFrom(config)) if err != nil { t.Fatal(err) diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/jolokia/jolokia.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/jolokia/jolokia.go index 728040d3..f938e4a3 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/jolokia/jolokia.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/jolokia/jolokia.go @@ -18,7 +18,10 @@ package jolokia import ( + "fmt" + "github.com/gofrs/uuid" + "github.com/pkg/errors" "github.com/elastic/beats/libbeat/autodiscover" "github.com/elastic/beats/libbeat/autodiscover/template" @@ -50,10 +53,14 @@ type Provider struct { // AutodiscoverBuilder builds a Jolokia Discovery autodiscover provider, it fails if // there is some problem with the configuration func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { + errWrap := func(err error) error { + return errors.Wrap(err, "error setting up jolokia autodiscover provider") + } + config := defaultConfig() err := c.Unpack(&config) if err != nil { - return 
nil, err + return nil, errWrap(err) } discovery := &Discovery{ @@ -63,17 +70,20 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis mapper, err := template.NewConfigMapper(config.Templates) if err != nil { - return nil, err + return nil, errWrap(err) + } + if len(mapper) == 0 { + return nil, errWrap(fmt.Errorf("no configs defined for autodiscover provider")) } builders, err := autodiscover.NewBuilders(config.Builders, nil) if err != nil { - return nil, err + return nil, errWrap(err) } appenders, err := autodiscover.NewAppenders(config.Appenders) if err != nil { - return nil, err + return nil, errWrap(err) } return &Provider{ diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go index 486ff3bc..d7acea09 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/config.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package kubernetes import ( diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go index 98c729c3..4ee993eb 100644 --- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go +++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package kubernetes import ( @@ -22,6 +24,7 @@ import ( "time" "github.com/gofrs/uuid" + "github.com/pkg/errors" "github.com/elastic/beats/libbeat/autodiscover" "github.com/elastic/beats/libbeat/autodiscover/builder" @@ -54,46 +57,57 @@ type Provider struct { // AutodiscoverBuilder builds and returns an autodiscover provider func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodiscover.Provider, error) { cfgwarn.Beta("The kubernetes autodiscover is beta") + logger := logp.NewLogger("autodiscover") + + errWrap := func(err error) error { + return errors.Wrap(err, "error setting up kubernetes autodiscover provider") + } + config := defaultConfig() err := c.Unpack(&config) if err != nil { - return nil, err + return nil, errWrap(err) } client, err := kubernetes.GetKubernetesClient(config.KubeConfig) if err != nil { - return nil, err + return nil, errWrap(err) } metagen, err := kubernetes.NewMetaGenerator(c) if err != nil { - return nil, err + return nil, errWrap(err) } config.Host = kubernetes.DiscoverKubernetesNode(config.Host, kubernetes.IsInCluster(config.KubeConfig), client) + logger.Debugf("Initializing a new Kubernetes watcher using host: %v", config.Host) + watcher, err := kubernetes.NewWatcher(client, &kubernetes.Pod{}, kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, Node: config.Host, Namespace: config.Namespace, }) if err != nil { - return nil, fmt.Errorf("kubernetes: Couldn't create watcher for %T due to error %+v", &kubernetes.Pod{}, err) + return nil, errWrap(fmt.Errorf("couldn't create watcher for %T due to error %+v", &kubernetes.Pod{}, err)) } mapper, err := template.NewConfigMapper(config.Templates) if err != nil { - return nil, err + return nil, errWrap(err) + } + if len(mapper) == 0 && !config.Hints.Enabled() { + 
return nil, errWrap(fmt.Errorf("no configs or hints defined for autodiscover provider"))
+	}

 	builders, err := autodiscover.NewBuilders(config.Builders, config.Hints)
 	if err != nil {
-		return nil, err
+		return nil, errWrap(err)
 	}

 	appenders, err := autodiscover.NewAppenders(config.Appenders)
 	if err != nil {
-		return nil, err
+		return nil, errWrap(err)
 	}

 	p := &Provider{
@@ -105,28 +119,53 @@ func AutodiscoverBuilder(bus bus.Bus, uuid uuid.UUID, c *common.Config) (autodis
 		appenders: appenders,
 		metagen:   metagen,
 		watcher:   watcher,
-		logger:    logp.NewLogger("kubernetes"),
+		logger:    logger,
 	}

 	watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{
-		AddFunc: func(obj interface{}) {
-			p.logger.Debugf("Watcher Pod add: %+v", obj)
-			p.emit(obj.(*kubernetes.Pod), "start")
-		},
-		UpdateFunc: func(obj interface{}) {
-			p.logger.Debugf("Watcher Pod update: %+v", obj)
-			p.emit(obj.(*kubernetes.Pod), "stop")
-			p.emit(obj.(*kubernetes.Pod), "start")
-		},
-		DeleteFunc: func(obj interface{}) {
-			p.logger.Debugf("Watcher Pod delete: %+v", obj)
-			time.AfterFunc(config.CleanupTimeout, func() { p.emit(obj.(*kubernetes.Pod), "stop") })
-		},
+		AddFunc:    p.handleAdd,
+		UpdateFunc: p.handleUpdate,
+		DeleteFunc: p.handleDelete,
 	})

 	return p, nil
 }

+// handleAdd emits a start event for the given pod.
+func (p *Provider) handleAdd(obj interface{}) {
+	p.logger.Debugf("Watcher Pod add: %+v", obj)
+	p.emit(obj.(*kubernetes.Pod), "start")
+}
+
+// handleUpdate emits events for a given pod depending on its state. If the pod is
+// terminating, a stop event is scheduled; if not, stop and start events are sent
+// sequentially to recreate the resources associated with the pod.
+func (p *Provider) handleUpdate(obj interface{}) {
+	pod := obj.(*kubernetes.Pod)
+	if pod.GetObjectMeta().GetDeletionTimestamp() != nil {
+		p.logger.Debugf("Watcher Pod update (terminating): %+v", obj)
+		// Pod is terminating, so don't reload its configuration and ignore the event.
+		// If some container is still running, we will receive more events when the
+		// containers terminate.
+		for _, container := range pod.Status.ContainerStatuses {
+			if container.State.Running != nil {
+				return
+			}
+		}
+		time.AfterFunc(p.config.CleanupTimeout, func() { p.emit(pod, "stop") })
+	} else {
+		p.logger.Debugf("Watcher Pod update: %+v", obj)
+		p.emit(pod, "stop")
+		p.emit(pod, "start")
+	}
+}
+
+// handleDelete emits a stop event for the given pod.
+func (p *Provider) handleDelete(obj interface{}) {
+	p.logger.Debugf("Watcher Pod delete: %+v", obj)
+	time.AfterFunc(p.config.CleanupTimeout, func() { p.emit(obj.(*kubernetes.Pod), "stop") })
+}
+
 // Start for Runner interface.
 func (p *Provider) Start() {
 	if err := p.watcher.Start(); err != nil {
@@ -159,9 +198,14 @@ func (p *Provider) emitEvents(pod *kubernetes.Pod, flag string, containers []kub
 	containerIDs := map[string]string{}
 	runtimes := map[string]string{}
 	for _, c := range containerstatuses {
-		cid, runtime := kubernetes.ContainerIDWithRuntime(c)
-		containerIDs[c.Name] = cid
-		runtimes[c.Name] = runtime
+		// If the container is not being stopped, add it only if it is in the running state.
+		// This makes sure that we don't keep tailing init container logs after they have stopped.
+		// Emit the event in case the pod is being stopped.
+		if flag == "stop" || c.State.Running != nil {
+			cid, runtime := kubernetes.ContainerIDWithRuntime(c)
+			containerIDs[c.Name] = cid
+			runtimes[c.Name] = runtime
+		}
 	}

 	// Emit container and port information
diff --git a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go
index f2fffb17..796c6d83 100644
--- a/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go
+++ b/vendor/github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go
@@ -192,6 +192,9 @@ func TestEmitEvent(t *testing.T) {
 				{
 					Name:        name,
 					ContainerID: containerID,
+					State: v1.ContainerState{
+						Running: &v1.ContainerStateRunning{},
+					},
 				},
 			},
 		},
diff --git a/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go b/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go
index 5e1fe15e..f07d2f0a 100644
--- a/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go
+++ b/vendor/github.com/elastic/beats/libbeat/beat/pipeline.go
@@ -107,6 +107,9 @@ type ProcessingConfig struct {
 	// the pipeline processors.
 	Processor ProcessorList

+	// KeepNull determines whether published events will keep null values or omit them.
+	KeepNull bool
+
 	// Private contains additional information to be passed to the processing
 	// pipeline builder.
 	Private interface{}
diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go
index fec526d1..1131bc56 100644
--- a/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go
+++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/beat.go
@@ -147,6 +147,11 @@ func initRand() {
 // instance.
 // XXX Move this as a *Beat method?
 func Run(settings Settings, bt beat.Creator) error {
+	err := setUmaskWithSettings(settings)
+	if err != nil && err != errNotImplemented {
+		return errw.Wrap(err, "could not set umask")
+	}
+
 	name := settings.Name
 	idxPrefix := settings.IndexPrefix
 	version := settings.Version
@@ -376,6 +381,18 @@ func (b *Beat) launch(settings Settings, bt beat.Creator) error {
 	svc.BeforeRun()
 	defer svc.Cleanup()

+	// Start the API server before the seccomp lockdown. We do this so we can create the unix socket
+	// and set the appropriate permissions on the socket file without having to whitelist anything
+	// that would be set at runtime.
+ if b.Config.HTTP.Enabled() { + s, err := api.NewWithDefaultRoutes(logp.NewLogger(""), b.Config.HTTP, monitoring.GetNamespace) + if err != nil { + return errw.Wrap(err, "could not start the HTTP server for the API") + } + s.Start() + defer s.Stop() + } + if err = seccomp.LoadFilter(b.Config.Seccomp); err != nil { return err } @@ -385,22 +402,12 @@ func (b *Beat) launch(settings Settings, bt beat.Creator) error { return err } - monitoringCfg, reporterSettings, err := monitoring.SelectConfig(b.Config.MonitoringBeatConfig) + r, err := b.setupMonitoring(settings) if err != nil { return err } - - if monitoringCfg.Enabled() { - settings := report.Settings{ - DefaultUsername: settings.Monitoring.DefaultUsername, - Format: reporterSettings.Format, - ClusterUUID: reporterSettings.ClusterUUID, - } - reporter, err := report.New(b.Info, settings, monitoringCfg, b.Config.Output) - if err != nil { - return err - } - defer reporter.Stop() + if r != nil { + defer r.Stop() } if b.Config.MetricLogging == nil || b.Config.MetricLogging.Enabled() { @@ -421,10 +428,6 @@ func (b *Beat) launch(settings Settings, bt beat.Creator) error { logp.Info("%s start running.", b.Info.Beat) - if b.Config.HTTP.Enabled() { - api.Start(b.Config.HTTP) - } - // Launch config manager b.ConfigManager.Start() defer b.ConfigManager.Stop() @@ -866,6 +869,41 @@ func (b *Beat) clusterUUIDFetchingCallback() (elasticsearch.ConnectCallback, err return callback, nil } +func (b *Beat) setupMonitoring(settings Settings) (report.Reporter, error) { + monitoringCfg, reporterSettings, err := monitoring.SelectConfig(b.Config.MonitoringBeatConfig) + if err != nil { + return nil, err + } + + monitoringClusterUUID, err := monitoring.GetClusterUUID(monitoringCfg) + if err != nil { + return nil, err + } + + // Expose monitoring.cluster_uuid in state API + if monitoringClusterUUID != "" { + stateRegistry := monitoring.GetNamespace("state").GetRegistry() + monitoringRegistry := stateRegistry.NewRegistry("monitoring") + clusterUUIDRegVar := monitoring.NewString(monitoringRegistry, "cluster_uuid") + clusterUUIDRegVar.Set(monitoringClusterUUID) + } + + if monitoring.IsEnabled(monitoringCfg) { + settings := report.Settings{ + DefaultUsername: settings.Monitoring.DefaultUsername, + Format: reporterSettings.Format, + ClusterUUID: monitoringClusterUUID, + } + reporter, err := report.New(b.Info, settings, monitoringCfg, b.Config.Output) + if err != nil { + return nil, err + } + return reporter, nil + } + + return nil, nil +} + // handleError handles the given error by logging it and then returning the // error. If the err is nil or is a GracefulExit error then the method will // return nil without logging anything. 
@@ -1021,3 +1059,10 @@ func initPaths(cfg *common.Config) error { } return nil } + +func setUmaskWithSettings(settings Settings) error { + if settings.Umask != nil { + return setUmask(*settings.Umask) + } + return setUmask(0027) // 0640 for files | 0750 for dirs +} diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports_common.go similarity index 86% rename from vendor/github.com/elastic/beats/libbeat/cmd/instance/imports.go rename to vendor/github.com/elastic/beats/libbeat/cmd/instance/imports_common.go index 240eca87..6b78ffc7 100644 --- a/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports.go +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports_common.go @@ -19,15 +19,11 @@ package instance import ( _ "github.com/elastic/beats/libbeat/autodiscover/appenders/config" // Register autodiscover appenders - _ "github.com/elastic/beats/libbeat/autodiscover/providers/docker" // Register autodiscover providers _ "github.com/elastic/beats/libbeat/autodiscover/providers/jolokia" - _ "github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes" _ "github.com/elastic/beats/libbeat/monitoring/report/elasticsearch" // Register default monitoring reporting _ "github.com/elastic/beats/libbeat/processors/actions" // Register default processors. _ "github.com/elastic/beats/libbeat/processors/add_cloud_metadata" - _ "github.com/elastic/beats/libbeat/processors/add_docker_metadata" _ "github.com/elastic/beats/libbeat/processors/add_host_metadata" - _ "github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata" _ "github.com/elastic/beats/libbeat/processors/add_locale" _ "github.com/elastic/beats/libbeat/processors/add_observer_metadata" _ "github.com/elastic/beats/libbeat/processors/add_process_metadata" diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports_docker.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports_docker.go new file mode 100644 index 00000000..faca620d --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/imports_docker.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
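The default umask of 0027 clears group-write and all world bits, so files requested as 0666 end up 0640 and directories requested as 0777 end up 0750. A sketch of how a custom Beat could override it through the new Settings field; the beat name and the creator are illustrative stand-ins:

package main

import (
	"errors"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/cmd/instance"
	"github.com/elastic/beats/libbeat/common"
)

// newBeater stands in for the custom Beat's constructor (hypothetical).
var newBeater beat.Creator = func(b *beat.Beat, cfg *common.Config) (beat.Beater, error) {
	return nil, errors.New("illustrative only")
}

func main() {
	umask := 0022 // 0666 &^ 0022 = 0644 for files, 0777 &^ 0022 = 0755 for dirs
	err := instance.Run(instance.Settings{
		Name:  "custombeat", // illustrative name
		Umask: &umask,       // nil keeps the new default of 0027
	}, newBeater)
	if err != nil {
		panic(err)
	}
}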
+ +// +build linux darwin windows + +package instance + +import ( + _ "github.com/elastic/beats/libbeat/autodiscover/providers/docker" // Register autodiscover providers + _ "github.com/elastic/beats/libbeat/autodiscover/providers/kubernetes" + _ "github.com/elastic/beats/libbeat/processors/add_docker_metadata" + _ "github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata" +) diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/settings.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/settings.go index 7ada3870..13880955 100644 --- a/vendor/github.com/elastic/beats/libbeat/cmd/instance/settings.go +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/settings.go @@ -43,4 +43,6 @@ type Settings struct { ILM ilm.SupportFactory Processing processing.SupportFactory + + Umask *int } diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/umask_other.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/umask_other.go new file mode 100644 index 00000000..e3af909f --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/umask_other.go @@ -0,0 +1,32 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !windows + +package instance + +import ( + "errors" + "syscall" +) + +var errNotImplemented = errors.New("not implemented on platform") + +func setUmask(newmask int) error { + syscall.Umask(newmask) + return nil // the umask syscall always succeeds: http://man7.org/linux/man-pages/man2/umask.2.html#RETURN_VALUE +} diff --git a/vendor/github.com/elastic/beats/libbeat/cmd/instance/umask_windows.go b/vendor/github.com/elastic/beats/libbeat/cmd/instance/umask_windows.go new file mode 100644 index 00000000..e5288630 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/cmd/instance/umask_windows.go @@ -0,0 +1,27 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package instance + +import "errors" + +var errNotImplemented = errors.New("not implemented on windows") + +func setUmask(newmask int) error { + // No way to set umask on Windows + return errNotImplemented +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/config.go b/vendor/github.com/elastic/beats/libbeat/common/config.go index b1d6eadd..7277d364 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/config.go +++ b/vendor/github.com/elastic/beats/libbeat/common/config.go @@ -265,7 +265,7 @@ func (c *Config) PrintDebugf(msg string, params ...interface{}) { } } - debugStr := configDebugString(c, filtered) + debugStr := DebugString(c, filtered) if debugStr != "" { configDebugf(selector, "%s\n%s", fmt.Sprintf(msg, params...), debugStr) } @@ -358,7 +358,9 @@ func (ns *ConfigNamespace) IsSet() bool { return ns.config != nil } -func configDebugString(c *Config, filterPrivate bool) string { +// DebugString prints a human readable representation of the underlying config using +// JSON formatting. +func DebugString(c *Config, filterPrivate bool) string { var bufs []string if c.IsDict() { diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/client.go b/vendor/github.com/elastic/beats/libbeat/common/docker/client.go index ab223686..a86edcfc 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/docker/client.go +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/client.go @@ -15,52 +15,39 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package docker import ( "net/http" "os" - "github.com/docker/docker/api" - "github.com/docker/docker/api/types/versions" "github.com/docker/docker/client" - "golang.org/x/net/context" "github.com/elastic/beats/libbeat/logp" ) -// NewClient builds and returns a new Docker client -// It uses version 1.30 by default, and negotiates it with the server so it is downgraded if 1.30 is too high +// NewClient builds and returns a new Docker client. On the first request the +// client will negotiate the API version with the server unless +// DOCKER_API_VERSION is set in the environment. 
func NewClient(host string, httpClient *http.Client, httpHeaders map[string]string) (*client.Client, error) { + log := logp.NewLogger("docker") + + opts := []client.Opt{ + client.WithHost(host), + client.WithHTTPClient(httpClient), + client.WithHTTPHeaders(httpHeaders), + } + version := os.Getenv("DOCKER_API_VERSION") - if version == "" { - version = api.DefaultVersion + if version != "" { + log.Debugf("Docker client will use API version %v as set by the DOCKER_API_VERSION environment variable.", version) + opts = append(opts, client.WithVersion(version)) + } else { + log.Debug("Docker client will negotiate the API version on the first request.") + opts = append(opts, client.WithAPIVersionNegotiation()) } - c, err := client.NewClient(host, version, httpClient, nil) - if err != nil { - return c, err - } - - if os.Getenv("DOCKER_API_VERSION") == "" { - logp.Debug("docker", "Negotiating client version") - ping, err := c.Ping(context.Background()) - if err != nil { - logp.Debug("docker", "Failed to perform ping: %s", err) - } - - // try a really old version, before versioning headers existed - if ping.APIVersion == "" { - ping.APIVersion = "1.22" - } - - // if server version is lower than the client version, downgrade - if versions.LessThan(ping.APIVersion, version) { - c.UpdateClientVersion(ping.APIVersion) - } - } - - logp.Debug("docker", "Client version set to %s", c.ClientVersion()) - - return c, nil + return client.NewClientWithOpts(opts...) } diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go b/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go index a9a93a0f..14268e78 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/client_test.go @@ -16,6 +16,7 @@ // under the License. // +build integration +// +build linux darwin windows package docker diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go index fb862957..ef01f263 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
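With the NewClient rewrite above, version negotiation is lazy: nothing is sent to the daemon until the first API call, and DOCKER_API_VERSION pins the version explicitly. A sketch of both paths, assuming the vendored moby client tolerates nil for the optional HTTP client and headers as its option helpers suggest; the host is the usual local daemon socket:

package main

import (
	"fmt"
	"os"

	"github.com/elastic/beats/libbeat/common/docker"
)

func main() {
	// Pin the API version explicitly; NewClient will then skip negotiation.
	os.Setenv("DOCKER_API_VERSION", "1.30")

	// nil httpClient and httpHeaders fall back to the client's defaults.
	c, err := docker.NewClient("unix:///var/run/docker.sock", nil, nil)
	if err != nil {
		panic(err)
	}
	// Prints "1.30" when pinned; without the variable the version is
	// negotiated on the first request instead.
	fmt.Println(c.ClientVersion())
}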
+// +build linux darwin windows + package docker import ( @@ -132,6 +134,12 @@ func NewWatcher(host string, tls *TLSConfig, storeShortID bool) (Watcher, error) return nil, err } + // Extra check to confirm that Docker is available + _, err = client.Info(context.Background()) + if err != nil { + return nil, err + } + return NewWatcherWithClient(client, 60*time.Second, storeShortID) } @@ -300,13 +308,18 @@ func (w *watcher) watch() { case err := <-errors: // Restart watch call - logp.Err("Error watching for docker events: %v", err) + if err == context.DeadlineExceeded { + logp.Info("Context deadline exceeded for docker request, restarting watch call") + } else { + logp.Err("Error watching for docker events: %v", err) + } + time.Sleep(1 * time.Second) break WATCH case <-tickChan.C: if time.Since(w.lastWatchReceivedEventTime) > dockerEventsWatchPityTimerTimeout { - logp.Info("No events received withing %s, restarting watch call", dockerEventsWatchPityTimerTimeout) + logp.Info("No events received within %s, restarting watch call", dockerEventsWatchPityTimerTimeout) time.Sleep(1 * time.Second) break WATCH } diff --git a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go index 98f13b2d..faed9444 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/docker/watcher_test.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package docker import ( diff --git a/vendor/github.com/elastic/beats/libbeat/common/event.go b/vendor/github.com/elastic/beats/libbeat/common/event.go index c2565c4c..d30136a6 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/event.go +++ b/vendor/github.com/elastic/beats/libbeat/common/event.go @@ -39,13 +39,30 @@ var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() type Float float64 -// ConvertToGenericEvent normalizes the types contained in the given MapStr. +// EventConverter is used to convert MapStr objects for publishing +type EventConverter interface { + Convert(m MapStr) MapStr +} + +// GenericEventConverter is used to normalize MapStr objects for publishing +type GenericEventConverter struct { + keepNull bool +} + +// NewGenericEventConverter creates an EventConverter with the given configuration options +func NewGenericEventConverter(keepNull bool) *GenericEventConverter { + return &GenericEventConverter{ + keepNull: keepNull, + } +} + +// Convert normalizes the types contained in the given MapStr. // // Nil values in maps are dropped during the conversion. Any unsupported types // that are found in the MapStr are dropped and warnings are logged. -func ConvertToGenericEvent(m MapStr) MapStr { +func (e *GenericEventConverter) Convert(m MapStr) MapStr { keys := make([]string, 0, 10) - event, errs := normalizeMap(m, keys...) + event, errs := e.normalizeMap(m, keys...) if len(errs) > 0 { logp.Warn("Unsuccessful conversion to generic event: %v errors: %v, "+ "event=%#v", len(errs), errs, m) @@ -56,18 +73,18 @@ func ConvertToGenericEvent(m MapStr) MapStr { // normalizeMap normalizes each element contained in the given map. If an error // occurs during normalization, processing of m will continue, and all errors // are returned at the end. 
-func normalizeMap(m MapStr, keys ...string) (MapStr, []error) { +func (e *GenericEventConverter) normalizeMap(m MapStr, keys ...string) (MapStr, []error) { var errs []error out := make(MapStr, len(m)) for key, value := range m { - v, err := normalizeValue(value, append(keys, key)...) + v, err := e.normalizeValue(value, append(keys, key)...) if len(err) > 0 { errs = append(errs, err...) } // Drop nil values from maps. - if v == nil { + if !e.keepNull && v == nil { if logp.IsDebug(eventDebugSelector) { eventDebugf("Dropped nil value from event where key=%v", joinKeys(append(keys, key)...)) } @@ -81,12 +98,12 @@ func normalizeMap(m MapStr, keys ...string) (MapStr, []error) { } // normalizeMapStrSlice normalizes each individual MapStr. -func normalizeMapStrSlice(maps []MapStr, keys ...string) ([]MapStr, []error) { +func (e *GenericEventConverter) normalizeMapStrSlice(maps []MapStr, keys ...string) ([]MapStr, []error) { var errs []error out := make([]MapStr, 0, len(maps)) for i, m := range maps { - normalizedMap, err := normalizeMap(m, append(keys, strconv.Itoa(i))...) + normalizedMap, err := e.normalizeMap(m, append(keys, strconv.Itoa(i))...) if len(err) > 0 { errs = append(errs, err...) } @@ -98,12 +115,12 @@ func normalizeMapStrSlice(maps []MapStr, keys ...string) ([]MapStr, []error) { // normalizeMapStringSlice normalizes each individual map[string]interface{} and // returns a []MapStr. -func normalizeMapStringSlice(maps []map[string]interface{}, keys ...string) ([]MapStr, []error) { +func (e *GenericEventConverter) normalizeMapStringSlice(maps []map[string]interface{}, keys ...string) ([]MapStr, []error) { var errs []error out := make([]MapStr, 0, len(maps)) for i, m := range maps { - normalizedMap, err := normalizeMap(m, append(keys, strconv.Itoa(i))...) + normalizedMap, err := e.normalizeMap(m, append(keys, strconv.Itoa(i))...) if len(err) > 0 { errs = append(errs, err...) } @@ -114,13 +131,13 @@ func normalizeMapStringSlice(maps []map[string]interface{}, keys ...string) ([]M } // normalizeSlice normalizes each element of the slice and returns a []interface{}. -func normalizeSlice(v reflect.Value, keys ...string) (interface{}, []error) { +func (e *GenericEventConverter) normalizeSlice(v reflect.Value, keys ...string) (interface{}, []error) { var errs []error var sliceValues []interface{} n := v.Len() for i := 0; i < n; i++ { - sliceValue, err := normalizeValue(v.Index(i).Interface(), append(keys, strconv.Itoa(i))...) + sliceValue, err := e.normalizeValue(v.Index(i).Interface(), append(keys, strconv.Itoa(i))...) if len(err) > 0 { errs = append(errs, err...) } @@ -131,7 +148,7 @@ func normalizeSlice(v reflect.Value, keys ...string) (interface{}, []error) { return sliceValues, errs } -func normalizeValue(value interface{}, keys ...string) (interface{}, []error) { +func (e *GenericEventConverter) normalizeValue(value interface{}, keys ...string) (interface{}, []error) { if value == nil { return nil, nil } @@ -202,20 +219,20 @@ func normalizeValue(value interface{}, keys ...string) (interface{}, []error) { case []complex64, []complex128: case Time, []Time: case MapStr: - return normalizeMap(value.(MapStr), keys...) + return e.normalizeMap(value.(MapStr), keys...) case []MapStr: - return normalizeMapStrSlice(value.([]MapStr), keys...) + return e.normalizeMapStrSlice(value.([]MapStr), keys...) case map[string]interface{}: - return normalizeMap(value.(map[string]interface{}), keys...) + return e.normalizeMap(value.(map[string]interface{}), keys...) 
case []map[string]interface{}: - return normalizeMapStringSlice(value.([]map[string]interface{}), keys...) + return e.normalizeMapStringSlice(value.([]map[string]interface{}), keys...) default: v := reflect.ValueOf(value) switch v.Type().Kind() { case reflect.Ptr: // Dereference pointers. - return normalizeValue(followPointer(value), keys...) + return e.normalizeValue(followPointer(value), keys...) case reflect.Bool: return v.Bool(), nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -229,7 +246,7 @@ func normalizeValue(value interface{}, keys ...string) (interface{}, []error) { case reflect.String: return v.String(), nil case reflect.Array, reflect.Slice: - return normalizeSlice(v, keys...) + return e.normalizeSlice(v, keys...) case reflect.Map, reflect.Struct: var m MapStr err := marshalUnmarshal(value, &m) diff --git a/vendor/github.com/elastic/beats/libbeat/common/event_test.go b/vendor/github.com/elastic/beats/libbeat/common/event_test.go index 54f42b8c..1bf622f3 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/event_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/event_test.go @@ -133,8 +133,9 @@ func TestConvertNestedMapStr(t *testing.T) { }, } + g := NewGenericEventConverter(false) for i, test := range tests { - assert.Equal(t, test.Output, ConvertToGenericEvent(test.Input), "Test case %d", i) + assert.Equal(t, test.Output, g.Convert(test.Input), "Test case %d", i) } } @@ -190,8 +191,54 @@ func TestConvertNestedStruct(t *testing.T) { }, } + g := NewGenericEventConverter(false) for i, test := range tests { - assert.EqualValues(t, test.Output, ConvertToGenericEvent(test.Input), "Test case %v", i) + assert.EqualValues(t, test.Output, g.Convert(test.Input), "Test case %v", i) + } +} + +func TestConvertWithNullEmission(t *testing.T) { + logp.TestingSetup() + + type io struct { + Input MapStr + Output MapStr + } + + type String string + type TestStruct struct { + A interface{} + } + + tests := []io{ + { + Input: MapStr{ + "key": MapStr{ + "key1": nil, + }, + }, + Output: MapStr{ + "key": MapStr{ + "key1": nil, + }, + }, + }, + { + Input: MapStr{ + "key": TestStruct{ + A: nil, + }, + }, + Output: MapStr{ + "key": MapStr{ + "A": nil, + }, + }, + }} + + g := NewGenericEventConverter(true) + for i, test := range tests { + assert.EqualValues(t, test.Output, g.Convert(test.Input), "Test case %v", i) } } @@ -201,10 +248,11 @@ func TestNormalizeValue(t *testing.T) { type testCase struct{ in, out interface{} } runTests := func(check func(t *testing.T, a, b interface{}), tests map[string]testCase) { + g := NewGenericEventConverter(false) for name, test := range tests { test := test t.Run(name, func(t *testing.T) { - out, err := normalizeValue(test.in) + out, err := g.normalizeValue(test.in) if err != nil { t.Error(err) return @@ -291,8 +339,9 @@ func TestNormalizeMapError(t *testing.T) { {"uintptr": uintptr(123)}, } + g := NewGenericEventConverter(false) for i, in := range badInputs { - _, errs := normalizeMap(in, "bad.type") + _, errs := g.normalizeMap(in, "bad.type") if assert.Len(t, errs, 1) { t.Log(errs[0]) assert.Contains(t, errs[0].Error(), "key=bad.type", "Test case %v", i) @@ -370,7 +419,8 @@ func TestNormalizeTime(t *testing.T) { } now := time.Now().In(ny) - v, errs := normalizeValue(now, "@timestamp") + g := NewGenericEventConverter(false) + v, errs := g.normalizeValue(now, "@timestamp") if len(errs) > 0 { t.Fatal(errs) } @@ -386,45 +436,51 @@ func TestNormalizeTime(t *testing.T) { // Uses TextMarshaler interface. 
func BenchmarkConvertToGenericEventNetString(b *testing.B) { + g := NewGenericEventConverter(false) for i := 0; i < b.N; i++ { - ConvertToGenericEvent(MapStr{"key": NetString("hola")}) + g.Convert(MapStr{"key": NetString("hola")}) } } // Uses reflection. func BenchmarkConvertToGenericEventMapStringString(b *testing.B) { + g := NewGenericEventConverter(false) for i := 0; i < b.N; i++ { - ConvertToGenericEvent(MapStr{"key": map[string]string{"greeting": "hola"}}) + g.Convert(MapStr{"key": map[string]string{"greeting": "hola"}}) } } // Uses recursion to step into the nested MapStr. func BenchmarkConvertToGenericEventMapStr(b *testing.B) { + g := NewGenericEventConverter(false) for i := 0; i < b.N; i++ { - ConvertToGenericEvent(MapStr{"key": map[string]interface{}{"greeting": "hola"}}) + g.Convert(MapStr{"key": map[string]interface{}{"greeting": "hola"}}) } } // No reflection required. func BenchmarkConvertToGenericEventStringSlice(b *testing.B) { + g := NewGenericEventConverter(false) for i := 0; i < b.N; i++ { - ConvertToGenericEvent(MapStr{"key": []string{"foo", "bar"}}) + g.Convert(MapStr{"key": []string{"foo", "bar"}}) } } // Uses reflection to convert the string array. func BenchmarkConvertToGenericEventCustomStringSlice(b *testing.B) { + g := NewGenericEventConverter(false) type myString string for i := 0; i < b.N; i++ { - ConvertToGenericEvent(MapStr{"key": []myString{"foo", "bar"}}) + g.Convert(MapStr{"key": []myString{"foo", "bar"}}) } } // Pointers require reflection to generically dereference. func BenchmarkConvertToGenericEventStringPointer(b *testing.B) { + g := NewGenericEventConverter(false) val := "foo" for i := 0; i < b.N; i++ { - ConvertToGenericEvent(MapStr{"key": &val}) + g.Convert(MapStr{"key": &val}) } } func TestDeDotJSON(t *testing.T) { diff --git a/vendor/github.com/elastic/beats/libbeat/common/fmtstr/formattimestamp.go b/vendor/github.com/elastic/beats/libbeat/common/fmtstr/formattimestamp.go new file mode 100644 index 00000000..68c22d63 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/fmtstr/formattimestamp.go @@ -0,0 +1,80 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package fmtstr + +import ( + "time" + + "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" +) + +// TimestampFormatString is a wrapper around EventFormatString for the +// common special case where the format expression should only have access to +// shared static fields (typically agent / version) and the event timestamp. +type TimestampFormatString struct { + eventFormatString *EventFormatString + fields common.MapStr +} + +// NewTimestampFormatString creates from the given event format string a +// TimestampFormatString that includes only the given static fields and +// a timestamp. 
+func NewTimestampFormatString( + eventFormatString *EventFormatString, staticFields common.MapStr, +) (*TimestampFormatString, error) { + return &TimestampFormatString{ + eventFormatString: eventFormatString, + fields: staticFields.Clone(), + }, nil +} + +// FieldsForBeat returns a common.MapStr with the given beat name and +// version assigned to their standard field names. +func FieldsForBeat(beat string, version string) common.MapStr { + return common.MapStr{ + // beat object was left in for backward compatibility reasons for older configs. + "beat": common.MapStr{ + "name": beat, + "version": version, + }, + "agent": common.MapStr{ + "name": beat, + "version": version, + }, + // For the Beats that have an observer role + "observer": common.MapStr{ + "name": beat, + "version": version, + }, + } +} + +// Run executes the format string returning a new expanded string or an error +// if execution or event field expansion fails. +func (fs *TimestampFormatString) Run(timestamp time.Time) (string, error) { + placeholderEvent := &beat.Event{ + Fields: fs.fields, + Timestamp: timestamp, + } + return fs.eventFormatString.Run(placeholderEvent) +} + +func (fs *TimestampFormatString) String() string { + return fs.eventFormatString.expression +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/fmtstr/formattimestamp_test.go b/vendor/github.com/elastic/beats/libbeat/common/fmtstr/formattimestamp_test.go new file mode 100644 index 00000000..d194f597 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/common/fmtstr/formattimestamp_test.go @@ -0,0 +1,108 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License.
+ +package fmtstr + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/beats/libbeat/common" +) + +func TestTimestampFormatString(t *testing.T) { + tests := []struct { + title string + format string + staticFields common.MapStr + timestamp time.Time + expected string + }{ + { + "empty string", + "", + nil, + time.Time{}, + "", + }, + { + "no fields configured", + "format string", + nil, + time.Time{}, + "format string", + }, + { + "expand field", + "%{[key]}", + common.MapStr{"key": "value"}, + time.Time{}, + "value", + }, + { + "expand with default", + "%{[key]:default}", + nil, + time.Time{}, + "default", + }, + { + "expand nested field", + "%{[nested.key]}", + common.MapStr{"nested": common.MapStr{"key": "value"}}, + time.Time{}, + "value", + }, + { + "test timestamp formatter", + "%{[key]}: %{+YYYY.MM.dd}", + common.MapStr{"key": "timestamp"}, + time.Date(2015, 5, 1, 20, 12, 34, 0, time.Local), + "timestamp: 2015.05.01", + }, + { + "test timestamp formatter", + "%{[@timestamp]}: %{+YYYY.MM.dd}", + common.MapStr{"key": "timestamp"}, + time.Date(2015, 5, 1, 20, 12, 34, 0, time.Local), + "2015-05-01T20:12:34.000Z: 2015.05.01", + }, + } + + for i, test := range tests { + t.Logf("test(%v): %v", i, test.title) + + efs, err := CompileEvent(test.format) + if err != nil { + t.Error(err) + continue + } + + fs, err := NewTimestampFormatString(efs, test.staticFields) + if err != nil { + t.Error(err) + continue + } + + actual, err := fs.Run(test.timestamp) + + assert.NoError(t, err) + assert.Equal(t, test.expected, actual) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go index 4e128153..34be4480 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata.go @@ -110,7 +110,7 @@ func (g *metaGenerator) ResourceMetadata(obj Resource) common.MapStr { // Add controller metadata if present if g.IncludeCreatorMetadata { for _, ref := range accessor.GetOwnerReferences() { - if *ref.Controller { + if ref.Controller != nil && *ref.Controller { switch ref.Kind { // TODO grow this list as we keep adding more `state_*` metricsets case "Deployment", diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go index df2780b3..241f366b 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/metadata_test.go @@ -21,7 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -37,10 +37,12 @@ func TestPodMetadata(t *testing.T) { True := true False := false tests := []struct { + name string pod *Pod meta common.MapStr }{ { + name: "standalone Pod", pod: &Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a.key": "foo", "a": "bar"}, @@ -62,6 +64,7 @@ func TestPodMetadata(t *testing.T) { }, }, { + name: "Deployment + Replicaset owned Pod", pod: &Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a.key": "foo", "a": "bar"}, @@ -94,6 +97,7 @@ func TestPodMetadata(t *testing.T) { }, }, { + name: "StatefulSet + Deployment owned Pod", pod: &Pod{ ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"a.key": "foo", "a": 
"bar"}, @@ -131,6 +135,29 @@ func TestPodMetadata(t *testing.T) { "statefulset": common.MapStr{"name": "StatefulSet"}, }, }, + { + name: "empty owner reference Pod", + pod: &Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{"a.key": "foo", "a": "bar"}, + UID: types.UID(UID), + OwnerReferences: []metav1.OwnerReference{{}}, + Namespace: test, + }, + Spec: v1.PodSpec{ + NodeName: test, + }, + }, + meta: common.MapStr{ + "pod": common.MapStr{ + "name": "", + "uid": "005f3b90-4b9d-12f8-acf0-31020a840133", + }, + "node": common.MapStr{"name": "test"}, + "namespace": "test", + "labels": common.MapStr{"a": common.MapStr{"value": "bar", "key": "foo"}}, + }, + }, } for _, test := range tests { @@ -142,9 +169,9 @@ func TestPodMetadata(t *testing.T) { metaGen, err := NewMetaGenerator(config) if err != nil { - t.Fatal(err) + t.Fatalf("case %q failed: %s", test.name, err.Error()) } - assert.Equal(t, metaGen.PodMetadata(test.pod), test.meta) + assert.Equal(t, metaGen.PodMetadata(test.pod), test.meta, "test failed for case %q", test.name) } } diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go index 30385e4c..7d6f7113 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/util.go @@ -32,10 +32,22 @@ import ( const defaultNode = "localhost" +func getKubeConfigEnvironmentVariable() string { + envKubeConfig := os.Getenv("KUBECONFIG") + if _, err := os.Stat(envKubeConfig); !os.IsNotExist(err) { + return envKubeConfig + } + return "" +} + // GetKubernetesClient returns a kubernetes client. If inCluster is true, it returns an // in cluster configuration based on the secrets mounted in the Pod. If kubeConfig is passed, // it parses the config file to get the config required to build a client. func GetKubernetesClient(kubeconfig string) (kubernetes.Interface, error) { + if kubeconfig == "" { + kubeconfig = getKubeConfigEnvironmentVariable() + } + cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) if err != nil { return nil, fmt.Errorf("unable to build kube config due to error: %+v", err) @@ -49,13 +61,13 @@ func GetKubernetesClient(kubeconfig string) (kubernetes.Interface, error) { return client, nil } -// IsInCluster takes a kubeconfig file path as input and deduces if Beats is running in cluster or not. +// IsInCluster takes a kubeconfig file path as input and deduces if Beats is running in cluster or not, +// taking into consideration the existence of KUBECONFIG variable func IsInCluster(kubeconfig string) bool { - if kubeconfig == "" { - return true + if kubeconfig != "" || getKubeConfigEnvironmentVariable() != "" { + return false } - - return false + return true } // DiscoverKubernetesNode figures out the Kubernetes node to use. 
diff --git a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go index 15123267..9c278e8e 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go +++ b/vendor/github.com/elastic/beats/libbeat/common/kubernetes/watcher.go @@ -162,7 +162,7 @@ func NewWatcher(client kubernetes.Interface, resource Resource, opts WatchOption objType = "replicaset" case *StatefulSet: - ss := client.AppsV1().ReplicaSets(opts.Namespace) + ss := client.AppsV1().StatefulSets(opts.Namespace) listwatch = &cache.ListWatch{ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { return ss.List(options) diff --git a/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_386.go b/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_386.go index 3f082489..76b24714 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_386.go +++ b/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_386.go @@ -31,6 +31,7 @@ func init() { "_llseek", "access", "brk", + "chmod", "clock_gettime", "clone", "close", diff --git a/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_amd64.go b/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_amd64.go index a131e7f3..92b5fbe4 100644 --- a/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_amd64.go +++ b/vendor/github.com/elastic/beats/libbeat/common/seccomp/policy_linux_amd64.go @@ -34,6 +34,7 @@ func init() { "arch_prctl", "bind", "brk", + "chmod", "clock_gettime", "clone", "close", diff --git a/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc index 954272d2..30af1d7f 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/command-reference.asciidoc @@ -265,7 +265,7 @@ ifndef::no_dashboards[] ["source","sh",subs="attributes"] ----- {beatname_lc} export config -{beatname_lc} export template --es.version {stack-version} --index myindexname +{beatname_lc} export template --es.version {version} --index myindexname {beatname_lc} export dashboard --id="a7b35890-8baa-11e8-9676-ef67484126fb" > dashboard.json ----- endif::no_dashboards[] @@ -274,7 +274,7 @@ ifdef::no_dashboards[] ["source","sh",subs="attributes"] ----- {beatname_lc} export config -{beatname_lc} export template --es.version {stack-version} --index myindexname +{beatname_lc} export template --es.version {version} --index myindexname ----- endif::no_dashboards[] endif::serverless[] @@ -283,7 +283,7 @@ ifdef::serverless[] ["source","sh",subs="attributes"] ----- {beatname_lc} export config -{beatname_lc} export template --es.version {stack-version} --index myindexname +{beatname_lc} export template --es.version {version} --index myindexname {beatname_lc} export function cloudwatch ----- endif::serverless[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc index 3134dd20..4e028cc5 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/communitybeats.asciidoc @@ -50,6 +50,7 @@ https://github.com/cloudronics/fileoccurancebeat[fileoccurencebeat]:: Checks for https://github.com/FStelzer/flowbeat[flowbeat]:: Collects, parses, and indexes 
http://www.sflow.org/index.php[sflow] samples. https://github.com/GeneralElectric/GABeat[gabeat]:: Collects data from Google Analytics Realtime API. https://github.com/GoogleCloudPlatform/gcsbeat[gcsbeat]:: Reads data from https://cloud.google.com/storage/[Google Cloud Storage] buckets. +https://github.com/threatstack/gelfbeat[gelfbeat]:: Collects and parses GELF-encoded UDP messages. https://github.com/josephlewis42/githubbeat[githubbeat]:: Easily monitors GitHub repository activity. https://github.com/hpcugent/gpfsbeat[gpfsbeat]:: Collects GPFS metric and quota information. https://github.com/ullaakut/hackerbeat[hackerbeat]:: Indexes the top stories of HackerNews into an ElasticSearch instance. @@ -80,6 +81,7 @@ https://github.com/mrkschan/nginxbeat[nginxbeat]:: Reads status from Nginx. https://github.com/2Fast2BCn/nginxupstreambeat[nginxupstreambeat]:: Reads upstream status from nginx upstream module. https://github.com/mschneider82/nsqbeat[nsqbeat]:: Reads data from a NSQ topic. https://github.com/eBay/nvidiagpubeat[nvidiagpubeat]:: Uses nvidia-smi to grab metrics of NVIDIA GPUs. +https://github.com/counteractive/o365beat[o365beat]:: Ships Office 365 logs from the O365 Management Activities API https://github.com/aristanetworks/openconfigbeat[openconfigbeat]:: Streams data from http://openconfig.net[OpenConfig]-enabled network devices https://github.com/radoondas/owmbeat[owmbeat]:: Open Weather Map beat to pull weather data from all around the world and store and visualize them in Elastic Stack https://github.com/joehillen/packagebeat[packagebeat]:: Collects information about system packages from package diff --git a/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc index 11c38167..13b1f25a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/dashboards.asciidoc @@ -10,10 +10,10 @@ ////////////////////////////////////////////////////////////////////////// ifdef::has_solutions[] -TIP: For deeper observability into your infrastructure, use the -{infra-guide}/infrastructure-ui-overview.html[Infrastructure] and -{infra-guide}/logs-ui-overview.html[Logs] UIs in {kib}. For setup details, see -the {infra-guide}/index.html[Infrastructure Monitoring Guide]. +TIP: For deeper observability into your infrastructure, you can use the +Metrics app and the Logs app in {kib}. +For more details, see the {metrics-guide}[Metrics Monitoring Guide] +and the {logs-guide}[Logs Monitoring Guide]. endif::has_solutions[] {beatname_uc} comes packaged with example Kibana dashboards, visualizations, @@ -25,7 +25,7 @@ command (as described here) or +{beatname_lc}.yml+ config file. This requires a Kibana endpoint configuration. If you didn't already configure -a Kibana endpoint, see <<{beatname_lc}-configuration,configure {beatname_uc}>>. +a Kibana endpoint, see <<{beatname_lc}-configuration,configure {beatname_uc}>>. Make sure Kibana is running before you perform this step. 
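For reference, a minimal sketch of the Kibana endpoint configuration that this step assumes, as it would appear in +{beatname_lc}.yml+ (the host value below assumes a default local setup):

[source,yaml]
----
setup.kibana:
  host: "localhost:5601"
----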
If you are accessing a secured Kibana instance, make sure you've configured credentials as described in @@ -142,7 +142,7 @@ ifdef::mac_os[] -E output.elasticsearch.hosts=['localhost:9200'] \ -E output.elasticsearch.username={beat_default_index_prefix}_internal \ -E output.elasticsearch.password={pwd} \ - -E setup.kibana.host=localhost:5601 + -E setup.kibana.host=localhost:5601 ---- *brew:* @@ -154,7 +154,7 @@ ifdef::mac_os[] -E output.elasticsearch.hosts=['localhost:9200'] \ -E output.elasticsearch.username={beat_default_index_prefix}_internal \ -E output.elasticsearch.password={pwd} \ - -E setup.kibana.host=localhost:5601 + -E setup.kibana.host=localhost:5601 ---- endif::mac_os[] @@ -168,7 +168,7 @@ ifdef::linux_os[] -E output.elasticsearch.hosts=['localhost:9200'] \ -E output.elasticsearch.username={beat_default_index_prefix}_internal \ -E output.elasticsearch.password={pwd} \ - -E setup.kibana.host=localhost:5601 + -E setup.kibana.host=localhost:5601 ---- endif::linux_os[] @@ -204,7 +204,7 @@ PS > .{backslash}{beatname_lc}.exe setup -e ` -E output.elasticsearch.hosts=['localhost:9200'] ` -E output.elasticsearch.username={beat_default_index_prefix}_internal ` -E output.elasticsearch.password={pwd} ` - -E setup.kibana.host=localhost:5601 + -E setup.kibana.host=localhost:5601 ---- endif::win_os[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc index 372aecd1..556103a2 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/gettingstarted.asciidoc @@ -15,7 +15,7 @@ https://www.elastic.co/cloud/elasticsearch-service/signup[Try out the {ess} for free]. ============== -After installing the {stack}, see the {beats} getting started guides: +After installing the {stack}, see the {beats} getting started guides: * {auditbeat-ref}/auditbeat-getting-started.html[Auditbeat] * {filebeat-ref}/filebeat-getting-started.html[Filebeat] @@ -26,7 +26,6 @@ After installing the {stack}, see the {beats} getting started guides: * {packetbeat-ref}/packetbeat-getting-started.html[Packetbeat] * {winlogbeat-ref}/winlogbeat-getting-started.html[Winlogbeat] -If you're planning to use the -{infra-guide}/infrastructure-ui-overview.html[Infrastructure] and -{infra-guide}/logs-ui-overview.html[Logs] UIs in {kib}, also see the -{infra-guide}/index.html[Infrastructure Monitoring Guide]. +If you're planning to use the Metrics app or the Logs app in {kib}, +also see the {metrics-guide}[Metrics Monitoring Guide] +and the {logs-guide}[Logs Monitoring Guide]. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/http-endpoint.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/http-endpoint.asciidoc index 54e09440..e60a0abb 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/http-endpoint.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/http-endpoint.asciidoc @@ -21,12 +21,23 @@ by default, as you may want to avoid exposing this info. The HTTP endpoint has the following configuration settings: `http.enabled`:: (Optional) Enable the HTTP endpoint. Default is `false`. -`http.host`:: (Optional) Bind to this hostname or IP address. +`http.host`:: (Optional) Bind to this hostname, IP address, unix socket (unix:///var/run/{beatname_lc}.sock) or Windows named pipe (npipe:///{beatname_lc}). It is recommended to use only localhost. Default is `localhost` `http.port`:: (Optional) Port on which the HTTP endpoint will bind. 
Default is `5066`. +`http.named_pipe.user`:: (Optional) User to use to create the named pipe. This option works only on Windows. Defaults to the +current user. +`http.named_pipe.security_descriptor`:: (Optional) Windows Security descriptor string defined in the SDDL format. Defaults to +read and write permissions for the current user. This is the list of paths you can access. For pretty JSON output append ?pretty to the URL. +You can query a unix socket using the `curl` command and the `--unix-socket` flag. + +[source,js] +---- +curl -XGET --unix-socket '/var/run/{beatname_lc}.sock' 'http://unix/stats/?pretty' +---- + [float] === Info diff --git a/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc index 18f2ba2b..5d1309e4 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/index.asciidoc @@ -1,9 +1,11 @@ = Beats Platform Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs include::./version.asciidoc[] +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] + include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :beatname_lc: beatname @@ -14,7 +16,7 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :beat_default_index_prefix: {beatname_lc} :has_ml_jobs: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] @@ -26,10 +28,10 @@ include::./config-file-format.asciidoc[] include::./upgrading.asciidoc[] -include::./highlights.asciidoc[] +include::./release-notes/highlights/highlights.asciidoc[] -include::./breaking.asciidoc[] +include::./release-notes/breaking/breaking.asciidoc[] include::./release.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc index fa358894..fd6d28d5 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-beats.asciidoc @@ -14,21 +14,24 @@ ifdef::apm-server[] endif::[] To monitor {beatname_uc}, make sure monitoring is enabled on your {es} cluster, -then configure the method used to collect {beatname_uc} metrics. You -ifndef::serverless[] -can use one of following methods: -endif::[] -ifdef::serverless[] -can use the following method: -endif::[] +then configure the method used to collect {beatname_uc} metrics. You can use one +of the following methods: -* <> +* <> - Internal +collectors send monitoring data directly to your monitoring cluster. ifndef::serverless[] -* <> +* <> - +{metricbeat} collects monitoring data from your {beatname_uc} instance +and sends it directly to your monitoring cluster. endif::[] +* <> - +Legacy internal collectors send monitoring data to your production cluster. -To learn about monitoring in general, see -{ref}/monitor-elasticsearch-cluster.html[Monitor a cluster]. + +//Commenting out this link temporarily until the general monitoring docs can be +//updated. +//To learn about monitoring in general, see +//{ref}/monitor-elasticsearch-cluster.html[Monitor a cluster].
-- @@ -37,3 +40,5 @@ include::monitoring-internal-collection.asciidoc[] ifndef::serverless[] include::monitoring-metricbeat.asciidoc[] endif::[] + +include::monitoring-internal-collection-legacy.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection-legacy.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection-legacy.asciidoc new file mode 100644 index 00000000..6efd1764 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection-legacy.asciidoc @@ -0,0 +1,38 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by all Elastic Beats. Make sure you keep the +//// descriptions here generic enough to work for all Beats that include +//// this file. When using cross references, make sure that the cross +//// references resolve correctly for any files that include this one. +//// Use the appropriate variables defined in the index.asciidoc file to +//// resolve Beat names: beatname_uc and beatname_lc. +//// Use the following include to pull this content into a doc file: +//// include::../../libbeat/docs/monitoring/monitoring-internal-collection-legacy.asciidoc[] +////////////////////////////////////////////////////////////////////////// + +[role="xpack"] +[[monitoring-internal-collection-legacy]] +== Use legacy internal collection to send monitoring data +++++ +Legacy internal collection (deprecated) +++++ + +deprecated[7.2.0] + +In {beatname_uc} version 7.1 and earlier, you configured internal collectors +that sent monitoring data to the production cluster, which would either index +the data locally, or forward the data to a dedicated monitoring cluster via HTTP +exporters. + +Starting in {beatname_uc} version 7.2, the legacy settings for internal +collection are deprecated and will be removed in version 8.0.0. Instead of +sending monitoring data to your production cluster, it's recommended that you +use the configuration described under +<> to route +monitoring data directly to your monitoring cluster. + +ifndef::serverless[] +Or as an alternative to internal collection, use +<>. +endif::[] + +include::shared-monitor-config-legacy.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc index 3b819c4c..33f464a8 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-internal-collection.asciidoc @@ -11,22 +11,24 @@ [role="xpack"] [[monitoring-internal-collection]] -== Collect {beatname_uc} monitoring data with internal collectors +== Use internal collection to send monitoring data ++++ Internal collection ++++ -The following method involves sending the metrics to the production cluster, -which ultimately routes them to the monitoring cluster. +Use internal collectors to send {beats} monitoring data directly to your +monitoring cluster. ifndef::serverless[] -For an alternative method, see <>. +Or as an alternative to internal collection, use +<>. The benefit of using internal collection +instead of {metricbeat} is that you have fewer pieces of software to install +and maintain. endif::[] -To learn about monitoring in general, see -{ref}/monitor-elasticsearch-cluster.html[Monitor a cluster]. 
- -//TODO: Not sure if these docs need to be updated to be parallel with other -//stack components since this is the old way of configuring monitoring. +//Commenting out this link temporarily until the general monitoring docs can be +//updated. +//To learn about monitoring in general, see +//{ref}/monitor-elasticsearch-cluster.html[Monitor a cluster]. . Create a user that has appropriate authority to send system-level monitoring data to {es}. For example, you can use the built-in +{beat_monitoring_user}+ user or @@ -70,6 +72,28 @@ Stack Monitoring UI. To get a cluster's `cluster_uuid`, call the `GET /` API against that cluster. <2> This setting identifies the hosts and port numbers of {es} nodes that are part of the monitoring cluster. ++ +If you want to use PKI authentication to send monitoring events to +{es}, you must specify a different set of configuration options. For +example: ++ +[source,yaml] +-------------------- +monitoring: + enabled: true + cluster_uuid: PRODUCTION_ES_CLUSTER_UUID + elasticsearch: + hosts: ["https://example.com:9200", "https://example2.com:9200"] + username: "" + ssl: + certificate_authorities: ["/etc/pki/root/ca.pem"] + certificate: "/etc/pki/client/cert.pem" + key: "/etc/pki/client/cert.key" +-------------------- ++ +You must specify the `username` as `""` explicitly so that +the username from the client certificate (`CN`) is used. See +<> for more information about SSL settings. ifndef::serverless[] . Start {beatname_uc}. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-metricbeat.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-metricbeat.asciidoc index b59d8bdf..d8c4c573 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-metricbeat.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/monitoring-metricbeat.asciidoc @@ -1,14 +1,15 @@ [role="xpack"] [[monitoring-metricbeat-collection]] -== Collect {beatname_uc} monitoring data with {metricbeat} +== Use {metricbeat} to send monitoring data [subs="attributes"] ++++ {metricbeat} collection ++++ In 7.3 and later, you can use {metricbeat} to collect data about {beatname_uc} -and ship it to the monitoring cluster, rather than routing it through the -production cluster as described in <>. +and ship it to the monitoring cluster. The benefit of using {metricbeat} instead +of internal collection is that the monitoring agent remains active even if the +{beatname_uc} instance dies. ifeval::["{beatname_lc}"=="metricbeat"] Because you'll be using {metricbeat} to _monitor_ {beatname_uc}, you'll need to @@ -25,8 +26,10 @@ concurrently. If you don't want to run two instances concurrently, use {metricbeat}. endif::[] -To learn about monitoring in general, see -{ref}/monitor-elasticsearch-cluster.html[Monitor a cluster]. +//Commenting out this link temporarily until the general monitoring docs can be +//updated. +//To learn about monitoring in general, see +//{ref}/monitor-elasticsearch-cluster.html[Monitor a cluster]. //NOTE: The tagged regions are re-used in the Stack Overview. @@ -174,7 +177,7 @@ it via HTTPS. For example, use a `hosts` setting like `https://localhost:5066`. If the Elastic {security-features} are enabled, you must also provide a user ID and password so that {metricbeat} can collect metrics successfully: -.. Create a user on the production cluster that has the +.. Create a user on the {es} cluster that has the `remote_monitoring_collector` {ref}/built-in-roles.html[built-in role].
Alternatively, if it's available in your environment, use the `remote_monitoring_user` {ref}/built-in-users.html[built-in user]. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config-legacy.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config-legacy.asciidoc new file mode 100644 index 00000000..d00778f8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config-legacy.asciidoc @@ -0,0 +1,138 @@ +////////////////////////////////////////////////////////////////////////// +//// This content is shared by all Elastic Beats. Make sure you keep the +//// descriptions here generic enough to work for all Beats that include +//// this file. When using cross references, make sure that the cross +//// references resolve correctly for any files that include this one. +//// Use the appropriate variables defined in the index.asciidoc file to +//// resolve Beat names: beatname_uc and beatname_lc. +//// Use the following include to pull this content into a doc file: +//// include::../../libbeat/docs/monitoring/shared-monitor-config-legacy.asciidoc[] +//// Make sure this content appears below a level 2 heading. +////////////////////////////////////////////////////////////////////////// + +[role="xpack"] +[[configuration-monitor-legacy]] +=== Settings for legacy internal collection + +deprecated::[7.2.0,These settings are deprecated and will be removed in version 8.0.0. Instead of sending monitoring data to your production cluster, it's recommended that you use the configuration described under <> to route monitoring data directly to your monitoring cluster.] + +[float] +=== `xpack.monitoring.enabled` deprecated:[7.2] + +The `enabled` config is a boolean setting to enable or disable {monitoring}. +If set to `true`, monitoring is enabled. + +The default value is `false`. + +[float] +=== `xpack.monitoring.elasticsearch` deprecated:[7.2] + +The {es} instances that you want to ship your {beatname_uc} metrics to. This +configuration option contains the following fields: + +[float] +==== `bulk_max_size` + +The maximum number of metrics to bulk in a single {es} bulk API index request. +The default is `50`. For more information, see <>. + +[float] +==== `backoff.init` + +The number of seconds to wait before trying to reconnect to Elasticsearch after +a network error. After waiting `backoff.init` seconds, {beatname_uc} tries to +reconnect. If the attempt fails, the backoff timer is increased exponentially up +to `backoff.max`. After a successful connection, the backoff timer is reset. The +default is 1s. + +[float] +==== `backoff.max` + +The maximum number of seconds to wait before attempting to connect to +Elasticsearch after a network error. The default is 60s. + +[float] +==== `compression_level` + +The gzip compression level. Setting this value to `0` disables compression. The +compression level must be in the range of `1` (best speed) to `9` (best +compression). The default value is `0`. Increasing the compression level +reduces the network usage but increases the CPU usage. + +[float] +==== `headers` + +Custom HTTP headers to add to each request. For more information, see +<>. + +[float] +==== `hosts` + +The list of {es} nodes to connect to. Monitoring metrics are distributed to +these nodes in round-robin order. For more information, see +<>. + +[float] +==== `max_retries` + +The number of times to retry sending the monitoring metrics after a failure.
+After the specified number of retries, the metrics are typically dropped. The +default value is `3`. For more information, see <>. + +[float] +==== `parameters` + +Dictionary of HTTP parameters to pass within the URL with index operations. + +[float] +==== `password` + +The password that {beatname_uc} uses to authenticate with the {es} instances for +shipping monitoring data. + +[float] +==== `metrics.period` + +The time interval (in seconds) when metrics are sent to the {es} cluster. A new +snapshot of {beatname_uc} metrics is generated and scheduled for publishing each +period. The default value is 10 * time.Second. + +[float] +==== `state.period` + +The time interval (in seconds) when state information is sent to the {es} cluster. A new +snapshot of {beatname_uc} state is generated and scheduled for publishing each +period. The default value is 60 * time.Second. + +[float] +==== `protocol` + +The name of the protocol to use when connecting to the {es} cluster. The options +are: `http` or `https`. The default is `http`. If you specify a URL for `hosts`, +however, the value of protocol is overridden by the scheme you specify in the URL. + +[float] +==== `proxy_url` + +The URL of the proxy to use when connecting to the {es} cluster. For more +information, see <>. + +[float] +==== `timeout` + +The HTTP request timeout in seconds for the {es} request. The default is `90`. + +[float] +==== `ssl` + +Configuration options for Transport Layer Security (TLS) or Secure Sockets Layer +(SSL) parameters like the certificate authority (CA) to use for HTTPS-based +connections. If the `ssl` section is missing, the host CAs are used for +HTTPS connections to {es}. For more information, see <>. + +[float] +==== `username` + +The user ID that {beatname_uc} uses to authenticate with the {es} instances for +shipping monitoring data. + diff --git a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config.asciidoc index 2977ad16..2a94d90a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/monitoring/shared-monitor-config.asciidoc @@ -6,13 +6,13 @@ //// Use the appropriate variables defined in the index.asciidoc file to //// resolve Beat names: beatname_uc and beatname_lc. //// Use the following include to pull this content into a doc file: -//// include::../../libbeat/docs/monitoring/configuring.asciidoc[] +//// include::../../libbeat/docs/monitoring/shared-monitor-config.asciidoc[] //// Make sure this content appears below a level 2 heading. ////////////////////////////////////////////////////////////////////////// [role="xpack"] [[configuration-monitor]] -=== Settings for internal monitoring collection +=== Settings for internal collection Use the following settings to configure internal collection when you are not using {metricbeat} to collect monitoring data. @@ -32,12 +32,12 @@ The default value is `false`. The {es} instances that you want to ship your {beatname_uc} metrics to. This configuration option contains the following fields: -==== `bulk_max_size` +===== `bulk_max_size` The maximum number of metrics to bulk in a single {es} bulk API index request. The default is `50`. For more information, see <>. -==== `backoff.init` +===== `backoff.init` The number of seconds to wait before trying to reconnect to Elasticsearch after a network error.
After waiting `backoff.init` seconds, {beatname_uc} tries to @@ -45,79 +45,79 @@ reconnect. If the attempt fails, the backoff timer is increased exponentially up to `backoff.max`. After a successful connection, the backoff timer is reset. The default is 1s. -==== `backoff.max` +===== `backoff.max` The maximum number of seconds to wait before attempting to connect to Elasticsearch after a network error. The default is 60s. -==== `compression_level` +===== `compression_level` The gzip compression level. Setting this value to `0` disables compression. The compression level must be in the range of `1` (best speed) to `9` (best compression). The default value is `0`. Increasing the compression level reduces the network usage but increases the CPU usage. -==== `headers` +===== `headers` Custom HTTP headers to add to each request. For more information, see <>. -==== `hosts` +===== `hosts` The list of {es} nodes to connect to. Monitoring metrics are distributed to these nodes in round robin order. For more information, see <>. -==== `max_retries` +===== `max_retries` The number of times to retry sending the monitoring metrics after a failure. After the specified number of retries, the metrics are typically dropped. The default value is `3`. For more information, see <>. -==== `parameters` +===== `parameters` Dictionary of HTTP parameters to pass within the url with index operations. -==== `password` +===== `password` The password that {beatname_uc} uses to authenticate with the {es} instances for shipping monitoring data. -==== `metrics.period` +===== `metrics.period` The time interval (in seconds) when metrics are sent to the {es} cluster. A new snapshot of {beatname_uc} metrics is generated and scheduled for publishing each period. The default value is 10 * time.Second. -==== `state.period` +===== `state.period` The time interval (in seconds) when state information are sent to the {es} cluster. A new snapshot of {beatname_uc} state is generated and scheduled for publishing each period. The default value is 60 * time.Second. -==== `protocol` +===== `protocol` The name of the protocol to use when connecting to the {es} cluster. The options are: `http` or `https`. The default is `http`. If you specify a URL for `hosts`, however, the value of protocol is overridden by the scheme you specify in the URL. -==== `proxy_url` +===== `proxy_url` The URL of the proxy to use when connecting to the {es} cluster. For more information, see <>. -==== `timeout` +===== `timeout` The HTTP request timeout in seconds for the {es} request. The default is `90`. -==== `ssl` +===== `ssl` Configuration options for Transport Layer Security (TLS) or Secure Sockets Layer (SSL) parameters like the certificate authority (CA) to use for HTTPS-based connections. If the `ssl` section is missing, the host CAs are used for HTTPS connections to {es}. For more information, see <>. -==== `username` +===== `username` The user ID that {beatname_uc} uses to authenticate with the {es} instances for shipping monitoring data. 
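For orientation, the internal-collection settings documented above combine into a configuration block like the following minimal sketch (hosts, credentials, and periods below are placeholder values, not recommendations):

[source,yaml]
----
monitoring:
  enabled: true
  elasticsearch:
    hosts: ["https://monitoring.example.com:9200"] # monitoring cluster nodes, used in round-robin order
    username: remote_monitoring_user               # placeholder credentials
    password: changeme
    metrics.period: 10s                            # matches the documented default
    state.period: 60s
----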
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-cloud.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/output-cloud.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-cloud.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/output-cloud.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc index f2b535a8..e9375fe6 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/outputconfig.asciidoc @@ -25,32 +25,10 @@ security-related configuration options. // end::shared-outputconfig[] -* <> -* <> -* <> -ifndef::no-output-redis[] -* <> -endif::[] -* <> -* <> -* <> +include::outputs-list.asciidoc[tag=outputs-list] ifdef::beat-specific-output-config[] include::{beat-specific-output-config}[] endif::[] -include::outputs/output-elasticsearch.asciidoc[] - -include::outputs/output-logstash.asciidoc[] - -include::outputs/output-kafka.asciidoc[] - -include::outputs/output-redis.asciidoc[] - -include::outputs/output-file.asciidoc[] - -include::outputs/output-console.asciidoc[] - -include::outputs/output-cloud.asciidoc[] - -include::outputs/change-output-codec.asciidoc[] +include::outputs-list.asciidoc[tag=outputs-include] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs-list.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/outputs-list.asciidoc new file mode 100644 index 00000000..e3e16a6d --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/outputs-list.asciidoc @@ -0,0 +1,35 @@ +// TODO: Create script that generates this file. Conditional coding needs to +// be preserved. + +//# tag::outputs-list[] + +* <> +* <> +* <> +ifndef::no-output-redis[] +* <> +endif::[] +* <> +* <> +* <> + +//# end::outputs-list[] + +//# tag::outputs-include[] +include::{libbeat-outputs-dir}/elasticsearch/docs/elasticsearch.asciidoc[] + +include::{libbeat-outputs-dir}/logstash/docs/logstash.asciidoc[] + +include::{libbeat-outputs-dir}/kafka/docs/kafka.asciidoc[] + +include::{libbeat-outputs-dir}/redis/docs/redis.asciidoc[] + +include::{libbeat-outputs-dir}/fileout/docs/fileout.asciidoc[] + +include::{libbeat-outputs-dir}/console/docs/console.asciidoc[] + +include::output-cloud.asciidoc[] + +include::{libbeat-outputs-dir}/codec/docs/codec.asciidoc[] + +//# end::outputs-include[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc index 2ed0c11f..c44c92a7 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/overview.asciidoc @@ -30,11 +30,10 @@ image::./images/beats-platform.png[Beats Platform] To get started, see <>. Want to get up and running quickly with infrastructure metrics monitoring and -centralized log analytics? Try out the -{infra-guide}/infrastructure-ui-overview.html[Infrastructure] and -{infra-guide}/logs-ui-overview.html[Logs] UIs -in {kib}. For setup details, see the {infra-guide}/index.html[Infrastructure -Monitoring Guide]. +centralized log analytics? +Try out the Metrics app and the Logs app in {kib}. +For more details, see the {metrics-guide}[Metrics Monitoring Guide] +and the {logs-guide}[Logs Monitoring Guide]. [float] === Need to capture other kinds of data? 
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors-list.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors-list.asciidoc new file mode 100644 index 00000000..50595a0c --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors-list.asciidoc @@ -0,0 +1,173 @@ +// TODO: Create script that generates this file. Conditional coding needs to +// be preserved. + +//# tag::processors-list[] +ifndef::no_add_cloud_metadata_processor[] +* <> +endif::[] +ifndef::no_add_docker_metadata_processor[] +* <> +endif::[] +ifndef::no_add_fields_processor[] +* <> +endif::[] +ifndef::no_add_host_metadata_processor[] +* <> +endif::[] +ifndef::no_add_kubernetes_metadata_processor[] +* <> +endif::[] +ifndef::no_add_labels_processor[] +* <> +endif::[] +ifndef::no_add_locale_processor[] +* <> +endif::[] +ifndef::no_add_observer_metadata_processor[] +* <> +endif::[] +ifndef::no_add_process_metadata_processor[] +* <> +endif::[] +ifndef::no_add_tags_processor[] +* <> +endif::[] +ifndef::no_community_id_processor[] +* <> +endif::[] +ifndef::no_convert_processor[] +* <> +endif::[] +ifndef::no_decode_base64_field_processor[] +* <> +endif::[] +ifndef::no_decode_cef_processor[] +* <> +endif::[] +ifndef::no_decode_csv_fields_processor[] +* <> +endif::[] +ifndef::no_decode_json_fields_processor[] +* <> +endif::[] +ifndef::no_decompress_gzip_field_processor[] +* <> +endif::[] +ifndef::no_dissect_processor[] +* <> +endif::[] +ifndef::no_dns_processor[] +* <> +endif::[] +ifndef::no_drop_event_processor[] +* <> +endif::[] +ifndef::no_drop_fields_processor[] +* <> +endif::[] +ifndef::no_extract_array_processor[] +* <> +endif::[] +ifndef::no_include_fields_processor[] +* <> +endif::[] +ifndef::no_registered_domain_processor[] +* <> +endif::[] +ifndef::no_rename_processor[] +* <> +endif::[] +ifndef::no_script_processor[] +* <> +endif::[] +ifndef::no_timestamp_processor[] +* <> +endif::[] +//# end::processors-list[] + +//# tag::processors-include[] +ifndef::no_add_cloud_metadata_processor[] +include::{libbeat-processors-dir}/add_cloud_metadata/docs/add_cloud_metadata.asciidoc[] +endif::[] +ifndef::no_add_docker_metadata_processor[] +include::{libbeat-processors-dir}/add_docker_metadata/docs/add_docker_metadata.asciidoc[] +endif::[] +ifndef::no_add_fields_processor[] +include::{libbeat-processors-dir}/actions/docs/add_fields.asciidoc[] +endif::[] +ifndef::no_add_host_metadata_processor[] +include::{libbeat-processors-dir}/add_host_metadata/docs/add_host_metadata.asciidoc[] +endif::[] +ifndef::no_add_kubernetes_metadata_processor[] +include::{libbeat-processors-dir}/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc[] +endif::[] +ifndef::no_add_labels_processor[] +include::{libbeat-processors-dir}/actions/docs/add_labels.asciidoc[] +endif::[] +ifndef::no_add_locale_processor[] +include::{libbeat-processors-dir}/add_locale/docs/add_locale.asciidoc[] +endif::[] +ifndef::no_add_observer_metadata_processor[] +include::{libbeat-processors-dir}/add_observer_metadata/docs/add_observer_metadata.asciidoc[] +endif::[] +ifndef::no_add_process_metadata_processor[] +include::{libbeat-processors-dir}/add_process_metadata/docs/add_process_metadata.asciidoc[] +endif::[] +ifndef::no_add_tags_processor[] +include::{libbeat-processors-dir}/actions/docs/add_tags.asciidoc[] +endif::[] +ifndef::no_community_id_processor[] +include::{libbeat-processors-dir}/communityid/docs/communityid.asciidoc[] +endif::[] +ifndef::no_convert_processor[] 
+include::{libbeat-processors-dir}/convert/docs/convert.asciidoc[] +endif::[] +ifndef::no_decode_base64_field_processor[] +include::{libbeat-processors-dir}/actions/docs/decode_base64_field.asciidoc[] +endif::[] +ifndef::no_decode_cef_processor[] +include::{x-filebeat-processors-dir}/decode_cef/docs/decode_cef.asciidoc[] +endif::[] +ifndef::no_decode_csv_fields_processor[] +include::{libbeat-processors-dir}/decode_csv_fields/docs/decode_csv_fields.asciidoc[] +endif::[] +ifndef::no_decode_json_fields_processor[] +include::{libbeat-processors-dir}/actions/docs/decode_json_fields.asciidoc[] +endif::[] +ifndef::no_decompress_gzip_field_processor[] +include::{libbeat-processors-dir}/actions/docs/decompress_gzip_field.asciidoc[] +endif::[] +ifndef::no_dissect_processor[] +include::{libbeat-processors-dir}/dissect/docs/dissect.asciidoc[] +endif::[] +ifndef::no_dns_processor[] +include::{libbeat-processors-dir}/dns/docs/dns.asciidoc[] +endif::[] +ifndef::no_drop_event_processor[] +include::{libbeat-processors-dir}/actions/docs/drop_event.asciidoc[] +endif::[] +ifndef::no_drop_fields_processor[] +include::{libbeat-processors-dir}/actions/docs/drop_fields.asciidoc[] +endif::[] +ifndef::no_extract_array_processor[] +include::{libbeat-processors-dir}/extract_array/docs/extract_array.asciidoc[] +endif::[] +ifndef::no_include_fields_processor[] +include::{libbeat-processors-dir}/actions/docs/include_fields.asciidoc[] +endif::[] +ifndef::no_registered_domain_processor[] +include::{libbeat-processors-dir}/registered_domain/docs/registered_domain.asciidoc[] +endif::[] +ifndef::no_rename_processor[] +include::{libbeat-processors-dir}/actions/docs/rename.asciidoc[] +endif::[] +ifndef::no_script_processor[] +include::{libbeat-processors-dir}/script/docs/script.asciidoc[] +endif::[] +ifndef::no_timestamp_processor[] +include::{libbeat-processors-dir}/timestamp/docs/timestamp.asciidoc[] +endif::[] + +//# end::processors-include[] + + diff --git a/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc index 12807d04..87dd305a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/processors-using.asciidoc @@ -194,41 +194,7 @@ endif::[] The supported processors are: - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> -ifdef::has_decode_cef_processor[] -* <> -endif::[] -ifdef::has_decode_csv_fields_processor[] - * <> -endif::[] - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> - * <> -ifdef::has_script_processor[] - * <> -endif::[] -ifdef::has_timestamp_processor[] - * <> -endif::[] +include::processors-list.asciidoc[tag=processors-list] [[conditions]] ==== Conditions @@ -501,1533 +467,4 @@ not: status: OK ------ -[[add-cloud-metadata]] -=== Add cloud metadata - -The `add_cloud_metadata` processor enriches each event with instance metadata -from the machine's hosting provider. At startup it will query a list of hosting -providers and cache the instance metadata. - -The following cloud providers are supported: - -- Amazon Web Services (AWS) -- Digital Ocean -- Google Compute Engine (GCE) -- https://www.qcloud.com/?lang=en[Tencent Cloud] (QCloud) -- Alibaba Cloud (ECS) -- Azure Virtual Machine -- Openstack Nova - -The Alibaba Cloud and Tencent Cloud providers are disabled by default, because -they require access to a remote host. The `providers` setting allows users to -select a list of default providers to query. - -The simple configuration below enables the processor. - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_cloud_metadata: ~ -------------------------------------------------------------------------------- - -The `add_cloud_metadata` processor has three optional configuration settings. -The first one is `timeout`, which specifies the maximum amount of time to wait -for a successful response when detecting the hosting provider. The default -timeout value is `3s`. - -If a timeout occurs then no instance metadata will be added to the events. This -makes it possible to enable this processor for all your deployments (in the -cloud or on-premise). - -The second optional setting is `providers`. The `providers` setting accepts a -list of cloud provider names to be used. If `providers` is not configured, then -all providers that do not access a remote endpoint are enabled by default. - -List of names the `providers` setting supports: -- "alibaba", or "ecs" for the Alibaba Cloud provider (disabled by default). -- "azure" for Azure Virtual Machine (enabled by default). -- "digitalocean" for Digital Ocean (enabled by default). -- "aws", or "ec2" for Amazon Web Services (enabled by default). -- "gcp" for Google Compute Engine (enabled by default). -- "openstack", or "nova" for Openstack Nova (enabled by default). -- "tencent", or "qcloud" for Tencent Cloud (disabled by default). - -The third optional configuration setting is `overwrite`. When `overwrite` is -`true`, `add_cloud_metadata` overwrites existing `cloud.*` fields (`false` by -default).
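Putting these settings together, a minimal sketch (the provider names come from the list above; adjust them to your environment) that opts in to the Tencent Cloud provider and allows overwriting any pre-existing `cloud.*` fields might look like this:

[source,yaml]
----
processors:
- add_cloud_metadata:
    overwrite: true   # replace any cloud.* fields already present on the event
    providers:        # query only these providers
      - aws
      - gcp
      - tencent       # disabled by default; enabled here explicitly
----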
-The metadata that is added to events varies by hosting provider. Below are -examples for each of the supported providers. - -_AWS_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "account.id": "123456789012", - "availability_zone": "us-east-1c", - "instance.id": "i-4e123456", - "machine.type": "t2.medium", - "image.id": "ami-abcd1234", - "provider": "aws", - "region": "us-east-1" - } -} -------------------------------------------------------------------------------- - -_Digital Ocean_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "instance.id": "1234567", - "provider": "digitalocean", - "region": "nyc2" - } -} -------------------------------------------------------------------------------- - -_GCP_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "availability_zone": "us-east1-b", - "instance.id": "1234556778987654321", - "machine.type": "f1-micro", - "project.id": "my-dev", - "provider": "gcp" - } -} -------------------------------------------------------------------------------- - -_Tencent Cloud_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "availability_zone": "gz-azone2", - "instance.id": "ins-qcloudv5", - "provider": "qcloud", - "region": "china-south-gz" - } -} -------------------------------------------------------------------------------- - -_Alibaba Cloud_ - -This metadata is only available when VPC is selected as the network type of the -ECS instance. - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "availability_zone": "cn-shenzhen", - "instance.id": "i-wz9g2hqiikg0aliyun2b", - "provider": "ecs", - "region": "cn-shenzhen-a" - } -} -------------------------------------------------------------------------------- - -_Azure Virtual Machine_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "provider": "az", - "instance.id": "04ab04c3-63de-4709-a9f9-9ab8c0411d5e", - "instance.name": "test-az-vm", - "machine.type": "Standard_D3_v2", - "region": "eastus2" - } -} -------------------------------------------------------------------------------- - -_Openstack Nova_ - -[source,json] -------------------------------------------------------------------------------- -{ - "cloud": { - "instance.name": "test-998d932195.mycloud.tld", - "instance.id": "i-00011a84", - "availability_zone": "xxxx-az-c", - "provider": "openstack", - "machine.type": "m2.large" - } -} -------------------------------------------------------------------------------- - -[[add-fields]] -=== Add fields - -The `add_fields` processor adds additional fields to the event. Fields can be -scalar values, arrays, dictionaries, or any nested combination of these. By -default the fields that you specify will be grouped under the `fields` -sub-dictionary in the event. To group the fields under a different -sub-dictionary, use the `target` setting. To store the fields as -top-level fields, set `target: ''`. - -`target`:: (Optional) Sub-dictionary to put all fields into. Defaults to `fields`. -`fields`:: Fields to be added. - - -For example, this configuration: - -[source,yaml] ------------------------------------------------------------------------------- -processors: -- add_fields: - target: project - fields: - name: myproject - id: '574734885120952459' ------------------------------------------------------------------------------- - -Adds these fields to any event: - -[source,json] -------------------------------------------------------------------------------- -{ - "project": { - "name": "myproject", - "id": "574734885120952459" - } -} -------------------------------------------------------------------------------- - - -[[add-labels]] -=== Add labels - -The `add_labels` processor adds a set of key-value pairs to an event. -The processor will flatten nested configuration objects like arrays or -dictionaries into a fully qualified name by merging nested names with a `.`. -Array entries create numeric names starting with 0. Labels are always stored -under the Elastic Common Schema compliant `labels` sub-dictionary. - -`labels`:: Dictionaries of labels to be added. - -For example, this configuration: - -[source,yaml] ------------------------------------------------------------------------------- -processors: -- add_labels: - labels: - number: 1 - with.dots: test - nested: - with.dots: nested - array: - - do - - re - - with.field: mi ------------------------------------------------------------------------------- - -Adds these fields to every event: - -[source,json] -------------------------------------------------------------------------------- -{ - "labels": { - "number": 1, - "with.dots": "test", - "nested.with.dots": "nested", - "array.0": "do", - "array.1": "re", - "array.2.with.field": "mi" - } -} -------------------------------------------------------------------------------- - - -[[add-locale]] -=== Add the local time zone - -The `add_locale` processor enriches each event with the machine's time zone -offset from UTC or with the name of the time zone. It supports one configuration -option named `format` that controls whether an offset or time zone abbreviation -is added to the event. The default format is `offset`. The processor adds an -`event.timezone` value to each event. - -The configuration below enables the processor with the default settings. - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_locale: ~ -------------------------------------------------------------------------------- - -This configuration enables the processor and configures it to add the time zone -abbreviation to events. - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_locale: - format: abbreviation -------------------------------------------------------------------------------- - -NOTE: `add_locale` differentiates between daylight savings -time (DST) and regular time. For example, `CEST` indicates DST and `CET` is -regular time. - -[[add-tags]] -=== Add tags - -The `add_tags` processor adds tags to an event. If the target field already exists, -the tags are appended to the existing list of tags. - -`tags`:: List of tags to add. -`target`:: (Optional) Field the tags will be added to. Defaults to `tags`. - -For example, this configuration: - - -[source,yaml] ------------------------------------------------------------------------------- -processors: -- add_tags: - tags: [web, production] - target: "environment" ------------------------------------------------------------------------------- - -Adds the environment field to every event: - -[source,json] -------------------------------------------------------------------------------- -{ - "environment": ["web", "production"] -} -------------------------------------------------------------------------------- - -ifdef::has_decode_cef_processor[] -[[processor-decode-cef]] -[role="xpack"] -=== Decode CEF - -beta[] - -The `decode_cef` processor decodes Common Event Format (CEF) messages. This -processor is available in Filebeat. - -Below is an example configuration that decodes the `message` field as CEF after -renaming it to `event.original`. It is best to rename `message` to -`event.original` because the decoded CEF data contains its own `message` field. - -[source,yaml] ----- -processors: -- rename: - fields: - - {from: "message", to: "event.original"} -- decode_cef: - field: event.original ----- - -The `decode_cef` processor has the following configuration settings. - -.Decode CEF options -[options="header"] -|====== -| Name | Required | Default | Description | -| `field` | no | message | Source field containing the CEF message to be parsed. | -| `target_field` | no | cef | Target field where the parsed CEF object will be written. | -| `ecs` | no | true | Generate Elastic Common Schema (ECS) fields from the CEF data. - Certain CEF header and extension values will be used to populate ECS fields. | -| `ignore_missing` | no | false | Ignore errors when the source field is missing. | -| `ignore_failure` | no | false | Ignore failures when the source field does not contain a CEF message. | -| `id` | no | | An identifier for this processor instance. Useful for debugging. | -|====== -endif::[] - -ifdef::has_decode_csv_fields_processor[] -[[decode-csv-fields]] -=== Decode CSV fields - -experimental[] - -The `decode_csv_fields` processor decodes fields containing records in -comma-separated format (CSV). It will output the values as an array of strings. -This processor is available for Filebeat and Journalbeat. - -[source,yaml] ----------------------------------------------------- -processors: - - decode_csv_fields: - fields: - message: decoded.csv - separator: , - ignore_missing: false - overwrite_keys: true - trim_leading_space: false - fail_on_error: true ----------------------------------------------------- - -The `decode_csv_fields` processor has the following settings: - -`fields`:: This is a mapping from the source field containing the CSV data to - the destination field to which the decoded array will be written. -`separator`:: (Optional) Character to be used as a column separator. - The default is the comma character. To use a TAB character you - must set it to "\t". -`ignore_missing`:: (Optional) Whether to ignore events which lack the source - field. The default is `false`, which will fail processing of - an event if a field is missing. -`overwrite_keys`:: Whether the target field is overwritten if it - already exists. The default is `false`, which will fail - processing of an event when `target` already exists. -`trim_leading_space`:: Whether extra space after the separator is trimmed from - values. This works even if the separator is also a space. - The default is `false`. -`fail_on_error`:: (Optional) If set to `true`, in case of an error the changes to -the event are reverted, and the original event is returned. If set to `false`, -processing continues also if an error happens. Default is `true`. - -endif::[] - -[[decode-json-fields]] -=== Decode JSON fields - -The `decode_json_fields` processor decodes fields containing JSON strings and -replaces the strings with valid JSON objects. - -[source,yaml] ----------------------------------------------------- -processors: - - decode_json_fields: - fields: ["field1", "field2", ...] - process_array: false - max_depth: 1 - target: "" - overwrite_keys: false - add_error_key: true ----------------------------------------------------- - -The `decode_json_fields` processor has the following configuration settings: - -`fields`:: The fields containing JSON strings to decode. -`process_array`:: (Optional) A boolean that specifies whether to process -arrays. The default is `false`. -`max_depth`:: (Optional) The maximum parsing depth. The default is 1. -`target`:: (Optional) The field under which the decoded JSON will be written. By -default the decoded JSON object replaces the string field from which it was -read. To merge the decoded JSON fields into the root of the event, specify -`target` with an empty string (`target: ""`). Note that the `null` value (`target:`) -is treated as if the field was not set at all. -`overwrite_keys`:: (Optional) A boolean that specifies whether keys that already -exist in the event are overwritten by keys from the decoded JSON object. The -default value is `false`. -`add_error_key`:: (Optional) If set to `true` and an error occurs while decoding -JSON keys, an `error` field is added to the event with the error message. If set -to `false`, no error field is added even if an error occurs during decoding. The -default value is `false`.
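Putting these settings together, here is an illustrative sketch (the `message` content is hypothetical) that decodes a JSON string carried in `message` into the root of the event, overwriting colliding keys:

[source,yaml]
----
processors:
  - decode_json_fields:
      fields: ["message"]   # e.g. message: '{"status": "ok", "retries": 2}'
      target: ""            # merge decoded keys into the event root
      overwrite_keys: true  # replace keys that already exist on the event
      add_error_key: true   # annotate the event if decoding fails
----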
- -[[decode-base64-field]] -=== Decode Base64 fields - -The `decode_base64_field` processor specifies a field to base64 decode. -The `field` key contains a `from: old-key` and a `to: new-key` pair. `from` is -the origin and `to` the target name of the field. - -To overwrite fields either first rename the target field or use the `drop_fields` -processor to drop the field and then rename the field. - -[source,yaml] -------- -processors: -- decode_base64_field: - field: - from: "field1" - to: "field2" - ignore_missing: false - fail_on_error: true -------- - -In the example above: - - field1 is decoded into field2 - -The `decode_base64_field` processor has the following configuration settings: - -`ignore_missing`:: (Optional) If set to `true`, no error is logged in case a key -which should be base64 decoded is missing. Default is `false`. - -`fail_on_error`:: (Optional) If set to `true`, in case of an error the base64 -decoding of fields is stopped and the original event is returned. If set to -`false`, decoding continues also if an error happened during decoding. Default -is `true`. - -See <> for a list of supported conditions. - -[[decompress-gzip-field]] -=== Decompress gzip fields - -The `decompress_gzip_field` processor specifies a field to gzip decompress. -The `field` key contains a `from: old-key` and a `to: new-key` pair. `from` is -the origin and `to` the target name of the field. - -To overwrite fields either first rename the target field or use the `drop_fields` -processor to drop the field and then rename the field. - -[source,yaml] -------- -processors: -- decompress_gzip_field: - field: - from: "field1" - to: "field2" - ignore_missing: false - fail_on_error: true -------- - -In the example above: - - field1 is decompressed into field2 - -The `decompress_gzip_field` processor has the following configuration settings: - -`ignore_missing`:: (Optional) If set to `true`, no error is logged in case a key -which should be gzip decompressed is missing. Default is `false`. - -`fail_on_error`:: (Optional) If set to `true`, in case of an error the -decompression of fields is stopped and the original event is returned. If set to -`false`, decompression continues also if an error happened. Default is `true`. - -See <> for a list of supported conditions. - -[[community-id]] -=== Community ID Network Flow Hash - -The `community_id` processor computes a network flow hash according to the -https://github.com/corelight/community-id-spec[Community ID Flow Hash -specification]. - -The flow hash is useful for correlating all network events related to a -single flow. For example, you can filter on a community ID value and you might -get back the Netflow records from multiple collectors and layer 7 protocol -records from Packetbeat. - -By default the processor is configured to read the flow parameters from the -appropriate Elastic Common Schema (ECS) fields. If you are processing ECS data -then no parameters are required. - -[source,yaml] ----- -processors: - - community_id: ----- - -If the data does not conform to ECS then you can customize the field names -that the processor reads from. You can also change the `target` field, which -is where the computed hash is written. - -[source,yaml] ----- -processors: - - community_id: - fields: - source_ip: my_source_ip - source_port: my_source_port - destination_ip: my_dest_ip - destination_port: my_dest_port - iana_number: my_iana_number - transport: my_transport - icmp_type: my_icmp_type - icmp_code: my_icmp_code - target: network.community_id ----- - -If the necessary fields are not present in the event then the processor will -silently continue without adding the target field. - -The processor also accepts an optional `seed` parameter that must be a 16-bit -unsigned integer. This value gets incorporated into all generated hashes. - -[[convert]] -=== Convert - -The `convert` processor converts a field in the event to a different type, such -as converting a string to an integer. - -The supported types include: `integer`, `long`, `float`, `double`, `string`, -`boolean`, and `ip`. - -The `ip` type is effectively an alias for `string`, but with an added validation -that the value is an IPv4 or IPv6 address. - -[source,yaml] ----- -processors: - - convert: - fields: - - {from: "src_ip", to: "source.ip", type: "ip"} - - {from: "src_port", to: "source.port", type: "integer"} - ignore_missing: true - fail_on_error: false ----- - -The `convert` processor has the following configuration settings: - -`fields`:: (Required) This is the list of fields to convert. At least one item -must be contained in the list. Each item in the list must have a `from` key that -specifies the source field. The `to` key is optional and specifies where to -assign the converted value. If `to` is omitted then the `from` field is updated -in-place. The `type` key specifies the data type to convert the value to. If -`type` is omitted then the processor copies or renames the field without any -type conversion. - -`ignore_missing`:: (Optional) If `true`, the processor continues to the next -field when the `from` key is not found in the event. If `false`, the processor -returns an error and does not process the remaining fields. Default is `false`. - -`fail_on_error`:: (Optional) If `false`, type conversion failures are ignored and -the processor continues to the next field. Default is `true`. - -`tag`:: (Optional) An identifier for this processor. Useful for debugging. - -`mode`:: (Optional) When both `from` and `to` are defined for a field then -`mode` controls whether to `copy` or `rename` the field when the type conversion -is successful. Default is `copy`. - -[[drop-event]] -=== Drop events - -The `drop_event` processor drops the entire event if the associated condition -is fulfilled. The condition is mandatory, because without one, all the events -are dropped. - -[source,yaml] ------- -processors: - - drop_event: - when: - condition ------
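As a concrete sketch (the field and value are illustrative), a rule that drops all events whose HTTP response code is 200 could look like:

[source,yaml]
----
processors:
  - drop_event:
      when:
        equals:
          http.response.code: 200   # drop successful responses, keep everything else
----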
-See <> for a list of supported conditions. - -[[drop-fields]] -=== Drop fields from events - -The `drop_fields` processor specifies which fields to drop if a certain -condition is fulfilled. The condition is optional. If it's missing, the -specified fields are always dropped. The `@timestamp` and `type` fields cannot -be dropped, even if they show up in the `drop_fields` list. - -[source,yaml] ----------------------------------------------------- -processors: - - drop_fields: - when: - condition - fields: ["field1", "field2", ...] - ignore_missing: false ----------------------------------------------------- - -See <> for a list of supported conditions. - -NOTE: If you define an empty list of fields under `drop_fields`, then no fields -are dropped. - -The `drop_fields` processor has the following configuration settings: - -`ignore_missing`:: (Optional) If `true` the processor will not return an error -when a specified field does not exist. Defaults to `false`. - -[[extract-array]] -=== Extract array - -experimental[] - -The `extract_array` processor populates fields with values read from an array -field. The following example will populate `source.ip` with the first element of -the `my_array` field, `destination.ip` with the second element, and -`network.transport` with the third. - -[source,yaml] ----------------------------------------------------- -processors: - - extract_array: - field: my_array - mappings: - source.ip: 0 - destination.ip: 1 - network.transport: 2 ----------------------------------------------------- - -The following settings are supported: - -`field`:: The array field whose elements are to be extracted. -`mappings`:: Maps each field name to an array index. Use 0 for the first element in - the array. Multiple fields can be mapped to the same array element. -`ignore_missing`:: (Optional) Whether to ignore events where the array field is - missing. The default is `false`, which will fail processing - of an event if the specified field does not exist. Set it to - `true` to ignore this condition. -`overwrite_keys`:: Whether the target fields specified in the mapping are - overwritten if they already exist. The default is `false`, - which will fail processing if a target field already exists. -`fail_on_error`:: (Optional) If set to `true` and an error happens, changes to - the event are reverted, and the original event is returned. If - set to `false`, processing continues despite errors. - Default is `true`. -`omit_empty`:: (Optional) Whether empty values are extracted from the array. If - set to `true`, instead of the target field being set to an - empty value, it is left unset. The empty string (`""`), an - empty array (`[]`) or an empty object (`{}`) are considered - empty values. Default is `false`. - -[[include-fields]] -=== Keep fields from events - -The `include_fields` processor specifies which fields to export if a certain -condition is fulfilled. The condition is optional. If it's missing, the -specified fields are always exported. The `@timestamp` and `type` fields are -always exported, even if they are not defined in the `include_fields` list. - -[source,yaml] -------- -processors: - - include_fields: - when: - condition - fields: ["field1", "field2", ...] -------- - -See <> for a list of supported conditions. - -You can specify multiple `include_fields` processors under the `processors` -section. - -NOTE: If you define an empty list of fields under `include_fields`, then only -the required fields, `@timestamp` and `type`, are exported. - -[[processor-registered-domain]] -=== Registered Domain - -The `registered_domain` processor reads a field containing a hostname and then -writes the "registered domain" contained in the hostname to the target field. -For example, given `www.google.co.uk` the processor would output `google.co.uk`. -In other words, the "registered domain" is the effective top-level domain -(`co.uk`) plus one level (`google`). - -This processor uses the Mozilla Public Suffix list to determine the value. - -[source,yaml] ----- -processors: -- registered_domain: - field: dns.question.name - target_field: dns.question.registered_domain - ignore_missing: true - ignore_failure: true ----- - -The `registered_domain` processor has the following configuration settings: - -.Registered Domain options -[options="header"] -|====== -| Name | Required | Default | Description | -| `field` | yes | | Source field containing a fully qualified domain name (FQDN). | -| `target_field` | yes | | Target field for the registered domain value. | -| `ignore_missing` | no | false | Ignore errors when the source field is missing. | -| `ignore_failure` | no | false | Ignore all errors produced by the processor. | -| `id` | no | | An identifier for this processor instance. Useful for debugging. | -|====== - -[[rename-fields]] -=== Rename fields from events - -The `rename` processor specifies a list of fields to rename. Under the `fields` -key each entry contains a `from: old-key` and a `to: new-key` pair. `from` is -the origin and `to` the target name of the field. - -Renaming fields can be useful in cases where field names cause conflicts. For -example, if an event has two fields, `c` and `c.b`, that are both assigned scalar -values (e.g. `{"c": 1, "c.b": 2}`) this will result in an Elasticsearch error at -ingest time. This is because the value of `c` cannot simultaneously be a scalar -and an object. To prevent this, the `rename` processor can be used to rename `c` to -`c.value`. - -The `rename` processor cannot be used to overwrite fields. To overwrite fields either -first rename the target field or use the `drop_fields` processor to drop the -field and then rename the field. - -[source,yaml] -------- -processors: -- rename: - fields: - - from: "a.g" - to: "e.d" - ignore_missing: false - fail_on_error: true -------- - -The `rename` processor has the following configuration settings: - -`ignore_missing`:: (Optional) If set to `true`, no error is logged in case a key -which should be renamed is missing. Default is `false`. - -`fail_on_error`:: (Optional) If set to `true`, in case of an error the renaming of -fields is stopped and the original event is returned. If set to `false`, renaming -continues also if an error happened during renaming. Default is `true`. - -See <> for a list of supported conditions. - -You can specify multiple `rename` processors under the `processors` -section. - -[[add-kubernetes-metadata]] -=== Add Kubernetes metadata - -The `add_kubernetes_metadata` processor annotates each event with relevant -metadata based on which Kubernetes pod the event originated from. Each event is -annotated with: - -* Pod Name -* Pod UID -* Namespace -* Labels - -The `add_kubernetes_metadata` processor has two basic building blocks: - -* Indexers -* Matchers - -Indexers take in a pod's metadata and build indices based on the pod metadata. -For example, the `ip_port` indexer can take a Kubernetes pod and index the pod -metadata based on all `pod_ip:container_port` combinations. - -Matchers are used to construct lookup keys for querying indices. For example, -when the `fields` matcher takes `["metricset.host"]` as a lookup field, it would -construct a lookup key with the value of the field `metricset.host`. - -Each Beat can define its own default indexers and matchers which are enabled by -default. For example, Filebeat enables the `container` indexer, which indexes -pod metadata based on all container IDs, and a `logs_path` matcher, which takes -the `log.file.path` field, extracts the container ID, and uses it to retrieve -metadata. - -The configuration below enables the processor when {beatname_lc} is run as a pod in -Kubernetes. - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_kubernetes_metadata: -------------------------------------------------------------------------------- - -The configuration below enables the processor on a Beat running as a process on -the Kubernetes node. - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_kubernetes_metadata: - host: - kube_config: ${HOME}/.kube/config -------------------------------------------------------------------------------- - -The configuration below has the default indexers and matchers disabled and -enables the ones that the user is interested in. - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_kubernetes_metadata: - host: - kube_config: ~/.kube/config - default_indexers.enabled: false - default_matchers.enabled: false - indexers: - - ip_port: - matchers: - - fields: - lookup_fields: ["metricset.host"] -------------------------------------------------------------------------------- - -The `add_kubernetes_metadata` processor has the following configuration settings: - -`host`:: (Optional) Identify the node where {beatname_lc} is running in case it -cannot be accurately detected, as when running {beatname_lc} in host network -mode. -`namespace`:: (Optional) Select the namespace from which to collect the -metadata. If it is not set, the processor collects metadata from all namespaces. -It is unset by default. -`kube_config`:: (Optional) Use given config file as configuration for Kubernetes -client. -`default_indexers.enabled`:: (Optional) Enable/Disable default pod indexers, in -case you want to specify your own. -`default_matchers.enabled`:: (Optional) Enable/Disable default pod matchers, in -case you want to specify your own. - -[[add-docker-metadata]] -=== Add Docker metadata - -The `add_docker_metadata` processor annotates each event with relevant metadata -from Docker containers: - -* Container ID -* Name -* Image -* Labels - -[NOTE] -===== -When running {beatname_uc} in a container, you need to provide access to -Docker's unix socket in order for the `add_docker_metadata` processor to work. -You can do this by mounting the socket inside the container. For example: - -`docker run -v /var/run/docker.sock:/var/run/docker.sock ...` - -To avoid privilege issues, you may also need to add `--user=root` to the -`docker run` flags. Because the user must be part of the docker group in order -to access `/var/run/docker.sock`, root access is required if {beatname_uc} is -running as non-root inside the container. -===== - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_docker_metadata: - host: "unix:///var/run/docker.sock" - #match_fields: ["system.process.cgroup.id"] - #match_pids: ["process.pid", "process.ppid"] - #match_source: true - #match_source_index: 4 - #match_short_id: true - #cleanup_timeout: 60 - #labels.dedot: false - # To connect to Docker over TLS you must specify a client and CA certificate. - #ssl: - # certificate_authority: "/etc/pki/root/ca.pem" - # certificate: "/etc/pki/client/cert.pem" - # key: "/etc/pki/client/cert.key" -------------------------------------------------------------------------------- - -It has the following settings: - -`host`:: (Optional) Docker socket (UNIX or TCP socket). It uses -`unix:///var/run/docker.sock` by default. - -`ssl`:: (Optional) SSL configuration to use when connecting to the Docker -socket. - -`match_fields`:: (Optional) A list of fields to match a container ID; at least -one of them must hold a container ID for the event to be enriched. - -`match_pids`:: (Optional) A list of fields that contain process IDs. If the -process is running in Docker then the event will be enriched. The default value -is `["process.pid", "process.ppid"]`. - -`match_source`:: (Optional) Match container ID from a log path present in the -`log.file.path` field. Enabled by default. - -`match_short_id`:: (Optional) Match container short ID from a log path present -in the `log.file.path` field. Disabled by default. -This allows matching directory names that contain the first 12 characters -of the container ID. For example, `/var/log/containers/b7e3460e2b21/*.log`. - -`match_source_index`:: (Optional) Index in the source path split by `/` to look -for container ID. It defaults to 4 to match -`/var/lib/docker/containers//*.log` - -`cleanup_timeout`:: (Optional) Time of inactivity after which metadata for a -container is cleaned up and forgotten. 60s by default. - -`labels.dedot`:: (Optional) Defaults to `false`. If set to `true`, dots in - labels are replaced with `_`. - -[[add-host-metadata]] -=== Add Host metadata - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_host_metadata: - netinfo.enabled: false - cache.ttl: 5m - geo: - name: nyc-dc1-rack1 - location: 40.7128, -74.0060 - continent_name: North America - country_iso_code: US - region_name: New York - region_iso_code: NY - city_name: New York -------------------------------------------------------------------------------- - -It has the following settings: - -`netinfo.enabled`:: (Optional) Default is `false`. Include IP addresses and MAC addresses as fields `host.ip` and `host.mac` - -`cache.ttl`:: (Optional) The processor uses an internal cache for the host metadata. This sets the cache expiration time. The default is 5m, negative values disable caching altogether. - -`geo.name`:: (Optional) User definable token to be used for identifying a discrete location. Frequently a datacenter, rack, or similar. - -`geo.location`:: (Optional) Longitude and latitude in comma separated format. - -`geo.continent_name`:: (Optional) Name of the continent. - -`geo.country_name`:: (Optional) Name of the country. - -`geo.region_name`:: (Optional) Name of the region. - -`geo.city_name`:: (Optional) Name of the city. - -`geo.country_iso_code`:: (Optional) ISO country code. - -`geo.region_iso_code`:: (Optional) ISO region code. - - -The `add_host_metadata` processor annotates each event with relevant metadata from the host machine. -The fields added to the event look like the following: - -[source,json] -------------------------------------------------------------------------------- -{ - "host":{ - "architecture":"x86_64", - "name":"example-host", - "id":"", - "os":{ - "family":"darwin", - "build":"16G1212", - "platform":"darwin", - "version":"10.12.6", - "kernel":"16.7.0", - "name":"Mac OS X" - }, - "ip": ["192.168.0.1", "10.0.0.1"], - "mac": ["00:25:96:12:34:56", "72:00:06:ff:79:f1"], - "geo": { - "continent_name": "North America", - "country_iso_code": "US", - "region_name": "New York", - "region_iso_code": "NY", - "city_name": "New York", - "name": "nyc-dc1-rack1", - "location": "40.7128, -74.0060" - } - } -} -------------------------------------------------------------------------------- - -[[add-observer-metadata]] -=== Add Observer metadata - -beta[] - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_observer_metadata: - netinfo.enabled: false - cache.ttl: 5m - geo: - name: nyc-dc1-rack1 - location: 40.7128, -74.0060 - continent_name: North America - country_iso_code: US - region_name: New York - region_iso_code: NY - city_name: New York -------------------------------------------------------------------------------- - -It has the following settings: - -`netinfo.enabled`:: (Optional) Default is `false`. Include IP addresses and MAC addresses as fields `observer.ip` and `observer.mac` - -`cache.ttl`:: (Optional) The processor uses an internal cache for the observer metadata. This sets the cache expiration time. The default is 5m, negative values disable caching altogether. - -`geo.name`:: (Optional) User definable token to be used for identifying a discrete location. Frequently a datacenter, rack, or similar. - -`geo.location`:: (Optional) Longitude and latitude in comma separated format. - -`geo.continent_name`:: (Optional) Name of the continent. - -`geo.country_name`:: (Optional) Name of the country. - -`geo.region_name`:: (Optional) Name of the region. - -`geo.city_name`:: (Optional) Name of the city. - -`geo.country_iso_code`:: (Optional) ISO country code. - -`geo.region_iso_code`:: (Optional) ISO region code. - - -The `add_observer_metadata` processor annotates each event with relevant metadata from the observer machine. -The fields added to the event look like the following: - -[source,json] -------------------------------------------------------------------------------- -{ - "observer" : { - "hostname" : "avce", - "type" : "heartbeat", - "vendor" : "elastic", - "ip" : [ - "192.168.1.251", - "fe80::64b2:c3ff:fe5b:b974" - ], - "mac" : [ - "dc:c1:02:6f:1b:ed" - ], - "geo": { - "continent_name": "North America", - "country_iso_code": "US", - "region_name": "New York", - "region_iso_code": "NY", - "city_name": "New York", - "name": "nyc-dc1-rack1", - "location": "40.7128, -74.0060" - } - } -} -------------------------------------------------------------------------------- - -[[dissect]] -=== Dissect strings - -The `dissect` processor tokenizes incoming strings using defined patterns. - -[source,yaml] -------- -processors: -- dissect: - tokenizer: "%{key1} %{key2}" - field: "message" - target_prefix: "dissect" -------- - -The `dissect` processor has the following configuration settings: - -`field`:: (Optional) The event field to tokenize. Default is `message`. - -`target_prefix`:: (Optional) The name of the field where the values will be extracted. When an empty -string is defined, the processor will create the keys at the root of the event. Default is -`dissect`. When the target key already exists in the event, the processor won't replace it and logs -an error; you need to either drop or rename the key before using dissect. - -For tokenization to be successful, all keys must be found and extracted. If one of them cannot be -found, an error is logged and no modification is made to the original event. - -NOTE: A key can contain any characters except reserved suffix or prefix modifiers: `/`,`&`, `+` -and `?`.
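To make the tokenizer semantics concrete, here is an illustrative sketch (the pattern and sample value are hypothetical): with this configuration, an event whose `message` field contains `"ERROR disk full"` would gain `dissect.level: "ERROR"` and `dissect.msg: "disk full"`.

[source,yaml]
----
processors:
- dissect:
    tokenizer: "%{level} %{msg}"  # both keys must match for the event to be modified
    field: "message"              # e.g. message: "ERROR disk full"
    target_prefix: "dissect"      # extracted keys are written under dissect.*
----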
-See <> for a list of supported conditions. - -[[processor-dns]] -=== DNS Reverse Lookup - -The DNS processor performs reverse DNS lookups of IP addresses. It caches the -responses that it receives in accordance with the time-to-live (TTL) value -contained in the response. It also caches failures that occur during lookups. -Each instance of this processor maintains its own independent cache. - -The processor uses its own DNS resolver to send requests to nameservers and does -not use the operating system's resolver. It does not read any values contained -in `/etc/hosts`. - -This processor can significantly slow down your pipeline's throughput if you -have a high latency network or slow upstream nameserver. The cache will help -with performance, but if the addresses being resolved have a high cardinality -then the cache benefits will be diminished due to the high miss ratio. - -By way of example, if each DNS lookup takes 2 milliseconds, the maximum -throughput you can achieve is 500 events per second (1000 milliseconds / 2 -milliseconds). If you have a high cache hit ratio then your throughput can be -higher. - -This is a minimal configuration example that resolves the IP addresses contained -in two fields. - -[source,yaml] ----- -processors: -- dns: - type: reverse - fields: - source.ip: source.hostname - destination.ip: destination.hostname ----- - -Next is a configuration example showing all options. - -[source,yaml] ----- -processors: -- dns: - type: reverse - action: append - fields: - server.ip: server.hostname - client.ip: client.hostname - success_cache: - capacity.initial: 1000 - capacity.max: 10000 - failure_cache: - capacity.initial: 1000 - capacity.max: 10000 - ttl: 1m - nameservers: ['192.0.2.1', '203.0.113.1'] - timeout: 500ms - tag_on_failure: [_dns_reverse_lookup_failed] ----- - -The `dns` processor has the following configuration settings: - -`type`:: The type of DNS lookup to perform. The only supported type is -`reverse` which queries for a PTR record. - -`action`:: This defines the behavior of the processor when the target field -already exists in the event. The options are `append` (default) and `replace`. - -`fields`:: This is a mapping of source field names to target field names. The -value of the source field will be used in the DNS query and the result will be -written to the target field. - -`success_cache.capacity.initial`:: The initial number of items that the success -cache will be allocated to hold. When initialized the processor will allocate -the memory for this number of items. Default value is `1000`. - -`success_cache.capacity.max`:: The maximum number of items that the success -cache can hold. When the maximum capacity is reached a random item is evicted. -Default value is `10000`. - -`failure_cache.capacity.initial`:: The initial number of items that the failure -cache will be allocated to hold. When initialized the processor will allocate -the memory for this number of items. Default value is `1000`. - -`failure_cache.capacity.max`:: The maximum number of items that the failure -cache can hold. When the maximum capacity is reached a random item is evicted. -Default value is `10000`. - -`failure_cache.ttl`:: The duration for which failures are cached. Valid time -units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Default value is `1m`. - -`nameservers`:: A list of nameservers to query. If there are multiple servers, -the resolver queries them in the order listed. If none are specified then it -will read the nameservers listed in `/etc/resolv.conf` once at initialization. -On Windows you must always supply at least one nameserver. - -`timeout`:: The duration after which a DNS query will timeout. This is the -timeout for each DNS request, so if you have 2 nameservers then the total timeout -will be 2 times this value. Valid time units are "ns", "us" (or "µs"), "ms", "s", -"m", "h". Default value is `500ms`. - -`tag_on_failure`:: A list of tags to add to the event when any lookup fails. The -tags are only added once even if multiple lookups fail. By default no tags are -added upon failure. - -[[add-process-metadata]] -=== Add process metadata - -The `add_process_metadata` processor enriches events with information from running -processes, identified by their process ID (PID). - -[source,yaml] -------------------------------------------------------------------------------- -processors: -- add_process_metadata: - match_pids: [system.process.ppid] - target: system.process.parent -------------------------------------------------------------------------------- - -The fields added to the event look as follows: -[source,json] -------------------------------------------------------------------------------- -"process": { - "name": "systemd", - "title": "/usr/lib/systemd/systemd --switched-root --system --deserialize 22", - "exe": "/usr/lib/systemd/systemd", - "args": ["/usr/lib/systemd/systemd", "--switched-root", "--system", "--deserialize", "22"], - "pid": 1, - "ppid": 0, - "start_time": "2018-08-22T08:44:50.684Z" -} -------------------------------------------------------------------------------- - -Optionally, the process environment can be included, too: -[source,json] -------------------------------------------------------------------------------- - ... - "env": { - "HOME": "/", - "TERM": "linux", - "BOOT_IMAGE": "/boot/vmlinuz-4.11.8-300.fc26.x86_64", - "LANG": "en_US.UTF-8" - } - ... -------------------------------------------------------------------------------- -It has the following settings: - -`match_pids`:: List of fields to look up for a PID. The processor will -search the list sequentially until the field is found in the current event, and -the PID lookup will be applied to the value of this field. - -`target`:: (Optional) Destination prefix where the `process` object will be -created. The default is the event's root. - -`include_fields`:: (Optional) List of fields to add. By default, the processor -will add all the available fields except `process.env`. - -`ignore_missing`:: (Optional) When set to `false`, events that don't contain any -of the fields in `match_pids` will be discarded and an error will be generated. By -default, this condition is ignored. - -`overwrite_keys`:: (Optional) By default, if a target field already exists, it -will not be overwritten and an error will be logged. If `overwrite_keys` is -set to `true`, this condition will be ignored. - -`restricted_fields`:: (Optional) By default, the `process.env` field is not -output, to avoid leaking sensitive data. If `restricted_fields` is `true`, the -field will be present in the output.
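A sketch that also captures the process environment (illustrative values; note that `process.env` can expose sensitive data, which is why it is restricted by default):

[source,yaml]
----
processors:
- add_process_metadata:
    match_pids: [process.pid]
    include_fields: [process.name, process.args, process.env]
    restricted_fields: true   # required for process.env to be emitted
----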
- -ifdef::has_script_processor[] -[[processor-script]] -=== Script Processor - -experimental[] - -The `script` processor executes JavaScript code to process an event. The processor -uses a pure Go implementation of ECMAScript 5.1 and has no external -dependencies. This can be useful in situations where one of the other processors -doesn't provide the functionality you need to filter events. - -The processor can be configured by embedding JavaScript in your configuration -file or by pointing the processor at external file(s). - -[source,yaml] ----- -processors: -- script: - lang: javascript - id: my_filter - source: > - function process(event) { - event.Tag("js"); - } ----- - -This loads `filter.js` from disk. - -[source,yaml] ----- -processors: -- script: - lang: javascript - id: my_filter - file: ${path.config}/filter.js ----- - -Parameters can be passed to the script by adding `params` to the config. -This allows for a script to be made reusable. When using `params` the -code must define a `register(params)` function to receive the parameters. - -[source,yaml] ----- -processors: -- script: - lang: javascript - id: my_filter - params: - threshold: 15 - source: > - var params = {threshold: 42}; - function register(scriptParams) { - params = scriptParams; - } - function process(event) { - if (event.Get("severity") < params.threshold) { - event.Cancel(); - } - } ----- - -If the script defines a `test()` function it will be invoked when the processor -is loaded. Any exceptions thrown will cause the processor to fail to load. This -can be used to make assertions about the behavior of the script. - -[source,javascript] ----- -function process(event) { - if (event.Get("event.code") === 1102) { - event.Put("event.action", "cleared"); - } -} - -function test() { - var event = process(new Event({event: {code: 1102}})); - if (event.Get("event.action") !== "cleared") { - throw "expected event.action === cleared"; - } -} ----- - -[float] -==== Configuration options - -The `script` processor has the following configuration settings: - -`lang`:: This field is required and its value must be `javascript`. - -`tag`:: This is an optional identifier that is added to log messages. If defined -it enables metrics logging for this instance of the processor. The metrics -include the number of exceptions and a histogram of the execution times for -the `process` function. - -`source`:: Inline JavaScript source code. - -`file`:: Path to a script file to load. Relative paths are interpreted as -relative to the `path.config` directory. Globs are expanded. - -`files`:: List of script files to load. The scripts are concatenated together. -Relative paths are interpreted as relative to the `path.config` directory. -Globs are expanded. - -`params`:: A dictionary of parameters that are passed to the script's `register` -function. - -`tag_on_exception`:: Tag to add to events in case the JavaScript code causes an -exception while processing an event. Defaults to `_js_exception`. - -`timeout`:: This sets an execution timeout for the `process` function. When -the `process` function takes longer than the `timeout` period the function -is interrupted. You can set this option to prevent a script from running for -too long (like preventing an infinite `while` loop). By default there is no -timeout. - -[float] -==== Event API - -The `Event` object passed to the `process` method has the following API. - -[frame="topbot",options="header"] -|=== -|Method |Description - -|`Get(string)` -|Get a value from the event (either a scalar or an object). If the key does not -exist `null` is returned. If no key is provided then an object containing all -fields is returned. - -*Example*: `var value = event.Get(key);` - -|`Put(string, value)` -|Put a value into the event. If the key was already set then the -previous value is returned. It throws an exception if the key cannot be set -because one of the intermediate values is not an object. - -*Example*: `var old = event.Put(key, value);` - -|`Rename(string, string)` -|Rename a key in the event. The target key must not exist. It -returns true if the source key was successfully renamed to the target key. - -*Example*: `var success = event.Rename("source", "target");` - -|`Delete(string)` -|Delete a field from the event. It returns true on success. - -*Example*: `var deleted = event.Delete("user.email");` - -|`Cancel()` -|Flag the event as cancelled which causes the processor to drop the -event. - -*Example*: `event.Cancel(); return;` - -|`Tag(string)` -|Append a tag to the `tags` field if the tag does not already -exist. Throws an exception if `tags` exists and is not a string or a list of -strings. - -*Example*: `event.Tag("user_event");` - -|`AppendTo(string, string)` -|`AppendTo` is a specialized `Put` method that converts the existing value to an -array and appends the value if it does not already exist. If there is an -existing value that's not a string or array of strings then an exception is -thrown. - -*Example*: `event.AppendTo("error.message", "invalid file hash");` -|=== -endif::[] - -ifdef::has_timestamp_processor[] -[[processor-timestamp]] -=== Timestamp - -beta[] - -The `timestamp` processor parses a timestamp from a field. By default the -timestamp processor writes the parsed result to the `@timestamp` field. You can -specify a different field by setting the `target_field` parameter. The timestamp -value is parsed according to the `layouts` parameter. Multiple layouts can be -specified and they will be used sequentially to attempt parsing the timestamp -field. - -NOTE: The timestamp layouts used by this processor are different than the - formats supported by date processors in Logstash and Elasticsearch Ingest - Node. - -The `layouts` are described using a reference time that is based on this -specific time: - - Mon Jan 2 15:04:05 MST 2006 - -Since MST is GMT-0700, the reference time is: - - 01/02 03:04:05PM '06 -0700 - -To define your own layout, rewrite the reference time in a format that matches -the timestamps you expect to parse. For more layout examples and details see the -https://godoc.org/time#pkg-constants[Go time package documentation]. - -If a layout does not contain a year then the current year in the specified -`timezone` is added to the time value. - -.Timestamp options -[options="header"] -|====== -| Name | Required | Default | Description | -| `field` | yes | | Source field containing the time to be parsed. | -| `target_field` | no | @timestamp | Target field for the parsed time value. The target value is always written as UTC. | -| `layouts` | yes | | Timestamp layouts that define the expected time value format. In addition to layouts, `UNIX` and `UNIX_MS` are accepted. | -| `timezone` | no | UTC | Timezone (e.g. America/New_York) to use when parsing a timestamp not containing a timezone. | -| `ignore_missing` | no | false | Ignore errors when the source field is missing. | -| `ignore_failure` | no | false | Ignore all errors produced by the processor. | -| `test` | no | | A list of timestamps that must parse successfully when loading the processor. | -| `id` | no | | An identifier for this processor instance. Useful for debugging. | -|====== - -Here is an example that parses the `start_time` field and writes the result -to the `@timestamp` field, and then deletes the `start_time` field. When the -processor is loaded it will immediately validate that the two `test` timestamps -parse with this configuration. - -[source,yaml] ----- -processors: -- timestamp: - field: start_time - layouts: - - '2006-01-02T15:04:05Z' - - '2006-01-02T15:04:05.999Z' - test: - - '2019-06-22T16:33:51Z' - - '2019-11-18T04:59:51.123Z' -- drop_fields: - fields: [start_time] ----- -endif::[] +include::processors-list.asciidoc[tag=processors-include] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc index 8531ece9..af53cb0e 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/queueconfig.asciidoc @@ -29,7 +29,7 @@ If no flush interval and no number of events to flush is configured, all events published to this queue will be directly consumed by the outputs. To enforce spooling in the queue, set the `flush.min_events` and `flush.timeout` options. -By default `flush.min.events` is set to 2048 and `flush.timeout` is set to 1s. +By default `flush.min_events` is set to 2048 and `flush.timeout` is set to 1s. The output's `bulk_max_size` setting limits the number of events being processed at once.
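For illustration, a sketch of a memory queue configured for larger, less frequent flushes (the values are illustrative, not recommendations):

[source,yaml]
----
queue.mem:
  events: 4096           # total events the queue can hold
  flush.min_events: 512  # spool until at least 512 events are buffered...
  flush.timeout: 5s      # ...or until 5 seconds have passed
----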
{see-relnotes} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.1.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.1.asciidoc similarity index 85% rename from vendor/github.com/elastic/beats/libbeat/docs/breaking-7.1.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.1.asciidoc index da164cee..5bc58d33 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.1.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.1.asciidoc @@ -1,7 +1,9 @@ [[breaking-changes-7.1]] === Breaking changes in 7.1 - +++++ +7.1 +++++ {see-relnotes} //NOTE: The notable-breaking-changes tagged regions are re-used in the diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.2.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.2.asciidoc similarity index 85% rename from vendor/github.com/elastic/beats/libbeat/docs/breaking-7.2.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.2.asciidoc index 13c8fdf1..620e7020 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.2.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.2.asciidoc @@ -1,6 +1,9 @@ [[breaking-changes-7.2]] === Breaking changes in 7.2 +++++ +7.2 +++++ {see-relnotes} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.3.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.3.asciidoc similarity index 85% rename from vendor/github.com/elastic/beats/libbeat/docs/breaking-7.3.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.3.asciidoc index ae4cbbb0..2d1d4a84 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.3.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.3.asciidoc @@ -1,6 +1,9 @@ [[breaking-changes-7.3]] === Breaking changes in 7.3 +++++ +7.3 +++++ {see-relnotes} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.4.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.4.asciidoc similarity index 96% rename from vendor/github.com/elastic/beats/libbeat/docs/breaking-7.4.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.4.asciidoc index 11e2d612..544ce437 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking-7.4.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.4.asciidoc @@ -1,6 +1,9 @@ [[breaking-changes-7.4]] === Breaking changes in 7.4 +++++ +7.4 +++++ This section discusses the main changes that you need to be aware of to migrate Beats to version 7.4. 
{see-relnotes} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.5.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.5.asciidoc new file mode 100644 index 00000000..04bd85b2 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-7.5.asciidoc @@ -0,0 +1,15 @@ +[[breaking-changes-7.5]] + +=== Breaking changes in 7.5 +++++ +7.5 +++++ + +{see-relnotes} + +//NOTE: The notable-breaking-changes tagged regions are re-used in the +//Installation and Upgrade Guide + +//tag::notable-breaking-changes[] + +// end::notable-breaking-changes[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking-8.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc similarity index 89% rename from vendor/github.com/elastic/beats/libbeat/docs/breaking-8.0.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc index 8493288f..2134a405 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking-8.0.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking-8.0.asciidoc @@ -1,6 +1,9 @@ [[breaking-changes-8.0]] === Breaking changes in 8.0 +++++ +8.0 +++++ This section discusses the main changes that you should be aware of if you upgrade the Beats to version 8.0. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking.asciidoc similarity index 93% rename from vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking.asciidoc index 08926dd0..d3f4aa40 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/breaking.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/breaking.asciidoc @@ -11,6 +11,8 @@ changes, but there are breaking changes between major versions (e.g. 6.x to See the following topics for a description of breaking changes: +* <> + * <> * <> @@ -21,6 +23,8 @@ See the following topics for a description of breaking changes: * <> +include::breaking-7.5.asciidoc[] + include::breaking-7.4.asciidoc[] include::breaking-7.3.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/field-name-changes.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/field-name-changes.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/field-name-changes.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/breaking/field-name-changes.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.0.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.0.0.asciidoc similarity index 91% rename from vendor/github.com/elastic/beats/libbeat/docs/highlights-7.0.0.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.0.0.asciidoc index 7d6095e3..2eadf156 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.0.0.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.0.0.asciidoc @@ -1,14 +1,17 @@ [[release-highlights-7.0.0]] -=== 7.0.0 release highlights +=== 7.0 release highlights ++++ -7.0.0 +7.0 ++++ Each release of {beats} brings new features and product improvements. 
-Here are the highlights of the new features and enhancements in 7.0.0. +Following are the most notable features and enhancements in 7.0. -Refer to the {beats} <> and <> for a list of bug fixes and other changes. +For a complete list of highlights, see the +https://www.elastic.co/blog/beats-7-0-0-released[{beats} 7.0 release blog]. + +For a list of bug fixes and other changes, see the {beats} +<> and <>. //NOTE: The notable-highlights tagged regions are re-used in the //Installation and Upgrade Guide diff --git a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.1.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.1.0.asciidoc similarity index 64% rename from vendor/github.com/elastic/beats/libbeat/docs/highlights-7.1.0.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.1.0.asciidoc index c45b3783..3ce62777 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.1.0.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.1.0.asciidoc @@ -1,11 +1,14 @@ [[release-highlights-7.1.0]] -=== 7.1.0 release highlights +=== 7.1 release highlights ++++ -7.1.0 +7.1 ++++ Each release of {beats} brings new features and product improvements. -Here are the highlights of the new features and enhancements in 7.1.0. +Following are the most notable features and enhancements in 7.1. + +For a list of bug fixes and other changes, see the {beats} +<> and <>. //NOTE: The notable-highlights tagged regions are re-used in the //Installation and Upgrade Guide @@ -15,8 +18,3 @@ Some Elastic Stack security features, such as encrypted communications, file and native authentication, and role-based access control, are now available in more subscription levels. For details, see https://www.elastic.co/subscriptions. // end::notable-highlights[] - -Refer to the {beats} <> and <> for a list of bug fixes and other changes. - - diff --git a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.2.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.2.0.asciidoc similarity index 88% rename from vendor/github.com/elastic/beats/libbeat/docs/highlights-7.2.0.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.2.0.asciidoc index b849b9e7..42ca5ab7 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.2.0.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.2.0.asciidoc @@ -1,14 +1,17 @@ [[release-highlights-7.2.0]] -=== 7.2.0 release highlights +=== 7.2 release highlights ++++ -7.2.0 +7.2 ++++ Each release of {beats} brings new features and product improvements. -Here are the highlights of the new features and enhancements in 7.2.0. +Following are the most notable features and enhancements in 7.2. -Refer to the {beats} <> and <> for a list of bug fixes and other changes. +For a complete list of highlights, see the +https://www.elastic.co/blog/beats-7-2-0-released[{beats} 7.2 release blog]. + +For a list of bug fixes and other changes, see the {beats} +<> and <>. 
//NOTE: The notable-highlights tagged regions are re-used in the //Installation and Upgrade Guide diff --git a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.3.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.3.0.asciidoc similarity index 92% rename from vendor/github.com/elastic/beats/libbeat/docs/highlights-7.3.0.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.3.0.asciidoc index 74f303e0..d37d5832 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.3.0.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.3.0.asciidoc @@ -1,14 +1,17 @@ [[release-highlights-7.3.0]] -=== 7.3.0 release highlights +=== 7.3 release highlights ++++ -7.3.0 +7.3 ++++ Each release of {beats} brings new features and product improvements. -Here are the highlights of the new features and enhancements in 7.3.0. +Following are the most notable features and enhancements in 7.3. -Refer to the {beats} <> and -<> for a list of bug fixes and other changes. +For a complete list of highlights, see the +https://www.elastic.co/blog/beats-7-3-0-released[{beats} 7.3 release blog]. + +For a list of bug fixes and other changes, see the {beats} +<> and <>. //NOTE: The notable-highlights tagged regions are re-used in the //Installation and Upgrade Guide diff --git a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.4.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.4.0.asciidoc similarity index 79% rename from vendor/github.com/elastic/beats/libbeat/docs/highlights-7.4.0.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.4.0.asciidoc index 30509152..bd46278c 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/highlights-7.4.0.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.4.0.asciidoc @@ -1,18 +1,17 @@ [[release-highlights-7.4.0]] -=== 7.4.0 release highlights +=== 7.4 release highlights ++++ -7.4.0 +7.4 ++++ Each release of {beats} brings new features and product improvements. -Here are the highlights of the new features and enhancements in 7.4.0. +Following are the most notable features and enhancements in 7.4. -Refer to the {beats} <> and -<> for a list of bug fixes and other changes. +For a complete list of highlights, see the +https://www.elastic.co/blog/beats-7-4-0-released[{beats} 7.4 release blog]. -Also read the -https://www.elastic.co/blog/beats-7-4-0-released[Beats release blog] for a full -description of new features. +For a list of bug fixes and other changes, see the {beats} +<> and <>. //NOTE: The notable-highlights tagged regions are re-used in the //Installation and Upgrade Guide diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.5.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.5.0.asciidoc new file mode 100644 index 00000000..bc800536 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-7.5.0.asciidoc @@ -0,0 +1,38 @@ +[[release-highlights-7.5.0]] +=== 7.5 release highlights +++++ +7.5 +++++ + +Each release of {beats} brings new features and product improvements. +Following are the most notable features and enhancements in 7.5. 
+ +For a complete list of highlights, see the +https://www.elastic.co/blog/beats-7-5-0-released[{beats} 7.5 release blog]. + +For a list of bug fixes and other changes, see the {beats} +<> and <>. + +//NOTE: The notable-highlights tagged regions are re-used in the +//Installation and Upgrade Guide + +// tag::notable-highlights[] + +[float] +==== New modules for Azure cloud monitoring + +With the addition of {metricbeat-ref}/metricbeat-module-azure.html[{metricbeat}] +and {filebeat-ref}/filebeat-module-azure.html[{filebeat}] modules for Azure +monitoring, Azure users can now directly monitor logs and metrics from Azure +Event Hub and Azure Monitor and use prebuilt {kib} dashboards to speed up the +analysis. + + +[float] +==== {heartbeat} enhanced with hint-based autodiscover + +In 7.5, we’ve enhanced {heartbeat}, as part of our Uptime solution, to include +{heartbeat-ref}/[hint-based autodiscover], which is a particularly great fit +for monitoring the health of Kubernetes services. + +// end::notable-highlights[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-8.0.0.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-8.0.0.asciidoc new file mode 100644 index 00000000..91941572 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights-8.0.0.asciidoc @@ -0,0 +1,32 @@ +[[release-highlights-8.0.0]] +=== 8.0 release highlights +++++ +8.0 +++++ + +Each release of {beats} brings new features and product improvements. +Following are the most notable features and enhancements in 8.0. + +//For a complete list of highlights, see the +//https://www.elastic.co/blog/beats-8-0-0-released[{beats} 8.0 release blog]. + +For a list of bug fixes and other changes, see the {beats} +<> and <>. + +//NOTE: The notable-highlights tagged regions are re-used in the +//Installation and Upgrade Guide + +// tag::notable-highlights[] +// ADD NOTABLE HIGHLIGHTS HERE + +//[float] +//==== Add title here + +//Add description here. + +//[float] +//==== Add title here + +//Add description here. + +// end::notable-highlights[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/highlights.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights.asciidoc similarity index 88% rename from vendor/github.com/elastic/beats/libbeat/docs/highlights.asciidoc rename to vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights.asciidoc index 27a6a4a1..b77111f0 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/highlights.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release-notes/highlights/highlights.asciidoc @@ -4,6 +4,8 @@ This section summarizes the most important changes in each release. For the full list, see <> and <>. +* <> + * <> * <> @@ -14,6 +16,8 @@ full list, see <> and <>. * <> +include::highlights-7.5.0.asciidoc[] + include::highlights-7.4.0.asciidoc[] include::highlights-7.3.0.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc index 59db0b53..19d71051 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/release.asciidoc @@ -8,6 +8,8 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. 
+* <> +* <> * <> * <> * <> diff --git a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc index 17cae1ae..a7104414 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/repositories.asciidoc @@ -27,7 +27,7 @@ to sign all our packages. It is available from https://pgp.mit.edu. ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {repo} has not yet been released. +Version {version} of {repo} has not yet been released. endif::[] @@ -120,6 +120,13 @@ sudo apt-get update && sudo apt-get install {beatname_pkg} + ["source","sh",subs="attributes"] -------------------------------------------------- +sudo systemctl enable {beatname_pkg} +-------------------------------------------------- + +If your system does not use `systemd` then run: ++ +["source","sh",subs="attributes"] +-------------------------------------------------- sudo update-rc.d {beatname_pkg} defaults 95 10 -------------------------------------------------- @@ -130,7 +137,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {repo} has not yet been released. +Version {version} of {repo} has not yet been released. endif::[] @@ -215,6 +222,13 @@ sudo yum install {beatname_pkg} + ["source","sh",subs="attributes"] -------------------------------------------------- +sudo systemctl enable {beatname_pkg} +-------------------------------------------------- + +If your system does not use `systemd` then run: ++ +["source","sh",subs="attributes"] +-------------------------------------------------- sudo chkconfig --add {beatname_pkg} -------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/docs/security/users.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/security/users.asciidoc index 8f212ae3..77634fdf 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/security/users.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/security/users.asciidoc @@ -42,27 +42,32 @@ the following privileges: + [options="header"] |==== -|Privileges | Why needed? +|Type | Privilege | Purpose +|Cluster |`monitor` -|Send monitoring data to the cluster +|Retrieve cluster details (e.g. version) ifndef::no_ilm[] +|Cluster |`manage_ilm` |Set up and manage index lifecycle management (ILM) policy endif::no_ilm[] ifdef::has_ml_jobs[] +|Cluster |`manage_ml` -|Set up machine learning job configurations +|Set up Machine Learning job configurations endif::has_ml_jobs[] +|Index |`manage` on +{beat_default_index_prefix}-*+ indices |Set up aliases used by ILM ifdef::has_ml_jobs[] +|Index |`read` on +{beat_default_index_prefix}-*+ indices -|Read {beatname_uc} indices in order to set up machine learning jobs +|Read {beatname_uc} indices in order to set up Machine Learning jobs endif::has_ml_jobs[] |==== + @@ -77,7 +82,7 @@ need to set up {beatname_uc}: + [options="header"] |==== -|Roles | Why needed? +|Role | Purpose |`kibana_user` |Load dependencies, such as example dashboards, if available, into {kib} @@ -113,58 +118,61 @@ on {ecloud}. To send monitoring data securely, create a monitoring user and grant it the roles described in the following sections. ==== -===== Internal collection - -For <>, {security} provides -the +{beat_monitoring_user}+ {ref}/built-in-users.html[built-in -user] and +{beat_monitoring_user}+ -{ref}/built-in-roles.html[built-in role] for sending monitoring information. 
You -can use the built-in user, if it's available in your environment, or create a -user who has the privileges needed to send monitoring information. - +* If you're using <> to +collect metrics about {beatname_uc}, {security} provides +the +{beat_monitoring_user}+ {ref}/built-in-users.html[built-in user] and ++{beat_monitoring_user}+ {ref}/built-in-roles.html[built-in role] to send +monitoring information. You can use the built-in user, if it's available in your +environment, or create a user who has the privileges needed to send monitoring +information. ++ If you use the +{beat_monitoring_user}+ user, make sure you set the password. - ++ If you don't use the +{beat_monitoring_user}+ user: - ++ +-- . Create a *monitoring role*, called something like +{beat_default_index_prefix}_monitoring+, that has the following privileges: + [options="header"] |==== -|Privileges | Why needed? +|Type | Privilege | Purpose +|Cluster |`monitor` -|Send monitoring info - -|`kibana_user` -|Use {kib} +|Retrieve cluster details (e.g. version) |==== -. Assign the *monitoring role*, along with the following built-in role, to +. Assign the *monitoring role*, along with the following built-in roles, to users who need to monitor {beatname_uc}: + [options="header"] |==== -|Role | Why needed? +|Role | Purpose + +|`kibana_user` +|Use {kib} + |`monitoring_user` |Use *Stack Monitoring* in {kib} to monitor {beatname_uc} |==== +-- ifndef::serverless[] -===== {metricbeat} collection - -For <>, {security} -provides the `remote_monitoring_user` {ref}/built-in-users.html[built-in -user], and the `remote_monitoring_collector` and `remote_monitoring_agent` -{ref}/built-in-roles.html[built-in roles] for collecting and sending -monitoring information. You can use the built-in user, if it's available in your -environment, or create a user who has the privileges needed to collect and send -monitoring information. +* If you're <> to collect +metrics about {beatname_uc}, {security} provides the `remote_monitoring_user` +{ref}/built-in-users.html[built-in user], and the `remote_monitoring_collector` +and `remote_monitoring_agent` {ref}/built-in-roles.html[built-in roles] for +collecting and sending monitoring information. You can use the built-in user, if +it's available in your environment, or create a user who has the privileges +needed to collect and send monitoring information. ++ If you use the `remote_monitoring_user` user, make sure you set the password. - ++ If you don't use the `remote_monitoring_user` user: - ++ +-- . Create a user on the production cluster who will collect and send monitoring information. @@ -172,92 +180,98 @@ information. + [options="header"] |==== -|Role | Why needed? +|Role | Purpose + |`remote_monitoring_collector` |Collect monitoring metrics from {beatname_uc} + |`remote_monitoring_agent` |Send monitoring data to the monitoring cluster |==== . Assign the following role to users who will view the monitoring data in {kib}: - ++ [options="header"] |==== -|Role | Why needed? +|Role | Purpose + |`monitoring_user` |Use *Stack Monitoring* in {kib} to monitor {beatname_uc} |==== +-- endif::serverless[] [[privileges-to-publish-events]] ==== Grant privileges and roles needed for publishing -Users who publish events to {es} need to create and read from {beatname_uc} +Users who publish events to {es} need to create and write to {beatname_uc} indices. To minimize the privileges required by the writer role, you can use the -<> to pre-load dependencies. 
Then turn off -setup options in the {beatname_uc} config file before running {beatname_uc} to -publish events. For example: +<> to pre-load dependencies. ifndef::no_ilm[] -[source,yaml] ----- -setup.template.enabled: false -setup.ilm.check_exists: false -setup.ilm.overwrite: false <1> ----- -<1> Omit `ilm.check_exists` and `ilm.overwrite` if ILM is disabled. -endif::no_ilm[] +When using ILM, turn off the ILM setup check in the {beatname_uc} config file before +running {beatname_uc} to publish events: -ifdef::no_ilm[] [source,yaml] ---- -setup.template.enabled: false +setup.ilm.check_exists: false ---- endif::no_ilm[] To grant the required privileges: -. Create a *writer role*, called something like +{beat_default_index_prefix}_writer+, that has -the following privileges (this list assumes the setup options shown earlier are -set to `false`): +. Create a *writer role*, called something like +{beat_default_index_prefix}_writer+, +that has the following privileges: ++ +NOTE: The `monitor` cluster privilege and the `create_doc` privilege on ++{beat_default_index_prefix}-*+ indices are required in every configuration. + [options="header"] |==== -|Privileges | Why needed? +|Type | Privilege | Purpose ifndef::apm-server[] +|Cluster |`monitor` -|Send monitoring info +|Retrieve cluster details (e.g. version) endif::apm-server[] ifndef::no_ilm[] +|Cluster |`read_ilm` -|Read the ILM policy when connecting to clusters that support ILM +| Read the ILM policy when connecting to clusters that support ILM. +Not needed when `setup.ilm.check_exists` is `false`. endif::no_ilm[] ifeval::["{beatname_lc}"=="filebeat"] -|`manage_pipeline` -|Load ingest pipelines used by modules +|Cluster +|`cluster:admin/ingest/pipeline/get` +|Check for ingest pipelines used by modules. Needed when using modules. endif::[] +|Index +|`create_doc` on +{beat_default_index_prefix}-*+ indices +|Write events into {es} + ifndef::no_ilm[] +|Index |`view_index_metadata` on +{beat_default_index_prefix}-*+ indices -|Check for alias when connecting to clusters that support ILM +|Check for alias when connecting to clusters that support ILM. +Not needed when `setup.ilm.check_exists` is `false`. endif::no_ilm[] -|`index` on +{beat_default_index_prefix}-*+ indices -|Index events into {es} - +|Index |`create_index` on +{beat_default_index_prefix}-*+ indices -|Create daily indices when connecting to clusters that do not support ILM +|Create daily indices when connecting to clusters that do not support ILM. +Not needed when using ILM. |==== ifndef::apm-server[] + Omit any privileges that aren't relevant in your environment. endif::apm-server[] -. Assign the *writer role* to users who will index events into {es}. +. Assign the *writer role* to users who will index events into {es}. [[kibana-user-privileges]] ==== Grant privileges and roles needed to read {beatname_uc} data @@ -278,8 +292,9 @@ the following privilege: + [options="header"] |==== -|Privilege | Why needed? +|Type | Privilege | Purpose +|Index |`read` on +{beat_default_index_prefix}-*+ indices |Read data indexed by {beatname_uc} |==== @@ -289,7 +304,7 @@ users who need to read {beatname_uc} data: + [options="header"] |==== -|Roles | Why needed? +|Role | Purpose |`kibana_user` or `kibana_dashboard_only_user` |Use {kib}. `kibana_dashboard_only_user` grants read-only access to dashboards. @@ -310,10 +325,11 @@ data: + [options="header"] |==== -|Roles | Why needed? 
+|Role | Purpose |`kibana_user` and `apm_user` |Use the APM UI + |`admin` |Read and update APM Agent configuration via Kibana |==== diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc index 1d91914b..c7396865 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-autodiscover.asciidoc @@ -179,14 +179,15 @@ contain variables from the autodiscover event. They can be accessed under data n The `kubernetes` autodiscover provider has the following configuration settings: -`host`:: (Optional) Identify the node where {beatname_lc} is running in case it +`host`:: (Optional) Specify the node to scope {beatname_lc} to in case it cannot be accurately detected, as when running {beatname_lc} in host network mode. `namespace`:: (Optional) Select the namespace from which to collect the metadata. If it is not set, the processor collects metadata from all namespaces. It is unset by default. `kube_config`:: (Optional) Use given config file as configuration for Kubernetes - client. + client. If `kube_config` is not set, the `KUBECONFIG` environment variable will be + checked, and if not present, it will fall back to InCluster. include::../../{beatname_lc}/docs/autodiscover-kubernetes-config.asciidoc[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc index 370994dd..8a5a9f86 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-beats-attributes.asciidoc @@ -1,8 +1,11 @@ :beatsdevguide: http://www.elastic.co/guide/en/beats/devguide/{branch} -:dashboards: https://artifacts.elastic.co/downloads/beats/beats-dashboards/beats-dashboards-{stack-version}.zip +:dashboards: https://artifacts.elastic.co/downloads/beats/beats-dashboards/beats-dashboards-{version}.zip :dockerimage: docker.elastic.co/beats/{beatname_lc}:{version} :dockerconfig: https://raw.githubusercontent.com/elastic/beats/{branch}/deploy/docker/{beatname_lc}.docker.yml :downloads: https://artifacts.elastic.co/downloads/beats +:libbeat-processors-dir: {beats-root}/libbeat/processors +:libbeat-outputs-dir: {beats-root}/libbeat/outputs +:x-filebeat-processors-dir: {beats-root}/x-pack/filebeat/processors :cm-ui: Central Management :libbeat-docs: Beats Platform Reference diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-brew-install.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-brew-install.asciidoc index 7aed0371..0b11bb85 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-brew-install.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-brew-install.asciidoc @@ -3,7 +3,7 @@ ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released.
endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-central-management.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-central-management.asciidoc index de597ea2..1fb9fc0e 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-central-management.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-central-management.asciidoc @@ -5,7 +5,7 @@ [partintro] -- -beta[] +include::{asciidoc-dir}/../../shared/discontinued.asciidoc[tag=cm-discontinued] [WARNING] ======================================= @@ -38,7 +38,7 @@ include::shared-license-statement.asciidoc[] [role="xpack"] == How central management works -beta[] +include::{asciidoc-dir}/../../shared/discontinued.asciidoc[tag=cm-discontinued] {beats} central management uses a mechanism called configuration tags to group related configurations. You define configuration tags in the {cm-ui} UI in {kib} @@ -105,7 +105,7 @@ the Beat to troubleshoot the problem. [role="xpack"] == Enroll {beats} in central management -beta[] +include::{asciidoc-dir}/../../shared/discontinued.asciidoc[tag=cm-discontinued] You need to enroll {beats} to register them in <> and establish diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-cm-tip.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-cm-tip.asciidoc deleted file mode 100644 index 4e3a67f3..00000000 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-cm-tip.asciidoc +++ /dev/null @@ -1,3 +0,0 @@ -TIP: Starting with {beatname_uc} 6.5, you can define and manage {beatname_uc} -configurations in a central location in {kib}. For more information, see -<>. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc index b8643dc2..0d34a6d3 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-docker.asciidoc @@ -21,7 +21,7 @@ against the Elastic Docker registry. ifeval::["{release-state}"=="unreleased"] -However, version {stack-version} of {beatname_uc} has not yet been +However, version {version} of {beatname_uc} has not yet been released, so no Docker image is currently available for this version. endif::[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-ilm.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-ilm.asciidoc index 8150e8ba..12069381 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-ilm.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-ilm.asciidoc @@ -51,7 +51,7 @@ required license; otherwise, {beatname_uc} creates daily indices. ==== `setup.ilm.rollover_alias` The index lifecycle write alias name. The default is -+{beatname_lc}-%{{beat_version_key}}+. Setting this option changes the alias name. ++{beatname_lc}-%{[{beat_version_key}]}+. Setting this option changes the alias name. NOTE: If you modify this setting after loading the index template, you must overwrite the template to apply the changes. 
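A minimal sketch of overriding the write alias in the Beat configuration (the alias name below is a hypothetical example; any custom name works the same way):

[source,yaml]
----
setup.ilm.rollover_alias: "my-custom-alias"
----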
diff --git a/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc index ef8bd7df..51ab1936 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/shared-template-load.asciidoc @@ -304,7 +304,7 @@ endif::win_only[] ["source","sh",subs="attributes"] ---- -PS > .{backslash}{beatname_lc}.exe export template --es.version {stack-version} | Out-File -Encoding UTF8 {beatname_lc}.template.json +PS > .{backslash}{beatname_lc}.exe export template --es.version {version} | Out-File -Encoding UTF8 {beatname_lc}.template.json ---- endif::win_os[] @@ -315,7 +315,7 @@ ifdef::deb_os,rpm_os[] ["source","sh",subs="attributes"] ---- -curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{stack-version} -d@{beatname_lc}.template.json +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json ---- endif::deb_os,rpm_os[] @@ -324,7 +324,7 @@ ifdef::mac_os[] ["source","sh",subs="attributes"] ---- -curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{stack-version} -d@{beatname_lc}.template.json +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json ---- endif::mac_os[] @@ -333,7 +333,7 @@ ifdef::linux_os[] ["source","sh",subs="attributes"] ---- -curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{stack-version} -d@{beatname_lc}.template.json +curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_template/{beatname_lc}-{version} -d@{beatname_lc}.template.json ---- endif::linux_os[] @@ -344,6 +344,6 @@ endif::win_only[] ["source","sh",subs="attributes"] ---- -PS > Invoke-RestMethod -Method Put -ContentType "application/json" -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_template/{beatname_lc}-{stack-version} +PS > Invoke-RestMethod -Method Put -ContentType "application/json" -InFile {beatname_lc}.template.json -Uri http://localhost:9200/_template/{beatname_lc}-{version} ---- endif::win_os[] diff --git a/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc index b93ad315..9889997c 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/template-config.asciidoc @@ -69,11 +69,6 @@ setup.template.settings: index.number_of_replicas: 1 ---------------------------------------------------------------------- -NOTE: If you want to use {ref}/xpack-ccr.html[{ccr}] to replicate {beatname_uc} -indices to another cluster, you will need to add additional template settings to -{ref}/ccr-requirements.html#ccr-overview-beats[enable soft deletes] on the -underlying indices. - *`setup.template.settings._source`*:: A dictionary of settings for the `_source` field. For the available settings, please see the Elasticsearch {ref}/mapping-source-field.html[reference]. 
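For example, a minimal sketch of one documented `_source` setting, disabling storage of the raw event source (assumes the default template setup is otherwise in use):

[source,yaml]
----
setup.template.settings:
  _source:
    enabled: false
----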
+ diff --git a/vendor/github.com/elastic/beats/libbeat/docs/upgrading.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/upgrading.asciidoc index 77b9e252..ea3fb430 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/upgrading.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/upgrading.asciidoc @@ -32,8 +32,8 @@ If you're upgrading other products in the stack, also read the {stack-ref}/index.html[Elastic Stack Installation and Upgrade Guide]. We recommend that you fully upgrade {es} and {kib} to version 7.0 -before upgrading {beats}. If you're on {beats} 6.0 through 6.6, -upgrade the {stack} and {beats} to version 6.7 *before* proceeding with the +before upgrading {beats}. If you're on {beats} 6.0 through 6.7, +upgrade the {stack} and {beats} to version 6.8 *before* proceeding with the 7.0 upgrade. Upgrading between non-consecutive major versions (e.g. 5.x to 7.x) is not @@ -42,19 +42,19 @@ supported. IMPORTANT: Please read through all upgrade steps before proceeding. These steps are required before running the software for the first time. -[[upgrading-to-6.7]] -==== Upgrade to {beats} 6.7 before upgrading to 7.0 +[[upgrading-to-6.8]] +==== Upgrade to {beats} 6.8 before upgrading to 7.0 -The upgrade procedure assumes that you have {beats} 6.7 installed. If you're on -a previous 6.x version of {beats}, upgrade to version 6.7 first. If you're using +The upgrade procedure assumes that you have {beats} 6.8 installed. If you're on +a previous 6.x version of {beats}, upgrade to version 6.8 first. If you're using other products in the {stack}, upgrade {beats} as part of the {stack-ref}/upgrading-elastic-stack.html[{stack} upgrade process]. -Upgrading to 6.7 is required because the {es} index template was modified to +Upgrading to 6.8 is required because the {es} index template was modified to be compatible with {es} 7.0 (the `_type` setting changed from `doc` to `_doc`). -After upgrading to 6.7, use the {ref}/indices-templates.html#getting[Index -Template API] to verify that the 6.7 index template has been created in {ES}. +After upgrading to 6.8, use the {ref}/indices-templates.html#getting[Index +Template API] to verify that the 6.8 index template has been created in {ES}. :asset: the index template :option: template @@ -65,7 +65,7 @@ NOTE: In previous versions, we advised users to manually force loading of the index template. This is no longer recommended. Use the `setup` command instead. *Metricbeat and Filebeat users:* If you use {beats} central management, -make sure you migrate the {beats} central management index to 6.7 before you +make sure you migrate the {beats} central management index to 6.8 before you upgrade to 7.0. Although central management is not a GA-level feature in 7.0, we've provided a migration tool to help you migrate your configurations from version 6.6 to 6.7 or later. For more information, see the @@ -180,14 +180,14 @@ That's OK. See the clean-up steps described under <>. ==== Upgrade the {es} index template Index templates and the default index names are versioned. For example, -{metricbeat} {stack-version} typically creates indices like this: +{metricbeat} {version} typically creates indices like this: ["source","sh",subs="attributes"] ------------------------------------------------------------------------------ -metricbeat-{stack-version}-2019.04.02 +metricbeat-{version}-2019.04.02 ------------------------------------------------------------------------------ -And the corresponding {es} index template is named +metricbeat-{stack-version}+. 
+And the corresponding {es} index template is named +metricbeat-{version}+. This means that each version of the Beat creates a new index, and it's guaranteed that the correct index template for that version is applied. With @@ -195,7 +195,7 @@ these changes in place, you generally don't have to do anything to upgrade the index template when you move to a new version. Just load the new version of the index template *before* ingesting any data into {es}. -If you plan to run {beats} 6.7 and 7.0 in parallel, make sure you +If you plan to run {beats} 6.7 or higher and 7.0 in parallel, make sure you <> *before* you load the index template. @@ -237,7 +237,7 @@ allow {beats} to load and manage the index template, as described under IMPORTANT: The index name must match the index pattern in the {beats} index template. For example, if {ls} sends events to an index called +metricbeat-7-2019.04.02+, but the index template expects indices to match -+metricbeat-{stack-version}-*+, you may encounter mapping errors and be unable ++metricbeat-{version}-*+, you may encounter mapping errors and be unable to index {beats} events. ==== Upgrade dashboards @@ -310,12 +310,12 @@ up and start again: + ["source","sh",subs="attributes"] ---- -DELETE metricbeat-{stack-version}-2019.04.02* +DELETE metricbeat-{version}-2019.04.02* ---- + WARNING: Be careful when using wildcards to delete indices. Make sure the pattern matches only the indices you want to delete. The example shown here -deletes all data indexed into the metricbeat-{stack-version} indices on +deletes all data indexed into the metricbeat-{version} indices on 2019.04.02. . If you want the index to work with 6.x dashboards, turn on the compatibility @@ -325,7 +325,7 @@ layer. See <>. + ["source","sh",subs="attributes"] ---- -DELETE /_template/metricbeat-{stack-version} +DELETE /_template/metricbeat-{version} ---- + Because the index template was loaded without the compatibility layer enabled, diff --git a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc index fdbecbf4..ee2abca6 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/docs/version.asciidoc @@ -1,10 +1,7 @@ -:stack-version: 7.4.1 -:doc-branch: 7.4 -:go-version: 1.12.9 +:stack-version: 7.5.1 +:doc-branch: 7.5 +:go-version: 1.12.12 :release-state: released :python: 2.7.9 :docker: 1.12 :docker-compose: 1.11 -:branch: 7.4 -:major-version: 7.x -:ecs_version: 1.1 diff --git a/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/ilm.go b/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/ilm.go index 07c92421..84ca92c4 100644 --- a/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/ilm.go +++ b/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/ilm.go @@ -142,23 +142,9 @@ func NoopSupport(_ *logp.Logger, info beat.Info, config *common.Config) (Support } func applyStaticFmtstr(info beat.Info, fmt *fmtstr.EventFormatString) (string, error) { - return fmt.Run(&beat.Event{ - Fields: common.MapStr{ - // beat object was left in for backward compatibility reason for older configs. 
- "beat": common.MapStr{ - "name": info.Beat, - "version": info.Version, - }, - "agent": common.MapStr{ - "name": info.Beat, - "version": info.Version, - }, - // For the Beats that have an observer role - "observer": common.MapStr{ - "name": info.Beat, - "version": info.Version, - }, - }, - Timestamp: time.Now(), - }) + return fmt.Run( + &beat.Event{ + Fields: fmtstr.FieldsForBeat(info.Beat, info.Version), + Timestamp: time.Now(), + }) } diff --git a/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/std.go b/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/std.go index fb057cba..b4f0c41b 100644 --- a/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/std.go +++ b/vendor/github.com/elastic/beats/libbeat/idxmgmt/ilm/std.go @@ -102,6 +102,10 @@ func (m *stdManager) Enabled() (bool, error) { } func (m *stdManager) EnsureAlias() error { + if !m.checkExists { + return nil + } + b, err := m.client.HasAlias(m.alias.Name) if err != nil { return err diff --git a/vendor/github.com/elastic/beats/libbeat/idxmgmt/std.go b/vendor/github.com/elastic/beats/libbeat/idxmgmt/std.go index 13d1a201..f6951d72 100644 --- a/vendor/github.com/elastic/beats/libbeat/idxmgmt/std.go +++ b/vendor/github.com/elastic/beats/libbeat/idxmgmt/std.go @@ -54,12 +54,16 @@ type indexManager struct { assets Asseter } -type indexSelector outil.Selector +type indexSelector struct { + sel outil.Selector + beatInfo beat.Info +} type ilmIndexSelector struct { - index outil.Selector - alias outil.Selector - st *indexState + index outil.Selector + alias outil.Selector + st *indexState + beatInfo beat.Info } type componentType uint8 @@ -201,7 +205,7 @@ func (s *indexSupport) BuildSelector(cfg *common.Config) (outputs.IndexSelector, } if mode != ilm.ModeAuto { - return indexSelector(indexSel), nil + return indexSelector{indexSel, s.info}, nil } selCfg.SetString("index", -1, alias) @@ -321,7 +325,7 @@ func (m *indexManager) setupWithILM() (bool, error) { } func (s *ilmIndexSelector) Select(evt *beat.Event) (string, error) { - if idx := getEventCustomIndex(evt); idx != "" { + if idx := getEventCustomIndex(evt, s.beatInfo); idx != "" { return idx, nil } @@ -335,13 +339,13 @@ func (s *ilmIndexSelector) Select(evt *beat.Event) (string, error) { } func (s indexSelector) Select(evt *beat.Event) (string, error) { - if idx := getEventCustomIndex(evt); idx != "" { + if idx := getEventCustomIndex(evt, s.beatInfo); idx != "" { return idx, nil } - return outil.Selector(s).Select(evt) + return s.sel.Select(evt) } -func getEventCustomIndex(evt *beat.Event) string { +func getEventCustomIndex(evt *beat.Event, beatInfo beat.Info) string { if len(evt.Meta) == 0 { return "" } @@ -360,6 +364,16 @@ func getEventCustomIndex(evt *beat.Event) string { } } + // This is functionally identical to Meta["alias"], returning the overriding + // metadata as the index name if present. It is currently used by Filebeat + // to send the index for particular inputs to formatted string templates, + // which are then expanded by a processor to the "raw_index" field. 
+ if tmp := evt.Meta["raw_index"]; tmp != nil { + if idx, ok := tmp.(string); ok { + return idx + } + } + return "" } diff --git a/vendor/github.com/elastic/beats/libbeat/kibana/client.go b/vendor/github.com/elastic/beats/libbeat/kibana/client.go index 30082807..a690180d 100644 --- a/vendor/github.com/elastic/beats/libbeat/kibana/client.go +++ b/vendor/github.com/elastic/beats/libbeat/kibana/client.go @@ -194,9 +194,6 @@ func (conn *Connection) Send(method, extraPath string, req.Header.Set("Content-Type", "application/json") req.Header.Add("Accept", "application/json") req.Header.Set("kbn-xsrf", "1") - if method != "GET" { - req.Header.Set("kbn-version", conn.Version.String()) - } for header, values := range headers { for _, value := range values { diff --git a/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go b/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go index 585c80d5..d2833d4b 100644 --- a/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go +++ b/vendor/github.com/elastic/beats/libbeat/metric/system/memory/memory.go @@ -20,8 +20,12 @@ package memory import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" + sysinfo "github.com/elastic/go-sysinfo" + sysinfotypes "github.com/elastic/go-sysinfo/types" sigar "github.com/elastic/gosigar" ) @@ -148,3 +152,20 @@ func AddHugeTLBPagesPercentage(s *HugeTLBPagesStat) { perc := float64(s.Total-s.Free+s.Reserved) / float64(s.Total) s.UsedPercent = common.Round(perc, common.DefaultDecimalPlacesCount) } + +// GetVMStat gets linux vmstat metrics +func GetVMStat() (*sysinfotypes.VMStatInfo, error) { + h, err := sysinfo.Host() + if err != nil { + return nil, errors.Wrap(err, "failed to read self process information") + } + if vmstatHandle, ok := h.(sysinfotypes.VMStat); ok { + info, err := vmstatHandle.VMStat() + if err != nil { + return nil, errors.Wrap(err, "error getting VMStat info") + } + return info, nil + } + return nil, nil + +} diff --git a/vendor/github.com/elastic/beats/libbeat/monitoring/monitoring.go b/vendor/github.com/elastic/beats/libbeat/monitoring/monitoring.go index e7f13fb6..ffd21cb1 100644 --- a/vendor/github.com/elastic/beats/libbeat/monitoring/monitoring.go +++ b/vendor/github.com/elastic/beats/libbeat/monitoring/monitoring.go @@ -97,14 +97,40 @@ func SelectConfig(beatCfg BeatConfig) (*common.Config, *report.Settings, error) return monitoringCfg, &report.Settings{Format: report.FormatXPackMonitoringBulk}, nil case beatCfg.Monitoring.Enabled(): monitoringCfg := beatCfg.Monitoring - var info struct { - ClusterUUID string `config:"cluster_uuid"` - } - if err := monitoringCfg.Unpack(&info); err != nil { - return nil, nil, err - } - return monitoringCfg, &report.Settings{Format: report.FormatBulk, ClusterUUID: info.ClusterUUID}, nil + return monitoringCfg, &report.Settings{Format: report.FormatBulk}, nil default: return nil, nil, nil } } + +// GetClusterUUID returns the value of the monitoring.cluster_uuid setting, if it is set. +func GetClusterUUID(monitoringCfg *common.Config) (string, error) { + if monitoringCfg == nil { + return "", nil + } + + var config struct { + ClusterUUID string `config:"cluster_uuid"` + } + if err := monitoringCfg.Unpack(&config); err != nil { + return "", err + } + + return config.ClusterUUID, nil +} + +// IsEnabled returns whether the monitoring reporter is enabled or not. 
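+// A config that sets only `cluster_uuid` does not enable the reporter; on its
+// own that field is just used to associate the monitoring data with a cluster.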
+func IsEnabled(monitoringCfg *common.Config) bool { + if monitoringCfg == nil { + return false + } + + // If the only setting in the monitoring config is cluster_uuid, it is + // not enabled + fields := monitoringCfg.GetFields() + if len(fields) == 1 && fields[0] == "cluster_uuid" { + return false + } + + return monitoringCfg.Enabled() +} diff --git a/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go b/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go index 3169c988..6f47c6b6 100644 --- a/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go +++ b/vendor/github.com/elastic/beats/libbeat/monitoring/report/elasticsearch/client.go @@ -20,6 +20,7 @@ package elasticsearch import ( "encoding/json" "fmt" + "net/http" "time" "github.com/pkg/errors" @@ -32,6 +33,8 @@ import ( "github.com/elastic/beats/libbeat/testing" ) +var createDocPrivAvailableESVersion = common.MustNewVersion("7.5.0") + type publishClient struct { es *esout.Client params map[string]string @@ -185,13 +188,19 @@ func (c *publishClient) publishBulk(event publisher.Event, typ string) error { "_routing": nil, } - if c.es.GetVersion().Major < 7 { + esVersion := c.es.GetVersion() + if esVersion.Major < 7 { meta["_type"] = "doc" } - action := common.MapStr{ - "index": meta, + action := common.MapStr{} + var opType string + if esVersion.LessThan(createDocPrivAvailableESVersion) { + opType = "index" + } else { + opType = "create" } + action[opType] = meta event.Content.Fields.Put("timestamp", event.Content.Timestamp) @@ -221,7 +230,12 @@ func (c *publishClient) publishBulk(event publisher.Event, typ string) error { // Currently one request per event is sent. Reason is that each event can contain different // interval params and X-Pack requires to send the interval param. 
// FIXME: index name (first param below) - _, err = c.es.BulkWith(getMonitoringIndexName(), "", nil, nil, bulk[:]) + result, err := c.es.BulkWith(getMonitoringIndexName(), "", nil, nil, bulk[:]) + if err != nil { + return err + } + + logBulkFailures(result, []report.Event{document}) return err } @@ -230,3 +244,26 @@ func getMonitoringIndexName() string { date := time.Now().Format("2006.01.02") return fmt.Sprintf(".monitoring-beats-%v-%s", version, date) } + +func logBulkFailures(result esout.BulkResult, events []report.Event) { + reader := esout.NewJSONReader(result) + err := esout.BulkReadToItems(reader) + if err != nil { + logp.Err("failed to parse monitoring bulk items: %v", err) + return + } + + for i := range events { + status, msg, err := esout.BulkReadItemStatus(reader) + if err != nil { + logp.Err("failed to parse monitoring bulk item status: %v", err) + return + } + switch { + case status < 300, status == http.StatusConflict: + continue + default: + logp.Warn("monitoring bulk item insert failed (i=%v, status=%v): %s", i, status, msg) + } + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/change-output-codec.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/codec/docs/codec.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/change-output-codec.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/codec/docs/codec.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/codec/docs/placeholder.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/codec/docs/placeholder.asciidoc new file mode 100644 index 00000000..073164e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/outputs/codec/docs/placeholder.asciidoc @@ -0,0 +1,2 @@ +Placeholder file to support adding a new resource to the conf.yaml in the docs +repo. diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-console.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/console/docs/console.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-console.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/console/docs/console.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/api_integration_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/api_integration_test.go index 31903cf6..44c46365 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/api_integration_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/api_integration_test.go @@ -149,7 +149,7 @@ func TestIngest(t *testing.T) { } // get _source field from indexed document - _, docBody, err := client.apiCall("GET", index, "test", "1/_source", "", nil, nil) + _, docBody, err := client.apiCall("GET", index, "", "_source/1", "", nil, nil) if err != nil { t.Fatal(err) } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi.go index f013c2a1..48c17462 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi.go @@ -19,6 +19,7 @@ package elasticsearch import ( "bytes" + "encoding/json" "io" "io/ioutil" "net/http" @@ -34,16 +35,15 @@ type bulkRequest struct { requ *http.Request } -type bulkResult struct { - raw []byte -} +// BulkResult contains the result of a bulk API request. 
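+// It is kept as raw JSON so callers can parse it incrementally, as
+// logBulkFailures does with NewJSONReader, BulkReadToItems, and
+// BulkReadItemStatus.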
+type BulkResult json.RawMessage // Bulk performs many index/delete operations in a single API call. // Implements: http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html func (conn *Connection) Bulk( index, docType string, params map[string]string, body []interface{}, -) (*QueryResult, error) { +) (BulkResult, error) { return conn.BulkWith(index, docType, params, nil, body) } @@ -56,7 +56,7 @@ func (conn *Connection) BulkWith( params map[string]string, metaBuilder MetaBuilder, body []interface{}, -) (*QueryResult, error) { +) (BulkResult, error) { if len(body) == 0 { return nil, nil } @@ -76,7 +76,7 @@ func (conn *Connection) BulkWith( if err != nil { return nil, err } - return readQueryResult(result.raw) + return result, nil } // SendMonitoringBulk creates a HTTP request to the X-Pack Monitoring API containing a bunch of @@ -85,7 +85,7 @@ func (conn *Connection) BulkWith( func (conn *Connection) SendMonitoringBulk( params map[string]string, body []interface{}, -) (*QueryResult, error) { +) (BulkResult, error) { if len(body) == 0 { return nil, nil } @@ -111,7 +111,7 @@ func (conn *Connection) SendMonitoringBulk( if err != nil { return nil, err } - return readQueryResult(result.raw) + return result, nil } func newBulkRequest( @@ -199,18 +199,9 @@ func (r *bulkRequest) Reset(body bodyEncoder) { body.AddHeader(&r.requ.Header) } -func (conn *Connection) sendBulkRequest(requ *bulkRequest) (int, bulkResult, error) { +func (conn *Connection) sendBulkRequest(requ *bulkRequest) (int, BulkResult, error) { status, resp, err := conn.execHTTPRequest(requ.requ) - if err != nil { - return status, bulkResult{}, err - } - - result, err := readBulkResult(resp) - return status, result, err -} - -func readBulkResult(obj []byte) (bulkResult, error) { - return bulkResult{obj}, nil + return status, BulkResult(resp), err } func bulkEncode(out bulkWriter, metaBuilder MetaBuilder, body []interface{}) error { diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi_mock_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi_mock_test.go index 2d0efa2f..ca927db6 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi_mock_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/bulkapi_mock_test.go @@ -20,7 +20,6 @@ package elasticsearch import ( - "encoding/json" "fmt" "net/http" "os" @@ -34,7 +33,7 @@ func TestOneHostSuccessResp_Bulk(t *testing.T) { logp.TestingSetup(logp.WithSelectors("elasticsearch")) index := fmt.Sprintf("packetbeat-unittest-%d", os.Getpid()) - expectedResp, _ := json.Marshal(QueryResult{Ok: true, Index: index, Type: "type1", ID: "1", Version: 1, Created: true}) + expectedResp := []byte(`{"took":7,"errors":false,"items":[]}`) ops := []map[string]interface{}{ { @@ -61,13 +60,10 @@ func TestOneHostSuccessResp_Bulk(t *testing.T) { params := map[string]string{ "refresh": "true", } - resp, err := client.Bulk(index, "type1", params, body) + _, err := client.Bulk(index, "type1", params, body) if err != nil { t.Errorf("Bulk() returns error: %s", err) } - if !resp.Created { - t.Errorf("Bulk() fails: %s", resp) - } } func TestOneHost500Resp_Bulk(t *testing.T) { diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go index 421e9a2d..b8a74f97 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go +++ 
b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client.go @@ -52,7 +52,7 @@ type Client struct { bulkRequ *bulkRequest // buffered json response reader - json jsonReader + json JSONReader // additional configs compressionLevel int @@ -125,6 +125,7 @@ var ( ) var ( + errExpectedItemsArray = errors.New("expected items array") errExpectedItemObject = errors.New("expected item response object") errExpectedStatusCode = errors.New("expected item status code") errUnexpectedEmptyObject = errors.New("empty object") @@ -327,7 +328,7 @@ func (client *Client) publishEvents( } origCount := len(data) - data = bulkEncodePublishRequest(body, client.index, client.pipeline, eventType, data) + data = bulkEncodePublishRequest(client.GetVersion(), body, client.index, client.pipeline, eventType, data) newCount := len(data) if st != nil && origCount > newCount { st.Dropped(origCount - newCount) @@ -355,7 +356,7 @@ func (client *Client) publishEvents( failedEvents = data stats.fails = len(failedEvents) } else { - client.json.init(result.raw) + client.json.init(result) failedEvents, stats = bulkCollectPublishFails(&client.json, data) } @@ -384,6 +385,7 @@ func (client *Client) publishEvents( // fillBulkRequest encodes all bulk requests and returns slice of events // successfully added to bulk request. func bulkEncodePublishRequest( + version common.Version, body bulkWriter, index outputs.IndexSelector, pipeline *outil.Selector, @@ -393,7 +395,7 @@ func bulkEncodePublishRequest( okEvents := data[:0] for i := range data { event := &data[i].Content - meta, err := createEventBulkMeta(index, pipeline, eventType, event) + meta, err := createEventBulkMeta(version, index, pipeline, eventType, event) if err != nil { logp.Err("Failed to encode event meta data: %s", err) continue @@ -409,6 +411,7 @@ func bulkEncodePublishRequest( } func createEventBulkMeta( + version common.Version, indexSel outputs.IndexSelector, pipelineSel *outil.Selector, eventType string, @@ -444,7 +447,7 @@ func createEventBulkMeta( ID: id, } - if id != "" { + if id != "" || version.Major > 7 || (version.Major == 7 && version.Minor >= 5) { return bulkCreateAction{meta}, nil } return bulkIndexAction{meta}, nil @@ -471,38 +474,11 @@ func getPipeline(event *beat.Event, pipelineSel *outil.Selector) (string, error) // event failed due to some error in the event itself (e.g. does not respect mapping), // the event will be dropped. 
func bulkCollectPublishFails( - reader *jsonReader, + reader *JSONReader, data []publisher.Event, ) ([]publisher.Event, bulkResultStats) { - if err := reader.expectDict(); err != nil { - logp.Err("Failed to parse bulk response: expected JSON object") - return nil, bulkResultStats{} - } - - // find 'items' field in response - for { - kind, name, err := reader.nextFieldName() - if err != nil { - logp.Err("Failed to parse bulk response") - return nil, bulkResultStats{} - } - - if kind == dictEnd { - logp.Err("Failed to parse bulk response: no 'items' field in response") - return nil, bulkResultStats{} - } - - // found items array -> continue - if bytes.Equal(name, nameItems) { - break - } - - reader.ignoreNext() - } - - // check items field is an array - if err := reader.expectArray(); err != nil { - logp.Err("Failed to parse bulk response: expected items array") + if err := BulkReadToItems(reader); err != nil { + logp.Err("failed to parse bulk response: %v", err.Error()) return nil, bulkResultStats{} } @@ -510,7 +486,7 @@ func bulkCollectPublishFails( failed := data[:0] stats := bulkResultStats{} for i := 0; i < count; i++ { - status, msg, err := itemStatus(reader) + status, msg, err := BulkReadItemStatus(reader) if err != nil { return nil, bulkResultStats{} } @@ -546,9 +522,43 @@ func bulkCollectPublishFails( return failed, stats } -func itemStatus(reader *jsonReader) (int, []byte, error) { +// BulkReadToItems reads the bulk response up to (but not including) items +func BulkReadToItems(reader *JSONReader) error { + if err := reader.ExpectDict(); err != nil { + return errExpectedObject + } + + // find 'items' field in response + for { + kind, name, err := reader.nextFieldName() + if err != nil { + return err + } + + if kind == dictEnd { + return errExpectedItemsArray + } + + // found items array -> continue + if bytes.Equal(name, nameItems) { + break + } + + reader.ignoreNext() + } + + // check items field is an array + if err := reader.ExpectArray(); err != nil { + return errExpectedItemsArray + } + + return nil +} + +// BulkReadItemStatus reads the status and error fields from the bulk item +func BulkReadItemStatus(reader *JSONReader) (int, []byte, error) { // skip outer dictionary - if err := reader.expectDict(); err != nil { + if err := reader.ExpectDict(); err != nil { return 0, nil, errExpectedItemObject } @@ -586,8 +596,8 @@ func itemStatus(reader *jsonReader) (int, []byte, error) { return status, msg, nil } -func itemStatusInner(reader *jsonReader) (int, []byte, error) { - if err := reader.expectDict(); err != nil { +func itemStatusInner(reader *JSONReader) (int, []byte, error) { + if err := reader.ExpectDict(); err != nil { return 0, nil, errExpectedItemObject } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go index fb690c23..571c1e14 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_integration_test.go @@ -20,11 +20,17 @@ package elasticsearch import ( + "io/ioutil" "math/rand" + "net" + "net/http" + "net/http/httptest" + "net/url" "testing" "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" @@ -42,6 +48,36 @@ func TestClientConnect(t *testing.T) { assert.NoError(t, err) } +func 
TestClientConnectWithProxy(t *testing.T) { + wrongPort, err := net.Listen("tcp", "localhost:0") + require.NoError(t, err) + go func() { + c, err := wrongPort.Accept() + if err == nil { + // Provoke an early-EOF error on client + c.Close() + } + }() + defer wrongPort.Close() + + proxy := startTestProxy(t, internal.GetURL()) + defer proxy.Close() + + // Use connectTestEs instead of getTestingElasticsearch to make use of makeES + _, client := connectTestEs(t, map[string]interface{}{ + "hosts": "http://" + wrongPort.Addr().String(), + "timeout": 5, // seconds + }) + assert.Error(t, client.Connect(), "it should fail without proxy") + + _, client = connectTestEs(t, map[string]interface{}{ + "hosts": "http://" + wrongPort.Addr().String(), + "proxy_url": proxy.URL, + "timeout": 5, // seconds + }) + assert.NoError(t, client.Connect()) +} + func TestClientPublishEvent(t *testing.T) { index := "beat-int-pub-single-event" output, client := connectTestEs(t, map[string]interface{}{ @@ -305,3 +341,33 @@ func randomClient(grp outputs.Group) outputs.NetworkClient { client := grp.Clients[rand.Intn(L)] return client.(outputs.NetworkClient) } + +// startTestProxy starts a proxy that redirects all connections to the specified URL +func startTestProxy(t *testing.T, redirectURL string) *httptest.Server { + t.Helper() + + realURL, err := url.Parse(redirectURL) + require.NoError(t, err) + + proxy := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + req, err := http.NewRequest(r.Method, r.URL.String(), r.Body) + require.NoError(t, err) + req.URL.Scheme = realURL.Scheme + req.URL.Host = realURL.Host + req.Header = r.Header + + resp, err := http.DefaultClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + for _, header := range []string{"Content-Encoding", "Content-Type"} { + w.Header().Set(header, resp.Header.Get(header)) + } + w.WriteHeader(resp.StatusCode) + w.Write(body) + })) + return proxy +} diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_test.go index 27825c65..d2b8affd 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/client_test.go @@ -41,8 +41,8 @@ import ( ) func readStatusItem(in []byte) (int, string, error) { - reader := newJSONReader(in) - code, msg, err := itemStatus(reader) + reader := NewJSONReader(in) + code, msg, err := BulkReadItemStatus(reader) return code, string(msg), err } @@ -102,7 +102,7 @@ func TestCollectPublishFailsNone(t *testing.T) { events[i] = publisher.Event{Content: beat.Event{Fields: event}} } - reader := newJSONReader(response) + reader := NewJSONReader(response) res, _ := bulkCollectPublishFails(reader, events) assert.Equal(t, 0, len(res)) } @@ -120,7 +120,7 @@ func TestCollectPublishFailMiddle(t *testing.T) { eventFail := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 2}}} events := []publisher.Event{event, eventFail, event} - reader := newJSONReader(response) + reader := NewJSONReader(response) res, stats := bulkCollectPublishFails(reader, events) assert.Equal(t, 1, len(res)) if len(res) == 1 { @@ -141,7 +141,7 @@ func TestCollectPublishFailAll(t *testing.T) { event := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 2}}} events := []publisher.Event{event, event, event} - reader := 
newJSONReader(response) + reader := NewJSONReader(response) res, stats := bulkCollectPublishFails(reader, events) assert.Equal(t, 3, len(res)) assert.Equal(t, events, res) @@ -183,7 +183,7 @@ func TestCollectPipelinePublishFail(t *testing.T) { event := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 2}}} events := []publisher.Event{event} - reader := newJSONReader(response) + reader := NewJSONReader(response) res, _ := bulkCollectPublishFails(reader, events) assert.Equal(t, 1, len(res)) assert.Equal(t, events, res) @@ -201,7 +201,7 @@ func BenchmarkCollectPublishFailsNone(b *testing.B) { event := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 1}}} events := []publisher.Event{event, event, event} - reader := newJSONReader(nil) + reader := NewJSONReader(nil) for i := 0; i < b.N; i++ { reader.init(response) res, _ := bulkCollectPublishFails(reader, events) @@ -224,7 +224,7 @@ func BenchmarkCollectPublishFailMiddle(b *testing.B) { eventFail := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 2}}} events := []publisher.Event{event, eventFail, event} - reader := newJSONReader(nil) + reader := NewJSONReader(nil) for i := 0; i < b.N; i++ { reader.init(response) res, _ := bulkCollectPublishFails(reader, events) @@ -246,7 +246,7 @@ func BenchmarkCollectPublishFailAll(b *testing.B) { event := publisher.Event{Content: beat.Event{Fields: common.MapStr{"field": 2}}} events := []publisher.Event{event, event, event} - reader := newJSONReader(nil) + reader := NewJSONReader(nil) for i := 0; i < b.N; i++ { reader.init(response) res, _ := bulkCollectPublishFails(reader, events) @@ -265,7 +265,9 @@ func TestClientWithHeaders(t *testing.T) { // For incoming requests, the Host header is promoted to the // Request.Host field and removed from the Header map. 
assert.Equal(t, "myhost.local", r.Host) - fmt.Fprintln(w, "Hello, client") + + bulkResponse := `{"items":[{"index":{}},{"index":{}},{"index":{}}]}` + fmt.Fprintln(w, bulkResponse) requestCount++ })) defer ts.Close() @@ -388,7 +390,7 @@ func TestBulkEncodeEvents(t *testing.T) { recorder := &testBulkRecorder{} - encoded := bulkEncodePublishRequest(recorder, index, pipeline, test.docType, events) + encoded := bulkEncodePublishRequest(common.Version{Major: 7, Minor: 5}, recorder, index, pipeline, test.docType, events) assert.Equal(t, len(events), len(encoded), "all events should have been encoded") assert.False(t, recorder.inAction, "incomplete bulk") @@ -427,3 +429,55 @@ func (r *testBulkRecorder) AddRaw(raw interface{}) error { r.inAction = !r.inAction return nil } + +func TestBulkReadToItems(t *testing.T) { + response := []byte(`{ + "errors": false, + "items": [ + {"create": {"status": 200}}, + {"create": {"status": 300}}, + {"create": {"status": 400}} + ]}`) + + reader := NewJSONReader(response) + + err := BulkReadToItems(reader) + assert.NoError(t, err) + + for status := 200; status <= 400; status += 100 { + err = reader.ExpectDict() + assert.NoError(t, err) + + kind, raw, err := reader.nextFieldName() + assert.NoError(t, err) + assert.Equal(t, mapKeyEntity, kind) + assert.Equal(t, []byte("create"), raw) + + err = reader.ExpectDict() + assert.NoError(t, err) + + kind, raw, err = reader.nextFieldName() + assert.NoError(t, err) + assert.Equal(t, mapKeyEntity, kind) + assert.Equal(t, []byte("status"), raw) + + code, err := reader.nextInt() + assert.NoError(t, err) + assert.Equal(t, status, code) + + _, _, err = reader.endDict() + assert.NoError(t, err) + + _, _, err = reader.endDict() + assert.NoError(t, err) + } +} + +func TestBulkReadItemStatus(t *testing.T) { + response := []byte(`{"create": {"status": 200}}`) + + reader := NewJSONReader(response) + code, _, err := BulkReadItemStatus(reader) + assert.NoError(t, err) + assert.Equal(t, 200, code) +} diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-elasticsearch.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-elasticsearch.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/docs/elasticsearch.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/elasticsearch.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/elasticsearch.go index ba43e1a0..f7038f43 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/elasticsearch.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/elasticsearch.go @@ -166,7 +166,7 @@ func makeES( var proxyURL *url.URL if !config.ProxyDisable { - proxyURL, err := parseProxyURL(config.ProxyURL) + proxyURL, err = parseProxyURL(config.ProxyURL) if err != nil { return outputs.Fail(err) } @@ -290,7 +290,7 @@ func NewElasticsearchClients(cfg *common.Config) ([]Client, error) { var proxyURL *url.URL if !config.ProxyDisable { - proxyURL, err := parseProxyURL(config.ProxyURL) + proxyURL, err = parseProxyURL(config.ProxyURL) if err != nil { return nil, err } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/json_read.go b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/json_read.go index 5b205f4c..896ec89f 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/json_read.go +++ 
b/vendor/github.com/elastic/beats/libbeat/outputs/elasticsearch/json_read.go @@ -25,12 +25,12 @@ import ( // SAX like json parser. But instead of relying on callbacks, state machine // returns raw item plus entity. On top of state machine additional helper methods -// like expectDict, expectArray, nextFieldName and nextInt are available for +// like ExpectDict, ExpectArray, nextFieldName and nextInt are available for // low-level parsing/stepping through a json document. // // Due to parser simply stepping through the input buffer, almost no additional // allocations are required. -type jsonReader struct { +type JSONReader struct { streambuf.Buffer // parser state machine @@ -133,13 +133,14 @@ func (s state) String() string { return "unknown" } -func newJSONReader(in []byte) *jsonReader { - r := &jsonReader{} +// NewJSONReader returns a new JSONReader initialized with in +func NewJSONReader(in []byte) *JSONReader { + r := &JSONReader{} r.init(in) return r } -func (r *jsonReader) init(in []byte) { +func (r *JSONReader) init(in []byte) { r.Buffer.Init(in, true) r.currentState = startState r.states = r.statesBuf[:0] @@ -147,18 +148,18 @@ func (r *jsonReader) init(in []byte) { var whitespace = []byte(" \t\r\n") -func (r *jsonReader) skipWS() { +func (r *JSONReader) skipWS() { r.IgnoreSymbols(whitespace) } -func (r *jsonReader) pushState(next state) { +func (r *JSONReader) pushState(next state) { if r.currentState != failedState { r.states = append(r.states, r.currentState) } r.currentState = next } -func (r *jsonReader) popState() { +func (r *JSONReader) popState() { if len(r.states) == 0 { r.currentState = failedState } else { @@ -168,7 +169,8 @@ func (r *jsonReader) popState() { } } -func (r *jsonReader) expectDict() error { +// ExpectDict checks if the next entity is a json object +func (r *JSONReader) ExpectDict() error { e, _, err := r.step() if err != nil { @@ -182,7 +184,8 @@ func (r *jsonReader) expectDict() error { return nil } -func (r *jsonReader) expectArray() error { +// ExpectArray checks if the next entity is a json array +func (r *JSONReader) ExpectArray() error { e, _, err := r.step() if err != nil { return err @@ -195,7 +198,7 @@ func (r *jsonReader) expectArray() error { return nil } -func (r *jsonReader) nextFieldName() (entity, []byte, error) { +func (r *JSONReader) nextFieldName() (entity, []byte, error) { e, raw, err := r.step() if err != nil { return e, raw, err @@ -208,7 +211,7 @@ func (r *jsonReader) nextFieldName() (entity, []byte, error) { return e, raw, err } -func (r *jsonReader) nextInt() (int, error) { +func (r *JSONReader) nextInt() (int, error) { e, raw, err := r.step() if err != nil { return 0, err @@ -224,7 +227,7 @@ func (r *jsonReader) nextInt() (int, error) { } // ignore type of next element and return raw content. -func (r *jsonReader) ignoreNext() (raw []byte, err error) { +func (r *JSONReader) ignoreNext() (raw []byte, err error) { r.skipWS() snapshot := r.Snapshot() @@ -253,7 +256,7 @@ func (r *jsonReader) ignoreNext() (raw []byte, err error) { return bytes, nil } -func ignoreKind(r *jsonReader, kind entity) error { +func ignoreKind(r *JSONReader, kind entity) error { for { e, _, err := r.step() if err != nil { @@ -276,7 +279,7 @@ func ignoreKind(r *jsonReader, kind entity) error { } // step continues the JSON parser state machine until next entity has been parsed. 
-func (r *jsonReader) step() (entity, []byte, error) { +func (r *JSONReader) step() (entity, []byte, error) { r.skipWS() switch r.currentState { case failedState: @@ -298,11 +301,11 @@ func (r *jsonReader) step() (entity, []byte, error) { } } -func (r *jsonReader) stepFailing() (entity, []byte, error) { +func (r *JSONReader) stepFailing() (entity, []byte, error) { return failEntity, nil, r.Err() } -func (r *jsonReader) stepStart() (entity, []byte, error) { +func (r *JSONReader) stepStart() (entity, []byte, error) { c, err := r.PeekByte() if err != nil { return r.failWith(err) @@ -311,11 +314,11 @@ func (r *jsonReader) stepStart() (entity, []byte, error) { return r.tryStepPrimitive(c) } -func (r *jsonReader) stepArray() (entity, []byte, error) { +func (r *JSONReader) stepArray() (entity, []byte, error) { return r.doStepArray(true) } -func (r *jsonReader) stepArrayNext() (entity, []byte, error) { +func (r *JSONReader) stepArrayNext() (entity, []byte, error) { c, err := r.PeekByte() if err != nil { return r.failWith(errFailing) @@ -334,7 +337,7 @@ func (r *jsonReader) stepArrayNext() (entity, []byte, error) { } } -func (r *jsonReader) doStepArray(allowArrayEnd bool) (entity, []byte, error) { +func (r *JSONReader) doStepArray(allowArrayEnd bool) (entity, []byte, error) { c, err := r.PeekByte() if err != nil { return r.failWith(err) @@ -351,11 +354,11 @@ func (r *jsonReader) doStepArray(allowArrayEnd bool) (entity, []byte, error) { return r.tryStepPrimitive(c) } -func (r *jsonReader) stepDict() (entity, []byte, error) { +func (r *JSONReader) stepDict() (entity, []byte, error) { return r.doStepDict(true) } -func (r *jsonReader) doStepDict(allowEnd bool) (entity, []byte, error) { +func (r *JSONReader) doStepDict(allowEnd bool) (entity, []byte, error) { c, err := r.PeekByte() if err != nil { return r.failWith(err) @@ -375,7 +378,7 @@ func (r *jsonReader) doStepDict(allowEnd bool) (entity, []byte, error) { } } -func (r *jsonReader) stepDictValue() (entity, []byte, error) { +func (r *JSONReader) stepDictValue() (entity, []byte, error) { c, err := r.PeekByte() if err != nil { return r.failWith(err) @@ -385,7 +388,7 @@ func (r *jsonReader) stepDictValue() (entity, []byte, error) { return r.tryStepPrimitive(c) } -func (r *jsonReader) stepDictValueEnd() (entity, []byte, error) { +func (r *JSONReader) stepDictValueEnd() (entity, []byte, error) { c, err := r.PeekByte() if err != nil { return r.failWith(err) @@ -404,7 +407,7 @@ func (r *jsonReader) stepDictValueEnd() (entity, []byte, error) { } } -func (r *jsonReader) tryStepPrimitive(c byte) (entity, []byte, error) { +func (r *JSONReader) tryStepPrimitive(c byte) (entity, []byte, error) { switch c { case '{': // start dictionary return r.startDict() @@ -432,19 +435,19 @@ func (r *jsonReader) tryStepPrimitive(c byte) (entity, []byte, error) { } } -func (r *jsonReader) stepNull() (entity, []byte, error) { +func (r *JSONReader) stepNull() (entity, []byte, error) { return stepSymbol(r, nullValue, nullSymbol, errExpectedNull) } -func (r *jsonReader) stepTrue() (entity, []byte, error) { +func (r *JSONReader) stepTrue() (entity, []byte, error) { return stepSymbol(r, trueValue, trueSymbol, errExpectedTrue) } -func (r *jsonReader) stepFalse() (entity, []byte, error) { +func (r *JSONReader) stepFalse() (entity, []byte, error) { return stepSymbol(r, falseValue, falseSymbol, errExpectedFalse) } -func stepSymbol(r *jsonReader, e entity, symb []byte, fail error) (entity, []byte, error) { +func stepSymbol(r *JSONReader, e entity, symb []byte, fail error) (entity, 
[]byte, error) { ok, err := r.MatchASCII(symb) if err != nil { return failEntity, nil, err @@ -457,7 +460,7 @@ func stepSymbol(r *jsonReader, e entity, symb []byte, fail error) (entity, []byt return e, nil, nil } -func (r *jsonReader) stepMapKey() (entity, []byte, error) { +func (r *JSONReader) stepMapKey() (entity, []byte, error) { e, key, err := r.stepString() if err != nil { return e, key, err @@ -479,7 +482,7 @@ func (r *jsonReader) stepMapKey() (entity, []byte, error) { return mapKeyEntity, key, nil } -func (r *jsonReader) stepString() (entity, []byte, error) { +func (r *JSONReader) stepString() (entity, []byte, error) { start := 1 for { idxQuote := r.IndexByteFrom(start, '"') @@ -499,36 +502,36 @@ func (r *jsonReader) stepString() (entity, []byte, error) { } } -func (r *jsonReader) startDict() (entity, []byte, error) { +func (r *JSONReader) startDict() (entity, []byte, error) { r.Advance(1) r.pushState(dictState) return dictStart, nil, nil } -func (r *jsonReader) endDict() (entity, []byte, error) { +func (r *JSONReader) endDict() (entity, []byte, error) { r.Advance(1) r.popState() return dictEnd, nil, nil } -func (r *jsonReader) startArray() (entity, []byte, error) { +func (r *JSONReader) startArray() (entity, []byte, error) { r.Advance(1) r.pushState(arrState) return arrStart, nil, nil } -func (r *jsonReader) endArray() (entity, []byte, error) { +func (r *JSONReader) endArray() (entity, []byte, error) { r.Advance(1) r.popState() return arrEnd, nil, nil } -func (r *jsonReader) failWith(err error) (entity, []byte, error) { +func (r *JSONReader) failWith(err error) (entity, []byte, error) { r.currentState = failedState return failEntity, nil, r.SetError(err) } -func (r *jsonReader) stepNumber() (entity, []byte, error) { +func (r *JSONReader) stepNumber() (entity, []byte, error) { snapshot := r.Snapshot() lenBefore := r.Len() isDouble := false diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-file.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/fileout/docs/fileout.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-file.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/fileout/docs/fileout.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go index 68b98f52..cb940244 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/client.go @@ -162,19 +162,27 @@ func (c *client) String() string { func (c *client) getEventMessage(data *publisher.Event) (*message, error) { event := &data.Content msg := &message{partition: -1, data: *data} - if event.Meta != nil { - if value, ok := event.Meta["partition"]; ok { - if partition, ok := value.(int32); ok { - msg.partition = partition - } - } - if value, ok := event.Meta["topic"]; ok { - if topic, ok := value.(string); ok { - msg.topic = topic - } + value, err := data.Cache.GetValue("partition") + if err == nil { + if logp.IsDebug(debugSelector) { + debugf("got event.Meta[\"partition\"] = %v", value) + } + if partition, ok := value.(int32); ok { + msg.partition = partition } } + + value, err = data.Cache.GetValue("topic") + if err == nil { + if logp.IsDebug(debugSelector) { + debugf("got event.Meta[\"topic\"] = %v", value) + } + if topic, ok := value.(string); ok { + msg.topic = topic + } + } + if msg.topic == "" { topic, err := c.topic.Select(event) if err != nil { @@ -184,15 
+192,19 @@ func (c *client) getEventMessage(data *publisher.Event) (*message, error) { return nil, errNoTopicsSelected } msg.topic = topic - if event.Meta == nil { - event.Meta = map[string]interface{}{} + if logp.IsDebug(debugSelector) { + debugf("setting event.Meta[\"topic\"] = %v", topic) + } + if _, err := data.Cache.Put("topic", topic); err != nil { + return nil, fmt.Errorf("setting kafka topic in publisher event failed: %v", err) } - event.Meta["topic"] = topic } serializedEvent, err := c.codec.Encode(c.index, event) if err != nil { - logp.Debug("kafka", "Failed event: %v", event) + if logp.IsDebug(debugSelector) { + debugf("failed event: %v", event) + } return nil, err } @@ -242,11 +254,13 @@ func (r *msgRef) fail(msg *message, err error) { switch err { case sarama.ErrInvalidMessage: logp.Err("Kafka (topic=%v): dropping invalid message", msg.topic) + r.client.observer.Dropped(1) case sarama.ErrMessageSizeTooLarge, sarama.ErrInvalidMessageSize: logp.Err("Kafka (topic=%v): dropping too large message of size %v.", msg.topic, len(msg.key)+len(msg.value)) + r.client.observer.Dropped(1) default: r.failed = append(r.failed, msg.data) diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-kafka.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/docs/kafka.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-kafka.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/kafka/docs/kafka.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go index 0b300b14..37c47856 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka.go @@ -37,9 +37,11 @@ const ( // NOTE: maxWaitRetry has no effect on mode, as logstash client currently does // not return ErrTempBulkFailure defaultMaxWaitRetry = 60 * time.Second + + debugSelector = "kafka" ) -var debugf = logp.MakeDebug("kafka") +var debugf = logp.MakeDebug(debugSelector) var ( errNoTopicSet = errors.New("No topic configured") diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka_integration_test.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka_integration_test.go index 72303356..771a402f 100644 --- a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka_integration_test.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/kafka_integration_test.go @@ -243,35 +243,65 @@ func TestKafkaPublish(t *testing.T) { validate = makeValidateFmtStr(fmt.(string)) } - for i, d := range expected { - validate(t, stored[i].Value, d) + seenMsgs := map[string]struct{}{} + for _, s := range stored { + msg := validate(t, s.Value, expected) + seenMsgs[msg] = struct{}{} } + assert.Equal(t, len(expected), len(seenMsgs)) }) } } -func validateJSON(t *testing.T, value []byte, event beat.Event) { +func validateJSON(t *testing.T, value []byte, events []beat.Event) string { var decoded map[string]interface{} err := json.Unmarshal(value, &decoded) if err != nil { t.Errorf("can not json decode event value: %v", value) - return + return "" } + + msg := decoded["message"].(string) + event := findEvent(events, msg) + if event == nil { + t.Errorf("could not find expected event with message: %v", msg) + return "" + } + assert.Equal(t, decoded["type"], event.Fields["type"]) - assert.Equal(t, decoded["message"], event.Fields["message"]) + + return msg } -func 
makeValidateFmtStr(fmt string) func(*testing.T, []byte, beat.Event) { +func makeValidateFmtStr(fmt string) func(*testing.T, []byte, []beat.Event) string { fmtString := fmtstr.MustCompileEvent(fmt) - return func(t *testing.T, value []byte, event beat.Event) { - expectedMessage, err := fmtString.Run(&event) + return func(t *testing.T, value []byte, events []beat.Event) string { + msg := string(value) + event := findEvent(events, msg) + if event == nil { + t.Errorf("could not find expected event with message: %v", msg) + return "" + } + + _, err := fmtString.Run(event) if err != nil { t.Fatal(err) } - assert.Equal(t, string(expectedMessage), string(value)) + + return msg } } +func findEvent(events []beat.Event, msg string) *beat.Event { + for _, e := range events { + if e.Fields["message"] == msg { + return &e + } + } + + return nil +} + func strDefault(a, defaults string) string { if len(a) == 0 { return defaults @@ -307,7 +337,49 @@ func newTestConsumer(t *testing.T) sarama.Consumer { return consumer } -var testTopicOffsets = map[string]int64{} +// topicOffsetMap is threadsafe map from topic => partition => offset +type topicOffsetMap struct { + m map[string]map[int32]int64 + mu sync.RWMutex +} + +func (m *topicOffsetMap) GetOffset(topic string, partition int32) int64 { + m.mu.RLock() + defer m.mu.RUnlock() + + if m.m == nil { + return sarama.OffsetOldest + } + + topicMap, ok := m.m[topic] + if !ok { + return sarama.OffsetOldest + } + + offset, ok := topicMap[partition] + if !ok { + return sarama.OffsetOldest + } + + return offset +} + +func (m *topicOffsetMap) SetOffset(topic string, partition int32, offset int64) { + m.mu.Lock() + defer m.mu.Unlock() + + if m.m == nil { + m.m = map[string]map[int32]int64{} + } + + if _, ok := m.m[topic]; !ok { + m.m[topic] = map[int32]int64{} + } + + m.m[topic][partition] = offset +} + +var testTopicOffsets = topicOffsetMap{} func testReadFromKafkaTopic( t *testing.T, topic string, nMessages int, @@ -318,31 +390,52 @@ func testReadFromKafkaTopic( consumer.Close() }() - offset, found := testTopicOffsets[topic] - if !found { - offset = sarama.OffsetOldest - } - - partitionConsumer, err := consumer.ConsumePartition(topic, 0, offset) + partitions, err := consumer.Partitions(topic) if err != nil { t.Fatal(err) } - defer func() { - partitionConsumer.Close() - }() - timer := time.After(timeout) + done := make(chan struct{}) + msgs := make(chan *sarama.ConsumerMessage) + for _, partition := range partitions { + offset := testTopicOffsets.GetOffset(topic, partition) + partitionConsumer, err := consumer.ConsumePartition(topic, partition, offset) + if err != nil { + t.Fatal(err) + } + defer func() { + partitionConsumer.Close() + }() + + go func(p int32, pc sarama.PartitionConsumer) { + for { + select { + case msg, ok := <-pc.Messages(): + if !ok { + break + } + testTopicOffsets.SetOffset(topic, p, msg.Offset+1) + msgs <- msg + case <-done: + break + } + } + }(partition, partitionConsumer) + } + var messages []*sarama.ConsumerMessage - for i := 0; i < nMessages; i++ { + timer := time.After(timeout) + + for len(messages) < nMessages { select { - case msg := <-partitionConsumer.Messages(): + case msg := <-msgs: messages = append(messages, msg) - testTopicOffsets[topic] = msg.Offset + 1 case <-timer: break } } + close(done) return messages } diff --git a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/partition.go b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/partition.go index 3ce18c24..6356f60e 100644 --- 
a/vendor/github.com/elastic/beats/libbeat/outputs/kafka/partition.go +++ b/vendor/github.com/elastic/beats/libbeat/outputs/kafka/partition.go @@ -127,11 +127,14 @@ func (p *messagePartitioner) Partition( } msg.partition = partition - event := &msg.data.Content - if event.Meta == nil { - event.Meta = map[string]interface{}{} + + if logp.IsDebug(debugSelector) { + debugf("setting event.Meta[\"partition\"] = %v", partition) } - event.Meta["partition"] = partition + if _, err := msg.data.Cache.Put("partition", partition); err != nil { + return 0, fmt.Errorf("setting kafka partition in publisher event failed: %v", err) + } + p.partitions = numPartitions return msg.partition, nil } diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-logstash.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/logstash/docs/logstash.asciidoc similarity index 98% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-logstash.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/logstash/docs/logstash.asciidoc index 7bfa9906..82f35e2a 100644 --- a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-logstash.asciidoc +++ b/vendor/github.com/elastic/beats/libbeat/outputs/logstash/docs/logstash.asciidoc @@ -49,7 +49,7 @@ endif::[] ifndef::win-only[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] endif::win-only[] @@ -75,7 +75,7 @@ ifndef::apm-server[] ... "@metadata": { <1> "beat": "{beat_default_index_prefix}", <2> - "version": "{stack-version}" <3> + "version": "{version}" <3> } } ------------------------------------------------------------------------------ @@ -95,7 +95,7 @@ ifdef::apm-server[] "@metadata": { <1> "beat": "{beat_default_index_prefix}", <2> "pipeline":"apm", <3> - "version": "{stack-version}" <4> + "version": "{version}" <4> } } ------------------------------------------------------------------------------ @@ -288,7 +288,7 @@ NOTE: The "ttl" option is not yet supported on an async Logstash client (one wit Configures number of batches to be sent asynchronously to logstash while waiting for ACK from logstash. Output only becomes blocking once number of `pipelining` -batches have been written. Pipelining is disabled if a values of 0 is +batches have been written. Pipelining is disabled if a value of 0 is configured. The default value is 2. 
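For example, pipelining can be switched off entirely via the setting described above; a minimal sketch (the `hosts` value is illustrative, not taken from this patch):

[source,yaml]
------------------------------------------------------------------------------
output.logstash:
  hosts: ["localhost:5044"]  # illustrative endpoint
  pipelining: 0              # 0 disables pipelining; the default is 2
------------------------------------------------------------------------------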
===== `proxy_url` diff --git a/vendor/github.com/elastic/beats/libbeat/docs/outputs/output-redis.asciidoc b/vendor/github.com/elastic/beats/libbeat/outputs/redis/docs/redis.asciidoc similarity index 100% rename from vendor/github.com/elastic/beats/libbeat/docs/outputs/output-redis.asciidoc rename to vendor/github.com/elastic/beats/libbeat/outputs/redis/docs/redis.asciidoc diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/add_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/add_fields.go index 22e2fd02..39b41b0c 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/actions/add_fields.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/add_fields.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/checks" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) type addFields struct { @@ -40,6 +41,8 @@ func init() { checks.ConfigChecked(CreateAddFields, checks.RequireFields(FieldsKey), checks.AllowedFields(FieldsKey, "target", "when"))) + + jsprocessor.RegisterPlugin("AddFields", CreateAddFields) } // CreateAddFields constructs an add_fields processor from config. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/copy_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/copy_fields.go index af5709d5..c67dd996 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/actions/copy_fields.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/copy_fields.go @@ -27,6 +27,7 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/checks" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) type copyFields struct { @@ -45,6 +46,7 @@ func init() { checks.RequireFields("fields"), ), ) + jsprocessor.RegisterPlugin("CopyFields", NewCopyFields) } // NewCopyFields returns a new copy_fields processor. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_base64_field.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_base64_field.go index c4e33d4a..bf2c92dc 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_base64_field.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_base64_field.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/checks" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const ( @@ -50,6 +51,7 @@ func init() { checks.ConfigChecked(NewDecodeBase64Field, checks.RequireFields("field"), checks.AllowedFields("field", "when", "ignore_missing", "fail_on_error"))) + jsprocessor.RegisterPlugin("DecodeBase64Field", NewDecodeBase64Field) } // NewDecodeBase64Field construct a new decode_base64_field processor. 
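The `jsprocessor.RegisterPlugin` calls added in the hunks above and below all follow one pattern: a processor constructor is registered once under a lowercase name for YAML configs and once under a CamelCase name for the javascript processor. A minimal sketch of a custom processor wired up the same way (the `my_processor`/`MyProcessor` names are hypothetical, not part of this patch):

[source,go]
------------------------------------------------------------------------------
package myprocessor

import (
	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/processors"
	jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor"
)

type myProcessor struct{}

// New constructs the processor; the config is unused in this sketch.
func New(c *common.Config) (processors.Processor, error) {
	return &myProcessor{}, nil
}

// Run marks each event it sees; a real processor would do its work here.
func (p *myProcessor) Run(event *beat.Event) (*beat.Event, error) {
	event.PutValue("labels.my_processor", true)
	return event, nil
}

func (p *myProcessor) String() string { return "my_processor" }

func init() {
	// Usable from YAML configs as `- my_processor: ~` ...
	processors.RegisterPlugin("my_processor", New)
	// ... and from the javascript processor as `new MyProcessor()`.
	jsprocessor.RegisterPlugin("MyProcessor", New)
}
------------------------------------------------------------------------------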
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go
index a2666a8b..ca85a0d4 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/decode_json_fields.go
@@ -31,6 +31,7 @@ import (
 	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/libbeat/processors"
 	"github.com/elastic/beats/libbeat/processors/checks"
+	jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor"
 )
 
 type decodeJSONFields struct {
@@ -66,6 +67,8 @@ func init() {
 		checks.ConfigChecked(NewDecodeJSONFields,
 			checks.RequireFields("fields"),
 			checks.AllowedFields("fields", "max_depth", "overwrite_keys", "add_error_key", "process_array", "target", "when")))
+
+	jsprocessor.RegisterPlugin("DecodeJSONFields", NewDecodeJSONFields)
 }
 
 // NewDecodeJSONFields construct a new decode_json_fields processor.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_fields.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_fields.asciidoc
new file mode 100644
index 00000000..a71ddd1b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_fields.asciidoc
@@ -0,0 +1,37 @@
+[[add-fields]]
+=== Add fields
+
+The `add_fields` processor adds additional fields to the event. Fields can be
+scalar values, arrays, dictionaries, or any nested combination of these. By
+default the fields that you specify will be grouped under the `fields`
+sub-dictionary in the event. To group the fields under a different
+sub-dictionary, use the `target` setting. To store the fields as
+top-level fields, set `target: ''`.
+
+`target`:: (Optional) Sub-dictionary to put all fields into. Defaults to `fields`.
+`fields`:: Fields to be added.
+
+For example, this configuration:
+
+[source,yaml]
+------------------------------------------------------------------------------
+processors:
+- add_fields:
+    target: project
+    fields:
+      name: myproject
+      id: '574734885120952459'
+------------------------------------------------------------------------------
+
+Adds these fields to any event:
+
+[source,json]
+-------------------------------------------------------------------------------
+{
+  "project": {
+    "name": "myproject",
+    "id": "574734885120952459"
+  }
+}
+-------------------------------------------------------------------------------
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_labels.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_labels.asciidoc
new file mode 100644
index 00000000..8a35066d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_labels.asciidoc
@@ -0,0 +1,43 @@
+[[add-labels]]
+=== Add labels
+
+The `add_labels` processor adds a set of key-value pairs to an event.
+The processor will flatten nested configuration objects like arrays or
+dictionaries into a fully qualified name by merging nested names with a `.`.
+Array entries create numeric names starting with 0. Labels are always stored
+under the Elastic Common Schema compliant `labels` sub-dictionary.
+
+`labels`:: Dictionaries of labels to be added.
+
+For example, this configuration:
+
+[source,yaml]
+------------------------------------------------------------------------------
+processors:
+- add_labels:
+    labels:
+      number: 1
+      with.dots: test
+      nested:
+        with.dots: nested
+      array:
+        - do
+        - re
+        - with.field: mi
+------------------------------------------------------------------------------
+
+Adds these fields to every event:
+
+[source,json]
+-------------------------------------------------------------------------------
+{
+  "labels": {
+    "number": 1,
+    "with.dots": "test",
+    "nested.with.dots": "nested",
+    "array.0": "do",
+    "array.1": "re",
+    "array.2.with.field": "mi"
+  }
+}
+-------------------------------------------------------------------------------
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_tags.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_tags.asciidoc
new file mode 100644
index 00000000..7aaec3ee
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/add_tags.asciidoc
@@ -0,0 +1,28 @@
+[[add-tags]]
+=== Add tags
+
+The `add_tags` processor adds tags to a list of tags. If the target field already exists,
+the tags are appended to the existing list of tags.
+
+`tags`:: List of tags to add.
+`target`:: (Optional) Field the tags will be added to. Defaults to `tags`.
+
+For example, this configuration:
+
+[source,yaml]
+------------------------------------------------------------------------------
+processors:
+- add_tags:
+    tags: [web, production]
+    target: "environment"
+------------------------------------------------------------------------------
+
+Adds the `environment` field to every event:
+
+[source,json]
+-------------------------------------------------------------------------------
+{
+  "environment": ["web", "production"]
+}
+-------------------------------------------------------------------------------
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decode_base64_field.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decode_base64_field.asciidoc
new file mode 100644
index 00000000..26399353
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decode_base64_field.asciidoc
@@ -0,0 +1,34 @@
+[[decode-base64-field]]
+=== Decode Base64 fields
+
+The `decode_base64_field` processor specifies a field to base64 decode.
+The `field` key contains a `from: old-key` and a `to: new-key` pair. `from` is
+the origin and `to` the target name of the field.
+
+To overwrite fields, either first rename the target field or use the `drop_fields`
+processor to drop the field and then rename the field.
+
+[source,yaml]
+-------
+processors:
+- decode_base64_field:
+    field:
+      from: "field1"
+      to: "field2"
+    ignore_missing: false
+    fail_on_error: true
+-------
+
+In the example above, `field1` is decoded into `field2`.
+
+The `decode_base64_field` processor has the following configuration settings:
+
+`ignore_missing`:: (Optional) If set to true, no error is logged in case a key
+which should be base64 decoded is missing. Default is `false`.
+
+`fail_on_error`:: (Optional) If set to true, in case of an error the base64 decode
+of fields is stopped and the original event is returned. If set to false, decoding
+continues also if an error happened during decoding. Default is `true`.
+
+See <<conditions>> for a list of supported conditions.
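The decoding itself is the standard base64 transformation; a standalone Go sketch of what happens between `from` and `to` (the sample value is invented and the event-field plumbing is omitted):

[source,go]
------------------------------------------------------------------------------
package main

import (
	"encoding/base64"
	"fmt"
	"log"
)

func main() {
	from := "aGVsbG8gd29ybGQ=" // value read from "field1"
	decoded, err := base64.StdEncoding.DecodeString(from)
	if err != nil {
		// With fail_on_error: true the processor would keep the original event.
		log.Fatalf("decode failed: %v", err)
	}
	fmt.Println(string(decoded)) // value written to "field2": "hello world"
}
------------------------------------------------------------------------------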
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decode_json_fields.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decode_json_fields.asciidoc
new file mode 100644
index 00000000..fb86cf24
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decode_json_fields.asciidoc
@@ -0,0 +1,36 @@
+[[decode-json-fields]]
+=== Decode JSON fields
+
+The `decode_json_fields` processor decodes fields containing JSON strings and
+replaces the strings with valid JSON objects.
+
+[source,yaml]
+-----------------------------------------------------
+processors:
+ - decode_json_fields:
+     fields: ["field1", "field2", ...]
+     process_array: false
+     max_depth: 1
+     target: ""
+     overwrite_keys: false
+     add_error_key: true
+-----------------------------------------------------
+
+The `decode_json_fields` processor has the following configuration settings:
+
+`fields`:: The fields containing JSON strings to decode.
+`process_array`:: (Optional) A boolean that specifies whether to process
+arrays. The default is false.
+`max_depth`:: (Optional) The maximum parsing depth. The default is 1.
+`target`:: (Optional) The field under which the decoded JSON will be written. By
+default the decoded JSON object replaces the string field from which it was
+read. To merge the decoded JSON fields into the root of the event, specify
+`target` with an empty string (`target: ""`). Note that the `null` value (`target:`)
+is treated as if the field was not set at all.
+`overwrite_keys`:: (Optional) A boolean that specifies whether keys that already
+exist in the event are overwritten by keys from the decoded JSON object. The
+default value is false.
+`add_error_key`:: (Optional) If set to true and an error occurs while decoding
+JSON keys, the `error` field is added to the event with the error message. If
+set to false, no error field is added even if an error occurs while decoding.
+The default value is false.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decompress_gzip_field.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decompress_gzip_field.asciidoc
new file mode 100644
index 00000000..334f0fdb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/decompress_gzip_field.asciidoc
@@ -0,0 +1,34 @@
+[[decompress-gzip-field]]
+=== Decompress gzip fields
+
+The `decompress_gzip_field` processor specifies a field to gzip decompress.
+The `field` key contains a `from: old-key` and a `to: new-key` pair. `from` is
+the origin and `to` the target name of the field.
+
+To overwrite fields, either first rename the target field or use the `drop_fields`
+processor to drop the field and then rename the field.
+
+[source,yaml]
+-------
+processors:
+- decompress_gzip_field:
+    field:
+      from: "field1"
+      to: "field2"
+    ignore_missing: false
+    fail_on_error: true
+-------
+
+In the example above, `field1` is decompressed into `field2`.
+
+The `decompress_gzip_field` processor has the following configuration settings:
+
+`ignore_missing`:: (Optional) If set to true, no error is logged in case a key
+which should be gzip decompressed is missing. Default is `false`.
+
+`fail_on_error`:: (Optional) If set to true, in case of an error the decompression
+of fields is stopped and the original event is returned. If set to false,
+decompression continues also if an error happened. Default is `true`.
+
+See <<conditions>> for a list of supported conditions.
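Likewise, the decompression step is plain `compress/gzip`; a self-contained Go sketch (the payload is constructed inline for illustration, and the event plumbing is omitted):

[source,go]
------------------------------------------------------------------------------
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io/ioutil"
	"log"
)

func main() {
	// Build a gzip-compressed payload standing in for the "field1" value.
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	zw.Write([]byte("hello world"))
	zw.Close()

	// Decompress it, as the processor would when writing "field2".
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		log.Fatalf("not gzip data: %v", err)
	}
	out, err := ioutil.ReadAll(zr)
	if err != nil {
		log.Fatalf("decompression failed: %v", err)
	}
	fmt.Println(string(out)) // "hello world"
}
------------------------------------------------------------------------------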
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/drop_event.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/drop_event.asciidoc
new file mode 100644
index 00000000..d5a1d246
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/drop_event.asciidoc
@@ -0,0 +1,16 @@
+[[drop-event]]
+=== Drop events
+
+The `drop_event` processor drops the entire event if the associated condition
+is fulfilled. The condition is mandatory, because without one, all the events
+are dropped.
+
+[source,yaml]
+------
+processors:
+ - drop_event:
+     when:
+       condition
+------
+
+See <<conditions>> for a list of supported conditions.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/drop_fields.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/drop_fields.asciidoc
new file mode 100644
index 00000000..73cb7422
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/drop_fields.asciidoc
@@ -0,0 +1,27 @@
+[[drop-fields]]
+=== Drop fields from events
+
+The `drop_fields` processor specifies which fields to drop if a certain
+condition is fulfilled. The condition is optional. If it's missing, the
+specified fields are always dropped. The `@timestamp` and `type` fields cannot
+be dropped, even if they show up in the `drop_fields` list.
+
+[source,yaml]
+-----------------------------------------------------
+processors:
+ - drop_fields:
+     when:
+       condition
+     fields: ["field1", "field2", ...]
+     ignore_missing: false
+-----------------------------------------------------
+
+See <<conditions>> for a list of supported conditions.
+
+NOTE: If you define an empty list of fields under `drop_fields`, then no fields
+are dropped.
+
+The `drop_fields` processor has the following configuration settings:
+
+`ignore_missing`:: (Optional) If `true` the processor will not return an error
+when a specified field does not exist. Defaults to `false`.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/include_fields.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/include_fields.asciidoc
new file mode 100644
index 00000000..71cc2a32
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/include_fields.asciidoc
@@ -0,0 +1,24 @@
+[[include-fields]]
+=== Keep fields from events
+
+The `include_fields` processor specifies which fields to export if a certain
+condition is fulfilled. The condition is optional. If it's missing, the
+specified fields are always exported. The `@timestamp` and `type` fields are
+always exported, even if they are not defined in the `include_fields` list.
+
+[source,yaml]
+-------
+processors:
+ - include_fields:
+     when:
+       condition
+     fields: ["field1", "field2", ...]
+-------
+
+See <<conditions>> for a list of supported conditions.
+
+You can specify multiple `include_fields` processors under the `processors`
+section.
+
+NOTE: If you define an empty list of fields under `include_fields`, then only
+the required fields, `@timestamp` and `type`, are exported.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/placeholder.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/placeholder.asciidoc
new file mode 100644
index 00000000..073164e4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/placeholder.asciidoc
@@ -0,0 +1,2 @@
+Placeholder file to support adding a new resource to the conf.yaml in the docs
+repo.
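In the docs above, `condition` is a placeholder. One concrete way to fill it, assuming the standard `equals` condition (the field and value here are illustrative):

[source,yaml]
------
processors:
 - drop_event:
     when:
       equals:
         http.code: 200
------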
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/rename.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/rename.asciidoc
new file mode 100644
index 00000000..ca1e785c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/docs/rename.asciidoc
@@ -0,0 +1,44 @@
+[[rename-fields]]
+=== Rename fields from events
+
+The `rename` processor specifies a list of fields to rename. Under the `fields`
+key, each entry contains a `from: old-key` and a `to: new-key` pair, where:
+
+* `from` is the original field name
+* `to` is the target field name
+
+The `rename` processor cannot be used to overwrite fields. To overwrite fields,
+either first rename the target field, or use the `drop_fields` processor to drop
+the field and then rename the field.
+
+TIP: You can rename fields to resolve field name conflicts. For example, if an
+event has two fields, `c` and `c.b` (where `b` is a subfield of `c`), assigning
+scalar values results in an {es} error at ingest time. The assignment `{"c": 1,
+"c.b": 2}` would result in an error because `c` is an object and cannot be
+assigned a scalar value. To prevent this conflict, rename `c` to `c.value`
+before assigning values.
+
+[source,yaml]
+-------
+processors:
+- rename:
+    fields:
+      - from: "a.g"
+        to: "e.d"
+    ignore_missing: false
+    fail_on_error: true
+-------
+
+The `rename` processor has the following configuration settings:
+
+`ignore_missing`:: (Optional) If set to true, no error is logged in case a key
+which should be renamed is missing. Default is `false`.
+
+`fail_on_error`:: (Optional) If set to true, in case of an error the renaming of
+fields is stopped and the original event is returned. If set to false, renaming
+continues also if an error happened during renaming. Default is `true`.
+
+See <<conditions>> for a list of supported conditions.
+
+You can specify multiple `rename` processors under the `processors`
+section.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go
index c0c8f778..89e92d2a 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/rename.go
@@ -27,6 +27,7 @@ import (
 	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/libbeat/processors"
 	"github.com/elastic/beats/libbeat/processors/checks"
+	jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor"
 )
 
 type renameFields struct {
@@ -48,6 +49,8 @@ func init() {
 	processors.RegisterPlugin("rename",
 		checks.ConfigChecked(NewRenameFields,
 			checks.RequireFields("fields")))
+
+	jsprocessor.RegisterPlugin("Rename", NewRenameFields)
 }
 
 // NewRenameFields returns a new rename processor.
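Following the TIP in the rename doc above, a sketch of the `c` to `c.value` rename it recommends (the option values are illustrative):

[source,yaml]
-------
processors:
- rename:
    fields:
      - from: "c"
        to: "c.value"
    ignore_missing: true   # tolerate events that have no `c` field
    fail_on_error: false
-------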
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/actions/truncate_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/actions/truncate_fields.go
index ca87eb35..1c1256f6 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/actions/truncate_fields.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/actions/truncate_fields.go
@@ -30,6 +30,7 @@ import (
 	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/libbeat/processors"
 	"github.com/elastic/beats/libbeat/processors/checks"
+	jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor"
 )
 
 type truncateFieldsConfig struct {
@@ -54,6 +55,7 @@ func init() {
 			checks.MutuallyExclusiveRequiredFields("max_bytes", "max_characters"),
 		),
 	)
+	jsprocessor.RegisterPlugin("TruncateFields", NewTruncateFields)
 }
 
 // NewTruncateFields returns a new truncate_fields processor.
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/add_cloud_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/add_cloud_metadata.go
index 1a7c01ce..d4a8ea22 100644
--- a/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/add_cloud_metadata.go
+++ b/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/add_cloud_metadata.go
@@ -28,6 +28,7 @@ import (
 	"github.com/elastic/beats/libbeat/common"
 	"github.com/elastic/beats/libbeat/logp"
 	"github.com/elastic/beats/libbeat/processors"
+	jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor"
 )
 
 const (
@@ -41,6 +42,7 @@ var debugf = logp.MakeDebug("filters")
 // init registers the add_cloud_metadata processor.
 func init() {
 	processors.RegisterPlugin("add_cloud_metadata", New)
+	jsprocessor.RegisterPlugin("AddCloudMetadata", New)
 }
 
 type addCloudMetadata struct {
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc
new file mode 100644
index 00000000..8e4e5249
--- /dev/null
+++ b/vendor/github.com/elastic/beats/libbeat/processors/add_cloud_metadata/docs/add_cloud_metadata.asciidoc
@@ -0,0 +1,164 @@
+[[add-cloud-metadata]]
+=== Add cloud metadata
+
+The `add_cloud_metadata` processor enriches each event with instance metadata
+from the machine's hosting provider. At startup it will query a list of hosting
+providers and cache the instance metadata.
+
+The following cloud providers are supported:
+
+- Amazon Web Services (AWS)
+- Digital Ocean
+- Google Compute Engine (GCE)
+- https://www.qcloud.com/?lang=en[Tencent Cloud] (QCloud)
+- Alibaba Cloud (ECS)
+- Azure Virtual Machine
+- Openstack Nova
+
+The Alibaba Cloud and Tencent Cloud providers are disabled by default, because
+they require access to a remote host. The `providers` setting allows users to
+select a list of default providers to query.
+
+The simple configuration below enables the processor.
+
+[source,yaml]
+-------------------------------------------------------------------------------
+processors:
+- add_cloud_metadata: ~
+-------------------------------------------------------------------------------
+
+The `add_cloud_metadata` processor has three optional configuration settings.
+The first one is `timeout`, which specifies the maximum amount of time to wait
+for a successful response when detecting the hosting provider. The default
+timeout value is `3s`.
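For instance, the `timeout` described above can be raised when provider metadata endpoints respond slowly (the `10s` value is illustrative):

[source,yaml]
-------
processors:
- add_cloud_metadata:
    timeout: 10s  # default is 3s
-------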
+ +If a timeout occurs then no instance metadata will be added to the events. This +makes it possible to enable this processor for all your deployments (in the +cloud or on-premise). + +The second optional setting is `providers`. The `providers` setting accepts a +list of cloud provider names to be used. If `providers` is not configured, then +all providers that do not access a remote endpoint are enabled by default. + +List of names the `providers` setting supports: + +- "alibaba", or "ecs" for the Alibaba Cloud provider (disabled by default). +- "azure" for Azure Virtual Machine (enabled by default). +- "digitalocean" for Digital Ocean (enabled by default). +- "aws", or "ec2" for Amazon Web Services (enabled by default). +- "gcp" for Google Compute Engine (enabled by default). +- "openstack", or "nova" for Openstack Nova (enabled by default). +- "tencent", or "qcloud" for Tencent Cloud (disabled by default). + +The third optional configuration setting is `overwrite`. When `overwrite` is +`true`, `add_cloud_metadata` overwrites existing `cloud.*` fields (`false` by +default). + +The metadata that is added to events varies by hosting provider. Below are +examples for each of the supported providers. + +_AWS_ + +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "account.id": "123456789012", + "availability_zone": "us-east-1c", + "instance.id": "i-4e123456", + "machine.type": "t2.medium", + "image.id": "ami-abcd1234", + "provider": "aws", + "region": "us-east-1" + } +} +------------------------------------------------------------------------------- + +_Digital Ocean_ + +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "instance.id": "1234567", + "provider": "digitalocean", + "region": "nyc2" + } +} +------------------------------------------------------------------------------- + +_GCP_ + +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "availability_zone": "us-east1-b", + "instance.id": "1234556778987654321", + "machine.type": "f1-micro", + "project.id": "my-dev", + "provider": "gcp" + } +} +------------------------------------------------------------------------------- + +_Tencent Cloud_ + +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "availability_zone": "gz-azone2", + "instance.id": "ins-qcloudv5", + "provider": "qcloud", + "region": "china-south-gz" + } +} +------------------------------------------------------------------------------- + +_Alibaba Cloud_ + +This metadata is only available when VPC is selected as the network type of the +ECS instance.
+ +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "availability_zone": "cn-shenzhen", + "instance.id": "i-wz9g2hqiikg0aliyun2b", + "provider": "ecs", + "region": "cn-shenzhen-a" + } +} +------------------------------------------------------------------------------- + +_Azure Virtual Machine_ + +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "provider": "az", + "instance.id": "04ab04c3-63de-4709-a9f9-9ab8c0411d5e", + "instance.name": "test-az-vm", + "machine.type": "Standard_D3_v2", + "region": "eastus2" + } +} +------------------------------------------------------------------------------- + +_Openstack Nova_ + +[source,json] +------------------------------------------------------------------------------- +{ + "cloud": { + "instance.name": "test-998d932195.mycloud.tld", + "instance.id": "i-00011a84", + "availability_zone": "xxxx-az-c", + "provider": "openstack", + "machine.type": "m2.large" + } +} +------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go index 24012422..6148a4be 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package add_docker_metadata import ( @@ -35,6 +37,7 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/actions" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const ( @@ -49,6 +52,7 @@ var processCgroupPaths = cgroup.ProcessCgroupPaths func init() { processors.RegisterPlugin(processorName, New) + jsprocessor.RegisterPlugin("AddDockerMetadata", New) } type addDockerMetadata struct { @@ -57,10 +61,11 @@ type addDockerMetadata struct { fields []string sourceProcessor processors.Processor - pidFields []string // Field names that contain PIDs. - cgroups *common.Cache // Cache of PID (int) to cgropus (map[string]string). - hostFS string // Directory where /proc is found - dedot bool // If set to true, replace dots in labels with `_`. + pidFields []string // Field names that contain PIDs. + cgroups *common.Cache // Cache of PID (int) to cgroups (map[string]string). + hostFS string // Directory where /proc is found + dedot bool // If set to true, replace dots in labels with `_`. + dockerAvailable bool // Set to true if a Docker environment is detected } // New constructs a new add_docker_metadata processor. @@ -74,13 +79,19 @@ func buildDockerMetadataProcessor(cfg *common.Config, watcherConstructor docker.
return nil, errors.Wrapf(err, "fail to unpack the %v configuration", processorName) } + var dockerAvailable bool + watcher, err := watcherConstructor(config.Host, config.TLS, config.MatchShortID) if err != nil { - return nil, err - } - - if err = watcher.Start(); err != nil { - return nil, err + dockerAvailable = false + errorMsg := fmt.Sprintf("%v: docker environment not detected: %v", processorName, err) + logp.Debug("add_docker_metadata", errorMsg) + } else { + dockerAvailable = true + logp.Debug("add_docker_metadata", "%v: docker environment detected", processorName) + if err = watcher.Start(); err != nil { + return nil, errors.Wrap(err, "failed to start watcher") + } } // Use extract_field processor to get container ID from source file path. @@ -106,6 +117,7 @@ func buildDockerMetadataProcessor(cfg *common.Config, watcherConstructor docker. pidFields: config.MatchPIDs, hostFS: config.HostFS, dedot: config.DeDot, + dockerAvailable: dockerAvailable, }, nil } @@ -121,8 +133,12 @@ func lazyCgroupCacheInit(d *addDockerMetadata) { } func (d *addDockerMetadata) Run(event *beat.Event) (*beat.Event, error) { + if !d.dockerAvailable { + return event, nil + } var cid string var err error + // Extract CID from the filepath contained in the "log.file.path" field. if d.sourceProcessor != nil { lfp, _ := event.Fields.GetValue("log.file.path") diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go index a6973ded..9a99ac1c 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/add_docker_metadata_test.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package add_docker_metadata import ( @@ -52,6 +54,20 @@ func init() { } } +func TestInitializationNoDocker(t *testing.T) { + var testConfig = common.NewConfig() + testConfig.SetString("host", -1, "unix:///var/run42/docker.sock") + + p, err := buildDockerMetadataProcessor(testConfig, docker.NewWatcher) + assert.NoError(t, err, "initializing add_docker_metadata processor") + + input := common.MapStr{} + result, err := p.Run(&beat.Event{Fields: input}) + assert.NoError(t, err, "processing an event") + + assert.Equal(t, common.MapStr{}, result.Fields) +} + func TestInitialization(t *testing.T) { var testConfig = common.NewConfig() diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go index aa78dd08..930ca4ec 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/config.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+// +build linux darwin windows + package add_docker_metadata import ( diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/docs/add_docker_metadata.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/docs/add_docker_metadata.asciidoc new file mode 100644 index 00000000..aed8e205 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_docker_metadata/docs/add_docker_metadata.asciidoc @@ -0,0 +1,80 @@ +[[add-docker-metadata]] +=== Add Docker metadata + +The `add_docker_metadata` processor annotates each event with relevant metadata +from Docker containers. At startup it detects a Docker environment and caches the metadata. +Events are annotated with Docker metadata only if a valid configuration +is detected and the processor is able to reach the Docker API. + +Each event is annotated with: + +* Container ID +* Name +* Image +* Labels + +[NOTE] +===== +When running {beatname_uc} in a container, you need to provide access to +Docker’s unix socket in order for the `add_docker_metadata` processor to work. +You can do this by mounting the socket inside the container. For example: + +`docker run -v /var/run/docker.sock:/var/run/docker.sock ...` + +To avoid privilege issues, you may also need to add `--user=root` to the +`docker run` flags. Because the user must be part of the docker group in order +to access `/var/run/docker.sock`, root access is required if {beatname_uc} is +running as non-root inside the container. +===== + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_docker_metadata: + host: "unix:///var/run/docker.sock" + #match_fields: ["system.process.cgroup.id"] + #match_pids: ["process.pid", "process.ppid"] + #match_source: true + #match_source_index: 4 + #match_short_id: true + #cleanup_timeout: 60 + #labels.dedot: false + # To connect to Docker over TLS you must specify a client and CA certificate. + #ssl: + # certificate_authority: "/etc/pki/root/ca.pem" + # certificate: "/etc/pki/client/cert.pem" + # key: "/etc/pki/client/cert.key" +------------------------------------------------------------------------------- + +It has the following settings: + +`host`:: (Optional) Docker socket (UNIX or TCP socket). It uses +`unix:///var/run/docker.sock` by default. + +`ssl`:: (Optional) SSL configuration to use when connecting to the Docker +socket. + +`match_fields`:: (Optional) A list of fields to match a container ID against; at +least one of them should hold a container ID for the event to be enriched. + +`match_pids`:: (Optional) A list of fields that contain process IDs. If the +process is running in Docker then the event will be enriched. The default value +is `["process.pid", "process.ppid"]`. + +`match_source`:: (Optional) Match container ID from a log path present in the +`log.file.path` field. Enabled by default. + +`match_short_id`:: (Optional) Match container short ID from a log path present +in the `log.file.path` field. Disabled by default. +This allows matching directory names that contain the first 12 characters +of the container ID. For example, `/var/log/containers/b7e3460e2b21/*.log`. + +`match_source_index`:: (Optional) Index in the source path split by `/` to look +for the container ID. It defaults to `4` to match +`/var/lib/docker/containers/<container_id>/*.log` + +`cleanup_timeout`:: (Optional) Time of inactivity after which the metadata for a +container is cleaned up and forgotten, `60s` by default. + +`labels.dedot`:: (Optional) Defaults to `false`.
If set to `true`, dots in + labels are replaced with `_`. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go index 1e1266de..6cfc938e 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/add_host_metadata.go @@ -29,12 +29,14 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/metric/system/host" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" "github.com/elastic/beats/libbeat/processors/util" "github.com/elastic/go-sysinfo" ) func init() { processors.RegisterPlugin("add_host_metadata", New) + jsprocessor.RegisterPlugin("AddHostMetadata", New) } type addHostMetadata struct { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/docs/add_host_metadata.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/docs/add_host_metadata.asciidoc new file mode 100644 index 00000000..bd3d47d8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_host_metadata/docs/add_host_metadata.asciidoc @@ -0,0 +1,74 @@ +[[add-host-metadata]] +=== Add Host metadata + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_host_metadata: + netinfo.enabled: false + cache.ttl: 5m + geo: + name: nyc-dc1-rack1 + location: 40.7128, -74.0060 + continent_name: North America + country_iso_code: US + region_name: New York + region_iso_code: NY + city_name: New York +------------------------------------------------------------------------------- + +It has the following settings: + +`netinfo.enabled`:: (Optional) Default `false`. Include IP addresses and MAC addresses as the fields `host.ip` and `host.mac`. + +`cache.ttl`:: (Optional) The processor uses an internal cache for the host metadata. This sets the cache expiration time. The default is `5m`; negative values disable caching altogether. + +`geo.name`:: (Optional) User-definable token used to identify a discrete location. Frequently a datacenter, rack, or similar. + +`geo.location`:: (Optional) Latitude and longitude in comma-separated format. + +`geo.continent_name`:: (Optional) Name of the continent. + +`geo.country_name`:: (Optional) Name of the country. + +`geo.region_name`:: (Optional) Name of the region. + +`geo.city_name`:: (Optional) Name of the city. + +`geo.country_iso_code`:: (Optional) ISO country code. + +`geo.region_iso_code`:: (Optional) ISO region code. + + +The `add_host_metadata` processor annotates each event with relevant metadata from the host machine.
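A minimal Go sketch, again assumed rather than taken from the patch, showing the processor being built with the settings above and run over an empty event:

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/processors/add_host_metadata"
)

func main() {
	// Illustrative settings; the geo name is a placeholder.
	cfg := common.MustNewConfigFrom(map[string]interface{}{
		"netinfo.enabled": false,
		"cache.ttl":       "5m",
		"geo": map[string]interface{}{
			"name": "nyc-dc1-rack1",
		},
	})

	p, err := add_host_metadata.New(cfg)
	if err != nil {
		panic(err)
	}

	evt, err := p.Run(&beat.Event{Fields: common.MapStr{}})
	if err != nil {
		panic(err)
	}
	fmt.Println(evt.Fields) // expect a populated "host" object
}
----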
+The fields added to the event look like the following: + +[source,json] +------------------------------------------------------------------------------- +{ + "host":{ + "architecture":"x86_64", + "name":"example-host", + "id":"", + "os":{ + "family":"darwin", + "build":"16G1212", + "platform":"darwin", + "version":"10.12.6", + "kernel":"16.7.0", + "name":"Mac OS X" + }, + "ip": ["192.168.0.1", "10.0.0.1"], + "mac": ["00:25:96:12:34:56", "72:00:06:ff:79:f1"], + "geo": { + "continent_name": "North America", + "country_iso_code": "US", + "region_name": "New York", + "region_iso_code": "NY", + "city_name": "New York", + "name": "nyc-dc1-rack1", + "location": "40.7128, -74.0060" + } + } +} +------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go index 156ae38e..f5fb4ba5 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/cache.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package add_kubernetes_metadata import ( diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc new file mode 100644 index 00000000..94181a44 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc @@ -0,0 +1,92 @@ +[[add-kubernetes-metadata]] +=== Add Kubernetes metadata + +The `add_kubernetes_metadata` processor annotates each event with relevant +metadata based on which Kubernetes pod the event originated from. +At startup it detects an `in_cluster` environment and caches the +Kubernetes-related metadata. Events are only annotated if a valid configuration +is detected. If it's not able to detect a valid Kubernetes configuration, +the events are not annotated with Kubernetes-related metadata. + +Each event is annotated with: + +* Pod Name +* Pod UID +* Namespace +* Labels + +The `add_kubernetes_metadata` processor has two basic building blocks which are: + +* Indexers +* Matchers + +Indexers take a pod's metadata and build indices from it. +For example, the `ip_port` indexer can take a Kubernetes pod and index the pod +metadata based on all `pod_ip:container_port` combinations. + +Matchers are used to construct lookup keys for querying indices. For example, +when the `fields` matcher takes `["metricset.host"]` as a lookup field, it would +construct a lookup key with the value of the field `metricset.host`. + +Each Beat can define its own default indexers and matchers which are enabled by +default. For example, Filebeat enables the `container` indexer, which indexes +pod metadata based on all container IDs, and a `logs_path` matcher, which takes +the `log.file.path` field, extracts the container ID, and uses it to retrieve +metadata. + +The configuration below enables the processor when {beatname_lc} is run as a pod in +Kubernetes.
+ +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_kubernetes_metadata: +------------------------------------------------------------------------------- + +The configuration below enables the processor on a Beat running as a process on +the Kubernetes node. + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_kubernetes_metadata: + host: + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + kube_config: ${HOME}/.kube/config +------------------------------------------------------------------------------- + +The configuration below has the default indexers and matchers disabled and +enables only the ones that the user is interested in. + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_kubernetes_metadata: + host: + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster + kube_config: ~/.kube/config + default_indexers.enabled: false + default_matchers.enabled: false + indexers: + - ip_port: + matchers: + - fields: + lookup_fields: ["metricset.host"] +------------------------------------------------------------------------------- + +The `add_kubernetes_metadata` processor has the following configuration settings: + +`host`:: (Optional) Specify the node to scope {beatname_lc} to in case it +cannot be accurately detected, as when running {beatname_lc} in host network +mode. +`namespace`:: (Optional) Select the namespace from which to collect the +metadata. If it is not set, the processor collects metadata from all namespaces. +It is unset by default. +`kube_config`:: (Optional) Use the given config file as configuration for the +Kubernetes client. It defaults to the `KUBECONFIG` environment variable if present. +`default_indexers.enabled`:: (Optional) Enable/Disable default pod indexers, in +case you want to specify your own. +`default_matchers.enabled`:: (Optional) Enable/Disable default pod matchers, in +case you want to specify your own. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go index ea635314..00ecf06d 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes.go @@ -15,17 +15,23 @@ // specific language governing permissions and limitations // under the License.
+// +build linux darwin windows + package add_kubernetes_metadata import ( "fmt" + "os" "time" + k8sclient "k8s.io/client-go/kubernetes" + "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/kubernetes" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const ( @@ -33,14 +39,16 @@ const ( ) type kubernetesAnnotator struct { - watcher kubernetes.Watcher - indexers *Indexers - matchers *Matchers - cache *cache + watcher kubernetes.Watcher + indexers *Indexers + matchers *Matchers + cache *cache + kubernetesAvailable bool } func init() { processors.RegisterPlugin("add_kubernetes_metadata", New) + jsprocessor.RegisterPlugin("AddKubernetesMetadata", New) // Register default indexers Indexing.AddIndexer(PodNameIndexerName, NewPodNameIndexer) @@ -51,6 +59,16 @@ func init() { Indexing.AddMatcher(FieldFormatMatcherName, NewFieldFormatMatcher) } +func isKubernetesAvailable(client k8sclient.Interface) bool { + server, err := client.Discovery().ServerVersion() + if err != nil { + logp.Info("%v: could not detect kubernetes env: %v", "add_kubernetes_metadata", err) + return false + } + logp.Info("%v: kubernetes env detected, with version: %v", "add_kubernetes_metadata", server) + return true +} + // New constructs a new add_kubernetes_metadata processor. func New(cfg *common.Config) (processors.Processor, error) { config := defaultKubernetesAnnotatorConfig() @@ -83,23 +101,41 @@ func New(cfg *common.Config) (processors.Processor, error) { return nil, err } - indexers := NewIndexers(config.Indexers, metaGen) - - matchers := NewMatchers(config.Matchers) - - if matchers.Empty() { - return nil, fmt.Errorf("Can not initialize kubernetes plugin with zero matcher plugins") + processor := &kubernetesAnnotator{ + cache: newCache(config.CleanupTimeout), + kubernetesAvailable: false, } client, err := kubernetes.GetKubernetesClient(config.KubeConfig) if err != nil { - return nil, err + if kubernetes.IsInCluster(config.KubeConfig) { + logp.Debug("kubernetes", "%v: could not create kubernetes client using in_cluster config: %v", "add_kubernetes_metadata", err) + } else if config.KubeConfig == "" { + logp.Debug("kubernetes", "%v: could not create kubernetes client using config: %v: %v", "add_kubernetes_metadata", os.Getenv("KUBECONFIG"), err) + } else { + logp.Debug("kubernetes", "%v: could not create kubernetes client using config: %v: %v", "add_kubernetes_metadata", config.KubeConfig, err) + } + return processor, nil } + if !isKubernetesAvailable(client) { + return processor, nil + } + + processor.indexers = NewIndexers(config.Indexers, metaGen) + + matchers := NewMatchers(config.Matchers) + + if matchers.Empty() { + logp.Debug("kubernetes", "%v: could not initialize kubernetes plugin with zero matcher plugins", "add_kubernetes_metadata") + return processor, nil + } + + processor.matchers = matchers + config.Host = kubernetes.DiscoverKubernetesNode(config.Host, kubernetes.IsInCluster(config.KubeConfig), client) - logp.Debug("kubernetes", "Using host: %s", config.Host) - logp.Debug("kubernetes", "Initializing watcher") + logp.Debug("kubernetes", "Initializing a new Kubernetes watcher using host: %s", config.Host) watcher, err := kubernetes.NewWatcher(client, &kubernetes.Pod{}, kubernetes.WatchOptions{ SyncTimeout: config.SyncPeriod, @@ -111,23 +147,24 @@ func New(cfg *common.Config) (processors.Processor, error) { 
return nil, err } - processor := &kubernetesAnnotator{ - watcher: watcher, - indexers: indexers, - matchers: matchers, - cache: newCache(config.CleanupTimeout), - } + processor.watcher = watcher + processor.kubernetesAvailable = true watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { - processor.addPod(obj.(*kubernetes.Pod)) + pod := obj.(*kubernetes.Pod) + logp.Debug("kubernetes", "%v: adding pod: %s/%s", "add_kubernetes_metadata", pod.GetNamespace(), pod.GetName()) + processor.addPod(pod) }, UpdateFunc: func(obj interface{}) { - processor.removePod(obj.(*kubernetes.Pod)) - processor.addPod(obj.(*kubernetes.Pod)) + pod := obj.(*kubernetes.Pod) + logp.Debug("kubernetes", "%v: updating pod: %s/%s", "add_kubernetes_metadata", pod.GetNamespace(), pod.GetName()) + processor.updatePod(pod) }, DeleteFunc: func(obj interface{}) { - processor.removePod(obj.(*kubernetes.Pod)) + pod := obj.(*kubernetes.Pod) + logp.Debug("kubernetes", "%v: removing pod: %s/%s", "add_kubernetes_metadata", pod.GetNamespace(), pod.GetName()) + processor.removePod(pod) }, }) @@ -139,6 +176,9 @@ func New(cfg *common.Config) (processors.Processor, error) { } func (k *kubernetesAnnotator) Run(event *beat.Event) (*beat.Event, error) { + if !k.kubernetesAvailable { + return event, nil + } index := k.matchers.MetadataIndex(event.Fields) if index == "" { return event, nil @@ -163,6 +203,18 @@ func (k *kubernetesAnnotator) addPod(pod *kubernetes.Pod) { } } +func (k *kubernetesAnnotator) updatePod(pod *kubernetes.Pod) { + k.removePod(pod) + + // Add it again only if it is not being deleted + if pod.GetObjectMeta().GetDeletionTimestamp() != nil { + logp.Debug("kubernetes", "%v: removing pod being terminated: %s/%s", "add_kubernetes_metadata", pod.GetNamespace(), pod.GetName()) + return + } + + k.addPod(pod) +} + func (k *kubernetesAnnotator) removePod(pod *kubernetes.Pod) { indexes := k.indexers.GetIndexes(pod) for _, idx := range indexes { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go index c3467b5c..4da15db9 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata/kubernetes_test.go @@ -42,6 +42,7 @@ func TestAnnotatorDeepUpdate(t *testing.T) { matchers: &Matchers{ matchers: []Matcher{matcher}, }, + kubernetesAvailable: true, } processor.cache.set("foo", common.MapStr{ @@ -86,3 +87,42 @@ func TestAnnotatorDeepUpdate(t *testing.T) { }, }, event.Fields) } + +// Test that metadata is not included in the event +func TestAnnotatorWithNoKubernetesAvailable(t *testing.T) { + cfg := common.MustNewConfigFrom(map[string]interface{}{ + "lookup_fields": []string{"kubernetes.pod.name"}, + }) + matcher, err := NewFieldMatcher(*cfg) + if err != nil { + t.Fatal(err) + } + + processor := kubernetesAnnotator{ + cache: newCache(10 * time.Second), + matchers: &Matchers{ + matchers: []Matcher{matcher}, + }, + kubernetesAvailable: false, + } + + initialEventMap := common.MapStr{ + "kubernetes": common.MapStr{ + "pod": common.MapStr{ + "name": "foo", + "id": "pod_id", + "metrics": common.MapStr{ + "a": 1, + "b": 2, + }, + }, + }, + } + + event, err := processor.Run(&beat.Event{ + Fields: initialEventMap.Clone(), + }) + assert.NoError(t, err) + + assert.Equal(t, initialEventMap, event.Fields) +} diff --git
a/vendor/github.com/elastic/beats/libbeat/processors/add_locale/add_locale.go b/vendor/github.com/elastic/beats/libbeat/processors/add_locale/add_locale.go index 2e0eb81c..9d98c045 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_locale/add_locale.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_locale/add_locale.go @@ -27,6 +27,7 @@ import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) type addLocale struct { @@ -53,6 +54,7 @@ func (t TimezoneFormat) String() string { func init() { processors.RegisterPlugin("add_locale", New) + jsprocessor.RegisterPlugin("AddLocale", New) } // New constructs a new add_locale processor. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_locale/docs/add_locale.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_locale/docs/add_locale.asciidoc new file mode 100644 index 00000000..69a0bc70 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_locale/docs/add_locale.asciidoc @@ -0,0 +1,30 @@ +[[add-locale]] +=== Add the local time zone + +The `add_locale` processor enriches each event with the machine's time zone +offset from UTC or with the name of the time zone. It supports one configuration +option named `format` that controls whether an offset or time zone abbreviation +is added to the event. The default format is `offset`. The processor adds +an `event.timezone` value to each event. + +The configuration below enables the processor with the default settings. + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_locale: ~ +------------------------------------------------------------------------------- + +This configuration enables the processor and configures it to add the time zone +abbreviation to events. + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_locale: + format: abbreviation +------------------------------------------------------------------------------- + +NOTE: `add_locale` differentiates between daylight saving +time (DST) and regular time. For example, `CEST` indicates DST and `CET` is +regular time.
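Conceptually, the two formats correspond to what Go's standard library reports for the local zone. This illustration is not the processor's actual implementation:

[source,go]
----
package main

import (
	"fmt"
	"time"
)

func main() {
	// Zone() returns the abbreviation and the offset from UTC in seconds.
	abbrev, offsetSeconds := time.Now().Zone() // e.g. "CEST", 7200
	// Note: zones with 30- or 45-minute offsets are oversimplified here.
	fmt.Printf("abbreviation=%s offset=%+03d:00\n", abbrev, offsetSeconds/3600)
}
----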
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/add_observer_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/add_observer_metadata.go index 050e971d..b1740df0 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/add_observer_metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/add_observer_metadata.go @@ -28,12 +28,14 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" "github.com/elastic/beats/libbeat/processors/util" "github.com/elastic/go-sysinfo" ) func init() { processors.RegisterPlugin("add_observer_metadata", New) + jsprocessor.RegisterPlugin("AddObserverMetadata", New) } type observerMetadata struct { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/docs/add_observer_metadata.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/docs/add_observer_metadata.asciidoc new file mode 100644 index 00000000..1bf3e12e --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_observer_metadata/docs/add_observer_metadata.asciidoc @@ -0,0 +1,73 @@ +[[add-observer-metadata]] +=== Add Observer metadata + +beta[] + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_observer_metadata: + netinfo.enabled: false + cache.ttl: 5m + geo: + name: nyc-dc1-rack1 + location: 40.7128, -74.0060 + continent_name: North America + country_iso_code: US + region_name: New York + region_iso_code: NY + city_name: New York +------------------------------------------------------------------------------- + +It has the following settings: + +`netinfo.enabled`:: (Optional) Default `false`. Include IP addresses and MAC addresses as the fields `observer.ip` and `observer.mac`. + +`cache.ttl`:: (Optional) The processor uses an internal cache for the observer metadata. This sets the cache expiration time. The default is `5m`; negative values disable caching altogether. + +`geo.name`:: (Optional) User-definable token used to identify a discrete location. Frequently a datacenter, rack, or similar. + +`geo.location`:: (Optional) Latitude and longitude in comma-separated format. + +`geo.continent_name`:: (Optional) Name of the continent. + +`geo.country_name`:: (Optional) Name of the country. + +`geo.region_name`:: (Optional) Name of the region. + +`geo.city_name`:: (Optional) Name of the city. + +`geo.country_iso_code`:: (Optional) ISO country code. + +`geo.region_iso_code`:: (Optional) ISO region code. + + +The `add_observer_metadata` processor annotates each event with relevant metadata from the observer machine.
+The fields added to the event look like the following: + +[source,json] +------------------------------------------------------------------------------- +{ + "observer" : { + "hostname" : "avce", + "type" : "heartbeat", + "vendor" : "elastic", + "ip" : [ + "192.168.1.251", + "fe80::64b2:c3ff:fe5b:b974" + ], + "mac" : [ + "dc:c1:02:6f:1b:ed" + ], + "geo": { + "continent_name": "North America", + "country_iso_code": "US", + "region_name": "New York", + "region_iso_code": "NY", + "city_name": "New York", + "name": "nyc-dc1-rack1", + "location": "40.7128, -74.0060" + } + } +} +------------------------------------------------------------------------------- diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/add_process_metadata.go b/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/add_process_metadata.go index e600da7b..2b78fcb8 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/add_process_metadata.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/add_process_metadata.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/libbeat/common/atomic" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const ( @@ -72,6 +73,7 @@ type processMetadataProvider interface { func init() { processors.RegisterPlugin(processorName, New) + jsprocessor.RegisterPlugin("AddProcessMetadata", New) } // New constructs a new add_process_metadata processor. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/docs/add_process_metadata.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/docs/add_process_metadata.asciidoc new file mode 100644 index 00000000..a7d54e9b --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/add_process_metadata/docs/add_process_metadata.asciidoc @@ -0,0 +1,63 @@ +[[add-process-metadata]] +=== Add process metadata + +The `add_process_metadata` processor enriches events with information from running +processes, identified by their process ID (PID). + +[source,yaml] +------------------------------------------------------------------------------- +processors: +- add_process_metadata: + match_pids: [system.process.ppid] + target: system.process.parent +------------------------------------------------------------------------------- + +The fields added to the event look as follows: +[source,json] +------------------------------------------------------------------------------- +"process": { + "name": "systemd", + "title": "/usr/lib/systemd/systemd --switched-root --system --deserialize 22", + "exe": "/usr/lib/systemd/systemd", + "args": ["/usr/lib/systemd/systemd", "--switched-root", "--system", "--deserialize", "22"], + "pid": 1, + "ppid": 0, + "start_time": "2018-08-22T08:44:50.684Z" +} +------------------------------------------------------------------------------- + +Optionally, the process environment can be included, too: +[source,json] +------------------------------------------------------------------------------- + ... + "env": { + "HOME": "/", + "TERM": "linux", + "BOOT_IMAGE": "/boot/vmlinuz-4.11.8-300.fc26.x86_64", + "LANG": "en_US.UTF-8" + } + ... +------------------------------------------------------------------------------- +It has the following settings: + +`match_pids`:: List of fields in which to look up a PID.
The processor will +search the list sequentially until the field is found in the current event, and +the PID lookup will be applied to the value of this field. + +`target`:: (Optional) Destination prefix where the `process` object will be +created. The default is the event's root. + +`include_fields`:: (Optional) List of fields to add. By default, the processor +will add all the available fields except `process.env`. + +`ignore_missing`:: (Optional) When set to `false`, events that don't contain any +of the fields in `match_pids` will be discarded and an error will be generated. By +default, this condition is ignored. + +`overwrite_keys`:: (Optional) By default, if a target field already exists, it +will not be overwritten and an error will be logged. If `overwrite_keys` is +set to `true`, this condition will be ignored. + +`restricted_fields`:: (Optional) By default, the `process.env` field is not +output, to avoid leaking sensitive data. If `restricted_fields` is `true`, the +field will be present in the output. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/communityid/communityid.go b/vendor/github.com/elastic/beats/libbeat/processors/communityid/communityid.go index cc13e731..f5c2812f 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/communityid/communityid.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/communityid/communityid.go @@ -31,12 +31,14 @@ import ( "github.com/elastic/beats/libbeat/common/flowhash" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const logName = "processor.community_id" func init() { processors.RegisterPlugin("community_id", New) + jsprocessor.RegisterPlugin("CommunityID", New) } type processor struct { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/communityid/docs/communityid.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/communityid/docs/communityid.asciidoc new file mode 100644 index 00000000..5d620d1c --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/communityid/docs/communityid.asciidoc @@ -0,0 +1,47 @@ +[[community-id]] +=== Community ID Network Flow Hash + +The `community_id` processor computes a network flow hash according to the +https://github.com/corelight/community-id-spec[Community ID Flow Hash +specification]. + +The flow hash is useful for correlating all network events related to a +single flow. For example, you can filter on a community ID value and get back +Netflow records from multiple collectors as well as layer 7 protocol +records from Packetbeat. + +By default, the processor is configured to read the flow parameters from the +appropriate Elastic Common Schema (ECS) fields. If you are processing ECS data, +no parameters are required. + +[source,yaml] +---- +processors: + - community_id: +---- + +If the data does not conform to ECS then you can customize the field names +that the processor reads from. You can also change the `target` field, which +is where the computed hash is written.
+ +[source,yaml] +---- +processors: + - community_id: + fields: + source_ip: my_source_ip + source_port: my_source_port + destination_ip: my_dest_ip + destination_port: my_dest_port + iana_number: my_iana_number + transport: my_transport + icmp_type: my_icmp_type + icmp_code: my_icmp_code + target: network.community_id +---- + +If the necessary fields are not present in the event then the processor will +silently continue without adding the target field. + +The processor also accepts an optional `seed` parameter that must be a 16-bit +unsigned integer. This value gets incorporated into all generated hashes. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/convert/convert.go b/vendor/github.com/elastic/beats/libbeat/processors/convert/convert.go index 8e84d93d..fbc6b5b3 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/convert/convert.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/convert/convert.go @@ -30,12 +30,14 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const logName = "processor.convert" func init() { processors.RegisterPlugin("convert", New) + jsprocessor.RegisterPlugin("Convert", New) } type processor struct { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/convert/docs/convert.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/convert/docs/convert.asciidoc new file mode 100644 index 00000000..7032a9f8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/convert/docs/convert.asciidoc @@ -0,0 +1,45 @@ +[[convert]] +=== Convert + +The `convert` processor converts a field in the event to a different type, such +as converting a string to an integer. + +The supported types include: `integer`, `long`, `float`, `double`, `string`, +`boolean`, and `ip`. + +The `ip` type is effectively an alias for `string`, but with an added validation +that the value is an IPv4 or IPv6 address. + +[source,yaml] +---- +processors: + - convert: + fields: + - {from: "src_ip", to: "source.ip", type: "ip"} + - {from: "src_port", to: "source.port", type: "integer"} + ignore_missing: true + fail_on_error: false +---- + +The `convert` processor has the following configuration settings: + +`fields`:: (Required) This is the list of fields to convert. At least one item +must be contained in the list. Each item in the list must have a `from` key that +specifies the source field. The `to` key is optional and specifies where to +assign the converted value. If `to` is omitted then the `from` field is updated +in-place. The `type` key specifies the data type to convert the value to. If +`type` is omitted then the processor copies or renames the field without any +type conversion. + +`ignore_missing`:: (Optional) If `true`, the processor continues to the next +field when the `from` key is not found in the event. If `false`, the processor +returns an error and does not process the remaining fields. Default is `false`. + +`fail_on_error`:: (Optional) If `false`, type conversion failures are ignored and +the processor continues to the next field. Default is `true`. + +`tag`:: (Optional) An identifier for this processor. Useful for debugging. + +`mode`:: (Optional) When both `from` and `to` are defined for a field then +`mode` controls whether to `copy` or `rename` the field when the type conversion +is successful. Default is `copy`.
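A small Go sketch of the same conversion done programmatically (assumed usage; the field names match the YAML example above):

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/processors/convert"
)

func main() {
	cfg := common.MustNewConfigFrom(map[string]interface{}{
		"fields": []map[string]interface{}{
			{"from": "src_port", "to": "source.port", "type": "integer"},
		},
		"ignore_missing": true,
		"fail_on_error":  false,
	})

	p, err := convert.New(cfg)
	if err != nil {
		panic(err)
	}

	evt, err := p.Run(&beat.Event{Fields: common.MapStr{"src_port": "443"}})
	if err != nil {
		panic(err)
	}
	fmt.Println(evt.Fields) // "source.port" should now hold the integer 443
}
----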
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/decode_csv_fields.go b/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/decode_csv_fields.go index 2084cf6b..e65fe223 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/decode_csv_fields.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/decode_csv_fields.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/checks" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) type decodeCSVFields struct { @@ -60,6 +61,8 @@ func init() { checks.ConfigChecked(NewDecodeCSVField, checks.RequireFields("fields"), checks.AllowedFields("fields", "ignore_missing", "overwrite_keys", "separator", "trim_leading_space", "overwrite_keys", "fail_on_error", "when"))) + + jsprocessor.RegisterPlugin("DecodeCSVField", NewDecodeCSVField) } // NewDecodeCSVField construct a new decode_csv_field processor. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/docs/decode_csv_fields.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/docs/decode_csv_fields.asciidoc new file mode 100644 index 00000000..718f9551 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/decode_csv_fields/docs/decode_csv_fields.asciidoc @@ -0,0 +1,41 @@ +[[decode-csv-fields]] +=== Decode CSV fields + +experimental[] + +The `decode_csv_fields` processor decodes fields containing records in +comma-separated format (CSV). It will output the values as an array of strings. +This processor is available for Filebeat and Journalbeat. + +[source,yaml] +----------------------------------------------------- +processors: + - decode_csv_fields: + fields: + message: decoded.csv + separator: "," + ignore_missing: false + overwrite_keys: true + trim_leading_space: false + fail_on_error: true +----------------------------------------------------- + +The `decode_csv_fields` has the following settings: + +`fields`:: This is a mapping from the source field containing the CSV data to + the destination field to which the decoded array will be written. +`separator`:: (Optional) Character to be used as a column separator. + The default is the comma character. To use a TAB character, + set it to "\t". +`ignore_missing`:: (Optional) Whether to ignore events which lack the source + field. The default is `false`, which will fail processing of + an event if a field is missing. +`overwrite_keys`:: Whether the target field is overwritten if it + already exists. The default is `false`, which will fail + processing of an event when `target` already exists. +`trim_leading_space`:: Whether extra space after the separator is trimmed from + values. This works even if the separator is also a space. + The default is `false`. +`fail_on_error`:: (Optional) If set to `true`, the changes to the event are +reverted in case of an error, and the original event is returned. If set to +`false`, processing continues even if an error happens. Default is `true`.
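The same decoding, sketched in Go under the assumption that `NewDecodeCSVField` is called directly; the CSV line is illustrative:

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/processors/decode_csv_fields"
)

func main() {
	// Decode the "message" field into the "decoded.csv" field.
	cfg := common.MustNewConfigFrom(map[string]interface{}{
		"fields": map[string]string{"message": "decoded.csv"},
	})

	p, err := decode_csv_fields.NewDecodeCSVField(cfg)
	if err != nil {
		panic(err)
	}

	evt, err := p.Run(&beat.Event{Fields: common.MapStr{
		"message": `17.3.202.45,"GET /product HTTP/1.1",200`,
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(evt.Fields) // decoded.csv holds the three column values
}
----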
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/dissect/docs/dissect.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/dissect/docs/dissect.asciidoc new file mode 100644 index 00000000..5ecac9e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/dissect/docs/dissect.asciidoc @@ -0,0 +1,30 @@ +[[dissect]] +=== Dissect strings + +The dissect processor tokenizes incoming strings using defined patterns. + +[source,yaml] +------- +processors: +- dissect: + tokenizer: "%{key1} %{key2}" + field: "message" + target_prefix: "dissect" +------- + +The `dissect` processor has the following configuration settings: + +`field`:: (Optional) The event field to tokenize. Default is `message`. + +`target_prefix`:: (Optional) The name of the field where the extracted values are written. When an empty +string is defined, the processor will create the keys at the root of the event. Default is +`dissect`. When the target key already exists in the event, the processor won't replace it and will log +an error; you need to either drop or rename the key before using dissect. + +For tokenization to be successful, all keys must be found and extracted; if one of them cannot be +found, an error is logged and no modification is done on the original event. + +NOTE: A key can contain any characters except reserved suffix or prefix modifiers: `/`,`&`, `+` +and `?`. + +See <<conditions>> for a list of supported conditions. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/dissect/processor.go b/vendor/github.com/elastic/beats/libbeat/processors/dissect/processor.go index d47b595e..ab5c1890 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/dissect/processor.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/dissect/processor.go @@ -25,6 +25,7 @@ import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const flagParsingError = "dissect_parsing_error" @@ -35,6 +36,7 @@ type processor struct { func init() { processors.RegisterPlugin("dissect", NewProcessor) + jsprocessor.RegisterPlugin("Dissect", NewProcessor) } // NewProcessor constructs a new dissect processor. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/dns/dns.go b/vendor/github.com/elastic/beats/libbeat/processors/dns/dns.go index d1c93a36..7dc447d4 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/dns/dns.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/dns/dns.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/monitoring" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const logName = "processor.dns" @@ -40,6 +41,7 @@ var instanceID = atomic.MakeUint32(0) func init() { processors.RegisterPlugin("dns", New) + jsprocessor.RegisterPlugin("DNS", New) } type processor struct { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/dns/docs/dns.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/dns/docs/dns.asciidoc new file mode 100644 index 00000000..d184ae00 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/dns/docs/dns.asciidoc @@ -0,0 +1,102 @@ +[[processor-dns]] +=== DNS Reverse Lookup + +The DNS processor performs reverse DNS lookups of IP addresses.
It caches the +responses that it receives in accordance with the time-to-live (TTL) value +contained in the response. It also caches failures that occur during lookups. +Each instance of this processor maintains its own independent cache. + +The processor uses its own DNS resolver to send requests to nameservers and does +not use the operating system's resolver. It does not read any values contained +in `/etc/hosts`. + +This processor can significantly slow down your pipeline's throughput if you +have a high-latency network or a slow upstream nameserver. The cache will help +with performance, but if the addresses being resolved have a high cardinality +then the cache benefits will be diminished due to the high miss ratio. + +By way of example, if each DNS lookup takes 2 milliseconds, the maximum +throughput you can achieve is 500 events per second (1000 milliseconds / 2 +milliseconds). If you have a high cache hit ratio then your throughput can be +higher. + +This is a minimal configuration example that resolves the IP addresses contained +in two fields. + +[source,yaml] +---- +processors: +- dns: + type: reverse + fields: + source.ip: source.hostname + destination.ip: destination.hostname +---- + +Next is a configuration example showing all options. + +[source,yaml] +---- +processors: +- dns: + type: reverse + action: append + fields: + server.ip: server.hostname + client.ip: client.hostname + success_cache: + capacity.initial: 1000 + capacity.max: 10000 + failure_cache: + capacity.initial: 1000 + capacity.max: 10000 + ttl: 1m + nameservers: ['192.0.2.1', '203.0.113.1'] + timeout: 500ms + tag_on_failure: [_dns_reverse_lookup_failed] +---- + +The `dns` processor has the following configuration settings: + +`type`:: The type of DNS lookup to perform. The only supported type is +`reverse` which queries for a PTR record. + +`action`:: This defines the behavior of the processor when the target field +already exists in the event. The options are `append` (default) and `replace`. + +`fields`:: This is a mapping of source field names to target field names. The +value of the source field will be used in the DNS query and the result will be +written to the target field. + +`success_cache.capacity.initial`:: The initial number of items that the success +cache will be allocated to hold. When initialized, the processor will allocate +the memory for this number of items. Default value is `1000`. + +`success_cache.capacity.max`:: The maximum number of items that the success +cache can hold. When the maximum capacity is reached a random item is evicted. +Default value is `10000`. + +`failure_cache.capacity.initial`:: The initial number of items that the failure +cache will be allocated to hold. When initialized, the processor will allocate +the memory for this number of items. Default value is `1000`. + +`failure_cache.capacity.max`:: The maximum number of items that the failure +cache can hold. When the maximum capacity is reached a random item is evicted. +Default value is `10000`. + +`failure_cache.ttl`:: The duration for which failures are cached. Valid time +units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Default value is `1m`. + +`nameservers`:: A list of nameservers to query. If there are multiple servers, +the resolver queries them in the order listed. If none are specified then it +will read the nameservers listed in `/etc/resolv.conf` once at initialization. +On Windows you must always supply at least one nameserver. + +`timeout`:: The duration after which a DNS query will time out.
This is the timeout +for each DNS request, so if you have 2 nameservers the total timeout will be +2 times this value. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", +"h". Default value is `500ms`. + +`tag_on_failure`:: A list of tags to add to the event when any lookup fails. The +tags are only added once even if multiple lookups fail. By default no tags are +added upon failure. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/extract_array/docs/extract_array.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/extract_array/docs/extract_array.asciidoc new file mode 100644 index 00000000..ccdab1e7 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/extract_array/docs/extract_array.asciidoc @@ -0,0 +1,42 @@ +[[extract-array]] +=== Extract array + +experimental[] + +The `extract_array` processor populates fields with values read from an array +field. The following example will populate `source.ip` with the first element of +the `my_array` field, `destination.ip` with the second element, and +`network.transport` with the third. + +[source,yaml] +----------------------------------------------------- +processors: + - extract_array: + field: my_array + mappings: + source.ip: 0 + destination.ip: 1 + network.transport: 2 +----------------------------------------------------- + +The following settings are supported: + +`field`:: The array field whose elements are to be extracted. +`mappings`:: Maps each field name to an array index. Use 0 for the first element in + the array. Multiple fields can be mapped to the same array element. +`ignore_missing`:: (Optional) Whether to ignore events where the array field is + missing. The default is `false`, which will fail processing + of an event if the specified field does not exist. Set it to + `true` to ignore this condition. +`overwrite_keys`:: Whether the target fields specified in the mapping are + overwritten if they already exist. The default is `false`, + which will fail processing if a target field already exists. +`fail_on_error`:: (Optional) If set to `true` and an error happens, changes to + the event are reverted, and the original event is returned. If + set to `false`, processing continues despite errors. + Default is `true`. +`omit_empty`:: (Optional) Whether empty values are extracted from the array. If + set to `true`, instead of the target field being set to an + empty value, it is left unset. The empty string (`""`), an + empty array (`[]`) or an empty object (`{}`) are considered + empty values. Default is `false`.
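And a Go sketch of the first example, assuming direct use of the exported constructor; the array contents are made up:

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/libbeat/beat"
	"github.com/elastic/beats/libbeat/common"
	"github.com/elastic/beats/libbeat/processors/extract_array"
)

func main() {
	cfg := common.MustNewConfigFrom(map[string]interface{}{
		"field": "my_array",
		"mappings": map[string]int{
			"source.ip":         0,
			"destination.ip":    1,
			"network.transport": 2,
		},
	})

	p, err := extract_array.New(cfg)
	if err != nil {
		panic(err)
	}

	evt, err := p.Run(&beat.Event{Fields: common.MapStr{
		"my_array": []interface{}{"10.0.0.1", "10.0.0.2", "tcp"},
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(evt.Fields)
}
----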
diff --git a/vendor/github.com/elastic/beats/libbeat/processors/extract_array/extract_array.go b/vendor/github.com/elastic/beats/libbeat/processors/extract_array/extract_array.go index 2bb47358..ce76b096 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/extract_array/extract_array.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/extract_array/extract_array.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/checks" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) type config struct { @@ -61,6 +62,8 @@ func init() { checks.ConfigChecked(New, checks.RequireFields("field", "mappings"), checks.AllowedFields("field", "mappings", "ignore_missing", "overwrite_keys", "fail_on_error", "when", "omit_empty"))) + + jsprocessor.RegisterPlugin("ExtractArray", New) } // Unpack unpacks the processor's configuration. diff --git a/vendor/github.com/elastic/beats/libbeat/processors/namespace.go b/vendor/github.com/elastic/beats/libbeat/processors/namespace.go index cea3e53e..1dd10f4b 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/namespace.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/namespace.go @@ -121,4 +121,13 @@ func (ns *Namespace) Plugin() Constructor { }) } +// Constructors returns all registered processor constructors and their names. +func (ns *Namespace) Constructors() map[string]Constructor { + c := make(map[string]Constructor, len(ns.reg)) + for name, p := range ns.reg { + c[name] = p.Plugin() + } + return c +} + func (p plugin) Plugin() Constructor { return p.c } diff --git a/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/docs/registered_domain.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/docs/registered_domain.asciidoc new file mode 100644 index 00000000..983867d5 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/docs/registered_domain.asciidoc @@ -0,0 +1,33 @@ +[[processor-registered-domain]] +=== Registered Domain + +The `registered_domain` processor reads a field containing a hostname and then +writes the "registered domain" contained in the hostname to the target field. +For example, given `www.google.co.uk` the processor would output `google.co.uk`. +In other words, the "registered domain" is the effective top-level domain +(`co.uk`) plus one level (`google`). + +This processor uses the Mozilla Public Suffix list to determine the value. + +[source,yaml] +---- +processors: +- registered_domain: + field: dns.question.name + target_field: dns.question.registered_domain + ignore_missing: true + ignore_failure: true +---- + +The `registered_domain` processor has the following configuration settings: + +.Registered Domain options +[options="header"] +|====== +| Name | Required | Default | Description | +| `field` | yes | | Source field containing a fully qualified domain name (FQDN). | +| `target_field` | yes | | Target field for the registered domain value. | +| `ignore_missing` | no | false | Ignore errors when the source field is missing. | +| `ignore_failure` | no | false | Ignore all errors produced by the processor. | +| `id` | no | | An identifier for this processor instance. Useful for debugging.
| +|====== diff --git a/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/registered_domain.go b/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/registered_domain.go index 0480c7c9..b378da75 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/registered_domain.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/registered_domain/registered_domain.go @@ -28,6 +28,7 @@ import ( "github.com/elastic/beats/libbeat/common/cfgwarn" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const ( @@ -37,6 +38,7 @@ const ( func init() { processors.RegisterPlugin(procName, New) + jsprocessor.RegisterPlugin("RegisteredDomain", New) } type processor struct { diff --git a/vendor/github.com/elastic/beats/libbeat/processors/script/docs/script.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/script/docs/script.asciidoc new file mode 100644 index 00000000..2b2ece54 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/script/docs/script.asciidoc @@ -0,0 +1,169 @@ +[[processor-script]] +=== Script Processor + +experimental[] + +The script processor executes Javascript code to process an event. The processor +uses a pure Go implementation of ECMAScript 5.1 and has no external +dependencies. This can be useful in situations where one of the other processors +doesn't provide the functionality you need to filter events. + +The processor can be configured by embedding Javascript in your configuration +file or by pointing the processor at external file(s). + +[source,yaml] +---- +processors: +- script: + lang: javascript + id: my_filter + source: > + function process(event) { + event.Tag("js"); + } +---- + +This loads `filter.js` from disk. + +[source,yaml] +---- +processors: +- script: + lang: javascript + id: my_filter + file: ${path.config}/filter.js +---- + +Parameters can be passed to the script by adding `params` to the config. +This allows for a script to be made reusable. When using `params` the +code must define a `register(params)` function to receive the parameters. + +[source,yaml] +---- +processors: +- script: + lang: javascript + id: my_filter + params: + threshold: 15 + source: > + var params = {threshold: 42}; + function register(scriptParams) { + params = scriptParams; + } + function process(event) { + if (event.Get("severity") < params.threshold) { + event.Cancel(); + } + } +---- + +If the script defines a `test()` function it will be invoked when the processor +is loaded. Any exceptions thrown will cause the processor to fail to load. This +can be used to make assertions about the behavior of the script. + +[source,javascript] +---- +function process(event) { + if (event.Get("event.code") === 1102) { + event.Put("event.action", "cleared"); + } + return event; +} + +function test() { + var event = process(new Event({event: {code: 1102}})); + if (event.Get("event.action") !== "cleared") { + throw "expected event.action === cleared"; + } +} +---- + +[float] +==== Configuration options + +The `script` processor has the following configuration settings: + +`lang`:: This field is required and its value must be `javascript`. + +`tag`:: This is an optional identifier that is added to log messages. If defined +it enables metrics logging for this instance of the processor. The metrics +include the number of exceptions and a histogram of the execution times for +the `process` function. 
+ +`source`:: Inline Javascript source code. + +`file`:: Path to a script file to load. Relative paths are interpreted as +relative to the `path.config` directory. Globs are expanded. + +`files`:: List of script files to load. The scripts are concatenated together. +Relative paths are interpreted as relative to the `path.config` directory. +Globs are expanded. + +`params`:: A dictionary of parameters that are passed to the `register` function +of the script. + +`tag_on_exception`:: Tag to add to events in case the Javascript code causes an +exception while processing an event. Defaults to `_js_exception`. + +`timeout`:: This sets an execution timeout for the `process` function. When +the `process` function takes longer than the `timeout` period the function +is interrupted. You can set this option to prevent a script from running for +too long (like preventing an infinite `while` loop). By default there is no +timeout. + +[float] +==== Event API + +The `Event` object passed to the `process` method has the following API. + +[frame="topbot",options="header"] +|=== +|Method |Description + +|`Get(string)` +|Get a value from the event (either a scalar or an object). If the key does not +exist `null` is returned. If no key is provided then an object containing all +fields is returned. + +*Example*: `var value = event.Get(key);` + +|`Put(string, value)` +|Put a value into the event. If the key was already set then the +previous value is returned. It throws an exception if the key cannot be set +because one of the intermediate values is not an object. + +*Example*: `var old = event.Put(key, value);` + +|`Rename(string, string)` +|Rename a key in the event. The target key must not exist. It +returns true if the source key was successfully renamed to the target key. + +*Example*: `var success = event.Rename("source", "target");` + +|`Delete(string)` +|Delete a field from the event. It returns true on success. + +*Example*: `var deleted = event.Delete("user.email");` + +|`Cancel()` +|Flag the event as cancelled, which causes the processor to drop the +event. + +*Example*: `event.Cancel(); return;` + +|`Tag(string)` +|Append a tag to the `tags` field if the tag does not already +exist. Throws an exception if `tags` exists and is not a string or a list of +strings. + +*Example*: `event.Tag("user_event");` + +|`AppendTo(string, string)` +|`AppendTo` is a specialized `Put` method that converts the existing value to an +array and appends the value if it does not already exist. If there is an +existing value that's not a string or array of strings then an exception is +thrown. 
+ +*Example*: `event.AppendTo("error.message", "invalid file hash");` +|=== diff --git a/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/chain.go b/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/chain.go index cbf09d1e..947903c5 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/chain.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/chain.go @@ -42,7 +42,7 @@ func newChainBuilder(runtime *goja.Runtime) func(call goja.ConstructorCall) *goj } c := &chainBuilder{runtime: runtime, this: call.This} - for name, fn := range constructors { + for name, fn := range registry.Constructors() { c.this.Set(name, c.makeBuilderFunc(fn)) } call.This.Set("Add", c.Add) diff --git a/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor.go b/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor.go index 758e7abc..da5a914c 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor.go @@ -22,50 +22,23 @@ import ( "github.com/dop251/goja_nodejs/require" "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" - "github.com/elastic/beats/libbeat/processors/actions" - "github.com/elastic/beats/libbeat/processors/add_cloud_metadata" - "github.com/elastic/beats/libbeat/processors/add_docker_metadata" - "github.com/elastic/beats/libbeat/processors/add_host_metadata" - "github.com/elastic/beats/libbeat/processors/add_kubernetes_metadata" - "github.com/elastic/beats/libbeat/processors/add_locale" - "github.com/elastic/beats/libbeat/processors/add_observer_metadata" - "github.com/elastic/beats/libbeat/processors/add_process_metadata" - "github.com/elastic/beats/libbeat/processors/communityid" - "github.com/elastic/beats/libbeat/processors/convert" - "github.com/elastic/beats/libbeat/processors/decode_csv_fields" - "github.com/elastic/beats/libbeat/processors/dissect" - "github.com/elastic/beats/libbeat/processors/dns" - "github.com/elastic/beats/libbeat/processors/extract_array" - "github.com/elastic/beats/libbeat/processors/registered_domain" "github.com/elastic/beats/libbeat/processors/script/javascript" - "github.com/elastic/beats/libbeat/processors/timestamp" ) // Create constructors for most of the Beat processors. // Note that script is omitted to avoid nesting. 
-var constructors = map[string]processors.Constructor{ - "AddCloudMetadata": add_cloud_metadata.New, - "AddDockerMetadata": add_docker_metadata.New, - "AddFields": actions.CreateAddFields, - "AddHostMetadata": add_host_metadata.New, - "AddKubernetesMetadata": add_kubernetes_metadata.New, - "AddObserverMetadata": add_observer_metadata.New, - "AddLocale": add_locale.New, - "AddProcessMetadata": add_process_metadata.New, - "CommunityID": communityid.New, - "Convert": convert.New, - "CopyFields": actions.NewCopyFields, - "DecodeBase64Field": actions.NewDecodeBase64Field, - "DecodeCSVField": decode_csv_fields.NewDecodeCSVField, - "DecodeJSONFields": actions.NewDecodeJSONFields, - "Dissect": dissect.NewProcessor, - "DNS": dns.New, - "ExtractArray": extract_array.New, - "RegisteredDomain": registered_domain.New, - "Rename": actions.NewRenameFields, - "Timestamp": timestamp.New, - "TruncateFields": actions.NewTruncateFields, +var registry = processors.NewNamespace() + +// RegisterPlugin registers processor plugins for the javascript processor. +func RegisterPlugin(name string, c processors.Constructor) { + logp.L().Named("javascript").Debugf("Register script processor %s", name) + + err := registry.Register(name, c) + if err != nil { + panic(err) + } } // beatProcessor wraps a processor for javascript. @@ -140,7 +113,7 @@ func newConstructor( func Require(runtime *goja.Runtime, module *goja.Object) { o := module.Get("exports").(*goja.Object) - for name, fn := range constructors { + for name, fn := range registry.Constructors() { o.Set(name, newConstructor(runtime, fn)) } diff --git a/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor_test.go b/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor_test.go index b78d0cfa..7faddd24 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/script/javascript/module/processor/processor_test.go @@ -18,8 +18,8 @@ package processor import ( - "os" - "runtime" + "encoding/json" + "fmt" "testing" "github.com/stretchr/testify/assert" @@ -27,11 +27,16 @@ import ( "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" + "github.com/elastic/beats/libbeat/processors" "github.com/elastic/beats/libbeat/processors/script/javascript" _ "github.com/elastic/beats/libbeat/processors/script/javascript/module/require" ) +func init() { + RegisterPlugin("Mock", newMock) +} + func testEvent() *beat.Event { return &beat.Event{ Fields: common.MapStr{ @@ -49,14 +54,14 @@ func testEvent() *beat.Event { } } -func TestNewProcessorAddHostMetadata(t *testing.T) { +func TestNewProcessorDummyProcessor(t *testing.T) { const script = ` var processor = require('processor'); -var addHostMetadata = new processor.AddHostMetadata({"netinfo.enabled": true}); +var mock = new processor.Mock({"fields": {"added": "new_value"}}); function process(evt) { - addHostMetadata.Run(evt); + mock.Run(evt); } ` @@ -71,307 +76,32 @@ function process(evt) { t.Fatal(err) } - _, err = evt.GetValue("host.hostname") - assert.NoError(t, err) + checkEvent(t, evt, "added", "new_value") } -func TestNewProcessorAddLocale(t *testing.T) { +func TestChainOfDummyProcessors(t *testing.T) { const script = ` var processor = require('processor'); -var addLocale = new processor.AddLocale(); - -function process(evt) { - addLocale.Run(evt); -} -` - - 
logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt, err := p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } - - _, err = evt.GetValue("event.timezone") - assert.NoError(t, err) -} - -func TestNewProcessorAddProcessMetadata(t *testing.T) { - const script = ` -var processor = require('processor'); - -var addProcessMetadata = new processor.AddProcessMetadata({ - match_pids: "process.pid", - overwrite_keys: true, -}); - -function process(evt) { - addProcessMetadata.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt := &beat.Event{Fields: common.MapStr{"process": common.MapStr{"pid": os.Getppid()}}} - evt, err = p.Run(evt) - if err != nil { - t.Fatal(err) - } - - _, err = evt.GetValue("process.name") - assert.NoError(t, err) - t.Logf("%+v", evt.Fields) -} - -func TestNewProcessorCommunityID(t *testing.T) { - const script = ` -var processor = require('processor'); - -var communityID = new processor.CommunityID(); - -function process(evt) { - communityID.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt, err := p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } - - id, _ := evt.GetValue("network.community_id") - assert.Equal(t, "1:15+Ly6HsDg0sJdTmNktf6rko+os=", id) -} - -func TestNewCopyFields(t *testing.T) { - const script = ` -var processor = require('processor'); - -var copy = new processor.CopyFields({ - fields: [ - {from: "message", to: "log.original"}, - ], -}); - -function process(evt) { - copy.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt, err := p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } - - _, err = evt.GetValue("log.original") - assert.NoError(t, err) -} - -func TestNewProcessorDecodeJSONFields(t *testing.T) { - const script = ` -var processor = require('processor'); - -var decodeJSON = new processor.DecodeJSONFields({ - fields: ["message"], - target: "", -}); - -function process(evt) { - decodeJSON.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt := testEvent() - evt.PutValue("message", `{"hello": "world"}`) - - _, err = p.Run(evt) - if err != nil { - t.Fatal(err) - } - - v, _ := evt.GetValue("hello") - assert.Equal(t, "world", v) -} - -func TestNewProcessorDissect(t *testing.T) { - const script = ` -var processor = require('processor'); - -var chopLog = new processor.Dissect({ - tokenizer: "key=%{key}", - field: "message", -}); - -function process(evt) { - chopLog.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt, err := p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } - - key, _ := evt.GetValue("dissect.key") - assert.Equal(t, "hello", key) -} - -func TestNewProcessorDNS(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("Windows requires explicit DNS server configuration") - } - - const script = ` -var processor = require('processor'); - -var dns = new processor.DNS({ - type: "reverse", - fields: { - "source.ip": "source.domain", - "destination.ip": "destination.domain" - }, - tag_on_failure: 
["_dns_reverse_lookup_failed"], -}); - -function process(evt) { - dns.Run(evt); - if (evt.Get().tags[0] !== "_dns_reverse_lookup_failed") { - throw "missing tag"; - } -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - _, err = p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } -} - -func TestNewRename(t *testing.T) { - const script = ` -var processor = require('processor'); - -var rename = new processor.Rename({ - fields: [ - {from: "message", to: "log.original"}, - ], -}); - -function process(evt) { - rename.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt, err := p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } - - _, err = evt.GetValue("log.original") - assert.NoError(t, err) -} - -func TestNewTruncateFields(t *testing.T) { - const script = ` -var processor = require('processor'); - -var truncate = new processor.TruncateFields({ - fields: [ - "message", - ], - max_characters: 4, -}); - -function process(evt) { - truncate.Run(evt); -} -` - - logp.TestingSetup() - p, err := javascript.NewFromConfig(javascript.Config{Source: script}, nil) - if err != nil { - t.Fatal(err) - } - - evt, err := p.Run(testEvent()) - if err != nil { - t.Fatal(err) - } - - msg, _ := evt.GetValue("message") - assert.Equal(t, "key=", msg) -} - -func TestNewProcessorChain(t *testing.T) { - const script = ` -var processor = require('processor'); - -var localeProcessor = new processor.AddLocale(); +var hungarianHello = new processor.Mock({"fields": {"helló": "világ"}}); +var germanHello = new processor.Mock({"fields": {"hallo": "Welt"}}); var chain = new processor.Chain() - .Add(localeProcessor) - .Rename({ - fields: [ - {from: "event.timezone", to: "timezone"}, - ], + .Add(hungarianHello) + .Mock({ + fields: { "hola": "mundo" }, }) .Add(function(evt) { - evt.Put("hello", "world"); + evt.Put("hello", "world"); }) .Build(); var chainOfChains = new processor.Chain() .Add(chain) - .AddFields({fields: {foo: "bar"}}) - .Build(); - + .Add(germanHello) + .Build(); function process(evt) { - chainOfChains.Run(evt); + chainOfChains.Run(evt); } ` @@ -386,10 +116,49 @@ function process(evt) { t.Fatal(err) } - _, err = evt.GetValue("timezone") - assert.NoError(t, err) - v, _ := evt.GetValue("hello") - assert.Equal(t, "world", v) - v, _ = evt.GetValue("fields.foo") - assert.Equal(t, "bar", v) + // checking if hello world is added to the event in different languages + checkEvent(t, evt, "helló", "világ") + checkEvent(t, evt, "hola", "mundo") + checkEvent(t, evt, "hello", "world") + checkEvent(t, evt, "hallo", "Welt") +} + +func checkEvent(t *testing.T, evt *beat.Event, key, value string) { + s, err := evt.GetValue(key) + assert.NoError(t, err) + + switch ss := s.(type) { + case string: + assert.Equal(t, ss, value) + default: + t.Fatal("unexpected type") + } +} + +type mockProcessor struct { + fields common.MapStr +} + +func newMock(c *common.Config) (processors.Processor, error) { + config := struct { + Fields common.MapStr `config:"fields" validate:"required"` + }{} + err := c.Unpack(&config) + if err != nil { + return nil, fmt.Errorf("fail to unpack the mock processor configuration: %s", err) + } + + return &mockProcessor{ + fields: config.Fields, + }, nil +} + +func (m *mockProcessor) Run(event *beat.Event) (*beat.Event, error) { + event.Fields.DeepUpdate(m.fields) + return event, nil +} + +func (m 
*mockProcessor) String() string { + s, _ := json.Marshal(m.fields) + return fmt.Sprintf("mock=%s", s) } diff --git a/vendor/github.com/elastic/beats/libbeat/processors/timestamp/docs/timestamp.asciidoc b/vendor/github.com/elastic/beats/libbeat/processors/timestamp/docs/timestamp.asciidoc new file mode 100644 index 00000000..6aec156c --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/processors/timestamp/docs/timestamp.asciidoc @@ -0,0 +1,65 @@ +[[processor-timestamp]] +=== Timestamp + +beta[] + +The `timestamp` processor parses a timestamp from a field. By default the +timestamp processor writes the parsed result to the `@timestamp` field. You can +specify a different field by setting the `target_field` parameter. The timestamp +value is parsed according to the `layouts` parameter. Multiple layouts can be +specified and they will be used sequentially to attempt parsing the timestamp +field. + +NOTE: The timestamp layouts used by this processor are different from the + formats supported by date processors in Logstash and Elasticsearch Ingest + Node. + +The `layouts` are described using a reference time that is based on this +specific time: + + Mon Jan 2 15:04:05 MST 2006 + +Since MST is GMT-0700, the reference time is: + + 01/02 03:04:05PM '06 -0700 + +To define your own layout, rewrite the reference time in a format that matches +the timestamps you expect to parse. For more layout examples and details see the +https://godoc.org/time#pkg-constants[Go time package documentation]. + +If a layout does not contain a year then the current year in the specified +`timezone` is added to the time value. + +.Timestamp options +[options="header"] +|====== +| Name | Required | Default | Description | +| `field` | yes | | Source field containing the time to be parsed. | +| `target_field` | no | @timestamp | Target field for the parsed time value. The target value is always written as UTC. | +| `layouts` | yes | | Timestamp layouts that define the expected time value format. In addition to layouts, `UNIX` and `UNIX_MS` are accepted. | +| `timezone` | no | UTC | Timezone (e.g. America/New_York) to use when parsing a timestamp not containing a timezone. | +| `ignore_missing` | no | false | Ignore errors when the source field is missing. | +| `ignore_failure` | no | false | Ignore all errors produced by the processor. | +| `test` | no | | A list of timestamps that must parse successfully when loading the processor. | +| `id` | no | | An identifier for this processor instance. Useful for debugging. | +|====== + +Here is an example that parses the `start_time` field and writes the result +to the `@timestamp` field, then deletes the `start_time` field. When the +processor is loaded it will immediately validate that the two `test` timestamps +parse with this configuration. 
+ +[source,yaml] +---- +processors: +- timestamp: + field: start_time + layouts: + - '2006-01-02T15:04:05Z' + - '2006-01-02T15:04:05.999Z' + test: + - '2019-06-22T16:33:51Z' + - '2019-11-18T04:59:51.123Z' +- drop_fields: + fields: [start_time] +---- diff --git a/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp.go b/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp.go index 975073b4..4971c24c 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp.go @@ -28,12 +28,14 @@ import ( "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/libbeat/processors" + jsprocessor "github.com/elastic/beats/libbeat/processors/script/javascript/module/processor" ) const logName = "processor.timestamp" func init() { processors.RegisterPlugin("timestamp", New) + jsprocessor.RegisterPlugin("Timestamp", New) } type processor struct { @@ -55,7 +57,7 @@ func New(cfg *common.Config) (processors.Processor, error) { } func newFromConfig(c config) (*processor, error) { - loc, err := tz.LoadLocation(c.Timezone) + loc, err := loadLocation(c.Timezone) if err != nil { return nil, errors.Wrap(err, "failed to load timezone") } @@ -82,6 +84,21 @@ func newFromConfig(c config) (*processor, error) { return p, nil } +var timezoneFormats = []string{"-07", "-0700", "-07:00"} + +func loadLocation(timezone string) (*time.Location, error) { + for _, format := range timezoneFormats { + t, err := time.Parse(format, timezone) + if err == nil { + name, offset := t.Zone() + return time.FixedZone(name, offset), nil + } + } + + // Rest of location formats + return tz.LoadLocation(timezone) +} + func (p *processor) String() string { return fmt.Sprintf("timestamp=[field=%s, target_field=%v, timezone=%v]", p.Field, p.TargetField, p.tz) diff --git a/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp_test.go b/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp_test.go index 8e02e18a..814f9163 100644 --- a/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp_test.go +++ b/vendor/github.com/elastic/beats/libbeat/processors/timestamp/timestamp_test.go @@ -23,6 +23,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" @@ -199,3 +200,103 @@ func TestBuiltInTest(t *testing.T) { assert.Contains(t, err.Error(), "failed to parse test timestamp") } } + +func TestTimezone(t *testing.T) { + cases := map[string]struct { + Timezone string + Expected time.Time + Error bool + }{ + "no timezone": { + Expected: expected, + }, + "location label": { + // Use a location without DST to avoid surprises + Timezone: "America/Panama", + Expected: expected.Add(5 * time.Hour), + }, + "UTC label": { + Timezone: "Etc/UTC", + Expected: expected, + }, + "GMT label": { + Timezone: "Etc/GMT+2", + Expected: expected.Add(2 * time.Hour), + }, + "UTC as standard offset": { + Timezone: "+0000", + Expected: expected, + }, + "standard offset": { + Timezone: "+0430", + Expected: expected.Add(-4*time.Hour - 30*time.Minute), + }, + "hour and minute offset": { + Timezone: "+03:00", + Expected: expected.Add(-3 * time.Hour), + }, + "minute offset": { + Timezone: "+00:30", + Expected: expected.Add(-30 * time.Minute), + }, + "abbreviated hour offset": { + Timezone: "+04", + Expected: expected.Add(-4 * 
time.Hour), + }, + "negative hour and minute offset": { + Timezone: "-03:30", + Expected: expected.Add(3*time.Hour + 30*time.Minute), + }, + "negative minute offset": { + Timezone: "-00:30", + Expected: expected.Add(30 * time.Minute), + }, + "negative abbreviated hour offset": { + Timezone: "-04", + Expected: expected.Add(4 * time.Hour), + }, + + "unsupported UTC representation": { + Timezone: "Z", + Error: true, + }, + "non-existing location": { + Timezone: "Kalimdor/Orgrimmar", + Error: true, + }, + "incomplete offset": { + Timezone: "-400", + Error: true, + }, + } + + for title, c := range cases { + t.Run(title, func(t *testing.T) { + config := defaultConfig() + config.Field = "ts" + config.Timezone = c.Timezone + config.Layouts = append(config.Layouts, time.ANSIC) + + processor, err := newFromConfig(config) + if c.Error { + require.Error(t, err) + return + } + require.NoError(t, err) + + originalTimestamp := expected.Format(time.ANSIC) + t.Logf("Original timestamp: %+v", originalTimestamp) + t.Logf("Timezone: %s", c.Timezone) + + event := &beat.Event{ + Fields: common.MapStr{ + config.Field: originalTimestamp, + }, + } + + event, err = processor.Run(event) + assert.NoError(t, err) + assert.Equal(t, c.Expected, event.Timestamp) + }) + } +} diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/event.go b/vendor/github.com/elastic/beats/libbeat/publisher/event.go index dfcd930e..8edc2e9d 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/event.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/event.go @@ -19,6 +19,7 @@ package publisher import ( "github.com/elastic/beats/libbeat/beat" + "github.com/elastic/beats/libbeat/common" ) // Batch is used to pass a batch of events to the outputs and asynchronously listening @@ -41,11 +42,38 @@ type Batch interface { type Event struct { Content beat.Event Flags EventFlags + Cache EventCache } // EventFlags provides additional flags/option types for use with the outputs. type EventFlags uint8 +// EventCache provides a space for outputs to define per-event metadata +// that's intended to be used only within the scope of an output. +type EventCache struct { + m common.MapStr +} + +// Put lets outputs put key-value pairs into the event cache +func (ec *EventCache) Put(key string, value interface{}) (interface{}, error) { + if ec.m == nil { + // uninitialized map + ec.m = common.MapStr{} + } + + return ec.m.Put(key, value) +} + +// GetValue lets outputs retrieve values from the event cache by key +func (ec *EventCache) GetValue(key string) (interface{}, error) { + if ec.m == nil { + // uninitialized map + return nil, common.ErrKeyNotFound + } + + return ec.m.GetValue(key) } + const ( // GuaranteedSend requires an output to not drop the event on failure, but // retry until ACK. 
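The `EventCache` hunk above is deliberately usable from its zero value: the backing map is allocated lazily on the first `Put`, and `GetValue` on an untouched cache reports a missing key instead of panicking. The following is a minimal standalone sketch of that contract, using a plain map in place of `common.MapStr`; the names are illustrative, not the beats implementation.

[source,go]
----
package main

import (
	"errors"
	"fmt"
)

// errKeyNotFound stands in for common.ErrKeyNotFound in this sketch.
var errKeyNotFound = errors.New("key not found")

// eventCache mimics the lazily initialized per-event cache added above.
type eventCache struct{ m map[string]interface{} }

func (ec *eventCache) Put(key string, value interface{}) {
	if ec.m == nil {
		ec.m = map[string]interface{}{} // uninitialized map: allocate on first use
	}
	ec.m[key] = value
}

func (ec *eventCache) GetValue(key string) (interface{}, error) {
	if ec.m == nil {
		return nil, errKeyNotFound // nothing was ever cached for this event
	}
	v, ok := ec.m[key]
	if !ok {
		return nil, errKeyNotFound
	}
	return v, nil
}

func main() {
	var ec eventCache // zero value is ready to use
	if _, err := ec.GetValue("doc_id"); err != nil {
		fmt.Println("before Put:", err) // before Put: key not found
	}
	ec.Put("doc_id", "abc123")
	v, _ := ec.GetValue("doc_id")
	fmt.Println("after Put:", v) // after Put: abc123
}
----

Keeping this cache on `publisher.Event` rather than in the event body keeps output-scoped metadata out of what processors and other outputs see.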
diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/controller.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/controller.go index cbd1b520..885d0cd0 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/controller.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/controller.go @@ -84,6 +84,8 @@ func newOutputController( func (c *outputController) Close() error { c.consumer.sigPause() + c.consumer.close() + c.retryer.close() if c.out != nil { for _, out := range c.out.outputs { @@ -92,9 +94,6 @@ func (c *outputController) Close() error { close(c.out.workQueue) } - c.consumer.close() - c.retryer.close() - return nil } diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go index 250713b0..9dd3385c 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/pipeline/retry.go @@ -18,6 +18,8 @@ package pipeline import ( + "sync" + "github.com/elastic/beats/libbeat/logp" ) @@ -36,9 +38,10 @@ type retryer struct { consumer *eventConsumer - sig chan retryerSignal - out workQueue - in retryQueue + sig chan retryerSignal + out workQueue + in retryQueue + doneWaiter sync.WaitGroup } type retryQueue chan batchEvent @@ -75,20 +78,24 @@ func newRetryer( c *eventConsumer, ) *retryer { r := &retryer{ - logger: log, - observer: observer, - done: make(chan struct{}), - sig: make(chan retryerSignal, 3), - in: retryQueue(make(chan batchEvent, 3)), - out: out, - consumer: c, + logger: log, + observer: observer, + done: make(chan struct{}), + sig: make(chan retryerSignal, 3), + in: retryQueue(make(chan batchEvent, 3)), + out: out, + consumer: c, + doneWaiter: sync.WaitGroup{}, } + r.doneWaiter.Add(1) go r.loop() return r } func (r *retryer) close() { close(r.done) + // Block until loop() is properly closed + r.doneWaiter.Wait() } func (r *retryer) sigOutputAdded() { @@ -115,6 +122,7 @@ func (r *retryer) cancelled(b *Batch) { } func (r *retryer) loop() { + defer r.doneWaiter.Done() var ( out workQueue consumerBlocked bool @@ -131,7 +139,6 @@ func (r *retryer) loop() { select { case <-r.done: return - case evt := <-r.in: var ( countFailed int diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/processing/default.go b/vendor/github.com/elastic/beats/libbeat/publisher/processing/default.go index b4aafd6e..323938e4 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/processing/default.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/processing/default.go @@ -266,7 +266,7 @@ func (b *builder) Create(cfg beat.ProcessingConfig, drop bool) (beat.Processor, if !b.skipNormalize { // setup 1: generalize/normalize output (P) - processors.add(generalizeProcessor) + processors.add(newGeneralizeProcessor(cfg.KeepNull)) } // setup 2: add Meta from client config (C) diff --git a/vendor/github.com/elastic/beats/libbeat/publisher/processing/processors.go b/vendor/github.com/elastic/beats/libbeat/publisher/processing/processors.go index 38a77bdd..bb0b2c7c 100644 --- a/vendor/github.com/elastic/beats/libbeat/publisher/processing/processors.go +++ b/vendor/github.com/elastic/beats/libbeat/publisher/processing/processors.go @@ -40,21 +40,24 @@ type processorFn struct { fn func(event *beat.Event) (*beat.Event, error) } -var generalizeProcessor = newProcessor("generalizeEvent", func(event *beat.Event) (*beat.Event, error) { - // Filter out empty events. 
Empty events are still reported by ACK callbacks. - if len(event.Fields) == 0 { - return nil, nil - } +func newGeneralizeProcessor(keepNull bool) *processorFn { + return newProcessor("generalizeEvent", func(event *beat.Event) (*beat.Event, error) { + // Filter out empty events. Empty events are still reported by ACK callbacks. + if len(event.Fields) == 0 { + return nil, nil + } - fields := common.ConvertToGenericEvent(event.Fields) - if fields == nil { - logp.Err("fail to convert to generic event") - return nil, nil - } + g := common.NewGenericEventConverter(keepNull) + fields := g.Convert(event.Fields) + if fields == nil { + logp.Err("fail to convert to generic event") + return nil, nil + } - event.Fields = fields - return event, nil -}) + event.Fields = fields + return event, nil + }) +} var dropDisabledProcessor = newProcessor("dropDisabled", func(event *beat.Event) (*beat.Event, error) { return nil, nil diff --git a/vendor/github.com/elastic/beats/libbeat/reader/readjson/json.go b/vendor/github.com/elastic/beats/libbeat/reader/readjson/json.go index 72bea9ce..a669b3f4 100644 --- a/vendor/github.com/elastic/beats/libbeat/reader/readjson/json.go +++ b/vendor/github.com/elastic/beats/libbeat/reader/readjson/json.go @@ -114,7 +114,7 @@ func createJSONError(message string) common.MapStr { // respecting the KeysUnderRoot and OverwriteKeys configuration options. // If MessageKey is defined, the Text value from the event always // takes precedence. -func MergeJSONFields(data common.MapStr, jsonFields common.MapStr, text *string, config Config) time.Time { +func MergeJSONFields(data common.MapStr, jsonFields common.MapStr, text *string, config Config) (string, time.Time) { // The message key might have been modified by multiline if len(config.MessageKey) > 0 && text != nil { jsonFields[config.MessageKey] = *text @@ -127,6 +127,16 @@ func MergeJSONFields(data common.MapStr, jsonFields common.MapStr, text *string, data["message"] = *text } + var id string + if key := config.DocumentID; key != "" { + if tmp, err := jsonFields.GetValue(key); err == nil { + if v, ok := tmp.(string); ok { + id = v + jsonFields.Delete(key) + } + } + } + if config.KeysUnderRoot { // Delete existing json key delete(data, "json") @@ -147,7 +157,7 @@ func MergeJSONFields(data common.MapStr, jsonFields common.MapStr, text *string, } jsontransform.WriteJSONKeys(event, jsonFields, config.OverwriteKeys, config.AddErrorKey) - return event.Timestamp + return id, event.Timestamp } - return time.Time{} + return id, time.Time{} } diff --git a/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_config.go b/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_config.go index a95b9db0..5469f00a 100644 --- a/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_config.go +++ b/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_config.go @@ -20,6 +20,7 @@ package readjson // Config holds the options for a JSON reader. 
type Config struct { MessageKey string `config:"message_key"` + DocumentID string `config:"document_id"` KeysUnderRoot bool `config:"keys_under_root"` OverwriteKeys bool `config:"overwrite_keys"` AddErrorKey bool `config:"add_error_key"` diff --git a/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_test.go b/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_test.go index 5f4deebd..d6f82585 100644 --- a/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_test.go +++ b/vendor/github.com/elastic/beats/libbeat/reader/readjson/json_test.go @@ -199,17 +199,15 @@ func TestAddJSONFields(t *testing.T) { now := time.Now().UTC() - tests := []struct { - Name string + tests := map[string]struct { Data common.MapStr Text *string JSONConfig Config ExpectedItems common.MapStr ExpectedTimestamp time.Time + ExpectedID string }{ - { - // by default, don't overwrite keys - Name: "default: do not overwrite", + "default: do not overwrite": { Data: common.MapStr{"type": "test_type", "json": common.MapStr{"type": "test", "text": "hello"}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true}, @@ -219,9 +217,7 @@ func TestAddJSONFields(t *testing.T) { }, ExpectedTimestamp: time.Time{}, }, - { - // overwrite keys if asked - Name: "overwrite keys if configured", + "overwrite keys if configured": { Data: common.MapStr{"type": "test_type", "json": common.MapStr{"type": "test", "text": "hello"}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true}, @@ -231,9 +227,8 @@ func TestAddJSONFields(t *testing.T) { }, ExpectedTimestamp: time.Time{}, }, - { + "use json namespace w/o keys_under_root": { // without keys_under_root, put everything in a json key - Name: "use json namespace w/o keys_under_root", Data: common.MapStr{"type": "test_type", "json": common.MapStr{"type": "test", "text": "hello"}}, Text: &text, JSONConfig: Config{}, @@ -242,9 +237,9 @@ func TestAddJSONFields(t *testing.T) { }, ExpectedTimestamp: time.Time{}, }, - { + + "write result to message_key field": { // when MessageKey is defined, the Text overwrites the value of that key - Name: "write result to message_key field", Data: common.MapStr{"type": "test_type", "json": common.MapStr{"type": "test", "text": "hi"}}, Text: &text, JSONConfig: Config{MessageKey: "text"}, @@ -254,10 +249,9 @@ func TestAddJSONFields(t *testing.T) { }, ExpectedTimestamp: time.Time{}, }, - { + "parse @timestamp": { // when @timestamp is in JSON and overwrite_keys is true, parse it // in a common.Time - Name: "parse @timestamp", Data: common.MapStr{"@timestamp": now, "type": "test_type", "json": common.MapStr{"type": "test", "@timestamp": "2016-04-05T18:47:18.444Z"}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true}, @@ -266,10 +260,9 @@ func TestAddJSONFields(t *testing.T) { }, ExpectedTimestamp: time.Time(common.MustParseTime("2016-04-05T18:47:18.444Z")), }, - { + "fail to parse @timestamp": { // when the parsing on @timestamp fails, leave the existing value and add an error key // in a common.Time - Name: "fail to parse @timestamp", Data: common.MapStr{"@timestamp": common.Time(now), "type": "test_type", "json": common.MapStr{"type": "test", "@timestamp": "2016-04-05T18:47:18.44XX4Z"}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true, AddErrorKey: true}, @@ -279,10 +272,10 @@ func TestAddJSONFields(t *testing.T) { }, ExpectedTimestamp: time.Time{}, }, - { + + "wrong @timestamp format": { // when the @timestamp has the wrong type, leave the existing value and add an error key 
// in a common.Time - Name: "wrong @timestamp format", Data: common.MapStr{"@timestamp": common.Time(now), "type": "test_type", "json": common.MapStr{"type": "test", "@timestamp": 42}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true, AddErrorKey: true}, @@ -292,9 +285,8 @@ }, ExpectedTimestamp: time.Time{}, }, - { + "ignore non-string type field": { // if overwrite_keys is true, but the `type` key in json is not a string, ignore it - Name: "ignore non-string type field", Data: common.MapStr{"type": "test_type", "json": common.MapStr{"type": 42}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true, AddErrorKey: true}, @@ -304,9 +296,9 @@ }, ExpectedTimestamp: time.Time{}, }, - { + + "ignore empty type field": { // if overwrite_keys is true, but the `type` key in json is empty, ignore it - Name: "ignore empty type field", Data: common.MapStr{"type": "test_type", "json": common.MapStr{"type": ""}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true, AddErrorKey: true}, @@ -316,9 +308,8 @@ }, ExpectedTimestamp: time.Time{}, }, - { + "ignore type names starting with underscore": { // if overwrite_keys is true, but the `type` key in json starts with _, ignore it - Name: "ignore type names starting with underscore", Data: common.MapStr{"@timestamp": common.Time(now), "type": "test_type", "json": common.MapStr{"type": "_type"}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true, AddErrorKey: true}, @@ -328,9 +319,7 @@ }, ExpectedTimestamp: time.Time{}, }, - { - // if AddErrorKey is false, err should not be set. - Name: "ignore type names starting with underscore", + "do not set error if AddErrorKey is false": { Data: common.MapStr{"@timestamp": common.Time(now), "type": "test_type", "json": common.MapStr{"type": "_type"}}, Text: &text, JSONConfig: Config{KeysUnderRoot: true, OverwriteKeys: true, AddErrorKey: false}, @@ -340,22 +329,35 @@ }, ExpectedTimestamp: time.Time{}, }, + "extract event id": { + // if document_id is set, extract the ID from the event + Data: common.MapStr{"@timestamp": common.Time(now), "json": common.MapStr{"id": "test_id"}}, + JSONConfig: Config{DocumentID: "id"}, + ExpectedID: "test_id", + }, + "extract event id with wrong type": { + // if document_id points to a non-string value, no ID is extracted + Data: common.MapStr{"@timestamp": common.Time(now), "json": common.MapStr{"id": 42}}, + JSONConfig: Config{DocumentID: "id"}, + ExpectedID: "", + }, } - for _, test := range tests { - t.Run(test.Name, func(t *testing.T) { + for name, test := range tests { + t.Run(name, func(t *testing.T) { var jsonFields common.MapStr if fields, ok := test.Data["json"]; ok { jsonFields = fields.(common.MapStr) } - ts := MergeJSONFields(test.Data, jsonFields, test.Text, test.JSONConfig) + id, ts := MergeJSONFields(test.Data, jsonFields, test.Text, test.JSONConfig) t.Log("Executing test:", test) for k, v := range test.ExpectedItems { assert.Equal(t, v, test.Data[k]) } assert.Equal(t, test.ExpectedTimestamp, ts) + assert.Equal(t, test.ExpectedID, id) }) } } diff --git a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile index 180d567f..8c6969d3 100755 --- a/vendor/github.com/elastic/beats/libbeat/scripts/Makefile +++ 
b/vendor/github.com/elastic/beats/libbeat/scripts/Makefile @@ -125,17 +125,15 @@ check: check-headers mage ## @build Checks project and source code if everything @mage check .PHONY: $(.OVER)check-headers -$(.OVER)check-headers: +$(.OVER)check-headers: mage ifndef CHECK_HEADERS_DISABLED - @go get -u github.com/elastic/go-licenser - @go-licenser -d -license ${LICENSE} + @mage checkLicenseHeaders endif .PHONY: $(.OVER)add-headers -$(.OVER)add-headers: +$(.OVER)add-headers: mage ifndef CHECK_HEADERS_DISABLED - @go get github.com/elastic/go-licenser - @go-licenser -license ${LICENSE} + @mage addLicenseHeaders endif .PHONY: fmt diff --git a/vendor/github.com/elastic/beats/libbeat/template/load.go b/vendor/github.com/elastic/beats/libbeat/template/load.go index 33b9886d..2a63c9ef 100644 --- a/vendor/github.com/elastic/beats/libbeat/template/load.go +++ b/vendor/github.com/elastic/beats/libbeat/template/load.go @@ -23,6 +23,7 @@ import ( "io/ioutil" "net/http" "os" + "strings" "github.com/elastic/beats/libbeat/beat" "github.com/elastic/beats/libbeat/common" @@ -124,11 +125,10 @@ func (l *ESLoader) templateExists(templateName string) bool { if l.client == nil { return false } - status, _, _ := l.client.Request("HEAD", "/_template/"+templateName, "", nil, nil) - if status != http.StatusOK { - return false - } - return true + + status, body, _ := l.client.Request("GET", "/_cat/templates/"+templateName, "", nil, nil) + + return status == http.StatusOK && strings.Contains(string(body), templateName) } // Load reads the template from the config, creates the template body and prints it to the configured file. diff --git a/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go b/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go index 3eae2ce3..293e57e8 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go +++ b/vendor/github.com/elastic/beats/libbeat/tests/compose/compose.go @@ -65,7 +65,6 @@ func EnsureUp(t testing.TB, service string, options ...UpOption) HostInfo { upOptions := UpOptions{ Timeout: 60 * time.Second, Create: CreateOptions{ - Build: true, ForceRecreate: true, }, } diff --git a/vendor/github.com/elastic/beats/libbeat/tests/compose/wrapper.go b/vendor/github.com/elastic/beats/libbeat/tests/compose/wrapper.go index 6e26173b..13bda235 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/compose/wrapper.go +++ b/vendor/github.com/elastic/beats/libbeat/tests/compose/wrapper.go @@ -183,6 +183,15 @@ func (d *wrapperDriver) Up(ctx context.Context, opts UpOptions, service string) args = append(args, service) } + // Try to pull the image before building it + var stderr bytes.Buffer + pull := d.cmd(ctx, "pull", "--ignore-pull-failures", service) + pull.Stdout = nil + pull.Stderr = &stderr + if err := pull.Run(); err != nil { + return errors.Wrapf(err, "failed to pull images using docker-compose: %s", stderr.String()) + } + err := d.cmd(ctx, "up", args...).Run() if err != nil { return err @@ -300,21 +309,44 @@ func (d *wrapperDriver) containers(ctx context.Context, projectFilter Filter, fi } } + serviceNames, err := d.serviceNames(ctx) + if err != nil { + return nil, errors.Wrap(err, "failed to get container list") + } + var containers []types.Container for _, f := range serviceFilters { - c, err := d.client.ContainerList(ctx, types.ContainerListOptions{ + list, err := d.client.ContainerList(ctx, types.ContainerListOptions{ All: true, Filters: f, }) if err != nil { return nil, errors.Wrap(err, "failed to get container list") } - containers = 
append(containers, c...) + for _, container := range list { + serviceName, ok := container.Labels[labelComposeService] + if !ok || !contains(serviceNames, serviceName) { + // Service is not defined in current docker compose file, ignore it + continue + } + containers = append(containers, container) + } } return containers, nil } +func (d *wrapperDriver) serviceNames(ctx context.Context) ([]string, error) { + var stdout bytes.Buffer + cmd := d.cmd(ctx, "config", "--services") + cmd.Stdout = &stdout + err := cmd.Run() + if err != nil { + return nil, errors.Wrap(err, "failed to get list of service names") + } + return strings.Fields(stdout.String()), nil +} + func makeFilter(project, service string, projectFilter Filter) filters.Args { f := filters.NewArgs() f.Add("label", fmt.Sprintf("%s=%s", labelComposeProject, project)) diff --git a/vendor/github.com/elastic/beats/libbeat/tests/docker/docker.go b/vendor/github.com/elastic/beats/libbeat/tests/docker/docker.go index a2e1e181..9565dd1b 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/docker/docker.go +++ b/vendor/github.com/elastic/beats/libbeat/tests/docker/docker.go @@ -62,8 +62,13 @@ func (c Client) ContainerStart(image string, cmd []string, labels map[string]str // ContainerWait waits for a container to finish func (c Client) ContainerWait(ID string) error { ctx := context.Background() - _, err := c.cli.ContainerWait(ctx, ID) - return err + waitC, errC := c.cli.ContainerWait(ctx, ID, container.WaitConditionNotRunning) + select { + case <-waitC: + case err := <-errC: + return err + } + return nil } // ContainerKill kills the given container diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py index 17fd41a3..8fd402da 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/beat/compose.py @@ -10,6 +10,7 @@ INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) if INTEGRATION_TESTS: from compose.cli.command import get_project + from compose.config.environment import Environment from compose.service import BuildAction from compose.service import ConvergenceStrategy @@ -22,8 +23,8 @@ class ComposeMixin(object): # List of required services to run INTEGRATION_TESTS COMPOSE_SERVICES = [] - # docker-compose.yml dir path - COMPOSE_PROJECT_DIR = '.' 
+ # Additional environment variables for docker compose + COMPOSE_ENV = {} # timeout waiting for health (seconds) COMPOSE_TIMEOUT = 300 @@ -51,10 +52,12 @@ class ComposeMixin(object): return container.inspect()['State']['Health']['Status'] == 'healthy' project = cls.compose_project() + project.pull( + ignore_pull_failures=True, + service_names=cls.COMPOSE_SERVICES) project.up( strategy=ConvergenceStrategy.always, service_names=cls.COMPOSE_SERVICES, - do_build=BuildAction.force, timeout=30) # Wait for them to be healthy @@ -126,7 +129,11 @@ class ComposeMixin(object): return if INTEGRATION_TESTS and cls.COMPOSE_SERVICES: - cls.compose_project().kill(service_names=cls.COMPOSE_SERVICES) + # Use down on per-module scenarios to release network pools too + if os.path.basename(os.path.dirname(cls.find_compose_path())) == "module": + cls.compose_project().down(remove_image_type=None, include_volumes=True) + else: + cls.compose_project().kill(service_names=cls.COMPOSE_SERVICES) @classmethod def get_hosts(cls): @@ -182,6 +189,43 @@ class ComposeMixin(object): return cls._private_host(info, port) return cls._exposed_host(info, port) + @classmethod + def compose_project_name(cls): + basename = os.path.basename(cls.find_compose_path()) + + def positivehash(x): + return hash(x) % ((sys.maxsize+1) * 2) + + return "%s_%X" % (basename, positivehash(frozenset(cls.COMPOSE_ENV.items()))) + @classmethod def compose_project(cls): - return get_project(cls.COMPOSE_PROJECT_DIR, project_name=os.environ.get('DOCKER_COMPOSE_PROJECT_NAME')) + env = Environment(os.environ.copy()) + env.update(cls.COMPOSE_ENV) + return get_project(cls.find_compose_path(), + project_name=cls.compose_project_name(), + environment=env) + + @classmethod + def find_compose_path(cls): + class_dir = os.path.abspath(os.path.dirname(sys.modules[cls.__module__].__file__)) + while True: + if os.path.exists(os.path.join(class_dir, "docker-compose.yml")): + return class_dir + class_dir, current = os.path.split(class_dir) + if current == '': # We have reached root + raise Exception("failed to find a docker-compose.yml file") + + @classmethod + def get_service_log(cls, service): + container = cls.compose_project().containers(service_names=[service])[0] + return container.logs() + + @classmethod + def service_log_contains(cls, service, msg): + log = cls.get_service_log(service) + counter = 0 + for line in log.splitlines(): + if line.find(msg) >= 0: + counter += 1 + return counter > 0 diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/config/mockbeat.yml.j2 b/vendor/github.com/elastic/beats/libbeat/tests/system/config/mockbeat.yml.j2 index df185c8c..cd3fe5f7 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/config/mockbeat.yml.j2 +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/config/mockbeat.yml.j2 @@ -62,6 +62,9 @@ output: path: {{ output_file_path|default(beat.working_dir + "/output") }} filename: "{{ output_file_filename|default("mockbeat") }}" rotate_every_kb: 1000 + {% if output_file_permissions %} + permissions: {{ output_file_permissions }} + {% endif %} #number_of_files: 7 {%- endif %} @@ -113,10 +116,20 @@ xpack.monitoring.elasticsearch.state.period: 3s # to speed up tests {% if monitoring -%} #================================ X-Pack Monitoring (direct) ===================================== -monitoring.elasticsearch.hosts: {{monitoring.elasticsearch.hosts}} -monitoring.elasticsearch.metrics.period: 2s # to speed up tests -monitoring.elasticsearch.state.period: 3s # to speed up tests +monitoring: + {% 
if monitoring.elasticsearch -%} + elasticsearch.hosts: {{monitoring.elasticsearch.hosts}} + elasticsearch.metrics.period: 2s # to speed up tests + elasticsearch.state.period: 3s # to speed up tests + {% endif -%} + + {% if monitoring.cluster_uuid -%} + cluster_uuid: {{monitoring.cluster_uuid}} + {% endif -%} {% endif -%} # vim: set ft=jinja: +{% if http_enabled -%} +http.enabled: {{http_enabled}} +{% endif -%} diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/test_monitoring.py b/vendor/github.com/elastic/beats/libbeat/tests/system/test_monitoring.py index 833cb5cf..73767e24 100644 --- a/vendor/github.com/elastic/beats/libbeat/tests/system/test_monitoring.py +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/test_monitoring.py @@ -4,6 +4,9 @@ from elasticsearch import Elasticsearch import re from nose.plugins.attrib import attr import unittest +import requests +import random +import string INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) @@ -150,6 +153,29 @@ class Test(BaseTest): self.assert_same_structure(indirect_beats_state_doc['beats_state'], direct_beats_state_doc['beats_state']) self.assert_same_structure(indirect_beats_stats_doc['beats_stats'], direct_beats_stats_doc['beats_stats']) + @unittest.skipUnless(INTEGRATION_TESTS, "integration test") + @attr('integration') + def test_cluster_uuid_setting(self): + """ + Test that monitoring.cluster_uuid setting may be set without any other monitoring.* settings + """ + test_cluster_uuid = self.random_string(10) + self.render_config_template( + "mockbeat", + monitoring={ + "cluster_uuid": test_cluster_uuid + }, + http_enabled="true" + ) + + proc = self.start_beat(config="mockbeat.yml") + self.wait_until(lambda: self.log_contains("mockbeat start running.")) + + state = self.get_beat_state() + proc.check_kill_and_wait() + + self.assertEqual(test_cluster_uuid, state["monitoring"]["cluster_uuid"]) + def search_monitoring_doc(self, monitoring_type): results = self.es_monitoring.search( index='.monitoring-beats-*', @@ -241,3 +267,11 @@ class Test(BaseTest): host=os.getenv("ES_MONITORING_HOST", "localhost"), port=os.getenv("ES_MONITORING_PORT", "9210") ) + + def get_beat_state(self): + url = "http://localhost:5066/state" + return requests.get(url).json() + + def random_string(self, size): + char_pool = string.ascii_letters + string.digits + return ''.join(random.choice(char_pool) for i in range(size)) diff --git a/vendor/github.com/elastic/beats/libbeat/tests/system/test_umask.py b/vendor/github.com/elastic/beats/libbeat/tests/system/test_umask.py new file mode 100644 index 00000000..dd3a6df9 --- /dev/null +++ b/vendor/github.com/elastic/beats/libbeat/tests/system/test_umask.py @@ -0,0 +1,36 @@ +from base import BaseTest + +import os +import stat +import unittest +import sys + +INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False) + + +class TestUmask(BaseTest): + """ + Test default umask + """ + + DEFAULT_UMASK = 0027 + + def setUp(self): + super(TestUmask, self).setUp() + + self.output_file_permissions = 0666 + + self.render_config_template(output_file_permissions=self.output_file_permissions) + proc = self.start_beat() + self.wait_until(lambda: self.output_lines() > 0, max_timeout=2) + proc.check_kill_and_wait() + + @unittest.skipIf(sys.platform.startswith("win"), "umask is not available on Windows") + def test_output_file_perms(self): + """ + Test that output file permissions respect default umask + """ + output_file_path = os.path.join(self.working_dir, "output", "mockbeat") + perms = 
stat.S_IMODE(os.lstat(output_file_path).st_mode) + + self.assertEqual(perms, self.output_file_permissions & ~TestUmask.DEFAULT_UMASK) diff --git a/vendor/github.com/elastic/beats/libbeat/version/version.go b/vendor/github.com/elastic/beats/libbeat/version/version.go index db697e0a..3cc53133 100644 --- a/vendor/github.com/elastic/beats/libbeat/version/version.go +++ b/vendor/github.com/elastic/beats/libbeat/version/version.go @@ -18,4 +18,4 @@ // Code generated by dev-tools/set_version package version -const defaultBeatVersion = "7.4.2" +const defaultBeatVersion = "7.5.2" diff --git a/vendor/github.com/elastic/beats/magefile.go b/vendor/github.com/elastic/beats/magefile.go index e89775b0..25eddd0a 100644 --- a/vendor/github.com/elastic/beats/magefile.go +++ b/vendor/github.com/elastic/beats/magefile.go @@ -25,11 +25,11 @@ import ( "path/filepath" "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" "github.com/pkg/errors" "go.uber.org/multierr" devtools "github.com/elastic/beats/dev-tools/mage" + "github.com/elastic/beats/dev-tools/mage/gotool" ) var ( @@ -87,21 +87,54 @@ func PackageBeatDashboards() error { // Fmt formats code and adds license headers. func Fmt() { mg.Deps(devtools.GoImports, devtools.PythonAutopep8) - mg.Deps(addLicenseHeaders) + mg.Deps(AddLicenseHeaders) } -// addLicenseHeaders adds ASL2 headers to .go files outside of x-pack and +// AddLicenseHeaders adds ASL2 headers to .go files outside of x-pack and // add Elastic headers to .go files in x-pack. -func addLicenseHeaders() error { +func AddLicenseHeaders() error { fmt.Println(">> fmt - go-licenser: Adding missing headers") - if err := sh.Run("go", "get", devtools.GoLicenserImportPath); err != nil { - return err - } + mg.Deps(devtools.InstallGoLicenser) + + licenser := gotool.Licenser return multierr.Combine( - sh.RunV("go-licenser", "-license", "ASL2", "-exclude", "x-pack"), - sh.RunV("go-licenser", "-license", "Elastic", "x-pack"), + licenser( + licenser.License("ASL2"), + licenser.Exclude("x-pack"), + licenser.Exclude("generator/beat/{beat}"), + licenser.Exclude("generator/metricbeat/{beat}"), + ), + licenser( + licenser.License("Elastic"), + licenser.Path("x-pack"), + ), + ) +} + +// CheckLicenseHeaders checks ASL2 headers in .go files outside of x-pack and +// checks Elastic headers in .go files in x-pack. 
+func CheckLicenseHeaders() error { + fmt.Println(">> fmt - go-licenser: Checking for missing headers") + + mg.Deps(devtools.InstallGoLicenser) + + licenser := gotool.Licenser + + return multierr.Combine( + licenser( + licenser.Check(), + licenser.License("ASL2"), + licenser.Exclude("x-pack"), + licenser.Exclude("generator/beat/{beat}"), + licenser.Exclude("generator/metricbeat/{beat}"), + ), + licenser( + licenser.Check(), + licenser.License("Elastic"), + licenser.Path("x-pack"), + ), ) } diff --git a/vendor/github.com/elastic/beats/metricbeat/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/Dockerfile index ec892890..a66dfc46 100644 --- a/vendor/github.com/elastic/beats/metricbeat/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.12.9 +FROM golang:1.12.12 RUN \ apt-get update \ diff --git a/vendor/github.com/elastic/beats/metricbeat/Makefile b/vendor/github.com/elastic/beats/metricbeat/Makefile index 9f4e5f1e..72567d2d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/Makefile +++ b/vendor/github.com/elastic/beats/metricbeat/Makefile @@ -34,7 +34,7 @@ configs: python-env @cat ${ES_BEATS}/metricbeat/_meta/common.reference.yml > _meta/beat.reference.yml @${PYTHON_ENV}/bin/python ${ES_BEATS}/script/config_collector.py --beat ${BEAT_NAME} --full $(PWD) >> _meta/beat.reference.yml @rm -rf modules.d - ${PYTHON_ENV}/bin/python ${ES_BEATS}/script/modules_collector.py --beat ${BEAT_NAME} --docs_branch=$(DOCS_BRANCH) + mage config @chmod go-w modules.d/* @# Enable system by default: @if [ -f modules.d/system.yml.disabled ]; then mv modules.d/system.yml.disabled modules.d/system.yml; fi diff --git a/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go index f4ac2e01..f46161c4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go +++ b/vendor/github.com/elastic/beats/metricbeat/autodiscover/builder/hints/metrics.go @@ -36,13 +36,14 @@ func init() { } const ( - module = "module" - namespace = "namespace" - hosts = "hosts" - metricsets = "metricsets" - period = "period" - timeout = "timeout" - ssl = "ssl" + module = "module" + namespace = "namespace" + hosts = "hosts" + metricsets = "metricsets" + period = "period" + timeout = "timeout" + ssl = "ssl" + metricspath = "metrics_path" defaultTimeout = "3s" defaultPeriod = "1m" @@ -110,6 +111,7 @@ func (m *metricHints) CreateConfig(event bus.Event) []*common.Config { ival := m.getPeriod(hints) sslConf := m.getSSLConfig(hints) procs := m.getProcessors(hints) + metricspath := m.getMetricPath(hints) moduleConfig := common.MapStr{ "module": mod, @@ -125,6 +127,9 @@ func (m *metricHints) CreateConfig(event bus.Event) []*common.Config { if ns != "" { moduleConfig["namespace"] = ns } + if metricspath != "" { + moduleConfig["metrics_path"] = metricspath + } logp.Debug("hints.builder", "generated config: %v", moduleConfig.String()) @@ -189,6 +194,10 @@ func (m *metricHints) getNamespace(hints common.MapStr) string { return builder.GetHintString(hints, m.Key, namespace) } +func (m *metricHints) getMetricPath(hints common.MapStr) string { + return builder.GetHintString(hints, m.Key, metricspath) +} + func (m *metricHints) getPeriod(hints common.MapStr) string { if ival := builder.GetHintString(hints, m.Key, period); ival != "" { return ival diff --git a/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml 
b/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml index ffdf4357..ef8970d6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml +++ b/vendor/github.com/elastic/beats/metricbeat/docker-compose.yml @@ -15,45 +15,57 @@ services: # Modules aerospike: - build: ./module/aerospike/_meta + image: docker.elastic.co/observability-ci/beats-integration-aerospike:${AEROSPIKE_VERSION:-3.9.0}-1 + build: + context: ./module/aerospike/_meta + args: + AEROSPIKE_VERSION: ${AEROSPIKE_VERSION:-3.9.0} ports: - 3000 - apache: - build: ./module/apache/_meta - ports: - - 80 - - apache_2_4_12: - build: - context: ./module/apache/_meta - args: - APACHE_VERSION: 2.4.12 - ports: - - 80 - metricbeat: - build: ./module/beat/_meta + image: docker.elastic.co/observability-ci/beats-integration-metricbeat:${BEAT_VERSION:-7.3.0}-1 + build: + context: ./module/beat/_meta + args: + BEAT_VERSION: ${BEAT_VERSION:-7.3.0} + command: '-e' ports: - 5066 ceph: - build: ./module/ceph/_meta + image: docker.elastic.co/observability-ci/beats-integration-ceph:${CEPH_VERSION:-master-6373c6a-jewel-centos-7-x86_64}-1 + build: + context: ./module/ceph/_meta + args: + CEPH_VERSION: ${CEPH_VERSION:-master-6373c6a-jewel-centos-7-x86_64} ports: - 5000 consul: - build: ./module/consul/_meta + image: docker.elastic.co/observability-ci/beats-integration-consul:${CONSUL_VERSION:-1.4.2}-1 + build: + context: ./module/consul/_meta + args: + CONSUL_VERSION: ${CONSUL_VERSION:-1.4.2} ports: - 8500 couchbase: - build: ./module/couchbase/_meta + image: docker.elastic.co/observability-ci/beats-integration-couchbase:${COUCHBASE_VERSION:-4.5.1}-1 + build: + context: ./module/couchbase/_meta + args: + COUCHBASE_VERSION: ${COUCHBASE_VERSION:-4.5.1} ports: - 8091 couchdb: - build: ./module/couchdb/_meta + image: docker.elastic.co/observability-ci/beats-integration-couchdb:${COUCHDB_VERSION:-1.7}-1 + build: + context: ./module/couchdb/_meta + args: + COUCHDB_VERSION: ${COUCHDB_VERSION:-1.7} ports: - 5984 @@ -67,12 +79,20 @@ services: test: ["CMD", "true"] dropwizard: - build: ./module/dropwizard/_meta + image: docker.elastic.co/observability-ci/beats-integration-dropwizard:${MAVEN_VERSION:-3.3-jdk-8}-1 + build: + context: ./module/dropwizard/_meta + args: + MAVEN_VERSION: ${MAVEN_VERSION:-3.3-jdk-8} ports: - 8080 elasticsearch: - build: ./module/elasticsearch/_meta + image: docker.elastic.co/observability-ci/beats-integration-elasticsearch:${ELASTICSEARCH_VERSION:-7.4.0}-1 + build: + context: ./module/elasticsearch/_meta + args: + ELASTICSEARCH_VERSION: ${ELASTICSEARCH_VERSION:-7.4.0} environment: - "ES_JAVA_OPTS=-Xms256m -Xmx256m" - "network.host=" @@ -83,89 +103,72 @@ services: - 9200 envoyproxy: - build: ./module/envoyproxy/_meta + image: docker.elastic.co/observability-ci/beats-integration-envoyproxy:${ENVOYPROXY_VERSION:-v1.7.0}-1 + build: + context: ./module/envoyproxy/_meta + args: + ENVOYPROXY_VERSION: ${ENVOYPROXY_VERSION:-v1.7.0} ports: - 9901 etcd: - build: ./module/etcd/_meta - ports: - - 2379 - - etcd_3_2: + image: docker.elastic.co/observability-ci/beats-integration-etcd:${ETCD_VERSION:-3.3.10}-1 build: context: ./module/etcd/_meta args: - ETCD_VERSION: v3.2.25 + ETCD_VERSION: ${ETCD_VERSION:-3.3.10} ports: - 2379 golang: - build: ./module/golang/_meta + image: docker.elastic.co/observability-ci/beats-integration-golang:1 + build: + context: ./module/golang/_meta ports: - 6060 haproxy: - build: ./module/haproxy/_meta - ports: - - 14567 - - 14568 - - 14569 - - haproxy_1_6: - build: + image: 
docker.elastic.co/observability-ci/beats-integration-haproxy:${HAPROXY_VERSION:-1.8}-1 + build: context: ./module/haproxy/_meta - dockerfile: Dockerfile.1.6 - ports: - - 14567 - - 14568 - - 14569 - - haproxy_1_7: - build: - context: ./module/haproxy/_meta - dockerfile: Dockerfile.1.7 + args: + HAPROXY_VERSION: ${HAPROXY_VERSION:-1.8} ports: - 14567 - 14568 - 14569 http: - build: ./module/http/_meta + image: docker.elastic.co/observability-ci/beats-integration-http:1 + build: + context: ./module/http/_meta ports: - 8080 jolokia: - build: ./module/jolokia/_meta + image: docker.elastic.co/observability-ci/beats-integration-jolokia:${JOLOKIA_VERSION:-1.5.0}-1 + build: + context: ./module/jolokia/_meta + args: + JOLOKIA_VERSION: ${JOLOKIA_VERSION:-1.5.0} ports: - 8778 kafka: + image: docker.elastic.co/observability-ci/beats-integration-kafka:${KAFKA_VERSION:-2.1.1}-1 build: context: ./module/kafka/_meta args: - KAFKA_VERSION: 2.1.1 - ports: - - 9092 - - kafka_1_1_0: - build: - context: ./module/kafka/_meta - args: - KAFKA_VERSION: 1.1.0 - ports: - - 9092 - - kafka_0_10_2: - build: - context: ./module/kafka/_meta - args: - KAFKA_VERSION: 0.10.2.2 + KAFKA_VERSION: ${KAFKA_VERSION:-2.1.1} ports: - 9092 kibana: - build: ./module/kibana/_meta + image: docker.elastic.co/observability-ci/beats-integration-kibana:${KIBANA_VERSION:-7.4.0}-1 + build: + context: ./module/kibana/_meta + args: + KIBANA_VERSION: ${KIBANA_VERSION:-7.4.0} depends_on: - elasticsearch ports: @@ -194,151 +197,157 @@ services: # - 18080 logstash: - build: ./module/logstash/_meta + image: docker.elastic.co/observability-ci/beats-integration-logstash:${LOGSTASH_VERSION:-7.4.0}-1 + build: + context: ./module/logstash/_meta + args: + LOGSTASH_VERSION: ${LOGSTASH_VERSION:-7.3.0} ports: - 9600 memcached: - build: ./module/memcached/_meta + image: docker.elastic.co/observability-ci/beats-integration-memcached:${MEMCACHED_VERSION:-1.4.35}-1 + build: + context: ./module/memcached/_meta + args: + MEMCACHED_VERSION: ${MEMCACHED_VERSION:-1.4.35} ports: - 11211 mongodb: - build: ./module/mongodb/_meta + image: docker.elastic.co/observability-ci/beats-integration-mongodb:${MONGODB_VERSION:-3.4}-1 + build: + context: ./module/mongodb/_meta + args: + MONGODB_VERSION: ${MONGODB_VERSION:-3.4} command: mongod --replSet beats ports: - 27017 munin: - build: ./module/munin/_meta + image: docker.elastic.co/observability-ci/beats-integration-munin:1 + build: + context: ./module/munin/_meta ports: - 4949 mysql: + image: docker.elastic.co/observability-ci/beats-integration-mysql:${MYSQL_VARIANT:-mysql}-${MYSQL_VERSION:-5.7.12}-1 build: context: ./module/mysql/_meta args: - MYSQL_IMAGE: mysql:5.7.24 - ports: - - 3306 - - mysql_8_0: - build: - context: ./module/mysql/_meta - args: - MYSQL_IMAGE: mysql:8.0.13 - ports: - - 3306 - - percona_5_7: - build: - context: ./module/mysql/_meta - args: - MYSQL_IMAGE: percona:5.7.24 - ports: - - 3306 - - percona_8_0: - build: - context: ./module/mysql/_meta - args: - MYSQL_IMAGE: percona:8.0.13-4 - ports: - - 3306 - - mariadb_10_2: - build: - context: ./module/mysql/_meta - args: - MYSQL_IMAGE: mariadb:10.2.23 - ports: - - 3306 - - mariadb_10_3: - build: - context: ./module/mysql/_meta - args: - MYSQL_IMAGE: mariadb:10.3.14 - ports: - - 3306 - - mariadb_10_4: - build: - context: ./module/mysql/_meta - args: - MYSQL_IMAGE: mariadb:10.4.4 + MYSQL_IMAGE: ${MYSQL_VARIANT:-mysql}:${MYSQL_VERSION:-5.7.12} ports: - 3306 nats: - build: ./module/nats/_meta + image: 
docker.elastic.co/observability-ci/beats-integration-nats:${NATS_VERSION:-2.0.4}-1 + build: + context: ./module/nats/_meta + dockerfile: Dockerfile.2.0.X + args: + NATS_VERSION: ${NATS_VERSION:-2.0.4} + ports: + - 8222 + + nats_1_3: + image: docker.elastic.co/observability-ci/beats-integration-nats:${NATS_VERSION:-1.3.0}-1 + build: + context: ./module/nats/_meta + dockerfile: Dockerfile.1.3 + args: + NATS_VERSION: ${NATS_VERSION:-1.3.0} ports: - 8222 nginx: - build: ./module/nginx/_meta + image: docker.elastic.co/observability-ci/beats-integration-nginx:${NGINX_VERSION:-1.9}-1 + build: + context: ./module/nginx/_meta + args: + NGINX_VERSION: ${NGINX_VERSION:-1.9} ports: - 80 phpfpm: - build: ./module/php_fpm/_meta + image: docker.elastic.co/observability-ci/beats-integration-phpfpm:${PHPFPM_VERSION:-7.1}-1 + build: + context: ./module/php_fpm/_meta + args: + PHPFPM_VERSION: ${PHPFPM_VERSION:-7.1} ports: - 81 postgresql: - build: ./module/postgresql/_meta + image: docker.elastic.co/observability-ci/beats-integration-postgresql:${POSTGRESQL_VERSION:-9.5.3}-1 + build: + context: ./module/postgresql/_meta + args: + POSTGRESQL_VERSION: ${POSTGRESQL_VERSION:-9.5.3} ports: - 5432 prometheus: - build: ./module/prometheus/_meta + image: docker.elastic.co/observability-ci/beats-integration-prometheus:${PROMETHEUS_VERSION:-2.6.0}-1 + build: + context: ./module/prometheus/_meta + args: + PROMETHEUS_VERSION: ${PROMETHEUS_VERSION:-2.6.0} ports: - 9090 rabbitmq: - build: ./module/rabbitmq/_meta + image: docker.elastic.co/observability-ci/beats-integration-rabbitmq:${RABBITMQ_VERSION:-3.7.4}-1 + build: + context: ./module/rabbitmq/_meta + args: + RABBITMQ_VERSION: ${RABBITMQ_VERSION:-3.7.4} ports: - 15672 redis: - build: ./module/redis/_meta - ports: - - 6379 - - redis_4: + image: docker.elastic.co/observability-ci/beats-integration-redis:${REDIS_VERSION:-3.2.12}-1 build: context: ./module/redis/_meta args: - REDIS_VERSION: 4.0.11 - ports: - - 6379 - - redis_5: - build: - context: ./module/redis/_meta - args: - REDIS_VERSION: 5.0-rc4 + REDIS_VERSION: ${REDIS_VERSION:-3.2.12} ports: - 6379 traefik: - build: ./module/traefik/_meta + image: docker.elastic.co/observability-ci/beats-integration-traefik:${TRAEFIK_VERSION:-1.6}-1 + build: + context: ./module/traefik/_meta + args: + TRAEFIK_VERSION: ${TRAEFIK_VERSION:-1.6} ports: - 8080 uwsgi_tcp: - build: ./module/uwsgi/_meta + image: docker.elastic.co/observability-ci/beats-integration-uwsgi:py${PYTHON_VERSION:-3.6}-1 + build: + context: ./module/uwsgi/_meta + args: + PYTHON_VERSION: ${PYTHON_VERSION:-3.6} command: uwsgi --http :8080 --master --processes 1 --threads 2 --stats 0.0.0.0:9191 --memory-report --wsgi-file app.py ports: - 9191 uwsgi_http: - build: ./module/uwsgi/_meta + image: docker.elastic.co/observability-ci/beats-integration-uwsgi:py${PYTHON_VERSION:-3.6}-1 + build: + context: ./module/uwsgi/_meta + args: + PYTHON_VERSION: ${PYTHON_VERSION:-3.6} command: uwsgi --http :8080 --master --processes 1 --threads 2 --stats 0.0.0.0:9192 --memory-report --stats-http --wsgi-file app.py ports: - 9192 zookeeper: - build: ./module/zookeeper/_meta + image: docker.elastic.co/observability-ci/beats-integration-zookeeper:${ZOOKEEPER_VERSION:-3.5.5}-1 + build: + context: ./module/zookeeper/_meta + args: + ZOOKEEPER_VERSION: ${ZOOKEEPER_VERSION:-3.5.5} ports: - 2181 diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc index 2bb31112..08d2cd05 100644 --- 
a/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/autodiscover-hints.asciidoc @@ -20,6 +20,11 @@ values from the autodiscover event, ie: `${data.host}:80`. List of metricsets to use, comma separated. If no metricsets are provided, default metricsets for the module are used. +[float] +===== `co.elastic.metrics/metrics_path` + +The path to retrieve the metrics from (/metrics by default) for <>. + [float] ===== `co.elastic.metrics/period` @@ -36,14 +41,14 @@ Metrics retrieval timeout, default: 3s SSL parameters, as seen in <>. [float] -===== `co.elastic.logs/raw` +===== `co.elastic.metrics/raw` When an entire module configuration needs to be completely set the `raw` hint can be used. You can provide a stringified JSON of the input configuration. `raw` overrides every other hint and can be used to create both a single or a list of configurations. ["source","yaml",subs="attributes"] ------------------------------------------------------------------------------------- -co.elastic.metrics/raw: "[{"enabled\":true,\"metricsets\":[\"default\"],\"module\":\"mockmoduledefaults\",\"period\":\"1m\",\"timeout\":\"3s\"}]" +co.elastic.metrics/raw: "[{\"enabled\":true,\"metricsets\":[\"default\"],\"module\":\"mockmoduledefaults\",\"period\":\"1m\",\"timeout\":\"3s\"}]" ------------------------------------------------------------------------------------- [float] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/configuring-howto.asciidoc index 398c35eb..d532b8a5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/configuring-howto.asciidoc @@ -17,8 +17,6 @@ The {beatname_uc} configuration file uses http://yaml.org/[YAML] for its syntax. See the {beats-ref}/config-file-format.html[Config File Format] section of the _Beats Platform Reference_ for more about the structure of the config file. 
-include::../../libbeat/docs/shared-cm-tip.asciidoc[] - The following topics describe how to configure {beatname_uc}: * <> @@ -51,42 +49,42 @@ include::./metricbeat-general-options.asciidoc[] include::./reload-configuration.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] -include::{libbeat-dir}/docs/shared-ilm.asciidoc[] +include::{libbeat-dir}/shared-ilm.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./metricbeat-filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: :autodiscoverJolokia: :autodiscoverHints: -include::{libbeat-dir}/docs/shared-autodiscover.asciidoc[] +include::{libbeat-dir}/shared-autodiscover.asciidoc[] :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/regexp.asciidoc[] +include::{libbeat-dir}/regexp.asciidoc[] -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/faq.asciidoc index 7f550fbf..81c188a5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/faq.asciidoc @@ -22,6 +22,6 @@ sudo mount -t linprocfs /dev/null /compat/linux/proc include::faq-unexpected-metrics.asciidoc[] -include::{libbeat-dir}/docs/faq-limit-bandwidth.asciidoc[] +include::{libbeat-dir}/faq-limit-bandwidth.asciidoc[] -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc index 3e31d5d2..1339d41d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/fields.asciidoc @@ -14,7 +14,9 @@ grouped in the following categories: * <> * <> +* <> * <> +* <> * <> * <> * <> @@ -61,6 +63,7 @@ grouped in the following categories: * <> * <> * <> +* <> * <> * <> * <> @@ -817,6 +820,278 @@ type: long Total. 
+type: long + +-- + +[[exported-fields-appsearch]] +== App Search fields + +App Search module + + + +[float] +=== appsearch + + + + +[float] +=== stats + +App Search stats + + + +[float] +=== jvm + +JVM stats + + + +[float] +=== memory_usage + +Memory usage + + + +*`appsearch.stats.jvm.memory_usage.heap_init.bytes`*:: ++ +-- +Heap init used by the JVM in bytes. + + +type: long + +-- + +*`appsearch.stats.jvm.memory_usage.heap_used.bytes`*:: ++ +-- +Heap used by the JVM in bytes. + + +type: long + +-- + +*`appsearch.stats.jvm.memory_usage.heap_committed.bytes`*:: ++ +-- +Committed heap to the JVM in bytes. + + +type: long + +-- + +*`appsearch.stats.jvm.memory_usage.heap_max.bytes`*:: ++ +-- +Max heap used by the JVM in bytes + + +type: long + +-- + +*`appsearch.stats.jvm.memory_usage.non_heap_init.bytes`*:: ++ +-- +Non-Heap initial memory used by the JVM in bytes. + + +type: long + +-- + +*`appsearch.stats.jvm.memory_usage.non_heap_committed.bytes`*:: ++ +-- +Non-Heap committed memory used by the JVM in bytes. + + +type: long + +-- + +[float] +=== queues + +Worker queues + + + +*`appsearch.stats.queues.analytics_events.count`*:: ++ +-- +Number of pending jobs in the `analytics_events` queue. + + +type: long + +-- + +*`appsearch.stats.queues.document_destroyer.count`*:: ++ +-- +Number of pending jobs in the `document_destroyer` queue. + + +type: long + +-- + +*`appsearch.stats.queues.engine_destroyer.count`*:: ++ +-- +Number of pending jobs in the `engine_destroyer` queue. + + +type: long + +-- + +*`appsearch.stats.queues.index_adder.count`*:: ++ +-- +Number of pending jobs in the `index_adder` queue. + + +type: long + +-- + +*`appsearch.stats.queues.indexed_doc_remover.count`*:: ++ +-- +Number of pending jobs in the `indexed_doc_remover` queue. + + +type: long + +-- + +*`appsearch.stats.queues.mailer.count`*:: ++ +-- +Number of pending jobs in the `mailer` queue. + + +type: long + +-- + +*`appsearch.stats.queues.refresh_document_counts.count`*:: ++ +-- +Number of pending jobs in the `refresh_document_counts` queue. + + +type: long + +-- + +*`appsearch.stats.queues.reindexer.count`*:: ++ +-- +Number of pending jobs in the `reindexer` queue. + + +type: long + +-- + +*`appsearch.stats.queues.schema_updater.count`*:: ++ +-- +Number of pending jobs in the `schema_updater` queue. + + +type: long + +-- + +*`appsearch.stats.queues.failed.count`*:: ++ +-- +Number of failed jobs waiting to be retried. + + +type: long + +-- + +[float] +=== requests + +Request metrics + + + +*`appsearch.stats.requests.count`*:: ++ +-- +Number of recently completed requests + + +type: long + +-- + +[float] +=== api.duration + +API response time metrics + + + +*`appsearch.stats.requests.api.duration.avg.ms`*:: ++ +-- +Average response time in milliseconds + + +type: long + +-- + +*`appsearch.stats.requests.api.duration.max.ms`*:: ++ +-- +Max response time in milliseconds + + +type: long + +-- + +[float] +=== web.duration + +Dashboard response time metrics + + + +*`appsearch.stats.requests.web.duration.avg.ms`*:: ++ +-- +Average response time in milliseconds + + +type: long + +-- + +*`appsearch.stats.requests.web.duration.max.ms`*:: ++ +-- +Max response time in milliseconds + + type: long -- @@ -828,6 +1103,16 @@ type: long +*`cloud.account.name`*:: ++ +-- +The cloud account name or alias used to identify different entities in a multi-tenant environment. + + +type: keyword + +-- + [float] === aws @@ -844,6 +1129,16 @@ type: object -- +*`aws.s3.bucket.name`*:: ++ +-- +Name of a S3 bucket. 
+ + +type: keyword + +-- + [float] === cloudwatch @@ -1777,6 +2072,36 @@ type: long -- +*`aws.rds.db_instance.db_cluster_identifier`*:: ++ +-- +This identifier is the unique key that identifies a DB cluster, specifically for an Amazon Aurora DB cluster. + + +type: keyword + +-- + +*`aws.rds.db_instance.role`*:: ++ +-- +DB roles like WRITER or READER, specifically for an Amazon Aurora DB cluster. + + +type: keyword + +-- + +*`aws.rds.db_instance.engine_name`*:: ++ +-- +Each DB instance runs a DB engine, such as MySQL, MariaDB or PostgreSQL. + + +type: keyword + +-- + [float] === s3_daily_storage @@ -1784,16 +2109,6 @@ type: long -*`aws.s3_daily_storage.bucket.name`*:: -+ --- -Name of a S3 bucket. - - -type: keyword - --- - *`aws.s3_daily_storage.bucket.size.bytes`*:: + -- @@ -1823,16 +2138,6 @@ type: long -*`aws.s3_request.bucket.name`*:: -+ --- -Name of a S3 bucket. - - -type: keyword - --- - *`aws.s3_request.requests.total`*:: + -- @@ -2116,6 +2421,133 @@ type: keyword -- +[[exported-fields-azure]] +== azure fields + +azure module + + + +[float] +=== azure + + + + +*`azure.namespace`*:: ++ +-- +The namespace selected. + + +type: keyword + +-- + +*`azure.subscription_id`*:: ++ +-- +The subscription ID. + + +type: keyword + +-- + +*`azure.dimensions.*`*:: ++ +-- +Azure metric dimensions. + + +type: object + +-- + +[float] +=== resource + +The resource specified. + + + +*`azure.resource.name`*:: ++ +-- +The name of the resource. + + +type: keyword + +-- + +*`azure.resource.type`*:: ++ +-- +The type of the resource. + + +type: keyword + +-- + +*`azure.resource.group`*:: ++ +-- +The resource group. + + +type: keyword + +-- + +*`azure.resource.tags.*`*:: ++ +-- +Azure resource tags. + + +type: object + +-- + +*`azure.resource.compute_vm.*.*`*:: ++ +-- +compute_vm + + +type: object + +-- + +*`azure.resource.compute_vm_scaleset.*.*`*:: ++ +-- +compute_vm_scaleset + + +type: object + +-- + +[float] +=== monitor + +monitor + + + +*`azure.resource.monitor.metrics.*.*`*:: ++ +-- +Metrics returned. + + +type: object + +-- + [[exported-fields-beat-common]] == Beat fields @@ -4876,6 +5308,18 @@ Runtime CPU metrics. Percentage of time in kernel space. +type: scaled_float + +format: percent + +-- + +*`docker.cpu.kernel.norm.pct`*:: ++ +-- +Percentage of time in kernel space normalized by the number of CPU cores. + + type: scaled_float format: percent @@ -4898,6 +5342,18 @@ type: long Percentage of total CPU time in the system. +type: scaled_float + +format: percent + +-- + +*`docker.cpu.system.norm.pct`*:: ++ +-- +Percentage of total CPU time in the system normalized by the number of CPU cores. + + type: scaled_float format: percent @@ -4920,6 +5376,18 @@ type: long Percentage of time in user space. +type: scaled_float + +format: percent + +-- + +*`docker.cpu.user.norm.pct`*:: ++ +-- +Percentage of time in user space normalized by the number of CPU cores. + + type: scaled_float format: percent @@ -4942,6 +5410,18 @@ type: long Total CPU usage. +type: scaled_float + +format: percent + +-- + +*`docker.cpu.total.norm.pct`*:: ++ +-- +Total CPU usage normalized by the number of CPU cores. + + type: scaled_float format: percent @@ -4954,6 +5434,18 @@ format: percent Percentage of CPU time in this core. +type: object + +format: percent + +-- + +*`docker.cpu.core.*.norm.pct`*:: ++ +-- +Percentage of CPU time in this core, normalized by the number of CPU cores. + + type: object format: percent @@ -10031,6 +10523,54 @@ type: long Memory used for fielddata. 
+type: long + +-- + +[float] +=== enrich + +Enrich stats + + + +*`elasticsearch.enrich.queue.size`*:: ++ +-- +Number of search requests in the queue. + + +type: long + +-- + + +*`elasticsearch.enrich.remote_requests.current`*:: ++ +-- +Current number of outstanding remote requests. + + +type: long + +-- + +*`elasticsearch.enrich.remote_requests.total`*:: ++ +-- +Number of outstanding remote requests executed since node startup. + + +type: long + +-- + +*`elasticsearch.enrich.executed_searches.total`*:: ++ +-- +Number of search requests that enrich processors have executed since node startup. + + type: long -- @@ -15570,7 +16110,7 @@ type: date Message recorded for the given event -type: keyword +type: text -- @@ -17282,6 +17822,58 @@ type: long -- +[float] +=== resourcequota + +kubernetes resourcequota metrics + + + +*`kubernetes.resourcequota.created.sec`*:: ++ +-- +Epoch seconds since the ResourceQuota was created + +type: double + +-- + +*`kubernetes.resourcequota.quota`*:: ++ +-- +Quota informed (hard or used) for the resource + +type: double + +-- + +*`kubernetes.resourcequota.name`*:: ++ +-- +ResourceQuota name + +type: keyword + +-- + +*`kubernetes.resourcequota.type`*:: ++ +-- +Quota information type, `hard` or `used` + +type: keyword + +-- + +*`kubernetes.resourcequota.resource`*:: ++ +-- +Resource name the quota applies to + +type: keyword + +-- + [float] === statefulset @@ -18916,7 +19508,7 @@ Reports data from the query execution system. -*`mongodb.metrics.query_executor.scanned_indexes`*:: +*`mongodb.metrics.query_executor.scanned_indexes.count`*:: + -- The total number of index items scanned during queries and query-plan evaluation. @@ -18926,7 +19518,7 @@ type: long -- -*`mongodb.metrics.query_executor.scanned_documents`*:: +*`mongodb.metrics.query_executor.scanned_documents.count`*:: + -- The total number of documents scanned during queries and query-plan evaluation. @@ -19370,7 +19962,7 @@ Reports on the operation of the resource use of the ttl index process. -*`mongodb.metrics.ttl.deleted_documents`*:: +*`mongodb.metrics.ttl.deleted_documents.count`*:: + -- The total number of documents deleted from collections with a ttl index. @@ -19380,7 +19972,7 @@ type: long -- -*`mongodb.metrics.ttl.passes`*:: +*`mongodb.metrics.ttl.passes.count`*:: + -- The number of times the background process removes documents from collections with a ttl index. @@ -27894,7 +28486,7 @@ type: float *`system.diskio.iostat.request.avg_size`*:: + -- -The average size (in sectors) of the requests that were issued to the device. +The average size (in bytes) of the requests that were issued to the device. 
type: float @@ -28371,6 +28963,42 @@ format: bytes -- +*`system.memory.swap.out.pages`*:: ++ +-- +count of pages swapped out + +type: long + +-- + +*`system.memory.swap.in.pages`*:: ++ +-- +count of pages swapped in + +type: long + +-- + +*`system.memory.swap.readahead.pages`*:: ++ +-- +swap readahead pages + +type: long + +-- + +*`system.memory.swap.readahead.cached`*:: ++ +-- +swap readahead cache hits + +type: long + +-- + *`system.memory.swap.used.pct`*:: + -- @@ -28473,6 +29101,30 @@ format: bytes -- +[float] +=== swap.out + +huge pages swapped out + + +*`system.memory.hugepages.swap.out.pages`*:: ++ +-- +pages swapped out + +type: long + +-- + +*`system.memory.hugepages.swap.out.fallback`*:: ++ +-- +Count of huge pages that must be split before swapout + +type: long + +-- + [float] === network @@ -30012,6 +30664,317 @@ format: duration -- +[[exported-fields-tomcat]] +== Tomcat fields + +Tomcat module + + + + +[float] +=== cache + +Catalina Cache metrics from the WebResourceRoot + + +*`tomcat.cache.mbean`*:: ++ +-- +Mbean that this event is related to + +type: keyword + +-- + +*`tomcat.cache.hit.total`*:: ++ +-- +The number of requests for resources that were served from the cache + +type: long + +-- + +*`tomcat.cache.size.total.kb`*:: ++ +-- +The current estimate of the cache size in kilobytes + +type: long + +-- + +*`tomcat.cache.size.max.kb`*:: ++ +-- +The maximum permitted size of the cache in kilobytes + +type: long + +-- + +*`tomcat.cache.lookup.total`*:: ++ +-- +The number of requests for resources + +type: long + +-- + +*`tomcat.cache.ttl.ms`*:: ++ +-- +The time-to-live for cache entries in milliseconds + +type: long + +-- + +[float] +=== memory + +Memory metrics from java.lang JMX + + +*`tomcat.memory.mbean`*:: ++ +-- +Mbean that this event is related to + +type: keyword + +-- + +*`tomcat.memory.heap.usage.committed`*:: ++ +-- +Committed heap memory usage + +type: long + +-- + +*`tomcat.memory.heap.usage.max`*:: ++ +-- +Max heap memory usage + +type: long + +-- + +*`tomcat.memory.heap.usage.used`*:: ++ +-- +Used heap memory usage + +type: long + +-- + +*`tomcat.memory.heap.usage.init`*:: ++ +-- +Initial heap memory usage + +type: long + +-- + +*`tomcat.memory.other.usage.committed`*:: ++ +-- +Committed non-heap memory usage + +type: long + +-- + +*`tomcat.memory.other.usage.max`*:: ++ +-- +Max non-heap memory usage + +type: long + +-- + +*`tomcat.memory.other.usage.used`*:: ++ +-- +Used non-heap memory usage + +type: long + +-- + +*`tomcat.memory.other.usage.init`*:: ++ +-- +Initial non-heap memory usage + +type: long + +-- + +[float] +=== requests + +Requests processor metrics from GlobalRequestProcessor JMX + + +*`tomcat.requests.mbean`*:: ++ +-- +Mbean that this event is related to + +type: keyword + +-- + +*`tomcat.requests.total`*:: ++ +-- +Number of requests processed + +type: long + +-- + +*`tomcat.requests.bytes.received`*:: ++ +-- +Amount of data received, in bytes + +type: long + +-- + +*`tomcat.requests.bytes.sent`*:: ++ +-- +Amount of data sent, in bytes + +type: long + +-- + +*`tomcat.requests.processing.ms`*:: ++ +-- +Total time to process the requests + +type: long + +-- + +*`tomcat.requests.errors.total`*:: ++ +-- +Number of errors + +type: long + +-- + +[float] +=== threading + +Threading metrics from the Catalina's ThreadPool JMX + + +*`tomcat.threading.busy`*:: ++ +-- +Current busy threads from the ThreadPool + +type: long + +-- + +*`tomcat.threading.max`*:: ++ +-- +Max threads from the ThreadPool + +type: long + +-- + +*`tomcat.threading.current`*:: ++ +-- 
+Current number of threads, taken from the ThreadPool + +type: long + +-- + +*`tomcat.threading.keep_alive.total`*:: ++ +-- +Total keep alive on the ThreadPool + +type: long + +-- + +*`tomcat.threading.keep_alive.timeout.ms`*:: ++ +-- +Keep alive timeout on the ThreadPool + +type: long + +-- + +*`tomcat.threading.started.total`*:: ++ +-- +Current started threads at JVM level (from java.lang:type=Threading) + +type: long + +-- + +*`tomcat.threading.user.time.ms`*:: ++ +-- +User time in milliseconds (from java.lang:type=Threading) + +type: long + +-- + +*`tomcat.threading.cpu.time.ms`*:: ++ +-- +CPU time in milliseconds (from java.lang:type=Threading) + +type: long + +-- + +*`tomcat.threading.total`*:: ++ +-- +Total threads at the JVM level (from java.lang:type=Threading) + +type: long + +-- + +*`tomcat.threading.peak`*:: ++ +-- +Peak number of threads at JVM level (from java.lang:type=Threading) + +type: long + +-- + [[exported-fields-traefik]] == traefik fields diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc index a8d503e1..db1c9fe3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/gettingstarted.asciidoc @@ -4,7 +4,7 @@ {beatname_uc} helps you monitor your servers and the services they host by collecting metrics from the operating system and services. -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <<{beatname_lc}-installation>> * <<{beatname_lc}-configuration>> @@ -25,14 +25,14 @@ traffic or prevent {beatname_uc} from collecting metrics when there are network problems. Metrics from multiple {beatname_uc} instances will be combined on the Elasticsearch server. -include::{libbeat-dir}/docs/shared-download-and-install.asciidoc[] +include::{libbeat-dir}/shared-download-and-install.asciidoc[] [[deb]] *deb:* ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -51,7 +51,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -70,7 +70,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -84,14 +84,14 @@ tar xzvf {beatname_lc}-{version}-darwin-x86_64.tar.gz endif::[] -include::{libbeat-dir}/docs/shared-brew-install.asciidoc[] +include::{libbeat-dir}/shared-brew-install.asciidoc[] [[linux]] *linux:* ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -120,7 +120,7 @@ See <> for deploying with Kubernet ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. 
endif::[] @@ -160,7 +160,7 @@ For more information about these options, see [id="{beatname_lc}-configuration"] === Step 2: Configure {beatname_uc} -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] When you configure {beatname_uc}, you need to specify which <<{beatname_lc}-modules,modules>> to run. {beatname_uc} uses modules to collect @@ -214,27 +214,25 @@ settings. See <> if you want to add the module configs to the +{beatname_lc}.yml+ file rather than using the `modules.d` directory. -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] -include::{libbeat-dir}/docs/step-look-at-config.asciidoc[] - -include::../../libbeat/docs/shared-cm-tip.asciidoc[] +include::{libbeat-dir}/step-look-at-config.asciidoc[] [id="{beatname_lc}-template"] === Step 3: Load the index template in Elasticsearch -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [[load-kibana-dashboards]] === Step 4: Set up the Kibana dashboards -include::{libbeat-dir}/docs/dashboards.asciidoc[] +include::{libbeat-dir}/dashboards.asciidoc[] [id="{beatname_lc}-starting"] === Step 5: Start {beatname_uc} @@ -273,7 +271,7 @@ or run {beatname_uc} with `--strict.perms=false` specified. See in the _Beats Platform Reference_. :requires-sudo: -include::{libbeat-dir}/docs/shared-brew-run.asciidoc[] +include::{libbeat-dir}/shared-brew-run.asciidoc[] :requires-sudo!: *win:* @@ -311,7 +309,7 @@ To make it easier for you to start monitoring your servers in Kibana, we have created example {beatname_uc} dashboards. You loaded the dashboards earlier when you ran the `setup` command. -include::{libbeat-dir}/docs/opendashboards.asciidoc[] +include::{libbeat-dir}/opendashboards.asciidoc[] The dashboards are provided as examples. We recommend that you {kibana-ref}/dashboard.html[customize] them to meet your needs. 
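NOTE: this patch also documents a new `co.elastic.metrics/metrics_path` autodiscover hint (see the autodiscover-hints.asciidoc change above). A minimal sketch of how the hint might be used as Kubernetes pod annotations, assuming hints-based autodiscover is enabled; the module, port, and path below are illustrative placeholders, not part of this patch:

[source,yaml]
----
# Hypothetical pod annotations; all values are placeholders.
metadata:
  annotations:
    co.elastic.metrics/module: prometheus
    co.elastic.metrics/hosts: '${data.host}:9090'
    co.elastic.metrics/metrics_path: /admin/metrics  # instead of the default /metrics
    co.elastic.metrics/period: 1m
----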
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vm-guestmetrics-overview.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vm-guestmetrics-overview.png new file mode 100644 index 00000000..0f219790 Binary files /dev/null and b/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vm-guestmetrics-overview.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vm-overview.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vm-overview.png new file mode 100644 index 00000000..22136049 Binary files /dev/null and b/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vm-overview.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vmss-overview.png b/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vmss-overview.png new file mode 100644 index 00000000..c1456960 Binary files /dev/null and b/vendor/github.com/elastic/beats/metricbeat/docs/images/metricbeat-azure-vmss-overview.png differ diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc index e9528b56..4991692b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/index.asciidoc @@ -1,14 +1,15 @@ = Metricbeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs :libbeat-xpack-dir: ../../../x-pack/libbeat -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: metricbeat :beatname_uc: Metricbeat :beatname_pkg: {beatname_lc} @@ -25,14 +26,18 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :linux_os: :docker_platform: :win_os: +:no_decode_cef_processor: +:no_decode_csv_fields_processor: +:no_script_processor: +:no_timestamp_processor: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] include::./gettingstarted.asciidoc[] -include::{libbeat-dir}/docs/repositories.asciidoc[] +include::{libbeat-dir}/repositories.asciidoc[] include::./setting-up-running.asciidoc[] @@ -42,18 +47,18 @@ include::./how-metricbeat-works.asciidoc[] include::./configuring-howto.asciidoc[] -include::{libbeat-dir}/docs/shared-central-management.asciidoc[] +include::{libbeat-dir}/shared-central-management.asciidoc[] include::./modules.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc index 68d23b39..b5188233 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc +++ 
b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-filtering.asciidoc @@ -1,7 +1,7 @@ [[filtering-and-enhancing-data]] == Filter and enhance the exported data -include::{libbeat-dir}/docs/processors.asciidoc[] +include::{libbeat-dir}/processors.asciidoc[] For example, the following configuration reduces the exported fields by dropping the `agent.name` and `agent.version` fields under `beat` from all documents. @@ -13,4 +13,4 @@ processors: fields: ['agent'] ---- -include::{libbeat-dir}/docs/processors-using.asciidoc[] +include::{libbeat-dir}/processors-using.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-general-options.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-general-options.asciidoc index 3aff4ec2..3c3ce834 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-general-options.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-general-options.asciidoc @@ -42,4 +42,4 @@ timeseries.enabled: true ---- -include::{libbeat-dir}/docs/generalconfig.asciidoc[] +include::{libbeat-dir}/generalconfig.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc index 2704b4eb..675d4626 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/metricbeat-options.asciidoc @@ -6,7 +6,7 @@ Metricbeat provides a couple different ways to enable modules and metricsets: * <> * <> -include::{libbeat-dir}/docs/shared-note-file-permissions.asciidoc[] +include::{libbeat-dir}/shared-note-file-permissions.asciidoc[] [float] [[enable-modules-d-configs]] @@ -214,6 +214,12 @@ A list of processors to apply to the data generated by the metricset. See <> for information about specifying processors in your config. +[float] +==== `keep_null` + +If this option is set to true, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. + [float] ==== `service.name` diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/appsearch.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/appsearch.asciidoc new file mode 100644 index 00000000..69a2b1da --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/appsearch.asciidoc @@ -0,0 +1,41 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-module-appsearch]] +[role="xpack"] +== App Search module + +beta[] + +This is the App Search module. + + + +[float] +=== Example configuration + +The App Search module supports the standard configuration options that are described +in <>. Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: appsearch + metricsets: ["stats"] + enabled: true + period: 10s + hosts: ["http://localhost:3002"] + #username: "elastic" + #password: "changeme" +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +include::appsearch/stats.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/appsearch/stats.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/appsearch/stats.asciidoc new file mode 100644 index 00000000..e9bcccbe --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/appsearch/stats.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! 
See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-appsearch-stats]] +=== App Search stats metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/appsearch/stats/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/appsearch/stats/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws.asciidoc index 973037f3..961053c4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws.asciidoc @@ -105,20 +105,14 @@ in <>. Here is an example configuration: metricbeat.modules: - module: aws period: 300s + credential_profile_name: test-mb metricsets: - ec2 - access_key_id: '${AWS_ACCESS_KEY_ID:""}' - secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' - session_token: '${AWS_SESSION_TOKEN:""}' - default_region: '${AWS_REGION:us-west-1}' - module: aws period: 300s + credential_profile_name: test-mb metricsets: - sqs - access_key_id: '${AWS_ACCESS_KEY_ID:""}' - secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' - session_token: '${AWS_SESSION_TOKEN:""}' - default_region: '${AWS_REGION:us-west-1}' regions: - us-west-1 - module: aws @@ -129,18 +123,14 @@ metricbeat.modules: access_key_id: '${AWS_ACCESS_KEY_ID:""}' secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' session_token: '${AWS_SESSION_TOKEN:""}' - default_region: '${AWS_REGION:us-west-1}' - module: aws period: 300s + credential_profile_name: test-mb metricsets: - cloudwatch - access_key_id: '${AWS_ACCESS_KEY_ID:""}' - secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' - session_token: '${AWS_SESSION_TOKEN:""}' - default_region: '${AWS_REGION:us-west-1}' - cloudwatch_metrics: + metrics: - namespace: AWS/EC2 - metricname: CPUUtilization + name: ["CPUUtilization"] dimensions: - name: InstanceId value: i-0686946e22cf9494a @@ -149,12 +139,9 @@ metricbeat.modules: tags.resource_type_filter: elasticloadbalancing - module: aws period: 60s + credential_profile_name: test-mb metricsets: - rds - access_key_id: '${AWS_ACCESS_KEY_ID:""}' - secret_access_key: '${AWS_SECRET_ACCESS_KEY:""}' - session_token: '${AWS_SESSION_TOKEN:""}' - default_region: '${AWS_REGION:us-west-1}' ---- [float] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/cloudwatch.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/cloudwatch.asciidoc index 1cc8ed82..7fb2a68e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/cloudwatch.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/cloudwatch.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-metricset-aws-cloudwatch]] === aws cloudwatch metricset -beta[] - include::../../../../x-pack/metricbeat/module/aws/cloudwatch/_meta/docs.asciidoc[] This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. 
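NOTE: the `keep_null` option documented in the metricbeat-options.asciidoc change above applies to any module block. A minimal sketch of it in a standard module configuration, assuming the system module; the metricset and period are illustrative placeholders:

[source,yaml]
----
metricbeat.modules:
- module: system
  metricsets: ["cpu"]
  period: 10s
  keep_null: true  # publish fields whose value is null instead of dropping them
----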
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/rds.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/rds.asciidoc index c358ae16..c4564faa 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/rds.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/rds.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-metricset-aws-rds]] === aws rds metricset -beta[] - include::../../../../x-pack/metricbeat/module/aws/rds/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_daily_storage.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_daily_storage.asciidoc index 0d7440e0..de7a74d7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_daily_storage.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_daily_storage.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-metricset-aws-s3_daily_storage]] === aws s3_daily_storage metricset -beta[] - include::../../../../x-pack/metricbeat/module/aws/s3_daily_storage/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_request.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_request.asciidoc index 3815c2c5..131a7205 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_request.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/s3_request.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-metricset-aws-s3_request]] === aws s3_request metricset -beta[] - include::../../../../x-pack/metricbeat/module/aws/s3_request/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/sqs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/sqs.asciidoc index dcf0eaad..8fa5a212 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/sqs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/aws/sqs.asciidoc @@ -5,8 +5,6 @@ This file is generated! See scripts/mage/docs_collector.go [[metricbeat-metricset-aws-sqs]] === aws sqs metricset -beta[] - include::../../../../x-pack/metricbeat/module/aws/sqs/_meta/docs.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure.asciidoc new file mode 100644 index 00000000..b37998ad --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure.asciidoc @@ -0,0 +1,151 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-module-azure]] +[role="xpack"] +== azure module + +beta[] + +This is the azure module. + +The Azure Monitor feature collects and aggregates logs and metrics from a variety of sources into a common data platform where it can be used for analysis, visualization, and alerting. + + +The azure monitor metrics are numerical values that describe some aspect of a system at a particular point in time. They are collected at regular intervals and are identified with a timestamp, a name, a value, and one or more defining labels. + +The azure module will periodically retrieve the azure monitor metrics using the Azure REST APIs as MetricList. 
+Additional azure API calls will be executed in order to retrieve information regarding the resources targeted by the user. + +The azure module metricsets are `monitor`, `compute_vm` and `compute_vm_scaleset`. + +[float] +=== Dashboards + +The azure module comes with several predefined dashboards for virtual machines, VM guest metrics and virtual machine scale sets. + +The VM overview dashboard shows information about CPU, memory, disk usage as well as operations per second. The two available filters help narrow down the dashboard to specific regions and/or resource groups. For example: + +image::./images/metricbeat-azure-vm-overview.png[] + +If VM guest metrics are enabled, then the guest metrics overview dashboard can help with monitoring ASP.NET applications and SQL Server metrics. For example: + +image::./images/metricbeat-azure-vm-guestmetrics-overview.png[] + +The virtual machine scale sets dashboard is similar to the VM dashboard and shows relevant health information about running VM scale sets. For example: + +image::./images/metricbeat-azure-vmss-overview.png[] + +[float] +=== Module-specific configuration notes + +All the tasks executed against the Azure Monitor REST API will use the Azure Resource Manager authentication model. +Therefore, all requests must be authenticated with Azure Active Directory (Azure AD). +One approach to authenticate the client application is to create an Azure AD service principal and retrieve the authentication (JWT) token. +For a more detailed walk-through of using Azure PowerShell to create a service principal to access resources, see https://docs.microsoft.com/en-us/powershell/azure/create-azure-service-principal-azureps?view=azps-2.7.0. + It is also possible to create a service principal via the Azure portal: https://docs.microsoft.com/en-us/azure/active-directory/develop/howto-create-service-principal-portal. +Users will have to make sure the roles assigned to the application contain at least read permissions to the monitor data; more on the roles can be found at https://docs.microsoft.com/en-us/azure/role-based-access-control/built-in-roles. + +Required credentials for the `azure` module: + +`client_id`:: The unique identifier for the application (also known as Application Id) + +`client_secret`:: The client/application secret/key + +`subscription_id`:: The unique identifier for the azure subscription + +`tenant_id`:: The unique identifier of the Azure Active Directory instance + + +Users can instead rely on the azure credential environment variables, if configured: `AZURE_CLIENT_ID`, `AZURE_CLIENT_SECRET`, `AZURE_TENANT_ID`, `AZURE_SUBSCRIPTION_ID` + +[float] +== Metricsets + +[float] +=== `monitor` +This metricset allows users to retrieve metrics from specified resources. Filters can be applied here for the metric retrieval interval, the metric names, +the aggregation list, namespaces and metric dimensions. + +[float] +=== `compute_vm` +This metricset collects metrics from virtual machines; these metrics have a timegrain of 5 minutes, +so the `period` for the `compute_vm` metricset should be `300s` or a multiple of `300s`. + +[float] +=== `compute_vm_scaleset` +This metricset collects metrics from virtual machine scale sets; these metrics have a timegrain of 5 minutes, +so the `period` for the `compute_vm_scaleset` metricset should be `300s` or a multiple of `300s`. + + +[float] +== Additional notes about metrics and costs + +Costs: Metric queries are charged based on the number of standard API calls. 
More information on pricing here https://azure.microsoft.com/id-id/pricing/details/monitor/. + +Authentication: we are handling authentication on our side (creating/renewing the authentication token), so we advise users to use dedicated credentials for metricbeat only. + + +[float] +=== Example configuration + +The azure module supports the standard configuration options that are described +in <>. Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: azure + metricsets: + - monitor + enabled: true + period: 300s + client_id: '${AZURE_CLIENT_ID:""}' + client_secret: '${AZURE_CLIENT_SECRET:""}' + tenant_id: '${AZURE_TENANT_ID:""}' + subscription_id: '${AZURE_SUBSCRIPTION_ID:""}' + resources: + - resource_query: "resourceType eq 'Microsoft.DocumentDb/databaseAccounts'" + metrics: + - name: ["DataUsage", "DocumentCount", "DocumentQuota"] + namespace: "Microsoft.DocumentDb/databaseAccounts" + +- module: azure + metricsets: + - compute_vm + enabled: true + period: 300s + client_id: '${AZURE_CLIENT_ID:""}' + client_secret: '${AZURE_CLIENT_SECRET:""}' + tenant_id: '${AZURE_TENANT_ID:""}' + subscription_id: '${AZURE_SUBSCRIPTION_ID:""}' + +- module: azure + metricsets: + - compute_vm_scaleset + enabled: true + period: 300s + client_id: '${AZURE_CLIENT_ID:""}' + client_secret: '${AZURE_CLIENT_SECRET:""}' + tenant_id: '${AZURE_TENANT_ID:""}' + subscription_id: '${AZURE_SUBSCRIPTION_ID:""}' +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +* <> + +* <> + +include::azure/compute_vm.asciidoc[] + +include::azure/compute_vm_scaleset.asciidoc[] + +include::azure/monitor.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/compute_vm.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/compute_vm.asciidoc new file mode 100644 index 00000000..39d3be6e --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/compute_vm.asciidoc @@ -0,0 +1,23 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-azure-compute_vm]] +=== azure compute_vm metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/azure/compute_vm/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/azure/compute_vm/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/compute_vm_scaleset.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/compute_vm_scaleset.asciidoc new file mode 100644 index 00000000..0bc72248 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/compute_vm_scaleset.asciidoc @@ -0,0 +1,23 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-azure-compute_vm_scaleset]] +=== azure compute_vm_scaleset metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/azure/compute_vm_scaleset/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. 
+ +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/azure/compute_vm_scaleset/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/monitor.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/monitor.asciidoc new file mode 100644 index 00000000..1596987e --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/azure/monitor.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-azure-monitor]] +=== azure monitor metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/azure/monitor/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/azure/monitor/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc index 1226fcd0..4c54d694 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch.asciidoc @@ -66,6 +66,8 @@ The following metricsets are available: * <> +* <> + * <> * <> @@ -86,6 +88,8 @@ include::elasticsearch/ccr.asciidoc[] include::elasticsearch/cluster_stats.asciidoc[] +include::elasticsearch/enrich.asciidoc[] + include::elasticsearch/index.asciidoc[] include::elasticsearch/index_recovery.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch/enrich.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch/enrich.asciidoc new file mode 100644 index 00000000..9e4f56ba --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/elasticsearch/enrich.asciidoc @@ -0,0 +1,21 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-elasticsearch-enrich]] +=== Elasticsearch enrich metricset + +include::../../../module/elasticsearch/enrich/_meta/docs.asciidoc[] + + +==== Fields + +For a description of each field in the metricset, see the +<> section. 
+
+Here is an example document generated by this metricset:
+
+[source,json]
+----
+include::../../../module/elasticsearch/enrich/_meta/data.json[]
+----
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc
index d69251fe..5dc12197 100644
--- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes.asciidoc
@@ -79,6 +79,8 @@ metricbeat.modules:
     add_metadata: true
     # When used outside the cluster:
     #host: node_name
+    # If kube_config is not set, the KUBECONFIG environment variable will be checked,
+    # and if not present it will fall back to InCluster
     #kube_config: ~/.kube/config
 
   # State metrics from kube-state-metrics service:
@@ -92,6 +94,7 @@ metricbeat.modules:
     - state_pod
     - state_container
     - state_cronjob
+    - state_resourcequota
     period: 10s
     hosts: ["kube-state-metrics:8080"]
 
@@ -99,6 +102,8 @@ metricbeat.modules:
     add_metadata: true
     # When used outside the cluster:
     #host: node_name
+    # If kube_config is not set, the KUBECONFIG environment variable will be checked,
+    # and if not present it will fall back to InCluster
     #kube_config: ~/.kube/config
 
   # Kubernetes events
@@ -179,6 +184,8 @@ The following metricsets are available:
 
 * <>
 
+* <>
+
 * <>
 
 * <>
@@ -213,6 +220,8 @@ include::kubernetes/state_pod.asciidoc[]
 
 include::kubernetes/state_replicaset.asciidoc[]
 
+include::kubernetes/state_resourcequota.asciidoc[]
+
 include::kubernetes/state_statefulset.asciidoc[]
 
 include::kubernetes/system.asciidoc[]
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/state_resourcequota.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/state_resourcequota.asciidoc
new file mode 100644
index 00000000..e3748774
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/kubernetes/state_resourcequota.asciidoc
@@ -0,0 +1,23 @@
+////
+This file is generated! See scripts/mage/docs_collector.go
+////
+
+[[metricbeat-metricset-kubernetes-state_resourcequota]]
+=== Kubernetes state_resourcequota metricset
+
+beta[]
+
+include::../../../module/kubernetes/state_resourcequota/_meta/docs.asciidoc[]
+
+
+==== Fields
+
+For a description of each field in the metricset, see the
+<> section.
+
+Here is an example document generated by this metricset:
+
+[source,json]
+----
+include::../../../module/kubernetes/state_resourcequota/_meta/data.json[]
+----
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mssql.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mssql.asciidoc
index 4e2049cc..15ad0f80 100644
--- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/mssql.asciidoc
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/mssql.asciidoc
@@ -11,25 +11,51 @@
 
 beta[]
 
 This is the https://www.microsoft.com/en-us/sql-server/sql-server-2017[Microsoft SQL 2017] Metricbeat module. It is still in beta and under active development to add new Metricsets and introduce enhancements.
 
 [float]
-== Compatibility
+=== Compatibility
 
 The module has been tested with the https://hub.docker.com/r/microsoft/mssql-server-linux/[2017 GA] version under Linux.
 
 [float]
-== Metricsets
+=== Metricsets
 
 The following Metricsets are already included:
 
 [float]
-=== `transaction_log`
+==== `transaction_log`
 
 `transaction_log` Metricset fetches information about the operation and transaction log of each MSSQL database in the monitored instance.
All data is extracted from the https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/database-related-dynamic-management-views-transact-sql?view=sql-server-2017[Database Dynamic Management Views].
 
 [float]
-=== `performance`
+==== `performance`
 
 `performance` Metricset fetches information from what's commonly known as https://docs.microsoft.com/en-us/sql/relational-databases/system-dynamic-management-views/sys-dm-os-performance-counters-transact-sql?view=sql-server-2017[Performance Counters] in MSSQL.
 
+[float]
+=== Module-specific configuration notes
+
+When configuring the `hosts` option, you can specify native user credentials
+as part of the host string with the following format:
+
+----
+hosts: ["sqlserver://sa@localhost"]
+----
+
+To use Active Directory domain credentials, you can separately specify the username and password
+using the respective configuration options to allow the domain to be included in the username:
+
+----
+metricbeat.modules:
+- module: mssql
+  metricsets:
+    - "transaction_log"
+    - "performance"
+  hosts: ["sqlserver://localhost"]
+  username: domain\username
+  password: verysecurepassword
+  period: 10s
+----
+
+Store sensitive values like passwords in the <>.
 
 [float]
 === Example configuration
@@ -44,7 +70,9 @@ metricbeat.modules:
   metricsets:
     - "transaction_log"
     - "performance"
-  hosts: ["sqlserver://sa@localhost"]
+  hosts: ["sqlserver://localhost"]
+  username: domain\username
+  password: verysecurepassword
   period: 10s
 ----
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/nats.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/nats.asciidoc
index 9596d072..75b3b8a3 100644
--- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/nats.asciidoc
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/nats.asciidoc
@@ -12,7 +12,7 @@
 The default metricsets are `stats`, `connections`, `routes` and `subscriptions`.
 
 [float]
 === Compatibility
-The Nats module is tested with Nats 1.3.0.
+The Nats module is tested with Nats 1.3.0 and 2.0.4.
 
 [float]
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc
index 47685107..46a5f672 100644
--- a/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/prometheus.asciidoc
@@ -5,6 +5,7 @@ This file is generated! See scripts/mage/docs_collector.go
 
 [[metricbeat-module-prometheus]]
 == Prometheus module
+[[prometheus-module]]
 
 This module periodically scrapes metrics from
 https://prometheus.io/docs/instrumenting/exporters/[Prometheus exporters].
diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat.asciidoc
new file mode 100644
index 00000000..37910f2b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat.asciidoc
@@ -0,0 +1,58 @@
+////
+This file is generated! See scripts/mage/docs_collector.go
+////
+
+[[metricbeat-module-tomcat]]
+[role="xpack"]
+== Tomcat module
+
+beta[]
+
+This module periodically fetches JMX metrics from Apache Tomcat.
+
+[float]
+=== Compatibility
+The module has been tested with Tomcat 7.0.24 and 9.0.24. Other versions are expected to work.
+
+[float]
+=== Usage
+The Tomcat module requires <> to fetch JMX metrics. Refer to the link for instructions about how to use Jolokia.
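+
+One quick way to verify that Jolokia is reachable before enabling the module is to deploy the
+Jolokia WAR agent and query it directly. A sketch only; the Tomcat path and the Jolokia version
+below are illustrative assumptions, not requirements of the module:
+
+[source,sh]
+----
+# Deploy the Jolokia WAR agent into Tomcat's webapps directory; Tomcat expands it automatically.
+wget -O /usr/local/tomcat/webapps/jolokia.war \
+  https://repo1.maven.org/maven2/org/jolokia/jolokia-war/1.6.2/jolokia-war-1.6.2.war
+
+# The agent should answer on the same host and path the module is configured with below.
+curl -s 'http://localhost:8080/jolokia/?ignoreErrors=true&canonicalNaming=false'
+----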
+ + +[float] +=== Example configuration + +The Tomcat module supports the standard configuration options that are described +in <>. Here is an example configuration: + +[source,yaml] +---- +metricbeat.modules: +- module: tomcat + metricsets: ['threading', 'cache', 'memory', 'requests'] + period: 10s + hosts: ['localhost:8080'] + path: "/jolokia/?ignoreErrors=true&canonicalNaming=false" +---- + +[float] +=== Metricsets + +The following metricsets are available: + +* <> + +* <> + +* <> + +* <> + +include::tomcat/cache.asciidoc[] + +include::tomcat/memory.asciidoc[] + +include::tomcat/requests.asciidoc[] + +include::tomcat/threading.asciidoc[] + diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/cache.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/cache.asciidoc new file mode 100644 index 00000000..ddddaae0 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/cache.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-tomcat-cache]] +=== Tomcat cache metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/tomcat/cache/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/tomcat/cache/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/memory.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/memory.asciidoc new file mode 100644 index 00000000..1d850e84 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/memory.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-tomcat-memory]] +=== Tomcat memory metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/tomcat/memory/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/tomcat/memory/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/requests.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/requests.asciidoc new file mode 100644 index 00000000..8f2db18d --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/requests.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-tomcat-requests]] +=== Tomcat requests metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/tomcat/requests/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. 
+ +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/tomcat/requests/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/threading.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/threading.asciidoc new file mode 100644 index 00000000..4d356cb2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules/tomcat/threading.asciidoc @@ -0,0 +1,24 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// + +[[metricbeat-metricset-tomcat-threading]] +=== Tomcat threading metricset + +beta[] + +include::../../../../x-pack/metricbeat/module/tomcat/threading/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../../x-pack/metricbeat/module/tomcat/threading/_meta/data.json[] +---- diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc index 285e0c8d..c507bb22 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/modules_list.asciidoc @@ -9,15 +9,21 @@ This file is generated! See scripts/mage/docs_collector.go .1+| .1+| |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | .1+| .1+| |<> +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.1+| .1+| |<> beta[] |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.8+| .8+| |<> beta[] +.8+| .8+| |<> |<> beta[] |<> |<> beta[] -|<> beta[] -|<> beta[] -|<> beta[] -|<> beta[] +|<> +|<> +|<> +|<> +|<> beta[] |image:./images/icon-yes.png[Prebuilt dashboards are available] | +.3+| .3+| |<> beta[] +|<> beta[] +|<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | .2+| .2+| |<> |<> @@ -54,8 +60,9 @@ This file is generated! See scripts/mage/docs_collector.go |<> |image:./images/icon-no.png[No prebuilt dashboards] | .1+| .1+| |<> |<> |image:./images/icon-no.png[No prebuilt dashboards] | -.10+| .10+| |<> +.11+| .11+| |<> |<> +|<> |<> |<> |<> @@ -91,7 +98,7 @@ This file is generated! See scripts/mage/docs_collector.go .2+| .2+| |<> |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.17+| .17+| |<> +.18+| .18+| |<> |<> |<> beta[] |<> @@ -105,6 +112,7 @@ This file is generated! See scripts/mage/docs_collector.go |<> |<> |<> +|<> beta[] |<> |<> |<> @@ -176,6 +184,11 @@ This file is generated! See scripts/mage/docs_collector.go |<> |<> |<> +|<> beta[] |image:./images/icon-no.png[No prebuilt dashboards] | +.4+| .4+| |<> beta[] +|<> beta[] +|<> beta[] +|<> beta[] |<> |image:./images/icon-no.png[No prebuilt dashboards] | .1+| .1+| |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | @@ -197,7 +210,9 @@ This file is generated! 
See scripts/mage/docs_collector.go include::modules/aerospike.asciidoc[] include::modules/apache.asciidoc[] +include::modules/appsearch.asciidoc[] include::modules/aws.asciidoc[] +include::modules/azure.asciidoc[] include::modules/beat.asciidoc[] include::modules/ceph.asciidoc[] include::modules/cockroachdb.asciidoc[] @@ -235,6 +250,7 @@ include::modules/rabbitmq.asciidoc[] include::modules/redis.asciidoc[] include::modules/statsd.asciidoc[] include::modules/system.asciidoc[] +include::modules/tomcat.asciidoc[] include::modules/traefik.asciidoc[] include::modules/uwsgi.asciidoc[] include::modules/vsphere.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/overview.asciidoc index 2f1cb508..95cf3c77 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/overview.asciidoc @@ -28,4 +28,4 @@ See <> for the complete list of supported services. Metricbeat can insert the collected metrics directly into Elasticsearch or send them to Logstash, Redis, or Kafka. -include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/reload-configuration.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/reload-configuration.asciidoc index 630920aa..49fedc92 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/reload-configuration.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/reload-configuration.asciidoc @@ -38,7 +38,7 @@ definitions. For example: ------------------------------------------------------------------------------ -include::{libbeat-dir}/docs/shared-note-file-permissions.asciidoc[] +include::{libbeat-dir}/shared-note-file-permissions.asciidoc[] === Live reloading @@ -80,4 +80,4 @@ set the `period` to less than 1s because the modification time of files is often stored in seconds. Setting the `period` to less than 1s will result in unnecessary overhead. -include::{libbeat-dir}/docs/shared-note-file-permissions.asciidoc[] +include::{libbeat-dir}/shared-note-file-permissions.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc index 97736a31..c51f1314 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/running-on-docker.asciidoc @@ -1,4 +1,4 @@ -include::{libbeat-dir}/docs/shared-docker.asciidoc[] +include::{libbeat-dir}/shared-docker.asciidoc[] [float] [[monitoring-host]] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/running-on-kubernetes.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/running-on-kubernetes.asciidoc index 2b6a87b6..ef50ec50 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/running-on-kubernetes.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/running-on-kubernetes.asciidoc @@ -6,7 +6,7 @@ retrieve cluster metrics. ifeval::["{release-state}"=="unreleased"] -However, version {stack-version} of {beatname_uc} has not yet been +However, version {version} of {beatname_uc} has not yet been released, so no Docker image is currently available for this version. 
endif::[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc index 3121309d..9ae0014d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/setting-up-running.asciidoc @@ -26,16 +26,16 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. -include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[] +include::{libbeat-dir}/shared-directory-layout.asciidoc[] -include::{libbeat-dir}/docs/keystore.asciidoc[] +include::{libbeat-dir}/keystore.asciidoc[] -include::{libbeat-dir}/docs/command-reference.asciidoc[] +include::{libbeat-dir}/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] include::./running-on-kubernetes.asciidoc[] -include::{libbeat-dir}/docs/shared-systemd.asciidoc[] +include::{libbeat-dir}/shared-systemd.asciidoc[] -include::{libbeat-dir}/docs/shared-shutdown.asciidoc[] +include::{libbeat-dir}/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc index 805878c3..07ed381f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/docs/troubleshooting.asciidoc @@ -16,11 +16,11 @@ If you have issues installing or running {beatname_uc}, read the following tips: [[getting-help]] == Get help -include::{libbeat-dir}/docs/getting-help.asciidoc[] +include::{libbeat-dir}/getting-help.asciidoc[] //sets block macro for debugging.asciidoc included in next section [[enable-metricbeat-debugging]] == Debug -include::{libbeat-dir}/docs/debugging.asciidoc[] +include::{libbeat-dir}/debugging.asciidoc[] diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic.go b/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic.go index e7cc0827..46cb9d48 100644 --- a/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic.go +++ b/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic.go @@ -106,3 +106,24 @@ func ReportAndLogError(err error, r mb.ReporterV2, l *logp.Logger) { r.Error(err) l.Error(err) } + +// FixTimestampField converts the given timestamp field in the given map from a float64 to an +// int, so that it is not serialized in scientific notation in the event. 
This is because
+// Elasticsearch cannot accept scientific notation to represent millis-since-epoch values
+// for its date fields: https://github.com/elastic/elasticsearch/pull/36691
+func FixTimestampField(m common.MapStr, field string) error {
+	v, err := m.GetValue(field)
+	if err == common.ErrKeyNotFound {
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+
+	switch vv := v.(type) {
+	case float64:
+		_, err := m.Put(field, int(vv))
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic_test.go b/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic_test.go
index 71cf1d60..c0de5ea8 100644
--- a/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic_test.go
+++ b/vendor/github.com/elastic/beats/metricbeat/helper/elastic/elastic_test.go
@@ -87,3 +87,56 @@ func TestReportErrorForMissingField(t *testing.T) {
 	assert.Equal(t, expectedError, err)
 	assert.Equal(t, expectedError, currentErr)
 }
+
+func TestFixTimestampField(t *testing.T) {
+	tests := []struct {
+		Name          string
+		OriginalValue map[string]interface{}
+		ExpectedValue map[string]interface{}
+	}{
+		{
+			"converts float64s in scientific notation to ints",
+			map[string]interface{}{
+				"foo": 1.571284349E12,
+			},
+			map[string]interface{}{
+				"foo": 1571284349000,
+			},
+		},
+		{
+			"converts regular notation float64s to ints",
+			map[string]interface{}{
+				"foo": float64(1234),
+			},
+			map[string]interface{}{
+				"foo": 1234,
+			},
+		},
+		{
+			"ignores missing fields",
+			map[string]interface{}{
+				"bar": 12345,
+			},
+			map[string]interface{}{
+				"bar": 12345,
+			},
+		},
+		{
+			"leaves strings untouched",
+			map[string]interface{}{
+				"foo": "bar",
+			},
+			map[string]interface{}{
+				"foo": "bar",
+			},
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.Name, func(t *testing.T) {
+			err := FixTimestampField(test.OriginalValue, "foo")
+			assert.NoError(t, err)
+			assert.Equal(t, test.ExpectedValue, test.OriginalValue)
+		})
+	}
+}
diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/module.go b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/module.go
index 9ac6f48b..da1ea895 100644
--- a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/module.go
+++ b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/module.go
@@ -56,6 +56,6 @@ type prometheusMetricSet struct {
 	mapping *MetricsMapping
 }
 
-func (m *prometheusMetricSet) Fetch(r mb.ReporterV2) {
-	m.prometheus.ReportProcessedMetrics(m.mapping, r)
+func (m *prometheusMetricSet) Fetch(r mb.ReporterV2) error {
+	return m.prometheus.ReportProcessedMetrics(m.mapping, r)
 }
diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/prometheus.go b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/prometheus.go
index d7e3676d..1eb5392b 100644
--- a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/prometheus.go
+++ b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/prometheus.go
@@ -38,7 +38,7 @@ type Prometheus interface {
 
 	GetProcessedMetrics(mapping *MetricsMapping) ([]common.MapStr, error)
 
-	ReportProcessedMetrics(mapping *MetricsMapping, r mb.ReporterV2)
+	ReportProcessedMetrics(mapping *MetricsMapping, r mb.ReporterV2) error
 }
 
 type prometheus struct {
@@ -214,11 +214,10 @@ type infoMetricData struct {
 	Meta common.MapStr
 }
 
-func (p *prometheus) ReportProcessedMetrics(mapping *MetricsMapping, r mb.ReporterV2) {
+func (p *prometheus) ReportProcessedMetrics(mapping *MetricsMapping, r mb.ReporterV2) error {
 	events, err :=
p.GetProcessedMetrics(mapping) if err != nil { - r.Error(err) - return + return errors.Wrap(err, "error getting processed metrics") } for _, event := range events { r.Event(mb.Event{ @@ -226,6 +225,8 @@ func (p *prometheus) ReportProcessedMetrics(mapping *MetricsMapping, r mb.Report Namespace: mapping.Namespace, }) } + + return nil } func getEvent(m map[string]common.MapStr, labels common.MapStr) common.MapStr { diff --git a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/ptest/ptest.go b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/ptest/ptest.go index c91a2690..4bcadbc5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/ptest/ptest.go +++ b/vendor/github.com/elastic/beats/metricbeat/helper/prometheus/ptest/ptest.go @@ -156,13 +156,11 @@ func TestMetricSet(t *testing.T, module, metricset string, cases TestCases) { "hosts": []string{server.URL}, } - f := mbtest.NewReportingMetricSetV2(t, config) - reporter := &mbtest.CapturingReporterV2{} - f.Fetch(reporter) - assert.Nil(t, reporter.GetErrors(), "Errors while fetching metrics") + f := mbtest.NewFetcher(t, config) + events, errs := f.FetchEvents() + assert.Nil(t, errs, "Errors while fetching metrics") if *expectedFlag { - events := reporter.GetEvents() sort.SliceStable(events, func(i, j int) bool { h1, _ := hashstructure.Hash(events[i], nil) h2, _ := hashstructure.Hash(events[j], nil) @@ -185,7 +183,7 @@ func TestMetricSet(t *testing.T, module, metricset string, cases TestCases) { t.Fatal(err) } - for _, event := range reporter.GetEvents() { + for _, event := range events { // ensure the event is in expected list found := -1 for i, expectedEvent := range expectedEvents { diff --git a/vendor/github.com/elastic/beats/metricbeat/include/list.go b/vendor/github.com/elastic/beats/metricbeat/include/list_common.go similarity index 83% rename from vendor/github.com/elastic/beats/metricbeat/include/list.go rename to vendor/github.com/elastic/beats/metricbeat/include/list_common.go index 3f8a4af4..b5158ec8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/include/list.go +++ b/vendor/github.com/elastic/beats/metricbeat/include/list_common.go @@ -49,21 +49,12 @@ import ( _ "github.com/elastic/beats/metricbeat/module/couchbase/node" _ "github.com/elastic/beats/metricbeat/module/couchdb" _ "github.com/elastic/beats/metricbeat/module/couchdb/server" - _ "github.com/elastic/beats/metricbeat/module/docker" - _ "github.com/elastic/beats/metricbeat/module/docker/container" - _ "github.com/elastic/beats/metricbeat/module/docker/cpu" - _ "github.com/elastic/beats/metricbeat/module/docker/diskio" - _ "github.com/elastic/beats/metricbeat/module/docker/event" - _ "github.com/elastic/beats/metricbeat/module/docker/healthcheck" - _ "github.com/elastic/beats/metricbeat/module/docker/image" - _ "github.com/elastic/beats/metricbeat/module/docker/info" - _ "github.com/elastic/beats/metricbeat/module/docker/memory" - _ "github.com/elastic/beats/metricbeat/module/docker/network" _ "github.com/elastic/beats/metricbeat/module/dropwizard" _ "github.com/elastic/beats/metricbeat/module/dropwizard/collector" _ "github.com/elastic/beats/metricbeat/module/elasticsearch" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/ccr" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats" + _ "github.com/elastic/beats/metricbeat/module/elasticsearch/enrich" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/index" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/index_recovery" _ 
"github.com/elastic/beats/metricbeat/module/elasticsearch/index_summary" @@ -98,25 +89,6 @@ import ( _ "github.com/elastic/beats/metricbeat/module/kibana" _ "github.com/elastic/beats/metricbeat/module/kibana/stats" _ "github.com/elastic/beats/metricbeat/module/kibana/status" - _ "github.com/elastic/beats/metricbeat/module/kubernetes" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/apiserver" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/container" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/controllermanager" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/event" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/node" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/pod" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/proxy" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/scheduler" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_container" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_cronjob" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_node" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_pod" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/system" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/util" - _ "github.com/elastic/beats/metricbeat/module/kubernetes/volume" _ "github.com/elastic/beats/metricbeat/module/kvm" _ "github.com/elastic/beats/metricbeat/module/kvm/dommemstat" _ "github.com/elastic/beats/metricbeat/module/logstash" diff --git a/vendor/github.com/elastic/beats/metricbeat/include/list_docker.go b/vendor/github.com/elastic/beats/metricbeat/include/list_docker.go new file mode 100644 index 00000000..1fb90c32 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/include/list_docker.go @@ -0,0 +1,61 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Code generated by 'make imports' - DO NOT EDIT. + +// +build linux darwin windows + +/* +Package include imports all Module and MetricSet packages so that they register +their factories with the global registry. This package can be imported in the +main package to automatically register all of the standard supported Metricbeat +modules. 
+*/ +package include + +import ( + _ "github.com/elastic/beats/metricbeat/module/docker" + _ "github.com/elastic/beats/metricbeat/module/docker/container" + _ "github.com/elastic/beats/metricbeat/module/docker/cpu" + _ "github.com/elastic/beats/metricbeat/module/docker/diskio" + _ "github.com/elastic/beats/metricbeat/module/docker/event" + _ "github.com/elastic/beats/metricbeat/module/docker/healthcheck" + _ "github.com/elastic/beats/metricbeat/module/docker/image" + _ "github.com/elastic/beats/metricbeat/module/docker/info" + _ "github.com/elastic/beats/metricbeat/module/docker/memory" + _ "github.com/elastic/beats/metricbeat/module/docker/network" + _ "github.com/elastic/beats/metricbeat/module/kubernetes" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/apiserver" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/container" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/controllermanager" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/event" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/node" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/pod" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/proxy" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/scheduler" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_container" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_cronjob" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_deployment" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_node" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_pod" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_replicaset" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/state_statefulset" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/system" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/util" + _ "github.com/elastic/beats/metricbeat/module/kubernetes/volume" +) diff --git a/vendor/github.com/elastic/beats/metricbeat/magefile.go b/vendor/github.com/elastic/beats/metricbeat/magefile.go index eb60c64e..2464e0a6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/magefile.go +++ b/vendor/github.com/elastic/beats/metricbeat/magefile.go @@ -27,17 +27,30 @@ import ( "time" "github.com/magefile/mage/mg" - "github.com/magefile/mage/sh" devtools "github.com/elastic/beats/dev-tools/mage" metricbeat "github.com/elastic/beats/metricbeat/scripts/mage" + // mage:import + build "github.com/elastic/beats/dev-tools/mage/target/build" // mage:import "github.com/elastic/beats/dev-tools/mage/target/common" + // mage:import + _ "github.com/elastic/beats/dev-tools/mage/target/dashboards" + // mage:import + _ "github.com/elastic/beats/dev-tools/mage/target/docs" + // mage:import + _ "github.com/elastic/beats/dev-tools/mage/target/pkg" + // mage:import + _ "github.com/elastic/beats/dev-tools/mage/target/test" + // mage:import + _ "github.com/elastic/beats/dev-tools/mage/target/unittest" + // mage:import + update "github.com/elastic/beats/dev-tools/mage/target/update" ) func init() { - common.RegisterCheckDeps(Update) + common.RegisterCheckDeps(update.Update) devtools.BeatDescription = "Metricbeat is a lightweight shipper for metrics." } @@ -47,32 +60,6 @@ func CollectAll() { mg.Deps(CollectDocs, FieldsDocs) } -// Build builds the Beat binary. 
-func Build() error { - return devtools.Build(devtools.DefaultBuildArgs()) -} - -// GolangCrossBuild build the Beat binary inside of the golang-builder. -// Do not use directly, use crossBuild instead. -func GolangCrossBuild() error { - return devtools.GolangCrossBuild(devtools.DefaultGolangCrossBuildArgs()) -} - -// BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). -func BuildGoDaemon() error { - return devtools.BuildGoDaemon() -} - -// CrossBuild cross-builds the beat for all target platforms. -func CrossBuild() error { - return devtools.CrossBuild() -} - -// CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. -func CrossBuildGoDaemon() error { - return devtools.CrossBuildGoDaemon() -} - // Package packages the Beat for distribution. // Use SNAPSHOT=true to build snapshots. // Use PLATFORMS to control the target platforms. @@ -84,8 +71,8 @@ func Package() { devtools.UseElasticBeatOSSPackaging() metricbeat.CustomizePackaging() - mg.Deps(Update, metricbeat.PrepareModulePackagingOSS) - mg.Deps(CrossBuild, CrossBuildGoDaemon) + mg.Deps(update.Update, metricbeat.PrepareModulePackagingOSS) + mg.Deps(build.CrossBuild, build.CrossBuildGoDaemon) mg.SerialDeps(devtools.Package, TestPackages) } @@ -108,18 +95,6 @@ func configYML() error { return devtools.Config(devtools.AllConfigTypes, metricbeat.OSSConfigFileParams(), ".") } -// Update updates the generated files (aka make update). -func Update() error { - // TODO to replace by a pure mage implementation: - // - Generate docs/fields.asciidoc - /* - mg.SerialDeps(Fields, Dashboards, Config, - metricbeat.PrepareModulePackagingOSS, - devtools.GenerateModuleIncludeListGo) - */ - return sh.Run("make", "update") -} - // MockedTests runs the HTTP tests using the mocked data inside each {module}/{metricset}/testdata folder. // Use MODULE={module_name} to run only mocked tests with a single module. // Use GENERATE=true or GENERATE=1 to regenerate JSON files. diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/module/connector.go b/vendor/github.com/elastic/beats/metricbeat/mb/module/connector.go index b0cbf9d7..cb8233a6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/module/connector.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/module/connector.go @@ -31,11 +31,16 @@ type Connector struct { eventMeta common.EventMetadata dynamicFields *common.MapStrPointer timeSeries bool + keepNull bool } type connectorConfig struct { - Processors processors.PluginConfig `config:"processors"` - common.EventMetadata `config:",inline"` // Fields and tags to add to events. + Processors processors.PluginConfig `config:"processors"` + + // KeepNull determines whether published events will keep null values or omit them. + KeepNull bool `config:"keep_null"` + + common.EventMetadata `config:",inline"` // Fields and tags to add to events. 
} func NewConnector(pipeline beat.Pipeline, c *common.Config, dynFields *common.MapStrPointer) (*Connector, error) { @@ -54,6 +59,7 @@ func NewConnector(pipeline beat.Pipeline, c *common.Config, dynFields *common.Ma processors: processors, eventMeta: config.EventMetadata, dynamicFields: dynFields, + keepNull: config.KeepNull, }, nil } @@ -63,6 +69,7 @@ func (c *Connector) Connect() (beat.Client, error) { EventMetadata: c.eventMeta, Processor: c.processors, DynamicFields: c.dynamicFields, + KeepNull: c.keepNull, }, }) } diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/testing/data/README.md b/vendor/github.com/elastic/beats/metricbeat/mb/testing/data/README.md index 2704ccd6..19318a6c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/testing/data/README.md +++ b/vendor/github.com/elastic/beats/metricbeat/mb/testing/data/README.md @@ -13,7 +13,7 @@ The idea is simple, head to `beats/metricbeat/mb/testing/data` and run `go test An alternative is to just run from metricbeat `mage mockedTests` to achieve the same result but using environment variables instead of flags, for example: `MODULE=apache GENERATE=true mage mockedTests` ##### Worth to mention -- If the input file in `testdata` folder is prefixed (named) `docs`, whatever its extension is, and the flat `-generate` is passed; the framework will also create a `docs.json` file in `_meta` folder of the metricset as historically has been done in Metricbeat. +- If the input file in `testdata` folder is prefixed (named) `docs`, whatever its extension is, and the flag `-generate` is passed; the framework will also create a `docs.json` file in `_meta` folder of the metricset as historically has been done in Metricbeat. - Config file **must** be called `config.yml` and be located inside `metricbeat/module/{module}/{metricset}/_meta/testdata` ### Available flags / environment variables diff --git a/vendor/github.com/elastic/beats/metricbeat/mb/testing/testdata.go b/vendor/github.com/elastic/beats/metricbeat/mb/testing/testdata.go index 48dee790..2f9f8cc0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/mb/testing/testdata.go +++ b/vendor/github.com/elastic/beats/metricbeat/mb/testing/testdata.go @@ -31,7 +31,6 @@ import ( "github.com/pkg/errors" "github.com/mitchellh/hashstructure" - "github.com/stretchr/testify/assert" "gopkg.in/yaml.v2" "github.com/elastic/beats/libbeat/asset" @@ -263,18 +262,30 @@ func runTest(t *testing.T, file string, module, metricSetName string, config Dat } } - output, err := json.Marshal(&data) - if err != nil { - t.Fatal(err) + for _, event := range data { + // ensure the event is in expected list + found := -1 + for i, expectedEvent := range expectedMap { + if event.String() == expectedEvent.String() { + found = i + break + } + } + if found > -1 { + expectedMap = append(expectedMap[:found], expectedMap[found+1:]...) 
+		} else {
+			t.Errorf("Event was not expected: %+v", event)
+		}
+	}
 
-	expectedJSON, err := json.Marshal(&expectedMap)
-	if err != nil {
-		t.Fatal(err)
+	if len(expectedMap) > 0 {
+		t.Error("Some events were missing:")
+		for _, e := range expectedMap {
+			t.Error(e)
+		}
+		t.Fatal()
 	}
 
-	assert.Equal(t, string(expectedJSON), string(output))
-
 	if strings.HasSuffix(file, "docs."+config.Suffix) {
 		writeDataJSON(t, data[0], filepath.Join(config.WritePath, "data.json"))
 	}
diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.docker.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.docker.yml
index 982018ee..c34f0f55 100644
--- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.docker.yml
+++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.docker.yml
@@ -4,6 +4,7 @@ metricbeat.config.modules:
 
 processors:
 - add_cloud_metadata: ~
+- add_docker_metadata: ~
 
 output.elasticsearch:
   hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}'
diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml
index 57fc03c1..db2d8946 100644
--- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml
+++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.reference.yml
@@ -435,6 +435,8 @@ metricbeat.modules:
     add_metadata: true
     # When used outside the cluster:
     #host: node_name
+    # If kube_config is not set, the KUBECONFIG environment variable will be checked,
+    # and if not present it will fall back to InCluster
     #kube_config: ~/.kube/config
 
   # State metrics from kube-state-metrics service:
@@ -448,6 +450,7 @@ metricbeat.modules:
     - state_pod
     - state_container
     - state_cronjob
+    - state_resourcequota
     period: 10s
     hosts: ["kube-state-metrics:8080"]
 
@@ -455,6 +458,8 @@ metricbeat.modules:
     add_metadata: true
     # When used outside the cluster:
     #host: node_name
+    # If kube_config is not set, the KUBECONFIG environment variable will be checked,
+    # and if not present it will fall back to InCluster
     #kube_config: ~/.kube/config
 
   # Kubernetes events
@@ -1729,7 +1734,7 @@ setup.template.settings:
 #setup.ilm.enabled: auto
 
 # Set the prefix used in the index lifecycle write alias name. The default alias
-# name is 'metricbeat-%{[agent.version]}'. 
+# name is 'metricbeat-%{[agent.version]}'.
 #setup.ilm.rollover_alias: "metricbeat"
 
 # Set the rollover index pattern. The default is "%{now/d}-000001".
@@ -1992,12 +1997,21 @@ logging.files:
 # Defines if the HTTP endpoint is enabled.
 #http.enabled: false
 
-# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost.
+# The HTTP endpoint will bind to this hostname, IP address, unix socket or named pipe.
+# When using IP addresses, it is recommended to only use localhost.
 #http.host: localhost
 
 # Port on which the HTTP endpoint will bind. Default is 5066.
 #http.port: 5066
 
+# Define which user should own the named pipe.
+#http.named_pipe.user:
+
+# Define the permissions that should be applied to the named pipe, using the Security
+# Descriptor Definition Language (SDDL) to define the permission. This option cannot be used with
+# `http.user`.
+#http.named_pipe.security_descriptor:
+
 #============================= Process Security ================================
 
 # Enable or disable seccomp system call filtering on Linux. Default is enabled.
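For Windows users, the named pipe options above combine roughly as shown below. This is a sketch
only: the pipe name, the `npipe:///` host form and the SDDL string are illustrative assumptions,
not values taken from the reference file.

[source,yaml]
----
# Serve the monitoring HTTP endpoint on a named pipe instead of a TCP port.
http.enabled: true
http.host: npipe:///metricbeat-monitor
# SDDL granting full access to the built-in Administrators group only (illustrative).
http.named_pipe.security_descriptor: "D:P(A;;GA;;;BA)"
----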
diff --git a/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml b/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml index 8a42c7bb..6d2d0519 100644 --- a/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml +++ b/vendor/github.com/elastic/beats/metricbeat/metricbeat.yml @@ -120,6 +120,8 @@ output.elasticsearch: processors: - add_host_metadata: ~ - add_cloud_metadata: ~ + - add_docker_metadata: ~ + - add_kubernetes_metadata: ~ #================================ Logging ===================================== diff --git a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/Dockerfile index 9693bbde..d45e70c0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/aerospike/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM aerospike:3.9.0 +ARG AEROSPIKE_VERSION +FROM aerospike:${AEROSPIKE_VERSION} RUN apt-get update && apt-get install -y netcat HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 3000 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile index 41947105..1af40775 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/_meta/Dockerfile @@ -1,4 +1,4 @@ -ARG APACHE_VERSION=2.4.20 +ARG APACHE_VERSION=${APACHE_VERSION} FROM httpd:$APACHE_VERSION RUN sed -i "/jessie-updates/d" /etc/apt/sources.list RUN apt-get update && apt-get install -y curl diff --git a/vendor/github.com/elastic/beats/metricbeat/module/apache/docker-compose.yml b/vendor/github.com/elastic/beats/metricbeat/module/apache/docker-compose.yml new file mode 100644 index 00000000..d136f131 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/docker-compose.yml @@ -0,0 +1,11 @@ +version: '2.3' + +services: + apache: + image: docker.elastic.co/observability-ci/beats-integration-apache:${APACHE_VERSION:-2.4.20}-1 + build: + context: ./_meta + args: + APACHE_VERSION: ${APACHE_VERSION:-2.4.20} + ports: + - 80 diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py b/vendor/github.com/elastic/beats/metricbeat/module/apache/test_apache.py similarity index 95% rename from vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py rename to vendor/github.com/elastic/beats/metricbeat/module/apache/test_apache.py index 41fa7b31..65c7c439 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_apache.py +++ b/vendor/github.com/elastic/beats/metricbeat/module/apache/test_apache.py @@ -1,9 +1,12 @@ import os -import metricbeat import unittest from nose.plugins.attrib import attr import urllib2 import time +import sys + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../tests/system')) +import metricbeat APACHE_FIELDS = metricbeat.COMMON_FIELDS + ["apache"] @@ -83,7 +86,7 @@ class ApacheStatusTest(metricbeat.BaseTest): class ApacheOldStatusTest(ApacheStatusTest): - COMPOSE_SERVICES = ['apache_2_4_12'] + COMPOSE_ENV = {'APACHE_VERSION': '2.4.12'} def verify_fields(self, evt): self.assertItemsEqual(self.de_dot(APACHE_FIELDS), evt.keys()) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/beat/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/beat/_meta/Dockerfile index cb47cbd4..ecd234a2 100644 --- 
a/vendor/github.com/elastic/beats/metricbeat/module/beat/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/beat/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM docker.elastic.co/beats/metricbeat:7.3.0 +ARG BEAT_VERSION +FROM docker.elastic.co/beats/metricbeat:${BEAT_VERSION} COPY healthcheck.sh / HEALTHCHECK --interval=1s --retries=300 CMD sh /healthcheck.sh diff --git a/vendor/github.com/elastic/beats/metricbeat/module/beat/beat.go b/vendor/github.com/elastic/beats/metricbeat/module/beat/beat.go index 1539d83c..941885e6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/beat/beat.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/beat/beat.go @@ -93,6 +93,9 @@ type Info struct { // State construct contains the relevant data from the Beat's /state endpoint type State struct { + Monitoring struct { + ClusterUUID string `json:"cluster_uuid"` + } `json:"monitoring"` Output struct { Name string `json:"name"` } `json:"output"` diff --git a/vendor/github.com/elastic/beats/metricbeat/module/beat/state/data_xpack.go b/vendor/github.com/elastic/beats/metricbeat/module/beat/state/data_xpack.go index 3b66f0fc..e92bdd92 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/beat/state/data_xpack.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/beat/state/data_xpack.go @@ -54,14 +54,16 @@ func eventMappingXPack(r mb.ReporterV2, m *MetricSet, info b.Info, content []byt "timestamp": now, } - var clusterUUID string - if isOutputES(state) { - clusterUUID = getClusterUUID(state) - if clusterUUID == "" { - // Output is ES but cluster UUID could not be determined. No point sending monitoring - // data with empty cluster UUID since it will not be associated with the correct ES - // production cluster. Log error instead. - return errors.Wrap(b.ErrClusterUUID, "could not determine cluster UUID") + clusterUUID := getMonitoringClusterUUID(state) + if clusterUUID == "" { + if isOutputES(state) { + clusterUUID = getClusterUUID(state) + if clusterUUID == "" { + // Output is ES but cluster UUID could not be determined. No point sending monitoring + // data with empty cluster UUID since it will not be associated with the correct ES + // production cluster. Log error instead. 
+ return errors.Wrap(b.ErrClusterUUID, "could not determine cluster UUID") + } } } @@ -141,3 +143,27 @@ func isOutputES(state map[string]interface{}) bool { return name == "elasticsearch" } + +func getMonitoringClusterUUID(state map[string]interface{}) string { + m, exists := state["monitoring"] + if !exists { + return "" + } + + monitoring, ok := m.(map[string]interface{}) + if !ok { + return "" + } + + c, exists := monitoring["cluster_uuid"] + if !exists { + return "" + } + + clusterUUID, ok := c.(string) + if !ok { + return "" + } + + return clusterUUID +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/beat/stats/data_xpack.go b/vendor/github.com/elastic/beats/metricbeat/module/beat/stats/data_xpack.go index e9d4fd00..8b0da8a8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/beat/stats/data_xpack.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/beat/stats/data_xpack.go @@ -83,11 +83,16 @@ func (m *MetricSet) getClusterUUID() (string, error) { return "", errors.Wrap(err, "could not get state information") } + clusterUUID := state.Monitoring.ClusterUUID + if clusterUUID != "" { + return clusterUUID, nil + } + if state.Output.Name != "elasticsearch" { return "", nil } - clusterUUID := state.Outputs.Elasticsearch.ClusterUUID + clusterUUID = state.Outputs.Elasticsearch.ClusterUUID if clusterUUID == "" { // Output is ES but cluster UUID could not be determined. No point sending monitoring // data with empty cluster UUID since it will not be associated with the correct ES diff --git a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/Dockerfile index 075f1d48..5919c2da 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/ceph/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM ceph/daemon:master-6373c6a-jewel-centos-7-x86_64 +ARG CEPH_VERSION +FROM ceph/daemon:${CEPH_VERSION} RUN yum -q install -y jq && yum clean all && rm -fr /var/cache/yum diff --git a/vendor/github.com/elastic/beats/metricbeat/module/consul/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/consul/_meta/Dockerfile index 982e0dca..640e783c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/consul/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/consul/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM consul:1.4.2 +ARG CONSUL_VERSION +FROM consul:${CONSUL_VERSION} ENV CONSUL_BIND_INTERFACE='eth0' diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile index 81667fce..7922b1b1 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/couchbase/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM couchbase:4.5.1 +ARG COUCHBASE_VERSION +FROM couchbase:${COUCHBASE_VERSION} HEALTHCHECK --interval=1s --retries=90 CMD [ "$(curl -s -o /dev/null -w ''%{http_code}'' http://localhost:8091/pools/default/buckets/beer-sample)" -eq "200" ] COPY configure-node.sh /opt/couchbase diff --git a/vendor/github.com/elastic/beats/metricbeat/module/couchdb/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/couchdb/_meta/Dockerfile index dc4561c6..f72cd9af 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/couchdb/_meta/Dockerfile +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/couchdb/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM apache/couchdb:1.7 +ARG COUCHDB_VERSION +FROM apache/couchdb:${COUCHDB_VERSION} COPY ./local.ini /etc/couchdb/local.ini EXPOSE 5984 HEALTHCHECK --interval=1s --retries=90 CMD curl -f http://localhost:5984/ | grep Welcome diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/data.json index 88123286..dd9f588a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/_meta/data.json @@ -38,8 +38,8 @@ "org_label-schema_version": "6.5.1" }, "size": { - "root_fs": 0, - "rw": 0 + "rw": 193031181, + "root_fs": 1290400448 }, "status": "Up 7 minutes (healthy)" } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go index 940d6e33..543e4694 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/container/container.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package container import ( @@ -65,7 +67,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // This is based on https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/list-containers. func (m *MetricSet) Fetch(ctx context.Context, r mb.ReporterV2) error { // Fetch a list of all containers. - containers, err := m.dockerClient.ContainerList(ctx, types.ContainerListOptions{}) + containers, err := m.dockerClient.ContainerList(ctx, types.ContainerListOptions{Size: true}) if err != nil { return errors.Wrap(err, "failed to get docker containers list") } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json index 370558ee..62f66524 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/data.json @@ -1,71 +1,108 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "agent": { - "hostname": "host.example.com", - "name": "host.example.com" - }, "container": { - "id": "cc78e58acfda4501105dc4de8e3ae218f2da616213e6e3af168c40103829302a", + "id": "7f3ca1f1b2b310362e90f700d2b2e52ebd46ef6ddf10c0704f22b25686c466ab", "image": { - "name": "metricbeat_elasticsearch" + "name": "metricbeat_beat" }, - "name": "metricbeat_elasticsearch_1_df866b3a7b3d", + "name": "metricbeat_beat_run_8ba23fa682a6", "runtime": "docker" }, "docker": { "container": { "labels": { - "com_docker_compose_config-hash": "e3e0a2c6e5d1afb741bc8b1ecb09cda0395886b7a3e5084a9fd110be46d70f78", - "com_docker_compose_container-number": "1", - "com_docker_compose_oneoff": "False", + "com_docker_compose_oneoff": "True", "com_docker_compose_project": "metricbeat", - "com_docker_compose_service": "elasticsearch", - "com_docker_compose_slug": "df866b3a7b3d50c0802350cbe58ee5b34fa32b7f6ba7fe9e48cde2c12dd0201d", - "com_docker_compose_version": "1.23.1", - "license": "Elastic License", - "org_label-schema_build-date": "20181006", - "org_label-schema_license": "GPLv2", - "org_label-schema_name": "elasticsearch", - "org_label-schema_schema-version": 
"1.0", - "org_label-schema_url": "https://www.elastic.co/products/elasticsearch", - "org_label-schema_vcs-url": "https://github.com/elastic/elasticsearch-docker", - "org_label-schema_vendor": "Elastic", - "org_label-schema_version": "6.5.1" + "com_docker_compose_service": "beat", + "com_docker_compose_slug": "8ba23fa682a68e2dc082536da22f59eb2d200b3534909fe934807dd5d847424", + "com_docker_compose_version": "1.24.1" } }, "cpu": { "core": { "0": { - "pct": 0.03263313721518987, - "ticks": 38346196894 + "norm": { + "pct": 0.00105707400990099 + }, + "pct": 0.00845659207920792, + "ticks": 7410396430 }, "1": { - "pct": 0.014317838987341772, - "ticks": 37143007802 + "norm": { + "pct": 0.004389216831683168 + }, + "pct": 0.035113734653465345, + "ticks": 7079258391 }, "2": { - "pct": 0.0028625296202531647, - "ticks": 37194678570 + "norm": { + "pct": 0.003178435024752475 + }, + "pct": 0.0254274801980198, + "ticks": 7140978706 }, "3": { - "pct": 0.005687502784810126, - "ticks": 39335551141 + "norm": { + "pct": 0.0033261257425742574 + }, + "pct": 0.02660900594059406, + "ticks": 7705738146 + }, + "4": { + "norm": { + "pct": 0.0016827236386138613 + }, + "pct": 0.01346178910891089, + "ticks": 8131054429 + }, + "5": { + "norm": { + "pct": 0.000781541707920792 + }, + "pct": 0.006252333663366336, + "ticks": 7213899699 + }, + "6": { + "norm": { + "pct": 0.0005364748762376238 + }, + "pct": 0.00429179900990099, + "ticks": 7961016581 + }, + "7": { + "norm": { + "pct": 0.0005079449257425743 + }, + "pct": 0.004063559405940594, + "ticks": 7946529895 } }, "kernel": { - "pct": 0.010126582278481013, - "ticks": 10560000000 + "norm": { + "pct": 0.007425742574257425 + }, + "pct": 0.0594059405940594, + "ticks": 26810000000 }, "system": { - "pct": 4, - "ticks": 5566563680000000 + "norm": { + "pct": 1 + }, + "pct": 8, + "ticks": 65836400000000 }, "total": { - "pct": 0.05550100860759494 + "norm": { + "pct": 0.015459536757425743 + }, + "pct": 0.12367629405940594 }, "user": { - "pct": 0.05063291139240506, - "ticks": 139520000000 + "norm": { + "pct": 0.006188118811881188 + }, + "pct": 0.04950495049504951, + "ticks": 35720000000 } } }, @@ -75,7 +112,8 @@ "module": "docker" }, "metricset": { - "name": "cpu" + "name": "cpu", + "period": 10000 }, "service": { "address": "/var/run/docker.sock", diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml index c6c8fc7a..228b7224 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/_meta/fields.yml @@ -9,6 +9,11 @@ format: percent description: > Percentage of time in kernel space. + - name: kernel.norm.pct + type: scaled_float + format: percent + description: > + Percentage of time in kernel space normalized by the number of CPU cores. - name: kernel.ticks type: long description: > @@ -18,6 +23,11 @@ format: percent description: > Percentage of total CPU time in the system. + - name: system.norm.pct + type: scaled_float + format: percent + description: > + Percentage of total CPU time in the system normalized by the number of CPU cores. - name: system.ticks type: long description: > @@ -27,6 +37,11 @@ format: percent description: > Percentage of time in user space. + - name: user.norm.pct + type: scaled_float + format: percent + description: > + Percentage of time in user space normalized by the number of CPU cores. 
- name: user.ticks type: long description: > @@ -36,12 +51,23 @@ format: percent description: > Total CPU usage. + - name: total.norm.pct + type: scaled_float + format: percent + description: > + Total CPU usage normalized by the number of CPU cores. - name: core.*.pct type: object object_type: scaled_float format: percent description: > Percentage of CPU time in this core. + - name: core.*.norm.pct + type: object + object_type: scaled_float + format: percent + description: > + Percentage of CPU time in this core, normalized by the number of CPU cores. - name: core.*.ticks type: object object_type: long diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go index a9fcbbc7..a07b4f84 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package cpu import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go index 66b634a6..bcce3533 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/cpu_test.go @@ -34,9 +34,6 @@ func cpuUsageFor(stats types.StatsJSON) *cpuUsage { Stat: &docker.Stat{Stats: stats}, systemDelta: 1000000000, // Nanoseconds in a second } - if len(stats.CPUStats.CPUUsage.PercpuUsage) == 0 { - u.cpus = 1 - } return &u } @@ -44,31 +41,33 @@ func TestCPUService_PerCpuUsage(t *testing.T) { oldPerCpuValuesTest := [][]uint64{{1, 9, 9, 5}, {1, 2, 3, 4}, {0, 0, 0, 0}} newPerCpuValuesTest := [][]uint64{{100000001, 900000009, 900000009, 500000005}, {101, 202, 303, 404}, {0, 0, 0, 0}} var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) for index := range statsList { statsList[index].PreCPUStats.CPUUsage.PercpuUsage = oldPerCpuValuesTest[index] statsList[index].CPUStats.CPUUsage.PercpuUsage = newPerCpuValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS } testCase := []struct { given types.StatsJSON expected common.MapStr }{ {statsList[0], common.MapStr{ - "0": common.MapStr{"pct": float64(0.40)}, - "1": common.MapStr{"pct": float64(3.60)}, - "2": common.MapStr{"pct": float64(3.60)}, - "3": common.MapStr{"pct": float64(2.00)}, + "0": common.MapStr{"pct": float64(0.40), "norm": common.MapStr{"pct": float64(0.40) / float64(onlineCPUS)}}, + "1": common.MapStr{"pct": float64(3.60), "norm": common.MapStr{"pct": float64(3.60) / float64(onlineCPUS)}}, + "2": common.MapStr{"pct": float64(3.60), "norm": common.MapStr{"pct": float64(3.60) / float64(onlineCPUS)}}, + "3": common.MapStr{"pct": float64(2.00), "norm": common.MapStr{"pct": float64(2.00) / float64(onlineCPUS)}}, }}, {statsList[1], common.MapStr{ - "0": common.MapStr{"pct": float64(0.0000004)}, - "1": common.MapStr{"pct": float64(0.0000008)}, - "2": common.MapStr{"pct": float64(0.0000012)}, - "3": common.MapStr{"pct": float64(0.0000016)}, + "0": common.MapStr{"pct": float64(0.0000004), "norm": common.MapStr{"pct": float64(0.0000004) / float64(onlineCPUS)}}, + "1": common.MapStr{"pct": float64(0.0000008), "norm": common.MapStr{"pct": float64(0.0000008) / float64(onlineCPUS)}}, + "2": common.MapStr{"pct": float64(0.0000012), "norm": common.MapStr{"pct": float64(0.0000012) / 
float64(onlineCPUS)}}, + "3": common.MapStr{"pct": float64(0.0000016), "norm": common.MapStr{"pct": float64(0.0000016) / float64(onlineCPUS)}}, }}, {statsList[2], common.MapStr{ - "0": common.MapStr{"pct": float64(0)}, - "1": common.MapStr{"pct": float64(0)}, - "2": common.MapStr{"pct": float64(0)}, - "3": common.MapStr{"pct": float64(0)}, + "0": common.MapStr{"pct": float64(0), "norm": common.MapStr{"pct": float64(0) / float64(onlineCPUS)}}, + "1": common.MapStr{"pct": float64(0), "norm": common.MapStr{"pct": float64(0) / float64(onlineCPUS)}}, + "2": common.MapStr{"pct": float64(0), "norm": common.MapStr{"pct": float64(0) / float64(onlineCPUS)}}, + "3": common.MapStr{"pct": float64(0), "norm": common.MapStr{"pct": float64(0) / float64(onlineCPUS)}}, }}, } for _, tt := range testCase { @@ -79,7 +78,7 @@ func TestCPUService_PerCpuUsage(t *testing.T) { s.(common.MapStr).Delete("ticks") } if !equalEvent(tt.expected, out) { - t.Errorf("PerCpuUsage(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.PercpuUsage, out, tt.expected) + t.Errorf("PerCPUUsage(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.PercpuUsage, out, tt.expected) } } } @@ -88,16 +87,18 @@ func TestCPUService_TotalUsage(t *testing.T) { oldTotalValuesTest := []uint64{100, 50, 10} totalValuesTest := []uint64{2, 500000050, 10} var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) for index := range statsList { statsList[index].PreCPUStats.CPUUsage.TotalUsage = oldTotalValuesTest[index] statsList[index].CPUStats.CPUUsage.TotalUsage = totalValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS } testCase := []struct { given types.StatsJSON expected float64 }{ {statsList[0], -1}, - {statsList[1], 0.50}, + {statsList[1], 2}, {statsList[2], 0}, } for _, tt := range testCase { @@ -109,20 +110,49 @@ func TestCPUService_TotalUsage(t *testing.T) { } } -func TestCPUService_UsageInKernelmode(t *testing.T) { - usageOldValuesTest := []uint64{100, 10, 500000050} - usageValuesTest := []uint64{3, 500000010, 500000050} +func TestCPUService_TotalUsageNormalized(t *testing.T) { + oldTotalValuesTest := []uint64{100, 50, 10} + totalValuesTest := []uint64{2, 500000050, 10} var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) for index := range statsList { - statsList[index].PreCPUStats.CPUUsage.UsageInKernelmode = usageOldValuesTest[index] - statsList[index].CPUStats.CPUUsage.UsageInKernelmode = usageValuesTest[index] + statsList[index].PreCPUStats.CPUUsage.TotalUsage = oldTotalValuesTest[index] + statsList[index].CPUStats.CPUUsage.TotalUsage = totalValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS } testCase := []struct { given types.StatsJSON expected float64 }{ {statsList[0], -1}, - {statsList[1], 0.50}, + {statsList[1], 0.5}, + {statsList[2], 0}, + } + for _, tt := range testCase { + usage := cpuUsageFor(tt.given) + out := usage.TotalNormalized() + if tt.expected != out { + t.Errorf("totalUsageNormalized(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.TotalUsage, out, tt.expected) + } + } +} + +func TestCPUService_UsageInKernelmode(t *testing.T) { + usageOldValuesTest := []uint64{100, 10, 500000050} + usageValuesTest := []uint64{3, 500000010, 500000050} + var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) + for index := range statsList { + statsList[index].PreCPUStats.CPUUsage.UsageInKernelmode = usageOldValuesTest[index] + statsList[index].CPUStats.CPUUsage.UsageInKernelmode = usageValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS + } + 
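+ // Expected values below follow from cpuUsageFor pinning systemDelta to one second + // with four online CPUs: a decreasing counter (case 0) yields -1, case 1's kernel-time + // delta of 500000000ns (0.5s) yields 4 * 0.5 = 2, and a zero delta (case 2) yields 0.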
testCase := []struct { + given types.StatsJSON + expected float64 + }{ + {statsList[0], -1}, + {statsList[1], 2}, {statsList[2], 0}, } for _, tt := range testCase { @@ -134,21 +164,50 @@ func TestCPUService_UsageInKernelmode(t *testing.T) { } } -func TestCPUService_UsageInUsermode(t *testing.T) { - usageOldValuesTest := []uint64{0, 1965, 500} - usageValuesTest := []uint64{500000000, 325, 1000000500} +func TestCPUService_UsageInKernelmodeNormalized(t *testing.T) { + usageOldValuesTest := []uint64{100, 10, 500000050} + usageValuesTest := []uint64{3, 500000010, 500000050} var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) for index := range statsList { - statsList[index].PreCPUStats.CPUUsage.UsageInUsermode = usageOldValuesTest[index] - statsList[index].CPUStats.CPUUsage.UsageInUsermode = usageValuesTest[index] + statsList[index].PreCPUStats.CPUUsage.UsageInKernelmode = usageOldValuesTest[index] + statsList[index].CPUStats.CPUUsage.UsageInKernelmode = usageValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS } testCase := []struct { given types.StatsJSON expected float64 }{ - {statsList[0], 0.50}, + {statsList[0], -1}, + {statsList[1], 0.5}, + {statsList[2], 0}, + } + for _, tt := range testCase { + usage := cpuUsageFor(tt.given) + out := usage.InKernelModeNormalized() + if out != tt.expected { + t.Errorf("usageInKernelmodeNormalized(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.UsageInKernelmode, out, tt.expected) + } + } +} + +func TestCPUService_UsageInUsermode(t *testing.T) { + usageOldValuesTest := []uint64{0, 1965, 500} + usageValuesTest := []uint64{500000000, 325, 1000000500} + var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) + for index := range statsList { + statsList[index].PreCPUStats.CPUUsage.UsageInUsermode = usageOldValuesTest[index] + statsList[index].CPUStats.CPUUsage.UsageInUsermode = usageValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS + } + testCase := []struct { + given types.StatsJSON + expected float64 + }{ + {statsList[0], 2}, {statsList[1], -1}, - {statsList[2], 1}, + {statsList[2], 4}, } for _, tt := range testCase { usage := cpuUsageFor(tt.given) @@ -159,6 +218,33 @@ func TestCPUService_UsageInUsermode(t *testing.T) { } } +func TestCPUService_UsageInUsermodeNormalized(t *testing.T) { + usageOldValuesTest := []uint64{0, 1965, 500} + usageValuesTest := []uint64{500000000, 325, 1000000500} + var statsList = make([]types.StatsJSON, 3) + var onlineCPUS = uint32(4) + for index := range statsList { + statsList[index].PreCPUStats.CPUUsage.UsageInUsermode = usageOldValuesTest[index] + statsList[index].CPUStats.CPUUsage.UsageInUsermode = usageValuesTest[index] + statsList[index].CPUStats.OnlineCPUs = onlineCPUS + } + testCase := []struct { + given types.StatsJSON + expected float64 + }{ + {statsList[0], 0.5}, + {statsList[1], -1}, + {statsList[2], 1}, + } + for _, tt := range testCase { + usage := cpuUsageFor(tt.given) + out := usage.InUserModeNormalized() + if out != tt.expected { + t.Errorf("usageInUsermodeNormalized(%v) => %v, want %v", tt.given.CPUStats.CPUUsage.UsageInUsermode, out, tt.expected) + } + } +} + func equalEvent(expectedEvent common.MapStr, event common.MapStr) bool { return reflect.DeepEqual(expectedEvent, event) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/data.go index a8c3042e..3614d55e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/data.go +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/data.go @@ -30,21 +30,33 @@ func eventsMapping(r mb.ReporterV2, cpuStatsList []CPUStats) { func eventMapping(r mb.ReporterV2, stats *CPUStats) { fields := common.MapStr{ - "core": stats.PerCpuUsage, + "core": stats.PerCPUUsage, "total": common.MapStr{ "pct": stats.TotalUsage, + "norm": common.MapStr{ + "pct": stats.TotalUsageNormalized, + }, }, "kernel": common.MapStr{ "ticks": stats.UsageInKernelmode, "pct": stats.UsageInKernelmodePercentage, + "norm": common.MapStr{ + "pct": stats.UsageInKernelmodePercentageNormalized, + }, }, "user": common.MapStr{ "ticks": stats.UsageInUsermode, "pct": stats.UsageInUsermodePercentage, + "norm": common.MapStr{ + "pct": stats.UsageInUsermodePercentageNormalized, + }, }, "system": common.MapStr{ "ticks": stats.SystemUsage, "pct": stats.SystemUsagePercentage, + "norm": common.MapStr{ + "pct": stats.SystemUsagePercentageNormalized, + }, }, } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go index cfee7ac0..02d045f3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/cpu/helper.go @@ -26,16 +26,20 @@ import ( ) type CPUStats struct { - Time common.Time - Container *docker.Container - PerCpuUsage common.MapStr - TotalUsage float64 - UsageInKernelmode uint64 - UsageInKernelmodePercentage float64 - UsageInUsermode uint64 - UsageInUsermodePercentage float64 - SystemUsage uint64 - SystemUsagePercentage float64 + Time common.Time + Container *docker.Container + PerCPUUsage common.MapStr + TotalUsage float64 + TotalUsageNormalized float64 + UsageInKernelmode uint64 + UsageInKernelmodePercentage float64 + UsageInKernelmodePercentageNormalized float64 + UsageInUsermode uint64 + UsageInUsermodePercentage float64 + UsageInUsermodePercentageNormalized float64 + SystemUsage uint64 + SystemUsagePercentage float64 + SystemUsagePercentageNormalized float64 } // CPUService is a helper to collect docker CPU metrics @@ -61,19 +65,23 @@ func (c *CPUService) getCPUStats(myRawStat *docker.Stat, dedot bool) CPUStats { usage := cpuUsage{Stat: myRawStat} stats := CPUStats{ - Time: common.Time(myRawStat.Stats.Read), - Container: docker.NewContainer(myRawStat.Container, dedot), - TotalUsage: usage.Total(), - UsageInKernelmode: myRawStat.Stats.CPUStats.CPUUsage.UsageInKernelmode, - UsageInKernelmodePercentage: usage.InKernelMode(), - UsageInUsermode: myRawStat.Stats.CPUStats.CPUUsage.UsageInUsermode, - UsageInUsermodePercentage: usage.InUserMode(), - SystemUsage: myRawStat.Stats.CPUStats.SystemUsage, - SystemUsagePercentage: usage.System(), + Time: common.Time(myRawStat.Stats.Read), + Container: docker.NewContainer(myRawStat.Container, dedot), + TotalUsage: usage.Total(), + TotalUsageNormalized: usage.TotalNormalized(), + UsageInKernelmode: myRawStat.Stats.CPUStats.CPUUsage.UsageInKernelmode, + UsageInKernelmodePercentage: usage.InKernelMode(), + UsageInKernelmodePercentageNormalized: usage.InKernelModeNormalized(), + UsageInUsermode: myRawStat.Stats.CPUStats.CPUUsage.UsageInUsermode, + UsageInUsermodePercentage: usage.InUserMode(), + UsageInUsermodePercentageNormalized: usage.InUserModeNormalized(), + SystemUsage: myRawStat.Stats.CPUStats.SystemUsage, + SystemUsagePercentage: usage.System(), + SystemUsagePercentageNormalized: usage.SystemNormalized(), } if c.Cores { - stats.PerCpuUsage = usage.PerCPU() + stats.PerCPUUsage = 
usage.PerCPU() } return stats @@ -84,17 +92,33 @@ func (c *CPUService) getCPUStats(myRawStat *docker.Stat, dedot bool) CPUStats { type cpuUsage struct { *docker.Stat - cpus int + cpus uint32 systemDelta uint64 } -func (u *cpuUsage) CPUs() int { +// CPUs returns the number of CPUs. If the cached count is zero, the field will +// be initialized with the corresponding value retrieved from the Docker API. +func (u *cpuUsage) CPUs() uint32 { if u.cpus == 0 { - u.cpus = len(u.Stats.CPUStats.CPUUsage.PercpuUsage) + if u.Stats.CPUStats.OnlineCPUs != 0 { + u.cpus = u.Stats.CPUStats.OnlineCPUs + } else { + // Certain versions of docker don't have `online_cpus`. + // In addition, certain kernel versions will report spurious zeros from the cgroups usage_percpu. + var realCPUCount uint32 + for _, rCPUUsage := range u.Stats.CPUStats.CPUUsage.PercpuUsage { + if rCPUUsage != 0 { + realCPUCount++ + } + } + u.cpus = realCPUCount + } + } return u.cpus } +// SystemDelta calculates the system CPU time delta between two successive readings. func (u *cpuUsage) SystemDelta() uint64 { if u.systemDelta == 0 { u.systemDelta = u.Stats.CPUStats.SystemUsage - u.Stats.PreCPUStats.SystemUsage @@ -102,6 +126,7 @@ func (u *cpuUsage) SystemDelta() uint64 { return u.systemDelta } +// PerCPU calculates per-CPU usage. func (u *cpuUsage) PerCPU() common.MapStr { var output common.MapStr if len(u.Stats.CPUStats.CPUUsage.PercpuUsage) == len(u.Stats.PreCPUStats.CPUUsage.PercpuUsage) { @@ -110,7 +135,14 @@ func (u *cpuUsage) PerCPU() common.MapStr { cpu := common.MapStr{} cpu["pct"] = u.calculatePercentage( u.Stats.CPUStats.CPUUsage.PercpuUsage[index], - u.Stats.PreCPUStats.CPUUsage.PercpuUsage[index]) + u.Stats.PreCPUStats.CPUUsage.PercpuUsage[index], + u.CPUs()) + cpu["norm"] = common.MapStr{ + "pct": u.calculatePercentage( + u.Stats.CPUStats.CPUUsage.PercpuUsage[index], + u.Stats.PreCPUStats.CPUUsage.PercpuUsage[index], + 1), + } cpu["ticks"] = u.Stats.CPUStats.CPUUsage.PercpuUsage[index] output[strconv.Itoa(index)] = cpu } @@ -118,27 +150,51 @@ return output } +// Total calculates total CPU usage. func (u *cpuUsage) Total() float64 { - return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.TotalUsage, u.Stats.PreCPUStats.CPUUsage.TotalUsage) + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.TotalUsage, u.Stats.PreCPUStats.CPUUsage.TotalUsage, u.CPUs()) } +// TotalNormalized calculates total CPU usage normalized by the number of CPU cores. +func (u *cpuUsage) TotalNormalized() float64 { + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.TotalUsage, u.Stats.PreCPUStats.CPUUsage.TotalUsage, 1) +} + +// InKernelMode calculates percentage of time in kernel space. func (u *cpuUsage) InKernelMode() float64 { - return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInKernelmode, u.Stats.PreCPUStats.CPUUsage.UsageInKernelmode) + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInKernelmode, u.Stats.PreCPUStats.CPUUsage.UsageInKernelmode, u.CPUs()) } +// InKernelModeNormalized calculates percentage of time in kernel space normalized by the number of CPU cores. +func (u *cpuUsage) InKernelModeNormalized() float64 { + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInKernelmode, u.Stats.PreCPUStats.CPUUsage.UsageInKernelmode, 1) +} + +// InUserMode calculates percentage of time in user space. 
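+// For example, over a one-second window on a host with four online CPUs, a container +// that burned one full core of user time reports roughly 1.0 here (docker-client style, +// scaled by the CPU count), while InUserModeNormalized below reports roughly 0.25.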
func (u *cpuUsage) InUserMode() float64 { - return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInUsermode, u.Stats.PreCPUStats.CPUUsage.UsageInUsermode) + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInUsermode, u.Stats.PreCPUStats.CPUUsage.UsageInUsermode, u.CPUs()) } +// InUserModeNormalized calculates percentage of time in user space normalized by the number of CPU cores. +func (u *cpuUsage) InUserModeNormalized() float64 { + return u.calculatePercentage(u.Stats.CPUStats.CPUUsage.UsageInUsermode, u.Stats.PreCPUStats.CPUUsage.UsageInUsermode, 1) +} + +// System calculates percentage of total CPU time in the system. func (u *cpuUsage) System() float64 { - return u.calculatePercentage(u.Stats.CPUStats.SystemUsage, u.Stats.PreCPUStats.SystemUsage) + return u.calculatePercentage(u.Stats.CPUStats.SystemUsage, u.Stats.PreCPUStats.SystemUsage, u.CPUs()) +} + +// SystemNormalized calculates percentage of total CPU time in the system, normalized by the number of CPU cores. +func (u *cpuUsage) SystemNormalized() float64 { + return u.calculatePercentage(u.Stats.CPUStats.SystemUsage, u.Stats.PreCPUStats.SystemUsage, 1) } // This function is meant to calculate the % CPU time change between two successive readings. // The "oldValue" refers to the CPU statistics of the last read. // Time here is expressed in seconds and not in nanoseconds. // The main goal is to expose the % in the same way it's displayed by the docker client. -func (u *cpuUsage) calculatePercentage(newValue uint64, oldValue uint64) float64 { +func (u *cpuUsage) calculatePercentage(newValue uint64, oldValue uint64, numCPUS uint32) float64 { if newValue < oldValue { logp.Err("Error calculating CPU time change for docker module: new stats value (%v) is lower than the old one (%v)", newValue, oldValue) return -1 @@ -148,5 +204,5 @@ func (u *cpuUsage) calculatePercentage(newValue uint64, oldValue uint64) float64 { return 0 } - return float64(uint64(u.CPUs())*value) / float64(u.SystemDelta()) + return float64(uint64(numCPUS)*value) / float64(u.SystemDelta()) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go index fbfadc09..dacf8637 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/diskio/diskio.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package diskio import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go index adee725a..623dc169 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/docker.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package docker import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/event/event.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/event/event.go index f81168b9..c8ab896d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/event/event.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/event/event.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+// +build linux darwin windows + package event import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/fields.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/fields.go index 3fd5b1f0..d2fd8a42 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/fields.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/fields.go @@ -32,5 +32,5 @@ func init() { // AssetDocker returns asset data. // This is the base64 encoded gzipped contents of ../metricbeat/module/docker. func AssetDocker() string { - return "eJzsm0uP47jxwO/9KQrzP/yBwbSNBIscfAiw6ZlgGsnsNOaRHL00VbIZU6RCUvZ4P33Ah2xZoiRblt3di/WxaRd/LNaLLPY9rHE3g0TSNao7AMMMxxm8ee/+8OYOIEFNFcsNk2IGf70DAPCDoA0xGqjkHKnBBFIlszA2uQNQyJFonMGS3AHolVRmTqVI2XIGKeEa7wBShjzRMyf1HgTJsMJiP2aXWwlKFnn4S4THfh5FKlVG7J+BiMTBMW0Y1UAWsjBB7P9rUIUQTCyBSmEIE6j0JEip0lSJ9t/cj8TAOuAqStvLggyNYnQ/uf0cq6z81LGO0bKMiORorIRb424rVX2sA9F+HrxAMCtiYEs04A+khd1eJsCssLGOSZxLITEY50qIwfOg3hODsF2hJzio0PKFmeIY1goKPaZ2yqm95PisLJ+TJFGoNcbnZvnQaR+fYC+6Zcnst7p247Z63nLZbxizWGixzyqRktLM07omDmBcimVksIfNfb5JQ7iHkykQzp2BpIyjLu21xVCPALfXYPsaqA5EzqdWZIOwQBSl5YJUQFdELDEBzQRFP8CkiG+wIcsRLfoxI0t0MifNuJcXl0S8L4UwLEN4ePo+TrBboxLIJzk10fVrSjgm85RLUv+Czw0zyFFRFPXRHhU9+R9ZPdnttEtiIsCAzgnF+EYFXMPoOr5hEePqc8in7+DknUagd9pg9twKcz7qyb3qrEcEtC7ssRXnxXr9xScuNKrn1lbQkUXp2lqHejXT6pvd7enNNfVtb0mFJssWNCoVTt62ssnFf7Ax5P84v+U+H/sD0567a0Xtm92/pvPN4ZciW6A6kAbDiKDu63am10xeVCIzvYbH6edxsoVCEq8/BxRCP1NaZAV36drK1ZAUyh4ibCjjLN0n+thRoQ20Civza9RHh00cBH3AW+xMo5TtBSy9pe3HJyzgb/anDn44u2oeN3rRz9ItLZRCYYKOcxs5kcraoaxqlXEv7og7CeYKqbW+Gfxl8tNQTz4LdKtYQ2+j+I8T/OocaBj1S/EgS29QvAInCno+xTif241OQ9VFlhG1u14mIuK1+pRN9TJH5Q67r9a3XHYqN+F1OFlF6T3W6wr9Z/KzhnlHWEtO3BzX40OvZ+tyzi87x75y/GCJYlL3t40j3v76yVjiL1vJhjBOFhyj86ZKZqMvUxaKxqezsseb7tsK3c+tnfmTEmDGjCkdt24HBw5CrczrkHTOKuuB44Ic0hTWlwsaVta37BM4yuU/vi9j5GlbcaQYYxRbFF0JIHo8hvoR+aJV/IsoJgtthUw3hBdY4Tpe2zsbHVEkdnVSADP62LLLda2QcLOiK6TrEcJaRVrzcH30g4+VbybEENgyzkEKvoMFHiKCb/YltV6QtnFDoV3ukdDwvV8/fvj5n98+Pnz88PCPX4EJbVThvAlWRPs78UJjAkbComA8cWoLv2VZ7cbn/MicEsaZWGqjkKyjvsSEwWUjQffsP5WCFi6t2gkwgfqmXS83VDfLywYqk3j8jLnR4AjihAVln9sOQpHMIy1A6OoPnoBU1weKJC6pshnK3ILETdTNIguTF7EQNUJsqqK0zLPfmh/MzBsWVCWJe8gwpTTMdZ9rrK+PEPWcnHEuE1uKrAGuYxOeB+NkZ0MmS1AYlrJmh7LPk0I5fx2zge+C/bcoWQ+QsGQbG6jzkL3izcoqZk6uSPl4AAt51gG/A5YCM9aitdHvfLrarhhd+bNYOAj5xSVMITV85yZEUQ9pV3zT4F5asKzyuMET9T9sGLHL71vAronuTfJcO9wwZYrGMRHGbqI3SoBjCoXLgpNYaBr5mYFlIZwDJXSFicfSQLSWlLl7GSObRtZSbpXwnCyQD+3uXND49/P2wN3uxQET6UUdpEeRyjLgw4LYYtJWl8bkejadJpLqia8nJ1RmUxRLJnCqMEWFguKU5Gzqx+cKM2lwTnI23/xp8uefpv83TZjOOdnd+z7y/ZYleM8Oz84ufchV1tBjufXnDSpnpkdvls527pzYmvwKXuWdSuyve/xEkWd5TabwhO8GUO2PBZtU2sg8v4mqwkwnUcVu8K7B5DJtl6qucV8VapTylCu1iZZTcQ4Xt6Ms57fLW7XhZ2lGugwzedQVODvWfXISxilvfWR4OzD/RNRVGZ9nJM+ZWIYvv3n75jzVfiHboK3w4tjVci7BOm3pMDqxo+6AolLScolIZZax0U7BD06azfiuiWBTzb+ZSOS2blV9MXaQj47QwPBWGxewj//Ny5JboD0hWYft6lXwHlWxDTE430q1tgan0UzaGxgR9i7uHuYwN4S5QaPp5U0J4xMqi5Z7mc4OSyfM3wmzeb+wvhAPwpy1+cG4aglByk0XJ1F6tIrny9evR5Hi3FLned0wkCvULoVZC3JHjo6DdfRWu9d4oPcl24ncn1qIS6mtb/Tc872xdv279rc8w/c9Iz+eYdc/kR8ldeQ5I7y8fXaUrXsLL82RakptVGACjQ3Wl5Rgv3gRw2uweGEaLWEuqpNL0L1oN1VLfR5vbsY984JG/6OgMrOpMmxEqO4OTf5z3fi5noHUa39WLszJbPeSRHWfF3tcewBZmPFAmBO6xmbArHQElJKNKwkY7fjoxbtG6MlI4Qs3ONJ2M1V6N7dxmM+FWcrfo8PIcmEv1mH2hC/HYU5Hup3DdDMdEsxCFi3/szqkfRHPI/5f6Y7/X9R1YustldfjJ2MllvE2/I+Ecp2EMqqDtOSN36GDjJVIxneQPxLIwATyvwAAAP//Vy+nyQ==" + return 
"eJzsm1uP27gVgN/nVxykDwWCxEaLRR/mocB2kiKDNptBLu2jl6aObNYUqZKUHefXL3iRLUuUZMuyx7NYP1rW4cdz5eX4Laxwew+JpCtUdwCGGY738Oqd++LVHUCCmiqWGybFPfz9DgDAPwRtiNFAJedIDSaQKpmFZ5M7AIUcicZ7WJA7AL2UysyoFClb3ENKuMY7gJQhT/S9k/oWBMmwwmI/ZptbCUoWefgmwmM/jyKVKiP2ayAicXBMG0Y1kLksTBD7Zw2qEIKJBVApDGEClZ4EKVWaKtHul7snMbAOuIrSdrIgQ6MY3Q1uP4cqKz91rEO0LCMiOXhWwq1wu5Gq/qwD0X4evEAwS2JgQzTgd6SFNS8TYJbYmMckzqWQGIxzJcTgaVDviEHYLNET7FVo+cJIcQzrBYUeUzvl0F5yfFSWz0iSKNQa42OzfOiwj0+wE90yZfajrt24r542XfYDYx4LLf5ZJVJSmlla18QejEuxiDzsYXOfr9IQ7uFkCoRz5yAp46hLf21x1APAzSXYvgSqPZGLqSVZI8wRRem5IBXQJRELTEAzQdE/YFLEDWzIYkSPfszIAp3MSTPv5cU5Ge9zIQzLEB6evo2T7FaoBPJJTk10/poSjsks5ZLUf+Brwz3kqCiK+tMeFT35l6yerDntlJgIMKBzQjFuqIArpMpukBksF+HsByYw3zovFUU2R2VfsCajUrXlmDAzw+gq7oqRsOlLNU/fwMk7Trd6qw0+u1pd9vHkXsFWiwGtC/sWXKKD/RzXCDMc2zUCmBMbH7jQqJ5bp0GTFqXLeR3qLfhAg/ccy7tZXSwl9OnU+fPV9fl1F0WFJotOtGexd43vHPPaR5PXrTOQ8/9h45H/cnZNnz7MaEx77q4ZdRrmpqf1ZgR7tgds/9RPD+lfDuh2wR0x1O40gOkVk2dtvJleweP00zhrUIUkvqsdsL36mdIiK7jbBFi5GpJCMbFwhuQs3W0fYgcQbaBVWJlfYte1N+Ig6D3efGsaG+RewDKo2l4+YgL/sK86+OHsqnmI0Yt+km5poRQKE3Sc2+qHVNaOeqpeGY/ijvSUYK6QWu+7h79NfhoaySeBbhRr6G2U+HGCX1wADaO+lQiy9AbFCwiioOdjnPO5w+g4VF1kGVHby1UiIl5qTNlSL3NU7gjtxcaWq06lEV5GkFWU3uO9bkf0THHWcO8Ia8mJ68Nl+9BLn7qc05edY19kvLdEMam7O4wR75T8YCzxVzhkTRgnc47RcVMls9GnKQtF48NZ2eMN93WJ7nXrZ36nBJgxY8rArfvBnoNQK/MyJJ2jynriOKOGNIX11YKGl/VN+wiOcvqP78oceZwpDhRjjGLzoqsARLfHUN8inzWL/xDFZKGtkOma8AIrXIdze2OzI4rEzk4KYEYfenY5ryUSbpZ0iXQ1QlqrSGturg9e+FD5ZUIMgQ3jHKTgW5jjPiP4FoKkdsOsbd5QaKd7IDT87tcP73/+99cPDx/eP/zrV2BCG1W4aIIl0f6mrdCYgJEwLxhPnNrCuyyrndqdnplTwjgTC20UklU0lpgwuGgU6B77Uylo4cqqHQATqBvtcrWhaiwvG6hM4vkzFkaDM4gTFpR96iUzimQWaSyArq6DI5Dq+kCRxCVVjKHMNUjcQN0ssjB5EUtRI+SmKkrLODvTfGdm1vCgKkk8QoYppeGuu1pjY32ErOfkjHOY2LLIGhA6tuB5ME62NmWyBIVhKWv2PfRFUljOX8Zt4Jtg/y9K1j0kLNjaJuo8VK94C0QVMycXpHzcg4U664DfAEuBGevR2ug3vlxtlowu/V4sbIT85BKmkBq+dQOiqKe0C3ZKuf4tllVapjxRf7vUiL1DvrHEteZ4lzzVD9dMmaKxTYSxW3MaS4BDCoWLgpNYahq5ecmyEM6BErrExGNpIFpLyty5jJFNJ2tZbpXwnMyRD73dOaOdyI/bA3e9PiYm0rNukB5FKsuED3NiF5N2dWlMru+n00RSPfHryQmV2RTFggmcKkxRoaA4JTmb+uczhZk0OCM5m63/MvnrT9M/TROmc062b32Hw9sNS/At2zezntseWq6hxwrrT2tUzk0POiFPDu6c2DX5BaLKB9X+FtQPFGn2bTKFxuArQLW3IDeptJF5fhVVhZGOooqd4F2CyVXaLlVd4rwqrFHKXa7UJrqcinO4vB1lOf26vFUbfpRmpsswkwe3Aifnuo9OwjjLW58ZXg+sPxF1VZ7PMpLnTCzCj1+9fnWaaj+TTdBW+B+DW8u5Auu0pcPTiX3qNigqJS2HiFRmGRttF/zgpBnX0uEOegT8l4lEbupe1ZdjB8XoCBcY3mvjAnb5v3lYcg20JySrYK5eBe9QFVsTg7ONVCvrcBrNpP0CI8Lexd3DHMaGMDZoNL28KWF8QmXRci7TecPSCfNPwmzdL2wsxJMwZ21xMK5aQpJyw8VJlB5txfP5y5eDTHHqUud5wzCQK9SuhFkPcluOjo119FS713mgt+HtSO6PLcSl1NY+S9fiOJbVv2l/yjPc7hn5/gxW/0i+l9SRllS4PTv7xtQ228KtBVJNqY0VmEBjk/U5S7BfvIjha7D4wjS6hDlrnVyC7kS7oVrW5/HLzXhknnHR/yiozGypDIYIq7v9Jf+pYfxcbSD1tT8rJ+ZktkdJorr3iz2hPYAsjLgnzAldYTNhVm4ElJKNIwkYbfvoxbuL0KORwg+usKXtZqrc3VwnYD4VZiF/jwEjy4ndbMDsCG8nYI5Hul7AdDPtC8xcFi3/hB9yfRGvI/4Puof/Qnc3sfUrlZcTJ2MVlvEM/kdBuUxBGTVAWurG7zBAxiok4wfIHwVkYAH5LQAA//9rhd1c" } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go index c1af433a..1b90ccd5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/healthcheck/healthcheck.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. 
+// +build linux darwin windows + package healthcheck import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image_integration_test.go index 38284708..59165681 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image_integration_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/image/image_integration_test.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build integration + package image import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go index 873f5562..8108b30d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/memory/memory.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package memory import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go index 30f6bd34..94567f08 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/data.go @@ -23,8 +23,8 @@ import ( ) func eventsMapping(r mb.ReporterV2, netsStatsList []NetStats) { - for _, netsStats := range netsStatsList { - eventMapping(r, &netsStats) + for i := range netsStatsList { + eventMapping(r, &netsStatsList[i]) } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go index 3b6f58bf..794574ba 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/helper.go @@ -71,22 +71,22 @@ func (n *NetService) getNetworkStatsPerContainer(rawStats []docker.Stat, dedot b formattedStats := []NetStats{} for _, myStats := range rawStats { for nameInterface, rawnNetStats := range myStats.Stats.Networks { - formattedStats = append(formattedStats, n.getNetworkStats(nameInterface, &rawnNetStats, &myStats, dedot)) + formattedStats = append(formattedStats, n.getNetworkStats(nameInterface, rawnNetStats, myStats, dedot)) } } return formattedStats } -func (n *NetService) getNetworkStats(nameInterface string, rawNetStats *types.NetworkStats, myRawstats *docker.Stat, dedot bool) NetStats { - newNetworkStats := createNetRaw(myRawstats.Stats.Read, rawNetStats) +func (n *NetService) getNetworkStats(nameInterface string, rawNetStats types.NetworkStats, myRawstats docker.Stat, dedot bool) NetStats { + newNetworkStats := createNetRaw(myRawstats.Stats.Read, &rawNetStats) oldNetworkStat, exist := n.NetworkStatPerContainer[myRawstats.Container.ID][nameInterface] netStats := NetStats{ Container: docker.NewContainer(myRawstats.Container, dedot), Time: myRawstats.Stats.Read, NameInterface: nameInterface, - Total: rawNetStats, + Total: &rawNetStats, } if exist { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go index a9b0b54c..84cdd4d1 100644 --- 
a/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/docker/network/network.go @@ -15,6 +15,8 @@ // specific language governing permissions and limitations // under the License. +// +build linux darwin windows + package network import ( diff --git a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/Dockerfile index b7f6673c..668844b9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM maven:3.3-jdk-8 +ARG MAVEN_VERSION +FROM maven:${MAVEN_VERSION} COPY test /test HEALTHCHECK --interval=1s --retries=90 CMD curl -f http://localhost:8080/test/helloworld diff --git a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/test/pom.xml b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/test/pom.xml index 43012580..5efba5fb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/test/pom.xml +++ b/vendor/github.com/elastic/beats/metricbeat/module/dropwizard/_meta/test/pom.xml @@ -17,7 +17,7 @@ io.dropwizard.metrics metrics-servlets - 3.1.0 + 4.0.0 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile index a931d890..5797ece3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/Dockerfile @@ -1,2 +1,3 @@ -FROM docker.elastic.co/elasticsearch/elasticsearch:7.3.0 +ARG ELASTICSEARCH_VERSION +FROM docker.elastic.co/elasticsearch/elasticsearch:${ELASTICSEARCH_VERSION} HEALTHCHECK --interval=1s --retries=300 CMD curl -f http://localhost:9200/_license diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config-xpack.yml b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config-xpack.yml index 982fbf3b..e31e4862 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config-xpack.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/_meta/config-xpack.yml @@ -2,6 +2,7 @@ metricsets: - ccr - cluster_stats + - enrich - index - index_recovery - index_summary diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ccr/ccr.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ccr/ccr.go index 41e9c79e..d67d3b01 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ccr/ccr.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ccr/ccr.go @@ -82,7 +82,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { if ccrUnavailableMessage != "" { if time.Since(m.lastCCRLicenseMessageTimestamp) > 1*time.Minute { m.lastCCRLicenseMessageTimestamp = time.Now() - m.Logger().Warn(ccrUnavailableMessage) + m.Logger().Debug(ccrUnavailableMessage) } return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats/data_xpack.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats/data_xpack.go index 02545542..0dacb45d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats/data_xpack.go +++ 
b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats/data_xpack.go @@ -33,11 +33,6 @@ import ( "github.com/elastic/beats/metricbeat/module/elasticsearch" ) -type clusterStatsLicense struct { - *elasticsearch.License - ClusterNeedsTLS bool `json:"cluster_needs_tls"` -} - func clusterNeedsTLSEnabled(license *elasticsearch.License, stackStats common.MapStr) (bool, error) { // TLS does not need to be enabled if license type is something other than trial if !license.IsOneOf("trial") { @@ -205,7 +200,8 @@ func eventMappingXPack(r mb.ReporterV2, m *MetricSet, info elasticsearch.Info, c return errors.Wrap(err, "failed to determine if cluster needs TLS enabled") } - l := clusterStatsLicense{license, clusterNeedsTLS} + l := license.ToMapStr() + l["cluster_needs_tls"] = clusterNeedsTLS isAPMFound, err := apmIndicesExist(clusterState) if err != nil { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch.go index a3d2b2ae..f44de547 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch.go @@ -67,6 +67,7 @@ func validateXPackMetricsets(base mb.BaseModule) error { expectedXPackMetricsets := []string{ "ccr", + "enrich", "cluster_stats", "index", "index_recovery", @@ -86,6 +87,9 @@ func validateXPackMetricsets(base mb.BaseModule) error { // CCRStatsAPIAvailableVersion is the version of Elasticsearch since when the CCR stats API is available. var CCRStatsAPIAvailableVersion = common.MustNewVersion("6.5.0") +// EnrichStatsAPIAvailableVersion is the version of Elasticsearch since when the Enrich stats API is available. +var EnrichStatsAPIAvailableVersion = common.MustNewVersion("7.5.0") + // Global clusterIdCache. Assumption is that the same node id never can belong to a different cluster id. var clusterIDCache = map[string]string{} @@ -476,6 +480,33 @@ func (l *License) IsOneOf(candidateLicenses ...string) bool { return false } +// ToMapStr converts the license to a common.MapStr. This is necessary +// for proper marshaling of the data before it's sent over the wire. In +// particular it ensures that ms-since-epoch values are marshaled as longs +// and not floats in scientific notation as Elasticsearch does not like that. 
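+// +// A minimal usage sketch (hypothetical values, illustrative only): for a license +// whose ExpiryDateInMillis is zero the returned map carries no expiry key: +// +// l := License{Status: "active", Type: "basic", IssueDateInMillis: 1546300800000} +// doc := l.ToMapStr() // doc has no "expiry_date_in_millis" entry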
+func (l *License) ToMapStr() common.MapStr { + m := common.MapStr{ + "status": l.Status, + "uid": l.ID, + "type": l.Type, + "issue_date": l.IssueDate, + "issue_date_in_millis": l.IssueDateInMillis, + "expiry_date": l.ExpiryDate, + "max_nodes": l.MaxNodes, + "issued_to": l.IssuedTo, + "issuer": l.Issuer, + "start_date_in_millis": l.StartDateInMillis, + } + + if l.ExpiryDateInMillis != 0 { + // We don't want to record a 0 expiry date as this means the license has expired + // in the Stack Monitoring UI + m["expiry_date_in_millis"] = l.ExpiryDateInMillis + } + + return m +} + func getSettingGroup(allSettings common.MapStr, groupKey string) (common.MapStr, error) { hasSettingGroup, err := allSettings.HasKey(groupKey) if err != nil { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch_integration_test.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch_integration_test.go index 641dcb89..f670a7bd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch_integration_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/elasticsearch_integration_test.go @@ -39,6 +39,7 @@ import ( "github.com/elastic/beats/metricbeat/module/elasticsearch" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/ccr" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/cluster_stats" + _ "github.com/elastic/beats/metricbeat/module/elasticsearch/enrich" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/index" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/index_recovery" _ "github.com/elastic/beats/metricbeat/module/elasticsearch/index_summary" @@ -51,6 +52,7 @@ import ( var metricSets = []string{ "ccr", "cluster_stats", + "enrich", "index", "index_recovery", "index_summary", @@ -81,6 +83,9 @@ func TestFetch(t *testing.T) { err = createCCRStats(host) assert.NoError(t, err) + err = createEnrichStats(host) + assert.NoError(t, err) + for _, metricSet := range metricSets { checkSkip(t, metricSet, version) t.Run(metricSet, func(t *testing.T) { @@ -367,15 +372,98 @@ func checkExists(url string) bool { return false } -func checkSkip(t *testing.T, metricset string, version *common.Version) { - if metricset != "ccr" { - return +func createEnrichStats(host string) error { + err := createEnrichSourceIndex(host) + if err != nil { + return errors.Wrap(err, "error creating enrich source index") } - isCCRStatsAPIAvailable := elastic.IsFeatureAvailable(version, elasticsearch.CCRStatsAPIAvailableVersion) + err = createEnrichPolicy(host) + if err != nil { + return errors.Wrap(err, "error creating enrich policy") + } - if !isCCRStatsAPIAvailable { - t.Skip("elasticsearch CCR stats API is not available until " + elasticsearch.CCRStatsAPIAvailableVersion.String()) + err = executeEnrichPolicy(host) + if err != nil { + return errors.Wrap(err, "error executing enrich policy") + } + + err = createEnrichIngestPipeline(host) + if err != nil { + return errors.Wrap(err, "error creating ingest pipeline with enrich processor") + } + + err = ingestAndEnrichDoc(host) + if err != nil { + return errors.Wrap(err, "error ingesting doc for enrichment") + } + + return nil +} + +func createEnrichSourceIndex(host string) error { + sourceDoc, err := ioutil.ReadFile("enrich/_meta/test/source_doc.json") + if err != nil { + return err + } + + docURL := "/users/_doc/1?refresh=wait_for" + _, _, err = httpPutJSON(host, docURL, sourceDoc) + return err +} + +func createEnrichPolicy(host string) error { + 
policy, err := ioutil.ReadFile("enrich/_meta/test/policy.json") + if err != nil { + return err + } + + policyURL := "/_enrich/policy/users-policy" + _, _, err = httpPutJSON(host, policyURL, policy) + return err +} + +func executeEnrichPolicy(host string) error { + executeURL := "/_enrich/policy/users-policy/_execute" + _, _, err := httpPostJSON(host, executeURL, nil) + return err +} + +func createEnrichIngestPipeline(host string) error { + pipeline, err := ioutil.ReadFile("enrich/_meta/test/ingest_pipeline.json") + if err != nil { + return err + } + + pipelineURL := "/_ingest/pipeline/user_lookup" + _, _, err = httpPutJSON(host, pipelineURL, pipeline) + return err +} + +func ingestAndEnrichDoc(host string) error { + targetDoc, err := ioutil.ReadFile("enrich/_meta/test/target_doc.json") + if err != nil { + return err + } + + docURL := "/my_index/_doc/my_id?pipeline=user_lookup" + _, _, err = httpPutJSON(host, docURL, targetDoc) + return err +} + +func checkSkip(t *testing.T, metricset string, version *common.Version) { + checkSkipFeature := func(name string, availableVersion *common.Version) { + isAPIAvailable := elastic.IsFeatureAvailable(version, availableVersion) + if !isAPIAvailable { + t.Skipf("elasticsearch %s stats API is not available until %s", name, availableVersion) + } + } + + switch metricset { + case "ccr": + checkSkipFeature("CCR", elasticsearch.CCRStatsAPIAvailableVersion) + case "enrich": + checkSkipFeature("Enrich", elasticsearch.EnrichStatsAPIAvailableVersion) } } @@ -406,7 +494,15 @@ func getElasticsearchVersion(elasticsearchHostPort string) (*common.Version, err } func httpPutJSON(host, path string, body []byte) ([]byte, *http.Response, error) { - req, err := http.NewRequest("PUT", "http://"+host+path, bytes.NewReader(body)) + return httpSendJSON(host, path, "PUT", body) +} + +func httpPostJSON(host, path string, body []byte) ([]byte, *http.Response, error) { + return httpSendJSON(host, path, "POST", body) +} + +func httpSendJSON(host, path, method string, body []byte) ([]byte, *http.Response, error) { + req, err := http.NewRequest(method, "http://"+host+path, bytes.NewReader(body)) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/data.json new file mode 100644 index 00000000..cc285111 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/data.json @@ -0,0 +1,38 @@ +{ + "@timestamp": "2017-10-12T08:05:34.853Z", + "elasticsearch": { + "cluster": { + "id": "et6blfihSoytMUvkpYtEKQ", + "name": "docker-cluster" + }, + "enrich": { + "executed_searches": { + "total": 1 + }, + "queue": { + "size": 0 + }, + "remote_requests": { + "current": 0, + "total": 1 + } + }, + "node": { + "id": "l_XOyQ65Teyn4kW4PUFjVg" + } + }, + "event": { + "dataset": "elasticsearch.enrich", + "duration": 115000, + "module": "elasticsearch" + }, + "metricset": { + "name": "enrich", + "period": 10000 + }, + "service": { + "address": "localhost:32780", + "name": "elasticsearch", + "type": "elasticsearch" + } +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/docs.asciidoc new file mode 100644 index 00000000..ab604a71 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/docs.asciidoc @@ -0,0 
+1,3 @@ +This is the `enrich` metricset of the Elasticsearch module. It interrogates the +Enrich Stats API endpoint to fetch information about Enrich coordinator nodes +in the Elasticsearch cluster that are participating in ingest-time enrichment. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/fields.yml new file mode 100644 index 00000000..4b42a113 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/fields.yml @@ -0,0 +1,25 @@ +- name: enrich + type: group + description: > + Enrich stats + release: ga + fields: + - name: queue.size + type: long + description: > + Number of search requests in the queue. + - name: remote_requests + type: group + fields: + - name: current + type: long + description: > + Current number of outstanding remote requests. + - name: total + type: long + description: > + Number of remote requests executed since node startup. + - name: executed_searches.total + type: long + description: > + Number of search requests that enrich processors have executed since node startup. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/empty.750.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/empty.750.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/empty.750.json @@ -0,0 +1 @@ +{} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/enrich_stats.750.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/enrich_stats.750.json new file mode 100644 index 00000000..061b7385 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/enrich_stats.750.json @@ -0,0 +1,28 @@ +{ + "executing_policies": [ + { + "name": "my-policy", + "task": { + "id" : 124, + "type" : "direct", + "action" : "cluster:admin/xpack/enrich/execute", + "start_time_in_millis" : 1458585884904, + "running_time_in_nanos" : 47402, + "cancellable" : false, + "parent_task_id" : "oTUltX4IQMOUUVeiohTt8A:123", + "headers" : { + "X-Opaque-Id" : "123456" + } + } + } + ], + "coordinator_stats": [ + { + "node_id": "1sFM8cmSROZYhPxVsiWew", + "queue_size": 0, + "remote_requests_current": 0, + "remote_requests_total": 0, + "executed_searches_total": 0 + } + ] +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/ingest_pipeline.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/ingest_pipeline.json new file mode 100644 index 00000000..19be19b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/ingest_pipeline.json @@ -0,0 +1,13 @@ +{ + "description" : "Enriching user details to messages", + "processors" : [ + { + "enrich" : { + "policy_name": "users-policy", + "field" : "email", + "target_field": "user", + "max_matches": "1" + } + } + ] +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/policy.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/policy.json new file mode 100644 index 00000000..d2459c35 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/policy.json @@ -0,0 +1,7 
@@ +{ + "match": { + "indices": "users", + "match_field": "email", + "enrich_fields": ["first_name", "last_name", "city", "zip", "state"] + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/source_doc.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/source_doc.json new file mode 100644 index 00000000..11f73ce6 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/source_doc.json @@ -0,0 +1,10 @@ +{ + "email": "mardy.brown@asciidocsmith.com", + "first_name": "Mardy", + "last_name": "Brown", + "city": "New Orleans", + "county": "Orleans", + "state": "LA", + "zip": 70116, + "web": "mardy.asciidocsmith.com" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/target_doc.json b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/target_doc.json new file mode 100644 index 00000000..45f673d8 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/_meta/test/target_doc.json @@ -0,0 +1,3 @@ +{ + "email": "mardy.brown@asciidocsmith.com" +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data.go new file mode 100644 index 00000000..cc4fb5a2 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data.go @@ -0,0 +1,92 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package enrich + +import ( + "encoding/json" + + "github.com/joeshaw/multierror" + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/common" + s "github.com/elastic/beats/libbeat/common/schema" + c "github.com/elastic/beats/libbeat/common/schema/mapstriface" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/elasticsearch" +) + +var ( + schema = s.Schema{ + "node_id": c.Str("node_id"), + "queue": s.Object{ + "size": c.Int("queue_size"), + }, + "remote_requests": s.Object{ + "current": c.Int("remote_requests_current"), + "total": c.Int("remote_requests_total"), + }, + "executed_searches": s.Object{ + "total": c.Int("executed_searches_total"), + }, + } +) + +type response struct { + ExecutingPolicies []map[string]interface{} `json:"executing_policies"` + CoordinatorStats []map[string]interface{} `json:"coordinator_stats"` +} + +func eventsMapping(r mb.ReporterV2, info elasticsearch.Info, content []byte) error { + var data response + err := json.Unmarshal(content, &data) + if err != nil { + return errors.Wrap(err, "failure parsing Elasticsearch Enrich Stats API response") + } + + var errs multierror.Errors + for _, stat := range data.CoordinatorStats { + + event := mb.Event{} + event.RootFields = common.MapStr{} + event.RootFields.Put("service.name", elasticsearch.ModuleName) + + event.ModuleFields = common.MapStr{} + event.ModuleFields.Put("cluster.name", info.ClusterName) + event.ModuleFields.Put("cluster.id", info.ClusterID) + + fields, err := schema.Apply(stat) + if err != nil { + errs = append(errs, errors.Wrap(err, "failure applying enrich coordinator stats schema")) + continue + } + + nodeID, err := fields.GetValue("node_id") + if err != nil { + errs = append(errs, errors.Wrap(err, "failure retrieving node ID from Elasticsearch Enrich Stats API response")) + continue + } + + event.ModuleFields.Put("node.id", nodeID) + fields.Delete("node_id") + + event.MetricSetFields = fields + + r.Event(event) + } + + return errs.Err() +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data_test.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data_test.go new file mode 100644 index 00000000..08b7a299 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data_test.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +// +build !integration + +package enrich + +import ( + "io/ioutil" + "testing" + + "github.com/stretchr/testify/assert" + + mbtest "github.com/elastic/beats/metricbeat/mb/testing" + "github.com/elastic/beats/metricbeat/module/elasticsearch" +) + +var info = elasticsearch.Info{ + ClusterID: "1234", + ClusterName: "helloworld", +} + +func TestMapper(t *testing.T) { + elasticsearch.TestMapperWithInfo(t, "./_meta/test/enrich_stats.*.json", eventsMapping) +} + +func TestEmpty(t *testing.T) { + input, err := ioutil.ReadFile("./_meta/test/empty.750.json") + assert.NoError(t, err) + + reporter := &mbtest.CapturingReporterV2{} + eventsMapping(reporter, info, input) + assert.Equal(t, 0, len(reporter.GetErrors())) + assert.Equal(t, 0, len(reporter.GetEvents())) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data_xpack.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data_xpack.go new file mode 100644 index 00000000..6b3029f4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/data_xpack.go @@ -0,0 +1,76 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package enrich + +import ( + "encoding/json" + "time" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/helper/elastic" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/elasticsearch" +) + +func eventsMappingXPack(r mb.ReporterV2, m *MetricSet, info elasticsearch.Info, content []byte) error { + var data response + err := json.Unmarshal(content, &data) + if err != nil { + return errors.Wrap(err, "failure parsing Elasticsearch Enrich Stats API response") + } + + now := common.Time(time.Now()) + intervalMS := m.Module().Config().Period / time.Millisecond + index := elastic.MakeXPackMonitoringIndexName(elastic.Elasticsearch) + + indexExecutingPolicies(r, data, info, now, intervalMS, index) + indexCoordinatorStats(r, data, info, now, intervalMS, index) + return nil +} + +func indexExecutingPolicies(r mb.ReporterV2, enrichData response, esInfo elasticsearch.Info, now common.Time, intervalMS time.Duration, indexName string) { + for _, stat := range enrichData.ExecutingPolicies { + event := mb.Event{} + event.RootFields = common.MapStr{ + "cluster_uuid": esInfo.ClusterID, + "timestamp": now, + "interval_ms": intervalMS, + "type": "enrich_executing_policy_stats", + "enrich_executing_policy_stats": stat, + } + event.Index = indexName + r.Event(event) + } +} + +func indexCoordinatorStats(r mb.ReporterV2, enrichData response, esInfo elasticsearch.Info, now common.Time, intervalMS time.Duration, indexName string) { + for _, stat := range enrichData.CoordinatorStats { + event := mb.Event{} + event.RootFields = common.MapStr{ + "cluster_uuid": esInfo.ClusterID, + "timestamp": now, + "interval_ms": intervalMS, + "type": "enrich_coordinator_stats", + "enrich_coordinator_stats": stat, + } + event.Index = indexName + r.Event(event) + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/enrich.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/enrich.go new file mode 100644 index 00000000..d2acf780 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/enrich/enrich.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package enrich + +import ( + "time" + + "github.com/pkg/errors" + + "github.com/elastic/beats/libbeat/common" + "github.com/elastic/beats/metricbeat/helper/elastic" + "github.com/elastic/beats/metricbeat/mb" + "github.com/elastic/beats/metricbeat/module/elasticsearch" +) + +func init() { + mb.Registry.MustAddMetricSet(elasticsearch.ModuleName, "enrich", New, + mb.WithHostParser(elasticsearch.HostParser), + ) +} + +const ( + enrichStatsPath = "/_enrich/_stats" +) + +// MetricSet type defines all fields of the MetricSet +type MetricSet struct { + *elasticsearch.MetricSet + lastLicenseMessageTimestamp time.Time +} + +// New create a new instance of the MetricSet +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + ms, err := elasticsearch.NewMetricSet(base, enrichStatsPath) + if err != nil { + return nil, err + } + return &MetricSet{MetricSet: ms}, nil +} + +// Fetch gathers stats for each enrich coordinator node +func (m *MetricSet) Fetch(r mb.ReporterV2) error { + isMaster, err := elasticsearch.IsMaster(m.HTTP, m.GetServiceURI()) + if err != nil { + return errors.Wrap(err, "error determining if connected Elasticsearch node is master") + } + + // Not master, no event sent + if !isMaster { + m.Logger().Debug("trying to fetch enrich stats from a non-master node") + return nil + } + + info, err := elasticsearch.GetInfo(m.HTTP, m.GetServiceURI()) + if err != nil { + return err + } + + enrichUnavailableMessage, err := m.checkEnrichAvailability(info.Version.Number) + if err != nil { + return errors.Wrap(err, "error determining if Enrich is available") + } + + if enrichUnavailableMessage != "" { + if time.Since(m.lastLicenseMessageTimestamp) > 10*time.Minute { + m.lastLicenseMessageTimestamp = time.Now() + m.Logger().Debug(enrichUnavailableMessage) + } + return nil + } + + content, err := m.HTTP.FetchContent() + if err != nil { + return err + } + + if m.XPack { + err = eventsMappingXPack(r, m, *info, content) + if err != nil { + // Since this is an x-pack code path, we log the error but don't + // return it. Otherwise it would get reported into `metricbeat-*` + // indices. + m.Logger().Error(err) + return nil + } + } else { + return eventsMapping(r, *info, content) + } + + return nil +} + +func (m *MetricSet) checkEnrichAvailability(currentElasticsearchVersion *common.Version) (message string, err error) { + isAvailable := elastic.IsFeatureAvailable(currentElasticsearchVersion, elasticsearch.EnrichStatsAPIAvailableVersion) + + if !isAvailable { + metricsetName := m.FullyQualifiedName() + message = "the " + metricsetName + " is only supported with Elasticsearch >= " + + elasticsearch.EnrichStatsAPIAvailableVersion.String() + ". " + + "You are currently running Elasticsearch " + currentElasticsearchVersion.String() + "." + return + } + + return "", nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/fields.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/fields.go index 90b49186..c107604b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/fields.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/fields.go @@ -32,5 +32,5 @@ func init() { // AssetElasticsearch returns asset data. // This is the base64 encoded gzipped contents of ../metricbeat/module/elasticsearch. 
func AssetElasticsearch() string { - return "eJzsXN1u2zYUvvdTHOSqBRI9gC92061dBjQr1nTAMAwqLR7bTPijkJQT7+kHUpIjS5QlW3LsYvZdLec73/nlOSTVG3jE9RSQE2NZYpDoZDkBsMxynMLVL9XvryYAFE2iWWqZklP4aQIAsPUbEIpmHCcAGjkSg1NYkAmAQWuZXJgp/H1lDL+6hqultenVP+7ZUmkbJ0rO2WIKc8KN+/s5Q07N1Iu4AUkENmm6j12nTohWWVp8E+C4DVeFTHhmLOrI/WvzsER9xPWz0rTyfRA7/2zbocD1UqJJq1hGjyGU0R0ijSUWRxbsMcNipaLDpd0pinD7cwv6cN95/NxVDbMluoFdjbYO5A9aGXNTOkZjyllC3A+9zUzlt9sJU37qcVulxpFQ1FuP2hi2QVXhmKT40njabs8e2pefOyIQ1Lxg3CKp5CHIS2zwKZaqlQxXcnEYk8/khYlMgMGnDGWCIDMxQ+3IqRR17holwS6xZGuWpKZ3yXSuOFfPP5YLSs4dTvBKR7lxjuCGu43VnaG9MHhmdslyy+/mtnGUiZ81sxblURm+ist5IYV3ZSIjfQ9MWuVZb0yb6zPXSuyOo6pSlgmMDZMJxq60xhoJjYQ5gmb3TOA1MAnCXIOXuM3eiYc52mSJDSVa6S+4mhEeJ0tMHlPFpD0C8U9eBrzKgBXhGbp03Tb9zrXPDCrnRSEfXrwdQlZ376487zBQlVlm4N1CI8prWKMzzDVopO+jIBG3hIZ5hCpYBwu3kBrPgfn2IOpVADduUlkgcHaETWfQ3CtLeKXGe2Vd7JcR0cJEEPd0VCqvBSUHv0HOFmzGsTcpSiw5EiUH3cGjskaxZLyIuc3hzjhmCoV7OMgXn2bJbrdNDzZf/VrSbp3upiJsoRHWkZqdcu13mOmVUqqZIJo1ougYtHJZ6256m67O2dMlRCRQKL2OZmsbYDokxD57YMiMW2GVrohsLF31VmjvJasOsP9SVZuvYNhCdev41IatqjTr/DdaR01VYqK3ygCqkkygtD7KNn1sexp4chQ5Wgx19aPTyyXtTdNYpTEy7F9sSYUOqnOlBbFTaPvj3qo4CuXM4Dk7BTzqDvK48Lq+WRDkxDZiu4ntrDJvZdpX/qJSm2brfD4rqAbMHa5XkcZErVCvT1u4WD2rWm3ZYad8ES6V8htdIXkOfrxCWRPqkMJiixUuKHmmFEdSH5A7JN/rDIHVls6wbGPJYkSd/yi19biBMKvKtkQv0EYtXj5I/r2H9J1wu5dzsUtl6uVkJMEOGQilGo2Bd4nKOIUZwu2XzZdK+x85Pi1TXUFy3KW7SnJ7AQ/Hhsp0gqP656uH3O2fQuy4/qkKHsM/Bclx/VMlGd7NzquzyUStYJygOLfNAJdebxR6l17v0ut18t+v14PLlHbJ3Evm/oCZu9nW5tGDmg1Z9wU/xkR2ULfzTbKnDEFweFCz9m7QEjtii/WbmuWQYWmUWBL7GDZRqlWCxiCN3QynaRyK7UMn0i8leL51j6tmGIc4MbkinNGYEouj8rlfVo/Sc4WNP9IFZHaJGggIZgyTC0cI8yhxjTLJ/22XxELeS0tlXT+dEm2QBmaMRli7hndIUNf+fv+wXqE2TNWn7AFh5i+kFKhhrz6sRO/ltyum//wMt3Ku+h28dGndpXkPQiWpoAGqDIqivESSRkwye7Ly/CuSFByDrYrsdOhe+6pKCPJyWh0EeTlcBank6V1xp+TNCO4odTmlRzaq9PfK63Tt14dIcJU8Eh5u1Q/aHLydl+DgsJH6qyS50YKVefitC4cSD79yMfLJta/RrHJ8XUc+q6kHX5ixfvUt54rznXjOaE740SaEcvjqGONhxMHxoMHQC+2oYQ8r4YwapUrx0bLWlc5ieHK4ByWu4u1pEb7m0ctKv3PapNb0Wzs/2L5F2+pO6HIp9AjJulItP/FXbttCsEo4RfJ4Joy/IHnsSzk+H0N72qKftV03cSa0v+WNzc4atVZZkMjgnPvLAV+y7hwYX7Lu3LLOZHrFVqr9BYABife1wL7k3jkwvuTeqXOvtQNeJFGiOMfEKj1aF/zpA2xAw1nXowcuee3aAxzYDr9KgEVyaGFomxWhj9N7EN0UoIMjq0PIVhd0ZKvnDdH/2+7BXJybwL0ZGJKEHxlHMGtjUUAYuisJ/Sn8SXYcNlbReJrz5pIAWRHGyYy/LYv6i2UpSsrkIrbEPE7qovfY6fweAvwOiZKWMGmAQPEA3IMqUjVJD9kaNahtrHTbC8X7nwjeekhoQlYugCnNbDidDjkRDcBtX7cLShpw0y6Cj0oDvhCRcqdQZm8ESVNWo771iieT8VOGGTbf7Tz46JUJv5PmYRsRWn/rdO+QLF5m9cEzKMaOdk3ZLpkBZvy+Yo8ry8GXmsc59/ZMdt+WHvMywr3fSCUW+8jWyFVCrCsqof81YQQq/t5p5X1uYkqhxYvE0eS/AAAA//89GKel" + return 
"eJzsXN1u47oRvvdTDHK1B0j0AL7ozfacbQpsuuhmCxRFoaWlsc2EPwpJOXafviAp2bJMWbIkx14c+y6W9c03v5yhqDzAK26mgIxoQxONRCXLCYChhuEU7n6vfn83AUhRJ4pmhkoxhb9MAAD2fgNcpjnDCYBChkTjFBZkAqDRGCoWegr/udOa3d3D3dKY7O6/9tpSKhMnUszpYgpzwrS9f06RpXrqRDyAIBwPadqP2WRWiJJ5VnwT4LgPV4VMWK4Nqsj+tb1Yor7i5l2qtPJ9ENt/9u1Q4Dop0aRRLE3PIZSmR0RqQwyOLNhhhsUKmQ6X9iRThMe/NqAP953D9646MFuiDrCr0daC/FlJrR9KxyjMGE2I/aGzma78dj9hyk89bqvUGJIU1d6lJoZNUFU4KlJcH1xttmcH7cvPE+EIcl4wbpBU8uBkHWt8i4VsJMOkWPRj8pWsKc85aHzLUSQIIuczVJaczFB510gBZoklW70kNb1LpnPJmHz/tVxQcm5xglM68sY5gxuetla3hnbC4J2aJfWWP85t6ygdvytqDIqzMtyJ87wwhU9lImP6G1BhpGO9Na3XZ64kPx5HVaUM5RhrKhKMbWmNFZI04voMmj1TjvdABXB9D07iPnsrHuZokiUeKNFIf8HkjLA4WWLymkkqzBmIf3EyYCcDVoTlaNN13/RH1z49qJwXhXx48bYIed29x/K8xUBVZrmGTwuFKO5hg9Yw96Aw/S0KErFLaJhHqIK1sLALqXYcqGsPok4FcOsmmQcC50jYtAbNszSEVWq8U9bGfhkRDUw4sVdHpbIrKB78ARld0BnDzqRSYsiZKFnoFh6VNYom40XMo4e74pgpFO7gIFd8Dkt2s206sPnu1pJm67Q3FWELjbCO1OzktT9iph2lTFFOFD2IonPQ8rI27fS2XZ21p02IiCOXahPNNibAdEiIfXXAkGu7wkpVEXmwdKFQdDvsQp8163eHMMKS9ZZjjpGm/8Ng8gdM0bZa7GLHz5HKduXaOC/ZpsNLDJJRyKXBuLxjtBY8yZXCs+TLZ49cnThyow0RKRWLQp+tBZpzx9j4Pm/D20wLcI1JbjAtGke7ZNjIUibPwn4qb4i9i1FHIQXGjyCzJKZIHsiUTFBrqTQsyQq7KdE0k52cf3WA0xOvtsEBwzrFR8unttsBLQHWP59Smejoo5agVCY5R7ErIM72zbnkyKXI0GBorB6dnpd0Mk1tpPKFt2EtaqE6l4oTM4WmmzurYimUQ7vjbBVwqEfI48Lp+mFB4IltxbYTO7rMf5Rpd/x5pTmYbfwGSUE1YO5wvYoUJnKFanPZwkXrWdW3zvsuuFTK7TSH5Fn48QplTahFCostWsyg5JmUDEl9h6pF8rPKEWitdw3L1oYsRtT5n6W2DjcQZlXZhqgFmqjBy73kPztIvyQ3etmLXUpdLycjCbbIQNJUodbwKZE5S2GG8Pht+6VU7keWT8O2SkFy3KW7SnJ/AQ/HhsxVgqP657uDPO6fQuy4/qkKHsM/Bclx/VMlGX6c5KuzznmtYFygODcN4bdebxR6t17v1uu18j+t14PblHbL3Fvm/oKZu32uxKIXORuy7nN2jomsV7fzQ9C3HIEzeJGz5m7QEDNii/V3OfOQYWkpMSR2MayjYtsP09jOcCqNQ7HddyL9VoL7Z2e4OgzjECcqVoTRNE6JwVH5PC+rZ1m8wtqdqQCkZokKCHCqNRULSwh9lNhGmfi/3Yap76WFNLafzojSmAZmjIOwtg3vkKCu3X96WK9QaSrrU/aAMHMnwgrUsFdfVrzz8tsW0//6Co9iLrs9+WzTuk3zDoRKUkEDVBkURXmJJIuooOZi5flvSDKwDPYqstWhfe2rKsHJ+rI6cLLur4KQ4vKueJLiYQR3lLpc0iNbVbp7ZTddu/Uh4kwmr4SFW/Vem4OP8xIcLDam7iyXN1qwMg8/9mRR4uEPkEc+OuJqNK2cH6kjX9XUg2uqjVt9y7nieieeK5oTfrUJoRy+WsZ4GHFw7DUYOqEtNexlxa1Ro0xKNlrW2tJZDE8Wt1fiStacFuFzVp2s9A+WHlI79FszP9g/xt7oTmhzKXQIybpSDT9xZ96bQrBKOEPyeiWMvyF57Uo5vh5DO9q8m7VtN3EltH/4xuZojdrIPEhkcM792wLfsu4aGN+y7tqyTudqRVey+Q2cAYn3vcC+5d41ML7l3qVzr7EDXiRRIhnDxEg1Whf85TNsQcNZ16EHLnkd2wMc2A7vJMAi6VsYmmZF6OL0DkS3Bah3ZLUI2euCzmx13xD9ue0ezMW5DpybgSFJ+AdlCHqjDXIIQ7cloXsKf5Edh61VFF7meXNJgKwIZWTGPpZF/c3ODN37CrEh+nVSF33CTufPEOBPSKQwhAoNBIoLYC9UkapJ2mdrVKMysVRNb/Sf/kTw0UHCIWTlAJhU1ITTqc8T0QDc/nG7oKQBJ+0i+EMqwDXhGbMK5eaBkyyjNep771hTEfvXig6qVu9Hr5S7nTQHexCh9de+Tw7J4m1yFzyDYuxsx5TNkmqg2u0rdjiyHPyvAuM893ZMjp+WHvMwwrPbSCUGu8hWyGRCjC0qoX9bMgIVd+608g8ViC6FFm/yR5P/BwAA//+sJJat" } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ml_job/data_xpack.go b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ml_job/data_xpack.go index a68d9568..79e0703d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ml_job/data_xpack.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/ml_job/data_xpack.go @@ -49,13 +49,22 @@ func eventsMappingXPack(r mb.ReporterV2, m *MetricSet, info elasticsearch.Info, } var errs multierror.Errors - for _, job := range jobsArr { - job, ok = job.(map[string]interface{}) + for _, j := range jobsArr { + job, ok := j.(map[string]interface{}) if !ok { errs = append(errs, fmt.Errorf("job is not a map")) continue } + if err := elastic.FixTimestampField(job, "data_counts.earliest_record_timestamp"); err != nil { + errs = append(errs, err) + continue + } + if err := 
elastic.FixTimestampField(job, "data_counts.latest_record_timestamp"); err != nil { + errs = append(errs, err) + continue + } + event := mb.Event{} event.RootFields = common.MapStr{ "cluster_uuid": info.ClusterID, diff --git a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py index af875ece..e8296440 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py +++ b/vendor/github.com/elastic/beats/metricbeat/module/elasticsearch/test_elasticsearch.py @@ -39,10 +39,14 @@ class Test(metricbeat.BaseTest): self.ccr_unfollow_index() self.es.indices.delete(index='test_index,pied_piper,rats', ignore_unavailable=True) self.delete_ml_job() + self.delete_enrich_ingest_pipeline() + self.delete_enrich_policy() + self.es.indices.delete(index='users,my_index', ignore_unavailable=True) super(Test, self).tearDown() @parameterized.expand([ "ccr", + "enrich", "index", "index_summary", "ml_job", @@ -62,6 +66,8 @@ class Test(metricbeat.BaseTest): self.create_ml_job() if metricset == "ccr": self.create_ccr_stats() + if metricset == "enrich": + self.create_enrich_stats() self.check_metricset("elasticsearch", metricset, self.get_hosts(), self.FIELDS + ["service"], extras={"index_recovery.active_only": "false"}) @@ -71,16 +77,19 @@ class Test(metricbeat.BaseTest): """ elasticsearch-xpack module tests """ - es = Elasticsearch(self.get_hosts()) self.create_ml_job() - self.create_ccr_stats() + if self.is_ccr_available(): + self.create_ccr_stats() + if self.is_enrich_available(): + self.create_enrich_stats() self.render_config_template(modules=[{ "name": "elasticsearch", "metricsets": [ "ccr", "cluster_stats", + "enrich", "index", "index_recovery", "index_summary", @@ -100,6 +109,51 @@ class Test(metricbeat.BaseTest): proc.check_kill_and_wait() self.assert_no_logged_warnings() + @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + def test_xpack_cluster_stats(self): + """ + elasticsearch-xpack module test for type:cluster_stats + """ + + self.start_basic() + + self.render_config_template(modules=[{ + "name": "elasticsearch", + "metricsets": [ + "ccr", + "cluster_stats", + "enrich", + "index", + "index_recovery", + "index_summary", + "ml_job", + "node_stats", + "shard" + ], + "hosts": self.get_hosts(), + "period": "1s", + "extras": { + "xpack.enabled": "true" + } + }]) + proc = self.start_beat() + self.wait_log_contains('"type": "cluster_stats"') + + # self.wait_until(lambda: self.output_has_message('"type":"cluster_stats"')) + proc.check_kill_and_wait() + self.assert_no_logged_warnings() + + docs = self.read_output_json() + for doc in docs: + t = doc["type"] + if t != "cluster_stats": + continue + license = doc["license"] + issue_date = license["issue_date_in_millis"] + self.assertIsNot(type(issue_date), float) + + self.assertNotIn("expiry_date_in_millis", license) + def create_ml_job(self): # Check if an ml job already exists response = self.ml_es.get_jobs() @@ -167,6 +221,72 @@ class Test(metricbeat.BaseTest): self.es.indices.close('rats') self.es.transport.perform_request('POST', '/rats/_ccr/unfollow') + def create_enrich_stats(self): + self.create_enrich_source_index() + self.create_enrich_policy() + self.execute_enrich_policy() + self.create_enrich_ingest_pipeline() + self.ingest_and_enrich_doc() + + def create_enrich_source_index(self): + file = os.path.join(self.beat_path, 'module', 'elasticsearch', 'enrich', + '_meta', 'test', 
'source_doc.json') + + source_doc = {} + with open(file, 'r') as f: + source_doc = json.load(f) + + self.es.index(index='users', id='1', doc_type='_doc', body=source_doc, refresh='wait_for') + + def create_enrich_policy(self): + file = os.path.join(self.beat_path, 'module', 'elasticsearch', 'enrich', + '_meta', 'test', 'policy.json') + + policy = {} + with open(file, 'r') as f: + policy = json.load(f) + + policy_url = '/_enrich/policy/users-policy' + self.es.transport.perform_request(method='PUT', url=policy_url, body=policy) + + def execute_enrich_policy(self): + execute_url = '/_enrich/policy/users-policy/_execute' + self.es.transport.perform_request('POST', execute_url) + + def create_enrich_ingest_pipeline(self): + file = os.path.join(self.beat_path, 'module', 'elasticsearch', 'enrich', + '_meta', 'test', 'ingest_pipeline.json') + + pipeline = {} + with open(file, 'r') as f: + pipeline = json.load(f) + + self.es.ingest.put_pipeline(id='user_lookup', body=pipeline) + + def ingest_and_enrich_doc(self): + file = os.path.join(self.beat_path, 'module', 'elasticsearch', 'enrich', + '_meta', 'test', 'target_doc.json') + + target_doc = {} + with open(file, 'r') as f: + target_doc = json.load(f) + + self.es.index(index='my_index', id='my_id', doc_type='_doc', body=target_doc, pipeline='user_lookup') + + def delete_enrich_policy(self): + exists = self.es.indices.exists('my_index') + if not exists: + return + + self.es.transport.perform_request('DELETE', '/_enrich/policy/users-policy') + + def delete_enrich_ingest_pipeline(self): + exists = self.es.indices.exists('my_index') + if not exists: + return + + self.es.ingest.delete_pipeline(id='user_lookup') + def start_trial(self): # Check if trial is already enabled response = self.es.transport.perform_request('GET', self.license_url) @@ -180,16 +300,39 @@ class Test(metricbeat.BaseTest): e = sys.exc_info()[0] print "Trial already enabled. Error: {}".format(e) - def check_skip(self, metricset): - if metricset != "ccr": + def start_basic(self): + # Check if basic license is already enabled + response = self.es.transport.perform_request('GET', self.license_url) + if response["license"]["type"] == "basic": return - es_version = self.get_version() - if es_version["major"] <= 6 and es_version["minor"] < 5: - # Skip CCR metricset system test for Elasticsearch versions < 6.5.0 as CCR Stats - # API endpoint is not available + try: + self.es.transport.perform_request('POST', self.license_url + "/start_basic?acknowledge=true") + except: + e = sys.exc_info()[0] + print "Basic license already enabled. 
Error: {}".format(e) + + def check_skip(self, metricset): + if metricset == 'ccr' and not self.is_ccr_available(): raise SkipTest("elasticsearch/ccr metricset system test only valid with Elasticsearch versions >= 6.5.0") + if metricset == 'enrich' and not self.is_enrich_available(): + raise SkipTest("elasticsearch/enrich metricset system test only valid with Elasticsearch versions >= 7.5.0") + + def is_ccr_available(self): + es_version = self.get_version() + major = es_version["major"] + minor = es_version["minor"] + + return major > 6 or (major == 6 and minor >= 5) + + def is_enrich_available(self): + es_version = self.get_version() + major = es_version["major"] + minor = es_version["minor"] + + return major > 7 or (major == 7 and minor >= 5) + def get_version(self): es_info = self.es.info() return semver.parse(es_info["version"]["number"]) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/envoyproxy/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/envoyproxy/_meta/Dockerfile index 1de4e17c..3fe40454 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/envoyproxy/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/envoyproxy/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM envoyproxy/envoy:v1.7.0 +ARG ENVOYPROXY_VERSION +FROM envoyproxy/envoy:${ENVOYPROXY_VERSION} RUN apt-get update COPY ./envoy.json /etc/envoy.json EXPOSE 10000 9901 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/Dockerfile index cf7e1876..ea136ead 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/_meta/Dockerfile @@ -1,4 +1,4 @@ -ARG ETCD_VERSION=v3.3.10 -FROM quay.io/coreos/etcd:$ETCD_VERSION +ARG ETCD_VERSION +FROM quay.io/coreos/etcd:v$ETCD_VERSION HEALTHCHECK --interval=1s --retries=90 CMD wget -O - http://localhost:2379/health | grep true CMD ["/usr/local/bin/etcd", "--advertise-client-urls", "http://0.0.0.0:2379,http://0.0.0.0:4001", "--listen-client-urls", "http://0.0.0.0:2379,http://0.0.0.0:4001"] diff --git a/vendor/github.com/elastic/beats/metricbeat/module/etcd/test_etcd.py b/vendor/github.com/elastic/beats/metricbeat/module/etcd/test_etcd.py index 33c5420e..f5630e08 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/etcd/test_etcd.py +++ b/vendor/github.com/elastic/beats/metricbeat/module/etcd/test_etcd.py @@ -27,4 +27,4 @@ class Test(metricbeat.BaseTest): class Test_3_2(Test): - COMPOSE_SERVICES = ['etcd_3_2'] + COMPOSE_ENV = {'ETCD_VERSION': '3.2.25'} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile index 995a9a94..a63a3b0e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM haproxy:1.8 +ARG HAPROXY_VERSION +FROM haproxy:${HAPROXY_VERSION} RUN apt-get update && apt-get install -y netcat HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 14567 COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.6 b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.6 deleted file mode 100644 index 911edb33..00000000 --- 
a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.6 +++ /dev/null @@ -1,5 +0,0 @@ -FROM haproxy:1.6 -RUN apt-get update && apt-get install -y netcat -HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 14567 -COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg -EXPOSE 14567 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.7 b/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.7 deleted file mode 100644 index 29867269..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/haproxy/_meta/Dockerfile.1.7 +++ /dev/null @@ -1,5 +0,0 @@ -FROM haproxy:1.7 -RUN apt-get update && apt-get install -y netcat -HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 14567 -COPY ./haproxy.conf /usr/local/etc/haproxy/haproxy.cfg -EXPOSE 14567 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile index 9eae5455..d79cf464 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/Dockerfile @@ -3,7 +3,7 @@ FROM java:8-jdk-alpine ENV TOMCAT_VERSION 7.0.86 ENV TC apache-tomcat-${TOMCAT_VERSION} -ENV JOLOKIA_VERSION 1.5.0 +ARG JOLOKIA_VERSION RUN apk update && \ apk add curl openssl ca-certificates bash @@ -11,14 +11,18 @@ RUN apk update && \ HEALTHCHECK --interval=1s --retries=90 CMD curl -f localhost:8778/jolokia/ EXPOSE 8778 +COPY jolokia-${JOLOKIA_VERSION}.sum jolokia.sum + # Prepare a server where jolokia runs in proxy mode RUN wget http://archive.apache.org/dist/tomcat/tomcat-7/v${TOMCAT_VERSION}/bin/${TC}.tar.gz && \ tar xzf ${TC}.tar.gz -C /usr && \ rm ${TC}.tar.gz && \ - sed -i -e 's/Connector port="8080"/Connector port="8778"/g' /usr/${TC}/conf/server.xml && \ - curl -J -L -s -f -o - https://github.com/kadwanev/retry/releases/download/1.0.1/retry-1.0.1.tar.gz | tar xfz - -C /usr/local/bin && \ - retry --min 1 --max 180 -- curl -J -L -s -f --show-error -o /usr/${TC}/webapps/jolokia.war \ - "https://oss.sonatype.org/content/repositories/releases/org/jolokia/jolokia-war/${JOLOKIA_VERSION}/jolokia-war-${JOLOKIA_VERSION}.war" + sed -i -e 's/Connector port="8080"/Connector port="8778"/g' /usr/${TC}/conf/server.xml +RUN curl -J -L -s -f -o - https://github.com/kadwanev/retry/releases/download/1.0.1/retry-1.0.1.tar.gz | tar xfz - -C /usr/local/bin +RUN retry --min 1 --max 180 -- curl -J -L -s -f --show-error -O \ + "https://repo1.maven.org/maven2/org/jolokia/jolokia-war/${JOLOKIA_VERSION}/jolokia-war-${JOLOKIA_VERSION}.war" && \ + sha256sum -c jolokia.sum && \ + mv jolokia-war-${JOLOKIA_VERSION}.war /usr/${TC}/webapps/jolokia.war && rm jolokia.sum # JMX setting to request authentication with remote connection RUN echo "monitorRole QED" >> /usr/lib/jvm/java-1.8-openjdk/jre/lib/management/jmxremote.password && \ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/jolokia-1.5.0.sum b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/jolokia-1.5.0.sum new file mode 100644 index 00000000..b163700f --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/jolokia/_meta/jolokia-1.5.0.sum @@ -0,0 +1 @@ +7212d3f5637dea0863356454dd37b7ae4fc45b6d7e0e929b42312d215ce6f2b8 jolokia-war-1.5.0.war diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/Dockerfile 
b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/Dockerfile index 6ecbe8da..ebfdbf29 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/kafka/_meta/Dockerfile @@ -1,6 +1,6 @@ FROM debian:stretch -ARG KAFKA_VERSION=2.1.1 +ARG KAFKA_VERSION ENV KAFKA_HOME /kafka diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile index 31d4b757..850f34e6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/_meta/Dockerfile @@ -1,3 +1,4 @@ -FROM docker.elastic.co/kibana/kibana:7.3.0 +ARG KIBANA_VERSION +FROM docker.elastic.co/kibana/kibana:${KIBANA_VERSION} HEALTHCHECK --interval=1s --retries=300 --start-period=60s CMD python -c 'import urllib, json; response = urllib.urlopen("http://myelastic:changeme@localhost:5601/api/status"); data = json.loads(response.read()); exit(1) if data["status"]["overall"]["state"] != "green" else exit(0);' diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kibana/stats/stats.go b/vendor/github.com/elastic/beats/metricbeat/module/kibana/stats/stats.go index 139fa00a..d84c1dfc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kibana/stats/stats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kibana/stats/stats.go @@ -67,60 +67,8 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - statsHTTP, err := helper.NewHTTP(base) - if err != nil { - return nil, err - } - - kibanaVersion, err := kibana.GetVersion(statsHTTP, statsPath) - if err != nil { - return nil, err - } - - isStatsAPIAvailable := kibana.IsStatsAPIAvailable(kibanaVersion) - if err != nil { - return nil, err - } - - if !isStatsAPIAvailable { - const errorMsg = "The %v metricset is only supported with Kibana >= %v. You are currently running Kibana %v" - return nil, fmt.Errorf(errorMsg, base.FullyQualifiedName(), kibana.StatsAPIAvailableVersion, kibanaVersion) - } - - if ms.XPackEnabled { - // Use legacy API response so we can passthru usage as-is - statsHTTP.SetURI(statsHTTP.GetURI() + "&legacy=true") - } - - var settingsHTTP *helper.HTTP - if ms.XPackEnabled { - isSettingsAPIAvailable := kibana.IsSettingsAPIAvailable(kibanaVersion) - if err != nil { - return nil, err - } - - if !isSettingsAPIAvailable { - const errorMsg = "The %v metricset with X-Pack enabled is only supported with Kibana >= %v. You are currently running Kibana %v" - return nil, fmt.Errorf(errorMsg, ms.FullyQualifiedName(), kibana.SettingsAPIAvailableVersion, kibanaVersion) - } - - settingsHTTP, err = helper.NewHTTP(base) - if err != nil { - return nil, err - } - - // HACK! We need to do this because there might be a basepath involved, so we - // only search/replace the actual API paths - settingsURI := strings.Replace(statsHTTP.GetURI(), statsPath, settingsPath, 1) - settingsHTTP.SetURI(settingsURI) - } - return &MetricSet{ - ms, - statsHTTP, - settingsHTTP, - time.Time{}, - kibana.IsUsageExcludable(kibanaVersion), + MetricSet: ms, }, nil } @@ -128,9 +76,18 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // It returns the event which is then forward to the output. In case of an error, a // descriptive error must be returned. 
func (m *MetricSet) Fetch(r mb.ReporterV2) error { + err := m.init() + if err != nil { + if m.XPackEnabled { + m.Logger().Error(err) + return nil + } + return err + } + now := time.Now() - err := m.fetchStats(r, now) + err = m.fetchStats(r, now) if err != nil { if m.XPackEnabled { m.Logger().Error(err) @@ -146,6 +103,53 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { return nil } +func (m *MetricSet) init() error { + statsHTTP, err := helper.NewHTTP(m.BaseMetricSet) + if err != nil { + return err + } + + kibanaVersion, err := kibana.GetVersion(statsHTTP, statsPath) + if err != nil { + return err + } + + isStatsAPIAvailable := kibana.IsStatsAPIAvailable(kibanaVersion) + if !isStatsAPIAvailable { + const errorMsg = "the %v metricset is only supported with Kibana >= %v. You are currently running Kibana %v" + return fmt.Errorf(errorMsg, m.FullyQualifiedName(), kibana.StatsAPIAvailableVersion, kibanaVersion) + } + if m.XPackEnabled { + // Use legacy API response so we can passthru usage as-is + statsHTTP.SetURI(statsHTTP.GetURI() + "&legacy=true") + } + + var settingsHTTP *helper.HTTP + if m.XPackEnabled { + isSettingsAPIAvailable := kibana.IsSettingsAPIAvailable(kibanaVersion) + if !isSettingsAPIAvailable { + const errorMsg = "the %v metricset with X-Pack enabled is only supported with Kibana >= %v. You are currently running Kibana %v" + return fmt.Errorf(errorMsg, m.FullyQualifiedName(), kibana.SettingsAPIAvailableVersion, kibanaVersion) + } + + settingsHTTP, err = helper.NewHTTP(m.BaseMetricSet) + if err != nil { + return err + } + + // HACK! We need to do this because there might be a basepath involved, so we + // only search/replace the actual API paths + settingsURI := strings.Replace(statsHTTP.GetURI(), statsPath, settingsPath, 1) + settingsHTTP.SetURI(settingsURI) + } + + m.statsHTTP = statsHTTP + m.settingsHTTP = settingsHTTP + m.isUsageExcludable = kibana.IsUsageExcludable(kibanaVersion) + + return nil +} + func (m *MetricSet) fetchStats(r mb.ReporterV2, now time.Time) error { var content []byte diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml index 1a267f8d..c2dc2ddd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.reference.yml @@ -19,6 +19,8 @@ add_metadata: true # When used outside the cluster: #host: node_name + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster #kube_config: ~/.kube/config # State metrics from kube-state-metrics service: @@ -32,6 +34,7 @@ - state_pod - state_container - state_cronjob + - state_resourcequota period: 10s hosts: ["kube-state-metrics:8080"] @@ -39,6 +42,8 @@ add_metadata: true # When used outside the cluster: #host: node_name + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster #kube_config: ~/.kube/config # Kubernetes events diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml index 4f90ff89..f3ee956d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/config.yml @@ -20,6 +20,8 @@ 
#annotations.dedot: true # When used outside the cluster: #host: node_name + # If kube_config is not set, KUBECONFIG environment variable will be checked + # and if not present it will fall back to InCluster #kube_config: ~/.kube/config # State metrics from kube-state-metrics service: @@ -32,6 +34,7 @@ # - state_pod # - state_container # - state_cronjob +# - state_resourcequota # period: 10s # hosts: ["kube-state-metrics:8080"] # add_metadata: true diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/01_playground/playground-ubuntu.yaml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/01_playground/playground-ubuntu.yaml new file mode 100644 index 00000000..254c2080 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/01_playground/playground-ubuntu.yaml @@ -0,0 +1,79 @@ + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: metricbeat-kube + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metricbeat +rules: +- apiGroups: + - "" + resources: + - nodes/stats + - nodes/metrics + - nodes/log + - nodes/spec + - nodes/proxy + verbs: + - get +- apiGroups: [""] + resources: ["pods", "nodes"] + verbs: ["get", "watch", "list"] +- apiGroups: ["extensions", "apps"] + resources: ["deployments", "replicasets", "statefulsets"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: metricbeat-kube +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: metricbeat +subjects: +- kind: ServiceAccount + name: metricbeat-kube + namespace: default +--- + +apiVersion: v1 +kind: Pod +metadata: + name: playground +spec: + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + serviceAccountName: metricbeat-kube + containers: + - name: ubuntu + image: ubuntu:latest + command: [ "sleep" ] + args: [ "infinity" ] + volumeMounts: + - name: data + mountPath: /usr/share/filebeat/data + - name: varlibdockercontainers + mountPath: /var/lib/docker/containers + readOnly: true + - name: varlog + mountPath: /var/log + volumes: + - name: varlibdockercontainers + hostPath: + path: /var/lib/docker/containers + - name: varlog + hostPath: + path: /var/log + - name: data + hostPath: + path: /var/lib/filebeat-data + type: DirectoryOrCreate + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/02_objects/cronjob.yaml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/02_objects/cronjob.yaml new file mode 100644 index 00000000..eba346cf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/02_objects/cronjob.yaml @@ -0,0 +1,22 @@ +# Cronjob that will execute each minute. 
+# It will print a message and sleep (reporting being active) for 5 seconds + +apiVersion: batch/v1beta1 +kind: CronJob +metadata: + name: mycronjob +spec: + schedule: "*/1 * * * *" + jobTemplate: + spec: + template: + spec: + containers: + - name: mycron-container + image: alpine + imagePullPolicy: IfNotPresent + command: ['sh', '-c', 'echo elastic world ; sleep 5'] + + restartPolicy: OnFailure + terminationGracePeriodSeconds: 0 + concurrencyPolicy: Allow \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/02_objects/resourcequota.yaml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/02_objects/resourcequota.yaml new file mode 100644 index 00000000..a16f0f46 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/02_objects/resourcequota.yaml @@ -0,0 +1,97 @@ +# ResourceQuota tests + +# This example will create a namespace and apply some resource restrictions on it, then +# some of those restrictions will be challenged + +apiVersion: v1 +kind: Namespace +metadata: + name: rqtest + +--- + +apiVersion: v1 +kind: ResourceQuota +metadata: + namespace: rqtest + name: resources +spec: + hard: + requests.cpu: 1 + requests.memory: 1Gi + limits.cpu: 2 + limits.memory: 2Gi + +--- + +apiVersion: v1 +kind: ResourceQuota +metadata: + namespace: rqtest + name: objects +spec: + hard: + pods: 3 + configmaps: 1 + persistentvolumeclaims: 0 + replicationcontrollers: 1 + secrets: 1 + services: 2 + services.loadbalancers: 1 + +--- + +# See persistent volume claims above. They are not allowed (count 0), which means +# that this request will fail +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + namespace: rqtest + name: willfail +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi + +--- + +# ResourceQuota/objects above allows only one Service of type LoadBalancer, +# so this call should succeed, but subsequent creations will fail. +# +# It doesn't matter if the LoadBalancer is really created, what counts is the API +# object creation. You can try this in your local kubernetes environment. + +apiVersion: v1 +kind: Service +metadata: + name: willsucceed + namespace: rqtest +spec: + selector: + app: something + ports: + - protocol: TCP + port: 80 + targetPort: 8080 + type: LoadBalancer + +--- + +# As a continuation of the above case, this Service of type LoadBalancer will exceed +# the assigned quota for the namespace and will fail + +apiVersion: v1 +kind: Service +metadata: + name: willfail + namespace: rqtest +spec: + selector: + app: something + ports: + - protocol: TCP + port: 80 + targetPort: 8080 + type: LoadBalancer diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/README.md b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/README.md new file mode 100644 index 00000000..f2a13a07 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/README.md @@ -0,0 +1,139 @@ +# Testing on OSX + +A previous document regarding testing metricbeat on OSX existed; it has been moved to [./darwin.md](darwin.md) + +# Testing on Linux + +## Create Elasticsearch + Kibana instances + +You can rely on your EK pair of choice as long as it is addressable from the kubernetes cluster.
+ +To boot a docker-based EK pair, the following should suffice; be sure to replace the image tags according to your target version: + +```bash +# Run Elasticsearch +docker run --name es -d -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.3.0 + +# Run Kibana +docker run --name kibana -d --link es:elasticsearch -p 5601:5601 \ + docker.elastic.co/kibana/kibana:7.3.0 +``` + +## Prepare assets + +Generate the binary and other assets for the beats branch you want to test, then copy them to a folder laid out to run: + +- create folder `/home/myuser/playground/metricbeat` +- copy the `metricbeat` binary and `metricbeat.yml` to that folder +- recursively copy `modules.d` from the source repo to the destination folder +- recursively copy `_meta/kibana.generated/{version}/dashboard/` to `kibana/{version}/dashboard/` + +Configure `metricbeat.yml` and the modules; do not use `localhost` to point to Elasticsearch and Kibana, but the public IP of the host (one that is routable from minikube). + + +## Create minikube cluster + +Follow the instructions at https://kubernetes.io/docs/tasks/tools/install-minikube/ and start the minikube cluster. + +Usually the kubernetes version that minikube creates should be fine, but you can force one by using the `--kubernetes-version` flag. + +``` +minikube start --kubernetes-version v1.15.0 +``` + +## Playground Pod + +A playground Pod hosts the Ubuntu container that metricbeat will run in. A working playground is provided under the [./01_playground](./01_playground) subfolder. + +This file contains: + +- a service account. +- a cluster role; if you are consuming kubernetes API resources, make sure that the APIGroup/Version, Resource, and verb are listed here. +- a cluster role binding that links the service account to the cluster role. +- an Ubuntu Pod: + - uses `hostNetwork`, so it can reach ports at the host instance (for instance, the kubelet) + - executes `sleep infinity`, so that it never exits but does nothing + - in order to be useful for filebeat, it mounts `/var/log/`, `/var/lib/docker/containers` and `/var/lib/filebeat-data` + +At the time of writing, the Pod has only been used for 2 tests by the same person (hello), so there is a lot of room for improvement. + +To deploy the pod _as is_ you need to: + +``` +kubectl apply -f https://raw.githubusercontent.com/elastic/beats/master/metricbeat/module/kubernetes/_meta/test/docs/01_playground/playground-ubuntu.yaml +``` + +## Test + + +The binary and assets prepared above need to be copied to the playground pod. Use `kubectl` to copy the directory; further iterations might only need to copy the assets that changed. A sketch of the layout-and-copy steps is shown below.
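This is a minimal, illustrative sketch of the layout steps from the "Prepare assets" section above; the `BEATS_SRC` path and the `7` version folder are assumptions, so adjust them to your checkout and stack version:

```bash
# Assumed source checkout; adjust to wherever you built metricbeat.
BEATS_SRC="$GOPATH/src/github.com/elastic/beats/metricbeat"
DEST=/home/myuser/playground/metricbeat

# Lay out the folder as described under "Prepare assets".
mkdir -p "$DEST/kibana/7"
cp "$BEATS_SRC/metricbeat" "$BEATS_SRC/metricbeat.yml" "$DEST/"
cp -r "$BEATS_SRC/modules.d" "$DEST/modules.d"
cp -r "$BEATS_SRC/_meta/kibana.generated/7/dashboard" "$DEST/kibana/7/dashboard"
```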
+ +Replace the source folder and the Pod namespace/name: + +``` +kubectl cp --no-preserve /home/myuser/playground/metricbeat playground:/metricbeat +``` + +Now you can exec into the container and launch metricbeat: + +``` +kubectl exec -ti playground /bin/bash + +cd /metricbeat +./metricbeat -c metricbeat.yml -e +``` + +### Test Iterations + +When copying new assets to an already used playground Pod, you will most probably run into an issue: +``` +tar: metricbeat/kibana/7/dashboard/Metricbeat-aerospike-overview.json: Cannot open: Not a directory +tar: metricbeat/kibana/7/dashboard/Metricbeat-apache-overview.json: Cannot open: Not a directory +tar: metricbeat/kibana/7/dashboard/Metricbeat-ceph-overview.json: Cannot open: Not a directory +tar: metricbeat/kibana/7/dashboard/Metricbeat-consul-overview.json: Cannot open: Not a directory +``` + +I haven't looked much into this; there seems to be something going on when kubernetes untars the bundled directory. As a workaround, delete the metricbeat directory on the Pod before copying a new set of assets. + +# Testing kubernetes loads + +## Kube-state-metrics + +Kube-state-metrics needs to be deployed for all the `state_`-prefixed metricsets of the kubernetes module. The YAML manifests can be found at the [upstream project](https://github.com/kubernetes/kube-state-metrics/tree/master/kubernetes) + +Installing kube-state-metrics can be done either by applying the YAMLs one by one from their remote location or by cloning the repository and installing the folder contents. Be sure to check out the target release version before installing. + +``` +git clone git@github.com:kubernetes/kube-state-metrics.git +cd kube-state-metrics/ + +git checkout -b release-1.7 origin/release-1.7 +kubectl apply -f kubernetes/ +``` + + +## Core components test + +Testing core components (kubelet, apiserver, controller manager, scheduler) requires a diverse range of objects to be created. Using [Sonobuoy](https://github.com/heptio/sonobuoy) is the fastest path for testing, getting metrics, and filling dashboards. + +Refer to the Sonobuoy documentation; at the time of this writing, installing and running can be achieved with a couple of commands: + +``` +go get -u -v github.com/heptio/sonobuoy +sonobuoy run --wait +``` + +## Regular kubernetes components + +Under [./02_objects](./02_objects) you can find example kubernetes objects used during development and testing. +For now only a CronJob example is added; add your kubernetes object of choice under that folder if you think it will be useful to other people when developing, testing, and troubleshooting. + +# Going further + +- All improvements are welcome. +- Different ways to test are welcome and can live here side by side. +- Using kind seems to be a lot more lightweight (see the sketch below). +- Probably some steps above can be tackled using [telepresence](https://www.telepresence.io/). +- Probably Sonobuoy can be replaced with kubernetes e2e tests.
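Since kind is suggested in the list above as a lighter-weight alternative to minikube, here is an untested sketch of what that flow could look like; the cluster name is arbitrary and the manifest path assumes you run it from this docs folder:

```bash
# kind boots a single-node kubernetes cluster inside a Docker container.
kind create cluster --name beats-playground
kubectl cluster-info --context kind-beats-playground

# Deploy the playground Pod into it, same as with minikube.
kubectl apply -f 01_playground/playground-ubuntu.yaml
```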
+ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/README.md b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/darwin.md similarity index 100% rename from vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/README.md rename to vendor/github.com/elastic/beats/metricbeat/module/kubernetes/_meta/test/docs/darwin.md diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/apiserver/metricset.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/apiserver/metricset.go index ca55ab93..81a4c5e4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/apiserver/metricset.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/apiserver/metricset.go @@ -18,6 +18,8 @@ package apiserver import ( + "github.com/pkg/errors" + "github.com/elastic/beats/metricbeat/helper/prometheus" "github.com/elastic/beats/metricbeat/mb" ) @@ -29,7 +31,7 @@ type metricset struct { prometheusMappings *prometheus.MetricsMapping } -var _ mb.ReportingMetricSetV2 = (*metricset)(nil) +var _ mb.ReportingMetricSetV2Error = (*metricset)(nil) // getMetricsetFactory as required by` mb.Registry.MustAddMetricSet` func getMetricsetFactory(prometheusMappings *prometheus.MetricsMapping) mb.MetricSetFactory { @@ -47,11 +49,10 @@ func getMetricsetFactory(prometheusMappings *prometheus.MetricsMapping) mb.Metri } // Fetch as expected by `mb.EventFetcher` -func (m *metricset) Fetch(reporter mb.ReporterV2) { +func (m *metricset) Fetch(reporter mb.ReporterV2) error { events, err := m.prometheusClient.GetProcessedMetrics(m.prometheusMappings) if err != nil { - reporter.Error(err) - return + return errors.Wrap(err, "error getting metrics") } rcPost14 := false @@ -88,4 +89,6 @@ func (m *metricset) Fetch(reporter mb.ReporterV2) { Namespace: m.prometheusMappings.Namespace, }) } + + return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go index ef9bb35d..acbf4371 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/container/container.go @@ -78,31 +78,30 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
-func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { m.enricher.Start() body, err := m.http.FetchContent() if err != nil { - err = errors.Wrap(err, "error doing HTTP request to fetch 'container' Metricset data") - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error doing HTTP request to fetch 'container' Metricset data") + } events, err := eventMapping(body, util.PerfMetrics) if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error in mapping") } m.enricher.Enrich(events) for _, e := range events { - reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + isOpen := reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + if !isOpen { + return nil + } } - return + return nil } // Close stops this metricset diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml index 214e4fb4..603445f3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/event/_meta/fields.yml @@ -21,7 +21,7 @@ description: > Timestamp of last occurrence of event - name: message - type: keyword + type: text description: > Message recorded for the given event copy_to: message diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/fields.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/fields.go index 563ceae7..1115f17f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/fields.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/fields.go @@ -32,5 +32,5 @@ func init() { // AssetKubernetes returns asset data. // This is the base64 encoded gzipped contents of ../metricbeat/module/kubernetes. 
func AssetKubernetes() string { - return "eJzsXU1v4zjSvudXEH3KvMj48GKxhxwWmMnMYIPu6QnyMX1YLAxaKtucSKSapJL2/PoFqU9LJEVZtJNOpEOjY9lVD6uKxWKRLP6IHmF3iR7zFXAKEsQZQpLIBC7Rh4/1hx/OEIpBRJxkkjB6if51hhBCzRdQCpKTSP2aQwJYwCXa4DOEBEhJ6EZcov98ECL5cIE+bKXMPvxXvdsyLpcRo2uyuURrnAg4Q2hNIInFpWbwI6I4hQ489chdpjhwlmflJwZ46rmma8ZTrD5GmMZISCyJkCQSiK1RxmKBUkzxBmK02rX4LEoKbTRtRDgjAvgT8PqNCZQDWEd+P91co4JgS5TVsy/S6ulCa8Pj8DUHIRdRQoDKva9UOB9h98x43HnnQKueK00PwTeIcqXXipFwouAgWM4jCIfjtqAMMTLS7gIQ+eqYGGzkezAiloUHgDRZdB4luZDALzRTkeEILmrp/ODE9QR8FQ7Wv+/vb1CPZM8yWRxQFJpnj2SfJ5VA5VIxCq+GEoNmgXosulhivlvynIaD8QXkFjiSW6h4oFyAQDHfoS6jLphHQrvcJiD5SGisvGtJfUAlacZoWB9VkURbTONEeamWUJxour57IhLl1DVJtGaVZjzcxBNwQVhA0ygJ1ij6zexC0JLbG9wmQqg6iYlwl3kKcssC2qPumAaivUYzEdAM6xZ3qVZsM84iEMLI0WSIpvG+TS/K8oWAqPe+ohmzfJV0/V6vIVc3D0hAxGjcRdZwSiFlfKeGdRIDlYvVronM+nwTRjeGl0VcdolsP95D9bP6EiIUVTxLDEMQnwiXOU5OibBkOQRwHYsFy4AuIpb3vN8gtD3Wn/N0BVx5XEUQrUkC9RcYt6tRSMwlxAGM5q4wGCQIjUC7mNK4Kx7GDqAmAsGsvx5Xc66j/UUuFhnwCKgkCSz+z9pCtvoLIpMCihfLMXKo+nwFAqUk4qzsTqiBY9eJqRkiTyfqx40rytM8wZI8ATKxckGbbrwVNE1Jj1AV/UEggvwNRc8OqekxoBWCUWptQXZpNYRD2sM4UsUtmMfQsCLvwCAyRgW8qHoLCGP02wd9fAW3UXpruA80hIpLKGZS/aA/vE1VDTOONEUaZOHib+VtGWqrxAfCAhmyLJ0mhwvyAkYLxtxNm1mCJdBod4glm7QlKoIXykQVguJvUgRO7TFpEFI4E6ox0fGCWeXRI8iTDjkla7QlQrINxykqQNjB+oYSY1BUNAtN+irvOJFDg4W2A+HiQz8wL6DHBrW/JqOcc+XHpsvumq4TstlKD1NndMNzSgndBJ2qNP4z0oOW+jUqGbmzyiCjeFHIPYgnb5L+pTYFwlJzMbLHeUzkAp5sihjLXtNDmp65vQVDDgoaxAF5ViS7zJuxhkpM6LQ1jpZ0a3pBljj0zHIpSWpO5cZYdl8MJGzuFEHUI9hKr3iP4kMZypsHlAu8AYMgbM1uQ9G/tfZDEyAX1b1GMm4iPEx8iEGbicEpd9lYfEn1DMi3/VzVRqekfsU4lKKnmFoHrD20mDIlFhvoQcCeYAujgHiAYQ2LxbDIjGNSg0pEOIF4uU4Ytn2xmnKUs5wQbVDSxQLhiqb6m611WkgyiRONHeEkYRGWeJWA+p2zsQlJifz+WhvDmlCIC/h19r1xg+fqE6tEEFmjnOrfQmxewEvYxj9/PNCqT2yjwvA1G+mM8BMmCTYnoaY7JNtMGPn0vKHpNPLXtZZO3VQU4QxHRO5U6GumXnvU8ptvXzqFJftLRjm7ty8V7dL9hUKUJ7CvVEwb283ROwo4iN1rG2j6ibU5rYUQDu6QIxQqxcgHkMUuwwPSpmEAtL+GFSx19D4cddcCB5bhjhdKvy6BFGKwNveVx5W/t9CPDC0t+kevPrr0afOEALM0CHuM2ZYQ721TQG+qj9ze3bl7SAX4mfFHQjcC7GmwtyCPL0UzkQDpJ5cMb2CN88SQSByTHjQjavJWig2y8KlHTfwX4yfCo3lZUdW9hzG5DrjP5z3MKG4Zk3oni9gJCenoycX7CHbMUmqH3+99DmaWUBl5v9xc7ARzjAfD7KKd2ecsSYAXhx8mZfivamLlUQpnfn8F0jfD/yKbUE+5L/3UG11PvMFV/RuO3Wecgt8+6r8ZDcj3mq45FpLnkcw59InP23mL5szbeeftvPN2Xo9mzNt5zUDm7bzeGOftvPN23nk77/TtvIYoc+wG32fGH7/mkJsjzkOGPgUaVMBZbLqbPpx/KgjWu+vKwdwVS+R0TSgR2yDhxENNzIc1juMQNvyl0osiOGDIMWRyG5SnpjjYfSQnQfprw7e9h1lTN0/MWAyLSE3ZI8nM8+tDDBeeSKQjiZAxsF64qCi7DHYLOJHbEDvDG+Y1VWROBR1jV76bU4HHsljlz+5mbynJ3sjaJwGOgS+IWKZYSEtOZsVYArgb6A0dW98259a1rolAHR5nXTR6v+pZl/2IlNX9FtrFN4r9r1XWCtQ4pPtG/UZusUSYA9oABY5lUS2k2i1c+tU9DoSqia0S7sdu7RI0Yrur3cAsunZK+6oYXhUXxCFiPBaF3GvjkySF4rMMc0miPMG8EALaYoFYpLegxwaE+pcSp5kBZd+ZuNJ+a8KFXJasqKVix/jtvfcVQNVOzQM1PNRnXatqH/c4OiDFYgBPkwsRvbU4e37LCeL3glRpDBA3FQLIE1CDRCKW7ZaSmUA0wxoWndnegehuNSVfcLUhditvHMj9fpfV6+xujilIHOO9tLbd8gf0UVBCWAgWEe1ononcOnXi6kvmXjl+kK/9EAfczf8gVx/wWK7Y6weaAWHULfmj5phLzm6eur5OWMaaJCIUPW9JtC297jMWzaBjRFOlwpfBy4b8WZYNaQvEnXnPScDVjAdKvuaAdH6YrImKEVgLiCE/UGdCIVkvE0IfA4K5/YQ4ZByEQlOWlLE5BEKfWPIE8dKA8Vh+oeJpkovLQ+CMhLecn26u66IzpfU41BW2+pDi/VhWIBpgHNZ50JbzcDA9Xn+tKI8QfdgO+3D9ywDv9gx0SgDfOlWmJw3zgbL5QJnlCX2g7LOyt+/7LNm8wdz0zBvMO0+4DebzPuIO4HkfsRn4vI/YsY+YglR2E8xf829v2vhuIQLypFO1Nlp1Qplz05KUJ2ZfPN9sfOpszdtWyD3HVKREytejk3ujTupM9Lxpv3g8pfnbvF9/pIDmrfrN0xPOe9il31prtpwG7oI6xTHuBtXrOMDd4LEd4q5jmpxaMziH+G2SqgjwSAfy7WPCMIMhJsizhyPfFIlPT0fjUinXqY54x48ayHPkQO9ZjB5jCxrj7N6hCM0jUD1Z3TtmMyWHnbH4u0xhzzPS4plnpM3zPSnku5uRvos1o1eyStKD9Roro4ypuPeuquypIbUuhCK6lVD8yusFXh+bl4I6sF9lj5prDYXrZgcXHHof6cC97mJvcmfRcPnWVw0LsTz31g7t04Y3vqxcCKQ+K68kog8JDoglwxtYHm31sgDlvZK6PAUa+zpqq0rDt92UWXvr2Iim5
XFr62DJk/pwj6EuycF7522lTpo8chxkn7ypxElrZ3y3FskULj1yteC6JUimSm2fnqvEx5gTLsPFPZzHKj0Le4wr6+HoeG4PdkhBj1HlPAIjcxby8Czj4YA0oYSHTwEPf8MYU7zDWrrjMKseXbTDedLfp2BHkHIdY4t1hELkPOM/vkyHr3F6l+g4tECHv1b9wQ6UbhhZmCOMa/EvyTG6IMfhujQU4zi4FEdYRfoV4RhbgiOUKr2Lb4wvvTFaRCYyfkU3DrIbU3A4VGHjkOPHnrU16uFwRyOvQcnJ9DFfQRGol+H6jkbGjPfA0JYnIDxHhmHx3+1odKPg3CqyndvU2Lr+YOhePDu6aeZhxedxw5odk/WWtZB+xgp96Jq1zppmxvWXU0I3wdT+uSCNWrRH3aTnCXFi7OoEOcIABlCexBrcjbGbRC9vIKItxHkyrVJqK3dQ05sTB+8gcdA7aHogm6EaqK3YJE+CNOyutFOEpYQ0k33SFc/aHwRkq7qrie6ckJkTMkOQ5oTMnJAZiWhOyMwJmTkhMydk5oSMEYOzCGDB31QC0AlhTPm/3mysW3TvsEES/h9OPzH9lcZIMgQ0bjXGPCx5wp6SmBiBxtEBu4im9QgzJldPzFi8yDioaYpCoGuGplNh3LAYNURRSdSBoJwoheBbkXK2upZ4qaBTBnh3BmMZHkl6iKfFdCYQXgNGD8fEpKnNSs+6jF/3xfoHl4Tqiae5e52Ya9cJiWUe7pB1tsXCvmPQ3IBuI1w7kevmaEbovCz8eoGeMZH6PxJ4Sih2X6YIOLafAzcX0fVE2SDUTMzy3YuY1AzUvh+LUAmbXrXfA8AUfAYLYvcqh7bBTNLfl0JD6LxGdaUrTSqlXXEstp8Yy37G0SNbry/Qr5zrE2E3eZJcoPq/5fu+atXDeK195YHOr1iaJSAhvmgkcYUpZfI2p5oF4xfojz9+/0iSBOIfyuYvjB1lzLmPwfryeguy7bxDQde283iU2q9uHnT9L1GwdOi9CmpPAqlkBzEyM9yXk+tsyMCmxYxDpFzBJfrn4h8hkNdYPAXqwj4Mb+qWTJvUT1qTrFDi8e+LGhJBucm72Dw/WNOgUuDL427UVu3ft52GjTijf7FVqJCmoBboJsHe+ot/UIOuSiQ9Gt2lwakMjHRaIWNZG9zcN3z4NCRQxhLSoVQfvYhU2DzhUpUmq1CQUrMi0VxO3TOTVuQpliIXGdC4dxTdFRztcW8nFCojImrWaqLb2K4ua21I9DumIfuz1YxFWyR6qf4KwjMWxuLZtZ/CQi4rCwiGQwldV5avYPCcmjsIfDsSe0V5kH0MOE4ItXMesrlfSgI1a7yWwOsupZFETN/JwFUYuMYkaWnC5z/uP+2TvRiyhO3SiTdWtFxjQzDIdC/DhsoP3t2tP358NCItuJgmJM2YlyUkwv6zwYNwVFwQoWs2MpaIQRDuKMI0abb0S4Ox2WNTcmxQn4sMoinH5kJhbIqLWPTWOvZKTwerxcsDWBYbLzMIDqrg0wfUPgcbyDmErIYdMjXjTnpMmtrrgsztfAc6lzyHC7TGiQA1L8/pI2XP1N5vclqOFE4jnZSa0Sj3+LicYcj5fusQ7vGm2HVV7PaRX/f8uirzNABqQn3VClNdUOp0pbBbMn+pSdxn2wnsodlnrZgXRV6idRcDay28HEV3+iz7sUyzrRs1RxpWyFHh6KP83ZJ0ZgFPGaFC1boi5juNeh/7h4kK2fWNkdmWCbk8DkdF2sZ25CA8jnE5WB5W/eeISy0dmOVay2211nIDNCZ0s1gsDl1iCYluWtxRzU/tMWhIrDU3E96LPtruzAxCzWBLgmVph+mu4IhTxzZU+xw2xN0YE2ofbPcvfi3nihlwdFv8cWeoGOI7q30pXO4+HA6V6r9jsbGVrvpwLKGVF77pa5RKTmi10/nEBpxe9ecs6Z77QHtZxRW4vEsoKa7zJNlV3Aal2R7dYJ0n4RxLRTGYZzHfd+iXkzTc+KkTwuUFh/XVjOgcMhZtf9BJyruyBV3rO4Gr2xNercODvN2R+0frEFrVPfZsziZE9AJur5fCcwGswDUO4Nh6brkaQoupW5/rC6u7VnIL7OtQc6VcD2DNqWYhIQ3l74pixa0CbEGcnmF7Ghqz0mgoNlovKNuXUefr97qPHZCL6l4j39z1e/PNewZyc2XRdoj0xqsBzpfM7T/zJXN+eIaLIz6xJE9DrUQWxIIEJFO2b/Wl8mcBzBqIzLd+lY9n/5tv/RoroPnWr+Z5l7d+PXje9XWCq7V+s1yo1YVyimvHiiCvBPO/AAAA//94OWY9" + return 
"eJzsXUtz4ziSvtevQNTJveHRYWNjDz5sxIx7JtZR3TVeP6YPGxtqiExJaFMACwDtUv/6DYAviARAUIRkl00eHNYr80NmAkgkEom/oCfYX6GnYgWcggTxCSFJZAZX6POX5s3PnxBKQSSc5JIweoX+6xNCCLVfQDuQnCTq1xwywAKu0AZ/QkiAlIRuxBX6389CZJ8v0eetlPnn/1OfbRmXy4TRNdlcoTXOBHxCaE0gS8WVZvAXRPEOOvDUI/e54sBZkVfvWOCp54auGd9h9TbCNEVCYkmEJIlAbI1ylgq0wxRvIEWrvcFnUVEw0ZiIcE4E8GfgzSc2UB5gHfn99fYGlQQNUdbPoUjrpwvNhMfhWwFCLpKMAJUHX6lxPsH+hfG085kHrXquNT0E3yEplF5rRsKLgoNgBU8gHo67kjKkyEq7C0AUq1NicJHvwUhYHh8A0mTRRZIVQgK/1ExFjhO4bKTzkxfXM/BVPFj//fBwi3oke5bJ0oii0Dx7JPs8qQQql4pRfDVUGDQL1GPRxZLy/ZIXNB6M30BugSO5hZoHKgQIlPI96jLqgnkitMttApIvhKZqdK2oD6hklzMad4yqSaItpmmmRilDKF403bF7IhI1qGuSaM1qzQQME8/ABWERTaMi2KDoN7MLQUvuYHKbCKHuJDbCXeY7kFsW0R51x7QQ7TWaiYhm2LS4S7Vmm3OWgBBWjjZDtM33Jr0kLxYCkt7nNc2UFausO+71GnJ9+4gEJIymXWQtpx3sGN+raZ2kQOVitW89sz7fjNGN5cPSL7tCrh8foPqb+hIiFNU8KwxDEJ8JlwXOzomwYjkEcJ2KBcuBLhJW9Ea/QWgHrL8WuxVwNeIqgmhNMmi+wLhbjUJiLiGNYDT3pcEgQWgCeoipjLvmYe0AaiEQzfqbebXg2ttfFGKRA0+ASpLB4t+cLWSrPyCxKaD8YDlGDnWfr0GgHUk4q7oTauG4dWJrhih2E/Xjx5UUuyLDkjwDsrHyQZtuvDU0TUnPUDX9QSCC/Allz46p6TGgFYJRajUg+7QaY0A6wDhSxQbMU2hYkfdgEDmjAl5VvSWEMfrtgz69gk2UwRruA42h4gqKnVTf6Y9vU3XDrDNNGQZZ+Pg7eTum2jrwgbBAlihLp8nxnLyI3oI1dmMyy7AEmuyPsWSbtkRN8FKZqEJQvial42TOSYOQ4plQg4mOF8yqSJ5AnnXKqVijLRGSbTjeoRKEG2yoKzEGRU2z1GSo8k7jObRYqOkIl2+GgXkFPbaowzWZFJyrcWy67G7oOiObrQwwdUY3vKCU0E3UpUo7fiZ60lK/RhUjf1QZZJIuSrlHGcnboH+lTYGw1Fys7HGRErmAZ5cixrLX9JCmZ29vyZCDggZpRJ41yS7zdq6hEhM6bY/DkG5DL8oWh15ZLiXZ2UO5KZbdDwYCNveKIOoRNMIrwbP4UITy9hEVAm/AIghXs00o+rfOfmgD5KN60EjGbYSHiQ8xMJlYBuUuG8dYUj8D8jWf68bolNSvGYdK9BRT54R1gBZTpsTiAj0IOBBsaRSQDjBsYLEUFrl1TmpRiQRnkC7XGcOuL9ZLjmqVE6MNSrpYIFzTVK/ZWoeFJJM409gRzjKWYIlXGajfeRubkR2RP15rU1gTCmkJv4m+t8PghXrHKRFE1qig+reQ2jfwMrYJjx8PtOoXtlFu+JqNHIzwMyYZtgehpg9IrpUwCul5Q8tpFK5rLZ2mqSjBOU6I3CvX1069GVGrb75/6ZSWHC4ZNdi9f6noIT1cKESNBO6dimlzu917RxEnsQdtA20/cTbH2Ajh4Hc5YqFSjEIAOewyPiBtGhZAh3tY0UJHH2Og7lrgwDbc6VzptyWQUgzO5r5xv/JXA/1I19Khf/TmvcuQNk9wMCuDcPuYpoR4L00Bvas+cnd/7+8hNeAXxp8I3Qhwh8Hegzx+K5uJBMgwueR4A2tcZJZA4pjwoB1RG7dSbJCDTzNr4j8YPxMezcuJquk9jMl1xDyfj7CiuGNM6kwWsRcSdqMXFx/D2bFLyXS/P/oazC6hyvN+vbXYGdYYj5bVhRnZ5yzLgJeHHyZF+K8bYtVRCm98fwUyNML/Kkmo58xLP3ei65kTXNXfeOy+4h2E5VH/yWhEvjd0zbGQvEhkwaFPfE7nLZszp/PO6bxzOm9AM+Z0XjuQOZ03GOOczjun887pvNPTeS1e5tgE3xfGn74VUNg9zmOmPgUalMNZJt1Nn85/KQk22XXVZO7zJQq6JpSIbRR34rEhFsIap2kMG/6t1osiOGDIKeRyG5WnpjjYfSQnUfpry9fMYdbU7QszlsIiUUv2RDL7+voYw4VnkmhPIqYPrDcuaso+g90CzuQ2RmZ4y7yhiuyhoFNk5fs5lXgcm1Xh7G4PtpLcjWzGJMAp8AURyx0W0hGTWTGWAe46ekPH1rftuXWtayJQh8enLhqdr/qpy35EyOphC2bxjTL/tY5agZqHdN9oPpFbLBHmgDZAgWNZVgups4WrcfWAA6FqYauE+6VbuwSNSHd1G5hD115pX5fTq+KCOCSMp6KUe2N8kuygfC/HXJKkyDAvhYC2WCCW6BT01IJQ/1LiXW5B2R9MfGG/NeFCLitW1FGxY3x670MNULVT80AtD/Ve16rM4x4nB6RYDOBpYyGitxdXYpDwXYZbw68lncoSIG3LA5BnoBZxJCzfLyWzIWjnNCw6Sz136M2L7k5TCgXXWGG37MaR3B/2ebPJ7ue4A4lTfBDTdpv9gD5KSggLwRKiR5kXIrdenfg6kr1Ljp/hm0GIA+4Gf5CvAwTsVRx0As2AMOqX/EkDzBVnP09dXCcuY00SEYpetiTZVkPuCxbtjGNFU8fBl9FrhvyrqhliCsQfdi9IxK2MR0q+FYB0cJisiXIQmAHEEhxowqCQrZcZoU8Rwdz9gjjkHIRCU9WTcQ0IhD6z7BnSpQXjqcaFmqdNLr4RAuckvuX89famqThTWY9HXXFLDyneT1X5oQHGcQcPagweHqan66815RGij9thH29+HuBtLj+neO/GkTK9YphPk82nyRxP7NNkX5W9/dgHyebsctszZ5d3nnjZ5XMScQfwnERsBz4nEXuSiClIZTfRxmv+/V0b3x0kQJ51nNZFq4kmc27bjwrEHIrnu4tPE6153wp54JiKHZHy7ejkwaqTJgw9Z+yXT6A0/zEn648U0Jyn3z494XyEFH1jo9lxFLgL6hxnuFtUb+P0dovHdYK78WkK6ozgHDNuk53yAE90Gt89JwwzGGKCAns4Cg2RhPR0NC6UcrPTHu/4WQMFzhzoI4sxYG5BYwa7DyhC+wzULFYPzthMiWHnLP0hQ9jzirR85hVp+/xICvnhVqQfYs/ojeyS9GC9xbIoY8rtfagSe2pKbaqgiG4ZlLDaepH3x+atoA7sN9mj5kJD8brZ0dWGPkY48KC7uJvc2TRcvvddw1IsL729Q/ey4Z1vK5cCaQ7KK4noE4IDYsnxBpYn270sQQXvpC7Pgca9j2qUaPi+n7JqN86MaFoBV7YO1jtpTvZYipIcnTvvqnPSxpHTKH
nytvomRmZ8txDJFC49co3guvVHpkrtkJ6vvseY4y3DlT28ZyoDq3qMq+nh6Xj+EeyYah6janlERuat4hFYw8MDaUL9jpDqHeGGMaZyh7Nux3FWPbpih/eYf0i1jii1OsZW6oiFyHvAf3yNjlDjDK7PcWx1jnCthoMdqNswsipHnKElvB7H6Gocx+vSUonj6DoccRUZVoFjbP2NWKoMrrwxvu7GaBHZyIRV3DjKbmzO4VB5jWPOHgcW1mimwz1NgiYlL9OnYgWlo16563uaWCPeA1NbkYEInBmGxX+/p8mtgnOnyHauUmPr5o2hS/Hc6KaZhxNfwPVqbkzOK9ZijjNO6EN3rHX2NHOuv7wjdBNN7V9L0sigPeoavUCIE31XL8gRBjCA8izW4G+M2yR6cQORbCEtsmllUo3YQUNvDhx8gMBB76DpkWyGCqAavkmRRWnYfWWnCEsJu1z2Sdc8m/EgIlvVXW1054DMHJAZgjQHZOaAzEhEc0BmDsjMAZk5IDMHZKwYvBUAS/62+n9eCGNq//VWY92Ke8dNkvDvcP6F6d9piiRDQFOjMfZpKRD2lMDECDSeDthFNK1H2DH5emLO0kXOQS1TFAJdMHQ3FcYtS1FLFFVEPQiqhVIMvjUpb6sbiVcKOqeDd28xluGZpId4mk9nAxE0YfRwTAyauqz0U5fx275V/+iSUD3xtBevE3vtOiGxLOIdss63WLgzBu0N6DbCl4ncNEczQhdV1ddL9IKJ1P9I4DtCsf8mRcCp+xy4vYJuIMoWoWZil++Bx6RWoO58LEIlbHqlfo8AU/IZrIbdqxxqgpmkv99KDaGLBtW1rjSplHbNsdj+wlj+N5w8sfX6Ev2dc30i7LbIskvU/Ft93letehhvtK9GoItrtsszkJBetpK4xpQyeVdQzYLxS/TPf/76hWQZpD9VzV9YO8qYcx+DxeV1CrLrvENJ15V5PErt17ePuv6XKFl69F47tWeBVLGDFNkZHsrJdzZkIGkx55CooeAK/efiP2Igb7AECtSHfRje1JRMl9TPWpOsVOLpL4saEkGV5F0mzw/WNKgV+Pq4W7XV+fuu07AJZ/QPtorl0pTUIl0j2Nt/CXdq0HWFpEejuzU4lYGVjuEyVoXB7X0jhE9LAuUsIx1KzdGLRLnNE25UaaMKJSm1KhLtzdQ9MzE8T7EUhciBpr2j6D7n6IC7GVCojYioVauNbmu7uqy1JdDvWYYcrlZzlmyR6IX6awgvWFiLZzfjFBZyWVtANBxK6LqsfA2DF9TeQeD7idgryoPsU8BpRqib85DN/VwRaFjjtQTedCmNJGH6Qgau3MA1JpmhiZB//C/di70U8oztdxOvqzCGxpZglOVeji2VH4K7W3/++GJFWnKxLUjaOS/PSILDV4NH4ai5IELXbKQvkYIg3FOEadJq6ecWY5tjU3FsUV+IHJIpx+ZiYWyLizj0Zhx7peeDZfAKAJan1ssMooMq+fQBmedgIw0OMathxwzN+IMek5b2uiCzGe9AF5IXcInWOBOg1uUFfaLshbr7TUGrmcJrpJNCMxrlAR/fYBhzvW8cwj3dErupim0e+fWvr+syTwOgJtRXrTE1BaXOVwrbkPlrLeK+uk5gD60+G8W8KvIKrb8YmLHxchLd6bPspzJNUzdqjTSskJPC0Uf5uyXp7AKeMkPFqnVF7Hca9d4OdxMVsptbK7MtE3J5Go6KtIvtyEl4HONqsjyu+s8Jt1o6MKu9lrt6r+UWaEroZrFYHLvFEhPdNL+jXp+6fdCYWBtuNryXfbTdlRnEWsFWBKvSDtOHghMuHU2o7jVsjLsxJtQ+2B7e+lqtFXPg6K58cW+pGBK6qn0tXP4+HA+V6r9jsbGVrvpwKqFVF77pa5QqTmi11/HEFpze9ecs6577QAdRxRX4RpdYUlwXWbavuQ1K09h+1gcYvhXsIJI/bWgxaEbaPThddPiuQvs/Gu1QjLgrpzEISg6EKu8cUnSxxTzVU5SA9CffkZI4myiHDXVupcju3Z0jWJgtLPuO+ukl+l019XfV1t9VY393zCCWhh/RPk1Oi7I0QJznGQGBZBtr7JBxvez/YzqFsC6yePNxTTHahGy/JjQslG+5JVfvo1T3gjY3mqILUL2qNN77qgXdQfsMHsKB8Jqh7ygn4cTTinF2s55VDoZqlxDRK3gLvci3D2ANrp03T61nY4Y2Rpy3pe5GyQbYt6HmWrkBwNpiAELCLtZ4V9b4NuoWRhn0LFmdaMwkYqnR2+RhuLMP5lsru48bkI/qQSPf3a2V84WVFnJzQV7TRXrnRTTnuxkPn/luxjA8wzVFn1lW7GJt4JfEojgkUxbsfan8qwTmdETmy/KqJ7D/zZfljRXQfFle+3zIy/IeA6/IO8ONdP9w3EPXhXKO2/pKJ68C8/8BAAD//97OQvo=" } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go index cc545356..804600ec 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/node/node.go @@ -18,6 +18,8 @@ package node import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common" "github.com/elastic/beats/libbeat/common/kubernetes" "github.com/elastic/beats/libbeat/logp" @@ -79,28 +81,25 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
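The hunks that follow migrate the kubernetes metricsets from a `Fetch` that returns nothing to one that returns `error`, dropping the manual `logger.Error`/`reporter.Error` calls. A minimal sketch of the resulting pattern, assuming a `MetricSet` with the `http` helper and the `eventMapping` function shown in these diffs (the wording of the wrapped messages is illustrative only):

```go
import (
	"github.com/pkg/errors"

	"github.com/elastic/beats/metricbeat/mb"
)

// Fetch gathers data and converts it to the right format. Errors are now
// wrapped and returned; the mb framework logs and reports them, so the
// metricset no longer calls reporter.Error itself.
func (m *MetricSet) Fetch(reporter mb.ReporterV2) error {
	body, err := m.http.FetchContent()
	if err != nil {
		return errors.Wrap(err, "error doing HTTP request to fetch metricset data")
	}
	event, err := eventMapping(body)
	if err != nil {
		return errors.Wrap(err, "error in mapping")
	}
	// reporter.Event returns false once the reporter is closed; when
	// publishing in a loop, metricsets check that result so they can stop
	// early and return nil (a shutdown, not an error).
	reporter.Event(mb.TransformMapStrToEvent("kubernetes", event, nil))
	return nil
}
```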
-func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { m.enricher.Start() body, err := m.http.FetchContent() if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error doing HTTP request to fetch 'node' Metricset data") + } event, err := eventMapping(body) if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error in mapping") } m.enricher.Enrich([]common.MapStr{event}) reporter.Event(mb.TransformMapStrToEvent("kubernetes", event, nil)) - return + return nil } // Close stops this metricset diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go index b65a6a20..8762f6ad 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/pod/pod.go @@ -18,6 +18,8 @@ package pod import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common/kubernetes" "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/helper" @@ -78,29 +80,28 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). -func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { m.enricher.Start() body, err := m.http.FetchContent() if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error doing HTTP request to fetch 'pod' Metricset data") } events, err := eventMapping(body, util.PerfMetrics) if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error in mapping") } m.enricher.Enrich(events) for _, e := range events { - reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + isOpen := reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + if !isOpen { + return nil + } } - return + return nil } // Close stops this metricset diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/data.json index 2496258b..3da35450 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/data.json @@ -1,44 +1,72 @@ { - "@timestamp": "2017-05-10T16:42:27.816Z", - "beat": { - "hostname": "X1", - "name": "X1", - "version": "6.0.0-alpha1" + "@timestamp": "2019-10-02T16:47:01.499Z", + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "kube-state-metrics.kube-system:8080", + "type": "kubernetes" }, "kubernetes": { + "labels": { + "app": "playground" + }, "container": { "cpu": { "request": { "nanocores": 200000000 } - }, - "id": "docker://39f3267ad1b0c46025e664bfe0b70f3f18a9f172aad00463c8e87e0e93bbf628", - "image": "jenkinsci/jenkins:2.46.1", - "memory": { - "request": { - "bytes": 268435456 - } - }, - "name": "wise-lynx-jenkins", + }, + "image": "ubuntu:latest", + "id": "docker://5f8ce416d10ab0b28ce5c7d521de2264aa03ff4d001e1194076f6a02a330139f", + "name": "ubuntu", "status": { - "phase": 
"running", "ready": true, - "restarts": 4 + "restarts": 0, + "phase": "running" } }, - "namespace": "jenkins", + "pod": { + "name": "playground", + "uid": "d52bd3cb-df62-4cb5-b293-7009055bcaff" + }, + "namespace": "default", "node": { "name": "minikube" - }, - "pod": { - "name": "wise-lynx-jenkins-1616735317-svn6k" } }, - "metricset": { - "host": "192.168.99.100:18080", + "host": { + "os": { + "codename": "bionic", + "platform": "ubuntu", + "version": "18.04.3 LTS (Bionic Beaver)", + "family": "debian", + "name": "Ubuntu", + "kernel": "4.15.0" + }, + "containerized": false, + "hostname": "minikube", + "name": "minikube", + "architecture": "x86_64" + }, + "agent": { + "version": "8.0.0", + "type": "metricbeat", + "ephemeral_id": "fed15ef3-ab8f-4e11-aded-115ff923bc1e", + "hostname": "minikube", + "id": "0df400e0-a5fc-40cc-a0c6-b99029a30cd5" + }, + "ecs": { + "version": "1.1.0" + }, + "container": { + "runtime": "docker", + "id": "5f8ce416d10ab0b28ce5c7d521de2264aa03ff4d001e1194076f6a02a330139f" + }, + "event": { + "dataset": "kubernetes.container", "module": "kubernetes", - "name": "state_container", - "namespace": "container", - "rtt": 6219 + "duration": 33750820 } -} +} \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.expected b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.expected index 962cc8c9..4a13d974 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.expected +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.expected @@ -1,66 +1,38 @@ [ { - "RootFields": null, + "RootFields": { + "container": { + "id": "4fa227874ee68536bf902394fb662f07b99099798ca9cd5c1506b79075acc065", + "runtime": "docker" + } + }, "ModuleFields": { - "namespace": "kube-system", + "namespace": "default", "node": { "name": "minikube" }, "pod": { - "name": "tiller-deploy-3067024529-9lpmb" - } - }, - "MetricSetFields": { - "id": "docker://469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902", - "image": "gcr.io/kubernetes-helm/tiller:v2.3.1", - "name": "tiller", - "status": { - "phase": "running", - "ready": true, - "restarts": 1 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "test", - "node": { - "name": "minikube-test" - }, - "pod": { - "name": "kube-dns-v20-5g5cb-test" + "name": "jumpy-owl-redis-3481028193-s78x9" } }, "MetricSetFields": { "cpu": { "request": { - "cores": 0.2 + "cores": 0.1 } }, - "id": "docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", - "image": "gcr.io/google_containers/kubedns-amd64:1.9-test", + "id": "docker://4fa227874ee68536bf902394fb662f07b99099798ca9cd5c1506b79075acc065", + "image": "bitnami/redis:3.2.8-r2", "memory": { - "limit": { - "bytes": 278257920 - }, "request": { - "bytes": 83400320 + "bytes": 268435456 } }, - "name": "kubedns", + "name": "jumpy-owl-redis", "status": { - "phase": "terminated", + "phase": "waiting", "ready": false, - "restarts": 3 + "restarts": 270 } }, "Index": "", @@ -70,40 +42,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, - "ModuleFields": { - 
"namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-dns-v20-5g5cb" + "RootFields": { + "container": { + "id": "52fa55e051dc5b68e44c027588685b7edd85aaa03b07f7216d399249ff4fc821", + "runtime": "docker" } }, - "MetricSetFields": { - "id": "docker://9a4c9462cd078d7be4f0a9b94bcfeb69d5fdd76bff67142df3f58367ac7e8d61", - "image": "gcr.io/google_containers/kube-dnsmasq-amd64:1.4", - "name": "dnsmasq", - "status": { - "phase": "running", - "ready": true, - "restarts": 2 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, "ModuleFields": { "namespace": "kube-system", "node": { @@ -143,27 +92,47 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", + "runtime": "docker" + } + }, "ModuleFields": { - "namespace": "kube-system", + "namespace": "test", "node": { - "name": "minikube" + "name": "minikube-test" }, "pod": { - "name": "kubernetes-dashboard-vw0l6" + "name": "kube-dns-v20-5g5cb-test" } }, "MetricSetFields": { - "id": "docker://3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978", - "image": "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1", - "name": "kubernetes-dashboard", + "cpu": { + "request": { + "cores": 0.2 + } + }, + "id": "docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", + "image": "gcr.io/google_containers/kubedns-amd64:1.9-test", + "memory": { + "limit": { + "bytes": 278257920 + }, + "request": { + "bytes": 83400320 + } + }, + "name": "kubedns", "status": { - "phase": "running", - "ready": true, - "restarts": 2 + "phase": "terminated", + "ready": false, + "restarts": 3 } }, "Index": "", @@ -173,10 +142,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "973cbe45982c5126a5caf8c58d964c0ab1d5bb2c165ccc59715fcc1ebd58ab3d", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { @@ -219,37 +195,44 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f", + "runtime": "docker" + } + }, "ModuleFields": { - "namespace": "default", + "namespace": "kube-system", "node": { "name": "minikube" }, "pod": { - "name": "jumpy-owl-redis-3481028193-s78x9" + "name": "kube-addon-manager-minikube" } }, "MetricSetFields": { "cpu": { "request": { - "cores": 0.1 + "cores": 0.005 } }, - "id": "docker://4fa227874ee68536bf902394fb662f07b99099798ca9cd5c1506b79075acc065", - "image": "bitnami/redis:3.2.8-r2", + "id": "docker://91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f", + "image": "gcr.io/google-containers/kube-addon-manager:v6.3", "memory": { "request": { - "bytes": 268435456 + "bytes": 52428800 } }, - "name": "jumpy-owl-redis", + "name": "kube-addon-manager", "status": { - "phase": "waiting", - "ready": false, - "restarts": 270 + "phase": "running", + "ready": true, + "restarts": 2 } }, "Index": "", @@ -259,10 +242,92 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + 
"Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "tiller-deploy-3067024529-9lpmb" + } + }, + "MetricSetFields": { + "id": "docker://469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902", + "image": "gcr.io/kubernetes-helm/tiller:v2.3.1", + "name": "tiller", + "status": { + "phase": "running", + "ready": true, + "restarts": 1 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { "RootFields": null, + "ModuleFields": { + "namespace": "kube-system", + "pod": { + "name": "kube-state-metrics-1303537707-mnzbp" + } + }, + "MetricSetFields": { + "cpu": { + "limit": { + "cores": 0.2 + }, + "request": { + "cores": 0.1 + } + }, + "memory": { + "limit": { + "bytes": 52428800 + }, + "request": { + "bytes": 31457280 + } + }, + "name": "kube-state-metrics" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { @@ -302,33 +367,30 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "9a4c9462cd078d7be4f0a9b94bcfeb69d5fdd76bff67142df3f58367ac7e8d61", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { "name": "minikube" }, "pod": { - "name": "kube-addon-manager-minikube" + "name": "kube-dns-v20-5g5cb" } }, "MetricSetFields": { - "cpu": { - "request": { - "cores": 0.005 - } - }, - "id": "docker://91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f", - "image": "gcr.io/google-containers/kube-addon-manager:v6.3", - "memory": { - "request": { - "bytes": 52428800 - } - }, - "name": "kube-addon-manager", + "id": "docker://9a4c9462cd078d7be4f0a9b94bcfeb69d5fdd76bff67142df3f58367ac7e8d61", + "image": "gcr.io/google_containers/kube-dnsmasq-amd64:1.4", + "name": "dnsmasq", "status": { "phase": "running", "ready": true, @@ -342,34 +404,35 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", + "node": { + "name": "minikube" + }, "pod": { - "name": "kube-state-metrics-1303537707-mnzbp" + "name": "kubernetes-dashboard-vw0l6" } }, "MetricSetFields": { - "cpu": { - "limit": { - "cores": 0.2 - }, - "request": { - "cores": 0.1 - } - }, - "memory": { - "limit": { - "bytes": 52428800 - }, - "request": { - "bytes": 31457280 - } - }, - "name": "kube-state-metrics" + "id": "docker://3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978", + "image": "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1", + "name": "kubernetes-dashboard", + "status": { + "phase": "running", + 
"ready": true, + "restarts": 2 + } }, "Index": "", "ID": "", @@ -378,10 +441,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "e2ee1c2c7b8d4e5fd8c834b83cba8377d6b0e39da18157688ccc1a06b7c53117", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "jenkins", "node": { @@ -418,6 +488,8 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false } ] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.v1.3.0.expected b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.v1.3.0.expected index 1ca9f64b..4d11be7a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.v1.3.0.expected +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/test/kube-state-metrics.v1.3.0.expected @@ -1,19 +1,29 @@ [ { - "RootFields": null, + "RootFields": { + "container": { + "id": "4beb9aab887ca162c9cb3534c4826156636241052cd548153eaa2a170b6d102f", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { "name": "minikube" }, "pod": { - "name": "kube-proxy-znhg6" + "name": "kube-controller-manager-minikube" } }, "MetricSetFields": { - "id": "docker://76c260259ddfd0267b5acb4e514465215ef1ebfa93a4057d592828772e6b39f5", - "image": "gcr.io/google_containers/kube-proxy-amd64:v1.9.7", - "name": "kube-proxy", + "cpu": { + "request": { + "cores": 0.2 + } + }, + "id": "docker://4beb9aab887ca162c9cb3534c4826156636241052cd548153eaa2a170b6d102f", + "image": "gcr.io/google_containers/kube-controller-manager-amd64:v1.9.7", + "name": "kube-controller-manager", "status": { "phase": "running", "ready": true, @@ -27,10 +37,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "c46bc2164edcb5972be6fc9174155e61179cb04314c4f6da5d25d3a76acadee6", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { @@ -57,161 +74,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-addon-manager-minikube" + "RootFields": { + "container": { + "id": "88951e0178ea5131fa3e2d7cafacb3a7e63700795dd6fa0d40ed2e4ac1f52f9c", + "runtime": "docker" } }, - "MetricSetFields": { - "cpu": { - "request": { - "cores": 0.005 - } - }, - "id": "docker://ab382dbe8f8265f88ee9fec7de142f778da4a5fd9fe0334e3bdb6fe851124c08", - "image": "k8s.gcr.io/kube-addon-manager:v8.6", - "memory": { - "request": { - "bytes": 52428800 - } - }, - "name": "kube-addon-manager", - "status": { - "phase": "running", - "ready": true, - "restarts": 0 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-apiserver-minikube" - } - }, - "MetricSetFields": { - "cpu": { - "request": { - "cores": 0.25 - } - }, - 
"id": "docker://e9568dfef1dd249cabac4bf09e6bf4a239fe738ae20eba072b6516676fce4bf6", - "image": "gcr.io/google_containers/kube-apiserver-amd64:v1.9.7", - "name": "kube-apiserver", - "status": { - "phase": "running", - "ready": true, - "restarts": 0 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-state-metrics-6479d88c5c-5b6cl" - } - }, - "MetricSetFields": { - "cpu": { - "limit": { - "cores": 0.1 - }, - "request": { - "cores": 0.1 - } - }, - "id": "docker://948c4ebd8ca4fdf352e7fbf7f5c5d381af7e615ced435dc42fde0c1d25851320", - "image": "k8s.gcr.io/addon-resizer:1.7", - "memory": { - "limit": { - "bytes": 31457280 - }, - "request": { - "bytes": 31457280 - } - }, - "name": "addon-resizer", - "status": { - "phase": "running", - "ready": true, - "restarts": 0 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "etcd-minikube" - } - }, - "MetricSetFields": { - "id": "docker://6e96fd8a687409b2314dcc01f209bb0c813c2fb08b8f75ad1695e120d41e1a2a", - "image": "gcr.io/google_containers/etcd-amd64:3.1.11", - "name": "etcd", - "status": { - "phase": "running", - "ready": true, - "restarts": 0 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, "ModuleFields": { "namespace": "kube-system", "node": { @@ -254,27 +127,49 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "948c4ebd8ca4fdf352e7fbf7f5c5d381af7e615ced435dc42fde0c1d25851320", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { "name": "minikube" }, "pod": { - "name": "storage-provisioner" + "name": "kube-state-metrics-6479d88c5c-5b6cl" } }, "MetricSetFields": { - "id": "docker://f4cc07b8e7ee5952738c69a0bff0c7b331c10af66faa541197684127d393b760", - "image": "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", - "name": "storage-provisioner", + "cpu": { + "limit": { + "cores": 0.1 + }, + "request": { + "cores": 0.1 + } + }, + "id": "docker://948c4ebd8ca4fdf352e7fbf7f5c5d381af7e615ced435dc42fde0c1d25851320", + "image": "k8s.gcr.io/addon-resizer:1.7", + "memory": { + "limit": { + "bytes": 31457280 + }, + "request": { + "bytes": 31457280 + } + }, + "name": "addon-resizer", "status": { "phase": "running", "ready": true, - "reason": "ImagePullBackOff", "restarts": 0 } }, @@ -285,10 +180,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "1958e71d048065d38ce83dafda567c5fa9d0c1278cd7292d55b9f1d80b0a67f9", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { @@ -328,10 +230,260 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": 
"76c260259ddfd0267b5acb4e514465215ef1ebfa93a4057d592828772e6b39f5", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kube-proxy-znhg6" + } + }, + "MetricSetFields": { + "id": "docker://76c260259ddfd0267b5acb4e514465215ef1ebfa93a4057d592828772e6b39f5", + "image": "gcr.io/google_containers/kube-proxy-amd64:v1.9.7", + "name": "kube-proxy", + "status": { + "phase": "running", + "ready": true, + "restarts": 0 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "f4cc07b8e7ee5952738c69a0bff0c7b331c10af66faa541197684127d393b760", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "storage-provisioner" + } + }, + "MetricSetFields": { + "id": "docker://f4cc07b8e7ee5952738c69a0bff0c7b331c10af66faa541197684127d393b760", + "image": "gcr.io/k8s-minikube/storage-provisioner:v1.8.1", + "name": "storage-provisioner", + "status": { + "phase": "running", + "ready": true, + "reason": "ImagePullBackOff", + "restarts": 0 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "e9568dfef1dd249cabac4bf09e6bf4a239fe738ae20eba072b6516676fce4bf6", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kube-apiserver-minikube" + } + }, + "MetricSetFields": { + "cpu": { + "request": { + "cores": 0.25 + } + }, + "id": "docker://e9568dfef1dd249cabac4bf09e6bf4a239fe738ae20eba072b6516676fce4bf6", + "image": "gcr.io/google_containers/kube-apiserver-amd64:v1.9.7", + "name": "kube-apiserver", + "status": { + "phase": "running", + "ready": true, + "restarts": 0 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "eadcbd54ba914dff6475ae64805887967cfb973aeb9b07364c94372658a71d11", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kube-scheduler-minikube" + } + }, + "MetricSetFields": { + "cpu": { + "request": { + "cores": 0.1 + } + }, + "id": "docker://eadcbd54ba914dff6475ae64805887967cfb973aeb9b07364c94372658a71d11", + "image": "gcr.io/google_containers/kube-scheduler-amd64:v1.9.7", + "name": "kube-scheduler", + "status": { + "phase": "running", + "ready": true, + "restarts": 0 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "6e96fd8a687409b2314dcc01f209bb0c813c2fb08b8f75ad1695e120d41e1a2a", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "etcd-minikube" + } + }, + "MetricSetFields": { + "id": 
"docker://6e96fd8a687409b2314dcc01f209bb0c813c2fb08b8f75ad1695e120d41e1a2a", + "image": "gcr.io/google_containers/etcd-amd64:3.1.11", + "name": "etcd", + "status": { + "phase": "running", + "ready": true, + "restarts": 0 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "ab382dbe8f8265f88ee9fec7de142f778da4a5fd9fe0334e3bdb6fe851124c08", + "runtime": "docker" + } + }, + "ModuleFields": { + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kube-addon-manager-minikube" + } + }, + "MetricSetFields": { + "cpu": { + "request": { + "cores": 0.005 + } + }, + "id": "docker://ab382dbe8f8265f88ee9fec7de142f778da4a5fd9fe0334e3bdb6fe851124c08", + "image": "k8s.gcr.io/kube-addon-manager:v8.6", + "memory": { + "request": { + "bytes": 52428800 + } + }, + "name": "kube-addon-manager", + "status": { + "phase": "running", + "ready": true, + "restarts": 0 + } + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.container", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": { + "container": { + "id": "aad0addd205dc72dc7abc8f9d02a1b429a2f2e1df3acc60431ca6b79746c093b", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { @@ -369,10 +521,17 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false }, { - "RootFields": null, + "RootFields": { + "container": { + "id": "e9560bbace13ca19de4b3771023198e8568f6b5ed6af3a949f10a5b8137b5be9", + "runtime": "docker" + } + }, "ModuleFields": { "namespace": "kube-system", "node": { @@ -409,76 +568,8 @@ "Error": null, "Host": "", "Service": "", - "Took": 0 - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-scheduler-minikube" - } - }, - "MetricSetFields": { - "cpu": { - "request": { - "cores": 0.1 - } - }, - "id": "docker://eadcbd54ba914dff6475ae64805887967cfb973aeb9b07364c94372658a71d11", - "image": "gcr.io/google_containers/kube-scheduler-amd64:v1.9.7", - "name": "kube-scheduler", - "status": { - "phase": "running", - "ready": true, - "restarts": 0 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 - }, - { - "RootFields": null, - "ModuleFields": { - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-controller-manager-minikube" - } - }, - "MetricSetFields": { - "cpu": { - "request": { - "cores": 0.2 - } - }, - "id": "docker://4beb9aab887ca162c9cb3534c4826156636241052cd548153eaa2a170b6d102f", - "image": "gcr.io/google_containers/kube-controller-manager-amd64:v1.9.7", - "name": "kube-controller-manager", - "status": { - "phase": "running", - "ready": true, - "restarts": 0 - } - }, - "Index": "", - "ID": "", - "Namespace": "kubernetes.container", - "Timestamp": "0001-01-01T00:00:00Z", - "Error": null, - "Host": "", - "Service": "", - "Took": 0 + "Took": 0, + "Period": 0, + "DisableTimeSeries": false } ] \ No newline at end of file diff --git 
a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/testdata/ksm-v1_3_0.plain-expected.json b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/testdata/ksm-v1_3_0.plain-expected.json index b3e6a559..f8278f21 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/testdata/ksm-v1_3_0.plain-expected.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/_meta/testdata/ksm-v1_3_0.plain-expected.json @@ -7,21 +7,27 @@ }, "kubernetes": { "container": { - "id": "docker://469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902", - "image": "gcr.io/kubernetes-helm/tiller:v2.3.1", - "name": "tiller", - "status": { - "phase": "running", - "ready": true, - "restarts": 1 - } + "cpu": { + "limit": { + "cores": 0.2 + }, + "request": { + "cores": 0.1 + } + }, + "memory": { + "limit": { + "bytes": 52428800 + }, + "request": { + "bytes": 31457280 + } + }, + "name": "kube-state-metrics" }, "namespace": "kube-system", - "node": { - "name": "minikube" - }, "pod": { - "name": "tiller-deploy-3067024529-9lpmb" + "name": "kube-state-metrics-1303537707-mnzbp" } }, "metricset": { @@ -34,6 +40,58 @@ } }, { + "container": { + "id": "4fa227874ee68536bf902394fb662f07b99099798ca9cd5c1506b79075acc065", + "runtime": "docker" + }, + "event": { + "dataset": "kubernetes.container", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "container": { + "cpu": { + "request": { + "cores": 0.1 + } + }, + "id": "docker://4fa227874ee68536bf902394fb662f07b99099798ca9cd5c1506b79075acc065", + "image": "bitnami/redis:3.2.8-r2", + "memory": { + "request": { + "bytes": 268435456 + } + }, + "name": "jumpy-owl-redis", + "status": { + "phase": "waiting", + "ready": false, + "restarts": 270 + } + }, + "namespace": "default", + "node": { + "name": "minikube" + }, + "pod": { + "name": "jumpy-owl-redis-3481028193-s78x9" + } + }, + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "container": { + "id": "9a4c9462cd078d7be4f0a9b94bcfeb69d5fdd76bff67142df3f58367ac7e8d61", + "runtime": "docker" + }, "event": { "dataset": "kubernetes.container", "duration": 115000, @@ -68,6 +126,10 @@ } }, { + "container": { + "id": "973cbe45982c5126a5caf8c58d964c0ab1d5bb2c165ccc59715fcc1ebd58ab3d", + "runtime": "docker" + }, "event": { "dataset": "kubernetes.container", "duration": 115000, @@ -118,262 +180,10 @@ } }, { - "event": { - "dataset": "kubernetes.container", - "duration": 115000, - "module": "kubernetes" + "container": { + "id": "fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62", + "runtime": "docker" }, - "kubernetes": { - "container": { - "cpu": { - "request": { - "cores": 0.005 - } - }, - "id": "docker://91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f", - "image": "gcr.io/google-containers/kube-addon-manager:v6.3", - "memory": { - "request": { - "bytes": 52428800 - } - }, - "name": "kube-addon-manager", - "status": { - "phase": "running", - "ready": true, - "restarts": 2 - } - }, - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-addon-manager-minikube" - } - }, - "metricset": { - "name": "state_container", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.container", - "duration": 115000, - "module": "kubernetes" - }, 
- "kubernetes": { - "container": { - "cpu": { - "limit": { - "cores": 0.2 - }, - "request": { - "cores": 0.1 - } - }, - "memory": { - "limit": { - "bytes": 52428800 - }, - "request": { - "bytes": 31457280 - } - }, - "name": "kube-state-metrics" - }, - "namespace": "kube-system", - "pod": { - "name": "kube-state-metrics-1303537707-mnzbp" - } - }, - "metricset": { - "name": "state_container", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.container", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "container": { - "cpu": { - "request": { - "cores": 0.1 - } - }, - "id": "docker://4fa227874ee68536bf902394fb662f07b99099798ca9cd5c1506b79075acc065", - "image": "bitnami/redis:3.2.8-r2", - "memory": { - "request": { - "bytes": 268435456 - } - }, - "name": "jumpy-owl-redis", - "status": { - "phase": "waiting", - "ready": false, - "restarts": 270 - } - }, - "namespace": "default", - "node": { - "name": "minikube" - }, - "pod": { - "name": "jumpy-owl-redis-3481028193-s78x9" - } - }, - "metricset": { - "name": "state_container", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.container", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "container": { - "id": "docker://3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978", - "image": "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1", - "name": "kubernetes-dashboard", - "status": { - "phase": "running", - "ready": true, - "restarts": 2 - } - }, - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kubernetes-dashboard-vw0l6" - } - }, - "metricset": { - "name": "state_container", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.container", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "container": { - "cpu": { - "request": { - "cores": 0.01 - } - }, - "id": "docker://52fa55e051dc5b68e44c027588685b7edd85aaa03b07f7216d399249ff4fc821", - "image": "gcr.io/google_containers/exechealthz-amd64:1.2", - "memory": { - "limit": { - "bytes": 52428800 - }, - "request": { - "bytes": 52428800 - } - }, - "name": "healthz", - "status": { - "phase": "running", - "ready": true, - "restarts": 2 - } - }, - "namespace": "kube-system", - "node": { - "name": "minikube" - }, - "pod": { - "name": "kube-dns-v20-5g5cb" - } - }, - "metricset": { - "name": "state_container", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, - { - "event": { - "dataset": "kubernetes.container", - "duration": 115000, - "module": "kubernetes" - }, - "kubernetes": { - "container": { - "cpu": { - "request": { - "cores": 0.2 - } - }, - "id": "docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", - "image": "gcr.io/google_containers/kubedns-amd64:1.9-test", - "memory": { - "limit": { - "bytes": 278257920 - }, - "request": { - "bytes": 83400320 - } - }, - "name": "kubedns", - "status": { - "phase": "terminated", - "ready": false, - "restarts": 3 - } - }, - "namespace": "test", - "node": { - "name": "minikube-test" - }, - "pod": { - "name": "kube-dns-v20-5g5cb-test" - } - }, - "metricset": { - "name": "state_container", - "period": 10000 - }, - "service": { - "address": "127.0.0.1:55555", - "type": "kubernetes" - } - }, 
- { "event": { "dataset": "kubernetes.container", "duration": 115000, @@ -421,6 +231,10 @@ } }, { + "container": { + "id": "e2ee1c2c7b8d4e5fd8c834b83cba8377d6b0e39da18157688ccc1a06b7c53117", + "runtime": "docker" + }, "event": { "dataset": "kubernetes.container", "duration": 115000, @@ -463,5 +277,231 @@ "address": "127.0.0.1:55555", "type": "kubernetes" } + }, + { + "container": { + "id": "91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f", + "runtime": "docker" + }, + "event": { + "dataset": "kubernetes.container", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "container": { + "cpu": { + "request": { + "cores": 0.005 + } + }, + "id": "docker://91fdd43f6b1b4c3dd133cfca53e0b1210bc557c2ae56006026b5ccdb5f52826f", + "image": "gcr.io/google-containers/kube-addon-manager:v6.3", + "memory": { + "request": { + "bytes": 52428800 + } + }, + "name": "kube-addon-manager", + "status": { + "phase": "running", + "ready": true, + "restarts": 2 + } + }, + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kube-addon-manager-minikube" + } + }, + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "container": { + "id": "3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978", + "runtime": "docker" + }, + "event": { + "dataset": "kubernetes.container", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "container": { + "id": "docker://3aaee8bdd311c015240e99fa2a5a5f2f26b11b51236a683b39d8c1902e423978", + "image": "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.5.1", + "name": "kubernetes-dashboard", + "status": { + "phase": "running", + "ready": true, + "restarts": 2 + } + }, + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kubernetes-dashboard-vw0l6" + } + }, + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "container": { + "id": "fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", + "runtime": "docker" + }, + "event": { + "dataset": "kubernetes.container", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "container": { + "cpu": { + "request": { + "cores": 0.2 + } + }, + "id": "docker://fa3d83f648de42492b38fa3e8501d109376f391c50f2bd210c895c8477ae4b62-test", + "image": "gcr.io/google_containers/kubedns-amd64:1.9-test", + "memory": { + "limit": { + "bytes": 278257920 + }, + "request": { + "bytes": 83400320 + } + }, + "name": "kubedns", + "status": { + "phase": "terminated", + "ready": false, + "restarts": 3 + } + }, + "namespace": "test", + "node": { + "name": "minikube-test" + }, + "pod": { + "name": "kube-dns-v20-5g5cb-test" + } + }, + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "container": { + "id": "52fa55e051dc5b68e44c027588685b7edd85aaa03b07f7216d399249ff4fc821", + "runtime": "docker" + }, + "event": { + "dataset": "kubernetes.container", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "container": { + "cpu": { + "request": { + "cores": 0.01 + } + }, + "id": "docker://52fa55e051dc5b68e44c027588685b7edd85aaa03b07f7216d399249ff4fc821", + "image": "gcr.io/google_containers/exechealthz-amd64:1.2", + "memory": { + "limit": { + "bytes": 52428800 + }, + "request": { + "bytes": 
52428800 + } + }, + "name": "healthz", + "status": { + "phase": "running", + "ready": true, + "restarts": 2 + } + }, + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "kube-dns-v20-5g5cb" + } + }, + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "container": { + "id": "469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902", + "runtime": "docker" + }, + "event": { + "dataset": "kubernetes.container", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "container": { + "id": "docker://469f5d2b7854eb52e5d13dc0cd3e664c1b682b157aabaf596ffe4984f1516902", + "image": "gcr.io/kubernetes-helm/tiller:v2.3.1", + "name": "tiller", + "status": { + "phase": "running", + "ready": true, + "restarts": 1 + } + }, + "namespace": "kube-system", + "node": { + "name": "minikube" + }, + "pod": { + "name": "tiller-deploy-3067024529-9lpmb" + } + }, + "metricset": { + "name": "state_container", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } } ] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go index 258c9bc8..e2771ac9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_container/state_container.go @@ -18,6 +18,10 @@ package state_container import ( + "strings" + + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common" p "github.com/elastic/beats/metricbeat/helper/prometheus" "github.com/elastic/beats/metricbeat/mb" @@ -105,14 +109,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
-func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { m.enricher.Start() events, err := m.prometheus.GetProcessedMetrics(mapping) if err != nil { - m.Logger().Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error getting event") } m.enricher.Enrich(events) @@ -131,6 +133,22 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { } } + // applying ECS to kubernetes.container.id in the form :// + var rootFields common.MapStr + if containerID, ok := event["id"]; ok { + // we don't expect errors here, but if any we would obtain an + // empty string + cID := (containerID).(string) + split := strings.Index(cID, "://") + if split != -1 { + rootFields = common.MapStr{ + "container": common.MapStr{ + "runtime": cID[:split], + "id": cID[split+3:], + }} + } + } + var moduleFieldsMapStr common.MapStr moduleFields, ok := event[mb.ModuleDataKey] if ok { @@ -142,14 +160,16 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { delete(event, mb.ModuleDataKey) if reported := reporter.Event(mb.Event{ + RootFields: rootFields, MetricSetFields: event, ModuleFields: moduleFieldsMapStr, Namespace: "kubernetes.container", }); !reported { - m.Logger().Debug("error trying to emit event") - return + return nil } } + + return nil } // Close stops this metricset diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_cronjob/state_cronjob.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_cronjob/state_cronjob.go index 39f3d4ed..58d61d5c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_cronjob/state_cronjob.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_cronjob/state_cronjob.go @@ -18,6 +18,8 @@ package state_cronjob import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common" p "github.com/elastic/beats/metricbeat/helper/prometheus" "github.com/elastic/beats/metricbeat/mb" @@ -74,12 +76,10 @@ func NewCronJobMetricSet(base mb.BaseMetricSet) (mb.MetricSet, error) { // module rooted fields at the event that gets reported // // Copied from other kube state metrics. 
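The `state_container` change above also maps the kube-state-metrics container ID, which arrives as `<runtime>://<id>`, onto the ECS root fields `container.runtime` and `container.id`. A self-contained sketch of that split, under the assumption that a missing `://` separator simply leaves the root fields unset (`splitContainerID` is a name chosen here for illustration, not a helper in the module):

```go
package main

import (
	"fmt"
	"strings"
)

// splitContainerID decomposes "<runtime>://<id>" into its runtime and ID
// parts, mirroring the strings.Index logic in the hunk above. ok is false
// when the separator is missing.
func splitContainerID(cID string) (runtime, id string, ok bool) {
	split := strings.Index(cID, "://")
	if split == -1 {
		return "", "", false
	}
	return cID[:split], cID[split+3:], true
}

func main() {
	runtime, id, ok := splitContainerID("docker://5f8ce416d10ab0b28ce5c7d521de2264")
	fmt.Println(ok, runtime, id) // true docker 5f8ce416d10ab0b28ce5c7d521de2264
}
```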
-func (m *CronJobMetricSet) Fetch(reporter mb.ReporterV2) { +func (m *CronJobMetricSet) Fetch(reporter mb.ReporterV2) error { events, err := m.prometheus.GetProcessedMetrics(m.mapping) if err != nil { - m.Logger().Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error getting metrics") } for _, event := range events { @@ -98,10 +98,9 @@ func (m *CronJobMetricSet) Fetch(reporter mb.ReporterV2) { ModuleFields: moduleFieldsMapStr, Namespace: "kubernetes.cronjob", }); !reported { - m.Logger().Debug("error trying to emit event") - return + return nil } } - return + return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go index 8185ef1b..8835f3f0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_node/state_node.go @@ -18,6 +18,8 @@ package state_node import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/common/kubernetes" p "github.com/elastic/beats/metricbeat/helper/prometheus" "github.com/elastic/beats/metricbeat/mb" @@ -96,14 +98,12 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). -func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { m.enricher.Start() events, err := m.prometheus.GetProcessedMetrics(mapping) if err != nil { - m.Logger().Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error doing HTTP request to fetch 'state_node' Metricset data") } m.enricher.Enrich(events) @@ -111,10 +111,11 @@ event[mb.NamespaceKey] = "node" reported := reporter.Event(mb.TransformMapStrToEvent("kubernetes", event, nil)) if !reported { - m.Logger().Debug("error trying to emit event") - return + return nil } } + + return nil } // Close stops this metricset diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/README.md b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/README.md new file mode 100644 index 00000000..fe34b970 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/README.md @@ -0,0 +1,88 @@ +# Kube-state-metrics/ResourceQuota + +## Version history + +- September 2019, `v1.7.0` + +## Resources + +Docs for the 1.7 release of `kube-state-metrics` ResourceQuota can be found here: +https://github.com/kubernetes/kube-state-metrics/blob/release-1.7/docs/resourcequota-metrics.md + + +## Metrics insight + + - kube_resourcequota{namespace,resourcequota,resource,type} Gauge + + Info about existing `ResourceQuota` objects and their current status + + - kube_resourcequota_created{namespace,resourcequota} Gauge + + Creation time for `ResourceQuota` + +## Set up an environment for manual tests + +- Set up a Kubernetes environment for Beats testing + +https://github.com/elastic/beats/tree/master/metricbeat/module/kubernetes/_meta/test + +- Install `kube-state-metrics` + +As part of the document referenced above, follow these instructions: + +https://github.com/elastic/beats/tree/master/metricbeat/module/kubernetes/_meta/test#testing-kubernetes-loads + - 
Create `ResourceQuota` objects + +The manifest, found at this location, not only creates the `ResourceQuota` objects but also other resources that will fail to be created because of the quota on the namespace: + +https://github.com/elastic/beats/tree/master/metricbeat/module/kubernetes/_meta/test/docs/02_objects/resourcequota.yaml + +It will create: + +- a namespace named `rqtest`, to which the resource quotas will be assigned +- a resource quota named `resources`, which limits the amount of CPU and memory that can be assigned to the namespace. (These settings won't be put to the test.) +- an `objects` resource quota, which limits the number of objects that can be created in this namespace: + - 3 Pods + - 1 Configmap + - 0 PersistentVolumeClaims + - 1 ReplicationController + - 1 Secret + - 2 Services + - 1 Service of type LoadBalancer + +- It will also create regular objects in that same namespace: + - 1 Service of type LoadBalancer, which will succeed + - 1 Service of type LoadBalancer, which **will fail** due to exceeding the quota + +- Copy the metricbeat binary and assets to the playground pod. The module file targeting `ResourceQuota` should look like this: + +```yaml +- module: kubernetes + enabled: true + metricsets: + - state_resourcequota + period: 10s + hosts: ["kube-state-metrics.kube-system:8080"] + in_cluster: true +``` + +- Execute metricbeat from the playground + +In Elasticsearch/Kibana you should see: + +Events that indicate a hard limit on services of type LoadBalancer: + +- `dataset`: `kubernetes.resourcequota` +- `kubernetes.resourcequota.name`: `objects` +- `kubernetes.resourcequota.resource`: `services.loadbalancers` +- `kubernetes.resourcequota.quota`: 1 +- `kubernetes.resourcequota.type`: `hard` + +Events that indicate the number of services of type LoadBalancer in use: + +- `dataset`: `kubernetes.resourcequota` +- `kubernetes.resourcequota.name`: `objects` +- `kubernetes.resourcequota.resource`: `services.loadbalancers` +- `kubernetes.resourcequota.quota`: 1 +- `kubernetes.resourcequota.type`: `used` diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/data.json new file mode 100644 index 00000000..a74695a7 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/data.json @@ -0,0 +1,56 @@ +{ + "@timestamp": "2019-09-16T18:37:16.237Z", + "@metadata": { + "beat": "metricbeat", + "type": "_doc", + "version": "8.0.0" + }, + "agent": { + "ephemeral_id": "9a223001-a65f-4460-b106-553151987b09", + "hostname": "minikube", + "id": "191c7322-6d36-4f6c-b451-d0302b96841b", + "version": "8.0.0", + "type": "metricbeat" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "quota": 2, + "name": "objects", + "resource": "services", + "type": "hard" + } + }, + "event": { + "module": "kubernetes", + "duration": 13626177, + "dataset": "kubernetes.resourcequota" + }, + "metricset": { + "period": 10000, + "name": "state_resourcequota" + }, + "service": { + "address": "kube-state-metrics.kube-system:8080", + "type": "kubernetes" + }, + "ecs": { + "version": "1.1.0" + }, + "host": { + "name": "minikube", + "architecture": "x86_64", + "os": { + "kernel": "4.15.0", + "codename": "bionic", + "platform": "ubuntu", + "version": "18.04.3 LTS (Bionic Beaver)", + "family": "debian", + "name": "Ubuntu" + }, + "containerized": false, + "hostname": "minikube" + } + } +} + \ No newline at end of file diff --git
a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/docs.asciidoc new file mode 100644 index 00000000..6ff6b976 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/docs.asciidoc @@ -0,0 +1,2 @@ +The `state_resourcequota` metricset for kubernetes reads from `kube-state-metrics`. + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/fields.yml new file mode 100644 index 00000000..9bc53822 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/fields.yml @@ -0,0 +1,26 @@ +- name: resourcequota + type: group + description: > + kubernetes resourcequota metrics + release: beta + fields: + - name: created.sec + type: double + description: Epoch seconds since the ResourceQuota was created + - name: quota + type: double + description: Quota informed (hard or used) for the resource + - name: name + type: keyword + description: ResourceQuota name + - name: type + type: keyword + description: Quota information type, `hard` or `used` + - name: resource + type: keyword + description: Resource name the quota applies to + + + + + diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/test/kube-state-metrics.1.7 b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/test/kube-state-metrics.1.7 new file mode 100644 index 00000000..11e5614b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/test/kube-state-metrics.1.7 @@ -0,0 +1,1361 @@ +# HELP kube_certificatesigningrequest_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_certificatesigningrequest_labels gauge +# HELP kube_certificatesigningrequest_created Unix creation timestamp +# TYPE kube_certificatesigningrequest_created gauge +# HELP kube_certificatesigningrequest_condition The number of each certificatesigningrequest condition +# TYPE kube_certificatesigningrequest_condition gauge +# HELP kube_certificatesigningrequest_cert_length Length of the issued cert +# TYPE kube_certificatesigningrequest_cert_length gauge +# HELP kube_configmap_info Information about configmap. 
+# TYPE kube_configmap_info gauge +kube_configmap_info{namespace="kube-system",configmap="kubelet-config-1.12"} 1 +kube_configmap_info{namespace="default",configmap="elastic-operator-uuid"} 1 +kube_configmap_info{namespace="kube-public",configmap="cluster-info"} 1 +kube_configmap_info{namespace="kube-system",configmap="coredns"} 1 +kube_configmap_info{namespace="kube-system",configmap="extension-apiserver-authentication"} 1 +kube_configmap_info{namespace="kube-system",configmap="kube-proxy"} 1 +kube_configmap_info{namespace="kube-system",configmap="kubeadm-config"} 1 +# HELP kube_configmap_created Unix creation timestamp +# TYPE kube_configmap_created gauge +kube_configmap_created{namespace="kube-system",configmap="extension-apiserver-authentication"} 1.567009602e+09 +kube_configmap_created{namespace="kube-system",configmap="kube-proxy"} 1.567009606e+09 +kube_configmap_created{namespace="kube-system",configmap="kubeadm-config"} 1.567009605e+09 +kube_configmap_created{namespace="kube-system",configmap="kubelet-config-1.12"} 1.567009605e+09 +kube_configmap_created{namespace="default",configmap="elastic-operator-uuid"} 1.567060471e+09 +kube_configmap_created{namespace="kube-public",configmap="cluster-info"} 1.567009606e+09 +kube_configmap_created{namespace="kube-system",configmap="coredns"} 1.567009606e+09 +# HELP kube_configmap_metadata_resource_version Resource version representing a specific version of the configmap. +# TYPE kube_configmap_metadata_resource_version gauge +kube_configmap_metadata_resource_version{namespace="default",configmap="elastic-operator-uuid",resource_version="61228"} 1 +kube_configmap_metadata_resource_version{namespace="kube-public",configmap="cluster-info",resource_version="709401"} 1 +kube_configmap_metadata_resource_version{namespace="kube-system",configmap="coredns",resource_version="214"} 1 +kube_configmap_metadata_resource_version{namespace="kube-system",configmap="extension-apiserver-authentication",resource_version="53"} 1 +kube_configmap_metadata_resource_version{namespace="kube-system",configmap="kube-proxy",resource_version="709289"} 1 +kube_configmap_metadata_resource_version{namespace="kube-system",configmap="kubeadm-config",resource_version="171"} 1 +kube_configmap_metadata_resource_version{namespace="kube-system",configmap="kubelet-config-1.12",resource_version="174"} 1 +# HELP kube_cronjob_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_cronjob_labels gauge +# HELP kube_cronjob_info Info about cronjob. +# TYPE kube_cronjob_info gauge +# HELP kube_cronjob_created Unix creation timestamp +# TYPE kube_cronjob_created gauge +# HELP kube_cronjob_status_active Active holds pointers to currently running jobs. +# TYPE kube_cronjob_status_active gauge +# HELP kube_cronjob_status_last_schedule_time LastScheduleTime keeps information of when was the last time the job was successfully scheduled. +# TYPE kube_cronjob_status_last_schedule_time gauge +# HELP kube_cronjob_spec_suspend Suspend flag tells the controller to suspend subsequent executions. +# TYPE kube_cronjob_spec_suspend gauge +# HELP kube_cronjob_spec_starting_deadline_seconds Deadline in seconds for starting the job if it misses scheduled time for any reason. +# TYPE kube_cronjob_spec_starting_deadline_seconds gauge +# HELP kube_cronjob_next_schedule_time Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed. 
+# TYPE kube_cronjob_next_schedule_time gauge +# HELP kube_daemonset_created Unix creation timestamp +# TYPE kube_daemonset_created gauge +kube_daemonset_created{namespace="kube-system",daemonset="kube-proxy"} 1.567009606e+09 +# HELP kube_daemonset_status_current_number_scheduled The number of nodes running at least one daemon pod and are supposed to. +# TYPE kube_daemonset_status_current_number_scheduled gauge +kube_daemonset_status_current_number_scheduled{namespace="kube-system",daemonset="kube-proxy"} 1 +# HELP kube_daemonset_status_desired_number_scheduled The number of nodes that should be running the daemon pod. +# TYPE kube_daemonset_status_desired_number_scheduled gauge +kube_daemonset_status_desired_number_scheduled{namespace="kube-system",daemonset="kube-proxy"} 1 +# HELP kube_daemonset_status_number_available The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available +# TYPE kube_daemonset_status_number_available gauge +kube_daemonset_status_number_available{namespace="kube-system",daemonset="kube-proxy"} 1 +# HELP kube_daemonset_status_number_misscheduled The number of nodes running a daemon pod but are not supposed to. +# TYPE kube_daemonset_status_number_misscheduled gauge +kube_daemonset_status_number_misscheduled{namespace="kube-system",daemonset="kube-proxy"} 0 +# HELP kube_daemonset_status_number_ready The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready. +# TYPE kube_daemonset_status_number_ready gauge +kube_daemonset_status_number_ready{namespace="kube-system",daemonset="kube-proxy"} 1 +# HELP kube_daemonset_status_number_unavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available +# TYPE kube_daemonset_status_number_unavailable gauge +kube_daemonset_status_number_unavailable{namespace="kube-system",daemonset="kube-proxy"} 0 +# HELP kube_daemonset_updated_number_scheduled The total number of nodes that are running updated daemon pod +# TYPE kube_daemonset_updated_number_scheduled gauge +kube_daemonset_updated_number_scheduled{namespace="kube-system",daemonset="kube-proxy"} 1 +# HELP kube_daemonset_metadata_generation Sequence number representing a specific generation of the desired state. +# TYPE kube_daemonset_metadata_generation gauge +kube_daemonset_metadata_generation{namespace="kube-system",daemonset="kube-proxy"} 2 +# HELP kube_daemonset_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_daemonset_labels gauge +kube_daemonset_labels{namespace="kube-system",daemonset="kube-proxy",label_k8s_app="kube-proxy"} 1 +# HELP kube_deployment_created Unix creation timestamp +# TYPE kube_deployment_created gauge +kube_deployment_created{namespace="kube-system",deployment="kube-state-metrics"} 1.568622119e+09 +kube_deployment_created{namespace="kube-system",deployment="coredns"} 1.567009606e+09 +# HELP kube_deployment_status_replicas The number of replicas per deployment. +# TYPE kube_deployment_status_replicas gauge +kube_deployment_status_replicas{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_status_replicas{namespace="kube-system",deployment="coredns"} 2 +# HELP kube_deployment_status_replicas_available The number of available replicas per deployment. 
+# TYPE kube_deployment_status_replicas_available gauge +kube_deployment_status_replicas_available{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_status_replicas_available{namespace="kube-system",deployment="coredns"} 2 +# HELP kube_deployment_status_replicas_unavailable The number of unavailable replicas per deployment. +# TYPE kube_deployment_status_replicas_unavailable gauge +kube_deployment_status_replicas_unavailable{namespace="kube-system",deployment="kube-state-metrics"} 0 +kube_deployment_status_replicas_unavailable{namespace="kube-system",deployment="coredns"} 0 +# HELP kube_deployment_status_replicas_updated The number of updated replicas per deployment. +# TYPE kube_deployment_status_replicas_updated gauge +kube_deployment_status_replicas_updated{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_status_replicas_updated{namespace="kube-system",deployment="coredns"} 2 +# HELP kube_deployment_status_observed_generation The generation observed by the deployment controller. +# TYPE kube_deployment_status_observed_generation gauge +kube_deployment_status_observed_generation{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_status_observed_generation{namespace="kube-system",deployment="coredns"} 4 +# HELP kube_deployment_spec_replicas Number of desired pods for a deployment. +# TYPE kube_deployment_spec_replicas gauge +kube_deployment_spec_replicas{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_spec_replicas{namespace="kube-system",deployment="coredns"} 2 +# HELP kube_deployment_spec_paused Whether the deployment is paused and will not be processed by the deployment controller. +# TYPE kube_deployment_spec_paused gauge +kube_deployment_spec_paused{namespace="kube-system",deployment="kube-state-metrics"} 0 +kube_deployment_spec_paused{namespace="kube-system",deployment="coredns"} 0 +# HELP kube_deployment_spec_strategy_rollingupdate_max_unavailable Maximum number of unavailable replicas during a rolling update of a deployment. +# TYPE kube_deployment_spec_strategy_rollingupdate_max_unavailable gauge +kube_deployment_spec_strategy_rollingupdate_max_unavailable{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_spec_strategy_rollingupdate_max_unavailable{namespace="kube-system",deployment="coredns"} 1 +# HELP kube_deployment_spec_strategy_rollingupdate_max_surge Maximum number of replicas that can be scheduled above the desired number of replicas during a rolling update of a deployment. +# TYPE kube_deployment_spec_strategy_rollingupdate_max_surge gauge +kube_deployment_spec_strategy_rollingupdate_max_surge{namespace="kube-system",deployment="coredns"} 1 +kube_deployment_spec_strategy_rollingupdate_max_surge{namespace="kube-system",deployment="kube-state-metrics"} 1 +# HELP kube_deployment_metadata_generation Sequence number representing a specific generation of the desired state. +# TYPE kube_deployment_metadata_generation gauge +kube_deployment_metadata_generation{namespace="kube-system",deployment="kube-state-metrics"} 1 +kube_deployment_metadata_generation{namespace="kube-system",deployment="coredns"} 4 +# HELP kube_deployment_labels Kubernetes labels converted to Prometheus labels. 
+# TYPE kube_deployment_labels gauge +kube_deployment_labels{namespace="kube-system",deployment="kube-state-metrics",label_k8s_app="kube-state-metrics"} 1 +kube_deployment_labels{namespace="kube-system",deployment="coredns",label_k8s_app="kube-dns"} 1 +# HELP kube_endpoint_info Information about endpoint. +# TYPE kube_endpoint_info gauge +kube_endpoint_info{namespace="kube-system",endpoint="kube-state-metrics"} 1 +kube_endpoint_info{namespace="kube-system",endpoint="kube-scheduler"} 1 +kube_endpoint_info{namespace="default",endpoint="willsucceed"} 1 +kube_endpoint_info{namespace="default",endpoint="willfail"} 1 +kube_endpoint_info{namespace="rqtest",endpoint="willsucceed"} 1 +kube_endpoint_info{namespace="default",endpoint="kubernetes"} 1 +kube_endpoint_info{namespace="kube-system",endpoint="kube-controller-manager"} 1 +kube_endpoint_info{namespace="kube-system",endpoint="kube-dns"} 1 +kube_endpoint_info{namespace="default",endpoint="my-nginx"} 1 +# HELP kube_endpoint_created Unix creation timestamp +# TYPE kube_endpoint_created gauge +kube_endpoint_created{namespace="default",endpoint="kubernetes"} 1.567009602e+09 +kube_endpoint_created{namespace="kube-system",endpoint="kube-controller-manager"} 1.567009603e+09 +kube_endpoint_created{namespace="kube-system",endpoint="kube-dns"} 1.567009612e+09 +kube_endpoint_created{namespace="default",endpoint="my-nginx"} 1.567783021e+09 +kube_endpoint_created{namespace="kube-system",endpoint="kube-state-metrics"} 1.568195118e+09 +kube_endpoint_created{namespace="kube-system",endpoint="kube-scheduler"} 1.567009604e+09 +kube_endpoint_created{namespace="default",endpoint="willsucceed"} 1.568629567e+09 +kube_endpoint_created{namespace="default",endpoint="willfail"} 1.568629567e+09 +kube_endpoint_created{namespace="rqtest",endpoint="willsucceed"} 1.568629886e+09 +# HELP kube_endpoint_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_endpoint_labels gauge +kube_endpoint_labels{namespace="kube-system",endpoint="kube-state-metrics",label_k8s_app="kube-state-metrics"} 1 +kube_endpoint_labels{namespace="kube-system",endpoint="kube-scheduler"} 1 +kube_endpoint_labels{namespace="default",endpoint="willsucceed"} 1 +kube_endpoint_labels{namespace="default",endpoint="willfail"} 1 +kube_endpoint_labels{namespace="rqtest",endpoint="willsucceed"} 1 +kube_endpoint_labels{namespace="default",endpoint="kubernetes"} 1 +kube_endpoint_labels{namespace="kube-system",endpoint="kube-controller-manager"} 1 +kube_endpoint_labels{namespace="kube-system",endpoint="kube-dns",label_k8s_app="kube-dns",label_kubernetes_io_cluster_service="true",label_kubernetes_io_name="KubeDNS"} 1 +kube_endpoint_labels{namespace="default",endpoint="my-nginx",label_app="my-nginx"} 1 +# HELP kube_endpoint_address_available Number of addresses available in endpoint. 
+# TYPE kube_endpoint_address_available gauge +kube_endpoint_address_available{namespace="kube-system",endpoint="kube-scheduler"} 0 +kube_endpoint_address_available{namespace="default",endpoint="willsucceed"} 0 +kube_endpoint_address_available{namespace="default",endpoint="willfail"} 0 +kube_endpoint_address_available{namespace="rqtest",endpoint="willsucceed"} 0 +kube_endpoint_address_available{namespace="kube-system",endpoint="kube-state-metrics"} 2 +kube_endpoint_address_available{namespace="kube-system",endpoint="kube-controller-manager"} 0 +kube_endpoint_address_available{namespace="kube-system",endpoint="kube-dns"} 6 +kube_endpoint_address_available{namespace="default",endpoint="my-nginx"} 0 +kube_endpoint_address_available{namespace="default",endpoint="kubernetes"} 1 +# HELP kube_endpoint_address_not_ready Number of addresses not ready in endpoint +# TYPE kube_endpoint_address_not_ready gauge +kube_endpoint_address_not_ready{namespace="default",endpoint="kubernetes"} 0 +kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-controller-manager"} 0 +kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-dns"} 0 +kube_endpoint_address_not_ready{namespace="default",endpoint="my-nginx"} 0 +kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-state-metrics"} 0 +kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-scheduler"} 0 +kube_endpoint_address_not_ready{namespace="default",endpoint="willsucceed"} 0 +kube_endpoint_address_not_ready{namespace="default",endpoint="willfail"} 0 +kube_endpoint_address_not_ready{namespace="rqtest",endpoint="willsucceed"} 0 +# HELP kube_hpa_metadata_generation The generation observed by the HorizontalPodAutoscaler controller. +# TYPE kube_hpa_metadata_generation gauge +# HELP kube_hpa_spec_max_replicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. +# TYPE kube_hpa_spec_max_replicas gauge +# HELP kube_hpa_spec_min_replicas Lower limit for the number of pods that can be set by the autoscaler, default 1. +# TYPE kube_hpa_spec_min_replicas gauge +# HELP kube_hpa_status_current_replicas Current number of replicas of pods managed by this autoscaler. +# TYPE kube_hpa_status_current_replicas gauge +# HELP kube_hpa_status_desired_replicas Desired number of replicas of pods managed by this autoscaler. +# TYPE kube_hpa_status_desired_replicas gauge +# HELP kube_hpa_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_hpa_labels gauge +# HELP kube_hpa_status_condition The condition of this autoscaler. +# TYPE kube_hpa_status_condition gauge +# HELP kube_ingress_info Information about ingress. +# TYPE kube_ingress_info gauge +# HELP kube_ingress_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_ingress_labels gauge +# HELP kube_ingress_created Unix creation timestamp +# TYPE kube_ingress_created gauge +# HELP kube_ingress_metadata_resource_version Resource version representing a specific version of ingress. +# TYPE kube_ingress_metadata_resource_version gauge +# HELP kube_ingress_path Ingress host, paths and backend service information. +# TYPE kube_ingress_path gauge +# HELP kube_job_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_job_labels gauge +# HELP kube_job_info Information about job. 
+# TYPE kube_job_info gauge +# HELP kube_job_created Unix creation timestamp +# TYPE kube_job_created gauge +# HELP kube_job_spec_parallelism The maximum desired number of pods the job should run at any given time. +# TYPE kube_job_spec_parallelism gauge +# HELP kube_job_spec_completions The desired number of successfully finished pods the job should be run with. +# TYPE kube_job_spec_completions gauge +# HELP kube_job_spec_active_deadline_seconds The duration in seconds relative to the startTime that the job may be active before the system tries to terminate it. +# TYPE kube_job_spec_active_deadline_seconds gauge +# HELP kube_job_status_succeeded The number of pods which reached Phase Succeeded. +# TYPE kube_job_status_succeeded gauge +# HELP kube_job_status_failed The number of pods which reached Phase Failed. +# TYPE kube_job_status_failed gauge +# HELP kube_job_status_active The number of actively running pods. +# TYPE kube_job_status_active gauge +# HELP kube_job_complete The job has completed its execution. +# TYPE kube_job_complete gauge +# HELP kube_job_failed The job has failed its execution. +# TYPE kube_job_failed gauge +# HELP kube_job_status_start_time StartTime represents time when the job was acknowledged by the Job Manager. +# TYPE kube_job_status_start_time gauge +# HELP kube_job_status_completion_time CompletionTime represents time when the job was completed. +# TYPE kube_job_status_completion_time gauge +# HELP kube_job_owner Information about the Job's owner. +# TYPE kube_job_owner gauge +# HELP kube_limitrange Information about limit range. +# TYPE kube_limitrange gauge +kube_limitrange{namespace="default-mem-example",limitrange="mem-limit-range",resource="memory",type="Container",constraint="default"} 5.36870912e+08 +kube_limitrange{namespace="default-mem-example",limitrange="mem-limit-range",resource="memory",type="Container",constraint="defaultRequest"} 2.68435456e+08 +# HELP kube_limitrange_created Unix creation timestamp +# TYPE kube_limitrange_created gauge +kube_limitrange_created{namespace="default-mem-example",limitrange="mem-limit-range"} 1.567347894e+09 +# HELP kube_namespace_created Unix creation timestamp +# TYPE kube_namespace_created gauge +kube_namespace_created{namespace="kube-system"} 1.567009598e+09 +kube_namespace_created{namespace="pablo"} 1.567347599e+09 +kube_namespace_created{namespace="kube-node-lease"} 1.567689471e+09 +kube_namespace_created{namespace="rqtest"} 1.568629266e+09 +kube_namespace_created{namespace="default"} 1.567009597e+09 +kube_namespace_created{namespace="default-mem-example"} 1.567347623e+09 +kube_namespace_created{namespace="kube-public"} 1.567009602e+09 +# HELP kube_namespace_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_namespace_labels gauge +kube_namespace_labels{namespace="default"} 1 +kube_namespace_labels{namespace="default-mem-example"} 1 +kube_namespace_labels{namespace="kube-public"} 1 +kube_namespace_labels{namespace="kube-system"} 1 +kube_namespace_labels{namespace="pablo"} 1 +kube_namespace_labels{namespace="kube-node-lease"} 1 +kube_namespace_labels{namespace="rqtest"} 1 +# HELP kube_namespace_status_phase kubernetes namespace status phase. 
+# TYPE kube_namespace_status_phase gauge +kube_namespace_status_phase{namespace="default-mem-example",phase="Active"} 1 +kube_namespace_status_phase{namespace="default-mem-example",phase="Terminating"} 0 +kube_namespace_status_phase{namespace="kube-public",phase="Active"} 1 +kube_namespace_status_phase{namespace="kube-public",phase="Terminating"} 0 +kube_namespace_status_phase{namespace="kube-system",phase="Active"} 1 +kube_namespace_status_phase{namespace="kube-system",phase="Terminating"} 0 +kube_namespace_status_phase{namespace="pablo",phase="Active"} 1 +kube_namespace_status_phase{namespace="pablo",phase="Terminating"} 0 +kube_namespace_status_phase{namespace="kube-node-lease",phase="Active"} 1 +kube_namespace_status_phase{namespace="kube-node-lease",phase="Terminating"} 0 +kube_namespace_status_phase{namespace="rqtest",phase="Active"} 1 +kube_namespace_status_phase{namespace="rqtest",phase="Terminating"} 0 +kube_namespace_status_phase{namespace="default",phase="Active"} 1 +kube_namespace_status_phase{namespace="default",phase="Terminating"} 0 +# HELP kube_node_info Information about a cluster node. +# TYPE kube_node_info gauge +kube_node_info{node="minikube",kernel_version="4.15.0",os_image="Buildroot 2018.05.3",container_runtime_version="docker://18.9.8",kubelet_version="v1.15.2",kubeproxy_version="v1.15.2",provider_id=""} 1 +# HELP kube_node_created Unix creation timestamp +# TYPE kube_node_created gauge +kube_node_created{node="minikube"} 1.567009598e+09 +# HELP kube_node_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_node_labels gauge +kube_node_labels{node="minikube",label_beta_kubernetes_io_arch="amd64",label_beta_kubernetes_io_os="linux",label_kubernetes_io_arch="amd64",label_kubernetes_io_hostname="minikube",label_kubernetes_io_os="linux",label_node_role_kubernetes_io_master=""} 1 +# HELP kube_node_spec_unschedulable Whether a node can schedule new pods. +# TYPE kube_node_spec_unschedulable gauge +kube_node_spec_unschedulable{node="minikube"} 0 +# HELP kube_node_spec_taint The taint of a cluster node. +# TYPE kube_node_spec_taint gauge +# HELP kube_node_status_condition The condition of a cluster node. +# TYPE kube_node_status_condition gauge +kube_node_status_condition{node="minikube",condition="MemoryPressure",status="true"} 0 +kube_node_status_condition{node="minikube",condition="MemoryPressure",status="false"} 1 +kube_node_status_condition{node="minikube",condition="MemoryPressure",status="unknown"} 0 +kube_node_status_condition{node="minikube",condition="DiskPressure",status="true"} 0 +kube_node_status_condition{node="minikube",condition="DiskPressure",status="false"} 1 +kube_node_status_condition{node="minikube",condition="DiskPressure",status="unknown"} 0 +kube_node_status_condition{node="minikube",condition="PIDPressure",status="true"} 0 +kube_node_status_condition{node="minikube",condition="PIDPressure",status="false"} 1 +kube_node_status_condition{node="minikube",condition="PIDPressure",status="unknown"} 0 +kube_node_status_condition{node="minikube",condition="Ready",status="true"} 1 +kube_node_status_condition{node="minikube",condition="Ready",status="false"} 0 +kube_node_status_condition{node="minikube",condition="Ready",status="unknown"} 0 +# HELP kube_node_status_phase The phase the node is currently in. +# TYPE kube_node_status_phase gauge +# HELP kube_node_status_capacity The capacity for different resources of a node. 
+# TYPE kube_node_status_capacity gauge +kube_node_status_capacity{node="minikube",resource="cpu",unit="core"} 4 +kube_node_status_capacity{node="minikube",resource="ephemeral_storage",unit="byte"} 1.8211606528e+10 +kube_node_status_capacity{node="minikube",resource="hugepages_2Mi",unit="byte"} 0 +kube_node_status_capacity{node="minikube",resource="memory",unit="byte"} 8.361435136e+09 +kube_node_status_capacity{node="minikube",resource="pods",unit="integer"} 110 +# HELP kube_node_status_capacity_pods The total pod resources of the node. +# TYPE kube_node_status_capacity_pods gauge +kube_node_status_capacity_pods{node="minikube"} 110 +# HELP kube_node_status_capacity_cpu_cores The total CPU resources of the node. +# TYPE kube_node_status_capacity_cpu_cores gauge +kube_node_status_capacity_cpu_cores{node="minikube"} 4 +# HELP kube_node_status_capacity_memory_bytes The total memory resources of the node. +# TYPE kube_node_status_capacity_memory_bytes gauge +kube_node_status_capacity_memory_bytes{node="minikube"} 8.361435136e+09 +# HELP kube_node_status_allocatable The allocatable for different resources of a node that are available for scheduling. +# TYPE kube_node_status_allocatable gauge +kube_node_status_allocatable{node="minikube",resource="cpu",unit="core"} 4 +kube_node_status_allocatable{node="minikube",resource="ephemeral_storage",unit="byte"} 1.6390445849e+10 +kube_node_status_allocatable{node="minikube",resource="hugepages_2Mi",unit="byte"} 0 +kube_node_status_allocatable{node="minikube",resource="memory",unit="byte"} 8.256577536e+09 +kube_node_status_allocatable{node="minikube",resource="pods",unit="integer"} 110 +# HELP kube_node_status_allocatable_pods The pod resources of a node that are available for scheduling. +# TYPE kube_node_status_allocatable_pods gauge +kube_node_status_allocatable_pods{node="minikube"} 110 +# HELP kube_node_status_allocatable_cpu_cores The CPU resources of a node that are available for scheduling. +# TYPE kube_node_status_allocatable_cpu_cores gauge +kube_node_status_allocatable_cpu_cores{node="minikube"} 4 +# HELP kube_node_status_allocatable_memory_bytes The memory resources of a node that are available for scheduling. +# TYPE kube_node_status_allocatable_memory_bytes gauge +kube_node_status_allocatable_memory_bytes{node="minikube"} 8.256577536e+09 +# HELP kube_persistentvolumeclaim_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_persistentvolumeclaim_labels gauge +# HELP kube_persistentvolumeclaim_info Information about persistent volume claim. +# TYPE kube_persistentvolumeclaim_info gauge +# HELP kube_persistentvolumeclaim_status_phase The phase the persistent volume claim is currently in. +# TYPE kube_persistentvolumeclaim_status_phase gauge +# HELP kube_persistentvolumeclaim_resource_requests_storage_bytes The capacity of storage requested by the persistent volume claim. +# TYPE kube_persistentvolumeclaim_resource_requests_storage_bytes gauge +# HELP kube_persistentvolumeclaim_access_mode The access mode(s) specified by the persistent volume claim. +# TYPE kube_persistentvolumeclaim_access_mode gauge +# HELP kube_persistentvolume_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_persistentvolume_labels gauge +# HELP kube_persistentvolume_status_phase The phase indicates if a volume is available, bound to a claim, or released by a claim. +# TYPE kube_persistentvolume_status_phase gauge +# HELP kube_persistentvolume_info Information about persistentvolume. 
+# TYPE kube_persistentvolume_info gauge +# HELP kube_persistentvolume_capacity_bytes Persistentvolume capacity in bytes. +# TYPE kube_persistentvolume_capacity_bytes gauge +# HELP kube_poddisruptionbudget_created Unix creation timestamp +# TYPE kube_poddisruptionbudget_created gauge +# HELP kube_poddisruptionbudget_status_current_healthy Current number of healthy pods +# TYPE kube_poddisruptionbudget_status_current_healthy gauge +# HELP kube_poddisruptionbudget_status_desired_healthy Minimum desired number of healthy pods +# TYPE kube_poddisruptionbudget_status_desired_healthy gauge +# HELP kube_poddisruptionbudget_status_pod_disruptions_allowed Number of pod disruptions that are currently allowed +# TYPE kube_poddisruptionbudget_status_pod_disruptions_allowed gauge +# HELP kube_poddisruptionbudget_status_expected_pods Total number of pods counted by this disruption budget +# TYPE kube_poddisruptionbudget_status_expected_pods gauge +# HELP kube_poddisruptionbudget_status_observed_generation Most recent generation observed when updating this PDB status +# TYPE kube_poddisruptionbudget_status_observed_generation gauge +# HELP kube_pod_info Information about pod. +# TYPE kube_pod_info gauge +kube_pod_info{namespace="kube-system",pod="storage-provisioner",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="a55a886b-c9b0-11e9-8c8c-080027dc36ee",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="kube-addon-manager-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="804c0947-e160-4f6f-afa2-c87e97a43a84",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="kube-proxy-6dvtf",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="6efd0863-1af9-4d5e-b124-4600b5edb8db",node="minikube",created_by_kind="DaemonSet",created_by_name="kube-proxy",priority_class="system-node-critical"} 1 +kube_pod_info{namespace="default",pod="stdout-logger",host_ip="10.0.2.15",pod_ip="172.17.0.4",uid="49f54912-cf08-11e9-8c8c-080027dc36ee",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="default",pod="playground",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="1fe957cf-932f-4381-b847-cc7c173be7c4",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="kube-apiserver-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="64b87ae5-398f-452d-af8a-76f903944f4d",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="default-mem-example",pod="default-mem-demo",host_ip="10.0.2.15",pod_ip="172.17.0.5",uid="664f8f65-ccc4-11e9-8c8c-080027dc36ee",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",host_ip="10.0.2.15",pod_ip="172.17.0.9",uid="bb432770-52e1-4478-94df-57d12dbd0638",node="minikube",created_by_kind="ReplicaSet",created_by_name="coredns-5c98db65d4",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",host_ip="10.0.2.15",pod_ip="172.17.0.2",uid="8f536064-840c-4dce-8bd2-8783d41e5ed6",node="minikube",created_by_kind="ReplicaSet",created_by_name="kube-state-metrics-6766c6d46b",priority_class=""} 1 
+kube_pod_info{namespace="kube-system",pod="etcd-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="fd24e4fc-608e-4482-b1f2-bda1b968c6f3",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="coredns-5c98db65d4-df89f",host_ip="10.0.2.15",pod_ip="172.17.0.8",uid="fcf6c6a0-98a1-4ad5-80c4-990e95046c1c",node="minikube",created_by_kind="ReplicaSet",created_by_name="coredns-5c98db65d4",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="kube-controller-manager-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="aa16fc25-2f4f-4c52-9838-82b6debf9df6",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="kube-scheduler-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="a1c26908-05bd-4eb5-8dc8-f5c9744e4a7b",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +# HELP kube_pod_start_time Start time in unix timestamp for a pod. +# TYPE kube_pod_start_time gauge +kube_pod_start_time{namespace="kube-system",pod="kube-scheduler-minikube"} 1.567009586e+09 +kube_pod_start_time{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7"} 1.567689495e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql"} 1.568622119e+09 +kube_pod_start_time{namespace="kube-system",pod="etcd-minikube"} 1.567689466e+09 +kube_pod_start_time{namespace="kube-system",pod="coredns-5c98db65d4-df89f"} 1.567689495e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-controller-manager-minikube"} 1.567689466e+09 +kube_pod_start_time{namespace="default-mem-example",pod="default-mem-demo"} 1.567347951e+09 +kube_pod_start_time{namespace="kube-system",pod="storage-provisioner"} 1.567009613e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-addon-manager-minikube"} 1.567689466e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-proxy-6dvtf"} 1.567689506e+09 +kube_pod_start_time{namespace="default",pod="stdout-logger"} 1.567597011e+09 +kube_pod_start_time{namespace="default",pod="playground"} 1.567693987e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-apiserver-minikube"} 1.567689466e+09 +# HELP kube_pod_completion_time Completion time in unix timestamp for a pod. +# TYPE kube_pod_completion_time gauge +# HELP kube_pod_owner Information about the Pod's owner. 
+# TYPE kube_pod_owner gauge +kube_pod_owner{namespace="default-mem-example",pod="default-mem-demo",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="storage-provisioner",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-addon-manager-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-proxy-6dvtf",owner_kind="DaemonSet",owner_name="kube-proxy",owner_is_controller="true"} 1 +kube_pod_owner{namespace="default",pod="stdout-logger",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="default",pod="playground",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-apiserver-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-scheduler-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",owner_kind="ReplicaSet",owner_name="coredns-5c98db65d4",owner_is_controller="true"} 1 +kube_pod_owner{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",owner_kind="ReplicaSet",owner_name="kube-state-metrics-6766c6d46b",owner_is_controller="true"} 1 +kube_pod_owner{namespace="kube-system",pod="etcd-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="coredns-5c98db65d4-df89f",owner_kind="ReplicaSet",owner_name="coredns-5c98db65d4",owner_is_controller="true"} 1 +kube_pod_owner{namespace="kube-system",pod="kube-controller-manager-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +# HELP kube_pod_labels Kubernetes labels converted to Prometheus labels. 
+# TYPE kube_pod_labels gauge +kube_pod_labels{namespace="default-mem-example",pod="default-mem-demo"} 1 +kube_pod_labels{namespace="kube-system",pod="storage-provisioner",label_addonmanager_kubernetes_io_mode="Reconcile",label_integration_test="storage-provisioner"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-addon-manager-minikube",label_component="kube-addon-manager",label_kubernetes_io_minikube_addons="addon-manager",label_version="v9.0"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-proxy-6dvtf",label_controller_revision_hash="84c6b844cd",label_k8s_app="kube-proxy",label_pod_template_generation="2"} 1 +kube_pod_labels{namespace="default",pod="stdout-logger",label_my_label_uses_underscores="ha.ha.ha"} 1 +kube_pod_labels{namespace="default",pod="playground"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-apiserver-minikube",label_component="kube-apiserver",label_tier="control-plane"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-scheduler-minikube",label_component="kube-scheduler",label_tier="control-plane"} 1 +kube_pod_labels{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",label_k8s_app="kube-dns",label_pod_template_hash="5c98db65d4"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",label_k8s_app="kube-state-metrics",label_pod_template_hash="6766c6d46b"} 1 +kube_pod_labels{namespace="kube-system",pod="etcd-minikube",label_component="etcd",label_tier="control-plane"} 1 +kube_pod_labels{namespace="kube-system",pod="coredns-5c98db65d4-df89f",label_k8s_app="kube-dns",label_pod_template_hash="5c98db65d4"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-controller-manager-minikube",label_component="kube-controller-manager",label_tier="control-plane"} 1 +# HELP kube_pod_created Unix creation timestamp +# TYPE kube_pod_created gauge +kube_pod_created{namespace="default",pod="stdout-logger"} 1.567597011e+09 +kube_pod_created{namespace="default",pod="playground"} 1.567693987e+09 +kube_pod_created{namespace="kube-system",pod="kube-apiserver-minikube"} 1.567689473e+09 +kube_pod_created{namespace="default-mem-example",pod="default-mem-demo"} 1.567347951e+09 +kube_pod_created{namespace="kube-system",pod="storage-provisioner"} 1.567009613e+09 +kube_pod_created{namespace="kube-system",pod="kube-addon-manager-minikube"} 1.567689472e+09 +kube_pod_created{namespace="kube-system",pod="kube-proxy-6dvtf"} 1.567689506e+09 +kube_pod_created{namespace="kube-system",pod="coredns-5c98db65d4-df89f"} 1.567689495e+09 +kube_pod_created{namespace="kube-system",pod="kube-controller-manager-minikube"} 1.568621217e+09 +kube_pod_created{namespace="kube-system",pod="kube-scheduler-minikube"} 1.567689472e+09 +kube_pod_created{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7"} 1.567689495e+09 +kube_pod_created{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql"} 1.568622119e+09 +kube_pod_created{namespace="kube-system",pod="etcd-minikube"} 1.567689472e+09 +# HELP kube_pod_status_scheduled_time Unix timestamp when pod moved into scheduled status +# TYPE kube_pod_status_scheduled_time gauge +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-addon-manager-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-proxy-6dvtf"} 1.567689506e+09 +kube_pod_status_scheduled_time{namespace="default",pod="stdout-logger"} 1.567597011e+09 +kube_pod_status_scheduled_time{namespace="default",pod="playground"} 1.567693987e+09 
+kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-apiserver-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="default-mem-example",pod="default-mem-demo"} 1.567347951e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="storage-provisioner"} 1.567009613e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql"} 1.568622119e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="etcd-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="coredns-5c98db65d4-df89f"} 1.567689495e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-controller-manager-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-scheduler-minikube"} 1.567009586e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7"} 1.567689495e+09 +# HELP kube_pod_status_phase The pods current phase. +# TYPE kube_pod_status_phase gauge +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Pending"} 0 
+kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Unknown"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Pending"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Failed"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Running"} 1 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Unknown"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Pending"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Failed"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Running"} 1 +kube_pod_status_phase{namespace="default",pod="playground",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Pending"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Failed"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Running"} 1 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Running"} 1 
+kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Unknown"} 0 +# HELP kube_pod_status_ready Describes whether the pod is ready to serve requests. +# TYPE kube_pod_status_ready gauge +kube_pod_status_ready{namespace="default-mem-example",pod="default-mem-demo",condition="true"} 1 +kube_pod_status_ready{namespace="default-mem-example",pod="default-mem-demo",condition="false"} 0 +kube_pod_status_ready{namespace="default-mem-example",pod="default-mem-demo",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="storage-provisioner",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="storage-provisioner",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="storage-provisioner",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",condition="unknown"} 0 +kube_pod_status_ready{namespace="default",pod="stdout-logger",condition="true"} 1 +kube_pod_status_ready{namespace="default",pod="stdout-logger",condition="false"} 0 +kube_pod_status_ready{namespace="default",pod="stdout-logger",condition="unknown"} 0 +kube_pod_status_ready{namespace="default",pod="playground",condition="true"} 1 +kube_pod_status_ready{namespace="default",pod="playground",condition="false"} 0 +kube_pod_status_ready{namespace="default",pod="playground",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="etcd-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="etcd-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="etcd-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="true"} 1 
+kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",condition="unknown"} 0 +# HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. +# TYPE kube_pod_status_scheduled gauge +kube_pod_status_scheduled{namespace="default-mem-example",pod="default-mem-demo",condition="true"} 1 +kube_pod_status_scheduled{namespace="default-mem-example",pod="default-mem-demo",condition="false"} 0 +kube_pod_status_scheduled{namespace="default-mem-example",pod="default-mem-demo",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="storage-provisioner",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="storage-provisioner",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="storage-provisioner",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-addon-manager-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-addon-manager-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-addon-manager-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-proxy-6dvtf",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-proxy-6dvtf",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-proxy-6dvtf",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="default",pod="stdout-logger",condition="true"} 1 +kube_pod_status_scheduled{namespace="default",pod="stdout-logger",condition="false"} 0 +kube_pod_status_scheduled{namespace="default",pod="stdout-logger",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="default",pod="playground",condition="true"} 1 +kube_pod_status_scheduled{namespace="default",pod="playground",condition="false"} 0 +kube_pod_status_scheduled{namespace="default",pod="playground",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-apiserver-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-apiserver-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-apiserver-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-scheduler-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-scheduler-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-scheduler-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="false"} 0 
+kube_pod_status_scheduled{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="etcd-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="etcd-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="etcd-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-controller-manager-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-controller-manager-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-controller-manager-minikube",condition="unknown"} 0 +# HELP kube_pod_container_info Information about a container in a pod. +# TYPE kube_pod_container_info gauge +kube_pod_container_info{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",image="k8s.gcr.io/kube-addon-manager:v9.0",image_id="docker://sha256:119701e77cbc4c6cb32c05d9c39050127eb865c1a9f21f830685379b6b65d6ae",container_id="docker://ae44f6171c1a602d5a034049cc98ed90d30b7013f4ec7a865404bfd6f821e3d6"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",image="k8s.gcr.io/kube-proxy:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-proxy@sha256:626f983f25f8b7799ca7ab001fd0985a72c2643c0acb877d2888c0aa4fcbdf56",container_id="docker://03d9bd9ed242a1b31c273a201aa6701d991cbd6ae5d7ff180d9d7e326efb48a9"} 1 +kube_pod_container_info{namespace="default",pod="stdout-logger",container="alpine",image="alpine:3.2",image_id="docker-pullable://alpine@sha256:e9a2035f9d0d7cee1cdd445f5bfa0c5c646455ee26f14565dce23cf2d2de7570",container_id="docker://be919c5f2a474dc6bbeca0835ffacd639937a060a0904846dc9f52e90c742d0a"} 1 +kube_pod_container_info{namespace="default",pod="playground",container="ubuntu",image="ubuntu:latest",image_id="docker-pullable://ubuntu@sha256:d1d454df0f579c6be4d8161d227462d69e163a8ff9d20a847533989cf0c94d90",container_id="docker://b17b21e347f9448c8b0c2edc1ed8d959bd9079e7befb3281c7a41fedcaecf38f"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",image="k8s.gcr.io/kube-apiserver:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-apiserver@sha256:5fae387bacf1def6c3915b4a3035cf8c8a4d06158b2e676721776d3d4afc05a2",container_id="docker://da080618e76a3dbb812b35c92517096fa12999fdc9ac47c8585380a27acc559f"} 1 +kube_pod_container_info{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",image="nginx:latest",image_id="docker-pullable://nginx@sha256:53ddb41e46de3d63376579acf46f9a41a8d7de33645db47a486de9769201fec9",container_id="docker://3c72f4f311796c758011a04d05b3fb899644ec085850dbcbe885b5144328478f"} 1 +kube_pod_container_info{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",image="gcr.io/k8s-minikube/storage-provisioner:v1.8.1",image_id="docker://sha256:4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c",container_id="docker://0e4b04eaf652840ac51e584dd385c10fbcc137d1a5eee6329967f922b82b5a5a"} 1 
+kube_pod_container_info{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",image="quay.io/coreos/kube-state-metrics:v1.7.2",image_id="docker-pullable://quay.io/coreos/kube-state-metrics@sha256:99a3e3297e281fec09fe850d6d4bccf4d9fd58ff62a5b37764d8a8bd1e79bd14",container_id="docker://fa1137e3a4047845b6bcdce50eb5934373b52200805be55a076407c96a9f5c2a"} 1 +kube_pod_container_info{namespace="kube-system",pod="etcd-minikube",container="etcd",image="k8s.gcr.io/etcd:3.3.10",image_id="docker-pullable://k8s.gcr.io/etcd@sha256:17da501f5d2a675be46040422a27b7cc21b8a43895ac998b171db1c346f361f7",container_id="docker://27c011eefe928f7ceb19808c699dc27770ddfd64397e0f9e7f2b850366e52b85"} 1 +kube_pod_container_info{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",image="k8s.gcr.io/coredns:1.3.1",image_id="docker-pullable://k8s.gcr.io/coredns@sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4",container_id="docker://9c66d133e5a614edfe9f71997247346110b4488096f34e4963a68e2d36c06c67"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",image="k8s.gcr.io/kube-controller-manager:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-controller-manager@sha256:7d3fc48cf83aa0a7b8f129fa4255bb5530908e1a5b194be269ea8329b48e9598",container_id="docker://f90216c30dc12b460eef9e099d043f5c8c30b31677cc23cfb4870242086acecd"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",image="k8s.gcr.io/kube-scheduler:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-scheduler@sha256:8fd3c3251f07234a234469e201900e4274726f1fe0d5dc6fb7da911f1c851a1a",container_id="docker://4f3d2d395e04b6d8ff9aa74bc5bb558857780861b17dd888310a4bb27c383b4b"} 1 +kube_pod_container_info{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",image="k8s.gcr.io/coredns:1.3.1",image_id="docker-pullable://k8s.gcr.io/coredns@sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4",container_id="docker://839b2b253ab43db87019fbaa0f8e836d31c757d40f44ce5e8678b3bb1573c719"} 1 +# HELP kube_pod_init_container_info Information about an init container in a pod. +# TYPE kube_pod_init_container_info gauge +# HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state. 
+# TYPE kube_pod_container_status_waiting gauge +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="etcd-minikube",container="etcd"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 0 +kube_pod_container_status_waiting{namespace="default",pod="playground",container="ubuntu"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 0 +kube_pod_container_status_waiting{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 0 +kube_pod_container_status_waiting{namespace="default",pod="stdout-logger",container="alpine"} 0 +# HELP kube_pod_init_container_status_waiting Describes whether the init container is currently in waiting state. +# TYPE kube_pod_init_container_status_waiting gauge +# HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state. 
+# TYPE kube_pod_container_status_waiting_reason gauge +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="CrashLoopBackOff"} 0 
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="InvalidImageName"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ContainerCreating"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="CrashLoopBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="CreateContainerConfigError"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ErrImagePull"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ImagePullBackOff"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="CreateContainerError"} 0
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="InvalidImageName"} 0 +# HELP kube_pod_init_container_status_waiting_reason Describes the reason the init container is currently in waiting state. +# TYPE kube_pod_init_container_status_waiting_reason gauge +# HELP kube_pod_container_status_running Describes whether the container is currently in running state. 
+# TYPE kube_pod_container_status_running gauge
+kube_pod_container_status_running{namespace="kube-system",pod="etcd-minikube",container="etcd"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 1
+kube_pod_container_status_running{namespace="default",pod="stdout-logger",container="alpine"} 1
+kube_pod_container_status_running{namespace="default",pod="playground",container="ubuntu"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 1
+kube_pod_container_status_running{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 1
+kube_pod_container_status_running{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 1
+# HELP kube_pod_init_container_status_running Describes whether the init container is currently in running state.
+# TYPE kube_pod_init_container_status_running gauge
+# HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state.
+# TYPE kube_pod_container_status_terminated gauge
+kube_pod_container_status_terminated{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 0
+kube_pod_container_status_terminated{namespace="default",pod="stdout-logger",container="alpine"} 0
+kube_pod_container_status_terminated{namespace="default",pod="playground",container="ubuntu"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 0
+kube_pod_container_status_terminated{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="etcd-minikube",container="etcd"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 0
+kube_pod_container_status_terminated{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 0
+# HELP kube_pod_init_container_status_terminated Describes whether the init container is currently in terminated state.
+# TYPE kube_pod_init_container_status_terminated gauge
+# HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state.
+# TYPE kube_pod_container_status_terminated_reason gauge
+kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="DeadlineExceeded"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="OOMKilled"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Completed"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Error"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ContainerCannotRun"} 0
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="DeadlineExceeded"} 0
+# HELP kube_pod_init_container_status_terminated_reason Describes the reason the init container is currently in terminated state.
+# TYPE kube_pod_init_container_status_terminated_reason gauge
+# HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state.
+# TYPE kube_pod_container_status_last_terminated_reason gauge
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Error"} 1
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Error"} 1
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Error"} 1
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Completed"} 1
+kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Error"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="DeadlineExceeded"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="OOMKilled"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Completed"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Error"} 1
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ContainerCannotRun"} 0
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="DeadlineExceeded"} 0
+# HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state.
+# TYPE kube_pod_init_container_status_last_terminated_reason gauge
+# HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded.
+# TYPE kube_pod_container_status_ready gauge
+kube_pod_container_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="etcd-minikube",container="etcd"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 1
+kube_pod_container_status_ready{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 1
+kube_pod_container_status_ready{namespace="default",pod="stdout-logger",container="alpine"} 1
+kube_pod_container_status_ready{namespace="default",pod="playground",container="ubuntu"} 1
+kube_pod_container_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 1
+# HELP kube_pod_init_container_status_ready Describes whether the init containers readiness check succeeded.
+# TYPE kube_pod_init_container_status_ready gauge
+# HELP kube_pod_container_status_restarts_total The number of container restarts per container.
+# TYPE kube_pod_container_status_restarts_total counter
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 0
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 0
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 2
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 0
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 0
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="etcd-minikube",container="etcd"} 0
+kube_pod_container_status_restarts_total{namespace="default",pod="stdout-logger",container="alpine"} 1
+kube_pod_container_status_restarts_total{namespace="default",pod="playground",container="ubuntu"} 0
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 0
+kube_pod_container_status_restarts_total{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 1
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 2
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 1
+kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 0
+# HELP kube_pod_init_container_status_restarts_total The number of restarts for the init container.
+# TYPE kube_pod_init_container_status_restarts_total counter
+# HELP kube_pod_container_resource_requests The number of requested request resource by a container.
+# TYPE kube_pod_container_resource_requests gauge
+kube_pod_container_resource_requests{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube",resource="memory",unit="byte"} 2.68435456e+08
+kube_pod_container_resource_requests{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube",resource="cpu",unit="core"} 0.005
+kube_pod_container_resource_requests{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube",resource="memory",unit="byte"} 5.24288e+07
+kube_pod_container_resource_requests{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",node="minikube",resource="cpu",unit="core"} 0.25
+kube_pod_container_resource_requests{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",node="minikube",resource="cpu",unit="core"} 0.1
+kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube",resource="cpu",unit="core"} 0.1
+kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube",resource="memory",unit="byte"} 7.340032e+07
+kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube",resource="cpu",unit="core"} 0.1
+kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube",resource="memory",unit="byte"} 7.340032e+07
+kube_pod_container_resource_requests{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",node="minikube",resource="cpu",unit="core"} 0.2
+# HELP kube_pod_container_resource_limits The number of requested limit resource by a container.
+# TYPE kube_pod_container_resource_limits gauge
+kube_pod_container_resource_limits{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube",resource="memory",unit="byte"} 5.36870912e+08
+kube_pod_container_resource_limits{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube",resource="memory",unit="byte"} 1.7825792e+08
+kube_pod_container_resource_limits{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube",resource="memory",unit="byte"} 1.7825792e+08
+# HELP kube_pod_init_container_resource_limits The number of requested limit resource by the init container.
+# TYPE kube_pod_init_container_resource_limits gauge
+# HELP kube_pod_container_resource_requests_cpu_cores The number of requested cpu cores by a container.
+# TYPE kube_pod_container_resource_requests_cpu_cores gauge
+kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube"} 0.1
+kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",node="minikube"} 0.2
+kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",node="minikube"} 0.1
+kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube"} 0.1
+kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube"} 0.005
+kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",node="minikube"} 0.25
+# HELP kube_init_pod_container_resource_requests_cpu_cores The number of requested cpu cores by an init container.
+# TYPE kube_init_pod_container_resource_requests_cpu_cores gauge
+# HELP kube_pod_container_resource_requests_memory_bytes The number of requested memory bytes by a container.
+# TYPE kube_pod_container_resource_requests_memory_bytes gauge
+kube_pod_container_resource_requests_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube"} 7.340032e+07
+kube_pod_container_resource_requests_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube"} 7.340032e+07
+kube_pod_container_resource_requests_memory_bytes{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube"} 2.68435456e+08
+kube_pod_container_resource_requests_memory_bytes{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube"} 5.24288e+07
+# HELP kube_pod_container_resource_limits_cpu_cores The limit on cpu cores to be used by a container.
+# TYPE kube_pod_container_resource_limits_cpu_cores gauge
+# HELP kube_pod_container_resource_limits_memory_bytes The limit on memory to be used by a container in bytes.
+# TYPE kube_pod_container_resource_limits_memory_bytes gauge
+kube_pod_container_resource_limits_memory_bytes{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube"} 5.36870912e+08
+kube_pod_container_resource_limits_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube"} 1.7825792e+08
+kube_pod_container_resource_limits_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube"} 1.7825792e+08
+# HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod.
+# TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge
+# HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only.
+# TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge
+# HELP kube_replicaset_created Unix creation timestamp
+# TYPE kube_replicaset_created gauge
+kube_replicaset_created{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1.568622119e+09
+kube_replicaset_created{namespace="kube-system",replicaset="coredns-576cbf47c7"} 1.567009611e+09
+kube_replicaset_created{namespace="kube-system",replicaset="coredns-5c98db65d4"} 1.567689495e+09
+# HELP kube_replicaset_status_replicas The number of replicas per ReplicaSet.
+# TYPE kube_replicaset_status_replicas gauge
+kube_replicaset_status_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1
+kube_replicaset_status_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0
+kube_replicaset_status_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2
+# HELP kube_replicaset_status_fully_labeled_replicas The number of fully labeled replicas per ReplicaSet.
+# TYPE kube_replicaset_status_fully_labeled_replicas gauge
+kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1
+kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0
+kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2
+# HELP kube_replicaset_status_ready_replicas The number of ready replicas per ReplicaSet.
+# TYPE kube_replicaset_status_ready_replicas gauge
+kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1
+kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0
+kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2
+# HELP kube_replicaset_status_observed_generation The generation observed by the ReplicaSet controller.
+# TYPE kube_replicaset_status_observed_generation gauge
+kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1
+kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="coredns-576cbf47c7"} 3
+kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2
+# HELP kube_replicaset_spec_replicas Number of desired pods for a ReplicaSet.
+# TYPE kube_replicaset_spec_replicas gauge
+kube_replicaset_spec_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1
+kube_replicaset_spec_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0
+kube_replicaset_spec_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2
+# HELP kube_replicaset_metadata_generation Sequence number representing a specific generation of the desired state.
+# TYPE kube_replicaset_metadata_generation gauge
+kube_replicaset_metadata_generation{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1
+kube_replicaset_metadata_generation{namespace="kube-system",replicaset="coredns-576cbf47c7"} 3
+kube_replicaset_metadata_generation{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2
+# HELP kube_replicaset_owner Information about the ReplicaSet's owner.
+# TYPE kube_replicaset_owner gauge
+kube_replicaset_owner{namespace="kube-system",replicaset="coredns-576cbf47c7",owner_kind="Deployment",owner_name="coredns",owner_is_controller="true"} 1
+kube_replicaset_owner{namespace="kube-system",replicaset="coredns-5c98db65d4",owner_kind="Deployment",owner_name="coredns",owner_is_controller="true"} 1
+kube_replicaset_owner{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b",owner_kind="Deployment",owner_name="kube-state-metrics",owner_is_controller="true"} 1
+# HELP kube_replicaset_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_replicaset_labels gauge
+kube_replicaset_labels{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b",label_pod_template_hash="6766c6d46b",label_k8s_app="kube-state-metrics"} 1
+kube_replicaset_labels{namespace="kube-system",replicaset="coredns-576cbf47c7",label_pod_template_hash="576cbf47c7",label_k8s_app="kube-dns"} 1
+kube_replicaset_labels{namespace="kube-system",replicaset="coredns-5c98db65d4",label_k8s_app="kube-dns",label_pod_template_hash="5c98db65d4"} 1
+# HELP kube_replicationcontroller_created Unix creation timestamp
+# TYPE kube_replicationcontroller_created gauge
+# HELP kube_replicationcontroller_status_replicas The number of replicas per ReplicationController.
+# TYPE kube_replicationcontroller_status_replicas gauge
+# HELP kube_replicationcontroller_status_fully_labeled_replicas The number of fully labeled replicas per ReplicationController.
+# TYPE kube_replicationcontroller_status_fully_labeled_replicas gauge
+# HELP kube_replicationcontroller_status_ready_replicas The number of ready replicas per ReplicationController.
+# TYPE kube_replicationcontroller_status_ready_replicas gauge
+# HELP kube_replicationcontroller_status_available_replicas The number of available replicas per ReplicationController.
+# TYPE kube_replicationcontroller_status_available_replicas gauge
+# HELP kube_replicationcontroller_status_observed_generation The generation observed by the ReplicationController controller.
+# TYPE kube_replicationcontroller_status_observed_generation gauge
+# HELP kube_replicationcontroller_spec_replicas Number of desired pods for a ReplicationController.
+# TYPE kube_replicationcontroller_spec_replicas gauge
+# HELP kube_replicationcontroller_metadata_generation Sequence number representing a specific generation of the desired state.
+# TYPE kube_replicationcontroller_metadata_generation gauge
+# HELP kube_resourcequota_created Unix creation timestamp
+# TYPE kube_resourcequota_created gauge
+kube_resourcequota_created{namespace="rqtest",resourcequota="resources"} 1.568629266e+09
+kube_resourcequota_created{namespace="rqtest",resourcequota="objects"} 1.568629266e+09
+# HELP kube_resourcequota Information about resource quota.
+# TYPE kube_resourcequota gauge
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="pods",type="hard"} 3
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="replicationcontrollers",type="hard"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="secrets",type="hard"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services",type="hard"} 2
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services.loadbalancers",type="hard"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="configmaps",type="hard"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="persistentvolumeclaims",type="hard"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="configmaps",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="persistentvolumeclaims",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="pods",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="replicationcontrollers",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="secrets",type="used"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services",type="used"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services.loadbalancers",type="used"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.cpu",type="hard"} 2
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.memory",type="hard"} 2.147483648e+09
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.cpu",type="hard"} 1
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.memory",type="hard"} 1.073741824e+09
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.cpu",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.memory",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.cpu",type="used"} 0
+kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.memory",type="used"} 0
+# HELP kube_secret_info Information about secret.
+# TYPE kube_secret_info gauge
+kube_secret_info{namespace="kube-system",secret="token-cleaner-token-hvmvg"} 1
+kube_secret_info{namespace="kube-system",secret="coredns-token-sck4w"} 1
+kube_secret_info{namespace="kube-system",secret="pvc-protection-controller-token-l7sng"} 1
+kube_secret_info{namespace="kube-system",secret="replicaset-controller-token-w7cbh"} 1
+kube_secret_info{namespace="default",secret="default-token-t7g88"} 1
+kube_secret_info{namespace="kube-system",secret="disruption-controller-token-p746l"} 1
+kube_secret_info{namespace="pablo",secret="default-token-bcdzp"} 1
+kube_secret_info{namespace="kube-system",secret="storage-provisioner-token-77qbj"} 1
+kube_secret_info{namespace="default-mem-example",secret="default-token-ck879"} 1
+kube_secret_info{namespace="kube-system",secret="cronjob-controller-token-5pdpx"} 1
+kube_secret_info{namespace="kube-system",secret="namespace-controller-token-cz6nm"} 1
+kube_secret_info{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w"} 1
+kube_secret_info{namespace="kube-system",secret="attachdetach-controller-token-ww2k9"} 1
+kube_secret_info{namespace="kube-system",secret="pv-protection-controller-token-wgqk6"} 1
+kube_secret_info{namespace="kube-system",secret="service-account-controller-token-trs9v"} 1
+kube_secret_info{namespace="kube-system",secret="deployment-controller-token-8jz6f"} 1
+kube_secret_info{namespace="kube-system",secret="default-token-wlxnx"} 1
+kube_secret_info{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc"} 1
+kube_secret_info{namespace="kube-system",secret="bootstrap-signer-token-5rthh"} 1
+kube_secret_info{namespace="kube-system",secret="resourcequota-controller-token-ftfbv"} 1
+kube_secret_info{namespace="kube-system",secret="ttl-controller-token-cjgd4"} 1
+kube_secret_info{namespace="kube-system",secret="expand-controller-token-bqc2l"} 1
+kube_secret_info{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp"} 1
+kube_secret_info{namespace="kube-system",secret="kube-state-metrics-token-q84xw"} 1
+kube_secret_info{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps"} 1
+kube_secret_info{namespace="kube-public",secret="default-token-fgbfg"} 1
+kube_secret_info{namespace="kube-system",secret="endpoint-controller-token-xhchl"} 1
+kube_secret_info{namespace="kube-system",secret="kube-proxy-token-xb2xk"} 1
+kube_secret_info{namespace="rqtest",secret="default-token-bx4mb"} 1
+kube_secret_info{namespace="kube-system",secret="metricbeat-kube-token-p7lz2"} 1
+kube_secret_info{namespace="kube-system",secret="certificate-controller-token-4tpf5"} 1
+kube_secret_info{namespace="kube-system",secret="replication-controller-token-4bbg4"} 1
+kube_secret_info{namespace="kube-node-lease",secret="default-token-4bnbt"} 1
+kube_secret_info{namespace="kube-system",secret="node-controller-token-vlp7g"} 1
+kube_secret_info{namespace="kube-system",secret="service-controller-token-n6q5z"} 1
+kube_secret_info{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm"} 1
+kube_secret_info{namespace="kube-system",secret="job-controller-token-jzdhc"} 1
+kube_secret_info{namespace="kube-system",secret="statefulset-controller-token-c6ln8"} 1
+kube_secret_info{namespace="kube-system",secret="daemon-set-controller-token-hlmp4"} 1
+kube_secret_info{namespace="default",secret="metricbeat-kube-token-8bswn"} 1
+# HELP kube_secret_type Type about secret.
+# TYPE kube_secret_type gauge
+kube_secret_type{namespace="kube-system",secret="node-controller-token-vlp7g",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="service-controller-token-n6q5z",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="rqtest",secret="default-token-bx4mb",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="metricbeat-kube-token-p7lz2",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="certificate-controller-token-4tpf5",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="replication-controller-token-4bbg4",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-node-lease",secret="default-token-4bnbt",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="job-controller-token-jzdhc",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="statefulset-controller-token-c6ln8",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="daemon-set-controller-token-hlmp4",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="default",secret="metricbeat-kube-token-8bswn",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="disruption-controller-token-p746l",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="pablo",secret="default-token-bcdzp",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="storage-provisioner-token-77qbj",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="token-cleaner-token-hvmvg",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="coredns-token-sck4w",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="pvc-protection-controller-token-l7sng",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="replicaset-controller-token-w7cbh",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="default",secret="default-token-t7g88",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="default-mem-example",secret="default-token-ck879",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="cronjob-controller-token-5pdpx",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="namespace-controller-token-cz6nm",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="default-token-wlxnx",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="attachdetach-controller-token-ww2k9",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="pv-protection-controller-token-wgqk6",type="kubernetes.io/service-account-token"} 1
+kube_secret_type{namespace="kube-system",secret="service-account-controller-token-trs9v",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="deployment-controller-token-8jz6f",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="bootstrap-signer-token-5rthh",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="kube-state-metrics-token-q84xw",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="resourcequota-controller-token-ftfbv",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="ttl-controller-token-cjgd4",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="expand-controller-token-bqc2l",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-public",secret="default-token-fgbfg",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="endpoint-controller-token-xhchl",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="kube-proxy-token-xb2xk",type="kubernetes.io/service-account-token"} 1 +# HELP kube_secret_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_secret_labels gauge +kube_secret_labels{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc"} 1 +kube_secret_labels{namespace="kube-system",secret="bootstrap-signer-token-5rthh"} 1 +kube_secret_labels{namespace="kube-system",secret="resourcequota-controller-token-ftfbv"} 1 +kube_secret_labels{namespace="kube-system",secret="ttl-controller-token-cjgd4"} 1 +kube_secret_labels{namespace="kube-system",secret="expand-controller-token-bqc2l"} 1 +kube_secret_labels{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp"} 1 +kube_secret_labels{namespace="kube-system",secret="kube-state-metrics-token-q84xw"} 1 +kube_secret_labels{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps"} 1 +kube_secret_labels{namespace="kube-public",secret="default-token-fgbfg"} 1 +kube_secret_labels{namespace="kube-system",secret="endpoint-controller-token-xhchl"} 1 +kube_secret_labels{namespace="kube-system",secret="kube-proxy-token-xb2xk"} 1 +kube_secret_labels{namespace="rqtest",secret="default-token-bx4mb"} 1 +kube_secret_labels{namespace="kube-system",secret="metricbeat-kube-token-p7lz2"} 1 +kube_secret_labels{namespace="kube-system",secret="certificate-controller-token-4tpf5"} 1 +kube_secret_labels{namespace="kube-system",secret="replication-controller-token-4bbg4"} 1 +kube_secret_labels{namespace="kube-node-lease",secret="default-token-4bnbt"} 1 +kube_secret_labels{namespace="kube-system",secret="node-controller-token-vlp7g"} 1 +kube_secret_labels{namespace="kube-system",secret="service-controller-token-n6q5z"} 1 +kube_secret_labels{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm"} 1 +kube_secret_labels{namespace="kube-system",secret="job-controller-token-jzdhc"} 1 
+kube_secret_labels{namespace="kube-system",secret="statefulset-controller-token-c6ln8"} 1 +kube_secret_labels{namespace="kube-system",secret="daemon-set-controller-token-hlmp4"} 1 +kube_secret_labels{namespace="default",secret="metricbeat-kube-token-8bswn"} 1 +kube_secret_labels{namespace="kube-system",secret="token-cleaner-token-hvmvg"} 1 +kube_secret_labels{namespace="kube-system",secret="coredns-token-sck4w"} 1 +kube_secret_labels{namespace="kube-system",secret="pvc-protection-controller-token-l7sng"} 1 +kube_secret_labels{namespace="kube-system",secret="replicaset-controller-token-w7cbh"} 1 +kube_secret_labels{namespace="default",secret="default-token-t7g88"} 1 +kube_secret_labels{namespace="kube-system",secret="disruption-controller-token-p746l"} 1 +kube_secret_labels{namespace="pablo",secret="default-token-bcdzp"} 1 +kube_secret_labels{namespace="kube-system",secret="storage-provisioner-token-77qbj"} 1 +kube_secret_labels{namespace="default-mem-example",secret="default-token-ck879"} 1 +kube_secret_labels{namespace="kube-system",secret="cronjob-controller-token-5pdpx"} 1 +kube_secret_labels{namespace="kube-system",secret="namespace-controller-token-cz6nm"} 1 +kube_secret_labels{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w"} 1 +kube_secret_labels{namespace="kube-system",secret="attachdetach-controller-token-ww2k9"} 1 +kube_secret_labels{namespace="kube-system",secret="pv-protection-controller-token-wgqk6"} 1 +kube_secret_labels{namespace="kube-system",secret="service-account-controller-token-trs9v"} 1 +kube_secret_labels{namespace="kube-system",secret="deployment-controller-token-8jz6f"} 1 +kube_secret_labels{namespace="kube-system",secret="default-token-wlxnx"} 1 +# HELP kube_secret_created Unix creation timestamp +# TYPE kube_secret_created gauge +kube_secret_created{namespace="kube-system",secret="statefulset-controller-token-c6ln8"} 1.567009608e+09 +kube_secret_created{namespace="kube-system",secret="daemon-set-controller-token-hlmp4"} 1.567009608e+09 +kube_secret_created{namespace="default",secret="metricbeat-kube-token-8bswn"} 1.567693987e+09 +kube_secret_created{namespace="kube-system",secret="job-controller-token-jzdhc"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="pvc-protection-controller-token-l7sng"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="replicaset-controller-token-w7cbh"} 1.567009605e+09 +kube_secret_created{namespace="default",secret="default-token-t7g88"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="disruption-controller-token-p746l"} 1.567009611e+09 +kube_secret_created{namespace="pablo",secret="default-token-bcdzp"} 1.567347599e+09 +kube_secret_created{namespace="kube-system",secret="storage-provisioner-token-77qbj"} 1.567009612e+09 +kube_secret_created{namespace="kube-system",secret="token-cleaner-token-hvmvg"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="coredns-token-sck4w"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="cronjob-controller-token-5pdpx"} 1.567009607e+09 +kube_secret_created{namespace="kube-system",secret="namespace-controller-token-cz6nm"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w"} 1.567009607e+09 +kube_secret_created{namespace="default-mem-example",secret="default-token-ck879"} 1.567347623e+09 +kube_secret_created{namespace="kube-system",secret="pv-protection-controller-token-wgqk6"} 1.56700961e+09 
+kube_secret_created{namespace="kube-system",secret="service-account-controller-token-trs9v"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="deployment-controller-token-8jz6f"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="default-token-wlxnx"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="attachdetach-controller-token-ww2k9"} 1.567009608e+09 +kube_secret_created{namespace="kube-system",secret="bootstrap-signer-token-5rthh"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc"} 1.56700961e+09 +kube_secret_created{namespace="kube-system",secret="ttl-controller-token-cjgd4"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="expand-controller-token-bqc2l"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp"} 1.567009607e+09 +kube_secret_created{namespace="kube-system",secret="kube-state-metrics-token-q84xw"} 1.568195118e+09 +kube_secret_created{namespace="kube-system",secret="resourcequota-controller-token-ftfbv"} 1.567009606e+09 +kube_secret_created{namespace="kube-public",secret="default-token-fgbfg"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="endpoint-controller-token-xhchl"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="kube-proxy-token-xb2xk"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="certificate-controller-token-4tpf5"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="replication-controller-token-4bbg4"} 1.567009606e+09 +kube_secret_created{namespace="kube-node-lease",secret="default-token-4bnbt"} 1.567689495e+09 +kube_secret_created{namespace="kube-system",secret="node-controller-token-vlp7g"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="service-controller-token-n6q5z"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm"} 1.56700961e+09 +kube_secret_created{namespace="rqtest",secret="default-token-bx4mb"} 1.568629266e+09 +kube_secret_created{namespace="kube-system",secret="metricbeat-kube-token-p7lz2"} 1.567585201e+09 +# HELP kube_secret_metadata_resource_version Resource version representing a specific version of secret. 
+# TYPE kube_secret_metadata_resource_version gauge
+kube_secret_metadata_resource_version{namespace="kube-system",secret="ttl-controller-token-cjgd4",resource_version="201"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="expand-controller-token-bqc2l",resource_version="204"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp",resource_version="247"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="kube-state-metrics-token-q84xw",resource_version="1322401"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="resourcequota-controller-token-ftfbv",resource_version="230"} 1
+kube_secret_metadata_resource_version{namespace="kube-public",secret="default-token-fgbfg",resource_version="307"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="endpoint-controller-token-xhchl",resource_version="264"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="kube-proxy-token-xb2xk",resource_version="227"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps",resource_version="185"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="certificate-controller-token-4tpf5",resource_version="191"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="replication-controller-token-4bbg4",resource_version="224"} 1
+kube_secret_metadata_resource_version{namespace="kube-node-lease",secret="default-token-4bnbt",resource_version="709402"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="node-controller-token-vlp7g",resource_version="182"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="service-controller-token-n6q5z",resource_version="270"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm",resource_version="280"} 1
+kube_secret_metadata_resource_version{namespace="rqtest",secret="default-token-bx4mb",resource_version="1848837"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="metricbeat-kube-token-p7lz2",resource_version="688578"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="statefulset-controller-token-c6ln8",resource_version="257"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="daemon-set-controller-token-hlmp4",resource_version="254"} 1
+kube_secret_metadata_resource_version{namespace="default",secret="metricbeat-kube-token-8bswn",resource_version="715037"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="job-controller-token-jzdhc",resource_version="240"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="pvc-protection-controller-token-l7sng",resource_version="274"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="replicaset-controller-token-w7cbh",resource_version="188"} 1
+kube_secret_metadata_resource_version{namespace="default",secret="default-token-t7g88",resource_version="302"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="disruption-controller-token-p746l",resource_version="288"} 1
+kube_secret_metadata_resource_version{namespace="pablo",secret="default-token-bcdzp",resource_version="404521"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="storage-provisioner-token-77qbj",resource_version="345"} 1
+kube_secret_metadata_resource_version{namespace="kube-system",secret="token-cleaner-token-hvmvg",resource_version="292"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="coredns-token-sck4w",resource_version="219"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="cronjob-controller-token-5pdpx",resource_version="243"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="namespace-controller-token-cz6nm",resource_version="267"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w",resource_version="250"} 1 +kube_secret_metadata_resource_version{namespace="default-mem-example",secret="default-token-ck879",resource_version="404555"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="pv-protection-controller-token-wgqk6",resource_version="277"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="service-account-controller-token-trs9v",resource_version="197"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="deployment-controller-token-8jz6f",resource_version="179"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="default-token-wlxnx",resource_version="304"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="attachdetach-controller-token-ww2k9",resource_version="260"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="bootstrap-signer-token-5rthh",resource_version="194"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc",resource_version="283"} 1 +# HELP kube_service_info Information about service. +# TYPE kube_service_info gauge +kube_service_info{namespace="kube-system",service="kube-dns",cluster_ip="10.96.0.10",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="my-nginx",cluster_ip="10.111.59.54",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="kube-system",service="kube-state-metrics",cluster_ip="10.111.114.13",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="willsucceed",cluster_ip="10.104.70.53",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="willfail",cluster_ip="10.99.32.65",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="rqtest",service="willsucceed",cluster_ip="10.97.184.107",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="kubernetes",cluster_ip="10.96.0.1",external_name="",load_balancer_ip=""} 1 +# HELP kube_service_created Unix creation timestamp +# TYPE kube_service_created gauge +kube_service_created{namespace="default",service="kubernetes"} 1.567009602e+09 +kube_service_created{namespace="kube-system",service="kube-dns"} 1.567009606e+09 +kube_service_created{namespace="default",service="my-nginx"} 1.567783021e+09 +kube_service_created{namespace="kube-system",service="kube-state-metrics"} 1.568195118e+09 +kube_service_created{namespace="default",service="willsucceed"} 1.568629567e+09 +kube_service_created{namespace="default",service="willfail"} 1.568629567e+09 +kube_service_created{namespace="rqtest",service="willsucceed"} 1.568629886e+09 +# HELP kube_service_spec_type Type about service. 
+# TYPE kube_service_spec_type gauge
+kube_service_spec_type{namespace="rqtest",service="willsucceed",type="LoadBalancer"} 1
+kube_service_spec_type{namespace="default",service="kubernetes",type="ClusterIP"} 1
+kube_service_spec_type{namespace="kube-system",service="kube-dns",type="ClusterIP"} 1
+kube_service_spec_type{namespace="default",service="my-nginx",type="LoadBalancer"} 1
+kube_service_spec_type{namespace="kube-system",service="kube-state-metrics",type="ClusterIP"} 1
+kube_service_spec_type{namespace="default",service="willsucceed",type="LoadBalancer"} 1
+kube_service_spec_type{namespace="default",service="willfail",type="LoadBalancer"} 1
+# HELP kube_service_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_service_labels gauge
+kube_service_labels{namespace="default",service="kubernetes",label_component="apiserver",label_provider="kubernetes"} 1
+kube_service_labels{namespace="kube-system",service="kube-dns",label_kubernetes_io_name="KubeDNS",label_k8s_app="kube-dns",label_kubernetes_io_cluster_service="true"} 1
+kube_service_labels{namespace="default",service="my-nginx",label_app="my-nginx"} 1
+kube_service_labels{namespace="kube-system",service="kube-state-metrics",label_k8s_app="kube-state-metrics"} 1
+kube_service_labels{namespace="default",service="willsucceed"} 1
+kube_service_labels{namespace="default",service="willfail"} 1
+kube_service_labels{namespace="rqtest",service="willsucceed"} 1
+# HELP kube_service_spec_external_ip Service external ips. One series for each ip
+# TYPE kube_service_spec_external_ip gauge
+# HELP kube_service_status_load_balancer_ingress Service load balancer ingress status
+# TYPE kube_service_status_load_balancer_ingress gauge
+# HELP kube_statefulset_created Unix creation timestamp
+# TYPE kube_statefulset_created gauge
+# HELP kube_statefulset_status_replicas The number of replicas per StatefulSet.
+# TYPE kube_statefulset_status_replicas gauge
+# HELP kube_statefulset_status_replicas_current The number of current replicas per StatefulSet.
+# TYPE kube_statefulset_status_replicas_current gauge
+# HELP kube_statefulset_status_replicas_ready The number of ready replicas per StatefulSet.
+# TYPE kube_statefulset_status_replicas_ready gauge
+# HELP kube_statefulset_status_replicas_updated The number of updated replicas per StatefulSet.
+# TYPE kube_statefulset_status_replicas_updated gauge
+# HELP kube_statefulset_status_observed_generation The generation observed by the StatefulSet controller.
+# TYPE kube_statefulset_status_observed_generation gauge
+# HELP kube_statefulset_replicas Number of desired pods for a StatefulSet.
+# TYPE kube_statefulset_replicas gauge
+# HELP kube_statefulset_metadata_generation Sequence number representing a specific generation of the desired state for the StatefulSet.
+# TYPE kube_statefulset_metadata_generation gauge
+# HELP kube_statefulset_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_statefulset_labels gauge
+# HELP kube_statefulset_status_current_revision Indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).
+# TYPE kube_statefulset_status_current_revision gauge
+# HELP kube_statefulset_status_update_revision Indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)
+# TYPE kube_statefulset_status_update_revision gauge
+# HELP kube_storageclass_info Information about storageclass.
+# TYPE kube_storageclass_info gauge
+kube_storageclass_info{storageclass="standard",provisioner="k8s.io/minikube-hostpath",reclaimPolicy="Delete",volumeBindingMode="Immediate"} 1
+# HELP kube_storageclass_created Unix creation timestamp
+# TYPE kube_storageclass_created gauge
+kube_storageclass_created{storageclass="standard"} 1.567009612e+09
+# HELP kube_storageclass_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_storageclass_labels gauge
+kube_storageclass_labels{storageclass="standard",label_addonmanager_kubernetes_io_mode="EnsureExists"} 1
diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/test/kube-state-metrics.1.7.expected b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/test/kube-state-metrics.1.7.expected
new file mode 100644
index 00000000..80ee16ce
--- /dev/null
+++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/test/kube-state-metrics.1.7.expected
@@ -0,0 +1,530 @@
+[
+  {
+    "RootFields": {},
+    "ModuleFields": {
+      "namespace": "rqtest"
+    },
+    "MetricSetFields": {
+      "name": "objects",
+      "quota": 0,
+      "resource": "replicationcontrollers",
+      "type": "used"
+    },
+    "Index": "",
+    "ID": "",
+    "Namespace": "kubernetes.resourcequota",
+    "Timestamp": "0001-01-01T00:00:00Z",
+    "Error": null,
+    "Host": "",
+    "Service": "",
+    "Took": 0,
+    "Period": 0,
+    "DisableTimeSeries": false
+  },
+  {
+    "RootFields": {},
+    "ModuleFields": {
+      "namespace": "rqtest"
+    },
+    "MetricSetFields": {
+      "name": "objects",
+      "quota": 1,
+      "resource": "configmaps",
+      "type": "hard"
+    },
+    "Index": "",
+    "ID": "",
+    "Namespace": "kubernetes.resourcequota",
+    "Timestamp": "0001-01-01T00:00:00Z",
+    "Error": null,
+    "Host": "",
+    "Service": "",
+    "Took": 0,
+    "Period": 0,
+    "DisableTimeSeries": false
+  },
+  {
+    "RootFields": {},
+    "ModuleFields": {
+      "namespace": "rqtest"
+    },
+    "MetricSetFields": {
+      "name": "resources",
+      "quota": 1,
+      "resource": "requests.cpu",
+      "type": "hard"
+    },
+    "Index": "",
+    "ID": "",
+    "Namespace": "kubernetes.resourcequota",
+    "Timestamp": "0001-01-01T00:00:00Z",
+    "Error": null,
+    "Host": "",
+    "Service": "",
+    "Took": 0,
+    "Period": 0,
+    "DisableTimeSeries": false
+  },
+  {
+    "RootFields": {},
+    "ModuleFields": {
+      "namespace": "rqtest"
+    },
+    "MetricSetFields": {
+      "name": "objects",
+      "quota": 0,
+      "resource": "persistentvolumeclaims",
+      "type": "used"
+    },
+    "Index": "",
+    "ID": "",
+    "Namespace": "kubernetes.resourcequota",
+    "Timestamp": "0001-01-01T00:00:00Z",
+    "Error": null,
+    "Host": "",
+    "Service": "",
+    "Took": 0,
+    "Period": 0,
+    "DisableTimeSeries": false
+  },
+  {
+    "RootFields": {},
+    "ModuleFields": {
+      "namespace": "rqtest"
+    },
+    "MetricSetFields": {
+      "name": "resources",
+      "quota": 2147483648,
+      "resource": "limits.memory",
+      "type": "hard"
+    },
+    "Index": "",
+    "ID": "",
+    "Namespace": "kubernetes.resourcequota",
+    "Timestamp": "0001-01-01T00:00:00Z",
+    "Error": null,
+    "Host": "",
+    "Service": "",
+    "Took": 0,
+    "Period": 0,
+    "DisableTimeSeries": false
+  },
+  {
+    "RootFields": {},
+    "ModuleFields": {
+      "namespace": "rqtest"
+    },
+    "MetricSetFields": {
+      "name": "objects",
+      "quota": 1,
+      "resource": "replicationcontrollers",
+      "type": "hard"
+    },
+    "Index": "",
+    "ID": "",
+    "Namespace": "kubernetes.resourcequota",
+    "Timestamp": "0001-01-01T00:00:00Z",
+    "Error": null,
+    "Host": "",
+    "Service": "",
+    "Took": 0,
+    "Period": 0,
+    "DisableTimeSeries": false
+  },
+  {
+    "RootFields": {},
+    "ModuleFields": {
"namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 1, + "resource": "services.loadbalancers", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 1, + "resource": "secrets", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "resources", + "quota": 0, + "resource": "requests.memory", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "resources", + "quota": 0, + "resource": "requests.cpu", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 2, + "resource": "services", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 3, + "resource": "pods", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "resources", + "quota": 2, + "resource": "limits.cpu", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 1, + "resource": "services", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "resources", + "quota": 1073741824, + "resource": "requests.memory", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + 
"DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 1, + "resource": "services.loadbalancers", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "created": { + "sec": 1568629266 + }, + "name": "resources" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 0, + "resource": "persistentvolumeclaims", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "resources", + "quota": 0, + "resource": "limits.cpu", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 0, + "resource": "configmaps", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 1, + "resource": "secrets", + "type": "hard" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "created": { + "sec": 1568629266 + }, + "name": "objects" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "objects", + "quota": 0, + "resource": "pods", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", + "Took": 0, + "Period": 0, + "DisableTimeSeries": false + }, + { + "RootFields": {}, + "ModuleFields": { + "namespace": "rqtest" + }, + "MetricSetFields": { + "name": "resources", + "quota": 0, + "resource": "limits.memory", + "type": "used" + }, + "Index": "", + "ID": "", + "Namespace": "kubernetes.resourcequota", + "Timestamp": "0001-01-01T00:00:00Z", + "Error": null, + "Host": "", + "Service": "", 
+ "Took": 0, + "Period": 0, + "DisableTimeSeries": false + } +] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/config.yml b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/config.yml new file mode 100644 index 00000000..ab6bf241 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/config.yml @@ -0,0 +1,3 @@ +type: http +url: "/metrics" +suffix: plain diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/kube-state-metrics.1.7.plain b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/kube-state-metrics.1.7.plain new file mode 100644 index 00000000..11e5614b --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/kube-state-metrics.1.7.plain @@ -0,0 +1,1361 @@ +# HELP kube_certificatesigningrequest_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_certificatesigningrequest_labels gauge +# HELP kube_certificatesigningrequest_created Unix creation timestamp +# TYPE kube_certificatesigningrequest_created gauge +# HELP kube_certificatesigningrequest_condition The number of each certificatesigningrequest condition +# TYPE kube_certificatesigningrequest_condition gauge +# HELP kube_certificatesigningrequest_cert_length Length of the issued cert +# TYPE kube_certificatesigningrequest_cert_length gauge +# HELP kube_configmap_info Information about configmap. +# TYPE kube_configmap_info gauge +kube_configmap_info{namespace="kube-system",configmap="kubelet-config-1.12"} 1 +kube_configmap_info{namespace="default",configmap="elastic-operator-uuid"} 1 +kube_configmap_info{namespace="kube-public",configmap="cluster-info"} 1 +kube_configmap_info{namespace="kube-system",configmap="coredns"} 1 +kube_configmap_info{namespace="kube-system",configmap="extension-apiserver-authentication"} 1 +kube_configmap_info{namespace="kube-system",configmap="kube-proxy"} 1 +kube_configmap_info{namespace="kube-system",configmap="kubeadm-config"} 1 +# HELP kube_configmap_created Unix creation timestamp +# TYPE kube_configmap_created gauge +kube_configmap_created{namespace="kube-system",configmap="extension-apiserver-authentication"} 1.567009602e+09 +kube_configmap_created{namespace="kube-system",configmap="kube-proxy"} 1.567009606e+09 +kube_configmap_created{namespace="kube-system",configmap="kubeadm-config"} 1.567009605e+09 +kube_configmap_created{namespace="kube-system",configmap="kubelet-config-1.12"} 1.567009605e+09 +kube_configmap_created{namespace="default",configmap="elastic-operator-uuid"} 1.567060471e+09 +kube_configmap_created{namespace="kube-public",configmap="cluster-info"} 1.567009606e+09 +kube_configmap_created{namespace="kube-system",configmap="coredns"} 1.567009606e+09 +# HELP kube_configmap_metadata_resource_version Resource version representing a specific version of the configmap. 
+# TYPE kube_configmap_metadata_resource_version gauge
+kube_configmap_metadata_resource_version{namespace="default",configmap="elastic-operator-uuid",resource_version="61228"} 1
+kube_configmap_metadata_resource_version{namespace="kube-public",configmap="cluster-info",resource_version="709401"} 1
+kube_configmap_metadata_resource_version{namespace="kube-system",configmap="coredns",resource_version="214"} 1
+kube_configmap_metadata_resource_version{namespace="kube-system",configmap="extension-apiserver-authentication",resource_version="53"} 1
+kube_configmap_metadata_resource_version{namespace="kube-system",configmap="kube-proxy",resource_version="709289"} 1
+kube_configmap_metadata_resource_version{namespace="kube-system",configmap="kubeadm-config",resource_version="171"} 1
+kube_configmap_metadata_resource_version{namespace="kube-system",configmap="kubelet-config-1.12",resource_version="174"} 1
+# HELP kube_cronjob_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_cronjob_labels gauge
+# HELP kube_cronjob_info Info about cronjob.
+# TYPE kube_cronjob_info gauge
+# HELP kube_cronjob_created Unix creation timestamp
+# TYPE kube_cronjob_created gauge
+# HELP kube_cronjob_status_active Active holds pointers to currently running jobs.
+# TYPE kube_cronjob_status_active gauge
+# HELP kube_cronjob_status_last_schedule_time LastScheduleTime keeps information of when was the last time the job was successfully scheduled.
+# TYPE kube_cronjob_status_last_schedule_time gauge
+# HELP kube_cronjob_spec_suspend Suspend flag tells the controller to suspend subsequent executions.
+# TYPE kube_cronjob_spec_suspend gauge
+# HELP kube_cronjob_spec_starting_deadline_seconds Deadline in seconds for starting the job if it misses scheduled time for any reason.
+# TYPE kube_cronjob_spec_starting_deadline_seconds gauge
+# HELP kube_cronjob_next_schedule_time Next time the cronjob should be scheduled. The time after lastScheduleTime, or after the cron job's creation time if it's never been scheduled. Use this to determine if the job is delayed.
+# TYPE kube_cronjob_next_schedule_time gauge
+# HELP kube_daemonset_created Unix creation timestamp
+# TYPE kube_daemonset_created gauge
+kube_daemonset_created{namespace="kube-system",daemonset="kube-proxy"} 1.567009606e+09
+# HELP kube_daemonset_status_current_number_scheduled The number of nodes running at least one daemon pod and are supposed to.
+# TYPE kube_daemonset_status_current_number_scheduled gauge
+kube_daemonset_status_current_number_scheduled{namespace="kube-system",daemonset="kube-proxy"} 1
+# HELP kube_daemonset_status_desired_number_scheduled The number of nodes that should be running the daemon pod.
+# TYPE kube_daemonset_status_desired_number_scheduled gauge
+kube_daemonset_status_desired_number_scheduled{namespace="kube-system",daemonset="kube-proxy"} 1
+# HELP kube_daemonset_status_number_available The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available
+# TYPE kube_daemonset_status_number_available gauge
+kube_daemonset_status_number_available{namespace="kube-system",daemonset="kube-proxy"} 1
+# HELP kube_daemonset_status_number_misscheduled The number of nodes running a daemon pod but are not supposed to.
+# TYPE kube_daemonset_status_number_misscheduled gauge
+kube_daemonset_status_number_misscheduled{namespace="kube-system",daemonset="kube-proxy"} 0
+# HELP kube_daemonset_status_number_ready The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.
+# TYPE kube_daemonset_status_number_ready gauge
+kube_daemonset_status_number_ready{namespace="kube-system",daemonset="kube-proxy"} 1
+# HELP kube_daemonset_status_number_unavailable The number of nodes that should be running the daemon pod and have none of the daemon pod running and available
+# TYPE kube_daemonset_status_number_unavailable gauge
+kube_daemonset_status_number_unavailable{namespace="kube-system",daemonset="kube-proxy"} 0
+# HELP kube_daemonset_updated_number_scheduled The total number of nodes that are running updated daemon pod
+# TYPE kube_daemonset_updated_number_scheduled gauge
+kube_daemonset_updated_number_scheduled{namespace="kube-system",daemonset="kube-proxy"} 1
+# HELP kube_daemonset_metadata_generation Sequence number representing a specific generation of the desired state.
+# TYPE kube_daemonset_metadata_generation gauge
+kube_daemonset_metadata_generation{namespace="kube-system",daemonset="kube-proxy"} 2
+# HELP kube_daemonset_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_daemonset_labels gauge
+kube_daemonset_labels{namespace="kube-system",daemonset="kube-proxy",label_k8s_app="kube-proxy"} 1
+# HELP kube_deployment_created Unix creation timestamp
+# TYPE kube_deployment_created gauge
+kube_deployment_created{namespace="kube-system",deployment="kube-state-metrics"} 1.568622119e+09
+kube_deployment_created{namespace="kube-system",deployment="coredns"} 1.567009606e+09
+# HELP kube_deployment_status_replicas The number of replicas per deployment.
+# TYPE kube_deployment_status_replicas gauge
+kube_deployment_status_replicas{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_status_replicas{namespace="kube-system",deployment="coredns"} 2
+# HELP kube_deployment_status_replicas_available The number of available replicas per deployment.
+# TYPE kube_deployment_status_replicas_available gauge
+kube_deployment_status_replicas_available{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_status_replicas_available{namespace="kube-system",deployment="coredns"} 2
+# HELP kube_deployment_status_replicas_unavailable The number of unavailable replicas per deployment.
+# TYPE kube_deployment_status_replicas_unavailable gauge
+kube_deployment_status_replicas_unavailable{namespace="kube-system",deployment="kube-state-metrics"} 0
+kube_deployment_status_replicas_unavailable{namespace="kube-system",deployment="coredns"} 0
+# HELP kube_deployment_status_replicas_updated The number of updated replicas per deployment.
+# TYPE kube_deployment_status_replicas_updated gauge
+kube_deployment_status_replicas_updated{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_status_replicas_updated{namespace="kube-system",deployment="coredns"} 2
+# HELP kube_deployment_status_observed_generation The generation observed by the deployment controller.
+# TYPE kube_deployment_status_observed_generation gauge
+kube_deployment_status_observed_generation{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_status_observed_generation{namespace="kube-system",deployment="coredns"} 4
+# HELP kube_deployment_spec_replicas Number of desired pods for a deployment.
+# TYPE kube_deployment_spec_replicas gauge
+kube_deployment_spec_replicas{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_spec_replicas{namespace="kube-system",deployment="coredns"} 2
+# HELP kube_deployment_spec_paused Whether the deployment is paused and will not be processed by the deployment controller.
+# TYPE kube_deployment_spec_paused gauge
+kube_deployment_spec_paused{namespace="kube-system",deployment="kube-state-metrics"} 0
+kube_deployment_spec_paused{namespace="kube-system",deployment="coredns"} 0
+# HELP kube_deployment_spec_strategy_rollingupdate_max_unavailable Maximum number of unavailable replicas during a rolling update of a deployment.
+# TYPE kube_deployment_spec_strategy_rollingupdate_max_unavailable gauge
+kube_deployment_spec_strategy_rollingupdate_max_unavailable{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_spec_strategy_rollingupdate_max_unavailable{namespace="kube-system",deployment="coredns"} 1
+# HELP kube_deployment_spec_strategy_rollingupdate_max_surge Maximum number of replicas that can be scheduled above the desired number of replicas during a rolling update of a deployment.
+# TYPE kube_deployment_spec_strategy_rollingupdate_max_surge gauge
+kube_deployment_spec_strategy_rollingupdate_max_surge{namespace="kube-system",deployment="coredns"} 1
+kube_deployment_spec_strategy_rollingupdate_max_surge{namespace="kube-system",deployment="kube-state-metrics"} 1
+# HELP kube_deployment_metadata_generation Sequence number representing a specific generation of the desired state.
+# TYPE kube_deployment_metadata_generation gauge
+kube_deployment_metadata_generation{namespace="kube-system",deployment="kube-state-metrics"} 1
+kube_deployment_metadata_generation{namespace="kube-system",deployment="coredns"} 4
+# HELP kube_deployment_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_deployment_labels gauge
+kube_deployment_labels{namespace="kube-system",deployment="kube-state-metrics",label_k8s_app="kube-state-metrics"} 1
+kube_deployment_labels{namespace="kube-system",deployment="coredns",label_k8s_app="kube-dns"} 1
+# HELP kube_endpoint_info Information about endpoint.
+# TYPE kube_endpoint_info gauge
+kube_endpoint_info{namespace="kube-system",endpoint="kube-state-metrics"} 1
+kube_endpoint_info{namespace="kube-system",endpoint="kube-scheduler"} 1
+kube_endpoint_info{namespace="default",endpoint="willsucceed"} 1
+kube_endpoint_info{namespace="default",endpoint="willfail"} 1
+kube_endpoint_info{namespace="rqtest",endpoint="willsucceed"} 1
+kube_endpoint_info{namespace="default",endpoint="kubernetes"} 1
+kube_endpoint_info{namespace="kube-system",endpoint="kube-controller-manager"} 1
+kube_endpoint_info{namespace="kube-system",endpoint="kube-dns"} 1
+kube_endpoint_info{namespace="default",endpoint="my-nginx"} 1
+# HELP kube_endpoint_created Unix creation timestamp
+# TYPE kube_endpoint_created gauge
+kube_endpoint_created{namespace="default",endpoint="kubernetes"} 1.567009602e+09
+kube_endpoint_created{namespace="kube-system",endpoint="kube-controller-manager"} 1.567009603e+09
+kube_endpoint_created{namespace="kube-system",endpoint="kube-dns"} 1.567009612e+09
+kube_endpoint_created{namespace="default",endpoint="my-nginx"} 1.567783021e+09
+kube_endpoint_created{namespace="kube-system",endpoint="kube-state-metrics"} 1.568195118e+09
+kube_endpoint_created{namespace="kube-system",endpoint="kube-scheduler"} 1.567009604e+09
+kube_endpoint_created{namespace="default",endpoint="willsucceed"} 1.568629567e+09
+kube_endpoint_created{namespace="default",endpoint="willfail"} 1.568629567e+09
+kube_endpoint_created{namespace="rqtest",endpoint="willsucceed"} 1.568629886e+09
+# HELP kube_endpoint_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_endpoint_labels gauge
+kube_endpoint_labels{namespace="kube-system",endpoint="kube-state-metrics",label_k8s_app="kube-state-metrics"} 1
+kube_endpoint_labels{namespace="kube-system",endpoint="kube-scheduler"} 1
+kube_endpoint_labels{namespace="default",endpoint="willsucceed"} 1
+kube_endpoint_labels{namespace="default",endpoint="willfail"} 1
+kube_endpoint_labels{namespace="rqtest",endpoint="willsucceed"} 1
+kube_endpoint_labels{namespace="default",endpoint="kubernetes"} 1
+kube_endpoint_labels{namespace="kube-system",endpoint="kube-controller-manager"} 1
+kube_endpoint_labels{namespace="kube-system",endpoint="kube-dns",label_k8s_app="kube-dns",label_kubernetes_io_cluster_service="true",label_kubernetes_io_name="KubeDNS"} 1
+kube_endpoint_labels{namespace="default",endpoint="my-nginx",label_app="my-nginx"} 1
+# HELP kube_endpoint_address_available Number of addresses available in endpoint.
+# TYPE kube_endpoint_address_available gauge
+kube_endpoint_address_available{namespace="kube-system",endpoint="kube-scheduler"} 0
+kube_endpoint_address_available{namespace="default",endpoint="willsucceed"} 0
+kube_endpoint_address_available{namespace="default",endpoint="willfail"} 0
+kube_endpoint_address_available{namespace="rqtest",endpoint="willsucceed"} 0
+kube_endpoint_address_available{namespace="kube-system",endpoint="kube-state-metrics"} 2
+kube_endpoint_address_available{namespace="kube-system",endpoint="kube-controller-manager"} 0
+kube_endpoint_address_available{namespace="kube-system",endpoint="kube-dns"} 6
+kube_endpoint_address_available{namespace="default",endpoint="my-nginx"} 0
+kube_endpoint_address_available{namespace="default",endpoint="kubernetes"} 1
+# HELP kube_endpoint_address_not_ready Number of addresses not ready in endpoint
+# TYPE kube_endpoint_address_not_ready gauge
+kube_endpoint_address_not_ready{namespace="default",endpoint="kubernetes"} 0
+kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-controller-manager"} 0
+kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-dns"} 0
+kube_endpoint_address_not_ready{namespace="default",endpoint="my-nginx"} 0
+kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-state-metrics"} 0
+kube_endpoint_address_not_ready{namespace="kube-system",endpoint="kube-scheduler"} 0
+kube_endpoint_address_not_ready{namespace="default",endpoint="willsucceed"} 0
+kube_endpoint_address_not_ready{namespace="default",endpoint="willfail"} 0
+kube_endpoint_address_not_ready{namespace="rqtest",endpoint="willsucceed"} 0
+# HELP kube_hpa_metadata_generation The generation observed by the HorizontalPodAutoscaler controller.
+# TYPE kube_hpa_metadata_generation gauge
+# HELP kube_hpa_spec_max_replicas Upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+# TYPE kube_hpa_spec_max_replicas gauge
+# HELP kube_hpa_spec_min_replicas Lower limit for the number of pods that can be set by the autoscaler, default 1.
+# TYPE kube_hpa_spec_min_replicas gauge
+# HELP kube_hpa_status_current_replicas Current number of replicas of pods managed by this autoscaler.
+# TYPE kube_hpa_status_current_replicas gauge
+# HELP kube_hpa_status_desired_replicas Desired number of replicas of pods managed by this autoscaler.
+# TYPE kube_hpa_status_desired_replicas gauge
+# HELP kube_hpa_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_hpa_labels gauge
+# HELP kube_hpa_status_condition The condition of this autoscaler.
+# TYPE kube_hpa_status_condition gauge
+# HELP kube_ingress_info Information about ingress.
+# TYPE kube_ingress_info gauge
+# HELP kube_ingress_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_ingress_labels gauge
+# HELP kube_ingress_created Unix creation timestamp
+# TYPE kube_ingress_created gauge
+# HELP kube_ingress_metadata_resource_version Resource version representing a specific version of ingress.
+# TYPE kube_ingress_metadata_resource_version gauge
+# HELP kube_ingress_path Ingress host, paths and backend service information.
+# TYPE kube_ingress_path gauge
+# HELP kube_job_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_job_labels gauge
+# HELP kube_job_info Information about job.
+# TYPE kube_job_info gauge
+# HELP kube_job_created Unix creation timestamp
+# TYPE kube_job_created gauge
+# HELP kube_job_spec_parallelism The maximum desired number of pods the job should run at any given time.
+# TYPE kube_job_spec_parallelism gauge
+# HELP kube_job_spec_completions The desired number of successfully finished pods the job should be run with.
+# TYPE kube_job_spec_completions gauge
+# HELP kube_job_spec_active_deadline_seconds The duration in seconds relative to the startTime that the job may be active before the system tries to terminate it.
+# TYPE kube_job_spec_active_deadline_seconds gauge
+# HELP kube_job_status_succeeded The number of pods which reached Phase Succeeded.
+# TYPE kube_job_status_succeeded gauge
+# HELP kube_job_status_failed The number of pods which reached Phase Failed.
+# TYPE kube_job_status_failed gauge
+# HELP kube_job_status_active The number of actively running pods.
+# TYPE kube_job_status_active gauge
+# HELP kube_job_complete The job has completed its execution.
+# TYPE kube_job_complete gauge
+# HELP kube_job_failed The job has failed its execution.
+# TYPE kube_job_failed gauge
+# HELP kube_job_status_start_time StartTime represents time when the job was acknowledged by the Job Manager.
+# TYPE kube_job_status_start_time gauge
+# HELP kube_job_status_completion_time CompletionTime represents time when the job was completed.
+# TYPE kube_job_status_completion_time gauge
+# HELP kube_job_owner Information about the Job's owner.
+# TYPE kube_job_owner gauge
+# HELP kube_limitrange Information about limit range.
+# TYPE kube_limitrange gauge
+kube_limitrange{namespace="default-mem-example",limitrange="mem-limit-range",resource="memory",type="Container",constraint="default"} 5.36870912e+08
+kube_limitrange{namespace="default-mem-example",limitrange="mem-limit-range",resource="memory",type="Container",constraint="defaultRequest"} 2.68435456e+08
+# HELP kube_limitrange_created Unix creation timestamp
+# TYPE kube_limitrange_created gauge
+kube_limitrange_created{namespace="default-mem-example",limitrange="mem-limit-range"} 1.567347894e+09
+# HELP kube_namespace_created Unix creation timestamp
+# TYPE kube_namespace_created gauge
+kube_namespace_created{namespace="kube-system"} 1.567009598e+09
+kube_namespace_created{namespace="pablo"} 1.567347599e+09
+kube_namespace_created{namespace="kube-node-lease"} 1.567689471e+09
+kube_namespace_created{namespace="rqtest"} 1.568629266e+09
+kube_namespace_created{namespace="default"} 1.567009597e+09
+kube_namespace_created{namespace="default-mem-example"} 1.567347623e+09
+kube_namespace_created{namespace="kube-public"} 1.567009602e+09
+# HELP kube_namespace_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_namespace_labels gauge
+kube_namespace_labels{namespace="default"} 1
+kube_namespace_labels{namespace="default-mem-example"} 1
+kube_namespace_labels{namespace="kube-public"} 1
+kube_namespace_labels{namespace="kube-system"} 1
+kube_namespace_labels{namespace="pablo"} 1
+kube_namespace_labels{namespace="kube-node-lease"} 1
+kube_namespace_labels{namespace="rqtest"} 1
+# HELP kube_namespace_status_phase kubernetes namespace status phase.
+# TYPE kube_namespace_status_phase gauge
+kube_namespace_status_phase{namespace="default-mem-example",phase="Active"} 1
+kube_namespace_status_phase{namespace="default-mem-example",phase="Terminating"} 0
+kube_namespace_status_phase{namespace="kube-public",phase="Active"} 1
+kube_namespace_status_phase{namespace="kube-public",phase="Terminating"} 0
+kube_namespace_status_phase{namespace="kube-system",phase="Active"} 1
+kube_namespace_status_phase{namespace="kube-system",phase="Terminating"} 0
+kube_namespace_status_phase{namespace="pablo",phase="Active"} 1
+kube_namespace_status_phase{namespace="pablo",phase="Terminating"} 0
+kube_namespace_status_phase{namespace="kube-node-lease",phase="Active"} 1
+kube_namespace_status_phase{namespace="kube-node-lease",phase="Terminating"} 0
+kube_namespace_status_phase{namespace="rqtest",phase="Active"} 1
+kube_namespace_status_phase{namespace="rqtest",phase="Terminating"} 0
+kube_namespace_status_phase{namespace="default",phase="Active"} 1
+kube_namespace_status_phase{namespace="default",phase="Terminating"} 0
+# HELP kube_node_info Information about a cluster node.
+# TYPE kube_node_info gauge
+kube_node_info{node="minikube",kernel_version="4.15.0",os_image="Buildroot 2018.05.3",container_runtime_version="docker://18.9.8",kubelet_version="v1.15.2",kubeproxy_version="v1.15.2",provider_id=""} 1
+# HELP kube_node_created Unix creation timestamp
+# TYPE kube_node_created gauge
+kube_node_created{node="minikube"} 1.567009598e+09
+# HELP kube_node_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_node_labels gauge
+kube_node_labels{node="minikube",label_beta_kubernetes_io_arch="amd64",label_beta_kubernetes_io_os="linux",label_kubernetes_io_arch="amd64",label_kubernetes_io_hostname="minikube",label_kubernetes_io_os="linux",label_node_role_kubernetes_io_master=""} 1
+# HELP kube_node_spec_unschedulable Whether a node can schedule new pods.
+# TYPE kube_node_spec_unschedulable gauge
+kube_node_spec_unschedulable{node="minikube"} 0
+# HELP kube_node_spec_taint The taint of a cluster node.
+# TYPE kube_node_spec_taint gauge
+# HELP kube_node_status_condition The condition of a cluster node.
+# TYPE kube_node_status_condition gauge
+kube_node_status_condition{node="minikube",condition="MemoryPressure",status="true"} 0
+kube_node_status_condition{node="minikube",condition="MemoryPressure",status="false"} 1
+kube_node_status_condition{node="minikube",condition="MemoryPressure",status="unknown"} 0
+kube_node_status_condition{node="minikube",condition="DiskPressure",status="true"} 0
+kube_node_status_condition{node="minikube",condition="DiskPressure",status="false"} 1
+kube_node_status_condition{node="minikube",condition="DiskPressure",status="unknown"} 0
+kube_node_status_condition{node="minikube",condition="PIDPressure",status="true"} 0
+kube_node_status_condition{node="minikube",condition="PIDPressure",status="false"} 1
+kube_node_status_condition{node="minikube",condition="PIDPressure",status="unknown"} 0
+kube_node_status_condition{node="minikube",condition="Ready",status="true"} 1
+kube_node_status_condition{node="minikube",condition="Ready",status="false"} 0
+kube_node_status_condition{node="minikube",condition="Ready",status="unknown"} 0
+# HELP kube_node_status_phase The phase the node is currently in.
+# TYPE kube_node_status_phase gauge
+# HELP kube_node_status_capacity The capacity for different resources of a node.
+# TYPE kube_node_status_capacity gauge
+kube_node_status_capacity{node="minikube",resource="cpu",unit="core"} 4
+kube_node_status_capacity{node="minikube",resource="ephemeral_storage",unit="byte"} 1.8211606528e+10
+kube_node_status_capacity{node="minikube",resource="hugepages_2Mi",unit="byte"} 0
+kube_node_status_capacity{node="minikube",resource="memory",unit="byte"} 8.361435136e+09
+kube_node_status_capacity{node="minikube",resource="pods",unit="integer"} 110
+# HELP kube_node_status_capacity_pods The total pod resources of the node.
+# TYPE kube_node_status_capacity_pods gauge
+kube_node_status_capacity_pods{node="minikube"} 110
+# HELP kube_node_status_capacity_cpu_cores The total CPU resources of the node.
+# TYPE kube_node_status_capacity_cpu_cores gauge
+kube_node_status_capacity_cpu_cores{node="minikube"} 4
+# HELP kube_node_status_capacity_memory_bytes The total memory resources of the node.
+# TYPE kube_node_status_capacity_memory_bytes gauge
+kube_node_status_capacity_memory_bytes{node="minikube"} 8.361435136e+09
+# HELP kube_node_status_allocatable The allocatable for different resources of a node that are available for scheduling.
+# TYPE kube_node_status_allocatable gauge
+kube_node_status_allocatable{node="minikube",resource="cpu",unit="core"} 4
+kube_node_status_allocatable{node="minikube",resource="ephemeral_storage",unit="byte"} 1.6390445849e+10
+kube_node_status_allocatable{node="minikube",resource="hugepages_2Mi",unit="byte"} 0
+kube_node_status_allocatable{node="minikube",resource="memory",unit="byte"} 8.256577536e+09
+kube_node_status_allocatable{node="minikube",resource="pods",unit="integer"} 110
+# HELP kube_node_status_allocatable_pods The pod resources of a node that are available for scheduling.
+# TYPE kube_node_status_allocatable_pods gauge
+kube_node_status_allocatable_pods{node="minikube"} 110
+# HELP kube_node_status_allocatable_cpu_cores The CPU resources of a node that are available for scheduling.
+# TYPE kube_node_status_allocatable_cpu_cores gauge
+kube_node_status_allocatable_cpu_cores{node="minikube"} 4
+# HELP kube_node_status_allocatable_memory_bytes The memory resources of a node that are available for scheduling.
+# TYPE kube_node_status_allocatable_memory_bytes gauge
+kube_node_status_allocatable_memory_bytes{node="minikube"} 8.256577536e+09
+# HELP kube_persistentvolumeclaim_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_persistentvolumeclaim_labels gauge
+# HELP kube_persistentvolumeclaim_info Information about persistent volume claim.
+# TYPE kube_persistentvolumeclaim_info gauge
+# HELP kube_persistentvolumeclaim_status_phase The phase the persistent volume claim is currently in.
+# TYPE kube_persistentvolumeclaim_status_phase gauge
+# HELP kube_persistentvolumeclaim_resource_requests_storage_bytes The capacity of storage requested by the persistent volume claim.
+# TYPE kube_persistentvolumeclaim_resource_requests_storage_bytes gauge
+# HELP kube_persistentvolumeclaim_access_mode The access mode(s) specified by the persistent volume claim.
+# TYPE kube_persistentvolumeclaim_access_mode gauge
+# HELP kube_persistentvolume_labels Kubernetes labels converted to Prometheus labels.
+# TYPE kube_persistentvolume_labels gauge
+# HELP kube_persistentvolume_status_phase The phase indicates if a volume is available, bound to a claim, or released by a claim.
+# TYPE kube_persistentvolume_status_phase gauge
+# HELP kube_persistentvolume_info Information about persistentvolume.
+# TYPE kube_persistentvolume_info gauge +# HELP kube_persistentvolume_capacity_bytes Persistentvolume capacity in bytes. +# TYPE kube_persistentvolume_capacity_bytes gauge +# HELP kube_poddisruptionbudget_created Unix creation timestamp +# TYPE kube_poddisruptionbudget_created gauge +# HELP kube_poddisruptionbudget_status_current_healthy Current number of healthy pods +# TYPE kube_poddisruptionbudget_status_current_healthy gauge +# HELP kube_poddisruptionbudget_status_desired_healthy Minimum desired number of healthy pods +# TYPE kube_poddisruptionbudget_status_desired_healthy gauge +# HELP kube_poddisruptionbudget_status_pod_disruptions_allowed Number of pod disruptions that are currently allowed +# TYPE kube_poddisruptionbudget_status_pod_disruptions_allowed gauge +# HELP kube_poddisruptionbudget_status_expected_pods Total number of pods counted by this disruption budget +# TYPE kube_poddisruptionbudget_status_expected_pods gauge +# HELP kube_poddisruptionbudget_status_observed_generation Most recent generation observed when updating this PDB status +# TYPE kube_poddisruptionbudget_status_observed_generation gauge +# HELP kube_pod_info Information about pod. +# TYPE kube_pod_info gauge +kube_pod_info{namespace="kube-system",pod="storage-provisioner",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="a55a886b-c9b0-11e9-8c8c-080027dc36ee",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="kube-addon-manager-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="804c0947-e160-4f6f-afa2-c87e97a43a84",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="kube-proxy-6dvtf",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="6efd0863-1af9-4d5e-b124-4600b5edb8db",node="minikube",created_by_kind="DaemonSet",created_by_name="kube-proxy",priority_class="system-node-critical"} 1 +kube_pod_info{namespace="default",pod="stdout-logger",host_ip="10.0.2.15",pod_ip="172.17.0.4",uid="49f54912-cf08-11e9-8c8c-080027dc36ee",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="default",pod="playground",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="1fe957cf-932f-4381-b847-cc7c173be7c4",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="kube-apiserver-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="64b87ae5-398f-452d-af8a-76f903944f4d",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="default-mem-example",pod="default-mem-demo",host_ip="10.0.2.15",pod_ip="172.17.0.5",uid="664f8f65-ccc4-11e9-8c8c-080027dc36ee",node="minikube",created_by_kind="",created_by_name="",priority_class=""} 1 +kube_pod_info{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",host_ip="10.0.2.15",pod_ip="172.17.0.9",uid="bb432770-52e1-4478-94df-57d12dbd0638",node="minikube",created_by_kind="ReplicaSet",created_by_name="coredns-5c98db65d4",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",host_ip="10.0.2.15",pod_ip="172.17.0.2",uid="8f536064-840c-4dce-8bd2-8783d41e5ed6",node="minikube",created_by_kind="ReplicaSet",created_by_name="kube-state-metrics-6766c6d46b",priority_class=""} 1 
+kube_pod_info{namespace="kube-system",pod="etcd-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="fd24e4fc-608e-4482-b1f2-bda1b968c6f3",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="coredns-5c98db65d4-df89f",host_ip="10.0.2.15",pod_ip="172.17.0.8",uid="fcf6c6a0-98a1-4ad5-80c4-990e95046c1c",node="minikube",created_by_kind="ReplicaSet",created_by_name="coredns-5c98db65d4",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="kube-controller-manager-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="aa16fc25-2f4f-4c52-9838-82b6debf9df6",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +kube_pod_info{namespace="kube-system",pod="kube-scheduler-minikube",host_ip="10.0.2.15",pod_ip="10.0.2.15",uid="a1c26908-05bd-4eb5-8dc8-f5c9744e4a7b",node="minikube",created_by_kind="",created_by_name="",priority_class="system-cluster-critical"} 1 +# HELP kube_pod_start_time Start time in unix timestamp for a pod. +# TYPE kube_pod_start_time gauge +kube_pod_start_time{namespace="kube-system",pod="kube-scheduler-minikube"} 1.567009586e+09 +kube_pod_start_time{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7"} 1.567689495e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql"} 1.568622119e+09 +kube_pod_start_time{namespace="kube-system",pod="etcd-minikube"} 1.567689466e+09 +kube_pod_start_time{namespace="kube-system",pod="coredns-5c98db65d4-df89f"} 1.567689495e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-controller-manager-minikube"} 1.567689466e+09 +kube_pod_start_time{namespace="default-mem-example",pod="default-mem-demo"} 1.567347951e+09 +kube_pod_start_time{namespace="kube-system",pod="storage-provisioner"} 1.567009613e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-addon-manager-minikube"} 1.567689466e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-proxy-6dvtf"} 1.567689506e+09 +kube_pod_start_time{namespace="default",pod="stdout-logger"} 1.567597011e+09 +kube_pod_start_time{namespace="default",pod="playground"} 1.567693987e+09 +kube_pod_start_time{namespace="kube-system",pod="kube-apiserver-minikube"} 1.567689466e+09 +# HELP kube_pod_completion_time Completion time in unix timestamp for a pod. +# TYPE kube_pod_completion_time gauge +# HELP kube_pod_owner Information about the Pod's owner. 
+# TYPE kube_pod_owner gauge +kube_pod_owner{namespace="default-mem-example",pod="default-mem-demo",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="storage-provisioner",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-addon-manager-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-proxy-6dvtf",owner_kind="DaemonSet",owner_name="kube-proxy",owner_is_controller="true"} 1 +kube_pod_owner{namespace="default",pod="stdout-logger",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="default",pod="playground",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-apiserver-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="kube-scheduler-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",owner_kind="ReplicaSet",owner_name="coredns-5c98db65d4",owner_is_controller="true"} 1 +kube_pod_owner{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",owner_kind="ReplicaSet",owner_name="kube-state-metrics-6766c6d46b",owner_is_controller="true"} 1 +kube_pod_owner{namespace="kube-system",pod="etcd-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +kube_pod_owner{namespace="kube-system",pod="coredns-5c98db65d4-df89f",owner_kind="ReplicaSet",owner_name="coredns-5c98db65d4",owner_is_controller="true"} 1 +kube_pod_owner{namespace="kube-system",pod="kube-controller-manager-minikube",owner_kind="",owner_name="",owner_is_controller=""} 1 +# HELP kube_pod_labels Kubernetes labels converted to Prometheus labels. 
+# TYPE kube_pod_labels gauge +kube_pod_labels{namespace="default-mem-example",pod="default-mem-demo"} 1 +kube_pod_labels{namespace="kube-system",pod="storage-provisioner",label_addonmanager_kubernetes_io_mode="Reconcile",label_integration_test="storage-provisioner"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-addon-manager-minikube",label_component="kube-addon-manager",label_kubernetes_io_minikube_addons="addon-manager",label_version="v9.0"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-proxy-6dvtf",label_controller_revision_hash="84c6b844cd",label_k8s_app="kube-proxy",label_pod_template_generation="2"} 1 +kube_pod_labels{namespace="default",pod="stdout-logger",label_my_label_uses_underscores="ha.ha.ha"} 1 +kube_pod_labels{namespace="default",pod="playground"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-apiserver-minikube",label_component="kube-apiserver",label_tier="control-plane"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-scheduler-minikube",label_component="kube-scheduler",label_tier="control-plane"} 1 +kube_pod_labels{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",label_k8s_app="kube-dns",label_pod_template_hash="5c98db65d4"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",label_k8s_app="kube-state-metrics",label_pod_template_hash="6766c6d46b"} 1 +kube_pod_labels{namespace="kube-system",pod="etcd-minikube",label_component="etcd",label_tier="control-plane"} 1 +kube_pod_labels{namespace="kube-system",pod="coredns-5c98db65d4-df89f",label_k8s_app="kube-dns",label_pod_template_hash="5c98db65d4"} 1 +kube_pod_labels{namespace="kube-system",pod="kube-controller-manager-minikube",label_component="kube-controller-manager",label_tier="control-plane"} 1 +# HELP kube_pod_created Unix creation timestamp +# TYPE kube_pod_created gauge +kube_pod_created{namespace="default",pod="stdout-logger"} 1.567597011e+09 +kube_pod_created{namespace="default",pod="playground"} 1.567693987e+09 +kube_pod_created{namespace="kube-system",pod="kube-apiserver-minikube"} 1.567689473e+09 +kube_pod_created{namespace="default-mem-example",pod="default-mem-demo"} 1.567347951e+09 +kube_pod_created{namespace="kube-system",pod="storage-provisioner"} 1.567009613e+09 +kube_pod_created{namespace="kube-system",pod="kube-addon-manager-minikube"} 1.567689472e+09 +kube_pod_created{namespace="kube-system",pod="kube-proxy-6dvtf"} 1.567689506e+09 +kube_pod_created{namespace="kube-system",pod="coredns-5c98db65d4-df89f"} 1.567689495e+09 +kube_pod_created{namespace="kube-system",pod="kube-controller-manager-minikube"} 1.568621217e+09 +kube_pod_created{namespace="kube-system",pod="kube-scheduler-minikube"} 1.567689472e+09 +kube_pod_created{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7"} 1.567689495e+09 +kube_pod_created{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql"} 1.568622119e+09 +kube_pod_created{namespace="kube-system",pod="etcd-minikube"} 1.567689472e+09 +# HELP kube_pod_status_scheduled_time Unix timestamp when pod moved into scheduled status +# TYPE kube_pod_status_scheduled_time gauge +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-addon-manager-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-proxy-6dvtf"} 1.567689506e+09 +kube_pod_status_scheduled_time{namespace="default",pod="stdout-logger"} 1.567597011e+09 +kube_pod_status_scheduled_time{namespace="default",pod="playground"} 1.567693987e+09 
+kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-apiserver-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="default-mem-example",pod="default-mem-demo"} 1.567347951e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="storage-provisioner"} 1.567009613e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql"} 1.568622119e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="etcd-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="coredns-5c98db65d4-df89f"} 1.567689495e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-controller-manager-minikube"} 1.567689466e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="kube-scheduler-minikube"} 1.567009586e+09 +kube_pod_status_scheduled_time{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7"} 1.567689495e+09 +# HELP kube_pod_status_phase The pods current phase. +# TYPE kube_pod_status_phase gauge +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="etcd-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-df89f",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-controller-manager-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-scheduler-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Pending"} 0 
+kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-proxy-6dvtf",phase="Unknown"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Pending"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Failed"} 0 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Running"} 1 +kube_pod_status_phase{namespace="default",pod="stdout-logger",phase="Unknown"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Pending"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Failed"} 0 +kube_pod_status_phase{namespace="default",pod="playground",phase="Running"} 1 +kube_pod_status_phase{namespace="default",pod="playground",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="kube-apiserver-minikube",phase="Unknown"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Pending"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Failed"} 0 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Running"} 1 +kube_pod_status_phase{namespace="default-mem-example",pod="default-mem-demo",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Running"} 1 +kube_pod_status_phase{namespace="kube-system",pod="storage-provisioner",phase="Unknown"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Pending"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Succeeded"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Failed"} 0 +kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Running"} 1 
+kube_pod_status_phase{namespace="kube-system",pod="kube-addon-manager-minikube",phase="Unknown"} 0 +# HELP kube_pod_status_ready Describes whether the pod is ready to serve requests. +# TYPE kube_pod_status_ready gauge +kube_pod_status_ready{namespace="default-mem-example",pod="default-mem-demo",condition="true"} 1 +kube_pod_status_ready{namespace="default-mem-example",pod="default-mem-demo",condition="false"} 0 +kube_pod_status_ready{namespace="default-mem-example",pod="default-mem-demo",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="storage-provisioner",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="storage-provisioner",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="storage-provisioner",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",condition="unknown"} 0 +kube_pod_status_ready{namespace="default",pod="stdout-logger",condition="true"} 1 +kube_pod_status_ready{namespace="default",pod="stdout-logger",condition="false"} 0 +kube_pod_status_ready{namespace="default",pod="stdout-logger",condition="unknown"} 0 +kube_pod_status_ready{namespace="default",pod="playground",condition="true"} 1 +kube_pod_status_ready{namespace="default",pod="playground",condition="false"} 0 +kube_pod_status_ready{namespace="default",pod="playground",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="etcd-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="etcd-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="etcd-minikube",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="true"} 1 
+kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="unknown"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",condition="true"} 1 +kube_pod_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",condition="false"} 0 +kube_pod_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",condition="unknown"} 0 +# HELP kube_pod_status_scheduled Describes the status of the scheduling process for the pod. +# TYPE kube_pod_status_scheduled gauge +kube_pod_status_scheduled{namespace="default-mem-example",pod="default-mem-demo",condition="true"} 1 +kube_pod_status_scheduled{namespace="default-mem-example",pod="default-mem-demo",condition="false"} 0 +kube_pod_status_scheduled{namespace="default-mem-example",pod="default-mem-demo",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="storage-provisioner",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="storage-provisioner",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="storage-provisioner",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-addon-manager-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-addon-manager-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-addon-manager-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-proxy-6dvtf",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-proxy-6dvtf",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-proxy-6dvtf",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="default",pod="stdout-logger",condition="true"} 1 +kube_pod_status_scheduled{namespace="default",pod="stdout-logger",condition="false"} 0 +kube_pod_status_scheduled{namespace="default",pod="stdout-logger",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="default",pod="playground",condition="true"} 1 +kube_pod_status_scheduled{namespace="default",pod="playground",condition="false"} 0 +kube_pod_status_scheduled{namespace="default",pod="playground",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-apiserver-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-apiserver-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-apiserver-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-scheduler-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-scheduler-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-scheduler-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="false"} 0 
+kube_pod_status_scheduled{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="etcd-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="etcd-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="etcd-minikube",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="coredns-5c98db65d4-df89f",condition="unknown"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-controller-manager-minikube",condition="true"} 1 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-controller-manager-minikube",condition="false"} 0 +kube_pod_status_scheduled{namespace="kube-system",pod="kube-controller-manager-minikube",condition="unknown"} 0 +# HELP kube_pod_container_info Information about a container in a pod. +# TYPE kube_pod_container_info gauge +kube_pod_container_info{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",image="k8s.gcr.io/kube-addon-manager:v9.0",image_id="docker://sha256:119701e77cbc4c6cb32c05d9c39050127eb865c1a9f21f830685379b6b65d6ae",container_id="docker://ae44f6171c1a602d5a034049cc98ed90d30b7013f4ec7a865404bfd6f821e3d6"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",image="k8s.gcr.io/kube-proxy:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-proxy@sha256:626f983f25f8b7799ca7ab001fd0985a72c2643c0acb877d2888c0aa4fcbdf56",container_id="docker://03d9bd9ed242a1b31c273a201aa6701d991cbd6ae5d7ff180d9d7e326efb48a9"} 1 +kube_pod_container_info{namespace="default",pod="stdout-logger",container="alpine",image="alpine:3.2",image_id="docker-pullable://alpine@sha256:e9a2035f9d0d7cee1cdd445f5bfa0c5c646455ee26f14565dce23cf2d2de7570",container_id="docker://be919c5f2a474dc6bbeca0835ffacd639937a060a0904846dc9f52e90c742d0a"} 1 +kube_pod_container_info{namespace="default",pod="playground",container="ubuntu",image="ubuntu:latest",image_id="docker-pullable://ubuntu@sha256:d1d454df0f579c6be4d8161d227462d69e163a8ff9d20a847533989cf0c94d90",container_id="docker://b17b21e347f9448c8b0c2edc1ed8d959bd9079e7befb3281c7a41fedcaecf38f"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",image="k8s.gcr.io/kube-apiserver:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-apiserver@sha256:5fae387bacf1def6c3915b4a3035cf8c8a4d06158b2e676721776d3d4afc05a2",container_id="docker://da080618e76a3dbb812b35c92517096fa12999fdc9ac47c8585380a27acc559f"} 1 +kube_pod_container_info{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",image="nginx:latest",image_id="docker-pullable://nginx@sha256:53ddb41e46de3d63376579acf46f9a41a8d7de33645db47a486de9769201fec9",container_id="docker://3c72f4f311796c758011a04d05b3fb899644ec085850dbcbe885b5144328478f"} 1 +kube_pod_container_info{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",image="gcr.io/k8s-minikube/storage-provisioner:v1.8.1",image_id="docker://sha256:4689081edb103a9e8174bf23a255bfbe0b2d9ed82edc907abab6989d1c60f02c",container_id="docker://0e4b04eaf652840ac51e584dd385c10fbcc137d1a5eee6329967f922b82b5a5a"} 1 
+kube_pod_container_info{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",image="quay.io/coreos/kube-state-metrics:v1.7.2",image_id="docker-pullable://quay.io/coreos/kube-state-metrics@sha256:99a3e3297e281fec09fe850d6d4bccf4d9fd58ff62a5b37764d8a8bd1e79bd14",container_id="docker://fa1137e3a4047845b6bcdce50eb5934373b52200805be55a076407c96a9f5c2a"} 1 +kube_pod_container_info{namespace="kube-system",pod="etcd-minikube",container="etcd",image="k8s.gcr.io/etcd:3.3.10",image_id="docker-pullable://k8s.gcr.io/etcd@sha256:17da501f5d2a675be46040422a27b7cc21b8a43895ac998b171db1c346f361f7",container_id="docker://27c011eefe928f7ceb19808c699dc27770ddfd64397e0f9e7f2b850366e52b85"} 1 +kube_pod_container_info{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",image="k8s.gcr.io/coredns:1.3.1",image_id="docker-pullable://k8s.gcr.io/coredns@sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4",container_id="docker://9c66d133e5a614edfe9f71997247346110b4488096f34e4963a68e2d36c06c67"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",image="k8s.gcr.io/kube-controller-manager:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-controller-manager@sha256:7d3fc48cf83aa0a7b8f129fa4255bb5530908e1a5b194be269ea8329b48e9598",container_id="docker://f90216c30dc12b460eef9e099d043f5c8c30b31677cc23cfb4870242086acecd"} 1 +kube_pod_container_info{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",image="k8s.gcr.io/kube-scheduler:v1.15.2",image_id="docker-pullable://k8s.gcr.io/kube-scheduler@sha256:8fd3c3251f07234a234469e201900e4274726f1fe0d5dc6fb7da911f1c851a1a",container_id="docker://4f3d2d395e04b6d8ff9aa74bc5bb558857780861b17dd888310a4bb27c383b4b"} 1 +kube_pod_container_info{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",image="k8s.gcr.io/coredns:1.3.1",image_id="docker-pullable://k8s.gcr.io/coredns@sha256:02382353821b12c21b062c59184e227e001079bb13ebd01f9d3270ba0fcbf1e4",container_id="docker://839b2b253ab43db87019fbaa0f8e836d31c757d40f44ce5e8678b3bb1573c719"} 1 +# HELP kube_pod_init_container_info Information about an init container in a pod. +# TYPE kube_pod_init_container_info gauge +# HELP kube_pod_container_status_waiting Describes whether the container is currently in waiting state. 
+# TYPE kube_pod_container_status_waiting gauge +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="etcd-minikube",container="etcd"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 0 +kube_pod_container_status_waiting{namespace="default",pod="playground",container="ubuntu"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 0 +kube_pod_container_status_waiting{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 0 +kube_pod_container_status_waiting{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 0 +kube_pod_container_status_waiting{namespace="default",pod="stdout-logger",container="alpine"} 0 +# HELP kube_pod_init_container_status_waiting Describes whether the init container is currently in waiting state. +# TYPE kube_pod_init_container_status_waiting gauge +# HELP kube_pod_container_status_waiting_reason Describes the reason the container is currently in waiting state. 
+# TYPE kube_pod_container_status_waiting_reason gauge +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="CrashLoopBackOff"} 0 
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="CreateContainerConfigError"} 0 
+kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="CreateContainerError"} 0 
+kube_pod_container_status_waiting_reason{namespace="default",pod="stdout-logger",container="alpine",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="default",pod="playground",container="ubuntu",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="InvalidImageName"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ContainerCreating"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="CrashLoopBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="CreateContainerConfigError"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ErrImagePull"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ImagePullBackOff"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="CreateContainerError"} 0 +kube_pod_container_status_waiting_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="InvalidImageName"} 0 +# HELP kube_pod_init_container_status_waiting_reason Describes the reason the init container is currently in waiting state. +# TYPE kube_pod_init_container_status_waiting_reason gauge +# HELP kube_pod_container_status_running Describes whether the container is currently in running state. 
+# TYPE kube_pod_container_status_running gauge +kube_pod_container_status_running{namespace="kube-system",pod="etcd-minikube",container="etcd"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 1 +kube_pod_container_status_running{namespace="default",pod="stdout-logger",container="alpine"} 1 +kube_pod_container_status_running{namespace="default",pod="playground",container="ubuntu"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 1 +kube_pod_container_status_running{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 1 +kube_pod_container_status_running{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 1 +# HELP kube_pod_init_container_status_running Describes whether the init container is currently in running state. +# TYPE kube_pod_init_container_status_running gauge +# HELP kube_pod_container_status_terminated Describes whether the container is currently in terminated state. +# TYPE kube_pod_container_status_terminated gauge +kube_pod_container_status_terminated{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 0 +kube_pod_container_status_terminated{namespace="default",pod="stdout-logger",container="alpine"} 0 +kube_pod_container_status_terminated{namespace="default",pod="playground",container="ubuntu"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 0 +kube_pod_container_status_terminated{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="etcd-minikube",container="etcd"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 0 +kube_pod_container_status_terminated{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 0 +# HELP kube_pod_init_container_status_terminated Describes whether the init container is currently in terminated state. 
+# TYPE kube_pod_init_container_status_terminated gauge +# HELP kube_pod_container_status_terminated_reason Describes the reason the container is currently in terminated state. +# TYPE kube_pod_container_status_terminated_reason gauge +kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Completed"} 0 
+kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="OOMKilled"} 0 
+kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="DeadlineExceeded"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="OOMKilled"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Completed"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Error"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ContainerCannotRun"} 0 +kube_pod_container_status_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="DeadlineExceeded"} 0 +# HELP kube_pod_init_container_status_terminated_reason Describes the reason the init container is currently in terminated state. +# TYPE kube_pod_init_container_status_terminated_reason gauge +# HELP kube_pod_container_status_last_terminated_reason Describes the last reason the container was in terminated state. 
+# TYPE kube_pod_container_status_last_terminated_reason gauge +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="Error"} 1 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="Error"} 1 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="Error"} 1 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="stdout-logger",container="alpine",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="Error"} 0 
+kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default",pod="playground",container="ubuntu",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Completed"} 1 +kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics",reason="DeadlineExceeded"} 0 
+kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="etcd-minikube",container="etcd",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="Error"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",reason="DeadlineExceeded"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="OOMKilled"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Completed"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="Error"} 1 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="ContainerCannotRun"} 0 +kube_pod_container_status_last_terminated_reason{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",reason="DeadlineExceeded"} 0 +# HELP kube_pod_init_container_status_last_terminated_reason Describes the last reason the init container was in terminated state. +# TYPE kube_pod_init_container_status_last_terminated_reason gauge +# HELP kube_pod_container_status_ready Describes whether the containers readiness check succeeded. 
+# TYPE kube_pod_container_status_ready gauge +kube_pod_container_status_ready{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="etcd-minikube",container="etcd"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 1 +kube_pod_container_status_ready{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 1 +kube_pod_container_status_ready{namespace="default",pod="stdout-logger",container="alpine"} 1 +kube_pod_container_status_ready{namespace="default",pod="playground",container="ubuntu"} 1 +kube_pod_container_status_ready{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 1 +# HELP kube_pod_init_container_status_ready Describes whether the init containers readiness check succeeded. +# TYPE kube_pod_init_container_status_ready gauge +# HELP kube_pod_container_status_restarts_total The number of container restarts per container. +# TYPE kube_pod_container_status_restarts_total counter +kube_pod_container_status_restarts_total{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns"} 0 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager"} 0 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler"} 2 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns"} 0 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-state-metrics-6766c6d46b-b47ql",container="kube-state-metrics"} 0 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="etcd-minikube",container="etcd"} 0 +kube_pod_container_status_restarts_total{namespace="default",pod="stdout-logger",container="alpine"} 1 +kube_pod_container_status_restarts_total{namespace="default",pod="playground",container="ubuntu"} 0 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver"} 0 +kube_pod_container_status_restarts_total{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr"} 1 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="storage-provisioner",container="storage-provisioner"} 2 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager"} 1 +kube_pod_container_status_restarts_total{namespace="kube-system",pod="kube-proxy-6dvtf",container="kube-proxy"} 0 +# HELP kube_pod_init_container_status_restarts_total The number of restarts for the init container. 
+# TYPE kube_pod_init_container_status_restarts_total counter +# HELP kube_pod_container_resource_requests The number of requested request resource by a container. +# TYPE kube_pod_container_resource_requests gauge +kube_pod_container_resource_requests{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube",resource="memory",unit="byte"} 2.68435456e+08 +kube_pod_container_resource_requests{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube",resource="cpu",unit="core"} 0.005 +kube_pod_container_resource_requests{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube",resource="memory",unit="byte"} 5.24288e+07 +kube_pod_container_resource_requests{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",node="minikube",resource="cpu",unit="core"} 0.25 +kube_pod_container_resource_requests{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",node="minikube",resource="cpu",unit="core"} 0.1 +kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube",resource="cpu",unit="core"} 0.1 +kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube",resource="memory",unit="byte"} 7.340032e+07 +kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube",resource="cpu",unit="core"} 0.1 +kube_pod_container_resource_requests{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube",resource="memory",unit="byte"} 7.340032e+07 +kube_pod_container_resource_requests{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",node="minikube",resource="cpu",unit="core"} 0.2 +# HELP kube_pod_container_resource_limits The number of requested limit resource by a container. +# TYPE kube_pod_container_resource_limits gauge +kube_pod_container_resource_limits{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube",resource="memory",unit="byte"} 5.36870912e+08 +kube_pod_container_resource_limits{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube",resource="memory",unit="byte"} 1.7825792e+08 +kube_pod_container_resource_limits{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube",resource="memory",unit="byte"} 1.7825792e+08 +# HELP kube_pod_init_container_resource_limits The number of requested limit resource by the init container. +# TYPE kube_pod_init_container_resource_limits gauge +# HELP kube_pod_container_resource_requests_cpu_cores The number of requested cpu cores by a container. 
+# TYPE kube_pod_container_resource_requests_cpu_cores gauge +kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube"} 0.1 +kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-controller-manager-minikube",container="kube-controller-manager",node="minikube"} 0.2 +kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-scheduler-minikube",container="kube-scheduler",node="minikube"} 0.1 +kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube"} 0.1 +kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube"} 0.005 +kube_pod_container_resource_requests_cpu_cores{namespace="kube-system",pod="kube-apiserver-minikube",container="kube-apiserver",node="minikube"} 0.25 +# HELP kube_init_pod_container_resource_requests_cpu_cores The number of requested cpu cores by an init container. +# TYPE kube_init_pod_container_resource_requests_cpu_cores gauge +# HELP kube_pod_container_resource_requests_memory_bytes The number of requested memory bytes by a container. +# TYPE kube_pod_container_resource_requests_memory_bytes gauge +kube_pod_container_resource_requests_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube"} 7.340032e+07 +kube_pod_container_resource_requests_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube"} 7.340032e+07 +kube_pod_container_resource_requests_memory_bytes{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube"} 2.68435456e+08 +kube_pod_container_resource_requests_memory_bytes{namespace="kube-system",pod="kube-addon-manager-minikube",container="kube-addon-manager",node="minikube"} 5.24288e+07 +# HELP kube_pod_container_resource_limits_cpu_cores The limit on cpu cores to be used by a container. +# TYPE kube_pod_container_resource_limits_cpu_cores gauge +# HELP kube_pod_container_resource_limits_memory_bytes The limit on memory to be used by a container in bytes. +# TYPE kube_pod_container_resource_limits_memory_bytes gauge +kube_pod_container_resource_limits_memory_bytes{namespace="default-mem-example",pod="default-mem-demo",container="default-mem-demo-ctr",node="minikube"} 5.36870912e+08 +kube_pod_container_resource_limits_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-df89f",container="coredns",node="minikube"} 1.7825792e+08 +kube_pod_container_resource_limits_memory_bytes{namespace="kube-system",pod="coredns-5c98db65d4-8tjk7",container="coredns",node="minikube"} 1.7825792e+08 +# HELP kube_pod_spec_volumes_persistentvolumeclaims_info Information about persistentvolumeclaim volumes in a pod. +# TYPE kube_pod_spec_volumes_persistentvolumeclaims_info gauge +# HELP kube_pod_spec_volumes_persistentvolumeclaims_readonly Describes whether a persistentvolumeclaim is mounted read only. 
+# TYPE kube_pod_spec_volumes_persistentvolumeclaims_readonly gauge +# HELP kube_replicaset_created Unix creation timestamp +# TYPE kube_replicaset_created gauge +kube_replicaset_created{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1.568622119e+09 +kube_replicaset_created{namespace="kube-system",replicaset="coredns-576cbf47c7"} 1.567009611e+09 +kube_replicaset_created{namespace="kube-system",replicaset="coredns-5c98db65d4"} 1.567689495e+09 +# HELP kube_replicaset_status_replicas The number of replicas per ReplicaSet. +# TYPE kube_replicaset_status_replicas gauge +kube_replicaset_status_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1 +kube_replicaset_status_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0 +kube_replicaset_status_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2 +# HELP kube_replicaset_status_fully_labeled_replicas The number of fully labeled replicas per ReplicaSet. +# TYPE kube_replicaset_status_fully_labeled_replicas gauge +kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1 +kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0 +kube_replicaset_status_fully_labeled_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2 +# HELP kube_replicaset_status_ready_replicas The number of ready replicas per ReplicaSet. +# TYPE kube_replicaset_status_ready_replicas gauge +kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1 +kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0 +kube_replicaset_status_ready_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2 +# HELP kube_replicaset_status_observed_generation The generation observed by the ReplicaSet controller. +# TYPE kube_replicaset_status_observed_generation gauge +kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1 +kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="coredns-576cbf47c7"} 3 +kube_replicaset_status_observed_generation{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2 +# HELP kube_replicaset_spec_replicas Number of desired pods for a ReplicaSet. +# TYPE kube_replicaset_spec_replicas gauge +kube_replicaset_spec_replicas{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1 +kube_replicaset_spec_replicas{namespace="kube-system",replicaset="coredns-576cbf47c7"} 0 +kube_replicaset_spec_replicas{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2 +# HELP kube_replicaset_metadata_generation Sequence number representing a specific generation of the desired state. +# TYPE kube_replicaset_metadata_generation gauge +kube_replicaset_metadata_generation{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b"} 1 +kube_replicaset_metadata_generation{namespace="kube-system",replicaset="coredns-576cbf47c7"} 3 +kube_replicaset_metadata_generation{namespace="kube-system",replicaset="coredns-5c98db65d4"} 2 +# HELP kube_replicaset_owner Information about the ReplicaSet's owner. 
+# TYPE kube_replicaset_owner gauge +kube_replicaset_owner{namespace="kube-system",replicaset="coredns-576cbf47c7",owner_kind="Deployment",owner_name="coredns",owner_is_controller="true"} 1 +kube_replicaset_owner{namespace="kube-system",replicaset="coredns-5c98db65d4",owner_kind="Deployment",owner_name="coredns",owner_is_controller="true"} 1 +kube_replicaset_owner{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b",owner_kind="Deployment",owner_name="kube-state-metrics",owner_is_controller="true"} 1 +# HELP kube_replicaset_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_replicaset_labels gauge +kube_replicaset_labels{namespace="kube-system",replicaset="kube-state-metrics-6766c6d46b",label_pod_template_hash="6766c6d46b",label_k8s_app="kube-state-metrics"} 1 +kube_replicaset_labels{namespace="kube-system",replicaset="coredns-576cbf47c7",label_pod_template_hash="576cbf47c7",label_k8s_app="kube-dns"} 1 +kube_replicaset_labels{namespace="kube-system",replicaset="coredns-5c98db65d4",label_k8s_app="kube-dns",label_pod_template_hash="5c98db65d4"} 1 +# HELP kube_replicationcontroller_created Unix creation timestamp +# TYPE kube_replicationcontroller_created gauge +# HELP kube_replicationcontroller_status_replicas The number of replicas per ReplicationController. +# TYPE kube_replicationcontroller_status_replicas gauge +# HELP kube_replicationcontroller_status_fully_labeled_replicas The number of fully labeled replicas per ReplicationController. +# TYPE kube_replicationcontroller_status_fully_labeled_replicas gauge +# HELP kube_replicationcontroller_status_ready_replicas The number of ready replicas per ReplicationController. +# TYPE kube_replicationcontroller_status_ready_replicas gauge +# HELP kube_replicationcontroller_status_available_replicas The number of available replicas per ReplicationController. +# TYPE kube_replicationcontroller_status_available_replicas gauge +# HELP kube_replicationcontroller_status_observed_generation The generation observed by the ReplicationController controller. +# TYPE kube_replicationcontroller_status_observed_generation gauge +# HELP kube_replicationcontroller_spec_replicas Number of desired pods for a ReplicationController. +# TYPE kube_replicationcontroller_spec_replicas gauge +# HELP kube_replicationcontroller_metadata_generation Sequence number representing a specific generation of the desired state. +# TYPE kube_replicationcontroller_metadata_generation gauge +# HELP kube_resourcequota_created Unix creation timestamp +# TYPE kube_resourcequota_created gauge +kube_resourcequota_created{namespace="rqtest",resourcequota="resources"} 1.568629266e+09 +kube_resourcequota_created{namespace="rqtest",resourcequota="objects"} 1.568629266e+09 +# HELP kube_resourcequota Information about resource quota. 
+# TYPE kube_resourcequota gauge +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="pods",type="hard"} 3 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="replicationcontrollers",type="hard"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="secrets",type="hard"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services",type="hard"} 2 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services.loadbalancers",type="hard"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="configmaps",type="hard"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="persistentvolumeclaims",type="hard"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="configmaps",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="persistentvolumeclaims",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="pods",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="replicationcontrollers",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="secrets",type="used"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services",type="used"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="objects",resource="services.loadbalancers",type="used"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.cpu",type="hard"} 2 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.memory",type="hard"} 2.147483648e+09 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.cpu",type="hard"} 1 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.memory",type="hard"} 1.073741824e+09 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.cpu",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="limits.memory",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.cpu",type="used"} 0 +kube_resourcequota{namespace="rqtest",resourcequota="resources",resource="requests.memory",type="used"} 0 +# HELP kube_secret_info Information about secret. 
+# TYPE kube_secret_info gauge +kube_secret_info{namespace="kube-system",secret="token-cleaner-token-hvmvg"} 1 +kube_secret_info{namespace="kube-system",secret="coredns-token-sck4w"} 1 +kube_secret_info{namespace="kube-system",secret="pvc-protection-controller-token-l7sng"} 1 +kube_secret_info{namespace="kube-system",secret="replicaset-controller-token-w7cbh"} 1 +kube_secret_info{namespace="default",secret="default-token-t7g88"} 1 +kube_secret_info{namespace="kube-system",secret="disruption-controller-token-p746l"} 1 +kube_secret_info{namespace="pablo",secret="default-token-bcdzp"} 1 +kube_secret_info{namespace="kube-system",secret="storage-provisioner-token-77qbj"} 1 +kube_secret_info{namespace="default-mem-example",secret="default-token-ck879"} 1 +kube_secret_info{namespace="kube-system",secret="cronjob-controller-token-5pdpx"} 1 +kube_secret_info{namespace="kube-system",secret="namespace-controller-token-cz6nm"} 1 +kube_secret_info{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w"} 1 +kube_secret_info{namespace="kube-system",secret="attachdetach-controller-token-ww2k9"} 1 +kube_secret_info{namespace="kube-system",secret="pv-protection-controller-token-wgqk6"} 1 +kube_secret_info{namespace="kube-system",secret="service-account-controller-token-trs9v"} 1 +kube_secret_info{namespace="kube-system",secret="deployment-controller-token-8jz6f"} 1 +kube_secret_info{namespace="kube-system",secret="default-token-wlxnx"} 1 +kube_secret_info{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc"} 1 +kube_secret_info{namespace="kube-system",secret="bootstrap-signer-token-5rthh"} 1 +kube_secret_info{namespace="kube-system",secret="resourcequota-controller-token-ftfbv"} 1 +kube_secret_info{namespace="kube-system",secret="ttl-controller-token-cjgd4"} 1 +kube_secret_info{namespace="kube-system",secret="expand-controller-token-bqc2l"} 1 +kube_secret_info{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp"} 1 +kube_secret_info{namespace="kube-system",secret="kube-state-metrics-token-q84xw"} 1 +kube_secret_info{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps"} 1 +kube_secret_info{namespace="kube-public",secret="default-token-fgbfg"} 1 +kube_secret_info{namespace="kube-system",secret="endpoint-controller-token-xhchl"} 1 +kube_secret_info{namespace="kube-system",secret="kube-proxy-token-xb2xk"} 1 +kube_secret_info{namespace="rqtest",secret="default-token-bx4mb"} 1 +kube_secret_info{namespace="kube-system",secret="metricbeat-kube-token-p7lz2"} 1 +kube_secret_info{namespace="kube-system",secret="certificate-controller-token-4tpf5"} 1 +kube_secret_info{namespace="kube-system",secret="replication-controller-token-4bbg4"} 1 +kube_secret_info{namespace="kube-node-lease",secret="default-token-4bnbt"} 1 +kube_secret_info{namespace="kube-system",secret="node-controller-token-vlp7g"} 1 +kube_secret_info{namespace="kube-system",secret="service-controller-token-n6q5z"} 1 +kube_secret_info{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm"} 1 +kube_secret_info{namespace="kube-system",secret="job-controller-token-jzdhc"} 1 +kube_secret_info{namespace="kube-system",secret="statefulset-controller-token-c6ln8"} 1 +kube_secret_info{namespace="kube-system",secret="daemon-set-controller-token-hlmp4"} 1 +kube_secret_info{namespace="default",secret="metricbeat-kube-token-8bswn"} 1 +# HELP kube_secret_type Type about secret. 
+# TYPE kube_secret_type gauge +kube_secret_type{namespace="kube-system",secret="node-controller-token-vlp7g",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="service-controller-token-n6q5z",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="rqtest",secret="default-token-bx4mb",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="metricbeat-kube-token-p7lz2",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="certificate-controller-token-4tpf5",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="replication-controller-token-4bbg4",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-node-lease",secret="default-token-4bnbt",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="job-controller-token-jzdhc",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="statefulset-controller-token-c6ln8",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="daemon-set-controller-token-hlmp4",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="default",secret="metricbeat-kube-token-8bswn",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="disruption-controller-token-p746l",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="pablo",secret="default-token-bcdzp",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="storage-provisioner-token-77qbj",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="token-cleaner-token-hvmvg",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="coredns-token-sck4w",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="pvc-protection-controller-token-l7sng",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="replicaset-controller-token-w7cbh",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="default",secret="default-token-t7g88",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="default-mem-example",secret="default-token-ck879",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="cronjob-controller-token-5pdpx",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="namespace-controller-token-cz6nm",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="default-token-wlxnx",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="attachdetach-controller-token-ww2k9",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="pv-protection-controller-token-wgqk6",type="kubernetes.io/service-account-token"} 1 
+kube_secret_type{namespace="kube-system",secret="service-account-controller-token-trs9v",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="deployment-controller-token-8jz6f",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="bootstrap-signer-token-5rthh",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="kube-state-metrics-token-q84xw",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="resourcequota-controller-token-ftfbv",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="ttl-controller-token-cjgd4",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="expand-controller-token-bqc2l",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-public",secret="default-token-fgbfg",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="endpoint-controller-token-xhchl",type="kubernetes.io/service-account-token"} 1 +kube_secret_type{namespace="kube-system",secret="kube-proxy-token-xb2xk",type="kubernetes.io/service-account-token"} 1 +# HELP kube_secret_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_secret_labels gauge +kube_secret_labels{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc"} 1 +kube_secret_labels{namespace="kube-system",secret="bootstrap-signer-token-5rthh"} 1 +kube_secret_labels{namespace="kube-system",secret="resourcequota-controller-token-ftfbv"} 1 +kube_secret_labels{namespace="kube-system",secret="ttl-controller-token-cjgd4"} 1 +kube_secret_labels{namespace="kube-system",secret="expand-controller-token-bqc2l"} 1 +kube_secret_labels{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp"} 1 +kube_secret_labels{namespace="kube-system",secret="kube-state-metrics-token-q84xw"} 1 +kube_secret_labels{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps"} 1 +kube_secret_labels{namespace="kube-public",secret="default-token-fgbfg"} 1 +kube_secret_labels{namespace="kube-system",secret="endpoint-controller-token-xhchl"} 1 +kube_secret_labels{namespace="kube-system",secret="kube-proxy-token-xb2xk"} 1 +kube_secret_labels{namespace="rqtest",secret="default-token-bx4mb"} 1 +kube_secret_labels{namespace="kube-system",secret="metricbeat-kube-token-p7lz2"} 1 +kube_secret_labels{namespace="kube-system",secret="certificate-controller-token-4tpf5"} 1 +kube_secret_labels{namespace="kube-system",secret="replication-controller-token-4bbg4"} 1 +kube_secret_labels{namespace="kube-node-lease",secret="default-token-4bnbt"} 1 +kube_secret_labels{namespace="kube-system",secret="node-controller-token-vlp7g"} 1 +kube_secret_labels{namespace="kube-system",secret="service-controller-token-n6q5z"} 1 +kube_secret_labels{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm"} 1 +kube_secret_labels{namespace="kube-system",secret="job-controller-token-jzdhc"} 1 
+kube_secret_labels{namespace="kube-system",secret="statefulset-controller-token-c6ln8"} 1 +kube_secret_labels{namespace="kube-system",secret="daemon-set-controller-token-hlmp4"} 1 +kube_secret_labels{namespace="default",secret="metricbeat-kube-token-8bswn"} 1 +kube_secret_labels{namespace="kube-system",secret="token-cleaner-token-hvmvg"} 1 +kube_secret_labels{namespace="kube-system",secret="coredns-token-sck4w"} 1 +kube_secret_labels{namespace="kube-system",secret="pvc-protection-controller-token-l7sng"} 1 +kube_secret_labels{namespace="kube-system",secret="replicaset-controller-token-w7cbh"} 1 +kube_secret_labels{namespace="default",secret="default-token-t7g88"} 1 +kube_secret_labels{namespace="kube-system",secret="disruption-controller-token-p746l"} 1 +kube_secret_labels{namespace="pablo",secret="default-token-bcdzp"} 1 +kube_secret_labels{namespace="kube-system",secret="storage-provisioner-token-77qbj"} 1 +kube_secret_labels{namespace="default-mem-example",secret="default-token-ck879"} 1 +kube_secret_labels{namespace="kube-system",secret="cronjob-controller-token-5pdpx"} 1 +kube_secret_labels{namespace="kube-system",secret="namespace-controller-token-cz6nm"} 1 +kube_secret_labels{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w"} 1 +kube_secret_labels{namespace="kube-system",secret="attachdetach-controller-token-ww2k9"} 1 +kube_secret_labels{namespace="kube-system",secret="pv-protection-controller-token-wgqk6"} 1 +kube_secret_labels{namespace="kube-system",secret="service-account-controller-token-trs9v"} 1 +kube_secret_labels{namespace="kube-system",secret="deployment-controller-token-8jz6f"} 1 +kube_secret_labels{namespace="kube-system",secret="default-token-wlxnx"} 1 +# HELP kube_secret_created Unix creation timestamp +# TYPE kube_secret_created gauge +kube_secret_created{namespace="kube-system",secret="statefulset-controller-token-c6ln8"} 1.567009608e+09 +kube_secret_created{namespace="kube-system",secret="daemon-set-controller-token-hlmp4"} 1.567009608e+09 +kube_secret_created{namespace="default",secret="metricbeat-kube-token-8bswn"} 1.567693987e+09 +kube_secret_created{namespace="kube-system",secret="job-controller-token-jzdhc"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="pvc-protection-controller-token-l7sng"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="replicaset-controller-token-w7cbh"} 1.567009605e+09 +kube_secret_created{namespace="default",secret="default-token-t7g88"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="disruption-controller-token-p746l"} 1.567009611e+09 +kube_secret_created{namespace="pablo",secret="default-token-bcdzp"} 1.567347599e+09 +kube_secret_created{namespace="kube-system",secret="storage-provisioner-token-77qbj"} 1.567009612e+09 +kube_secret_created{namespace="kube-system",secret="token-cleaner-token-hvmvg"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="coredns-token-sck4w"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="cronjob-controller-token-5pdpx"} 1.567009607e+09 +kube_secret_created{namespace="kube-system",secret="namespace-controller-token-cz6nm"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w"} 1.567009607e+09 +kube_secret_created{namespace="default-mem-example",secret="default-token-ck879"} 1.567347623e+09 +kube_secret_created{namespace="kube-system",secret="pv-protection-controller-token-wgqk6"} 1.56700961e+09 
+kube_secret_created{namespace="kube-system",secret="service-account-controller-token-trs9v"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="deployment-controller-token-8jz6f"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="default-token-wlxnx"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="attachdetach-controller-token-ww2k9"} 1.567009608e+09 +kube_secret_created{namespace="kube-system",secret="bootstrap-signer-token-5rthh"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc"} 1.56700961e+09 +kube_secret_created{namespace="kube-system",secret="ttl-controller-token-cjgd4"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="expand-controller-token-bqc2l"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp"} 1.567009607e+09 +kube_secret_created{namespace="kube-system",secret="kube-state-metrics-token-q84xw"} 1.568195118e+09 +kube_secret_created{namespace="kube-system",secret="resourcequota-controller-token-ftfbv"} 1.567009606e+09 +kube_secret_created{namespace="kube-public",secret="default-token-fgbfg"} 1.567009611e+09 +kube_secret_created{namespace="kube-system",secret="endpoint-controller-token-xhchl"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="kube-proxy-token-xb2xk"} 1.567009606e+09 +kube_secret_created{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="certificate-controller-token-4tpf5"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="replication-controller-token-4bbg4"} 1.567009606e+09 +kube_secret_created{namespace="kube-node-lease",secret="default-token-4bnbt"} 1.567689495e+09 +kube_secret_created{namespace="kube-system",secret="node-controller-token-vlp7g"} 1.567009605e+09 +kube_secret_created{namespace="kube-system",secret="service-controller-token-n6q5z"} 1.567009609e+09 +kube_secret_created{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm"} 1.56700961e+09 +kube_secret_created{namespace="rqtest",secret="default-token-bx4mb"} 1.568629266e+09 +kube_secret_created{namespace="kube-system",secret="metricbeat-kube-token-p7lz2"} 1.567585201e+09 +# HELP kube_secret_metadata_resource_version Resource version representing a specific version of secret. 
+# TYPE kube_secret_metadata_resource_version gauge +kube_secret_metadata_resource_version{namespace="kube-system",secret="ttl-controller-token-cjgd4",resource_version="201"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="expand-controller-token-bqc2l",resource_version="204"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="persistent-volume-binder-token-5vnhp",resource_version="247"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="kube-state-metrics-token-q84xw",resource_version="1322401"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="resourcequota-controller-token-ftfbv",resource_version="230"} 1 +kube_secret_metadata_resource_version{namespace="kube-public",secret="default-token-fgbfg",resource_version="307"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="endpoint-controller-token-xhchl",resource_version="264"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="kube-proxy-token-xb2xk",resource_version="227"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="clusterrole-aggregation-controller-token-p48ps",resource_version="185"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="certificate-controller-token-4tpf5",resource_version="191"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="replication-controller-token-4bbg4",resource_version="224"} 1 +kube_secret_metadata_resource_version{namespace="kube-node-lease",secret="default-token-4bnbt",resource_version="709402"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="node-controller-token-vlp7g",resource_version="182"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="service-controller-token-n6q5z",resource_version="270"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="generic-garbage-collector-token-bwzxm",resource_version="280"} 1 +kube_secret_metadata_resource_version{namespace="rqtest",secret="default-token-bx4mb",resource_version="1848837"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="metricbeat-kube-token-p7lz2",resource_version="688578"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="statefulset-controller-token-c6ln8",resource_version="257"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="daemon-set-controller-token-hlmp4",resource_version="254"} 1 +kube_secret_metadata_resource_version{namespace="default",secret="metricbeat-kube-token-8bswn",resource_version="715037"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="job-controller-token-jzdhc",resource_version="240"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="pvc-protection-controller-token-l7sng",resource_version="274"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="replicaset-controller-token-w7cbh",resource_version="188"} 1 +kube_secret_metadata_resource_version{namespace="default",secret="default-token-t7g88",resource_version="302"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="disruption-controller-token-p746l",resource_version="288"} 1 +kube_secret_metadata_resource_version{namespace="pablo",secret="default-token-bcdzp",resource_version="404521"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="storage-provisioner-token-77qbj",resource_version="345"} 1 
+kube_secret_metadata_resource_version{namespace="kube-system",secret="token-cleaner-token-hvmvg",resource_version="292"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="coredns-token-sck4w",resource_version="219"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="cronjob-controller-token-5pdpx",resource_version="243"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="namespace-controller-token-cz6nm",resource_version="267"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="pod-garbage-collector-token-r5p7w",resource_version="250"} 1 +kube_secret_metadata_resource_version{namespace="default-mem-example",secret="default-token-ck879",resource_version="404555"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="pv-protection-controller-token-wgqk6",resource_version="277"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="service-account-controller-token-trs9v",resource_version="197"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="deployment-controller-token-8jz6f",resource_version="179"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="default-token-wlxnx",resource_version="304"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="attachdetach-controller-token-ww2k9",resource_version="260"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="bootstrap-signer-token-5rthh",resource_version="194"} 1 +kube_secret_metadata_resource_version{namespace="kube-system",secret="horizontal-pod-autoscaler-token-jk9wc",resource_version="283"} 1 +# HELP kube_service_info Information about service. +# TYPE kube_service_info gauge +kube_service_info{namespace="kube-system",service="kube-dns",cluster_ip="10.96.0.10",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="my-nginx",cluster_ip="10.111.59.54",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="kube-system",service="kube-state-metrics",cluster_ip="10.111.114.13",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="willsucceed",cluster_ip="10.104.70.53",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="willfail",cluster_ip="10.99.32.65",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="rqtest",service="willsucceed",cluster_ip="10.97.184.107",external_name="",load_balancer_ip=""} 1 +kube_service_info{namespace="default",service="kubernetes",cluster_ip="10.96.0.1",external_name="",load_balancer_ip=""} 1 +# HELP kube_service_created Unix creation timestamp +# TYPE kube_service_created gauge +kube_service_created{namespace="default",service="kubernetes"} 1.567009602e+09 +kube_service_created{namespace="kube-system",service="kube-dns"} 1.567009606e+09 +kube_service_created{namespace="default",service="my-nginx"} 1.567783021e+09 +kube_service_created{namespace="kube-system",service="kube-state-metrics"} 1.568195118e+09 +kube_service_created{namespace="default",service="willsucceed"} 1.568629567e+09 +kube_service_created{namespace="default",service="willfail"} 1.568629567e+09 +kube_service_created{namespace="rqtest",service="willsucceed"} 1.568629886e+09 +# HELP kube_service_spec_type Type about service. 
+# TYPE kube_service_spec_type gauge +kube_service_spec_type{namespace="rqtest",service="willsucceed",type="LoadBalancer"} 1 +kube_service_spec_type{namespace="default",service="kubernetes",type="ClusterIP"} 1 +kube_service_spec_type{namespace="kube-system",service="kube-dns",type="ClusterIP"} 1 +kube_service_spec_type{namespace="default",service="my-nginx",type="LoadBalancer"} 1 +kube_service_spec_type{namespace="kube-system",service="kube-state-metrics",type="ClusterIP"} 1 +kube_service_spec_type{namespace="default",service="willsucceed",type="LoadBalancer"} 1 +kube_service_spec_type{namespace="default",service="willfail",type="LoadBalancer"} 1 +# HELP kube_service_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_service_labels gauge +kube_service_labels{namespace="default",service="kubernetes",label_component="apiserver",label_provider="kubernetes"} 1 +kube_service_labels{namespace="kube-system",service="kube-dns",label_kubernetes_io_name="KubeDNS",label_k8s_app="kube-dns",label_kubernetes_io_cluster_service="true"} 1 +kube_service_labels{namespace="default",service="my-nginx",label_app="my-nginx"} 1 +kube_service_labels{namespace="kube-system",service="kube-state-metrics",label_k8s_app="kube-state-metrics"} 1 +kube_service_labels{namespace="default",service="willsucceed"} 1 +kube_service_labels{namespace="default",service="willfail"} 1 +kube_service_labels{namespace="rqtest",service="willsucceed"} 1 +# HELP kube_service_spec_external_ip Service external ips. One series for each ip +# TYPE kube_service_spec_external_ip gauge +# HELP kube_service_status_load_balancer_ingress Service load balancer ingress status +# TYPE kube_service_status_load_balancer_ingress gauge +# HELP kube_statefulset_created Unix creation timestamp +# TYPE kube_statefulset_created gauge +# HELP kube_statefulset_status_replicas The number of replicas per StatefulSet. +# TYPE kube_statefulset_status_replicas gauge +# HELP kube_statefulset_status_replicas_current The number of current replicas per StatefulSet. +# TYPE kube_statefulset_status_replicas_current gauge +# HELP kube_statefulset_status_replicas_ready The number of ready replicas per StatefulSet. +# TYPE kube_statefulset_status_replicas_ready gauge +# HELP kube_statefulset_status_replicas_updated The number of updated replicas per StatefulSet. +# TYPE kube_statefulset_status_replicas_updated gauge +# HELP kube_statefulset_status_observed_generation The generation observed by the StatefulSet controller. +# TYPE kube_statefulset_status_observed_generation gauge +# HELP kube_statefulset_replicas Number of desired pods for a StatefulSet. +# TYPE kube_statefulset_replicas gauge +# HELP kube_statefulset_metadata_generation Sequence number representing a specific generation of the desired state for the StatefulSet. +# TYPE kube_statefulset_metadata_generation gauge +# HELP kube_statefulset_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_statefulset_labels gauge +# HELP kube_statefulset_status_current_revision Indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas). +# TYPE kube_statefulset_status_current_revision gauge +# HELP kube_statefulset_status_update_revision Indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas) +# TYPE kube_statefulset_status_update_revision gauge +# HELP kube_storageclass_info Information about storageclass. 
+# TYPE kube_storageclass_info gauge +kube_storageclass_info{storageclass="standard",provisioner="k8s.io/minikube-hostpath",reclaimPolicy="Delete",volumeBindingMode="Immediate"} 1 +# HELP kube_storageclass_created Unix creation timestamp +# TYPE kube_storageclass_created gauge +kube_storageclass_created{storageclass="standard"} 1.567009612e+09 +# HELP kube_storageclass_labels Kubernetes labels converted to Prometheus labels. +# TYPE kube_storageclass_labels gauge +kube_storageclass_labels{storageclass="standard",label_addonmanager_kubernetes_io_mode="EnsureExists"} 1 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/kube-state-metrics.1.7.plain-expected.json b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/kube-state-metrics.1.7.plain-expected.json new file mode 100644 index 00000000..c8e174bf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/_meta/testdata.flaky/kube-state-metrics.1.7.plain-expected.json @@ -0,0 +1,578 @@ +[ + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 0, + "resource": "requests.cpu", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 2, + "resource": "limits.cpu", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 3, + "resource": "pods", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 1, + "resource": "requests.cpu", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 1, + "resource": "configmaps", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 1, + "resource": "replicationcontrollers", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + 
"address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "created": { + "sec": 1568629266 + }, + "name": "objects" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 0, + "resource": "persistentvolumeclaims", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "created": { + "sec": 1568629266 + }, + "name": "resources" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 1073741824, + "resource": "requests.memory", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 2, + "resource": "services", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 0, + "resource": "replicationcontrollers", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 0, + "resource": "configmaps", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 0, + "resource": "requests.memory", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + 
"resourcequota": { + "name": "objects", + "quota": 1, + "resource": "services", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 2147483648, + "resource": "limits.memory", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 1, + "resource": "secrets", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 1, + "resource": "services.loadbalancers", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 0, + "resource": "limits.memory", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 0, + "resource": "persistentvolumeclaims", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 1, + "resource": "secrets", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "resources", + "quota": 0, + "resource": "limits.cpu", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 0, + "resource": "pods", + "type": "used" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + 
"address": "127.0.0.1:55555", + "type": "kubernetes" + } + }, + { + "event": { + "dataset": "kubernetes.resourcequota", + "duration": 115000, + "module": "kubernetes" + }, + "kubernetes": { + "namespace": "rqtest", + "resourcequota": { + "name": "objects", + "quota": 1, + "resource": "services.loadbalancers", + "type": "hard" + } + }, + "metricset": { + "name": "state_resourcequota", + "period": 10000 + }, + "service": { + "address": "127.0.0.1:55555", + "type": "kubernetes" + } + } +] \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota.go new file mode 100644 index 00000000..0cd31adf --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package state_resourcequota + +import ( + p "github.com/elastic/beats/metricbeat/helper/prometheus" + "github.com/elastic/beats/metricbeat/mb" +) + +func init() { + mb.Registry.MustAddMetricSet("kubernetes", "state_resourcequota", + NewResourceQuotaMetricSet, + mb.WithHostParser(p.HostParser)) +} + +// ResourceQuotaMetricSet is a prometheus based MetricSet that looks for +// mb.ModuleDataKey prefixed fields and puts then at the module level +// +// Copying the code from other kube state metrics, this should be improved to +// avoid all these ugly tricks +type ResourceQuotaMetricSet struct { + mb.BaseMetricSet + prometheus p.Prometheus + mapping *p.MetricsMapping +} + +// NewResourceQuotaMetricSet returns a prometheus based metricset for ResourceQuotas +func NewResourceQuotaMetricSet(base mb.BaseMetricSet) (mb.MetricSet, error) { + prometheus, err := p.NewPrometheusClient(base) + if err != nil { + return nil, err + } + + return &ResourceQuotaMetricSet{ + BaseMetricSet: base, + prometheus: prometheus, + mapping: &p.MetricsMapping{ + Metrics: map[string]p.MetricMap{ + "kube_resourcequota_created": p.Metric("created.sec"), + "kube_resourcequota": p.Metric("quota"), + }, + Labels: map[string]p.LabelMap{ + "namespace": p.KeyLabel(mb.ModuleDataKey + ".namespace"), + "resourcequota": p.KeyLabel("name"), + + "resource": p.KeyLabel("resource"), + "type": p.KeyLabel("type"), + }, + }, + }, nil +} + +// Fetch prometheus metrics and treats those prefixed by mb.ModuleDataKey as +// module rooted fields at the event that gets reported +func (m *ResourceQuotaMetricSet) Fetch(reporter mb.ReporterV2) { + events, err := m.prometheus.GetProcessedMetrics(m.mapping) + if err != nil { + m.Logger().Error(err) + reporter.Error(err) + return + } + + for _, event := range events { + event[mb.NamespaceKey] = 
"resourcequota" + reported := reporter.Event(mb.TransformMapStrToEvent("kubernetes", event, nil)) + if !reported { + m.Logger().Debug("error trying to emit event") + return + } + } + return +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota_test.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota_test.go new file mode 100644 index 00000000..61c1edf4 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/state_resourcequota/state_resourcequota_test.go @@ -0,0 +1,39 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// +build !integration + +package state_resourcequota + +import ( + "testing" + + "github.com/elastic/beats/metricbeat/helper/prometheus/ptest" +) + +const testFile = "_meta/test/metrics" + +func FLAKYTestEventMapping(t *testing.T) { + ptest.TestMetricSet(t, "kubernetes", "state_resourcequota", + ptest.TestCases{ + { + MetricsFile: "./_meta/test/kube-state-metrics.1.7", + ExpectedFile: "./_meta/test/kube-state-metrics.1.7.expected", + }, + }, + ) +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go index 28d17a2b..fd9495b3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/system/system.go @@ -18,6 +18,8 @@ package system import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/helper" "github.com/elastic/beats/metricbeat/mb" @@ -73,23 +75,22 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). 
-func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { body, err := m.http.FetchContent() if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error doing HTTP request to fetch 'system' Metricset data") } events, err := eventMapping(body) if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error in mapping") } for _, e := range events { - reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + isOpen := reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + if !isOpen { + return nil + } } - return + return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/kubernetes.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/kubernetes.go index 74377872..2f208cd8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/kubernetes.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/util/kubernetes.go @@ -92,6 +92,8 @@ func GetWatcher(base mb.BaseMetricSet, resource kubernetes.Resource, nodeScope b options.Node = kubernetes.DiscoverKubernetesNode(config.Host, kubernetes.IsInCluster(config.KubeConfig), client) } + logp.Debug("kubernetes", "Initializing a new Kubernetes watcher using host: %v", config.Host) + return kubernetes.NewWatcher(client, resource, options) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go index bc9ebb59..b4a9589c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/kubernetes/volume/volume.go @@ -18,6 +18,8 @@ package volume import ( + "github.com/pkg/errors" + "github.com/elastic/beats/libbeat/logp" "github.com/elastic/beats/metricbeat/helper" "github.com/elastic/beats/metricbeat/mb" @@ -73,18 +75,19 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // Fetch methods implements the data gathering and data conversion to the right // format. It publishes the event which is then forwarded to the output. In case // of an error set the Error field of mb.Event or simply call report.Error(). -func (m *MetricSet) Fetch(reporter mb.ReporterV2) { +func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { body, err := m.http.FetchContent() if err != nil { - logger.Error(err) - reporter.Error(err) - return + return errors.Wrap(err, "error doing HTTP request to fetch 'volume' Metricset data") } events, err := eventMapping(body) for _, e := range events { - reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + isOpen := reporter.Event(mb.TransformMapStrToEvent("kubernetes", e, nil)) + if !isOpen { + return nil + } } - return + return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.yml b/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.yml deleted file mode 100644 index 28c2532f..00000000 --- a/vendor/github.com/elastic/beats/metricbeat/module/kvm/vendor/github.com/digitalocean/go-libvirt/libvirt.yml +++ /dev/null @@ -1,59 +0,0 @@ -# Configuration file for c-for-go, which go-libvirt uses to translate the const -# and type definitions from the C-language sources in the libvirt project into -# Go. 
This file is used by the c-for-go binary (github.com/xlab/c-for-go), which -# is called when 'go generate' is run. See libvirt.go for the command line used. ---- -GENERATOR: - PackageName: libvirt - PackageLicense: | - Copyright 2017 The go-libvirt Authors. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - Includes: [] - -PARSER: - # We can't use environment variables here, but we don't want to process the - # libvirt version installed in the system folders (if any). Instead we'll - # rely on our caller to link the libvirt source directory to lv_source/, and - # run on that code. This isn't ideal, but changes to c-for-go are needed to - # fix it. - IncludePaths: [./lv_source/include] - SourcesPaths: - - libvirt/libvirt.h - -TRANSLATOR: - ConstRules: - defines: eval - Rules: - global: - - {action: accept, from: "^vir"} - post-global: - - {action: replace, from: "^vir"} - - {load: snakecase} - # Follow golint's capitalization conventions. - - {action: replace, from: "Api([A-Z]|$)", to: "API$1"} - - {action: replace, from: "Cpu([A-Z]|$)", to: "CPU$1"} - - {action: replace, from: "Dns([A-Z]|$)", to: "DNS$1"} - - {action: replace, from: "Eof([A-Z]|$)", to: "EOF$1"} - - {action: replace, from: "Id([A-Z]|$)", to: "ID$1"} - - {action: replace, from: "Ip([A-Z]|$)", to: "IP$1"} - - {action: replace, from: "Tls([A-Z]|$)", to: "TLS$1"} - - {action: replace, from: "Uuid([A-Z]|$)", to: "UUID$1"} - - {action: replace, from: "Uri([A-Z]|$)", to: "URI$1"} - - {action: replace, from: "Vcpu([A-Z]|$)", to: "VCPU$1"} - - {action: replace, from: "Xml([A-Z]|$)", to: "XML$1"} - const: - - {action: accept, from: "^VIR_"} - # Special case to prevent a collision with a type: - - {action: replace, from: "^VIR_DOMAIN_JOB_OPERATION", to: "VIR_DOMAIN_JOB_OPERATION_STR"} - - {transform: lower} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile index ef98d38e..f363b845 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM docker.elastic.co/logstash/logstash:7.3.0 +ARG LOGSTASH_VERSION +FROM docker.elastic.co/logstash/logstash:${LOGSTASH_VERSION} COPY healthcheck.sh / ENV XPACK_MONITORING_ENABLED=FALSE diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/logstash.go b/vendor/github.com/elastic/beats/metricbeat/module/logstash/logstash.go index 6e5bd523..5f221b02 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/logstash.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/logstash.go @@ -19,6 +19,7 @@ package logstash import ( "encoding/json" + "fmt" "net/url" "github.com/pkg/errors" @@ -158,8 +159,25 @@ func GetPipelines(m *MetricSet) ([]PipelineState, error) { return pipelines, nil } -// GetVersion returns the version of the Logstash node -func GetVersion(m *MetricSet) (*common.Version, error) { +// CheckPipelineGraphAPIsAvailable 
returns an error if pipeline graph APIs are not +// available in the version of the Logstash node. +func (m *MetricSet) CheckPipelineGraphAPIsAvailable() error { + logstashVersion, err := m.getVersion() + if err != nil { + return err + } + + arePipelineGraphAPIsAvailable := elastic.IsFeatureAvailable(logstashVersion, PipelineGraphAPIsAvailableVersion) + + if !arePipelineGraphAPIsAvailable { + const errorMsg = "the %v metricset with X-Pack enabled is only supported with Logstash >= %v. You are currently running Logstash %v" + return fmt.Errorf(errorMsg, m.FullyQualifiedName(), PipelineGraphAPIsAvailableVersion, logstashVersion) + } + + return nil +} + +func (m *MetricSet) getVersion() (*common.Version, error) { const rootPath = "/" content, err := fetchPath(m.HTTP, rootPath, "") if err != nil { @@ -178,12 +196,6 @@ func GetVersion(m *MetricSet) (*common.Version, error) { return response.Version, nil } -// ArePipelineGraphAPIsAvailable returns whether Logstash APIs that returns pipeline graphs -// are available in the given version of Logstash -func ArePipelineGraphAPIsAvailable(currentLogstashVersion *common.Version) bool { - return elastic.IsFeatureAvailable(currentLogstashVersion, PipelineGraphAPIsAvailableVersion) -} - func fetchPath(httpHelper *helper.HTTP, path string, query string) ([]byte, error) { currentURI := httpHelper.GetURI() defer httpHelper.SetURI(currentURI) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go index fb07165c..75b4a3bc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node/node.go @@ -18,8 +18,6 @@ package node import ( - "fmt" - "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" "github.com/elastic/beats/metricbeat/module/logstash" @@ -58,23 +56,6 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - if ms.XPack { - logstashVersion, err := logstash.GetVersion(ms) - if err != nil { - return nil, err - } - - arePipelineGraphAPIsAvailable := logstash.ArePipelineGraphAPIsAvailable(logstashVersion) - if err != nil { - return nil, err - } - - if !arePipelineGraphAPIsAvailable { - const errorMsg = "The %v metricset with X-Pack enabled is only supported with Logstash >= %v. 
You are currently running Logstash %v" - return nil, fmt.Errorf(errorMsg, ms.FullyQualifiedName(), logstash.PipelineGraphAPIsAvailableVersion, logstashVersion) - } - } - return &MetricSet{ ms, }, nil @@ -106,3 +87,11 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { return nil } + +func (m *MetricSet) init() error { + if m.XPack { + return m.CheckPipelineGraphAPIsAvailable() + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go index b2b24cfb..37af1279 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/logstash/node_stats/node_stats.go @@ -18,8 +18,6 @@ package node_stats import ( - "fmt" - "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/mb/parse" "github.com/elastic/beats/metricbeat/module/logstash" @@ -59,25 +57,6 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { return nil, err } - if ms.XPack { - logstashVersion, err := logstash.GetVersion(ms) - if err != nil { - return nil, err - } - - arePipelineGraphAPIsAvailable := logstash.ArePipelineGraphAPIsAvailable(logstashVersion) - if err != nil { - return nil, err - } - - if !arePipelineGraphAPIsAvailable { - const errorMsg = "The %v metricset with X-Pack enabled is only supported with Logstash >= %v. You are currently running Logstash %v" - return nil, fmt.Errorf(errorMsg, ms.FullyQualifiedName(), logstash.PipelineGraphAPIsAvailableVersion, logstashVersion) - } - - ms.HTTP.SetURI(ms.HTTP.GetURI() + "?vertices=true") - } - return &MetricSet{ ms, }, nil @@ -87,6 +66,15 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { // It returns the event which is then forward to the output. In case of an error, a // descriptive error must be returned. 
func (m *MetricSet) Fetch(r mb.ReporterV2) error { + err := m.init() + if err != nil { + if m.XPack { + m.Logger().Error(err) + return nil + } + return err + } + content, err := m.HTTP.FetchContent() if err != nil { if m.XPack { @@ -107,3 +95,16 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { return nil } + +func (m *MetricSet) init() error { + if m.XPack { + err := m.CheckPipelineGraphAPIsAvailable() + if err != nil { + return err + } + + m.HTTP.SetURI(m.HTTP.GetURI() + "?vertices=true") + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/Dockerfile index ed9b1cb7..6cefa99b 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/memcached/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM memcached:1.4.35-alpine +ARG MEMCACHED_VERSION +FROM memcached:${MEMCACHED_VERSION}-alpine USER root RUN apk update diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/Dockerfile index 22ccbf5d..f103b04e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM mongo:3.4 +ARG MONGODB_VERSION +FROM mongo:${MONGODB_VERSION} RUN sed -i "/jessie-updates/d" /etc/apt/sources.list RUN apt-get update && apt-get install -y netcat HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 27017 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/fields.go b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/fields.go index 9394cc66..d58cbd01 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/fields.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/fields.go @@ -32,5 +32,5 @@ func init() { // AssetMongodb returns asset data. // This is the base64 encoded gzipped contents of ../metricbeat/module/mongodb. 
func AssetMongodb() string { - return "eJzsXV+T4zZyf99PgXIebFfNausuqTxsXVxln+9yTnnPjr1X95BKcSCyJcIDAjwAlFb36VP4R4IUQFISpZlVRg/JeUdq/LrRaDQa3Y236AkO71HF2ZYX6zcIKaIovEdffND/8v13X7xBqACZC1Irwtl79M0bhBD6AEqQXKKcUwq5ggJtBK+Q+xGSIHYg5OoNQrLkQmU5ZxuyfY82mEp4g5AACljCe7TF+jugFGFb+R79zxdS0i/+9w1CGwK0kO/NaG8RwxWEKPVHHWpNQPCmdv8SAWrAOlSVBb1yfwhHCEfRPEmFlWz/EhtrZLxwTCcgwhnSNIlUWmwDJPrTl4j/DDGGOFtB9EE+wWHPRTH42whU/fkeK7zGEgzpDlZ03I6l5cb/YyemGQj0/11y7GpNGDaD8w0qvCgwK8Lpm4FLcYXpSpEKVo2MAqScbU9D91HTRHtM9ApBmjbacIEoz58kIgxVJBdcQs5Z0dOnIaqcN0wtiok11RqEFpkGYyAi2AFTckJM+utRJMP1hRIrICQmABcJkY+yOINNw6oWuJe+Frweb470jzDGJmAJhH9tp6GFNmMuQnh7QRTcUoZmwFOFaFFeX4oduBNU+h8NCAJy6bWvBScaxrTg3BDz1rzHs+Sq70TkocAnyBsFxYRwtqAqLlIadpFwsHzySqWHQHkjpF6kfD9TUB7bdQSlOZYIe1hYPmlPyYPVO82E6AiTINQ1JGcpa+Ex2KOC502l9Xye1Bys6wjNY3GjzFuATV3gpBG7SFCGspbTiTJyiK4sIzvKPBkJqPjuKjIqgMI5MnKIriwjg26mjHJeVVjjvYKUrLk0YvI+ph9unrhacFcS2BGqYwPfHT4uPiI5EqgWfEcKbScZ4jsQOwJ7DQejGgtF8oZiYY9+LcIV+lgS2c5wjyyRqOJSoZyzHASDAu2JKs1P0Y7TRltkQ70ldtEpDO+2GV//lknyT1itDwpmq8yGiwqr96j/o4kjV5w6YQq2IMaJaH6vC3PmiTT62w2hcF10hBXw6QZDzKEcP9Y2VQaflLZSZ1Lg698gP/vXUnGBt1eeBSYN/axar6q4ukwDNZpsFGYHQqYCEeecJiv8GxfzjhdpGoSdRcP/3qpAthEAGSUybufPYY411YWsJTUjQSjAFtWQVmQ2Hja5m8TN88ge82sXc1MlVkjARhtSpEpzVhDGhXKxHr0bgd0a/Jnr0UYcH7UrqjDL+1Gn1pEnzLJHOHtACj8Bwohy/oSwQqVStXz/7l3Bc7lyAcxVzqt3FWYNpu8EbEAAy+Gd23Xf2eCpRt7Id//iQqnmv1bHchrbmvw2PluBJjyFX6DmQknEmZGfllvMazg+wn8swQE1fo4LfrZeBhZgCGrUMu6I6PkBnJdoh2kDeo/H8d3fMGhn2oLVhNUgXnXk3yAs0R4o1f/fIGm/usGEQuFdN87G4ilDxlZ/cP/rm5UjI0t9MO2PYM+I7Vf9iEbIxpfRKnMs05HBLLtzxmr5Hx9tyrAQmUmgm6RNiOnaGN2QthVH9CsTJmcQAT2dROvibbcCtlgNY853w+C6IbTItBW7Vw61A50NTyx3xiFj9oiQ1ZzfPbNjUeDPnLlifd+TV2iHjOV3O38bwmIA7oK3Lagsr4qMEgYZr+9XSTWjFEuVgRAjp7l74JLHhrgb9vQB7Z75q7HAFSi4Wx0tuVR37Zvaa6675U5mFZZ3rJ9ml0gH6O+Nz2hI6X6YJFJlPvx0t16c4TJ+V3E3PNYkSv8+eBN8Q+jdujUCaipBZdq9EWtyt8sw5NMG+O+d0xKwUGvAd+vteEZtWk5Wc0kiidJ3w669nLpz5bVzea/c7UusZHVoBLkXDt/EaNgMzDdz2ZssGWAKEyZtVpGALRYFYVuf52lvkjErUNNPL0IzLtMUqaDIeHONLOePsctQg9ndj5d4BxYB4o1CkrDc3s3ala49jxyk1ByKXl7YkAtew+lWbwYHKcnr8Twrx6jm6SrjmWY9Lno0ra+zGEBuGroJCKHbXDUtcG6ooO+/++8GxGH1k/nPFeMfLUIkQSHFUS1MWiNywGOs93xidu5iPJO5Rzvm4+QEXWzQLgDZWwK+cotIVGFitM2nmOeUzEghTeQlXJB2YXJWunRHhHOzCLWBqXhBNiS31Us1VgoEO5LvlMmxCbJpP3tZg9PlCLtx01bEhsaeAZkfeKymRzUivpquC80PjNaHtsxjwne5PUg37nRhSPqS5ez1oleMFpFNfjGkkfVQTQpMCdpo5mC2DVvsw2sQrv6P9VLDukv1U5eUIZztMVGr6io1Va34ccUbpnx2j00kp5S4RHJr0zRPNrUHlVgiWWvmahAbLiothi2oH7FUfzKyaqURw43sDmXl5lKs0VdkBSu0/xptBWAFQg/K0O8marmsdK5V0BXTzwSbliWsxZKTDYFiGfbclnyt2R9mdZmJ7gMPWBw4dlgi7T3Jhiq/KPYOL1KlAFlyWmj/IhTZxGpuR1tqIf+F00LaXA8Q0uzAEnYgMDU0zXJ29S96K9TG8BDy3NvOS8wKChI1Uiu8mWpMg5VvKJ66ymWOWYZZkXFRjNxwLKvFvvrOpTlqU4ck17uV+5L7U44Z4+0yt148Fyrg2coCM5u3PqXPOWcbSo7Ty2/CJzCnB+HqtHBmlGYeMpvpuNwm43NhzTHE9D7Q8rUq2CaLInmQCqpztIpBMRmhX1bcZjREFFTSI0BFI8LS03aVva0pZgh2mDY4tjse89K6Bjd3Rc7nJapOAmrq/O2ldcml9CIBVDtP2vxqpQpG9OfvdDpw+GVc14CFydfGlHoHwKeyywdTEYdUySXYRYYFsC8VqsBaEVNSbsjp0+bJxjGx5MZFNUNcqJ+KvsOC8EaG7TX0TjGUnEdzbnDAb0OjB9MUR1ODhAOZ83yWG1cjdVZHc47CQ5La3VqGXq71h6ZEcSIxDWshUjIvoWjS8UY0a6LQjMkKR2Wg8mpsyJlcoF4O6J6LYZOMC2nCp5w2kuxioewLyGqgWfRG50Kiy1LcYEIbEd1NT6AaOBVNktZytoCwrBZ8K0BOr5CFVXr5GXjhOt0aEQpQpw39idQE4OKwDKmNgDE2Z2puwyTZMkyhyGxB/KgST5LrtpapvXGSlCwb0wYsK/g+dnfQkVpzTgHHvzNQ4YzoXXuD85ToLMV446iOGq5rGpvGJR0Z7bHocbzHYq8G/GG2PWOETg2vKd+e69FgpaCqlcwUz9aQ8woyGzTCIqWxM2dyjVVeXmAeZwbxB7IzwuhJ0N9W6ZOY40y7/jP9W/+Za6xTkaw+3xOmbSbrKHHscZK3UoAC4VxwKY3777Paknz2ryDjYcvr8zIZzoxEMs3qdLeAR+sluUbCyNWZpuuk+6bhXFl9DSC7SUsfptfNZnNGIu0MjD5KZkeQx9ii5kceWI4kb0QO7pdoDRsuIJwRTQiY8r2rsNXR+IRETrErR7jrEoL2+GDOxgLnT8HKt1+86Hx3Ay3wVwu9+18vZcJm8dOhrvBIR4trwK/wJ1I1lekE4CPH
DqjtyRIUaOfchBqU7+/p2bPr98GEYIhEjJuLlg3ZNgKv6VHCRJ/jm3LrJyvkNufMtOjw/z0+WZ0nTxTBNNNL5np+hB/GrcxUWta89VDVqVthNN8VsPk/mfc1Lic2coabke2UPtgs5sm5IUx7hPVhfgRv3rTcSvOHm7FLt8HF5E4wvnxdv71b+4Zu2MDWPqB9SfLSNJ8Q8I8GpLKBQ1wUJmcTU3dBNvQl4rlk3QdL07iqbx9mqgD6zPzNY7l+Ts6lnncibKTdVfAEFzrHvL1ML7L1HkaW54yVKaAAIScCz9f3h+0VmlsiIJHDk95ZawGU49OLBE4/nD/WAt5uQOXlo95bt6BtCAhovWeNQwZXT8bBsZd4iDDF0S/fftC6RirtzfanSJWCN9uyjufwzdkaCp7f2qx2rGrWobBcVlBxcfAXbi7hxgrOyu0eLN8R62l2g0BD/LJ0yN9iJjKXl5pIe8DWLPbbew42uXM47lzj1DU3uqLqhtfdJynvzNn8nHTZysIf0p001oc2UOYO9ZFOuAuoAXqRvsHDMPL0MFgKfZmdKYc3MSG4NoyrtgHgSgIWefkmJolz+gCum/wJVAafStzI62WHRhPUgthdXkL+pJ2vEszdhilLNbl4vFGmi4rJTkKyIQqv6QFRLLZ608y5KBCmlKfUqnNprF9/IwbDuKSZMdfsG+8woXhNI9hHUndd3srVsY+hi7E09QaGGubuX5wX08b32yzdjfNwXezRdSI0rf4UdUszccqamQv/jAlLDoH15IPGAj5NtWVxxCfG2ne+2boGtMb5k55hVrSXLra3d+gOn8BQr55yGMaKK9TJvUE72skW1MH1kIumhTIf671pDjNLLQR7MpoA2V0IOqg+wkFMzYAgsPNKVaxXW1C/dL/7gW34V1+fnKdI/gkrZy1ON1TxjrCzZYISm7essbUIRRh9c/NMmB1uxOpqnpqx3g/XYyfOSivgMB8wyU+SsQ0R0j4kIRWuYr70xbbB0/a22AzpsucBC0pAqq8DM+6uAyJrPMkFxbdmQo/oeKBYXc7BnrCC769kmguycd2E0RrUHoAFE4FZYbkZwT/lmoLK2KKPb310fX+7Pb21uJNYTO15pDbbwon8YW6hXVgCZE7BONeekTkQ8rAc1ri0PlZVtAz8Cuo/QdkOzr7Z8GTxhNHqxXz8tlsPUdfx8H8Ie163QUe9J9WcuHOU6ShszpAPQamuvQKUSvubWsJBDYYtQdYHTmAmzV9pDeYI28bsRB2Gu7I7oqYNurtU/ywl4LM4jNK1lEaXypD9wt6pfpbsBwqgf/IbbwTDdED4dMNB8WKO2fdA8aE1tThVTlgLUmFxsPFgpU1CfbBqnUq4mn6n4NO5DorWiEj1wgxuDcfHO4w1XXoeQj4l5XuQKslhwAtJdwp4GbxssFRxXqIKVgIuBOfDxxbO17L4ru4QfumzdaxnYQAP738o3vp1cg8adx7n6Vk84vUFaeSZs5xcfVGNdbvoUgr7sz+pBg9jILzmTaq2yA7SHtmIa6Fyooo6Ua1KfvRoCprM9J01P3/RuxYuCmGySjfhMpxEZU3LdXD9NDRb6fOtVwgjpXSI6CI4PxJpD7B+MBQf7BhU3A1eVEoyREYiYx2juqRGXYwh+mN71J+BR0DOdyAI295i7rrR2jU7PokBvGvV9Lfi8ojc+fWxG/sxajnC8ocnxvfsFhJ0IL+0cmuxOgRzgd5MliYYOxed6f/U1L+/hRxdTiP553xdbOHdXBP9yFMCxGJNFIhbyM8NNSUzj+jqIvN40mfWGy3Qx8Jo+jyVKm6yGPuQ0raWU7rG+dMNzVhna93YUxreYrz9bjATYcNKwFSVN/GFBruBsbZuePQfaIOpnAP06rJshzrSwcC2zrmOGxnUp8u5IKq7y/C1H8FXT3+Ndew9SEwJHk5yjVVpQ8kkh1X81xPi+8H1U/Bjx5/udUHiE4H5O+xIzH1GXN2HsL28PTX0M5eSrCnYYg37AJ+56ZeIi+gbcF0/sXiKTlL/5grPEh4m3cRBUJ5jmkVOU+cG/n/UBF2kX7qXA7v7w7YhoMM6EV7EUoJQciVg21AcbzhzuqC6V5odXTeOuUevBS+avENsF1ZceB7eHgt23CL9cniO7qXwKrk8tEpeDKuRR/V3l+PSRC8Fpvc8fQRaamEG+uYpnwIxirXrprdYpOn7fhfY8OKua/lHWM5Nm7sAAMKRR7pcKhSh7qLJpDj7t0hjszAdk3JorrBlD1qYBqy5+5IBcpd+41qYmmK9KF1Hk7CcNgX0L0NlCZQiCdJsdOiPnElSgG22Ywv/eOwVUIQe2yyzR3PxURT25VxTTPdJtTdcBVbNUb+sQNs9lSvIcyDQhpkcki47LqpFoZzjVHPMfOZOmi+T+TFSenEpb9/aNGNz+qI0zoobvX/DfYKe9MXie0nbDtI55XIycxA+KYFjLz2dH4emWJned66zY27m6tQlXAKus0bi7aJPX89mAvUSnUwlqk/70fOp0UWSntrFP1ps9hOjh0DFOUN/Y+TTux8Ja0azC7eQbXBDb5YcqUdEdkTf9tBUbqGCyKewQAv9jLfxheh+bZ7b1jo+7BPpKcYsp4q9PYZ8G2ZTMWZoEMm+VD2TgXCuyA5cVcFUO1zK15hmlOfDAtUlUmg12e6N85DNuQtiaK/MUSBaYDyZpz8rQ99oQpeLnwve5eK3jenbuTKpTa4vvbkeaw1aCU60P/L8KVWEYIrXiUSmCosekFaHHaYmBYj7xWfVwR2CRuTj9svMtIkakU+6rGW2gKKVgGbcAq0hxy43Gpv5jzM/XqrSm/PEd2bVZcyuypgoc3TM7TFRvjG7SdTT+m0S9R7sBtZUrrd7NxUrAbgwrqzWj95fTOqGkEdJsEMxOAo3E0RUBL5bo+eBHrxUNGNDyZi60PT0D/q8fl682YybFHOt32iscOac31stSOcRQeG97uDWHjt2+glD0nfrbTdyCcevDoSfF7d2B+vWNLWye6Dlo+cnBi3IjUi4OJLIy12PJ7I2m6PnW4WzORqfpCBO97TYSf/b7vkL569Zz0YbAsB5ae3/HzT9bx6sT+adnj9UvIBvInLX3Nc+Fhr83kZErc/w0HoYD0HtygOqQGEzil7GiR5TPfoOhW048YD2D+gX89u/uzINAbIWIO0xGwsoHrruhtoDUt1fXGG5+Zf2OyM1AGYiVpa3Fc6No2uvElYWFZIl39vTbKy6x4hmjyVyvy38VUvXMV+TORZAb+Cu3f/8UaWHay8oDBDzL/ZdNdnvCx7tJOudoJYk2oMAVAItbD8q30jcXC/PZKORxzzkTdVQbFaP/k7Qv6zzXPveSsjJxKgF4MLscqdOW1JanuLJFXJ2XSRPfpe8FtbXzVSLlZndgvrEYiUXZxP7ZUlif7+MWLCsLhRYQOlCaQWULhRVQGkJOTVyESE1chEJNXIR8TTyUtkMrMuFIhpQu1BSA2oXCmxA7Qy5tZTSYd9XQ/hqCF8N4ash/P9hCLtT0aspXIrYqymcR+nVFKbJvJrCm5vCChTOtGP4agmXIvZqCedRerWEaTK
vlvDmljDW/wi9WsFXK/hqBV+t4N1awTcxcvE3B545qZGwZ01m7Jqc+ecSlMCbDckf2uTGB9Mehux8KoTNDo4mrAXbTqNePl/mjtd1zJnF1ZX7mA5zrdrnEPoTMDPLn9dyRbEClh8Xs5+t9D+1LXVa0u1VapsBaB4135c89oSMyXQdvoDrkgh8OyElSH7qOhKAC89v+oG4C+bHzE3OqzVhUDjuD8Nr5THV0fiu9sj+keb0Ml2CbCbXCamLFboUTlcvPfH2+EuWsAN4KxGPZIydLWPXou4lS7mFeCs5+wHPkW/SLiYekT4/KarfCba1hIF6aNN9qGfneHf+gQRxrbKl4/7zerD+UyZu5+kSva3xjtL0Br2XA57WJfPEyI1Y88/NJxnqg4/SnWTINOa/Rk1UjCM72M0my3bDvhFvdrCb8eZeGLoRc8fvGV2XO2dBb8Rda6+JlE1XxtaV8izAYdKu+y5sR8V7i1v2frPvz8rKd0KaZfCjdGM282UZ/IDLSdu/xCq7qe0PmJuzDSw4hTfdBgI25+wIC7J52x0h4HPW5rAgo7fdHAJGZ+0TUcIpn23mPmFLQJfaH0zjA9tSMyzQd69XmWplX9HhMJ/Xv2AdS6m/fJL+RFQJAv37vyEu0L/+/gEVUIN98IczVxChsNiCQljkJVGQq0aAKUJoiw6ilIOHuxzjOa9qQiee3uziJZIUwNSqWl9JN7sQ4S/ffrClrbDFtp77qw/fff0QFL7FKrqjhCf52hGhGkyvwlbHVZQdvvGjd+rZsZXeCSd5qnBdQ3GLmbIjOfhRJmMzFS/0+s7Vo/i2Qo0E6Si/deNsCAX5YMOS7XvalDwBNW3D1wn7pP+Sql/uNxDgG3TgjQgCBvHrHP/3yUnI9kSVmevQ/jJmxDamaG1BzPraj5m6DRe+wTxh23g9mwm9ZWucP0lbNBz39tecU8AndhH7KBpA+9K29hZgqsSGz+9j36Sr3X5cYawGL0CJQxK6e1MuA7YlDGL9xCz2s97w+BZJ5YyuLcyzZWyuMCl43KNrzWPQIIdmfN/cEwFFpsg20XnpjM3zV4UVkYrkMthC/67H+aiHSeKzn8mWP5z50nIlMJM42vJonIEZTCQZCQcN9hJidr+tuODxcbMAVrxJvVc5cb86uy71r2EJtVcaG/gOmJviaIh7rHnQrdCT/AmU7FqEzMHtm22Yn94Mu90terCn3o/GxXPphrl3Oks1DOpn1QyL/TTFMKifUy9C0COHPJxHD43XsXtmNFMabdoFwY7kwebZfuNc41fhT6RqqmR6AZoj7qk0g5n8688Hi8exbd6tG8Wv3ZwXA/7XoI1V2I6ov18l5yuICxGhDi+SrfaBCAPRcufK5mfwpVVYmnY1N1vZZki7ss1L2CchNRvFjaH696Gm1/cQrbENiQDC9fC6USfxeqznZNCeazztRo9L2ypo255nO6/4XLtpLNNLWaD2lt+8kRou1Tlu2EthodMqa2X8KmjXbLQVCervZNmGUMhe1NT4/Uwjm7GdbWgjy/OBnyxsM95kF59+UtDNwM1rmtSuyBxHj6PXwaZHmw/twPIbQjuwPAotGojo3rjOjDKkumdfHs33T2j3QoSKu2m2/Sa56X1p+7M65+JYcLzf33PDRZD8aKI1vjPOhw/f/rz73YWRj/SavDT45zvY+jdXXQ5WuvdNG9ksj1rre2YMWPPcvVuurVCTDJrDz3H3+yU4jF1bha3w0VeV/NoyH9xt+Ldnpem2GyUsax9/IGzrXjL2+vv1QJm+a3sl2WaYR1FM+8FryWmjXM/nB32aPe4DjR6dOjyaM9kj3oEJQVbyMdVQ1fVJRmtQeqZ9c2ib6DunO7QZ4Xrz4wawnZ2sXL0kvQxRDcLtFbADNvIyq3nK+npQ20h50EG106aHTpPMFeZgd4uSVZw/aTZzXtUU1JjjiqXKNoQRvcCSDCbyFma1s1bRZ7o9smLIz4Rl7wL/VzLo3QBvBVBzDx0E903IwmaZmvcl0H+l8KDei4rWutc1YGGz4Y/y2yMm/ohi3+QbMKZzd4cZAdPbx9Ft/nQgvKquc308aFsYBrwTjwYH18LJPEv/XTPfDr1psyd2mKa13f3sNjeRPozQv+0KuT7mOErWLJgzObYXYYqbUndzlJAvgHdzrPYc2emPEjVkDOihVpwrD213hH0U4UpCCEZAxmz0gmZxnR9PviFKZoRldiojLceXQN6+KmXT5s2g1ibx3AT6CrQv9XEPB318TY5HCTSSPGRpWjKE9X9FWGEyfBBu3WbGC0AN044JRiXg3QGlw1SUu5a8uXYItTXdNMIkqRQEbxmXZCTQDVjQQ3Yze2e8XM+lq9bSnqzX2DVsuGtlL/MSioYmYl1n6vv4G60XhKZ+OHov2ByHur1xkNPUXoRPGLsdFoQ3EtUlljYwG+xuLiibNABRimkhzYuKFQnfDy140D32A01OsUuv8vrjjlKuUb7VrVhFaff5mz8hGEa11fGOe+85F0DcrB9L0n7XPjg2EaIVUGeUb7N1s9mAeBY5WedeI8HCeffuKnTCxPrPrxWmFIR/Ya21Tu5o4/UsdPumA0p6y21zbJ5NKjhXDab00J4pBzJBfybJm1SE5EEqMMXLUFjXt4AdycGuqA3WbmuOGcKbDeTqDAGFPsmzySgQTeBu4E0w90e5OuHnzyYIeiVRCahwndWC7LCCbEdg/4ySMmBq+xhRfXjL2Vu70FyqoM3zSpLV4OVq8dXmdvJnFIt2QDyKOVD7vtzLAD7m6iUJJ1zAN/8XAAD//46oVvs=" + return 
"eJzsXV+TI7dxf79PgVIeJFXt8cpOKg9Xjqoky46V0lmKdC4/pFKz4EyThBYDjAEMefSnT6EBzGCGmD/8u3vM8iGxbsnGrxuNRqPR3XhLnmD/npRSrGWxfEOIYYbDe/LFB/sv33/3xRtCCtC5YpVhUrwn37whhJAPYBTLNckl55AbKMhKyZL4HxENagtKL94QojdSmSyXYsXW78mKcg1vCFHAgWp4T9bUfgeMYWKt35P/+UJr/sX/viFkxYAX+j2O9pYIWkKM0n7MvrIElKwr/y8JoAjWoyod6IX/QzxCPIrlSRtqdPOX1Fgj48VjegExKYilybSxYushsZ+uRMKnjzHG2QiiC/IJ9jupit7fRqDaz/fU0CXVgKRbWMlxW5YuN/4fWzHNQGD/7yXHLpdMUBxcrkgRREFFEU/fDFxGGsoXhpWwqHUSIJdifRy6j5Ym2VFmVwixtMlKKsJl/qQJE6RkuZIacimKjj71UeWyFuaimERdLkFZkVkwCJHAFoTRE2KyX08i6a8vMrACYmIKaDEg8lEWZ7CJrFqBB+lbwdvx5kj/AGNqAi6B8K/NNDTQZsxFDG+nmIFbyhAHPFaIDuX1pdiCO0Kl/1GDYqAvvfat4FQthBWcH2Lemg94LrnqWxEFKPAJ8tpAMSGcNZhSqiENO0s4VD8FpbJDkLxW2i5SuZspqIDtOoKyHGtCAyyqn6ynFMDanWZCdExoUOYaknOUrfAE7Egh87q0ej5Pah7WdYQWsPhR5i3AuirooBE7S1BI2crpSBl5RFeWkRtlnowUlHJ7FRkVwOEUGXlEV5YRopspo1yWJbV4ryAlZy5RTMHHDMPNE1cD7koCO0B1aODbw8fZRyRPglRKbllh7aQgcgtqy2Bn4VBSUWVYXnOq3NGvQbggHzdMNzPcIcs0KaU2JJciByWgIDtmNvhTspW8thYZqTfEzjqF0e06k8vfMs3+CYvl3sBslVlJVVLznnR/NHHkSlNnwsAa1DgRy+91Yc48kSZ/u2IcrouOiQI+3WCIOZTTx9q6zOCTsVbqRApy+RvkJ/9aG6no+sqzIDTSz8rlokyryzRQ1GRUmC0oPRSIOOU0WdLfpJp3vBimwcRJNMLvnQpkKwWQcabTdv4U5kRdnsnaoGYMEIqwJTWkEZmLh03uJmnzPLLH/NrG3MyGGqJgZQ0pMRs8Kyh0oXysx+5G4LaGcOZ6dBHHR+uKGirybtSpceSZcOwxKR6IoU9AKOFSPhFqyMaYSr9/966QuV74AOYil+W7koqa8ncKVqBA5PDO77rvXPDUIq/1u3/xoVT8r8WhnMa2prCNz1agCU/hF6ikMppIgfKzckt5DYdH+I8b8EDRz/HBz8bLoAqQoEWt046InR+g+YZsKa/B7vE0vfsjg26mHVhL2PTiVQf+DaGa7IBz+/8RSfPVFWUciuC6STEWT+kztviD/1/fLDwZvbEH0+4I7ozYfDWMiEJGX8aqzKFMRwZz7M4Zq+F/fLQpw8J0poGvBm1CStfG6Ma0nTiSX5kwOb0I6PEkGhdvvVawpqYfc74bBpc140Vmrdi9cmgd6Kx/YrkzDoVwR4SskvLumR2LAn/mzBXL+568wjpkIr/b+VsxkQJwF7ytwWR5WWScCchkdb9KahnlVJsMlBo5zd0DlzI1xN2wZw9o98xfRRUtwcDd6uhGanPXvqm75rpb7nRWUn3H+om7xHCA/t74TIaU7odJpk0Wwk9368Uhl+m7irvhsWJJ+vfBm5Irxu/WrVFQcQ0ms+6NWrK7XYYxny7Af++cboAqswR6t95OYNSl5WSV1CyRKH037LrLqTtXXjeX98rdbkONLve1YvfC4ZsUDZeB+WYue5MlA8JQJrTLKlKwpqpgYh3yPN1NMhUFqbvpRWTGZZphJRSZrK+R5fwxdRmKmP39+IZuwSEgsjZEM5G7u1m30q3nkYPWlkPVyQvrcyErON7qzeBgSPJ2vMDKIap5uipkZllPi55M6+ssBoifhnYCYuguV80KXCIV8v13/12D2i9+wv9cCPnRISQaDDGSVArTGokHnmK94xOLUxfjicw9ujEfJyfobIN2BsjOEgiVW0yTkjLUtpBinnM2I4V0IC/hjLQLzFlp0x0JzXERWgNTyoKtWO6qlypqDChxIN8pk+MSZIf97MsanDZH2I87bEVcaOwZkIWBx2p6TK3Sq+m60MLAZLlvyjwmfJfbg/TjTheGDF+ynLxe7IqxInLJL0iaOA8VU2A2YI1mDrhtuGIfWYHy9X+ikxrWXqofu6SQcLajzCzKq9RUNeKnpayFCdk9LpGcc+YTyZ1Nszy51B6yoZroyjJXgVpJVVoxrMH8SLX5E8qqkUYKN3E7lJObT7EmX7EFLMjua7JWQA0oO6ggv5uo5XLSuVZBV0o/B9h0LFErlpytGBSXYc9vydea/X5WF050F3jEYs+xo5pY70nX3IRFsfN4idko0BvJC+tfxCKbWM3NaJdayH+RvNAu1wOUxh1YwxYU5UgTl7Ovf7FboTWG+5jnzna+oaLgoEmtrcLjVFMerXykeOwq1zkVGRVFJlUxcsNxWS0O1Xc+zdGaOqKl3a38l/yfciqEbJa58+KlMhHPThZUuLz1KX3OpVhxdphefhM+QXg9iFengzOjNHOfuUzHy20yIRcWjyHY+8DK16lgkyxK9F4bKE/RKgFFiNDf1ETimIQZKHXAQYpaxQWozVp7W3EqCGwpr2lqjzzkqHEQbspT65aczlFStRRU3Pvel9Yrn95LFHDrSFlTbBUsGjGcxYdTg+Mv06oCqjB3m3IenIGQ1q4fsDqOmI3U4BYcVSC+NKQEZ1GwvBzJ2ZPn0YZyYPmNi2qGuEg3LX1LFZO1jltt2F2jL7mA5tRAQdiSRg+pQxxNDRIPhGf7LEe3Y+jcTuYci/skret1GXq51R8+JIojiVlYFyKl8w0U9XDskcyaKDJjsuJRBZi8HBtyJhekkw+6k6rfMONMmvAp57Vm21RY+wyyFmiWvN05k+hlKa4o47VK3n0fQTVyMOpBWpezBUxklZJrBXp6hVxYpS8/Ay9cpxsjwgGqYUN/JDUFtNhfhtRKwRibMzW3FpqtBeVQZK44flSJJ8m1W8vU3jhJSm9qbAmWFXKXukdoSS2l5EDT3+mpcMbsrr2i+ZDoHMV0E6mWGq0qnprGSzoy1mOx4wSPxV0ThINtc96InRpZcbk+1aOhxkBZGZ0ZmS0hlyVkLoBE1ZDGzpzJJTX55gzzODOg35MdCqMjwXBzZU9lnjPr+s/0b8NnrrEeOt50+Z4wbTNZJwPHHi95JwUoCM2V1Brd/5DhNshn9zoyHcK8Pi+Toc1EVBNXp78RPFgvg2skjmKdaLqOunvqz5XT1wiyn7ThI/WyXq1OSKqdgTFEzNwI+hBb0vzovciJlrXKwf+SLGElFcQzYgmBMKGPFXU6mp6QxCl24Qm3HUPIju7xbKxo/hStfPfFs853N9CCcM3QuQsOUmZiFj8t6pKOdLe4BvySfmJlXWJXgBBF9kBdf5aoWDuXGGowoddnYM+t3wcMwTBNhM
RLlxVb14ou+UHyRJfjm3IbJivmNpcC23WE/x6frNaTZ4ZRntklcz0/IgzjV+ZQita89VBWQzfEZL4r4HKBsuBrnE9s5Aw3I/Np+GBzMU/OD4GtEpb7+RG8edNyK83vb8Y+9YYWkzvB+PL1vfdu7Rv6YSNb+0B2G5ZvsBGFgn/UoI0LHNKiwPxNyv1lWd+XSOeVtR+qsYlV1z7MVAHymfmbh3L9nJxLO+9MuUi7r+aJLncOeXuZXmTjPYwszxkrU0EBSk8Enq/vD7vrNL9EQBOPZ3hnrRRwSY8vGDj+cP5YKXi7ApNvHu3eugZrQ0BB4z1bHDq6ekIHx13oESaMJL98+8HqGiutN9udIrNRsl5vqnQ+35ytoZD5rc1qy6plHQrHZQmlVPtw4eaTb5zgnNzuwfIdsD7MbhRoSF+Z9vm7mInM9bkm0h2wLYvdVp+9Te4UjlvXeKgojVxRdeNL76OUd+Zsfk667GQRDuleGst9Eyjzh/pEV9wLqAF5kb7BQz/y9NBbCl2ZnSiHNykh+JaMi6YZ4EIDVfnmTUoSp/QEXNb5E5gMPm1ora+XKZpMVotid/kG8ifrfG0A7zawRBXz8mRtsKMKZioRXTNDl3xPOFVru2nmUhWEci6H1Kp1aZxffyMG47gkzphv/E23lHG65AnsI2m8Pm/l6tjH0KVYmnoPw/Tz+M/Oi2ni+03G7sp7uD726LsSYts/w/3SHDhlzcyLf/a0JY/D+fNRq4GQuNowOuIZU+tBXxX/4RoHsqT5k51tUTQXMK7nd+waH8FWp86yH9JKK9fRPUNb2oOtqaOrIh9ZiyU/1pMTDzaXWhTulDQBsr0c9FBDtINhLYFisA2qVSwXazC/tL/7QazkV18fnb/I/gkLbzmON1rpTrGzZUIGNnJdUWcdijgS5+eZCTfciAW2PNVjPSGux06alUbAcW7gID+DjK2Y0u6BCW1omfKrz7YNgXawyzikz6oHqjgDbb6OTLq/Gkis8UEuOL01E3ZEzwOn5nwOdkwUcncl01ywle8yTJZgdgAimggqCsfNCP4pNxVMJi76KNdH3w+43d8bizuJBWvSEzXbDk7iD3ML8OLSIDwR09x6SXg4lHGZLLq3IW5VNAz8CuY/wbjOzqEJ8WRRBWr1xfz9posPM9fx9n+Ie2E3AUi7J1WS+TMVdhrG8+RDVMLrrgO1sb6nlXBUm+FKk+3hEwSm/xurwZJQ17CdmX1/V/bH1WGD7i/YP0sJhIwOVLqG0uhS6bNfuPvVz5L9SAHsT36TtRKU9wgfbzg4vZhj9j1wum9MLR0qM6wUK6nau9iwsSah2ju1Hkq+mn6/4NOpDorViEQlwwxukePDHcaZLjsPMZ+ayx1oM8hhxAsb7iDwMnhZUW3SvCQVbAO0UFL2H2E4XcvSu7pH+GXI3HGeBQLu3wVxug7r5B407jTOh2fxgNcXpJEnzvLg6ktqrN9FL6WwP4eTavRgBqFLWQ/VGblBmiMb861VjlRRL6rFRh48pkIms35nzc9f7K5Fi0JhhukqXoaTqJxpuQ6un/pma/h8GxQCpTQcJT0Lzo9MuwNsGIykBzsElXaDLyolHSNjibEOUZ0TTVNjiP7YHPVn4FGQyy0oJta3mLt2tGbNjk9iBO9a0cdGXAGRP78+tmM/Ji1HXArxJORO3EKCHuSXTm4NVo9gLtCbyRKDsXPRYV+ouvr9LeTo8xvZP+frYgPv5poYRp4SIFVLZkDdQn5+qCmZBURXF1nAM3xmvdECfSxQ0+epVHGTxdiFNGxrJedLmj/d0Iy1ttaPPaXhDcbb7wYzEdZiA5SbzU18od5ugNbWD0/+g6wo13OAXl2WzVAHOhjZ1jnXcSODhtQ5H0T1dxmhDiT66vGvtI69E0k5o/1JrqjZuFAyy2GR/vWE+H7wvRXC2OknfX2Q+Ehg4T47EXOfEVcPIewg70CN/Cy1ZksOrnDDPcyHt/6aSJV8G67tM5ZO1xnUv7nCc4T7CThpEFzmlGeJ09Spgf8fLUEf6df+RcH2/rBpFOixToQXqdagjF4oWNecphvRHC+o9vVmT9ePg/folZJFnbeI3cJKCy/A21ElDlunnw/P0z0XXqkvD63UZ8Oq9UEt3vm4LNFzgdk9zx6BLrUwI30LlI+BmMTadtm7WKTp+2532Pjirm0FyEQusf1dBIDQxONdPi2KcX/RhOnO4Y3S1CxMx6Q8mits2b3WphFr/r6kh9yn3/jWpli4l6TraTKR87qA7mWo3gDnRIPGjY78UQrNCnCNd1wRoEy9DkrIY5Nx9ogXH0XhXtTFwrpPprnhKqipD/poRdoeqFxBnj2B1gJzSNpMuaQWxXJOU82pCJk7w3xh5sdIGca5vH3rUo7x9MV5mhU/eveG+wg96Yol9Jh2naVzLvVkFiF8MoqmXoA6PQ7NqcGeeL7jY45zdewS3gCtslrT9UWfxJ7NBOkkOmFVakj7sfNp0SWSnprFP1p49pPg+0jFpSB/E+zTux+ZqEdzDNeQrWjNb9Yi0I5I3IihHSJWcZGC6ae4WIv8TNfpheh/jc9wWx3v948MFFOW06TeJCOhPTNWjyENpsWXpmMyCM0N24KvMJhqk8vlkvKMy7xfrHqJdFpLtn37PGZz7oLo2ys8CiSLjSdz9mdl66MmtHn5uZJtXn7TsL6ZK0xt8v3q8XqsMWgb8KL9UeZPQwUJWMjONMGKLL4nVh22lGMKkAyLz6mDPwSNyMfvlxm2jBqRz3CJy2wBJasCcdyCLCGnPk+a4vynmR8vW+nM+cB3ZtVozK7QmCh59MztKDOhYTsm6ln9xkS9B7eB1aXv+d5OxUIBLdCVtfrR+Qumbih9kATbF4OncDNBJEUQOjcGHvg+SMUy1pcM1ogOT3+v/+vnxZvLuBlirvEb0Qpn3vm91YL0HhEUweuObu2pZ6ebMKRDF99mI9dw+BpB/Hlxa7e3brHBldsDHR8dPzFqTY4ikepAIi93PR7J2myOnm8VzuZofJKiON3TxU7637bPYnh/zXk21hAAzTfO/v/B0v/mwflkwen5QykL+CYhd8t9FWKh0e9dRNT5DA+Nh/EQ1a48kBIMxVHsMh7oN9Wh71G45hMPZPdAfsHf/t2XaSjQlQLtjtlUQfHQdjq0HpBp/+KLzPFfmu+M1ADgRCwcbwuao6PrrhIWDhXRG7lzp9lUdQ+KZkc18b8twlVL20nfkjkUQGfg9hmA+aPqANddUCAQ/Bf33pru9gtPdpUNTlBDkuxAAdkAL1xvqtBgHK+XZ7JR60Me8rqsOcXVY78T9TJrPdeutxJzMjFqAbTAXe7YaRuUVqB4dLWcWxeDJ79zXhHr6uZQu5WZnYO6xFIlFycT++WSxP5+HrFoWZ0psIjSmdKKKJ0pqojSJeRU64sIqdYXkVCtLyKeWp8rm551OVNEPWpnSqpH7UyB9aidILeG0nDY99UQvhrCV0P4agj/fxjC9lT0agovRezVFM6j9GoKh8m8msKbm8ISDM2sY/hqCS9F7NUSzqP0agmHybxawptbwlT/I/JqBV+t4KsVfLWCd2sF3
6TIpd8feOakRiaeNZmxbXIWnk4wiq5WLH9okhsfsD0M24ZUCJcdnExYi7ad2rx8vvCO13fMmcXVlXua9nOtmqcRuhMwM8tfVnrBqQGRHxazn6z0PzUtdRrSzVVqkwGIj53vNjL1nAxmuvZfw/VJBKGdkFEsP3YdKaBF4Hf4sbgz5gfnJpflkgkoPPf7/rXymOpYfFdrB3qgOZ1MlyibyXdCamOFPoXT10tPvEn+kiXsAd5KxCMZYyfL2Leoe8lSbiDeSs5hwFPkO2gXBx6UPj0pqtsJtrGEkXpY072vZud4t/6BBnW7x+jtYN1nTfzO0yZ6O+OdpBkMeicHfFiX8LmRG7EWnp4fZKgLPkl3kiFs0n+NmqgUR26wm02W64l9I97cYDfjzb82dCPmDt82ui533oLeiLvGXjOt67aMrS3luQCHg3Y9dGE7KN67uGXvNvv+rKx8K6RZBj9JN2UzX5bBj7ictP2XWGU3tf0Rc3O2gQtO4U23gYjNOTvCBdm87Y4Q8Tlrc7ggo7fdHCJGZ+0TScJDPtvMfcKVgF5qf8DGB66lZlyg71+ywmrlUNHhMZ/Wv2CZSqk/f5L+xMwGFPn3fyNSkX/9/QMpoAL3+I8UviDCULUGQ6jKN8xAbmoFWITQFB0kKUePeHnGc1lWjE88w9nGSzQrQJhFubySbrYhwl++/eBKW2FNXT33Vx+++/ohKnxLVXQnCU/ytWXK1JRfha2WqyQ7chVGb9WzZWt4J5zkqaRVBcUtZsqN5OEnmUzNVLrQ6ztfjxLaCtUatKf81o+zYhz0gwtLNm9rc/YEHNuGLwfsk/3LUP1yt4GAXJG9rFUUMEhf54S/T05CtmNmk/kO7S9jRlxjisYWpKyv++DUraQKDeaZWKfr2TD0li1p/qRd0XDa219KyYEe2UXso6qB7DautbcCrBLrP8VPQ5OuZvvxhbEWvAKj9oPQ/ftyGYg1E5DqJ+awn/SGx7dEG290XWGeK2PzhUnR4x5tax5EQzya8X1zxxQUmWHrgc5LJ2yevxpqmDYs19EW+nc7zkc7zCA+95ls+SNFKC03igpNky2PxhmYwcQgI/Gg0V7CcPdbqzMeIscFsJD10NuVE/ers+tS/xqXUAelcYHviLkpjvq4x5oH3Qo9y5/A6LZFyBzcodkG/vRm2N1u0YE99ZY0LZ5LN/De6STVQNTPqhkO+3GKgaifUy9i0COHPJonD43XsXs4GpZGY7sg2LI82jybb5xq/Er6iZV1OZheQOaIeyrNYCb/9vPB4fFs47t1o/itm/NiwP8atbGK2xF196vB+YriQkyZ/Ytkq3kgAiE67nzZ/Ay+rAprbFdzs5WNQ7qVja9iH4UUN4obQw3vQ02v7z5atA0DAYTr4fWjTuINWE/JoD3VeLqNnm5cq6B1c55tveJT7SZappeyQN0tP76RGi/VOW7YS2Gh1SpnZcIqaNZsshUJ6e5k2YpxyF7U1IT9zCKbsZ2teK03pwM/Wtg43mQXn25S0M3AzWua1KzInCaPo9fBZkebD20v8htC24s8CS0ZiGjfuM5QGYa6Z58fzQ9PaHdChEb6aXb9JiX2vnT9Wb1zcSg42e3vuZIqSn7EaE3ojPPhw7c/b393ZuRjeE2eG/wLHWzDm6s+B2u4900T2dwctNYPzCBYfPreL9dGqIMM4uHnsPv9JThMXVvFrfDJV6X+2jEf3W2Et2c1dttNEtZViD8wsfYvGQf9/bqnTN81vZJcM8yDKKb70KWWvDa+5/ODPc0e9oEmj14dHvFM9ki3gCHIUj8ONVT1fZLJEoyd6dAc2iX6zukOjSNcb378AK6zk5NrkGSQIalA+b0CtiBGXmbFp6yvB7WJlEcdVFttemg1Ca8we7tbkqyR8smymcuy4mDGHFeqTbZigtkFNsjgQN7CrHbWJvlMd0BW9PmZsOxt4P9KBr0d4K0CjvfQUXAfQxYuyxTflyD/NYSHdF5UdNa9qoAqlw1/kN+eMPEHFLsmH8Fg5+4WMwFht4+D2/zpQHhZXuf6uNe2MA54DzwaHF0LD+ZZhu/ifHv02GZPbSkf1nb/s9vcRIYwQve2K+b6kOMkWVwwJ3LsLsKMxFJ3PEroF8A7HqsDR276k0SRDILua8Wp8rB2R7lHEa4khGgEgmajEzRL6/x48g0zOmMic1OZaDl+CeTNq1IubR4HdTZJ5hjoK8huY497NOrjizkeG+CJ5CFH05FhovsrJgrM8CG0cZuFLIDUwjomlGyAbvdkOEzFpW/Jm1uH0FrTVa0wSaVgdC2kZiOBbqCK77Ob2Tv0cgOXvlrLerJBY5ewkr6Vvc43UNR8INZ1or6Pv9F6Rmjqh4P3gvE41O6NvZym5iJ8wthtqWKy1qTaUO0Cs9Hu5oOygwYgSXFYSPOiYsWA70cueNA99AMxp9inVwX98Ucp3yjf6VaqorT9/C2cEJBRa3WC4955zgWIxPXjSLrvugfHJkK0CqqMy3W2rFcrUM8iJ+fcWyRUee/eX4VOmNjw+bWknIMKL6w11skfbYKexW7fdEDJbrlNjs2zSYXmpqac75szZU8m5M9s8CaVEL3XBrB4GQrn+hawZTm4FbWi1m3NqSB0tYLcnCCg2Cd5NhlFooncDbqK5v4gVyf+/BmDoFcSlYKSVlml2JYayLYMds8oKQRTuceIqv1bKd66heZTBV2e1yBZC14vLr7a/E7+jGKxDkhAMQdq15d7GcDHXL1BwgMu4Jv/CwAA//+2umBX" } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/_meta/fields.yml index a3b89c0c..a7a16fdd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/_meta/fields.yml @@ -313,11 +313,11 @@ description: > Reports data from the query execution system. fields: - - name: scanned_indexes + - name: scanned_indexes.count type: long description: > The total number of index items scanned during queries and query-plan evaluation. 
- - name: scanned_documents + - name: scanned_documents.count type: long description: > The total number of documents scanned during queries and query-plan evaluation. @@ -522,11 +522,11 @@ description: > Reports on the operation of the resource use of the ttl index process. fields: - - name: deleted_documents + - name: deleted_documents.count type: long description: > The total number of documents deleted from collections with a ttl index. - - name: passes + - name: passes.count type: long description: > The number of times the background process removes documents from collections with a ttl index. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/data.go b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/data.go index 8ec730da..920cf6f0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/data.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/data.go @@ -22,7 +22,7 @@ import ( c "github.com/elastic/beats/libbeat/common/schema/mapstriface" ) -var schema = s.Schema{ +var schemaMetrics = s.Schema{ "commands": c.Dict("metrics.commands", s.Schema{ "is_self": c.Dict("_isSelf", commandSchema), "aggregate": c.Dict("aggregate", commandSchema), diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/metrics.go b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/metrics.go index 4b678696..5681b160 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/metrics.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/mongodb/metrics/metrics.go @@ -19,11 +19,11 @@ package metrics import ( "github.com/pkg/errors" + "gopkg.in/mgo.v2/bson" + "github.com/elastic/beats/libbeat/common/schema" "github.com/elastic/beats/metricbeat/mb" "github.com/elastic/beats/metricbeat/module/mongodb" - - "gopkg.in/mgo.v2/bson" ) func init() { @@ -68,7 +68,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error { return errors.Wrap(err, "failed to retrieve serverStatus") } - data, err := schema.Apply(result) + data, err := schemaMetrics.Apply(result, schema.FailOnRequired) if err != nil { return errors.Wrap(err, "failed to apply schema") } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/Dockerfile index 443e5427..2051c726 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/mysql/_meta/Dockerfile @@ -1,5 +1,5 @@ -ARG MYSQL_IMAGE=mysql:5.7.12 -FROM $MYSQL_IMAGE +ARG MYSQL_IMAGE +FROM ${MYSQL_IMAGE} ENV MYSQL_ROOT_PASSWORD test diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile.1.3 similarity index 100% rename from vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile rename to vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile.1.3 diff --git a/vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile.2.0.X b/vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile.2.0.X new file mode 100644 index 00000000..84c57189 --- /dev/null +++ b/vendor/github.com/elastic/beats/metricbeat/module/nats/_meta/Dockerfile.2.0.X @@ -0,0 +1,14 @@ +ARG NATS_VERSION=2.0.4 +FROM nats:$NATS_VERSION + +# Create an enhanced container with the nc command available, since nats is +# based on a scratch image, which makes a healthcheck 
impossible +FROM alpine:latest +COPY --from=0 /nats-server /nats-server +COPY --from=0 nats-server.conf nats-server.conf +# Expose client, management, and cluster ports +EXPOSE 4222 8222 6222 +HEALTHCHECK --interval=1s --retries=10 CMD nc -w 1 0.0.0.0 8222 0 { t.Fatalf("Expected 0 error, had %d. %v\n", len(errs), errs) } @@ -48,8 +48,8 @@ func TestFetch(t *testing.T) { func TestData(t *testing.T) { service := compose.EnsureUp(t, "nginx") - f := mbtest.NewReportingMetricSetV2(t, getConfig(service.Host())) - if err := mbtest.WriteEventsReporterV2(f, t, ""); err != nil { + f := mbtest.NewReportingMetricSetV2Error(t, getConfig(service.Host())) + if err := mbtest.WriteEventsReporterV2Error(f, t, ""); err != nil { t.Fatal("write", err) } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/Dockerfile index 363ac407..c06453ad 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/php_fpm/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM tsouza/nginx-php-fpm:php-7.1 +ARG PHPFPM_VERSION +FROM tsouza/nginx-php-fpm:php-${PHPFPM_VERSION} RUN echo "pm.status_path = /status" >> /usr/local/etc/php-fpm.d/www.conf ADD ./php-fpm.conf /etc/nginx/sites-enabled diff --git a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/Dockerfile index 855cac79..addb37dd 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/postgresql/_meta/Dockerfile @@ -1,3 +1,4 @@ -FROM postgres:9.5.3 +ARG POSTGRESQL_VERSION +FROM postgres:${POSTGRESQL_VERSION} COPY docker-entrypoint-initdb.d /docker-entrypoint-initdb.d -HEALTHCHECK --interval=10s --retries=6 CMD psql -h localhost -U postgres -l \ No newline at end of file +HEALTHCHECK --interval=10s --retries=6 CMD psql -h localhost -U postgres -l diff --git a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/Dockerfile index 55b29cbb..11a2228a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/prometheus/_meta/Dockerfile @@ -1,3 +1,4 @@ -FROM prom/prometheus:v2.6.0 +ARG PROMETHEUS_VERSION +FROM prom/prometheus:v${PROMETHEUS_VERSION} HEALTHCHECK --interval=1s --retries=90 CMD nc -w 1 localhost 9090 0 { eventList[labelsHash]["labels"] = promEvent.labels @@ -97,8 +107,13 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) { // Converts hash list to slice for _, e := range eventList { - reporter.Event(mb.Event{ + isOpen := reporter.Event(mb.Event{ RootFields: common.MapStr{"prometheus": e}, }) + if !isOpen { + break + } } + + return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/Dockerfile index 10c10faa..22576e1d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/rabbitmq/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM rabbitmq:3.7.4-management +ARG RABBITMQ_VERSION +FROM rabbitmq:${RABBITMQ_VERSION}-management RUN apt-get update && apt-get install -y netcat && apt-get clean HEALTHCHECK --interval=1s --retries=90 CMD nc -w 1 -v 127.0.0.1 
15672 - The average size (in sectors) of the requests that were issued to the device. + The average size (in bytes) of the requests that were issued to the device. - name: iostat.queue.avg_size type: float diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_windows_helper.go b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_windows_helper.go index 93433655..158045c7 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_windows_helper.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/diskio/diskstat_windows_helper.go @@ -134,6 +134,16 @@ func ioCounter(path string, diskPerformance *diskPerformance) error { // enablePerformanceCounters will enable performance counters by adding the EnableCounterForIoctl registry key func enablePerformanceCounters() error { key, err := registry.OpenKey(registry.LOCAL_MACHINE, "SYSTEM\\CurrentControlSet\\Services\\partmgr", registry.READ|registry.WRITE) + // closing the handle for the registry key. If the key is not one of the predefined registry keys (which is the case here), a call to the RegCloseKey function should be executed after using the handle. + defer func() { + if key != 0 { + clErr := key.Close() + if clErr != nil { + logp.L().Named("diskio").Errorf("cannot close handler for HKLM:SYSTEM\\CurrentControlSet\\Services\\Partmgr\\EnableCounterForIoctl key in the registry: %s", clErr) + } + } + }() + if err != nil { return errors.Errorf("cannot open new key in the registry in order to enable the performance counters: %s", err) } @@ -144,6 +154,7 @@ func enablePerformanceCounters() error { } logp.L().Named("diskio").Info("The registry key EnableCounterForIoctl at HKLM:SYSTEM\\CurrentControlSet\\Services\\Partmgr has been created in order to enable the performance counters") } + return nil } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/fields.go b/vendor/github.com/elastic/beats/metricbeat/module/system/fields.go index 994c269e..146a5774 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/fields.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/fields.go @@ -32,5 +32,5 @@ func init() { // AssetSystem returns asset data. // This is the base64 encoded gzipped contents of ../metricbeat/module/system.
func AssetSystem() string { - return "eJzsfVuPGzfy77s/RWEOFhnvmZE93iSbnYcDOPYGGCBZDzwOdoGDP2SquyRxh012SLZk5dP/wUvf2VK31Lo4iB6cjNRN/qpYLFYVi8VbeMbNPaiN0pi8ANBUM7yHqyf7xdULgBhVJGmqqeD38P9eAAC4H0FpojMFCWpJI3UDjD4jvHv8FQiPIcFEyA1kiizwBvSSaCASIRKMYaQxhrkUCeglgkhREk35wqOYvABQSyH1NBJ8Thf3oGWGLwAkMiQK72FBXgDMKbJY3VtAt8BJghUyzEdvUvOsFFnqvwmQYj6f3WufIRJcE8oVMBER5lvL6Zv456v9VvuOhMTiy1DvWxBUUNyadipQDD89ApgLCQQU5QuGtj8QcyCQZExT+16Fg/mnzrT80ySiSgiNa1/npDDBF40ftlBjPgb6O4OKZ8kMZYmq9uT/gUeUEXJNFqiCgDKFcpJGOghLRYRhPJ0zQZoPzIVMiL6H1LU/DPynJeYvkoVltCFH0wRBpcg1UG6BgUpJhB201SjQNHpW47DWgCOJyLg+EJiXl0tk7jNKjmwIFSMyeCeHB6DjNMLL47DgwMT6NpVUSKo3kEoRoVKo+lBzMk7vi5LG7AJ5blH1AH46Qe4BSKwJ1RfISw4GGFwLDjFVzy/70XFKHTEMn/zt8pisUK5oZEwzY9ItCY+Z+WNJZLw21hzlGqXMUr1zPsrfTsf60VArMddf07gYvPtReO6x2QO5RsIub2QoB8pXgmVcE7lxKmC2sX7OikqdEWbfWC8pQ/vtcpMalighW52tiarxS+glynwJFHLSeuHtilBGZgxBcLYxi+evnH7pxchT6sXLZVDhy6XZQa5clGYtb9JQZTxmdZh3Zty8MQfK+Wb5QNnWIZWovPVlR0AoPXEPC37Lzfxh9HdsuolQmRkK1pQxWJIVGgeVfKFJlsCKsMxOms93r1//Bf7quvts2241VvZTa5cwiSTegCbPRj6o8q1SrgWQKLJi53TLqt1oAIuB8od2TeEDb4cI1E2r2Y3IICLcDVqV5UXwZiGRaJTmC+74Bj8JCfiFJCnDG6Bz+FurWSdS5nWi4fvXfzHQboxcOeHyYY9JlGaTnJufnfTMEO5+6BycP5YL+8dyEr9e9+uP4u18RVbrn3Z5gMI/rdtxrFst9IUy0tiCqMCRbVfUh5ihFZyHD/82WqjLKPlXaRn1sk+MJXWRLBgapr5YQoYu9JdJyEGr/WWS1H/Jv1D8e6z7l0nJ6Iv/V0XmvhbAZRL5tZoBl8bNPlbATR4IURjnTC5jNta5DtDesBg+taJ7X8vO9CXv6X4du6AXuJl40Ztw594K2X9FPDfyfRe5P/ceqjwxckrFiyYrhmw/mCYq+w/mT3j4UKSR9czByz/D9yjMv8HxfMbNWsjmxoGPH9+Disnd8OG25JkudwkbSkrY1C2eA+D1hPCN8j3k6W7waUkVJGQDXGiYoRGOFY3dMk4YK5neatPH6HcQJJHEE7vhMeLksZZSxcIwnRiRMSNkREZlkZHwecbYZge+taQajw7Q9rInQsvB2Ub331HLTcHQS3uAt81YGHXY8IHDz5RnX9wWF212BQ07UGGkhfQt2c2elFEvaRyIUlliOGOfAkV/t3bod3dveo3g+RlkcGjk4/Aob6wnm1qt7mabFSuz7hxR7BPKjE8QCR4rv7x5tWJnbK+BPRtEN2d3GovHBhjGGAuzDj68+rAboPHeJna0Jf6WodKTBOUC1TRFOVUYBbGHPMwd4Jtb9Xaa+y4V2D7tLjk4StyO7Rolwm8ZZhiDFnYyxLiiO30bT5YTkdPSZfs8NmG18TrpQJXoqVIt9BU69xig047MuJTYEfEEbFltRiDjx3K9LWzfFuam7R5e0nYSRIx/MS4hZIWSLLDq08yFbEhZcES0MBaocVgwHjL/TzgqTsSOOSyOpNONS2PSjDQw+Ywnq8XU2CjHIcVaP9eU52bTSzNQBndPHdCPFqvFj0yJ7QMY8oVeHoWIU070cUXJBTBw2mlnHS5GrgdHiBGnqsH10hL18OrDuOMxy9RmPGoew7H7OJPGTFwvabSsk9C9LF7PCI/XNNZLyDRl9HdiurVMKJ96OYH37nFFdCbdIyKKMuO6uKy5MulRQcSEskNfz2PMWYJcS5FuDgknlYErfyCy3ebwEBHJG53OqB41+FegNQ2bIWvDLWGcfzOoxOtx3hhuEk1XmEtPKgQrnPZvX//j+9YozynD2tlX2CtuWDbTyl4ufxojibkg+kRRBRsitPs6FX5rYZz+jKeSrihD42nY3SnKXTeTIHQ3SacDQ5yDwpjVpNp7+PwqxtUr8+vd5yAi0+8RoJg2mlDwi/42DMKG3KepoB2xvr2x2IaNprVtt3gTRmOl9YiBA9M+cBGjMtJi5qj9ph07r0CSeFZp3y7VBt10bK5V+CUR92Ga5fuJuObGuMK77RzLFJ42dGw6HAjv/KtbA/S2TIVCFJVZYA5ax7xIuZYqS1llEcv3wshiIXFBis0wwphTOY3jLeWrB5/f2Xc75F919ePRwFxkTd+4Nn0OmNafAmqvQ95cVwEnbpvUh0e2TTgqOz4l1RCLSLXiAQGuw3YNvJUVu9C3cIa8B/BMtBqwMQeaAM1kORtAO1N3AAyp49MhdGrv2gJNWaYsT1+2XR4mSHyI+jAunmkj92EPnPBXd1dDlbD5ifLFdE4iLeS9ce2GKeKfK/AL95IRpSGhPNMYnsNX310S0u881g6Fc3V3UWjvAnDDuG0S4rlkIiALENMiK6FfcmGbnHMNRVhgxqDobNLVIVUH0mQfClN03GPDLe3s6oIdZN65JlohCl9xbITwxMncDruuOdy7K0idzt341SyxvWCd0Kt1/lmZ22ctKj/mhS/kcq5ccDQWqGzqFeURy+Li4Uhwl+cx2+TmZESiJSogvG1/zbL5HKWCa4WFr+pZQyKdETZpmCEX7471GlhH2372ehvJW9taWRMQY5s5aji3y4rfai0HZwScwCL1BFX4WZHBBw0SvTJULrJPjRAhjxBmqNfoz757kbZ5DdVYjR+hYFkE82k+CTGmyGOVa94PTy5OlgiJEKMmlKkbSK0ahGiJ0XPhI1dk+HOHSMD5fSjP7vCUf9B2H4SwKGPWkZ8RMywVXhSJYlTb2U+V2xmwMaiyzWDX1tMo9UOuD2yjH57+Y5qkCgioLGlqpXxgKSeRjefn4/qBw78pj8Va3fj38bf2bPOsFcVY+df7jlWHzoE+egd26p6eIxfY8GhNnV15u2vSVDfdiiiVOKdf7uHq/1uy/qdp89TjG2axsK2UtoQxH6jSNFJuH6bcxDM4amVNcxELRTB3hyPO7EyXxPQVpXPpWmuNDMN7LjVV7pQOZ+/lzdSsH+NzKpbZAtPWSfQzTFYDBCySs8/T4GGDFmnhASnjvRWC/M5JKgS79Hn7S8XaoxwIM26aGaKSnDHmxg4CDpoT9dyPmlx1ptjASHpoHNEpTcW9hcgYsnK1f2h6HEJyFDDLtPXqQvI0kDKVSWPenZcws
UIZiSShg6dGjHOSMR3adelNwwHz+73r3mWmzIUMgi+OhKFeC/n8oglvSNzHt1EJ/PhvqofbajXS89/tgcR5Y2/jdOfaUC8HhgJtiK0Jvs8hN5Hpk+5KN0/qGJ93V5YePytCiRHS3RmShpEpiZ5x1Oy0EoxvuyfDjodEFkh6MobyCUop5HHY4pr2R3AdIsoXPcbqVJgU8ng3IsonsRRp2lo1x0FEeSQSmxTlx65Mm/Xd9uDYMQGKTC/EdoDVUC1VQNiabNor0WtjvL8ncm0sSB7Dj0/vYYYRyRT60ImxBSSmQupyd6T7OHOR+OkOah+0Hvk2KuuR/8YsRiQmmtxUr++4qd6L0rhUBMZdjwijpMnLlOhlQfck8GpCFy7buLhwpd2jLVcyYAnsE6H2PLNNN/L7rmTGOeWLq/AecNpxVclu8ttv9qE+PaDDPXtc7N9j+9U+PUZJzCgfeYznGWNgjFzC41vTvPMKtTCjLrWz2R3uG79dYyavDsS/iVxkiQ2sK0yJJH7WBzNX6IILiVMyEyu8hzevv/0hSHKmUO4xlVxtvf3mUbTed1iNSUj5YhpTaQ/mNKPWfXpHvgr2Lmb/xZan7b6cHigByFdUCm5GDlZEUuOSqm4pcOWmjQoNnesmlVIa8JNE/PHp/Y0L8Tsl++EJ/hNWGfXK3jBeeOrd46+3KsWIzmlUjUulZVWQoZGnztpMsMu76xnuCxRK0VWNvK1oUxOsq7Bll/MjoS0qdhuwLrCnKI/QSY/XF1283l0RFM4cbW3UqvGWTDEWltIiMSRLY7taPuiKCaVoQhmRfr8i2O1fTC8FI6sdxFSljGxKG0qLNFfZebGadl2SMHM76qx9VRzGVc0xq7dcNVwrdepbuTllbozhItUgCW8HyD3RUiTwun2Sq8niLYXR4MR6IVwwrQnYycQx8bp99K3Du4WfRnuETkCW6OK20TsEncG0zuvd50y09aNM19iRvB3ck+6ZgeFC70PXo13r3a716kxxyFIC8iJe3seqsntJtoiAVOpsuyQG/UdUNDYy+4QanujvOGlMwwBBIoqylLqtlYSYf9wz1x/f/vJyO6mXp5nHo08tiTyXENq+4xAxmeo8eBX2A/bIsfqJMiyeEdJbSHmcwS1aCr04uVg5VRVTeh4ocueyZsza5a3ssVWGSJEftCg0zpnVeaBs+71XAkYTqidKzAdvNvYVEDHXrpd8S3oH9MKmCDZZ85UqbUeEwwwhWhpjI27aOUQD4Ru7Ku1ixZK0XL2xWGGaPhYrKm0bVthyizMESfIaulII3eEehibe3lPyl/yQGfd4VFnexPVkS+vYEgLW5Cbq2WWTJVi/MTf/+LeK02sSy9hny8RYEuUbUkua2j34VoNc8FvDDt+yZaDCWgeWfzWX26qFod5sKxoFO+JKPRgMXpoe3ltXxUiSsEf4HDUKiFIiojZItKZ66WonGDaHLfsH6xPZ8g38Gw0kb/XhvQtV+PJleeu2NUt3fmFRsFUy27LJU2VRSvTyeEwyree5bF6OmpUG/Ncqmzkv4xvlDkO6s9eDWGZ7OwXT2hEd2DpnB3AsSrOSF6CiJcYZQ3cnN7GVCN1xDaKei9QDP4+Cbb517+T6WXAtBWNes61FEdEsupLqBt799GQVyMdP4UbN70oTHjsweR1MtoE5obJsyuuZVAqjL6jghLFwyqs7auIylwunKs9bzoexSLJdI10s9QQ+fqrACLYrkTDvoTVAKdSqcjdb0P8M2qNQ1sKuD4Blss/0z2u1EFjQFXJje1KxLX2nW5nBLoUGPeYrNCXw4X0ejWlKz1YAHepiLwjhSWA+j/uojc7WQupkK5HRXE38gAUzdWCXQTKEVNuPHQtfnj+hkRR5eUib4yLWIHGRMSLNqtjZlGPJNyrXE1pYWZaoRCYjVKCWImOxtUuwSGUawJPfMqHJ8VnyqXHqppMxbiITFs5it5ByNUmqc1RmPJ+fgqOfm3BNFMQ4p87s6+ZyVTi6zuCEuGddtWPz7i23uTsLlD5aaDfBfVAGjcIrJpLFU1V4nY3W6tfkRmONrZNKtDzvLPbasZuTaeaZ4szvJFO2ZvUbMEJPF8uqNbqVvVJf8Hwt5mU3fzvmK1V7TFSpJzLj1tW6BGbY4LbgC1T2HIymPBOZ8nOus2HKGy5KfRK7O17DXOvJJndk08E4NpvKxEuvamx63YowZZVObcKYSVFXMd3KzUxtywpkJN2eHdwmXS+l0JphfHImGFlRXaM6cwfVPDa4tkTSwNWx+SfPy127bV2j2/NkHb3EjWfQlyXJbEUPWwR/vlUvVdSdkeraCLl4AJVg18K+6r/JcX70JbSIT+eFA11Nv2vKgRMuasUQ/UwrxmOHgREap34+E4m2RIF7+U3eC8rrcwUymvLPn9Z08TmzNe33Z09jNVbvuaoIeu0AtlEBVf95h7wPmuMuQWccWgtaquAL4DZBIhFxV3Z2GF9+q+XJEF67DduXQ6CmKMMRFtieNpR/aulDh0tWQWVLexZkCw5IoqV9tCFhW5ZvqnaL2NatWRimPf3pKB8WdpmhfyrQIynQ4YoywWRid9A6N4ahzwzdtbM4gPBqsRC/uTfbdIa/rvPz8X3NopLghHy5HKKXWEQFq9Ujxqbc7XddItVl6MUtMvXqB4baPFvW+O3dyyeJlvjSuSmBeLXd6KlY7pnquz4Y7s0JZdnx4yn1vV7vuViClkXNBbftd90Y05ewbqXVlh9ploveHluCiVpfmm5YYl6AIsqkRK7risIWsbAnuV0RPT+HOtsbc24VzLpUvdJKszGLcZtZNaZsMSQOZdbFq6I8kuQFrsk0O9jb4yRjKyC1viQV1JxsdkA7W7xujbpVVgOV0vOl2is+RfRoZsvz5dstTRbsNF86Wx3OmYtXJmLe4M82BdEdJdxHcTxfpunSHLcxbRfT9lRH6UWqipqOMIvMp3ePRfm9drXzIYReqmqo6oQmxQEdsSM4tp/2tGz6GvSEZ1aTTy2FsYtLe1saBbcuU2k0B7J7s2q4feEClq6M3ZRwES7h0JsBI8rKWy74JhGZKi1Q6+uC4ODL7jEkSt9KjJBrtrm1s+3654+/djOIUaVrB1GTdK7gWi0TTF7eDFVGNeYZL/3EzPuJMrydkei5TE4vmfPzx18LcvegyvL6xPQ8mgXCdjz2GC0pSiKjJY0ImzpWTS9LNVbDxoUnlsP21lNRjqCiJ5zu6965HYVdan2Z3Co9st5862yyzs/9+JYXBP16NGlRwrSqLmozr9vBbc7IvTh1BrXZzamwQg3yaA/pSEiaYnxZFD/52+cctbcOIvj/2GthulXxuDonJQuc2sJjJ0+SMTqCFKcr6t6plnSxQImxeWJbAMxCHygP/xVy+hXQbYHuIByufjFPXbk/FSyNCPHy7IoPBrgiwWxjz7Bosc39dTWWbakIe7gmptXTHT0lSk07wy5HSDyzpQrNvzb7TFTqfVN3KM/dEtU75bJKh8i6Yo3HJURkFSftUFK2HcjtRcoplkW/9WZmiCRcpcTuuxRVcF/eABfdcd9xDVep1NT0fDFc+1ej6p6Y
AykYGeTXsNSZNUkvhtanYttjz9HLOK5opMnsglb8Xyrh2IhwLrQ7qxAxQhOMe1GaUzljzzSkwweky/zIRFSt8/lnlszYWTJ7JMm4bMJLkdjmbYVO8VhdM0cpXbTPXTviL3qcWaFyN/p2OzDdmzWD2ETFabIuSwY8vPqQV0IU3Kb5G267DDlD/v6E2zxsLI/W+9xjd00vo9Gms+DiRGVJQuoXQFHN8B4evX351H5gaGVG30StJnDlYDQq8HUFw1dVHf3KqF7XQlVKxRawhb/9vYm3cuLEETYSkkr5bc+wIVho3P+C795ATKODUCiGmB6DJXnDw9DoMYuuVsC4dgdh+V0kMzr+CLlmByGJkYzPEtNoFwp40N8oWKHcQMYZfUbmTR2q3al045YSaYvNUw5KJP4sHWGgqM68SqUaErLxTmyYtIw/c7FuOpeHU1cSVjk2skRXRtU4XSzm33ibTUuKK6P3pXHJPKK2ipaEHnRxauP9o9dX38UrkhRV7txS1zUliW6dzjugXxsFpHpz64aiBwKGKwwvHnvX2zRj4dqtAwhzYMOjqYEtwnK6F4p3PhHRNA6u8RugDsvHtw/vgUhJNu5cZZzxmHAdrlUdU/Wcb5+NNI2qF2S4mK3rZEv/x1zgbQ+VQapeUrYNk/Whx2eJbTbezZI5oWy0pax6vbltd3f/dn6pyV+DCI5UyxYSYov2SLL299pbEEGU1r0YV3IqYRXbeFVoloLFNgwPd6/ffHtr3J8cwjZ4Zn4ewSDx+LyB7SG6ULK0J8JMvzvQFvpJRM+oD1maPr179K2oEoSb+ofZ/a4Mc5fi7C7n7C/3mHS931XGOQhiThLaquXSF4F57pDO7eW7ExoumtX6uijtfvePN5PXkzeTO2OVvHn9+u7+9fsff7h/++M/39//8N3fvr+/vxsmej/bS4AfHoHEsfS1wGhRbIdweHhcfWs6e3hcfV881Ie2VMjwxa2B+VHQ9+bNPvBNVzswSUyExgtg+EcLZGSOe+pOwnJPQH+eG9dhgIFUAPv797dv7u5u7+7+fvu37yd8PfG/TCLRuo5uB+bHTx9BYiRkHKhthh4oPDzm16KKmSa2isqKEpC4Qqnae5MPj8CEeO4MaDXYgJrF05RlaioGXRRQ3oe0L/m2kvx8jpEPZKa3zsSNhS0Reo2ffn7/Mjd4PS/MoLkMEMHRXonbbJWRGbLazRM3tgHT2v+9s/7V1VyIyYzIyUIwwhcTIReTK8Pfq+oXraB0UcTetBGjRplQnlcqN81DJBL0VQEJB0xmGMcYQyTSTWG4E90qA2BfWGqd3r96lWYzRiOVzef0i8XRW5an9mab8Uz+f5rm/EOznExX/qEYEyuBXtzAJ1LuQNx9acf4131sBeDLoe8JYsBVEdtRjH1Dx0+V2zmg1vRWHPhl38tn8AtGmd3uOoQf9nj/YJEIvzW8430vDJlnjE0HiELdBu4Onz/Z3yHw+6HRczF3ZXRz+5mWMXNfjP4gC7pdMmzveqtvrRxz7izq5iDsKlBqExXaDl9uWLlKXoHfe+zOGGCWh93oqrV3lcZAEH9ELEUX1vgJe686Gq0OrunSeGD7j82OigvdDBn5HtvZpuZK5nvgN2U5zDKqU15Q6vJkbPqIKyWaustLfscJvBNSokptYRQt8noQCm3c+ZXRmK/URr3iqF/RdPXtKx2l0wSTCXzoKMvbvQ0fLs53cKXU3aMLPTfnhUyXZHseVvdI90RrEbu57gfJd4uxEfl8aLv5u5WCLh0yNgG5PtnN93565Qj4DLRteqYJD5WxCKhaBm8aHhlgGaeqdDuImxETCqdr0nm09yhoGwiNjpiWSKbB+/nquDVNLgN2ASSEujC14lEXoV/f/0EWIUPIGRehLL7ERWj76ELPRejUKrwL9Zb/KWZH2igVO9jW/+ya+Fw/M9S8PtV35D2CAwPmvvzhJBl8AXT+auNnytNMT/OHEsoY9UXBhimIT0uED085rbZ+atnU5MX/BgAA//+AupAR" + return 
"eJzsXf9vGzey/z1/BeGHQ517thLn2l7PPzwgTa6AgbY24gR3wMODQu2OJJ655JbkSlH/+gcOud+50q60kpWi/qF3saXhZ4bD4cxwOLwmT7C5JXqjDSQvCDHMcLglF4/4i4sXhMSgI8VSw6S4Jf/zghBC3B+JNtRkmiRgFIv0FeHsCci7h0+EipgkkEi1IZmmC7giZkkNoQpIJDmHyEBM5komxCyByBQUNUwsPIrJC0L0UiozjaSYs8UtMSqDF4Qo4EA13JIFfUHInAGP9S0CuiaCJlBhw/6YTWo/q2SW+t8EWLE/n93XPpNICkOZ0ITLiHJPLedv4j9fHbc6diQVFL8Mjb4FQQXFtaVTgWLl6RGQuVSEEs3EggOOR+ScUJJk3DD8XkWC+U9daPlPk4kqIyyu/TpnhUuxaPxhCzf2x0J/Z1GJLJmBKlHVPvlf5AFUBMLQBeggoEyDmqSRCcLSEeUQT+dc0uYH5lIl1NyS1NEfBv7jEvIv0gUK2rJjWAJEpyAMYQKBEZ3SCDp4q3FgWPSkxxGtBUcTmQlzIDCvL+co3CdQAvgQLkYU8E4JD0AnWATnJ2EpCJfr61QxqZjZkFTJCLQG3Yebk0l6X5Qs5mcoc0TVA/jpFLkHILmmzJyhLAWxwMilFCRm+ullPz5OaSOG4VO/nZ+QNagVi6xrZl26JRUxt/9YUhWvrTfHhAGlstTsXI/qt9OJfjTUWs7N1zQvFu9+HD733OyB3ADl5zczTBAmVpJnwlC1cSZgtsE4Z8WUySjHb6yXjAP+drlJrUi0VK3B1lTX5CXNElS+BUo1aX3h7YoyTmcciBR8YzfPT4J96SXIU9rF8xVQEcul2UGhXJRmrWjScmUjZn1YdGbDvDEnysVm+UQhdZIq0N77whmQ2kzch6W4Fnb9cPY7NMNEUlkZmqwZ52RJV2ADVPqFJVlCVpRnuGg+37x+/RfyVzfcZ6TdIlaOU6NLuQIab4ihT1Y/mPZUmTCS0ChCtXO2ZdUmGsBiofyhQ1NyL9opAn3VIruRGYmocJNWFXmRvFkooAaU/YVwciM/SUXgC01SDleEzcnfWmSdStmvU0O+f/0XC+3K6pVTLp/2mERpNsml+dlpzwzIzQ+dk/PHCmH/WEHi1xt+/VGina/Ia/3TLw9w+Kd3O453a6Q5U0FaXxA0cWzjjnoXc0DFubv/l7VCXU7Jr6Vn1Ms/sZ7UWYpgaJr6bBkZutGfJyMH7fbnyVL/Lf9M8e+x758nJ6Nv/l8Vm/t6AOfJ5NfqBpybNPt4AVd5IkRDnAu5zNlgcB3gveExfGxl976Wk+lzPtP9Ok5Bz/Aw8awP4Z77KGT/HfG5ke+7yf159lCVidVTJl80RTHk+MGSqJw/2H+Su/uijKxnDV7+M/yMwv43OJ9PsFlL1Tw48PnjW6JjejN8upE9O+QuZQPFKJ+6zXMAvJ4QvtF+hLzcjXxcMk0SuiFCGjIDqxwrFrttnHJeCr1F0+fodzCkgMYTPPAYcfGgp1TxMOwgVmXsDFmV0VlkNXyecb7ZgW+tmIGjA8RR9kSIEpxtTP8TtdwVDH1pD/BIBmHUYZN7QX5mIvvijrhYcyjS8AM1REYqTwkPe1LOvKYJQrXOEisZ/BTR7Hf0Q7+7edNrBp9fQBaHATGOjHJiPcXUorpbbKhWdt85otonjNuYIJIi1n5782YFV2yviX02iG7N7nQWjw0wjDGWdh+8e3W/G6CN3iY42wp+y0CbSQJqAXqagppqiILYQxHmDvDNo3pc5n5ITXBMPCUnjhN3YrsGBeS3DDKIiZG4GGJYsZ2xjWfLqchp+cIxj81Ybb5OOlEleqZ1C32Fzz0m6LQzMy4nOCOegS27zQhs/Fjut4Xv28Lc9N3DW9pOhqiNL8ZlhK5A0QVUY5q5VA0tC86IkdYDtQELxEPW/wlnxanYMafFsXS6eWksmpEmJl/xdLWYWh/lOKyg93PJhBPvSztNFnVPC9CPE7ThR+YDxyAcxMIsj8LEKZf5uIrk0hcw7fSyDlciN4JjxCpT1d16iUzdvbofdz5mmd6Mx81DOHMfZ8o6iesli5Z1Fro3xcsZFfGaxWZJMsM4+53aYVEI5adeTsh793FNTabcR2QUZTZwcTVzZcmjJhGXGqe+XsWYiwSEUTLdHJJMKtNW/jpkm+bwBBHNiU5nzIya+ivQWsJ2ytpwSxjPfxRU4vU4r6w0qWEryLUnlZIXIfu3r//xfWuW54xD7eYr2StrWJJp1S6XfxqjhLlg+kQ5BUwQ4qlORd5G2pA/E6liK8bBxhl4NpXveJMgdLdIpwMTnIOSmNWS2lvy+VUMq1f2rzefg4jsuEeAYmk0ocAX820YBCbcp6lkHZm+vbEgYWtpkXZLNmE0qK1HTBtY+kTIGLTVFrtG8TftzHkFkoJn1fbtWm3RTceWWkVeCmAfoaHcTyQ1N8cV2W2XWKbhtIljO+BAeM+/uzVAb6tTKFRR2w3moH3Mq5SjVNnKKptYfhJGFwsFC1ochVHOnclpXG4pv3rw7Z19D0N+rZsfj4bMZdaMjGvL54Bl/TFg9jr0zQ0VCOK2aX14ZtuMg8b5KbkmsYx0KxsQkDrZboG3imIX+hbOUPRAvBDRAjbWQBOgXSzPBhBX6g6AIXN8OoTO7F0i0JRnGmX6sh3ycEnjQ8yHDfEsjTyGPXDBX9xcDDXC9k9MLKZzGhmpbm1oN8wQ/1yBX4SXnGpDEiYyA+E1fPHdOSH9zmPtMDgXN2eF9iYAN4wbSxCfSycCukBiVtQk9CstbLPzXFMRVpgxOHo27erQqgN5wg+FOTrupeGWdXZdwQ5y7xyJVorC9xsbIT1xsrAD9zWHe3f/qNOFG5/sFtsL1gmjWheflZV96FH5OS9iIVdx5ZKjsQSNhVdMRDyLiw9HUrgqj9kmdycjGi1BEyra/tcsm89BaXKpoYhVvWhoZDLKJw035OzDsV4T63jbz19vI3mL1MqOgBBj3aiV3C4vfqu3HFwR5AQeqWeoIs+KDt4ZosAbQ+0y+8wqEYgIyAzMGvzNd6/SWNVQzdX4GQo2RbA/zU+SGFIQsc4t7/2jy5MlUgGJwVDG9RVJ0QySaAnRUxEjV3T4c4dKkOePoby4w0v+zuA5COVRxjGQn1E7LRVZFGVizODqZ9qdDGAOqqQZHBojjdI+5PYAid4//tuSZJpQorOkaZXyiWWCRpjPz+f1XpB/MRHLtb7y34ff2qvNi1YWc+W/3neuOmwO6WN3yE7b03PmAgceraWzq2p3TZvmptsQpQrm7MstufhfZOv/mj5PPb9hNwukUvoS1n1g2rBIu3OY8hDP4qg1Nc1VLJTB3J2OeOZgumSmryo9l61Fb2QY3ucyU+VJ6SC4MjOTtHWDuwfmGqYo94yQFEJIrR3MzG4ETBwPABO7x1dAY7rEIrCDYaDsC4KkTrAHAjT2gxNx2yAgRbKsHnR/bVY767cIcy6W2QJCM3lyw22BeH18bpsdvHbSYi08IWXuv8KQP0VLpeTnbsN/qXj+TBDKbchup6hkZwyPZgcDB62Jeh1QTa86y63ISH
vSOKpThg17K5ENatRq/2OKcRjJUZBZZjDCD+nTQM50pqyr/7yMyRWoSCYJG7w0YpjTjJvQCVxvHg5Y3+/d8K5KaS7VMPB2X5lU/ZQm8NCG0UJWmfqQ71Pht8PKVyF1uSFklzBbsPogqlgJyvmMRk+jDP0ud8gqosECyyTTeB9Rp5zZ/zPHtoBrmlbhFXc5waylqiIanrL1NCo5W/+b6q3U2uMG+d/xJvG8cSx5ugupYJYDs/iYHW+C73M71UYBpywoaV6x0yC6Ok5VwoTnRKggAra7uNmFU9ETjFpYWoLxtHsK7HhIVIGkp2CYmIBSUh1HLI60vzvvEDGx6DFXp8KkQcS7ETExiZW0xvooiJiIZIL1jH7uyop3P2wPiR0ToMzMQm4HWD1lYZpQvqab9mb52sZa76laW4dfxOTHx/dkBhHNNPisp3XdFKRSmfJgs7sPQbE1uw4LB+1HnkZlP/K/sZsRjamhV9V3d66qDxo1XgMi4+5HlDPalGVKzbLgexL4asIW7qJA8VJSe0TsMzRgC+xzuORlhqQbpbkXKhOCicVFuHwj7XhjaDf77W/24T49YMA9R1zsP2L7q31GjJKYMzHyHM8zzomNSaiIry15F8QbaWddGRdiOdxX/qTVLl4TOLqiapEleCamIaWK+lUfLDpjCyEVTOlMruCWvHn97Q9BljMNao+l5Jpi7reOovW+02pdQiYW05gp7C/QPHDqMzqIVXB0OfsPtBIj7pfTAzUAxIopKezMkRVVjM64z3oEtcD1ibcmNNSQgVZ64JCfFMCPj++v3OmcM7L3j+TfYZNRb8lPxssmvnv4dK1TiNicRdU0Ylq28xmaKOxsqkYG5ZG7s7OBDkemapG3dVtrgnWt8XA7PxLaotW+BevysJqJCJz2eHvRJevdrXzJMyfHG02mvCdTzAVyWtR0ZWmMu+WdqbhQmiWMU+WPGoPD/sWOUgiyOkDMdMrppvShjExzk513mWo3FAoLt6NB4lclYVjVArM65arjWnlgolVWV5a1WSkyQxQVXSkhvDnwun0JsyniLR0NyYntQrjTYROw04lj4nUlMFund4s8rfUIXV4u0cVtp3cIOotpnT9UkQsRG7/ZoaHj3kWwnKRn8ZQ7KRm6H+3a73btV8+UNi41IO++52OsqriXdIsKKK2f7VDLov8AmsVWZx/BkEf2O0wayzDAkIyiLGXuJCyh9j/uM5cf3v7ycjur52eZx+NPL6l6LiXEseMQM5nuvDMZjgP2KI/8iXEoPiOV95DyPIPbtDR4dXK5cqYrrvQ80J3SFbzZvct72WObDJmCOGhTaFwRrctAI/3eOwFnCTMTLeeDz4b7KoicGzdKXkGwA3rhUwRJ1mKlCu2ICjIDEi2tsxE3/RxqCBUb3JV2iWJJW6HeWKKwpI8ligptKwrskzoDomje/FpJaTrCw9DC23tJ/pLfDxUejy77ErmRsCcWdv9Al5vqJ1cImkD9qev8x3+ruHiqoMx9tlyMJdWekF6yFEsmWgSFFNdWHJ4yClBDbQCUXy3kRrMwNJptZaPIjrxSDwETr0137zFUsZok8fat40YTqrWMGCaJ1swsXdsTK+awZ3+HMRF2XhHfGEJzqnfvXarC9x3MqSM15Dt/aSxIlc62HPKQ2nmxWR5PSJZ6Xobq9ajZJMT/WmczF2V8o909Ztc2YZDIcLRTCK2d0SHDjvy7JRalWSkLoqMlxBkH95g+xRai7qYV1U9FpYhfR0Gab913cvsshVGSc2/Z1rLIaBZDKX1F3v30iAbkw8cwUft3baiIHZi8gS3fkDllqiTl7UyqpLUXTArKebha3d0Sc5cOiqAqv3KQT2NRH78GtliaCfnwsQIjSFcB5T5Ca4DSYHTlUcVg/Bn0R0nZxL4+AShkf0knb7NEyYKtQFjfk8lt1Vb9qjuCBo30WK+kqYF37/NsTFN7tgLoMBd7QQgvAvvzsI/Z6KQWMidbmYzmeuInLFhYRQYXtGxhFcfBufDvaiQsUjLv64olSXJNFCwyTpXdFTtJOZF8o3M7YSTqsgItMxWBJnopMx6jXwJF5dkAmfyWSUOPL5KPjQtznYJxC5ny8AUUhJSbSVpdoyoT+fqUAvzaJJdUkxjmzLl93VKuKkfX9bmQ9DBUO7bs3gqs3VmA8tlCPAT3SRmwBq9YSIinavA6idZaT+VOY02sk0q2PB8s9taxW5Jp5oXi3O+8uOsNsUrPFsuqN7pVvMqc8Xot1mW3fDvWK9N7LFRlJioTGGqdgzAwuS3FAjReYTNMZDLTfs11EmaiEaLUF7F7nDkstZ5icretHYxji6msk/WmBsvrVpRrNDq1BWMXRd3EdBs3u7RRFMBpur2Yu826WSppDIf45EKwuqK7ZnXm7ph6bOQSmWSBN5/zn7yMeu2Oda1tz4t1zBI2XkBfljTDZjz4esV8q12qmDur1bUZcvkApgjuhX3Nf1Pi4uhbaJGfznt+unacl0wQQYWs9TH1K62Yjx0ORmie+sVMNNqSBe4VN/koKG+tF6hoyn/+9KaLn2f2pv357Gm8xuoDdRVFr/VOsCagGj/v0PdBa9wV6IzDa8FLFXwBHAskEhl3VWeH8eXP0Z4M4aU7sH05BGoKKpxhIdvLhvKfWvnQ4ZpVcNmyngXbUhCg0RI/2tCwLds307tVbOvRLBlmPf1lNp8WdpWhfxrQIxnQ4YYygWSCJ2idB8OkzwrddbI4gPFqnx9/uDfbdKa/ypb7gxlO6JfzYXoJRVaw2vhlbM7dedc5cl2mXtwmU29cYrnNq2Vt3N69fdJoCS9dmBLIV+NBT8Vzz3Tf/cFKb04Zz46fT6mf9frIBRlaFu1S3LHfZWNOX5J1q6y2/FGAl/X7M6zX52YblpD3jokypUCYuqHA/jN48d71v/RrqJPemGurENa52pVWmY3djNvCqglliyNxqLDO3hTlmSSvcE2h4WRvz5OMbYD0+pxMUHOx4YR2UrxszToaq4FG6elc/RVfIno0t+Xp/P2Wpgh2ui+dVIdL5uyNiZw35LPNQHRnCfcxHE/n6bo0521M38XSnpooPUtTUbMRdpP5+O6h6JzZfqhgCKPnahqqNqHJccBG7EiO7Wc9UUxfg53wwmrKqWUwdklpb0+jkNZ5Go3mRHYfVg33L1zC0nWgnFIhwy0cegtgRF15K6TYJDLTpQfqOqVJQXzHTA5Um2sFEQjDN9e42i5//vCpW0CcaVO7iJqkc00u9TKB5OXVUGNUE56N0k8svJ8Yh+sZjZ7K4vRSOD9/+FSwuwdXKOsT8/NgNwgceOw5WjJQVEVLFlE+daKanpdprKaNi0gsh+29p6IdQcVOONvXfXI7irj0+jylVUZkveXWSbIuz/3klvfy/XosadF9uGouaiuvO8Btrsi9JPUMZrNbUmGDGpTRHtqRYJOv8+L40T8c6bi9dhCJ/x980anbFI9rc1K6gCn2iTt5kYy1EbS4XVGPTo1iiwUoiO0ntiXAEPpAffiPVNOvgG8EuoNxcvGL/dSF+6cmS6tCory74pMBrr833+AdFiO3hb+uPTq2isDLN
TGr3u7oqVF62pl2OULhGfbQs//F6jNZadXP3KU81wW5d8lllY+u3oDHZkRmlSDtUFa2XcjtxcoptkV/9GZXiKJCpxTPXYqmxS+viJDded9xHVel9dSOfDZS+7XRdU/OCS0EGZTXsNKZNU3PhtfH4thjz9nLBKxYZOjsjHb8Xyrp2IgKIY27qxBxyhKIe3GacznjTyxkwweUy/zIZVTt8/lnlczYVTJ7FMm4asJz0djmQ6PO8KCtmYNSLtvnXgzyb7TOUKncY9zdAUz3Yc0gMTF5mqrLUgB3r+7zTohSYJm/lbarkLPs78841mFDebXe1x67F7Y5izadDRcnOksSWn+7jRkOt+TB+5eP7Q8M7czoSdR6AlcuRoMmvq9g+JW5o7/21utFt0qr2AI2wmW6hbdy48QxNhKSSrd0L7AhWFjc/23+3kAs0UEoNAdIjyGSnPAwNGbMpqsVMI7uICy/y2TGxp8hR3YQkhjo+CKJ8eGWMApyZ77RZAVqQzLB2RNw7+ow426l27CUKnwbgAmiZeLv0lFONDOZN6nMkIRufBAbZi0TT0Kum8Hl4dyVjFWujSzBtVG1QRePxTfeZzOKwcrafWVDMo+obaIVZQe9edz4/tH7q++SFU2KLnduq+taktS0bucdMC5mAZnZXLup6IGAwwrCm8fe/TbtXDi6dQBhCWxENLWwZVhP90LxzhciWuLEEb8izGH58PbuPaFK0Y27VxlnIqbChHtVx0w/5cdnIy2j6nsmLmfrBtky/jE3eByhMknV9wW3YcIYenyRINl4t0jmlPHRtrLK+I7u7vFxfenJX4MIjtTLliQUm/YoukYUzt7qIEoML8bVnEpaBYlXlWYpeYxpeHLz+s231zb8ySFsg2fX5xEcEo/PO9geokslK7wRZsfdgbawTzJ6AnPI1vTx3YOnoksQbukf5ve7NsxdhrO7nbN/3GPS9f2uNs5BEHOasFYvl74I7OcOGRzfzZ6wcNOs1q+L1u43/3gzeT15M7mxXsmb169vbl+///GH27c//vP97Q/f/e3729ubYar3M77fffdAaBwr3wuMFc12qCB3D6tv7WB3D6vviw/14S2VKvzmcmB9FPy9ebMPfDvUDkwKEmngDAT+AYGMLHHP3UlE7hnoL3MbOgxwkApgf//++s3NzfXNzd+v//b9RKwn/i+TSLZeD9yB+eHjB6IgkioO9DYDD5TcPeQvGsuZodhFZcUoUbACpdtnk3cPhEv51JnQaogBDI+nKc/0VA56KKB8D2lf9rGT/HwOkU9kptfOxY0ltgi9hI8/v3+ZO7xeFnbSXAWIFICvWTepcjoDXnt54goJWGr/fYPx1cVcysmMqslCcioWE6kWkwsr34vqL1pJ6aKJvaURgwGVMJF3KrfkSSQT8F0BqSCQzCCOISaRTDeF405Nqw0AfmFpTHr76lWazTiLdDafsy+Io7cuT/Flm/Fc/n9acv5Ds5xN1/6hmBPUQK9uxBdS7kDc/WjH+M99bAXg26HvCWLAUxHbUYz9QsdPldc5SI30VhzwZd/HZ+ALRBkedx0iD7zeP1glwt8aPvC+D4bMM86nA1Sh7gN3p88f8e8k8PdDs+dy7tro5v4zK3Pmvhn9QR50u2XY3v1W36IeC+E86uYk7GpQioUK7YAvd6xcJ6/A33uczlhgKMNudNXeu9pAIIk/IpZiCHR+wtGriUbrg2uHtBHY/nOzo+NCt0BGfnZ4tqmFkvkZ+FXZDrPM6pTvybo6GSwfca1EU/d4ye8wIe+kUqBTbIxiZN4PQgPmnV9Zi/lKb/QrAeYVS1ffvjJROk0gmZD7jra83cfw4eZ8B3dK3T27pOfhvFTpkm6vw+qe6Z5oEXH5yjy2oMJhIbYqn09tt3y3ctBlQ8ZmILcnu+Xez64cAZ+Fts3ONOGBth4B0+Fn80cGWOapKsMOkmbEpYbpmnZe7T0K2gZCayOmJZJp8H2+Om7DkvOAXQAJoS5crXjUTejT+z/IJmQZecZNKIvPcRPaPruk5yZ0ahPehXrL/ylWR9poFTvY1//sSHyu3xlqPp/qB/IRwYEJc9/+cJIMfgA6/2rjz0ykmZnmH0oY58w3BRtmID4ugdw/5rxi/9SS1OTF/wcAAP//y3p6mg==" } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/data.json b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/data.json index db5d2f6e..b38aaa79 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/data.json +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/data.json @@ -1,9 +1,5 @@ { "@timestamp": "2017-10-12T08:05:34.853Z", - "agent": { - "hostname": "host.example.com", - "name": "host.example.com" - }, "event": { "dataset": "system.memory", "duration": 115000, @@ -18,18 +14,24 @@ "system": { "memory": { "actual": { - "free": 4974256128, + "free": 671084544, "used": { - "bytes": 11597463552, - "pct": 0.6998 + "bytes": 362037248, + "pct": 0.3504 } }, - "free": 1320112128, + "free": 340848640, "hugepages": { "default_size": 2097152, "free": 0, "reserved": 0, "surplus": 0, + "swap": { + "out": { + "fallback": 0, + "pages": 0 + } + }, "total": 0, "used": { "bytes": 0, @@ -37,17 +39,27 @@ } }, "swap": { - "free": 3482378240, - "total": 8589930496, + "free": 0, + "in": { + "pages": 0 + }, + "out": { + "pages": 0 + }, + "readahead": { + "cached": 0, + "pages": 0 + }, + "total": 0, "used": { - "bytes": 5107552256, - "pct": 0.5946 + "bytes": 0, + "pct": 0 } }, - "total": 16571719680, + "total": 1033121792, "used": { - "bytes": 15251607552, - "pct": 0.9203 + "bytes": 692273152, + "pct": 
0.6701 } } } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml index 9544dfee..5fbe09e3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/_meta/fields.yml @@ -79,6 +79,22 @@ description: > Available swap memory. + - name: out.pages + type: long + description: count of pages swapped out + + - name: in.pages + type: long + description: count of pages swapped in + + - name: readahead.pages + type: long + description: swap readahead pages + + - name: readahead.cached + type: long + description: swap readahead cache hits + - name: used.pct type: scaled_float format: percent @@ -131,3 +147,14 @@ format: bytes description: > Default size for huge pages. + + - name: swap.out + type: group + description: huge pages swapped out + fields: + - name: pages + type: long + description: pages swapped out + - name: fallback + type: long + description: Count of huge pages that must be split before swapout diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go index 34987a04..57c6d588 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/memory/memory.go @@ -75,6 +75,11 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { }, } + vmstat, err := mem.GetVMStat() + if err != nil { + return errors.Wrap(err, "VMStat") + } + swap := common.MapStr{ "total": swapStat.Total, "used": common.MapStr{ @@ -83,6 +88,24 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { }, "free": swapStat.Free, } + + if vmstat != nil { + // Swap in and swap out numbers + swap["in"] = common.MapStr{ + "pages": vmstat.Pswpin, + } + swap["out"] = common.MapStr{ + "pages": vmstat.Pswpout, + } + //Swap readahead + //See https://www.kernel.org/doc/ols/2007/ols2007v2-pages-273-284.pdf + swap["readahead"] = common.MapStr{ + "pages": vmstat.SwapRa, + "cached": vmstat.SwapRaHit, + } + + } + memory["swap"] = swap hugePagesStat, err := mem.GetHugeTLBPages() @@ -91,7 +114,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { } if hugePagesStat != nil { mem.AddHugeTLBPagesPercentage(hugePagesStat) - memory["hugepages"] = common.MapStr{ + thp := common.MapStr{ "total": hugePagesStat.Total, "used": common.MapStr{ "bytes": hugePagesStat.TotalAllocatedSize, @@ -102,6 +125,15 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error { "surplus": hugePagesStat.Surplus, "default_size": hugePagesStat.DefaultSize, } + if vmstat != nil { + thp["swap"] = common.MapStr{ + "out": common.MapStr{ + "pages": vmstat.ThpSwpout, + "fallback": vmstat.ThpSwpoutFallback, + }, + } + } + memory["hugepages"] = thp } r.Event(mb.Event{ diff --git a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/docs.asciidoc b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/docs.asciidoc index 45ec3046..e8465147 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/docs.asciidoc +++ b/vendor/github.com/elastic/beats/metricbeat/module/system/raid/_meta/docs.asciidoc @@ -1,6 +1,7 @@ This is the raid metricset of the module system. It collects stats about the raid. 
This metricset is available on: + - Linux The config option `raid.mount_point:` can be used to configure the location of the raid metadata. If running this metricset inside a container, you will need to mount `/sys/block` inside the container under the path specified by `--system.hostfs` diff --git a/vendor/github.com/elastic/beats/metricbeat/module/traefik/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/traefik/_meta/Dockerfile index a36e1f09..32994f31 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/traefik/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/traefik/_meta/Dockerfile @@ -1,4 +1,5 @@ -FROM traefik:1.6-alpine +ARG TRAEFIK_VERSION +FROM traefik:${TRAEFIK_VERSION}-alpine COPY ./traefik.toml /etc/traefik/traefik.toml diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go index 18ecaccb..1fe12ab2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_integration_windows_test.go @@ -58,11 +58,11 @@ func TestData(t *testing.T) { }, } - ms := mbtest.NewReportingMetricSetV2(t, config) - mbtest.ReportingFetchV2(ms) + ms := mbtest.NewReportingMetricSetV2Error(t, config) + mbtest.ReportingFetchV2Error(ms) time.Sleep(60 * time.Millisecond) - events, errs := mbtest.ReportingFetchV2(ms) + events, errs := mbtest.ReportingFetchV2Error(ms) if len(errs) > 0 { t.Fatal(errs) } @@ -83,15 +83,16 @@ func TestCounterWithNoInstanceName(t *testing.T) { "instance_label": "processor.name", "measurement_label": "processor.time.total.pct", "query": `\UDPv4\Datagrams Sent/sec`, + //"query": `\UDPv4\Verzonden datagrammen per seconde`, }, }, } - ms := mbtest.NewReportingMetricSetV2(t, config) - mbtest.ReportingFetchV2(ms) + ms := mbtest.NewReportingMetricSetV2Error(t, config) + mbtest.ReportingFetchV2Error(ms) time.Sleep(60 * time.Millisecond) - events, errs := mbtest.ReportingFetchV2(ms) + events, errs := mbtest.ReportingFetchV2Error(ms) if len(errs) > 0 { t.Fatal(errs) } @@ -112,7 +113,11 @@ func TestQuery(t *testing.T) { } defer q.Close() counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName"} - err = q.AddCounter(processorTimeCounter, counter, false) + path, err := q.GetCounterPaths(processorTimeCounter) + if err != nil { + t.Fatal(err) + } + err = q.AddCounter(path[0], counter, false) if err != nil { t.Fatal(err) } @@ -131,9 +136,9 @@ func TestQuery(t *testing.T) { assert.Len(t, values, 1) - value, found := values[processorTimeCounter] + value, found := values[path[0]] if !found { - t.Fatal(processorTimeCounter, "not found") + t.Fatal(path[0], "not found") } assert.NoError(t, value[0].Err) @@ -233,7 +238,12 @@ func TestLongOutputFormat(t *testing.T) { } defer query.Close() counter := CounterConfig{Format: "long"} - err = query.AddCounter(processorTimeCounter, counter, false) + path, err := query.GetCounterPaths(processorTimeCounter) + if err != nil { + t.Fatal(err) + } + assert.NotZero(t, len(path)) + err = query.AddCounter(path[0], counter, false) if err != nil && err != PDH_NO_MORE_DATA { t.Fatal(err) } @@ -255,7 +265,7 @@ func TestLongOutputFormat(t *testing.T) { t.Fatal(err) } - _, okLong := values[processorTimeCounter][0].Measurement.(int32) + _, okLong := values[path[0]][0].Measurement.(int32) assert.True(t, okLong) } @@ -268,7 +278,12 @@ 
func TestFloatOutputFormat(t *testing.T) { } defer query.Close() counter := CounterConfig{Format: "float"} - err = query.AddCounter(processorTimeCounter, counter, false) + path, err := query.GetCounterPaths(processorTimeCounter) + if err != nil { + t.Fatal(err) + } + assert.NotZero(t, len(path)) + err = query.AddCounter(path[0], counter, false) if err != nil && err != PDH_NO_MORE_DATA { t.Fatal(err) } @@ -290,7 +305,7 @@ func TestFloatOutputFormat(t *testing.T) { t.Fatal(err) } - _, okFloat := values[processorTimeCounter][0].Measurement.(float64) + _, okFloat := values[path[0]][0].Measurement.(float64) assert.True(t, okFloat) } @@ -318,7 +333,7 @@ func TestWildcardQuery(t *testing.T) { if err != nil { t.Fatal(err) } - + assert.NotZero(t, len(values)) pctKey, err := values[0].MetricSetFields.HasKey("processor.time.pct") if err != nil { t.Fatal(err) @@ -355,7 +370,7 @@ func TestWildcardQueryNoInstanceName(t *testing.T) { if err != nil { t.Fatal(err) } - + assert.NotZero(t, len(values)) pctKey, err := values[0].MetricSetFields.HasKey("process.private.bytes") if err != nil { t.Fatal(err) diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows.go index 519ac778..5c5a7149 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows.go @@ -22,6 +22,7 @@ package perfmon import ( "regexp" "runtime" + "strings" "syscall" "unsafe" @@ -29,7 +30,7 @@ import ( ) var ( - instanceNameRegexp = regexp.MustCompile(`.*\((.*)\).*`) + instanceNameRegexp = regexp.MustCompile(`.*?\((.*?)\).*`) objectNameRegexp = regexp.MustCompile(`(?:^\\\\[^\\]+\\|^\\)([^\\]+)`) ) @@ -64,10 +65,16 @@ func (q *Query) Open() error { return nil } +// AddEnglishCounter adds the specified counter to the query. +func (q *Query) AddEnglishCounter(counterPath string) (PdhCounterHandle, error) { + h, err := PdhAddEnglishCounter(q.handle, counterPath, 0) + return h, err +} + // AddCounter adds the specified counter to the query. func (q *Query) AddCounter(counterPath string, counter CounterConfig, wildcard bool) error { if _, found := q.counters[counterPath]; found { - return errors.Errorf("counter %s has been already added", counterPath) + return nil } var err error var instanceName string @@ -93,6 +100,67 @@ func (q *Query) AddCounter(counterPath string, counter CounterConfig, wildcard b return nil } +// GetCounterPaths func will check the computer or log file and return the counter paths that match the given counter path which contains wildcard characters. +func (q *Query) GetCounterPaths(counterPath string) ([]string, error) { + paths, err := q.ExpandWildCardPath(counterPath) + if err == nil { + return paths, err + } + //check if Windows installed language is not ENG, the ExpandWildCardPath will return either one of the errors below. 
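+ //in that case the english counter is added to the query, its localized full path is read back via PdhGetCounterInfo and that localized path is expanded instead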
+ if err == PDH_CSTATUS_NO_OBJECT || err == PDH_CSTATUS_NO_COUNTER { + handle, err := q.AddEnglishCounter(counterPath) + if err != nil { + return nil, err + } + defer PdhRemoveCounter(handle) + info, err := PdhGetCounterInfo(handle) + if err != nil { + return nil, err + } + path := UTF16PtrToString(info.SzFullPath) + if path != counterPath { + return q.ExpandWildCardPath(path) + } + } + return nil, err +} + +// RemoveUnusedCounters will remove all counter handles for the paths that are not found anymore +func (q *Query) RemoveUnusedCounters(counters []string) error { + // check if the expandwildcard func did expand the wildcard queries, if not, no counters will be removed + for _, counter := range counters { + if strings.Contains(counter, "*") { + return nil + } + } + unused := make(map[string]*Counter) + for counterPath, counter := range q.counters { + if !matchCounter(counterPath, counters) { + unused[counterPath] = counter + } + } + if len(unused) == 0 { + return nil + } + for counterPath, cnt := range unused { + err := PdhRemoveCounter(cnt.handle) + if err != nil { + return err + } + delete(q.counters, counterPath) + } + return nil +} + +func matchCounter(counterPath string, counterList []string) bool { + for _, cn := range counterList { + if cn == counterPath { + return true + } + } + return false +} + // CollectData collects the value for all counters in the query. func (q *Query) CollectData() error { return PdhCollectQueryData(q.handle) @@ -130,6 +198,9 @@ func (q *Query) ExpandWildCardPath(wildCardPath string) ([]string, error) { if err != nil { return nil, err } + if expdPaths == nil { + return nil, errors.New("no counter paths found") + } return UTF16ToStringArray(expdPaths), nil } @@ -203,7 +274,7 @@ func UTF16ToStringArray(buf []uint16) []string { stringLine := UTF16PtrToString(&buf[0]) for stringLine != "" { strings = append(strings, stringLine) - nextLineStart += len(stringLine) + 1 + nextLineStart += len([]rune(stringLine)) + 1 remainingBuf := buf[nextLineStart:] stringLine = UTF16PtrToString(&remainingBuf[0]) } diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows_test.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows_test.go index 0699daf1..4e45ec82 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_query_windows_test.go @@ -18,6 +18,7 @@ package perfmon import ( + "syscall" "testing" "github.com/stretchr/testify/assert" @@ -34,13 +35,15 @@ func TestOpenSuccessful(t *testing.T) { // TestAddCounterInvalidArgWhenQueryClosed will check if addcounter func fails when query is closed.
func TestAddCounterInvalidArgWhenQueryClosed(t *testing.T) { var q Query - counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName"} - queryPath, err := q.ExpandWildCardPath(validQuery) - if err != nil { - t.Fatal(err) + queryPath, err := q.GetCounterPaths(validQuery) + // if windows os language is ENG then err will be nil, else the GetCounterPaths will execute the AddCounter + if assert.NoError(t, err) { + counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName"} + err = q.AddCounter(queryPath[0], counter, false) + assert.Error(t, err, PDH_INVALID_HANDLE) + } else { + assert.Error(t, err, PDH_INVALID_ARGUMENT) } - err = q.AddCounter(queryPath[0], counter, false) - assert.EqualValues(t, err, PDH_INVALID_ARGUMENT) } // func TestGetFormattedCounterValuesEmptyCounterList will check if getting the counter values will fail when no counter handles are added. @@ -68,7 +71,7 @@ func TestSuccessfulQuery(t *testing.T) { } defer q.Close() counter := CounterConfig{Format: "float", InstanceName: "TestInstanceName"} - queryPath, err := q.ExpandWildCardPath(validQuery) + queryPath, err := q.GetCounterPaths(validQuery) if err != nil { t.Fatal(err) } @@ -87,3 +90,44 @@ func TestSuccessfulQuery(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, list) } + +// TestInstanceNameRegexp tests regular expression for instance. +func TestInstanceNameRegexp(t *testing.T) { + queryPaths := []string{`\SQLServer:Databases(*)\Log File(s) Used Size (KB)`, `\Search Indexer(*)\L0 Indexes (Wordlists)`, + `\Search Indexer(*)\L0 Merges (flushes) Now.`, `\NUMA Node Memory(*)\Free & Zero Page List MBytes`} + for _, path := range queryPaths { + matches := instanceNameRegexp.FindStringSubmatch(path) + if assert.Len(t, matches, 2, "regular expression did not return any matches") { + assert.Equal(t, matches[1], "*") + } + } +} + +// TestObjectNameRegexp tests regular expression for object. +func TestObjectNameRegexp(t *testing.T) { + queryPaths := []string{`\Web Service Cache\Output Cache Current Flushed Items`, + `\Web Service Cache\Output Cache Total Flushed Items`, `\Web Service Cache\Total Flushed Metadata`, + `\Web Service Cache\Kernel: Current URIs Cached`} + for _, path := range queryPaths { + matches := objectNameRegexp.FindStringSubmatch(path) + if assert.Len(t, matches, 2, "regular expression did not return any matches") { + assert.Equal(t, matches[1], "Web Service Cache") + } + } +} + +func TestUTF16ToStringArray(t *testing.T) { + var array = []string{"\\\\DESKTOP-RFOOE09\\Physikalischer Datenträger(0 C:)\\Schreibvorgänge/s", "\\\\DESKTOP-RFOOE09\\Physikalischer Datenträger(_Total)\\Schreibvorgänge/s", ""} + var unicode []uint16 + for _, i := range array { + uni, err := syscall.UTF16FromString(i) + assert.NoError(t, err) + unicode = append(unicode, uni...) 
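+ // each UTF16FromString result keeps its terminating NUL, so the appended buffer forms a multi-string; the trailing empty entry in the array supplies the final double-NUL terminator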
+ } + response := UTF16ToStringArray(unicode) + assert.NotNil(t, response) + assert.Equal(t, len(response), 2) + for _, res := range response { + assert.Contains(t, array, res) + } +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go index 82005b5e..b817ddae 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows.go @@ -23,13 +23,16 @@ import ( "strconv" "syscall" "unicode/utf16" + "unsafe" "golang.org/x/sys/windows" ) // Windows API calls //sys _PdhOpenQuery(dataSource *uint16, userData uintptr, query *PdhQueryHandle) (errcode error) [failretval!=0] = pdh.PdhOpenQueryW -//sys _PdhAddCounter(query PdhQueryHandle, counterPath string, userData uintptr, counter *PdhCounterHandle) (errcode error) [failretval!=0] = pdh.PdhAddEnglishCounterW +//sys _PdhAddEnglishCounter(query PdhQueryHandle, counterPath string, userData uintptr, counter *PdhCounterHandle) (errcode error) [failretval!=0] = pdh.PdhAddEnglishCounterW +//sys _PdhAddCounter(query PdhQueryHandle, counterPath string, userData uintptr, counter *PdhCounterHandle) (errcode error) [failretval!=0] = pdh.PdhAddCounterW +//sys _PdhRemoveCounter(counter PdhCounterHandle) (errcode error) [failretval!=0] = pdh.PdhRemoveCounter //sys _PdhCollectQueryData(query PdhQueryHandle) (errcode error) [failretval!=0] = pdh.PdhCollectQueryData //sys _PdhGetFormattedCounterValueDouble(counter PdhCounterHandle, format PdhCounterFormat, counterType *uint32, value *PdhCounterValueDouble) (errcode error) [failretval!=0] = pdh.PdhGetFormattedCounterValue //sys _PdhGetFormattedCounterValueLarge(counter PdhCounterHandle, format PdhCounterFormat, counterType *uint32, value *PdhCounterValueLarge) (errcode error) [failretval!=0] = pdh.PdhGetFormattedCounterValue @@ -37,6 +40,7 @@ import ( //sys _PdhCloseQuery(query PdhQueryHandle) (errcode error) [failretval!=0] = pdh.PdhCloseQuery //sys _PdhExpandWildCardPath(dataSource *uint16, wildcardPath *uint16, expandedPathList *uint16, pathListLength *uint32) (errcode error) [failretval!=0] = pdh.PdhExpandWildCardPathW //sys _PdhExpandCounterPath(wildcardPath *uint16, expandedPathList *uint16, pathListLength *uint32) (errcode error) [failretval!=0] = pdh.PdhExpandCounterPathW +//sys _PdhGetCounterInfo(counter PdhCounterHandle, text uint16, size *uint32, lpBuffer *byte) (errcode error) [failretval!=0] = pdh.PdhGetCounterInfoW type PdhQueryHandle uintptr @@ -46,6 +50,28 @@ type PdhCounterHandle uintptr var InvalidCounterHandle = ^PdhCounterHandle(0) +// PdhCounterInfo struct contains the performance counter details +type PdhCounterInfo struct { + DwLength uint32 + DwType uint32 + CVersion uint32 + CStatus uint32 + LScale int32 + LDefaultScale int32 + DwUserData *uint32 + DwQueryUserData *uint32 + SzFullPath *uint16 // pointer to a string + SzMachineName *uint16 // pointer to a string + SzObjectName *uint16 // pointer to a string + SzInstanceName *uint16 // pointer to a string + SzParentInstance *uint16 // pointer to a string + DwInstanceIndex uint32 // pointer to a string + SzCounterName *uint16 // pointer to a string + Padding [4]byte + SzExplainText *uint16 // pointer to a string + DataBuffer [1]uint32 // pointer to an extra space +} + // PdhCounterValueDouble for double values type PdhCounterValueDouble struct { CStatus uint32 @@ -88,6 +114,16 @@ func PdhOpenQuery(dataSource 
string, userData uintptr) (PdhQueryHandle, error) { return handle, nil } +// PdhAddEnglishCounter adds the specified counter to the query. +func PdhAddEnglishCounter(query PdhQueryHandle, counterPath string, userData uintptr) (PdhCounterHandle, error) { + var handle PdhCounterHandle + if err := _PdhAddEnglishCounter(query, counterPath, userData, &handle); err != nil { + return InvalidCounterHandle, PdhErrno(err.(syscall.Errno)) + } + + return handle, nil +} + // PdhAddCounter adds the specified counter to the query. func PdhAddCounter(query PdhQueryHandle, counterPath string, userData uintptr) (PdhCounterHandle, error) { var handle PdhCounterHandle @@ -98,6 +134,15 @@ func PdhAddCounter(query PdhQueryHandle, counterPath string, userData uintptr) ( return handle, nil } +// PdhRemoveCounter removes the specified counter from the query. +func PdhRemoveCounter(counter PdhCounterHandle) error { + if err := _PdhRemoveCounter(counter); err != nil { + return PdhErrno(err.(syscall.Errno)) + } + + return nil +} + // PdhCollectQueryData collects the current raw data value for all counters in the specified query. func PdhCollectQueryData(query PdhQueryHandle) error { if err := _PdhCollectQueryData(query); err != nil { @@ -172,6 +217,27 @@ func PdhExpandCounterPath(utfPath *uint16) ([]uint16, error) { return nil, nil } +// PdhGetCounterInfo returns the counter information for the given handle +func PdhGetCounterInfo(handle PdhCounterHandle) (*PdhCounterInfo, error) { + var bufSize uint32 + var buff []byte + if err := _PdhGetCounterInfo(handle, 0, &bufSize, nil); err != nil { + if PdhErrno(err.(syscall.Errno)) != PDH_MORE_DATA { + return nil, PdhErrno(err.(syscall.Errno)) + } + buff = make([]byte, bufSize) + bufSize = uint32(len(buff)) + + if err = _PdhGetCounterInfo(handle, 0, &bufSize, &buff[0]); err == nil { + counterInfo := (*PdhCounterInfo)(unsafe.Pointer(&buff[0])) + if counterInfo != nil { + return counterInfo, nil + } + } + } + return nil, nil +} + // PdhCloseQuery closes all counters contained in the specified query. func PdhCloseQuery(query PdhQueryHandle) error { if err := _PdhCloseQuery(query); err != nil { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows_test.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows_test.go index 2579eb14..be08eac3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows_test.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/pdh_windows_test.go @@ -44,7 +44,7 @@ func TestPdhOpenQueryInvalidQuery(t *testing.T) { func TestPdhAddCounterInvalidCounter(t *testing.T) { handle, err := PdhAddCounter(InvalidQueryHandle, validQuery, 0) assert.EqualValues(t, handle, InvalidCounterHandle) - assert.EqualValues(t, err, PDH_INVALID_ARGUMENT) + assert.EqualValues(t, err, PDH_INVALID_HANDLE) } // TestPdhGetFormattedCounterValueInvalidCounter will test for invalid counters.
@@ -88,8 +88,20 @@ func TestPdhSuccessfulCounterRetrieval(t *testing.T) { t.Fatal(err) } queryList, err := PdhExpandWildCardPath(utfPath) - if err != nil { - t.Fatal(err) + if err == PDH_CSTATUS_NO_OBJECT || err == PDH_CSTATUS_NO_COUNTER { + handle, err := PdhAddEnglishCounter(queryHandle, validQuery, 0) + if err != nil { + t.Fatal(err) + } + defer PdhRemoveCounter(handle) + info, err := PdhGetCounterInfo(handle) + if err != nil { + t.Fatal(err) + } + queryList, err = PdhExpandWildCardPath(info.SzFullPath) + if err != nil { + t.Fatal(err) + } } queries := UTF16ToStringArray(queryList) var counters []PdhCounterHandle diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go index a73dbb57..05210599 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/perfmon.go @@ -80,7 +80,6 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) { if err != nil { return nil, errors.Wrap(err, "initialization of reader failed") } - return &MetricSet{ BaseMetricSet: base, reader: reader, @@ -88,17 +87,35 @@ }, nil } -func (m *MetricSet) Fetch(report mb.ReporterV2) { +// Fetch fetches events and reports them upstream +func (m *MetricSet) Fetch(report mb.ReporterV2) error { + // if the ignore_non_existent_counters flag is set and no valid counter paths are found, the Read func would still execute, so a check is done beforehand + if len(m.reader.query.counters) == 0 { + return errors.New("no counters to read") + } + + // refresh performance counter list + // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. + // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). + // A flag (reader.executed) is set once the second call has been executed, else the refresh would fail + if m.reader.executed { err := m.reader.RefreshCounterPaths() if err != nil { return errors.Wrap(err, "failed retrieving counters") } } events, err := m.reader.Read() if err != nil { - m.log.Debugw("Failed reading counters", "error", err) - err = errors.Wrap(err, "failed reading counters") - report.Error(err) + return errors.Wrap(err, "failed reading counters") } for _, event := range events { - report.Event(event) + isOpen := report.Event(event) + if !isOpen { + break + } } + return nil } // Close will be called when metricbeat is stopped, should close the query. diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/reader.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/reader.go index 2f8da23a..b7837862 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/reader.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/reader.go @@ -37,12 +37,12 @@ var ( // Reader will contain the config options type Reader struct { - query Query // PDH Query - instanceLabel map[string]string // Mapping of counter path to key used for the label (e.g. processor.name) - measurement map[string]string // Mapping of counter path to key used for the value (e.g. processor.cpu_time). - executed bool // Indicates if the query has been executed.
- log *logp.Logger // - groupMeasurements bool // Indicates if measurements with the same instance label should be sent in the same event + query Query // PDH Query + instanceLabel map[string]string // Mapping of counter path to key used for the label (e.g. processor.name) + measurement map[string]string // Mapping of counter path to key used for the value (e.g. processor.cpu_time). + executed bool // Indicates if the query has been executed. + log *logp.Logger // + config Config // Metricset configuration } // NewReader creates a new instance of Reader. @@ -51,16 +51,15 @@ func NewReader(config Config) (*Reader, error) { if err := query.Open(); err != nil { return nil, err } - r := &Reader{ - query: query, - instanceLabel: map[string]string{}, - measurement: map[string]string{}, - log: logp.NewLogger("perfmon"), - groupMeasurements: config.GroupMeasurements, + query: query, + instanceLabel: map[string]string{}, + measurement: map[string]string{}, + log: logp.NewLogger("perfmon"), + config: config, } for _, counter := range config.CounterConfig { - childQueries, err := query.ExpandWildCardPath(counter.Query) + childQueries, err := query.GetCounterPaths(counter.Query) if err != nil { if config.IgnoreNECounters { switch err { @@ -77,15 +76,18 @@ func NewReader(config Config) (*Reader, error) { } // check if the pdhexpandcounterpath/pdhexpandwildcardpath functions have expanded the counter successfully. if len(childQueries) == 0 || (len(childQueries) == 1 && strings.Contains(childQueries[0], "*")) { - query.Close() + // covering cases when PdhExpandWildCardPathW returns no counter paths or is unable to expand and the ignore_non_existent_counters flag is set + if config.IgnoreNECounters { + r.log.Infow("Ignoring non existent counter", "initial query", counter.Query, + logp.Namespace("perfmon"), "expanded query", childQueries) + continue + } return nil, errors.Errorf(`failed to expand counter (query="%v")`, counter.Query) } for _, v := range childQueries { if err := query.AddCounter(v, counter, len(childQueries) > 1); err != nil { - query.Close() return nil, errors.Wrapf(err, `failed to add counter (query="%v")`, counter.Query) } - r.instanceLabel[v] = counter.InstanceLabel r.measurement[v] = counter.MeasurementLabel } @@ -94,8 +96,48 @@ func NewReader(config Config) (*Reader, error) { return r, nil } +// RefreshCounterPaths will recheck for any new instances and add them to the counter list +func (r *Reader) RefreshCounterPaths() error { + var newCounters []string + for _, counter := range r.config.CounterConfig { + childQueries, err := r.query.GetCounterPaths(counter.Query) + if err != nil { + if r.config.IgnoreNECounters { + switch err { + case PDH_CSTATUS_NO_COUNTER, PDH_CSTATUS_NO_COUNTERNAME, + PDH_CSTATUS_NO_INSTANCE, PDH_CSTATUS_NO_OBJECT: + r.log.Infow("Ignoring non existent counter", "error", err, + logp.Namespace("perfmon"), "query", counter.Query) + continue + } + } else { + return errors.Wrapf(err, `failed to expand counter (query="%v")`, counter.Query) + } + } + newCounters = append(newCounters, childQueries...) 
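+ // remember every path that is still returned so RemoveUnusedCounters can drop the handles of counters that no longer exist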
+ // there are cases when the ExpandWildCardPath will retrieve a successful status but not an expanded query so we need to check for the size of the list + if err == nil && len(childQueries) >= 1 && !strings.Contains(childQueries[0], "*") { + for _, v := range childQueries { + if err := r.query.AddCounter(v, counter, len(childQueries) > 1); err != nil { + return errors.Wrapf(err, "failed to add counter (query='%v')", counter.Query) + } + r.instanceLabel[v] = counter.InstanceLabel + r.measurement[v] = counter.MeasurementLabel + } + } + } + err := r.query.RemoveUnusedCounters(newCounters) + if err != nil { + return errors.Wrap(err, "failed removing unused counter values") + } + + return nil +} + // Read executes a query and returns those values in an event. func (r *Reader) Read() ([]mb.Event, error) { + // Some counters, such as rate counters, require two counter values in order to compute a displayable value. In this case we must call PdhCollectQueryData twice before calling PdhGetFormattedCounterValue. + // For more information, see Collecting Performance Data (https://docs.microsoft.com/en-us/windows/desktop/PerfCtrs/collecting-performance-data). if err := r.query.CollectData(); err != nil { return nil, errors.Wrap(err, "failed querying counter values") } @@ -119,7 +161,7 @@ func (r *Reader) Read() ([]mb.Event, error) { } var eventKey string - if r.groupMeasurements && val.Err == nil { + if r.config.GroupMeasurements && val.Err == nil { // Send measurements with the same instance label as part of the same event eventKey = val.Instance } else { diff --git a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/zpdh_windows.go b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/zpdh_windows.go index 50947364..85cb93dc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/zpdh_windows.go +++ b/vendor/github.com/elastic/beats/metricbeat/module/windows/perfmon/zpdh_windows.go @@ -58,11 +58,14 @@ var ( procPdhOpenQueryW = modpdh.NewProc("PdhOpenQueryW") procPdhAddEnglishCounterW = modpdh.NewProc("PdhAddEnglishCounterW") + procPdhAddCounterW = modpdh.NewProc("PdhAddCounterW") + procPdhRemoveCounter = modpdh.NewProc("PdhRemoveCounter") procPdhCollectQueryData = modpdh.NewProc("PdhCollectQueryData") procPdhGetFormattedCounterValue = modpdh.NewProc("PdhGetFormattedCounterValue") procPdhCloseQuery = modpdh.NewProc("PdhCloseQuery") procPdhExpandWildCardPathW = modpdh.NewProc("PdhExpandWildCardPathW") procPdhExpandCounterPathW = modpdh.NewProc("PdhExpandCounterPathW") + procPdhGetCounterInfoW = modpdh.NewProc("PdhGetCounterInfoW") ) func _PdhOpenQuery(dataSource *uint16, userData uintptr, query *PdhQueryHandle) (errcode error) { @@ -73,6 +76,23 @@ func _PdhOpenQuery(dataSource *uint16, userData uintptr, query *PdhQueryHandle) return } +func _PdhAddEnglishCounter(query PdhQueryHandle, counterPath string, userData uintptr, counter *PdhCounterHandle) (errcode error) { + var _p0 *uint16 + _p0, errcode = syscall.UTF16PtrFromString(counterPath) + if errcode != nil { + return + } + return __PdhAddEnglishCounter(query, _p0, userData, counter) +} + +func __PdhAddEnglishCounter(query PdhQueryHandle, counterPath *uint16, userData uintptr, counter *PdhCounterHandle) (errcode error) { + r0, _, _ := syscall.Syscall6(procPdhAddEnglishCounterW.Addr(), 4, uintptr(query), uintptr(unsafe.Pointer(counterPath)), uintptr(userData), uintptr(unsafe.Pointer(counter)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + func _PdhAddCounter(query 
PdhQueryHandle, counterPath string, userData uintptr, counter *PdhCounterHandle) (errcode error) { var _p0 *uint16 _p0, errcode = syscall.UTF16PtrFromString(counterPath) @@ -83,7 +103,15 @@ func _PdhAddCounter(query PdhQueryHandle, counterPath string, userData uintptr, } func __PdhAddCounter(query PdhQueryHandle, counterPath *uint16, userData uintptr, counter *PdhCounterHandle) (errcode error) { - r0, _, _ := syscall.Syscall6(procPdhAddEnglishCounterW.Addr(), 4, uintptr(query), uintptr(unsafe.Pointer(counterPath)), uintptr(userData), uintptr(unsafe.Pointer(counter)), 0, 0) + r0, _, _ := syscall.Syscall6(procPdhAddCounterW.Addr(), 4, uintptr(query), uintptr(unsafe.Pointer(counterPath)), uintptr(userData), uintptr(unsafe.Pointer(counter)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} + +func _PdhRemoveCounter(counter PdhCounterHandle) (errcode error) { + r0, _, _ := syscall.Syscall(procPdhRemoveCounter.Addr(), 1, uintptr(counter), 0, 0) if r0 != 0 { errcode = syscall.Errno(r0) } @@ -145,3 +173,11 @@ func _PdhExpandCounterPath(wildcardPath *uint16, expandedPathList *uint16, pathL } return } + +func _PdhGetCounterInfo(counter PdhCounterHandle, text uint16, size *uint32, lpBuffer *byte) (errcode error) { + r0, _, _ := syscall.Syscall6(procPdhGetCounterInfoW.Addr(), 4, uintptr(counter), uintptr(text), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(lpBuffer)), 0, 0) + if r0 != 0 { + errcode = syscall.Errno(r0) + } + return +} diff --git a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/Dockerfile b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/Dockerfile index c523a060..7c02354e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/Dockerfile +++ b/vendor/github.com/elastic/beats/metricbeat/module/zookeeper/_meta/Dockerfile @@ -1,2 +1,3 @@ -FROM jplock/zookeeper:3.5.5 +ARG ZOOKEEPER_VERSION +FROM jplock/zookeeper:${ZOOKEEPER_VERSION} HEALTHCHECK --interval=1s --retries=90 CMD nc -z localhost 2181 diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled index 8f29c0e1..e4a069b5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/aerospike.yml.disabled @@ -1,5 +1,5 @@ # Module: aerospike -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-aerospike.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-aerospike.html - module: aerospike #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled index 57e8c2a6..bcb0c131 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/apache.yml.disabled @@ -1,5 +1,5 @@ # Module: apache -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-apache.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-apache.html - module: apache #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/beat-xpack.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/beat-xpack.yml.disabled index 5ddd5081..31b93623 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/beat-xpack.yml.disabled +++ 
b/vendor/github.com/elastic/beats/metricbeat/modules.d/beat-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: beat -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-beat.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-beat.html - module: beat metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/beat.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/beat.yml.disabled index 4cc32cde..28a07374 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/beat.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/beat.yml.disabled @@ -1,5 +1,5 @@ # Module: beat -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-beat.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-beat.html - module: beat metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled index 04e6e516..8beafd0c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/ceph.yml.disabled @@ -1,5 +1,5 @@ # Module: ceph -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-ceph.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-ceph.html - module: ceph #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/consul.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/consul.yml.disabled index 48391c86..e1d633d9 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/consul.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/consul.yml.disabled @@ -1,5 +1,5 @@ # Module: consul -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-consul.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-consul.html - module: consul metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled index 8fd48b66..83578d40 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/couchbase.yml.disabled @@ -1,5 +1,5 @@ # Module: couchbase -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-couchbase.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-couchbase.html - module: couchbase #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/couchdb.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/couchdb.yml.disabled index 668ad058..82138ce4 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/couchdb.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/couchdb.yml.disabled @@ -1,5 +1,5 @@ # Module: couchdb -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-couchdb.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-couchdb.html - module: couchdb metricsets: ["server"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled index 4f2261f9..d94c4adb 100644 --- 
a/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/docker.yml.disabled @@ -1,5 +1,5 @@ # Module: docker -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-docker.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-docker.html - module: docker #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled index a35a3e9e..3efde2f3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/dropwizard.yml.disabled @@ -1,5 +1,5 @@ # Module: dropwizard -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-dropwizard.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-dropwizard.html - module: dropwizard #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch-xpack.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch-xpack.yml.disabled index 0b2ea6a7..3aafd132 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch-xpack.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch-xpack.yml.disabled @@ -1,10 +1,11 @@ # Module: elasticsearch -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-elasticsearch.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-elasticsearch.html - module: elasticsearch metricsets: - ccr - cluster_stats + - enrich - index - index_recovery - index_summary diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled index 8876d76f..0a80d69e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/elasticsearch.yml.disabled @@ -1,5 +1,5 @@ # Module: elasticsearch -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-elasticsearch.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-elasticsearch.html - module: elasticsearch #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/envoyproxy.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/envoyproxy.yml.disabled index 4526cc5f..e15c8ed6 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/envoyproxy.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/envoyproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: envoyproxy -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-envoyproxy.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-envoyproxy.html - module: envoyproxy #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled index 53c7627c..dcf26f1d 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/etcd.yml.disabled @@ -1,5 +1,5 @@ # Module: etcd -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-etcd.html +# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-etcd.html - module: etcd #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/golang.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/golang.yml.disabled index ffe04562..7fa9e654 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/golang.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/golang.yml.disabled @@ -1,5 +1,5 @@ # Module: golang -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-golang.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-golang.html - module: golang #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled index adf9cf71..be549515 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/graphite.yml.disabled @@ -1,5 +1,5 @@ # Module: graphite -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-graphite.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-graphite.html - module: graphite #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled index 8d034a7f..da2052ea 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/haproxy.yml.disabled @@ -1,5 +1,5 @@ # Module: haproxy -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-haproxy.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-haproxy.html - module: haproxy #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled index b19cb049..f31d4ebb 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/http.yml.disabled @@ -1,5 +1,5 @@ # Module: http -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-http.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-http.html - module: http #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled index 8da3bb07..6fc61bc5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/jolokia.yml.disabled @@ -1,5 +1,5 @@ # Module: jolokia -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-jolokia.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-jolokia.html - module: jolokia #metricsets: ["jmx"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled index a8c07876..ef97c36c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kafka.yml.disabled @@ -1,5 +1,5 @@ # Module: kafka -# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-kafka.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-kafka.html - module: kafka #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana-xpack.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana-xpack.yml.disabled index c8119e62..14a404bc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana-xpack.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: kibana -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-kibana.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-kibana.html - module: kibana metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled index 93c1625e..8b52aa92 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kibana.yml.disabled @@ -1,5 +1,5 @@ # Module: kibana -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-kibana.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-kibana.html - module: kibana #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled index 163848c0..1b52eef5 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kubernetes.yml.disabled @@ -1,5 +1,5 @@ # Module: kubernetes -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-kubernetes.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-kubernetes.html # Node metrics, from kubelet: - module: kubernetes @@ -23,6 +23,8 @@ #annotations.dedot: true # When used outside the cluster: #host: node_name + # If kube_config is not set, the KUBECONFIG environment variable will be checked, + # and if not present it will fall back to InCluster #kube_config: ~/.kube/config # State metrics from kube-state-metrics service: @@ -35,6 +37,7 @@ # - state_pod # - state_container # - state_cronjob +# - state_resourcequota # period: 10s # hosts: ["kube-state-metrics:8080"] # add_metadata: true diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled index ec7efdf2..59f124b0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/kvm.yml.disabled @@ -1,5 +1,5 @@ # Module: kvm -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-kvm.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-kvm.html - module: kvm #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash-xpack.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash-xpack.yml.disabled index 2fd7df9a..c022684e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash-xpack.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash-xpack.yml.disabled @@ -1,5 +1,5 @@ # Module: logstash -# Docs: 
https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-logstash.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-logstash.html - module: logstash metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled index 8729ad47..dea3bd7c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/logstash.yml.disabled @@ -1,5 +1,5 @@ # Module: logstash -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-logstash.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-logstash.html - module: logstash #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled index 903b01cd..5be91192 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/memcached.yml.disabled @@ -1,5 +1,5 @@ # Module: memcached -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-memcached.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-memcached.html - module: memcached # metricsets: ["stats"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled index 9eb88a9a..0cd4628a 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/mongodb.yml.disabled @@ -1,5 +1,5 @@ # Module: mongodb -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-mongodb.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-mongodb.html - module: mongodb #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled index 3316143b..b0ebad35 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/munin.yml.disabled @@ -1,5 +1,5 @@ # Module: munin -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-munin.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-munin.html - module: munin #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled index 5e78c6a3..be3a2fd3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/mysql.yml.disabled @@ -1,5 +1,5 @@ # Module: mysql -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-mysql.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-mysql.html - module: mysql #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/nats.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/nats.yml.disabled index 9e224eba..eec23787 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/nats.yml.disabled +++ 
b/vendor/github.com/elastic/beats/metricbeat/modules.d/nats.yml.disabled @@ -1,5 +1,5 @@ # Module: nats -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-nats.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-nats.html - module: nats metricsets: ["connections", "routes", "stats", "subscriptions"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled index 608a7a5a..f94e3fca 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/nginx.yml.disabled @@ -1,5 +1,5 @@ # Module: nginx -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-nginx.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-nginx.html - module: nginx #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled index 6f3ed6fc..825a6ed2 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/php_fpm.yml.disabled @@ -1,5 +1,5 @@ # Module: php_fpm -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-php_fpm.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-php_fpm.html - module: php_fpm #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled index c98e75e4..8ca3cd01 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/postgresql.yml.disabled @@ -1,5 +1,5 @@ # Module: postgresql -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-postgresql.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-postgresql.html - module: postgresql #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled index 35f91f7d..657ffb4f 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/prometheus.yml.disabled @@ -1,5 +1,5 @@ # Module: prometheus -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-prometheus.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-prometheus.html - module: prometheus period: 10s diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled index 3b3f9d2a..91328c25 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/rabbitmq.yml.disabled @@ -1,5 +1,5 @@ # Module: rabbitmq -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-rabbitmq.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-rabbitmq.html - module: rabbitmq #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled 
b/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled index 93555e19..3f4e4994 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/redis.yml.disabled @@ -1,5 +1,5 @@ # Module: redis -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-redis.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-redis.html - module: redis #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml b/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml index 34379f07..5d2ac90c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/system.yml @@ -1,5 +1,5 @@ # Module: system -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-system.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-system.html - module: system period: 10s diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/traefik.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/traefik.yml.disabled index faf59262..dd4f5861 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/traefik.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/traefik.yml.disabled @@ -1,5 +1,5 @@ # Module: traefik -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-traefik.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-traefik.html - module: traefik metricsets: ["health"] diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled index c3744080..be159c19 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/uwsgi.yml.disabled @@ -1,5 +1,5 @@ # Module: uwsgi -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-uwsgi.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-uwsgi.html - module: uwsgi #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled index a9e2f4ae..055aac8c 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/vsphere.yml.disabled @@ -1,5 +1,5 @@ # Module: vsphere -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-vsphere.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-vsphere.html - module: vsphere #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled index 0a94c04f..99d4e405 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/windows.yml.disabled @@ -1,5 +1,5 @@ # Module: windows -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-windows.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-windows.html - module: windows #metricsets: diff --git 
a/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled b/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled index 2756d622..ee03fc29 100644 --- a/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled +++ b/vendor/github.com/elastic/beats/metricbeat/modules.d/zookeeper.yml.disabled @@ -1,5 +1,5 @@ # Module: zookeeper -# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.4/metricbeat-module-zookeeper.html +# Docs: https://www.elastic.co/guide/en/beats/metricbeat/7.5/metricbeat-module-zookeeper.html - module: zookeeper #metricsets: diff --git a/vendor/github.com/elastic/beats/metricbeat/scripts/generate_imports_helper.py b/vendor/github.com/elastic/beats/metricbeat/scripts/generate_imports_helper.py index 8a4fe83e..3a1df4a3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/scripts/generate_imports_helper.py +++ b/vendor/github.com/elastic/beats/metricbeat/scripts/generate_imports_helper.py @@ -6,22 +6,60 @@ their factories with the global registry. This package can be imported in the main package to automatically register all of the standard supported Metricbeat modules.""" +modules_by_platform = [ + { + "file_suffix": "_docker", + "build_tags": "// +build linux darwin windows\n\n", + "modules": ["docker", "kubernetes"], + }, +] + def get_importable_lines(go_beat_path, import_line): path = abspath("module") - imported_lines = [] + imports_by_module = [] + common_lines = [] modules = [m for m in listdir(path) if isdir(join(path, m)) and m != "_meta"] - for module in modules: - module_import = import_line.format(beat_path=go_beat_path, module="module", name=module) - imported_lines.append(module_import) + not_common_modules = [] + for m in modules_by_platform: + not_common_modules.extend(m["modules"]) - module_path = join(path, module) - ignore = ["_meta", "vendor", "mtest"] - metricsets = [m for m in listdir(module_path) if isdir(join(module_path, m)) and m not in ignore] - for metricset in metricsets: - metricset_name = "{}/{}".format(module, metricset) - metricset_import = import_line.format(beat_path=go_beat_path, module="module", name=metricset_name) - imported_lines.append(metricset_import) + for platform_info in modules_by_platform: + lines = [] + for module in modules: + module_import = import_line.format(beat_path=go_beat_path, module="module", name=module) + if module in not_common_modules: + lines = _collect_imports_from_module(path, module, module_import, go_beat_path, import_line, lines) + else: + common_lines = _collect_imports_from_module( + path, module, module_import, go_beat_path, import_line, common_lines) + + if lines is not None: + imports_by_module.append({ + "file_suffix": platform_info["file_suffix"], + "build_tags": platform_info["build_tags"], + "imported_lines": lines, + }) + + imports_by_module.append({ + "file_suffix": "_common", + "build_tags": "", + "imported_lines": sorted(common_lines), + }) + + return imports_by_module + + +def _collect_imports_from_module(path, module, module_import, go_beat_path, import_line, imported_lines): + imported_lines.append(module_import) + + module_path = join(path, module) + ignore = ["_meta", "vendor", "mtest"] + metricsets = [m for m in listdir(module_path) if isdir(join(module_path, m)) and m not in ignore] + for metricset in metricsets: + metricset_name = "{}/{}".format(module, metricset) + metricset_import = import_line.format(beat_path=go_beat_path, module="module", name=metricset_name) + imported_lines.append(metricset_import) return 
sorted(imported_lines) diff --git a/vendor/github.com/elastic/beats/metricbeat/scripts/mage/config.go b/vendor/github.com/elastic/beats/metricbeat/scripts/mage/config.go index b27af4e6..d9e8c5fc 100644 --- a/vendor/github.com/elastic/beats/metricbeat/scripts/mage/config.go +++ b/vendor/github.com/elastic/beats/metricbeat/scripts/mage/config.go @@ -44,6 +44,9 @@ func configFileParams(moduleDirs ...string) devtools.ConfigFileParams { devtools.OSSBeatDir("_meta/beat.docker.yml"), devtools.LibbeatDir("_meta/config.docker.yml"), }, + ExtraVars: map[string]interface{}{ + "UseKubernetesMetadataProcessor": true, + }, } } diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_beat.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_beat.py index f93a32fd..4ce00873 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_beat.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_beat.py @@ -3,6 +3,7 @@ import metricbeat import unittest import time from parameterized import parameterized +from elasticsearch import Elasticsearch class Test(metricbeat.BaseTest): @@ -17,31 +18,35 @@ class Test(metricbeat.BaseTest): """ beat metricset tests """ - self.check_metricset("beat", metricset, self.get_hosts(), self.FIELDS + ["service"]) + self.check_metricset("beat", metricset, [self.compose_host("metricbeat")], self.FIELDS + ["service"]) @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") def test_xpack(self): """ beat-xpack module tests """ - self.render_config_template(modules=[{ - "name": "beat", - "metricsets": self.METRICSETS, - "hosts": self.get_hosts(), - "period": "1s", - "extras": { - "xpack.enabled": "true" - } - }]) # Give the monitored Metricbeat instance enough time to collect metrics and index them # into Elasticsearch, so it may establish the connection to Elasticsearch and determine # its cluster UUID in the process. Otherwise, the monitoring Metricbeat instance will # show errors in its log about not being able to determine the Elasticsearch cluster UUID # to be associated with the monitored Metricbeat instance. 
- time.sleep(30) + self.wait_until(cond=self.mb_connected_to_es, max_timeout=50) + + self.render_config_template(modules=[{ + "name": "beat", + "metricsets": self.METRICSETS, + "hosts": [self.compose_host("metricbeat")], + "period": "1s", + "extras": { + "xpack.enabled": "true" + } + }]) proc = self.start_beat() self.wait_until(lambda: self.output_lines() > 0) proc.check_kill_and_wait() self.assert_no_logged_warnings() + + def mb_connected_to_es(self): + return self.service_log_contains('metricbeat', 'Connection to backoff(elasticsearch(') diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_cmd.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_cmd.py index 9f2bc443..fa53e0de 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_cmd.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_cmd.py @@ -138,7 +138,7 @@ class TestCommands(metricbeat.BaseTest): assert exit_code == 0 try: assert any(( - self.log_contains("ERROR error making http request"), + self.log_contains("ERROR error fetching status"), self.log_contains("ERROR timeout waiting for an event"), )) except: diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_couchbase.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_couchbase.py index ad1326ee..c7522e3e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_couchbase.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_couchbase.py @@ -4,9 +4,13 @@ import unittest from parameterized import parameterized +@unittest.skip("See https://github.com/elastic/beats/issues/14660") class Test(metricbeat.BaseTest): - COMPOSE_SERVICES = ['couchbase'] + # Commented out as part of skipping the test. See https://github.com/elastic/beats/issues/14660. + # Otherwise, the tests are skipped but Docker Compose still tries to bring up + # the Couchbase service container and fails. 
+ # COMPOSE_SERVICES = ['couchbase'] FIELDS = ['couchbase'] @parameterized.expand([ diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py index 16189829..84c3ade0 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_haproxy.py @@ -94,8 +94,8 @@ class HaproxyTest(metricbeat.BaseTest): class Haproxy_1_6_Test(HaproxyTest): - COMPOSE_SERVICES = ['haproxy_1_6'] + COMPOSE_ENV = {'HAPROXY_VERSION': '1.6'} class Haproxy_1_7_Test(HaproxyTest): - COMPOSE_SERVICES = ['haproxy_1_7'] + COMPOSE_ENV = {'HAPROXY_VERSION': '1.7'} diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_kafka.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_kafka.py index f5f88201..f08d105e 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_kafka.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_kafka.py @@ -59,10 +59,10 @@ class KafkaTest(metricbeat.BaseTest): class Kafka_1_1_0_Test(KafkaTest): - COMPOSE_SERVICES = ['kafka_1_1_0'] + COMPOSE_ENV = {"KAFKA_VERSION": "1.1.0"} VERSION = "1.1.0" class Kafka_0_10_2_Test(KafkaTest): - COMPOSE_SERVICES = ['kafka_0_10_2'] - VERSION = "0.10.2" + COMPOSE_ENV = {"KAFKA_VERSION": "0.10.2.2"} + VERSION = "0.10.2.2" diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_logstash.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_logstash.py index 25fd2a39..da5022e8 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_logstash.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_logstash.py @@ -28,6 +28,7 @@ class Test(metricbeat.BaseTest): self.check_metricset("logstash", "node_stats", self.get_hosts(), self.FIELDS) @unittest.skipUnless(metricbeat.INTEGRATION_TESTS, "integration test") + @unittest.skip("flaky; see https://github.com/elastic/beats/issues/13947") def test_xpack(self): """ logstash-xpack module tests diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_mysql.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_mysql.py index fc89378b..f0859e32 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_mysql.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_mysql.py @@ -47,24 +47,42 @@ class Test(metricbeat.BaseTest): class TestMysql80(Test): - COMPOSE_SERVICES = ['mysql_8_0'] + COMPOSE_ENV = { + 'MYSQL_VARIANT': 'mysql', + 'MYSQL_VERSION': '8.0.13', + } class TestPercona57(Test): - COMPOSE_SERVICES = ['percona_5_7'] + COMPOSE_ENV = { + 'MYSQL_VARIANT': 'percona', + 'MYSQL_VERSION': '5.7.24', + } class TestPercona80(Test): - COMPOSE_SERVICES = ['percona_8_0'] + COMPOSE_ENV = { + 'MYSQL_VARIANT': 'percona', + 'MYSQL_VERSION': '8.0.13-4', + } class TestMariadb102(Test): - COMPOSE_SERVICES = ['mariadb_10_2'] + COMPOSE_ENV = { + 'MYSQL_VARIANT': 'mariadb', + 'MYSQL_VERSION': '10.2.23', + } class TestMariadb103(Test): - COMPOSE_SERVICES = ['mariadb_10_3'] + COMPOSE_ENV = { + 'MYSQL_VARIANT': 'mariadb', + 'MYSQL_VERSION': '10.3.14', + } class TestMariadb104(Test): - COMPOSE_SERVICES = ['mariadb_10_4'] + COMPOSE_ENV = { + 'MYSQL_VARIANT': 'mariadb', + 'MYSQL_VERSION': '10.4.4', + } diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_nats.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_nats.py index 1254a6c1..de3b6460 100644 --- 
a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_nats.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_nats.py @@ -5,7 +5,7 @@ import unittest NATS_FIELDS = metricbeat.COMMON_FIELDS + ["nats"] -class Test(metricbeat.BaseTest): +class TestNats(metricbeat.BaseTest): COMPOSE_SERVICES = ['nats'] @@ -108,3 +108,7 @@ class Test(metricbeat.BaseTest): self.assertItemsEqual(self.de_dot(NATS_FIELDS), evt.keys(), evt) self.assert_fields_are_documented(evt) + + +class TestNats1_3(TestNats): + COMPOSE_SERVICES = ['nats_1_3'] diff --git a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_redis.py b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_redis.py index 2f65c4ef..9a30ffe3 100644 --- a/vendor/github.com/elastic/beats/metricbeat/tests/system/test_redis.py +++ b/vendor/github.com/elastic/beats/metricbeat/tests/system/test_redis.py @@ -162,8 +162,8 @@ class Test(metricbeat.BaseTest): class TestRedis4(Test): - COMPOSE_SERVICES = ['redis_4'] + COMPOSE_ENV = {'REDIS_VERSION': '4.0.11'} class TestRedis5(Test): - COMPOSE_SERVICES = ['redis_5'] + COMPOSE_ENV = {'REDIS_VERSION': '5.0.5'} diff --git a/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml b/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml index b250e545..a379b1e4 100644 --- a/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml +++ b/vendor/github.com/elastic/beats/packetbeat/_meta/beat.reference.yml @@ -53,6 +53,9 @@ packetbeat.flows: # Configure reporting period. If set to -1, only killed flows will be reported period: 10s + # Set to true to publish fields with null values in events. + #keep_null: false + #========================== Transaction protocols ============================= packetbeat.protocols: @@ -60,6 +63,9 @@ packetbeat.protocols: # Enable ICMPv4 and ICMPv6 monitoring. Default: true #enabled: true + # Set to true to publish fields with null values in events. + #keep_null: false + - type: amqp # Enable AMQP monitoring. Default: true #enabled: true @@ -93,6 +99,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -117,6 +126,9 @@ packetbeat.protocols: # is included in published events. The default is true. Enable `send_response` before enabling this option. #send_response_header: true + # Set to true to publish fields with null values in events. + #keep_null: false + # Configures, by name, the default compression algorithm used to decompress compressed frames. Currently only `snappy` can be configured. # By default no compressor is configured. #compressor: "snappy" @@ -128,6 +140,9 @@ packetbeat.protocols: # Configure the DHCP for IPv4 ports. ports: [67, 68] + # Set to true to publish fields with null values in events. + #keep_null: false + - type: dns # Enable DNS monitoring. Default: true #enabled: true @@ -154,6 +169,9 @@ packetbeat.protocols: # send_request: true # send_response: true + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -216,6 +234,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. 
The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -269,6 +290,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -289,6 +313,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -309,6 +336,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -329,6 +359,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -393,6 +426,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -423,6 +459,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -443,6 +482,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -474,6 +516,9 @@ packetbeat.protocols: # in PEM format under the `raw` key. The default is false. #include_raw_certificates: false + # Set to true to publish fields with null values in events. 
+ #keep_null: false + #=========================== Monitored processes ============================== # Packetbeat can enrich events with information about the process associated diff --git a/vendor/github.com/elastic/beats/packetbeat/beater/packetbeat.go b/vendor/github.com/elastic/beats/packetbeat/beater/packetbeat.go index a4e812ad..c7c6d530 100644 --- a/vendor/github.com/elastic/beats/packetbeat/beater/packetbeat.go +++ b/vendor/github.com/elastic/beats/packetbeat/beater/packetbeat.go @@ -181,6 +181,7 @@ func (pb *packetbeat) setupFlows() error { Processing: beat.ProcessingConfig{ EventMetadata: config.Flows.EventMetadata, Processor: processors, + KeepNull: config.Flows.KeepNull, }, }) if err != nil { diff --git a/vendor/github.com/elastic/beats/packetbeat/config/config.go b/vendor/github.com/elastic/beats/packetbeat/config/config.go index 1c40aa82..f4a1cb7e 100644 --- a/vendor/github.com/elastic/beats/packetbeat/config/config.go +++ b/vendor/github.com/elastic/beats/packetbeat/config/config.go @@ -55,6 +55,7 @@ type Flows struct { Period string `config:"period"` EventMetadata common.EventMetadata `config:",inline"` Processors processors.PluginConfig `config:"processors"` + KeepNull bool `config:"keep_null"` } type ProtocolCommon struct { diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc index 90a19ee2..0ac11ad1 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/configuring-howto.asciidoc @@ -47,36 +47,36 @@ include::./packetbeat-options.asciidoc[] include::./packetbeat-general-options.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] -include::{libbeat-dir}/docs/shared-ilm.asciidoc[] +include::{libbeat-dir}/shared-ilm.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./packetbeat-filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/faq.asciidoc index eedc28ca..da1f4ae6 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/faq.asciidoc +++ 
b/vendor/github.com/elastic/beats/packetbeat/docs/faq.asciidoc @@ -57,8 +57,8 @@ response messages are not sent. include::./faq-mysql-ssl.asciidoc[] -include::{libbeat-dir}/docs/faq-limit-bandwidth.asciidoc[] +include::{libbeat-dir}/faq-limit-bandwidth.asciidoc[] -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] -include::{libbeat-dir}/docs/faq-refresh-index.asciidoc[] +include::{libbeat-dir}/faq-refresh-index.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc index 0b852196..a00f7737 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/gettingstarted.asciidoc @@ -4,7 +4,7 @@ The best way to understand the value of a network packet analytics system like Packetbeat is to try it on your own traffic. -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <> * <> @@ -17,14 +17,14 @@ include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] [[packetbeat-installation]] === Step 1: Install Packetbeat -include::{libbeat-dir}/docs/shared-download-and-install.asciidoc[] +include::{libbeat-dir}/shared-download-and-install.asciidoc[] [[deb]] *deb:* ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -44,7 +44,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -69,7 +69,7 @@ See <> for deploying Docker containers. ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -83,14 +83,14 @@ tar xzvf packetbeat-{version}-darwin-x86_64.tar.gz endif::[] -include::{libbeat-dir}/docs/shared-brew-install.asciidoc[] +include::{libbeat-dir}/shared-brew-install.asciidoc[] [[linux]] *linux:* ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -109,7 +109,7 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {stack-version} of {beatname_uc} has not yet been released. +Version {version} of {beatname_uc} has not yet been released. endif::[] @@ -151,7 +151,7 @@ more information about these options, see <>. 
[[packetbeat-configuration]] === Step 2: Configure Packetbeat -include::{libbeat-dir}/docs/shared-configuring.asciidoc[] +include::{libbeat-dir}/shared-configuring.asciidoc[] To configure Packetbeat: @@ -233,25 +233,25 @@ packetbeat.protocols: ---------------------------------------------------------------------- + -include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] -include::{libbeat-dir}/docs/step-test-config.asciidoc[] +include::{libbeat-dir}/step-test-config.asciidoc[] -include::{libbeat-dir}/docs/step-look-at-config.asciidoc[] +include::{libbeat-dir}/step-look-at-config.asciidoc[] [[packetbeat-template]] === Step 3: Load the index template in Elasticsearch -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [[load-kibana-dashboards]] === Step 4: Set up the Kibana dashboards -include::{libbeat-dir}/docs/dashboards.asciidoc[] +include::{libbeat-dir}/dashboards.asciidoc[] [[packetbeat-starting]] === Step 5: Start Packetbeat @@ -287,7 +287,7 @@ configuration file, or run Packetbeat with `--strict.perms=false` specified. See {beats-ref}/config-file-permissions.html[Config File Ownership and Permissions] in the _Beats Platform Reference_. -include::{libbeat-dir}/docs/shared-brew-run.asciidoc[] +include::{libbeat-dir}/shared-brew-run.asciidoc[] *win:* @@ -325,7 +325,7 @@ To make it easier for you to get application performance insights from packet data, we have created example {beatname_uc} dashboards. You loaded the dashboards earlier when you ran the `setup` command. -include::{libbeat-dir}/docs/opendashboards.asciidoc[] +include::{libbeat-dir}/opendashboards.asciidoc[] The dashboards are provided as examples. We recommend that you {kibana-ref}/dashboard.html[customize] them to meet your needs. 
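The `keep_null` setting that appears throughout the flows and protocol sections of the reference configs above is declared with the `config:"keep_null"` struct tag in `packetbeat/config/config.go` and passed into `beat.ProcessingConfig` for flows in `packetbeat/beater/packetbeat.go`. A minimal configuration sketch, using only option names that appear in this diff (the HTTP ports are illustrative):

packetbeat.flows:
  period: 10s
  # Publish fields with null values instead of dropping them (default: false).
  keep_null: true

packetbeat.protocols:
- type: http
  ports: [80, 8080]
  # The setting can also be enabled per protocol.
  keep_null: true

With the default of `false`, null-valued fields are omitted from published events.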
diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc index 2a717fa9..149fed09 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/index.asciidoc @@ -1,12 +1,13 @@ = Packetbeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: packetbeat :beatname_uc: Packetbeat :beatname_pkg: {beatname_lc} @@ -21,14 +22,18 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :linux_os: :docker_platform: :win_os: +:no_decode_cef_processor: +:no_decode_csv_fields_processor: +:no_script_processor: +:no_timestamp_processor: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] include::./gettingstarted.asciidoc[] -include::{libbeat-dir}/docs/repositories.asciidoc[] +include::{libbeat-dir}/repositories.asciidoc[] include::./setting-up-running.asciidoc[] @@ -38,9 +43,9 @@ include::./configuring-howto.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./visualizing-data-packetbeat.asciidoc[] @@ -50,4 +55,4 @@ include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/overview.asciidoc index 51a84590..b0797cdc 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/overview.asciidoc @@ -44,4 +44,4 @@ network traffic gathered by Packetbeat can be used for analysing the log files gathered by Logstash. This way, you can have network traffic and log analysis in the same system. 
-include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc index 4fefe9b0..50dad2d8 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-filtering.asciidoc @@ -1,7 +1,7 @@ [[filtering-and-enhancing-data]] == Filter and enhance the exported data -include::{libbeat-dir}/docs/processors.asciidoc[] +include::{libbeat-dir}/processors.asciidoc[] For example, the following configuration includes a subset of the Packetbeat DNS fields so that only the requests and their response codes are reported: @@ -70,4 +70,4 @@ processors: fields: ["request", "response"] ---- -include::{libbeat-dir}/docs/processors-using.asciidoc[] +include::{libbeat-dir}/processors-using.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-general-options.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-general-options.asciidoc index 60e017c7..2013d310 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-general-options.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-general-options.asciidoc @@ -4,5 +4,5 @@ You can specify settings in the +{beatname_lc}.yml+ config file to control the general behavior of {beatname_uc}. -include::{libbeat-dir}/docs/generalconfig.asciidoc[] +include::{libbeat-dir}/generalconfig.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc index b04506b3..f8767956 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/packetbeat-options.asciidoc @@ -288,7 +288,7 @@ Here’s an example of the flow information sent by Packetbeat. See "agent": { "hostname": "host.example.com", "name": "host.example.com", - "version": "{stack-version}" + "version": "{version}" }, "destination": { "bytes": 460, @@ -395,6 +395,11 @@ A list of processors to apply to the data generated by the protocol. See <> for information about specifying processors in your config. +[float] +==== `keep_null` + +If this option is set to true, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. [[configuration-protocols]] == Specify which transaction protocols to monitor @@ -547,6 +552,12 @@ A list of processors to apply to the data generated by the protocol. See <> for information about specifying processors in your config. +[float] +==== `keep_null` + +If this option is set to true, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. + [[packetbeat-icmp-options]] === Capture ICMP traffic @@ -1160,9 +1171,9 @@ TLS is a cryptographic protocol that provides secure communications on top of an existing application protocol, like HTTP or MySQL. Packetbeat intercepts the initial handshake in a TLS connection and extracts -useful information which helps an operator to diagnose problems as well as -strengthen the security of his or her network and systems. 
It does not -decrypt any information from the encapsulated protocol nor does it reveal any +useful information that helps operators diagnose problems and +strengthen the security of their network and systems. It does not +decrypt any information from the encapsulated protocol, nor does it reveal any sensitive information such as cryptographic keys. TLS versions 1.0 to 1.3 and SSL 3.0 are supported. diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/running-on-docker.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/running-on-docker.asciidoc index 77fcad1b..66ee487c 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/running-on-docker.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/running-on-docker.asciidoc @@ -1,4 +1,4 @@ -include::{libbeat-dir}/docs/shared-docker.asciidoc[] +include::{libbeat-dir}/shared-docker.asciidoc[] [float] ==== Required network capabilities diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc index 66e1ead2..8ec02a23 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/setting-up-running.asciidoc @@ -24,14 +24,14 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. -include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[] +include::{libbeat-dir}/shared-directory-layout.asciidoc[] -include::{libbeat-dir}/docs/keystore.asciidoc[] +include::{libbeat-dir}/keystore.asciidoc[] -include::{libbeat-dir}/docs/command-reference.asciidoc[] +include::{libbeat-dir}/command-reference.asciidoc[] include::./running-on-docker.asciidoc[] -include::{libbeat-dir}/docs/shared-systemd.asciidoc[] +include::{libbeat-dir}/shared-systemd.asciidoc[] -include::{libbeat-dir}/docs/shared-shutdown.asciidoc[] +include::{libbeat-dir}/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc index 4ec06650..738d9ad8 100644 --- a/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/packetbeat/docs/troubleshooting.asciidoc @@ -18,14 +18,14 @@ following tips: [[getting-help]] == Get help -include::{libbeat-dir}/docs/getting-help.asciidoc[] +include::{libbeat-dir}/getting-help.asciidoc[] //sets block macro for debugging.asciidoc included in next section [[enable-packetbeat-debugging]] == Debug -include::{libbeat-dir}/docs/debugging.asciidoc[] +include::{libbeat-dir}/debugging.asciidoc[] //sets block macro for recording-trace content included in next section diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.docker.yml b/vendor/github.com/elastic/beats/packetbeat/packetbeat.docker.yml index b99d6f67..a2a6f8c3 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.docker.yml +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.docker.yml @@ -39,6 +39,7 @@ packetbeat.protocols.tls: processors: - add_cloud_metadata: ~ +- add_docker_metadata: ~ output.elasticsearch: hosts: '${ELASTICSEARCH_HOSTS:elasticsearch:9200}' diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml b/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml index d520eb5c..f0169bf0 100644 --- 
a/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.reference.yml @@ -53,6 +53,9 @@ packetbeat.flows: # Configure reporting period. If set to -1, only killed flows will be reported period: 10s + # Set to true to publish fields with null values in events. + #keep_null: false + #========================== Transaction protocols ============================= packetbeat.protocols: @@ -60,6 +63,9 @@ packetbeat.protocols: # Enable ICMPv4 and ICMPv6 monitoring. Default: true #enabled: true + # Set to true to publish fields with null values in events. + #keep_null: false + - type: amqp # Enable AMQP monitoring. Default: true #enabled: true @@ -93,6 +99,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -117,6 +126,9 @@ packetbeat.protocols: # is included in published events. The default is true. Enable `send_response` before enabling this option. #send_response_header: true + # Set to true to publish fields with null values in events. + #keep_null: false + # Configures the default compression algorithm used to uncompress compressed frames by name. Currently only `snappy` can be configured. # By default no compressor is configured. #compressor: "snappy" @@ -128,6 +140,9 @@ packetbeat.protocols: # Configure the DHCP for IPv4 ports. ports: [67, 68] + # Set to true to publish fields with null values in events. + #keep_null: false + - type: dns # Enable DNS monitoring. Default: true #enabled: true @@ -154,6 +169,9 @@ packetbeat.protocols: # send_request: true # send_response: true + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -216,6 +234,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -269,6 +290,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -289,6 +313,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -309,6 +336,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout.
Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -329,6 +359,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -393,6 +426,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -423,6 +459,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -443,6 +482,9 @@ packetbeat.protocols: # field) is sent to Elasticsearch. The default is false. #send_response: false + # Set to true to publish fields with null values in events. + #keep_null: false + # Transaction timeout. Expired transactions will no longer be correlated to # incoming responses, but sent to Elasticsearch immediately. #transaction_timeout: 10s @@ -474,6 +516,9 @@ packetbeat.protocols: # in PEM format under the `raw` key. The default is false. #include_raw_certificates: false + # Set to true to publish fields with null values in events. + #keep_null: false + #=========================== Monitored processes ============================== # Packetbeat can enrich events with information about the process associated @@ -1447,7 +1492,7 @@ setup.template.settings: #setup.ilm.enabled: auto # Set the prefix used in the index lifecycle write alias name. The default alias -# name is 'packetbeat-%{[agent.version]}'. +# name is 'packetbeat-%{[agent.version]}'. #setup.ilm.rollover_alias: "packetbeat" # Set the rollover index pattern. The default is "%{now/d}-000001". @@ -1710,12 +1755,21 @@ logging.files: # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +# The HTTP endpoint will bind to this hostname, IP address, Unix socket, or named pipe. +# When using IP addresses, it is recommended to only use localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066 +# Define which user should own the named pipe. +#http.named_pipe.user: + +# Define the permissions that should be applied to the named pipe. Use the Security +# Descriptor Definition Language (SDDL) to define the permissions. This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + #============================= Process Security ================================ # Enable or disable seccomp system call filtering on Linux. Default is enabled.
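The `keep_null` option documented throughout the reference config above is read per protocol through the same inline config unpacking Beats uses elsewhere; the `KeepNull` field added to `publish.go` later in this diff is where it lands. A minimal, self-contained sketch of that unpacking, assuming only the vendored go-ucfg library and its default `config` struct tag; `reporterMeta` is a hypothetical stand-in for the inline struct in `publish.go`:

```go
package main

import (
	"fmt"

	"github.com/elastic/go-ucfg"
)

// reporterMeta mirrors (hypothetically) the inline struct unpacked in
// publish.go; only the new keep_null flag is shown here.
type reporterMeta struct {
	KeepNull bool `config:"keep_null"`
}

func main() {
	// A protocol section such as `- type: http` carrying `keep_null: true`.
	cfg, err := ucfg.NewFrom(map[string]interface{}{"keep_null": true})
	if err != nil {
		panic(err)
	}
	var meta reporterMeta
	// go-ucfg reads the `config` struct tag by default.
	if err := cfg.Unpack(&meta); err != nil {
		panic(err)
	}
	fmt.Println(meta.KeepNull) // true
}
```

When the flag is set, fields whose value is `null` are kept in the published event instead of being dropped during event normalization.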
diff --git a/vendor/github.com/elastic/beats/packetbeat/packetbeat.yml b/vendor/github.com/elastic/beats/packetbeat/packetbeat.yml index cb7680c1..0a306eae 100644 --- a/vendor/github.com/elastic/beats/packetbeat/packetbeat.yml +++ b/vendor/github.com/elastic/beats/packetbeat/packetbeat.yml @@ -202,6 +202,7 @@ output.elasticsearch: processors: - add_host_metadata: ~ - add_cloud_metadata: ~ + - add_docker_metadata: ~ #================================ Logging ===================================== diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/event.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/event.go index 9105ad88..f02aae42 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/event.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/event.go @@ -21,6 +21,7 @@ import ( "net" "net/url" "strconv" + "strings" "github.com/elastic/beats/libbeat/common" "github.com/elastic/ecs/code/go/ecs" @@ -95,6 +96,8 @@ func synthesizeFullURL(u *ecs.Url, port int64) string { host := u.Domain if port != 80 { host = net.JoinHostPort(u.Domain, strconv.Itoa(int(u.Port))) + } else if strings.IndexByte(u.Domain, ':') != -1 { + host = "[" + u.Domain + "]" } urlBuilder := url.URL{ diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go index f81a44d4..69821429 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/http.go @@ -22,6 +22,7 @@ import ( "fmt" "net" "net/url" + "strconv" "strings" "time" @@ -532,11 +533,16 @@ func (http *httpPlugin) newTransaction(requ, resp *message) beat.Event { logp.Warn("Fail to parse HTTP parameters: %v", err) } - host := string(requ.host) pbf.Source.Bytes = int64(requ.size) + host, port := extractHostHeader(string(requ.host)) if net.ParseIP(host) == nil { pbf.Destination.Domain = host } + if port == 0 { + port = int(pbf.Destination.Port) + } else if port != int(pbf.Destination.Port) { + requ.notes = append(requ.notes, "Host header port number mismatch") + } pbf.Event.Start = requ.ts pbf.Network.ForwardedIP = string(requ.realIP) pbf.Error.Message = requ.notes @@ -554,7 +560,7 @@ func (http *httpPlugin) newTransaction(requ, resp *message) beat.Event { httpFields.RequestHeaders = http.collectHeaders(requ) // url - u := newURL(host, int64(pbf.Destination.Port), path, params) + u := newURL(host, int64(port), path, params) pb.MarshalStruct(evt.Fields, "url", u) // user-agent @@ -701,6 +707,23 @@ func parseCookieValue(raw string) string { return raw } +func extractHostHeader(header string) (host string, port int) { + if len(header) == 0 || net.ParseIP(header) != nil { + return header, port + } + // Split :port trailer + if pos := strings.LastIndexByte(header, ':'); pos != -1 { + if num, err := strconv.Atoi(header[pos+1:]); err == nil && num > 0 && num < 65536 { + header, port = header[:pos], num + } + } + // Remove square bracket boxing of IPv6 address. 
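+ // e.g. a Host header of "[fd00::42]:9200" reaches this point as "[fd00::42]" with + // port 9200 already split off; stripping the brackets leaves the bare "fd00::42".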
+ if last := len(header) - 1; header[0] == '[' && header[last] == ']' && net.ParseIP(header[1:last]) != nil { + header = header[1:last] + } + return header, port +} + func (http *httpPlugin) hideHeaders(m *message) { if !m.isRequest || !http.redactAuthorization { return diff --git a/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go b/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go index a1045628..12e56fee 100644 --- a/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go +++ b/vendor/github.com/elastic/beats/packetbeat/protos/http/http_test.go @@ -1659,6 +1659,124 @@ func TestHTTP_Decoding_disabled(t *testing.T) { assert.Equal(t, deflateBody, body) } +func TestHttpParser_hostHeader(t *testing.T) { + template := "HEAD /_cat/shards HTTP/1.1\r\n" + + "Host: %s\r\n" + + "\r\n" + var store eventStore + http := httpModForTests(&store) + for _, test := range []struct { + title, host string + port uint16 + expected common.MapStr + }{ + { + title: "domain alone", + host: "elasticsearch", + expected: common.MapStr{ + "destination.domain": "elasticsearch", + "url.full": "http://elasticsearch/_cat/shards", + }, + }, + { + title: "domain with port", + port: 9200, + host: "elasticsearch:9200", + expected: common.MapStr{ + "destination.domain": "elasticsearch", + "url.full": "http://elasticsearch:9200/_cat/shards", + }, + }, + { + title: "ipv4", + host: "127.0.0.1", + expected: common.MapStr{ + "destination.domain": nil, + "url.full": "http://127.0.0.1/_cat/shards", + }, + }, + { + title: "ipv4 with port", + port: 9200, + host: "127.0.0.1:9200", + expected: common.MapStr{ + "destination.domain": nil, + "url.full": "http://127.0.0.1:9200/_cat/shards", + }, + }, + { + title: "ipv6 unboxed", + host: "fd00::42", + expected: common.MapStr{ + "destination.domain": nil, + "url.full": "http://[fd00::42]/_cat/shards", + }, + }, + { + title: "ipv6 boxed", + host: "[fd00::42]", + expected: common.MapStr{ + "destination.domain": nil, + "url.full": "http://[fd00::42]/_cat/shards", + }, + }, + { + title: "ipv6 boxed with port", + port: 9200, + host: "[::1]:9200", + expected: common.MapStr{ + "destination.domain": nil, + "url.full": "http://[::1]:9200/_cat/shards", + }, + }, + { + title: "non boxed ipv6", + // This one is now illegal but it seems at some point the RFC + // didn't enforce the brackets when the port was omitted. 
+ host: "fd00::1234", + expected: common.MapStr{ + "destination.domain": nil, + "url.full": "http://[fd00::1234]/_cat/shards", + }, + }, + { + title: "non-matching port", + port: 80, + host: "myhost:9200", + expected: common.MapStr{ + "destination.domain": "myhost", + "url.full": "http://myhost:9200/_cat/shards", + "error.message": []string{"Unmatched request", "Host header port number mismatch"}, + }, + }, + } { + t.Run(test.title, func(t *testing.T) { + request := fmt.Sprintf(template, test.host) + tcptuple := testCreateTCPTuple() + if test.port != 0 { + tcptuple.DstPort = test.port + } + packet := protos.Packet{Payload: []byte(request)} + private := protos.ProtocolData(&httpConnectionData{}) + private = http.Parse(&packet, tcptuple, 1, private) + http.Expired(tcptuple, private) + trans := expectTransaction(t, &store) + if !assert.NotNil(t, trans) { + t.Fatal("nil transaction") + } + for field, expected := range test.expected { + actual, err := trans.GetValue(field) + assert.Equal(t, expected, actual, field) + if expected != nil { + assert.Nil(t, err, field) + } else { + assert.Equal(t, common.ErrKeyNotFound, err, field) + } + } + }) + } +} + func benchmarkHTTPMessage(b *testing.B, data []byte) { http := httpModForTests(nil) parser := newParser(&http.parserConfig) diff --git a/vendor/github.com/elastic/beats/packetbeat/publish/publish.go b/vendor/github.com/elastic/beats/packetbeat/publish/publish.go index db264852..844f6d4e 100644 --- a/vendor/github.com/elastic/beats/packetbeat/publish/publish.go +++ b/vendor/github.com/elastic/beats/packetbeat/publish/publish.go @@ -86,6 +86,7 @@ func (p *TransactionPublisher) CreateReporter( meta := struct { Event common.EventMetadata `config:",inline"` Processors processors.PluginConfig `config:"processors"` + KeepNull bool `config:"keep_null"` }{} if err := config.Unpack(&meta); err != nil { return nil, err @@ -100,6 +101,7 @@ func (p *TransactionPublisher) CreateReporter( Processing: beat.ProcessingConfig{ EventMetadata: meta.Event, Processor: processors, + KeepNull: meta.KeepNull, }, } if p.canDrop { diff --git a/vendor/github.com/elastic/beats/script/build_docs.sh b/vendor/github.com/elastic/beats/script/build_docs.sh index 14b04dd4..50a70bb1 100755 --- a/vendor/github.com/elastic/beats/script/build_docs.sh +++ b/vendor/github.com/elastic/beats/script/build_docs.sh @@ -38,5 +38,5 @@ do params="$params --resource=${resource_dir}" fi - $docs_dir/build_docs --asciidoctor --respect_edit_url_overrides $params --doc "$index" --out "$dest_dir" + $docs_dir/build_docs --direct_html --respect_edit_url_overrides $params --doc "$index" --out "$dest_dir" done diff --git a/vendor/github.com/elastic/beats/script/generate_imports.py b/vendor/github.com/elastic/beats/script/generate_imports.py index 56bf8644..055eda71 100644 --- a/vendor/github.com/elastic/beats/script/generate_imports.py +++ b/vendor/github.com/elastic/beats/script/generate_imports.py @@ -27,7 +27,7 @@ import_template = """// Licensed to Elasticsearch B.V. under one or more contrib // Code generated by 'make imports' - DO NOT EDIT. 
-/* +{buildtags}/* {comment} */ package {package} @@ -39,19 +39,24 @@ import ( def generate_and_write_to_file(outfile, go_beat_path): - imported_beat_lines = get_importable_lines(go_beat_path, import_line_format) - imported_lines = "\n".join(imported_beat_lines) - package = basename(dirname(outfile)) - list_go = import_template.format(package=package, - comment=comment, - imports=imported_lines) - with open(outfile, "w") as output: - output.write(list_go) + outputs = get_importable_lines(go_beat_path, import_line_format) + + for output_data in outputs: + if len(output_data["imported_lines"]) == 0: + continue + imported_lines = "\n".join(output_data["imported_lines"]) + package = basename(dirname(outfile)) + list_go = import_template.format(package=package, + comment=comment, + buildtags=output_data["build_tags"], + imports=imported_lines) + with open(outfile.format(suffix=output_data["file_suffix"]), "w") as output: + output.write(list_go) if __name__ == "__main__": parser = ArgumentParser(description="Generate imports for Beats packages") - parser.add_argument("--out", default="include/list.go") + parser.add_argument("--out", default="include/list{suffix}.go") parser.add_argument("beats_path") args = parser.parse_args() diff --git a/vendor/github.com/elastic/beats/script/modules_collector.py b/vendor/github.com/elastic/beats/script/modules_collector.py deleted file mode 100644 index 51b38980..00000000 --- a/vendor/github.com/elastic/beats/script/modules_collector.py +++ /dev/null @@ -1,61 +0,0 @@ -import os -import argparse -import yaml -import six -import glob -import re - -# Collects module configs to modules.d - -REFERENCE_CONFIG_RE = re.compile('.+\.reference\.yml') - - -def collect(beat_name, docs_branch): - - base_dir = "module" - path = os.path.abspath("module") - - # TODO add module release status if beta or experimental - header = """# Module: {module} -# Docs: https://www.elastic.co/guide/en/beats/{beat_name}/{docs_branch}/{beat_name}-module-{module}.html - -""" - - # Create directory for module confs - os.mkdir(os.path.abspath('modules.d')) - - # Iterate over all modules - for module in sorted(os.listdir(base_dir)): - - module_confs = path + '/' + module + '/_meta/config*.yml' - for module_conf in glob.glob(module_confs): - - # Ignore reference confs - if REFERENCE_CONFIG_RE.match(module_conf): - continue - - if os.path.isfile(module_conf) == False: - continue - - module_file = header.format(module=module, beat_name=beat_name, docs_branch=docs_branch) - disabled_config_filename = os.path.basename(module_conf).replace('config', module) + '.disabled' - - with open(module_conf) as f: - module_file += f.read() - - # Write disabled module conf - with open(os.path.abspath('modules.d/') + '/' + disabled_config_filename, 'w') as f: - f.write(module_file) - - -if __name__ == "__main__": - parser = argparse.ArgumentParser( - description="Collects modules confs") - parser.add_argument("--beat", help="Beat name") - parser.add_argument("--docs_branch", help="Docs branch") - - args = parser.parse_args() - beat_name = args.beat - docs_branch = args.docs_branch - - collect(beat_name, docs_branch) diff --git a/vendor/github.com/elastic/beats/testing/environments/docker/kafka/run.sh b/vendor/github.com/elastic/beats/testing/environments/docker/kafka/run.sh index 2d44882e..873f6951 100755 --- a/vendor/github.com/elastic/beats/testing/environments/docker/kafka/run.sh +++ b/vendor/github.com/elastic/beats/testing/environments/docker/kafka/run.sh @@ -21,7 +21,8 @@ mkdir -p ${KAFKA_LOGS_DIR} 
${KAFKA_HOME}/bin/kafka-server-start.sh ${KAFKA_HOME}/config/server.properties \ --override delete.topic.enable=true --override advertised.host.name=${KAFKA_ADVERTISED_HOST} \ --override listeners=PLAINTEXT://0.0.0.0:9092 \ - --override logs.dir=${KAFKA_LOGS_DIR} --override log.flush.interval.ms=200 & + --override logs.dir=${KAFKA_LOGS_DIR} --override log.flush.interval.ms=200 \ + --override num.partitions=3 & wait_for_port 9092 diff --git a/vendor/github.com/elastic/beats/testing/environments/latest.yml b/vendor/github.com/elastic/beats/testing/environments/latest.yml index daa01fc6..6a79281f 100644 --- a/vendor/github.com/elastic/beats/testing/environments/latest.yml +++ b/vendor/github.com/elastic/beats/testing/environments/latest.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.4.1 + image: docker.elastic.co/elasticsearch/elasticsearch:7.5.1 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] retries: 300 @@ -16,7 +16,7 @@ services: - "xpack.security.enabled=false" logstash: - image: docker.elastic.co/logstash/logstash:7.4.1 + image: docker.elastic.co/logstash/logstash:7.5.1 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 300 @@ -26,7 +26,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.4.1 + image: docker.elastic.co/kibana/kibana:7.5.1 healthcheck: test: ["CMD", "curl", "-f", "http://localhost:5601"] retries: 300 diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot-oss.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot-oss.yml index 4bf57c4e..a1c150d1 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot-oss.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot-oss.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.4.1-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.5.1-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] retries: 300 @@ -15,7 +15,7 @@ services: - "http.host=0.0.0.0" logstash: - image: docker.elastic.co/logstash/logstash-oss:7.4.1-SNAPSHOT + image: docker.elastic.co/logstash/logstash-oss:7.5.1-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -25,7 +25,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana-oss:7.4.1-SNAPSHOT + image: docker.elastic.co/kibana/kibana-oss:7.5.1-SNAPSHOT healthcheck: test: ["CMD-SHELL", 'python -c ''import urllib, json; response = urllib.urlopen("http://localhost:5601/api/status"); data = json.loads(response.read()); exit(1) if data["status"]["overall"]["state"] != "green" else exit(0);'''] retries: 600 diff --git a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml index 5b690e34..a4dc5a00 100644 --- a/vendor/github.com/elastic/beats/testing/environments/snapshot.yml +++ b/vendor/github.com/elastic/beats/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:7.4.1-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:7.5.1-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9200"] retries: 300 @@ -16,7 +16,7 @@ services: - "xpack.security.enabled=false" logstash: - image: 
docker.elastic.co/logstash/logstash:7.4.1-SNAPSHOT + image: docker.elastic.co/logstash/logstash:7.5.1-SNAPSHOT healthcheck: test: ["CMD", "curl", "-f", "http://localhost:9600/_node/stats"] retries: 600 @@ -26,7 +26,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:7.4.1-SNAPSHOT + image: docker.elastic.co/kibana/kibana:7.5.1-SNAPSHOT healthcheck: test: ["CMD-SHELL", 'python -c ''import urllib, json; response = urllib.urlopen("http://localhost:5601/api/status"); data = json.loads(response.read()); exit(1) if data["status"]["overall"]["state"] != "green" else exit(0);'''] retries: 600 diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md new file mode 100644 index 00000000..0786fdf4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md @@ -0,0 +1,24 @@ +# How to contribute + +We'd love to accept your patches and contributions to this project. There are +just a few small guidelines you need to follow. + +## Contributor License Agreement + +Contributions to this project must be accompanied by a Contributor License +Agreement. You (or your employer) retain the copyright to your contribution; +this simply gives us permission to use and redistribute your contributions as +part of the project. Head over to <https://cla.developers.google.com/> to see +your current agreements on file or to sign a new one. + +You generally only need to submit a CLA once, so if you've already submitted one +(even if it was for a different project), you probably don't need to do it +again. + +## Code reviews + +All submissions, including submissions by project members, require review. We +use GitHub pull requests for this purpose. Consult [GitHub Help] for more +information on using pull requests. + +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License.
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
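The vendored ocagent exporter added below implements both the trace and the stats (view) exporter interfaces; its `ExportSpan` and `ExportView` methods appear in `ocagent.go` further down. A minimal registration sketch, assuming only the APIs visible in this vendored code (`NewExporter`, `WithInsecure`, `WithServiceName`) plus the standard OpenCensus `RegisterExporter` hooks; the service name is illustrative:

```go
package main

import (
	"log"

	"contrib.go.opencensus.io/exporter/ocagent"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/trace"
)

func main() {
	// Options shown in the README and options.go below.
	exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("demo-service"))
	if err != nil {
		log.Fatalf("failed to create ocagent exporter: %v", err)
	}
	defer exp.Stop()

	// One Exporter value serves both roles.
	trace.RegisterExporter(exp) // spans flow through ExportSpan
	view.RegisterExporter(exp)  // aggregated view data flows through ExportView
}
```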
diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md new file mode 100644 index 00000000..3b9e908f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md @@ -0,0 +1,61 @@ +# OpenCensus Agent Go Exporter + +[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url] + + +This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter. +OC-Agent is a daemon process running in a VM that can retrieve spans/stats/metrics from +OpenCensus Library, export them to other backends and possibly push configurations back to +the Library. See more details in the [OC-Agent Readme][OCAgentReadme]. + +Note: This is an experimental repository and is likely to get backwards-incompatible changes. +Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo]. + +## Installation + +```bash +$ go get -u contrib.go.opencensus.io/exporter/ocagent +``` + +## Usage + +```go +import ( + "context" + "fmt" + "log" + "time" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/trace" +) + +func Example() { + exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name")) + if err != nil { + log.Fatalf("Failed to create the agent exporter: %v", err) + } + defer exp.Stop() + + // Now register it as a trace exporter. + trace.RegisterExporter(exp) + + // Then use the OpenCensus tracing library, like we normally would. + ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example") + defer span.End() + + for i := 0; i < 10; i++ { + _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i)) + <-time.After(6 * time.Millisecond) + iSpan.End() + } +} +``` + +[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto +[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go +[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg +[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent +[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master +[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent + diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go new file mode 100644 index 00000000..297e44b6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go @@ -0,0 +1,38 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package ocagent + +import ( + "math/rand" + "time" +) + +var randSrc = rand.New(rand.NewSource(time.Now().UnixNano())) + +// retries function fn upto n times, if fn returns an error lest it returns nil early. +// It applies exponential backoff in units of (1< 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + traceExporter, err := traceSvcClient.Export(ctx) + if err != nil { + return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err) + } + + firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := traceExporter.Send(firstTraceMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + ae.mu.Lock() + ae.traceExporter = traceExporter + ae.mu.Unlock() + + // Initiate the config service by sending over node identifier info. + configStream, err := traceSvcClient.Config(context.Background()) + if err != nil { + return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err) + } + firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node} + if err := configStream.Send(firstCfgMessage); err != nil { + return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err) + } + + // In the background, handle trace configurations that are beamed down + // by the agent, but also reply to it with the applied configuration. + go ae.handleConfigStreaming(configStream) + + return nil +} + +func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error { + metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc) + metricsExporter, err := metricsSvcClient.Export(context.Background()) + if err != nil { + return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err) + } + // Initiate the metrics service by sending over the first message just containing the Node and Resource. + firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{ + Node: node, + Resource: ae.resource, + } + if err := metricsExporter.Send(firstMetricsMessage); err != nil { + return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err) + } + + ae.mu.Lock() + ae.metricsExporter = metricsExporter + ae.mu.Unlock() + + // With that we are good to go and can start sending metrics + return nil +} + +func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) { + addr := ae.prepareAgentAddress() + var dialOpts []grpc.DialOption + if ae.clientTransportCredentials != nil { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials)) + } else if ae.canDialInsecure { + dialOpts = append(dialOpts, grpc.WithInsecure()) + } + if ae.compressor != "" { + dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor))) + } + dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{})) + if len(ae.grpcDialOptions) != 0 { + dialOpts = append(dialOpts, ae.grpcDialOptions...) + } + + ctx := context.Background() + if len(ae.headers) > 0 { + ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers)) + } + return grpc.DialContext(ctx, addr, dialOpts...) +} + +func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error { + // Note: We haven't yet implemented configuration sending so we + // should NOT be changing connection states within this function for now. 
+ for { + recv, err := configStream.Recv() + if err != nil { + // TODO: Check if this is a transient error or exponential backoff-able. + return err + } + cfg := recv.Config + if cfg == nil { + continue + } + + // Otherwise now apply the trace configuration sent down from the agent + if psamp := cfg.GetProbabilitySampler(); psamp != nil { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)}) + } else if csamp := cfg.GetConstantSampler(); csamp != nil { + alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON + if alwaysSample { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + } else { + trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()}) + } + } else { // TODO: Add the rate limiting sampler here + } + + // Then finally send back to upstream the newly applied configuration + err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}}) + if err != nil { + return err + } + } +} + +// Stop shuts down all the connections and resources +// related to the exporter. +func (ae *Exporter) Stop() error { + ae.mu.RLock() + cc := ae.grpcClientConn + started := ae.started + stopped := ae.stopped + ae.mu.RUnlock() + + if !started { + return errNotStarted + } + if stopped { + // TODO: tell the user that we've already stopped, so perhaps a sentinel error? + return nil + } + + ae.Flush() + + // Now close the underlying gRPC connection. + var err error + if cc != nil { + err = cc.Close() + } + + // At this point we can change the state variables: started and stopped + ae.mu.Lock() + ae.started = false + ae.stopped = true + ae.mu.Unlock() + close(ae.stopCh) + + // Ensure that the backgroundConnector returns + <-ae.backgroundConnectionDoneCh + + return err +} + +func (ae *Exporter) ExportSpan(sd *trace.SpanData) { + if sd == nil { + return + } + _ = ae.traceBundler.Add(sd, 1) +} + +func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error { + if batch == nil || len(batch.Spans) == 0 { + return nil + } + + select { + case <-ae.stopCh: + return errStopped + + default: + if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil { + return fmt.Errorf("ExportTraceServiceRequest: no active connection, last connection error: %v", lastConnectErr) + } + + ae.senderMu.Lock() + err := ae.traceExporter.Send(batch) + ae.senderMu.Unlock() + if err != nil { + if err == io.EOF { + ae.recvMu.Lock() + // Perform a .Recv to try to find out why the RPC actually ended. 
+ // See: + // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100 + // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ + for { + _, err = ae.traceExporter.Recv() + if err != nil { + break + } + } + ae.recvMu.Unlock() + } + + ae.setStateDisconnected(err) + if err != io.EOF { + return err + } + } + return nil + } +} + +func (ae *Exporter) ExportView(vd *view.Data) { + if vd == nil { + return + } + _ = ae.viewDataBundler.Add(vd, 1) +} + +func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span { + if len(sdl) == 0 { + return nil + } + protoSpans := make([]*tracepb.Span, 0, len(sdl)) + for _, sd := range sdl { + if sd != nil { + protoSpans = append(protoSpans, ocSpanToProtoSpan(sd)) + } + } + return protoSpans +} + +func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoSpans := ocSpanDataToPbSpans(sdl) + if len(protoSpans) == 0 { + return + } + ae.senderMu.Lock() + err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{ + Spans: protoSpans, + }) + ae.senderMu.Unlock() + if err != nil { + ae.setStateDisconnected(err) + } + } +} + +func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric { + if len(vdl) == 0 { + return nil + } + metrics := make([]*metricspb.Metric, 0, len(vdl)) + for _, vd := range vdl { + if vd != nil { + vmetric, err := viewDataToMetric(vd) + // TODO: (@odeke-em) somehow report this error, if it is non-nil. + if err == nil && vmetric != nil { + metrics = append(metrics, vmetric) + } + } + } + return metrics +} + +func (ae *Exporter) uploadViewData(vdl []*view.Data) { + select { + case <-ae.stopCh: + return + + default: + if !ae.connected() { + return + } + + protoMetrics := ocViewDataToPbMetrics(vdl) + if len(protoMetrics) == 0 { + return + } + err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{ + Metrics: protoMetrics, + // TODO:(@odeke-em) + // a) Figure out how to derive a Node from the environment + // b) Figure out how to derive a Resource from the environment + // or better letting users of the exporter configure it. + }) + if err != nil { + ae.setStateDisconnected(err) + } + } +} + +func (ae *Exporter) Flush() { + ae.traceBundler.Flush() + ae.viewDataBundler.Flush() +} + +func resourceProtoFromEnv() *resourcepb.Resource { + rs, _ := resource.FromEnv(context.Background()) + if rs == nil { + return nil + } + return resourceToResourcePb(rs) +} + +func resourceToResourcePb(rs *resource.Resource) *resourcepb.Resource { + rprs := &resourcepb.Resource{ + Type: rs.Type, + } + if rs.Labels != nil { + rprs.Labels = make(map[string]string) + for k, v := range rs.Labels { + rprs.Labels[k] = v + } + } + return rprs +} diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go new file mode 100644 index 00000000..6820216f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go @@ -0,0 +1,161 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "time" + + "go.opencensus.io/resource" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +const ( + DefaultAgentPort uint16 = 55678 + DefaultAgentHost string = "localhost" +) + +type ExporterOption interface { + withExporter(e *Exporter) +} + +type resourceDetector resource.Detector + +var _ ExporterOption = (*resourceDetector)(nil) + +func (rd resourceDetector) withExporter(e *Exporter) { + e.resourceDetector = resource.Detector(rd) +} + +// WithResourceDetector allows one to register a resource detector. Resource Detector is used +// to detect resources associated with the application. Detected resource is exported +// along with the metrics. If the detector fails then it panics. +// If a resource detector is not provided then by default it detects from the environment. +func WithResourceDetector(rd resource.Detector) ExporterOption { + return resourceDetector(rd) +} + +type insecureGrpcConnection int + +var _ ExporterOption = (*insecureGrpcConnection)(nil) + +func (igc *insecureGrpcConnection) withExporter(e *Exporter) { + e.canDialInsecure = true +} + +// WithInsecure disables client transport security for the exporter's gRPC connection +// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure +// does. Note, by default, client security is required unless WithInsecure is used. +func WithInsecure() ExporterOption { return new(insecureGrpcConnection) } + +type addressSetter string + +func (as addressSetter) withExporter(e *Exporter) { + e.agentAddress = string(as) +} + +var _ ExporterOption = (*addressSetter)(nil) + +// WithAddress allows one to set the address that the exporter will +// connect to the agent on. If unset, it will instead try to +// connect to DefaultAgentHost:DefaultAgentPort +func WithAddress(addr string) ExporterOption { + return addressSetter(addr) +} + +type serviceNameSetter string + +func (sns serviceNameSetter) withExporter(e *Exporter) { + e.serviceName = string(sns) +} + +var _ ExporterOption = (*serviceNameSetter)(nil) + +// WithServiceName allows one to set/override the service name +// that the exporter will report to the agent. +func WithServiceName(serviceName string) ExporterOption { + return serviceNameSetter(serviceName) +} + +type reconnectionPeriod time.Duration + +func (rp reconnectionPeriod) withExporter(e *Exporter) { + e.reconnectionPeriod = time.Duration(rp) +} + +func WithReconnectionPeriod(rp time.Duration) ExporterOption { + return reconnectionPeriod(rp) +} + +type compressorSetter string + +func (c compressorSetter) withExporter(e *Exporter) { + e.compressor = string(c) +} + +// UseCompressor will set the compressor for the gRPC client to use when sending requests. +// It is the responsibility of the caller to ensure that the compressor set has been registered +// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor.
Some +// compressors auto-register on import, such as gzip, which can be registered with +// `import _ "google.golang.org/grpc/encoding/gzip"` +func UseCompressor(compressorName string) ExporterOption { + return compressorSetter(compressorName) +} + +type headerSetter map[string]string + +func (h headerSetter) withExporter(e *Exporter) { + e.headers = map[string]string(h) +} + +// WithHeaders will send the provided headers when the gRPC stream connection +// is instantiated. +func WithHeaders(headers map[string]string) ExporterOption { + return headerSetter(headers) +} + +type clientCredentials struct { + credentials.TransportCredentials +} + +var _ ExporterOption = (*clientCredentials)(nil) + +// WithTLSCredentials allows the connection to use TLS credentials +// when talking to the server. It takes in grpc.TransportCredentials instead +// of say a Certificate file or a tls.Certificate, because retrieving +// these credentials can be done in many ways e.g. plain file, in code tls.Config +// or by certificate rotation, so it is up to the caller to decide what to use. +func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption { + return &clientCredentials{TransportCredentials: creds} +} + +func (cc *clientCredentials) withExporter(e *Exporter) { + e.clientTransportCredentials = cc.TransportCredentials +} + +type grpcDialOptions []grpc.DialOption + +var _ ExporterOption = (*grpcDialOptions)(nil) + +// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts +// with some other configuration of the gRPC connection to the agent, the options given here will +// take precedence since they are set last. +func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption { + return grpcDialOptions(opts) +} + +func (opts grpcDialOptions) withExporter(e *Exporter) { + e.grpcDialOptions = opts +} diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go new file mode 100644 index 00000000..983ebe7b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go @@ -0,0 +1,248 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +package ocagent + +import ( + "math" + "time" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + + tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + "github.com/golang/protobuf/ptypes/timestamp" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 +) + +func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span { + if sd == nil { + return nil + } + var namePtr *tracepb.TruncatableString + if sd.Name != "" { + namePtr = &tracepb.TruncatableString{Value: sd.Name} + } + return &tracepb.Span{ + TraceId: sd.TraceID[:], + SpanId: sd.SpanID[:], + ParentSpanId: sd.ParentSpanID[:], + Status: ocStatusToProtoStatus(sd.Status), + StartTime: timeToTimestamp(sd.StartTime), + EndTime: timeToTimestamp(sd.EndTime), + Links: ocLinksToProtoLinks(sd.Links), + Kind: ocSpanKindToProtoSpanKind(sd.SpanKind), + Name: namePtr, + Attributes: ocAttributesToProtoAttributes(sd.Attributes), + TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents), + Tracestate: ocTracestateToProtoTracestate(sd.Tracestate), + } +} + +var blankStatus trace.Status + +func ocStatusToProtoStatus(status trace.Status) *tracepb.Status { + if status == blankStatus { + return nil + } + return &tracepb.Status{ + Code: status.Code, + Message: status.Message, + } +} + +func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links { + if len(links) == 0 { + return nil + } + + sl := make([]*tracepb.Span_Link, 0, len(links)) + for _, ocLink := range links { + // This redefinition is necessary to prevent ocLink.*ID[:] copies + // being reused -- in short we need a new ocLink per iteration. + ocLink := ocLink + + sl = append(sl, &tracepb.Span_Link{ + TraceId: ocLink.TraceID[:], + SpanId: ocLink.SpanID[:], + Type: ocLinkTypeToProtoLinkType(ocLink.Type), + }) + } + + return &tracepb.Span_Links{ + Link: sl, + } +} + +func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type { + switch oct { + case trace.LinkTypeChild: + return tracepb.Span_Link_CHILD_LINKED_SPAN + case trace.LinkTypeParent: + return tracepb.Span_Link_PARENT_LINKED_SPAN + default: + return tracepb.Span_Link_TYPE_UNSPECIFIED + } +} + +func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes { + if len(attrs) == 0 { + return nil + } + outMap := make(map[string]*tracepb.AttributeValue) + for k, v := range attrs { + switch v := v.(type) { + case bool: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}} + + case int: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}} + + case int64: + outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}} + + case string: + outMap[k] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: &tracepb.TruncatableString{Value: v}, + }, + } + } + } + return &tracepb.Span_Attributes{ + AttributeMap: outMap, + } +} + +// This code is mostly copied from +// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46 +func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents { + if len(as) == 0 && len(es) == 0 { + return nil + } + + timeEvents := &tracepb.Span_TimeEvents{} + var annotations, droppedAnnotationsCount int + var messageEvents, droppedMessageEventsCount int + + // Transform annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + 
droppedAnnotationsCount = len(as) - i
+			break
+		}
+		annotations++
+		timeEvents.TimeEvent = append(timeEvents.TimeEvent,
+			&tracepb.Span_TimeEvent{
+				Time:  timeToTimestamp(a.Time),
+				Value: transformAnnotationToTimeEvent(&a),
+			},
+		)
+	}
+
+	// Transform message events
+	for i, e := range es {
+		if messageEvents >= maxMessageEventsPerSpan {
+			droppedMessageEventsCount = len(es) - i
+			break
+		}
+		messageEvents++
+		timeEvents.TimeEvent = append(timeEvents.TimeEvent,
+			&tracepb.Span_TimeEvent{
+				Time:  timeToTimestamp(e.Time),
+				Value: transformMessageEventToTimeEvent(&e),
+			},
+		)
+	}
+
+	// Process dropped counter
+	timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
+	timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
+
+	return timeEvents
+}
+
+func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
+	return &tracepb.Span_TimeEvent_Annotation_{
+		Annotation: &tracepb.Span_TimeEvent_Annotation{
+			Description: &tracepb.TruncatableString{Value: a.Message},
+			Attributes:  ocAttributesToProtoAttributes(a.Attributes),
+		},
+	}
+}
+
+func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
+	return &tracepb.Span_TimeEvent_MessageEvent_{
+		MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
+			Type:             tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
+			Id:               uint64(e.MessageID),
+			UncompressedSize: uint64(e.UncompressedByteSize),
+			CompressedSize:   uint64(e.CompressedByteSize),
+		},
+	}
+}
+
+// clip32 clips an int to the range of an int32.
+func clip32(x int) int32 {
+	if x < math.MinInt32 {
+		return math.MinInt32
+	}
+	if x > math.MaxInt32 {
+		return math.MaxInt32
+	}
+	return int32(x)
+}
+
+func timeToTimestamp(t time.Time) *timestamp.Timestamp {
+	nanoTime := t.UnixNano()
+	return &timestamp.Timestamp{
+		Seconds: nanoTime / 1e9,
+		Nanos:   int32(nanoTime % 1e9),
+	}
+}
+
+func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
+	switch kind {
+	case trace.SpanKindClient:
+		return tracepb.Span_CLIENT
+	case trace.SpanKindServer:
+		return tracepb.Span_SERVER
+	default:
+		return tracepb.Span_SPAN_KIND_UNSPECIFIED
+	}
+}
+
+func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
+	if ts == nil {
+		return nil
+	}
+	return &tracepb.Span_Tracestate{
+		Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
+	}
+}
+
+func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
+	protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
+	for _, entry := range entries {
+		protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
+			Key:   entry.Key,
+			Value: entry.Value,
+		})
+	}
+	return protoEntries
+}
diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
new file mode 100644
index 00000000..43f18dec
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
@@ -0,0 +1,274 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +import ( + "errors" + "time" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + + "github.com/golang/protobuf/ptypes/timestamp" + + metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" +) + +var ( + errNilMeasure = errors.New("expecting a non-nil stats.Measure") + errNilView = errors.New("expecting a non-nil view.View") + errNilViewData = errors.New("expecting a non-nil view.Data") +) + +func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) { + if vd == nil { + return nil, errNilViewData + } + + descriptor, err := viewToMetricDescriptor(vd.View) + if err != nil { + return nil, err + } + + timeseries, err := viewDataToTimeseries(vd) + if err != nil { + return nil, err + } + + metric := &metricspb.Metric{ + MetricDescriptor: descriptor, + Timeseries: timeseries, + } + return metric, nil +} + +func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) { + if v == nil { + return nil, errNilView + } + if v.Measure == nil { + return nil, errNilMeasure + } + + desc := &metricspb.MetricDescriptor{ + Name: stringOrCall(v.Name, v.Measure.Name), + Description: stringOrCall(v.Description, v.Measure.Description), + Unit: v.Measure.Unit(), + Type: aggregationToMetricDescriptorType(v), + LabelKeys: tagKeysToLabelKeys(v.TagKeys), + } + return desc, nil +} + +func stringOrCall(first string, call func() string) string { + if first != "" { + return first + } + return call() +} + +type measureType uint + +const ( + measureUnknown measureType = iota + measureInt64 + measureFloat64 +) + +func measureTypeFromMeasure(m stats.Measure) measureType { + switch m.(type) { + default: + return measureUnknown + case *stats.Float64Measure: + return measureFloat64 + case *stats.Int64Measure: + return measureInt64 + } +} + +func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type { + if v == nil || v.Aggregation == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + if v.Measure == nil { + return metricspb.MetricDescriptor_UNSPECIFIED + } + + switch v.Aggregation.Type { + case view.AggTypeCount: + // Cumulative on int64 + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + + case view.AggTypeDistribution: + // Cumulative types + return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION + + case view.AggTypeLastValue: + // Gauge types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_GAUGE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_GAUGE_INT64 + } + + case view.AggTypeSum: + // Cumulative types + switch measureTypeFromMeasure(v.Measure) { + case measureFloat64: + return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE + case measureInt64: + return metricspb.MetricDescriptor_CUMULATIVE_INT64 + } + } + + // For all other cases, return unspecified. 
+	return metricspb.MetricDescriptor_UNSPECIFIED
+}
+
+func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
+	labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
+	for _, tagKey := range tagKeys {
+		labelKeys = append(labelKeys, &metricspb.LabelKey{
+			Key: tagKey.Name(),
+		})
+	}
+	return labelKeys
+}
+
+func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
+	if vd == nil || len(vd.Rows) == 0 {
+		return nil, nil
+	}
+
+	// Given that view.Data only contains Start, End
+	// the timestamps for all the row data will be the exact same
+	// per aggregation. However, the values will differ.
+	// Each row has its own tags.
+	startTimestamp := timeToProtoTimestamp(vd.Start)
+	endTimestamp := timeToProtoTimestamp(vd.End)
+
+	mType := measureTypeFromMeasure(vd.View.Measure)
+	timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
+	// It is imperative that the ordering of "LabelValues" matches those
+	// of the Label keys in the metric descriptor.
+	for _, row := range vd.Rows {
+		labelValues := labelValuesFromTags(row.Tags)
+		point := rowToPoint(vd.View, row, endTimestamp, mType)
+		timeseries = append(timeseries, &metricspb.TimeSeries{
+			StartTimestamp: startTimestamp,
+			LabelValues:    labelValues,
+			Points:         []*metricspb.Point{point},
+		})
+	}
+
+	if len(timeseries) == 0 {
+		return nil, nil
+	}
+
+	return timeseries, nil
+}
+
+func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
+	unixNano := t.UnixNano()
+	return &timestamp.Timestamp{
+		Seconds: int64(unixNano / 1e9),
+		Nanos:   int32(unixNano % 1e9),
+	}
+}
+
+func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
+	pt := &metricspb.Point{
+		Timestamp: endTimestamp,
+	}
+
+	switch data := row.Data.(type) {
+	case *view.CountData:
+		pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
+
+	case *view.DistributionData:
+		pt.Value = &metricspb.Point_DistributionValue{
+			DistributionValue: &metricspb.DistributionValue{
+				Count: data.Count,
+				Sum:   float64(data.Count) * data.Mean, // because Mean := Sum/Count
+				// TODO: Add Exemplar
+				Buckets: bucketsToProtoBuckets(data.CountPerBucket),
+				BucketOptions: &metricspb.DistributionValue_BucketOptions{
+					Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+						Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+							Bounds: v.Aggregation.Buckets,
+						},
+					},
+				},
+				SumOfSquaredDeviation: data.SumOfSquaredDev,
+			}}
+
+	case *view.LastValueData:
+		setPointValue(pt, data.Value, mType)
+
+	case *view.SumData:
+		setPointValue(pt, data.Value, mType)
+	}
+
+	return pt
+}
+
+// Not returning anything from this function because metricspb.Point.is_Value is an unexported
+// interface, hence we just have to set its value by pointer.
+func setPointValue(pt *metricspb.Point, value float64, mType measureType) { + if mType == measureInt64 { + pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)} + } else { + pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value} + } +} + +func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket { + distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket)) + for i := 0; i < len(countPerBucket); i++ { + count := countPerBucket[i] + + distBuckets[i] = &metricspb.DistributionValue_Bucket{ + Count: count, + } + } + + return distBuckets +} + +func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue { + if len(tags) == 0 { + return nil + } + + labelValues := make([]*metricspb.LabelValue, 0, len(tags)) + for _, tag_ := range tags { + labelValues = append(labelValues, &metricspb.LabelValue{ + Value: tag_.Value, + + // It is imperative that we set the "HasValue" attribute, + // in order to distinguish missing a label from the empty string. + // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue + // + // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments, + // so the best case that we can use to distinguish missing labels/tags from the + // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have + // a value. + HasValue: tag_.Key.Name() != "", + }) + } + return labelValues +} diff --git a/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go new file mode 100644 index 00000000..68be4c75 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go @@ -0,0 +1,17 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ocagent + +const Version = "0.0.1" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/LICENSE new file mode 100644 index 00000000..af39a91e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/NOTICE new file mode 100644 index 00000000..2d1d7260 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/NOTICE @@ -0,0 +1,5 @@ +Microsoft Azure-SDK-for-Go +Copyright 2014-2017 Microsoft + +This product includes software developed at +the Microsoft Corporation (https://www.microsoft.com). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/actiongroups.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/actiongroups.go new file mode 100644 index 00000000..d7d55d29 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/actiongroups.go @@ -0,0 +1,598 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ActionGroupsClient is the monitor Management Client +type ActionGroupsClient struct { + BaseClient +} + +// NewActionGroupsClient creates an instance of the ActionGroupsClient client. +func NewActionGroupsClient(subscriptionID string) ActionGroupsClient { + return NewActionGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewActionGroupsClientWithBaseURI creates an instance of the ActionGroupsClient client. +func NewActionGroupsClientWithBaseURI(baseURI string, subscriptionID string) ActionGroupsClient { + return ActionGroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create a new action group or update an existing one. +// Parameters: +// resourceGroupName - the name of the resource group. +// actionGroupName - the name of the action group. +// actionGroup - the action group to create or use for the update. 
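+//
+// The sketch below is an editor's illustration, not generated code: it shows one
+// plausible call sequence, assuming the go-autorest `to` pointer helpers and an
+// Authorizer obtained elsewhere (e.g. from auth.NewAuthorizerFromEnvironment):
+//
+//	client := insights.NewActionGroupsClient("<subscription id>")
+//	client.Authorizer = authorizer
+//	ag, err := client.CreateOrUpdate(ctx, "my-rg", "my-action-group",
+//		insights.ActionGroupResource{
+//			Location: to.StringPtr("Global"),
+//			ActionGroup: &insights.ActionGroup{
+//				GroupShortName: to.StringPtr("myag"), // validated below: max length 12
+//				Enabled:        to.BoolPtr(true),     // validated below: required
+//			},
+//		})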
+func (client ActionGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroup ActionGroupResource) (result ActionGroupResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: actionGroup, + Constraints: []validation.Constraint{{Target: "actionGroup.ActionGroup", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "actionGroup.ActionGroup.GroupShortName", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "actionGroup.ActionGroup.GroupShortName", Name: validation.MaxLength, Rule: 12, Chain: nil}}}, + {Target: "actionGroup.ActionGroup.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("insights.ActionGroupsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, actionGroupName, actionGroup) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ActionGroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroup ActionGroupResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "actionGroupName": autorest.Encode("path", actionGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), + autorest.WithJSON(actionGroup), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client ActionGroupsClient) CreateOrUpdateResponder(resp *http.Response) (result ActionGroupResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete an action group. +// Parameters: +// resourceGroupName - the name of the resource group. +// actionGroupName - the name of the action group. +func (client ActionGroupsClient) Delete(ctx context.Context, resourceGroupName string, actionGroupName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, actionGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ActionGroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string, actionGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "actionGroupName": autorest.Encode("path", actionGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client ActionGroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// EnableReceiver enable a receiver in an action group. This changes the receiver's status from Disabled to Enabled. +// This operation is only supported for Email or SMS receivers. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// actionGroupName - the name of the action group. +// enableRequest - the receiver to re-enable. +func (client ActionGroupsClient) EnableReceiver(ctx context.Context, resourceGroupName string, actionGroupName string, enableRequest EnableRequest) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.EnableReceiver") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: enableRequest, + Constraints: []validation.Constraint{{Target: "enableRequest.ReceiverName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("insights.ActionGroupsClient", "EnableReceiver", err.Error()) + } + + req, err := client.EnableReceiverPreparer(ctx, resourceGroupName, actionGroupName, enableRequest) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "EnableReceiver", nil, "Failure preparing request") + return + } + + resp, err := client.EnableReceiverSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "EnableReceiver", resp, "Failure sending request") + return + } + + result, err = client.EnableReceiverResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "EnableReceiver", resp, "Failure responding to request") + } + + return +} + +// EnableReceiverPreparer prepares the EnableReceiver request. +func (client ActionGroupsClient) EnableReceiverPreparer(ctx context.Context, resourceGroupName string, actionGroupName string, enableRequest EnableRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "actionGroupName": autorest.Encode("path", actionGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}/subscribe", pathParameters), + autorest.WithJSON(enableRequest), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// EnableReceiverSender sends the EnableReceiver request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) EnableReceiverSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// EnableReceiverResponder handles the response to the EnableReceiver request. The method always +// closes the http.Response Body. +func (client ActionGroupsClient) EnableReceiverResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusConflict), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get an action group. 
+// Parameters: +// resourceGroupName - the name of the resource group. +// actionGroupName - the name of the action group. +func (client ActionGroupsClient) Get(ctx context.Context, resourceGroupName string, actionGroupName string) (result ActionGroupResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, actionGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ActionGroupsClient) GetPreparer(ctx context.Context, resourceGroupName string, actionGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "actionGroupName": autorest.Encode("path", actionGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ActionGroupsClient) GetResponder(resp *http.Response) (result ActionGroupResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup get a list of all action groups in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
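+//
+// Editor's illustration (not generated code), assuming ActionGroupList.Value is a
+// *[]ActionGroupResource as modeled in this package:
+//
+//	list, err := client.ListByResourceGroup(ctx, "my-rg")
+//	if err == nil && list.Value != nil {
+//		for _, ag := range *list.Value {
+//			fmt.Println(*ag.Name) // Name comes from the embedded Resource
+//		}
+//	}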
+func (client ActionGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ActionGroupList, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ActionGroupsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ActionGroupsClient) ListByResourceGroupResponder(resp *http.Response) (result ActionGroupList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionID get a list of all action groups in a subscription. 
+func (client ActionGroupsClient) ListBySubscriptionID(ctx context.Context) (result ActionGroupList, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.ListBySubscriptionID") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListBySubscriptionIDPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", resp, "Failure sending request") + return + } + + result, err = client.ListBySubscriptionIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionIDPreparer prepares the ListBySubscriptionID request. +func (client ActionGroupsClient) ListBySubscriptionIDPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/actionGroups", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionIDSender sends the ListBySubscriptionID request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) ListBySubscriptionIDSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListBySubscriptionIDResponder handles the response to the ListBySubscriptionID request. The method always +// closes the http.Response Body. +func (client ActionGroupsClient) ListBySubscriptionIDResponder(resp *http.Response) (result ActionGroupList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates an existing action group's tags. To update other fields use the CreateOrUpdate method. +// Parameters: +// resourceGroupName - the name of the resource group. +// actionGroupName - the name of the action group. +// actionGroupPatch - parameters supplied to the operation. 
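+//
+// Editor's illustration (not generated code): because Update only patches an
+// existing group's tags, a minimal call can look like this, assuming the
+// go-autorest `to` pointer helpers:
+//
+//	_, err := client.Update(ctx, "my-rg", "my-action-group",
+//		insights.ActionGroupPatchBody{
+//			Tags: map[string]*string{"env": to.StringPtr("prod")},
+//		})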
+func (client ActionGroupsClient) Update(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroupPatch ActionGroupPatchBody) (result ActionGroupResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, actionGroupName, actionGroupPatch) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ActionGroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroupPatch ActionGroupPatchBody) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "actionGroupName": autorest.Encode("path", actionGroupName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-06-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/actionGroups/{actionGroupName}", pathParameters), + autorest.WithJSON(actionGroupPatch), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ActionGroupsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client ActionGroupsClient) UpdateResponder(resp *http.Response) (result ActionGroupResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/activitylogalerts.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/activitylogalerts.go new file mode 100644 index 00000000..ed927c20 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/activitylogalerts.go @@ -0,0 +1,513 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ActivityLogAlertsClient is the monitor Management Client +type ActivityLogAlertsClient struct { + BaseClient +} + +// NewActivityLogAlertsClient creates an instance of the ActivityLogAlertsClient client. +func NewActivityLogAlertsClient(subscriptionID string) ActivityLogAlertsClient { + return NewActivityLogAlertsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewActivityLogAlertsClientWithBaseURI creates an instance of the ActivityLogAlertsClient client. +func NewActivityLogAlertsClientWithBaseURI(baseURI string, subscriptionID string) ActivityLogAlertsClient { + return ActivityLogAlertsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create a new activity log alert or update an existing one. +// Parameters: +// resourceGroupName - the name of the resource group. +// activityLogAlertName - the name of the activity log alert. +// activityLogAlert - the activity log alert to create or use for the update. 
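+//
+// Editor's illustration (not generated code); the shape mirrors the validation
+// constraints enforced below — Scopes, Condition.AllOf, and Actions must all be
+// set. The type and field names are assumed from this package's models:
+//
+//	alert := insights.ActivityLogAlertResource{
+//		Location: to.StringPtr("Global"),
+//		ActivityLogAlert: &insights.ActivityLogAlert{
+//			Scopes: &[]string{"/subscriptions/<subscription id>"},
+//			Condition: &insights.ActivityLogAlertAllOfCondition{
+//				AllOf: &[]insights.ActivityLogAlertLeafCondition{
+//					{Field: to.StringPtr("category"), Equals: to.StringPtr("Administrative")},
+//				},
+//			},
+//			Actions: &insights.ActivityLogAlertActionList{},
+//		},
+//	}
+//	_, err := client.CreateOrUpdate(ctx, "my-rg", "my-alert", alert)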
+func (client ActivityLogAlertsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlert ActivityLogAlertResource) (result ActivityLogAlertResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: activityLogAlert, + Constraints: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert.Scopes", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "activityLogAlert.ActivityLogAlert.Condition", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert.Condition.AllOf", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "activityLogAlert.ActivityLogAlert.Actions", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("insights.ActivityLogAlertsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, activityLogAlertName, activityLogAlert) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client ActivityLogAlertsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlert ActivityLogAlertResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "activityLogAlertName": autorest.Encode("path", activityLogAlertName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), + autorest.WithJSON(activityLogAlert), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client ActivityLogAlertsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ActivityLogAlertsClient) CreateOrUpdateResponder(resp *http.Response) (result ActivityLogAlertResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete an activity log alert. +// Parameters: +// resourceGroupName - the name of the resource group. +// activityLogAlertName - the name of the activity log alert. +func (client ActivityLogAlertsClient) Delete(ctx context.Context, resourceGroupName string, activityLogAlertName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, activityLogAlertName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client ActivityLogAlertsClient) DeletePreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "activityLogAlertName": autorest.Encode("path", activityLogAlertName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client ActivityLogAlertsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client ActivityLogAlertsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get get an activity log alert. +// Parameters: +// resourceGroupName - the name of the resource group. +// activityLogAlertName - the name of the activity log alert. +func (client ActivityLogAlertsClient) Get(ctx context.Context, resourceGroupName string, activityLogAlertName string) (result ActivityLogAlertResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, activityLogAlertName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ActivityLogAlertsClient) GetPreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "activityLogAlertName": autorest.Encode("path", activityLogAlertName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ActivityLogAlertsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client ActivityLogAlertsClient) GetResponder(resp *http.Response) (result ActivityLogAlertResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup get a list of all activity log alerts in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
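+//
+// Usage sketch (illustrative; client, ctx, and the group name are hypothetical,
+// and the Value field is assumed from this package's ActivityLogAlertList model):
+//
+//    list, err := client.ListByResourceGroup(ctx, "my-resource-group")
+//    if err == nil && list.Value != nil {
+//        for _, alert := range *list.Value {
+//            fmt.Println(*alert.Name)
+//        }
+//    }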
+func (client ActivityLogAlertsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ActivityLogAlertList, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client ActivityLogAlertsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client ActivityLogAlertsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client ActivityLogAlertsClient) ListByResourceGroupResponder(resp *http.Response) (result ActivityLogAlertList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscriptionID get a list of all activity log alerts in a subscription. 
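+//
+// Credential wiring happens once on the client, not per call. A hedged sketch
+// using the go-autorest azure/auth helper (the env-based credential flow and the
+// subscription ID below are assumptions):
+//
+//    client := insights.NewActivityLogAlertsClient("<subscription-id>")
+//    authorizer, err := auth.NewAuthorizerFromEnvironment()
+//    if err == nil {
+//        client.Authorizer = authorizer
+//    }
+//    all, err := client.ListBySubscriptionID(context.Background())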
+func (client ActivityLogAlertsClient) ListBySubscriptionID(ctx context.Context) (result ActivityLogAlertList, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.ListBySubscriptionID") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListBySubscriptionIDPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", resp, "Failure sending request") + return + } + + result, err = client.ListBySubscriptionIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionIDPreparer prepares the ListBySubscriptionID request. +func (client ActivityLogAlertsClient) ListBySubscriptionIDPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/activityLogAlerts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionIDSender sends the ListBySubscriptionID request. The method will close the +// http.Response Body if it receives an error. +func (client ActivityLogAlertsClient) ListBySubscriptionIDSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListBySubscriptionIDResponder handles the response to the ListBySubscriptionID request. The method always +// closes the http.Response Body. +func (client ActivityLogAlertsClient) ListBySubscriptionIDResponder(resp *http.Response) (result ActivityLogAlertList, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates an existing ActivityLogAlertResource's tags. To update other fields use the CreateOrUpdate method. +// Parameters: +// resourceGroupName - the name of the resource group. +// activityLogAlertName - the name of the activity log alert. +// activityLogAlertPatch - parameters supplied to the operation. 
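+//
+// Usage sketch (illustrative; per the comment above the patch body is limited to
+// tags, plus the enabled flag in this package's ActivityLogAlertPatchBody model):
+//
+//    patch := insights.ActivityLogAlertPatchBody{
+//        Tags: map[string]*string{"env": to.StringPtr("prod")},
+//    }
+//    updated, err := client.Update(ctx, "my-resource-group", "my-alert", patch)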
+func (client ActivityLogAlertsClient) Update(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlertPatch ActivityLogAlertPatchBody) (result ActivityLogAlertResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, activityLogAlertName, activityLogAlertPatch) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client ActivityLogAlertsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlertPatch ActivityLogAlertPatchBody) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "activityLogAlertName": autorest.Encode("path", activityLogAlertName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2017-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/microsoft.insights/activityLogAlerts/{activityLogAlertName}", pathParameters), + autorest.WithJSON(activityLogAlertPatch), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ActivityLogAlertsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client ActivityLogAlertsClient) UpdateResponder(resp *http.Response) (result ActivityLogAlertResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/activitylogs.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/activitylogs.go new file mode 100644 index 00000000..60e31eca --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/activitylogs.go @@ -0,0 +1,173 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ActivityLogsClient is the monitor Management Client +type ActivityLogsClient struct { + BaseClient +} + +// NewActivityLogsClient creates an instance of the ActivityLogsClient client. +func NewActivityLogsClient(subscriptionID string) ActivityLogsClient { + return NewActivityLogsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewActivityLogsClientWithBaseURI creates an instance of the ActivityLogsClient client. +func NewActivityLogsClientWithBaseURI(baseURI string, subscriptionID string) ActivityLogsClient { + return ActivityLogsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List provides the list of records from the activity logs. +// Parameters: +// filter - reduces the set of data collected.
+// This argument is required and it also requires at least the start date/time.<br>
+// The **$filter** argument is very restricted and allows only the following patterns.<br>
+// - *List events for a resource group*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and
+// eventTimestamp le '2014-07-20T04:36:37.6407898Z' and resourceGroupName eq 'resourceGroupName'.<br>
+// - *List events for resource*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and eventTimestamp le
+// '2014-07-20T04:36:37.6407898Z' and resourceUri eq 'resourceURI'.<br>
+// - *List events for a subscription in a time range*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z'
+// and eventTimestamp le '2014-07-20T04:36:37.6407898Z'.<br>
+// - *List events for a resource provider*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and
+// eventTimestamp le '2014-07-20T04:36:37.6407898Z' and resourceProvider eq 'resourceProviderName'.<br>
+// - *List events for a correlation Id*: $filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and
+// eventTimestamp le '2014-07-20T04:36:37.6407898Z' and correlationId eq 'correlationID'.<br>
+// **NOTE**: No other syntax is allowed.
+// selectParameter - used to fetch events with only the given properties.<br>
The **$select** argument is a +// comma separated list of property names to be returned. Possible values are: *authorization*, *claims*, +// *correlationId*, *description*, *eventDataId*, *eventName*, *eventTimestamp*, *httpRequest*, *level*, +// *operationId*, *operationName*, *properties*, *resourceGroupName*, *resourceProviderName*, *resourceId*, +// *status*, *submissionTimestamp*, *subStatus*, *subscriptionId* +func (client ActivityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogsClient.List") + defer func() { + sc := -1 + if result.edc.Response.Response != nil { + sc = result.edc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, selectParameter) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.edc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "List", resp, "Failure sending request") + return + } + + result.edc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ActivityLogsClient) ListPreparer(ctx context.Context, filter string, selectParameter string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "$filter": autorest.Encode("query", filter), + "api-version": APIVersion, + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/eventtypes/management/values", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ActivityLogsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ActivityLogsClient) ListResponder(resp *http.Response) (result EventDataCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
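+//
+// listNextResults is internal plumbing; callers drive paging through the page or
+// iterator types instead. A sketch walking every event via the ListComplete
+// iterator defined below (the filter value is illustrative):
+//
+//    filter := "eventTimestamp ge '2019-10-01T00:00:00Z' and eventTimestamp le '2019-10-02T00:00:00Z'"
+//    iter, err := client.ListComplete(ctx, filter, "")
+//    for ; err == nil && iter.NotDone(); err = iter.NextWithContext(ctx) {
+//        event := iter.Value() // one EventData activity log record
+//        _ = event
+//    }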
+func (client ActivityLogsClient) listNextResults(ctx context.Context, lastResults EventDataCollection) (result EventDataCollection, err error) { + req, err := lastResults.eventDataCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ActivityLogsClient) ListComplete(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, selectParameter) + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/alertruleincidents.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/alertruleincidents.go new file mode 100644 index 00000000..86569442 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/alertruleincidents.go @@ -0,0 +1,197 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AlertRuleIncidentsClient is the monitor Management Client +type AlertRuleIncidentsClient struct { + BaseClient +} + +// NewAlertRuleIncidentsClient creates an instance of the AlertRuleIncidentsClient client. +func NewAlertRuleIncidentsClient(subscriptionID string) AlertRuleIncidentsClient { + return NewAlertRuleIncidentsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAlertRuleIncidentsClientWithBaseURI creates an instance of the AlertRuleIncidentsClient client. 
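+//
+// The WithBaseURI constructor exists for sovereign or otherwise non-default
+// clouds; for example (endpoint shown for illustration only):
+//
+//    client := insights.NewAlertRuleIncidentsClientWithBaseURI(
+//        "https://management.usgovcloudapi.net", "<subscription-id>")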
+func NewAlertRuleIncidentsClientWithBaseURI(baseURI string, subscriptionID string) AlertRuleIncidentsClient { + return AlertRuleIncidentsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets an incident associated to an alert rule +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +// incidentName - the name of the incident to retrieve. +func (client AlertRuleIncidentsClient) Get(ctx context.Context, resourceGroupName string, ruleName string, incidentName string) (result Incident, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleIncidentsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, ruleName, incidentName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AlertRuleIncidentsClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string, incidentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "incidentName": autorest.Encode("path", incidentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents/{incidentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRuleIncidentsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AlertRuleIncidentsClient) GetResponder(resp *http.Response) (result Incident, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByAlertRule gets a list of incidents associated to an alert rule +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. 
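+//
+// Usage sketch (names hypothetical; Value and the Incident fields are assumed
+// from this package's models):
+//
+//    incidents, err := client.ListByAlertRule(ctx, "my-resource-group", "my-rule")
+//    if err == nil && incidents.Value != nil {
+//        for _, incident := range *incidents.Value {
+//            fmt.Println(*incident.Name, incident.IsActive != nil && *incident.IsActive)
+//        }
+//    }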
+func (client AlertRuleIncidentsClient) ListByAlertRule(ctx context.Context, resourceGroupName string, ruleName string) (result IncidentListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleIncidentsClient.ListByAlertRule") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByAlertRulePreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", nil, "Failure preparing request") + return + } + + resp, err := client.ListByAlertRuleSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", resp, "Failure sending request") + return + } + + result, err = client.ListByAlertRuleResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", resp, "Failure responding to request") + } + + return +} + +// ListByAlertRulePreparer prepares the ListByAlertRule request. +func (client AlertRuleIncidentsClient) ListByAlertRulePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}/incidents", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByAlertRuleSender sends the ListByAlertRule request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRuleIncidentsClient) ListByAlertRuleSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByAlertRuleResponder handles the response to the ListByAlertRule request. The method always +// closes the http.Response Body. 
+func (client AlertRuleIncidentsClient) ListByAlertRuleResponder(resp *http.Response) (result IncidentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/alertrules.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/alertrules.go new file mode 100644 index 00000000..34239f10 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/alertrules.go @@ -0,0 +1,512 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AlertRulesClient is the monitor Management Client +type AlertRulesClient struct { + BaseClient +} + +// NewAlertRulesClient creates an instance of the AlertRulesClient client. +func NewAlertRulesClient(subscriptionID string) AlertRulesClient { + return NewAlertRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAlertRulesClientWithBaseURI creates an instance of the AlertRulesClient client. +func NewAlertRulesClientWithBaseURI(baseURI string, subscriptionID string) AlertRulesClient { + return AlertRulesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an alert rule. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +// parameters - the parameters of the rule to create or update. 
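+//
+// Usage sketch (illustrative; the validation below requires Name, IsEnabled, and
+// Condition to be set; ThresholdRuleCondition and RuleMetricDataSource are models
+// assumed from this package, and all values/IDs are hypothetical):
+//
+//    rule := insights.AlertRuleResource{
+//        Location: to.StringPtr("westus"),
+//        AlertRule: &insights.AlertRule{
+//            Name:      to.StringPtr("high-cpu"),
+//            IsEnabled: to.BoolPtr(true),
+//            Condition: &insights.ThresholdRuleCondition{
+//                Operator:  insights.ConditionOperatorGreaterThan,
+//                Threshold: to.Float64Ptr(90),
+//                DataSource: &insights.RuleMetricDataSource{
+//                    ResourceURI: to.StringPtr("<target-resource-id>"),
+//                    MetricName:  to.StringPtr("Percentage CPU"),
+//                },
+//            },
+//        },
+//    }
+//    result, err := client.CreateOrUpdate(ctx, "my-resource-group", "high-cpu", rule)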
+func (client AlertRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters AlertRuleResource) (result AlertRuleResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.AlertRule", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.AlertRule.Name", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AlertRule.IsEnabled", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.AlertRule.Condition", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("insights.AlertRulesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, ruleName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client AlertRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters AlertRuleResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client AlertRulesClient) CreateOrUpdateResponder(resp *http.Response) (result AlertRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes an alert rule +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +func (client AlertRulesClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AlertRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRulesClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AlertRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an alert rule +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. 
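+//
+// Usage sketch (names hypothetical; the embedded AlertRule field is assumed from
+// this package's AlertRuleResource model):
+//
+//    rule, err := client.Get(ctx, "my-resource-group", "high-cpu")
+//    if err == nil && rule.AlertRule != nil && rule.AlertRule.IsEnabled != nil {
+//        fmt.Println("rule enabled:", *rule.AlertRule.IsEnabled)
+//    }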
+func (client AlertRulesClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result AlertRuleResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AlertRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRulesClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AlertRulesClient) GetResponder(resp *http.Response) (result AlertRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup list the alert rules within a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client AlertRulesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AlertRuleResourceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AlertRulesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRulesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AlertRulesClient) ListByResourceGroupResponder(resp *http.Response) (result AlertRuleResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscription list the alert rules within a subscription. 
+func (client AlertRulesClient) ListBySubscription(ctx context.Context) (result AlertRuleResourceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListBySubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client AlertRulesClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/alertrules", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRulesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client AlertRulesClient) ListBySubscriptionResponder(resp *http.Response) (result AlertRuleResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates an existing AlertRuleResource. To update other fields use the CreateOrUpdate method. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +// alertRulesResource - parameters supplied to the operation. 
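+//
+// Usage sketch (illustrative): like the activity-log patch earlier, the patch
+// body carries tags; the field name is assumed from this package's
+// AlertRuleResourcePatch model:
+//
+//    patch := insights.AlertRuleResourcePatch{
+//        Tags: map[string]*string{"owner": to.StringPtr("sre-team")},
+//    }
+//    updated, err := client.Update(ctx, "my-resource-group", "high-cpu", patch)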
+func (client AlertRulesClient) Update(ctx context.Context, resourceGroupName string, ruleName string, alertRulesResource AlertRuleResourcePatch) (result AlertRuleResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, alertRulesResource) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client AlertRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, alertRulesResource AlertRuleResourcePatch) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/alertrules/{ruleName}", pathParameters), + autorest.WithJSON(alertRulesResource), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AlertRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client AlertRulesClient) UpdateResponder(resp *http.Response) (result AlertRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/autoscalesettings.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/autoscalesettings.go new file mode 100644 index 00000000..450ade01 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/autoscalesettings.go @@ -0,0 +1,587 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// AutoscaleSettingsClient is the monitor Management Client +type AutoscaleSettingsClient struct { + BaseClient +} + +// NewAutoscaleSettingsClient creates an instance of the AutoscaleSettingsClient client. +func NewAutoscaleSettingsClient(subscriptionID string) AutoscaleSettingsClient { + return NewAutoscaleSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewAutoscaleSettingsClientWithBaseURI creates an instance of the AutoscaleSettingsClient client. +func NewAutoscaleSettingsClientWithBaseURI(baseURI string, subscriptionID string) AutoscaleSettingsClient { + return AutoscaleSettingsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates an autoscale setting. +// Parameters: +// resourceGroupName - the name of the resource group. +// autoscaleSettingName - the autoscale setting name. +// parameters - parameters supplied to the operation. 
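+//
+// Usage sketch (illustrative; the validation below requires Profiles to be set,
+// with at most 20 entries; AutoscaleProfile and ScaleCapacity are models assumed
+// from this package, and all values/IDs are hypothetical):
+//
+//    setting := insights.AutoscaleSettingResource{
+//        Location: to.StringPtr("westus"),
+//        AutoscaleSetting: &insights.AutoscaleSetting{
+//            Profiles: &[]insights.AutoscaleProfile{{
+//                Name: to.StringPtr("default"),
+//                Capacity: &insights.ScaleCapacity{
+//                    Minimum: to.StringPtr("1"),
+//                    Maximum: to.StringPtr("4"),
+//                    Default: to.StringPtr("2"),
+//                },
+//                Rules: &[]insights.ScaleRule{},
+//            }},
+//            Enabled:           to.BoolPtr(true),
+//            TargetResourceURI: to.StringPtr("<scale-set-resource-id>"),
+//        },
+//    }
+//    result, err := client.CreateOrUpdate(ctx, "my-resource-group", "my-autoscale", setting)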
+func (client AutoscaleSettingsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, autoscaleSettingName string, parameters AutoscaleSettingResource) (result AutoscaleSettingResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.AutoscaleSetting", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.AutoscaleSetting.Profiles", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.AutoscaleSetting.Profiles", Name: validation.MaxItems, Rule: 20, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("insights.AutoscaleSettingsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, autoscaleSettingName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client AutoscaleSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string, parameters AutoscaleSettingResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client AutoscaleSettingsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client AutoscaleSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result AutoscaleSettingResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes and autoscale setting +// Parameters: +// resourceGroupName - the name of the resource group. +// autoscaleSettingName - the autoscale setting name. +func (client AutoscaleSettingsClient) Delete(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, autoscaleSettingName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client AutoscaleSettingsClient) DeletePreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client AutoscaleSettingsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client AutoscaleSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets an autoscale setting +// Parameters: +// resourceGroupName - the name of the resource group. +// autoscaleSettingName - the autoscale setting name. 
+func (client AutoscaleSettingsClient) Get(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (result AutoscaleSettingResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, autoscaleSettingName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client AutoscaleSettingsClient) GetPreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client AutoscaleSettingsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client AutoscaleSettingsClient) GetResponder(resp *http.Response) (result AutoscaleSettingResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup lists the autoscale settings for a resource group +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client AutoscaleSettingsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AutoscaleSettingResourceCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.asrc.Response.Response != nil { + sc = result.asrc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.asrc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.asrc, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client AutoscaleSettingsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client AutoscaleSettingsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client AutoscaleSettingsClient) ListByResourceGroupResponder(resp *http.Response) (result AutoscaleSettingResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. 
+func (client AutoscaleSettingsClient) listByResourceGroupNextResults(ctx context.Context, lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) { + req, err := lastResults.autoscaleSettingResourceCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client AutoscaleSettingsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AutoscaleSettingResourceCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName) + return +} + +// ListBySubscription lists the autoscale settings for a subscription +func (client AutoscaleSettingsClient) ListBySubscription(ctx context.Context) (result AutoscaleSettingResourceCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListBySubscription") + defer func() { + sc := -1 + if result.asrc.Response.Response != nil { + sc = result.asrc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listBySubscriptionNextResults + req, err := client.ListBySubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.asrc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result.asrc, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. 
+func (client AutoscaleSettingsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/autoscalesettings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client AutoscaleSettingsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client AutoscaleSettingsClient) ListBySubscriptionResponder(resp *http.Response) (result AutoscaleSettingResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listBySubscriptionNextResults retrieves the next set of results, if any. +func (client AutoscaleSettingsClient) listBySubscriptionNextResults(ctx context.Context, lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) { + req, err := lastResults.autoscaleSettingResourceCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", resp, "Failure sending next results request") + } + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required. +func (client AutoscaleSettingsClient) ListBySubscriptionComplete(ctx context.Context) (result AutoscaleSettingResourceCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListBySubscription(ctx) + return +} + +// Update updates an existing AutoscaleSettingsResource. To update other fields use the CreateOrUpdate method. +// Parameters: +// resourceGroupName - the name of the resource group. 
+// autoscaleSettingName - the autoscale setting name. +// autoscaleSettingResource - parameters supplied to the operation. +func (client AutoscaleSettingsClient) Update(ctx context.Context, resourceGroupName string, autoscaleSettingName string, autoscaleSettingResource AutoscaleSettingResourcePatch) (result AutoscaleSettingResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, autoscaleSettingName, autoscaleSettingResource) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client AutoscaleSettingsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, autoscaleSettingName string, autoscaleSettingResource AutoscaleSettingResourcePatch) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "autoscaleSettingName": autorest.Encode("path", autoscaleSettingName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/autoscalesettings/{autoscaleSettingName}", pathParameters), + autorest.WithJSON(autoscaleSettingResource), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client AutoscaleSettingsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client AutoscaleSettingsClient) UpdateResponder(resp *http.Response) (result AutoscaleSettingResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/baselines.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/baselines.go new file mode 100644 index 00000000..990d564d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/baselines.go @@ -0,0 +1,154 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// BaselinesClient is the monitor Management Client +type BaselinesClient struct { + BaseClient +} + +// NewBaselinesClient creates an instance of the BaselinesClient client. +func NewBaselinesClient(subscriptionID string) BaselinesClient { + return NewBaselinesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewBaselinesClientWithBaseURI creates an instance of the BaselinesClient client. +func NewBaselinesClientWithBaseURI(baseURI string, subscriptionID string) BaselinesClient { + return BaselinesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List **Lists the metric baseline values for a resource**. +// Parameters: +// resourceURI - the identifier of the resource. +// metricnames - the names of the metrics (comma separated) to retrieve. +// metricnamespace - metric namespace to query metric definitions for. +// timespan - the timespan of the query. It is a string with the following format +// 'startDateTime_ISO/endDateTime_ISO'. +// interval - the interval (i.e. timegrain) of the query. +// aggregation - the list of aggregation types (comma separated) to retrieve. +// sensitivities - the list of sensitivities (comma separated) to retrieve. +// filter - the **$filter** is used to reduce the set of metric data returned.
Example:
+// Metric contains metadata A, B and C.
+// - Return all time series of C where A = a1 and B = b1 or b2:
+//   **$filter=A eq ‘a1’ and B eq ‘b1’ or B eq ‘b2’ and C eq ‘*’**
+// - Invalid variant:
+//   **$filter=A eq ‘a1’ and B eq ‘b1’ and C eq ‘*’ or B = ‘b2’**
+//   This is invalid because the logical or operator cannot separate two different metadata names.
+// - Return all time series where A = a1, B = b1 and C = c1:
+//   **$filter=A eq ‘a1’ and B eq ‘b1’ and C eq ‘c1’**
+// - Return all time series where A = a1:
**$filter=A eq ‘a1’ and B eq ‘*’ and C eq +// ‘*’**. +// resultType - allows retrieving only metadata of the baseline. On data request all information is retrieved. +func (client BaselinesClient) List(ctx context.Context, resourceURI string, metricnames string, metricnamespace string, timespan string, interval *string, aggregation string, sensitivities string, filter string, resultType ResultType) (result MetricBaselinesResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/BaselinesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceURI, metricnames, metricnamespace, timespan, interval, aggregation, sensitivities, filter, resultType) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.BaselinesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.BaselinesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.BaselinesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client BaselinesClient) ListPreparer(ctx context.Context, resourceURI string, metricnames string, metricnamespace string, timespan string, interval *string, aggregation string, sensitivities string, filter string, resultType ResultType) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(metricnames) > 0 { + queryParameters["metricnames"] = autorest.Encode("query", metricnames) + } + if len(metricnamespace) > 0 { + queryParameters["metricnamespace"] = autorest.Encode("query", metricnamespace) + } + if len(timespan) > 0 { + queryParameters["timespan"] = autorest.Encode("query", timespan) + } + if interval != nil { + queryParameters["interval"] = autorest.Encode("query", *interval) + } + if len(aggregation) > 0 { + queryParameters["aggregation"] = autorest.Encode("query", aggregation) + } + if len(sensitivities) > 0 { + queryParameters["sensitivities"] = autorest.Encode("query", sensitivities) + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(string(resultType)) > 0 { + queryParameters["resultType"] = autorest.Encode("query", resultType) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/metricBaselines", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client BaselinesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. 
The method always +// closes the http.Response Body. +func (client BaselinesClient) ListResponder(resp *http.Response) (result MetricBaselinesResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/client.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/client.go new file mode 100644 index 00000000..5ed38802 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/client.go @@ -0,0 +1,51 @@ +// Package insights implements the Azure ARM Insights service API version . +// +// Monitor Management Client +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "github.com/Azure/go-autorest/autorest" +) + +const ( + // DefaultBaseURI is the default URI used for the service Insights + DefaultBaseURI = "https://management.azure.com" +) + +// BaseClient is the base client for Insights. +type BaseClient struct { + autorest.Client + BaseURI string + SubscriptionID string +} + +// New creates an instance of the BaseClient client. +func New(subscriptionID string) BaseClient { + return NewWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewWithBaseURI creates an instance of the BaseClient client. +func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient { + return BaseClient{ + Client: autorest.NewClientWithUserAgent(UserAgent()), + BaseURI: baseURI, + SubscriptionID: subscriptionID, + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/diagnosticsettings.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/diagnosticsettings.go new file mode 100644 index 00000000..3b607d4d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/diagnosticsettings.go @@ -0,0 +1,345 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DiagnosticSettingsClient is the monitor Management Client +type DiagnosticSettingsClient struct { + BaseClient +} + +// NewDiagnosticSettingsClient creates an instance of the DiagnosticSettingsClient client. +func NewDiagnosticSettingsClient(subscriptionID string) DiagnosticSettingsClient { + return NewDiagnosticSettingsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDiagnosticSettingsClientWithBaseURI creates an instance of the DiagnosticSettingsClient client. +func NewDiagnosticSettingsClientWithBaseURI(baseURI string, subscriptionID string) DiagnosticSettingsClient { + return DiagnosticSettingsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate creates or updates diagnostic settings for the specified resource. +// Parameters: +// resourceURI - the identifier of the resource. +// parameters - parameters supplied to the operation. +// name - the name of the diagnostic setting. +func (client DiagnosticSettingsClient) CreateOrUpdate(ctx context.Context, resourceURI string, parameters DiagnosticSettingsResource, name string) (result DiagnosticSettingsResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, resourceURI, parameters, name) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DiagnosticSettingsClient) CreateOrUpdatePreparer(ctx context.Context, resourceURI string, parameters DiagnosticSettingsResource, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceUri": resourceURI, + } + + const APIVersion = "2017-05-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings/{name}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticSettingsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DiagnosticSettingsClient) CreateOrUpdateResponder(resp *http.Response) (result DiagnosticSettingsResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes existing diagnostic settings for the specified resource. +// Parameters: +// resourceURI - the identifier of the resource. +// name - the name of the diagnostic setting. +func (client DiagnosticSettingsClient) Delete(ctx context.Context, resourceURI string, name string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceURI, name) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client DiagnosticSettingsClient) DeletePreparer(ctx context.Context, resourceURI string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceUri": resourceURI, + } + + const APIVersion = "2017-05-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticSettingsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client DiagnosticSettingsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the active diagnostic settings for the specified resource. +// Parameters: +// resourceURI - the identifier of the resource. +// name - the name of the diagnostic setting. +func (client DiagnosticSettingsClient) Get(ctx context.Context, resourceURI string, name string) (result DiagnosticSettingsResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceURI, name) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client DiagnosticSettingsClient) GetPreparer(ctx context.Context, resourceURI string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceUri": resourceURI, + } + + const APIVersion = "2017-05-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticSettingsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DiagnosticSettingsClient) GetResponder(resp *http.Response) (result DiagnosticSettingsResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets the active diagnostic settings list for the specified resource. +// Parameters: +// resourceURI - the identifier of the resource. +func (client DiagnosticSettingsClient) List(ctx context.Context, resourceURI string) (result DiagnosticSettingsResourceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceURI) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DiagnosticSettingsClient) ListPreparer(ctx context.Context, resourceURI string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2017-05-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettings", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. 
The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticSettingsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DiagnosticSettingsClient) ListResponder(resp *http.Response) (result DiagnosticSettingsResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/diagnosticsettingscategory.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/diagnosticsettingscategory.go new file mode 100644 index 00000000..587b7f56 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/diagnosticsettingscategory.go @@ -0,0 +1,191 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// DiagnosticSettingsCategoryClient is the monitor Management Client +type DiagnosticSettingsCategoryClient struct { + BaseClient +} + +// NewDiagnosticSettingsCategoryClient creates an instance of the DiagnosticSettingsCategoryClient client. +func NewDiagnosticSettingsCategoryClient(subscriptionID string) DiagnosticSettingsCategoryClient { + return NewDiagnosticSettingsCategoryClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDiagnosticSettingsCategoryClientWithBaseURI creates an instance of the DiagnosticSettingsCategoryClient client. +func NewDiagnosticSettingsCategoryClientWithBaseURI(baseURI string, subscriptionID string) DiagnosticSettingsCategoryClient { + return DiagnosticSettingsCategoryClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the diagnostic settings category for the specified resource. +// Parameters: +// resourceURI - the identifier of the resource. +// name - the name of the diagnostic setting. 
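+//
+// Usage sketch (editorial addition; the resource URI and category name below
+// are placeholders, and the client is assumed to have an Authorizer set):
+//
+//    client := insights.NewDiagnosticSettingsCategoryClient("<subscription id>")
+//    category, err := client.Get(context.Background(), "<resource URI>", "<category name>")
+//    if err == nil && category.Name != nil {
+//        fmt.Println(*category.Name)
+//    }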
+func (client DiagnosticSettingsCategoryClient) Get(ctx context.Context, resourceURI string, name string) (result DiagnosticSettingsCategoryResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsCategoryClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceURI, name) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DiagnosticSettingsCategoryClient) GetPreparer(ctx context.Context, resourceURI string, name string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "name": autorest.Encode("path", name), + "resourceUri": resourceURI, + } + + const APIVersion = "2017-05-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettingsCategories/{name}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticSettingsCategoryClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DiagnosticSettingsCategoryClient) GetResponder(resp *http.Response) (result DiagnosticSettingsCategoryResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List lists the diagnostic settings categories for the specified resource. +// Parameters: +// resourceURI - the identifier of the resource. 
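+//
+// Usage sketch (editorial addition; assumes the generated collection type
+// carries the categories in a Value field, per the models in this package):
+//
+//    client := insights.NewDiagnosticSettingsCategoryClient("<subscription id>")
+//    categories, err := client.List(context.Background(), "<resource URI>")
+//    if err == nil && categories.Value != nil {
+//        for _, c := range *categories.Value {
+//            fmt.Println(*c.Name)
+//        }
+//    }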
+func (client DiagnosticSettingsCategoryClient) List(ctx context.Context, resourceURI string) (result DiagnosticSettingsCategoryResourceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsCategoryClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceURI) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client DiagnosticSettingsCategoryClient) ListPreparer(ctx context.Context, resourceURI string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2017-05-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/diagnosticSettingsCategories", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DiagnosticSettingsCategoryClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DiagnosticSettingsCategoryClient) ListResponder(resp *http.Response) (result DiagnosticSettingsCategoryResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/eventcategories.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/eventcategories.go new file mode 100644 index 00000000..b09fb63c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/eventcategories.go @@ -0,0 +1,110 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// EventCategoriesClient is the monitor Management Client +type EventCategoriesClient struct { + BaseClient +} + +// NewEventCategoriesClient creates an instance of the EventCategoriesClient client. +func NewEventCategoriesClient(subscriptionID string) EventCategoriesClient { + return NewEventCategoriesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewEventCategoriesClientWithBaseURI creates an instance of the EventCategoriesClient client. +func NewEventCategoriesClientWithBaseURI(baseURI string, subscriptionID string) EventCategoriesClient { + return EventCategoriesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List get the list of available event categories supported in the Activity Logs Service.
The current list includes +// the following: Administrative, Security, ServiceHealth, Alert, Recommendation, Policy. +func (client EventCategoriesClient) List(ctx context.Context) (result EventCategoryCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EventCategoriesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client EventCategoriesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/microsoft.insights/eventcategories"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client EventCategoriesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client EventCategoriesClient) ListResponder(resp *http.Response) (result EventCategoryCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/logprofiles.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/logprofiles.go new file mode 100644 index 00000000..6060395c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/logprofiles.go @@ -0,0 +1,433 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// LogProfilesClient is the monitor Management Client +type LogProfilesClient struct { + BaseClient +} + +// NewLogProfilesClient creates an instance of the LogProfilesClient client. +func NewLogProfilesClient(subscriptionID string) LogProfilesClient { + return NewLogProfilesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewLogProfilesClientWithBaseURI creates an instance of the LogProfilesClient client. +func NewLogProfilesClientWithBaseURI(baseURI string, subscriptionID string) LogProfilesClient { + return LogProfilesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update a log profile in Azure Monitoring REST API. +// Parameters: +// logProfileName - the name of the log profile. +// parameters - parameters supplied to the operation. +func (client LogProfilesClient) CreateOrUpdate(ctx context.Context, logProfileName string, parameters LogProfileResource) (result LogProfileResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.LogProfileProperties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.LogProfileProperties.Locations", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LogProfileProperties.Categories", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LogProfileProperties.RetentionPolicy", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.LogProfileProperties.RetentionPolicy.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.LogProfileProperties.RetentionPolicy.Days", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.LogProfileProperties.RetentionPolicy.Days", Name: validation.InclusiveMinimum, Rule: 0, Chain: nil}}}, + }}, + }}}}}); err != nil { + return result, validation.NewError("insights.LogProfilesClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, logProfileName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "CreateOrUpdate", resp, "Failure 
sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client LogProfilesClient) CreateOrUpdatePreparer(ctx context.Context, logProfileName string, parameters LogProfileResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "logProfileName": autorest.Encode("path", logProfileName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client LogProfilesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client LogProfilesClient) CreateOrUpdateResponder(resp *http.Response) (result LogProfileResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes the log profile. +// Parameters: +// logProfileName - the name of the log profile. +func (client LogProfilesClient) Delete(ctx context.Context, logProfileName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, logProfileName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client LogProfilesClient) DeletePreparer(ctx context.Context, logProfileName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "logProfileName": autorest.Encode("path", logProfileName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client LogProfilesClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client LogProfilesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets the log profile. +// Parameters: +// logProfileName - the name of the log profile. +func (client LogProfilesClient) Get(ctx context.Context, logProfileName string) (result LogProfileResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, logProfileName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client LogProfilesClient) GetPreparer(ctx context.Context, logProfileName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "logProfileName": autorest.Encode("path", logProfileName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client LogProfilesClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client LogProfilesClient) GetResponder(resp *http.Response) (result LogProfileResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List list the log profiles. +func (client LogProfilesClient) List(ctx context.Context) (result LogProfileCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client LogProfilesClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client LogProfilesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client LogProfilesClient) ListResponder(resp *http.Response) (result LogProfileCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update updates an existing LogProfilesResource. To update other fields use the CreateOrUpdate method. +// Parameters: +// logProfileName - the name of the log profile. +// logProfilesResource - parameters supplied to the operation. 
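[Editorial note] With CreateOrUpdate, Delete, Get and List above and Update below, this file covers the whole log-profile surface of the client. A minimal usage sketch, not part of the vendored change; the subscription ID, the profile name "default", and the environment-based authorizer are assumptions about the caller:

    package main

    import (
        "context"
        "fmt"

        "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
        "github.com/Azure/go-autorest/autorest/azure/auth"
    )

    func main() {
        // Assumed subscription ID; replace with a real one.
        subscriptionID := "00000000-0000-0000-0000-000000000000"
        client := insights.NewLogProfilesClient(subscriptionID)

        // Assumption: service-principal credentials are supplied through the
        // AZURE_* environment variables understood by the auth package.
        authorizer, err := auth.NewAuthorizerFromEnvironment()
        if err != nil {
            panic(err)
        }
        client.Authorizer = authorizer

        ctx := context.Background()
        // Get(ctx, logProfileName) matches the signature defined in this file.
        profile, err := client.Get(ctx, "default")
        if err != nil {
            panic(err)
        }
        // Name comes from the embedded ARM resource type in models.go (not part
        // of this diff), so treating it as *string is an assumption.
        fmt.Println(*profile.Name)
    }

Later fragments in these notes reuse ctx, authorizer and subscriptionID from this sketch, swapping in the matching client constructor.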
+func (client LogProfilesClient) Update(ctx context.Context, logProfileName string, logProfilesResource LogProfileResourcePatch) (result LogProfileResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, logProfileName, logProfilesResource) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client LogProfilesClient) UpdatePreparer(ctx context.Context, logProfileName string, logProfilesResource LogProfileResourcePatch) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "logProfileName": autorest.Encode("path", logProfileName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2016-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/logprofiles/{logProfileName}", pathParameters), + autorest.WithJSON(logProfilesResource), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client LogProfilesClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client LogProfilesClient) UpdateResponder(resp *http.Response) (result LogProfileResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricalerts.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricalerts.go new file mode 100644 index 00000000..055a96e3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricalerts.go @@ -0,0 +1,515 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MetricAlertsClient is the monitor Management Client +type MetricAlertsClient struct { + BaseClient +} + +// NewMetricAlertsClient creates an instance of the MetricAlertsClient client. +func NewMetricAlertsClient(subscriptionID string) MetricAlertsClient { + return NewMetricAlertsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMetricAlertsClientWithBaseURI creates an instance of the MetricAlertsClient client. +func NewMetricAlertsClientWithBaseURI(baseURI string, subscriptionID string) MetricAlertsClient { + return MetricAlertsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate create or update an metric alert definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +// parameters - the parameters of the rule to create or update. 
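[Editorial note] The implementation below validates the required alert properties (Description, Severity, Enabled, EvaluationFrequency, WindowSize, Criteria) client-side before any HTTP call, so an incomplete resource fails fast. A fragment illustrating that behavior, reusing the setup from the LogProfiles sketch with NewMetricAlertsClient swapped in; the resource-group and rule names are placeholders:

    alertsClient := insights.NewMetricAlertsClient(subscriptionID)
    alertsClient.Authorizer = authorizer

    // An empty MetricAlertResource leaves MetricAlertProperties nil, so the
    // validation chain below rejects it via validation.NewError and no
    // request is ever sent.
    _, err := alertsClient.CreateOrUpdate(ctx, "my-resource-group", "my-rule", insights.MetricAlertResource{})
    if err != nil {
        fmt.Println(err) // mentions insights.MetricAlertsClient / CreateOrUpdate
    }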
+func (client MetricAlertsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource) (result MetricAlertResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.MetricAlertProperties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.MetricAlertProperties.Description", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.MetricAlertProperties.Severity", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.MetricAlertProperties.Enabled", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.MetricAlertProperties.EvaluationFrequency", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.MetricAlertProperties.WindowSize", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "parameters.MetricAlertProperties.Criteria", Name: validation.Null, Rule: true, Chain: nil}, + }}}}}); err != nil { + return result, validation.NewError("insights.MetricAlertsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, ruleName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client MetricAlertsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client MetricAlertsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client MetricAlertsClient) CreateOrUpdateResponder(resp *http.Response) (result MetricAlertResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete delete an alert rule definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +func (client MetricAlertsClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client MetricAlertsClient) DeletePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client MetricAlertsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get retrieve an alert rule definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +func (client MetricAlertsClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result MetricAlertResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client MetricAlertsClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client MetricAlertsClient) GetResponder(resp *http.Response) (result MetricAlertResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByResourceGroup retrieve alert rule definitions in a resource group. +// Parameters: +// resourceGroupName - the name of the resource group. 
+func (client MetricAlertsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result MetricAlertResourceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client MetricAlertsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client MetricAlertsClient) ListByResourceGroupResponder(resp *http.Response) (result MetricAlertResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListBySubscription retrieve alert rule definitions in a subscription. 
+func (client MetricAlertsClient) ListBySubscription(ctx context.Context) (result MetricAlertResourceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.ListBySubscription") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListBySubscriptionPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", nil, "Failure preparing request") + return + } + + resp, err := client.ListBySubscriptionSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", resp, "Failure sending request") + return + } + + result, err = client.ListBySubscriptionResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", resp, "Failure responding to request") + } + + return +} + +// ListBySubscriptionPreparer prepares the ListBySubscription request. +func (client MetricAlertsClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Insights/metricAlerts", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListBySubscriptionSender sends the ListBySubscription request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always +// closes the http.Response Body. +func (client MetricAlertsClient) ListBySubscriptionResponder(resp *http.Response) (result MetricAlertResourceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Update update an metric alert definition. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +// parameters - the parameters of the rule to update. 
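[Editorial note] Unlike CreateOrUpdate, which PUTs the full resource, the Update implementation below PATCHes a MetricAlertResourcePatch. A hedged fragment continuing the previous one; the Tags field on the patch type and the to.StringPtr helper from github.com/Azure/go-autorest/autorest/to are assumptions about models.go, which is outside this diff:

    patch := insights.MetricAlertResourcePatch{
        // Assumed field: patch models in this SDK typically carry tags plus a
        // properties sub-struct.
        Tags: map[string]*string{"env": to.StringPtr("prod")},
    }
    updated, err := alertsClient.Update(ctx, "my-resource-group", "my-rule", patch)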
+func (client MetricAlertsClient) Update(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResourcePatch) (result MetricAlertResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client MetricAlertsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResourcePatch) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. 
+func (client MetricAlertsClient) UpdateResponder(resp *http.Response) (result MetricAlertResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricalertsstatus.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricalertsstatus.go new file mode 100644 index 00000000..04617c64 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricalertsstatus.go @@ -0,0 +1,197 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MetricAlertsStatusClient is the monitor Management Client +type MetricAlertsStatusClient struct { + BaseClient +} + +// NewMetricAlertsStatusClient creates an instance of the MetricAlertsStatusClient client. +func NewMetricAlertsStatusClient(subscriptionID string) MetricAlertsStatusClient { + return NewMetricAlertsStatusClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMetricAlertsStatusClientWithBaseURI creates an instance of the MetricAlertsStatusClient client. +func NewMetricAlertsStatusClientWithBaseURI(baseURI string, subscriptionID string) MetricAlertsStatusClient { + return MetricAlertsStatusClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List retrieve an alert rule status. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. 
+func (client MetricAlertsStatusClient) List(ctx context.Context, resourceGroupName string, ruleName string) (result MetricAlertStatusCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsStatusClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client MetricAlertsStatusClient) ListPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}/status", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsStatusClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client MetricAlertsStatusClient) ListResponder(resp *http.Response) (result MetricAlertStatusCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ListByName retrieve an alert rule status. +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +// statusName - the name of the status. 
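[Editorial note] Both status operations return a MetricAlertStatusCollection; ListByName, implemented below, merely narrows the same call by status name. A short fragment, with setup as in the first sketch:

    statusClient := insights.NewMetricAlertsStatusClient(subscriptionID)
    statusClient.Authorizer = authorizer
    statuses, err := statusClient.List(ctx, "my-resource-group", "my-rule")
    if err != nil {
        panic(err)
    }
    _ = statuses // inspect statuses.Value for the per-rule status entries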
+func (client MetricAlertsStatusClient) ListByName(ctx context.Context, resourceGroupName string, ruleName string, statusName string) (result MetricAlertStatusCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsStatusClient.ListByName") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListByNamePreparer(ctx, resourceGroupName, ruleName, statusName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", nil, "Failure preparing request") + return + } + + resp, err := client.ListByNameSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", resp, "Failure sending request") + return + } + + result, err = client.ListByNameResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", resp, "Failure responding to request") + } + + return +} + +// ListByNamePreparer prepares the ListByName request. +func (client MetricAlertsStatusClient) ListByNamePreparer(ctx context.Context, resourceGroupName string, ruleName string, statusName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "statusName": autorest.Encode("path", statusName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/metricAlerts/{ruleName}/status/{statusName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByNameSender sends the ListByName request. The method will close the +// http.Response Body if it receives an error. +func (client MetricAlertsStatusClient) ListByNameSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByNameResponder handles the response to the ListByName request. The method always +// closes the http.Response Body. 
+func (client MetricAlertsStatusClient) ListByNameResponder(resp *http.Response) (result MetricAlertStatusCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricbaseline.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricbaseline.go new file mode 100644 index 00000000..d12bd9d4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricbaseline.go @@ -0,0 +1,229 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MetricBaselineClient is the monitor Management Client +type MetricBaselineClient struct { + BaseClient +} + +// NewMetricBaselineClient creates an instance of the MetricBaselineClient client. +func NewMetricBaselineClient(subscriptionID string) MetricBaselineClient { + return NewMetricBaselineClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMetricBaselineClientWithBaseURI creates an instance of the MetricBaselineClient client. +func NewMetricBaselineClientWithBaseURI(baseURI string, subscriptionID string) MetricBaselineClient { + return MetricBaselineClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CalculateBaseline **Lists the baseline values for a resource**. +// Parameters: +// resourceURI - the identifier of the resource. It has the following structure: +// subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}. +// For example: +// subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1 +// timeSeriesInformation - information that need to be specified to calculate a baseline on a time series. 
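[Editorial note] The validation chain in the CalculateBaseline implementation below makes both Sensitivities and Values mandatory on TimeSeriesInformation. A hedged fragment; the exact Go types of those fields and the resourceURI value are assumptions, since the model lives in models.go outside this diff:

    info := insights.TimeSeriesInformation{
        Sensitivities: &[]string{"Low"},          // required, per the validation below
        Values:        &[]float64{1.0, 2.0, 3.0}, // required, per the validation below
    }
    baselineClient := insights.NewMetricBaselineClient(subscriptionID)
    baselineClient.Authorizer = authorizer
    // resourceURI is a placeholder ARM resource ID of the form documented above.
    resp, err := baselineClient.CalculateBaseline(ctx, resourceURI, info)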
+func (client MetricBaselineClient) CalculateBaseline(ctx context.Context, resourceURI string, timeSeriesInformation TimeSeriesInformation) (result CalculateBaselineResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricBaselineClient.CalculateBaseline") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: timeSeriesInformation, + Constraints: []validation.Constraint{{Target: "timeSeriesInformation.Sensitivities", Name: validation.Null, Rule: true, Chain: nil}, + {Target: "timeSeriesInformation.Values", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("insights.MetricBaselineClient", "CalculateBaseline", err.Error()) + } + + req, err := client.CalculateBaselinePreparer(ctx, resourceURI, timeSeriesInformation) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "CalculateBaseline", nil, "Failure preparing request") + return + } + + resp, err := client.CalculateBaselineSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "CalculateBaseline", resp, "Failure sending request") + return + } + + result, err = client.CalculateBaselineResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "CalculateBaseline", resp, "Failure responding to request") + } + + return +} + +// CalculateBaselinePreparer prepares the CalculateBaseline request. +func (client MetricBaselineClient) CalculateBaselinePreparer(ctx context.Context, resourceURI string, timeSeriesInformation TimeSeriesInformation) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2017-11-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/calculatebaseline", pathParameters), + autorest.WithJSON(timeSeriesInformation), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CalculateBaselineSender sends the CalculateBaseline request. The method will close the +// http.Response Body if it receives an error. +func (client MetricBaselineClient) CalculateBaselineSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// CalculateBaselineResponder handles the response to the CalculateBaseline request. The method always +// closes the http.Response Body. +func (client MetricBaselineClient) CalculateBaselineResponder(resp *http.Response) (result CalculateBaselineResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get **Gets the baseline values for a specific metric**. 
+// Parameters: +// resourceURI - the identifier of the resource. It has the following structure: +// subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}. +// For example: +// subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1 +// metricName - the name of the metric to retrieve the baseline for. +// timespan - the timespan of the query. It is a string with the following format +// 'startDateTime_ISO/endDateTime_ISO'. +// interval - the interval (i.e. timegrain) of the query. +// aggregation - the aggregation type of the metric to retrieve the baseline for. +// sensitivities - the list of sensitivities (comma separated) to retrieve. +// resultType - allows retrieving only metadata of the baseline. On data request all information is retrieved. +func (client MetricBaselineClient) Get(ctx context.Context, resourceURI string, metricName string, timespan string, interval *string, aggregation string, sensitivities string, resultType ResultType) (result BaselineResponse, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricBaselineClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceURI, metricName, timespan, interval, aggregation, sensitivities, resultType) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client MetricBaselineClient) GetPreparer(ctx context.Context, resourceURI string, metricName string, timespan string, interval *string, aggregation string, sensitivities string, resultType ResultType) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "metricName": autorest.Encode("path", metricName), + "resourceUri": resourceURI, + } + + const APIVersion = "2017-11-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(timespan) > 0 { + queryParameters["timespan"] = autorest.Encode("query", timespan) + } + if interval != nil { + queryParameters["interval"] = autorest.Encode("query", *interval) + } + if len(aggregation) > 0 { + queryParameters["aggregation"] = autorest.Encode("query", aggregation) + } + if len(sensitivities) > 0 { + queryParameters["sensitivities"] = autorest.Encode("query", sensitivities) + } + if len(string(resultType)) > 0 { + queryParameters["resultType"] = autorest.Encode("query", resultType) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/baseline/{metricName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. 
The method will close the +// http.Response Body if it receives an error. +func (client MetricBaselineClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client MetricBaselineClient) GetResponder(resp *http.Response) (result BaselineResponse, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricdefinitions.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricdefinitions.go new file mode 100644 index 00000000..29a5bbee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricdefinitions.go @@ -0,0 +1,119 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MetricDefinitionsClient is the monitor Management Client +type MetricDefinitionsClient struct { + BaseClient +} + +// NewMetricDefinitionsClient creates an instance of the MetricDefinitionsClient client. +func NewMetricDefinitionsClient(subscriptionID string) MetricDefinitionsClient { + return NewMetricDefinitionsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMetricDefinitionsClientWithBaseURI creates an instance of the MetricDefinitionsClient client. +func NewMetricDefinitionsClientWithBaseURI(baseURI string, subscriptionID string) MetricDefinitionsClient { + return MetricDefinitionsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists the metric definitions for the resource. +// Parameters: +// resourceURI - the identifier of the resource. +// metricnamespace - metric namespace to query metric definitions for. 
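[Editorial note] Per the ListPreparer below, an empty metricnamespace is simply omitted from the query string, so passing "" lists definitions across all namespaces of the resource. A fragment with the usual setup; resourceURI is a placeholder ARM resource ID:

    defClient := insights.NewMetricDefinitionsClient(subscriptionID)
    defClient.Authorizer = authorizer
    defs, err := defClient.List(ctx, resourceURI, "") // "" => no metricnamespace filter
    if err != nil {
        panic(err)
    }
    _ = defs // defs.Value holds the metric definitions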
+func (client MetricDefinitionsClient) List(ctx context.Context, resourceURI string, metricnamespace string) (result MetricDefinitionCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricDefinitionsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceURI, metricnamespace) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client MetricDefinitionsClient) ListPreparer(ctx context.Context, resourceURI string, metricnamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2018-01-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(metricnamespace) > 0 { + queryParameters["metricnamespace"] = autorest.Encode("query", metricnamespace) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/metricDefinitions", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client MetricDefinitionsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client MetricDefinitionsClient) ListResponder(resp *http.Response) (result MetricDefinitionCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricnamespaces.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricnamespaces.go new file mode 100644 index 00000000..5ff2cdaa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metricnamespaces.go @@ -0,0 +1,119 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MetricNamespacesClient is the monitor Management Client +type MetricNamespacesClient struct { + BaseClient +} + +// NewMetricNamespacesClient creates an instance of the MetricNamespacesClient client. +func NewMetricNamespacesClient(subscriptionID string) MetricNamespacesClient { + return NewMetricNamespacesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMetricNamespacesClientWithBaseURI creates an instance of the MetricNamespacesClient client. +func NewMetricNamespacesClientWithBaseURI(baseURI string, subscriptionID string) MetricNamespacesClient { + return MetricNamespacesClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists the metric namespaces for the resource. +// Parameters: +// resourceURI - the identifier of the resource. +// startTime - the ISO 8601 conform Date start time from which to query for metric namespaces. +func (client MetricNamespacesClient) List(ctx context.Context, resourceURI string, startTime string) (result MetricNamespaceCollection, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/MetricNamespacesClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx, resourceURI, startTime) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricNamespacesClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.MetricNamespacesClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.MetricNamespacesClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
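[Editorial note] This client mirrors MetricDefinitionsClient above: an empty startTime is dropped from the query, as the ListPreparer implementation that follows shows. A matching fragment; the ISO 8601 start time is a placeholder:

    nsClient := insights.NewMetricNamespacesClient(subscriptionID)
    nsClient.Authorizer = authorizer
    namespaces, err := nsClient.List(ctx, resourceURI, "2019-06-01T00:00:00Z")
    if err != nil {
        panic(err)
    }
    _ = namespaces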
+func (client MetricNamespacesClient) ListPreparer(ctx context.Context, resourceURI string, startTime string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2017-12-01-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(startTime) > 0 { + queryParameters["startTime"] = autorest.Encode("query", startTime) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/metricNamespaces", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client MetricNamespacesClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client MetricNamespacesClient) ListResponder(resp *http.Response) (result MetricNamespaceCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metrics.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metrics.go new file mode 100644 index 00000000..f1952a3f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/metrics.go @@ -0,0 +1,163 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// MetricsClient is the monitor Management Client +type MetricsClient struct { + BaseClient +} + +// NewMetricsClient creates an instance of the MetricsClient client. +func NewMetricsClient(subscriptionID string) MetricsClient { + return NewMetricsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewMetricsClientWithBaseURI creates an instance of the MetricsClient client. 
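
// Editor's sketch (not part of the vendored file): a minimal call of the
// MetricNamespacesClient.List pipeline defined above. The subscription ID and
// resource URI are placeholders, credentials are omitted, and the Value field
// on MetricNamespaceCollection is assumed from the surrounding models.
func ExampleMetricNamespacesClient_List() {
	client := NewMetricNamespacesClient("<subscriptionID>")
	// client.Authorizer = ... // wire up an autorest.Authorizer in real code

	// An empty startTime is valid: ListPreparer only adds the query
	// parameter when the string is non-empty.
	result, err := client.List(context.Background(),
		"/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
		"2019-11-01T00:00:00Z")
	if err != nil {
		return
	}
	if result.Value != nil {
		for _, ns := range *result.Value {
			_ = ns // one entry per metric namespace exposed by the resource
		}
	}
}
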
+func NewMetricsClientWithBaseURI(baseURI string, subscriptionID string) MetricsClient {
+	return MetricsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List **Lists the metric values for a resource**.
+// Parameters:
+// resourceURI - the identifier of the resource.
+// timespan - the timespan of the query. It is a string with the following format
+// 'startDateTime_ISO/endDateTime_ISO'.
+// interval - the interval (i.e. timegrain) of the query.
+// metricnames - the names of the metrics (comma separated) to retrieve.
+// aggregation - the list of aggregation types (comma separated) to retrieve.
+// top - the maximum number of records to retrieve.
+// Valid only if $filter is specified.
+// Defaults to 10.
+// orderby - the aggregation to use for sorting results and the direction of the sort.
+// Only one order can be specified.
+// Examples: sum asc.
+// filter - the **$filter** is used to reduce the set of metric data returned.
+// Example: Metric contains metadata A, B and C.
+// - Return all time series of C where A = a1 and B = b1 or b2:
+// **$filter=A eq ‘a1’ and B eq ‘b1’ or B eq ‘b2’ and C eq ‘*’**
+// - Invalid variant:
+// **$filter=A eq ‘a1’ and B eq ‘b1’ and C eq ‘*’ or B = ‘b2’**
+// This is invalid because the logical or operator cannot separate two different metadata names.
+// - Return all time series where A = a1, B = b1 and C = c1:
+// **$filter=A eq ‘a1’ and B eq ‘b1’ and C eq ‘c1’**
+// - Return all time series where A = a1:
+// **$filter=A eq ‘a1’ and B eq ‘*’ and C eq ‘*’**.
+// resultType - reduces the set of data collected. The syntax allowed depends on the operation. See the
+// operation's description for details.
+// metricnamespace - metric namespace to query metric definitions for.
+func (client MetricsClient) List(ctx context.Context, resourceURI string, timespan string, interval *string, metricnames string, aggregation string, top *int32, orderby string, filter string, resultType ResultType, metricnamespace string) (result Response, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/MetricsClient.List")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.ListPreparer(ctx, resourceURI, timespan, interval, metricnames, aggregation, top, orderby, filter, resultType, metricnamespace)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client MetricsClient) ListPreparer(ctx context.Context, resourceURI string, timespan string, interval *string, metricnames string, aggregation string, top *int32, orderby string, filter string, resultType ResultType, metricnamespace string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceUri": resourceURI,
+	}
+
+	const APIVersion = "2018-01-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(timespan) > 0 {
+		queryParameters["timespan"] = autorest.Encode("query", timespan)
+	}
+	if interval != nil {
+		queryParameters["interval"] = autorest.Encode("query", *interval)
+	}
+	if len(metricnames) > 0 {
+		queryParameters["metricnames"] = autorest.Encode("query", metricnames)
+	}
+	if len(aggregation) > 0 {
+		queryParameters["aggregation"] = autorest.Encode("query", aggregation)
+	}
+	if top != nil {
+		queryParameters["top"] = autorest.Encode("query", *top)
+	}
+	if len(orderby) > 0 {
+		queryParameters["orderby"] = autorest.Encode("query", orderby)
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if len(string(resultType)) > 0 {
+		queryParameters["resultType"] = autorest.Encode("query", resultType)
+	}
+	if len(metricnamespace) > 0 {
+		queryParameters["metricnamespace"] = autorest.Encode("query", metricnamespace)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/{resourceUri}/providers/microsoft.insights/metrics", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
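
// Editor's sketch (not part of the vendored file): driving the List call whose
// preparer appears above. Resource URI, metric names, and namespace are
// illustrative placeholders; credentials are omitted.
func ExampleMetricsClient_List() {
	client := NewMetricsClient("<subscriptionID>")
	// client.Authorizer = ... // wire up an autorest.Authorizer in real code

	interval := "PT1M" // one-minute timegrain, ISO 8601 duration
	var top int32 = 10
	result, err := client.List(context.Background(),
		"/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>",
		"2019-11-01T00:00:00Z/2019-11-01T01:00:00Z", // timespan: 'start/end'
		&interval,
		"Percentage CPU",  // metricnames (comma separated)
		"Average,Maximum", // aggregation (comma separated)
		&top,
		"",   // orderby
		"",   // filter, e.g. "A eq ‘a1’ and B eq ‘*’"
		Data, // resultType: data points rather than metadata
		"microsoft.compute/virtualmachines")
	if err != nil {
		return
	}
	_ = result // the Response model wraps the returned metric values
}
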
+func (client MetricsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client MetricsClient) ListResponder(resp *http.Response) (result Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/models.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/models.go new file mode 100644 index 00000000..effcd2f0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/models.go @@ -0,0 +1,5822 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights" + +// AggregationType enumerates the values for aggregation type. +type AggregationType string + +const ( + // Average ... + Average AggregationType = "Average" + // Count ... + Count AggregationType = "Count" + // Maximum ... + Maximum AggregationType = "Maximum" + // Minimum ... + Minimum AggregationType = "Minimum" + // None ... + None AggregationType = "None" + // Total ... + Total AggregationType = "Total" +) + +// PossibleAggregationTypeValues returns an array of possible values for the AggregationType const type. +func PossibleAggregationTypeValues() []AggregationType { + return []AggregationType{Average, Count, Maximum, Minimum, None, Total} +} + +// AlertSeverity enumerates the values for alert severity. +type AlertSeverity string + +const ( + // Four ... + Four AlertSeverity = "4" + // One ... + One AlertSeverity = "1" + // Three ... + Three AlertSeverity = "3" + // Two ... + Two AlertSeverity = "2" + // Zero ... + Zero AlertSeverity = "0" +) + +// PossibleAlertSeverityValues returns an array of possible values for the AlertSeverity const type. 
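
// Editor's note: every enum in this file ships with a Possible*Values helper
// like the ones above. A small sketch (isValidAggregation is illustrative, not
// part of the SDK) of validating user input against one of them:
func isValidAggregation(s string) bool {
	for _, v := range PossibleAggregationTypeValues() {
		if string(v) == s {
			return true
		}
	}
	return false
}
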
+func PossibleAlertSeverityValues() []AlertSeverity { + return []AlertSeverity{Four, One, Three, Two, Zero} +} + +// BaselineSensitivity enumerates the values for baseline sensitivity. +type BaselineSensitivity string + +const ( + // High ... + High BaselineSensitivity = "High" + // Low ... + Low BaselineSensitivity = "Low" + // Medium ... + Medium BaselineSensitivity = "Medium" +) + +// PossibleBaselineSensitivityValues returns an array of possible values for the BaselineSensitivity const type. +func PossibleBaselineSensitivityValues() []BaselineSensitivity { + return []BaselineSensitivity{High, Low, Medium} +} + +// CategoryType enumerates the values for category type. +type CategoryType string + +const ( + // Logs ... + Logs CategoryType = "Logs" + // Metrics ... + Metrics CategoryType = "Metrics" +) + +// PossibleCategoryTypeValues returns an array of possible values for the CategoryType const type. +func PossibleCategoryTypeValues() []CategoryType { + return []CategoryType{Logs, Metrics} +} + +// ComparisonOperationType enumerates the values for comparison operation type. +type ComparisonOperationType string + +const ( + // Equals ... + Equals ComparisonOperationType = "Equals" + // GreaterThan ... + GreaterThan ComparisonOperationType = "GreaterThan" + // GreaterThanOrEqual ... + GreaterThanOrEqual ComparisonOperationType = "GreaterThanOrEqual" + // LessThan ... + LessThan ComparisonOperationType = "LessThan" + // LessThanOrEqual ... + LessThanOrEqual ComparisonOperationType = "LessThanOrEqual" + // NotEquals ... + NotEquals ComparisonOperationType = "NotEquals" +) + +// PossibleComparisonOperationTypeValues returns an array of possible values for the ComparisonOperationType const type. +func PossibleComparisonOperationTypeValues() []ComparisonOperationType { + return []ComparisonOperationType{Equals, GreaterThan, GreaterThanOrEqual, LessThan, LessThanOrEqual, NotEquals} +} + +// ConditionalOperator enumerates the values for conditional operator. +type ConditionalOperator string + +const ( + // ConditionalOperatorEqual ... + ConditionalOperatorEqual ConditionalOperator = "Equal" + // ConditionalOperatorGreaterThan ... + ConditionalOperatorGreaterThan ConditionalOperator = "GreaterThan" + // ConditionalOperatorLessThan ... + ConditionalOperatorLessThan ConditionalOperator = "LessThan" +) + +// PossibleConditionalOperatorValues returns an array of possible values for the ConditionalOperator const type. +func PossibleConditionalOperatorValues() []ConditionalOperator { + return []ConditionalOperator{ConditionalOperatorEqual, ConditionalOperatorGreaterThan, ConditionalOperatorLessThan} +} + +// ConditionOperator enumerates the values for condition operator. +type ConditionOperator string + +const ( + // ConditionOperatorGreaterThan ... + ConditionOperatorGreaterThan ConditionOperator = "GreaterThan" + // ConditionOperatorGreaterThanOrEqual ... + ConditionOperatorGreaterThanOrEqual ConditionOperator = "GreaterThanOrEqual" + // ConditionOperatorLessThan ... + ConditionOperatorLessThan ConditionOperator = "LessThan" + // ConditionOperatorLessThanOrEqual ... + ConditionOperatorLessThanOrEqual ConditionOperator = "LessThanOrEqual" +) + +// PossibleConditionOperatorValues returns an array of possible values for the ConditionOperator const type. 
+func PossibleConditionOperatorValues() []ConditionOperator { + return []ConditionOperator{ConditionOperatorGreaterThan, ConditionOperatorGreaterThanOrEqual, ConditionOperatorLessThan, ConditionOperatorLessThanOrEqual} +} + +// CriterionType enumerates the values for criterion type. +type CriterionType string + +const ( + // CriterionTypeDynamicThresholdCriterion ... + CriterionTypeDynamicThresholdCriterion CriterionType = "DynamicThresholdCriterion" + // CriterionTypeMultiMetricCriteria ... + CriterionTypeMultiMetricCriteria CriterionType = "MultiMetricCriteria" + // CriterionTypeStaticThresholdCriterion ... + CriterionTypeStaticThresholdCriterion CriterionType = "StaticThresholdCriterion" +) + +// PossibleCriterionTypeValues returns an array of possible values for the CriterionType const type. +func PossibleCriterionTypeValues() []CriterionType { + return []CriterionType{CriterionTypeDynamicThresholdCriterion, CriterionTypeMultiMetricCriteria, CriterionTypeStaticThresholdCriterion} +} + +// DataStatus enumerates the values for data status. +type DataStatus string + +const ( + // NotPresent ... + NotPresent DataStatus = "notPresent" + // Present ... + Present DataStatus = "present" +) + +// PossibleDataStatusValues returns an array of possible values for the DataStatus const type. +func PossibleDataStatusValues() []DataStatus { + return []DataStatus{NotPresent, Present} +} + +// Enabled enumerates the values for enabled. +type Enabled string + +const ( + // False ... + False Enabled = "false" + // True ... + True Enabled = "true" +) + +// PossibleEnabledValues returns an array of possible values for the Enabled const type. +func PossibleEnabledValues() []Enabled { + return []Enabled{False, True} +} + +// EventLevel enumerates the values for event level. +type EventLevel string + +const ( + // EventLevelCritical ... + EventLevelCritical EventLevel = "Critical" + // EventLevelError ... + EventLevelError EventLevel = "Error" + // EventLevelInformational ... + EventLevelInformational EventLevel = "Informational" + // EventLevelVerbose ... + EventLevelVerbose EventLevel = "Verbose" + // EventLevelWarning ... + EventLevelWarning EventLevel = "Warning" +) + +// PossibleEventLevelValues returns an array of possible values for the EventLevel const type. +func PossibleEventLevelValues() []EventLevel { + return []EventLevel{EventLevelCritical, EventLevelError, EventLevelInformational, EventLevelVerbose, EventLevelWarning} +} + +// MetricStatisticType enumerates the values for metric statistic type. +type MetricStatisticType string + +const ( + // MetricStatisticTypeAverage ... + MetricStatisticTypeAverage MetricStatisticType = "Average" + // MetricStatisticTypeMax ... + MetricStatisticTypeMax MetricStatisticType = "Max" + // MetricStatisticTypeMin ... + MetricStatisticTypeMin MetricStatisticType = "Min" + // MetricStatisticTypeSum ... + MetricStatisticTypeSum MetricStatisticType = "Sum" +) + +// PossibleMetricStatisticTypeValues returns an array of possible values for the MetricStatisticType const type. +func PossibleMetricStatisticTypeValues() []MetricStatisticType { + return []MetricStatisticType{MetricStatisticTypeAverage, MetricStatisticTypeMax, MetricStatisticTypeMin, MetricStatisticTypeSum} +} + +// MetricTriggerType enumerates the values for metric trigger type. +type MetricTriggerType string + +const ( + // MetricTriggerTypeConsecutive ... + MetricTriggerTypeConsecutive MetricTriggerType = "Consecutive" + // MetricTriggerTypeTotal ... 
+ MetricTriggerTypeTotal MetricTriggerType = "Total" +) + +// PossibleMetricTriggerTypeValues returns an array of possible values for the MetricTriggerType const type. +func PossibleMetricTriggerTypeValues() []MetricTriggerType { + return []MetricTriggerType{MetricTriggerTypeConsecutive, MetricTriggerTypeTotal} +} + +// OdataType enumerates the values for odata type. +type OdataType string + +const ( + // OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource ... + OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource OdataType = "Microsoft.Azure.Management.Insights.Models.RuleManagementEventDataSource" + // OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource ... + OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource OdataType = "Microsoft.Azure.Management.Insights.Models.RuleMetricDataSource" + // OdataTypeRuleDataSource ... + OdataTypeRuleDataSource OdataType = "RuleDataSource" +) + +// PossibleOdataTypeValues returns an array of possible values for the OdataType const type. +func PossibleOdataTypeValues() []OdataType { + return []OdataType{OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource, OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource, OdataTypeRuleDataSource} +} + +// OdataTypeBasicAction enumerates the values for odata type basic action. +type OdataTypeBasicAction string + +const ( + // OdataTypeAction ... + OdataTypeAction OdataTypeBasicAction = "Action" + // OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction ... + OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction OdataTypeBasicAction = "Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.AlertingAction" + // OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction ... + OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction OdataTypeBasicAction = "Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.LogToMetricAction" +) + +// PossibleOdataTypeBasicActionValues returns an array of possible values for the OdataTypeBasicAction const type. +func PossibleOdataTypeBasicActionValues() []OdataTypeBasicAction { + return []OdataTypeBasicAction{OdataTypeAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction} +} + +// OdataTypeBasicMetricAlertCriteria enumerates the values for odata type basic metric alert criteria. +type OdataTypeBasicMetricAlertCriteria string + +const ( + // OdataTypeMetricAlertCriteria ... + OdataTypeMetricAlertCriteria OdataTypeBasicMetricAlertCriteria = "MetricAlertCriteria" + // OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria ... 
+ OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria" + // OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria ... + OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria" +) + +// PossibleOdataTypeBasicMetricAlertCriteriaValues returns an array of possible values for the OdataTypeBasicMetricAlertCriteria const type. +func PossibleOdataTypeBasicMetricAlertCriteriaValues() []OdataTypeBasicMetricAlertCriteria { + return []OdataTypeBasicMetricAlertCriteria{OdataTypeMetricAlertCriteria, OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria, OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria} +} + +// OdataTypeBasicRuleAction enumerates the values for odata type basic rule action. +type OdataTypeBasicRuleAction string + +const ( + // OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction ... + OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction OdataTypeBasicRuleAction = "Microsoft.Azure.Management.Insights.Models.RuleEmailAction" + // OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction ... + OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction OdataTypeBasicRuleAction = "Microsoft.Azure.Management.Insights.Models.RuleWebhookAction" + // OdataTypeRuleAction ... + OdataTypeRuleAction OdataTypeBasicRuleAction = "RuleAction" +) + +// PossibleOdataTypeBasicRuleActionValues returns an array of possible values for the OdataTypeBasicRuleAction const type. +func PossibleOdataTypeBasicRuleActionValues() []OdataTypeBasicRuleAction { + return []OdataTypeBasicRuleAction{OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction, OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction, OdataTypeRuleAction} +} + +// OdataTypeBasicRuleCondition enumerates the values for odata type basic rule condition. +type OdataTypeBasicRuleCondition string + +const ( + // OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition ... + OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition OdataTypeBasicRuleCondition = "Microsoft.Azure.Management.Insights.Models.LocationThresholdRuleCondition" + // OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition ... + OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition OdataTypeBasicRuleCondition = "Microsoft.Azure.Management.Insights.Models.ManagementEventRuleCondition" + // OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition ... + OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition OdataTypeBasicRuleCondition = "Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition" + // OdataTypeRuleCondition ... + OdataTypeRuleCondition OdataTypeBasicRuleCondition = "RuleCondition" +) + +// PossibleOdataTypeBasicRuleConditionValues returns an array of possible values for the OdataTypeBasicRuleCondition const type. 
+func PossibleOdataTypeBasicRuleConditionValues() []OdataTypeBasicRuleCondition { + return []OdataTypeBasicRuleCondition{OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition, OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition, OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition, OdataTypeRuleCondition} +} + +// OnboardingStatus enumerates the values for onboarding status. +type OnboardingStatus string + +const ( + // NotOnboarded ... + NotOnboarded OnboardingStatus = "notOnboarded" + // Onboarded ... + Onboarded OnboardingStatus = "onboarded" + // Unknown ... + Unknown OnboardingStatus = "unknown" +) + +// PossibleOnboardingStatusValues returns an array of possible values for the OnboardingStatus const type. +func PossibleOnboardingStatusValues() []OnboardingStatus { + return []OnboardingStatus{NotOnboarded, Onboarded, Unknown} +} + +// ProvisioningState enumerates the values for provisioning state. +type ProvisioningState string + +const ( + // Canceled ... + Canceled ProvisioningState = "Canceled" + // Deploying ... + Deploying ProvisioningState = "Deploying" + // Failed ... + Failed ProvisioningState = "Failed" + // Succeeded ... + Succeeded ProvisioningState = "Succeeded" +) + +// PossibleProvisioningStateValues returns an array of possible values for the ProvisioningState const type. +func PossibleProvisioningStateValues() []ProvisioningState { + return []ProvisioningState{Canceled, Deploying, Failed, Succeeded} +} + +// QueryType enumerates the values for query type. +type QueryType string + +const ( + // ResultCount ... + ResultCount QueryType = "ResultCount" +) + +// PossibleQueryTypeValues returns an array of possible values for the QueryType const type. +func PossibleQueryTypeValues() []QueryType { + return []QueryType{ResultCount} +} + +// ReceiverStatus enumerates the values for receiver status. +type ReceiverStatus string + +const ( + // ReceiverStatusDisabled ... + ReceiverStatusDisabled ReceiverStatus = "Disabled" + // ReceiverStatusEnabled ... + ReceiverStatusEnabled ReceiverStatus = "Enabled" + // ReceiverStatusNotSpecified ... + ReceiverStatusNotSpecified ReceiverStatus = "NotSpecified" +) + +// PossibleReceiverStatusValues returns an array of possible values for the ReceiverStatus const type. +func PossibleReceiverStatusValues() []ReceiverStatus { + return []ReceiverStatus{ReceiverStatusDisabled, ReceiverStatusEnabled, ReceiverStatusNotSpecified} +} + +// RecurrenceFrequency enumerates the values for recurrence frequency. +type RecurrenceFrequency string + +const ( + // RecurrenceFrequencyDay ... + RecurrenceFrequencyDay RecurrenceFrequency = "Day" + // RecurrenceFrequencyHour ... + RecurrenceFrequencyHour RecurrenceFrequency = "Hour" + // RecurrenceFrequencyMinute ... + RecurrenceFrequencyMinute RecurrenceFrequency = "Minute" + // RecurrenceFrequencyMonth ... + RecurrenceFrequencyMonth RecurrenceFrequency = "Month" + // RecurrenceFrequencyNone ... + RecurrenceFrequencyNone RecurrenceFrequency = "None" + // RecurrenceFrequencySecond ... + RecurrenceFrequencySecond RecurrenceFrequency = "Second" + // RecurrenceFrequencyWeek ... + RecurrenceFrequencyWeek RecurrenceFrequency = "Week" + // RecurrenceFrequencyYear ... + RecurrenceFrequencyYear RecurrenceFrequency = "Year" +) + +// PossibleRecurrenceFrequencyValues returns an array of possible values for the RecurrenceFrequency const type. 
+func PossibleRecurrenceFrequencyValues() []RecurrenceFrequency { + return []RecurrenceFrequency{RecurrenceFrequencyDay, RecurrenceFrequencyHour, RecurrenceFrequencyMinute, RecurrenceFrequencyMonth, RecurrenceFrequencyNone, RecurrenceFrequencySecond, RecurrenceFrequencyWeek, RecurrenceFrequencyYear} +} + +// ResultType enumerates the values for result type. +type ResultType string + +const ( + // Data ... + Data ResultType = "Data" + // Metadata ... + Metadata ResultType = "Metadata" +) + +// PossibleResultTypeValues returns an array of possible values for the ResultType const type. +func PossibleResultTypeValues() []ResultType { + return []ResultType{Data, Metadata} +} + +// ScaleDirection enumerates the values for scale direction. +type ScaleDirection string + +const ( + // ScaleDirectionDecrease ... + ScaleDirectionDecrease ScaleDirection = "Decrease" + // ScaleDirectionIncrease ... + ScaleDirectionIncrease ScaleDirection = "Increase" + // ScaleDirectionNone ... + ScaleDirectionNone ScaleDirection = "None" +) + +// PossibleScaleDirectionValues returns an array of possible values for the ScaleDirection const type. +func PossibleScaleDirectionValues() []ScaleDirection { + return []ScaleDirection{ScaleDirectionDecrease, ScaleDirectionIncrease, ScaleDirectionNone} +} + +// ScaleType enumerates the values for scale type. +type ScaleType string + +const ( + // ChangeCount ... + ChangeCount ScaleType = "ChangeCount" + // ExactCount ... + ExactCount ScaleType = "ExactCount" + // PercentChangeCount ... + PercentChangeCount ScaleType = "PercentChangeCount" +) + +// PossibleScaleTypeValues returns an array of possible values for the ScaleType const type. +func PossibleScaleTypeValues() []ScaleType { + return []ScaleType{ChangeCount, ExactCount, PercentChangeCount} +} + +// Sensitivity enumerates the values for sensitivity. +type Sensitivity string + +const ( + // SensitivityHigh ... + SensitivityHigh Sensitivity = "High" + // SensitivityLow ... + SensitivityLow Sensitivity = "Low" + // SensitivityMedium ... + SensitivityMedium Sensitivity = "Medium" +) + +// PossibleSensitivityValues returns an array of possible values for the Sensitivity const type. +func PossibleSensitivityValues() []Sensitivity { + return []Sensitivity{SensitivityHigh, SensitivityLow, SensitivityMedium} +} + +// TimeAggregationOperator enumerates the values for time aggregation operator. +type TimeAggregationOperator string + +const ( + // TimeAggregationOperatorAverage ... + TimeAggregationOperatorAverage TimeAggregationOperator = "Average" + // TimeAggregationOperatorLast ... + TimeAggregationOperatorLast TimeAggregationOperator = "Last" + // TimeAggregationOperatorMaximum ... + TimeAggregationOperatorMaximum TimeAggregationOperator = "Maximum" + // TimeAggregationOperatorMinimum ... + TimeAggregationOperatorMinimum TimeAggregationOperator = "Minimum" + // TimeAggregationOperatorTotal ... + TimeAggregationOperatorTotal TimeAggregationOperator = "Total" +) + +// PossibleTimeAggregationOperatorValues returns an array of possible values for the TimeAggregationOperator const type. +func PossibleTimeAggregationOperatorValues() []TimeAggregationOperator { + return []TimeAggregationOperator{TimeAggregationOperatorAverage, TimeAggregationOperatorLast, TimeAggregationOperatorMaximum, TimeAggregationOperatorMinimum, TimeAggregationOperatorTotal} +} + +// TimeAggregationType enumerates the values for time aggregation type. +type TimeAggregationType string + +const ( + // TimeAggregationTypeAverage ... 
+ TimeAggregationTypeAverage TimeAggregationType = "Average" + // TimeAggregationTypeCount ... + TimeAggregationTypeCount TimeAggregationType = "Count" + // TimeAggregationTypeLast ... + TimeAggregationTypeLast TimeAggregationType = "Last" + // TimeAggregationTypeMaximum ... + TimeAggregationTypeMaximum TimeAggregationType = "Maximum" + // TimeAggregationTypeMinimum ... + TimeAggregationTypeMinimum TimeAggregationType = "Minimum" + // TimeAggregationTypeTotal ... + TimeAggregationTypeTotal TimeAggregationType = "Total" +) + +// PossibleTimeAggregationTypeValues returns an array of possible values for the TimeAggregationType const type. +func PossibleTimeAggregationTypeValues() []TimeAggregationType { + return []TimeAggregationType{TimeAggregationTypeAverage, TimeAggregationTypeCount, TimeAggregationTypeLast, TimeAggregationTypeMaximum, TimeAggregationTypeMinimum, TimeAggregationTypeTotal} +} + +// Unit enumerates the values for unit. +type Unit string + +const ( + // UnitBitsPerSecond ... + UnitBitsPerSecond Unit = "BitsPerSecond" + // UnitBytes ... + UnitBytes Unit = "Bytes" + // UnitByteSeconds ... + UnitByteSeconds Unit = "ByteSeconds" + // UnitBytesPerSecond ... + UnitBytesPerSecond Unit = "BytesPerSecond" + // UnitCores ... + UnitCores Unit = "Cores" + // UnitCount ... + UnitCount Unit = "Count" + // UnitCountPerSecond ... + UnitCountPerSecond Unit = "CountPerSecond" + // UnitMilliCores ... + UnitMilliCores Unit = "MilliCores" + // UnitMilliSeconds ... + UnitMilliSeconds Unit = "MilliSeconds" + // UnitNanoCores ... + UnitNanoCores Unit = "NanoCores" + // UnitPercent ... + UnitPercent Unit = "Percent" + // UnitSeconds ... + UnitSeconds Unit = "Seconds" + // UnitUnspecified ... + UnitUnspecified Unit = "Unspecified" +) + +// PossibleUnitValues returns an array of possible values for the Unit const type. +func PossibleUnitValues() []Unit { + return []Unit{UnitBitsPerSecond, UnitBytes, UnitByteSeconds, UnitBytesPerSecond, UnitCores, UnitCount, UnitCountPerSecond, UnitMilliCores, UnitMilliSeconds, UnitNanoCores, UnitPercent, UnitSeconds, UnitUnspecified} +} + +// BasicAction action descriptor. +type BasicAction interface { + AsAlertingAction() (*AlertingAction, bool) + AsLogToMetricAction() (*LogToMetricAction, bool) + AsAction() (*Action, bool) +} + +// Action action descriptor. 
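
// Editor's sketch (not part of the vendored file): consumers of the BasicAction
// union declared above use the As* helpers instead of switching on "odata.type"
// by hand. AlertingAction and LogToMetricAction are the concrete variants
// generated further down in this file.
func describeAction(a BasicAction) string {
	if aa, ok := a.AsAlertingAction(); ok {
		return "alerting action, severity " + string(aa.Severity)
	}
	if _, ok := a.AsLogToMetricAction(); ok {
		return "log-to-metric action"
	}
	return "base action"
}
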
+type Action struct {
+	// OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction'
+	OdataType OdataTypeBasicAction `json:"odata.type,omitempty"`
+}
+
+func unmarshalBasicAction(body []byte) (BasicAction, error) {
+	var m map[string]interface{}
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return nil, err
+	}
+
+	switch m["odata.type"] {
+	case string(OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction):
+		var aa AlertingAction
+		err := json.Unmarshal(body, &aa)
+		return aa, err
+	case string(OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction):
+		var ltma LogToMetricAction
+		err := json.Unmarshal(body, &ltma)
+		return ltma, err
+	default:
+		var a Action
+		err := json.Unmarshal(body, &a)
+		return a, err
+	}
+}
+func unmarshalBasicActionArray(body []byte) ([]BasicAction, error) {
+	var rawMessages []*json.RawMessage
+	err := json.Unmarshal(body, &rawMessages)
+	if err != nil {
+		return nil, err
+	}
+
+	aArray := make([]BasicAction, len(rawMessages))
+
+	for index, rawMessage := range rawMessages {
+		a, err := unmarshalBasicAction(*rawMessage)
+		if err != nil {
+			return nil, err
+		}
+		aArray[index] = a
+	}
+	return aArray, nil
+}
+
+// MarshalJSON is the custom marshaler for Action.
+func (a Action) MarshalJSON() ([]byte, error) {
+	a.OdataType = OdataTypeAction
+	objectMap := make(map[string]interface{})
+	if a.OdataType != "" {
+		objectMap["odata.type"] = a.OdataType
+	}
+	return json.Marshal(objectMap)
+}
+
+// AsAlertingAction is the BasicAction implementation for Action.
+func (a Action) AsAlertingAction() (*AlertingAction, bool) {
+	return nil, false
+}
+
+// AsLogToMetricAction is the BasicAction implementation for Action.
+func (a Action) AsLogToMetricAction() (*LogToMetricAction, bool) {
+	return nil, false
+}
+
+// AsAction is the BasicAction implementation for Action.
+func (a Action) AsAction() (*Action, bool) {
+	return &a, true
+}
+
+// AsBasicAction is the BasicAction implementation for Action.
+func (a Action) AsBasicAction() (BasicAction, bool) {
+	return &a, true
+}
+
+// ActionGroup an Azure action group.
+type ActionGroup struct {
+	// GroupShortName - The short name of the action group. This will be used in SMS messages.
+	GroupShortName *string `json:"groupShortName,omitempty"`
+	// Enabled - Indicates whether this action group is enabled. If an action group is not enabled, then none of its receivers will receive communications.
+	Enabled *bool `json:"enabled,omitempty"`
+	// EmailReceivers - The list of email receivers that are part of this action group.
+	EmailReceivers *[]EmailReceiver `json:"emailReceivers,omitempty"`
+	// SmsReceivers - The list of SMS receivers that are part of this action group.
+	SmsReceivers *[]SmsReceiver `json:"smsReceivers,omitempty"`
+	// WebhookReceivers - The list of webhook receivers that are part of this action group.
+	WebhookReceivers *[]WebhookReceiver `json:"webhookReceivers,omitempty"`
+	// ItsmReceivers - The list of ITSM receivers that are part of this action group.
+ ItsmReceivers *[]ItsmReceiver `json:"itsmReceivers,omitempty"` + // AzureAppPushReceivers - The list of AzureAppPush receivers that are part of this action group. + AzureAppPushReceivers *[]AzureAppPushReceiver `json:"azureAppPushReceivers,omitempty"` + // AutomationRunbookReceivers - The list of AutomationRunbook receivers that are part of this action group. + AutomationRunbookReceivers *[]AutomationRunbookReceiver `json:"automationRunbookReceivers,omitempty"` + // VoiceReceivers - The list of voice receivers that are part of this action group. + VoiceReceivers *[]VoiceReceiver `json:"voiceReceivers,omitempty"` + // LogicAppReceivers - The list of logic app receivers that are part of this action group. + LogicAppReceivers *[]LogicAppReceiver `json:"logicAppReceivers,omitempty"` + // AzureFunctionReceivers - The list of azure function receivers that are part of this action group. + AzureFunctionReceivers *[]AzureFunctionReceiver `json:"azureFunctionReceivers,omitempty"` + // ArmRoleReceivers - The list of ARM role receivers that are part of this action group. Roles are Azure RBAC roles and only built-in roles are supported. + ArmRoleReceivers *[]ArmRoleReceiver `json:"armRoleReceivers,omitempty"` +} + +// ActionGroupList a list of action groups. +type ActionGroupList struct { + autorest.Response `json:"-"` + // Value - The list of action groups. + Value *[]ActionGroupResource `json:"value,omitempty"` + // NextLink - Provides the link to retrieve the next set of elements. + NextLink *string `json:"nextLink,omitempty"` +} + +// ActionGroupPatch an Azure action group for patch operations. +type ActionGroupPatch struct { + // Enabled - Indicates whether this action group is enabled. If an action group is not enabled, then none of its actions will be activated. + Enabled *bool `json:"enabled,omitempty"` +} + +// ActionGroupPatchBody an action group object for the body of patch operations. +type ActionGroupPatchBody struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + // ActionGroupPatch - The action group settings for an update operation. + *ActionGroupPatch `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ActionGroupPatchBody. +func (agpb ActionGroupPatchBody) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agpb.Tags != nil { + objectMap["tags"] = agpb.Tags + } + if agpb.ActionGroupPatch != nil { + objectMap["properties"] = agpb.ActionGroupPatch + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ActionGroupPatchBody struct. +func (agpb *ActionGroupPatchBody) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + agpb.Tags = tags + } + case "properties": + if v != nil { + var actionGroupPatch ActionGroupPatch + err = json.Unmarshal(*v, &actionGroupPatch) + if err != nil { + return err + } + agpb.ActionGroupPatch = &actionGroupPatch + } + } + } + + return nil +} + +// ActionGroupResource an action group resource. +type ActionGroupResource struct { + autorest.Response `json:"-"` + // ActionGroup - The action groups properties of the resource. 
+ *ActionGroup `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ActionGroupResource. +func (agr ActionGroupResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if agr.ActionGroup != nil { + objectMap["properties"] = agr.ActionGroup + } + if agr.Location != nil { + objectMap["location"] = agr.Location + } + if agr.Tags != nil { + objectMap["tags"] = agr.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ActionGroupResource struct. +func (agr *ActionGroupResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var actionGroup ActionGroup + err = json.Unmarshal(*v, &actionGroup) + if err != nil { + return err + } + agr.ActionGroup = &actionGroup + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + agr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + agr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + agr.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + agr.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + agr.Tags = tags + } + } + } + + return nil +} + +// ActivityLogAlert an Azure activity log alert. +type ActivityLogAlert struct { + // Scopes - A list of resourceIds that will be used as prefixes. The alert will only apply to activityLogs with resourceIds that fall under one of these prefixes. This list must include at least one item. + Scopes *[]string `json:"scopes,omitempty"` + // Enabled - Indicates whether this activity log alert is enabled. If an activity log alert is not enabled, then none of its actions will be activated. + Enabled *bool `json:"enabled,omitempty"` + // Condition - The condition that will cause this alert to activate. + Condition *ActivityLogAlertAllOfCondition `json:"condition,omitempty"` + // Actions - The actions that will activate when the condition is met. + Actions *ActivityLogAlertActionList `json:"actions,omitempty"` + // Description - A description of this activity log alert. + Description *string `json:"description,omitempty"` +} + +// ActivityLogAlertActionGroup a pointer to an Azure Action Group. +type ActivityLogAlertActionGroup struct { + // ActionGroupID - The resourceId of the action group. This cannot be null or empty. + ActionGroupID *string `json:"actionGroupId,omitempty"` + // WebhookProperties - the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload. 
+ WebhookProperties map[string]*string `json:"webhookProperties"` +} + +// MarshalJSON is the custom marshaler for ActivityLogAlertActionGroup. +func (alaag ActivityLogAlertActionGroup) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if alaag.ActionGroupID != nil { + objectMap["actionGroupId"] = alaag.ActionGroupID + } + if alaag.WebhookProperties != nil { + objectMap["webhookProperties"] = alaag.WebhookProperties + } + return json.Marshal(objectMap) +} + +// ActivityLogAlertActionList a list of activity log alert actions. +type ActivityLogAlertActionList struct { + // ActionGroups - The list of activity log alerts. + ActionGroups *[]ActivityLogAlertActionGroup `json:"actionGroups,omitempty"` +} + +// ActivityLogAlertAllOfCondition an Activity Log alert condition that is met when all its member +// conditions are met. +type ActivityLogAlertAllOfCondition struct { + // AllOf - The list of activity log alert conditions. + AllOf *[]ActivityLogAlertLeafCondition `json:"allOf,omitempty"` +} + +// ActivityLogAlertLeafCondition an Activity Log alert condition that is met by comparing an activity log +// field and value. +type ActivityLogAlertLeafCondition struct { + // Field - The name of the field that this condition will examine. The possible values for this field are (case-insensitive): 'resourceId', 'category', 'caller', 'level', 'operationName', 'resourceGroup', 'resourceProvider', 'status', 'subStatus', 'resourceType', or anything beginning with 'properties.'. + Field *string `json:"field,omitempty"` + // Equals - The field value will be compared to this value (case-insensitive) to determine if the condition is met. + Equals *string `json:"equals,omitempty"` +} + +// ActivityLogAlertList a list of activity log alerts. +type ActivityLogAlertList struct { + autorest.Response `json:"-"` + // Value - The list of activity log alerts. + Value *[]ActivityLogAlertResource `json:"value,omitempty"` + // NextLink - Provides the link to retrieve the next set of elements. + NextLink *string `json:"nextLink,omitempty"` +} + +// ActivityLogAlertPatch an Azure activity log alert for patch operations. +type ActivityLogAlertPatch struct { + // Enabled - Indicates whether this activity log alert is enabled. If an activity log alert is not enabled, then none of its actions will be activated. + Enabled *bool `json:"enabled,omitempty"` +} + +// ActivityLogAlertPatchBody an activity log alert object for the body of patch operations. +type ActivityLogAlertPatchBody struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + // ActivityLogAlertPatch - The activity log alert settings for an update operation. + *ActivityLogAlertPatch `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for ActivityLogAlertPatchBody. +func (alapb ActivityLogAlertPatchBody) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if alapb.Tags != nil { + objectMap["tags"] = alapb.Tags + } + if alapb.ActivityLogAlertPatch != nil { + objectMap["properties"] = alapb.ActivityLogAlertPatch + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ActivityLogAlertPatchBody struct. 
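
// Editor's sketch (not part of the vendored file): the custom marshalers in
// this file exist because the wire format nests the mutable settings under a
// "properties" envelope while the Go structs embed them. Round-tripping a
// patch body makes the shape visible:
func ExampleActivityLogAlertPatchBody_marshal() {
	enabled := false
	body := ActivityLogAlertPatchBody{
		Tags:                  map[string]*string{"env": to.StringPtr("prod")},
		ActivityLogAlertPatch: &ActivityLogAlertPatch{Enabled: &enabled},
	}
	b, _ := json.Marshal(body)
	_ = b // {"tags":{"env":"prod"},"properties":{"enabled":false}}
}
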
+func (alapb *ActivityLogAlertPatchBody) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + alapb.Tags = tags + } + case "properties": + if v != nil { + var activityLogAlertPatch ActivityLogAlertPatch + err = json.Unmarshal(*v, &activityLogAlertPatch) + if err != nil { + return err + } + alapb.ActivityLogAlertPatch = &activityLogAlertPatch + } + } + } + + return nil +} + +// ActivityLogAlertResource an activity log alert resource. +type ActivityLogAlertResource struct { + autorest.Response `json:"-"` + // ActivityLogAlert - The activity log alert properties of the resource. + *ActivityLogAlert `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for ActivityLogAlertResource. +func (alar ActivityLogAlertResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if alar.ActivityLogAlert != nil { + objectMap["properties"] = alar.ActivityLogAlert + } + if alar.Location != nil { + objectMap["location"] = alar.Location + } + if alar.Tags != nil { + objectMap["tags"] = alar.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for ActivityLogAlertResource struct. +func (alar *ActivityLogAlertResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var activityLogAlert ActivityLogAlert + err = json.Unmarshal(*v, &activityLogAlert) + if err != nil { + return err + } + alar.ActivityLogAlert = &activityLogAlert + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + alar.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + alar.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + alar.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + alar.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + alar.Tags = tags + } + } + } + + return nil +} + +// AlertingAction specify action need to be taken when rule type is Alert +type AlertingAction struct { + // Severity - Severity of the alert. Possible values include: 'Zero', 'One', 'Two', 'Three', 'Four' + Severity AlertSeverity `json:"severity,omitempty"` + // AznsAction - Azure action group reference. + AznsAction *AzNsActionGroup `json:"aznsAction,omitempty"` + // ThrottlingInMin - time (in minutes) for which Alerts should be throttled or suppressed. 
+ ThrottlingInMin *int32 `json:"throttlingInMin,omitempty"` + // Trigger - The trigger condition that results in the alert rule being. + Trigger *TriggerCondition `json:"trigger,omitempty"` + // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction' + OdataType OdataTypeBasicAction `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for AlertingAction. +func (aa AlertingAction) MarshalJSON() ([]byte, error) { + aa.OdataType = OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction + objectMap := make(map[string]interface{}) + if aa.Severity != "" { + objectMap["severity"] = aa.Severity + } + if aa.AznsAction != nil { + objectMap["aznsAction"] = aa.AznsAction + } + if aa.ThrottlingInMin != nil { + objectMap["throttlingInMin"] = aa.ThrottlingInMin + } + if aa.Trigger != nil { + objectMap["trigger"] = aa.Trigger + } + if aa.OdataType != "" { + objectMap["odata.type"] = aa.OdataType + } + return json.Marshal(objectMap) +} + +// AsAlertingAction is the BasicAction implementation for AlertingAction. +func (aa AlertingAction) AsAlertingAction() (*AlertingAction, bool) { + return &aa, true +} + +// AsLogToMetricAction is the BasicAction implementation for AlertingAction. +func (aa AlertingAction) AsLogToMetricAction() (*LogToMetricAction, bool) { + return nil, false +} + +// AsAction is the BasicAction implementation for AlertingAction. +func (aa AlertingAction) AsAction() (*Action, bool) { + return nil, false +} + +// AsBasicAction is the BasicAction implementation for AlertingAction. +func (aa AlertingAction) AsBasicAction() (BasicAction, bool) { + return &aa, true +} + +// AlertRule an alert rule. +type AlertRule struct { + // Name - the name of the alert rule. + Name *string `json:"name,omitempty"` + // Description - the description of the alert rule that will be included in the alert email. + Description *string `json:"description,omitempty"` + // IsEnabled - the flag that indicates whether the alert rule is enabled. + IsEnabled *bool `json:"isEnabled,omitempty"` + // Condition - the condition that results in the alert rule being activated. + Condition BasicRuleCondition `json:"condition,omitempty"` + // Actions - the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved. + Actions *[]BasicRuleAction `json:"actions,omitempty"` + // LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format. + LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for AlertRule struct. 
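
// Editor's sketch (not part of the vendored file): the unmarshaler that follows
// is needed because Condition and Actions are interface-typed; plain
// json.Unmarshal cannot choose the concrete rule types, so the generated code
// dispatches on "odata.type". The payload below is illustrative.
func ExampleAlertRule_unmarshal() {
	payload := []byte(`{
		"name": "cpu-high",
		"isEnabled": true,
		"condition": {"odata.type": "Microsoft.Azure.Management.Insights.Models.ThresholdRuleCondition"}
	}`)
	var ar AlertRule
	if err := json.Unmarshal(payload, &ar); err != nil {
		return
	}
	_ = ar.Condition // concrete type selected by unmarshalBasicRuleCondition
}
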
+func (ar *AlertRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + ar.Name = &name + } + case "description": + if v != nil { + var description string + err = json.Unmarshal(*v, &description) + if err != nil { + return err + } + ar.Description = &description + } + case "isEnabled": + if v != nil { + var isEnabled bool + err = json.Unmarshal(*v, &isEnabled) + if err != nil { + return err + } + ar.IsEnabled = &isEnabled + } + case "condition": + if v != nil { + condition, err := unmarshalBasicRuleCondition(*v) + if err != nil { + return err + } + ar.Condition = condition + } + case "actions": + if v != nil { + actions, err := unmarshalBasicRuleActionArray(*v) + if err != nil { + return err + } + ar.Actions = &actions + } + case "lastUpdatedTime": + if v != nil { + var lastUpdatedTime date.Time + err = json.Unmarshal(*v, &lastUpdatedTime) + if err != nil { + return err + } + ar.LastUpdatedTime = &lastUpdatedTime + } + } + } + + return nil +} + +// AlertRuleResource the alert rule resource. +type AlertRuleResource struct { + autorest.Response `json:"-"` + // AlertRule - The alert rule properties of the resource. + *AlertRule `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for AlertRuleResource. +func (arr AlertRuleResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if arr.AlertRule != nil { + objectMap["properties"] = arr.AlertRule + } + if arr.Location != nil { + objectMap["location"] = arr.Location + } + if arr.Tags != nil { + objectMap["tags"] = arr.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AlertRuleResource struct. +func (arr *AlertRuleResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var alertRule AlertRule + err = json.Unmarshal(*v, &alertRule) + if err != nil { + return err + } + arr.AlertRule = &alertRule + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + arr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + arr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + arr.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + arr.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + arr.Tags = tags + } + } + } + + return nil +} + +// AlertRuleResourceCollection represents a collection of alert rule resources. 
+type AlertRuleResourceCollection struct {
+ autorest.Response `json:"-"`
+ // Value - the values for the alert rule resources.
+ Value *[]AlertRuleResource `json:"value,omitempty"`
+}
+
+// AlertRuleResourcePatch the alert rule object for patch operations.
+type AlertRuleResourcePatch struct {
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+ // AlertRule - The properties of an alert rule.
+ *AlertRule `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AlertRuleResourcePatch.
+func (arrp AlertRuleResourcePatch) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if arrp.Tags != nil {
+ objectMap["tags"] = arrp.Tags
+ }
+ if arrp.AlertRule != nil {
+ objectMap["properties"] = arrp.AlertRule
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AlertRuleResourcePatch struct.
+func (arrp *AlertRuleResourcePatch) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ arrp.Tags = tags
+ }
+ case "properties":
+ if v != nil {
+ var alertRule AlertRule
+ err = json.Unmarshal(*v, &alertRule)
+ if err != nil {
+ return err
+ }
+ arrp.AlertRule = &alertRule
+ }
+ }
+ }
+
+ return nil
+}
+
+// ArmRoleReceiver an arm role receiver.
+type ArmRoleReceiver struct {
+ // Name - The name of the arm role receiver. Names must be unique across all receivers within an action group.
+ Name *string `json:"name,omitempty"`
+ // RoleID - The arm role id.
+ RoleID *string `json:"roleId,omitempty"`
+ // UseCommonAlertSchema - Indicates whether to use common alert schema.
+ UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty"`
+}
+
+// AutomationRunbookReceiver the Azure Automation Runbook notification receiver.
+type AutomationRunbookReceiver struct {
+ // AutomationAccountID - The Azure automation account Id which holds this runbook and authenticates to the Azure resource.
+ AutomationAccountID *string `json:"automationAccountId,omitempty"`
+ // RunbookName - The name for this runbook.
+ RunbookName *string `json:"runbookName,omitempty"`
+ // WebhookResourceID - The resource id for webhook linked to this runbook.
+ WebhookResourceID *string `json:"webhookResourceId,omitempty"`
+ // IsGlobalRunbook - Indicates whether this instance is a global runbook.
+ IsGlobalRunbook *bool `json:"isGlobalRunbook,omitempty"`
+ // Name - Indicates name of the webhook.
+ Name *string `json:"name,omitempty"`
+ // ServiceURI - The URI where webhooks should be sent.
+ ServiceURI *string `json:"serviceUri,omitempty"`
+ // UseCommonAlertSchema - Indicates whether to use common alert schema.
+ UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty"`
+}
+
+// AutoscaleNotification autoscale notification.
+type AutoscaleNotification struct {
+ // Operation - the operation associated with the notification and its value must be "scale"
+ Operation *string `json:"operation,omitempty"`
+ // Email - the email notification.
+ Email *EmailNotification `json:"email,omitempty"`
+ // Webhooks - the collection of webhook notifications.
+ Webhooks *[]WebhookNotification `json:"webhooks,omitempty"`
+}
+
+// AutoscaleProfile autoscale profile.
+type AutoscaleProfile struct {
+ // Name - the name of the profile.
+ Name *string `json:"name,omitempty"` + // Capacity - the number of instances that can be used during this profile. + Capacity *ScaleCapacity `json:"capacity,omitempty"` + // Rules - the collection of rules that provide the triggers and parameters for the scaling action. A maximum of 10 rules can be specified. + Rules *[]ScaleRule `json:"rules,omitempty"` + // FixedDate - the specific date-time for the profile. This element is not used if the Recurrence element is used. + FixedDate *TimeWindow `json:"fixedDate,omitempty"` + // Recurrence - the repeating times at which this profile begins. This element is not used if the FixedDate element is used. + Recurrence *Recurrence `json:"recurrence,omitempty"` +} + +// AutoscaleSetting a setting that contains all of the configuration for the automatic scaling of a +// resource. +type AutoscaleSetting struct { + // Profiles - the collection of automatic scaling profiles that specify different scaling parameters for different time periods. A maximum of 20 profiles can be specified. + Profiles *[]AutoscaleProfile `json:"profiles,omitempty"` + // Notifications - the collection of notifications. + Notifications *[]AutoscaleNotification `json:"notifications,omitempty"` + // Enabled - the enabled flag. Specifies whether automatic scaling is enabled for the resource. The default value is 'true'. + Enabled *bool `json:"enabled,omitempty"` + // Name - the name of the autoscale setting. + Name *string `json:"name,omitempty"` + // TargetResourceURI - the resource identifier of the resource that the autoscale setting should be added to. + TargetResourceURI *string `json:"targetResourceUri,omitempty"` +} + +// AutoscaleSettingResource the autoscale setting resource. +type AutoscaleSettingResource struct { + autorest.Response `json:"-"` + // AutoscaleSetting - The autoscale setting of the resource. + *AutoscaleSetting `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for AutoscaleSettingResource. +func (asr AutoscaleSettingResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if asr.AutoscaleSetting != nil { + objectMap["properties"] = asr.AutoscaleSetting + } + if asr.Location != nil { + objectMap["location"] = asr.Location + } + if asr.Tags != nil { + objectMap["tags"] = asr.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for AutoscaleSettingResource struct. 
+func (asr *AutoscaleSettingResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var autoscaleSetting AutoscaleSetting + err = json.Unmarshal(*v, &autoscaleSetting) + if err != nil { + return err + } + asr.AutoscaleSetting = &autoscaleSetting + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + asr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + asr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + asr.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + asr.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + asr.Tags = tags + } + } + } + + return nil +} + +// AutoscaleSettingResourceCollection represents a collection of autoscale setting resources. +type AutoscaleSettingResourceCollection struct { + autorest.Response `json:"-"` + // Value - the values for the autoscale setting resources. + Value *[]AutoscaleSettingResource `json:"value,omitempty"` + // NextLink - URL to get the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// AutoscaleSettingResourceCollectionIterator provides access to a complete listing of +// AutoscaleSettingResource values. +type AutoscaleSettingResourceCollectionIterator struct { + i int + page AutoscaleSettingResourceCollectionPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *AutoscaleSettingResourceCollectionIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingResourceCollectionIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *AutoscaleSettingResourceCollectionIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter AutoscaleSettingResourceCollectionIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter AutoscaleSettingResourceCollectionIterator) Response() AutoscaleSettingResourceCollection { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. 
+func (iter AutoscaleSettingResourceCollectionIterator) Value() AutoscaleSettingResource {
+ if !iter.page.NotDone() {
+ return AutoscaleSettingResource{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the AutoscaleSettingResourceCollectionIterator type.
+func NewAutoscaleSettingResourceCollectionIterator(page AutoscaleSettingResourceCollectionPage) AutoscaleSettingResourceCollectionIterator {
+ return AutoscaleSettingResourceCollectionIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (asrc AutoscaleSettingResourceCollection) IsEmpty() bool {
+ return asrc.Value == nil || len(*asrc.Value) == 0
+}
+
+// autoscaleSettingResourceCollectionPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (asrc AutoscaleSettingResourceCollection) autoscaleSettingResourceCollectionPreparer(ctx context.Context) (*http.Request, error) {
+ if asrc.NextLink == nil || len(to.String(asrc.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(asrc.NextLink)))
+}
+
+// AutoscaleSettingResourceCollectionPage contains a page of AutoscaleSettingResource values.
+type AutoscaleSettingResourceCollectionPage struct {
+ fn func(context.Context, AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error)
+ asrc AutoscaleSettingResourceCollection
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *AutoscaleSettingResourceCollectionPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingResourceCollectionPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.asrc)
+ if err != nil {
+ return err
+ }
+ page.asrc = next
+ return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AutoscaleSettingResourceCollectionPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page AutoscaleSettingResourceCollectionPage) NotDone() bool {
+ return !page.asrc.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page AutoscaleSettingResourceCollectionPage) Response() AutoscaleSettingResourceCollection {
+ return page.asrc
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page AutoscaleSettingResourceCollectionPage) Values() []AutoscaleSettingResource {
+ if page.asrc.IsEmpty() {
+ return nil
+ }
+ return *page.asrc.Value
+}
+
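The iterator/page pair above is the generated pagination surface, so a short consumption sketch may help; it relies only on methods defined in this diff, and the drainAutoscaleSettings helper (and where its iter argument comes from) is an assumption:

// drainAutoscaleSettings is a hypothetical consumer of the generated iterator.
func drainAutoscaleSettings(ctx context.Context, iter AutoscaleSettingResourceCollectionIterator) ([]AutoscaleSettingResource, error) {
	var all []AutoscaleSettingResource
	// NotDone reports whether values remain; NextWithContext lazily fetches
	// the following page through the page's stored next-page function.
	for iter.NotDone() {
		all = append(all, iter.Value())
		if err := iter.NextWithContext(ctx); err != nil {
			return nil, err
		}
	}
	return all, nil
}

+// Creates a new instance of the AutoscaleSettingResourceCollectionPage type.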
+func NewAutoscaleSettingResourceCollectionPage(getNextPage func(context.Context, AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error)) AutoscaleSettingResourceCollectionPage {
+ return AutoscaleSettingResourceCollectionPage{fn: getNextPage}
+}
+
+// AutoscaleSettingResourcePatch the autoscale setting object for patch operations.
+type AutoscaleSettingResourcePatch struct {
+ // Tags - Resource tags
+ Tags map[string]*string `json:"tags"`
+ // AutoscaleSetting - The autoscale setting properties of the update operation.
+ *AutoscaleSetting `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for AutoscaleSettingResourcePatch.
+func (asrp AutoscaleSettingResourcePatch) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if asrp.Tags != nil {
+ objectMap["tags"] = asrp.Tags
+ }
+ if asrp.AutoscaleSetting != nil {
+ objectMap["properties"] = asrp.AutoscaleSetting
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for AutoscaleSettingResourcePatch struct.
+func (asrp *AutoscaleSettingResourcePatch) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "tags":
+ if v != nil {
+ var tags map[string]*string
+ err = json.Unmarshal(*v, &tags)
+ if err != nil {
+ return err
+ }
+ asrp.Tags = tags
+ }
+ case "properties":
+ if v != nil {
+ var autoscaleSetting AutoscaleSetting
+ err = json.Unmarshal(*v, &autoscaleSetting)
+ if err != nil {
+ return err
+ }
+ asrp.AutoscaleSetting = &autoscaleSetting
+ }
+ }
+ }
+
+ return nil
+}
+
+// AzNsActionGroup azure action group.
+type AzNsActionGroup struct {
+ // ActionGroup - Azure Action Group reference.
+ ActionGroup *[]string `json:"actionGroup,omitempty"`
+ // EmailSubject - Custom subject override for all email ids in Azure action group.
+ EmailSubject *string `json:"emailSubject,omitempty"`
+ // CustomWebhookPayload - Custom payload to be sent for all webhook URI in Azure action group.
+ CustomWebhookPayload *string `json:"customWebhookPayload,omitempty"`
+}
+
+// AzureAppPushReceiver the Azure mobile App push notification receiver.
+type AzureAppPushReceiver struct {
+ // Name - The name of the Azure mobile app push receiver. Names must be unique across all receivers within an action group.
+ Name *string `json:"name,omitempty"`
+ // EmailAddress - The email address registered for the Azure mobile app.
+ EmailAddress *string `json:"emailAddress,omitempty"`
+}
+
+// AzureFunctionReceiver an azure function receiver.
+type AzureFunctionReceiver struct {
+ // Name - The name of the azure function receiver. Names must be unique across all receivers within an action group.
+ Name *string `json:"name,omitempty"`
+ // FunctionAppResourceID - The azure resource id of the function app.
+ FunctionAppResourceID *string `json:"functionAppResourceId,omitempty"`
+ // FunctionName - The function name in the function app.
+ FunctionName *string `json:"functionName,omitempty"`
+ // HTTPTriggerURL - The http trigger url where the http request is sent.
+ HTTPTriggerURL *string `json:"httpTriggerUrl,omitempty"`
+ // UseCommonAlertSchema - Indicates whether to use common alert schema.
+ UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty"`
+}
+
+// Baseline the baseline values for a single sensitivity value.
+type Baseline struct {
+ // Sensitivity - the sensitivity of the baseline.
Possible values include: 'SensitivityLow', 'SensitivityMedium', 'SensitivityHigh'
+ Sensitivity Sensitivity `json:"sensitivity,omitempty"`
+ // LowThresholds - The low thresholds of the baseline.
+ LowThresholds *[]float64 `json:"lowThresholds,omitempty"`
+ // HighThresholds - The high thresholds of the baseline.
+ HighThresholds *[]float64 `json:"highThresholds,omitempty"`
+}
+
+// BaselineMetadata represents a baseline metadata value.
+type BaselineMetadata struct {
+ // Name - Name of the baseline metadata.
+ Name *string `json:"name,omitempty"`
+ // Value - Value of the baseline metadata.
+ Value *string `json:"value,omitempty"`
+}
+
+// BaselineMetadataValue represents a baseline metadata value.
+type BaselineMetadataValue struct {
+ // Name - the name of the metadata.
+ Name *LocalizableString `json:"name,omitempty"`
+ // Value - the value of the metadata.
+ Value *string `json:"value,omitempty"`
+}
+
+// BaselineProperties the baseline properties class.
+type BaselineProperties struct {
+ // Timespan - The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested.
+ Timespan *string `json:"timespan,omitempty"`
+ // Interval - The interval (window size) for which the metric data was returned. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made.
+ Interval *string `json:"interval,omitempty"`
+ // Aggregation - The aggregation type of the metric.
+ Aggregation *string `json:"aggregation,omitempty"`
+ // Timestamps - the array of timestamps of the baselines.
+ Timestamps *[]date.Time `json:"timestamps,omitempty"`
+ // Baseline - the baseline values for each sensitivity.
+ Baseline *[]Baseline `json:"baseline,omitempty"`
+ // Metadata - the baseline metadata values.
+ Metadata *[]BaselineMetadataValue `json:"metadata,omitempty"`
+}
+
+// BaselineResponse the response to a baseline query.
+type BaselineResponse struct {
+ autorest.Response `json:"-"`
+ // ID - READ-ONLY; the metric baseline Id.
+ ID *string `json:"id,omitempty"`
+ // Type - READ-ONLY; the resource type of the baseline resource.
+ Type *string `json:"type,omitempty"`
+ // Name - READ-ONLY; the name and the display name of the metric, i.e. it is a localizable string.
+ Name *LocalizableString `json:"name,omitempty"`
+ // BaselineProperties - the properties of the baseline.
+ *BaselineProperties `json:"properties,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for BaselineResponse.
+func (br BaselineResponse) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if br.BaselineProperties != nil {
+ objectMap["properties"] = br.BaselineProperties
+ }
+ return json.Marshal(objectMap)
+}
+
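BaselineResponse, like the other resource types in this file, flattens an embedded properties struct into a nested "properties" JSON object, which is the sole job of the custom (un)marshaler pair. A hedged round-trip sketch (the function name and payload are hypothetical, same-package assumption):

// demoBaselineEnvelope shows the nested "properties" object being lifted onto
// the embedded *BaselineProperties field by the custom unmarshaler below.
func demoBaselineEnvelope() (*string, error) {
	payload := []byte(`{"properties":{"aggregation":"Average"}}`)
	var br BaselineResponse
	if err := json.Unmarshal(payload, &br); err != nil {
		return nil, err
	}
	// Aggregation is promoted through the embedded BaselineProperties.
	return br.Aggregation, nil
}

+// UnmarshalJSON is the custom unmarshaler for BaselineResponse struct.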
+func (br *BaselineResponse) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ br.ID = &ID
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ br.Type = &typeVar
+ }
+ case "name":
+ if v != nil {
+ var name LocalizableString
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ br.Name = &name
+ }
+ case "properties":
+ if v != nil {
+ var baselineProperties BaselineProperties
+ err = json.Unmarshal(*v, &baselineProperties)
+ if err != nil {
+ return err
+ }
+ br.BaselineProperties = &baselineProperties
+ }
+ }
+ }
+
+ return nil
+}
+
+// CalculateBaselineResponse the response to a calculate baseline call.
+type CalculateBaselineResponse struct {
+ autorest.Response `json:"-"`
+ // Type - the resource type of the baseline resource.
+ Type *string `json:"type,omitempty"`
+ // Timestamps - the array of timestamps of the baselines.
+ Timestamps *[]date.Time `json:"timestamps,omitempty"`
+ // Baseline - the baseline values for each sensitivity.
+ Baseline *[]Baseline `json:"baseline,omitempty"`
+}
+
+// Criteria specifies the criteria for converting log to metric.
+type Criteria struct {
+ // MetricName - Name of the metric
+ MetricName *string `json:"metricName,omitempty"`
+ // Dimensions - List of Dimensions for creating metric
+ Dimensions *[]Dimension `json:"dimensions,omitempty"`
+}
+
+// DataContainer information about a container with data for a given resource.
+type DataContainer struct {
+ // Workspace - Log Analytics workspace information.
+ Workspace *WorkspaceInfo `json:"workspace,omitempty"`
+}
+
+// DiagnosticSettings the diagnostic settings.
+type DiagnosticSettings struct {
+ // StorageAccountID - The resource ID of the storage account to which you would like to send Diagnostic Logs.
+ StorageAccountID *string `json:"storageAccountId,omitempty"`
+ // ServiceBusRuleID - The service bus rule Id of the diagnostic setting. This is here to maintain backwards compatibility.
+ ServiceBusRuleID *string `json:"serviceBusRuleId,omitempty"`
+ // EventHubAuthorizationRuleID - The resource Id for the event hub authorization rule.
+ EventHubAuthorizationRuleID *string `json:"eventHubAuthorizationRuleId,omitempty"`
+ // EventHubName - The name of the event hub. If none is specified, the default event hub will be selected.
+ EventHubName *string `json:"eventHubName,omitempty"`
+ // Metrics - The list of metric settings.
+ Metrics *[]MetricSettings `json:"metrics,omitempty"`
+ // Logs - The list of logs settings.
+ Logs *[]LogSettings `json:"logs,omitempty"`
+ // WorkspaceID - The full ARM resource ID of the Log Analytics workspace to which you would like to send Diagnostic Logs. Example: /subscriptions/4b9e8510-67ab-4e9a-95a9-e2f1e570ea9c/resourceGroups/insights-integration/providers/Microsoft.OperationalInsights/workspaces/viruela2
+ WorkspaceID *string `json:"workspaceId,omitempty"`
+ // LogAnalyticsDestinationType - A string indicating whether the export to Log Analytics should use the default destination type, i.e. AzureDiagnostics, or use a destination type constructed as follows: <normalized service identity>_<normalized category name>. Possible values are: Dedicated and null (null is default.)
+ LogAnalyticsDestinationType *string `json:"logAnalyticsDestinationType,omitempty"` +} + +// DiagnosticSettingsCategory the diagnostic settings Category. +type DiagnosticSettingsCategory struct { + // CategoryType - The type of the diagnostic settings category. Possible values include: 'Metrics', 'Logs' + CategoryType CategoryType `json:"categoryType,omitempty"` +} + +// DiagnosticSettingsCategoryResource the diagnostic settings category resource. +type DiagnosticSettingsCategoryResource struct { + autorest.Response `json:"-"` + // DiagnosticSettingsCategory - The properties of a Diagnostic Settings Category. + *DiagnosticSettingsCategory `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiagnosticSettingsCategoryResource. +func (dscr DiagnosticSettingsCategoryResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if dscr.DiagnosticSettingsCategory != nil { + objectMap["properties"] = dscr.DiagnosticSettingsCategory + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for DiagnosticSettingsCategoryResource struct. +func (dscr *DiagnosticSettingsCategoryResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var diagnosticSettingsCategory DiagnosticSettingsCategory + err = json.Unmarshal(*v, &diagnosticSettingsCategory) + if err != nil { + return err + } + dscr.DiagnosticSettingsCategory = &diagnosticSettingsCategory + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + dscr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dscr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + dscr.Type = &typeVar + } + } + } + + return nil +} + +// DiagnosticSettingsCategoryResourceCollection represents a collection of diagnostic setting category +// resources. +type DiagnosticSettingsCategoryResourceCollection struct { + autorest.Response `json:"-"` + // Value - The collection of diagnostic settings category resources. + Value *[]DiagnosticSettingsCategoryResource `json:"value,omitempty"` +} + +// DiagnosticSettingsResource the diagnostic setting resource. +type DiagnosticSettingsResource struct { + autorest.Response `json:"-"` + // DiagnosticSettings - Properties of a Diagnostic Settings Resource. + *DiagnosticSettings `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for DiagnosticSettingsResource. 
+func (dsr DiagnosticSettingsResource) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if dsr.DiagnosticSettings != nil {
+ objectMap["properties"] = dsr.DiagnosticSettings
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for DiagnosticSettingsResource struct.
+func (dsr *DiagnosticSettingsResource) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var diagnosticSettings DiagnosticSettings
+ err = json.Unmarshal(*v, &diagnosticSettings)
+ if err != nil {
+ return err
+ }
+ dsr.DiagnosticSettings = &diagnosticSettings
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ dsr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ dsr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ dsr.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// DiagnosticSettingsResourceCollection represents a collection of diagnostic settings resources.
+type DiagnosticSettingsResourceCollection struct {
+ autorest.Response `json:"-"`
+ // Value - The collection of diagnostic settings resources.
+ Value *[]DiagnosticSettingsResource `json:"value,omitempty"`
+}
+
+// Dimension specifies the criteria for converting log to metric.
+type Dimension struct {
+ // Name - Name of the dimension
+ Name *string `json:"name,omitempty"`
+ // Operator - Operator for dimension values
+ Operator *string `json:"operator,omitempty"`
+ // Values - List of dimension values
+ Values *[]string `json:"values,omitempty"`
+}
+
+// DynamicMetricCriteria criterion for dynamic threshold.
+type DynamicMetricCriteria struct {
+ // Operator - The operator used to compare the metric value against the threshold.
+ Operator interface{} `json:"operator,omitempty"`
+ // AlertSensitivity - The extent of deviation required to trigger an alert. This will affect how tight the threshold is to the metric series pattern.
+ AlertSensitivity interface{} `json:"alertSensitivity,omitempty"`
+ // FailingPeriods - The minimum number of violations required within the selected lookback time window required to raise an alert.
+ FailingPeriods *DynamicThresholdFailingPeriods `json:"failingPeriods,omitempty"`
+ // IgnoreDataBefore - Use this option to set the date from which to start learning the metric historical data and calculate the dynamic thresholds (in ISO8601 format)
+ IgnoreDataBefore *date.Time `json:"ignoreDataBefore,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized to this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Name - Name of the criteria.
+ Name *string `json:"name,omitempty"`
+ // MetricName - Name of the metric.
+ MetricName *string `json:"metricName,omitempty"`
+ // MetricNamespace - Namespace of the metric.
+ MetricNamespace *string `json:"metricNamespace,omitempty"`
+ // TimeAggregation - the criteria time aggregation types.
+ TimeAggregation interface{} `json:"timeAggregation,omitempty"`
+ // Dimensions - List of dimension conditions.
+ Dimensions *[]MetricDimension `json:"dimensions,omitempty"` + // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion' + CriterionType CriterionType `json:"criterionType,omitempty"` +} + +// MarshalJSON is the custom marshaler for DynamicMetricCriteria. +func (dmc DynamicMetricCriteria) MarshalJSON() ([]byte, error) { + dmc.CriterionType = CriterionTypeDynamicThresholdCriterion + objectMap := make(map[string]interface{}) + if dmc.Operator != nil { + objectMap["operator"] = dmc.Operator + } + if dmc.AlertSensitivity != nil { + objectMap["alertSensitivity"] = dmc.AlertSensitivity + } + if dmc.FailingPeriods != nil { + objectMap["failingPeriods"] = dmc.FailingPeriods + } + if dmc.IgnoreDataBefore != nil { + objectMap["ignoreDataBefore"] = dmc.IgnoreDataBefore + } + if dmc.Name != nil { + objectMap["name"] = dmc.Name + } + if dmc.MetricName != nil { + objectMap["metricName"] = dmc.MetricName + } + if dmc.MetricNamespace != nil { + objectMap["metricNamespace"] = dmc.MetricNamespace + } + if dmc.TimeAggregation != nil { + objectMap["timeAggregation"] = dmc.TimeAggregation + } + if dmc.Dimensions != nil { + objectMap["dimensions"] = dmc.Dimensions + } + if dmc.CriterionType != "" { + objectMap["criterionType"] = dmc.CriterionType + } + for k, v := range dmc.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// AsMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. +func (dmc DynamicMetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) { + return nil, false +} + +// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. +func (dmc DynamicMetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) { + return &dmc, true +} + +// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. +func (dmc DynamicMetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) { + return nil, false +} + +// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria. +func (dmc DynamicMetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) { + return &dmc, true +} + +// UnmarshalJSON is the custom unmarshaler for DynamicMetricCriteria struct. 
+func (dmc *DynamicMetricCriteria) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "operator": + if v != nil { + var operator interface{} + err = json.Unmarshal(*v, &operator) + if err != nil { + return err + } + dmc.Operator = operator + } + case "alertSensitivity": + if v != nil { + var alertSensitivity interface{} + err = json.Unmarshal(*v, &alertSensitivity) + if err != nil { + return err + } + dmc.AlertSensitivity = alertSensitivity + } + case "failingPeriods": + if v != nil { + var failingPeriods DynamicThresholdFailingPeriods + err = json.Unmarshal(*v, &failingPeriods) + if err != nil { + return err + } + dmc.FailingPeriods = &failingPeriods + } + case "ignoreDataBefore": + if v != nil { + var ignoreDataBefore date.Time + err = json.Unmarshal(*v, &ignoreDataBefore) + if err != nil { + return err + } + dmc.IgnoreDataBefore = &ignoreDataBefore + } + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if dmc.AdditionalProperties == nil { + dmc.AdditionalProperties = make(map[string]interface{}) + } + dmc.AdditionalProperties[k] = additionalProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + dmc.Name = &name + } + case "metricName": + if v != nil { + var metricName string + err = json.Unmarshal(*v, &metricName) + if err != nil { + return err + } + dmc.MetricName = &metricName + } + case "metricNamespace": + if v != nil { + var metricNamespace string + err = json.Unmarshal(*v, &metricNamespace) + if err != nil { + return err + } + dmc.MetricNamespace = &metricNamespace + } + case "timeAggregation": + if v != nil { + var timeAggregation interface{} + err = json.Unmarshal(*v, &timeAggregation) + if err != nil { + return err + } + dmc.TimeAggregation = timeAggregation + } + case "dimensions": + if v != nil { + var dimensions []MetricDimension + err = json.Unmarshal(*v, &dimensions) + if err != nil { + return err + } + dmc.Dimensions = &dimensions + } + case "criterionType": + if v != nil { + var criterionType CriterionType + err = json.Unmarshal(*v, &criterionType) + if err != nil { + return err + } + dmc.CriterionType = criterionType + } + } + } + + return nil +} + +// DynamicThresholdFailingPeriods the minimum number of violations required within the selected lookback +// time window required to raise an alert. +type DynamicThresholdFailingPeriods struct { + // NumberOfEvaluationPeriods - The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (windowSize) and the selected number of aggregated points. + NumberOfEvaluationPeriods *float64 `json:"numberOfEvaluationPeriods,omitempty"` + // MinFailingPeriodsToAlert - The number of violations to trigger an alert. Should be smaller or equal to numberOfEvaluationPeriods. + MinFailingPeriodsToAlert *float64 `json:"minFailingPeriodsToAlert,omitempty"` +} + +// EmailNotification email notification of an autoscale event. +type EmailNotification struct { + // SendToSubscriptionAdministrator - a value indicating whether to send email to subscription administrator. + SendToSubscriptionAdministrator *bool `json:"sendToSubscriptionAdministrator,omitempty"` + // SendToSubscriptionCoAdministrators - a value indicating whether to send email to subscription co-administrators. 
+ SendToSubscriptionCoAdministrators *bool `json:"sendToSubscriptionCoAdministrators,omitempty"` + // CustomEmails - the custom e-mails list. This value can be null or empty, in which case this attribute will be ignored. + CustomEmails *[]string `json:"customEmails,omitempty"` +} + +// EmailReceiver an email receiver. +type EmailReceiver struct { + // Name - The name of the email receiver. Names must be unique across all receivers within an action group. + Name *string `json:"name,omitempty"` + // EmailAddress - The email address of this receiver. + EmailAddress *string `json:"emailAddress,omitempty"` + // UseCommonAlertSchema - Indicates whether to use common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty"` + // Status - READ-ONLY; The receiver status of the e-mail. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled' + Status ReceiverStatus `json:"status,omitempty"` +} + +// EnableRequest describes a receiver that should be resubscribed. +type EnableRequest struct { + // ReceiverName - The name of the receiver to resubscribe. + ReceiverName *string `json:"receiverName,omitempty"` +} + +// Error error details. +type Error struct { + // Code - Error code identifying the specific error. + Code *string `json:"code,omitempty"` + // Message - Error message in the caller's locale. + Message *string `json:"message,omitempty"` +} + +// ErrorResponse describes the format of Error response. +type ErrorResponse struct { + // Code - Error code + Code *string `json:"code,omitempty"` + // Message - Error message indicating why the operation failed. + Message *string `json:"message,omitempty"` +} + +// EventCategoryCollection a collection of event categories. Currently possible values are: Administrative, +// Security, ServiceHealth, Alert, Recommendation, Policy. +type EventCategoryCollection struct { + autorest.Response `json:"-"` + // Value - the list that includes the Azure event categories. + Value *[]LocalizableString `json:"value,omitempty"` +} + +// EventData the Azure event log entries are of type EventData +type EventData struct { + // Authorization - READ-ONLY; The sender authorization information. + Authorization *SenderAuthorization `json:"authorization,omitempty"` + // Claims - READ-ONLY; key value pairs to identify ARM permissions. + Claims map[string]*string `json:"claims"` + // Caller - READ-ONLY; the email address of the user who has performed the operation, the UPN claim or SPN claim based on availability. + Caller *string `json:"caller,omitempty"` + // Description - READ-ONLY; the description of the event. + Description *string `json:"description,omitempty"` + // ID - READ-ONLY; the Id of this event as required by ARM for RBAC. It contains the EventDataID and a timestamp information. + ID *string `json:"id,omitempty"` + // EventDataID - READ-ONLY; the event data Id. This is a unique identifier for an event. + EventDataID *string `json:"eventDataId,omitempty"` + // CorrelationID - READ-ONLY; the correlation Id, usually a GUID in the string format. The correlation Id is shared among the events that belong to the same uber operation. + CorrelationID *string `json:"correlationId,omitempty"` + // EventName - READ-ONLY; the event name. This value should not be confused with OperationName. For practical purposes, OperationName might be more appealing to end users. + EventName *LocalizableString `json:"eventName,omitempty"` + // Category - READ-ONLY; the event category. 
+ Category *LocalizableString `json:"category,omitempty"`
+ // HTTPRequest - READ-ONLY; the HTTP request info. Usually includes the 'clientRequestId', 'clientIpAddress' (IP address of the user who initiated the event) and 'method' (HTTP method e.g. PUT).
+ HTTPRequest *HTTPRequestInfo `json:"httpRequest,omitempty"`
+ // Level - READ-ONLY; the event level. Possible values include: 'EventLevelCritical', 'EventLevelError', 'EventLevelWarning', 'EventLevelInformational', 'EventLevelVerbose'
+ Level EventLevel `json:"level,omitempty"`
+ // ResourceGroupName - READ-ONLY; the resource group name of the impacted resource.
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"`
+ // ResourceProviderName - READ-ONLY; the resource provider name of the impacted resource.
+ ResourceProviderName *LocalizableString `json:"resourceProviderName,omitempty"`
+ // ResourceID - READ-ONLY; the resource uri that uniquely identifies the resource that caused this event.
+ ResourceID *string `json:"resourceId,omitempty"`
+ // ResourceType - READ-ONLY; the resource type
+ ResourceType *LocalizableString `json:"resourceType,omitempty"`
+ // OperationID - READ-ONLY; It is usually a GUID shared among the events corresponding to single operation. This value should not be confused with EventName.
+ OperationID *string `json:"operationId,omitempty"`
+ // OperationName - READ-ONLY; the operation name.
+ OperationName *LocalizableString `json:"operationName,omitempty"`
+ // Properties - READ-ONLY; the set of <Key, Value> pairs (usually a Dictionary<String, String>) that includes details about the event.
+ Properties map[string]*string `json:"properties"`
+ // Status - READ-ONLY; a string describing the status of the operation. Some typical values are: Started, In progress, Succeeded, Failed, Resolved.
+ Status *LocalizableString `json:"status,omitempty"`
+ // SubStatus - READ-ONLY; the event sub status. Most of the time, when included, this captures the HTTP status code of the REST call. Common values are: OK (HTTP Status Code: 200), Created (HTTP Status Code: 201), Accepted (HTTP Status Code: 202), No Content (HTTP Status Code: 204), Bad Request(HTTP Status Code: 400), Not Found (HTTP Status Code: 404), Conflict (HTTP Status Code: 409), Internal Server Error (HTTP Status Code: 500), Service Unavailable (HTTP Status Code:503), Gateway Timeout (HTTP Status Code: 504)
+ SubStatus *LocalizableString `json:"subStatus,omitempty"`
+ // EventTimestamp - READ-ONLY; the timestamp of when the event was generated by the Azure service processing the request corresponding to the event. It is in ISO 8601 format.
+ EventTimestamp *date.Time `json:"eventTimestamp,omitempty"`
+ // SubmissionTimestamp - READ-ONLY; the timestamp of when the event became available for querying via this API. It is in ISO 8601 format. This value should not be confused with eventTimestamp, as there might be a delay between the occurrence time of the event and the time that the event is submitted to the Azure logging infrastructure.
+ SubmissionTimestamp *date.Time `json:"submissionTimestamp,omitempty"`
+ // SubscriptionID - READ-ONLY; the Azure subscription Id usually a GUID.
+ SubscriptionID *string `json:"subscriptionId,omitempty"`
+ // TenantID - READ-ONLY; the Azure tenant Id
+ TenantID *string `json:"tenantId,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for EventData.
+func (ed EventData) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ return json.Marshal(objectMap)
+}
+
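Every EventData field is READ-ONLY, which is why the marshaler above emits an empty object map: server-populated fields are stripped from outbound request bodies. A tiny sketch of the observable effect (hypothetical helper, same-package assumption):

// demoEventDataMarshal always yields "{}": the custom marshaler drops all
// fields because every one of them is server-populated (READ-ONLY).
func demoEventDataMarshal() (string, error) {
	caller := "someone@example.com"
	b, err := json.Marshal(EventData{Caller: &caller})
	return string(b), err // "{}" on success
}

+// EventDataCollection represents collection of events.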
+type EventDataCollection struct {
+ autorest.Response `json:"-"`
+ // Value - the list that includes the Azure audit logs.
+ Value *[]EventData `json:"value,omitempty"`
+ // NextLink - Provides the link to retrieve the next set of events.
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// EventDataCollectionIterator provides access to a complete listing of EventData values.
+type EventDataCollectionIterator struct {
+ i int
+ page EventDataCollectionPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *EventDataCollectionIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventDataCollectionIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err = iter.page.NextWithContext(ctx)
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *EventDataCollectionIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter EventDataCollectionIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter EventDataCollectionIterator) Response() EventDataCollection {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter EventDataCollectionIterator) Value() EventData {
+ if !iter.page.NotDone() {
+ return EventData{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the EventDataCollectionIterator type.
+func NewEventDataCollectionIterator(page EventDataCollectionPage) EventDataCollectionIterator {
+ return EventDataCollectionIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (edc EventDataCollection) IsEmpty() bool {
+ return edc.Value == nil || len(*edc.Value) == 0
+}
+
+// eventDataCollectionPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (edc EventDataCollection) eventDataCollectionPreparer(ctx context.Context) (*http.Request, error) {
+ if edc.NextLink == nil || len(to.String(edc.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(edc.NextLink)))
+}
+
+// EventDataCollectionPage contains a page of EventData values.
+type EventDataCollectionPage struct {
+ fn func(context.Context, EventDataCollection) (EventDataCollection, error)
+ edc EventDataCollection
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *EventDataCollectionPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/EventDataCollectionPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.edc) + if err != nil { + return err + } + page.edc = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *EventDataCollectionPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page EventDataCollectionPage) NotDone() bool { + return !page.edc.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page EventDataCollectionPage) Response() EventDataCollection { + return page.edc +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page EventDataCollectionPage) Values() []EventData { + if page.edc.IsEmpty() { + return nil + } + return *page.edc.Value +} + +// Creates a new instance of the EventDataCollectionPage type. +func NewEventDataCollectionPage(getNextPage func(context.Context, EventDataCollection) (EventDataCollection, error)) EventDataCollectionPage { + return EventDataCollectionPage{fn: getNextPage} +} + +// HTTPRequestInfo the Http request info. +type HTTPRequestInfo struct { + // ClientRequestID - the client request id. + ClientRequestID *string `json:"clientRequestId,omitempty"` + // ClientIPAddress - the client Ip Address + ClientIPAddress *string `json:"clientIpAddress,omitempty"` + // Method - the Http request method. + Method *string `json:"method,omitempty"` + // URI - the Uri. + URI *string `json:"uri,omitempty"` +} + +// Incident an alert incident indicates the activation status of an alert rule. +type Incident struct { + autorest.Response `json:"-"` + // Name - READ-ONLY; Incident name. + Name *string `json:"name,omitempty"` + // RuleName - READ-ONLY; Rule name that is associated with the incident. + RuleName *string `json:"ruleName,omitempty"` + // IsActive - READ-ONLY; A boolean to indicate whether the incident is active or resolved. + IsActive *bool `json:"isActive,omitempty"` + // ActivatedTime - READ-ONLY; The time at which the incident was activated in ISO8601 format. + ActivatedTime *date.Time `json:"activatedTime,omitempty"` + // ResolvedTime - READ-ONLY; The time at which the incident was resolved in ISO8601 format. If null, it means the incident is still active. + ResolvedTime *date.Time `json:"resolvedTime,omitempty"` +} + +// IncidentListResult the List incidents operation response. +type IncidentListResult struct { + autorest.Response `json:"-"` + // Value - the incident collection. + Value *[]Incident `json:"value,omitempty"` +} + +// ItsmReceiver an Itsm receiver. +type ItsmReceiver struct { + // Name - The name of the Itsm receiver. Names must be unique across all receivers within an action group. + Name *string `json:"name,omitempty"` + // WorkspaceID - OMS LA instance identifier. + WorkspaceID *string `json:"workspaceId,omitempty"` + // ConnectionID - Unique identification of ITSM connection among multiple defined in above workspace. 
+ ConnectionID *string `json:"connectionId,omitempty"`
+ // TicketConfiguration - JSON blob for the configurations of the ITSM action. CreateMultipleWorkItems option will be part of this blob as well.
+ TicketConfiguration *string `json:"ticketConfiguration,omitempty"`
+ // Region - Region in which workspace resides. Supported values: 'centralindia','japaneast','southeastasia','australiasoutheast','uksouth','westcentralus','canadacentral','eastus','westeurope'
+ Region *string `json:"region,omitempty"`
+}
+
+// LocalizableString the localizable string class.
+type LocalizableString struct {
+ // Value - the invariant value.
+ Value *string `json:"value,omitempty"`
+ // LocalizedValue - the locale specific value.
+ LocalizedValue *string `json:"localizedValue,omitempty"`
+}
+
+// LocationThresholdRuleCondition a rule condition based on a certain number of locations failing.
+type LocationThresholdRuleCondition struct {
+ // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
+ WindowSize *string `json:"windowSize,omitempty"`
+ // FailedLocationCount - the number of locations that must fail to activate the alert.
+ FailedLocationCount *int32 `json:"failedLocationCount,omitempty"`
+ // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
+ DataSource BasicRuleDataSource `json:"dataSource,omitempty"`
+ // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition'
+ OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LocationThresholdRuleCondition.
+func (ltrc LocationThresholdRuleCondition) MarshalJSON() ([]byte, error) {
+ ltrc.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition
+ objectMap := make(map[string]interface{})
+ if ltrc.WindowSize != nil {
+ objectMap["windowSize"] = ltrc.WindowSize
+ }
+ if ltrc.FailedLocationCount != nil {
+ objectMap["failedLocationCount"] = ltrc.FailedLocationCount
+ }
+ objectMap["dataSource"] = ltrc.DataSource
+ if ltrc.OdataType != "" {
+ objectMap["odata.type"] = ltrc.OdataType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsThresholdRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition.
+func (ltrc LocationThresholdRuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) {
+ return nil, false
+}
+
+// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition.
+func (ltrc LocationThresholdRuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) {
+ return &ltrc, true
+}
+
+// AsManagementEventRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition.
+func (ltrc LocationThresholdRuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) {
+ return nil, false
+}
+
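The As* methods implement the generated discriminated-union convention: code holding a BasicRuleCondition probes for the concrete type through these helpers rather than a bare type assertion. A sketch using only conditions declared in this file (the describeRuleCondition helper is hypothetical):

// describeRuleCondition branches on the concrete rule condition; each As*
// helper returns (nil, false) unless the receiver is the requested type.
func describeRuleCondition(cond BasicRuleCondition) string {
	if ltrc, ok := cond.AsLocationThresholdRuleCondition(); ok {
		_ = ltrc.FailedLocationCount // concrete fields are accessible here
		return "location threshold rule"
	}
	if _, ok := cond.AsThresholdRuleCondition(); ok {
		return "metric threshold rule"
	}
	return "other rule condition"
}

+// AsRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition.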
+func (ltrc LocationThresholdRuleCondition) AsRuleCondition() (*RuleCondition, bool) {
+ return nil, false
+}
+
+// AsBasicRuleCondition is the BasicRuleCondition implementation for LocationThresholdRuleCondition.
+func (ltrc LocationThresholdRuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) {
+ return &ltrc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for LocationThresholdRuleCondition struct.
+func (ltrc *LocationThresholdRuleCondition) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "windowSize":
+ if v != nil {
+ var windowSize string
+ err = json.Unmarshal(*v, &windowSize)
+ if err != nil {
+ return err
+ }
+ ltrc.WindowSize = &windowSize
+ }
+ case "failedLocationCount":
+ if v != nil {
+ var failedLocationCount int32
+ err = json.Unmarshal(*v, &failedLocationCount)
+ if err != nil {
+ return err
+ }
+ ltrc.FailedLocationCount = &failedLocationCount
+ }
+ case "dataSource":
+ if v != nil {
+ dataSource, err := unmarshalBasicRuleDataSource(*v)
+ if err != nil {
+ return err
+ }
+ ltrc.DataSource = dataSource
+ }
+ case "odata.type":
+ if v != nil {
+ var odataType OdataTypeBasicRuleCondition
+ err = json.Unmarshal(*v, &odataType)
+ if err != nil {
+ return err
+ }
+ ltrc.OdataType = odataType
+ }
+ }
+ }
+
+ return nil
+}
+
+// LogicAppReceiver a logic app receiver.
+type LogicAppReceiver struct {
+ // Name - The name of the logic app receiver. Names must be unique across all receivers within an action group.
+ Name *string `json:"name,omitempty"`
+ // ResourceID - The azure resource id of the logic app receiver.
+ ResourceID *string `json:"resourceId,omitempty"`
+ // CallbackURL - The callback url where the http request is sent.
+ CallbackURL *string `json:"callbackUrl,omitempty"`
+ // UseCommonAlertSchema - Indicates whether to use common alert schema.
+ UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty"`
+}
+
+// LogMetricTrigger a log metrics trigger descriptor.
+type LogMetricTrigger struct {
+ // ThresholdOperator - Evaluation operation for Metric -'GreaterThan' or 'LessThan' or 'Equal'. Possible values include: 'ConditionalOperatorGreaterThan', 'ConditionalOperatorLessThan', 'ConditionalOperatorEqual'
+ ThresholdOperator ConditionalOperator `json:"thresholdOperator,omitempty"`
+ // Threshold - The threshold of the metric trigger.
+ Threshold *float64 `json:"threshold,omitempty"`
+ // MetricTriggerType - Metric Trigger Type - 'Consecutive' or 'Total'. Possible values include: 'MetricTriggerTypeConsecutive', 'MetricTriggerTypeTotal'
+ MetricTriggerType MetricTriggerType `json:"metricTriggerType,omitempty"`
+ // MetricColumn - Evaluation of metric on a particular column
+ MetricColumn *string `json:"metricColumn,omitempty"`
+}
+
+// LogProfileCollection represents a collection of log profiles.
+type LogProfileCollection struct {
+ autorest.Response `json:"-"`
+ // Value - the values of the log profiles.
+ Value *[]LogProfileResource `json:"value,omitempty"`
+}
+
+// LogProfileProperties the log profile properties.
+type LogProfileProperties struct {
+ // StorageAccountID - the resource id of the storage account to which you would like to send the Activity Log.
+ StorageAccountID *string `json:"storageAccountId,omitempty"`
+ // ServiceBusRuleID - The service bus rule ID of the service bus namespace in which you would like to have Event Hubs created for streaming the Activity Log.
The rule ID is of the format: '{service bus resource ID}/authorizationrules/{key name}'. + ServiceBusRuleID *string `json:"serviceBusRuleId,omitempty"` + // Locations - List of regions for which Activity Log events should be stored or streamed. It is a comma separated list of valid ARM locations including the 'global' location. + Locations *[]string `json:"locations,omitempty"` + // Categories - the categories of the logs. These categories are created as is convenient to the user. Some values are: 'Write', 'Delete', and/or 'Action.' + Categories *[]string `json:"categories,omitempty"` + // RetentionPolicy - the retention policy for the events in the log. + RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"` +} + +// LogProfileResource the log profile resource. +type LogProfileResource struct { + autorest.Response `json:"-"` + // LogProfileProperties - The log profile properties of the resource. + *LogProfileProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for LogProfileResource. +func (lpr LogProfileResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lpr.LogProfileProperties != nil { + objectMap["properties"] = lpr.LogProfileProperties + } + if lpr.Location != nil { + objectMap["location"] = lpr.Location + } + if lpr.Tags != nil { + objectMap["tags"] = lpr.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LogProfileResource struct. +func (lpr *LogProfileResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var logProfileProperties LogProfileProperties + err = json.Unmarshal(*v, &logProfileProperties) + if err != nil { + return err + } + lpr.LogProfileProperties = &logProfileProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lpr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lpr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lpr.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + lpr.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + lpr.Tags = tags + } + } + } + + return nil +} + +// LogProfileResourcePatch the log profile resource for patch operations. +type LogProfileResourcePatch struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + // LogProfileProperties - The log profile properties for an update operation. + *LogProfileProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for LogProfileResourcePatch. 
+func (lprp LogProfileResourcePatch) MarshalJSON() ([]byte, error) {
+	objectMap := make(map[string]interface{})
+	if lprp.Tags != nil {
+		objectMap["tags"] = lprp.Tags
+	}
+	if lprp.LogProfileProperties != nil {
+		objectMap["properties"] = lprp.LogProfileProperties
+	}
+	return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for LogProfileResourcePatch struct.
+func (lprp *LogProfileResourcePatch) UnmarshalJSON(body []byte) error {
+	var m map[string]*json.RawMessage
+	err := json.Unmarshal(body, &m)
+	if err != nil {
+		return err
+	}
+	for k, v := range m {
+		switch k {
+		case "tags":
+			if v != nil {
+				var tags map[string]*string
+				err = json.Unmarshal(*v, &tags)
+				if err != nil {
+					return err
+				}
+				lprp.Tags = tags
+			}
+		case "properties":
+			if v != nil {
+				var logProfileProperties LogProfileProperties
+				err = json.Unmarshal(*v, &logProfileProperties)
+				if err != nil {
+					return err
+				}
+				lprp.LogProfileProperties = &logProfileProperties
+			}
+		}
+	}
+
+	return nil
+}
+
+// LogSearchRule log Search Rule Definition
+type LogSearchRule struct {
+	// Description - The description of the Log Search rule.
+	Description *string `json:"description,omitempty"`
+	// Enabled - The flag which indicates whether the Log Search rule is enabled. Value should be true or false. Possible values include: 'True', 'False'
+	Enabled Enabled `json:"enabled,omitempty"`
+	// LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format.
+	LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
+	// ProvisioningState - READ-ONLY; Provisioning state of the scheduled query rule. Possible values include: 'Succeeded', 'Deploying', 'Canceled', 'Failed'
+	ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
+	// Source - Data Source against which the rule will query data
+	Source *Source `json:"source,omitempty"`
+	// Schedule - Schedule (Frequency, Time Window) for rule. Required for action type - AlertingAction
+	Schedule *Schedule `json:"schedule,omitempty"`
+	// Action - Action to be taken on rule execution.
+	Action BasicAction `json:"action,omitempty"`
+}
+
+// UnmarshalJSON is the custom unmarshaler for LogSearchRule struct.
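+// A hand-written unmarshaler is needed because the `action` field holds the
+// polymorphic BasicAction interface: it is decoded via unmarshalBasicAction,
+// which dispatches on the embedded `odata.type` discriminator, while the
+// remaining fields are decoded key by key. A hedged consumption sketch
+// (`payload` is an assumed []byte carrying a rule with an action):
+//
+//	var rule LogSearchRule
+//	if err := json.Unmarshal(payload, &rule); err != nil {
+//		// handle the error
+//	}
+//	if alerting, ok := rule.Action.AsAlertingAction(); ok {
+//		_ = alerting // narrowed to *AlertingAction
+//	}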
+func (lsr *LogSearchRule) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "description": + if v != nil { + var description string + err = json.Unmarshal(*v, &description) + if err != nil { + return err + } + lsr.Description = &description + } + case "enabled": + if v != nil { + var enabled Enabled + err = json.Unmarshal(*v, &enabled) + if err != nil { + return err + } + lsr.Enabled = enabled + } + case "lastUpdatedTime": + if v != nil { + var lastUpdatedTime date.Time + err = json.Unmarshal(*v, &lastUpdatedTime) + if err != nil { + return err + } + lsr.LastUpdatedTime = &lastUpdatedTime + } + case "provisioningState": + if v != nil { + var provisioningState ProvisioningState + err = json.Unmarshal(*v, &provisioningState) + if err != nil { + return err + } + lsr.ProvisioningState = provisioningState + } + case "source": + if v != nil { + var source Source + err = json.Unmarshal(*v, &source) + if err != nil { + return err + } + lsr.Source = &source + } + case "schedule": + if v != nil { + var schedule Schedule + err = json.Unmarshal(*v, &schedule) + if err != nil { + return err + } + lsr.Schedule = &schedule + } + case "action": + if v != nil { + action, err := unmarshalBasicAction(*v) + if err != nil { + return err + } + lsr.Action = action + } + } + } + + return nil +} + +// LogSearchRulePatch log Search Rule Definition for Patching +type LogSearchRulePatch struct { + // Enabled - The flag which indicates whether the Log Search rule is enabled. Value should be true or false. Possible values include: 'True', 'False' + Enabled Enabled `json:"enabled,omitempty"` +} + +// LogSearchRuleResource the Log Search Rule resource. +type LogSearchRuleResource struct { + autorest.Response `json:"-"` + // LogSearchRule - The rule properties of the resource. + *LogSearchRule `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for LogSearchRuleResource. +func (lsrr LogSearchRuleResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lsrr.LogSearchRule != nil { + objectMap["properties"] = lsrr.LogSearchRule + } + if lsrr.Location != nil { + objectMap["location"] = lsrr.Location + } + if lsrr.Tags != nil { + objectMap["tags"] = lsrr.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LogSearchRuleResource struct. 
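+// As with the other ARM resources in this file, the wire format nests the
+// rule fields under a "properties" envelope; this unmarshaler lifts them into
+// the embedded *LogSearchRule, whose fields are then promoted in Go. Sketch
+// (`payload` is an assumed []byte; the embedded struct is only non-nil when
+// the payload actually carried "properties"):
+//
+//	var res LogSearchRuleResource
+//	_ = json.Unmarshal(payload, &res)
+//	desc := res.Description // promoted from the embedded LogSearchRule
+//	_ = desc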
+func (lsrr *LogSearchRuleResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var logSearchRule LogSearchRule + err = json.Unmarshal(*v, &logSearchRule) + if err != nil { + return err + } + lsrr.LogSearchRule = &logSearchRule + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + lsrr.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + lsrr.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + lsrr.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + lsrr.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + lsrr.Tags = tags + } + } + } + + return nil +} + +// LogSearchRuleResourceCollection represents a collection of Log Search rule resources. +type LogSearchRuleResourceCollection struct { + autorest.Response `json:"-"` + // Value - The values for the Log Search Rule resources. + Value *[]LogSearchRuleResource `json:"value,omitempty"` +} + +// LogSearchRuleResourcePatch the log search rule resource for patch operations. +type LogSearchRuleResourcePatch struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + // LogSearchRulePatch - The log search rule properties of the resource. + *LogSearchRulePatch `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for LogSearchRuleResourcePatch. +func (lsrrp LogSearchRuleResourcePatch) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if lsrrp.Tags != nil { + objectMap["tags"] = lsrrp.Tags + } + if lsrrp.LogSearchRulePatch != nil { + objectMap["properties"] = lsrrp.LogSearchRulePatch + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for LogSearchRuleResourcePatch struct. +func (lsrrp *LogSearchRuleResourcePatch) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + lsrrp.Tags = tags + } + case "properties": + if v != nil { + var logSearchRulePatch LogSearchRulePatch + err = json.Unmarshal(*v, &logSearchRulePatch) + if err != nil { + return err + } + lsrrp.LogSearchRulePatch = &logSearchRulePatch + } + } + } + + return nil +} + +// LogSettings part of MultiTenantDiagnosticSettings. Specifies the settings for a particular log. +type LogSettings struct { + // Category - Name of a Diagnostic Log category for a resource type this setting is applied to. To obtain the list of Diagnostic Log categories for a resource, first perform a GET diagnostic settings operation. + Category *string `json:"category,omitempty"` + // Enabled - a value indicating whether this log is enabled. + Enabled *bool `json:"enabled,omitempty"` + // RetentionPolicy - the retention policy for this log. 
+	RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"`
+}
+
+// LogToMetricAction specifies the action to be taken when the rule type is converting log to metric
+type LogToMetricAction struct {
+	// Criteria - Criteria of Metric
+	Criteria *[]Criteria `json:"criteria,omitempty"`
+	// OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction'
+	OdataType OdataTypeBasicAction `json:"odata.type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for LogToMetricAction.
+func (ltma LogToMetricAction) MarshalJSON() ([]byte, error) {
+	ltma.OdataType = OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction
+	objectMap := make(map[string]interface{})
+	if ltma.Criteria != nil {
+		objectMap["criteria"] = ltma.Criteria
+	}
+	if ltma.OdataType != "" {
+		objectMap["odata.type"] = ltma.OdataType
+	}
+	return json.Marshal(objectMap)
+}
+
+// AsAlertingAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsAlertingAction() (*AlertingAction, bool) {
+	return nil, false
+}
+
+// AsLogToMetricAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsLogToMetricAction() (*LogToMetricAction, bool) {
+	return &ltma, true
+}
+
+// AsAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsAction() (*Action, bool) {
+	return nil, false
+}
+
+// AsBasicAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsBasicAction() (BasicAction, bool) {
+	return &ltma, true
+}
+
+// ManagementEventAggregationCondition how the data that is collected should be combined over time.
+type ManagementEventAggregationCondition struct {
+	// Operator - the condition operator. Possible values include: 'ConditionOperatorGreaterThan', 'ConditionOperatorGreaterThanOrEqual', 'ConditionOperatorLessThan', 'ConditionOperatorLessThanOrEqual'
+	Operator ConditionOperator `json:"operator,omitempty"`
+	// Threshold - The threshold value that activates the alert.
+	Threshold *float64 `json:"threshold,omitempty"`
+	// WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
+	WindowSize *string `json:"windowSize,omitempty"`
+}
+
+// ManagementEventRuleCondition a management event rule condition.
+type ManagementEventRuleCondition struct {
+	// Aggregation - How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
+	Aggregation *ManagementEventAggregationCondition `json:"aggregation,omitempty"`
+	// DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
+ DataSource BasicRuleDataSource `json:"dataSource,omitempty"` + // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition' + OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ManagementEventRuleCondition. +func (merc ManagementEventRuleCondition) MarshalJSON() ([]byte, error) { + merc.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition + objectMap := make(map[string]interface{}) + if merc.Aggregation != nil { + objectMap["aggregation"] = merc.Aggregation + } + objectMap["dataSource"] = merc.DataSource + if merc.OdataType != "" { + objectMap["odata.type"] = merc.OdataType + } + return json.Marshal(objectMap) +} + +// AsThresholdRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. +func (merc ManagementEventRuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) { + return nil, false +} + +// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. +func (merc ManagementEventRuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) { + return nil, false +} + +// AsManagementEventRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. +func (merc ManagementEventRuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) { + return &merc, true +} + +// AsRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. +func (merc ManagementEventRuleCondition) AsRuleCondition() (*RuleCondition, bool) { + return nil, false +} + +// AsBasicRuleCondition is the BasicRuleCondition implementation for ManagementEventRuleCondition. +func (merc ManagementEventRuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) { + return &merc, true +} + +// UnmarshalJSON is the custom unmarshaler for ManagementEventRuleCondition struct. +func (merc *ManagementEventRuleCondition) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "aggregation": + if v != nil { + var aggregation ManagementEventAggregationCondition + err = json.Unmarshal(*v, &aggregation) + if err != nil { + return err + } + merc.Aggregation = &aggregation + } + case "dataSource": + if v != nil { + dataSource, err := unmarshalBasicRuleDataSource(*v) + if err != nil { + return err + } + merc.DataSource = dataSource + } + case "odata.type": + if v != nil { + var odataType OdataTypeBasicRuleCondition + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + merc.OdataType = odataType + } + } + } + + return nil +} + +// MetadataValue represents a metric metadata value. +type MetadataValue struct { + // Name - the name of the metadata. + Name *LocalizableString `json:"name,omitempty"` + // Value - the value of the metadata. + Value *string `json:"value,omitempty"` +} + +// Metric the result data of a query. +type Metric struct { + // ID - the metric Id. + ID *string `json:"id,omitempty"` + // Type - the resource type of the metric resource. + Type *string `json:"type,omitempty"` + // Name - the name and the display name of the metric, i.e. 
it is localizable string. + Name *LocalizableString `json:"name,omitempty"` + // Unit - the unit of the metric. Possible values include: 'UnitCount', 'UnitBytes', 'UnitSeconds', 'UnitCountPerSecond', 'UnitBytesPerSecond', 'UnitPercent', 'UnitMilliSeconds', 'UnitByteSeconds', 'UnitUnspecified', 'UnitCores', 'UnitMilliCores', 'UnitNanoCores', 'UnitBitsPerSecond' + Unit Unit `json:"unit,omitempty"` + // Timeseries - the time series returned when a data query is performed. + Timeseries *[]TimeSeriesElement `json:"timeseries,omitempty"` +} + +// MetricAlertAction an alert action. +type MetricAlertAction struct { + // ActionGroupID - the id of the action group to use. + ActionGroupID *string `json:"actionGroupId,omitempty"` + // WebhookProperties - The properties of a webhook object. + WebhookProperties map[string]*string `json:"webhookProperties"` +} + +// MarshalJSON is the custom marshaler for MetricAlertAction. +func (maa MetricAlertAction) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if maa.ActionGroupID != nil { + objectMap["actionGroupId"] = maa.ActionGroupID + } + if maa.WebhookProperties != nil { + objectMap["webhookProperties"] = maa.WebhookProperties + } + return json.Marshal(objectMap) +} + +// BasicMetricAlertCriteria the rule criteria that defines the conditions of the alert rule. +type BasicMetricAlertCriteria interface { + AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) + AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) + AsMetricAlertCriteria() (*MetricAlertCriteria, bool) +} + +// MetricAlertCriteria the rule criteria that defines the conditions of the alert rule. +type MetricAlertCriteria struct { + // AdditionalProperties - Unmatched properties from the message are deserialized this collection + AdditionalProperties map[string]interface{} `json:""` + // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' + OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` +} + +func unmarshalBasicMetricAlertCriteria(body []byte) (BasicMetricAlertCriteria, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["odata.type"] { + case string(OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria): + var masrmmc MetricAlertSingleResourceMultipleMetricCriteria + err := json.Unmarshal(body, &masrmmc) + return masrmmc, err + case string(OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria): + var mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria + err := json.Unmarshal(body, &mamrmmc) + return mamrmmc, err + default: + var mac MetricAlertCriteria + err := json.Unmarshal(body, &mac) + return mac, err + } +} +func unmarshalBasicMetricAlertCriteriaArray(body []byte) ([]BasicMetricAlertCriteria, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + macArray := make([]BasicMetricAlertCriteria, len(rawMessages)) + + for index, rawMessage := range rawMessages { + mac, err := unmarshalBasicMetricAlertCriteria(*rawMessage) + if err != nil { + return nil, err + } + macArray[index] = mac + } + return macArray, nil +} + +// MarshalJSON is the custom marshaler for MetricAlertCriteria. 
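+// The base-type marshaler pins the `odata.type` discriminator and flattens
+// AdditionalProperties back into the top-level object, so unknown keys
+// survive a decode/encode round trip. A small sketch (editor's illustration):
+//
+//	var mac MetricAlertCriteria
+//	_ = json.Unmarshal([]byte(`{"extra":1}`), &mac) // "extra" lands in AdditionalProperties
+//	b, _ := json.Marshal(mac)                       // "extra" is preserved alongside odata.type
+//	_ = b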
+func (mac MetricAlertCriteria) MarshalJSON() ([]byte, error) { + mac.OdataType = OdataTypeMetricAlertCriteria + objectMap := make(map[string]interface{}) + if mac.OdataType != "" { + objectMap["odata.type"] = mac.OdataType + } + for k, v := range mac.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. +func (mac MetricAlertCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { + return nil, false +} + +// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. +func (mac MetricAlertCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { + return nil, false +} + +// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. +func (mac MetricAlertCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { + return &mac, true +} + +// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria. +func (mac MetricAlertCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { + return &mac, true +} + +// UnmarshalJSON is the custom unmarshaler for MetricAlertCriteria struct. +func (mac *MetricAlertCriteria) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if mac.AdditionalProperties == nil { + mac.AdditionalProperties = make(map[string]interface{}) + } + mac.AdditionalProperties[k] = additionalProperties + } + case "odata.type": + if v != nil { + var odataType OdataTypeBasicMetricAlertCriteria + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + mac.OdataType = odataType + } + } + } + + return nil +} + +// MetricAlertMultipleResourceMultipleMetricCriteria specifies the metric alert criteria for multiple +// resource that has multiple metric criteria. +type MetricAlertMultipleResourceMultipleMetricCriteria struct { + // AllOf - the list of multiple metric criteria for this 'all of' operation. + AllOf *[]BasicMultiMetricCriteria `json:"allOf,omitempty"` + // AdditionalProperties - Unmatched properties from the message are deserialized this collection + AdditionalProperties map[string]interface{} `json:""` + // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' + OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for MetricAlertMultipleResourceMultipleMetricCriteria. 
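+// Besides pinning the discriminator, the marshaler serializes AllOf, whose
+// elements are the BasicMultiMetricCriteria interface, so one alert can mix
+// static (MetricCriteria) and dynamic (DynamicMetricCriteria) thresholds.
+// Construction sketch (names and values are illustrative only):
+//
+//	name, metric := "HighCPU", "Percentage CPU"
+//	threshold := 90.0
+//	crit := MetricAlertMultipleResourceMultipleMetricCriteria{
+//		AllOf: &[]BasicMultiMetricCriteria{
+//			MetricCriteria{Name: &name, MetricName: &metric, Threshold: &threshold},
+//		},
+//	}
+//	_ = crit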
+func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) MarshalJSON() ([]byte, error) { + mamrmmc.OdataType = OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria + objectMap := make(map[string]interface{}) + if mamrmmc.AllOf != nil { + objectMap["allOf"] = mamrmmc.AllOf + } + if mamrmmc.OdataType != "" { + objectMap["odata.type"] = mamrmmc.OdataType + } + for k, v := range mamrmmc.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. +func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { + return nil, false +} + +// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. +func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { + return &mamrmmc, true +} + +// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. +func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { + return nil, false +} + +// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria. +func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { + return &mamrmmc, true +} + +// UnmarshalJSON is the custom unmarshaler for MetricAlertMultipleResourceMultipleMetricCriteria struct. +func (mamrmmc *MetricAlertMultipleResourceMultipleMetricCriteria) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "allOf": + if v != nil { + allOf, err := unmarshalBasicMultiMetricCriteriaArray(*v) + if err != nil { + return err + } + mamrmmc.AllOf = &allOf + } + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if mamrmmc.AdditionalProperties == nil { + mamrmmc.AdditionalProperties = make(map[string]interface{}) + } + mamrmmc.AdditionalProperties[k] = additionalProperties + } + case "odata.type": + if v != nil { + var odataType OdataTypeBasicMetricAlertCriteria + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + mamrmmc.OdataType = odataType + } + } + } + + return nil +} + +// MetricAlertProperties an alert rule. +type MetricAlertProperties struct { + // Description - the description of the metric alert that will be included in the alert email. + Description *string `json:"description,omitempty"` + // Severity - Alert severity {0, 1, 2, 3, 4} + Severity *int32 `json:"severity,omitempty"` + // Enabled - the flag that indicates whether the metric alert is enabled. + Enabled *bool `json:"enabled,omitempty"` + // Scopes - the list of resource id's that this metric alert is scoped to. + Scopes *[]string `json:"scopes,omitempty"` + // EvaluationFrequency - how often the metric alert is evaluated represented in ISO 8601 duration format. 
+ EvaluationFrequency *string `json:"evaluationFrequency,omitempty"` + // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. + WindowSize *string `json:"windowSize,omitempty"` + // TargetResourceType - the resource type of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria. + TargetResourceType *string `json:"targetResourceType,omitempty"` + // TargetResourceRegion - the region of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria. + TargetResourceRegion *string `json:"targetResourceRegion,omitempty"` + // Criteria - defines the specific alert criteria information. + Criteria BasicMetricAlertCriteria `json:"criteria,omitempty"` + // AutoMitigate - the flag that indicates whether the alert should be auto resolved or not. + AutoMitigate *bool `json:"autoMitigate,omitempty"` + // Actions - the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved. + Actions *[]MetricAlertAction `json:"actions,omitempty"` + // LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format. + LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"` +} + +// UnmarshalJSON is the custom unmarshaler for MetricAlertProperties struct. +func (mapVar *MetricAlertProperties) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "description": + if v != nil { + var description string + err = json.Unmarshal(*v, &description) + if err != nil { + return err + } + mapVar.Description = &description + } + case "severity": + if v != nil { + var severity int32 + err = json.Unmarshal(*v, &severity) + if err != nil { + return err + } + mapVar.Severity = &severity + } + case "enabled": + if v != nil { + var enabled bool + err = json.Unmarshal(*v, &enabled) + if err != nil { + return err + } + mapVar.Enabled = &enabled + } + case "scopes": + if v != nil { + var scopes []string + err = json.Unmarshal(*v, &scopes) + if err != nil { + return err + } + mapVar.Scopes = &scopes + } + case "evaluationFrequency": + if v != nil { + var evaluationFrequency string + err = json.Unmarshal(*v, &evaluationFrequency) + if err != nil { + return err + } + mapVar.EvaluationFrequency = &evaluationFrequency + } + case "windowSize": + if v != nil { + var windowSize string + err = json.Unmarshal(*v, &windowSize) + if err != nil { + return err + } + mapVar.WindowSize = &windowSize + } + case "targetResourceType": + if v != nil { + var targetResourceType string + err = json.Unmarshal(*v, &targetResourceType) + if err != nil { + return err + } + mapVar.TargetResourceType = &targetResourceType + } + case "targetResourceRegion": + if v != nil { + var targetResourceRegion string + err = json.Unmarshal(*v, &targetResourceRegion) + if err != nil { + return err + } + mapVar.TargetResourceRegion = &targetResourceRegion + } + case "criteria": + if v != nil { + criteria, err := unmarshalBasicMetricAlertCriteria(*v) + if err != nil { + return err + } + mapVar.Criteria = criteria + } + case "autoMitigate": + if v != nil { + var autoMitigate bool + err = json.Unmarshal(*v, &autoMitigate) + if err != nil { + return err + } + mapVar.AutoMitigate = &autoMitigate + } + case "actions": + if v != nil { + var actions []MetricAlertAction + err = json.Unmarshal(*v, &actions) 
+ if err != nil { + return err + } + mapVar.Actions = &actions + } + case "lastUpdatedTime": + if v != nil { + var lastUpdatedTime date.Time + err = json.Unmarshal(*v, &lastUpdatedTime) + if err != nil { + return err + } + mapVar.LastUpdatedTime = &lastUpdatedTime + } + } + } + + return nil +} + +// MetricAlertResource the metric alert resource. +type MetricAlertResource struct { + autorest.Response `json:"-"` + // MetricAlertProperties - The alert rule properties of the resource. + *MetricAlertProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for MetricAlertResource. +func (mar MetricAlertResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if mar.MetricAlertProperties != nil { + objectMap["properties"] = mar.MetricAlertProperties + } + if mar.Location != nil { + objectMap["location"] = mar.Location + } + if mar.Tags != nil { + objectMap["tags"] = mar.Tags + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for MetricAlertResource struct. +func (mar *MetricAlertResource) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var metricAlertProperties MetricAlertProperties + err = json.Unmarshal(*v, &metricAlertProperties) + if err != nil { + return err + } + mar.MetricAlertProperties = &metricAlertProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + mar.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mar.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + mar.Type = &typeVar + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + mar.Location = &location + } + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + mar.Tags = tags + } + } + } + + return nil +} + +// MetricAlertResourceCollection represents a collection of alert rule resources. +type MetricAlertResourceCollection struct { + autorest.Response `json:"-"` + // Value - the values for the alert rule resources. + Value *[]MetricAlertResource `json:"value,omitempty"` +} + +// MetricAlertResourcePatch the metric alert resource for patch operations. +type MetricAlertResourcePatch struct { + // Tags - Resource tags + Tags map[string]*string `json:"tags"` + // MetricAlertProperties - The alert rule properties of the resource. + *MetricAlertProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for MetricAlertResourcePatch. 
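+// As with the other *Patch types, only the tags map and the flattened
+// properties envelope are emitted. A common use is toggling an alert without
+// touching the rest of its definition (sketch; values are illustrative):
+//
+//	enabled := false
+//	patch := MetricAlertResourcePatch{
+//		MetricAlertProperties: &MetricAlertProperties{Enabled: &enabled},
+//	}
+//	body, _ := json.Marshal(patch) // => {"properties":{"enabled":false}}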
+func (marp MetricAlertResourcePatch) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if marp.Tags != nil { + objectMap["tags"] = marp.Tags + } + if marp.MetricAlertProperties != nil { + objectMap["properties"] = marp.MetricAlertProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for MetricAlertResourcePatch struct. +func (marp *MetricAlertResourcePatch) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "tags": + if v != nil { + var tags map[string]*string + err = json.Unmarshal(*v, &tags) + if err != nil { + return err + } + marp.Tags = tags + } + case "properties": + if v != nil { + var metricAlertProperties MetricAlertProperties + err = json.Unmarshal(*v, &metricAlertProperties) + if err != nil { + return err + } + marp.MetricAlertProperties = &metricAlertProperties + } + } + } + + return nil +} + +// MetricAlertSingleResourceMultipleMetricCriteria specifies the metric alert criteria for a single +// resource that has multiple metric criteria. +type MetricAlertSingleResourceMultipleMetricCriteria struct { + // AllOf - The list of metric criteria for this 'all of' operation. + AllOf *[]MetricCriteria `json:"allOf,omitempty"` + // AdditionalProperties - Unmatched properties from the message are deserialized this collection + AdditionalProperties map[string]interface{} `json:""` + // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria' + OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for MetricAlertSingleResourceMultipleMetricCriteria. +func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) MarshalJSON() ([]byte, error) { + masrmmc.OdataType = OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria + objectMap := make(map[string]interface{}) + if masrmmc.AllOf != nil { + objectMap["allOf"] = masrmmc.AllOf + } + if masrmmc.OdataType != "" { + objectMap["odata.type"] = masrmmc.OdataType + } + for k, v := range masrmmc.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. +func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) { + return &masrmmc, true +} + +// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. +func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) { + return nil, false +} + +// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. +func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) { + return nil, false +} + +// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria. 
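+// Each concrete criteria type answers true only for its own As* accessor
+// (plus the AsBasic* form), letting callers narrow the interface without a
+// type switch. Consumption sketch (`props` is an assumed, already-decoded
+// MetricAlertProperties value):
+//
+//	if single, ok := props.Criteria.AsMetricAlertSingleResourceMultipleMetricCriteria(); ok {
+//		_ = single.AllOf // the per-metric criteria for a single resource
+//	}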
+func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) { + return &masrmmc, true +} + +// UnmarshalJSON is the custom unmarshaler for MetricAlertSingleResourceMultipleMetricCriteria struct. +func (masrmmc *MetricAlertSingleResourceMultipleMetricCriteria) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "allOf": + if v != nil { + var allOf []MetricCriteria + err = json.Unmarshal(*v, &allOf) + if err != nil { + return err + } + masrmmc.AllOf = &allOf + } + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if masrmmc.AdditionalProperties == nil { + masrmmc.AdditionalProperties = make(map[string]interface{}) + } + masrmmc.AdditionalProperties[k] = additionalProperties + } + case "odata.type": + if v != nil { + var odataType OdataTypeBasicMetricAlertCriteria + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + masrmmc.OdataType = odataType + } + } + } + + return nil +} + +// MetricAlertStatus an alert status. +type MetricAlertStatus struct { + // Name - The status name. + Name *string `json:"name,omitempty"` + // ID - The alert rule arm id. + ID *string `json:"id,omitempty"` + // Type - The extended resource type name. + Type *string `json:"type,omitempty"` + // Properties - The alert status properties of the metric alert status. + Properties *MetricAlertStatusProperties `json:"properties,omitempty"` +} + +// MetricAlertStatusCollection represents a collection of alert rule resources. +type MetricAlertStatusCollection struct { + autorest.Response `json:"-"` + // Value - the values for the alert rule resources. + Value *[]MetricAlertStatus `json:"value,omitempty"` +} + +// MetricAlertStatusProperties an alert status properties. +type MetricAlertStatusProperties struct { + // Dimensions - An object describing the type of the dimensions. + Dimensions map[string]*string `json:"dimensions"` + // Status - status value + Status *string `json:"status,omitempty"` + // Timestamp - UTC time when the status was checked. + Timestamp *date.Time `json:"timestamp,omitempty"` +} + +// MarshalJSON is the custom marshaler for MetricAlertStatusProperties. +func (masp MetricAlertStatusProperties) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if masp.Dimensions != nil { + objectMap["dimensions"] = masp.Dimensions + } + if masp.Status != nil { + objectMap["status"] = masp.Status + } + if masp.Timestamp != nil { + objectMap["timestamp"] = masp.Timestamp + } + return json.Marshal(objectMap) +} + +// MetricAvailability metric availability specifies the time grain (aggregation interval or frequency) and +// the retention period for that time grain. +type MetricAvailability struct { + // TimeGrain - the time grain specifies the aggregation interval for the metric. Expressed as a duration 'PT1M', 'P1D', etc. + TimeGrain *string `json:"timeGrain,omitempty"` + // Retention - the retention period for the metric at the specified timegrain. Expressed as a duration 'PT1M', 'P1D', etc. + Retention *string `json:"retention,omitempty"` +} + +// MetricBaselinesProperties the response to a metric baselines query. +type MetricBaselinesProperties struct { + // Timespan - The timespan for which the data was retrieved. 
Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested. + Timespan *string `json:"timespan,omitempty"` + // Interval - The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made. + Interval *string `json:"interval,omitempty"` + // Namespace - The namespace of the metrics been queried. + Namespace *string `json:"namespace,omitempty"` + // Baselines - The baseline for each time series that was queried. + Baselines *[]TimeSeriesBaseline `json:"baselines,omitempty"` +} + +// MetricBaselinesResponse a list of metric baselines. +type MetricBaselinesResponse struct { + autorest.Response `json:"-"` + // Value - The list of metric baselines. + Value *[]SingleMetricBaseline `json:"value,omitempty"` +} + +// MetricCriteria criterion to filter metrics. +type MetricCriteria struct { + // Operator - the criteria operator. + Operator interface{} `json:"operator,omitempty"` + // Threshold - the criteria threshold value that activates the alert. + Threshold *float64 `json:"threshold,omitempty"` + // AdditionalProperties - Unmatched properties from the message are deserialized this collection + AdditionalProperties map[string]interface{} `json:""` + // Name - Name of the criteria. + Name *string `json:"name,omitempty"` + // MetricName - Name of the metric. + MetricName *string `json:"metricName,omitempty"` + // MetricNamespace - Namespace of the metric. + MetricNamespace *string `json:"metricNamespace,omitempty"` + // TimeAggregation - the criteria time aggregation types. + TimeAggregation interface{} `json:"timeAggregation,omitempty"` + // Dimensions - List of dimension conditions. + Dimensions *[]MetricDimension `json:"dimensions,omitempty"` + // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion' + CriterionType CriterionType `json:"criterionType,omitempty"` +} + +// MarshalJSON is the custom marshaler for MetricCriteria. +func (mc MetricCriteria) MarshalJSON() ([]byte, error) { + mc.CriterionType = CriterionTypeStaticThresholdCriterion + objectMap := make(map[string]interface{}) + if mc.Operator != nil { + objectMap["operator"] = mc.Operator + } + if mc.Threshold != nil { + objectMap["threshold"] = mc.Threshold + } + if mc.Name != nil { + objectMap["name"] = mc.Name + } + if mc.MetricName != nil { + objectMap["metricName"] = mc.MetricName + } + if mc.MetricNamespace != nil { + objectMap["metricNamespace"] = mc.MetricNamespace + } + if mc.TimeAggregation != nil { + objectMap["timeAggregation"] = mc.TimeAggregation + } + if mc.Dimensions != nil { + objectMap["dimensions"] = mc.Dimensions + } + if mc.CriterionType != "" { + objectMap["criterionType"] = mc.CriterionType + } + for k, v := range mc.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// AsMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. +func (mc MetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) { + return &mc, true +} + +// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. +func (mc MetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) { + return nil, false +} + +// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. 
+func (mc MetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) { + return nil, false +} + +// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria. +func (mc MetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) { + return &mc, true +} + +// UnmarshalJSON is the custom unmarshaler for MetricCriteria struct. +func (mc *MetricCriteria) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "operator": + if v != nil { + var operator interface{} + err = json.Unmarshal(*v, &operator) + if err != nil { + return err + } + mc.Operator = operator + } + case "threshold": + if v != nil { + var threshold float64 + err = json.Unmarshal(*v, &threshold) + if err != nil { + return err + } + mc.Threshold = &threshold + } + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if mc.AdditionalProperties == nil { + mc.AdditionalProperties = make(map[string]interface{}) + } + mc.AdditionalProperties[k] = additionalProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mc.Name = &name + } + case "metricName": + if v != nil { + var metricName string + err = json.Unmarshal(*v, &metricName) + if err != nil { + return err + } + mc.MetricName = &metricName + } + case "metricNamespace": + if v != nil { + var metricNamespace string + err = json.Unmarshal(*v, &metricNamespace) + if err != nil { + return err + } + mc.MetricNamespace = &metricNamespace + } + case "timeAggregation": + if v != nil { + var timeAggregation interface{} + err = json.Unmarshal(*v, &timeAggregation) + if err != nil { + return err + } + mc.TimeAggregation = timeAggregation + } + case "dimensions": + if v != nil { + var dimensions []MetricDimension + err = json.Unmarshal(*v, &dimensions) + if err != nil { + return err + } + mc.Dimensions = &dimensions + } + case "criterionType": + if v != nil { + var criterionType CriterionType + err = json.Unmarshal(*v, &criterionType) + if err != nil { + return err + } + mc.CriterionType = criterionType + } + } + } + + return nil +} + +// MetricDefinition metric definition class specifies the metadata for a metric. +type MetricDefinition struct { + // IsDimensionRequired - Flag to indicate whether the dimension is required. + IsDimensionRequired *bool `json:"isDimensionRequired,omitempty"` + // ResourceID - the resource identifier of the resource that emitted the metric. + ResourceID *string `json:"resourceId,omitempty"` + // Namespace - the namespace the metric belongs to. + Namespace *string `json:"namespace,omitempty"` + // Name - the name and the display name of the metric, i.e. it is a localizable string. + Name *LocalizableString `json:"name,omitempty"` + // Unit - the unit of the metric. Possible values include: 'UnitCount', 'UnitBytes', 'UnitSeconds', 'UnitCountPerSecond', 'UnitBytesPerSecond', 'UnitPercent', 'UnitMilliSeconds', 'UnitByteSeconds', 'UnitUnspecified', 'UnitCores', 'UnitMilliCores', 'UnitNanoCores', 'UnitBitsPerSecond' + Unit Unit `json:"unit,omitempty"` + // PrimaryAggregationType - the primary aggregation type value defining how to use the values for display. 
Possible values include: 'None', 'Average', 'Count', 'Minimum', 'Maximum', 'Total' + PrimaryAggregationType AggregationType `json:"primaryAggregationType,omitempty"` + // SupportedAggregationTypes - the collection of what aggregation types are supported. + SupportedAggregationTypes *[]AggregationType `json:"supportedAggregationTypes,omitempty"` + // MetricAvailabilities - the collection of what aggregation intervals are available to be queried. + MetricAvailabilities *[]MetricAvailability `json:"metricAvailabilities,omitempty"` + // ID - the resource identifier of the metric definition. + ID *string `json:"id,omitempty"` + // Dimensions - the name and the display name of the dimension, i.e. it is a localizable string. + Dimensions *[]LocalizableString `json:"dimensions,omitempty"` +} + +// MetricDefinitionCollection represents collection of metric definitions. +type MetricDefinitionCollection struct { + autorest.Response `json:"-"` + // Value - the values for the metric definitions. + Value *[]MetricDefinition `json:"value,omitempty"` +} + +// MetricDimension specifies a metric dimension. +type MetricDimension struct { + // Name - Name of the dimension. + Name *string `json:"name,omitempty"` + // Operator - the dimension operator. Only 'Include' and 'Exclude' are supported + Operator *string `json:"operator,omitempty"` + // Values - list of dimension values. + Values *[]string `json:"values,omitempty"` +} + +// MetricNamespace metric namespace class specifies the metadata for a metric namespace. +type MetricNamespace struct { + // ID - The ID of the metricNamespace. + ID *string `json:"id,omitempty"` + // Type - The type of the namespace. + Type *string `json:"type,omitempty"` + // Name - The name of the namespace. + Name *string `json:"name,omitempty"` + // Properties - Properties which include the fully qualified namespace name. + Properties *MetricNamespaceName `json:"properties,omitempty"` +} + +// MetricNamespaceCollection represents collection of metric namespaces. +type MetricNamespaceCollection struct { + autorest.Response `json:"-"` + // Value - The values for the metric namespaces. + Value *[]MetricNamespace `json:"value,omitempty"` +} + +// MetricNamespaceName the fully qualified metric namespace name. +type MetricNamespaceName struct { + // MetricNamespaceName - The metric namespace name. + MetricNamespaceName *string `json:"metricNamespaceName,omitempty"` +} + +// MetricSettings part of MultiTenantDiagnosticSettings. Specifies the settings for a particular metric. +type MetricSettings struct { + // TimeGrain - the timegrain of the metric in ISO8601 format. + TimeGrain *string `json:"timeGrain,omitempty"` + // Category - Name of a Diagnostic Metric category for a resource type this setting is applied to. To obtain the list of Diagnostic metric categories for a resource, first perform a GET diagnostic settings operation. + Category *string `json:"category,omitempty"` + // Enabled - a value indicating whether this category is enabled. + Enabled *bool `json:"enabled,omitempty"` + // RetentionPolicy - the retention policy for this category. + RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"` +} + +// MetricSingleDimension the metric dimension name and value. +type MetricSingleDimension struct { + // Name - Name of the dimension. + Name *string `json:"name,omitempty"` + // Value - Value of the dimension. + Value *string `json:"value,omitempty"` +} + +// MetricTrigger the trigger that results in a scaling action. 
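+// A hedged construction sketch for a typical CPU-based autoscale trigger
+// (the resource URI and the numbers are placeholders, not defaults):
+//
+//	metric, uri := "Percentage CPU", "/subscriptions/<sub>/.../virtualMachineScaleSets/<vmss>"
+//	grain, window := "PT1M", "PT5M"
+//	threshold := 75.0
+//	trigger := MetricTrigger{
+//		MetricName:        &metric,
+//		MetricResourceURI: &uri,
+//		TimeGrain:         &grain,
+//		Statistic:         MetricStatisticTypeAverage,
+//		TimeWindow:        &window,
+//		TimeAggregation:   TimeAggregationTypeAverage,
+//		Operator:          GreaterThan,
+//		Threshold:         &threshold,
+//	}
+//	_ = trigger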
+type MetricTrigger struct { + // MetricName - the name of the metric that defines what the rule monitors. + MetricName *string `json:"metricName,omitempty"` + // MetricResourceURI - the resource identifier of the resource the rule monitors. + MetricResourceURI *string `json:"metricResourceUri,omitempty"` + // TimeGrain - the granularity of metrics the rule monitors. Must be one of the predefined values returned from metric definitions for the metric. Must be between 12 hours and 1 minute. + TimeGrain *string `json:"timeGrain,omitempty"` + // Statistic - the metric statistic type. How the metrics from multiple instances are combined. Possible values include: 'MetricStatisticTypeAverage', 'MetricStatisticTypeMin', 'MetricStatisticTypeMax', 'MetricStatisticTypeSum' + Statistic MetricStatisticType `json:"statistic,omitempty"` + // TimeWindow - the range of time in which instance data is collected. This value must be greater than the delay in metric collection, which can vary from resource-to-resource. Must be between 12 hours and 5 minutes. + TimeWindow *string `json:"timeWindow,omitempty"` + // TimeAggregation - time aggregation type. How the data that is collected should be combined over time. The default value is Average. Possible values include: 'TimeAggregationTypeAverage', 'TimeAggregationTypeMinimum', 'TimeAggregationTypeMaximum', 'TimeAggregationTypeTotal', 'TimeAggregationTypeCount', 'TimeAggregationTypeLast' + TimeAggregation TimeAggregationType `json:"timeAggregation,omitempty"` + // Operator - the operator that is used to compare the metric data and the threshold. Possible values include: 'Equals', 'NotEquals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual' + Operator ComparisonOperationType `json:"operator,omitempty"` + // Threshold - the threshold of the metric that triggers the scale action. + Threshold *float64 `json:"threshold,omitempty"` +} + +// MetricValue represents a metric value. +type MetricValue struct { + // TimeStamp - the timestamp for the metric value in ISO 8601 format. + TimeStamp *date.Time `json:"timeStamp,omitempty"` + // Average - the average value in the time range. + Average *float64 `json:"average,omitempty"` + // Minimum - the least value in the time range. + Minimum *float64 `json:"minimum,omitempty"` + // Maximum - the greatest value in the time range. + Maximum *float64 `json:"maximum,omitempty"` + // Total - the sum of all of the values in the time range. + Total *float64 `json:"total,omitempty"` + // Count - the number of samples in the time range. Can be used to determine the number of values that contributed to the average value. + Count *float64 `json:"count,omitempty"` +} + +// BasicMultiMetricCriteria the types of conditions for a multi resource alert. +type BasicMultiMetricCriteria interface { + AsMetricCriteria() (*MetricCriteria, bool) + AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) + AsMultiMetricCriteria() (*MultiMetricCriteria, bool) +} + +// MultiMetricCriteria the types of conditions for a multi resource alert. +type MultiMetricCriteria struct { + // AdditionalProperties - Unmatched properties from the message are deserialized this collection + AdditionalProperties map[string]interface{} `json:""` + // Name - Name of the criteria. + Name *string `json:"name,omitempty"` + // MetricName - Name of the metric. + MetricName *string `json:"metricName,omitempty"` + // MetricNamespace - Namespace of the metric. 
+ MetricNamespace *string `json:"metricNamespace,omitempty"` + // TimeAggregation - the criteria time aggregation types. + TimeAggregation interface{} `json:"timeAggregation,omitempty"` + // Dimensions - List of dimension conditions. + Dimensions *[]MetricDimension `json:"dimensions,omitempty"` + // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion' + CriterionType CriterionType `json:"criterionType,omitempty"` +} + +func unmarshalBasicMultiMetricCriteria(body []byte) (BasicMultiMetricCriteria, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["criterionType"] { + case string(CriterionTypeStaticThresholdCriterion): + var mc MetricCriteria + err := json.Unmarshal(body, &mc) + return mc, err + case string(CriterionTypeDynamicThresholdCriterion): + var dmc DynamicMetricCriteria + err := json.Unmarshal(body, &dmc) + return dmc, err + default: + var mmc MultiMetricCriteria + err := json.Unmarshal(body, &mmc) + return mmc, err + } +} +func unmarshalBasicMultiMetricCriteriaArray(body []byte) ([]BasicMultiMetricCriteria, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + mmcArray := make([]BasicMultiMetricCriteria, len(rawMessages)) + + for index, rawMessage := range rawMessages { + mmc, err := unmarshalBasicMultiMetricCriteria(*rawMessage) + if err != nil { + return nil, err + } + mmcArray[index] = mmc + } + return mmcArray, nil +} + +// MarshalJSON is the custom marshaler for MultiMetricCriteria. +func (mmc MultiMetricCriteria) MarshalJSON() ([]byte, error) { + mmc.CriterionType = CriterionTypeMultiMetricCriteria + objectMap := make(map[string]interface{}) + if mmc.Name != nil { + objectMap["name"] = mmc.Name + } + if mmc.MetricName != nil { + objectMap["metricName"] = mmc.MetricName + } + if mmc.MetricNamespace != nil { + objectMap["metricNamespace"] = mmc.MetricNamespace + } + if mmc.TimeAggregation != nil { + objectMap["timeAggregation"] = mmc.TimeAggregation + } + if mmc.Dimensions != nil { + objectMap["dimensions"] = mmc.Dimensions + } + if mmc.CriterionType != "" { + objectMap["criterionType"] = mmc.CriterionType + } + for k, v := range mmc.AdditionalProperties { + objectMap[k] = v + } + return json.Marshal(objectMap) +} + +// AsMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. +func (mmc MultiMetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) { + return nil, false +} + +// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. +func (mmc MultiMetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) { + return nil, false +} + +// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. +func (mmc MultiMetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) { + return &mmc, true +} + +// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria. +func (mmc MultiMetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) { + return &mmc, true +} + +// UnmarshalJSON is the custom unmarshaler for MultiMetricCriteria struct. 
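+// For the base type, any unrecognized key is collected into
+// AdditionalProperties; concrete subtypes are produced instead by
+// unmarshalBasicMultiMetricCriteria above, which switches on the
+// `criterionType` discriminator. In-package sketch (`raw` is an assumed
+// []byte holding one allOf element):
+//
+//	crit, err := unmarshalBasicMultiMetricCriteria(raw)
+//	if err == nil {
+//		if mc, ok := crit.AsMetricCriteria(); ok {
+//			_ = mc.Threshold // static threshold criterion
+//		}
+//	}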
+func (mmc *MultiMetricCriteria) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + default: + if v != nil { + var additionalProperties interface{} + err = json.Unmarshal(*v, &additionalProperties) + if err != nil { + return err + } + if mmc.AdditionalProperties == nil { + mmc.AdditionalProperties = make(map[string]interface{}) + } + mmc.AdditionalProperties[k] = additionalProperties + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + mmc.Name = &name + } + case "metricName": + if v != nil { + var metricName string + err = json.Unmarshal(*v, &metricName) + if err != nil { + return err + } + mmc.MetricName = &metricName + } + case "metricNamespace": + if v != nil { + var metricNamespace string + err = json.Unmarshal(*v, &metricNamespace) + if err != nil { + return err + } + mmc.MetricNamespace = &metricNamespace + } + case "timeAggregation": + if v != nil { + var timeAggregation interface{} + err = json.Unmarshal(*v, &timeAggregation) + if err != nil { + return err + } + mmc.TimeAggregation = timeAggregation + } + case "dimensions": + if v != nil { + var dimensions []MetricDimension + err = json.Unmarshal(*v, &dimensions) + if err != nil { + return err + } + mmc.Dimensions = &dimensions + } + case "criterionType": + if v != nil { + var criterionType CriterionType + err = json.Unmarshal(*v, &criterionType) + if err != nil { + return err + } + mmc.CriterionType = criterionType + } + } + } + + return nil +} + +// Operation microsoft Insights API operation definition. +type Operation struct { + // Name - Operation name: {provider}/{resource}/{operation} + Name *string `json:"name,omitempty"` + // Display - Display metadata associated with the operation. + Display *OperationDisplay `json:"display,omitempty"` +} + +// OperationDisplay display metadata associated with the operation. +type OperationDisplay struct { + // Provider - Service provider: Microsoft.Insights + Provider *string `json:"provider,omitempty"` + // Resource - Resource on which the operation is performed: AlertRules, Autoscale, etc. + Resource *string `json:"resource,omitempty"` + // Operation - Operation type: Read, write, delete, etc. + Operation *string `json:"operation,omitempty"` +} + +// OperationListResult result of the request to list Microsoft.Insights operations. It contains a list of +// operations and a URL link to get the next set of results. +type OperationListResult struct { + autorest.Response `json:"-"` + // Value - List of operations supported by the Microsoft.Insights provider. + Value *[]Operation `json:"value,omitempty"` + // NextLink - URL to get the next set of operation list results if there are any. 
+ NextLink *string `json:"nextLink,omitempty"` +} + +// ProxyOnlyResource a proxy only azure resource object +type ProxyOnlyResource struct { + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` +} + +// ProxyResource an azure resource object +type ProxyResource struct { + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` +} + +// Recurrence the repeating times at which this profile begins. This element is not used if the FixedDate +// element is used. +type Recurrence struct { + // Frequency - the recurrence frequency. How often the schedule profile should take effect. This value must be Week, meaning each week will have the same set of profiles. For example, to set a daily schedule, set **schedule** to every day of the week. The frequency property specifies that the schedule is repeated weekly. Possible values include: 'RecurrenceFrequencyNone', 'RecurrenceFrequencySecond', 'RecurrenceFrequencyMinute', 'RecurrenceFrequencyHour', 'RecurrenceFrequencyDay', 'RecurrenceFrequencyWeek', 'RecurrenceFrequencyMonth', 'RecurrenceFrequencyYear' + Frequency RecurrenceFrequency `json:"frequency,omitempty"` + // Schedule - the scheduling constraints for when the profile begins. + Schedule *RecurrentSchedule `json:"schedule,omitempty"` +} + +// RecurrentSchedule the scheduling constraints for when the profile begins. +type RecurrentSchedule struct { + // TimeZone - the timezone for the hours of the profile. Some examples of valid time zones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. 
Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time + TimeZone *string `json:"timeZone,omitempty"` + // Days - the collection of days that the profile takes effect on. Possible values are Sunday through Saturday. + Days *[]string `json:"days,omitempty"` + // Hours - A collection of hours that the profile takes effect on. Values supported are 0 to 23 on the 24-hour clock (AM/PM times are not supported). + Hours *[]int32 `json:"hours,omitempty"` + // Minutes - A collection of minutes at which the profile takes effect at. + Minutes *[]int32 `json:"minutes,omitempty"` +} + +// Resource an azure resource object +type Resource struct { + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// Response the response to a metrics query. +type Response struct { + autorest.Response `json:"-"` + // Cost - The integer value representing the cost of the query, for data case. + Cost *float64 `json:"cost,omitempty"` + // Timespan - The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested. + Timespan *string `json:"timespan,omitempty"` + // Interval - The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made. + Interval *string `json:"interval,omitempty"` + // Namespace - The namespace of the metrics been queried + Namespace *string `json:"namespace,omitempty"` + // Resourceregion - The region of the resource been queried for metrics. + Resourceregion *string `json:"resourceregion,omitempty"` + // Value - the value of the collection. 
+ Value *[]Metric `json:"value,omitempty"` +} + +// ResponseWithError an error response from the API. +type ResponseWithError struct { + // Error - Error information. + Error *Error `json:"error,omitempty"` +} + +// RetentionPolicy specifies the retention policy for the log. +type RetentionPolicy struct { + // Enabled - a value indicating whether the retention policy is enabled. + Enabled *bool `json:"enabled,omitempty"` + // Days - the number of days for the retention in days. A value of 0 will retain the events indefinitely. + Days *int32 `json:"days,omitempty"` +} + +// BasicRuleAction the action that is performed when the alert rule becomes active, and when an alert condition is +// resolved. +type BasicRuleAction interface { + AsRuleEmailAction() (*RuleEmailAction, bool) + AsRuleWebhookAction() (*RuleWebhookAction, bool) + AsRuleAction() (*RuleAction, bool) +} + +// RuleAction the action that is performed when the alert rule becomes active, and when an alert condition is +// resolved. +type RuleAction struct { + // OdataType - Possible values include: 'OdataTypeRuleAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction' + OdataType OdataTypeBasicRuleAction `json:"odata.type,omitempty"` +} + +func unmarshalBasicRuleAction(body []byte) (BasicRuleAction, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["odata.type"] { + case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction): + var rea RuleEmailAction + err := json.Unmarshal(body, &rea) + return rea, err + case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction): + var rwa RuleWebhookAction + err := json.Unmarshal(body, &rwa) + return rwa, err + default: + var ra RuleAction + err := json.Unmarshal(body, &ra) + return ra, err + } +} +func unmarshalBasicRuleActionArray(body []byte) ([]BasicRuleAction, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + raArray := make([]BasicRuleAction, len(rawMessages)) + + for index, rawMessage := range rawMessages { + ra, err := unmarshalBasicRuleAction(*rawMessage) + if err != nil { + return nil, err + } + raArray[index] = ra + } + return raArray, nil +} + +// MarshalJSON is the custom marshaler for RuleAction. +func (ra RuleAction) MarshalJSON() ([]byte, error) { + ra.OdataType = OdataTypeRuleAction + objectMap := make(map[string]interface{}) + if ra.OdataType != "" { + objectMap["odata.type"] = ra.OdataType + } + return json.Marshal(objectMap) +} + +// AsRuleEmailAction is the BasicRuleAction implementation for RuleAction. +func (ra RuleAction) AsRuleEmailAction() (*RuleEmailAction, bool) { + return nil, false +} + +// AsRuleWebhookAction is the BasicRuleAction implementation for RuleAction. +func (ra RuleAction) AsRuleWebhookAction() (*RuleWebhookAction, bool) { + return nil, false +} + +// AsRuleAction is the BasicRuleAction implementation for RuleAction. +func (ra RuleAction) AsRuleAction() (*RuleAction, bool) { + return &ra, true +} + +// AsBasicRuleAction is the BasicRuleAction implementation for RuleAction. +func (ra RuleAction) AsBasicRuleAction() (BasicRuleAction, bool) { + return &ra, true +} + +// BasicRuleCondition the condition that results in the alert rule being activated. 
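+//
+// NOTE (editor's sketch, not part of the generated code): AutoRest models
+// this polymorphic type as a closed interface whose As* methods stand in
+// for a type switch; a caller typically probes for the concrete condition
+// it cares about:
+//
+//    cond, err := unmarshalBasicRuleCondition(raw)
+//    if err == nil {
+//        if trc, ok := cond.AsThresholdRuleCondition(); ok {
+//            _ = trc.Threshold // concrete fields are now reachable
+//        }
+//    }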
+type BasicRuleCondition interface {
+ AsThresholdRuleCondition() (*ThresholdRuleCondition, bool)
+ AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool)
+ AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool)
+ AsRuleCondition() (*RuleCondition, bool)
+}
+
+// RuleCondition the condition that results in the alert rule being activated.
+type RuleCondition struct {
+ // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
+ DataSource BasicRuleDataSource `json:"dataSource,omitempty"`
+ // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition'
+ OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"`
+}
+
+func unmarshalBasicRuleCondition(body []byte) (BasicRuleCondition, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["odata.type"] {
+ case string(OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition):
+ var trc ThresholdRuleCondition
+ err := json.Unmarshal(body, &trc)
+ return trc, err
+ case string(OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition):
+ var ltrc LocationThresholdRuleCondition
+ err := json.Unmarshal(body, &ltrc)
+ return ltrc, err
+ case string(OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition):
+ var merc ManagementEventRuleCondition
+ err := json.Unmarshal(body, &merc)
+ return merc, err
+ default:
+ var rc RuleCondition
+ err := json.Unmarshal(body, &rc)
+ return rc, err
+ }
+}
+func unmarshalBasicRuleConditionArray(body []byte) ([]BasicRuleCondition, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ rcArray := make([]BasicRuleCondition, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ rc, err := unmarshalBasicRuleCondition(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ rcArray[index] = rc
+ }
+ return rcArray, nil
+}
+
+// MarshalJSON is the custom marshaler for RuleCondition.
+func (rc RuleCondition) MarshalJSON() ([]byte, error) {
+ rc.OdataType = OdataTypeRuleCondition
+ objectMap := make(map[string]interface{})
+ objectMap["dataSource"] = rc.DataSource
+ if rc.OdataType != "" {
+ objectMap["odata.type"] = rc.OdataType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsThresholdRuleCondition is the BasicRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) {
+ return nil, false
+}
+
+// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) {
+ return nil, false
+}
+
+// AsManagementEventRuleCondition is the BasicRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) {
+ return nil, false
+}
+
+// AsRuleCondition is the BasicRuleCondition implementation for RuleCondition.
+func (rc RuleCondition) AsRuleCondition() (*RuleCondition, bool) {
+ return &rc, true
+}
+
+// AsBasicRuleCondition is the BasicRuleCondition implementation for RuleCondition.
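+//
+// NOTE (editor's sketch, not part of the generated code): MarshalJSON above
+// deliberately uses a value receiver - assigning rc.OdataType mutates only
+// the method's local copy, so the discriminator is always present in the
+// emitted JSON without side effects on the caller's struct:
+//
+//    b, _ := json.Marshal(RuleCondition{})
+//    // b carries "odata.type" set to the RuleCondition discriminator even
+//    // though the zero value never set it.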
+func (rc RuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) { + return &rc, true +} + +// UnmarshalJSON is the custom unmarshaler for RuleCondition struct. +func (rc *RuleCondition) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "dataSource": + if v != nil { + dataSource, err := unmarshalBasicRuleDataSource(*v) + if err != nil { + return err + } + rc.DataSource = dataSource + } + case "odata.type": + if v != nil { + var odataType OdataTypeBasicRuleCondition + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + rc.OdataType = odataType + } + } + } + + return nil +} + +// BasicRuleDataSource the resource from which the rule collects its data. +type BasicRuleDataSource interface { + AsRuleMetricDataSource() (*RuleMetricDataSource, bool) + AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) + AsRuleDataSource() (*RuleDataSource, bool) +} + +// RuleDataSource the resource from which the rule collects its data. +type RuleDataSource struct { + // ResourceURI - the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule. + ResourceURI *string `json:"resourceUri,omitempty"` + // OdataType - Possible values include: 'OdataTypeRuleDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource' + OdataType OdataType `json:"odata.type,omitempty"` +} + +func unmarshalBasicRuleDataSource(body []byte) (BasicRuleDataSource, error) { + var m map[string]interface{} + err := json.Unmarshal(body, &m) + if err != nil { + return nil, err + } + + switch m["odata.type"] { + case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource): + var rmds RuleMetricDataSource + err := json.Unmarshal(body, &rmds) + return rmds, err + case string(OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource): + var rmeds RuleManagementEventDataSource + err := json.Unmarshal(body, &rmeds) + return rmeds, err + default: + var rds RuleDataSource + err := json.Unmarshal(body, &rds) + return rds, err + } +} +func unmarshalBasicRuleDataSourceArray(body []byte) ([]BasicRuleDataSource, error) { + var rawMessages []*json.RawMessage + err := json.Unmarshal(body, &rawMessages) + if err != nil { + return nil, err + } + + rdsArray := make([]BasicRuleDataSource, len(rawMessages)) + + for index, rawMessage := range rawMessages { + rds, err := unmarshalBasicRuleDataSource(*rawMessage) + if err != nil { + return nil, err + } + rdsArray[index] = rds + } + return rdsArray, nil +} + +// MarshalJSON is the custom marshaler for RuleDataSource. +func (rds RuleDataSource) MarshalJSON() ([]byte, error) { + rds.OdataType = OdataTypeRuleDataSource + objectMap := make(map[string]interface{}) + if rds.ResourceURI != nil { + objectMap["resourceUri"] = rds.ResourceURI + } + if rds.OdataType != "" { + objectMap["odata.type"] = rds.OdataType + } + return json.Marshal(objectMap) +} + +// AsRuleMetricDataSource is the BasicRuleDataSource implementation for RuleDataSource. +func (rds RuleDataSource) AsRuleMetricDataSource() (*RuleMetricDataSource, bool) { + return nil, false +} + +// AsRuleManagementEventDataSource is the BasicRuleDataSource implementation for RuleDataSource. 
+func (rds RuleDataSource) AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) { + return nil, false +} + +// AsRuleDataSource is the BasicRuleDataSource implementation for RuleDataSource. +func (rds RuleDataSource) AsRuleDataSource() (*RuleDataSource, bool) { + return &rds, true +} + +// AsBasicRuleDataSource is the BasicRuleDataSource implementation for RuleDataSource. +func (rds RuleDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) { + return &rds, true +} + +// RuleEmailAction specifies the action to send email when the rule condition is evaluated. The +// discriminator is always RuleEmailAction in this case. +type RuleEmailAction struct { + // SendToServiceOwners - Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated. + SendToServiceOwners *bool `json:"sendToServiceOwners,omitempty"` + // CustomEmails - the list of administrator's custom email addresses to notify of the activation of the alert. + CustomEmails *[]string `json:"customEmails,omitempty"` + // OdataType - Possible values include: 'OdataTypeRuleAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction' + OdataType OdataTypeBasicRuleAction `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RuleEmailAction. +func (rea RuleEmailAction) MarshalJSON() ([]byte, error) { + rea.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction + objectMap := make(map[string]interface{}) + if rea.SendToServiceOwners != nil { + objectMap["sendToServiceOwners"] = rea.SendToServiceOwners + } + if rea.CustomEmails != nil { + objectMap["customEmails"] = rea.CustomEmails + } + if rea.OdataType != "" { + objectMap["odata.type"] = rea.OdataType + } + return json.Marshal(objectMap) +} + +// AsRuleEmailAction is the BasicRuleAction implementation for RuleEmailAction. +func (rea RuleEmailAction) AsRuleEmailAction() (*RuleEmailAction, bool) { + return &rea, true +} + +// AsRuleWebhookAction is the BasicRuleAction implementation for RuleEmailAction. +func (rea RuleEmailAction) AsRuleWebhookAction() (*RuleWebhookAction, bool) { + return nil, false +} + +// AsRuleAction is the BasicRuleAction implementation for RuleEmailAction. +func (rea RuleEmailAction) AsRuleAction() (*RuleAction, bool) { + return nil, false +} + +// AsBasicRuleAction is the BasicRuleAction implementation for RuleEmailAction. +func (rea RuleEmailAction) AsBasicRuleAction() (BasicRuleAction, bool) { + return &rea, true +} + +// RuleManagementEventClaimsDataSource the claims for a rule management event data source. +type RuleManagementEventClaimsDataSource struct { + // EmailAddress - the email address. + EmailAddress *string `json:"emailAddress,omitempty"` +} + +// RuleManagementEventDataSource a rule management event data source. The discriminator fields is always +// RuleManagementEventDataSource in this case. +type RuleManagementEventDataSource struct { + // EventName - the event name. + EventName *string `json:"eventName,omitempty"` + // EventSource - the event source. + EventSource *string `json:"eventSource,omitempty"` + // Level - the level. + Level *string `json:"level,omitempty"` + // OperationName - The name of the operation that should be checked for. If no name is provided, any operation will match. + OperationName *string `json:"operationName,omitempty"` + // ResourceGroupName - the resource group name. 
+ ResourceGroupName *string `json:"resourceGroupName,omitempty"` + // ResourceProviderName - the resource provider name. + ResourceProviderName *string `json:"resourceProviderName,omitempty"` + // Status - The status of the operation that should be checked for. If no status is provided, any status will match. + Status *string `json:"status,omitempty"` + // SubStatus - the substatus. + SubStatus *string `json:"subStatus,omitempty"` + // Claims - the claims. + Claims *RuleManagementEventClaimsDataSource `json:"claims,omitempty"` + // ResourceURI - the resource identifier of the resource the rule monitors. **NOTE**: this property cannot be updated for an existing rule. + ResourceURI *string `json:"resourceUri,omitempty"` + // OdataType - Possible values include: 'OdataTypeRuleDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource' + OdataType OdataType `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RuleManagementEventDataSource. +func (rmeds RuleManagementEventDataSource) MarshalJSON() ([]byte, error) { + rmeds.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource + objectMap := make(map[string]interface{}) + if rmeds.EventName != nil { + objectMap["eventName"] = rmeds.EventName + } + if rmeds.EventSource != nil { + objectMap["eventSource"] = rmeds.EventSource + } + if rmeds.Level != nil { + objectMap["level"] = rmeds.Level + } + if rmeds.OperationName != nil { + objectMap["operationName"] = rmeds.OperationName + } + if rmeds.ResourceGroupName != nil { + objectMap["resourceGroupName"] = rmeds.ResourceGroupName + } + if rmeds.ResourceProviderName != nil { + objectMap["resourceProviderName"] = rmeds.ResourceProviderName + } + if rmeds.Status != nil { + objectMap["status"] = rmeds.Status + } + if rmeds.SubStatus != nil { + objectMap["subStatus"] = rmeds.SubStatus + } + if rmeds.Claims != nil { + objectMap["claims"] = rmeds.Claims + } + if rmeds.ResourceURI != nil { + objectMap["resourceUri"] = rmeds.ResourceURI + } + if rmeds.OdataType != "" { + objectMap["odata.type"] = rmeds.OdataType + } + return json.Marshal(objectMap) +} + +// AsRuleMetricDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. +func (rmeds RuleManagementEventDataSource) AsRuleMetricDataSource() (*RuleMetricDataSource, bool) { + return nil, false +} + +// AsRuleManagementEventDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. +func (rmeds RuleManagementEventDataSource) AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) { + return &rmeds, true +} + +// AsRuleDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. +func (rmeds RuleManagementEventDataSource) AsRuleDataSource() (*RuleDataSource, bool) { + return nil, false +} + +// AsBasicRuleDataSource is the BasicRuleDataSource implementation for RuleManagementEventDataSource. +func (rmeds RuleManagementEventDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) { + return &rmeds, true +} + +// RuleMetricDataSource a rule metric data source. The discriminator value is always RuleMetricDataSource +// in this case. +type RuleMetricDataSource struct { + // MetricName - the name of the metric that defines what the rule monitors. + MetricName *string `json:"metricName,omitempty"` + // ResourceURI - the resource identifier of the resource the rule monitors. 
**NOTE**: this property cannot be updated for an existing rule. + ResourceURI *string `json:"resourceUri,omitempty"` + // OdataType - Possible values include: 'OdataTypeRuleDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleManagementEventDataSource' + OdataType OdataType `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RuleMetricDataSource. +func (rmds RuleMetricDataSource) MarshalJSON() ([]byte, error) { + rmds.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleMetricDataSource + objectMap := make(map[string]interface{}) + if rmds.MetricName != nil { + objectMap["metricName"] = rmds.MetricName + } + if rmds.ResourceURI != nil { + objectMap["resourceUri"] = rmds.ResourceURI + } + if rmds.OdataType != "" { + objectMap["odata.type"] = rmds.OdataType + } + return json.Marshal(objectMap) +} + +// AsRuleMetricDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. +func (rmds RuleMetricDataSource) AsRuleMetricDataSource() (*RuleMetricDataSource, bool) { + return &rmds, true +} + +// AsRuleManagementEventDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. +func (rmds RuleMetricDataSource) AsRuleManagementEventDataSource() (*RuleManagementEventDataSource, bool) { + return nil, false +} + +// AsRuleDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. +func (rmds RuleMetricDataSource) AsRuleDataSource() (*RuleDataSource, bool) { + return nil, false +} + +// AsBasicRuleDataSource is the BasicRuleDataSource implementation for RuleMetricDataSource. +func (rmds RuleMetricDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) { + return &rmds, true +} + +// RuleWebhookAction specifies the action to post to service when the rule condition is evaluated. The +// discriminator is always RuleWebhookAction in this case. +type RuleWebhookAction struct { + // ServiceURI - the service uri to Post the notification when the alert activates or resolves. + ServiceURI *string `json:"serviceUri,omitempty"` + // Properties - the dictionary of custom properties to include with the post operation. These data are appended to the webhook payload. + Properties map[string]*string `json:"properties"` + // OdataType - Possible values include: 'OdataTypeRuleAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleEmailAction', 'OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction' + OdataType OdataTypeBasicRuleAction `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for RuleWebhookAction. +func (rwa RuleWebhookAction) MarshalJSON() ([]byte, error) { + rwa.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsRuleWebhookAction + objectMap := make(map[string]interface{}) + if rwa.ServiceURI != nil { + objectMap["serviceUri"] = rwa.ServiceURI + } + if rwa.Properties != nil { + objectMap["properties"] = rwa.Properties + } + if rwa.OdataType != "" { + objectMap["odata.type"] = rwa.OdataType + } + return json.Marshal(objectMap) +} + +// AsRuleEmailAction is the BasicRuleAction implementation for RuleWebhookAction. +func (rwa RuleWebhookAction) AsRuleEmailAction() (*RuleEmailAction, bool) { + return nil, false +} + +// AsRuleWebhookAction is the BasicRuleAction implementation for RuleWebhookAction. +func (rwa RuleWebhookAction) AsRuleWebhookAction() (*RuleWebhookAction, bool) { + return &rwa, true +} + +// AsRuleAction is the BasicRuleAction implementation for RuleWebhookAction. 
+func (rwa RuleWebhookAction) AsRuleAction() (*RuleAction, bool) { + return nil, false +} + +// AsBasicRuleAction is the BasicRuleAction implementation for RuleWebhookAction. +func (rwa RuleWebhookAction) AsBasicRuleAction() (BasicRuleAction, bool) { + return &rwa, true +} + +// ScaleAction the parameters for the scaling action. +type ScaleAction struct { + // Direction - the scale direction. Whether the scaling action increases or decreases the number of instances. Possible values include: 'ScaleDirectionNone', 'ScaleDirectionIncrease', 'ScaleDirectionDecrease' + Direction ScaleDirection `json:"direction,omitempty"` + // Type - the type of action that should occur when the scale rule fires. Possible values include: 'ChangeCount', 'PercentChangeCount', 'ExactCount' + Type ScaleType `json:"type,omitempty"` + // Value - the number of instances that are involved in the scaling action. This value must be 1 or greater. The default value is 1. + Value *string `json:"value,omitempty"` + // Cooldown - the amount of time to wait since the last scaling action before this action occurs. It must be between 1 week and 1 minute in ISO 8601 format. + Cooldown *string `json:"cooldown,omitempty"` +} + +// ScaleCapacity the number of instances that can be used during this profile. +type ScaleCapacity struct { + // Minimum - the minimum number of instances for the resource. + Minimum *string `json:"minimum,omitempty"` + // Maximum - the maximum number of instances for the resource. The actual maximum number of instances is limited by the cores that are available in the subscription. + Maximum *string `json:"maximum,omitempty"` + // Default - the number of instances that will be set if metrics are not available for evaluation. The default is only used if the current instance count is lower than the default. + Default *string `json:"default,omitempty"` +} + +// ScaleRule a rule that provide the triggers and parameters for the scaling action. +type ScaleRule struct { + // MetricTrigger - the trigger that results in a scaling action. + MetricTrigger *MetricTrigger `json:"metricTrigger,omitempty"` + // ScaleAction - the parameters for the scaling action. + ScaleAction *ScaleAction `json:"scaleAction,omitempty"` +} + +// Schedule defines how often to run the search and the time interval. +type Schedule struct { + // FrequencyInMinutes - frequency (in minutes) at which rule condition should be evaluated. + FrequencyInMinutes *int32 `json:"frequencyInMinutes,omitempty"` + // TimeWindowInMinutes - Time window for which data needs to be fetched for query (should be greater than or equal to frequencyInMinutes). + TimeWindowInMinutes *int32 `json:"timeWindowInMinutes,omitempty"` +} + +// SenderAuthorization the authorization used by the user who has performed the operation that led to this +// event. This captures the RBAC properties of the event. These usually include the 'action', 'role' and +// the 'scope' +type SenderAuthorization struct { + // Action - the permissible actions. For instance: microsoft.support/supporttickets/write + Action *string `json:"action,omitempty"` + // Role - the role of the user. For instance: Subscription Admin + Role *string `json:"role,omitempty"` + // Scope - the scope. + Scope *string `json:"scope,omitempty"` +} + +// SingleBaseline the baseline values for a single sensitivity value. +type SingleBaseline struct { + // Sensitivity - the sensitivity of the baseline. 
Possible values include: 'Low', 'Medium', 'High' + Sensitivity BaselineSensitivity `json:"sensitivity,omitempty"` + // LowThresholds - The low thresholds of the baseline. + LowThresholds *[]float64 `json:"lowThresholds,omitempty"` + // HighThresholds - The high thresholds of the baseline. + HighThresholds *[]float64 `json:"highThresholds,omitempty"` +} + +// SingleMetricBaseline the baseline results of a single metric. +type SingleMetricBaseline struct { + // ID - The metric baseline Id. + ID *string `json:"id,omitempty"` + // Type - The resource type of the metric baseline resource. + Type *string `json:"type,omitempty"` + // Name - The name of the metric for which the baselines were retrieved. + Name *string `json:"name,omitempty"` + // MetricBaselinesProperties - The metric baseline properties of the metric. + *MetricBaselinesProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for SingleMetricBaseline. +func (smb SingleMetricBaseline) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if smb.ID != nil { + objectMap["id"] = smb.ID + } + if smb.Type != nil { + objectMap["type"] = smb.Type + } + if smb.Name != nil { + objectMap["name"] = smb.Name + } + if smb.MetricBaselinesProperties != nil { + objectMap["properties"] = smb.MetricBaselinesProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for SingleMetricBaseline struct. +func (smb *SingleMetricBaseline) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + smb.ID = &ID + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + smb.Type = &typeVar + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + smb.Name = &name + } + case "properties": + if v != nil { + var metricBaselinesProperties MetricBaselinesProperties + err = json.Unmarshal(*v, &metricBaselinesProperties) + if err != nil { + return err + } + smb.MetricBaselinesProperties = &metricBaselinesProperties + } + } + } + + return nil +} + +// SmsReceiver an SMS receiver. +type SmsReceiver struct { + // Name - The name of the SMS receiver. Names must be unique across all receivers within an action group. + Name *string `json:"name,omitempty"` + // CountryCode - The country code of the SMS receiver. + CountryCode *string `json:"countryCode,omitempty"` + // PhoneNumber - The phone number of the SMS receiver. + PhoneNumber *string `json:"phoneNumber,omitempty"` + // Status - READ-ONLY; The status of the receiver. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled' + Status ReceiverStatus `json:"status,omitempty"` +} + +// Source specifies the log search query. +type Source struct { + // Query - Log search query. Required for action type - AlertingAction + Query *string `json:"query,omitempty"` + // AuthorizedResources - List of Resource referred into query + AuthorizedResources *[]string `json:"authorizedResources,omitempty"` + // DataSourceID - The resource uri over which log search query is to be run. + DataSourceID *string `json:"dataSourceId,omitempty"` + // QueryType - Set value to 'ResultCount'. 
Possible values include: 'ResultCount' + QueryType QueryType `json:"queryType,omitempty"` +} + +// ThresholdRuleCondition a rule condition based on a metric crossing a threshold. +type ThresholdRuleCondition struct { + // Operator - the operator used to compare the data and the threshold. Possible values include: 'ConditionOperatorGreaterThan', 'ConditionOperatorGreaterThanOrEqual', 'ConditionOperatorLessThan', 'ConditionOperatorLessThanOrEqual' + Operator ConditionOperator `json:"operator,omitempty"` + // Threshold - the threshold value that activates the alert. + Threshold *float64 `json:"threshold,omitempty"` + // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day. + WindowSize *string `json:"windowSize,omitempty"` + // TimeAggregation - the time aggregation operator. How the data that are collected should be combined over time. The default value is the PrimaryAggregationType of the Metric. Possible values include: 'TimeAggregationOperatorAverage', 'TimeAggregationOperatorMinimum', 'TimeAggregationOperatorMaximum', 'TimeAggregationOperatorTotal', 'TimeAggregationOperatorLast' + TimeAggregation TimeAggregationOperator `json:"timeAggregation,omitempty"` + // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource. + DataSource BasicRuleDataSource `json:"dataSource,omitempty"` + // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition' + OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"` +} + +// MarshalJSON is the custom marshaler for ThresholdRuleCondition. +func (trc ThresholdRuleCondition) MarshalJSON() ([]byte, error) { + trc.OdataType = OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition + objectMap := make(map[string]interface{}) + if trc.Operator != "" { + objectMap["operator"] = trc.Operator + } + if trc.Threshold != nil { + objectMap["threshold"] = trc.Threshold + } + if trc.WindowSize != nil { + objectMap["windowSize"] = trc.WindowSize + } + if trc.TimeAggregation != "" { + objectMap["timeAggregation"] = trc.TimeAggregation + } + objectMap["dataSource"] = trc.DataSource + if trc.OdataType != "" { + objectMap["odata.type"] = trc.OdataType + } + return json.Marshal(objectMap) +} + +// AsThresholdRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. +func (trc ThresholdRuleCondition) AsThresholdRuleCondition() (*ThresholdRuleCondition, bool) { + return &trc, true +} + +// AsLocationThresholdRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. +func (trc ThresholdRuleCondition) AsLocationThresholdRuleCondition() (*LocationThresholdRuleCondition, bool) { + return nil, false +} + +// AsManagementEventRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. +func (trc ThresholdRuleCondition) AsManagementEventRuleCondition() (*ManagementEventRuleCondition, bool) { + return nil, false +} + +// AsRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. 
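+//
+// NOTE (editor's sketch, not part of the generated code): because scalar
+// fields are pointers, a threshold condition is usually assembled via local
+// variables before being marshaled; the values below are illustrative only:
+//
+//    threshold := 90.0
+//    window := "PT5M" // ISO 8601, within the documented 5-minute to 1-day bounds
+//    trc := ThresholdRuleCondition{
+//        Operator:        ConditionOperatorGreaterThan,
+//        Threshold:       &threshold,
+//        WindowSize:      &window,
+//        TimeAggregation: TimeAggregationOperatorAverage,
+//    }
+//    _, _ = json.Marshal(trc) // the custom marshaler stamps odata.type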
+func (trc ThresholdRuleCondition) AsRuleCondition() (*RuleCondition, bool) { + return nil, false +} + +// AsBasicRuleCondition is the BasicRuleCondition implementation for ThresholdRuleCondition. +func (trc ThresholdRuleCondition) AsBasicRuleCondition() (BasicRuleCondition, bool) { + return &trc, true +} + +// UnmarshalJSON is the custom unmarshaler for ThresholdRuleCondition struct. +func (trc *ThresholdRuleCondition) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "operator": + if v != nil { + var operator ConditionOperator + err = json.Unmarshal(*v, &operator) + if err != nil { + return err + } + trc.Operator = operator + } + case "threshold": + if v != nil { + var threshold float64 + err = json.Unmarshal(*v, &threshold) + if err != nil { + return err + } + trc.Threshold = &threshold + } + case "windowSize": + if v != nil { + var windowSize string + err = json.Unmarshal(*v, &windowSize) + if err != nil { + return err + } + trc.WindowSize = &windowSize + } + case "timeAggregation": + if v != nil { + var timeAggregation TimeAggregationOperator + err = json.Unmarshal(*v, &timeAggregation) + if err != nil { + return err + } + trc.TimeAggregation = timeAggregation + } + case "dataSource": + if v != nil { + dataSource, err := unmarshalBasicRuleDataSource(*v) + if err != nil { + return err + } + trc.DataSource = dataSource + } + case "odata.type": + if v != nil { + var odataType OdataTypeBasicRuleCondition + err = json.Unmarshal(*v, &odataType) + if err != nil { + return err + } + trc.OdataType = odataType + } + } + } + + return nil +} + +// TimeSeriesBaseline the baseline values for a single time series. +type TimeSeriesBaseline struct { + // Aggregation - The aggregation type of the metric. + Aggregation *string `json:"aggregation,omitempty"` + // Dimensions - The dimensions of this time series. + Dimensions *[]MetricSingleDimension `json:"dimensions,omitempty"` + // Timestamps - The list of timestamps of the baselines. + Timestamps *[]date.Time `json:"timestamps,omitempty"` + // Data - The baseline values for each sensitivity. + Data *[]SingleBaseline `json:"data,omitempty"` + // Metadata - The baseline metadata values. + Metadata *[]BaselineMetadata `json:"metadata,omitempty"` +} + +// TimeSeriesElement a time series result type. The discriminator value is always TimeSeries in this case. +type TimeSeriesElement struct { + // Metadatavalues - the metadata values returned if $filter was specified in the call. + Metadatavalues *[]MetadataValue `json:"metadatavalues,omitempty"` + // Data - An array of data points representing the metric values. This is only returned if a result type of data is specified. + Data *[]MetricValue `json:"data,omitempty"` +} + +// TimeSeriesInformation the time series info needed for calculating the baseline. +type TimeSeriesInformation struct { + // Sensitivities - the list of sensitivities for calculating the baseline. + Sensitivities *[]string `json:"sensitivities,omitempty"` + // Values - The metric values to calculate the baseline. + Values *[]float64 `json:"values,omitempty"` + // Timestamps - the array of timestamps of the baselines. + Timestamps *[]date.Time `json:"timestamps,omitempty"` +} + +// TimeWindow a specific date-time for the profile. +type TimeWindow struct { + // TimeZone - the timezone of the start and end times for the profile. 
Some examples of valid time zones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time + TimeZone *string `json:"timeZone,omitempty"` + // Start - the start time for the profile in ISO 8601 format. + Start *date.Time `json:"start,omitempty"` + // End - the end time for the profile in ISO 8601 format. + End *date.Time `json:"end,omitempty"` +} + +// TriggerCondition the condition that results in the Log Search rule. +type TriggerCondition struct { + // ThresholdOperator - Evaluation operation for rule - 'GreaterThan' or 'LessThan. Possible values include: 'ConditionalOperatorGreaterThan', 'ConditionalOperatorLessThan', 'ConditionalOperatorEqual' + ThresholdOperator ConditionalOperator `json:"thresholdOperator,omitempty"` + // Threshold - Result or count threshold based on which rule should be triggered. 
+ Threshold *float64 `json:"threshold,omitempty"` + // MetricTrigger - Trigger condition for metric query rule + MetricTrigger *LogMetricTrigger `json:"metricTrigger,omitempty"` +} + +// VMInsightsOnboardingStatus VM Insights onboarding status for a resource. +type VMInsightsOnboardingStatus struct { + autorest.Response `json:"-"` + // VMInsightsOnboardingStatusProperties - Resource properties. + *VMInsightsOnboardingStatusProperties `json:"properties,omitempty"` + // ID - READ-ONLY; Azure resource Id + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Azure resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Azure resource type + Type *string `json:"type,omitempty"` +} + +// MarshalJSON is the custom marshaler for VMInsightsOnboardingStatus. +func (vios VMInsightsOnboardingStatus) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if vios.VMInsightsOnboardingStatusProperties != nil { + objectMap["properties"] = vios.VMInsightsOnboardingStatusProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for VMInsightsOnboardingStatus struct. +func (vios *VMInsightsOnboardingStatus) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "properties": + if v != nil { + var VMInsightsOnboardingStatusProperties VMInsightsOnboardingStatusProperties + err = json.Unmarshal(*v, &VMInsightsOnboardingStatusProperties) + if err != nil { + return err + } + vios.VMInsightsOnboardingStatusProperties = &VMInsightsOnboardingStatusProperties + } + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + vios.ID = &ID + } + case "name": + if v != nil { + var name string + err = json.Unmarshal(*v, &name) + if err != nil { + return err + } + vios.Name = &name + } + case "type": + if v != nil { + var typeVar string + err = json.Unmarshal(*v, &typeVar) + if err != nil { + return err + } + vios.Type = &typeVar + } + } + } + + return nil +} + +// VMInsightsOnboardingStatusProperties resource properties. +type VMInsightsOnboardingStatusProperties struct { + // ResourceID - Azure Resource Manager identifier of the resource whose onboarding status is being represented. + ResourceID *string `json:"resourceId,omitempty"` + // OnboardingStatus - The onboarding status for the resource. Note that, a higher level scope, e.g., resource group or subscription, is considered onboarded if at least one resource under it is onboarded. Possible values include: 'Onboarded', 'NotOnboarded', 'Unknown' + OnboardingStatus OnboardingStatus `json:"onboardingStatus,omitempty"` + // DataStatus - The status of VM Insights data from the resource. When reported as `present` the data array will contain information about the data containers to which data for the specified resource is being routed. Possible values include: 'Present', 'NotPresent' + DataStatus DataStatus `json:"dataStatus,omitempty"` + // Data - Containers that currently store VM Insights data for the specified resource. + Data *[]DataContainer `json:"data,omitempty"` +} + +// VoiceReceiver a voice receiver. +type VoiceReceiver struct { + // Name - The name of the voice receiver. Names must be unique across all receivers within an action group. + Name *string `json:"name,omitempty"` + // CountryCode - The country code of the voice receiver. 
+ CountryCode *string `json:"countryCode,omitempty"` + // PhoneNumber - The phone number of the voice receiver. + PhoneNumber *string `json:"phoneNumber,omitempty"` +} + +// WebhookNotification webhook notification of an autoscale event. +type WebhookNotification struct { + // ServiceURI - the service address to receive the notification. + ServiceURI *string `json:"serviceUri,omitempty"` + // Properties - a property bag of settings. This value can be empty. + Properties map[string]*string `json:"properties"` +} + +// MarshalJSON is the custom marshaler for WebhookNotification. +func (wn WebhookNotification) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wn.ServiceURI != nil { + objectMap["serviceUri"] = wn.ServiceURI + } + if wn.Properties != nil { + objectMap["properties"] = wn.Properties + } + return json.Marshal(objectMap) +} + +// WebhookReceiver a webhook receiver. +type WebhookReceiver struct { + // Name - The name of the webhook receiver. Names must be unique across all receivers within an action group. + Name *string `json:"name,omitempty"` + // ServiceURI - The URI where webhooks should be sent. + ServiceURI *string `json:"serviceUri,omitempty"` + // UseCommonAlertSchema - Indicates whether to use common alert schema. + UseCommonAlertSchema *bool `json:"useCommonAlertSchema,omitempty"` + // UseAadAuth - Indicates whether or not use AAD authentication. + UseAadAuth *bool `json:"useAadAuth,omitempty"` + // ObjectID - Indicates the webhook app object Id for aad auth. + ObjectID *string `json:"objectId,omitempty"` + // IdentifierURI - Indicates the identifier uri for aad auth. + IdentifierURI *string `json:"identifierUri,omitempty"` + // TenantID - Indicates the tenant id for aad auth. + TenantID *string `json:"tenantId,omitempty"` +} + +// WorkspaceInfo information about a Log Analytics Workspace. +type WorkspaceInfo struct { + // ID - Azure Resource Manager identifier of the Log Analytics Workspace. + ID *string `json:"id,omitempty"` + // Location - Location of the Log Analytics workspace. + Location *string `json:"location,omitempty"` + // WorkspaceInfoProperties - Resource properties. + *WorkspaceInfoProperties `json:"properties,omitempty"` +} + +// MarshalJSON is the custom marshaler for WorkspaceInfo. +func (wi WorkspaceInfo) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if wi.ID != nil { + objectMap["id"] = wi.ID + } + if wi.Location != nil { + objectMap["location"] = wi.Location + } + if wi.WorkspaceInfoProperties != nil { + objectMap["properties"] = wi.WorkspaceInfoProperties + } + return json.Marshal(objectMap) +} + +// UnmarshalJSON is the custom unmarshaler for WorkspaceInfo struct. +func (wi *WorkspaceInfo) UnmarshalJSON(body []byte) error { + var m map[string]*json.RawMessage + err := json.Unmarshal(body, &m) + if err != nil { + return err + } + for k, v := range m { + switch k { + case "id": + if v != nil { + var ID string + err = json.Unmarshal(*v, &ID) + if err != nil { + return err + } + wi.ID = &ID + } + case "location": + if v != nil { + var location string + err = json.Unmarshal(*v, &location) + if err != nil { + return err + } + wi.Location = &location + } + case "properties": + if v != nil { + var workspaceInfoProperties WorkspaceInfoProperties + err = json.Unmarshal(*v, &workspaceInfoProperties) + if err != nil { + return err + } + wi.WorkspaceInfoProperties = &workspaceInfoProperties + } + } + } + + return nil +} + +// WorkspaceInfoProperties resource properties. 
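+//
+// NOTE (editor's sketch, not part of the generated code): WorkspaceInfo
+// above embeds *WorkspaceInfoProperties and leans on its custom
+// (un)marshalers to nest those fields under "properties" on the wire, while
+// Go field promotion keeps them ergonomic in code:
+//
+//    var wi WorkspaceInfo
+//    _ = json.Unmarshal([]byte(`{"id":"/x","properties":{"customerId":"abc"}}`), &wi)
+//    // *wi.CustomerID == "abc" via promotion from the embedded struct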
+type WorkspaceInfoProperties struct { + // CustomerID - Log Analytics workspace identifier. + CustomerID *string `json:"customerId,omitempty"` +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/operations.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/operations.go new file mode 100644 index 00000000..bd8af5c2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/operations.go @@ -0,0 +1,109 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// OperationsClient is the monitor Management Client +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all of the available operations from Microsoft.Insights provider. +func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", resp, "Failure sending request") + return + } + + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
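+//
+// NOTE (editor's sketch, not part of the generated code): each operation is
+// split into a Preparer/Sender/Responder triple so the phases can be tested
+// or decorated independently; List merely chains them. A minimal call,
+// assuming an Authorizer has already been configured elsewhere:
+//
+//    client := insights.NewOperationsClient("<subscription id>")
+//    // client.Authorizer = ... (authentication setup elided)
+//    ops, err := client.List(context.Background())
+//    if err == nil && ops.Value != nil {
+//        for _, op := range *ops.Value {
+//            fmt.Println(*op.Name)
+//        }
+//    }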
+func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/microsoft.insights/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/scheduledqueryrules.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/scheduledqueryrules.go new file mode 100644 index 00000000..b4bb9bca --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/scheduledqueryrules.go @@ -0,0 +1,527 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ScheduledQueryRulesClient is the monitor Management Client +type ScheduledQueryRulesClient struct { + BaseClient +} + +// NewScheduledQueryRulesClient creates an instance of the ScheduledQueryRulesClient client. +func NewScheduledQueryRulesClient(subscriptionID string) ScheduledQueryRulesClient { + return NewScheduledQueryRulesClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewScheduledQueryRulesClientWithBaseURI creates an instance of the ScheduledQueryRulesClient client. 
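+//
+// NOTE (editor's sketch, not part of the generated code): the WithBaseURI
+// variant exists for sovereign or otherwise non-default clouds; the plain
+// constructor simply delegates with DefaultBaseURI. The endpoint below is
+// illustrative only:
+//
+//    c := insights.NewScheduledQueryRulesClientWithBaseURI(
+//        "https://management.usgovcloudapi.net", "<subscription id>")
+//    _ = c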
+func NewScheduledQueryRulesClientWithBaseURI(baseURI string, subscriptionID string) ScheduledQueryRulesClient {
+	return ScheduledQueryRulesClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// CreateOrUpdate creates or updates a log search rule.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// ruleName - the name of the rule.
+// parameters - the parameters of the rule to create or update.
+func (client ScheduledQueryRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResource) (result LogSearchRuleResource, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.CreateOrUpdate")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: parameters,
+			Constraints: []validation.Constraint{{Target: "parameters.LogSearchRule", Name: validation.Null, Rule: true,
+				Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source", Name: validation.Null, Rule: true,
+					Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source.DataSourceID", Name: validation.Null, Rule: true, Chain: nil}}},
+					{Target: "parameters.LogSearchRule.Schedule", Name: validation.Null, Rule: false,
+						Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Schedule.FrequencyInMinutes", Name: validation.Null, Rule: true, Chain: nil},
+							{Target: "parameters.LogSearchRule.Schedule.TimeWindowInMinutes", Name: validation.Null, Rule: true, Chain: nil},
+						}},
+					{Target: "parameters.LogSearchRule.Action", Name: validation.Null, Rule: true, Chain: nil},
+				}}}}}); err != nil {
+		return result, validation.NewError("insights.ScheduledQueryRulesClient", "CreateOrUpdate", err.Error())
+	}
+
+	req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, ruleName, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "CreateOrUpdate", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.CreateOrUpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "CreateOrUpdate", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.CreateOrUpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "CreateOrUpdate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// CreateOrUpdatePreparer prepares the CreateOrUpdate request.
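The validation block above is the clearest statement of the request contract: LogSearchRule, its Source.DataSourceID, and its Action are required, and a Schedule, if supplied, must carry both FrequencyInMinutes and TimeWindowInMinutes. A hedged call-site sketch follows; the model structs (LogSearchRuleResource, LogSearchRule, Source, Schedule, BasicAction) are assumed from the package's models file, which this diff does not include.

package main

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
    "github.com/Azure/go-autorest/autorest/to"
)

// createRule is a sketch only; the concrete Action value (for example an
// alerting action) is defined in the package models and passed in here.
func createRule(ctx context.Context, client insights.ScheduledQueryRulesClient, action insights.BasicAction) error {
    rule := insights.LogSearchRuleResource{
        Location: to.StringPtr("westeurope"), // hypothetical region
        LogSearchRule: &insights.LogSearchRule{
            Source: &insights.Source{
                // Required by the validation above: the resource the query targets.
                DataSourceID: to.StringPtr("/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<ws>"),
                Query:        to.StringPtr("Heartbeat | summarize AggregatedValue = count() by bin(TimeGenerated, 5m)"),
            },
            // Optional, but if present both fields below are required.
            Schedule: &insights.Schedule{
                FrequencyInMinutes:  to.Int32Ptr(5),
                TimeWindowInMinutes: to.Int32Ptr(5),
            },
            Action: action, // required
        },
    }
    _, err := client.CreateOrUpdate(ctx, "my-resource-group", "my-rule", rule)
    return err
}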
+func (client ScheduledQueryRulesClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-04-16" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client ScheduledQueryRulesClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client ScheduledQueryRulesClient) CreateOrUpdateResponder(resp *http.Response) (result LogSearchRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a Log Search rule +// Parameters: +// resourceGroupName - the name of the resource group. +// ruleName - the name of the rule. +func (client ScheduledQueryRulesClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client ScheduledQueryRulesClient) DeletePreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"ruleName":          autorest.Encode("path", ruleName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-04-16"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsDelete(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// DeleteSender sends the Delete request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScheduledQueryRulesClient) DeleteSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// DeleteResponder handles the response to the Delete request. The method always
+// closes the http.Response Body.
+func (client ScheduledQueryRulesClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// Get gets a Log Search rule.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// ruleName - the name of the rule.
+func (client ScheduledQueryRulesClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result LogSearchRuleResource, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Get")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.GetPreparer(ctx, resourceGroupName, ruleName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.GetSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.GetResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// GetPreparer prepares the Get request.
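One detail worth noting in DeleteResponder above: it accepts both 200 and 204, the usual ARM contract where 204 means the rule was already gone, so a repeated delete still succeeds. A short sketch with hypothetical names:

package main

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
)

func deleteRule(ctx context.Context, client insights.ScheduledQueryRulesClient) error {
    resp, err := client.Delete(ctx, "my-resource-group", "my-rule")
    if err != nil {
        return err
    }
    // 200: the rule existed and was deleted; 204: it was not there.
    fmt.Println("delete status:", resp.StatusCode)
    return nil
}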
+func (client ScheduledQueryRulesClient) GetPreparer(ctx context.Context, resourceGroupName string, ruleName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"ruleName":          autorest.Encode("path", ruleName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-04-16"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetSender sends the Get request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScheduledQueryRulesClient) GetSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetResponder handles the response to the Get request. The method always
+// closes the http.Response Body.
+func (client ScheduledQueryRulesClient) GetResponder(resp *http.Response) (result LogSearchRuleResource, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListByResourceGroup lists the Log Search rules within a resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// filter - the filter to apply on the operation. For more information please see
+// https://msdn.microsoft.com/en-us/library/azure/dn931934.aspx
+func (client ScheduledQueryRulesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string) (result LogSearchRuleResourceCollection, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.ListByResourceGroup")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListByResourceGroupSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ListByResourceGroupResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListByResourceGroupPreparer prepares the ListByResourceGroup request.
+func (client ScheduledQueryRulesClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-04-16"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScheduledQueryRulesClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always
+// closes the http.Response Body.
+func (client ScheduledQueryRulesClient) ListByResourceGroupResponder(resp *http.Response) (result LogSearchRuleResourceCollection, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListBySubscription lists the Log Search rules within a subscription.
+// Parameters:
+// filter - the filter to apply on the operation. For more information please see
+// https://msdn.microsoft.com/en-us/library/azure/dn931934.aspx
+func (client ScheduledQueryRulesClient) ListBySubscription(ctx context.Context, filter string) (result LogSearchRuleResourceCollection, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.ListBySubscription")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.ListBySubscriptionPreparer(ctx, filter)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListBySubscriptionSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ListBySubscriptionResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListBySubscriptionPreparer prepares the ListBySubscription request.
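Unlike the paged list calls later in this diff, both list operations here return a plain LogSearchRuleResourceCollection, so there is no iterator to drive; the whole result arrives in one response. A usage sketch, with the Value and Name fields assumed from the package models:

package main

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
)

func printRules(ctx context.Context, client insights.ScheduledQueryRulesClient) error {
    // An empty filter returns every rule in the resource group.
    coll, err := client.ListByResourceGroup(ctx, "my-resource-group", "")
    if err != nil {
        return err
    }
    if coll.Value != nil {
        for _, rule := range *coll.Value {
            if rule.Name != nil {
                fmt.Println(*rule.Name)
            }
        }
    }
    return nil
}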
+func (client ScheduledQueryRulesClient) ListBySubscriptionPreparer(ctx context.Context, filter string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2018-04-16"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/microsoft.insights/scheduledQueryRules", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListBySubscriptionSender sends the ListBySubscription request. The method will close the
+// http.Response Body if it receives an error.
+func (client ScheduledQueryRulesClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always
+// closes the http.Response Body.
+func (client ScheduledQueryRulesClient) ListBySubscriptionResponder(resp *http.Response) (result LogSearchRuleResourceCollection, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// Update updates a log search rule.
+// Parameters:
+// resourceGroupName - the name of the resource group.
+// ruleName - the name of the rule.
+// parameters - the parameters of the rule to update.
+func (client ScheduledQueryRulesClient) Update(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResourcePatch) (result LogSearchRuleResource, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Update")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, parameters)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.UpdateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.UpdateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// UpdatePreparer prepares the Update request.
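Update sends a PATCH with LogSearchRuleResourcePatch, so only the fields present in the payload change on the server. A sketch that retags a rule; the patch model and its Tags map are assumed from the package's models file:

package main

import (
    "context"

    "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
    "github.com/Azure/go-autorest/autorest/to"
)

func retagRule(ctx context.Context, client insights.ScheduledQueryRulesClient) error {
    patch := insights.LogSearchRuleResourcePatch{
        Tags: map[string]*string{
            "owner": to.StringPtr("platform-team"), // hypothetical tag
        },
    }
    _, err := client.Update(ctx, "my-resource-group", "my-rule", patch)
    return err
}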
+func (client ScheduledQueryRulesClient) UpdatePreparer(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResourcePatch) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "ruleName": autorest.Encode("path", ruleName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2018-04-16" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/microsoft.insights/scheduledQueryRules/{ruleName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client ScheduledQueryRulesClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client ScheduledQueryRulesClient) UpdateResponder(resp *http.Response) (result LogSearchRuleResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/tenantactivitylogs.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/tenantactivitylogs.go new file mode 100644 index 00000000..23739ed5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/tenantactivitylogs.go @@ -0,0 +1,174 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. 
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// TenantActivityLogsClient is the monitor Management Client
+type TenantActivityLogsClient struct {
+	BaseClient
+}
+
+// NewTenantActivityLogsClient creates an instance of the TenantActivityLogsClient client.
+func NewTenantActivityLogsClient(subscriptionID string) TenantActivityLogsClient {
+	return NewTenantActivityLogsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewTenantActivityLogsClientWithBaseURI creates an instance of the TenantActivityLogsClient client.
+func NewTenantActivityLogsClientWithBaseURI(baseURI string, subscriptionID string) TenantActivityLogsClient {
+	return TenantActivityLogsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// List gets the Activity Logs for the Tenant.<br>Everything that is applicable to the API to get the Activity Logs for
+// the subscription is applicable to this API (the parameters, $filter, etc.).<br>One thing to point out here is that
+// this API does *not* retrieve the logs at the individual subscription of the tenant but only surfaces the logs that
+// were generated at the tenant level.
+// Parameters:
+// filter - reduces the set of data collected.<br>The **$filter** is very restricted and allows only the
+// following patterns.<br>- List events for a resource group: $filter=eventTimestamp ge '<Start Time>' and
+// eventTimestamp le '<End Time>' and eventChannels eq 'Admin, Operation' and resourceGroupName eq
+// '<ResourceGroupName>'.<br>- List events for resource: $filter=eventTimestamp ge '<Start Time>' and
+// eventTimestamp le '<End Time>' and eventChannels eq 'Admin, Operation' and resourceUri eq
+// '<ResourceURI>'.<br>- List events for a subscription: $filter=eventTimestamp ge '<Start Time>' and
+// eventTimestamp le '<End Time>' and eventChannels eq 'Admin, Operation'.<br>- List events for a resource
+// provider: $filter=eventTimestamp ge '<Start Time>' and eventTimestamp le '<End Time>' and eventChannels eq
+// 'Admin, Operation' and resourceProvider eq '<ResourceProviderName>'.<br>- List events for a correlation Id:
+// api-version=2014-04-01&$filter=eventTimestamp ge '2014-07-16T04:36:37.6407898Z' and eventTimestamp le
+// '2014-07-20T04:36:37.6407898Z' and eventChannels eq 'Admin, Operation' and correlationId eq
+// '<CorrelationID>'.<br>**NOTE**: No other syntax is allowed.
+// selectParameter - used to fetch events with only the given properties.<br>
The **$select** argument is a +// comma separated list of property names to be returned. Possible values are: *authorization*, *claims*, +// *correlationId*, *description*, *eventDataId*, *eventName*, *eventTimestamp*, *httpRequest*, *level*, +// *operationId*, *operationName*, *properties*, *resourceGroupName*, *resourceProviderName*, *resourceId*, +// *status*, *submissionTimestamp*, *subStatus*, *subscriptionId* +func (client TenantActivityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TenantActivityLogsClient.List") + defer func() { + sc := -1 + if result.edc.Response.Response != nil { + sc = result.edc.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, selectParameter) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.edc.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "List", resp, "Failure sending request") + return + } + + result.edc, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TenantActivityLogsClient) ListPreparer(ctx context.Context, filter string, selectParameter string) (*http.Request, error) { + const APIVersion = "2015-04-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(selectParameter) > 0 { + queryParameters["$select"] = autorest.Encode("query", selectParameter) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/microsoft.insights/eventtypes/management/values"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TenantActivityLogsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TenantActivityLogsClient) ListResponder(resp *http.Response) (result EventDataCollection, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
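Putting the $filter grammar from the doc comment above to work: the sketch below requests one day of tenant-level events and walks every page through the ListComplete iterator defined a little further down. The iterator methods (NotDone, Value, NextWithContext) and the EventDataID field are assumed from the generated paging and models code outside this diff.

package main

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
)

func printTenantEvents(ctx context.Context, client insights.TenantActivityLogsClient) error {
    // One of the allowed patterns: a time window plus event channels.
    filter := "eventTimestamp ge '2019-10-01T00:00:00Z' and eventTimestamp le " +
        "'2019-10-02T00:00:00Z' and eventChannels eq 'Admin, Operation'"

    iter, err := client.ListComplete(ctx, filter, "")
    if err != nil {
        return err
    }
    for iter.NotDone() {
        ev := iter.Value()
        if ev.EventDataID != nil {
            fmt.Println(*ev.EventDataID)
        }
        // Fetches the next page via the collection's nextLink when needed.
        if err := iter.NextWithContext(ctx); err != nil {
            return err
        }
    }
    return nil
}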
+func (client TenantActivityLogsClient) listNextResults(ctx context.Context, lastResults EventDataCollection) (result EventDataCollection, err error) { + req, err := lastResults.eventDataCollectionPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client TenantActivityLogsClient) ListComplete(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TenantActivityLogsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, selectParameter) + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/version.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/version.go new file mode 100644 index 00000000..3d7944fc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/version.go @@ -0,0 +1,30 @@ +package insights + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " insights/2019-06-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. 
+func Version() string { + return version.Number +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/vminsights.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/vminsights.go new file mode 100644 index 00000000..76178c59 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights/vminsights.go @@ -0,0 +1,116 @@ +package insights + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// VMInsightsClient is the monitor Management Client +type VMInsightsClient struct { + BaseClient +} + +// NewVMInsightsClient creates an instance of the VMInsightsClient client. +func NewVMInsightsClient(subscriptionID string) VMInsightsClient { + return NewVMInsightsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewVMInsightsClientWithBaseURI creates an instance of the VMInsightsClient client. +func NewVMInsightsClientWithBaseURI(baseURI string, subscriptionID string) VMInsightsClient { + return VMInsightsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// GetOnboardingStatus retrieves the VM Insights onboarding status for the specified resource or resource scope. +// Parameters: +// resourceURI - the fully qualified Azure Resource manager identifier of the resource, or scope, whose status +// to retrieve. 
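GetOnboardingStatus below is scoped by a full ARM resource URI rather than the usual resource-group/name pair, so the caller passes the complete identifier of the resource or scope. A minimal sketch; the VMInsightsOnboardingStatusProperties field is assumed from the package models, which this diff does not include:

package main

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights"
)

func checkOnboarding(ctx context.Context, client insights.VMInsightsClient) error {
    // Hypothetical scope: a single virtual machine.
    resourceURI := "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Compute/virtualMachines/<vm>"

    status, err := client.GetOnboardingStatus(ctx, resourceURI)
    if err != nil {
        return err
    }
    if status.VMInsightsOnboardingStatusProperties != nil {
        // OnboardingStatus is a string-typed enum in the models file.
        fmt.Println(status.VMInsightsOnboardingStatusProperties.OnboardingStatus)
    }
    return nil
}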
+func (client VMInsightsClient) GetOnboardingStatus(ctx context.Context, resourceURI string) (result VMInsightsOnboardingStatus, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/VMInsightsClient.GetOnboardingStatus") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetOnboardingStatusPreparer(ctx, resourceURI) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.VMInsightsClient", "GetOnboardingStatus", nil, "Failure preparing request") + return + } + + resp, err := client.GetOnboardingStatusSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "insights.VMInsightsClient", "GetOnboardingStatus", resp, "Failure sending request") + return + } + + result, err = client.GetOnboardingStatusResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "insights.VMInsightsClient", "GetOnboardingStatus", resp, "Failure responding to request") + } + + return +} + +// GetOnboardingStatusPreparer prepares the GetOnboardingStatus request. +func (client VMInsightsClient) GetOnboardingStatusPreparer(ctx context.Context, resourceURI string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceUri": resourceURI, + } + + const APIVersion = "2018-11-27-preview" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceUri}/providers/Microsoft.Insights/vmInsightsOnboardingStatuses/default", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetOnboardingStatusSender sends the GetOnboardingStatus request. The method will close the +// http.Response Body if it receives an error. +func (client VMInsightsClient) GetOnboardingStatusSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetOnboardingStatusResponder handles the response to the GetOnboardingStatus request. The method always +// closes the http.Response Body. +func (client VMInsightsClient) GetOnboardingStatusResponder(resp *http.Response) (result VMInsightsOnboardingStatus, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/client.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/client.go new file mode 100644 index 00000000..2b25d219 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/client.go @@ -0,0 +1,51 @@ +// Package resources implements the Azure ARM Resources service API version 2019-03-01. +// +// Provides operations for working with resources and resource groups. 
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"github.com/Azure/go-autorest/autorest"
+)
+
+const (
+	// DefaultBaseURI is the default URI used for the service Resources
+	DefaultBaseURI = "https://management.azure.com"
+)
+
+// BaseClient is the base client for Resources.
+type BaseClient struct {
+	autorest.Client
+	BaseURI        string
+	SubscriptionID string
+}
+
+// New creates an instance of the BaseClient client.
+func New(subscriptionID string) BaseClient {
+	return NewWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewWithBaseURI creates an instance of the BaseClient client.
+func NewWithBaseURI(baseURI string, subscriptionID string) BaseClient {
+	return BaseClient{
+		Client:         autorest.NewClientWithUserAgent(UserAgent()),
+		BaseURI:        baseURI,
+		SubscriptionID: subscriptionID,
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/deploymentoperations.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/deploymentoperations.go
new file mode 100644
index 00000000..7b0413e9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/deploymentoperations.go
@@ -0,0 +1,474 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// DeploymentOperationsClient provides operations for working with resources and resource groups.
+type DeploymentOperationsClient struct {
+	BaseClient
+}
+
+// NewDeploymentOperationsClient creates an instance of the DeploymentOperationsClient client.
+func NewDeploymentOperationsClient(subscriptionID string) DeploymentOperationsClient { + return NewDeploymentOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewDeploymentOperationsClientWithBaseURI creates an instance of the DeploymentOperationsClient client. +func NewDeploymentOperationsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentOperationsClient { + return DeploymentOperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets a deployments operation. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment. +// operationID - the ID of the operation to get. +func (client DeploymentOperationsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string, operationID string) (result DeploymentOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentOperationsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, deploymentName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. 
+func (client DeploymentOperationsClient) GetPreparer(ctx context.Context, resourceGroupName string, deploymentName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "operationId": autorest.Encode("path", operationID), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) GetResponder(resp *http.Response) (result DeploymentOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAtSubscriptionScope gets a deployments operation. +// Parameters: +// deploymentName - the name of the deployment. +// operationID - the ID of the operation to get. 
+func (client DeploymentOperationsClient) GetAtSubscriptionScope(ctx context.Context, deploymentName string, operationID string) (result DeploymentOperation, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.GetAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentOperationsClient", "GetAtSubscriptionScope", err.Error()) + } + + req, err := client.GetAtSubscriptionScopePreparer(ctx, deploymentName, operationID) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.GetAtSubscriptionScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtSubscriptionScope", resp, "Failure sending request") + return + } + + result, err = client.GetAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "GetAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// GetAtSubscriptionScopePreparer prepares the GetAtSubscriptionScope request. +func (client DeploymentOperationsClient) GetAtSubscriptionScopePreparer(ctx context.Context, deploymentName string, operationID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "operationId": autorest.Encode("path", operationID), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations/{operationId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAtSubscriptionScopeSender sends the GetAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) GetAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetAtSubscriptionScopeResponder handles the response to the GetAtSubscriptionScope request. The method always +// closes the http.Response Body. 
+func (client DeploymentOperationsClient) GetAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentOperation, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all deployments operations for a deployment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment with the operation to get. +// top - the number of results to return. +func (client DeploymentOperationsClient) List(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.List") + defer func() { + sc := -1 + if result.dolr.Response.Response != nil { + sc = result.dolr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentOperationsClient", "List", err.Error()) + } + + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, resourceGroupName, deploymentName, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.dolr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure sending request") + return + } + + result.dolr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. 
+func (client DeploymentOperationsClient) ListPreparer(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/deployments/{deploymentName}/operations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) ListResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client DeploymentOperationsClient) listNextResults(ctx context.Context, lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) { + req, err := lastResults.deploymentOperationsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
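The List/listNextResults/ListComplete trio above is the standard generated paging surface: List returns the first page, listNextResults follows the nextLink, and ListComplete wraps both behind an iterator. A sketch that drains every operation of one deployment, with the iterator methods and the OperationID field assumed from the generated code outside this diff:

package main

import (
    "context"
    "fmt"

    "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources"
)

func printDeploymentOperations(ctx context.Context, client resources.DeploymentOperationsClient) error {
    // A nil top leaves the page size to the service.
    iter, err := client.ListComplete(ctx, "my-resource-group", "my-deployment", nil)
    if err != nil {
        return err
    }
    for iter.NotDone() {
        op := iter.Value()
        if op.OperationID != nil {
            fmt.Println(*op.OperationID)
        }
        if err := iter.NextWithContext(ctx); err != nil {
            return err
        }
    }
    return nil
}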
+func (client DeploymentOperationsClient) ListComplete(ctx context.Context, resourceGroupName string, deploymentName string, top *int32) (result DeploymentOperationsListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, resourceGroupName, deploymentName, top) + return +} + +// ListAtSubscriptionScope gets all deployments operations for a deployment. +// Parameters: +// deploymentName - the name of the deployment with the operation to get. +// top - the number of results to return. +func (client DeploymentOperationsClient) ListAtSubscriptionScope(ctx context.Context, deploymentName string, top *int32) (result DeploymentOperationsListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.ListAtSubscriptionScope") + defer func() { + sc := -1 + if result.dolr.Response.Response != nil { + sc = result.dolr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentOperationsClient", "ListAtSubscriptionScope", err.Error()) + } + + result.fn = client.listAtSubscriptionScopeNextResults + req, err := client.ListAtSubscriptionScopePreparer(ctx, deploymentName, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.ListAtSubscriptionScopeSender(req) + if err != nil { + result.dolr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtSubscriptionScope", resp, "Failure sending request") + return + } + + result.dolr, err = client.ListAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "ListAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// ListAtSubscriptionScopePreparer prepares the ListAtSubscriptionScope request. 
+func (client DeploymentOperationsClient) ListAtSubscriptionScopePreparer(ctx context.Context, deploymentName string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/operations", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListAtSubscriptionScopeSender sends the ListAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentOperationsClient) ListAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListAtSubscriptionScopeResponder handles the response to the ListAtSubscriptionScope request. The method always +// closes the http.Response Body. +func (client DeploymentOperationsClient) ListAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentOperationsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listAtSubscriptionScopeNextResults retrieves the next set of results, if any. +func (client DeploymentOperationsClient) listAtSubscriptionScopeNextResults(ctx context.Context, lastResults DeploymentOperationsListResult) (result DeploymentOperationsListResult, err error) { + req, err := lastResults.deploymentOperationsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtSubscriptionScopeNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListAtSubscriptionScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtSubscriptionScopeNextResults", resp, "Failure sending next results request") + } + result, err = client.ListAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentOperationsClient", "listAtSubscriptionScopeNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListAtSubscriptionScopeComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client DeploymentOperationsClient) ListAtSubscriptionScopeComplete(ctx context.Context, deploymentName string, top *int32) (result DeploymentOperationsListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsClient.ListAtSubscriptionScope")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListAtSubscriptionScope(ctx, deploymentName, top)
+	return
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/deployments.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/deployments.go
new file mode 100644
index 00000000..47edc33e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/deployments.go
@@ -0,0 +1,1553 @@
+package resources
+
+// Copyright (c) Microsoft and contributors. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Code generated by Microsoft (R) AutoRest Code Generator.
+// Changes may cause incorrect behavior and will be lost if the code is regenerated.
+
+import (
+	"context"
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/validation"
+	"github.com/Azure/go-autorest/tracing"
+	"net/http"
+)
+
+// DeploymentsClient provides operations for working with resources and resource groups.
+type DeploymentsClient struct {
+	BaseClient
+}
+
+// NewDeploymentsClient creates an instance of the DeploymentsClient client.
+func NewDeploymentsClient(subscriptionID string) DeploymentsClient {
+	return NewDeploymentsClientWithBaseURI(DefaultBaseURI, subscriptionID)
+}
+
+// NewDeploymentsClientWithBaseURI creates an instance of the DeploymentsClient client.
+func NewDeploymentsClientWithBaseURI(baseURI string, subscriptionID string) DeploymentsClient {
+	return DeploymentsClient{NewWithBaseURI(baseURI, subscriptionID)}
+}
+
+// Cancel you can cancel a deployment only if the provisioningState is Accepted or Running. After the deployment is
+// canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the currently running
+// template deployment and leaves the resource group partially deployed.
+// Parameters:
+// resourceGroupName - the name of the resource group. The name is case insensitive.
+// deploymentName - the name of the deployment to cancel.
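+//
+// A minimal usage sketch; the subscription ID, group, and deployment names are
+// hypothetical placeholders, and the authorizer is assumed to be obtained
+// elsewhere (for example via autorest/azure/auth):
+//
+//	client := NewDeploymentsClient("00000000-0000-0000-0000-000000000000")
+//	client.Authorizer = authorizer
+//	if _, err := client.Cancel(ctx, "myGroup", "myDeployment"); err != nil {
+//		// handle the error (the deployment may no longer be Accepted/Running)
+//	}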
+func (client DeploymentsClient) Cancel(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Cancel") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Cancel", err.Error()) + } + + req, err := client.CancelPreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", nil, "Failure preparing request") + return + } + + resp, err := client.CancelSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", resp, "Failure sending request") + return + } + + result, err = client.CancelResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Cancel", resp, "Failure responding to request") + } + + return +} + +// CancelPreparer prepares the Cancel request. +func (client DeploymentsClient) CancelPreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelSender sends the Cancel request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CancelSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CancelResponder handles the response to the Cancel request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) CancelResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CancelAtSubscriptionScope you can cancel a deployment only if the provisioningState is Accepted or Running. After +// the deployment is canceled, the provisioningState is set to Canceled. Canceling a template deployment stops the +// currently running template deployment and leaves the resources partially deployed. +// Parameters: +// deploymentName - the name of the deployment to cancel. +func (client DeploymentsClient) CancelAtSubscriptionScope(ctx context.Context, deploymentName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CancelAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CancelAtSubscriptionScope", err.Error()) + } + + req, err := client.CancelAtSubscriptionScopePreparer(ctx, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CancelAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.CancelAtSubscriptionScopeSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CancelAtSubscriptionScope", resp, "Failure sending request") + return + } + + result, err = client.CancelAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CancelAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// CancelAtSubscriptionScopePreparer prepares the CancelAtSubscriptionScope request. +func (client DeploymentsClient) CancelAtSubscriptionScopePreparer(ctx context.Context, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/cancel", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CancelAtSubscriptionScopeSender sends the CancelAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) CancelAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CancelAtSubscriptionScopeResponder handles the response to the CancelAtSubscriptionScope request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CancelAtSubscriptionScopeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckExistence checks whether the deployment exists. +// Parameters: +// resourceGroupName - the name of the resource group with the deployment to check. The name is case +// insensitive. +// deploymentName - the name of the deployment to check. +func (client DeploymentsClient) CheckExistence(ctx context.Context, resourceGroupName string, deploymentName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CheckExistence") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CheckExistence", err.Error()) + } + + req, err := client.CheckExistencePreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. 
+func (client DeploymentsClient) CheckExistencePreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckExistenceAtSubscriptionScope checks whether the deployment exists. +// Parameters: +// deploymentName - the name of the deployment to check. 
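+//
+// Because the responder accepts 204 and 404 without returning an error, the
+// status code on the autorest.Response carries the answer. A minimal sketch
+// with a hypothetical deployment name:
+//
+//	res, err := client.CheckExistenceAtSubscriptionScope(ctx, "myDeployment")
+//	if err == nil {
+//		exists := res.StatusCode == http.StatusNoContent // 404 means it does not exist
+//		fmt.Println("deployment exists:", exists)
+//	}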
+func (client DeploymentsClient) CheckExistenceAtSubscriptionScope(ctx context.Context, deploymentName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CheckExistenceAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CheckExistenceAtSubscriptionScope", err.Error()) + } + + req, err := client.CheckExistenceAtSubscriptionScopePreparer(ctx, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistenceAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceAtSubscriptionScopeSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistenceAtSubscriptionScope", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CheckExistenceAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// CheckExistenceAtSubscriptionScopePreparer prepares the CheckExistenceAtSubscriptionScope request. +func (client DeploymentsClient) CheckExistenceAtSubscriptionScopePreparer(ctx context.Context, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceAtSubscriptionScopeSender sends the CheckExistenceAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CheckExistenceAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CheckExistenceAtSubscriptionScopeResponder handles the response to the CheckExistenceAtSubscriptionScope request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) CheckExistenceAtSubscriptionScopeResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate you can provide the template and parameters directly in the request or link to JSON files. +// Parameters: +// resourceGroupName - the name of the resource group to deploy the resources to. The name is case insensitive. +// The resource group must already exist. +// deploymentName - the name of the deployment. +// parameters - additional parameters supplied to the operation. +func (client DeploymentsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentsCreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.ParametersLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ParametersLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, deploymentName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. 
+func (client DeploymentsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) CreateOrUpdateSender(req *http.Request) (future DeploymentsCreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CreateOrUpdateResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateAtSubscriptionScope you can provide the template and parameters directly in the request or link to +// JSON files. +// Parameters: +// deploymentName - the name of the deployment. +// parameters - additional parameters supplied to the operation. 
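+//
+// The call returns a long-running future. A minimal sketch, assuming
+// templateMap is a map[string]interface{} parsed from a hypothetical template
+// document:
+//
+//	future, err := client.CreateOrUpdateAtSubscriptionScope(ctx, "myDeployment", Deployment{
+//		Properties: &DeploymentProperties{Mode: Incremental, Template: templateMap},
+//	})
+//	if err == nil {
+//		err = future.WaitForCompletionRef(ctx, client.Client) // poll until done
+//	}
+//	if err == nil {
+//		extended, _ := future.Result(client) // the final DeploymentExtended
+//		fmt.Println(*extended.Name)
+//	}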
+func (client DeploymentsClient) CreateOrUpdateAtSubscriptionScope(ctx context.Context, deploymentName string, parameters Deployment) (result DeploymentsCreateOrUpdateAtSubscriptionScopeFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.CreateOrUpdateAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.ParametersLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ParametersLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "CreateOrUpdateAtSubscriptionScope", err.Error()) + } + + req, err := client.CreateOrUpdateAtSubscriptionScopePreparer(ctx, deploymentName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdateAtSubscriptionScope", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateAtSubscriptionScopeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "CreateOrUpdateAtSubscriptionScope", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdateAtSubscriptionScopePreparer prepares the CreateOrUpdateAtSubscriptionScope request. +func (client DeploymentsClient) CreateOrUpdateAtSubscriptionScopePreparer(ctx context.Context, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateAtSubscriptionScopeSender sends the CreateOrUpdateAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) CreateOrUpdateAtSubscriptionScopeSender(req *http.Request) (future DeploymentsCreateOrUpdateAtSubscriptionScopeFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateAtSubscriptionScopeResponder handles the response to the CreateOrUpdateAtSubscriptionScope request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) CreateOrUpdateAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete a template deployment that is currently running cannot be deleted. Deleting a template deployment removes the +// associated deployment operations. Deleting a template deployment does not affect the state of the resource group. +// This is an asynchronous operation that returns a status of 202 until the template deployment is successfully +// deleted. The Location response header contains the URI that is used to obtain the status of the process. While the +// process is running, a call to the URI in the Location header returns a status of 202. When the process finishes, the +// URI in the Location header returns a status of 204 on success. If the asynchronous request failed, the URI in the +// Location header returns an error-level status code. +// Parameters: +// resourceGroupName - the name of the resource group with the deployment to delete. The name is case +// insensitive. +// deploymentName - the name of the deployment to delete. 
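+//
+// A minimal sketch of driving the asynchronous protocol described above; the
+// returned future polls the Location header until the deletion finishes:
+//
+//	future, err := client.Delete(ctx, "myGroup", "myDeployment")
+//	if err == nil {
+//		err = future.WaitForCompletionRef(ctx, client.Client)
+//	}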
+func (client DeploymentsClient) Delete(ctx context.Context, resourceGroupName string, deploymentName string) (result DeploymentsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client DeploymentsClient) DeletePreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) DeleteSender(req *http.Request) (future DeploymentsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteAtSubscriptionScope a template deployment that is currently running cannot be deleted. Deleting a template +// deployment removes the associated deployment operations. This is an asynchronous operation that returns a status of +// 202 until the template deployment is successfully deleted. The Location response header contains the URI that is +// used to obtain the status of the process. While the process is running, a call to the URI in the Location header +// returns a status of 202. When the process finishes, the URI in the Location header returns a status of 204 on +// success. If the asynchronous request failed, the URI in the Location header returns an error-level status code. +// Parameters: +// deploymentName - the name of the deployment to delete. +func (client DeploymentsClient) DeleteAtSubscriptionScope(ctx context.Context, deploymentName string) (result DeploymentsDeleteAtSubscriptionScopeFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.DeleteAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "DeleteAtSubscriptionScope", err.Error()) + } + + req, err := client.DeleteAtSubscriptionScopePreparer(ctx, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "DeleteAtSubscriptionScope", nil, "Failure preparing request") + return + } + + result, err = client.DeleteAtSubscriptionScopeSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "DeleteAtSubscriptionScope", result.Response(), "Failure sending request") + return + } + + return +} + +// DeleteAtSubscriptionScopePreparer prepares the DeleteAtSubscriptionScope request. +func (client DeploymentsClient) DeleteAtSubscriptionScopePreparer(ctx context.Context, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteAtSubscriptionScopeSender sends the DeleteAtSubscriptionScope request. 
The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) DeleteAtSubscriptionScopeSender(req *http.Request) (future DeploymentsDeleteAtSubscriptionScopeFuture, err error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	var resp *http.Response
+	resp, err = autorest.SendWithSender(client, req, sd...)
+	if err != nil {
+		return
+	}
+	future.Future, err = azure.NewFutureFromResponse(resp)
+	return
+}
+
+// DeleteAtSubscriptionScopeResponder handles the response to the DeleteAtSubscriptionScope request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) DeleteAtSubscriptionScopeResponder(resp *http.Response) (result autorest.Response, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent),
+		autorest.ByClosing())
+	result.Response = resp
+	return
+}
+
+// ExportTemplate exports the template used for the specified deployment.
+// Parameters:
+// resourceGroupName - the name of the resource group. The name is case insensitive.
+// deploymentName - the name of the deployment from which to get the template.
+func (client DeploymentsClient) ExportTemplate(ctx context.Context, resourceGroupName string, deploymentName string) (result DeploymentExportResult, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ExportTemplate")
+		defer func() {
+			sc := -1
+			if result.Response.Response != nil {
+				sc = result.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	if err := validation.Validate([]validation.Validation{
+		{TargetValue: resourceGroupName,
+			Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}},
+		{TargetValue: deploymentName,
+			Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil},
+				{Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil},
+				{Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil {
+		return result, validation.NewError("resources.DeploymentsClient", "ExportTemplate", err.Error())
+	}
+
+	req, err := client.ExportTemplatePreparer(ctx, resourceGroupName, deploymentName)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ExportTemplateSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", resp, "Failure sending request")
+		return
+	}
+
+	result, err = client.ExportTemplateResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplate", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ExportTemplatePreparer prepares the ExportTemplate request.
+func (client DeploymentsClient) ExportTemplatePreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"deploymentName":    autorest.Encode("path", deploymentName),
+		"resourceGroupName": autorest.Encode("path", resourceGroupName),
+		"subscriptionId":    autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2019-03-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsPost(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ExportTemplateSender sends the ExportTemplate request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// ExportTemplateResponder handles the response to the ExportTemplate request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) ExportTemplateResponder(resp *http.Response) (result DeploymentExportResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ExportTemplateAtSubscriptionScope exports the template used for the specified deployment.
+// Parameters:
+// deploymentName - the name of the deployment from which to get the template.
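+//
+// A minimal sketch; the exported template arrives as an untyped JSON document
+// in the Template field of the result, with a hypothetical deployment name:
+//
+//	res, err := client.ExportTemplateAtSubscriptionScope(ctx, "myDeployment")
+//	if err == nil {
+//		out, _ := json.MarshalIndent(res.Template, "", "  ")
+//		fmt.Println(string(out))
+//	}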
+func (client DeploymentsClient) ExportTemplateAtSubscriptionScope(ctx context.Context, deploymentName string) (result DeploymentExportResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ExportTemplateAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "ExportTemplateAtSubscriptionScope", err.Error()) + } + + req, err := client.ExportTemplateAtSubscriptionScopePreparer(ctx, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplateAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.ExportTemplateAtSubscriptionScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplateAtSubscriptionScope", resp, "Failure sending request") + return + } + + result, err = client.ExportTemplateAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ExportTemplateAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// ExportTemplateAtSubscriptionScopePreparer prepares the ExportTemplateAtSubscriptionScope request. +func (client DeploymentsClient) ExportTemplateAtSubscriptionScopePreparer(ctx context.Context, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/exportTemplate", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportTemplateAtSubscriptionScopeSender sends the ExportTemplateAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ExportTemplateAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ExportTemplateAtSubscriptionScopeResponder handles the response to the ExportTemplateAtSubscriptionScope request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) ExportTemplateAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentExportResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets a deployment. +// Parameters: +// resourceGroupName - the name of the resource group. The name is case insensitive. +// deploymentName - the name of the deployment to get. +func (client DeploymentsClient) Get(ctx context.Context, resourceGroupName string, deploymentName string) (result DeploymentExtended, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client DeploymentsClient) GetPreparer(ctx context.Context, resourceGroupName string, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) GetResponder(resp *http.Response) (result DeploymentExtended, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetAtSubscriptionScope gets a deployment. +// Parameters: +// deploymentName - the name of the deployment to get. +func (client DeploymentsClient) GetAtSubscriptionScope(ctx context.Context, deploymentName string) (result DeploymentExtended, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.GetAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "GetAtSubscriptionScope", err.Error()) + } + + req, err := client.GetAtSubscriptionScopePreparer(ctx, deploymentName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "GetAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.GetAtSubscriptionScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "GetAtSubscriptionScope", resp, "Failure sending request") + return + } + + result, err = client.GetAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "GetAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// GetAtSubscriptionScopePreparer prepares the GetAtSubscriptionScope request. +func (client DeploymentsClient) GetAtSubscriptionScopePreparer(ctx context.Context, deploymentName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetAtSubscriptionScopeSender sends the GetAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) GetAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// GetAtSubscriptionScopeResponder handles the response to the GetAtSubscriptionScope request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) GetAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentExtended, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// ListAtSubscriptionScope gets all the deployments for a subscription.
+// Parameters:
+// filter - the filter to apply on the operation. For example, you can use $filter=provisioningState eq
+// '{state}'.
+// top - the number of results to get. If null is passed, returns all deployments.
+func (client DeploymentsClient) ListAtSubscriptionScope(ctx context.Context, filter string, top *int32) (result DeploymentListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ListAtSubscriptionScope")
+		defer func() {
+			sc := -1
+			if result.dlr.Response.Response != nil {
+				sc = result.dlr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listAtSubscriptionScopeNextResults
+	req, err := client.ListAtSubscriptionScopePreparer(ctx, filter, top)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ListAtSubscriptionScope", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListAtSubscriptionScopeSender(req)
+	if err != nil {
+		result.dlr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ListAtSubscriptionScope", resp, "Failure sending request")
+		return
+	}
+
+	result.dlr, err = client.ListAtSubscriptionScopeResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ListAtSubscriptionScope", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListAtSubscriptionScopePreparer prepares the ListAtSubscriptionScope request.
+func (client DeploymentsClient) ListAtSubscriptionScopePreparer(ctx context.Context, filter string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2019-03-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListAtSubscriptionScopeSender sends the ListAtSubscriptionScope request. The method will close the
+// http.Response Body if it receives an error.
+func (client DeploymentsClient) ListAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListAtSubscriptionScopeResponder handles the response to the ListAtSubscriptionScope request. The method always
+// closes the http.Response Body.
+func (client DeploymentsClient) ListAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listAtSubscriptionScopeNextResults retrieves the next set of results, if any.
+func (client DeploymentsClient) listAtSubscriptionScopeNextResults(ctx context.Context, lastResults DeploymentListResult) (result DeploymentListResult, err error) {
+	req, err := lastResults.deploymentListResultPreparer(ctx)
+	if err != nil {
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listAtSubscriptionScopeNextResults", nil, "Failure preparing next results request")
+	}
+	if req == nil {
+		return
+	}
+	resp, err := client.ListAtSubscriptionScopeSender(req)
+	if err != nil {
+		result.Response = autorest.Response{Response: resp}
+		return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listAtSubscriptionScopeNextResults", resp, "Failure sending next results request")
+	}
+	result, err = client.ListAtSubscriptionScopeResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listAtSubscriptionScopeNextResults", resp, "Failure responding to next results request")
+	}
+	return
+}
+
+// ListAtSubscriptionScopeComplete enumerates all values, automatically crossing page boundaries as required.
+func (client DeploymentsClient) ListAtSubscriptionScopeComplete(ctx context.Context, filter string, top *int32) (result DeploymentListResultIterator, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ListAtSubscriptionScope")
+		defer func() {
+			sc := -1
+			if result.Response().Response.Response != nil {
+				sc = result.page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.page, err = client.ListAtSubscriptionScope(ctx, filter, top)
+	return
+}
+
+// ListByResourceGroup gets all the deployments for a resource group.
+// Parameters:
+// resourceGroupName - the name of the resource group with the deployments to get. The name is case
+// insensitive.
+// filter - the filter to apply on the operation. For example, you can use $filter=provisioningState eq
+// '{state}'.
+// top - the number of results to get. If null is passed, returns all deployments.
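+//
+// A minimal sketch of page-level iteration (ListByResourceGroupComplete
+// flattens the pages instead); the group name and filter are hypothetical:
+//
+//	page, err := client.ListByResourceGroup(ctx, "myGroup", "provisioningState eq 'Failed'", nil)
+//	for err == nil && page.NotDone() {
+//		for _, d := range page.Values() {
+//			fmt.Println(*d.Name)
+//		}
+//		err = page.NextWithContext(ctx) // advance to the next page
+//	}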
+func (client DeploymentsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string, top *int32) (result DeploymentListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.dlr.Response.Response != nil { + sc = result.dlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.dlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.dlr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client DeploymentsClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. 
+func (client DeploymentsClient) ListByResourceGroupResponder(resp *http.Response) (result DeploymentListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client DeploymentsClient) listByResourceGroupNextResults(ctx context.Context, lastResults DeploymentListResult) (result DeploymentListResult, err error) { + req, err := lastResults.deploymentListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client DeploymentsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, filter string, top *int32) (result DeploymentListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, filter, top) + return +} + +// Validate validates whether the specified template is syntactically correct and will be accepted by Azure Resource +// Manager.. +// Parameters: +// resourceGroupName - the name of the resource group the template will be deployed to. The name is case +// insensitive. +// deploymentName - the name of the deployment. +// parameters - parameters to validate. 
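+//
+// Editorial sketch, not part of the generated file: "client" and "tmpl" are
+// hypothetical (tmpl would hold an unmarshalled ARM template). Note that the
+// responder treats 400 as a valid status, so validation failures come back
+// in the result body rather than as a transport error.
+//
+//    dep := Deployment{
+//        Properties: &DeploymentProperties{
+//            Template: tmpl,
+//            Mode:     Incremental,
+//        },
+//    }
+//    res, err := client.Validate(ctx, "my-rg", "my-deployment", dep)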
+func (client DeploymentsClient) Validate(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (result DeploymentValidateResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.Validate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.ParametersLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ParametersLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "Validate", err.Error()) + } + + req, err := client.ValidatePreparer(ctx, resourceGroupName, deploymentName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", nil, "Failure preparing request") + return + } + + resp, err := client.ValidateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", resp, "Failure sending request") + return + } + + result, err = client.ValidateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "Validate", resp, "Failure responding to request") + } + + return +} + +// ValidatePreparer prepares the Validate request. 
+func (client DeploymentsClient) ValidatePreparer(ctx context.Context, resourceGroupName string, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}/validate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ValidateSender sends the Validate request. The method will close the +// http.Response Body if it receives an error. +func (client DeploymentsClient) ValidateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ValidateResponder handles the response to the Validate request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ValidateResponder(resp *http.Response) (result DeploymentValidateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ValidateAtSubscriptionScope validates whether the specified template is syntactically correct and will be accepted +// by Azure Resource Manager.. +// Parameters: +// deploymentName - the name of the deployment. +// parameters - parameters to validate. 
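+//
+// Editorial sketch, not part of the generated file; "client" and "tmpl" are
+// hypothetical. The call mirrors Validate above but takes no resource group,
+// and the Deployment value can carry its own Location at subscription scope.
+//
+//    loc := "westeurope"
+//    dep := Deployment{
+//        Location:   &loc,
+//        Properties: &DeploymentProperties{Template: tmpl, Mode: Incremental},
+//    }
+//    res, err := client.ValidateAtSubscriptionScope(ctx, "my-deployment", dep)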
+func (client DeploymentsClient) ValidateAtSubscriptionScope(ctx context.Context, deploymentName string, parameters Deployment) (result DeploymentValidateResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentsClient.ValidateAtSubscriptionScope") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: deploymentName, + Constraints: []validation.Constraint{{Target: "deploymentName", Name: validation.MaxLength, Rule: 64, Chain: nil}, + {Target: "deploymentName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "deploymentName", Name: validation.Pattern, Rule: `^[-\w\._\(\)]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Properties", Name: validation.Null, Rule: true, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.TemplateLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + {Target: "parameters.Properties.ParametersLink", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Properties.ParametersLink.URI", Name: validation.Null, Rule: true, Chain: nil}}}, + }}}}}); err != nil { + return result, validation.NewError("resources.DeploymentsClient", "ValidateAtSubscriptionScope", err.Error()) + } + + req, err := client.ValidateAtSubscriptionScopePreparer(ctx, deploymentName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ValidateAtSubscriptionScope", nil, "Failure preparing request") + return + } + + resp, err := client.ValidateAtSubscriptionScopeSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ValidateAtSubscriptionScope", resp, "Failure sending request") + return + } + + result, err = client.ValidateAtSubscriptionScopeResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsClient", "ValidateAtSubscriptionScope", resp, "Failure responding to request") + } + + return +} + +// ValidateAtSubscriptionScopePreparer prepares the ValidateAtSubscriptionScope request. +func (client DeploymentsClient) ValidateAtSubscriptionScopePreparer(ctx context.Context, deploymentName string, parameters Deployment) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "deploymentName": autorest.Encode("path", deploymentName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Resources/deployments/{deploymentName}/validate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ValidateAtSubscriptionScopeSender sends the ValidateAtSubscriptionScope request. The method will close the +// http.Response Body if it receives an error. 
+func (client DeploymentsClient) ValidateAtSubscriptionScopeSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ValidateAtSubscriptionScopeResponder handles the response to the ValidateAtSubscriptionScope request. The method always +// closes the http.Response Body. +func (client DeploymentsClient) ValidateAtSubscriptionScopeResponder(resp *http.Response) (result DeploymentValidateResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusBadRequest), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/groups.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/groups.go new file mode 100644 index 00000000..a7446095 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/groups.go @@ -0,0 +1,676 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// GroupsClient is the provides operations for working with resources and resource groups. +type GroupsClient struct { + BaseClient +} + +// NewGroupsClient creates an instance of the GroupsClient client. +func NewGroupsClient(subscriptionID string) GroupsClient { + return NewGroupsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewGroupsClientWithBaseURI creates an instance of the GroupsClient client. +func NewGroupsClientWithBaseURI(baseURI string, subscriptionID string) GroupsClient { + return GroupsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether a resource group exists. +// Parameters: +// resourceGroupName - the name of the resource group to check. The name is case insensitive. 
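+//
+// Editorial sketch, not part of the generated file: the responder below
+// accepts 204 and 404 as non-error statuses, so existence is read off the
+// status code of the HEAD request. "client" is an assumed GroupsClient.
+//
+//    res, err := client.CheckExistence(ctx, "my-rg")
+//    exists := err == nil && res.Response != nil &&
+//        res.Response.StatusCode == http.StatusNoContent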
+func (client GroupsClient) CheckExistence(ctx context.Context, resourceGroupName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CheckExistence") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "CheckExistence", err.Error()) + } + + req, err := client.CheckExistencePreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. +func (client GroupsClient) CheckExistencePreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) CheckExistenceSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. +func (client GroupsClient) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate creates or updates a resource group. +// Parameters: +// resourceGroupName - the name of the resource group to create or update. Can include alphanumeric, +// underscore, parentheses, hyphen, period (except at end), and Unicode characters that match the allowed +// characters. 
+// parameters - parameters supplied to the create or update a resource group. +func (client GroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, parameters Group) (result Group, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Location", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client GroupsClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, parameters Group) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + parameters.ID = nil + parameters.Name = nil + parameters.Type = nil + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. 
+func (client GroupsClient) CreateOrUpdateResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete when you delete a resource group, all of its resources are also deleted. Deleting a resource group deletes +// all of its template deployments and currently stored operations. +// Parameters: +// resourceGroupName - the name of the resource group to delete. The name is case insensitive. +func (client GroupsClient) Delete(ctx context.Context, resourceGroupName string) (result GroupsDeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client GroupsClient) DeletePreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) DeleteSender(req *http.Request) (future GroupsDeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. 
+func (client GroupsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByClosing()) + result.Response = resp + return +} + +// ExportTemplate captures the specified resource group as a template. +// Parameters: +// resourceGroupName - the name of the resource group to export as a template. +// parameters - parameters for exporting the template. +func (client GroupsClient) ExportTemplate(ctx context.Context, resourceGroupName string, parameters ExportTemplateRequest) (result GroupExportResult, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.ExportTemplate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "ExportTemplate", err.Error()) + } + + req, err := client.ExportTemplatePreparer(ctx, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", nil, "Failure preparing request") + return + } + + resp, err := client.ExportTemplateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure sending request") + return + } + + result, err = client.ExportTemplateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "ExportTemplate", resp, "Failure responding to request") + } + + return +} + +// ExportTemplatePreparer prepares the ExportTemplate request. +func (client GroupsClient) ExportTemplatePreparer(ctx context.Context, resourceGroupName string, parameters ExportTemplateRequest) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/exportTemplate", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ExportTemplateSender sends the ExportTemplate request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) ExportTemplateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) 
+} + +// ExportTemplateResponder handles the response to the ExportTemplate request. The method always +// closes the http.Response Body. +func (client GroupsClient) ExportTemplateResponder(resp *http.Response) (result GroupExportResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Get gets a resource group. +// Parameters: +// resourceGroupName - the name of the resource group to get. The name is case insensitive. +func (client GroupsClient) Get(ctx context.Context, resourceGroupName string) (result Group, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client GroupsClient) GetPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
+func (client GroupsClient) GetResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all the resource groups for a subscription. +// Parameters: +// filter - the filter to apply on the operation.
+// You can filter by tag names and values. For example,
+// to filter for a tag name and value, use $filter=tagName eq 'tag1' and tagValue eq 'Value1'
+// top - the number of results to return. If null is passed, returns all resource groups.
+func (client GroupsClient) List(ctx context.Context, filter string, top *int32) (result GroupListResultPage, err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.List")
+		defer func() {
+			sc := -1
+			if result.glr.Response.Response != nil {
+				sc = result.glr.Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	result.fn = client.listNextResults
+	req, err := client.ListPreparer(ctx, filter, top)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", nil, "Failure preparing request")
+		return
+	}
+
+	resp, err := client.ListSender(req)
+	if err != nil {
+		result.glr.Response = autorest.Response{Response: resp}
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure sending request")
+		return
+	}
+
+	result.glr, err = client.ListResponder(resp)
+	if err != nil {
+		err = autorest.NewErrorWithError(err, "resources.GroupsClient", "List", resp, "Failure responding to request")
+	}
+
+	return
+}
+
+// ListPreparer prepares the List request.
+func (client GroupsClient) ListPreparer(ctx context.Context, filter string, top *int32) (*http.Request, error) {
+	pathParameters := map[string]interface{}{
+		"subscriptionId": autorest.Encode("path", client.SubscriptionID),
+	}
+
+	const APIVersion = "2019-03-01"
+	queryParameters := map[string]interface{}{
+		"api-version": APIVersion,
+	}
+	if len(filter) > 0 {
+		queryParameters["$filter"] = autorest.Encode("query", filter)
+	}
+	if top != nil {
+		queryParameters["$top"] = autorest.Encode("query", *top)
+	}
+
+	preparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL(client.BaseURI),
+		autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups", pathParameters),
+		autorest.WithQueryParameters(queryParameters))
+	return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// ListSender sends the List request. The method will close the
+// http.Response Body if it receives an error.
+func (client GroupsClient) ListSender(req *http.Request) (*http.Response, error) {
+	sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client))
+	return autorest.SendWithSender(client, req, sd...)
+}
+
+// ListResponder handles the response to the List request. The method always
+// closes the http.Response Body.
+func (client GroupsClient) ListResponder(resp *http.Response) (result GroupListResult, err error) {
+	err = autorest.Respond(
+		resp,
+		client.ByInspecting(),
+		azure.WithErrorUnlessStatusCode(http.StatusOK),
+		autorest.ByUnmarshallingJSON(&result),
+		autorest.ByClosing())
+	result.Response = autorest.Response{Response: resp}
+	return
+}
+
+// listNextResults retrieves the next set of results, if any.
+func (client GroupsClient) listNextResults(ctx context.Context, lastResults GroupListResult) (result GroupListResult, err error) { + req, err := lastResults.groupListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client GroupsClient) ListComplete(ctx context.Context, filter string, top *int32) (result GroupListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, top) + return +} + +// Update resource groups can be updated through a simple PATCH operation to a group address. The format of the request +// is the same as that for creating a resource group. If a field is unspecified, the current value is retained. +// Parameters: +// resourceGroupName - the name of the resource group to update. The name is case insensitive. +// parameters - parameters supplied to update a resource group. +func (client GroupsClient) Update(ctx context.Context, resourceGroupName string, parameters GroupPatchable) (result Group, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupsClient.Update") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.GroupsClient", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Update", nil, "Failure preparing request") + return + } + + resp, err := client.UpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Update", resp, "Failure sending request") + return + } + + result, err = client.UpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsClient", "Update", resp, "Failure responding to request") + } + + return +} + +// UpdatePreparer prepares the Update request. 
+func (client GroupsClient) UpdatePreparer(ctx context.Context, resourceGroupName string, parameters GroupPatchable) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client GroupsClient) UpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UpdateResponder handles the response to the Update request. The method always +// closes the http.Response Body. +func (client GroupsClient) UpdateResponder(resp *http.Response) (result Group, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/models.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/models.go new file mode 100644 index 00000000..194aabca --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/models.go @@ -0,0 +1,2051 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "encoding/json" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/to" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// The package's fully qualified name. +const fqdn = "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources" + +// DeploymentMode enumerates the values for deployment mode. +type DeploymentMode string + +const ( + // Complete ... 
+ Complete DeploymentMode = "Complete" + // Incremental ... + Incremental DeploymentMode = "Incremental" +) + +// PossibleDeploymentModeValues returns an array of possible values for the DeploymentMode const type. +func PossibleDeploymentModeValues() []DeploymentMode { + return []DeploymentMode{Complete, Incremental} +} + +// OnErrorDeploymentType enumerates the values for on error deployment type. +type OnErrorDeploymentType string + +const ( + // LastSuccessful ... + LastSuccessful OnErrorDeploymentType = "LastSuccessful" + // SpecificDeployment ... + SpecificDeployment OnErrorDeploymentType = "SpecificDeployment" +) + +// PossibleOnErrorDeploymentTypeValues returns an array of possible values for the OnErrorDeploymentType const type. +func PossibleOnErrorDeploymentTypeValues() []OnErrorDeploymentType { + return []OnErrorDeploymentType{LastSuccessful, SpecificDeployment} +} + +// ResourceIdentityType enumerates the values for resource identity type. +type ResourceIdentityType string + +const ( + // None ... + None ResourceIdentityType = "None" + // SystemAssigned ... + SystemAssigned ResourceIdentityType = "SystemAssigned" + // SystemAssignedUserAssigned ... + SystemAssignedUserAssigned ResourceIdentityType = "SystemAssigned, UserAssigned" + // UserAssigned ... + UserAssigned ResourceIdentityType = "UserAssigned" +) + +// PossibleResourceIdentityTypeValues returns an array of possible values for the ResourceIdentityType const type. +func PossibleResourceIdentityTypeValues() []ResourceIdentityType { + return []ResourceIdentityType{None, SystemAssigned, SystemAssignedUserAssigned, UserAssigned} +} + +// AliasPathType the type of the paths for alias. +type AliasPathType struct { + // Path - The path of an alias. + Path *string `json:"path,omitempty"` + // APIVersions - The API versions. + APIVersions *[]string `json:"apiVersions,omitempty"` +} + +// AliasType the alias type. +type AliasType struct { + // Name - The alias name. + Name *string `json:"name,omitempty"` + // Paths - The paths for an alias. + Paths *[]AliasPathType `json:"paths,omitempty"` +} + +// BasicDependency deployment dependency information. +type BasicDependency struct { + // ID - The ID of the dependency. + ID *string `json:"id,omitempty"` + // ResourceType - The dependency resource type. + ResourceType *string `json:"resourceType,omitempty"` + // ResourceName - The dependency resource name. + ResourceName *string `json:"resourceName,omitempty"` +} + +// CreateOrUpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CreateOrUpdateByIDFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
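+//
+// Editorial sketch, not part of the generated file: "future" would come from
+// a long-running call such as Client.CreateOrUpdateByID, and "client" is an
+// assumed, authorized Client. WaitForCompletionRef is provided by the
+// embedded azure.Future.
+//
+//    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//        return err
+//    }
+//    gr, err := future.Result(client)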
+func (future *CreateOrUpdateByIDFuture) Result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.CreateOrUpdateByIDFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.CreateOrUpdateByIDResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + +// CreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type CreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *CreateOrUpdateFuture) Result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.CreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.CreateOrUpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.CreateOrUpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + +// DebugSetting ... +type DebugSetting struct { + // DetailLevel - Specifies the type of information to log for debugging. The permitted values are none, requestContent, responseContent, or both requestContent and responseContent separated by a comma. The default is none. When setting this value, carefully consider the type of information you are passing in during deployment. By logging information about the request or response, you could potentially expose sensitive data that is retrieved through the deployment operations. + DetailLevel *string `json:"detailLevel,omitempty"` +} + +// DeleteByIDFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DeleteByIDFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
+func (future *DeleteByIDFuture) Result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeleteByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeleteByIDFuture") + return + } + ar.Response = future.Response() + return +} + +// DeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type DeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DeleteFuture) Result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// Dependency deployment dependency information. +type Dependency struct { + // DependsOn - The list of dependencies. + DependsOn *[]BasicDependency `json:"dependsOn,omitempty"` + // ID - The ID of the dependency. + ID *string `json:"id,omitempty"` + // ResourceType - The dependency resource type. + ResourceType *string `json:"resourceType,omitempty"` + // ResourceName - The dependency resource name. + ResourceName *string `json:"resourceName,omitempty"` +} + +// Deployment deployment operation parameters. +type Deployment struct { + // Location - The location to store the deployment data. + Location *string `json:"location,omitempty"` + // Properties - The deployment properties. + Properties *DeploymentProperties `json:"properties,omitempty"` +} + +// DeploymentExportResult the deployment export result. +type DeploymentExportResult struct { + autorest.Response `json:"-"` + // Template - The template content. + Template interface{} `json:"template,omitempty"` +} + +// DeploymentExtended deployment information. +type DeploymentExtended struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The ID of the deployment. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the deployment. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the deployment. + Type *string `json:"type,omitempty"` + // Location - the location of the deployment. + Location *string `json:"location,omitempty"` + // Properties - Deployment properties. + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// DeploymentExtendedFilter deployment filter. +type DeploymentExtendedFilter struct { + // ProvisioningState - The provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// DeploymentListResult list of deployments. +type DeploymentListResult struct { + autorest.Response `json:"-"` + // Value - An array of deployments. + Value *[]DeploymentExtended `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to use for getting the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentListResultIterator provides access to a complete listing of DeploymentExtended values. +type DeploymentListResultIterator struct { + i int + page DeploymentListResultPage +} + +// NextWithContext advances to the next value. 
+// If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *DeploymentListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *DeploymentListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter DeploymentListResultIterator) NotDone() bool {
+	return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter DeploymentListResultIterator) Response() DeploymentListResult {
+	return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter DeploymentListResultIterator) Value() DeploymentExtended {
+	if !iter.page.NotDone() {
+		return DeploymentExtended{}
+	}
+	return iter.page.Values()[iter.i]
+}
+
+// Creates a new instance of the DeploymentListResultIterator type.
+func NewDeploymentListResultIterator(page DeploymentListResultPage) DeploymentListResultIterator {
+	return DeploymentListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (dlr DeploymentListResult) IsEmpty() bool {
+	return dlr.Value == nil || len(*dlr.Value) == 0
+}
+
+// deploymentListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (dlr DeploymentListResult) deploymentListResultPreparer(ctx context.Context) (*http.Request, error) {
+	if dlr.NextLink == nil || len(to.String(dlr.NextLink)) < 1 {
+		return nil, nil
+	}
+	return autorest.Prepare((&http.Request{}).WithContext(ctx),
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(dlr.NextLink)))
+}
+
+// DeploymentListResultPage contains a page of DeploymentExtended values.
+type DeploymentListResultPage struct {
+	fn  func(context.Context, DeploymentListResult) (DeploymentListResult, error)
+	dlr DeploymentListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *DeploymentListResultPage) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentListResultPage.NextWithContext")
+		defer func() {
+			sc := -1
+			if page.Response().Response.Response != nil {
+				sc = page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	next, err := page.fn(ctx, page.dlr)
+	if err != nil {
+		return err
+	}
+	page.dlr = next
+	return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead. +func (page *DeploymentListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeploymentListResultPage) NotDone() bool { + return !page.dlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeploymentListResultPage) Response() DeploymentListResult { + return page.dlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeploymentListResultPage) Values() []DeploymentExtended { + if page.dlr.IsEmpty() { + return nil + } + return *page.dlr.Value +} + +// Creates a new instance of the DeploymentListResultPage type. +func NewDeploymentListResultPage(getNextPage func(context.Context, DeploymentListResult) (DeploymentListResult, error)) DeploymentListResultPage { + return DeploymentListResultPage{fn: getNextPage} +} + +// DeploymentOperation deployment operation information. +type DeploymentOperation struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; Full deployment operation ID. + ID *string `json:"id,omitempty"` + // OperationID - READ-ONLY; Deployment operation ID. + OperationID *string `json:"operationId,omitempty"` + // Properties - Deployment properties. + Properties *DeploymentOperationProperties `json:"properties,omitempty"` +} + +// DeploymentOperationProperties deployment operation properties. +type DeploymentOperationProperties struct { + // ProvisioningState - READ-ONLY; The state of the provisioning. + ProvisioningState *string `json:"provisioningState,omitempty"` + // Timestamp - READ-ONLY; The date and time of the operation. + Timestamp *date.Time `json:"timestamp,omitempty"` + // ServiceRequestID - READ-ONLY; Deployment operation service request id. + ServiceRequestID *string `json:"serviceRequestId,omitempty"` + // StatusCode - READ-ONLY; Operation status code. + StatusCode *string `json:"statusCode,omitempty"` + // StatusMessage - READ-ONLY; Operation status message. + StatusMessage interface{} `json:"statusMessage,omitempty"` + // TargetResource - READ-ONLY; The target resource. + TargetResource *TargetResource `json:"targetResource,omitempty"` + // Request - READ-ONLY; The HTTP request message. + Request *HTTPMessage `json:"request,omitempty"` + // Response - READ-ONLY; The HTTP response message. + Response *HTTPMessage `json:"response,omitempty"` +} + +// DeploymentOperationsListResult list of deployment operations. +type DeploymentOperationsListResult struct { + autorest.Response `json:"-"` + // Value - An array of deployment operations. + Value *[]DeploymentOperation `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to use for getting the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// DeploymentOperationsListResultIterator provides access to a complete listing of DeploymentOperation +// values. +type DeploymentOperationsListResultIterator struct { + i int + page DeploymentOperationsListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. 
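+//
+// A minimal traversal sketch (illustrative only; "ctx" and a populated
+// iterator are assumed to come from a DeploymentOperationsClient list call):
+//
+//	for iter.NotDone() {
+//		op := iter.Value()
+//		_ = op // use the DeploymentOperation
+//		if err := iter.NextWithContext(ctx); err != nil {
+//			break
+//		}
+//	}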
+func (iter *DeploymentOperationsListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *DeploymentOperationsListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter DeploymentOperationsListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter DeploymentOperationsListResultIterator) Response() DeploymentOperationsListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter DeploymentOperationsListResultIterator) Value() DeploymentOperation { + if !iter.page.NotDone() { + return DeploymentOperation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the DeploymentOperationsListResultIterator type. +func NewDeploymentOperationsListResultIterator(page DeploymentOperationsListResultPage) DeploymentOperationsListResultIterator { + return DeploymentOperationsListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (dolr DeploymentOperationsListResult) IsEmpty() bool { + return dolr.Value == nil || len(*dolr.Value) == 0 +} + +// deploymentOperationsListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (dolr DeploymentOperationsListResult) deploymentOperationsListResultPreparer(ctx context.Context) (*http.Request, error) { + if dolr.NextLink == nil || len(to.String(dolr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(dolr.NextLink))) +} + +// DeploymentOperationsListResultPage contains a page of DeploymentOperation values. +type DeploymentOperationsListResultPage struct { + fn func(context.Context, DeploymentOperationsListResult) (DeploymentOperationsListResult, error) + dolr DeploymentOperationsListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *DeploymentOperationsListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/DeploymentOperationsListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.dolr) + if err != nil { + return err + } + page.dolr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *DeploymentOperationsListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page DeploymentOperationsListResultPage) NotDone() bool { + return !page.dolr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page DeploymentOperationsListResultPage) Response() DeploymentOperationsListResult { + return page.dolr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page DeploymentOperationsListResultPage) Values() []DeploymentOperation { + if page.dolr.IsEmpty() { + return nil + } + return *page.dolr.Value +} + +// Creates a new instance of the DeploymentOperationsListResultPage type. +func NewDeploymentOperationsListResultPage(getNextPage func(context.Context, DeploymentOperationsListResult) (DeploymentOperationsListResult, error)) DeploymentOperationsListResultPage { + return DeploymentOperationsListResultPage{fn: getNextPage} +} + +// DeploymentProperties deployment properties. +type DeploymentProperties struct { + // Template - The template content. You use this element when you want to pass the template syntax directly in the request rather than link to an existing template. It can be a JObject or well-formed JSON string. Use either the templateLink property or the template property, but not both. + Template interface{} `json:"template,omitempty"` + // TemplateLink - The URI of the template. Use either the templateLink property or the template property, but not both. + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + // Parameters - Name and value pairs that define the deployment parameters for the template. You use this element when you want to provide the parameter values directly in the request rather than link to an existing parameter file. Use either the parametersLink property or the parameters property, but not both. It can be a JObject or a well formed JSON string. + Parameters interface{} `json:"parameters,omitempty"` + // ParametersLink - The URI of parameters file. You use this element to link to an existing parameters file. Use either the parametersLink property or the parameters property, but not both. + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + // Mode - The mode that is used to deploy resources. This value can be either Incremental or Complete. In Incremental mode, resources are deployed without deleting existing resources that are not included in the template. In Complete mode, resources are deployed and existing resources in the resource group that are not included in the template are deleted. Be careful when using Complete mode as you may unintentionally delete resources. 
Possible values include: 'Incremental', 'Complete' + Mode DeploymentMode `json:"mode,omitempty"` + // DebugSetting - The debug setting of the deployment. + DebugSetting *DebugSetting `json:"debugSetting,omitempty"` + // OnErrorDeployment - The deployment on error behavior. + OnErrorDeployment *OnErrorDeployment `json:"onErrorDeployment,omitempty"` +} + +// DeploymentPropertiesExtended deployment properties with additional details. +type DeploymentPropertiesExtended struct { + // ProvisioningState - READ-ONLY; The state of the provisioning. + ProvisioningState *string `json:"provisioningState,omitempty"` + // CorrelationID - READ-ONLY; The correlation ID of the deployment. + CorrelationID *string `json:"correlationId,omitempty"` + // Timestamp - READ-ONLY; The timestamp of the template deployment. + Timestamp *date.Time `json:"timestamp,omitempty"` + // Outputs - Key/value pairs that represent deployment output. + Outputs interface{} `json:"outputs,omitempty"` + // Providers - The list of resource providers needed for the deployment. + Providers *[]Provider `json:"providers,omitempty"` + // Dependencies - The list of deployment dependencies. + Dependencies *[]Dependency `json:"dependencies,omitempty"` + // Template - The template content. Use only one of Template or TemplateLink. + Template interface{} `json:"template,omitempty"` + // TemplateLink - The URI referencing the template. Use only one of Template or TemplateLink. + TemplateLink *TemplateLink `json:"templateLink,omitempty"` + // Parameters - Deployment parameters. Use only one of Parameters or ParametersLink. + Parameters interface{} `json:"parameters,omitempty"` + // ParametersLink - The URI referencing the parameters. Use only one of Parameters or ParametersLink. + ParametersLink *ParametersLink `json:"parametersLink,omitempty"` + // Mode - The deployment mode. Possible values are Incremental and Complete. Possible values include: 'Incremental', 'Complete' + Mode DeploymentMode `json:"mode,omitempty"` + // DebugSetting - The debug setting of the deployment. + DebugSetting *DebugSetting `json:"debugSetting,omitempty"` + // OnErrorDeployment - The deployment on error behavior. + OnErrorDeployment *OnErrorDeploymentExtended `json:"onErrorDeployment,omitempty"` +} + +// DeploymentsCreateOrUpdateAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the +// results of a long-running operation. +type DeploymentsCreateOrUpdateAtSubscriptionScopeFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
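+//
+// An end-to-end sketch (illustrative only; the client variable, template
+// value, and deployment name are assumptions): build a Deployment in
+// Incremental mode, start the long-running call, block until it finishes,
+// then read the result:
+//
+//	dep := Deployment{
+//		Location: to.StringPtr("eastus"),
+//		Properties: &DeploymentProperties{
+//			Template: tmpl, // e.g. a map[string]interface{} or JSON string
+//			Mode:     Incremental,
+//		},
+//	}
+//	future, err := client.CreateOrUpdateAtSubscriptionScope(ctx, "my-deployment", dep)
+//	if err != nil {
+//		return err
+//	}
+//	if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
+//		return err
+//	}
+//	de, err := future.Result(client)
+//	_ = de // DeploymentExtended describing the finished deployment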
+func (future *DeploymentsCreateOrUpdateAtSubscriptionScopeFuture) Result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateAtSubscriptionScopeResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateAtSubscriptionScopeFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + +// DeploymentsCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DeploymentsCreateOrUpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DeploymentsCreateOrUpdateFuture) Result(client DeploymentsClient) (de DeploymentExtended, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsCreateOrUpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if de.Response.Response, err = future.GetResult(sender); err == nil && de.Response.Response.StatusCode != http.StatusNoContent { + de, err = client.CreateOrUpdateResponder(de.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsCreateOrUpdateFuture", "Result", de.Response.Response, "Failure responding to request") + } + } + return +} + +// DeploymentsDeleteAtSubscriptionScopeFuture an abstraction for monitoring and retrieving the results of a +// long-running operation. +type DeploymentsDeleteAtSubscriptionScopeFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DeploymentsDeleteAtSubscriptionScopeFuture) Result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteAtSubscriptionScopeFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteAtSubscriptionScopeFuture") + return + } + ar.Response = future.Response() + return +} + +// DeploymentsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. 
+type DeploymentsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *DeploymentsDeleteFuture) Result(client DeploymentsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.DeploymentsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.DeploymentsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// DeploymentValidateResult information from validate template deployment response. +type DeploymentValidateResult struct { + autorest.Response `json:"-"` + // Error - Validation error. + Error *ManagementErrorWithDetails `json:"error,omitempty"` + // Properties - The template deployment properties. + Properties *DeploymentPropertiesExtended `json:"properties,omitempty"` +} + +// ExportTemplateRequest export resource group template request parameters. +type ExportTemplateRequest struct { + // ResourcesProperty - The IDs of the resources to filter the export by. To export all resources, supply an array with single entry '*'. + ResourcesProperty *[]string `json:"resources,omitempty"` + // Options - The export template options. A CSV-formatted list containing zero or more of the following: 'IncludeParameterDefaultValue', 'IncludeComments', 'SkipResourceNameParameterization', 'SkipAllParameterization' + Options *string `json:"options,omitempty"` +} + +// GenericResource resource information. +type GenericResource struct { + autorest.Response `json:"-"` + // Plan - The plan of the resource. + Plan *Plan `json:"plan,omitempty"` + // Properties - The resource properties. + Properties interface{} `json:"properties,omitempty"` + // Kind - The kind of the resource. + Kind *string `json:"kind,omitempty"` + // ManagedBy - ID of the resource that manages this resource. + ManagedBy *string `json:"managedBy,omitempty"` + // Sku - The SKU of the resource. + Sku *Sku `json:"sku,omitempty"` + // Identity - The identity of the resource. + Identity *Identity `json:"identity,omitempty"` + // ID - READ-ONLY; Resource ID + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GenericResource. +func (gr GenericResource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gr.Plan != nil { + objectMap["plan"] = gr.Plan + } + if gr.Properties != nil { + objectMap["properties"] = gr.Properties + } + if gr.Kind != nil { + objectMap["kind"] = gr.Kind + } + if gr.ManagedBy != nil { + objectMap["managedBy"] = gr.ManagedBy + } + if gr.Sku != nil { + objectMap["sku"] = gr.Sku + } + if gr.Identity != nil { + objectMap["identity"] = gr.Identity + } + if gr.Location != nil { + objectMap["location"] = gr.Location + } + if gr.Tags != nil { + objectMap["tags"] = gr.Tags + } + return json.Marshal(objectMap) +} + +// GenericResourceFilter resource filter. +type GenericResourceFilter struct { + // ResourceType - The resource type. 
+ ResourceType *string `json:"resourceType,omitempty"` + // Tagname - The tag name. + Tagname *string `json:"tagname,omitempty"` + // Tagvalue - The tag value. + Tagvalue *string `json:"tagvalue,omitempty"` +} + +// Group resource group information. +type Group struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The ID of the resource group. + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; The name of the resource group. + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; The type of the resource group. + Type *string `json:"type,omitempty"` + Properties *GroupProperties `json:"properties,omitempty"` + // Location - The location of the resource group. It cannot be changed after the resource group has been created. It must be one of the supported Azure locations. + Location *string `json:"location,omitempty"` + // ManagedBy - The ID of the resource that manages this resource group. + ManagedBy *string `json:"managedBy,omitempty"` + // Tags - The tags attached to the resource group. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Group. +func (g Group) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if g.Properties != nil { + objectMap["properties"] = g.Properties + } + if g.Location != nil { + objectMap["location"] = g.Location + } + if g.ManagedBy != nil { + objectMap["managedBy"] = g.ManagedBy + } + if g.Tags != nil { + objectMap["tags"] = g.Tags + } + return json.Marshal(objectMap) +} + +// GroupExportResult resource group export result. +type GroupExportResult struct { + autorest.Response `json:"-"` + // Template - The template content. + Template interface{} `json:"template,omitempty"` + // Error - The error. + Error *ManagementErrorWithDetails `json:"error,omitempty"` +} + +// GroupFilter resource group filter. +type GroupFilter struct { + // TagName - The tag name. + TagName *string `json:"tagName,omitempty"` + // TagValue - The tag value. + TagValue *string `json:"tagValue,omitempty"` +} + +// GroupListResult list of resource groups. +type GroupListResult struct { + autorest.Response `json:"-"` + // Value - An array of resource groups. + Value *[]Group `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to use for getting the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// GroupListResultIterator provides access to a complete listing of Group values. +type GroupListResultIterator struct { + i int + page GroupListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *GroupListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. 
+func (iter *GroupListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter GroupListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter GroupListResultIterator) Response() GroupListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter GroupListResultIterator) Value() Group { + if !iter.page.NotDone() { + return Group{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the GroupListResultIterator type. +func NewGroupListResultIterator(page GroupListResultPage) GroupListResultIterator { + return GroupListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (glr GroupListResult) IsEmpty() bool { + return glr.Value == nil || len(*glr.Value) == 0 +} + +// groupListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (glr GroupListResult) groupListResultPreparer(ctx context.Context) (*http.Request, error) { + if glr.NextLink == nil || len(to.String(glr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(glr.NextLink))) +} + +// GroupListResultPage contains a page of Group values. +type GroupListResultPage struct { + fn func(context.Context, GroupListResult) (GroupListResult, error) + glr GroupListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *GroupListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/GroupListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.glr) + if err != nil { + return err + } + page.glr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *GroupListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page GroupListResultPage) NotDone() bool { + return !page.glr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page GroupListResultPage) Response() GroupListResult { + return page.glr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page GroupListResultPage) Values() []Group { + if page.glr.IsEmpty() { + return nil + } + return *page.glr.Value +} + +// Creates a new instance of the GroupListResultPage type. +func NewGroupListResultPage(getNextPage func(context.Context, GroupListResult) (GroupListResult, error)) GroupListResultPage { + return GroupListResultPage{fn: getNextPage} +} + +// GroupPatchable resource group information. 
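+//
+// A construction sketch (illustrative; to.StringPtr comes from the
+// github.com/Azure/go-autorest/autorest/to helpers this package already
+// uses) for a tags-only update of a resource group:
+//
+//	patch := GroupPatchable{
+//		Tags: map[string]*string{"env": to.StringPtr("staging")},
+//	}
+//	_ = patch // pass to GroupsClient.Update (name is an assumption)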
+type GroupPatchable struct { + // Name - The name of the resource group. + Name *string `json:"name,omitempty"` + Properties *GroupProperties `json:"properties,omitempty"` + // ManagedBy - The ID of the resource that manages this resource group. + ManagedBy *string `json:"managedBy,omitempty"` + // Tags - The tags attached to the resource group. + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for GroupPatchable. +func (gp GroupPatchable) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if gp.Name != nil { + objectMap["name"] = gp.Name + } + if gp.Properties != nil { + objectMap["properties"] = gp.Properties + } + if gp.ManagedBy != nil { + objectMap["managedBy"] = gp.ManagedBy + } + if gp.Tags != nil { + objectMap["tags"] = gp.Tags + } + return json.Marshal(objectMap) +} + +// GroupProperties the resource group properties. +type GroupProperties struct { + // ProvisioningState - READ-ONLY; The provisioning state. + ProvisioningState *string `json:"provisioningState,omitempty"` +} + +// GroupsDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type GroupsDeleteFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *GroupsDeleteFuture) Result(client GroupsClient) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.GroupsDeleteFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.GroupsDeleteFuture") + return + } + ar.Response = future.Response() + return +} + +// HTTPMessage HTTP message. +type HTTPMessage struct { + // Content - HTTP message content. + Content interface{} `json:"content,omitempty"` +} + +// Identity identity for the resource. +type Identity struct { + // PrincipalID - READ-ONLY; The principal ID of resource identity. + PrincipalID *string `json:"principalId,omitempty"` + // TenantID - READ-ONLY; The tenant ID of resource. + TenantID *string `json:"tenantId,omitempty"` + // Type - The identity type. Possible values include: 'SystemAssigned', 'UserAssigned', 'SystemAssignedUserAssigned', 'None' + Type ResourceIdentityType `json:"type,omitempty"` + // UserAssignedIdentities - The list of user identities associated with the resource. The user identity dictionary key references will be ARM resource ids in the form: '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'. + UserAssignedIdentities map[string]*IdentityUserAssignedIdentitiesValue `json:"userAssignedIdentities"` +} + +// MarshalJSON is the custom marshaler for Identity. +func (i Identity) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if i.Type != "" { + objectMap["type"] = i.Type + } + if i.UserAssignedIdentities != nil { + objectMap["userAssignedIdentities"] = i.UserAssignedIdentities + } + return json.Marshal(objectMap) +} + +// IdentityUserAssignedIdentitiesValue ... +type IdentityUserAssignedIdentitiesValue struct { + // PrincipalID - READ-ONLY; The principal id of user assigned identity. + PrincipalID *string `json:"principalId,omitempty"` + // ClientID - READ-ONLY; The client id of user assigned identity. 
+ ClientID *string `json:"clientId,omitempty"` +} + +// ListResult list of resource groups. +type ListResult struct { + autorest.Response `json:"-"` + // Value - An array of resources. + Value *[]GenericResource `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to use for getting the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ListResultIterator provides access to a complete listing of GenericResource values. +type ListResultIterator struct { + i int + page ListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ListResultIterator) Response() ListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ListResultIterator) Value() GenericResource { + if !iter.page.NotDone() { + return GenericResource{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ListResultIterator type. +func NewListResultIterator(page ListResultPage) ListResultIterator { + return ListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (lr ListResult) IsEmpty() bool { + return lr.Value == nil || len(*lr.Value) == 0 +} + +// listResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (lr ListResult) listResultPreparer(ctx context.Context) (*http.Request, error) { + if lr.NextLink == nil || len(to.String(lr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(lr.NextLink))) +} + +// ListResultPage contains a page of GenericResource values. +type ListResultPage struct { + fn func(context.Context, ListResult) (ListResult, error) + lr ListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
+func (page *ListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.lr) + if err != nil { + return err + } + page.lr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *ListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page ListResultPage) NotDone() bool { + return !page.lr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page ListResultPage) Response() ListResult { + return page.lr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page ListResultPage) Values() []GenericResource { + if page.lr.IsEmpty() { + return nil + } + return *page.lr.Value +} + +// Creates a new instance of the ListResultPage type. +func NewListResultPage(getNextPage func(context.Context, ListResult) (ListResult, error)) ListResultPage { + return ListResultPage{fn: getNextPage} +} + +// ManagementErrorWithDetails the detailed error message of resource management. +type ManagementErrorWithDetails struct { + // Code - READ-ONLY; The error code returned when exporting the template. + Code *string `json:"code,omitempty"` + // Message - READ-ONLY; The error message describing the export error. + Message *string `json:"message,omitempty"` + // Target - READ-ONLY; The target of the error. + Target *string `json:"target,omitempty"` + // Details - READ-ONLY; Validation error. + Details *[]ManagementErrorWithDetails `json:"details,omitempty"` +} + +// MoveInfo parameters of move resources. +type MoveInfo struct { + // ResourcesProperty - The IDs of the resources. + ResourcesProperty *[]string `json:"resources,omitempty"` + // TargetResourceGroup - The target resource group. + TargetResourceGroup *string `json:"targetResourceGroup,omitempty"` +} + +// MoveResourcesFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type MoveResourcesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *MoveResourcesFuture) Result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.MoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.MoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} + +// OnErrorDeployment deployment on error behavior. +type OnErrorDeployment struct { + // Type - The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment. Possible values include: 'LastSuccessful', 'SpecificDeployment' + Type OnErrorDeploymentType `json:"type,omitempty"` + // DeploymentName - The deployment to be used on error case. 
+	DeploymentName *string `json:"deploymentName,omitempty"`
+}
+
+// OnErrorDeploymentExtended deployment on error behavior with additional details.
+type OnErrorDeploymentExtended struct {
+	// ProvisioningState - READ-ONLY; The state of the provisioning for the on error deployment.
+	ProvisioningState *string `json:"provisioningState,omitempty"`
+	// Type - The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment. Possible values include: 'LastSuccessful', 'SpecificDeployment'
+	Type OnErrorDeploymentType `json:"type,omitempty"`
+	// DeploymentName - The deployment to be used on error case.
+	DeploymentName *string `json:"deploymentName,omitempty"`
+}
+
+// Operation Microsoft.Resources operation.
+type Operation struct {
+	// Name - Operation name: {provider}/{resource}/{operation}
+	Name *string `json:"name,omitempty"`
+	// Display - The object that represents the operation.
+	Display *OperationDisplay `json:"display,omitempty"`
+}
+
+// OperationDisplay the object that represents the operation.
+type OperationDisplay struct {
+	// Provider - Service provider: Microsoft.Resources
+	Provider *string `json:"provider,omitempty"`
+	// Resource - Resource on which the operation is performed: Profile, endpoint, etc.
+	Resource *string `json:"resource,omitempty"`
+	// Operation - Operation type: Read, write, delete, etc.
+	Operation *string `json:"operation,omitempty"`
+	// Description - Description of the operation.
+	Description *string `json:"description,omitempty"`
+}
+
+// OperationListResult result of the request to list Microsoft.Resources operations. It contains a list of
+// operations and a URL link to get the next set of results.
+type OperationListResult struct {
+	autorest.Response `json:"-"`
+	// Value - List of Microsoft.Resources operations.
+	Value *[]Operation `json:"value,omitempty"`
+	// NextLink - URL to get the next set of operation list results if there are any.
+	NextLink *string `json:"nextLink,omitempty"`
+}
+
+// OperationListResultIterator provides access to a complete listing of Operation values.
+type OperationListResultIterator struct {
+	i    int
+	page OperationListResultPage
+}
+
+// NextWithContext advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
+		defer func() {
+			sc := -1
+			if iter.Response().Response.Response != nil {
+				sc = iter.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	iter.i++
+	if iter.i < len(iter.page.Values()) {
+		return nil
+	}
+	err = iter.page.NextWithContext(ctx)
+	if err != nil {
+		iter.i--
+		return err
+	}
+	iter.i = 0
+	return nil
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationListResultIterator) Next() error {
+	return iter.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter OperationListResultIterator) NotDone() bool {
+	return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter OperationListResultIterator) Response() OperationListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter OperationListResultIterator) Value() Operation { + if !iter.page.NotDone() { + return Operation{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the OperationListResultIterator type. +func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator { + return OperationListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (olr OperationListResult) IsEmpty() bool { + return olr.Value == nil || len(*olr.Value) == 0 +} + +// operationListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) { + if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(olr.NextLink))) +} + +// OperationListResultPage contains a page of Operation values. +type OperationListResultPage struct { + fn func(context.Context, OperationListResult) (OperationListResult, error) + olr OperationListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.olr) + if err != nil { + return err + } + page.olr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *OperationListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page OperationListResultPage) NotDone() bool { + return !page.olr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page OperationListResultPage) Response() OperationListResult { + return page.olr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page OperationListResultPage) Values() []Operation { + if page.olr.IsEmpty() { + return nil + } + return *page.olr.Value +} + +// Creates a new instance of the OperationListResultPage type. +func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage { + return OperationListResultPage{fn: getNextPage} +} + +// ParametersLink entity representing the reference to the deployment parameters. +type ParametersLink struct { + // URI - The URI of the parameters file. + URI *string `json:"uri,omitempty"` + // ContentVersion - If included, must match the ContentVersion in the template. 
+ ContentVersion *string `json:"contentVersion,omitempty"` +} + +// Plan plan for the resource. +type Plan struct { + // Name - The plan ID. + Name *string `json:"name,omitempty"` + // Publisher - The publisher ID. + Publisher *string `json:"publisher,omitempty"` + // Product - The offer ID. + Product *string `json:"product,omitempty"` + // PromotionCode - The promotion code. + PromotionCode *string `json:"promotionCode,omitempty"` + // Version - The plan's version. + Version *string `json:"version,omitempty"` +} + +// Provider resource provider information. +type Provider struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The provider ID. + ID *string `json:"id,omitempty"` + // Namespace - The namespace of the resource provider. + Namespace *string `json:"namespace,omitempty"` + // RegistrationState - READ-ONLY; The registration state of the resource provider. + RegistrationState *string `json:"registrationState,omitempty"` + // RegistrationPolicy - READ-ONLY; The registration policy of the resource provider. + RegistrationPolicy *string `json:"registrationPolicy,omitempty"` + // ResourceTypes - READ-ONLY; The collection of provider resource types. + ResourceTypes *[]ProviderResourceType `json:"resourceTypes,omitempty"` +} + +// ProviderListResult list of resource providers. +type ProviderListResult struct { + autorest.Response `json:"-"` + // Value - An array of resource providers. + Value *[]Provider `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to use for getting the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// ProviderListResultIterator provides access to a complete listing of Provider values. +type ProviderListResultIterator struct { + i int + page ProviderListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *ProviderListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProviderListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *ProviderListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter ProviderListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter ProviderListResultIterator) Response() ProviderListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter ProviderListResultIterator) Value() Provider { + if !iter.page.NotDone() { + return Provider{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the ProviderListResultIterator type. 
+func NewProviderListResultIterator(page ProviderListResultPage) ProviderListResultIterator {
+	return ProviderListResultIterator{page: page}
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (plr ProviderListResult) IsEmpty() bool {
+	return plr.Value == nil || len(*plr.Value) == 0
+}
+
+// providerListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (plr ProviderListResult) providerListResultPreparer(ctx context.Context) (*http.Request, error) {
+	if plr.NextLink == nil || len(to.String(plr.NextLink)) < 1 {
+		return nil, nil
+	}
+	return autorest.Prepare((&http.Request{}).WithContext(ctx),
+		autorest.AsJSON(),
+		autorest.AsGet(),
+		autorest.WithBaseURL(to.String(plr.NextLink)))
+}
+
+// ProviderListResultPage contains a page of Provider values.
+type ProviderListResultPage struct {
+	fn  func(context.Context, ProviderListResult) (ProviderListResult, error)
+	plr ProviderListResult
+}
+
+// NextWithContext advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *ProviderListResultPage) NextWithContext(ctx context.Context) (err error) {
+	if tracing.IsEnabled() {
+		ctx = tracing.StartSpan(ctx, fqdn+"/ProviderListResultPage.NextWithContext")
+		defer func() {
+			sc := -1
+			if page.Response().Response.Response != nil {
+				sc = page.Response().Response.Response.StatusCode
+			}
+			tracing.EndSpan(ctx, sc, err)
+		}()
+	}
+	next, err := page.fn(ctx, page.plr)
+	if err != nil {
+		return err
+	}
+	page.plr = next
+	return nil
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ProviderListResultPage) Next() error {
+	return page.NextWithContext(context.Background())
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page ProviderListResultPage) NotDone() bool {
+	return !page.plr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page ProviderListResultPage) Response() ProviderListResult {
+	return page.plr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page ProviderListResultPage) Values() []Provider {
+	if page.plr.IsEmpty() {
+		return nil
+	}
+	return *page.plr.Value
+}
+
+// Creates a new instance of the ProviderListResultPage type.
+func NewProviderListResultPage(getNextPage func(context.Context, ProviderListResult) (ProviderListResult, error)) ProviderListResultPage {
+	return ProviderListResultPage{fn: getNextPage}
+}
+
+// ProviderOperationDisplayProperties resource provider operation's display properties.
+type ProviderOperationDisplayProperties struct {
+	// Publisher - Operation publisher.
+	Publisher *string `json:"publisher,omitempty"`
+	// Provider - Operation provider.
+	Provider *string `json:"provider,omitempty"`
+	// Resource - Operation resource.
+	Resource *string `json:"resource,omitempty"`
+	// Operation - Resource provider operation.
+	Operation *string `json:"operation,omitempty"`
+	// Description - Operation description.
+	Description *string `json:"description,omitempty"`
+}
+
+// ProviderResourceType resource type managed by the resource provider.
+type ProviderResourceType struct {
+	// ResourceType - The resource type.
+ ResourceType *string `json:"resourceType,omitempty"` + // Locations - The collection of locations where this resource type can be created. + Locations *[]string `json:"locations,omitempty"` + // Aliases - The aliases that are supported by this resource type. + Aliases *[]AliasType `json:"aliases,omitempty"` + // APIVersions - The API version. + APIVersions *[]string `json:"apiVersions,omitempty"` + // Capabilities - The additional capabilities offered by this resource type. + Capabilities *string `json:"capabilities,omitempty"` + // Properties - The properties. + Properties map[string]*string `json:"properties"` +} + +// MarshalJSON is the custom marshaler for ProviderResourceType. +func (prt ProviderResourceType) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if prt.ResourceType != nil { + objectMap["resourceType"] = prt.ResourceType + } + if prt.Locations != nil { + objectMap["locations"] = prt.Locations + } + if prt.Aliases != nil { + objectMap["aliases"] = prt.Aliases + } + if prt.APIVersions != nil { + objectMap["apiVersions"] = prt.APIVersions + } + if prt.Capabilities != nil { + objectMap["capabilities"] = prt.Capabilities + } + if prt.Properties != nil { + objectMap["properties"] = prt.Properties + } + return json.Marshal(objectMap) +} + +// Resource specified resource. +type Resource struct { + // ID - READ-ONLY; Resource ID + ID *string `json:"id,omitempty"` + // Name - READ-ONLY; Resource name + Name *string `json:"name,omitempty"` + // Type - READ-ONLY; Resource type + Type *string `json:"type,omitempty"` + // Location - Resource location + Location *string `json:"location,omitempty"` + // Tags - Resource tags + Tags map[string]*string `json:"tags"` +} + +// MarshalJSON is the custom marshaler for Resource. +func (r Resource) MarshalJSON() ([]byte, error) { + objectMap := make(map[string]interface{}) + if r.Location != nil { + objectMap["location"] = r.Location + } + if r.Tags != nil { + objectMap["tags"] = r.Tags + } + return json.Marshal(objectMap) +} + +// Sku SKU for the resource. +type Sku struct { + // Name - The SKU name. + Name *string `json:"name,omitempty"` + // Tier - The SKU tier. + Tier *string `json:"tier,omitempty"` + // Size - The SKU size. + Size *string `json:"size,omitempty"` + // Family - The SKU family. + Family *string `json:"family,omitempty"` + // Model - The SKU model. + Model *string `json:"model,omitempty"` + // Capacity - The SKU capacity. + Capacity *int32 `json:"capacity,omitempty"` +} + +// SubResource sub-resource. +type SubResource struct { + // ID - Resource ID + ID *string `json:"id,omitempty"` +} + +// TagCount tag count. +type TagCount struct { + // Type - Type of count. + Type *string `json:"type,omitempty"` + // Value - Value of count. + Value *int32 `json:"value,omitempty"` +} + +// TagDetails tag details. +type TagDetails struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The tag ID. + ID *string `json:"id,omitempty"` + // TagName - The tag name. + TagName *string `json:"tagName,omitempty"` + // Count - The total number of resources that use the resource tag. When a tag is initially created and has no associated resources, the value is 0. + Count *TagCount `json:"count,omitempty"` + // Values - The list of tag values. + Values *[]TagValue `json:"values,omitempty"` +} + +// TagsListResult list of subscription tags. +type TagsListResult struct { + autorest.Response `json:"-"` + // Value - An array of tags. 
+ Value *[]TagDetails `json:"value,omitempty"` + // NextLink - READ-ONLY; The URL to use for getting the next set of results. + NextLink *string `json:"nextLink,omitempty"` +} + +// TagsListResultIterator provides access to a complete listing of TagDetails values. +type TagsListResultIterator struct { + i int + page TagsListResultPage +} + +// NextWithContext advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +func (iter *TagsListResultIterator) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsListResultIterator.NextWithContext") + defer func() { + sc := -1 + if iter.Response().Response.Response != nil { + sc = iter.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + iter.i++ + if iter.i < len(iter.page.Values()) { + return nil + } + err = iter.page.NextWithContext(ctx) + if err != nil { + iter.i-- + return err + } + iter.i = 0 + return nil +} + +// Next advances to the next value. If there was an error making +// the request the iterator does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (iter *TagsListResultIterator) Next() error { + return iter.NextWithContext(context.Background()) +} + +// NotDone returns true if the enumeration should be started or is not yet complete. +func (iter TagsListResultIterator) NotDone() bool { + return iter.page.NotDone() && iter.i < len(iter.page.Values()) +} + +// Response returns the raw server response from the last page request. +func (iter TagsListResultIterator) Response() TagsListResult { + return iter.page.Response() +} + +// Value returns the current value or a zero-initialized value if the +// iterator has advanced beyond the end of the collection. +func (iter TagsListResultIterator) Value() TagDetails { + if !iter.page.NotDone() { + return TagDetails{} + } + return iter.page.Values()[iter.i] +} + +// Creates a new instance of the TagsListResultIterator type. +func NewTagsListResultIterator(page TagsListResultPage) TagsListResultIterator { + return TagsListResultIterator{page: page} +} + +// IsEmpty returns true if the ListResult contains no values. +func (tlr TagsListResult) IsEmpty() bool { + return tlr.Value == nil || len(*tlr.Value) == 0 +} + +// tagsListResultPreparer prepares a request to retrieve the next set of results. +// It returns nil if no more results exist. +func (tlr TagsListResult) tagsListResultPreparer(ctx context.Context) (*http.Request, error) { + if tlr.NextLink == nil || len(to.String(tlr.NextLink)) < 1 { + return nil, nil + } + return autorest.Prepare((&http.Request{}).WithContext(ctx), + autorest.AsJSON(), + autorest.AsGet(), + autorest.WithBaseURL(to.String(tlr.NextLink))) +} + +// TagsListResultPage contains a page of TagDetails values. +type TagsListResultPage struct { + fn func(context.Context, TagsListResult) (TagsListResult, error) + tlr TagsListResult +} + +// NextWithContext advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. 
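+//
+// A page-level traversal sketch (illustrative only; "ctx" and a populated
+// page are assumed to come from a TagsClient.List call):
+//
+//	for page.NotDone() {
+//		for _, td := range page.Values() {
+//			_ = td // use the TagDetails
+//		}
+//		if err := page.NextWithContext(ctx); err != nil {
+//			break
+//		}
+//	}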
+func (page *TagsListResultPage) NextWithContext(ctx context.Context) (err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsListResultPage.NextWithContext") + defer func() { + sc := -1 + if page.Response().Response.Response != nil { + sc = page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + next, err := page.fn(ctx, page.tlr) + if err != nil { + return err + } + page.tlr = next + return nil +} + +// Next advances to the next page of values. If there was an error making +// the request the page does not advance and the error is returned. +// Deprecated: Use NextWithContext() instead. +func (page *TagsListResultPage) Next() error { + return page.NextWithContext(context.Background()) +} + +// NotDone returns true if the page enumeration should be started or is not yet complete. +func (page TagsListResultPage) NotDone() bool { + return !page.tlr.IsEmpty() +} + +// Response returns the raw server response from the last page request. +func (page TagsListResultPage) Response() TagsListResult { + return page.tlr +} + +// Values returns the slice of values for the current page or nil if there are no values. +func (page TagsListResultPage) Values() []TagDetails { + if page.tlr.IsEmpty() { + return nil + } + return *page.tlr.Value +} + +// Creates a new instance of the TagsListResultPage type. +func NewTagsListResultPage(getNextPage func(context.Context, TagsListResult) (TagsListResult, error)) TagsListResultPage { + return TagsListResultPage{fn: getNextPage} +} + +// TagValue tag information. +type TagValue struct { + autorest.Response `json:"-"` + // ID - READ-ONLY; The tag ID. + ID *string `json:"id,omitempty"` + // TagValue - The tag value. + TagValue *string `json:"tagValue,omitempty"` + // Count - The tag value count. + Count *TagCount `json:"count,omitempty"` +} + +// TargetResource target resource. +type TargetResource struct { + // ID - The ID of the resource. + ID *string `json:"id,omitempty"` + // ResourceName - The name of the resource. + ResourceName *string `json:"resourceName,omitempty"` + // ResourceType - The type of the resource. + ResourceType *string `json:"resourceType,omitempty"` +} + +// TemplateLink entity representing the reference to the template. +type TemplateLink struct { + // URI - The URI of the template to deploy. + URI *string `json:"uri,omitempty"` + // ContentVersion - If included, must match the ContentVersion in the template. + ContentVersion *string `json:"contentVersion,omitempty"` +} + +// UpdateByIDFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type UpdateByIDFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. 
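// UpdateByIDFuture embeds azure.Future, so the usual long-running-operation
// flow applies: start the call, block with WaitForCompletionRef, then extract
// the typed payload with the Result method that follows. A sketch, assuming a
// configured Client whose UpdateByID (defined later in resources.go) returns
// this future; ctx, resourceID, and parameters are placeholders:
//
//    future, err := client.UpdateByID(ctx, resourceID, parameters)
//    if err != nil {
//        log.Fatal(err)
//    }
//    // client.Client is the embedded autorest.Client used for polling.
//    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//        log.Fatal(err)
//    }
//    gr, err := future.Result(client) // GenericResource, or an error if polling failed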
+func (future *UpdateByIDFuture) Result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.UpdateByIDFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateByIDResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateByIDFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + +// UpdateFuture an abstraction for monitoring and retrieving the results of a long-running operation. +type UpdateFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *UpdateFuture) Result(client Client) (gr GenericResource, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.UpdateFuture") + return + } + sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + if gr.Response.Response, err = future.GetResult(sender); err == nil && gr.Response.Response.StatusCode != http.StatusNoContent { + gr, err = client.UpdateResponder(gr.Response.Response) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.UpdateFuture", "Result", gr.Response.Response, "Failure responding to request") + } + } + return +} + +// ValidateMoveResourcesFuture an abstraction for monitoring and retrieving the results of a long-running +// operation. +type ValidateMoveResourcesFuture struct { + azure.Future +} + +// Result returns the result of the asynchronous operation. +// If the operation has not completed it will return an error. +func (future *ValidateMoveResourcesFuture) Result(client Client) (ar autorest.Response, err error) { + var done bool + done, err = future.DoneWithContext(context.Background(), client) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ValidateMoveResourcesFuture", "Result", future.Response(), "Polling failure") + return + } + if !done { + err = azure.NewAsyncOpIncompleteError("resources.ValidateMoveResourcesFuture") + return + } + ar.Response = future.Response() + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/operations.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/operations.go new file mode 100644 index 00000000..b5a4f9cd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/operations.go @@ -0,0 +1,147 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// OperationsClient is the provides operations for working with resources and resource groups. +type OperationsClient struct { + BaseClient +} + +// NewOperationsClient creates an instance of the OperationsClient client. +func NewOperationsClient(subscriptionID string) OperationsClient { + return NewOperationsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewOperationsClientWithBaseURI creates an instance of the OperationsClient client. +func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) OperationsClient { + return OperationsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// List lists all of the available Microsoft.Resources REST API operations. +func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.olr.Response.Response != nil { + sc = result.olr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.OperationsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.olr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.OperationsClient", "List", resp, "Failure sending request") + return + } + + result.olr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.OperationsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client OperationsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPath("/providers/Microsoft.Resources/operations"), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client OperationsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) 
+} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client OperationsClient) ListResponder(resp *http.Response) (result OperationListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) { + req, err := lastResults.operationListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.OperationsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.OperationsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.OperationsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/providers.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/providers.go new file mode 100644 index 00000000..e9fff221 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/providers.go @@ -0,0 +1,392 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// ProvidersClient is the provides operations for working with resources and resource groups. 
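// Every generated client in this package follows the shape shown in
// operations.go above: List fetches the first page, listNextResults walks
// nextLink, and ListComplete hides both behind an iterator. A sketch using the
// operations client (subscriptionID and authorizer are placeholders):
//
//    opsClient := NewOperationsClient(subscriptionID)
//    opsClient.Authorizer = authorizer // field of the embedded autorest.Client
//    iter, err := opsClient.ListComplete(context.Background())
//    if err != nil {
//        log.Fatal(err)
//    }
//    for iter.NotDone() {
//        if op := iter.Value(); op.Name != nil {
//            fmt.Println(*op.Name)
//        }
//        if err := iter.NextWithContext(context.Background()); err != nil {
//            log.Fatal(err)
//        }
//    }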
+type ProvidersClient struct { + BaseClient +} + +// NewProvidersClient creates an instance of the ProvidersClient client. +func NewProvidersClient(subscriptionID string) ProvidersClient { + return NewProvidersClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewProvidersClientWithBaseURI creates an instance of the ProvidersClient client. +func NewProvidersClientWithBaseURI(baseURI string, subscriptionID string) ProvidersClient { + return ProvidersClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// Get gets the specified resource provider. +// Parameters: +// resourceProviderNamespace - the namespace of the resource provider. +// expand - the $expand query parameter. For example, to include property aliases in response, use +// $expand=resourceTypes/aliases. +func (client ProvidersClient) Get(ctx context.Context, resourceProviderNamespace string, expand string) (result Provider, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetPreparer(ctx, resourceProviderNamespace, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client ProvidersClient) GetPreparer(ctx context.Context, resourceProviderNamespace string, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
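// A typical use of Get above is inspecting a single resource provider; the
// expand argument is passed through as the OData $expand query parameter. A
// sketch (providersClient and ctx are placeholders; RegistrationState comes
// from the Provider model generated in models.go):
//
//    p, err := providersClient.Get(ctx, "Microsoft.Compute", "resourceTypes/aliases")
//    if err != nil {
//        log.Fatal(err)
//    }
//    if p.RegistrationState != nil {
//        fmt.Println(*p.RegistrationState) // e.g. "Registered"
//    }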
+func (client ProvidersClient) GetResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List gets all resource providers for a subscription. +// Parameters: +// top - the number of results to return. If null is passed returns all deployments. +// expand - the properties to include in the results. For example, use &$expand=metadata in the query string to +// retrieve resource provider metadata. To include property aliases in response, use +// $expand=resourceTypes/aliases. +func (client ProvidersClient) List(ctx context.Context, top *int32, expand string) (result ProviderListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.List") + defer func() { + sc := -1 + if result.plr.Response.Response != nil { + sc = result.plr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, top, expand) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.plr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure sending request") + return + } + + result.plr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client ProvidersClient) ListPreparer(ctx context.Context, top *int32, expand string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client ProvidersClient) ListResponder(resp *http.Response) (result ProviderListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
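// ProviderListResultPage (returned by List above) pages the same way as the
// other generated pages; passing a nil top returns all providers. A sketch that
// walks every provider in the subscription (providersClient and ctx are
// placeholders):
//
//    page, err := providersClient.List(ctx, nil, "")
//    for err == nil && page.NotDone() {
//        for _, p := range page.Values() {
//            if p.Namespace != nil && p.RegistrationState != nil {
//                fmt.Printf("%s: %s\n", *p.Namespace, *p.RegistrationState)
//            }
//        }
//        err = page.NextWithContext(ctx)
//    }
//    if err != nil {
//        log.Fatal(err)
//    }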
+func (client ProvidersClient) listNextResults(ctx context.Context, lastResults ProviderListResult) (result ProviderListResult, err error) { + req, err := lastResults.providerListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client ProvidersClient) ListComplete(ctx context.Context, top *int32, expand string) (result ProviderListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, top, expand) + return +} + +// Register registers a subscription with a resource provider. +// Parameters: +// resourceProviderNamespace - the namespace of the resource provider to register. +func (client ProvidersClient) Register(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Register") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.RegisterPreparer(ctx, resourceProviderNamespace) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", nil, "Failure preparing request") + return + } + + resp, err := client.RegisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure sending request") + return + } + + result, err = client.RegisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Register", resp, "Failure responding to request") + } + + return +} + +// RegisterPreparer prepares the Register request. +func (client ProvidersClient) RegisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// RegisterSender sends the Register request. 
The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) RegisterSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// RegisterResponder handles the response to the Register request. The method always +// closes the http.Response Body. +func (client ProvidersClient) RegisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Unregister unregisters a subscription from a resource provider. +// Parameters: +// resourceProviderNamespace - the namespace of the resource provider to unregister. +func (client ProvidersClient) Unregister(ctx context.Context, resourceProviderNamespace string) (result Provider, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/ProvidersClient.Unregister") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UnregisterPreparer(ctx, resourceProviderNamespace) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", nil, "Failure preparing request") + return + } + + resp, err := client.UnregisterSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure sending request") + return + } + + result, err = client.UnregisterResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.ProvidersClient", "Unregister", resp, "Failure responding to request") + } + + return +} + +// UnregisterPreparer prepares the Unregister request. +func (client ProvidersClient) UnregisterPreparer(ctx context.Context, resourceProviderNamespace string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/unregister", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UnregisterSender sends the Unregister request. The method will close the +// http.Response Body if it receives an error. +func (client ProvidersClient) UnregisterSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// UnregisterResponder handles the response to the Unregister request. The method always +// closes the http.Response Body. 
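// Register and Unregister above return immediately with the Provider document;
// the registration itself completes asynchronously on the ARM side, so callers
// typically re-issue Get until RegistrationState reaches the desired value. A
// sketch (providersClient and ctx are placeholders):
//
//    p, err := providersClient.Register(ctx, "Microsoft.ContainerService")
//    if err != nil {
//        log.Fatal(err)
//    }
//    if p.RegistrationState != nil {
//        fmt.Println(*p.RegistrationState) // usually "Registering" at first
//    }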
+func (client ProvidersClient) UnregisterResponder(resp *http.Response) (result Provider, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/resources.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/resources.go new file mode 100644 index 00000000..a473370f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/resources.go @@ -0,0 +1,1352 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/autorest/validation" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// Client is the provides operations for working with resources and resource groups. +type Client struct { + BaseClient +} + +// NewClient creates an instance of the Client client. +func NewClient(subscriptionID string) Client { + return NewClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewClientWithBaseURI creates an instance of the Client client. +func NewClientWithBaseURI(baseURI string, subscriptionID string) Client { + return Client{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CheckExistence checks whether a resource exists. +// Parameters: +// resourceGroupName - the name of the resource group containing the resource to check. The name is case +// insensitive. +// resourceProviderNamespace - the resource provider of the resource to check. +// parentResourcePath - the parent resource identity. +// resourceType - the resource type. +// resourceName - the name of the resource to check whether it exists. 
+func (client Client) CheckExistence(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.CheckExistence") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "CheckExistence", err.Error()) + } + + req, err := client.CheckExistencePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistence", resp, "Failure responding to request") + } + + return +} + +// CheckExistencePreparer prepares the CheckExistence request. +func (client Client) CheckExistencePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceSender sends the CheckExistence request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CheckExistenceSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CheckExistenceResponder handles the response to the CheckExistence request. The method always +// closes the http.Response Body. 
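// CheckExistence issues a HEAD request, so existence is signalled entirely by
// status code: the responder that follows accepts 200, 204, and 404 as
// non-errors, and callers inspect the code themselves. A sketch (the group,
// namespace, and names are hypothetical):
//
//    resp, err := client.CheckExistence(ctx, "my-rg", "Microsoft.Network", "", "virtualNetworks", "vnet1")
//    if err != nil {
//        log.Fatal(err)
//    }
//    exists := resp.StatusCode != http.StatusNotFound // 204 => exists, 404 => absent
//    fmt.Println(exists)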
+func (client Client) CheckExistenceResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CheckExistenceByID checks by ID whether a resource exists. +// Parameters: +// resourceID - the fully qualified ID of the resource, including the resource name and resource type. Use the +// format, +// /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name} +func (client Client) CheckExistenceByID(ctx context.Context, resourceID string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.CheckExistenceByID") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CheckExistenceByIDPreparer(ctx, resourceID) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistenceByID", nil, "Failure preparing request") + return + } + + resp, err := client.CheckExistenceByIDSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistenceByID", resp, "Failure sending request") + return + } + + result, err = client.CheckExistenceByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CheckExistenceByID", resp, "Failure responding to request") + } + + return +} + +// CheckExistenceByIDPreparer prepares the CheckExistenceByID request. +func (client Client) CheckExistenceByIDPreparer(ctx context.Context, resourceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceId": resourceID, + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsHead(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CheckExistenceByIDSender sends the CheckExistenceByID request. The method will close the +// http.Response Body if it receives an error. +func (client Client) CheckExistenceByIDSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// CheckExistenceByIDResponder handles the response to the CheckExistenceByID request. The method always +// closes the http.Response Body. +func (client Client) CheckExistenceByIDResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent, http.StatusNotFound), + autorest.ByClosing()) + result.Response = resp + return +} + +// CreateOrUpdate creates a resource. +// Parameters: +// resourceGroupName - the name of the resource group for the resource. The name is case insensitive. +// resourceProviderNamespace - the namespace of the resource provider. +// parentResourcePath - the parent resource identity. 
+// resourceType - the resource type of the resource to create. +// resourceName - the name of the resource to create. +// parameters - parameters for creating or updating the resource. +func (client Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result CreateOrUpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}, + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Kind", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Kind", Name: validation.Pattern, Rule: `^[-\w\._,\(\)]+$`, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("resources.Client", "CreateOrUpdate", err.Error()) + } + + req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdate", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client Client) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) CreateOrUpdateSender(req *http.Request) (future CreateOrUpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client Client) CreateOrUpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateByID create a resource by ID. +// Parameters: +// resourceID - the fully qualified ID of the resource, including the resource name and resource type. Use the +// format, +// /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name} +// parameters - create or update resource parameters. +func (client Client) CreateOrUpdateByID(ctx context.Context, resourceID string, parameters GenericResource) (result CreateOrUpdateByIDFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.CreateOrUpdateByID") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: parameters, + Constraints: []validation.Constraint{{Target: "parameters.Kind", Name: validation.Null, Rule: false, + Chain: []validation.Constraint{{Target: "parameters.Kind", Name: validation.Pattern, Rule: `^[-\w\._,\(\)]+$`, Chain: nil}}}}}}); err != nil { + return result, validation.NewError("resources.Client", "CreateOrUpdateByID", err.Error()) + } + + req, err := client.CreateOrUpdateByIDPreparer(ctx, resourceID, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdateByID", nil, "Failure preparing request") + return + } + + result, err = client.CreateOrUpdateByIDSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "CreateOrUpdateByID", result.Response(), "Failure sending request") + return + } + + return +} + +// CreateOrUpdateByIDPreparer prepares the CreateOrUpdateByID request. +func (client Client) CreateOrUpdateByIDPreparer(ctx context.Context, resourceID string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceId": resourceID, + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateByIDSender sends the CreateOrUpdateByID request. The method will close the +// http.Response Body if it receives an error. 
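// CreateOrUpdate and CreateOrUpdateByID return futures because ARM acknowledges
// the PUT with 201/202 and finishes provisioning asynchronously. An end-to-end
// sketch, assuming the CreateOrUpdateFuture.Result method generated in
// models.go (all names and values here are placeholders):
//
//    res := GenericResource{Location: to.StringPtr("westeurope")}
//    future, err := client.CreateOrUpdate(ctx, "my-rg", "Microsoft.Network", "", "virtualNetworks", "vnet1", res)
//    if err != nil {
//        log.Fatal(err)
//    }
//    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//        log.Fatal(err)
//    }
//    created, err := future.Result(client) // server-populated GenericResource
//    if err == nil && created.ID != nil {
//        fmt.Println(*created.ID)
//    }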
+func (client Client) CreateOrUpdateByIDSender(req *http.Request) (future CreateOrUpdateByIDFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// CreateOrUpdateByIDResponder handles the response to the CreateOrUpdateByID request. The method always +// closes the http.Response Body. +func (client Client) CreateOrUpdateByIDResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete deletes a resource. +// Parameters: +// resourceGroupName - the name of the resource group that contains the resource to delete. The name is case +// insensitive. +// resourceProviderNamespace - the namespace of the resource provider. +// parentResourcePath - the parent resource identity. +// resourceType - the resource type. +// resourceName - the name of the resource to delete. +func (client Client) Delete(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result DeleteFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.Delete") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "Delete", err.Error()) + } + + req, err := client.DeletePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Delete", nil, "Failure preparing request") + return + } + + result, err = client.DeleteSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Delete", result.Response(), "Failure sending request") + return + } + + return +} + +// DeletePreparer prepares the Delete request. 
+func (client Client) DeletePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteSender(req *http.Request) (future DeleteFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client Client) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteByID deletes a resource by ID. +// Parameters: +// resourceID - the fully qualified ID of the resource, including the resource name and resource type. Use the +// format, +// /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name} +func (client Client) DeleteByID(ctx context.Context, resourceID string) (result DeleteByIDFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.DeleteByID") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteByIDPreparer(ctx, resourceID) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "DeleteByID", nil, "Failure preparing request") + return + } + + result, err = client.DeleteByIDSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "DeleteByID", result.Response(), "Failure sending request") + return + } + + return +} + +// DeleteByIDPreparer prepares the DeleteByID request. 
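// Delete and DeleteByID follow the same future pattern; their Result methods
// yield only an autorest.Response, since a successful delete has no body. A
// sketch (resourceID is a placeholder; DeleteByIDFuture.Result is generated in
// models.go):
//
//    future, err := client.DeleteByID(ctx, resourceID)
//    if err != nil {
//        log.Fatal(err)
//    }
//    if err := future.WaitForCompletionRef(ctx, client.Client); err != nil {
//        log.Fatal(err)
//    }
//    resp, err := future.Result(client) // 200, 202, or 204 on success
//    _ = resp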
+func (client Client) DeleteByIDPreparer(ctx context.Context, resourceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceId": resourceID, + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteByIDSender sends the DeleteByID request. The method will close the +// http.Response Body if it receives an error. +func (client Client) DeleteByIDSender(req *http.Request) (future DeleteByIDFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// DeleteByIDResponder handles the response to the DeleteByID request. The method always +// closes the http.Response Body. +func (client Client) DeleteByIDResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Get gets a resource. +// Parameters: +// resourceGroupName - the name of the resource group containing the resource to get. The name is case +// insensitive. +// resourceProviderNamespace - the namespace of the resource provider. +// parentResourcePath - the parent resource identity. +// resourceType - the resource type of the resource. +// resourceName - the name of the resource to get. 
+func (client Client) Get(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (result GenericResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.Get") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "Get", err.Error()) + } + + req, err := client.GetPreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Get", nil, "Failure preparing request") + return + } + + resp, err := client.GetSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "Get", resp, "Failure sending request") + return + } + + result, err = client.GetResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Get", resp, "Failure responding to request") + } + + return +} + +// GetPreparer prepares the Get request. +func (client Client) GetPreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetSender sends the Get request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetResponder handles the response to the Get request. The method always +// closes the http.Response Body. 
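// GetByID is the generic read path: it takes the full ARM resource ID instead
// of the five-part coordinates that Get requires, which is convenient when the
// ID was obtained from a List call. A sketch (the ID is hypothetical):
//
//    id := "/subscriptions/s/resourceGroups/rg/providers/Microsoft.Network/virtualNetworks/vnet1"
//    gr, err := client.GetByID(ctx, id)
//    if err != nil {
//        log.Fatal(err)
//    }
//    if gr.Name != nil {
//        fmt.Println(*gr.Name)
//    }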
+func (client Client) GetResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// GetByID gets a resource by ID. +// Parameters: +// resourceID - the fully qualified ID of the resource, including the resource name and resource type. Use the +// format, +// /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name} +func (client Client) GetByID(ctx context.Context, resourceID string) (result GenericResource, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.GetByID") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.GetByIDPreparer(ctx, resourceID) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "GetByID", nil, "Failure preparing request") + return + } + + resp, err := client.GetByIDSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "GetByID", resp, "Failure sending request") + return + } + + result, err = client.GetByIDResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "GetByID", resp, "Failure responding to request") + } + + return +} + +// GetByIDPreparer prepares the GetByID request. +func (client Client) GetByIDPreparer(ctx context.Context, resourceID string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceId": resourceID, + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceId}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// GetByIDSender sends the GetByID request. The method will close the +// http.Response Body if it receives an error. +func (client Client) GetByIDSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + return autorest.SendWithSender(client, req, sd...) +} + +// GetByIDResponder handles the response to the GetByID request. The method always +// closes the http.Response Body. +func (client Client) GetByIDResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// List get all the resources in a subscription. +// Parameters: +// filter - the filter to apply on the operation.
+// The properties you can use for eq (equals) or ne (not
+// equals) are: location, resourceType, name, resourceGroup, identity, identity/principalId, plan,
+// plan/publisher, plan/product, plan/name, plan/version, and plan/promotionCode. For example, to filter
+// by a resource type, use: $filter=resourceType eq 'Microsoft.Network/virtualNetworks'. You can use
+// substringof(value, property) in the filter. The properties you can use for substring are: name and
+// resourceGroup. For example, to get all resources with 'demo' anywhere in the name, use:
+// $filter=substringof('demo', name). You can link more than one substringof together by adding and/or
+// operators. You can filter by tag names and values. For example, to filter for a tag name and value,
+// use $filter=tagName eq 'tag1' and tagValue eq 'Value1'.
You can use some properties together when +// filtering. The combinations you can use are: substringof and/or resourceType, plan and plan/publisher and +// plan/name, identity and identity/principalId. +// expand - the $expand query parameter. You can expand createdTime and changedTime. For example, to expand +// both properties, use $expand=changedTime,createdTime +// top - the number of results to return. If null is passed, returns all resource groups. +func (client Client) List(ctx context.Context, filter string, expand string, top *int32) (result ListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.List") + defer func() { + sc := -1 + if result.lr.Response.Response != nil { + sc = result.lr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx, filter, expand, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.lr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure sending request") + return + } + + result.lr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client Client) ListPreparer(ctx context.Context, filter string, expand string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client Client) ListResponder(resp *http.Response) (result ListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. 
+func (client Client) listNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) { + req, err := lastResults.listResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. +func (client Client) ListComplete(ctx context.Context, filter string, expand string, top *int32) (result ListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx, filter, expand, top) + return +} + +// ListByResourceGroup get all the resources for a resource group. +// Parameters: +// resourceGroupName - the resource group with the resources to get. +// filter - the filter to apply on the operation.
+// The properties you can use for eq (equals) or ne (not
+// equals) are: location, resourceType, name, resourceGroup, identity, identity/principalId, plan,
+// plan/publisher, plan/product, plan/name, plan/version, and plan/promotionCode. For example, to filter
+// by a resource type, use: $filter=resourceType eq 'Microsoft.Network/virtualNetworks'. You can use
+// substringof(value, property) in the filter. The properties you can use for substring are: name and
+// resourceGroup. For example, to get all resources with 'demo' anywhere in the name, use:
+// $filter=substringof('demo', name). You can link more than one substringof together by adding and/or
+// operators. You can filter by tag names and values. For example, to filter for a tag name and value,
+// use $filter=tagName eq 'tag1' and tagValue eq 'Value1'.
You can use some properties together when +// filtering. The combinations you can use are: substringof and/or resourceType, plan and plan/publisher and +// plan/name, identity and identity/principalId. +// expand - the $expand query parameter. You can expand createdTime and changedTime. For example, to expand +// both properties, use $expand=changedTime,createdTime +// top - the number of results to return. If null is passed, returns all resources. +func (client Client) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (result ListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.ListByResourceGroup") + defer func() { + sc := -1 + if result.lr.Response.Response != nil { + sc = result.lr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "ListByResourceGroup", err.Error()) + } + + result.fn = client.listByResourceGroupNextResults + req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter, expand, top) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "ListByResourceGroup", nil, "Failure preparing request") + return + } + + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.lr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.Client", "ListByResourceGroup", resp, "Failure sending request") + return + } + + result.lr, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "ListByResourceGroup", resp, "Failure responding to request") + } + + return +} + +// ListByResourceGroupPreparer prepares the ListByResourceGroup request. +func (client Client) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + if len(filter) > 0 { + queryParameters["$filter"] = autorest.Encode("query", filter) + } + if len(expand) > 0 { + queryParameters["$expand"] = autorest.Encode("query", expand) + } + if top != nil { + queryParameters["$top"] = autorest.Encode("query", *top) + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/resources", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the +// http.Response Body if it receives an error. 
+func (client Client) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always +// closes the http.Response Body. +func (client Client) ListByResourceGroupResponder(resp *http.Response) (result ListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listByResourceGroupNextResults retrieves the next set of results, if any. +func (client Client) listByResourceGroupNextResults(ctx context.Context, lastResults ListResult) (result ListResult, err error) { + req, err := lastResults.listResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.Client", "listByResourceGroupNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListByResourceGroupSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.Client", "listByResourceGroupNextResults", resp, "Failure sending next results request") + } + result, err = client.ListByResourceGroupResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "listByResourceGroupNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required. +func (client Client) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string, filter string, expand string, top *int32) (result ListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.ListByResourceGroup") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.ListByResourceGroup(ctx, resourceGroupName, filter, expand, top) + return +} + +// MoveResources the resources to move must be in the same source resource group. The target resource group may be in a +// different subscription. When moving resources, both the source group and the target group are locked for the +// duration of the operation. Write and delete operations are blocked on the groups until the move completes. +// Parameters: +// sourceResourceGroupName - the name of the resource group containing the resources to move. +// parameters - parameters for moving resources. 
+func (client Client) MoveResources(ctx context.Context, sourceResourceGroupName string, parameters MoveInfo) (result MoveResourcesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.MoveResources") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: sourceResourceGroupName, + Constraints: []validation.Constraint{{Target: "sourceResourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "sourceResourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "sourceResourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "MoveResources", err.Error()) + } + + req, err := client.MoveResourcesPreparer(ctx, sourceResourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "MoveResources", nil, "Failure preparing request") + return + } + + result, err = client.MoveResourcesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "MoveResources", result.Response(), "Failure sending request") + return + } + + return +} + +// MoveResourcesPreparer prepares the MoveResources request. +func (client Client) MoveResourcesPreparer(ctx context.Context, sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": autorest.Encode("path", sourceResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/moveResources", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// MoveResourcesSender sends the MoveResources request. The method will close the +// http.Response Body if it receives an error. +func (client Client) MoveResourcesSender(req *http.Request) (future MoveResourcesFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// MoveResourcesResponder handles the response to the MoveResources request. The method always +// closes the http.Response Body. +func (client Client) MoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// Update updates a resource. +// Parameters: +// resourceGroupName - the name of the resource group for the resource. The name is case insensitive. +// resourceProviderNamespace - the namespace of the resource provider. 
+// parentResourcePath - the parent resource identity. +// resourceType - the resource type of the resource to update. +// resourceName - the name of the resource to update. +// parameters - parameters for updating the resource. +func (client Client) Update(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (result UpdateFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.Update") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: resourceGroupName, + Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "resourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "Update", err.Error()) + } + + req, err := client.UpdatePreparer(ctx, resourceGroupName, resourceProviderNamespace, parentResourcePath, resourceType, resourceName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Update", nil, "Failure preparing request") + return + } + + result, err = client.UpdateSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "Update", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdatePreparer prepares the Update request. +func (client Client) UpdatePreparer(ctx context.Context, resourceGroupName string, resourceProviderNamespace string, parentResourcePath string, resourceType string, resourceName string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "parentResourcePath": parentResourcePath, + "resourceGroupName": autorest.Encode("path", resourceGroupName), + "resourceName": autorest.Encode("path", resourceName), + "resourceProviderNamespace": autorest.Encode("path", resourceProviderNamespace), + "resourceType": resourceType, + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateSender sends the Update request. The method will close the +// http.Response Body if it receives an error. +func (client Client) UpdateSender(req *http.Request) (future UpdateFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateResponder handles the response to the Update request. 
The method always +// closes the http.Response Body. +func (client Client) UpdateResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// UpdateByID updates a resource by ID. +// Parameters: +// resourceID - the fully qualified ID of the resource, including the resource name and resource type. Use the +// format, +// /subscriptions/{guid}/resourceGroups/{resource-group-name}/{resource-provider-namespace}/{resource-type}/{resource-name} +// parameters - update resource parameters. +func (client Client) UpdateByID(ctx context.Context, resourceID string, parameters GenericResource) (result UpdateByIDFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.UpdateByID") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.UpdateByIDPreparer(ctx, resourceID, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "UpdateByID", nil, "Failure preparing request") + return + } + + result, err = client.UpdateByIDSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "UpdateByID", result.Response(), "Failure sending request") + return + } + + return +} + +// UpdateByIDPreparer prepares the UpdateByID request. +func (client Client) UpdateByIDPreparer(ctx context.Context, resourceID string, parameters GenericResource) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "resourceId": resourceID, + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPatch(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/{resourceId}", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// UpdateByIDSender sends the UpdateByID request. The method will close the +// http.Response Body if it receives an error. +func (client Client) UpdateByIDSender(req *http.Request) (future UpdateByIDFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) + if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// UpdateByIDResponder handles the response to the UpdateByID request. The method always +// closes the http.Response Body. +func (client Client) UpdateByIDResponder(resp *http.Response) (result GenericResource, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// ValidateMoveResources this operation checks whether the specified resources can be moved to the target. The +// resources to move must be in the same source resource group. 
The target resource group may be in a different +// subscription. If validation succeeds, it returns HTTP response code 204 (no content). If validation fails, it +// returns HTTP response code 409 (Conflict) with an error message. Retrieve the URL in the Location header value to +// check the result of the long-running operation. +// Parameters: +// sourceResourceGroupName - the name of the resource group containing the resources to validate for move. +// parameters - parameters for moving resources. +func (client Client) ValidateMoveResources(ctx context.Context, sourceResourceGroupName string, parameters MoveInfo) (result ValidateMoveResourcesFuture, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/Client.ValidateMoveResources") + defer func() { + sc := -1 + if result.Response() != nil { + sc = result.Response().StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + if err := validation.Validate([]validation.Validation{ + {TargetValue: sourceResourceGroupName, + Constraints: []validation.Constraint{{Target: "sourceResourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil}, + {Target: "sourceResourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}, + {Target: "sourceResourceGroupName", Name: validation.Pattern, Rule: `^[-\p{L}\._\(\)\w]+$`, Chain: nil}}}}); err != nil { + return result, validation.NewError("resources.Client", "ValidateMoveResources", err.Error()) + } + + req, err := client.ValidateMoveResourcesPreparer(ctx, sourceResourceGroupName, parameters) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "ValidateMoveResources", nil, "Failure preparing request") + return + } + + result, err = client.ValidateMoveResourcesSender(req) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.Client", "ValidateMoveResources", result.Response(), "Failure sending request") + return + } + + return +} + +// ValidateMoveResourcesPreparer prepares the ValidateMoveResources request. +func (client Client) ValidateMoveResourcesPreparer(ctx context.Context, sourceResourceGroupName string, parameters MoveInfo) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "sourceResourceGroupName": autorest.Encode("path", sourceResourceGroupName), + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsContentType("application/json; charset=utf-8"), + autorest.AsPost(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{sourceResourceGroupName}/validateMoveResources", pathParameters), + autorest.WithJSON(parameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ValidateMoveResourcesSender sends the ValidateMoveResources request. The method will close the +// http.Response Body if it receives an error. +func (client Client) ValidateMoveResourcesSender(req *http.Request) (future ValidateMoveResourcesFuture, err error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + var resp *http.Response + resp, err = autorest.SendWithSender(client, req, sd...) 
+ if err != nil { + return + } + future.Future, err = azure.NewFutureFromResponse(resp) + return +} + +// ValidateMoveResourcesResponder handles the response to the ValidateMoveResources request. The method always +// closes the http.Response Body. +func (client Client) ValidateMoveResourcesResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent, http.StatusConflict), + autorest.ByClosing()) + result.Response = resp + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/tags.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/tags.go new file mode 100644 index 00000000..bb2b9632 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/tags.go @@ -0,0 +1,454 @@ +package resources + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +import ( + "context" + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/Azure/go-autorest/tracing" + "net/http" +) + +// TagsClient is the provides operations for working with resources and resource groups. +type TagsClient struct { + BaseClient +} + +// NewTagsClient creates an instance of the TagsClient client. +func NewTagsClient(subscriptionID string) TagsClient { + return NewTagsClientWithBaseURI(DefaultBaseURI, subscriptionID) +} + +// NewTagsClientWithBaseURI creates an instance of the TagsClient client. +func NewTagsClientWithBaseURI(baseURI string, subscriptionID string) TagsClient { + return TagsClient{NewWithBaseURI(baseURI, subscriptionID)} +} + +// CreateOrUpdate the tag name can have a maximum of 512 characters and is case insensitive. Tag names created by Azure +// have prefixes of microsoft, azure, or windows. You cannot create tags with one of these prefixes. +// Parameters: +// tagName - the name of the tag to create. 
+func (client TagsClient) CreateOrUpdate(ctx context.Context, tagName string) (result TagDetails, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.CreateOrUpdate") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdatePreparer(ctx, tagName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdate", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdatePreparer prepares the CreateOrUpdate request. +func (client TagsClient) CreateOrUpdatePreparer(ctx context.Context, tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateResponder(resp *http.Response) (result TagDetails, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// CreateOrUpdateValue creates a tag value. The name of the tag must already exist. +// Parameters: +// tagName - the name of the tag. +// tagValue - the value of the tag to create. 
+func (client TagsClient) CreateOrUpdateValue(ctx context.Context, tagName string, tagValue string) (result TagValue, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.CreateOrUpdateValue") + defer func() { + sc := -1 + if result.Response.Response != nil { + sc = result.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.CreateOrUpdateValuePreparer(ctx, tagName, tagValue) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", nil, "Failure preparing request") + return + } + + resp, err := client.CreateOrUpdateValueSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure sending request") + return + } + + result, err = client.CreateOrUpdateValueResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "CreateOrUpdateValue", resp, "Failure responding to request") + } + + return +} + +// CreateOrUpdateValuePreparer prepares the CreateOrUpdateValue request. +func (client TagsClient) CreateOrUpdateValuePreparer(ctx context.Context, tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + "tagValue": autorest.Encode("path", tagValue), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPut(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// CreateOrUpdateValueSender sends the CreateOrUpdateValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) CreateOrUpdateValueSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// CreateOrUpdateValueResponder handles the response to the CreateOrUpdateValue request. The method always +// closes the http.Response Body. +func (client TagsClient) CreateOrUpdateValueResponder(resp *http.Response) (result TagValue, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// Delete you must remove all values from a resource tag before you can delete it. +// Parameters: +// tagName - the name of the tag. 
+func (client TagsClient) Delete(ctx context.Context, tagName string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.Delete") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeletePreparer(ctx, tagName) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure sending request") + return + } + + result, err = client.DeleteResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "Delete", resp, "Failure responding to request") + } + + return +} + +// DeletePreparer prepares the Delete request. +func (client TagsClient) DeletePreparer(ctx context.Context, tagName string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteSender sends the Delete request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) DeleteSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteResponder handles the response to the Delete request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// DeleteValue deletes a tag value. +// Parameters: +// tagName - the name of the tag. +// tagValue - the value of the tag to delete. 
+func (client TagsClient) DeleteValue(ctx context.Context, tagName string, tagValue string) (result autorest.Response, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.DeleteValue") + defer func() { + sc := -1 + if result.Response != nil { + sc = result.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + req, err := client.DeleteValuePreparer(ctx, tagName, tagValue) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", nil, "Failure preparing request") + return + } + + resp, err := client.DeleteValueSender(req) + if err != nil { + result.Response = resp + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure sending request") + return + } + + result, err = client.DeleteValueResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "DeleteValue", resp, "Failure responding to request") + } + + return +} + +// DeleteValuePreparer prepares the DeleteValue request. +func (client TagsClient) DeleteValuePreparer(ctx context.Context, tagName string, tagValue string) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + "tagName": autorest.Encode("path", tagName), + "tagValue": autorest.Encode("path", tagValue), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsDelete(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames/{tagName}/tagValues/{tagValue}", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// DeleteValueSender sends the DeleteValue request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) DeleteValueSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// DeleteValueResponder handles the response to the DeleteValue request. The method always +// closes the http.Response Body. +func (client TagsClient) DeleteValueResponder(resp *http.Response) (result autorest.Response, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusNoContent), + autorest.ByClosing()) + result.Response = resp + return +} + +// List gets the names and values of all resource tags that are defined in a subscription. 
+func (client TagsClient) List(ctx context.Context) (result TagsListResultPage, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.List") + defer func() { + sc := -1 + if result.tlr.Response.Response != nil { + sc = result.tlr.Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.fn = client.listNextResults + req, err := client.ListPreparer(ctx) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", nil, "Failure preparing request") + return + } + + resp, err := client.ListSender(req) + if err != nil { + result.tlr.Response = autorest.Response{Response: resp} + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure sending request") + return + } + + result.tlr, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "List", resp, "Failure responding to request") + } + + return +} + +// ListPreparer prepares the List request. +func (client TagsClient) ListPreparer(ctx context.Context) (*http.Request, error) { + pathParameters := map[string]interface{}{ + "subscriptionId": autorest.Encode("path", client.SubscriptionID), + } + + const APIVersion = "2019-03-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(client.BaseURI), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/tagNames", pathParameters), + autorest.WithQueryParameters(queryParameters)) + return preparer.Prepare((&http.Request{}).WithContext(ctx)) +} + +// ListSender sends the List request. The method will close the +// http.Response Body if it receives an error. +func (client TagsClient) ListSender(req *http.Request) (*http.Response, error) { + sd := autorest.GetSendDecorators(req.Context(), azure.DoRetryWithRegistration(client.Client)) + return autorest.SendWithSender(client, req, sd...) +} + +// ListResponder handles the response to the List request. The method always +// closes the http.Response Body. +func (client TagsClient) ListResponder(resp *http.Response) (result TagsListResult, err error) { + err = autorest.Respond( + resp, + client.ByInspecting(), + azure.WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&result), + autorest.ByClosing()) + result.Response = autorest.Response{Response: resp} + return +} + +// listNextResults retrieves the next set of results, if any. +func (client TagsClient) listNextResults(ctx context.Context, lastResults TagsListResult) (result TagsListResult, err error) { + req, err := lastResults.tagsListResultPreparer(ctx) + if err != nil { + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", nil, "Failure preparing next results request") + } + if req == nil { + return + } + resp, err := client.ListSender(req) + if err != nil { + result.Response = autorest.Response{Response: resp} + return result, autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", resp, "Failure sending next results request") + } + result, err = client.ListResponder(resp) + if err != nil { + err = autorest.NewErrorWithError(err, "resources.TagsClient", "listNextResults", resp, "Failure responding to next results request") + } + return +} + +// ListComplete enumerates all values, automatically crossing page boundaries as required. 
+func (client TagsClient) ListComplete(ctx context.Context) (result TagsListResultIterator, err error) { + if tracing.IsEnabled() { + ctx = tracing.StartSpan(ctx, fqdn+"/TagsClient.List") + defer func() { + sc := -1 + if result.Response().Response.Response != nil { + sc = result.page.Response().Response.Response.StatusCode + } + tracing.EndSpan(ctx, sc, err) + }() + } + result.page, err = client.List(ctx) + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/version.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/version.go new file mode 100644 index 00000000..c100a29a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources/version.go @@ -0,0 +1,30 @@ +package resources + +import "github.com/Azure/azure-sdk-for-go/version" + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// UserAgent returns the UserAgent string to use when sending http.Requests. +func UserAgent() string { + return "Azure-SDK-For-Go/" + version.Number + " resources/2019-03-01" +} + +// Version returns the semantic version (see http://semver.org) of the client. +func Version() string { + return version.Number +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/version/version.go new file mode 100644 index 00000000..60e618da --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/azure-sdk-for-go/version/version.go @@ -0,0 +1,21 @@ +package version + +// Copyright (c) Microsoft and contributors. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Code generated by Microsoft (R) AutoRest Code Generator. +// Changes may cause incorrect behavior and will be lost if the code is regenerated. + +// Number contains the semantic version of this SDK. 
+const Number = "v32.0.0" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/LICENSE new file mode 100644 index 00000000..b9d6a27e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Microsoft Corporation + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
new file mode 100644
index 00000000..fec416a9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/README.md
@@ -0,0 +1,292 @@
+# Azure Active Directory authentication for Go
+
+This is a standalone package for authenticating with Azure Active
+Directory from other Go libraries and applications, in particular the [Azure SDK
+for Go](https://github.com/Azure/azure-sdk-for-go).
+
+Note: Despite the package's name, it is not related to other "ADAL" libraries
+maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues
+should be opened in [this repo's](https://github.com/Azure/go-autorest/issues)
+or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue
+trackers.
+
+## Install
+
+```bash
+go get -u github.com/Azure/go-autorest/autorest/adal
+```
+
+## Usage
+
+An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli).
+
+### Register an Azure AD Application with secret
+
+1. Register a new application with a `secret` credential
+
+   ```
+   az ad app create \
+     --display-name example-app \
+     --homepage https://example-app/home \
+     --identifier-uris https://example-app/app \
+     --password secret
+   ```
+
+2. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "Application ID"
+   ```
+
+   * Replace `Application ID` with `appId` from step 1.
+
+### Register an Azure AD Application with certificate
+
+1. Create a private key
+
+   ```
+   openssl genrsa -out "example-app.key" 2048
+   ```
+
+2. Create the certificate
+
+   ```
+   openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr"
+   openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000
+   ```
+
+3. Create the PKCS12 version of the certificate, which also contains the private key
+
+   ```
+   openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass:
+   ```
+
+4. Register a new application with the certificate content from `example-app.crt`
+
+   ```
+   certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)"
+
+   az ad app create \
+     --display-name example-app \
+     --homepage https://example-app/home \
+     --identifier-uris https://example-app/app \
+     --key-usage Verify --end-date 2018-01-01 \
+     --key-value "${certificateContents}"
+   ```
+
+5. Create a service principal using the `Application ID` from the previous step
+
+   ```
+   az ad sp create --id "APPLICATION_ID"
+   ```
+
+   * Replace `APPLICATION_ID` with `appId` from step 4.
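+
+As an optional sanity check, the service principal created above can be
+inspected with the CLI before moving on:
+
+```
+az ad sp show --id "APPLICATION_ID"
+```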
+
+### Grant the necessary permissions
+
+Azure relies on a Role-Based Access Control (RBAC) model to manage access to resources at a fine-grained
+level. There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles)
+which can be assigned to a service principal of an Azure AD application depending on your needs.
+
+```
+az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME"
+```
+
+* Replace `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step.
+* Replace `ROLE_NAME` with a role name of your choice.
+
+It is also possible to define custom roles.
+
+```
+az role definition create --role-definition role-definition.json
+```
+
+* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file.
+
+### Acquire Access Token
+
+The common configuration used by all flows:
+
+```Go
+const activeDirectoryEndpoint = "https://login.microsoftonline.com/"
+tenantID := "TENANT_ID"
+oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID)
+
+applicationID := "APPLICATION_ID"
+
+callback := func(token adal.Token) error {
+	// This is called after the token is acquired
+	return nil
+}
+
+// The resource for which the token is acquired
+resource := "https://management.core.windows.net/"
+```
+
+* Replace the `TENANT_ID` with your tenant ID.
+* Replace the `APPLICATION_ID` with the value from the previous section.
+
+#### Client Credentials
+
+```Go
+applicationSecret := "APPLICATION_SECRET"
+
+spt, err := adal.NewServicePrincipalToken(
+	*oauthConfig,
+	applicationID,
+	applicationSecret,
+	resource,
+	callbacks...)
+if err != nil {
+	return nil, err
+}
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Replace the `APPLICATION_SECRET` with the `password` value from the previous section.
+
+#### Client Certificate
+
+```Go
+certificatePath := "./example-app.pfx"
+
+certData, err := ioutil.ReadFile(certificatePath)
+if err != nil {
+	return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err)
+}
+
+// Get the certificate and private key from pfx file
+// (decodePkcs12 is a user-supplied helper, e.g. wrapping golang.org/x/crypto/pkcs12)
+certificate, rsaPrivateKey, err := decodePkcs12(certData, "")
+if err != nil {
+	return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromCertificate(
+	*oauthConfig,
+	applicationID,
+	certificate,
+	rsaPrivateKey,
+	resource,
+	callbacks...)
+
+// Acquire a new access token
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+* Update the certificate path to point to the example-app.pfx file which was created in the previous section.
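+
+The snippets above call `Refresh()` unconditionally. As a minimal sketch (using
+the `Refresher` interface from `token.go`; `spt` is a `*adal.ServicePrincipalToken`
+from one of the constructors above), a token can instead be refreshed only when
+it is close to expiry:
+
+```Go
+// EnsureFresh refreshes the token only if it is stale or about to expire.
+if err := spt.EnsureFresh(); err != nil {
+	return nil, err
+}
+accessToken := spt.OAuthToken()
+```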
+
+#### Device Code
+
+```Go
+oauthClient := &http.Client{}
+
+// Acquire the device code
+deviceCode, err := adal.InitiateDeviceAuth(
+	oauthClient,
+	*oauthConfig,
+	applicationID,
+	resource)
+if err != nil {
+	return nil, fmt.Errorf("Failed to start device auth flow: %s", err)
+}
+
+// Display the authentication message
+fmt.Println(*deviceCode.Message)
+
+// Wait here until the user is authenticated
+token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
+if err != nil {
+	return nil, fmt.Errorf("Failed to finish device auth flow: %s", err)
+}
+
+spt, err := adal.NewServicePrincipalTokenFromManualToken(
+	*oauthConfig,
+	applicationID,
+	resource,
+	*token,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Username and password authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromUsernamePassword(
+	*oauthConfig,
+	applicationID,
+	username,
+	password,
+	resource,
+	callbacks...)
+
+if err == nil {
+	token := spt.Token
+}
+```
+
+#### Authorization code authentication
+
+```Go
+spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode(
+	*oauthConfig,
+	applicationID,
+	clientSecret,
+	authorizationCode,
+	redirectURI,
+	resource,
+	callbacks...)
+
+err = spt.Refresh()
+if err == nil {
+	token := spt.Token
+}
+```
+
+### Command Line Tool
+
+A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above.
+
+```
+adal -h
+
+Usage of ./adal:
+  -applicationId string
+        application id
+  -certificatePath string
+        path to pk12/PFX application certificate
+  -mode string
+        authentication mode (device, secret, cert, refresh) (default "device")
+  -resource string
+        resource for which the token is requested
+  -secret string
+        application secret
+  -tenantId string
+        tenant id
+  -tokenCachePath string
+        location of oauth token cache (default "/home/cgc/.adal/accessToken.json")
+```
+
+Example: acquiring a token for `https://management.core.windows.net/` using the device code flow:
+
+```
+adal -mode device \
+    -applicationId "APPLICATION_ID" \
+    -tenantId "TENANT_ID" \
+    -resource https://management.core.windows.net/
+```
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
new file mode 100644
index 00000000..fa596474
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -0,0 +1,151 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+const (
+	activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
+)
+
+// OAuthConfig represents the endpoints needed
+// in OAuth operations
+type OAuthConfig struct {
+	AuthorityEndpoint  url.URL `json:"authorityEndpoint"`
+	AuthorizeEndpoint  url.URL `json:"authorizeEndpoint"`
+	TokenEndpoint      url.URL `json:"tokenEndpoint"`
+	DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"`
+}
+
+// IsZero returns true if the OAuthConfig object is zero-initialized.
+func (oac OAuthConfig) IsZero() bool {
+	return oac == OAuthConfig{}
+}
+
+func validateStringParam(param, name string) error {
+	if len(param) == 0 {
+		return fmt.Errorf("parameter '" + name + "' cannot be empty")
+	}
+	return nil
+}
+
+// NewOAuthConfig returns an OAuthConfig with tenant specific urls
+func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+	apiVer := "1.0"
+	return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
+	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
+		return nil, err
+	}
+	api := ""
+	// it's legal for tenantID to be empty so don't validate it
+	if apiVersion != nil {
+		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+			return nil, err
+		}
+		api = fmt.Sprintf("?api-version=%s", *apiVersion)
+	}
+	u, err := url.Parse(activeDirectoryEndpoint)
+	if err != nil {
+		return nil, err
+	}
+	authorityURL, err := u.Parse(tenantID)
+	if err != nil {
+		return nil, err
+	}
+	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
+	if err != nil {
+		return nil, err
+	}
+	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
+	if err != nil {
+		return nil, err
+	}
+	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
+	if err != nil {
+		return nil, err
+	}
+
+	return &OAuthConfig{
+		AuthorityEndpoint:  *authorityURL,
+		AuthorizeEndpoint:  *authorizeURL,
+		TokenEndpoint:      *tokenURL,
+		DeviceCodeEndpoint: *deviceCodeURL,
+	}, nil
+}
+
+// MultiTenantOAuthConfig provides endpoints for primary and auxiliary tenant IDs.
+type MultiTenantOAuthConfig interface {
+	PrimaryTenant() *OAuthConfig
+	AuxiliaryTenants() []*OAuthConfig
+}
+
+// OAuthOptions contains optional OAuthConfig creation arguments.
+type OAuthOptions struct {
+	APIVersion string
+}
+
+// apiVersion returns the bare API version, defaulting to 1.0. The caller is
+// responsible for adding the "?api-version=" query prefix (previously the
+// prefix was added here as well, so it was applied twice downstream).
+func (c OAuthOptions) apiVersion() string {
+	if c.APIVersion != "" {
+		return c.APIVersion
+	}
+	return "1.0"
+}
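+
+// A minimal sketch of the endpoint URLs produced with the default API version
+// ("TENANT_ID" is a placeholder):
+//
+//	cfg, _ := NewOAuthConfig("https://login.microsoftonline.com/", "TENANT_ID")
+//	// cfg.TokenEndpoint.String()      -> https://login.microsoftonline.com/TENANT_ID/oauth2/token?api-version=1.0
+//	// cfg.DeviceCodeEndpoint.String() -> https://login.microsoftonline.com/TENANT_ID/oauth2/devicecode?api-version=1.0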
+
+// NewMultiTenantOAuthConfig creates an object that supports multitenant OAuth configuration.
+// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/authenticate-multi-tenant for more information.
+func NewMultiTenantOAuthConfig(activeDirectoryEndpoint, primaryTenantID string, auxiliaryTenantIDs []string, options OAuthOptions) (MultiTenantOAuthConfig, error) {
+	if len(auxiliaryTenantIDs) == 0 || len(auxiliaryTenantIDs) > 3 {
+		return nil, errors.New("must specify one to three auxiliary tenants")
+	}
+	mtCfg := multiTenantOAuthConfig{
+		cfgs: make([]*OAuthConfig, len(auxiliaryTenantIDs)+1),
+	}
+	apiVer := options.apiVersion()
+	pri, err := NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, primaryTenantID, &apiVer)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create OAuthConfig for primary tenant: %v", err)
+	}
+	mtCfg.cfgs[0] = pri
+	for i := range auxiliaryTenantIDs {
+		aux, err := NewOAuthConfig(activeDirectoryEndpoint, auxiliaryTenantIDs[i])
+		if err != nil {
+			return nil, fmt.Errorf("failed to create OAuthConfig for tenant '%s': %v", auxiliaryTenantIDs[i], err)
+		}
+		mtCfg.cfgs[i+1] = aux
+	}
+	return mtCfg, nil
+}
+
+type multiTenantOAuthConfig struct {
+	// first config in the slice is the primary tenant
+	cfgs []*OAuthConfig
+}
+
+func (m multiTenantOAuthConfig) PrimaryTenant() *OAuthConfig {
+	return m.cfgs[0]
+}
+
+func (m multiTenantOAuthConfig) AuxiliaryTenants() []*OAuthConfig {
+	return m.cfgs[1:]
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
new file mode 100644
index 00000000..b38f4c24
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go
@@ -0,0 +1,242 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+	This file is largely based on rjw57/oauth2device's code, with the following differences:
+	 * scope -> resource, and only allow a single one
+	 * receive "Message" in the DeviceCode struct and show it to users as the prompt
+	 * azure-xplat-cli has the following behavior that this emulates:
+	   - does not send client_secret during the token exchange
+	   - sends resource again in the token exchange request
+*/
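+
+// A minimal usage sketch, assuming a plain *http.Client as the Sender and an
+// OAuthConfig built with NewOAuthConfig:
+//
+//	code, err := InitiateDeviceAuth(client, *oauthConfig, "CLIENT_ID", resource)
+//	if err != nil { /* handle error */ }
+//	fmt.Println(*code.Message)                        // prompt the user must follow
+//	token, err := WaitForUserCompletion(client, code) // polls until the flow completes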
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+const (
+	logPrefix = "autorest/adal/devicetoken:"
+)
+
+var (
+	// ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow
+	ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix)
+
+	// ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow
+	ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix)
+
+	// ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow
+	ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix)
+
+	// ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow
+	ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix)
+
+	// ErrDeviceSlowDown represents the service telling us we're polling too often during device flow
+	ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix)
+
+	// ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow
+	ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix)
+
+	// ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow
+	ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix)
+
+	errCodeSendingFails   = "Error occurred while sending request for Device Authorization Code"
+	errCodeHandlingFails  = "Error occurred while handling response from the Device Endpoint"
+	errTokenSendingFails  = "Error occurred while sending request with device code for a token"
+	errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)"
+	errStatusNotOK        = "Error HTTP status != 200"
+)
+
+// DeviceCode is the object returned by the device auth endpoint.
+// It contains information to instruct the user to complete the auth flow.
+type DeviceCode struct {
+	DeviceCode      *string `json:"device_code,omitempty"`
+	UserCode        *string `json:"user_code,omitempty"`
+	VerificationURL *string `json:"verification_url,omitempty"`
+	ExpiresIn       *int64  `json:"expires_in,string,omitempty"`
+	Interval        *int64  `json:"interval,string,omitempty"`
+
+	Message  *string `json:"message"` // Azure specific
+	Resource string  // stored when initiating the flow, reused when exchanging the code for a token
+	OAuthConfig OAuthConfig
+	ClientID    string
+}
+
+// TokenError is the object returned by the token exchange endpoint
+// when something is amiss.
+type TokenError struct {
+	Error            *string `json:"error,omitempty"`
+	ErrorCodes       []int   `json:"error_codes,omitempty"`
+	ErrorDescription *string `json:"error_description,omitempty"`
+	Timestamp        *string `json:"timestamp,omitempty"`
+	TraceID          *string `json:"trace_id,omitempty"`
+}
+
+// deviceToken is the object returned by the token exchange endpoint.
+// It can look like either a Token or a TokenError, so both are embedded here;
+// the presence of "Error" indicates the error state.
+type deviceToken struct {
+	Token
+	TokenError
+}
+
+// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode
+// that can be used with CheckForUserCompletion or WaitForUserCompletion.
+func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) {
+	v := url.Values{
+		"client_id": []string{clientID},
+		"resource":  []string{resource},
+	}
+
+	s := v.Encode()
+	body := ioutil.NopCloser(strings.NewReader(s))
+
+	req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+	}
+
+	req.ContentLength = int64(len(s))
+	req.Header.Set(contentType, mimeTypeFormPost)
+	resp, err := sender.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error())
+	}
+	defer resp.Body.Close()
+
+	rb, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+	}
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK)
+	}
+
+	if len(strings.Trim(string(rb), " ")) == 0 {
+		return nil, ErrDeviceCodeEmpty
+	}
+
+	var code DeviceCode
+	err = json.Unmarshal(rb, &code)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error())
+	}
+
+	code.ClientID = clientID
+	code.Resource = resource
+	code.OAuthConfig = oauthConfig
+
+	return &code, nil
+}
+
+// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint
+// to see if the device flow has: been completed, timed out, or otherwise failed
+func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) {
+	v := url.Values{
+		"client_id":  []string{code.ClientID},
+		"code":       []string{*code.DeviceCode},
+		"grant_type": []string{OAuthGrantTypeDeviceCode},
+		"resource":   []string{code.Resource},
+	}
+
+	s := v.Encode()
+	body := ioutil.NopCloser(strings.NewReader(s))
+
+	req, err := http.NewRequest(http.MethodPost, code.OAuthConfig.TokenEndpoint.String(), body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+	}
+
+	req.ContentLength = int64(len(s))
+	req.Header.Set(contentType, mimeTypeFormPost)
+	resp, err := sender.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error())
+	}
+	defer resp.Body.Close()
+
+	rb, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+	}
+
+	if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK)
+	}
+	if len(strings.Trim(string(rb), " ")) == 0 {
+		return nil, ErrOAuthTokenEmpty
+	}
+
+	var token deviceToken
+	err = json.Unmarshal(rb, &token)
+	if err != nil {
+		return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error())
+	}
+
+	if token.Error == nil {
+		return &token.Token, nil
+	}
+
+	switch *token.Error {
+	case "authorization_pending":
+		return nil, ErrDeviceAuthorizationPending
+	case "slow_down":
+		return nil, ErrDeviceSlowDown
+	case "access_denied":
+		return nil, ErrDeviceAccessDenied
+	case "code_expired":
return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod new file mode 100644 index 00000000..5c95dbfe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/adal + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/date v0.1.0 + github.com/Azure/go-autorest/autorest/mocks v0.1.0 + github.com/Azure/go-autorest/tracing v0.1.0 + github.com/dgrijalva/jwt-go v3.2.0+incompatible + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum new file mode 100644 index 00000000..ef9d1016 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/go.sum @@ -0,0 +1,139 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod 
h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 00000000..9e15f275 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,73 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. 
+// It moves the new file into place so it can safely be used to replace an existing file
+// that may be accessed by multiple processes.
+func SaveToken(path string, mode os.FileMode, token Token) error {
+	dir := filepath.Dir(path)
+	err := os.MkdirAll(dir, os.ModePerm)
+	if err != nil {
+		return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err)
+	}
+
+	newFile, err := ioutil.TempFile(dir, "token")
+	if err != nil {
+		return fmt.Errorf("failed to create the temp file to write the token: %v", err)
+	}
+	tempPath := newFile.Name()
+
+	if err := json.NewEncoder(newFile).Encode(token); err != nil {
+		return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err)
+	}
+	if err := newFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temp file %s: %v", tempPath, err)
+	}
+
+	// Atomic replace to avoid multi-writer file corruptions
+	if err := os.Rename(tempPath, path); err != nil {
+		return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err)
+	}
+	if err := os.Chmod(path, mode); err != nil {
+		return fmt.Errorf("failed to chmod the token file %s: %v", path, err)
+	}
+	return nil
+}
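+
+// A minimal round-trip sketch using the two helpers above (path and mode are
+// arbitrary example values):
+//
+//	if err := SaveToken("/tmp/token.json", 0600, token); err != nil { /* handle error */ }
+//	restored, err := LoadToken("/tmp/token.json")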
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
new file mode 100644
index 00000000..834401e0
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -0,0 +1,60 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"net/http"
+)
+
+const (
+	contentType      = "Content-Type"
+	mimeTypeFormPost = "application/x-www-form-urlencoded"
+)
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
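+
+// A minimal decorator sketch (logRequests is a hypothetical example name) that
+// logs every request before delegating to the wrapped Sender:
+//
+//	logRequests := func(s Sender) Sender {
+//		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+//			log.Printf("%s %s", r.Method, r.URL)
+//			return s.Do(r)
+//		})
+//	}
+//	client := CreateSender(logRequests)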
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
new file mode 100644
index 00000000..4083e76e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -0,0 +1,1055 @@
+package adal
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest/date"
+	"github.com/Azure/go-autorest/tracing"
+	"github.com/dgrijalva/jwt-go"
+)
+
+const (
+	defaultRefresh = 5 * time.Minute
+
+	// OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow
+	OAuthGrantTypeDeviceCode = "device_code"
+
+	// OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows
+	OAuthGrantTypeClientCredentials = "client_credentials"
+
+	// OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows
+	OAuthGrantTypeUserPass = "password"
+
+	// OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows
+	OAuthGrantTypeRefreshToken = "refresh_token"
+
+	// OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows
+	OAuthGrantTypeAuthorizationCode = "authorization_code"
+
+	// metadataHeader is the header required by MSI extension
+	metadataHeader = "Metadata"
+
+	// msiEndpoint is the well known endpoint for getting MSI authentication tokens
+	msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token"
+
+	// the default number of attempts to refresh an MSI authentication token
+	defaultMaxMSIRefreshAttempts = 5
+)
+
+// OAuthTokenProvider is an interface which should be implemented by an access token retriever
+type OAuthTokenProvider interface {
+	OAuthToken() string
+}
+
+// MultitenantOAuthTokenProvider provides tokens used for multi-tenant authorization.
+type MultitenantOAuthTokenProvider interface {
+	PrimaryOAuthToken() string
+	AuxiliaryOAuthTokens() []string
+}
+
+// TokenRefreshError is an interface used by errors returned during token refresh.
+type TokenRefreshError interface {
+	error
+	Response() *http.Response
+}
+
+// Refresher is an interface for token refresh functionality
+type Refresher interface {
+	Refresh() error
+	RefreshExchange(resource string) error
+	EnsureFresh() error
+}
+
+// RefresherWithContext is an interface for token refresh functionality
+type RefresherWithContext interface {
+	RefreshWithContext(ctx context.Context) error
+	RefreshExchangeWithContext(ctx context.Context, resource string) error
+	EnsureFreshWithContext(ctx context.Context) error
+}
+
+// TokenRefreshCallback is the type representing callbacks that will be called after
+// a successful token refresh
+type TokenRefreshCallback func(Token) error
+
+// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
+type Token struct {
+	AccessToken  string `json:"access_token"`
+	RefreshToken string `json:"refresh_token"`
+
+	ExpiresIn json.Number `json:"expires_in"`
+	ExpiresOn json.Number `json:"expires_on"`
+	NotBefore json.Number `json:"not_before"`
+
+	Resource string `json:"resource"`
+	Type     string `json:"token_type"`
+}
+
+func newToken() Token {
+	return Token{
+		ExpiresIn: "0",
+		ExpiresOn: "0",
+		NotBefore: "0",
+	}
+}
+
+// IsZero returns true if the token object is zero-initialized.
+func (t Token) IsZero() bool {
+	return t == Token{}
+}
+
+// Expires returns the time.Time when the Token expires.
+func (t Token) Expires() time.Time {
+	s, err := t.ExpiresOn.Float64()
+	if err != nil {
+		s = -3600
+	}
+
+	expiration := date.NewUnixTimeFromSeconds(s)
+
+	return time.Time(expiration).UTC()
+}
+
+// IsExpired returns true if the Token is expired, false otherwise.
+func (t Token) IsExpired() bool {
+	return t.WillExpireIn(0)
+}
+
+// WillExpireIn returns true if the Token will expire after the passed time.Duration interval
+// from now, false otherwise.
+func (t Token) WillExpireIn(d time.Duration) bool {
+	return !t.Expires().After(time.Now().Add(d))
+}
+
+// OAuthToken returns the current access token.
+func (t *Token) OAuthToken() string {
+	return t.AccessToken
+}
+
+// ServicePrincipalSecret is an interface that allows various secret mechanisms to fill the form
+// that is submitted when acquiring an oAuth token.
+type ServicePrincipalSecret interface {
+	SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error
+}
+
+// ServicePrincipalNoSecret represents a secret type that contains no secret,
+// meaning it is not valid for fetching a fresh token. This is used by manually created tokens.
+type ServicePrincipalNoSecret struct {
+}
+
+// SetAuthenticationValues is a method of the interface ServicePrincipalSecret.
+// It only returns an error for the ServicePrincipalNoSecret type.
+func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error {
+	return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token")
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) {
+	type tokenType struct {
+		Type string `json:"type"`
+	}
+	return json.Marshal(tokenType{
+		Type: "ServicePrincipalNoSecret",
+	})
+}
+
+// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization.
+type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. +func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)} + token.Header["x5c"] = x5c + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
+	// need to determine the token type
+	raw := map[string]interface{}{}
+	err := json.Unmarshal(data, &raw)
+	if err != nil {
+		return err
+	}
+	secret := raw["secret"].(map[string]interface{})
+	switch secret["type"] {
+	case "ServicePrincipalNoSecret":
+		spt.inner.Secret = &ServicePrincipalNoSecret{}
+	case "ServicePrincipalTokenSecret":
+		spt.inner.Secret = &ServicePrincipalTokenSecret{}
+	case "ServicePrincipalCertificateSecret":
+		return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported")
+	case "ServicePrincipalMSISecret":
+		return errors.New("unmarshalling ServicePrincipalMSISecret is not supported")
+	case "ServicePrincipalUsernamePasswordSecret":
+		spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{}
+	case "ServicePrincipalAuthorizationCodeSecret":
+		spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{}
+	default:
+		return fmt.Errorf("unrecognized token type '%s'", secret["type"])
+	}
+	err = json.Unmarshal(data, &spt.inner)
+	if err != nil {
+		return err
+	}
+	// Don't override the refreshLock or the sender if those have already been set.
+	if spt.refreshLock == nil {
+		spt.refreshLock = &sync.RWMutex{}
+	}
+	if spt.sender == nil {
+		spt.sender = &http.Client{Transport: tracing.Transport}
+	}
+	return nil
+}
+
+// internal type used for marshalling/unmarshalling
+type servicePrincipalToken struct {
+	Token         Token                  `json:"token"`
+	Secret        ServicePrincipalSecret `json:"secret"`
+	OauthConfig   OAuthConfig            `json:"oauth"`
+	ClientID      string                 `json:"clientID"`
+	Resource      string                 `json:"resource"`
+	AutoRefresh   bool                   `json:"autoRefresh"`
+	RefreshWithin time.Duration          `json:"refreshWithin"`
+}
+
+func validateOAuthConfig(oac OAuthConfig) error {
+	if oac.IsZero() {
+		return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized")
+	}
+	return nil
+}
+
+// NewServicePrincipalTokenWithSecret creates a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation.
+func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + Token: newToken(), + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: &http.Client{Transport: tracing.Transport}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. 
+func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(secret, "secret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalTokenSecret{
+			ClientSecret: secret,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied certificate and RSA private key.
+func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if certificate == nil {
+		return nil, fmt.Errorf("parameter 'certificate' cannot be nil")
+	}
+	if privateKey == nil {
+		return nil, fmt.Errorf("parameter 'privateKey' cannot be nil")
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalCertificateSecret{
+			PrivateKey:  privateKey,
+			Certificate: certificate,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password.
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(username, "username"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(password, "password"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalUsernamePasswordSecret{
+			Username: username,
+			Password: password,
+		},
+		callbacks...,
+	)
+}
+
+// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the supplied authorization code.
+func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateOAuthConfig(oauthConfig); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientID, "clientID"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(clientSecret, "clientSecret"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(redirectURI, "redirectURI"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+
+	return NewServicePrincipalTokenWithSecret(
+		oauthConfig,
+		clientID,
+		resource,
+		&ServicePrincipalAuthorizationCodeSecret{
+			ClientSecret:      clientSecret,
+			AuthorizationCode: authorizationCode,
+			RedirectURI:       redirectURI,
+		},
+		callbacks...,
+	)
+}
+
+// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines.
+func GetMSIVMEndpoint() (string, error) {
+	return msiEndpoint, nil
+}
+
+// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the system assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...)
+}
+
+// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension.
+// It will use the specified user assigned identity when creating the token.
+func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...)
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if userAssignedID != nil {
+		if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
+			return nil, err
+		}
+	}
+	// We set the oauth config token endpoint to be MSI's endpoint
+	msiEndpointURL, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	v := url.Values{}
+	v.Set("resource", resource)
+	v.Set("api-version", "2018-02-01")
+	if userAssignedID != nil {
+		v.Set("client_id", *userAssignedID)
+	}
+	msiEndpointURL.RawQuery = v.Encode()
+
+	spt := &ServicePrincipalToken{
+		inner: servicePrincipalToken{
+			Token: newToken(),
+			OauthConfig: OAuthConfig{
+				TokenEndpoint: *msiEndpointURL,
+			},
+			Secret:        &ServicePrincipalMSISecret{},
+			Resource:      resource,
+			AutoRefresh:   true,
+			RefreshWithin: defaultRefresh,
+		},
+		refreshLock:           &sync.RWMutex{},
+		sender:                &http.Client{Transport: tracing.Transport},
+		refreshCallbacks:      callbacks,
+		MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
+	}
+
+	if userAssignedID != nil {
+		spt.inner.ClientID = *userAssignedID
+	}
+
+	return spt, nil
+}
+
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+	message string
+	resp    *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+	return tre.message
+}
+
+// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
+func (tre tokenRefreshError) Response() *http.Response {
+	return tre.resp
+}
+
+func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
+	return tokenRefreshError{message: message, resp: resp}
+}
+
+// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFresh() error {
+	return spt.EnsureFreshWithContext(context.Background())
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+		// take the write lock then check to see if the token was already refreshed
+		spt.refreshLock.Lock()
+		defer spt.refreshLock.Unlock()
+		if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+			return spt.refreshInternal(ctx, spt.inner.Resource)
+		}
+	}
+	return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+	if spt.refreshCallbacks != nil {
+		for _, callback := range spt.refreshCallbacks {
+			err := callback(spt.inner.Token)
+			if err != nil {
+				return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+			}
+		}
+	}
+	return nil
+}
+
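+// Example (minimal sketch; the resource URI is a placeholder): acquiring a
+// token from the IMDS endpoint using the VM's system-assigned identity. For a
+// user-assigned identity use NewServicePrincipalTokenFromMSIWithUserAssignedID.
+func exampleMSIToken() (*ServicePrincipalToken, error) {
+	endpoint, err := GetMSIVMEndpoint()
+	if err != nil {
+		return nil, err
+	}
+	return NewServicePrincipalTokenFromMSI(endpoint, "https://management.azure.com/")
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.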
+func (spt *ServicePrincipalToken) Refresh() error {
+	return spt.RefreshWithContext(context.Background())
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, spt.inner.Resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+	return spt.RefreshExchangeWithContext(context.Background(), resource)
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+	switch spt.inner.Secret.(type) {
+	case *ServicePrincipalUsernamePasswordSecret:
+		return OAuthGrantTypeUserPass
+	case *ServicePrincipalAuthorizationCodeSecret:
+		return OAuthGrantTypeAuthorizationCode
+	default:
+		return OAuthGrantTypeClientCredentials
+	}
+}
+
+func isIMDS(u url.URL) bool {
+	imds, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return false
+	}
+	return u.Host == imds.Host && u.Path == imds.Path
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+	req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+	if err != nil {
+		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+	}
+	req.Header.Add("User-Agent", UserAgent())
+	req = req.WithContext(ctx)
+	if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+		v := url.Values{}
+		v.Set("client_id", spt.inner.ClientID)
+		v.Set("resource", resource)
+
+		if spt.inner.Token.RefreshToken != "" {
+			v.Set("grant_type", OAuthGrantTypeRefreshToken)
+			v.Set("refresh_token", spt.inner.Token.RefreshToken)
+			// web apps must specify client_secret when refreshing tokens
+			// see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
+			if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
+				err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			v.Set("grant_type", spt.getGrantType())
+			err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+			if err != nil {
+				return err
+			}
+		}
+
+		s := v.Encode()
+		body := ioutil.NopCloser(strings.NewReader(s))
+		req.ContentLength = int64(len(s))
+		req.Header.Set(contentType, mimeTypeFormPost)
+		req.Body = body
+	}
+
+	if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+		req.Method = http.MethodGet
+		req.Header.Set(metadataHeader, "true")
+	}
+
+	var resp *http.Response
+	if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+		resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+	} else {
+		resp, err = spt.sender.Do(req)
+	}
+	if err != nil {
+		return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. 
Error = '%v'", err), nil) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.inner.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // retry on temporary network errors, e.g. transient network failures. + // if we don't receive a response then assume we can't connect to the + // endpoint so we're likely not running on an Azure VM so don't retry. + if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +// returns true if the specified error is a temporary network error or false if it's not. +// if the error doesn't implement the net.Error interface the return value is true. 
+func isTemporaryNetworkError(err error) bool {
+	if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
+		return true
+	}
+	return false
+}
+
+// returns true if slice ints contains the value n
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+// SetAutoRefresh enables or disables automatic refreshing of stale tokens.
+func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) {
+	spt.inner.AutoRefresh = autoRefresh
+}
+
+// SetRefreshWithin sets the interval within which, if the token will expire, EnsureFresh will
+// refresh the token.
+func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) {
+	spt.inner.RefreshWithin = d
+}
+
+// SetSender sets the http.Client used when obtaining the Service Principal token. An
+// undecorated http.Client is used by default.
+func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s }
+
+// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token.
+func (spt *ServicePrincipalToken) OAuthToken() string {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token.OAuthToken()
+}
+
+// Token returns a copy of the current token.
+func (spt *ServicePrincipalToken) Token() Token {
+	spt.refreshLock.RLock()
+	defer spt.refreshLock.RUnlock()
+	return spt.inner.Token
+}
+
+// MultiTenantServicePrincipalToken contains tokens for multi-tenant authorization.
+type MultiTenantServicePrincipalToken struct {
+	PrimaryToken    *ServicePrincipalToken
+	AuxiliaryTokens []*ServicePrincipalToken
+}
+
+// PrimaryOAuthToken returns the primary authorization token.
+func (mt *MultiTenantServicePrincipalToken) PrimaryOAuthToken() string {
+	return mt.PrimaryToken.OAuthToken()
+}
+
+// AuxiliaryOAuthTokens returns one to three auxiliary authorization tokens.
+func (mt *MultiTenantServicePrincipalToken) AuxiliaryOAuthTokens() []string {
+	tokens := make([]string, len(mt.AuxiliaryTokens))
+	for i := range mt.AuxiliaryTokens {
+		tokens[i] = mt.AuxiliaryTokens[i].OAuthToken()
+	}
+	return tokens
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (mt *MultiTenantServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if err := mt.PrimaryToken.EnsureFreshWithContext(ctx); err != nil {
+		return fmt.Errorf("failed to refresh primary token: %v", err)
+	}
+	for _, aux := range mt.AuxiliaryTokens {
+		if err := aux.EnsureFreshWithContext(ctx); err != nil {
+			return fmt.Errorf("failed to refresh auxiliary token: %v", err)
+		}
+	}
+	return nil
+}
+
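+// Example (minimal sketch; assumes the NewMultiTenantOAuthConfig helper and
+// OAuthOptions type from this package's config.go, with placeholder tenant
+// and credential values): building a token spanning a primary tenant and two
+// auxiliary tenants.
+func exampleMultiTenantToken() (*MultiTenantServicePrincipalToken, error) {
+	cfg, err := NewMultiTenantOAuthConfig(
+		"https://login.microsoftonline.com/", "primary-tenant-id",
+		[]string{"aux-tenant-one", "aux-tenant-two"}, OAuthOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return NewMultiTenantServicePrincipalToken(cfg, "my-client-id", "my-client-secret", "https://management.azure.com/")
+}
+
+// NewMultiTenantServicePrincipalToken creates a new MultiTenantServicePrincipalToken with the specified credentials and resource.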
+func NewMultiTenantServicePrincipalToken(multiTenantCfg MultiTenantOAuthConfig, clientID string, secret string, resource string) (*MultiTenantServicePrincipalToken, error) { + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + auxTenants := multiTenantCfg.AuxiliaryTenants() + m := MultiTenantServicePrincipalToken{ + AuxiliaryTokens: make([]*ServicePrincipalToken, len(auxTenants)), + } + primary, err := NewServicePrincipalToken(*multiTenantCfg.PrimaryTenant(), clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for primary tenant: %v", err) + } + m.PrimaryToken = primary + for i := range auxTenants { + aux, err := NewServicePrincipalToken(*auxTenants[i], clientID, secret, resource) + if err != nil { + return nil, fmt.Errorf("failed to create SPT for auxiliary tenant: %v", err) + } + m.AuxiliaryTokens[i] = aux + } + return &m, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/version.go new file mode 100644 index 00000000..c867b348 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/adal/version.go @@ -0,0 +1,45 @@ +package adal + +import ( + "fmt" + "runtime" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +const number = "v1.0.0" + +var ( + ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version. +func UserAgent() string { + return ua +} + +// AddToUserAgent adds an extension to the current user agent +func AddToUserAgent(extension string) error { + if extension != "" { + ua = fmt.Sprintf("%s %s", ua, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 00000000..380865cd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,336 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//  http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/tracing"
+)
+
+const (
+	bearerChallengeHeader       = "Www-Authenticate"
+	bearer                      = "Bearer"
+	tenantID                    = "tenantID"
+	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
+	bingAPISdkHeader            = "X-BingApis-SDK-Client"
+	golangBingAPISdkHeaderValue = "Go-SDK"
+	authorization               = "Authorization"
+	basic                       = "Basic"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an ApiKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an ApiKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an ApiKeyAuthorizer with the specified headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds HTTP headers and query parameters.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
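+// Example (minimal sketch; the header name, key and URL are placeholders):
+// decorating a request with an API key header via APIKeyAuthorizer.
+func exampleAPIKeyRequest() (*http.Request, error) {
+	authorizer := NewAPIKeyAuthorizerWithHeaders(map[string]interface{}{
+		"Ocp-Apim-Subscription-Key": "my-api-key",
+	})
+	return Prepare(&http.Request{},
+		WithBaseURL("https://example.com/"),
+		authorizer.WithAuthorization())
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.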
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key
+// header along with the Bing API SDK header.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements the bearer authorization
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				// the ordering is important here, prefer RefresherWithContext if available
+				if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok {
+					err = refresher.EnsureFreshWithContext(r.Context())
+				} else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok {
+					err = refresher.EnsureFresh()
+				}
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh the Token for request to %s", r.URL)
+				}
+				return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken())))
+			}
+			return r, err
+		})
+	}
+}
+
+// BearerAuthorizerCallbackFunc is the authentication callback signature.
+type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)
+
+// BearerAuthorizerCallback implements bearer authorization via a callback.
+type BearerAuthorizerCallback struct {
+	sender   Sender
+	callback BearerAuthorizerCallbackFunc
+}
+
+// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
+// is invoked when the HTTP request is submitted.
+func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
+	if sender == nil {
+		sender = &http.Client{Transport: tracing.Transport}
+	}
+	return &BearerAuthorizerCallback{sender: sender, callback: callback}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
+// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				// make a copy of the request and remove the body as it's not
+				// required and avoids us having to create a copy of it.
+				rCopy := *r
+				removeRequestBody(&rCopy)
+
+				resp, err := bacb.sender.Do(&rCopy)
+				if err == nil && resp.StatusCode == 401 {
+					defer resp.Body.Close()
+					if hasBearerChallenge(resp) {
+						bc, err := newBearerChallenge(resp)
+						if err != nil {
+							return r, err
+						}
+						if bacb.callback != nil {
+							ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
+							if err != nil {
+								return r, err
+							}
+							return Prepare(r, ba.WithAuthorization())
+						}
+					}
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// returns true if the HTTP response contains a bearer challenge
+func hasBearerChallenge(resp *http.Response) bool {
+	authHeader := resp.Header.Get(bearerChallengeHeader)
+	if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 {
+		return false
+	}
+	return true
+}
+
+type bearerChallenge struct {
+	values map[string]string
+}
+
+func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
+	challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
+	trimmedChallenge := challenge[len(bearer)+1:]
+
+	// challenge is a set of key=value pairs that are comma delimited
+	pairs := strings.Split(trimmedChallenge, ",")
+	if len(pairs) < 1 {
+		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
+		return bc, err
+	}
+
+	bc.values = make(map[string]string)
+	for i := range pairs {
+		trimmedPair := strings.TrimSpace(pairs[i])
+		pair := strings.Split(trimmedPair, "=")
+		if len(pair) == 2 {
+			// remove the enclosing quotes
+			key := strings.Trim(pair[0], "\"")
+			value := strings.Trim(pair[1], "\"")
+
+			switch key {
+			case "authorization", "authorization_uri":
+				// strip the tenant ID from the authorization URL
+				asURL, err := url.Parse(value)
+				if err != nil {
+					return bc, err
+				}
+				bc.values[tenantID] = asURL.Path[1:]
+			default:
+				bc.values[key] = value
+			}
+		}
+	}
+
+	return bc, err
+}
+
+// EventGridKeyAuthorizer implements authorization for event grid using key authentication.
+type EventGridKeyAuthorizer struct {
+	topicKey string
+}
+
+// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer
+// with the specified topic key.
+func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer {
+	return EventGridKeyAuthorizer{topicKey: topicKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header.
+func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := map[string]interface{}{
+		"aeg-sas-key": egta.topicKey,
+	}
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic <base64>" where <base64> is a base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+	userName string
+	password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+	return &BasicAuthorizer{
+		userName: userName,
+		password: password,
+	}
+}
+
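+// Example (minimal sketch; the credentials and URL are placeholders): adding
+// a basic Authorization header to a request with BasicAuthorizer.
+func exampleBasicAuthRequest() (*http.Request, error) {
+	basicAuthorizer := NewBasicAuthorizer("my-user", "my-password")
+	return Prepare(&http.Request{},
+		WithBaseURL("https://example.com/"),
+		basicAuthorizer.WithAuthorization())
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.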
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// MultiTenantServicePrincipalTokenAuthorizer provides authentication across tenants.
+type MultiTenantServicePrincipalTokenAuthorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NewMultiTenantServicePrincipalTokenAuthorizer creates an authorizer using the given multitenant token provider.
+func NewMultiTenantServicePrincipalTokenAuthorizer(tp adal.MultitenantOAuthTokenProvider) MultiTenantServicePrincipalTokenAuthorizer {
+	return &multiTenantSPTAuthorizer{tp: tp}
+}
+
+type multiTenantSPTAuthorizer struct {
+	tp adal.MultitenantOAuthTokenProvider
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header using the
+// primary token along with the auxiliary authorization header using the auxiliary tokens.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
+func (mt multiTenantSPTAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err != nil {
+				return r, err
+			}
+			if refresher, ok := mt.tp.(adal.RefresherWithContext); ok {
+				err = refresher.EnsureFreshWithContext(r.Context())
+				if err != nil {
+					var resp *http.Response
+					if tokError, ok := err.(adal.TokenRefreshError); ok {
+						resp = tokError.Response()
+					}
+					return r, NewErrorWithError(err, "azure.multiTenantSPTAuthorizer", "WithAuthorization", resp,
+						"Failed to refresh one or more Tokens for request to %s", r.URL)
+				}
+			}
+			r, err = Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", mt.tp.PrimaryOAuthToken())))
+			if err != nil {
+				return r, err
+			}
+			auxTokens := mt.tp.AuxiliaryOAuthTokens()
+			for i := range auxTokens {
+				auxTokens[i] = fmt.Sprintf("Bearer %s", auxTokens[i])
+			}
+			return Prepare(r, WithHeader(headerAuxAuthorization, strings.Join(auxTokens, "; ")))
+		})
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/autorest.go
new file mode 100644
index 00000000..aafdf021
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/autorest.go
@@ -0,0 +1,150 @@
+/*
+Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines
+and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/)
+generated Go code.
+
+The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending,
+and Responding. A typical pattern is:
+
+  req, err := Prepare(&http.Request{},
+    token.WithAuthorization())
+
+  resp, err := Send(req,
+    WithLogging(logger),
+    DoErrorIfStatusCode(http.StatusInternalServerError),
+    DoCloseIfError(),
+    DoRetryForAttempts(5, time.Second))
+
+  err = Respond(resp,
+    ByDiscardingBody(),
+    ByClosing())
+
+Each phase relies on decorators to modify and / or manage processing. Decorators may first modify
+and then pass the data along, pass the data first and then modify the result, or wrap themselves
+around passing the data (such as a logger might do). Decorators run in the order provided. 
For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. +*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. 
+func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. +func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 00000000..1cb41cbe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,924 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/tracing" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + pt pollingTracker +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + return Future{pt: pt}, err +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. 
+func (f Future) Status() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingStatus()
+}
+
+// PollingMethod returns the method used to monitor the status of the asynchronous operation.
+func (f Future) PollingMethod() PollingMethodType {
+	if f.pt == nil {
+		return PollingUnknown
+	}
+	return f.pt.pollingMethod()
+}
+
+// DoneWithContext queries the service to see if the operation has completed.
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
+	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
+	defer func() {
+		sc := -1
+		resp := f.Response()
+		if resp != nil {
+			sc = resp.StatusCode
+		}
+		tracing.EndSpan(ctx, sc, err)
+	}()
+
+	if f.pt == nil {
+		return false, autorest.NewError("Future", "Done", "future is not initialized")
+	}
+	if f.pt.hasTerminated() {
+		return true, f.pt.pollingError()
+	}
+	if err := f.pt.pollForStatus(ctx, sender); err != nil {
+		return false, err
+	}
+	if err := f.pt.checkForErrors(); err != nil {
+		return f.pt.hasTerminated(), err
+	}
+	if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
+		return false, err
+	}
+	if err := f.pt.initPollingMethod(); err != nil {
+		return false, err
+	}
+	if err := f.pt.updatePollingMethod(); err != nil {
+		return false, err
+	}
+	return f.pt.hasTerminated(), f.pt.pollingError()
+}
+
+// GetPollingDelay returns a duration the application should wait before checking
+// the status of the asynchronous request and true; this value is returned from
+// the service via the Retry-After response header. If the header wasn't returned
+// then the function returns the zero-value time.Duration and false.
+func (f Future) GetPollingDelay() (time.Duration, bool) {
+	if f.pt == nil {
+		return 0, false
+	}
+	resp := f.pt.latestResponse()
+	if resp == nil {
+		return 0, false
+	}
+
+	retry := resp.Header.Get(autorest.HeaderRetryAfter)
+	if retry == "" {
+		return 0, false
+	}
+
+	d, err := time.ParseDuration(retry + "s")
+	if err != nil {
+		panic(err)
+	}
+
+	return d, true
+}
+
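+// Example (minimal sketch; resp is the initial response of a long-running
+// operation and client is a configured autorest.Client): blocking until the
+// operation completes, then fetching the final payload.
+func exampleWaitForFuture(resp *http.Response, client autorest.Client) (*http.Response, error) {
+	future, err := NewFutureFromResponse(resp)
+	if err != nil {
+		return nil, err
+	}
+	if err := future.WaitForCompletionRef(context.Background(), client); err != nil {
+		return nil, err
+	}
+	return future.GetResult(client)
+}
+
+// WaitForCompletionRef will return when one of the following conditions is met: the long
+// running operation has completed, the provided context is cancelled, or the client's
+// polling duration has been exceeded. It will retry failed polling attempts based on
+// the retry value defined in the client up to the maximum retry attempts.
+// If no deadline is specified in the context then the client.PollingDuration will be
+// used to determine if a default deadline should be used.
+// If PollingDuration is greater than zero the value will be used as the context's timeout.
+// If PollingDuration is zero then no default deadline will be used.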
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
+	ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
+	defer func() {
+		sc := -1
+		resp := f.Response()
+		if resp != nil {
+			sc = resp.StatusCode
+		}
+		tracing.EndSpan(ctx, sc, err)
+	}()
+	cancelCtx := ctx
+	// if the provided context already has a deadline don't override it
+	_, hasDeadline := ctx.Deadline()
+	if d := client.PollingDuration; !hasDeadline && d != 0 {
+		var cancel context.CancelFunc
+		cancelCtx, cancel = context.WithTimeout(ctx, d)
+		defer cancel()
+	}
+
+	done, err := f.DoneWithContext(ctx, client)
+	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
+		if attempts >= client.RetryAttempts {
+			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
+		}
+		// we want delayAttempt to be zero in the non-error case so
+		// that DelayForBackoff doesn't perform exponential back-off
+		var delayAttempt int
+		var delay time.Duration
+		if err == nil {
+			// check for Retry-After delay, if not present use the client's polling delay
+			var ok bool
+			delay, ok = f.GetPollingDelay()
+			if !ok {
+				delay = client.PollingDelay
+			}
+		} else {
+			// there was an error polling for status so perform exponential
+			// back-off based on the number of attempts using the client's retry
+			// duration. update attempts after delayAttempt to avoid off-by-one.
+			delayAttempt = attempts
+			delay = client.RetryDuration
+			attempts++
+		}
+		// wait until the delay elapses or the context is cancelled
+		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
+		if !delayElapsed {
+			return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
+		}
+	}
+	return
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+func (f Future) MarshalJSON() ([]byte, error) {
+	return json.Marshal(f.pt)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (f *Future) UnmarshalJSON(data []byte) error {
+	// unmarshal into JSON object to determine the tracker type
+	obj := map[string]interface{}{}
+	err := json.Unmarshal(data, &obj)
+	if err != nil {
+		return err
+	}
+	if obj["method"] == nil {
+		return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property")
+	}
+	method := obj["method"].(string)
+	switch strings.ToUpper(method) {
+	case http.MethodDelete:
+		f.pt = &pollingTrackerDelete{}
+	case http.MethodPatch:
+		f.pt = &pollingTrackerPatch{}
+	case http.MethodPost:
+		f.pt = &pollingTrackerPost{}
+	case http.MethodPut:
+		f.pt = &pollingTrackerPut{}
+	default:
+		return autorest.NewError("Future", "UnmarshalJSON", "unsupported method '%s'", method)
+	}
+	// now unmarshal into the tracker
+	return json.Unmarshal(data, &f.pt)
+}
+
+// PollingURL returns the URL used for retrieving the status of the long-running operation.
+func (f Future) PollingURL() string {
+	if f.pt == nil {
+		return ""
+	}
+	return f.pt.pollingURL()
+}
+
+// GetResult should be called once polling has completed successfully.
+// It makes the final GET call to retrieve the resultant payload.
+func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
+	if f.pt.finalGetURL() == "" {
+		// we can end up in this situation if the async operation returns a 200
+		// with no polling URLs. 
in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + return sender.Do(req) +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. + initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(ctx context.Context, sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty + if len(b) == 0 { + return nil + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + + req = req.WithContext(ctx) + preparer := autorest.CreatePreparer(autorest.GetPrepareDecorators(ctx)...) + req, err = preparer.Prepare(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed preparing HTTP request") + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + err = pt.pollingError() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. 
+func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. + if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + 
return nil
+}
+
+// default initialization of polling URL/method. each verb tracker will update this as required.
+func (pt *pollingTrackerBase) initPollingMethod() error {
+	if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+		return err
+	} else if ao != "" {
+		pt.URI = ao
+		pt.Pm = PollingAsyncOperation
+		return nil
+	}
+	if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+		return err
+	} else if lh != "" {
+		pt.URI = lh
+		pt.Pm = PollingLocation
+		return nil
+	}
+	// it's ok if we didn't find a polling header, this will be handled elsewhere
+	return nil
+}
+
+// DELETE
+
+type pollingTrackerDelete struct {
+	pollingTrackerBase
+}
+
+func (pt *pollingTrackerDelete) updatePollingMethod() error {
+	// for 201 the Location header is required
+	if pt.resp.StatusCode == http.StatusCreated {
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+			return err
+		} else if lh == "" {
+			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response")
+		} else {
+			pt.URI = lh
+		}
+		pt.Pm = PollingLocation
+		pt.FinalGetURI = pt.URI
+	}
+	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+	if pt.resp.StatusCode == http.StatusAccepted {
+		ao, err := getURLFromAsyncOpHeader(pt.resp)
+		if err != nil {
+			return err
+		} else if ao != "" {
+			pt.URI = ao
+			pt.Pm = PollingAsyncOperation
+		}
+		// if the Location header is invalid and we already have a polling URL
+		// then we don't care if the Location header URL is malformed.
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+			return err
+		} else if lh != "" {
+			if ao == "" {
+				pt.URI = lh
+				pt.Pm = PollingLocation
+			}
+			// when both headers are returned we use the value in the Location header for the final GET
+			pt.FinalGetURI = lh
+		}
+		// make sure a polling URL was found
+		if pt.URI == "" {
+			return autorest.NewError("pollingTrackerDelete", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+		}
+	}
+	return nil
+}
+
+func (pt pollingTrackerDelete) checkForErrors() error {
+	return pt.baseCheckForErrors()
+}
+
+func (pt pollingTrackerDelete) provisioningStateApplicable() bool {
+	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent
+}
+
+// PATCH
+
+type pollingTrackerPatch struct {
+	pollingTrackerBase
+}
+
+func (pt *pollingTrackerPatch) updatePollingMethod() error {
+	// by default we can use the original URL for polling and final GET
+	if pt.URI == "" {
+		pt.URI = pt.resp.Request.URL.String()
+	}
+	if pt.FinalGetURI == "" {
+		pt.FinalGetURI = pt.resp.Request.URL.String()
+	}
+	if pt.Pm == PollingUnknown {
+		pt.Pm = PollingRequestURI
+	}
+	// for 201 it's permissible for no headers to be returned
+	if pt.resp.StatusCode == http.StatusCreated {
+		if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
+			return err
+		} else if ao != "" {
+			pt.URI = ao
+			pt.Pm = PollingAsyncOperation
+		}
+	}
+	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
+	// note the absence of the "final GET" mechanism for PATCH
+	if pt.resp.StatusCode == http.StatusAccepted {
+		ao, err := getURLFromAsyncOpHeader(pt.resp)
+		if err != nil {
+			return err
+		} else if ao != "" {
+			pt.URI = ao
+			pt.Pm = PollingAsyncOperation
+		}
+		if ao == "" {
+			if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
+				return err
+			} else if lh == "" {
+				return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs
in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+		if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" {
+			return err
+		} else if lh != "" {
+			if ao == "" {
+				pt.URI = lh
+				pt.Pm = PollingLocation
+			}
+		}
+		// make sure a polling URL was found
+		if pt.URI == "" {
+			return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response")
+		}
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) checkForErrors() error {
+	err := pt.baseCheckForErrors()
+	if err != nil {
+		return err
+	}
+	// if there are no LRO headers then the body cannot be empty
+	ao, err := getURLFromAsyncOpHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	lh, err := getURLFromLocationHeader(pt.resp)
+	if err != nil {
+		return err
+	}
+	if ao == "" && lh == "" && len(pt.rawBody) == 0 {
+		return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body")
+	}
+	return nil
+}
+
+func (pt pollingTrackerPut) provisioningStateApplicable() bool {
+	return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated
+}
+
+// creates a polling tracker based on the verb of the original request
+func createPollingTracker(resp *http.Response) (pollingTracker, error) {
+	var pt pollingTracker
+	switch strings.ToUpper(resp.Request.Method) {
+	case http.MethodDelete:
+		pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPatch:
+		pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPost:
+		pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	case http.MethodPut:
+		pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}}
+	default:
+		return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method)
+	}
+	if err := pt.initializeState(); err != nil {
+		return pt, err
+	}
+	// this initializes the polling header values; we do this during creation in case the
+	// initial response sends us invalid values, so that the API call returns a non-nil
+	// error (otherwise the error would only show up in Future.Done)
+	return pt, pt.updatePollingMethod()
+}
+
+// gets the polling URL from the Azure-AsyncOperation header.
+// ensures the URL is well-formed and absolute.
+func getURLFromAsyncOpHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// gets the polling URL from the Location header.
+// ensures the URL is well-formed and absolute.
+func getURLFromLocationHeader(resp *http.Response) (string, error) {
+	s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation))
+	if s == "" {
+		return "", nil
+	}
+	if !isValidURL(s) {
+		return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s)
+	}
+	return s, nil
+}
+
+// verify that the URL is valid and absolute
+func isValidURL(s string) bool {
+	u, err := url.Parse(s)
+	return err == nil && u.IsAbs()
+}
+
+// PollingMethodType defines a type used for enumerating polling mechanisms.
+type PollingMethodType string
+
+const (
+	// PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header.
+	PollingAsyncOperation PollingMethodType = "AsyncOperation"
+
+	// PollingLocation indicates the polling method uses the Location header.
+	PollingLocation PollingMethodType = "Location"
+
+	// PollingRequestURI indicates the polling method uses the original request URI.
+	PollingRequestURI PollingMethodType = "RequestURI"
+
+	// PollingUnknown indicates an unknown polling method and is the default value.
+	PollingUnknown PollingMethodType = ""
+)
+
+// AsyncOpIncompleteError is the type that's returned from a future that has not completed.
+type AsyncOpIncompleteError struct {
+	// FutureType is the name of the type composed of an azure.Future.
+	FutureType string
+}
+
+// Error returns an error message including the originating type name of the error.
+func (e AsyncOpIncompleteError) Error() string {
+	return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType)
+}
+
+// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters.
+func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError {
+	return AsyncOpIncompleteError{
+		FutureType: futureType,
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
new file mode 100644
index 00000000..b6ef1283
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
@@ -0,0 +1,737 @@
+package auth
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"crypto/rsa"
+	"crypto/x509"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+	"strings"
+	"unicode/utf16"
+
+	"github.com/Azure/go-autorest/autorest"
+	"github.com/Azure/go-autorest/autorest/adal"
+	"github.com/Azure/go-autorest/autorest/azure"
+	"github.com/Azure/go-autorest/autorest/azure/cli"
+	"github.com/dimchansky/utfbom"
+	"golang.org/x/crypto/pkcs12"
+)
+
+// The possible keys in the Values map.
+const (
+	SubscriptionID          = "AZURE_SUBSCRIPTION_ID"
+	TenantID                = "AZURE_TENANT_ID"
+	AuxiliaryTenantIDs      = "AZURE_AUXILIARY_TENANT_IDS"
+	ClientID                = "AZURE_CLIENT_ID"
+	ClientSecret            = "AZURE_CLIENT_SECRET"
+	CertificatePath         = "AZURE_CERTIFICATE_PATH"
+	CertificatePassword     = "AZURE_CERTIFICATE_PASSWORD"
+	Username                = "AZURE_USERNAME"
+	Password                = "AZURE_PASSWORD"
+	EnvironmentName         = "AZURE_ENVIRONMENT"
+	Resource                = "AZURE_AD_RESOURCE"
+	ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint"
+	ResourceManagerEndpoint = "ResourceManagerEndpoint"
+	GraphResourceID         = "GraphResourceID"
+	SQLManagementEndpoint   = "SQLManagementEndpoint"
+	GalleryEndpoint         = "GalleryEndpoint"
+	ManagementEndpoint      = "ManagementEndpoint"
+)
+
+// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username password
+// 4.
MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + return settings.GetAuthorizer() +} + +// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + settings.Values[Resource] = resource + return settings.GetAuthorizer() +} + +// EnvironmentSettings contains the available authentication settings. +type EnvironmentSettings struct { + Values map[string]string + Environment azure.Environment +} + +// GetSettingsFromEnvironment returns the available authentication settings from the environment. +func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) { + s = EnvironmentSettings{ + Values: map[string]string{}, + } + s.setValue(SubscriptionID) + s.setValue(TenantID) + s.setValue(AuxiliaryTenantIDs) + s.setValue(ClientID) + s.setValue(ClientSecret) + s.setValue(CertificatePath) + s.setValue(CertificatePassword) + s.setValue(Username) + s.setValue(Password) + s.setValue(EnvironmentName) + s.setValue(Resource) + if v := s.Values[EnvironmentName]; v == "" { + s.Environment = azure.PublicCloud + } else { + s.Environment, err = azure.EnvironmentFromName(v) + } + if s.Values[Resource] == "" { + s.Values[Resource] = s.Environment.ResourceManagerEndpoint + } + return +} + +// GetSubscriptionID returns the available subscription ID or an empty string. +func (settings EnvironmentSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified environment variable value to the Values map if it exists +func (settings EnvironmentSettings) setValue(key string) { + if v := os.Getenv(key); v != "" { + settings.Values[key] = v + } +} + +// helper to return client and tenant IDs +func (settings EnvironmentSettings) getClientAndTenant() (string, string) { + clientID := settings.Values[ClientID] + tenantID := settings.Values[TenantID] + return clientID, tenantID +} + +// GetClientCredentials creates a config object from the available client credentials. +// An error is returned if no client credentials are available. +func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) { + secret := settings.Values[ClientSecret] + if secret == "" { + return ClientCredentialsConfig{}, errors.New("missing client secret") + } + clientID, tenantID := settings.getClientAndTenant() + config := NewClientCredentialsConfig(clientID, secret, tenantID) + config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint + config.Resource = settings.Values[Resource] + if auxTenants, ok := settings.Values[AuxiliaryTenantIDs]; ok { + config.AuxTenants = strings.Split(auxTenants, ";") + for i := range config.AuxTenants { + config.AuxTenants[i] = strings.TrimSpace(config.AuxTenants[i]) + } + } + return config, nil +} + +// GetClientCertificate creates a config object from the available certificate credentials. +// An error is returned if no certificate credentials are available. 
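+//
+// A minimal usage sketch (assumes AZURE_CERTIFICATE_PATH and the related
+// variables are present in the environment, and that client is an
+// autorest.Client):
+//
+//	settings, _ := GetSettingsFromEnvironment()
+//	if cfg, err := settings.GetClientCertificate(); err == nil {
+//		authorizer, _ := cfg.Authorizer()
+//		client.Authorizer = authorizer
+//	}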
+func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) {
+	certPath := settings.Values[CertificatePath]
+	if certPath == "" {
+		return ClientCertificateConfig{}, errors.New("missing certificate path")
+	}
+	certPwd := settings.Values[CertificatePassword]
+	clientID, tenantID := settings.getClientAndTenant()
+	config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID)
+	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+	config.Resource = settings.Values[Resource]
+	return config, nil
+}
+
+// GetUsernamePassword creates a config object from the available username/password credentials.
+// An error is returned if no username/password credentials are available.
+func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) {
+	username := settings.Values[Username]
+	password := settings.Values[Password]
+	if username == "" || password == "" {
+		return UsernamePasswordConfig{}, errors.New("missing username/password")
+	}
+	clientID, tenantID := settings.getClientAndTenant()
+	config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
+	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+	config.Resource = settings.Values[Resource]
+	return config, nil
+}
+
+// GetMSI creates an MSI config object from the available client ID.
+func (settings EnvironmentSettings) GetMSI() MSIConfig {
+	config := NewMSIConfig()
+	config.Resource = settings.Values[Resource]
+	config.ClientID = settings.Values[ClientID]
+	return config
+}
+
+// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
+func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
+	clientID, tenantID := settings.getClientAndTenant()
+	config := NewDeviceFlowConfig(clientID, tenantID)
+	config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+	config.Resource = settings.Values[Resource]
+	return config
+}
+
+// GetAuthorizer creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username password
+// 4. MSI
+func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
+	// 1. Client credentials
+	if c, e := settings.GetClientCredentials(); e == nil {
+		return c.Authorizer()
+	}
+
+	// 2. Client certificate
+	if c, e := settings.GetClientCertificate(); e == nil {
+		return c.Authorizer()
+	}
+
+	// 3. Username password
+	if c, e := settings.GetUsernamePassword(); e == nil {
+		return c.Authorizer()
+	}
+
+	// 4. MSI
+	return settings.GetMSI().Authorizer()
+}
+
+// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order:
+// 1. Client credentials
+// 2. Client certificate
+func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
+	settings, err := GetSettingsFromFile()
+	if err != nil {
+		return nil, err
+	}
+	if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil {
+		return a, err
+	}
+	if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil {
+		return a, err
+	}
+	return nil, errors.New("auth file missing client and certificate credentials")
+}
+
+// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order:
+// 1. Client credentials
+// 2.
Client certificate +func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) { + s, err := GetSettingsFromFile() + if err != nil { + return nil, err + } + if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil { + return a, err + } + if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil { + return a, err + } + return nil, errors.New("auth file missing client and certificate credentials") +} + +// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLI() (autorest.Authorizer, error) { + settings, err := GetSettingsFromEnvironment() + if err != nil { + return nil, err + } + + if settings.Values[Resource] == "" { + settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint + } + + return NewAuthorizerFromCLIWithResource(settings.Values[Resource]) +} + +// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios. +func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) { + token, err := cli.GetTokenFromCLI(resource) + if err != nil { + return nil, err + } + + adalToken, err := token.ToADALToken() + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(&adalToken), nil +} + +// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file. +func GetSettingsFromFile() (FileSettings, error) { + s := FileSettings{} + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return s, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return s, err + } + + authFile := map[string]interface{}{} + err = json.Unmarshal(decoded, &authFile) + if err != nil { + return s, err + } + + s.Values = map[string]string{} + s.setKeyValue(ClientID, authFile["clientId"]) + s.setKeyValue(ClientSecret, authFile["clientSecret"]) + s.setKeyValue(CertificatePath, authFile["clientCertificate"]) + s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"]) + s.setKeyValue(SubscriptionID, authFile["subscriptionId"]) + s.setKeyValue(TenantID, authFile["tenantId"]) + s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"]) + s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"]) + s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"]) + s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"]) + s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"]) + s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"]) + return s, nil +} + +// FileSettings contains the available authentication settings. +type FileSettings struct { + Values map[string]string +} + +// GetSubscriptionID returns the available subscription ID or an empty string. 
+func (settings FileSettings) GetSubscriptionID() string { + return settings.Values[SubscriptionID] +} + +// adds the specified value to the Values map if it isn't nil +func (settings FileSettings) setKeyValue(key string, val interface{}) { + if val != nil { + settings.Values[key] = val.(string) + } +} + +// returns the specified AAD endpoint or the public cloud endpoint if unspecified +func (settings FileSettings) getAADEndpoint() string { + if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok { + return v + } + return azure.PublicCloud.ActiveDirectoryEndpoint +} + +// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) +} + +// ClientCredentialsAuthorizer creates an authorizer from the available client credentials. +func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ClientCredentialsAuthorizerWithResource(resource) +} + +// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken +// from the available client credentials and the specified resource. +func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) { + if _, ok := settings.Values[ClientSecret]; !ok { + return nil, errors.New("missing client secret") + } + config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID]) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource) +} + +func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) { + if _, ok := settings.Values[CertificatePath]; !ok { + return ClientCertificateConfig{}, errors.New("missing certificate path") + } + cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID]) + cfg.AADEndpoint = settings.getAADEndpoint() + cfg.Resource = resource + return cfg, nil +} + +// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource. +func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) { + spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource) + if err != nil { + return nil, err + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials. +func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) { + resource, err := settings.getResourceForToken(baseURI) + if err != nil { + return nil, err + } + return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource) +} + +// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials. 
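+//
+// Illustrative call (assumes AZURE_AUTH_LOCATION points at an auth file that
+// holds certificate credentials):
+//
+//	settings, _ := GetSettingsFromFile()
+//	authorizer, err := settings.ClientCertificateAuthorizer(azure.PublicCloud.ResourceManagerEndpoint)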
+func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
+	resource, err := settings.getResourceForToken(baseURI)
+	if err != nil {
+		return nil, err
+	}
+	return settings.ClientCertificateAuthorizerWithResource(resource)
+}
+
+// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
+	cfg, err := settings.clientCertificateConfigWithResource(resource)
+	if err != nil {
+		return nil, err
+	}
+	return cfg.ServicePrincipalToken()
+}
+
+// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
+func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+	cfg, err := settings.clientCertificateConfigWithResource(resource)
+	if err != nil {
+		return nil, err
+	}
+	return cfg.Authorizer()
+}
+
+func decode(b []byte) ([]byte, error) {
+	reader, enc := utfbom.Skip(bytes.NewReader(b))
+
+	switch enc {
+	case utfbom.UTF16LittleEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.LittleEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	case utfbom.UTF16BigEndian:
+		u16 := make([]uint16, (len(b)/2)-1)
+		err := binary.Read(reader, binary.BigEndian, &u16)
+		if err != nil {
+			return nil, err
+		}
+		return []byte(string(utf16.Decode(u16))), nil
+	}
+	return ioutil.ReadAll(reader)
+}
+
+func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
+	// Compare the default base URI from the SDK to the endpoints from the public cloud.
+	// Base URI and token resource are the same string. This func finds the authentication
+	// file field that matches the SDK base URI. The SDK defines the public cloud
+	// endpoint as its default base URI.
+	if !strings.HasSuffix(baseURI, "/") {
+		baseURI += "/"
+	}
+	switch baseURI {
+	case azure.PublicCloud.ServiceManagementEndpoint:
+		return settings.Values[ManagementEndpoint], nil
+	case azure.PublicCloud.ResourceManagerEndpoint:
+		return settings.Values[ResourceManagerEndpoint], nil
+	case azure.PublicCloud.ActiveDirectoryEndpoint:
+		return settings.Values[ActiveDirectoryEndpoint], nil
+	case azure.PublicCloud.GalleryEndpoint:
+		return settings.Values[GalleryEndpoint], nil
+	case azure.PublicCloud.GraphEndpoint:
+		return settings.Values[GraphResourceID], nil
+	}
+	return "", fmt.Errorf("auth: base URI not found in endpoints")
+}
+
+// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig {
+	return ClientCredentialsConfig{
+		ClientID:     clientID,
+		ClientSecret: clientSecret,
+		TenantID:     tenantID,
+		Resource:     azure.PublicCloud.ResourceManagerEndpoint,
+		AADEndpoint:  azure.PublicCloud.ActiveDirectoryEndpoint,
+	}
+}
+
+// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through a client certificate.
+// Defaults to Public Cloud and Resource Manager Endpoint.
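+//
+// For example (hypothetical path, password, and IDs):
+//
+//	cfg := NewClientCertificateConfig("/etc/ssl/sp.pfx", "pfx-password", "client-id", "tenant-id")
+//	authorizer, err := cfg.Authorizer()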
+func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig {
+	return ClientCertificateConfig{
+		CertificatePath:     certificatePath,
+		CertificatePassword: certificatePassword,
+		ClientID:            clientID,
+		TenantID:            tenantID,
+		Resource:            azure.PublicCloud.ResourceManagerEndpoint,
+		AADEndpoint:         azure.PublicCloud.ActiveDirectoryEndpoint,
+	}
+}
+
+// NewUsernamePasswordConfig creates a UsernamePasswordConfig object configured to obtain an Authorizer through username and password.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig {
+	return UsernamePasswordConfig{
+		Username:    username,
+		Password:    password,
+		ClientID:    clientID,
+		TenantID:    tenantID,
+		Resource:    azure.PublicCloud.ResourceManagerEndpoint,
+		AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
+	}
+}
+
+// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI.
+func NewMSIConfig() MSIConfig {
+	return MSIConfig{
+		Resource: azure.PublicCloud.ResourceManagerEndpoint,
+	}
+}
+
+// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow.
+// Defaults to Public Cloud and Resource Manager Endpoint.
+func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig {
+	return DeviceFlowConfig{
+		ClientID:    clientID,
+		TenantID:    tenantID,
+		Resource:    azure.PublicCloud.ResourceManagerEndpoint,
+		AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint,
+	}
+}
+
+// AuthorizerConfig provides an authorizer from the configuration provided.
+type AuthorizerConfig interface {
+	Authorizer() (autorest.Authorizer, error)
+}
+
+// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials.
+type ClientCredentialsConfig struct {
+	ClientID     string
+	ClientSecret string
+	TenantID     string
+	AuxTenants   []string
+	AADEndpoint  string
+	Resource     string
+}
+
+// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
+func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
+	oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
+	if err != nil {
+		return nil, err
+	}
+	return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+}
+
+// MultiTenantServicePrincipalToken creates a MultiTenantServicePrincipalToken from client credentials.
+func (ccc ClientCredentialsConfig) MultiTenantServicePrincipalToken() (*adal.MultiTenantServicePrincipalToken, error) {
+	oauthConfig, err := adal.NewMultiTenantOAuthConfig(ccc.AADEndpoint, ccc.TenantID, ccc.AuxTenants, adal.OAuthOptions{})
+	if err != nil {
+		return nil, err
+	}
+	return adal.NewMultiTenantServicePrincipalToken(oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+}
+
+// Authorizer gets the authorizer from client credentials.
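+// When AuxTenants is empty a single-tenant bearer authorizer is returned,
+// otherwise a multi-tenant one. Sketch with hypothetical IDs:
+//
+//	cfg := NewClientCredentialsConfig("client-id", "client-secret", "tenant-id")
+//	cfg.AuxTenants = []string{"aux-tenant-id"} // optional; enables the multi-tenant path
+//	authorizer, err := cfg.Authorizer()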
+func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + if len(ccc.AuxTenants) == 0 { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get SPT from client credentials: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil + } + mtSPT, err := ccc.MultiTenantServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get multitenant SPT from client credentials: %v", err) + } + return autorest.NewMultiTenantServicePrincipalTokenAuthorizer(mtSPT), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. +type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from client certificate. +func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) +} + +// Authorizer gets an authorizer object from client certificate. +func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ccc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. +type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. +func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := dfc.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ServicePrincipalToken gets the service principal token from device flow. 
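+// The device code message is written to the standard logger and the call then
+// blocks until the user completes or abandons the device sign-in. Sketch with
+// hypothetical IDs:
+//
+//	cfg := NewDeviceFlowConfig("client-id", "tenant-id")
+//	token, err := cfg.ServicePrincipalToken()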
+func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + if err != nil { + return nil, err + } + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + log.Println(*deviceCode.Message) + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// ServicePrincipalToken creates a ServicePrincipalToken from username and password. +func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) { + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + if err != nil { + return nil, err + } + return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) +} + +// Authorizer gets the authorizer from a username and a password. +func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + spToken, err := ups.ServicePrincipalToken() + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// Authorizer gets the authorizer from MSI. 
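+// This only succeeds where an MSI endpoint is reachable (for example an Azure
+// VM with a managed identity); set ClientID to select a user-assigned identity.
+// Sketch:
+//
+//	authorizer, err := NewMSIConfig().Authorizer()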
+func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + var spToken *adal.ServicePrincipalToken + if mc.ClientID == "" { + spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + } else { + spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err) + } + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod new file mode 100644 index 00000000..9605c971 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.mod @@ -0,0 +1,11 @@ +module github.com/Azure/go-autorest/autorest/azure/auth + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest v0.1.0 + github.com/Azure/go-autorest/autorest/adal v0.1.0 + github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 + github.com/dimchansky/utfbom v1.1.0 + golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum new file mode 100644 index 00000000..39f37022 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/auth/go.sum @@ -0,0 +1,153 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest v0.1.0 h1:z68s0uL7bVfplrwwCUsYoMezUVQdym6EPOllAT02BtU= +github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg= +github.com/Azure/go-autorest/autorest/adal v0.1.0 h1:RSw/7EAullliqwkZvgIGDYZWQm1PGKXI8c4aY/87yuU= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0 h1:YTtBrcb6mhA+PoSW8WxFDoIIyjp13XqJeX80ssQtri4= +github.com/Azure/go-autorest/autorest/azure/cli v0.1.0/go.mod h1:Dk8CUAt/b/PzkfeRsWzVG9Yj3ps8mS8ECztu43rdU8U= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod 
h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= 
+github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod 
h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= +golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e h1:nFYrTHrdrAOpShe27kaFHjsqYSEQ0KWqdWLu3xuZJts= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 00000000..3a0a439f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,326 @@ +// Package azure provides Azure-specific implementations used with AutoRest. +// See the included examples for more detail. 
+package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +// It adheres to the OData v4 specification for error responses. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` +} + +func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + + if se.Details != nil { + d, err := json.Marshal(se.Details) + if err != nil { + result += fmt.Sprintf(" Details=%v", se.Details) + } else { + result += fmt.Sprintf(" Details=%v", string(d)) + } + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } else { + result += fmt.Sprintf(" InnerError=%v", string(d)) + } + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } else { + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // per the OData v4 spec the details field must be an array of JSON objects. + // unfortunately not all services adhere to the spec and just return a single + // object instead of an array with one object. so we have to perform some + // shenanigans to accommodate both cases.
+ // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceError1 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + type serviceError2 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + se1 := serviceError1{} + err := json.Unmarshal(b, &se1) + if err == nil { + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) + return nil + } + + se2 := serviceError2{} + err = json.Unmarshal(b, &se2) + if err == nil { + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) + se.Details = append(se.Details, se2.Details) + return nil + } + return err +} + +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { + se.Code = code + se.Message = message + se.Target = target + se.Details = details + se.InnerError = inner + se.AdditionalInfo = additional +} + +// RequestError describes an error response returned by an Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from a service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// ParseResourceID parses a resource ID into a Resource struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply.
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. 
+ // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { + return err + } + } + if e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body as raw JSON in hopes of getting something. + rawBody := map[string]interface{}{} + if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { + return err + } + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod new file mode 100644 index 00000000..a147222b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.mod @@ -0,0 +1,10 @@ +module github.com/Azure/go-autorest/autorest/azure/cli + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.1.0 + github.com/Azure/go-autorest/autorest/date v0.1.0 + github.com/dimchansky/utfbom v1.1.0 + github.com/mitchellh/go-homedir v1.1.0 +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum new file mode 100644 index 00000000..1d098cff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/go.sum @@ -0,0 +1,144 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest/adal v0.1.0 h1:RSw/7EAullliqwkZvgIGDYZWQm1PGKXI8c4aY/87yuU= +github.com/Azure/go-autorest/autorest/adal v0.1.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy 
v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= +github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= 
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod 
h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= 
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 00000000..a336b958 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,79 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
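A usage sketch for the ParseResourceID helper added in azure.go above; the resource ID below is a made-up example, not taken from this patch:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Hypothetical ARM resource ID, for illustration only.
	id := "/subscriptions/sub0/resourceGroups/rg0/providers/Microsoft.Compute/virtualMachines/vm0"
	res, err := azure.ParseResourceID(id)
	if err != nil {
		panic(err)
	}
	// Prints: Microsoft.Compute virtualMachines vm0
	fmt.Println(res.Provider, res.ResourceType, res.ResourceName)
}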
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +const azureProfileJSON = "azureProfile.json" + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" { + return filepath.Join(cfgDir, azureProfileJSON), nil + } + return homedir.Expand("~/.azure/" + azureProfileJSON) +} + +// LoadProfile restores a Profile object from a file located at 'path'. +func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 00000000..810075ba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,170 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
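A usage sketch for the profile helpers added in profile.go above, assuming an Azure CLI profile already exists on disk:

package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest/azure/cli"
)

func main() {
	// Resolve the profile location (honors AZURE_CONFIG_DIR, else ~/.azure).
	path, err := cli.ProfilePath()
	if err != nil {
		log.Fatal(err)
	}
	profile, err := cli.LoadProfile(path)
	if err != nil {
		log.Fatal(err)
	}
	for _, sub := range profile.Subscriptions {
		fmt.Println(sub.Name, sub.ID, sub.IsDefault)
	}
}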
+ +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "os/exec" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +// ToADALToken converts an Azure CLI `Token` to an `adal.Token` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored from the Azure CLI +// TODO(#199): add unit test. +func AccessTokensPath() (string, error) { + // The Azure CLI allows users to customize the path of access tokens through an environment variable. + var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE") + var err error + + // Fallback logic to default path on non-cloud-shell environment. + // TODO(#200): remove the dependency on hard-coding path. + if accessTokenPath == "" { + accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json") + } + + return accessTokenPath, err +} + +// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil +} + +// LoadTokens restores a set of Token objects from a file located at 'path'. +func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} + +// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios. +func GetTokenFromCLI(resource string) (*Token, error) { + // This is the path that a developer can set to tell this class what the install path for Azure CLI is.
+ const azureCLIPath = "AzureCLIPath" + + // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI. + azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles")) + + // Default path for non-Windows. + const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin" + + // Validate resource, since it gets sent as a command line argument to Azure CLI + const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed." + match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource) + if err != nil { + return nil, err + } + if !match { + return nil, fmt.Errorf(invalidResourceErrorTemplate, resource) + } + + // Execute Azure CLI to get token + var cliCmd *exec.Cmd + if runtime.GOOS == "windows" { + cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir"))) + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows)) + cliCmd.Args = append(cliCmd.Args, "/c", "az") + } else { + cliCmd = exec.Command("az") + cliCmd.Env = os.Environ() + cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath)) + } + cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource) + + var stderr bytes.Buffer + cliCmd.Stderr = &stderr + + output, err := cliCmd.Output() + if err != nil { + return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String()) + } + + tokenResponse := Token{} + err = json.Unmarshal(output, &tokenResponse) + if err != nil { + return nil, err + } + + return &tokenResponse, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 00000000..6c20b817 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,244 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +const ( + // EnvironmentFilepathName captures the name of the environment variable containing the path to the file + // to be used while populating the Azure Environment. + EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + + // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud. 
+ NotAvailable = "N/A" +) + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// ResourceIdentifier contains a set of Azure resource IDs. +type ResourceIdentifier struct { + Graph string `json:"graph"` + KeyVault string `json:"keyVault"` + Datalake string `json:"datalake"` + Batch string `json:"batch"` + OperationalInsights string `json:"operationalInsights"` + Storage string `json:"storage"` +} + +// Environment represents a set of endpoints for each of Azure's Clouds. +type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"` + TokenAudience string `json:"tokenAudience"` + ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + CosmosDBDNSSuffix: "documents.azure.com", + TokenAudience: "https://management.azure.com/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.azure.net", + Datalake: "https://datalake.azure.net/", + Batch: "https://batch.core.windows.net/", + OperationalInsights: "https://api.loganalytics.io", + Storage: "https://storage.azure.com/", + }, + } + + // USGovernmentCloud is the cloud environment for the US Government + 
USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + ContainerRegistryDNSSuffix: "azurecr.us", + CosmosDBDNSSuffix: "documents.azure.us", + TokenAudience: "https://management.usgovcloudapi.net/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.windows.net/", + KeyVault: "https://vault.usgovcloudapi.net", + Datalake: NotAvailable, + Batch: "https://batch.core.usgovcloudapi.net/", + OperationalInsights: "https://api.loganalytics.us", + Storage: "https://storage.azure.com/", + }, + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.cn", + CosmosDBDNSSuffix: "documents.azure.cn", + TokenAudience: "https://management.chinacloudapi.cn/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.chinacloudapi.cn/", + KeyVault: "https://vault.azure.cn", + Datalake: NotAvailable, + Batch: "https://batch.chinacloudapi.cn/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: 
"https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: NotAvailable, + CosmosDBDNSSuffix: "documents.microsoftazure.de", + TokenAudience: "https://management.microsoftazure.de/", + ResourceIdentifiers: ResourceIdentifier{ + Graph: "https://graph.cloudapi.de/", + KeyVault: "https://vault.microsoftazure.de", + Datalake: NotAvailable, + Batch: "https://batch.cloudapi.de/", + OperationalInsights: NotAvailable, + Storage: "https://storage.azure.com/", + }, + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. +// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 00000000..507f9e95 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represents property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL ... + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... + EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents a property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints.
+func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case 
EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 00000000..86ce9f2b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,200 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. +// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + var re RequestError + err = autorest.Respond( + resp, + autorest.ByUnmarshallingJSON(&re), + ) + if err != nil { + return resp, err + } + err = re + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. 
Original error: %s", regErr, err) + } + } + } + return resp, err + }) + } +} + +func getProvider(re RequestError) (string, error) { + if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0]["target"].(string), nil + } + return "", errors.New("provider was not found in the response") +} + +func register(client autorest.Client, originalReq *http.Request, re RequestError) error { + subID := getSubscription(originalReq.URL.Path) + if subID == "" { + return errors.New("missing parameter subscriptionID to register resource provider") + } + providerName, err := getProvider(re) + if err != nil { + return fmt.Errorf("missing parameter provider to register resource provider: %s", err) + } + newURL := url.URL{ + Scheme: originalReq.URL.Scheme, + Host: originalReq.URL.Host, + } + + // taken from the resources SDK + // with almost identical code, this sections are easier to mantain + // It is also not a good idea to import the SDK here + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", providerName), + "subscriptionId": autorest.Encode("path", subID), + } + + const APIVersion = "2016-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + + req, err := preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + registrationStartTime := time.Now() + for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != 
nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 00000000..92da6adb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,324 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/tls" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/tracing" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is the number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. + DefaultRetryDuration = 30 * time.Second +) + +var ( + // StatusCodesForRetry are a defined group of status codes for which the client will retry + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} +
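A quick illustration (not part of the vendored file; the statuses are arbitrary examples) of how the Response status helpers defined next behave:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Wrap a raw *http.Response the way generated clients do.
	r := autorest.Response{Response: &http.Response{StatusCode: http.StatusNotFound}}
	fmt.Println(r.IsHTTPStatus(http.StatusOK))                          // false
	fmt.Println(r.HasHTTPStatus(http.StatusNotFound, http.StatusGone)) // true
	// A nil underlying response never matches, rather than panicking.
	fmt.Println(autorest.Response{}.HasHTTPStatus(http.StatusOK)) // false
}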
+// IsHTTPStatus returns true if the returned HTTP status code matches the provided status code. +// If there was no response (i.e. the underlying http.Response is nil) the return value is false. +func (r Response) IsHTTPStatus(statusCode int) bool { + if r.Response == nil { + return false + } + return r.Response.StatusCode == statusCode +} + +// HasHTTPStatus returns true if the returned HTTP status code matches one of the provided status codes. +// If there was no response (i.e. the underlying http.Response is nil) or no status codes are provided +// the return value is false. +func (r Response) HasHTTPStatus(statusCodes ...int) bool { + return ResponseHasStatusCode(r.Response, statusCodes...) +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write request: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + // Setting this to zero will use the provided context to control the duration.
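+ // (The resource provider registration polling in rp.go honors this same setting.)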
+ PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. +func NewClientWithUserAgent(ua string) Client { + return newClient(ua, tls.RenegotiateNever) +} + +// ClientOptions contains various Client configuration options. +type ClientOptions struct { + // UserAgent is an optional user-agent string to append to the default user agent. + UserAgent string + + // Renegotiation is an optional setting to control client-side TLS renegotiation. + Renegotiation tls.RenegotiationSupport +} + +// NewClientWithOptions returns an instance of a Client with the specified values. +func NewClientWithOptions(options ClientOptions) Client { + return newClient(options.UserAgent, options.Renegotiation) +} + +func newClient(ua string, renegotiation tls.RenegotiationSupport) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: DefaultRetryDuration, + UserAgent: UserAgent(), + } + c.Sender = c.sender(renegotiation) + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations + r, err := Prepare(r, + c.WithAuthorization(), + c.WithInspection()) + if err != nil { + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. + resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) + resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r) + logger.Instance.WriteResponse(resp, logger.Filter{}) + Respond(resp, c.ByInspecting()) + return resp, err +} +
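A short usage sketch (illustrative only; the user agent and endpoint are placeholders, and no Authorizer is configured) of the Client defined above:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	c := autorest.NewClientWithUserAgent("example-agent/1.0") // appended to the default user agent
	c.RetryAttempts = 5                                       // raise the default of 3
	c.SkipResourceProviderRegistration = true                 // opt out of the rp.go registration behavior

	req, _ := http.NewRequest(http.MethodGet, "https://management.azure.com/", nil)
	resp, err := c.Do(req) // applies User-Agent, authorization, and inspection before sending
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(resp.StatusCode)
}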
+// sender returns the Sender to which to send requests. +func (c Client) sender(renegotiation tls.RenegotiationSupport) Sender { + if c.Sender == nil { + // Use behaviour compatible with DefaultTransport, but require TLS minimum version. + var defaultTransport = http.DefaultTransport.(*http.Transport) + transport := tracing.Transport + // for non-default values of TLS renegotiation create a new tracing transport. + // updating tracing.Transport affects all clients which is not what we want. + if renegotiation != tls.RenegotiateNever { + transport = tracing.NewTransport() + } + transport.Base = &http.Transport{ + Proxy: defaultTransport.Proxy, + DialContext: defaultTransport.DialContext, + MaxIdleConns: defaultTransport.MaxIdleConns, + IdleConnTimeout: defaultTransport.IdleConnTimeout, + TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + TLSClientConfig: &tls.Config{ + MinVersion: tls.VersionTLS12, + Renegotiation: renegotiation, + }, + } + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j, Transport: transport} + } + + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If no Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. +func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 00000000..c4571065 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
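As a usage sketch for the Date type defined in this file (illustrative, not part of the vendored source), a value survives a JSON round trip in the RFC3339 full-date layout:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	d, err := date.ParseDate("2019-11-20")
	if err != nil {
		panic(err)
	}
	b, _ := json.Marshal(d)
	fmt.Println(string(b)) // "2019-11-20"

	var back date.Date
	_ = json.Unmarshal(b, &back)
	fmt.Println(back.ToTime().Equal(d.ToTime())) // true
}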
+ +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate creates a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/go.mod new file mode 100644 index 00000000..13a1e980 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/autorest/date + +go 1.12 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 00000000..b453fad0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. 
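+ // For example, it yields "2019-11-20T10:30:00Z" rather than "2019-11-20 10:30:00 +0000 UTC".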
+ b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 00000000..48fb39ba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). 
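A small sketch (illustrative, not part of the vendored file) of how the two layouts above play out when unmarshaling date.Time values, with and without a zone suffix:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	var withZone, withoutZone date.Time
	// A trailing zone designator selects the RFC3339 layout...
	_ = json.Unmarshal([]byte(`"2019-11-20T10:30:00Z"`), &withZone)
	// ...while a bare Azure UTC timestamp falls back to the suffix-less layout.
	_ = json.Unmarshal([]byte(`"2019-11-20T10:30:00.5"`), &withoutZone)
	fmt.Println(withZone.ToTime(), withoutZone.ToTime())
}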
+func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string; TimeRFC1123.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 00000000..7073959b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring leap seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. +func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retrieves the moment considered the Unix Epoch, i.e. the time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitutes a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} +
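A usage sketch (illustrative, not from the vendored file) of UnixTime's JSON representation as a plain number of seconds:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	t := date.NewUnixTimeFromSeconds(1574244000) // 2019-11-20T10:00:00Z
	b, _ := json.Marshal(t)
	fmt.Println(string(b)) // 1574244000

	var back date.UnixTime
	_ = json.Unmarshal(b, &back)
	fmt.Println(time.Time(back).UTC()) // 2019-11-20 10:00:00 +0000 UTC
}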
+// MarshalText preserves the UnixTime as RFC3339-formatted text, delegating to time.Time.MarshalText. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime from RFC3339-formatted text, delegating to time.Time.UnmarshalText. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian int64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts from a binary.LittleEndian int64 of nanoseconds since the epoch into a UnixTime. +func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 00000000..12addf0e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime parses a time string using the specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 00000000..f724f333 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,98 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error.
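+ // (DetailedError.StatusCode carries this value when no *http.Response accompanied the error.)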
+ UndefinedStatusCode = 0 +) + +// DetailedError encloses an error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced by the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // ServiceError is the response body of the failed API in bytes + ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. +func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + Response: resp, + } +} + +// Error returns a formatted string containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)).
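A construction sketch (illustrative; the client and method names are placeholders) showing how the helpers above compose a DetailedError and what its Error method renders:

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp := &http.Response{StatusCode: http.StatusBadGateway}
	err := autorest.NewErrorWithError(errors.New("upstream unreachable"),
		"example.WidgetClient", "Get", resp, "retrieving %s failed", "widgets")
	fmt.Println(err)
	// example.WidgetClient#Get: retrieving widgets failed: StatusCode=502 -- Original Error: upstream unreachable
}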
+func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/go.mod new file mode 100644 index 00000000..6b7c7f27 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/go.mod @@ -0,0 +1,12 @@ +module github.com/Azure/go-autorest/autorest + +go 1.12 + +require ( + github.com/Azure/go-autorest/autorest/adal v0.2.0 + github.com/Azure/go-autorest/autorest/mocks v0.1.0 + github.com/Azure/go-autorest/logger v0.1.0 + github.com/Azure/go-autorest/tracing v0.1.0 + go.opencensus.io v0.20.2 + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/go.sum new file mode 100644 index 00000000..a6848303 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/go.sum @@ -0,0 +1,143 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.4.12 h1:jGFvw3l57ViIVEPKKEUXPcLYIXJmQxLUh6ey1eJhwyc= +contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA= +github.com/Azure/go-autorest/autorest/adal v0.2.0 h1:7IBDu1jgh+ADHXnEYExkV9RE/ztOOlxdACkkPRthGKw= +github.com/Azure/go-autorest/autorest/adal v0.2.0/go.mod h1:MeS4XhScH55IST095THyTxElntu7WqB7pNbZo8Q5G3E= +github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/mocks v0.1.0 h1:Kx+AUU2Te+A3JIyYn6Dfs+cFgx5XorQKuIXrZGoq/SI= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.1.0 h1:TRBxC5Pj/fIuh4Qob0ZpkggbfT8RC0SubHbpV3p4/Vc= +github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.2.0 
h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= +github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= 
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2 h1:NAfh7zF0/3/HqtMvJNZ/RFrSlCE6ZTlHmKfhL/Dm1Jk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 
h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= +google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 00000000..9f864ab1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,548 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerAuxAuthorization = "x-ms-authorization-auxiliary" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// used as a key type in context.WithValue() +type ctxPrepareDecorators struct{} + +// WithPrepareDecorators adds the specified PrepareDecorators to the provided context. +// If no PrepareDecorators are provided the context is unchanged. 
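+// Decorators stored this way can be retrieved later with GetPrepareDecorators.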
+func WithPrepareDecorators(ctx context.Context, prepareDecorator []PrepareDecorator) context.Context { + if len(prepareDecorator) == 0 { + return ctx + } + return context.WithValue(ctx, ctxPrepareDecorators{}, prepareDecorator) +} + +// GetPrepareDecorators returns the PrepareDecorators in the provided context or the provided default PrepareDecorators. +func GetPrepareDecorators(ctx context.Context, defaultPrepareDecorators ...PrepareDecorator) []PrepareDecorator { + inCtx := ctx.Value(ctxPrepareDecorators{}) + if pd, ok := inCtx.([]PrepareDecorator); ok { + return pd + } + return defaultPrepareDecorators +} + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must take care not to share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) +} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their effect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} +
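A composition sketch (illustrative; the URL and header are placeholders) of building a request with these preparers, mirroring how rp.go above assembles its registration calls from an empty http.Request:

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// Start from an empty request and let the decorators fill it in.
	req, err := autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://management.azure.com"),
		autorest.WithHeader("x-example", "1"))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Method, req.URL.String(), req.Header.Get("x-example"))
	// GET https://management.azure.com 1
}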
+func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". +func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsMerge returns a PrepareDecorator that sets the HTTP method to MERGE. +func AsMerge() PrepareDecorator { return WithMethod("MERGE") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. 
+func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") }
+
+// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH.
+func AsPatch() PrepareDecorator { return WithMethod("PATCH") }
+
+// AsPost returns a PrepareDecorator that sets the HTTP method to POST.
+func AsPost() PrepareDecorator { return WithMethod("POST") }
+
+// AsPut returns a PrepareDecorator that sets the HTTP method to PUT.
+func AsPut() PrepareDecorator { return WithMethod("PUT") }
+
+// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed
+// from the supplied baseURL.
+func WithBaseURL(baseURL string) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				var u *url.URL
+				if u, err = url.Parse(baseURL); err != nil {
+					return r, err
+				}
+				if u.Scheme == "" {
+					err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL)
+				}
+				if err == nil {
+					r.URL = u
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+// WithBytes returns a PrepareDecorator that passes the supplied bytes directly to the
+// request body.
+func WithBytes(input *[]byte) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if input == nil {
+					return r, fmt.Errorf("Input Bytes was nil")
+				}
+
+				r.ContentLength = int64(len(*input))
+				r.Body = ioutil.NopCloser(bytes.NewReader(*input))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the
+// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map.
+func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(urlParameters)
+	for key, value := range parameters {
+		baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1)
+	}
+	return WithBaseURL(baseURL)
+}
+
+// WithFormData returns a PrepareDecorator that "URL encodes" (e.g., bar=baz&foo=quux) the passed
+// url.Values into the http.Request body.
+func WithFormData(v url.Values) PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				s := v.Encode()
+
+				if r.Header == nil {
+					r.Header = make(http.Header)
+				}
+				r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
+				r.ContentLength = int64(len(s))
+				r.Body = ioutil.NopCloser(strings.NewReader(s))
+			}
+			return r, err
+		})
+	}
+}
+
+// WithMultiPartFormData returns a PrepareDecorator that encodes the passed form parameters as
+// multipart/form-data into the http.Request body.
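+//
+// An illustrative sketch (field names and file contents are hypothetical):
+//
+//	file := ioutil.NopCloser(strings.NewReader("file contents"))
+//	req, err := Prepare(&http.Request{},
+//		WithMultiPartFormData(map[string]interface{}{
+//			"description": "an attachment",
+//			"file":        file,
+//		}))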
+func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. +func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. 
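+//
+// A minimal sketch (the payload type is hypothetical):
+//
+//	type widget struct {
+//		Name string `json:"name"`
+//	}
+//	req, err := Prepare(&http.Request{}, AsJSON(), WithJSON(widget{Name: "example"}))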
+func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithXML returns a PrepareDecorator that encodes the data passed as XML into the body of the +// request and sets the Content-Length header. +func WithXML(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := xml.Marshal(v) + if err == nil { + // we have to tack on an XML header + withHeader := xml.Header + string(b) + bytesWithHeader := []byte(withHeader) + + r.ContentLength = int64(len(bytesWithHeader)) + r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. 
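+//
+// For example (base URL and parameter values are hypothetical):
+//
+//	req, err := Prepare(&http.Request{},
+//		WithBaseURL("https://example.com"),
+//		WithPathParameters("/resources/{name}",
+//			map[string]interface{}{"name": "res1"}))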
+func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(pathParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL")
+				}
+				for key, value := range parameters {
+					path = strings.Replace(path, "{"+key+"}", value, -1)
+				}
+
+				if r.URL, err = parseURL(r.URL, path); err != nil {
+					return r, err
+				}
+			}
+			return r, err
+		})
+	}
+}
+
+func parseURL(u *url.URL, path string) (*url.URL, error) {
+	p := strings.TrimRight(u.String(), "/")
+	if !strings.HasPrefix(path, "/") {
+		path = "/" + path
+	}
+	return url.Parse(p + path)
+}
+
+// WithQueryParameters returns a PrepareDecorator that encodes and applies the query parameters
+// given in the supplied map (i.e., key=value).
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator {
+	parameters := ensureValueStrings(queryParameters)
+	return func(p Preparer) Preparer {
+		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
+			r, err := p.Prepare(r)
+			if err == nil {
+				if r.URL == nil {
+					return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL")
+				}
+
+				v := r.URL.Query()
+				for key, value := range parameters {
+					d, err := url.QueryUnescape(value)
+					if err != nil {
+						return r, err
+					}
+					v.Add(key, d)
+				}
+				r.URL.RawQuery = v.Encode()
+			}
+			return r, err
+		})
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/responder.go
new file mode 100644
index 00000000..349e1963
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/responder.go
@@ -0,0 +1,269 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"strings"
+)
+
+// Responder is the interface that wraps the Respond method.
+//
+// Respond accepts and reacts to an http.Response. Implementations must not share or hold state
+// since Responders may be shared and re-used.
+type Responder interface {
+	Respond(*http.Response) error
+}
+
+// ResponderFunc is a method that implements the Responder interface.
+type ResponderFunc func(*http.Response) error
+
+// Respond implements the Responder interface on ResponderFunc.
+func (rf ResponderFunc) Respond(r *http.Response) error {
+	return rf(r)
+}
+
+// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to
+// the http.Response and pass it along or, first, pass the http.Response along then react.
+type RespondDecorator func(Responder) Responder
+
+// CreateResponder creates, decorates, and returns a Responder.
+// Without decorators, the returned Responder returns the passed http.Response unmodified.
+// Responders may or may not be safe to share and re-use: it depends on the applied decorators. For
+// example, a standard decorator that closes the response body is fine to share whereas a decorator
+// that reads the body into a passed struct is not.
+//
+// To prevent memory leaks, ensure that at least one Responder closes the response body.
+func CreateResponder(decorators ...RespondDecorator) Responder {
+	return DecorateResponder(
+		Responder(ResponderFunc(func(r *http.Response) error { return nil })),
+		decorators...)
+}
+
+// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it
+// applies to the Responder. Decorators are applied in the order received, but their effect upon the
+// response depends on whether they are a pre-decorator (react to the http.Response and then pass it
+// along) or a post-decorator (pass the http.Response along and then react).
+func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder {
+	for _, decorate := range decorators {
+		r = decorate(r)
+	}
+	return r
+}
+
+// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators.
+// It creates a Responder from the decorators which it then applies to the passed http.Response.
+func Respond(r *http.Response, decorators ...RespondDecorator) error {
+	if r == nil {
+		return nil
+	}
+	return CreateResponder(decorators...).Respond(r)
+}
+
+// ByIgnoring returns a RespondDecorator that ignores the passed http.Response, passing it unexamined
+// to the next RespondDecorator.
+func ByIgnoring() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			return r.Respond(resp)
+		})
+	}
+}
+
+// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as
+// the Body is read.
+func ByCopying(b *bytes.Buffer) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				resp.Body = TeeReadCloser(resp.Body, b)
+			}
+			return err
+		})
+	}
+}
+
+// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which
+// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed
+// Responder is invoked prior to discarding the response body, the decorator may occur anywhere
+// within the set.
+func ByDiscardingBody() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && resp != nil && resp.Body != nil {
+				if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
+					return fmt.Errorf("Error discarding the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it
+// closes the response body. Since the passed Responder is invoked prior to closing the response
+// body, the decorator may occur anywhere within the set.
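+//
+// A typical response-handling chain (resp and the target struct are hypothetical):
+//
+//	var v struct{ Name string }
+//	err := Respond(resp,
+//		ByUnmarshallingJSON(&v),
+//		ByClosing())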
+func ByClosing() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which
+// it closes the response if the passed Responder returns an error and the response body exists.
+func ByClosingIfError() RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err != nil && resp != nil && resp.Body != nil {
+				if err := resp.Body.Close(); err != nil {
+					return fmt.Errorf("Error closing the response body: %v", err)
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingBytes returns a RespondDecorator that copies the bytes returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				bytes, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					*v = bytes
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingJSON(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				// Some responses might include a BOM, remove for successful unmarshalling
+				b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else if len(strings.Trim(string(b), " ")) > 0 {
+					errInner = json.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// ByUnmarshallingXML returns a RespondDecorator that decodes an XML document returned in the
+// response Body into the value pointed to by v.
+func ByUnmarshallingXML(v interface{}) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil {
+				b, errInner := ioutil.ReadAll(resp.Body)
+				if errInner != nil {
+					err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
+				} else {
+					errInner = xml.Unmarshal(b, v)
+					if errInner != nil {
+						err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b))
+					}
+				}
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response
+// StatusCode is among the set passed. On error, the response body is fully read into a buffer and
+// presented in the returned error, as well as in the response body.
+func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
+	return func(r Responder) Responder {
+		return ResponderFunc(func(resp *http.Response) error {
+			err := r.Respond(resp)
+			if err == nil && !ResponseHasStatusCode(resp, codes...) {
+				derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s",
+					resp.Request.Method,
+					resp.Request.URL,
+					resp.Status)
+				if resp.Body != nil {
+					defer resp.Body.Close()
+					b, _ := ioutil.ReadAll(resp.Body)
+					derr.ServiceError = b
+					resp.Body = ioutil.NopCloser(bytes.NewReader(b))
+				}
+				err = derr
+			}
+			return err
+		})
+	}
+}
+
+// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is
+// anything other than HTTP 200.
+func WithErrorUnlessOK() RespondDecorator {
+	return WithErrorUnlessStatusCode(http.StatusOK)
+}
+
+// ExtractHeader extracts all values of the specified header from the http.Response. It returns an
+// empty string slice if the passed http.Response is nil or the header does not exist.
+func ExtractHeader(header string, resp *http.Response) []string {
+	if resp != nil && resp.Header != nil {
+		return resp.Header[http.CanonicalHeaderKey(header)]
+	}
+	return nil
+}
+
+// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It
+// returns an empty string if the passed http.Response is nil or the header does not exist.
+func ExtractHeaderValue(header string, resp *http.Response) string {
+	h := ExtractHeader(header, resp)
+	if len(h) > 0 {
+		return h[0]
+	}
+	return ""
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
new file mode 100644
index 00000000..fa11dbed
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go
@@ -0,0 +1,52 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// NewRetriableRequest returns a wrapper around an HTTP request that supports retry logic.
+func NewRetriableRequest(req *http.Request) *RetriableRequest {
+	return &RetriableRequest{req: req}
+}
+
+// Request returns the wrapped HTTP request.
+func (rr *RetriableRequest) Request() *http.Request {
+	return rr.req
+}
+
+func (rr *RetriableRequest) prepareFromByteReader() (err error) {
+	// fall back to making a copy (only do this once)
+	b := []byte{}
+	if rr.req.ContentLength > 0 {
+		b = make([]byte, rr.req.ContentLength)
+		_, err = io.ReadFull(rr.req.Body, b)
+		if err != nil {
+			return err
+		}
+	} else {
+		b, err = ioutil.ReadAll(rr.req.Body)
+		if err != nil {
+			return err
+		}
+	}
+	rr.br = bytes.NewReader(b)
+	rr.req.Body = ioutil.NopCloser(rr.br)
+	return err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
new file mode 100644
index 00000000..7143cc61
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go
@@ -0,0 +1,54 @@
+// +build !go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.br != nil {
+			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
new file mode 100644
index 00000000..ae15c6bf
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go
@@ -0,0 +1,66 @@
+// +build go1.8
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package autorest
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// RetriableRequest provides facilities for retrying an HTTP request.
+type RetriableRequest struct {
+	req *http.Request
+	rc  io.ReadCloser
+	br  *bytes.Reader
+}
+
+// Prepare signals that the request is about to be sent.
+func (rr *RetriableRequest) Prepare() (err error) {
+	// preserve the request body; this is to support retry logic as
+	// the underlying transport will always close the request body
+	if rr.req.Body != nil {
+		if rr.rc != nil {
+			rr.req.Body = rr.rc
+		} else if rr.br != nil {
+			_, err = rr.br.Seek(0, io.SeekStart)
+			rr.req.Body = ioutil.NopCloser(rr.br)
+		}
+		if err != nil {
+			return err
+		}
+		if rr.req.GetBody != nil {
+			// this will allow us to preserve the body without having to
+			// make a copy. note we need to do this on each iteration
+			rr.rc, err = rr.req.GetBody()
+			if err != nil {
+				return err
+			}
+		} else if rr.br == nil {
+			// fall back to making a copy (only do this once)
+			err = rr.prepareFromByteReader()
+		}
+	}
+	return err
+}
+
+func removeRequestBody(req *http.Request) {
+	req.Body = nil
+	req.GetBody = nil
+	req.ContentLength = 0
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/sender.go
new file mode 100644
index 00000000..92a55404
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -0,0 +1,386 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"math"
+	"net/http"
+	"strconv"
+	"time"
+
+	"github.com/Azure/go-autorest/tracing"
+)
+
+// used as a key type in context.WithValue()
+type ctxSendDecorators struct{}
+
+// WithSendDecorators adds the specified SendDecorators to the provided context.
+// If no SendDecorators are provided the context is unchanged.
+func WithSendDecorators(ctx context.Context, sendDecorator []SendDecorator) context.Context {
+	if len(sendDecorator) == 0 {
+		return ctx
+	}
+	return context.WithValue(ctx, ctxSendDecorators{}, sendDecorator)
+}
+
+// GetSendDecorators returns the SendDecorators in the provided context or the provided default SendDecorators.
+func GetSendDecorators(ctx context.Context, defaultSendDecorators ...SendDecorator) []SendDecorator {
+	inCtx := ctx.Value(ctxSendDecorators{})
+	if sd, ok := inCtx.([]SendDecorator); ok {
+		return sd
+	}
+	return defaultSendDecorators
+}
+
+// Sender is the interface that wraps the Do method to send HTTP requests.
+//
+// The standard http.Client conforms to this interface.
+type Sender interface {
+	Do(*http.Request) (*http.Response, error)
+}
+
+// SenderFunc is a method that implements the Sender interface.
+type SenderFunc func(*http.Request) (*http.Response, error)
+
+// Do implements the Sender interface on SenderFunc.
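+//
+// For example, a stub Sender for tests can be built from a plain function
+// (the canned response is hypothetical):
+//
+//	var s Sender = SenderFunc(func(r *http.Request) (*http.Response, error) {
+//		return &http.Response{StatusCode: http.StatusOK, Body: http.NoBody}, nil
+//	})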
+func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
+	return sf(r)
+}
+
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
+// http.Request and pass it along or, first, pass the http.Request along then react to the
+// http.Response result.
+type SendDecorator func(Sender) Sender
+
+// CreateSender creates, decorates, and returns, as a Sender, the default http.Client.
+func CreateSender(decorators ...SendDecorator) Sender {
+	return DecorateSender(&http.Client{}, decorators...)
+}
+
+// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which it applies to
+// the Sender. Decorators are applied in the order received, but their effect upon the request
+// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a
+// post-decorator (pass the http.Request along and react to the results in http.Response).
+func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
+	for _, decorate := range decorators {
+		s = decorate(s)
+	}
+	return s
+}
+
+// Send sends, by means of the default http.Client, the passed http.Request, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// Send is a convenience method and not recommended for production. Advanced users should use
+// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client).
+//
+// Send will not poll or retry requests.
+func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
+}
+
+// SendWithSender sends the passed http.Request, through the provided Sender, returning the
+// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which
+// it will apply to the http.Client before invoking the Do method.
+//
+// SendWithSender will not poll or retry requests.
+func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
+	return DecorateSender(s, decorators...).Do(r)
+}
+
+// AfterDelay returns a SendDecorator that delays for the passed time.Duration before
+// invoking the Sender. The delay may be terminated by closing the optional channel on the
+// http.Request. If canceled, no further Senders are invoked.
+func AfterDelay(d time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			if !DelayForBackoff(d, 0, r.Context().Done()) {
+				return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay")
+			}
+			return s.Do(r)
+		})
+	}
+}
+
+// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request.
+func AsIs() SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			return s.Do(r)
+		})
+	}
+}
+
+// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which
+// it closes the response if the passed Sender returns an error and the response body exists.
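+//
+// An illustrative sketch (req is an assumed, already-prepared *http.Request):
+//
+//	resp, err := SendWithSender(&http.Client{}, req,
+//		DoCloseIfError(),
+//		DoErrorUnlessStatusCode(http.StatusOK))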
+func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
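+//
+// A minimal sketch (attempt count and backoff are hypothetical):
+//
+//	resp, err := SendWithSender(&http.Client{}, req,
+//		DoRetryForAttempts(3, time.Second))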
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by cancelling the context on the http.Request. +// NOTE: Code http.StatusTooManyRequests (429) will *not* be counted against the number of attempts. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, false, attempts, backoff, 0, codes...) + }) + } +} + +// DoRetryForStatusCodesWithCap returns a SendDecorator that retries for specified statusCodes for up to the +// specified number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). To cap the maximum possible delay between iterations specify a value greater +// than zero for cap. Retrying may be canceled by cancelling the context on the http.Request. +func DoRetryForStatusCodesWithCap(attempts int, backoff, cap time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return doRetryForStatusCodesImpl(s, r, true, attempts, backoff, cap, codes...) + }) + } +} + +func doRetryForStatusCodesImpl(s Sender, r *http.Request, count429 bool, attempts int, backoff, cap time.Duration, codes ...int) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + for attempt := 0; attempt < attempts+1; { + err = rr.Prepare() + if err != nil { + return + } + resp, err = s.Do(rr.Request()) + // if the error isn't temporary don't bother retrying + if err != nil && !IsTemporaryNetworkError(err) { + return + } + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoffWithCap(backoff, cap, attempt, r.Context().Done()) { + return resp, r.Context().Err() + } + // when count429 == false don't count a 429 against the number + // of attempts so that we continue to retry until it succeeds + if count429 || (resp == nil || resp.StatusCode != http.StatusTooManyRequests) { + attempt++ + } + } + return resp, err +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header. +// The value of Retry-After can be either the number of seconds or a date in RFC1123 format. +// The function returns true after successfully waiting for the specified duration. 
+// If there is no Retry-After header or the wait is canceled, the return value is false.
+func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
+	if resp == nil {
+		return false
+	}
+	var dur time.Duration
+	ra := resp.Header.Get("Retry-After")
+	if retryAfter, _ := strconv.Atoi(ra); retryAfter > 0 {
+		dur = time.Duration(retryAfter) * time.Second
+	} else if t, err := time.Parse(time.RFC1123, ra); err == nil {
+		dur = t.Sub(time.Now())
+	}
+	if dur > 0 {
+		select {
+		case <-time.After(dur):
+			return true
+		case <-cancel:
+			return false
+		}
+	}
+	return false
+}
+
+// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
+// to or greater than the specified duration, exponentially backing off between requests using the
+// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the
+// optional channel on the http.Request.
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
+			rr := NewRetriableRequest(r)
+			end := time.Now().Add(d)
+			for attempt := 0; time.Now().Before(end); attempt++ {
+				err = rr.Prepare()
+				if err != nil {
+					return resp, err
+				}
+				resp, err = s.Do(rr.Request())
+				if err == nil {
+					return resp, err
+				}
+				if !DelayForBackoff(backoff, attempt, r.Context().Done()) {
+					return nil, r.Context().Err()
+				}
+			}
+			return resp, err
+		})
+	}
+}
+
+// WithLogging returns a SendDecorator that implements simple before and after logging of the
+// request.
+func WithLogging(logger *log.Logger) SendDecorator {
+	return func(s Sender) Sender {
+		return SenderFunc(func(r *http.Request) (*http.Response, error) {
+			logger.Printf("Sending %s %s", r.Method, r.URL)
+			resp, err := s.Do(r)
+			if err != nil {
+				logger.Printf("%s %s received error '%v'", r.Method, r.URL, err)
+			} else {
+				logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status)
+			}
+			return resp, err
+		})
+	}
+}
+
+// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of the
+// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be set
+// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated
+// early, it returns false.
+// Note: Passing attempt 1 will result in doubling the "backoff" duration. Treat this as a zero-based
+// attempt count.
+func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool {
+	return DelayForBackoffWithCap(backoff, 0, attempt, cancel)
+}
+
+// DelayForBackoffWithCap invokes time.After for the supplied backoff duration raised to the power of
+// the passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can be
+// set to zero for no delay. To cap the maximum possible delay specify a value greater than zero for
+// cap. The delay may be canceled by closing the passed channel. If terminated early, it returns
+// false.
+// Note: Passing attempt 1 will result in doubling the "backoff" duration. Treat this as a zero-based
+// attempt count.
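+// For example, with backoff = 2s and cap = 10s the successive delays are
+// 2s, 4s, 8s, 10s, 10s, ...: each delay is 2s * 2^attempt, truncated at the cap.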
+func DelayForBackoffWithCap(backoff, cap time.Duration, attempt int, cancel <-chan struct{}) bool { + d := time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second + if cap > 0 && d > cap { + d = cap + } + select { + case <-time.After(d): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 00000000..86694bd2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,152 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// String returns a string value for the passed string pointer. It returns the empty string if the +// pointer is nil. +func String(s *string) string { + if s != nil { + return *s + } + return "" +} + +// StringPtr returns a pointer to the passed string. +func StringPtr(s string) *string { + return &s +} + +// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil +// slice if the pointer is nil. +func StringSlice(s *[]string) []string { + if s != nil { + return *s + } + return nil +} + +// StringSlicePtr returns a pointer to the passed string slice. +func StringSlicePtr(s []string) *[]string { + return &s +} + +// StringMap returns a map of strings built from the map of string pointers. The empty string is +// used for nil pointers. +func StringMap(msp map[string]*string) map[string]string { + ms := make(map[string]string, len(msp)) + for k, sp := range msp { + if sp != nil { + ms[k] = *sp + } else { + ms[k] = "" + } + } + return ms +} + +// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings. +func StringMapPtr(ms map[string]string) *map[string]*string { + msp := make(map[string]*string, len(ms)) + for k, s := range ms { + msp[k] = StringPtr(s) + } + return &msp +} + +// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil. +func Bool(b *bool) bool { + if b != nil { + return *b + } + return false +} + +// BoolPtr returns a pointer to the passed bool. +func BoolPtr(b bool) *bool { + return &b +} + +// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int(i *int) int { + if i != nil { + return *i + } + return 0 +} + +// IntPtr returns a pointer to the passed int. +func IntPtr(i int) *int { + return &i +} + +// Int32 returns an int value for the passed int pointer. It returns 0 if the pointer is nil. +func Int32(i *int32) int32 { + if i != nil { + return *i + } + return 0 +} + +// Int32Ptr returns a pointer to the passed int32. +func Int32Ptr(i int32) *int32 { + return &i +} + +// Int64 returns an int value for the passed int pointer. 
+// It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer
+// is nil.
+func Float32(i *float32) float32 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer
+// is nil.
+func Float64(i *float64) float64 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 {
+	return &i
+}
+
+// ByteSlicePtr returns a pointer to the passed byte slice.
+func ByteSlicePtr(b []byte) *[]byte {
+	return &b
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/to/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
new file mode 100644
index 00000000..a2054be3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/to/go.mod
@@ -0,0 +1,3 @@
+module github.com/Azure/go-autorest/autorest/to
+
+go 1.12
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/utility.go
new file mode 100644
index 00000000..08cf11c1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -0,0 +1,228 @@
+package autorest
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"encoding/json"
+	"encoding/xml"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+// EncodedAs is a series of constants specifying various data encodings
+type EncodedAs string
+
+const (
+	// EncodedAsJSON states that data is encoded as JSON
+	EncodedAsJSON EncodedAs = "JSON"
+
+	// EncodedAsXML states that data is encoded as XML
+	EncodedAsXML EncodedAs = "XML"
+)
+
+// Decoder defines the decoding method json.Decoder and xml.Decoder share
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// NewDecoder creates a new decoder appropriate to the passed encoding.
+// encodedAs specifies the type of encoding and r supplies the io.Reader containing the
+// encoded data.
+func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder {
+	if encodedAs == EncodedAsJSON {
+		return json.NewDecoder(r)
+	} else if encodedAs == EncodedAsXML {
+		return xml.NewDecoder(r)
+	}
+	return nil
+}
+
+// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy
+// is especially useful if there is a chance the data will fail to decode.
+// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v
+// is the decoding destination.
+func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) {
+	b := bytes.Buffer{}
+	return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v)
+}
+
+// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc.
+// It utilizes io.TeeReader to copy the data read and has the same behavior when reading.
+// Further, when it is closed, it ensures that rc is closed as well.
+func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser {
+	return &teeReadCloser{rc, io.TeeReader(rc, w)}
+}
+
+type teeReadCloser struct {
+	rc io.ReadCloser
+	r  io.Reader
+}
+
+func (t *teeReadCloser) Read(p []byte) (int, error) {
+	return t.r.Read(p)
+}
+
+func (t *teeReadCloser) Close() error {
+	return t.rc.Close()
+}
+
+func containsInt(ints []int, n int) bool {
+	for _, i := range ints {
+		if i == n {
+			return true
+		}
+	}
+	return false
+}
+
+func escapeValueStrings(m map[string]string) map[string]string {
+	for key, value := range m {
+		m[key] = url.QueryEscape(value)
+	}
+	return m
+}
+
+func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string {
+	mapOfStrings := make(map[string]string)
+	for key, value := range mapOfInterface {
+		mapOfStrings[key] = ensureValueString(value)
+	}
+	return mapOfStrings
+}
+
+func ensureValueString(value interface{}) string {
+	if value == nil {
+		return ""
+	}
+	switch v := value.(type) {
+	case string:
+		return v
+	case []byte:
+		return string(v)
+	default:
+		return fmt.Sprintf("%v", v)
+	}
+}
+
+// MapToValues method converts map[string]interface{} to url.Values.
+func MapToValues(m map[string]interface{}) url.Values {
+	v := url.Values{}
+	for key, value := range m {
+		x := reflect.ValueOf(value)
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			for i := 0; i < x.Len(); i++ {
+				v.Add(key, ensureValueString(x.Index(i)))
+			}
+		} else {
+			v.Add(key, ensureValueString(value))
+		}
+	}
+	return v
+}
+
+// AsStringSlice method converts interface{} to []string. It expects the parameter passed to be a
+// slice or an array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) {
+	v := reflect.ValueOf(s)
+	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+		return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
+	}
+	stringSlice := make([]string, 0, v.Len())
+
+	for i := 0; i < v.Len(); i++ {
+		stringSlice = append(stringSlice, v.Index(i).String())
+	}
+	return stringSlice, nil
+}
+
+// String method converts interface v to string. If interface is a list, it
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+	if len(sep) == 0 {
+		return ensureValueString(v)
+	}
+	stringSlice, ok := v.([]string)
+	if ok == false {
+		var err error
+		stringSlice, err = AsStringSlice(v)
+		if err != nil {
+			panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+		}
+	}
+	return ensureValueString(strings.Join(stringSlice, sep[0]))
+}
+
+// Encode method encodes url path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+	req.Method = "GET"
+	req.Body = nil
+	req.ContentLength = 0
+	req.Header.Del("Content-Length")
+	return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool {
+	if _, ok := err.(adal.TokenRefreshError); ok {
+		return true
+	}
+	if de, ok := err.(DetailedError); ok {
+		return IsTokenRefreshError(de.Original)
+	}
+	return false
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
+// if it's not. If the error doesn't implement the net.Error interface the return value is true.
+func IsTemporaryNetworkError(err error) bool {
+	if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
new file mode 100644
index 00000000..fed156db
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
@@ -0,0 +1,48 @@
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+)
+
+// Error is the type that's returned when the validation of an API's arguments constraints fails.
+type Error struct {
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// Message is the error message.
+	Message string
+}
+
+// Error returns a string containing the details of the validation failure.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
+}
+
+// NewError creates a new Error object with the specified parameters.
+// message is treated as a format string to which the optional args apply.
+func NewError(packageType string, method string, message string, args ...interface{}) Error {
+	return Error{
+		PackageType: packageType,
+		Method:      method,
+		Message:     fmt.Sprintf(message, args...),
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
new file mode 100644
index 00000000..c44c51ff
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/go.mod
@@ -0,0 +1,5 @@
+module github.com/Azure/go-autorest/autorest/validation
+
+go 1.12
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
new file mode 100644
index 00000000..4347755a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
new file mode 100644
index 00000000..65899b69
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -0,0 +1,400 @@
+/*
+Package validation provides methods for validating parameter values using reflection.
+*/
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+	"reflect"
+	"regexp"
+	"strings"
+)
+
+// Constraint stores the constraint name, the target field name,
+// the rule, and chained validations.
+type Constraint struct {
+
+	// Target field name for validation.
+	Target string
+
+	// Constraint name, e.g. minLength, MaxLength, Pattern, etc.
+	Name string
+
+	// Rule for the constraint, e.g. greater than 10, less than 5, etc.
+	Rule interface{}
+
+	// Chain validations for struct types.
+	Chain []Constraint
+}
+
+// Validation stores parameter-wise validation.
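+// For example, requiring a non-empty string parameter can be expressed as
+// Validate([]Validation{{TargetValue: name,
+//	Constraints: []Constraint{{Target: "name", Name: Empty, Rule: true}}}}).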
+type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. +func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := toInt64(v.Rule) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%r != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, 
fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys 
:= x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.Target, v.Name, getInterfaceValue(x), err) +} + +func toInt64(v interface{}) (int64, bool) { + if i64, ok := v.(int64); ok { + return i64, true + } + // older generators emit max constants as int, so if int64 fails fall back to int + if i32, ok := v.(int); ok { + return int64(i32), true + } + return 0, false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 100644 index 00000000..c4465169 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,41 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +const number = "v12.4.1" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + number, + ) +) + +// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return number +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/logger/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/logger/go.mod new file mode 100644 index 00000000..f22ed56b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/logger/go.mod @@ -0,0 +1,3 @@ +module github.com/Azure/go-autorest/logger + +go 1.12 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 00000000..da09f394 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. 
+ LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. +func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no response content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. 
+// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. +func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + 
fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream") +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/go.mod new file mode 100644 index 00000000..6a9394c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/go.mod @@ -0,0 +1,13 @@ +module github.com/Azure/go-autorest/tracing + +go 1.12 + +require ( + // later releases of ocagent aren't compatible with our version of opencensus + contrib.go.opencensus.io/exporter/ocagent v0.3.0 + // keep this pre-v0.22.0 to avoid dependency on protobuf v1.3+ + go.opencensus.io v0.21.0 +) + +// pin this to v0.1.0 to avoid breaking changes incompatible with our version of ocagent +replace github.com/census-instrumentation/opencensus-proto => github.com/census-instrumentation/opencensus-proto v0.1.0 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/go.sum new file mode 100644 index 00000000..736a4f4f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/go.sum @@ -0,0 +1,68 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +contrib.go.opencensus.io/exporter/ocagent v0.3.0 h1:fyqPXp7d+BBV3tXa7EE1CYrObJr7R9jTAOO/AsdcQBg= +contrib.go.opencensus.io/exporter/ocagent v0.3.0/go.mod h1:0fnkYHF+ORKj7HWzOExKkUHeFX79gXSKUQbpnAM+wzo= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/census-instrumentation/opencensus-proto v0.1.0 h1:VwZ9smxzX8u14/125wHIX7ARV+YhR+L4JADswwxWK0Y= +github.com/census-instrumentation/opencensus-proto v0.1.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI= +go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/tracing.go new file mode 100644 index 00000000..28951c28 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Azure/go-autorest/tracing/tracing.go @@ -0,0 +1,195 @@ +package tracing + +// Copyright 2018 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "fmt" + "net/http" + "os" + + "contrib.go.opencensus.io/exporter/ocagent" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +var ( + // Transport is the default tracing RoundTripper. The custom options setter will control + // if traces are being emitted or not. + Transport = NewTransport() + + // enabled is the flag for marking if tracing is enabled. + enabled = false + + // Sampler is the tracing sampler. If tracing is disabled it will never sample. 
Otherwise + // it will be using the parent sampler or the default. + sampler = trace.NeverSample() + + // Views for metric instrumentation. + views = map[string]*view.View{} + + // the trace exporter + traceExporter trace.Exporter +) + +func init() { + enableFromEnv() +} + +func enableFromEnv() { + _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED") + _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD") + if ok || legacyOk { + agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT") + + if ok { + EnableWithAIForwarding(agentEndpoint) + } else { + Enable() + } + } +} + +// NewTransport returns a new instance of a tracing-aware RoundTripper. +func NewTransport() *ochttp.Transport { + return &ochttp.Transport{ + Propagation: &tracecontext.HTTPFormat{}, + GetStartOptions: getStartOptions, + } +} + +// IsEnabled returns true if monitoring is enabled for the sdk. +func IsEnabled() bool { + return enabled +} + +// Enable will start instrumentation for metrics and traces. +func Enable() error { + enabled = true + sampler = nil + + err := initStats() + return err +} + +// Disable will disable instrumentation for metrics and traces. +func Disable() { + disableStats() + sampler = trace.NeverSample() + if traceExporter != nil { + trace.UnregisterExporter(traceExporter) + } + enabled = false +} + +// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder +// exporter making the metrics and traces available in app insights. +func EnableWithAIForwarding(agentEndpoint string) (err error) { + err = Enable() + if err != nil { + return err + } + + traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint)) + if err != nil { + return err + } + trace.RegisterExporter(traceExporter) + return +} + +// getStartOptions is the custom options setter for the ochttp package. 
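+// It applies the package-level sampler, so spans are only recorded while
+// tracing is enabled; otherwise trace.NeverSample is in effect.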
+func getStartOptions(*http.Request) trace.StartOptions { + return trace.StartOptions{ + Sampler: sampler, + } +} + +// initStats registers the views for the http metrics +func initStats() (err error) { + clientViews := []*view.View{ + ochttp.ClientCompletedCount, + ochttp.ClientRoundtripLatencyDistribution, + ochttp.ClientReceivedBytesDistribution, + ochttp.ClientSentBytesDistribution, + } + for _, cv := range clientViews { + vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name) + views[vn] = cv.WithName(vn) + err = view.Register(views[vn]) + if err != nil { + return err + } + } + return +} + +// disableStats will unregister the previously registered metrics +func disableStats() { + for _, v := range views { + view.Unregister(v) + } +} + +// StartSpan starts a trace span +func StartSpan(ctx context.Context, name string) context.Context { + ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler)) + return ctx +} + +// EndSpan ends a previously started span stored in the context +func EndSpan(ctx context.Context, httpStatusCode int, err error) { + span := trace.FromContext(ctx) + + if span == nil { + return + } + + if err != nil { + span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)}) + } + span.End() +} + +// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined +// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status +func toTraceStatusCode(httpStatusCode int) int32 { + switch { + case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest: + return trace.StatusCodeOK + case httpStatusCode == http.StatusBadRequest: + return trace.StatusCodeInvalidArgument + case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated. + return trace.StatusCodeUnauthenticated + case httpStatusCode == http.StatusForbidden: + return trace.StatusCodePermissionDenied + case httpStatusCode == http.StatusNotFound: + return trace.StatusCodeNotFound + case httpStatusCode == http.StatusTooManyRequests: + return trace.StatusCodeResourceExhausted + case httpStatusCode == 499: + return trace.StatusCodeCancelled + case httpStatusCode == http.StatusNotImplemented: + return trace.StatusCodeUnimplemented + case httpStatusCode == http.StatusServiceUnavailable: + return trace.StatusCodeUnavailable + case httpStatusCode == http.StatusGatewayTimeout: + return trace.StatusCodeDeadlineExceeded + default: + return trace.StatusCodeUnknown + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/backup.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/backup.go index 27d6ace0..2be34af4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/backup.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/backup.go @@ -68,10 +68,20 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader { return &BackupStreamReader{r, 0} } -// Next returns the next backup stream and prepares for calls to Write(). It skips the remainder of the current stream if +// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if // it was not completely read. func (r *BackupStreamReader) Next() (*BackupHeader, error) { if r.bytesLeft > 0 { + if s, ok := r.r.(io.Seeker); ok { + // Make sure Seek on io.SeekCurrent sometimes succeeds + // before trying the actual seek. 
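+			// Non-seekable readers (pipes, sockets) fail this probe and fall
+			// back to draining the stream with io.Copy below.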
+ if _, err := s.Seek(0, io.SeekCurrent); err == nil { + if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil { + return nil, err + } + r.bytesLeft = 0 + } + } if _, err := io.Copy(ioutil.Discard, r); err != nil { return nil, err } @@ -220,7 +230,7 @@ type BackupFileWriter struct { ctx uintptr } -// NewBackupFileWrtier returns a new BackupFileWriter from a file handle. If includeSecurity is true, +// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true, // Write() will attempt to restore the security descriptor from the stream. func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { w := &BackupFileWriter{f, includeSecurity, 0} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/ea.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/ea.go new file mode 100644 index 00000000..4051c1b3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/ea.go @@ -0,0 +1,137 @@ +package winio + +import ( + "bytes" + "encoding/binary" + "errors" +) + +type fileFullEaInformation struct { + NextEntryOffset uint32 + Flags uint8 + NameLength uint8 + ValueLength uint16 +} + +var ( + fileFullEaInformationSize = binary.Size(&fileFullEaInformation{}) + + errInvalidEaBuffer = errors.New("invalid extended attribute buffer") + errEaNameTooLarge = errors.New("extended attribute name too large") + errEaValueTooLarge = errors.New("extended attribute value too large") +) + +// ExtendedAttribute represents a single Windows EA. +type ExtendedAttribute struct { + Name string + Value []byte + Flags uint8 +} + +func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) { + var info fileFullEaInformation + err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info) + if err != nil { + err = errInvalidEaBuffer + return + } + + nameOffset := fileFullEaInformationSize + nameLen := int(info.NameLength) + valueOffset := nameOffset + int(info.NameLength) + 1 + valueLen := int(info.ValueLength) + nextOffset := int(info.NextEntryOffset) + if valueLen+valueOffset > len(b) || nextOffset < 0 || nextOffset > len(b) { + err = errInvalidEaBuffer + return + } + + ea.Name = string(b[nameOffset : nameOffset+nameLen]) + ea.Value = b[valueOffset : valueOffset+valueLen] + ea.Flags = info.Flags + if info.NextEntryOffset != 0 { + nb = b[info.NextEntryOffset:] + } + return +} + +// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION +// buffer retrieved from BackupRead, ZwQueryEaFile, etc. 
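+// The returned Value slices alias the input buffer rather than copying it, so
+// the buffer must remain valid for as long as the attributes are in use.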
+func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) { + for len(b) != 0 { + ea, nb, err := parseEa(b) + if err != nil { + return nil, err + } + + eas = append(eas, ea) + b = nb + } + return +} + +func writeEa(buf *bytes.Buffer, ea *ExtendedAttribute, last bool) error { + if int(uint8(len(ea.Name))) != len(ea.Name) { + return errEaNameTooLarge + } + if int(uint16(len(ea.Value))) != len(ea.Value) { + return errEaValueTooLarge + } + entrySize := uint32(fileFullEaInformationSize + len(ea.Name) + 1 + len(ea.Value)) + withPadding := (entrySize + 3) &^ 3 + nextOffset := uint32(0) + if !last { + nextOffset = withPadding + } + info := fileFullEaInformation{ + NextEntryOffset: nextOffset, + Flags: ea.Flags, + NameLength: uint8(len(ea.Name)), + ValueLength: uint16(len(ea.Value)), + } + + err := binary.Write(buf, binary.LittleEndian, &info) + if err != nil { + return err + } + + _, err = buf.Write([]byte(ea.Name)) + if err != nil { + return err + } + + err = buf.WriteByte(0) + if err != nil { + return err + } + + _, err = buf.Write(ea.Value) + if err != nil { + return err + } + + _, err = buf.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]) + if err != nil { + return err + } + + return nil +} + +// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION +// buffer for use with BackupWrite, ZwSetEaFile, etc. +func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) { + var buf bytes.Buffer + for i := range eas { + last := false + if i == len(eas)-1 { + last = true + } + + err := writeEa(&buf, &eas[i], last) + if err != nil { + return nil, err + } + } + return buf.Bytes(), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/file.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/file.go index 613f31b5..0385e410 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/file.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/file.go @@ -16,13 +16,20 @@ import ( //sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort //sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus //sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod +//sys wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult type atomicBool int32 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } +func (b *atomicBool) swap(new bool) bool { + var newInt int32 + if new { + newInt = 1 + } + return atomic.SwapInt32((*int32)(b), newInt) == 1 +} const ( cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 @@ -71,7 +78,9 @@ func initIo() { type win32File struct { handle syscall.Handle wg sync.WaitGroup - closing bool + wgLock sync.RWMutex + closing atomicBool + socket bool readDeadline deadlineHandler writeDeadline deadlineHandler } @@ -102,19 +111,29 @@ func makeWin32File(h syscall.Handle) (*win32File, error) { } func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return 
makeWin32File(h) + // If we return the result of makeWin32File directly, it can result in an + // interface-wrapped nil, rather than a nil interface value. + f, err := makeWin32File(h) + if err != nil { + return nil, err + } + return f, nil } // closeHandle closes the resources associated with a Win32 handle func (f *win32File) closeHandle() { - if !f.closing { + f.wgLock.Lock() + // Atomically set that we are closing, releasing the resources only once. + if !f.closing.swap(true) { + f.wgLock.Unlock() // cancel all IO and wait for it to complete - f.closing = true cancelIoEx(f.handle, nil) f.wg.Wait() // at this point, no new IO can start syscall.Close(f.handle) f.handle = 0 + } else { + f.wgLock.Unlock() } } @@ -127,10 +146,13 @@ func (f *win32File) Close() error { // prepareIo prepares for a new IO operation. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. func (f *win32File) prepareIo() (*ioOperation, error) { - f.wg.Add(1) - if f.closing { + f.wgLock.RLock() + if f.closing.isSet() { + f.wgLock.RUnlock() return nil, ErrFileClosed } + f.wg.Add(1) + f.wgLock.RUnlock() c := &ioOperation{} c.ch = make(chan ioResult) return c, nil @@ -138,8 +160,6 @@ func (f *win32File) prepareIo() (*ioOperation, error) { // ioCompletionProcessor processes completed async IOs forever func ioCompletionProcessor(h syscall.Handle) { - // Set the timer resolution to 1. This fixes a performance regression in golang 1.6. - timeBeginPeriod(1) for { var bytes uint32 var key uintptr @@ -159,7 +179,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er return int(bytes), err } - if f.closing { + if f.closing.isSet() { cancelIoEx(f.handle, &c.o) } @@ -175,9 +195,13 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er case r = <-c.ch: err = r.err if err == syscall.ERROR_OPERATION_ABORTED { - if f.closing { + if f.closing.isSet() { err = ErrFileClosed } + } else if err != nil && f.socket { + // err is from Win32. Query the overlapped structure to get the winsock error. + var bytes, flags uint32 + err = wsaGetOverlappedResult(f.handle, &c.o, &bytes, false, &flags) } case <-timeout: cancelIoEx(f.handle, &c.o) @@ -253,6 +277,10 @@ func (f *win32File) Flush() error { return syscall.FlushFileBuffers(f.handle) } +func (f *win32File) Fd() uintptr { + return uintptr(f.handle) +} + func (d *deadlineHandler) set(deadline time.Time) error { d.setLock.Lock() defer d.setLock.Unlock() diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/fileinfo.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/fileinfo.go index b1d60abb..ada2fbab 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/fileinfo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/fileinfo.go @@ -20,7 +20,8 @@ const ( // FileBasicInfo contains file access time and file attributes information. type FileBasicInfo struct { CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime - FileAttributes uintptr // includes padding + FileAttributes uint32 + pad uint32 // padding } // GetFileBasicInfo retrieves times and attributes for a file. 
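The closing/wgLock change above makes Close idempotent and race-free: the first caller atomically flips the closing flag, cancels outstanding IO, and waits for in-flight operations, while later callers return immediately. A minimal self-contained sketch of that close-once pattern (the resource type and its names are illustrative, not part of go-winio):

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    type resource struct {
    	closing int32        // set to 1 by the first Close
    	wgLock  sync.RWMutex // excludes Close while an operation is starting
    	wg      sync.WaitGroup
    }

    // begin registers an in-flight operation, refusing if a close has started.
    func (r *resource) begin() error {
    	r.wgLock.RLock()
    	defer r.wgLock.RUnlock()
    	if atomic.LoadInt32(&r.closing) != 0 {
    		return fmt.Errorf("resource is closed")
    	}
    	r.wg.Add(1)
    	return nil
    }

    // Close tears the resource down exactly once, even under concurrent calls.
    func (r *resource) Close() {
    	r.wgLock.Lock()
    	first := atomic.SwapInt32(&r.closing, 1) == 0
    	r.wgLock.Unlock()
    	if first {
    		r.wg.Wait() // no new operations can begin; wait for in-flight ones
    		fmt.Println("resources released")
    	}
    }

    func main() {
    	r := &resource{}
    	if err := r.begin(); err == nil {
    		r.wg.Done() // the operation finished
    	}
    	var wg sync.WaitGroup
    	for i := 0; i < 3; i++ {
    		wg.Add(1)
    		go func() { defer wg.Done(); r.Close() }()
    	}
    	wg.Wait() // "resources released" is printed exactly once
    }

The read-write lock lets many operations start concurrently while still giving Close an exclusive window to flip the flag, which is the same trade-off the patch above makes with wgLock.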
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/go.mod new file mode 100644 index 00000000..b3846826 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/go.mod @@ -0,0 +1,9 @@ +module github.com/Microsoft/go-winio + +go 1.12 + +require ( + github.com/pkg/errors v0.8.1 + github.com/sirupsen/logrus v1.4.1 + golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/go.sum new file mode 100644 index 00000000..babb4a70 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.4.1 h1:GL2rEmy6nsikmW0r8opw9JIRScdMF5hA8cOYLH7In1k= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/hvsock.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/hvsock.go new file mode 100644 index 00000000..dbfe790e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/hvsock.go @@ -0,0 +1,305 @@ +package winio + +import ( + "fmt" + "io" + "net" + "os" + "syscall" + "time" + "unsafe" + + "github.com/Microsoft/go-winio/pkg/guid" +) + +//sys bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) [failretval==socketError] = ws2_32.bind + +const ( + afHvSock = 34 // AF_HYPERV + + socketError = ^uintptr(0) +) + +// An HvsockAddr is an address for a AF_HYPERV socket. +type HvsockAddr struct { + VMID guid.GUID + ServiceID guid.GUID +} + +type rawHvsockAddr struct { + Family uint16 + _ uint16 + VMID guid.GUID + ServiceID guid.GUID +} + +// Network returns the address's network name, "hvsock". +func (addr *HvsockAddr) Network() string { + return "hvsock" +} + +func (addr *HvsockAddr) String() string { + return fmt.Sprintf("%s:%s", &addr.VMID, &addr.ServiceID) +} + +// VsockServiceID returns an hvsock service ID corresponding to the specified AF_VSOCK port. 
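+// The result is derived from the well-known template GUID
+// 00000000-facb-11e6-bd58-64006a7986d3 by substituting the port into Data1.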
+func VsockServiceID(port uint32) guid.GUID { + g, _ := guid.FromString("00000000-facb-11e6-bd58-64006a7986d3") + g.Data1 = port + return g +} + +func (addr *HvsockAddr) raw() rawHvsockAddr { + return rawHvsockAddr{ + Family: afHvSock, + VMID: addr.VMID, + ServiceID: addr.ServiceID, + } +} + +func (addr *HvsockAddr) fromRaw(raw *rawHvsockAddr) { + addr.VMID = raw.VMID + addr.ServiceID = raw.ServiceID +} + +// HvsockListener is a socket listener for the AF_HYPERV address family. +type HvsockListener struct { + sock *win32File + addr HvsockAddr +} + +// HvsockConn is a connected socket of the AF_HYPERV address family. +type HvsockConn struct { + sock *win32File + local, remote HvsockAddr +} + +func newHvSocket() (*win32File, error) { + fd, err := syscall.Socket(afHvSock, syscall.SOCK_STREAM, 1) + if err != nil { + return nil, os.NewSyscallError("socket", err) + } + f, err := makeWin32File(fd) + if err != nil { + syscall.Close(fd) + return nil, err + } + f.socket = true + return f, nil +} + +// ListenHvsock listens for connections on the specified hvsock address. +func ListenHvsock(addr *HvsockAddr) (_ *HvsockListener, err error) { + l := &HvsockListener{addr: *addr} + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("listen", err) + } + sa := addr.raw() + err = bind(sock.handle, unsafe.Pointer(&sa), int32(unsafe.Sizeof(sa))) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("socket", err)) + } + err = syscall.Listen(sock.handle, 16) + if err != nil { + return nil, l.opErr("listen", os.NewSyscallError("listen", err)) + } + return &HvsockListener{sock: sock, addr: *addr}, nil +} + +func (l *HvsockListener) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Addr: &l.addr, Err: err} +} + +// Addr returns the listener's network address. +func (l *HvsockListener) Addr() net.Addr { + return &l.addr +} + +// Accept waits for the next connection and returns it. +func (l *HvsockListener) Accept() (_ net.Conn, err error) { + sock, err := newHvSocket() + if err != nil { + return nil, l.opErr("accept", err) + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := l.sock.prepareIo() + if err != nil { + return nil, l.opErr("accept", err) + } + defer l.sock.wg.Done() + + // AcceptEx, per documentation, requires an extra 16 bytes per address. + const addrlen = uint32(16 + unsafe.Sizeof(rawHvsockAddr{})) + var addrbuf [addrlen * 2]byte + + var bytes uint32 + err = syscall.AcceptEx(l.sock.handle, sock.handle, &addrbuf[0], 0, addrlen, addrlen, &bytes, &c.o) + _, err = l.sock.asyncIo(c, nil, bytes, err) + if err != nil { + return nil, l.opErr("accept", os.NewSyscallError("acceptex", err)) + } + conn := &HvsockConn{ + sock: sock, + } + conn.local.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[0]))) + conn.remote.fromRaw((*rawHvsockAddr)(unsafe.Pointer(&addrbuf[addrlen]))) + sock = nil + return conn, nil +} + +// Close closes the listener, causing any pending Accept calls to fail. 
+func (l *HvsockListener) Close() error { + return l.sock.Close() +} + +/* Need to finish ConnectEx handling +func DialHvsock(ctx context.Context, addr *HvsockAddr) (*HvsockConn, error) { + sock, err := newHvSocket() + if err != nil { + return nil, err + } + defer func() { + if sock != nil { + sock.Close() + } + }() + c, err := sock.prepareIo() + if err != nil { + return nil, err + } + defer sock.wg.Done() + var bytes uint32 + err = windows.ConnectEx(windows.Handle(sock.handle), sa, nil, 0, &bytes, &c.o) + _, err = sock.asyncIo(ctx, c, nil, bytes, err) + if err != nil { + return nil, err + } + conn := &HvsockConn{ + sock: sock, + remote: *addr, + } + sock = nil + return conn, nil +} +*/ + +func (conn *HvsockConn) opErr(op string, err error) error { + return &net.OpError{Op: op, Net: "hvsock", Source: &conn.local, Addr: &conn.remote, Err: err} +} + +func (conn *HvsockConn) Read(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("read", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var flags, bytes uint32 + err = syscall.WSARecv(conn.sock.handle, &buf, 1, &bytes, &flags, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.readDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsarecv", err) + } + return 0, conn.opErr("read", err) + } else if n == 0 { + err = io.EOF + } + return n, err +} + +func (conn *HvsockConn) Write(b []byte) (int, error) { + t := 0 + for len(b) != 0 { + n, err := conn.write(b) + if err != nil { + return t + n, err + } + t += n + b = b[n:] + } + return t, nil +} + +func (conn *HvsockConn) write(b []byte) (int, error) { + c, err := conn.sock.prepareIo() + if err != nil { + return 0, conn.opErr("write", err) + } + defer conn.sock.wg.Done() + buf := syscall.WSABuf{Buf: &b[0], Len: uint32(len(b))} + var bytes uint32 + err = syscall.WSASend(conn.sock.handle, &buf, 1, &bytes, 0, &c.o, nil) + n, err := conn.sock.asyncIo(c, &conn.sock.writeDeadline, bytes, err) + if err != nil { + if _, ok := err.(syscall.Errno); ok { + err = os.NewSyscallError("wsasend", err) + } + return 0, conn.opErr("write", err) + } + return n, err +} + +// Close closes the socket connection, failing any pending read or write calls. +func (conn *HvsockConn) Close() error { + return conn.sock.Close() +} + +func (conn *HvsockConn) shutdown(how int) error { + err := syscall.Shutdown(conn.sock.handle, syscall.SHUT_RD) + if err != nil { + return os.NewSyscallError("shutdown", err) + } + return nil +} + +// CloseRead shuts down the read end of the socket. +func (conn *HvsockConn) CloseRead() error { + err := conn.shutdown(syscall.SHUT_RD) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// CloseWrite shuts down the write end of the socket, notifying the other endpoint that +// no more data will be written. +func (conn *HvsockConn) CloseWrite() error { + err := conn.shutdown(syscall.SHUT_WR) + if err != nil { + return conn.opErr("close", err) + } + return nil +} + +// LocalAddr returns the local address of the connection. +func (conn *HvsockConn) LocalAddr() net.Addr { + return &conn.local +} + +// RemoteAddr returns the remote address of the connection. +func (conn *HvsockConn) RemoteAddr() net.Addr { + return &conn.remote +} + +// SetDeadline implements the net.Conn SetDeadline method. 
+func (conn *HvsockConn) SetDeadline(t time.Time) error { + conn.SetReadDeadline(t) + conn.SetWriteDeadline(t) + return nil +} + +// SetReadDeadline implements the net.Conn SetReadDeadline method. +func (conn *HvsockConn) SetReadDeadline(t time.Time) error { + return conn.sock.SetReadDeadline(t) +} + +// SetWriteDeadline implements the net.Conn SetWriteDeadline method. +func (conn *HvsockConn) SetWriteDeadline(t time.Time) error { + return conn.sock.SetWriteDeadline(t) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pipe.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pipe.go index da706cc8..d6a46f6a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pipe.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pipe.go @@ -3,10 +3,13 @@ package winio import ( + "context" "errors" + "fmt" "io" "net" "os" + "runtime" "syscall" "time" "unsafe" @@ -15,31 +18,72 @@ import ( //sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe //sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *syscall.SecurityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW //sys createFile(name string, access uint32, mode uint32, sa *syscall.SecurityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW //sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo //sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW //sys localAlloc(uFlags uint32, length uint32) (ptr uintptr) = LocalAlloc +//sys ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) = ntdll.NtCreateNamedPipeFile +//sys rtlNtStatusToDosError(status ntstatus) (winerr error) = ntdll.RtlNtStatusToDosErrorNoTeb +//sys rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) = ntdll.RtlDosPathNameToNtPathName_U +//sys rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) = ntdll.RtlDefaultNpAcl + +type ioStatusBlock struct { + Status, Information uintptr +} + +type objectAttributes struct { + Length uintptr + RootDirectory uintptr + ObjectName *unicodeString + Attributes uintptr + SecurityDescriptor *securityDescriptor + SecurityQoS uintptr +} + +type unicodeString struct { + Length uint16 + MaximumLength uint16 + Buffer uintptr +} + +type securityDescriptor struct { + Revision byte + Sbz1 byte + Control uint16 + Owner uintptr + Group uintptr + Sacl uintptr + Dacl uintptr +} + +type ntstatus int32 + +func (status ntstatus) Err() error { + if status >= 0 { + return nil + } + return rtlNtStatusToDosError(status) +} const ( cERROR_PIPE_BUSY = syscall.Errno(231) + cERROR_NO_DATA = syscall.Errno(232) cERROR_PIPE_CONNECTED = syscall.Errno(535) 
 	cERROR_SEM_TIMEOUT       = syscall.Errno(121)
 
-	cPIPE_ACCESS_DUPLEX            = 0x3
-	cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000
-	cSECURITY_SQOS_PRESENT         = 0x100000
-	cSECURITY_ANONYMOUS            = 0
-
-	cPIPE_REJECT_REMOTE_CLIENTS = 0x8
-
-	cPIPE_UNLIMITED_INSTANCES = 255
-
-	cNMPWAIT_USE_DEFAULT_WAIT = 0
-	cNMPWAIT_NOWAIT           = 1
+	cSECURITY_SQOS_PRESENT = 0x100000
+	cSECURITY_ANONYMOUS    = 0
 
 	cPIPE_TYPE_MESSAGE = 4
 
 	cPIPE_READMODE_MESSAGE = 2
+
+	cFILE_OPEN   = 1
+	cFILE_CREATE = 2
+
+	cFILE_PIPE_MESSAGE_TYPE          = 1
+	cFILE_PIPE_REJECT_REMOTE_CLIENTS = 2
+
+	cSE_DACL_PRESENT = 4
 )
 
 var (
@@ -120,6 +164,11 @@ func (f *win32MessageBytePipe) Read(b []byte) (int, error) {
 		// zero-byte message, ensure that all future Read() calls
 		// also return EOF.
 		f.readEOF = true
+	} else if err == syscall.ERROR_MORE_DATA {
+		// ERROR_MORE_DATA indicates that the pipe's read mode is message mode
+		// and the message still has more bytes. Treat this as a success, since
+		// this package presents all named pipes as byte streams.
+		err = nil
 	}
 	return n, err
 }
@@ -132,40 +181,53 @@ func (s pipeAddress) String() string {
 	return string(s)
 }
 
+// tryDialPipe attempts to dial the pipe at `path` until `ctx` cancellation or timeout.
+func tryDialPipe(ctx context.Context, path *string) (syscall.Handle, error) {
+	for {
+		select {
+		case <-ctx.Done():
+			return syscall.Handle(0), ctx.Err()
+		default:
+			h, err := createFile(*path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0)
+			if err == nil {
+				return h, nil
+			}
+			if err != cERROR_PIPE_BUSY {
+				return h, &os.PathError{Err: err, Op: "open", Path: *path}
+			}
+			// Wait 10 ms and try again. This is rather simplistic: we always
+			// retry every 10 ms, regardless of how much time remains on the context.
+			time.Sleep(time.Millisecond * 10)
+		}
+	}
+}
+
 // DialPipe connects to a named pipe by path, timing out if the connection
-// takes longer than the specified duration. If timeout is nil, then the timeout
-// is the default timeout established by the pipe server.
+// takes longer than the specified duration. If timeout is nil, then we use
+// a default timeout of 2 seconds. (We do not use WaitNamedPipe.)
 func DialPipe(path string, timeout *time.Duration) (net.Conn, error) {
 	var absTimeout time.Time
 	if timeout != nil {
 		absTimeout = time.Now().Add(*timeout)
+	} else {
+		absTimeout = time.Now().Add(time.Second * 2)
 	}
+	ctx, cancel := context.WithDeadline(context.Background(), absTimeout)
+	defer cancel() // release the deadline's timer once dialing finishes
+	conn, err := DialPipeContext(ctx, path)
+	if err == context.DeadlineExceeded {
+		return nil, ErrTimeout
+	}
+	return conn, err
+}
+
+func DialPipeContext(ctx context.Context, path string) (net.Conn, error) { var err error var h syscall.Handle - for { - h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != cERROR_PIPE_BUSY { - break - } - now := time.Now() - var ms uint32 - if absTimeout.IsZero() { - ms = cNMPWAIT_USE_DEFAULT_WAIT - } else if now.After(absTimeout) { - ms = cNMPWAIT_NOWAIT - } else { - ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) - } - err = waitNamedPipe(path, ms) - if err != nil { - if err == cERROR_SEM_TIMEOUT { - return nil, ErrTimeout - } - break - } - } + h, err = tryDialPipe(ctx, &path) if err != nil { - return nil, &os.PathError{Op: "open", Path: path, Err: err} + return nil, err } var flags uint32 @@ -174,16 +236,6 @@ func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { return nil, err } - var state uint32 - err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) - if err != nil { - return nil, err - } - - if state&cPIPE_READMODE_MESSAGE != 0 { - return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} - } - f, err := makeWin32File(h) if err != nil { syscall.Close(h) @@ -206,43 +258,87 @@ type acceptResponse struct { } type win32PipeListener struct { - firstHandle syscall.Handle - path string - securityDescriptor []byte - config PipeConfig - acceptCh chan (chan acceptResponse) - closeCh chan int - doneCh chan int + firstHandle syscall.Handle + path string + config PipeConfig + acceptCh chan (chan acceptResponse) + closeCh chan int + doneCh chan int } -func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) { - var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED - if first { - flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE - } - - var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS - if c.MessageMode { - mode |= cPIPE_TYPE_MESSAGE - } - - sa := &syscall.SecurityAttributes{} - sa.Length = uint32(unsafe.Sizeof(*sa)) - if securityDescriptor != nil { - len := uint32(len(securityDescriptor)) - sa.SecurityDescriptor = localAlloc(0, len) - defer localFree(sa.SecurityDescriptor) - copy((*[0xffff]byte)(unsafe.Pointer(sa.SecurityDescriptor))[:], securityDescriptor) - } - h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, sa) +func makeServerPipeHandle(path string, sd []byte, c *PipeConfig, first bool) (syscall.Handle, error) { + path16, err := syscall.UTF16FromString(path) if err != nil { return 0, &os.PathError{Op: "open", Path: path, Err: err} } + + var oa objectAttributes + oa.Length = unsafe.Sizeof(oa) + + var ntPath unicodeString + if err := rtlDosPathNameToNtPathName(&path16[0], &ntPath, 0, 0).Err(); err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + defer localFree(ntPath.Buffer) + oa.ObjectName = &ntPath + + // The security descriptor is only needed for the first pipe. + if first { + if sd != nil { + len := uint32(len(sd)) + sdb := localAlloc(0, len) + defer localFree(sdb) + copy((*[0xffff]byte)(unsafe.Pointer(sdb))[:], sd) + oa.SecurityDescriptor = (*securityDescriptor)(unsafe.Pointer(sdb)) + } else { + // Construct the default named pipe security descriptor. 
+ var dacl uintptr + if err := rtlDefaultNpAcl(&dacl).Err(); err != nil { + return 0, fmt.Errorf("getting default named pipe ACL: %s", err) + } + defer localFree(dacl) + + sdb := &securityDescriptor{ + Revision: 1, + Control: cSE_DACL_PRESENT, + Dacl: dacl, + } + oa.SecurityDescriptor = sdb + } + } + + typ := uint32(cFILE_PIPE_REJECT_REMOTE_CLIENTS) + if c.MessageMode { + typ |= cFILE_PIPE_MESSAGE_TYPE + } + + disposition := uint32(cFILE_OPEN) + access := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | syscall.SYNCHRONIZE) + if first { + disposition = cFILE_CREATE + // By not asking for read or write access, the named pipe file system + // will put this pipe into an initially disconnected state, blocking + // client connections until the next call with first == false. + access = syscall.SYNCHRONIZE + } + + timeout := int64(-50 * 10000) // 50ms + + var ( + h syscall.Handle + iosb ioStatusBlock + ) + err = ntCreateNamedPipeFile(&h, access, &oa, &iosb, syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE, disposition, 0, typ, 0, 0, 0xffffffff, uint32(c.InputBufferSize), uint32(c.OutputBufferSize), &timeout).Err() + if err != nil { + return 0, &os.PathError{Op: "open", Path: path, Err: err} + } + + runtime.KeepAlive(ntPath) return h, nil } func (l *win32PipeListener) makeServerPipe() (*win32File, error) { - h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false) + h, err := makeServerPipeHandle(l.path, nil, &l.config, false) if err != nil { return nil, err } @@ -254,6 +350,36 @@ func (l *win32PipeListener) makeServerPipe() (*win32File, error) { return f, nil } +func (l *win32PipeListener) makeConnectedServerPipe() (*win32File, error) { + p, err := l.makeServerPipe() + if err != nil { + return nil, err + } + + // Wait for the client to connect. + ch := make(chan error) + go func(p *win32File) { + ch <- connectPipe(p) + }(p) + + select { + case err = <-ch: + if err != nil { + p.Close() + p = nil + } + case <-l.closeCh: + // Abort the connect request by closing the handle. + p.Close() + p = nil + err = <-ch + if err == nil || err == ErrFileClosed { + err = ErrPipeListenerClosed + } + } + return p, err +} + func (l *win32PipeListener) listenerRoutine() { closed := false for !closed { @@ -261,31 +387,20 @@ func (l *win32PipeListener) listenerRoutine() { case <-l.closeCh: closed = true case responseCh := <-l.acceptCh: - p, err := l.makeServerPipe() - if err == nil { - // Wait for the client to connect. - ch := make(chan error) - go func() { - ch <- connectPipe(p) - }() - select { - case err = <-ch: - if err != nil { - p.Close() - p = nil - } - case <-l.closeCh: - // Abort the connect request by closing the handle. - p.Close() - p = nil - err = <-ch - if err == nil || err == ErrFileClosed { - err = ErrPipeListenerClosed - } - closed = true + var ( + p *win32File + err error + ) + for { + p, err = l.makeConnectedServerPipe() + // If the connection was immediately closed by the client, try + // again. + if err != cERROR_NO_DATA { + break } } responseCh <- acceptResponse{p, err} + closed = err == ErrPipeListenerClosed } } syscall.Close(l.firstHandle) @@ -334,22 +449,13 @@ func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { if err != nil { return nil, err } - // Immediately open and then close a client handle so that the named pipe is - // created but not currently accepting connections. 
- h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != nil { - syscall.Close(h) - return nil, err - } - syscall.Close(h2) l := &win32PipeListener{ - firstHandle: h, - path: path, - securityDescriptor: sd, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), + firstHandle: h, + path: path, + config: *c, + acceptCh: make(chan (chan acceptResponse)), + closeCh: make(chan int), + doneCh: make(chan int), } go l.listenerRoutine() return l, nil diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go new file mode 100644 index 00000000..58640657 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/pkg/guid/guid.go @@ -0,0 +1,235 @@ +// Package guid provides a GUID type. The backing structure for a GUID is +// identical to that used by the golang.org/x/sys/windows GUID type. +// There are two main binary encodings used for a GUID, the big-endian encoding, +// and the Windows (mixed-endian) encoding. See here for details: +// https://en.wikipedia.org/wiki/Universally_unique_identifier#Encoding +package guid + +import ( + "crypto/rand" + "crypto/sha1" + "encoding" + "encoding/binary" + "fmt" + "strconv" + + "golang.org/x/sys/windows" +) + +// Variant specifies which GUID variant (or "type") of the GUID. It determines +// how the entirety of the rest of the GUID is interpreted. +type Variant uint8 + +// The variants specified by RFC 4122. +const ( + // VariantUnknown specifies a GUID variant which does not conform to one of + // the variant encodings specified in RFC 4122. + VariantUnknown Variant = iota + VariantNCS + VariantRFC4122 + VariantMicrosoft + VariantFuture +) + +// Version specifies how the bits in the GUID were generated. For instance, a +// version 4 GUID is randomly generated, and a version 5 is generated from the +// hash of an input string. +type Version uint8 + +var _ = (encoding.TextMarshaler)(GUID{}) +var _ = (encoding.TextUnmarshaler)(&GUID{}) + +// GUID represents a GUID/UUID. It has the same structure as +// golang.org/x/sys/windows.GUID so that it can be used with functions expecting +// that type. It is defined as its own type so that stringification and +// marshaling can be supported. The representation matches that used by native +// Windows code. +type GUID windows.GUID + +// NewV4 returns a new version 4 (pseudorandom) GUID, as defined by RFC 4122. +func NewV4() (GUID, error) { + var b [16]byte + if _, err := rand.Read(b[:]); err != nil { + return GUID{}, err + } + + g := FromArray(b) + g.setVersion(4) // Version 4 means randomly generated. + g.setVariant(VariantRFC4122) + + return g, nil +} + +// NewV5 returns a new version 5 (generated from a string via SHA-1 hashing) +// GUID, as defined by RFC 4122. The RFC is unclear on the encoding of the name, +// and the sample code treats it as a series of bytes, so we do the same here. +// +// Some implementations, such as those found on Windows, treat the name as a +// big-endian UTF16 stream of bytes. If that is desired, the string can be +// encoded as such before being passed to this function. 
+func NewV5(namespace GUID, name []byte) (GUID, error) { + b := sha1.New() + namespaceBytes := namespace.ToArray() + b.Write(namespaceBytes[:]) + b.Write(name) + + a := [16]byte{} + copy(a[:], b.Sum(nil)) + + g := FromArray(a) + g.setVersion(5) // Version 5 means generated from a string. + g.setVariant(VariantRFC4122) + + return g, nil +} + +func fromArray(b [16]byte, order binary.ByteOrder) GUID { + var g GUID + g.Data1 = order.Uint32(b[0:4]) + g.Data2 = order.Uint16(b[4:6]) + g.Data3 = order.Uint16(b[6:8]) + copy(g.Data4[:], b[8:16]) + return g +} + +func (g GUID) toArray(order binary.ByteOrder) [16]byte { + b := [16]byte{} + order.PutUint32(b[0:4], g.Data1) + order.PutUint16(b[4:6], g.Data2) + order.PutUint16(b[6:8], g.Data3) + copy(b[8:16], g.Data4[:]) + return b +} + +// FromArray constructs a GUID from a big-endian encoding array of 16 bytes. +func FromArray(b [16]byte) GUID { + return fromArray(b, binary.BigEndian) +} + +// ToArray returns an array of 16 bytes representing the GUID in big-endian +// encoding. +func (g GUID) ToArray() [16]byte { + return g.toArray(binary.BigEndian) +} + +// FromWindowsArray constructs a GUID from a Windows encoding array of bytes. +func FromWindowsArray(b [16]byte) GUID { + return fromArray(b, binary.LittleEndian) +} + +// ToWindowsArray returns an array of 16 bytes representing the GUID in Windows +// encoding. +func (g GUID) ToWindowsArray() [16]byte { + return g.toArray(binary.LittleEndian) +} + +func (g GUID) String() string { + return fmt.Sprintf( + "%08x-%04x-%04x-%04x-%012x", + g.Data1, + g.Data2, + g.Data3, + g.Data4[:2], + g.Data4[2:]) +} + +// FromString parses a string containing a GUID and returns the GUID. The only +// format currently supported is the `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx` +// format. +func FromString(s string) (GUID, error) { + if len(s) != 36 { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + + var g GUID + + data1, err := strconv.ParseUint(s[0:8], 16, 32) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data1 = uint32(data1) + + data2, err := strconv.ParseUint(s[9:13], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data2 = uint16(data2) + + data3, err := strconv.ParseUint(s[14:18], 16, 16) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data3 = uint16(data3) + + for i, x := range []int{19, 21, 24, 26, 28, 30, 32, 34} { + v, err := strconv.ParseUint(s[x:x+2], 16, 8) + if err != nil { + return GUID{}, fmt.Errorf("invalid GUID %q", s) + } + g.Data4[i] = uint8(v) + } + + return g, nil +} + +func (g *GUID) setVariant(v Variant) { + d := g.Data4[0] + switch v { + case VariantNCS: + d = (d & 0x7f) + case VariantRFC4122: + d = (d & 0x3f) | 0x80 + case VariantMicrosoft: + d = (d & 0x1f) | 0xc0 + case VariantFuture: + d = (d & 0x0f) | 0xe0 + case VariantUnknown: + fallthrough + default: + panic(fmt.Sprintf("invalid variant: %d", v)) + } + g.Data4[0] = d +} + +// Variant returns the GUID variant, as defined in RFC 4122. 
+func (g GUID) Variant() Variant {
+	b := g.Data4[0]
+	if b&0x80 == 0 {
+		return VariantNCS
+	} else if b&0xc0 == 0x80 {
+		return VariantRFC4122
+	} else if b&0xe0 == 0xc0 {
+		return VariantMicrosoft
+	} else if b&0xe0 == 0xe0 {
+		return VariantFuture
+	}
+	return VariantUnknown
+}
+
+func (g *GUID) setVersion(v Version) {
+	g.Data3 = (g.Data3 & 0x0fff) | (uint16(v) << 12)
+}
+
+// Version returns the GUID version, as defined in RFC 4122.
+func (g GUID) Version() Version {
+	return Version((g.Data3 & 0xF000) >> 12)
+}
+
+// MarshalText returns the textual representation of the GUID.
+func (g GUID) MarshalText() ([]byte, error) {
+	return []byte(g.String()), nil
+}
+
+// UnmarshalText takes the textual representation of a GUID, and unmarshals it
+// into this GUID.
+func (g *GUID) UnmarshalText(text []byte) error {
+	g2, err := FromString(string(text))
+	if err != nil {
+		return err
+	}
+	*g = g2
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/syscall.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/syscall.go
index 20d64cf4..5cb52bc7 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/syscall.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/go-winio/syscall.go
@@ -1,3 +1,3 @@
 package winio
 
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
+//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go hvsock.go
package winio @@ -38,21 +38,25 @@ func errnoErr(e syscall.Errno) error { var ( modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - modwinmm = windows.NewLazySystemDLL("winmm.dll") + modws2_32 = windows.NewLazySystemDLL("ws2_32.dll") + modntdll = windows.NewLazySystemDLL("ntdll.dll") modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") procCancelIoEx = modkernel32.NewProc("CancelIoEx") procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe") procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW") procCreateFileW = modkernel32.NewProc("CreateFileW") - procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW") procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo") procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW") procLocalAlloc = modkernel32.NewProc("LocalAlloc") + procNtCreateNamedPipeFile = modntdll.NewProc("NtCreateNamedPipeFile") + procRtlNtStatusToDosErrorNoTeb = modntdll.NewProc("RtlNtStatusToDosErrorNoTeb") + procRtlDosPathNameToNtPathName_U = modntdll.NewProc("RtlDosPathNameToNtPathName_U") + procRtlDefaultNpAcl = modntdll.NewProc("RtlDefaultNpAcl") procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") @@ -71,6 +75,7 @@ var ( procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") procBackupRead = modkernel32.NewProc("BackupRead") procBackupWrite = modkernel32.NewProc("BackupWrite") + procbind = modws2_32.NewProc("bind") ) func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { @@ -122,9 +127,21 @@ func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err erro return } -func timeBeginPeriod(period uint32) (n int32) { - r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) - n = int32(r0) +func wsaGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } return } @@ -184,27 +201,6 @@ func _createFile(name *uint16, access uint32, mode uint32, sa *syscall.SecurityA return } -func waitNamedPipe(name string, timeout uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _waitNamedPipe(_p0, timeout) -} - -func _waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, 
maxInstances *uint32) (err error) { r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) if r1 == 0 { @@ -235,6 +231,32 @@ func localAlloc(uFlags uint32, length uint32) (ptr uintptr) { return } +func ntCreateNamedPipeFile(pipe *syscall.Handle, access uint32, oa *objectAttributes, iosb *ioStatusBlock, share uint32, disposition uint32, options uint32, typ uint32, readMode uint32, completionMode uint32, maxInstances uint32, inboundQuota uint32, outputQuota uint32, timeout *int64) (status ntstatus) { + r0, _, _ := syscall.Syscall15(procNtCreateNamedPipeFile.Addr(), 14, uintptr(unsafe.Pointer(pipe)), uintptr(access), uintptr(unsafe.Pointer(oa)), uintptr(unsafe.Pointer(iosb)), uintptr(share), uintptr(disposition), uintptr(options), uintptr(typ), uintptr(readMode), uintptr(completionMode), uintptr(maxInstances), uintptr(inboundQuota), uintptr(outputQuota), uintptr(unsafe.Pointer(timeout)), 0) + status = ntstatus(r0) + return +} + +func rtlNtStatusToDosError(status ntstatus) (winerr error) { + r0, _, _ := syscall.Syscall(procRtlNtStatusToDosErrorNoTeb.Addr(), 1, uintptr(status), 0, 0) + if r0 != 0 { + winerr = syscall.Errno(r0) + } + return +} + +func rtlDosPathNameToNtPathName(name *uint16, ntName *unicodeString, filePart uintptr, reserved uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall6(procRtlDosPathNameToNtPathName_U.Addr(), 4, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(ntName)), uintptr(filePart), uintptr(reserved), 0, 0) + status = ntstatus(r0) + return +} + +func rtlDefaultNpAcl(dacl *uintptr) (status ntstatus) { + r0, _, _ := syscall.Syscall(procRtlDefaultNpAcl.Addr(), 1, uintptr(unsafe.Pointer(dacl)), 0, 0) + status = ntstatus(r0) + return +} + func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { var _p0 *uint16 _p0, err = syscall.UTF16PtrFromString(accountName) @@ -526,3 +548,15 @@ func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, p } return } + +func bind(s syscall.Handle, name unsafe.Pointer, namelen int32) (err error) { + r1, _, e1 := syscall.Syscall(procbind.Addr(), 3, uintptr(s), uintptr(name), uintptr(namelen)) + if r1 == socketError { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/LICENSE new file mode 100644 index 00000000..49d21669 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
new file mode 100644
index 00000000..477fe707
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/osversion/osversion_windows.go
@@ -0,0 +1,57 @@
+package osversion
+
+import (
+	"fmt"
+
+	"golang.org/x/sys/windows"
+)
+
+// OSVersion is a wrapper for Windows version information
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx
+type OSVersion struct {
+	Version      uint32
+	MajorVersion uint8
+	MinorVersion uint8
+	Build        uint16
+}
+
+// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx
+type osVersionInfoEx struct {
+	OSVersionInfoSize uint32
+	MajorVersion      uint32
+	MinorVersion      uint32
+	BuildNumber       uint32
+	PlatformID        uint32
+	CSDVersion        [128]uint16
+	ServicePackMajor  uint16
+	ServicePackMinor  uint16
+	SuiteMask         uint16
+	ProductType       byte
+	Reserve           byte
+}
+
+// Get gets the operating system version on Windows.
+// The calling application must be manifested to get the correct version information.
+func Get() OSVersion {
+	var err error
+	osv := OSVersion{}
+	osv.Version, err = windows.GetVersion()
+	if err != nil {
+		// GetVersion never fails.
+		panic(err)
+	}
+	osv.MajorVersion = uint8(osv.Version & 0xFF)
+	osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF)
+	osv.Build = uint16(osv.Version >> 16)
+	return osv
+}
+
+// Build gets the build-number on Windows
+// The calling application must be manifested to get the correct version information.
+func Build() uint16 {
+	return Get().Build
+}
+
+func (osv OSVersion) ToString() string {
+	return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
new file mode 100644
index 00000000..3488cc45
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Microsoft/hcsshim/osversion/windowsbuilds.go
@@ -0,0 +1,27 @@
+package osversion
+
+const (
+	// RS1 (version 1607, codename "Redstone 1") corresponds to Windows Server
+	// 2016 (ltsc2016) and Windows 10 (Anniversary Update).
+	RS1 = 14393
+
+	// RS2 (version 1703, codename "Redstone 2") was a client-only update, and
+	// corresponds to Windows 10 (Creators Update).
+	RS2 = 15063
+
+	// RS3 (version 1709, codename "Redstone 3") corresponds to Windows Server
+	// 1709 (Semi-Annual Channel (SAC)), and Windows 10 (Fall Creators Update).
+	RS3 = 16299
+
+	// RS4 (version 1803, codename "Redstone 4") corresponds to Windows Server
+	// 1803 (Semi-Annual Channel (SAC)), and Windows 10 (April 2018 Update).
+	RS4 = 17134
+
+	// RS5 (version 1809, codename "Redstone 5") corresponds to Windows Server
+	// 2019 (ltsc2019), and Windows 10 (October 2018 Update).
+	RS5 = 17763
+
+	// V19H1 (version 1903) corresponds to Windows Server 1903 (semi-annual
+	// channel).
+	V19H1 = 18362
+)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go
index 9c3e5a04..aafd38c7 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/Shopify/sarama/broker.go
@@ -387,6 +387,23 @@ func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse,
 		return nil, err
 	}
 
+	// For new consumer group / partition pairs, Microsoft's Azure Event Hubs
+	// communicates the "no error / new offset" state by omitting the requested
+	// entries from the OffsetFetchResponse (in contrast to other implementations,
+	// which indicate this by returning an explicit offset of -1). To handle this
+	// case, we check all entries in the request and add an offset to the response
+	// table for any that are missing.
+	for topic, partitions := range request.partitions {
+		if response.Blocks[topic] == nil {
+			response.Blocks[topic] = make(map[int32]*OffsetFetchResponseBlock)
+		}
+		for _, p := range partitions {
+			if response.Blocks[topic][p] == nil {
+				response.Blocks[topic][p] = &OffsetFetchResponseBlock{Offset: -1}
+			}
+		}
+	}
+
 	return response, nil
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/elastic/beats/vendor/github.com/Sirupsen/logrus/appveyor.yml
deleted file mode 100644
index 96c2ce15..00000000
--- a/vendor/github.com/elastic/beats/vendor/github.com/Sirupsen/logrus/appveyor.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
-  GOPATH: c:\gopath
-branches:
-  only:
-    - master
-install:
-  - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
-  - go version
-build_script:
-  - go get -t
-  - go test
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
deleted file mode 100644
index 5f14d116..00000000
--- a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/NOTICE.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-AWS SDK for Go
-Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
-Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/buildspec-NoGoMods.yml b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/buildspec-NoGoMods.yml
deleted file mode 100644
index 79015897..00000000
--- a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/buildspec-NoGoMods.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-version: 0.2
-
-phases:
-  build:
-    commands:
-      - echo Build started on `date`
-      - export GOPATH=/go
-      - export SDK_CB_ROOT=`pwd`
-      - export SDK_GO_ROOT=/go/src/github.com/aws/aws-sdk-go-v2
-      - mkdir -p /go/src/github.com/aws
-      - ln -s $SDK_CB_ROOT $SDK_GO_ROOT
-      - cd $SDK_GO_ROOT
-      - make get-deps ci-test
-      - cd $SDK_CB_ROOT
-  post_build:
-    commands:
-      - echo Build completed on `date`
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml
deleted file mode 100644
index 7f0e60ed..00000000
--- a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/buildspec.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-version: 0.2
-
-phases:
-  build:
-    commands:
-      - echo Build started on `date`
-      - export GOPATH=/go
-      - export SDK_CODEBUILD_ROOT=`pwd`
-      - make get-deps-verify ci-test
-  post_build:
-    commands:
-      - echo Build completed on `date`
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go
new file mode 100644
index 00000000..9e7596f4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_client.go
@@ -0,0 +1,79 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
+	"github.com/aws/aws-sdk-go-v2/private/protocol/query"
+)
+
+// Client provides the API operation methods for making requests to
+// IAM. See this package's package overview docs
+// for details on the service.
+//
+// The client's methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type Client struct {
+	*aws.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*Client)
+
+// Used for custom request initialization logic
+var initRequest func(*Client, *aws.Request)
+
+const (
+	ServiceName = "IAM" // Service's name
+	ServiceID   = "IAM" // Service's identifier
+	EndpointsID = "iam" // Service's Endpoint identifier
+)
+
+// New creates a new instance of the client from the provided Config.
+//
+// Example:
+//     // Create a client from just a config.
+//     svc := iam.New(myConfig)
+func New(config aws.Config) *Client {
+	svc := &Client{
+		Client: aws.NewClient(
+			config,
+			aws.Metadata{
+				ServiceName:   ServiceName,
+				ServiceID:     ServiceID,
+				EndpointsID:   EndpointsID,
+				SigningName:   "iam",
+				SigningRegion: config.Region,
+				APIVersion:    "2010-05-08",
+			},
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a client operation and runs any
+// custom request initialization.
+func (c *Client) newRequest(op *aws.Operation, params, data interface{}) *aws.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	if initRequest != nil {
+		initRequest(c, req)
+	}
+
+	return req
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_doc.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_doc.go
new file mode 100644
index 00000000..be2a10ab
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_doc.go
@@ -0,0 +1,80 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package iam provides the client and types for making API
+// requests to IAM.
+//
+// AWS Identity and Access Management (IAM) is a web service that you can use
+// to manage users and user permissions under your AWS account. This guide provides
+// descriptions of IAM actions that you can call programmatically. For general
+// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/).
+// For the user guide for IAM, see Using IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/).
+//
+// AWS provides SDKs that consist of libraries and sample code for various programming
+// languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs
+// provide a convenient way to create programmatic access to IAM and AWS. For
+// example, the SDKs take care of tasks such as cryptographically signing requests
+// (see below), managing errors, and retrying requests automatically. For information
+// about the AWS SDKs, including how to download and install them, see the Tools
+// for Amazon Web Services (http://aws.amazon.com/tools/) page.
+//
+// We recommend that you use the AWS SDKs to make programmatic API calls to
+// IAM. However, you can also use the IAM Query API to make direct calls to
+// the IAM web service. To learn more about the IAM Query API, see Making Query
+// Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the Using IAM guide. IAM supports GET and POST requests for all actions.
+// That is, the API does not require you to use GET for some actions and POST
+// for others. However, GET requests are subject to the size limitation of a
+// URL. Therefore, for operations that require larger sizes, use a POST request.
+//
+// Signing Requests
+//
+// Requests must be signed using an access key ID and a secret access key. We
+// strongly recommend that you do not use your AWS account access key ID and
+// secret access key for everyday work with IAM. You can use the access key
+// ID and secret access key for an IAM user or you can use the AWS Security
+// Token Service to generate temporary security credentials and use those to
+// sign requests.
+//
+// To sign requests, we recommend that you use Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html).
+// If you have an existing application that uses Signature Version 2, you do
+// not have to update it to use Signature Version 4. However, some operations
+// now require Signature Version 4. The documentation for operations that require
+// version 4 indicates this requirement.
+//
+// Additional Resources
+//
+// For more information, see the following:
+//
+//    * AWS Security Credentials (https://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
+//    This topic provides general information about the types of credentials
+//    used for accessing AWS.
+//
+//    * IAM Best Practices (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
+//    This topic presents a list of suggestions for using the IAM service to
+//    help secure your AWS resources.
+//
+//    * Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
+//    This set of topics walks you through the process of signing a request using
+//    an access key ID and secret access key.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08 for more information on this service.
+//
+// See iam package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/
+//
+// Using the Client
+//
+// To use IAM with the SDK, use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the IAM client for more information on
+// creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#New
+package iam
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_enums.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_enums.go
new file mode 100644
index 00000000..bc26e802
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_enums.go
@@ -0,0 +1,363 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package iam + +type ContextKeyTypeEnum string + +// Enum values for ContextKeyTypeEnum +const ( + ContextKeyTypeEnumString ContextKeyTypeEnum = "string" + ContextKeyTypeEnumStringList ContextKeyTypeEnum = "stringList" + ContextKeyTypeEnumNumeric ContextKeyTypeEnum = "numeric" + ContextKeyTypeEnumNumericList ContextKeyTypeEnum = "numericList" + ContextKeyTypeEnumBoolean ContextKeyTypeEnum = "boolean" + ContextKeyTypeEnumBooleanList ContextKeyTypeEnum = "booleanList" + ContextKeyTypeEnumIp ContextKeyTypeEnum = "ip" + ContextKeyTypeEnumIpList ContextKeyTypeEnum = "ipList" + ContextKeyTypeEnumBinary ContextKeyTypeEnum = "binary" + ContextKeyTypeEnumBinaryList ContextKeyTypeEnum = "binaryList" + ContextKeyTypeEnumDate ContextKeyTypeEnum = "date" + ContextKeyTypeEnumDateList ContextKeyTypeEnum = "dateList" +) + +func (enum ContextKeyTypeEnum) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ContextKeyTypeEnum) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type DeletionTaskStatusType string + +// Enum values for DeletionTaskStatusType +const ( + DeletionTaskStatusTypeSucceeded DeletionTaskStatusType = "SUCCEEDED" + DeletionTaskStatusTypeInProgress DeletionTaskStatusType = "IN_PROGRESS" + DeletionTaskStatusTypeFailed DeletionTaskStatusType = "FAILED" + DeletionTaskStatusTypeNotStarted DeletionTaskStatusType = "NOT_STARTED" +) + +func (enum DeletionTaskStatusType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum DeletionTaskStatusType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type EntityType string + +// Enum values for EntityType +const ( + EntityTypeUser EntityType = "User" + EntityTypeRole EntityType = "Role" + EntityTypeGroup EntityType = "Group" + EntityTypeLocalManagedPolicy EntityType = "LocalManagedPolicy" + EntityTypeAwsmanagedPolicy EntityType = "AWSManagedPolicy" +) + +func (enum EntityType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum EntityType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type PermissionsBoundaryAttachmentType string + +// Enum values for PermissionsBoundaryAttachmentType +const ( + PermissionsBoundaryAttachmentTypePermissionsBoundaryPolicy PermissionsBoundaryAttachmentType = "PermissionsBoundaryPolicy" +) + +func (enum PermissionsBoundaryAttachmentType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PermissionsBoundaryAttachmentType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type PolicyEvaluationDecisionType string + +// Enum values for PolicyEvaluationDecisionType +const ( + PolicyEvaluationDecisionTypeAllowed PolicyEvaluationDecisionType = "allowed" + PolicyEvaluationDecisionTypeExplicitDeny PolicyEvaluationDecisionType = "explicitDeny" + PolicyEvaluationDecisionTypeImplicitDeny PolicyEvaluationDecisionType = "implicitDeny" +) + +func (enum PolicyEvaluationDecisionType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PolicyEvaluationDecisionType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type PolicySourceType string + +// Enum values for PolicySourceType +const ( + PolicySourceTypeUser PolicySourceType = "user" + PolicySourceTypeGroup PolicySourceType = "group" + PolicySourceTypeRole PolicySourceType = "role" + PolicySourceTypeAwsManaged PolicySourceType = 
"aws-managed" + PolicySourceTypeUserManaged PolicySourceType = "user-managed" + PolicySourceTypeResource PolicySourceType = "resource" + PolicySourceTypeNone PolicySourceType = "none" +) + +func (enum PolicySourceType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PolicySourceType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +// The policy usage type that indicates whether the policy is used as a permissions +// policy or as the permissions boundary for an entity. +// +// For more information about permissions boundaries, see Permissions Boundaries +// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) +// in the IAM User Guide. +type PolicyUsageType string + +// Enum values for PolicyUsageType +const ( + PolicyUsageTypePermissionsPolicy PolicyUsageType = "PermissionsPolicy" + PolicyUsageTypePermissionsBoundary PolicyUsageType = "PermissionsBoundary" +) + +func (enum PolicyUsageType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PolicyUsageType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ReportFormatType string + +// Enum values for ReportFormatType +const ( + ReportFormatTypeTextCsv ReportFormatType = "text/csv" +) + +func (enum ReportFormatType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ReportFormatType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type ReportStateType string + +// Enum values for ReportStateType +const ( + ReportStateTypeStarted ReportStateType = "STARTED" + ReportStateTypeInprogress ReportStateType = "INPROGRESS" + ReportStateTypeComplete ReportStateType = "COMPLETE" +) + +func (enum ReportStateType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum ReportStateType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type AssignmentStatusType string + +// Enum values for AssignmentStatusType +const ( + AssignmentStatusTypeAssigned AssignmentStatusType = "Assigned" + AssignmentStatusTypeUnassigned AssignmentStatusType = "Unassigned" + AssignmentStatusTypeAny AssignmentStatusType = "Any" +) + +func (enum AssignmentStatusType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum AssignmentStatusType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type EncodingType string + +// Enum values for EncodingType +const ( + EncodingTypeSsh EncodingType = "SSH" + EncodingTypePem EncodingType = "PEM" +) + +func (enum EncodingType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum EncodingType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type GlobalEndpointTokenVersion string + +// Enum values for GlobalEndpointTokenVersion +const ( + GlobalEndpointTokenVersionV1token GlobalEndpointTokenVersion = "v1Token" + GlobalEndpointTokenVersionV2token GlobalEndpointTokenVersion = "v2Token" +) + +func (enum GlobalEndpointTokenVersion) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum GlobalEndpointTokenVersion) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type JobStatusType string + +// Enum values for JobStatusType +const ( + JobStatusTypeInProgress JobStatusType = "IN_PROGRESS" + 
JobStatusTypeCompleted JobStatusType = "COMPLETED" + JobStatusTypeFailed JobStatusType = "FAILED" +) + +func (enum JobStatusType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum JobStatusType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type PolicyOwnerEntityType string + +// Enum values for PolicyOwnerEntityType +const ( + PolicyOwnerEntityTypeUser PolicyOwnerEntityType = "USER" + PolicyOwnerEntityTypeRole PolicyOwnerEntityType = "ROLE" + PolicyOwnerEntityTypeGroup PolicyOwnerEntityType = "GROUP" +) + +func (enum PolicyOwnerEntityType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PolicyOwnerEntityType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type PolicyScopeType string + +// Enum values for PolicyScopeType +const ( + PolicyScopeTypeAll PolicyScopeType = "All" + PolicyScopeTypeAws PolicyScopeType = "AWS" + PolicyScopeTypeLocal PolicyScopeType = "Local" +) + +func (enum PolicyScopeType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PolicyScopeType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type PolicyType string + +// Enum values for PolicyType +const ( + PolicyTypeInline PolicyType = "INLINE" + PolicyTypeManaged PolicyType = "MANAGED" +) + +func (enum PolicyType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum PolicyType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type StatusType string + +// Enum values for StatusType +const ( + StatusTypeActive StatusType = "Active" + StatusTypeInactive StatusType = "Inactive" +) + +func (enum StatusType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum StatusType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} + +type SummaryKeyType string + +// Enum values for SummaryKeyType +const ( + SummaryKeyTypeUsers SummaryKeyType = "Users" + SummaryKeyTypeUsersQuota SummaryKeyType = "UsersQuota" + SummaryKeyTypeGroups SummaryKeyType = "Groups" + SummaryKeyTypeGroupsQuota SummaryKeyType = "GroupsQuota" + SummaryKeyTypeServerCertificates SummaryKeyType = "ServerCertificates" + SummaryKeyTypeServerCertificatesQuota SummaryKeyType = "ServerCertificatesQuota" + SummaryKeyTypeUserPolicySizeQuota SummaryKeyType = "UserPolicySizeQuota" + SummaryKeyTypeGroupPolicySizeQuota SummaryKeyType = "GroupPolicySizeQuota" + SummaryKeyTypeGroupsPerUserQuota SummaryKeyType = "GroupsPerUserQuota" + SummaryKeyTypeSigningCertificatesPerUserQuota SummaryKeyType = "SigningCertificatesPerUserQuota" + SummaryKeyTypeAccessKeysPerUserQuota SummaryKeyType = "AccessKeysPerUserQuota" + SummaryKeyTypeMfadevices SummaryKeyType = "MFADevices" + SummaryKeyTypeMfadevicesInUse SummaryKeyType = "MFADevicesInUse" + SummaryKeyTypeAccountMfaenabled SummaryKeyType = "AccountMFAEnabled" + SummaryKeyTypeAccountAccessKeysPresent SummaryKeyType = "AccountAccessKeysPresent" + SummaryKeyTypeAccountSigningCertificatesPresent SummaryKeyType = "AccountSigningCertificatesPresent" + SummaryKeyTypeAttachedPoliciesPerGroupQuota SummaryKeyType = "AttachedPoliciesPerGroupQuota" + SummaryKeyTypeAttachedPoliciesPerRoleQuota SummaryKeyType = "AttachedPoliciesPerRoleQuota" + SummaryKeyTypeAttachedPoliciesPerUserQuota SummaryKeyType = "AttachedPoliciesPerUserQuota" + SummaryKeyTypePolicies SummaryKeyType = "Policies" + 
SummaryKeyTypePoliciesQuota SummaryKeyType = "PoliciesQuota" + SummaryKeyTypePolicySizeQuota SummaryKeyType = "PolicySizeQuota" + SummaryKeyTypePolicyVersionsInUse SummaryKeyType = "PolicyVersionsInUse" + SummaryKeyTypePolicyVersionsInUseQuota SummaryKeyType = "PolicyVersionsInUseQuota" + SummaryKeyTypeVersionsPerPolicyQuota SummaryKeyType = "VersionsPerPolicyQuota" + SummaryKeyTypeGlobalEndpointTokenVersion SummaryKeyType = "GlobalEndpointTokenVersion" +) + +func (enum SummaryKeyType) MarshalValue() (string, error) { + return string(enum), nil +} + +func (enum SummaryKeyType) MarshalValueBuf(b []byte) ([]byte, error) { + b = b[0:0] + return append(b, enum...), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_errors.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_errors.go new file mode 100644 index 00000000..403317b8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_errors.go @@ -0,0 +1,193 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +const ( + + // ErrCodeConcurrentModificationException for service response error code + // "ConcurrentModification". + // + // The request was rejected because multiple requests to change this object + // were submitted simultaneously. Wait a few minutes and submit your request + // again. + ErrCodeConcurrentModificationException = "ConcurrentModification" + + // ErrCodeCredentialReportExpiredException for service response error code + // "ReportExpired". + // + // The request was rejected because the most recent credential report has expired. + // To generate a new credential report, use GenerateCredentialReport. For more + // information about credential report expiration, see Getting Credential Reports + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) + // in the IAM User Guide. + ErrCodeCredentialReportExpiredException = "ReportExpired" + + // ErrCodeCredentialReportNotPresentException for service response error code + // "ReportNotPresent". + // + // The request was rejected because the credential report does not exist. To + // generate a credential report, use GenerateCredentialReport. + ErrCodeCredentialReportNotPresentException = "ReportNotPresent" + + // ErrCodeCredentialReportNotReadyException for service response error code + // "ReportInProgress". + // + // The request was rejected because the credential report is still being generated. + ErrCodeCredentialReportNotReadyException = "ReportInProgress" + + // ErrCodeDeleteConflictException for service response error code + // "DeleteConflict". + // + // The request was rejected because it attempted to delete a resource that has + // attached subordinate entities. The error message describes these entities. + ErrCodeDeleteConflictException = "DeleteConflict" + + // ErrCodeDuplicateCertificateException for service response error code + // "DuplicateCertificate". + // + // The request was rejected because the same certificate is associated with + // an IAM user in the account. + ErrCodeDuplicateCertificateException = "DuplicateCertificate" + + // ErrCodeDuplicateSSHPublicKeyException for service response error code + // "DuplicateSSHPublicKey". + // + // The request was rejected because the SSH public key is already associated + // with the specified IAM user. 
+ ErrCodeDuplicateSSHPublicKeyException = "DuplicateSSHPublicKey" + + // ErrCodeEntityAlreadyExistsException for service response error code + // "EntityAlreadyExists". + // + // The request was rejected because it attempted to create a resource that already + // exists. + ErrCodeEntityAlreadyExistsException = "EntityAlreadyExists" + + // ErrCodeEntityTemporarilyUnmodifiableException for service response error code + // "EntityTemporarilyUnmodifiable". + // + // The request was rejected because it referenced an entity that is temporarily + // unmodifiable, such as a user name that was deleted and then recreated. The + // error indicates that the request is likely to succeed if you try again after + // waiting several minutes. The error message describes the entity. + ErrCodeEntityTemporarilyUnmodifiableException = "EntityTemporarilyUnmodifiable" + + // ErrCodeInvalidAuthenticationCodeException for service response error code + // "InvalidAuthenticationCode". + // + // The request was rejected because the authentication code was not recognized. + // The error message describes the specific error. + ErrCodeInvalidAuthenticationCodeException = "InvalidAuthenticationCode" + + // ErrCodeInvalidCertificateException for service response error code + // "InvalidCertificate". + // + // The request was rejected because the certificate is invalid. + ErrCodeInvalidCertificateException = "InvalidCertificate" + + // ErrCodeInvalidInputException for service response error code + // "InvalidInput". + // + // The request was rejected because an invalid or out-of-range value was supplied + // for an input parameter. + ErrCodeInvalidInputException = "InvalidInput" + + // ErrCodeInvalidPublicKeyException for service response error code + // "InvalidPublicKey". + // + // The request was rejected because the public key is malformed or otherwise + // invalid. + ErrCodeInvalidPublicKeyException = "InvalidPublicKey" + + // ErrCodeInvalidUserTypeException for service response error code + // "InvalidUserType". + // + // The request was rejected because the type of user for the transaction was + // incorrect. + ErrCodeInvalidUserTypeException = "InvalidUserType" + + // ErrCodeKeyPairMismatchException for service response error code + // "KeyPairMismatch". + // + // The request was rejected because the public key certificate and the private + // key do not match. + ErrCodeKeyPairMismatchException = "KeyPairMismatch" + + // ErrCodeLimitExceededException for service response error code + // "LimitExceeded". + // + // The request was rejected because it attempted to create resources beyond + // the current AWS account limits. The error message describes the limit exceeded. + ErrCodeLimitExceededException = "LimitExceeded" + + // ErrCodeMalformedCertificateException for service response error code + // "MalformedCertificate". + // + // The request was rejected because the certificate was malformed or expired. + // The error message describes the specific error. + ErrCodeMalformedCertificateException = "MalformedCertificate" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodeNoSuchEntityException for service response error code + // "NoSuchEntity". + // + // The request was rejected because it referenced a resource entity that does + // not exist. 
The error message describes the resource. + ErrCodeNoSuchEntityException = "NoSuchEntity" + + // ErrCodePasswordPolicyViolationException for service response error code + // "PasswordPolicyViolation". + // + // The request was rejected because the provided password did not meet the requirements + // imposed by the account password policy. + ErrCodePasswordPolicyViolationException = "PasswordPolicyViolation" + + // ErrCodePolicyEvaluationException for service response error code + // "PolicyEvaluation". + // + // The request failed because a provided policy could not be successfully evaluated. + // An additional detailed message indicates the source of the failure. + ErrCodePolicyEvaluationException = "PolicyEvaluation" + + // ErrCodePolicyNotAttachableException for service response error code + // "PolicyNotAttachable". + // + // The request failed because AWS service role policies can only be attached + // to the service-linked role for that service. + ErrCodePolicyNotAttachableException = "PolicyNotAttachable" + + // ErrCodeServiceFailureException for service response error code + // "ServiceFailure". + // + // The request processing has failed because of an unknown error, exception + // or failure. + ErrCodeServiceFailureException = "ServiceFailure" + + // ErrCodeServiceNotSupportedException for service response error code + // "NotSupportedService". + // + // The specified service does not support service-specific credentials. + ErrCodeServiceNotSupportedException = "NotSupportedService" + + // ErrCodeUnmodifiableEntityException for service response error code + // "UnmodifiableEntity". + // + // The request was rejected because only the service that depends on the service-linked + // role can modify or delete the role on your behalf. The error message includes + // the name of the service that depends on this service-linked role. You must + // request the change through that service. + ErrCodeUnmodifiableEntityException = "UnmodifiableEntity" + + // ErrCodeUnrecognizedPublicKeyEncodingException for service response error code + // "UnrecognizedPublicKeyEncoding". + // + // The request was rejected because the public key encoding format is unsupported + // or unrecognized. + ErrCodeUnrecognizedPublicKeyEncodingException = "UnrecognizedPublicKeyEncoding" +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go new file mode 100644 index 00000000..820344a7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddClientIDToOpenIDConnectProvider.go @@ -0,0 +1,143 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddClientIDToOpenIDConnectProviderRequest +type AddClientIDToOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to add to the IAM OpenID Connect provider + // resource. 
+ // + // ClientID is a required field + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect (OIDC) provider + // resource to add the client ID to. You can get a list of OIDC provider ARNs + // by using the ListOpenIDConnectProviders operation. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AddClientIDToOpenIDConnectProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AddClientIDToOpenIDConnectProviderInput"} + + if s.ClientID == nil { + invalidParams.Add(aws.NewErrParamRequired("ClientID")) + } + if s.ClientID != nil && len(*s.ClientID) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClientID", 1)) + } + + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddClientIDToOpenIDConnectProviderOutput +type AddClientIDToOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddClientIDToOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opAddClientIDToOpenIDConnectProvider = "AddClientIDToOpenIDConnectProvider" + +// AddClientIDToOpenIDConnectProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Adds a new client ID (also known as audience) to the list of client IDs already +// registered for the specified IAM OpenID Connect (OIDC) provider resource. +// +// This operation is idempotent; it does not fail or return an error if you +// add an existing client ID to the provider. +// +// // Example sending a request using AddClientIDToOpenIDConnectProviderRequest. +// req := client.AddClientIDToOpenIDConnectProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddClientIDToOpenIDConnectProvider +func (c *Client) AddClientIDToOpenIDConnectProviderRequest(input *AddClientIDToOpenIDConnectProviderInput) AddClientIDToOpenIDConnectProviderRequest { + op := &aws.Operation{ + Name: opAddClientIDToOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddClientIDToOpenIDConnectProviderInput{} + } + + req := c.newRequest(op, input, &AddClientIDToOpenIDConnectProviderOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AddClientIDToOpenIDConnectProviderRequest{Request: req, Input: input, Copy: c.AddClientIDToOpenIDConnectProviderRequest} +} + +// AddClientIDToOpenIDConnectProviderRequest is the request type for the +// AddClientIDToOpenIDConnectProvider API operation. 
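+//
+// Editor's note (illustrative sketch, not part of the generated file): a
+// minimal end-to-end call from client code. The external config loader
+// resolves default credentials; the ARN and client ID below are hypothetical
+// placeholders.
+//
+//    import (
+//        "context"
+//        "log"
+//
+//        "github.com/aws/aws-sdk-go-v2/aws"
+//        "github.com/aws/aws-sdk-go-v2/aws/external"
+//        "github.com/aws/aws-sdk-go-v2/service/iam"
+//    )
+//
+//    cfg, err := external.LoadDefaultAWSConfig()
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    svc := iam.New(cfg)
+//    req := svc.AddClientIDToOpenIDConnectProviderRequest(&iam.AddClientIDToOpenIDConnectProviderInput{
+//        ClientID:                 aws.String("my-application-id"),
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/auth.example.com"),
+//    })
+//    if _, err := req.Send(context.TODO()); err != nil {
+//        log.Fatal(err)
+//    }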
+type AddClientIDToOpenIDConnectProviderRequest struct { + *aws.Request + Input *AddClientIDToOpenIDConnectProviderInput + Copy func(*AddClientIDToOpenIDConnectProviderInput) AddClientIDToOpenIDConnectProviderRequest +} + +// Send marshals and sends the AddClientIDToOpenIDConnectProvider API request. +func (r AddClientIDToOpenIDConnectProviderRequest) Send(ctx context.Context) (*AddClientIDToOpenIDConnectProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AddClientIDToOpenIDConnectProviderResponse{ + AddClientIDToOpenIDConnectProviderOutput: r.Request.Data.(*AddClientIDToOpenIDConnectProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AddClientIDToOpenIDConnectProviderResponse is the response type for the +// AddClientIDToOpenIDConnectProvider API operation. +type AddClientIDToOpenIDConnectProviderResponse struct { + *AddClientIDToOpenIDConnectProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AddClientIDToOpenIDConnectProvider request. +func (r *AddClientIDToOpenIDConnectProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go new file mode 100644 index 00000000..8376afcf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddRoleToInstanceProfile.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddRoleToInstanceProfileRequest +type AddRoleToInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to add. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
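+//
+// Editor's note (illustrative sketch, not part of the generated file):
+// Validate performs the client-side checks only (required fields, minimum
+// lengths); Send runs it before any network call, but it can also be invoked
+// directly, e.g. on a deliberately empty input:
+//
+//    input := &iam.AddRoleToInstanceProfileInput{}
+//    if err := input.Validate(); err != nil {
+//        // err aggregates both missing required parameters,
+//        // InstanceProfileName and RoleName, as aws.ErrInvalidParams.
+//        fmt.Println(err)
+//    }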
+func (s *AddRoleToInstanceProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AddRoleToInstanceProfileInput"} + + if s.InstanceProfileName == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddRoleToInstanceProfileOutput +type AddRoleToInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddRoleToInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opAddRoleToInstanceProfile = "AddRoleToInstanceProfile" + +// AddRoleToInstanceProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Adds the specified IAM role to the specified instance profile. An instance +// profile can contain only one role, and this limit cannot be increased. You +// can remove the existing role and then add a different role to an instance +// profile. You must then wait for the change to appear across all of AWS because +// of eventual consistency (https://en.wikipedia.org/wiki/Eventual_consistency). +// To force the change, you must disassociate the instance profile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DisassociateIamInstanceProfile.html) +// and then associate the instance profile (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateIamInstanceProfile.html), +// or you can stop your instance and then restart it. +// +// The caller of this API must be granted the PassRole permission on the IAM +// role by a permissions policy. +// +// For more information about roles, go to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// // Example sending a request using AddRoleToInstanceProfileRequest. +// req := client.AddRoleToInstanceProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddRoleToInstanceProfile +func (c *Client) AddRoleToInstanceProfileRequest(input *AddRoleToInstanceProfileInput) AddRoleToInstanceProfileRequest { + op := &aws.Operation{ + Name: opAddRoleToInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddRoleToInstanceProfileInput{} + } + + req := c.newRequest(op, input, &AddRoleToInstanceProfileOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AddRoleToInstanceProfileRequest{Request: req, Input: input, Copy: c.AddRoleToInstanceProfileRequest} +} + +// AddRoleToInstanceProfileRequest is the request type for the +// AddRoleToInstanceProfile API operation. 
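+//
+// Editor's note (illustrative sketch, not part of the generated file):
+// assuming svc is an *iam.Client as constructed earlier, with hypothetical
+// profile and role names. Because of the eventual consistency noted above,
+// the association may take time to appear on running instances.
+//
+//    req := svc.AddRoleToInstanceProfileRequest(&iam.AddRoleToInstanceProfileInput{
+//        InstanceProfileName: aws.String("web-server-profile"),
+//        RoleName:            aws.String("web-server-role"),
+//    })
+//    if _, err := req.Send(context.TODO()); err != nil {
+//        log.Fatal(err)
+//    }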
+type AddRoleToInstanceProfileRequest struct { + *aws.Request + Input *AddRoleToInstanceProfileInput + Copy func(*AddRoleToInstanceProfileInput) AddRoleToInstanceProfileRequest +} + +// Send marshals and sends the AddRoleToInstanceProfile API request. +func (r AddRoleToInstanceProfileRequest) Send(ctx context.Context) (*AddRoleToInstanceProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AddRoleToInstanceProfileResponse{ + AddRoleToInstanceProfileOutput: r.Request.Data.(*AddRoleToInstanceProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AddRoleToInstanceProfileResponse is the response type for the +// AddRoleToInstanceProfile API operation. +type AddRoleToInstanceProfileResponse struct { + *AddRoleToInstanceProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AddRoleToInstanceProfile request. +func (r *AddRoleToInstanceProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go new file mode 100644 index 00000000..64f12801 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AddUserToGroup.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddUserToGroupRequest +type AddUserToGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to add. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AddUserToGroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AddUserToGroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AddUserToGroupInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddUserToGroupOutput +type AddUserToGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AddUserToGroupOutput) String() string { + return awsutil.Prettify(s) +} + +const opAddUserToGroup = "AddUserToGroup" + +// AddUserToGroupRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Adds the specified user to the specified group. +// +// // Example sending a request using AddUserToGroupRequest. +// req := client.AddUserToGroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AddUserToGroup +func (c *Client) AddUserToGroupRequest(input *AddUserToGroupInput) AddUserToGroupRequest { + op := &aws.Operation{ + Name: opAddUserToGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AddUserToGroupInput{} + } + + req := c.newRequest(op, input, &AddUserToGroupOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AddUserToGroupRequest{Request: req, Input: input, Copy: c.AddUserToGroupRequest} +} + +// AddUserToGroupRequest is the request type for the +// AddUserToGroup API operation. +type AddUserToGroupRequest struct { + *aws.Request + Input *AddUserToGroupInput + Copy func(*AddUserToGroupInput) AddUserToGroupRequest +} + +// Send marshals and sends the AddUserToGroup API request. +func (r AddUserToGroupRequest) Send(ctx context.Context) (*AddUserToGroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AddUserToGroupResponse{ + AddUserToGroupOutput: r.Request.Data.(*AddUserToGroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AddUserToGroupResponse is the response type for the +// AddUserToGroup API operation. +type AddUserToGroupResponse struct { + *AddUserToGroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AddUserToGroup request. +func (r *AddUserToGroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go new file mode 100644 index 00000000..6be907ab --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachGroupPolicy.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachGroupPolicyRequest +type AttachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to attach the policy to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachGroupPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AttachGroupPolicyInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachGroupPolicyOutput +type AttachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opAttachGroupPolicy = "AttachGroupPolicy" + +// AttachGroupPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Attaches the specified managed policy to the specified IAM group. +// +// You use this API to attach a managed policy to a group. To embed an inline +// policy in a group, use PutGroupPolicy. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using AttachGroupPolicyRequest. 
+// req := client.AttachGroupPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachGroupPolicy +func (c *Client) AttachGroupPolicyRequest(input *AttachGroupPolicyInput) AttachGroupPolicyRequest { + op := &aws.Operation{ + Name: opAttachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachGroupPolicyInput{} + } + + req := c.newRequest(op, input, &AttachGroupPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AttachGroupPolicyRequest{Request: req, Input: input, Copy: c.AttachGroupPolicyRequest} +} + +// AttachGroupPolicyRequest is the request type for the +// AttachGroupPolicy API operation. +type AttachGroupPolicyRequest struct { + *aws.Request + Input *AttachGroupPolicyInput + Copy func(*AttachGroupPolicyInput) AttachGroupPolicyRequest +} + +// Send marshals and sends the AttachGroupPolicy API request. +func (r AttachGroupPolicyRequest) Send(ctx context.Context) (*AttachGroupPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AttachGroupPolicyResponse{ + AttachGroupPolicyOutput: r.Request.Data.(*AttachGroupPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AttachGroupPolicyResponse is the response type for the +// AttachGroupPolicy API operation. +type AttachGroupPolicyResponse struct { + *AttachGroupPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AttachGroupPolicy request. +func (r *AttachGroupPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go new file mode 100644 index 00000000..2acd80ee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachRolePolicy.go @@ -0,0 +1,155 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachRolePolicyRequest +type AttachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the role to attach the policy to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AttachRolePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AttachRolePolicyInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachRolePolicyOutput +type AttachRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opAttachRolePolicy = "AttachRolePolicy" + +// AttachRolePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Attaches the specified managed policy to the specified IAM role. When you +// attach a managed policy to a role, the managed policy becomes part of the +// role's permission (access) policy. +// +// You cannot use a managed policy as the role's trust policy. The role's trust +// policy is created at the same time as the role, using CreateRole. You can +// update a role's trust policy using UpdateAssumeRolePolicy. +// +// Use this API to attach a managed policy to a role. To embed an inline policy +// in a role, use PutRolePolicy. For more information about policies, see Managed +// Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using AttachRolePolicyRequest. +// req := client.AttachRolePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachRolePolicy +func (c *Client) AttachRolePolicyRequest(input *AttachRolePolicyInput) AttachRolePolicyRequest { + op := &aws.Operation{ + Name: opAttachRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachRolePolicyInput{} + } + + req := c.newRequest(op, input, &AttachRolePolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AttachRolePolicyRequest{Request: req, Input: input, Copy: c.AttachRolePolicyRequest} +} + +// AttachRolePolicyRequest is the request type for the +// AttachRolePolicy API operation. +type AttachRolePolicyRequest struct { + *aws.Request + Input *AttachRolePolicyInput + Copy func(*AttachRolePolicyInput) AttachRolePolicyRequest +} + +// Send marshals and sends the AttachRolePolicy API request. 
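+//
+// Editor's note (illustrative sketch, not part of the generated file):
+// attaching an AWS managed policy by ARN; svc is an *iam.Client as above and
+// the role name is hypothetical. Note this sets a permissions policy, not
+// the role's trust policy.
+//
+//    req := svc.AttachRolePolicyRequest(&iam.AttachRolePolicyInput{
+//        PolicyArn: aws.String("arn:aws:iam::aws:policy/ReadOnlyAccess"),
+//        RoleName:  aws.String("auditor-role"),
+//    })
+//    if _, err := req.Send(context.TODO()); err != nil {
+//        log.Fatal(err)
+//    }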
+func (r AttachRolePolicyRequest) Send(ctx context.Context) (*AttachRolePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AttachRolePolicyResponse{ + AttachRolePolicyOutput: r.Request.Data.(*AttachRolePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AttachRolePolicyResponse is the response type for the +// AttachRolePolicy API operation. +type AttachRolePolicyResponse struct { + *AttachRolePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AttachRolePolicy request. +func (r *AttachRolePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go new file mode 100644 index 00000000..7dd457bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_AttachUserPolicy.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachUserPolicyRequest +type AttachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to attach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user to attach the policy to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AttachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AttachUserPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "AttachUserPolicyInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachUserPolicyOutput +type AttachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s AttachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opAttachUserPolicy = "AttachUserPolicy" + +// AttachUserPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Attaches the specified managed policy to the specified user. +// +// You use this API to attach a managed policy to a user. To embed an inline +// policy in a user, use PutUserPolicy. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using AttachUserPolicyRequest. +// req := client.AttachUserPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachUserPolicy +func (c *Client) AttachUserPolicyRequest(input *AttachUserPolicyInput) AttachUserPolicyRequest { + op := &aws.Operation{ + Name: opAttachUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AttachUserPolicyInput{} + } + + req := c.newRequest(op, input, &AttachUserPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return AttachUserPolicyRequest{Request: req, Input: input, Copy: c.AttachUserPolicyRequest} +} + +// AttachUserPolicyRequest is the request type for the +// AttachUserPolicy API operation. +type AttachUserPolicyRequest struct { + *aws.Request + Input *AttachUserPolicyInput + Copy func(*AttachUserPolicyInput) AttachUserPolicyRequest +} + +// Send marshals and sends the AttachUserPolicy API request. +func (r AttachUserPolicyRequest) Send(ctx context.Context) (*AttachUserPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &AttachUserPolicyResponse{ + AttachUserPolicyOutput: r.Request.Data.(*AttachUserPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// AttachUserPolicyResponse is the response type for the +// AttachUserPolicy API operation. +type AttachUserPolicyResponse struct { + *AttachUserPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// AttachUserPolicy request. 
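+//
+// Editor's note (illustrative sketch, not part of the generated file): the
+// returned *aws.Response wraps the underlying request, which is useful for
+// logging. The field names below reflect this SDK lineage and are stated as
+// an assumption, not guaranteed API:
+//
+//    resp, err := svc.AttachUserPolicyRequest(input).Send(context.TODO())
+//    if err == nil {
+//        // The AWS request ID helps correlate calls with CloudTrail
+//        // and AWS support cases.
+//        log.Println(resp.SDKResponseMetdata().Request.RequestID)
+//    }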
+func (r *AttachUserPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go new file mode 100644 index 00000000..74f62b2c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ChangePassword.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ChangePasswordRequest +type ChangePasswordInput struct { + _ struct{} `type:"structure"` + + // The new password. The new password must conform to the AWS account's password + // policy, if one exists. + // + // The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate + // this parameter is a string of characters. That string can include almost + // any printable ASCII character from the space (\u0020) through the end of + // the ASCII character range (\u00FF). You can also include the tab (\u0009), + // line feed (\u000A), and carriage return (\u000D) characters. Any of these + // characters are valid in a password. However, many tools, such as the AWS + // Management Console, might restrict the ability to type certain characters + // because they have special meaning within that tool. + // + // NewPassword is a required field + NewPassword *string `min:"1" type:"string" required:"true"` + + // The IAM user's current password. + // + // OldPassword is a required field + OldPassword *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ChangePasswordInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ChangePasswordInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ChangePasswordInput"} + + if s.NewPassword == nil { + invalidParams.Add(aws.NewErrParamRequired("NewPassword")) + } + if s.NewPassword != nil && len(*s.NewPassword) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewPassword", 1)) + } + + if s.OldPassword == nil { + invalidParams.Add(aws.NewErrParamRequired("OldPassword")) + } + if s.OldPassword != nil && len(*s.OldPassword) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("OldPassword", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ChangePasswordOutput +type ChangePasswordOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ChangePasswordOutput) String() string { + return awsutil.Prettify(s) +} + +const opChangePassword = "ChangePassword" + +// ChangePasswordRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Changes the password of the IAM user who is calling this operation. The AWS +// account root user password is not affected by this operation. +// +// To change the password for a different user, see UpdateLoginProfile. 
For +// more information about modifying passwords, see Managing Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +// +// // Example sending a request using ChangePasswordRequest. +// req := client.ChangePasswordRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ChangePassword +func (c *Client) ChangePasswordRequest(input *ChangePasswordInput) ChangePasswordRequest { + op := &aws.Operation{ + Name: opChangePassword, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ChangePasswordInput{} + } + + req := c.newRequest(op, input, &ChangePasswordOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return ChangePasswordRequest{Request: req, Input: input, Copy: c.ChangePasswordRequest} +} + +// ChangePasswordRequest is the request type for the +// ChangePassword API operation. +type ChangePasswordRequest struct { + *aws.Request + Input *ChangePasswordInput + Copy func(*ChangePasswordInput) ChangePasswordRequest +} + +// Send marshals and sends the ChangePassword API request. +func (r ChangePasswordRequest) Send(ctx context.Context) (*ChangePasswordResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ChangePasswordResponse{ + ChangePasswordOutput: r.Request.Data.(*ChangePasswordOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ChangePasswordResponse is the response type for the +// ChangePassword API operation. +type ChangePasswordResponse struct { + *ChangePasswordOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ChangePassword request. +func (r *ChangePasswordResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go new file mode 100644 index 00000000..f55f78c2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccessKey.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccessKeyRequest +type CreateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM user that the new key will belong to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *CreateAccessKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateAccessKeyInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateAccessKey request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccessKeyResponse +type CreateAccessKeyOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the access key. + // + // AccessKey is a required field + AccessKey *AccessKey `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateAccessKey = "CreateAccessKey" + +// CreateAccessKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new AWS secret access key and corresponding AWS access key ID for +// the specified user. The default status for new keys is Active. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials. This is true even if the AWS +// account has no associated users. +// +// For information about limits on the number of keys you can create, see Limitations +// on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. You must save the key (for example, in +// a text file) if you want to be able to access it again. If a secret key is +// lost, you can delete the access keys for the associated user and then create +// new keys. +// +// // Example sending a request using CreateAccessKeyRequest. +// req := client.CreateAccessKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccessKey +func (c *Client) CreateAccessKeyRequest(input *CreateAccessKeyInput) CreateAccessKeyRequest { + op := &aws.Operation{ + Name: opCreateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccessKeyInput{} + } + + req := c.newRequest(op, input, &CreateAccessKeyOutput{}) + return CreateAccessKeyRequest{Request: req, Input: input, Copy: c.CreateAccessKeyRequest} +} + +// CreateAccessKeyRequest is the request type for the +// CreateAccessKey API operation. +type CreateAccessKeyRequest struct { + *aws.Request + Input *CreateAccessKeyInput + Copy func(*CreateAccessKeyInput) CreateAccessKeyRequest +} + +// Send marshals and sends the CreateAccessKey API request. +func (r CreateAccessKeyRequest) Send(ctx context.Context) (*CreateAccessKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateAccessKeyResponse{ + CreateAccessKeyOutput: r.Request.Data.(*CreateAccessKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateAccessKeyResponse is the response type for the +// CreateAccessKey API operation. 
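+//
+// Editor's note (illustrative sketch, not part of the generated file): since
+// the secret is only returned at creation time, persist it immediately; the
+// user name is hypothetical and the AccessKey field names are the standard
+// IAM ones.
+//
+//    req := svc.CreateAccessKeyRequest(&iam.CreateAccessKeyInput{
+//        UserName: aws.String("jdoe"),
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    // Save both halves now; SecretAccessKey cannot be retrieved later.
+//    fmt.Println(*resp.AccessKey.AccessKeyId)
+//    fmt.Println(*resp.AccessKey.SecretAccessKey)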
+type CreateAccessKeyResponse struct { + *CreateAccessKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateAccessKey request. +func (r *CreateAccessKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go new file mode 100644 index 00000000..cbf4352b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateAccountAlias.go @@ -0,0 +1,131 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccountAliasRequest +type CreateAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The account alias to create. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. + // + // AccountAlias is a required field + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateAccountAliasInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateAccountAliasInput"} + + if s.AccountAlias == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountAlias")) + } + if s.AccountAlias != nil && len(*s.AccountAlias) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("AccountAlias", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccountAliasOutput +type CreateAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s CreateAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateAccountAlias = "CreateAccountAlias" + +// CreateAccountAliasRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates an alias for your AWS account. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (https://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateAccountAliasRequest. 
+// req := client.CreateAccountAliasRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateAccountAlias +func (c *Client) CreateAccountAliasRequest(input *CreateAccountAliasInput) CreateAccountAliasRequest { + op := &aws.Operation{ + Name: opCreateAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateAccountAliasInput{} + } + + req := c.newRequest(op, input, &CreateAccountAliasOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return CreateAccountAliasRequest{Request: req, Input: input, Copy: c.CreateAccountAliasRequest} +} + +// CreateAccountAliasRequest is the request type for the +// CreateAccountAlias API operation. +type CreateAccountAliasRequest struct { + *aws.Request + Input *CreateAccountAliasInput + Copy func(*CreateAccountAliasInput) CreateAccountAliasRequest +} + +// Send marshals and sends the CreateAccountAlias API request. +func (r CreateAccountAliasRequest) Send(ctx context.Context) (*CreateAccountAliasResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateAccountAliasResponse{ + CreateAccountAliasOutput: r.Request.Data.(*CreateAccountAliasOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateAccountAliasResponse is the response type for the +// CreateAccountAlias API operation. +type CreateAccountAliasResponse struct { + *CreateAccountAliasOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateAccountAlias request. +func (r *CreateAccountAliasResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go new file mode 100644 index 00000000..ff372628 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateGroup.go @@ -0,0 +1,152 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateGroupRequest +type CreateGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to create. Do not include the path in this value. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateGroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateGroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateGroupInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Path", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateGroup request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateGroupResponse +type CreateGroupOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new group. + // + // Group is a required field + Group *Group `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateGroupOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateGroup = "CreateGroup" + +// CreateGroupRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new group. +// +// For information about the number of groups you can create, see Limitations +// on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateGroupRequest. +// req := client.CreateGroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateGroup +func (c *Client) CreateGroupRequest(input *CreateGroupInput) CreateGroupRequest { + op := &aws.Operation{ + Name: opCreateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateGroupInput{} + } + + req := c.newRequest(op, input, &CreateGroupOutput{}) + return CreateGroupRequest{Request: req, Input: input, Copy: c.CreateGroupRequest} +} + +// CreateGroupRequest is the request type for the +// CreateGroup API operation. +type CreateGroupRequest struct { + *aws.Request + Input *CreateGroupInput + Copy func(*CreateGroupInput) CreateGroupRequest +} + +// Send marshals and sends the CreateGroup API request. +func (r CreateGroupRequest) Send(ctx context.Context) (*CreateGroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateGroupResponse{ + CreateGroupOutput: r.Request.Data.(*CreateGroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateGroupResponse is the response type for the +// CreateGroup API operation. 
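+//
+// Editor's note (illustrative sketch, not part of the generated file):
+// creating a group under an explicit path; omitting Path defaults it to "/".
+// The group name and path are hypothetical.
+//
+//    req := svc.CreateGroupRequest(&iam.CreateGroupInput{
+//        GroupName: aws.String("developers"),
+//        Path:      aws.String("/engineering/"),
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(*resp.Group.Arn)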
+type CreateGroupResponse struct { + *CreateGroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateGroup request. +func (r *CreateGroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go new file mode 100644 index 00000000..5451c64e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateInstanceProfile.go @@ -0,0 +1,153 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateInstanceProfileRequest +type CreateInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to create. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s CreateInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateInstanceProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateInstanceProfileInput"} + + if s.InstanceProfileName == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InstanceProfileName", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Path", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateInstanceProfile request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateInstanceProfileResponse +type CreateInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new instance profile. 
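+	// As an illustrative sketch (assuming a successful Send whose result is
+	// named resp), the new profile's ARN is read as resp.InstanceProfile.Arn.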
+ // + // InstanceProfile is a required field + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateInstanceProfile = "CreateInstanceProfile" + +// CreateInstanceProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new instance profile. For information about instance profiles, +// go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// For information about the number of instance profiles you can create, see +// Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateInstanceProfileRequest. +// req := client.CreateInstanceProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateInstanceProfile +func (c *Client) CreateInstanceProfileRequest(input *CreateInstanceProfileInput) CreateInstanceProfileRequest { + op := &aws.Operation{ + Name: opCreateInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateInstanceProfileInput{} + } + + req := c.newRequest(op, input, &CreateInstanceProfileOutput{}) + return CreateInstanceProfileRequest{Request: req, Input: input, Copy: c.CreateInstanceProfileRequest} +} + +// CreateInstanceProfileRequest is the request type for the +// CreateInstanceProfile API operation. +type CreateInstanceProfileRequest struct { + *aws.Request + Input *CreateInstanceProfileInput + Copy func(*CreateInstanceProfileInput) CreateInstanceProfileRequest +} + +// Send marshals and sends the CreateInstanceProfile API request. +func (r CreateInstanceProfileRequest) Send(ctx context.Context) (*CreateInstanceProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateInstanceProfileResponse{ + CreateInstanceProfileOutput: r.Request.Data.(*CreateInstanceProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateInstanceProfileResponse is the response type for the +// CreateInstanceProfile API operation. +type CreateInstanceProfileResponse struct { + *CreateInstanceProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateInstanceProfile request. +func (r *CreateInstanceProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go new file mode 100644 index 00000000..4e33cc94 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateLoginProfile.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateLoginProfileRequest +type CreateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) that is used to validate + // this parameter is a string of characters. That string can include almost + // any printable ASCII character from the space (\u0020) through the end of + // the ASCII character range (\u00FF). You can also include the tab (\u0009), + // line feed (\u000A), and carriage return (\u000D) characters. Any of these + // characters are valid in a password. However, many tools, such as the AWS + // Management Console, might restrict the ability to type certain characters + // because they have special meaning within that tool. + // + // Password is a required field + Password *string `min:"1" type:"string" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the IAM user to create a password for. The user must already + // exist. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateLoginProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateLoginProfileInput"} + + if s.Password == nil { + invalidParams.Add(aws.NewErrParamRequired("Password")) + } + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Password", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateLoginProfile request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateLoginProfileResponse +type CreateLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the user name and password create date. + // + // LoginProfile is a required field + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateLoginProfile = "CreateLoginProfile" + +// CreateLoginProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a password for the specified user, giving the user the ability to +// access AWS services through the AWS Management Console. For more information +// about managing passwords, see Managing Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. 
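+//
+// As a minimal sketch, with illustrative values that are assumptions rather
+// than service defaults, the input might be built as:
+//
+//    params := &CreateLoginProfileInput{
+//        UserName:              aws.String("jane"),
+//        Password:              aws.String("an-initial-password"),
+//        PasswordResetRequired: aws.Bool(true),
+//    }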
+// +// // Example sending a request using CreateLoginProfileRequest. +// req := client.CreateLoginProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateLoginProfile +func (c *Client) CreateLoginProfileRequest(input *CreateLoginProfileInput) CreateLoginProfileRequest { + op := &aws.Operation{ + Name: opCreateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateLoginProfileInput{} + } + + req := c.newRequest(op, input, &CreateLoginProfileOutput{}) + return CreateLoginProfileRequest{Request: req, Input: input, Copy: c.CreateLoginProfileRequest} +} + +// CreateLoginProfileRequest is the request type for the +// CreateLoginProfile API operation. +type CreateLoginProfileRequest struct { + *aws.Request + Input *CreateLoginProfileInput + Copy func(*CreateLoginProfileInput) CreateLoginProfileRequest +} + +// Send marshals and sends the CreateLoginProfile API request. +func (r CreateLoginProfileRequest) Send(ctx context.Context) (*CreateLoginProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateLoginProfileResponse{ + CreateLoginProfileOutput: r.Request.Data.(*CreateLoginProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateLoginProfileResponse is the response type for the +// CreateLoginProfile API operation. +type CreateLoginProfileResponse struct { + *CreateLoginProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateLoginProfile request. +func (r *CreateLoginProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go new file mode 100644 index 00000000..28be62da --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateOpenIDConnectProvider.go @@ -0,0 +1,194 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateOpenIDConnectProviderRequest +type CreateOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences). When a mobile or web app + // registers with an OpenID Connect provider, they establish a value that identifies + // the application. (This is the value that's sent as the client_id parameter + // on OAuth requests.) + // + // You can register multiple client IDs with the same provider. For example, + // you might have multiple applications that use the same OIDC provider. You + // cannot register more than 100 client IDs with a single IAM OIDC provider. + // + // There is no defined format for a client ID. The CreateOpenIDConnectProviderRequest + // operation accepts client IDs up to 255 characters long. + ClientIDList []string `type:"list"` + + // A list of server certificate thumbprints for the OpenID Connect (OIDC) identity + // provider's server certificates. 
Typically this list includes only one entry. + // However, IAM lets you have up to five thumbprints for an OIDC provider. This + // lets you maintain multiple thumbprints if the identity provider is rotating + // certificates. + // + // The server certificate thumbprint is the hex-encoded SHA-1 hash value of + // the X.509 certificate used by the domain where the OpenID Connect provider + // makes its keys available. It is always a 40-character string. + // + // You must provide at least one thumbprint when creating an IAM OIDC provider. + // For example, assume that the OIDC provider is server.example.com and the + // provider stores its keys at https://keys.server.example.com/openid-connect. + // In that case, the thumbprint string would be the hex-encoded SHA-1 hash value + // of the certificate used by https://keys.server.example.com. + // + // For more information about obtaining the OIDC provider's thumbprint, see + // Obtaining the Thumbprint for an OpenID Connect Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/identity-providers-oidc-obtain-thumbprint.html) + // in the IAM User Guide. + // + // ThumbprintList is a required field + ThumbprintList []string `type:"list" required:"true"` + + // The URL of the identity provider. The URL must begin with https:// and should + // correspond to the iss claim in the provider's OpenID Connect ID tokens. Per + // the OIDC standard, path components are allowed but query parameters are not. + // Typically the URL consists of only a hostname, like https://server.example.org + // or https://example.com. + // + // You cannot register the same provider multiple times in a single AWS account. + // If you try to submit a URL that has already been used for an OpenID Connect + // provider in the AWS account, you will get an error. + // + // Url is a required field + Url *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateOpenIDConnectProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateOpenIDConnectProviderInput"} + + if s.ThumbprintList == nil { + invalidParams.Add(aws.NewErrParamRequired("ThumbprintList")) + } + + if s.Url == nil { + invalidParams.Add(aws.NewErrParamRequired("Url")) + } + if s.Url != nil && len(*s.Url) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Url", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateOpenIDConnectProvider request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateOpenIDConnectProviderResponse +type CreateOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new IAM OpenID Connect provider that + // is created. For more information, see OpenIDConnectProviderListEntry. + OpenIDConnectProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateOpenIDConnectProvider = "CreateOpenIDConnectProvider" + +// CreateOpenIDConnectProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. 
+// +// Creates an IAM entity to describe an identity provider (IdP) that supports +// OpenID Connect (OIDC) (http://openid.net/connect/). +// +// The OIDC provider that you create with this operation can be used as a principal +// in a role's trust policy. Such a policy establishes a trust relationship +// between AWS and the OIDC provider. +// +// When you create the IAM OIDC provider, you specify the following: +// +// * The URL of the OIDC identity provider (IdP) to trust +// +// * A list of client IDs (also known as audiences) that identify the application +// or applications that are allowed to authenticate using the OIDC provider +// +// * A list of thumbprints of the server certificate(s) that the IdP uses +// +// You get all of this information from the OIDC IdP that you want to use to +// access AWS. +// +// The trust for the OIDC provider is derived from the IAM provider that this +// operation creates. Therefore, it is best to limit access to the CreateOpenIDConnectProvider +// operation to highly privileged users. +// +// // Example sending a request using CreateOpenIDConnectProviderRequest. +// req := client.CreateOpenIDConnectProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateOpenIDConnectProvider +func (c *Client) CreateOpenIDConnectProviderRequest(input *CreateOpenIDConnectProviderInput) CreateOpenIDConnectProviderRequest { + op := &aws.Operation{ + Name: opCreateOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateOpenIDConnectProviderInput{} + } + + req := c.newRequest(op, input, &CreateOpenIDConnectProviderOutput{}) + return CreateOpenIDConnectProviderRequest{Request: req, Input: input, Copy: c.CreateOpenIDConnectProviderRequest} +} + +// CreateOpenIDConnectProviderRequest is the request type for the +// CreateOpenIDConnectProvider API operation. +type CreateOpenIDConnectProviderRequest struct { + *aws.Request + Input *CreateOpenIDConnectProviderInput + Copy func(*CreateOpenIDConnectProviderInput) CreateOpenIDConnectProviderRequest +} + +// Send marshals and sends the CreateOpenIDConnectProvider API request. +func (r CreateOpenIDConnectProviderRequest) Send(ctx context.Context) (*CreateOpenIDConnectProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateOpenIDConnectProviderResponse{ + CreateOpenIDConnectProviderOutput: r.Request.Data.(*CreateOpenIDConnectProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateOpenIDConnectProviderResponse is the response type for the +// CreateOpenIDConnectProvider API operation. +type CreateOpenIDConnectProviderResponse struct { + *CreateOpenIDConnectProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateOpenIDConnectProvider request. 
+func (r *CreateOpenIDConnectProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go new file mode 100644 index 00000000..685064db --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicy.go @@ -0,0 +1,192 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyRequest +type CreatePolicyInput struct { + _ struct{} `type:"structure"` + + // A friendly description of the policy. + // + // Typically used to store information about the permissions defined in the + // policy. For example, "Grants access to production DynamoDB tables." + // + // The policy description is immutable. After a value is assigned, it cannot + // be changed. + Description *string `type:"string"` + + // The path for the policy. + // + // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `type:"string"` + + // The JSON policy document that you want to use as the content for the new + // policy. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The friendly name of the policy. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreatePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
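+//
+// As an illustrative sketch (params names an already-built *CreatePolicyInput),
+// validation can be run before sending:
+//
+//    if err := params.Validate(); err != nil {
+//        // handle the invalid parameters before making the request
+//    }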
+func (s *CreatePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreatePolicyInput"} + + if s.PolicyDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyDocument", 1)) + } + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreatePolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyResponse +type CreatePolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreatePolicy = "CreatePolicy" + +// CreatePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new managed policy for your AWS account. +// +// This operation creates a policy version with a version identifier of v1 and +// sets v1 as the policy's default version. For more information about policy +// versions, see Versioning for Managed Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// For more information about managed policies in general, see Managed Policies +// and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using CreatePolicyRequest. +// req := client.CreatePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicy +func (c *Client) CreatePolicyRequest(input *CreatePolicyInput) CreatePolicyRequest { + op := &aws.Operation{ + Name: opCreatePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyInput{} + } + + req := c.newRequest(op, input, &CreatePolicyOutput{}) + return CreatePolicyRequest{Request: req, Input: input, Copy: c.CreatePolicyRequest} +} + +// CreatePolicyRequest is the request type for the +// CreatePolicy API operation. +type CreatePolicyRequest struct { + *aws.Request + Input *CreatePolicyInput + Copy func(*CreatePolicyInput) CreatePolicyRequest +} + +// Send marshals and sends the CreatePolicy API request. +func (r CreatePolicyRequest) Send(ctx context.Context) (*CreatePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreatePolicyResponse{ + CreatePolicyOutput: r.Request.Data.(*CreatePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreatePolicyResponse is the response type for the +// CreatePolicy API operation. +type CreatePolicyResponse struct { + *CreatePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreatePolicy request. 
+func (r *CreatePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go new file mode 100644 index 00000000..40ca4315 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreatePolicyVersion.go @@ -0,0 +1,181 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyVersionRequest +type CreatePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy to which you want to add + // a new version. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The JSON policy document that you want to use as the content for this new + // version of the policy. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // Specifies whether to set this version as the policy's default version. + // + // When this parameter is true, the new policy version becomes the operative + // version. That is, it becomes the version that is in effect for the IAM users, + // groups, and roles that the policy is attached to. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + SetAsDefault *bool `type:"boolean"` +} + +// String returns the string representation +func (s CreatePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
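+//
+// As a minimal sketch (the ARN and the policyJSON document are assumptions
+// for illustration), a new default version could be requested with:
+//
+//    params := &CreatePolicyVersionInput{
+//        PolicyArn:      aws.String("arn:aws:iam::123456789012:policy/ExamplePolicy"),
+//        PolicyDocument: aws.String(policyJSON),
+//        SetAsDefault:   aws.Bool(true),
+//    }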
+func (s *CreatePolicyVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreatePolicyVersionInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.PolicyDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyDocument", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreatePolicyVersion request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyVersionResponse +type CreatePolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new policy version. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s CreatePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreatePolicyVersion = "CreatePolicyVersion" + +// CreatePolicyVersionRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new version of the specified managed policy. To update a managed +// policy, you create a new policy version. A managed policy can have up to +// five versions. If the policy has five versions, you must delete an existing +// version using DeletePolicyVersion before you create a new version. +// +// Optionally, you can set the new version as the policy's default version. +// The default version is the version that is in effect for the IAM users, groups, +// and roles to which the policy is attached. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// // Example sending a request using CreatePolicyVersionRequest. +// req := client.CreatePolicyVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreatePolicyVersion +func (c *Client) CreatePolicyVersionRequest(input *CreatePolicyVersionInput) CreatePolicyVersionRequest { + op := &aws.Operation{ + Name: opCreatePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreatePolicyVersionInput{} + } + + req := c.newRequest(op, input, &CreatePolicyVersionOutput{}) + return CreatePolicyVersionRequest{Request: req, Input: input, Copy: c.CreatePolicyVersionRequest} +} + +// CreatePolicyVersionRequest is the request type for the +// CreatePolicyVersion API operation. +type CreatePolicyVersionRequest struct { + *aws.Request + Input *CreatePolicyVersionInput + Copy func(*CreatePolicyVersionInput) CreatePolicyVersionRequest +} + +// Send marshals and sends the CreatePolicyVersion API request. 
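+//
+// As an illustrative sketch, a caller-supplied deadline bounds the call
+// (the context and time imports are assumed on the caller's side):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    resp, err := req.Send(ctx)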
+func (r CreatePolicyVersionRequest) Send(ctx context.Context) (*CreatePolicyVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreatePolicyVersionResponse{ + CreatePolicyVersionOutput: r.Request.Data.(*CreatePolicyVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreatePolicyVersionResponse is the response type for the +// CreatePolicyVersion API operation. +type CreatePolicyVersionResponse struct { + *CreatePolicyVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreatePolicyVersion request. +func (r *CreatePolicyVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go new file mode 100644 index 00000000..f63bf1b4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateRole.go @@ -0,0 +1,228 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateRoleRequest +type CreateRoleInput struct { + _ struct{} `type:"structure"` + + // The trust relationship policy document that grants an entity permission to + // assume the role. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // AssumeRolePolicyDocument is a required field + AssumeRolePolicyDocument *string `min:"1" type:"string" required:"true"` + + // A description of the role. + Description *string `type:"string"` + + // The maximum session duration (in seconds) that you want to set for the specified + // role. If you do not specify a value for this setting, the default maximum + // of one hour is applied. This setting can have a value from 1 hour to 12 hours. + // + // Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds + // API parameter or the duration-seconds CLI parameter to request a longer session. + // The MaxSessionDuration setting determines the maximum duration that can be + // requested using the DurationSeconds parameter. If users don't specify a value + // for the DurationSeconds parameter, their security credentials are valid for + // one hour by default. This applies when you use the AssumeRole* API operations + // or the assume-role* CLI operations but does not apply when you use those + // operations to create a console URL. 
For more information, see Using IAM Roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the + // IAM User Guide. + MaxSessionDuration *int64 `min:"3600" type:"integer"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The ARN of the policy that is used to set the permissions boundary for the + // role. + PermissionsBoundary *string `min:"20" type:"string"` + + // The name of the role to create. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` + + // A list of tags that you want to attach to the newly created role. Each tag + // consists of a key name and an associated value. For more information about + // tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags per role, then the entire request fails and the role is not created. + Tags []Tag `type:"list"` +} + +// String returns the string representation +func (s CreateRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateRoleInput"} + + if s.AssumeRolePolicyDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("AssumeRolePolicyDocument")) + } + if s.AssumeRolePolicyDocument != nil && len(*s.AssumeRolePolicyDocument) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AssumeRolePolicyDocument", 1)) + } + if s.MaxSessionDuration != nil && *s.MaxSessionDuration < 3600 { + invalidParams.Add(aws.NewErrParamMinValue("MaxSessionDuration", 3600)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Path", 1)) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PermissionsBoundary", 20)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateRole request. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateRoleResponse +type CreateRoleOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new role. + // + // Role is a required field + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateRole = "CreateRole" + +// CreateRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new role for your AWS account. For more information about roles, +// go to IAM Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For information about limitations on role names and the number of roles you +// can create, go to Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateRoleRequest. +// req := client.CreateRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateRole +func (c *Client) CreateRoleRequest(input *CreateRoleInput) CreateRoleRequest { + op := &aws.Operation{ + Name: opCreateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateRoleInput{} + } + + req := c.newRequest(op, input, &CreateRoleOutput{}) + return CreateRoleRequest{Request: req, Input: input, Copy: c.CreateRoleRequest} +} + +// CreateRoleRequest is the request type for the +// CreateRole API operation. +type CreateRoleRequest struct { + *aws.Request + Input *CreateRoleInput + Copy func(*CreateRoleInput) CreateRoleRequest +} + +// Send marshals and sends the CreateRole API request. +func (r CreateRoleRequest) Send(ctx context.Context) (*CreateRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateRoleResponse{ + CreateRoleOutput: r.Request.Data.(*CreateRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateRoleResponse is the response type for the +// CreateRole API operation. +type CreateRoleResponse struct { + *CreateRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateRole request. +func (r *CreateRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go new file mode 100644 index 00000000..acefd5ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateSAMLProvider.go @@ -0,0 +1,168 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateSAMLProviderRequest +type CreateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The name of the provider to create. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + // + // For more information, see About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) + // in the IAM User Guide + // + // SAMLMetadataDocument is a required field + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateSAMLProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateSAMLProviderInput"} + + if s.Name == nil { + invalidParams.Add(aws.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Name", 1)) + } + + if s.SAMLMetadataDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("SAMLMetadataDocument")) + } + if s.SAMLMetadataDocument != nil && len(*s.SAMLMetadataDocument) < 1000 { + invalidParams.Add(aws.NewErrParamMinLen("SAMLMetadataDocument", 1000)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateSAMLProvider request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateSAMLProviderResponse +type CreateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the new SAML provider resource in IAM. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s CreateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateSAMLProvider = "CreateSAMLProvider" + +// CreateSAMLProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates an IAM resource that describes an identity provider (IdP) that supports +// SAML 2.0. +// +// The SAML provider resource that you create with this operation can be used +// as a principal in an IAM role's trust policy. Such a policy can enable federated +// users who sign in using the SAML IdP to assume the role. You can create an +// IAM role that supports Web-based single sign-on (SSO) to the AWS Management +// Console or one that supports API access to AWS. +// +// When you create the SAML provider resource, you upload a SAML metadata document +// that you get from your IdP. That document includes the issuer's name, expiration +// information, and keys that can be used to validate the SAML authentication +// response (assertions) that the IdP sends. You must generate the metadata +// document using the identity management software that is used as your organization's +// IdP. 
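+//
+// As a minimal sketch (the provider name and the metadataXML document are
+// illustrative assumptions):
+//
+//    params := &CreateSAMLProviderInput{
+//        Name:                 aws.String("MySAMLProvider"),
+//        SAMLMetadataDocument: aws.String(metadataXML), // must be >= 1000 characters
+//    }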
+// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// For more information, see Enabling SAML 2.0 Federated Users to Access the +// AWS Management Console (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) +// and About SAML 2.0-based Federation (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateSAMLProviderRequest. +// req := client.CreateSAMLProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateSAMLProvider +func (c *Client) CreateSAMLProviderRequest(input *CreateSAMLProviderInput) CreateSAMLProviderRequest { + op := &aws.Operation{ + Name: opCreateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateSAMLProviderInput{} + } + + req := c.newRequest(op, input, &CreateSAMLProviderOutput{}) + return CreateSAMLProviderRequest{Request: req, Input: input, Copy: c.CreateSAMLProviderRequest} +} + +// CreateSAMLProviderRequest is the request type for the +// CreateSAMLProvider API operation. +type CreateSAMLProviderRequest struct { + *aws.Request + Input *CreateSAMLProviderInput + Copy func(*CreateSAMLProviderInput) CreateSAMLProviderRequest +} + +// Send marshals and sends the CreateSAMLProvider API request. +func (r CreateSAMLProviderRequest) Send(ctx context.Context) (*CreateSAMLProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateSAMLProviderResponse{ + CreateSAMLProviderOutput: r.Request.Data.(*CreateSAMLProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateSAMLProviderResponse is the response type for the +// CreateSAMLProvider API operation. +type CreateSAMLProviderResponse struct { + *CreateSAMLProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateSAMLProvider request. +func (r *CreateSAMLProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go new file mode 100644 index 00000000..14bdb271 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceLinkedRole.go @@ -0,0 +1,159 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceLinkedRoleRequest +type CreateServiceLinkedRoleInput struct { + _ struct{} `type:"structure"` + + // The service principal for the AWS service to which this role is attached. + // You use a string similar to a URL but without the http:// in front. For example: + // elasticbeanstalk.amazonaws.com. + // + // Service principals are unique and case-sensitive. 
To find the exact service + // principal for your service-linked role, see AWS Services That Work with IAM + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) + // in the IAM User Guide. Look for the services that have Yes in the Service-Linked + // Role column. Choose the Yes link to view the service-linked role documentation + // for that service. + // + // AWSServiceName is a required field + AWSServiceName *string `min:"1" type:"string" required:"true"` + + // A string that you provide, which is combined with the service-provided prefix + // to form the complete role name. If you make multiple requests for the same + // service, then you must supply a different CustomSuffix for each request. + // Otherwise the request fails with a duplicate role name error. For example, + // you could add -1 or -debug to the suffix. + // + // Some services do not support the CustomSuffix parameter. If you provide an + // optional suffix and the operation fails, try the operation again without + // the suffix. + CustomSuffix *string `min:"1" type:"string"` + + // The description of the role. + Description *string `type:"string"` +} + +// String returns the string representation +func (s CreateServiceLinkedRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateServiceLinkedRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateServiceLinkedRoleInput"} + + if s.AWSServiceName == nil { + invalidParams.Add(aws.NewErrParamRequired("AWSServiceName")) + } + if s.AWSServiceName != nil && len(*s.AWSServiceName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("AWSServiceName", 1)) + } + if s.CustomSuffix != nil && len(*s.CustomSuffix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CustomSuffix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceLinkedRoleResponse +type CreateServiceLinkedRoleOutput struct { + _ struct{} `type:"structure"` + + // A Role object that contains details about the newly created role. + Role *Role `type:"structure"` +} + +// String returns the string representation +func (s CreateServiceLinkedRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateServiceLinkedRole = "CreateServiceLinkedRole" + +// CreateServiceLinkedRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates an IAM role that is linked to a specific AWS service. The service +// controls the attached policies and when the role can be deleted. This helps +// ensure that the service is not broken by an unexpectedly changed or deleted +// role, which could put your AWS resources into an unknown state. Allowing +// the service to control the role helps improve service stability and proper +// cleanup when a service and its role are no longer needed. For more information, +// see Using Service-Linked Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html) +// in the IAM User Guide. +// +// To attach a policy to this service-linked role, you must make the request +// using the AWS service that depends on this role. +// +// // Example sending a request using CreateServiceLinkedRoleRequest. 
+// req := client.CreateServiceLinkedRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceLinkedRole +func (c *Client) CreateServiceLinkedRoleRequest(input *CreateServiceLinkedRoleInput) CreateServiceLinkedRoleRequest { + op := &aws.Operation{ + Name: opCreateServiceLinkedRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceLinkedRoleInput{} + } + + req := c.newRequest(op, input, &CreateServiceLinkedRoleOutput{}) + return CreateServiceLinkedRoleRequest{Request: req, Input: input, Copy: c.CreateServiceLinkedRoleRequest} +} + +// CreateServiceLinkedRoleRequest is the request type for the +// CreateServiceLinkedRole API operation. +type CreateServiceLinkedRoleRequest struct { + *aws.Request + Input *CreateServiceLinkedRoleInput + Copy func(*CreateServiceLinkedRoleInput) CreateServiceLinkedRoleRequest +} + +// Send marshals and sends the CreateServiceLinkedRole API request. +func (r CreateServiceLinkedRoleRequest) Send(ctx context.Context) (*CreateServiceLinkedRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateServiceLinkedRoleResponse{ + CreateServiceLinkedRoleOutput: r.Request.Data.(*CreateServiceLinkedRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateServiceLinkedRoleResponse is the response type for the +// CreateServiceLinkedRole API operation. +type CreateServiceLinkedRoleResponse struct { + *CreateServiceLinkedRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateServiceLinkedRole request. +func (r *CreateServiceLinkedRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go new file mode 100644 index 00000000..239b7f64 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateServiceSpecificCredential.go @@ -0,0 +1,157 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceSpecificCredentialRequest +type CreateServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The name of the AWS service that is to be associated with the credentials. + // The service you specify here is the only service that can be accessed using + // these credentials. + // + // ServiceName is a required field + ServiceName *string `type:"string" required:"true"` + + // The name of the IAM user that is to be associated with the credentials. The + // new service-specific credentials have the same permissions as the associated + // user except that they can be used only to access the specified service. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateServiceSpecificCredentialInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateServiceSpecificCredentialInput"} + + if s.ServiceName == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceName")) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceSpecificCredentialResponse +type CreateServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains information about the newly created service-specific + // credential. + // + // This is the only time that the password for this credential set is available. + // It cannot be recovered later. Instead, you must reset the password with ResetServiceSpecificCredential. + ServiceSpecificCredential *ServiceSpecificCredential `type:"structure"` +} + +// String returns the string representation +func (s CreateServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateServiceSpecificCredential = "CreateServiceSpecificCredential" + +// CreateServiceSpecificCredentialRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Generates a set of credentials consisting of a user name and password that +// can be used to access the service specified in the request. These credentials +// are generated by IAM, and can be used only for the specified service. +// +// You can have a maximum of two sets of service-specific credentials for each +// supported service per user. +// +// The only supported service at this time is AWS CodeCommit. +// +// You can reset the password to a new service-generated value by calling ResetServiceSpecificCredential. +// +// For more information about service-specific credentials, see Using IAM with +// AWS CodeCommit: Git Credentials, SSH Keys, and AWS Access Keys (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_ssh-keys.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateServiceSpecificCredentialRequest. 
+// req := client.CreateServiceSpecificCredentialRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateServiceSpecificCredential +func (c *Client) CreateServiceSpecificCredentialRequest(input *CreateServiceSpecificCredentialInput) CreateServiceSpecificCredentialRequest { + op := &aws.Operation{ + Name: opCreateServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateServiceSpecificCredentialInput{} + } + + req := c.newRequest(op, input, &CreateServiceSpecificCredentialOutput{}) + return CreateServiceSpecificCredentialRequest{Request: req, Input: input, Copy: c.CreateServiceSpecificCredentialRequest} +} + +// CreateServiceSpecificCredentialRequest is the request type for the +// CreateServiceSpecificCredential API operation. +type CreateServiceSpecificCredentialRequest struct { + *aws.Request + Input *CreateServiceSpecificCredentialInput + Copy func(*CreateServiceSpecificCredentialInput) CreateServiceSpecificCredentialRequest +} + +// Send marshals and sends the CreateServiceSpecificCredential API request. +func (r CreateServiceSpecificCredentialRequest) Send(ctx context.Context) (*CreateServiceSpecificCredentialResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateServiceSpecificCredentialResponse{ + CreateServiceSpecificCredentialOutput: r.Request.Data.(*CreateServiceSpecificCredentialOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateServiceSpecificCredentialResponse is the response type for the +// CreateServiceSpecificCredential API operation. +type CreateServiceSpecificCredentialResponse struct { + *CreateServiceSpecificCredentialOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateServiceSpecificCredential request. +func (r *CreateServiceSpecificCredentialResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go new file mode 100644 index 00000000..88a325fd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateUser.go @@ -0,0 +1,174 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateUserRequest +type CreateUserInput struct { + _ struct{} `type:"structure"` + + // The path for the user name. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! 
(\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The ARN of the policy that is used to set the permissions boundary for the + // user. + PermissionsBoundary *string `min:"20" type:"string"` + + // A list of tags that you want to attach to the newly created user. Each tag + // consists of a key name and an associated value. For more information about + // tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. + // + // If any one of the tags is invalid or if you exceed the allowed number of + // tags per user, then the entire request fails and the user is not created. + Tags []Tag `type:"list"` + + // The name of the user to create. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateUserInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Path", 1)) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PermissionsBoundary", 20)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + if s.Tags != nil { + for i, v := range s.Tags { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateUser request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateUserResponse +type CreateUserOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the new IAM user. + User *User `type:"structure"` +} + +// String returns the string representation +func (s CreateUserOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateUser = "CreateUser" + +// CreateUserRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new IAM user for your AWS account. +// +// For information about limitations on the number of IAM users you can create, +// see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// // Example sending a request using CreateUserRequest. 
+// req := client.CreateUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateUser +func (c *Client) CreateUserRequest(input *CreateUserInput) CreateUserRequest { + op := &aws.Operation{ + Name: opCreateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateUserInput{} + } + + req := c.newRequest(op, input, &CreateUserOutput{}) + return CreateUserRequest{Request: req, Input: input, Copy: c.CreateUserRequest} +} + +// CreateUserRequest is the request type for the +// CreateUser API operation. +type CreateUserRequest struct { + *aws.Request + Input *CreateUserInput + Copy func(*CreateUserInput) CreateUserRequest +} + +// Send marshals and sends the CreateUser API request. +func (r CreateUserRequest) Send(ctx context.Context) (*CreateUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateUserResponse{ + CreateUserOutput: r.Request.Data.(*CreateUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateUserResponse is the response type for the +// CreateUser API operation. +type CreateUserResponse struct { + *CreateUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateUser request. +func (r *CreateUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go new file mode 100644 index 00000000..ac14f859 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_CreateVirtualMFADevice.go @@ -0,0 +1,163 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateVirtualMFADeviceRequest +type CreateVirtualMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The path for the virtual MFA device. For more information about paths, see + // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + Path *string `min:"1" type:"string"` + + // The name of the virtual MFA device. Use with path to uniquely identify a + // virtual MFA device. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // VirtualMFADeviceName is a required field + VirtualMFADeviceName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateVirtualMFADeviceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "CreateVirtualMFADeviceInput"} + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Path", 1)) + } + + if s.VirtualMFADeviceName == nil { + invalidParams.Add(aws.NewErrParamRequired("VirtualMFADeviceName")) + } + if s.VirtualMFADeviceName != nil && len(*s.VirtualMFADeviceName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("VirtualMFADeviceName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful CreateVirtualMFADevice request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateVirtualMFADeviceResponse +type CreateVirtualMFADeviceOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the new virtual MFA device. + // + // VirtualMFADevice is a required field + VirtualMFADevice *VirtualMFADevice `type:"structure" required:"true"` +} + +// String returns the string representation +func (s CreateVirtualMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +const opCreateVirtualMFADevice = "CreateVirtualMFADevice" + +// CreateVirtualMFADeviceRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Creates a new virtual MFA device for the AWS account. After creating the +// virtual MFA, use EnableMFADevice to attach the MFA device to an IAM user. +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// For information about limits on the number of MFA devices you can create, +// see Limitations on Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// The seed information contained in the QR code and the Base32 string should +// be treated like any other secret access information. In other words, protect +// the seed information as you would your AWS access keys or your passwords. +// After you provision your virtual device, you should ensure that the information +// is destroyed following secure procedures. +// +// // Example sending a request using CreateVirtualMFADeviceRequest. 
+// req := client.CreateVirtualMFADeviceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/CreateVirtualMFADevice +func (c *Client) CreateVirtualMFADeviceRequest(input *CreateVirtualMFADeviceInput) CreateVirtualMFADeviceRequest { + op := &aws.Operation{ + Name: opCreateVirtualMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &CreateVirtualMFADeviceInput{} + } + + req := c.newRequest(op, input, &CreateVirtualMFADeviceOutput{}) + return CreateVirtualMFADeviceRequest{Request: req, Input: input, Copy: c.CreateVirtualMFADeviceRequest} +} + +// CreateVirtualMFADeviceRequest is the request type for the +// CreateVirtualMFADevice API operation. +type CreateVirtualMFADeviceRequest struct { + *aws.Request + Input *CreateVirtualMFADeviceInput + Copy func(*CreateVirtualMFADeviceInput) CreateVirtualMFADeviceRequest +} + +// Send marshals and sends the CreateVirtualMFADevice API request. +func (r CreateVirtualMFADeviceRequest) Send(ctx context.Context) (*CreateVirtualMFADeviceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &CreateVirtualMFADeviceResponse{ + CreateVirtualMFADeviceOutput: r.Request.Data.(*CreateVirtualMFADeviceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// CreateVirtualMFADeviceResponse is the response type for the +// CreateVirtualMFADevice API operation. +type CreateVirtualMFADeviceResponse struct { + *CreateVirtualMFADeviceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// CreateVirtualMFADevice request. +func (r *CreateVirtualMFADeviceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go new file mode 100644 index 00000000..abef4108 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeactivateMFADevice.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeactivateMFADeviceRequest +type DeactivateMFADeviceInput struct { + _ struct{} `type:"structure"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@:/- + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to deactivate. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeactivateMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeactivateMFADeviceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeactivateMFADeviceInput"} + + if s.SerialNumber == nil { + invalidParams.Add(aws.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeactivateMFADeviceOutput +type DeactivateMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeactivateMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeactivateMFADevice = "DeactivateMFADevice" + +// DeactivateMFADeviceRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deactivates the specified MFA device and removes it from association with +// the user name for which it was originally enabled. +// +// For more information about creating and working with virtual MFA devices, +// go to Enabling a Virtual Multi-factor Authentication (MFA) Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// // Example sending a request using DeactivateMFADeviceRequest. +// req := client.DeactivateMFADeviceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeactivateMFADevice +func (c *Client) DeactivateMFADeviceRequest(input *DeactivateMFADeviceInput) DeactivateMFADeviceRequest { + op := &aws.Operation{ + Name: opDeactivateMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeactivateMFADeviceInput{} + } + + req := c.newRequest(op, input, &DeactivateMFADeviceOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeactivateMFADeviceRequest{Request: req, Input: input, Copy: c.DeactivateMFADeviceRequest} +} + +// DeactivateMFADeviceRequest is the request type for the +// DeactivateMFADevice API operation. +type DeactivateMFADeviceRequest struct { + *aws.Request + Input *DeactivateMFADeviceInput + Copy func(*DeactivateMFADeviceInput) DeactivateMFADeviceRequest +} + +// Send marshals and sends the DeactivateMFADevice API request. 
+func (r DeactivateMFADeviceRequest) Send(ctx context.Context) (*DeactivateMFADeviceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeactivateMFADeviceResponse{ + DeactivateMFADeviceOutput: r.Request.Data.(*DeactivateMFADeviceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeactivateMFADeviceResponse is the response type for the +// DeactivateMFADevice API operation. +type DeactivateMFADeviceResponse struct { + *DeactivateMFADeviceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeactivateMFADevice request. +func (r *DeactivateMFADeviceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go new file mode 100644 index 00000000..7cfb2c59 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccessKey.go @@ -0,0 +1,145 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccessKeyRequest +type DeleteAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID for the access key ID and secret access key you want to + // delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The name of the user whose access key pair you want to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *DeleteAccessKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteAccessKeyInput"} + + if s.AccessKeyId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(aws.NewErrParamMinLen("AccessKeyId", 16)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccessKeyOutput +type DeleteAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteAccessKey = "DeleteAccessKey" + +// DeleteAccessKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the access key pair associated with the specified IAM user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// // Example sending a request using DeleteAccessKeyRequest. +// req := client.DeleteAccessKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccessKey +func (c *Client) DeleteAccessKeyRequest(input *DeleteAccessKeyInput) DeleteAccessKeyRequest { + op := &aws.Operation{ + Name: opDeleteAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccessKeyInput{} + } + + req := c.newRequest(op, input, &DeleteAccessKeyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteAccessKeyRequest{Request: req, Input: input, Copy: c.DeleteAccessKeyRequest} +} + +// DeleteAccessKeyRequest is the request type for the +// DeleteAccessKey API operation. +type DeleteAccessKeyRequest struct { + *aws.Request + Input *DeleteAccessKeyInput + Copy func(*DeleteAccessKeyInput) DeleteAccessKeyRequest +} + +// Send marshals and sends the DeleteAccessKey API request. +func (r DeleteAccessKeyRequest) Send(ctx context.Context) (*DeleteAccessKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteAccessKeyResponse{ + DeleteAccessKeyOutput: r.Request.Data.(*DeleteAccessKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteAccessKeyResponse is the response type for the +// DeleteAccessKey API operation. +type DeleteAccessKeyResponse struct { + *DeleteAccessKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteAccessKey request. 
+func (r *DeleteAccessKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go new file mode 100644 index 00000000..4ecc7c18 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountAlias.go @@ -0,0 +1,131 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountAliasRequest +type DeleteAccountAliasInput struct { + _ struct{} `type:"structure"` + + // The name of the account alias to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of lowercase letters, digits, and dashes. + // You cannot start or finish with a dash, nor can you have two dashes in a + // row. + // + // AccountAlias is a required field + AccountAlias *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteAccountAliasInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteAccountAliasInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteAccountAliasInput"} + + if s.AccountAlias == nil { + invalidParams.Add(aws.NewErrParamRequired("AccountAlias")) + } + if s.AccountAlias != nil && len(*s.AccountAlias) < 3 { + invalidParams.Add(aws.NewErrParamMinLen("AccountAlias", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountAliasOutput +type DeleteAccountAliasOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountAliasOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteAccountAlias = "DeleteAccountAlias" + +// DeleteAccountAliasRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified AWS account alias. For information about using an AWS +// account alias, see Using an Alias for Your AWS Account ID (https://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +// +// // Example sending a request using DeleteAccountAliasRequest. 
+// req := client.DeleteAccountAliasRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountAlias +func (c *Client) DeleteAccountAliasRequest(input *DeleteAccountAliasInput) DeleteAccountAliasRequest { + op := &aws.Operation{ + Name: opDeleteAccountAlias, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountAliasInput{} + } + + req := c.newRequest(op, input, &DeleteAccountAliasOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteAccountAliasRequest{Request: req, Input: input, Copy: c.DeleteAccountAliasRequest} +} + +// DeleteAccountAliasRequest is the request type for the +// DeleteAccountAlias API operation. +type DeleteAccountAliasRequest struct { + *aws.Request + Input *DeleteAccountAliasInput + Copy func(*DeleteAccountAliasInput) DeleteAccountAliasRequest +} + +// Send marshals and sends the DeleteAccountAlias API request. +func (r DeleteAccountAliasRequest) Send(ctx context.Context) (*DeleteAccountAliasResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteAccountAliasResponse{ + DeleteAccountAliasOutput: r.Request.Data.(*DeleteAccountAliasOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteAccountAliasResponse is the response type for the +// DeleteAccountAlias API operation. +type DeleteAccountAliasResponse struct { + *DeleteAccountAliasOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteAccountAlias request. +func (r *DeleteAccountAliasResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go new file mode 100644 index 00000000..bbbe5d7c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteAccountPasswordPolicy.go @@ -0,0 +1,102 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountPasswordPolicyInput +type DeleteAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountPasswordPolicyOutput +type DeleteAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteAccountPasswordPolicy = "DeleteAccountPasswordPolicy" + +// DeleteAccountPasswordPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. 
+// +// Deletes the password policy for the AWS account. There are no parameters. +// +// // Example sending a request using DeleteAccountPasswordPolicyRequest. +// req := client.DeleteAccountPasswordPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteAccountPasswordPolicy +func (c *Client) DeleteAccountPasswordPolicyRequest(input *DeleteAccountPasswordPolicyInput) DeleteAccountPasswordPolicyRequest { + op := &aws.Operation{ + Name: opDeleteAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteAccountPasswordPolicyInput{} + } + + req := c.newRequest(op, input, &DeleteAccountPasswordPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteAccountPasswordPolicyRequest{Request: req, Input: input, Copy: c.DeleteAccountPasswordPolicyRequest} +} + +// DeleteAccountPasswordPolicyRequest is the request type for the +// DeleteAccountPasswordPolicy API operation. +type DeleteAccountPasswordPolicyRequest struct { + *aws.Request + Input *DeleteAccountPasswordPolicyInput + Copy func(*DeleteAccountPasswordPolicyInput) DeleteAccountPasswordPolicyRequest +} + +// Send marshals and sends the DeleteAccountPasswordPolicy API request. +func (r DeleteAccountPasswordPolicyRequest) Send(ctx context.Context) (*DeleteAccountPasswordPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteAccountPasswordPolicyResponse{ + DeleteAccountPasswordPolicyOutput: r.Request.Data.(*DeleteAccountPasswordPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteAccountPasswordPolicyResponse is the response type for the +// DeleteAccountPasswordPolicy API operation. +type DeleteAccountPasswordPolicyResponse struct { + *DeleteAccountPasswordPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteAccountPasswordPolicy request. +func (r *DeleteAccountPasswordPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go new file mode 100644 index 00000000..6fe80e58 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroup.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupRequest +type DeleteGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM group to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteGroupInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupOutput +type DeleteGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteGroup = "DeleteGroup" + +// DeleteGroupRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified IAM group. The group must not contain any users or +// have any attached policies. +// +// // Example sending a request using DeleteGroupRequest. +// req := client.DeleteGroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroup +func (c *Client) DeleteGroupRequest(input *DeleteGroupInput) DeleteGroupRequest { + op := &aws.Operation{ + Name: opDeleteGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupInput{} + } + + req := c.newRequest(op, input, &DeleteGroupOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteGroupRequest{Request: req, Input: input, Copy: c.DeleteGroupRequest} +} + +// DeleteGroupRequest is the request type for the +// DeleteGroup API operation. +type DeleteGroupRequest struct { + *aws.Request + Input *DeleteGroupInput + Copy func(*DeleteGroupInput) DeleteGroupRequest +} + +// Send marshals and sends the DeleteGroup API request. +func (r DeleteGroupRequest) Send(ctx context.Context) (*DeleteGroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteGroupResponse{ + DeleteGroupOutput: r.Request.Data.(*DeleteGroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteGroupResponse is the response type for the +// DeleteGroup API operation. +type DeleteGroupResponse struct { + *DeleteGroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteGroup request. 
+func (r *DeleteGroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go new file mode 100644 index 00000000..18fc1ecf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteGroupPolicy.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupPolicyRequest +type DeleteGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) identifying the group that the policy is + // embedded in. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name identifying the policy document to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGroupPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteGroupPolicyInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupPolicyOutput +type DeleteGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteGroupPolicy = "DeleteGroupPolicy" + +// DeleteGroupPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified inline policy that is embedded in the specified IAM +// group. +// +// A group can also have managed policies attached to it. To detach a managed +// policy from a group, use DetachGroupPolicy. 
For more information about policies, +// refer to Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using DeleteGroupPolicyRequest. +// req := client.DeleteGroupPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteGroupPolicy +func (c *Client) DeleteGroupPolicyRequest(input *DeleteGroupPolicyInput) DeleteGroupPolicyRequest { + op := &aws.Operation{ + Name: opDeleteGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteGroupPolicyInput{} + } + + req := c.newRequest(op, input, &DeleteGroupPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteGroupPolicyRequest{Request: req, Input: input, Copy: c.DeleteGroupPolicyRequest} +} + +// DeleteGroupPolicyRequest is the request type for the +// DeleteGroupPolicy API operation. +type DeleteGroupPolicyRequest struct { + *aws.Request + Input *DeleteGroupPolicyInput + Copy func(*DeleteGroupPolicyInput) DeleteGroupPolicyRequest +} + +// Send marshals and sends the DeleteGroupPolicy API request. +func (r DeleteGroupPolicyRequest) Send(ctx context.Context) (*DeleteGroupPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteGroupPolicyResponse{ + DeleteGroupPolicyOutput: r.Request.Data.(*DeleteGroupPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteGroupPolicyResponse is the response type for the +// DeleteGroupPolicy API operation. +type DeleteGroupPolicyResponse struct { + *DeleteGroupPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteGroupPolicy request. +func (r *DeleteGroupPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go new file mode 100644 index 00000000..7f595ac8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteInstanceProfile.go @@ -0,0 +1,137 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteInstanceProfileRequest +type DeleteInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteInstanceProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteInstanceProfileInput"} + + if s.InstanceProfileName == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteInstanceProfileOutput +type DeleteInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteInstanceProfile = "DeleteInstanceProfile" + +// DeleteInstanceProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified instance profile. The instance profile must not have +// an associated role. +// +// Make sure that you do not have any Amazon EC2 instances running with the +// instance profile you are about to delete. Deleting a role or instance profile +// that is associated with a running instance will break any applications running +// on the instance. +// +// For more information about instance profiles, go to About Instance Profiles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// // Example sending a request using DeleteInstanceProfileRequest. +// req := client.DeleteInstanceProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteInstanceProfile +func (c *Client) DeleteInstanceProfileRequest(input *DeleteInstanceProfileInput) DeleteInstanceProfileRequest { + op := &aws.Operation{ + Name: opDeleteInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteInstanceProfileInput{} + } + + req := c.newRequest(op, input, &DeleteInstanceProfileOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteInstanceProfileRequest{Request: req, Input: input, Copy: c.DeleteInstanceProfileRequest} +} + +// DeleteInstanceProfileRequest is the request type for the +// DeleteInstanceProfile API operation. +type DeleteInstanceProfileRequest struct { + *aws.Request + Input *DeleteInstanceProfileInput + Copy func(*DeleteInstanceProfileInput) DeleteInstanceProfileRequest +} + +// Send marshals and sends the DeleteInstanceProfile API request. 
+func (r DeleteInstanceProfileRequest) Send(ctx context.Context) (*DeleteInstanceProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteInstanceProfileResponse{ + DeleteInstanceProfileOutput: r.Request.Data.(*DeleteInstanceProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteInstanceProfileResponse is the response type for the +// DeleteInstanceProfile API operation. +type DeleteInstanceProfileResponse struct { + *DeleteInstanceProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteInstanceProfile request. +func (r *DeleteInstanceProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go new file mode 100644 index 00000000..e678f824 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteLoginProfile.go @@ -0,0 +1,134 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteLoginProfileRequest +type DeleteLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose password you want to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteLoginProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteLoginProfileInput"} + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteLoginProfileOutput +type DeleteLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteLoginProfile = "DeleteLoginProfile" + +// DeleteLoginProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the password for the specified IAM user, which terminates the user's +// ability to access AWS services through the AWS Management Console. +// +// Deleting a user's password does not prevent a user from accessing AWS through +// the command line interface or the API. 
To prevent all user access, you must +// also either make any access keys inactive or delete them. For more information +// about making keys inactive or deleting them, see UpdateAccessKey and DeleteAccessKey. +// +// // Example sending a request using DeleteLoginProfileRequest. +// req := client.DeleteLoginProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteLoginProfile +func (c *Client) DeleteLoginProfileRequest(input *DeleteLoginProfileInput) DeleteLoginProfileRequest { + op := &aws.Operation{ + Name: opDeleteLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteLoginProfileInput{} + } + + req := c.newRequest(op, input, &DeleteLoginProfileOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteLoginProfileRequest{Request: req, Input: input, Copy: c.DeleteLoginProfileRequest} +} + +// DeleteLoginProfileRequest is the request type for the +// DeleteLoginProfile API operation. +type DeleteLoginProfileRequest struct { + *aws.Request + Input *DeleteLoginProfileInput + Copy func(*DeleteLoginProfileInput) DeleteLoginProfileRequest +} + +// Send marshals and sends the DeleteLoginProfile API request. +func (r DeleteLoginProfileRequest) Send(ctx context.Context) (*DeleteLoginProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteLoginProfileResponse{ + DeleteLoginProfileOutput: r.Request.Data.(*DeleteLoginProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteLoginProfileResponse is the response type for the +// DeleteLoginProfile API operation. +type DeleteLoginProfileResponse struct { + *DeleteLoginProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteLoginProfile request. +func (r *DeleteLoginProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go new file mode 100644 index 00000000..9e5e566d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteOpenIDConnectProvider.go @@ -0,0 +1,133 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteOpenIDConnectProviderRequest +type DeleteOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OpenID Connect provider resource + // object to delete. You can get a list of OpenID Connect provider resource + // ARNs by using the ListOpenIDConnectProviders operation. 
+ // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteOpenIDConnectProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteOpenIDConnectProviderInput"} + + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteOpenIDConnectProviderOutput +type DeleteOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteOpenIDConnectProvider = "DeleteOpenIDConnectProvider" + +// DeleteOpenIDConnectProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes an OpenID Connect identity provider (IdP) resource object in IAM. +// +// Deleting an IAM OIDC provider resource does not update any roles that reference +// the provider as a principal in their trust policies. Any attempt to assume +// a role that references a deleted provider fails. +// +// This operation is idempotent; it does not fail or return an error if you +// call the operation for a provider that does not exist. +// +// // Example sending a request using DeleteOpenIDConnectProviderRequest. +// req := client.DeleteOpenIDConnectProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteOpenIDConnectProvider +func (c *Client) DeleteOpenIDConnectProviderRequest(input *DeleteOpenIDConnectProviderInput) DeleteOpenIDConnectProviderRequest { + op := &aws.Operation{ + Name: opDeleteOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteOpenIDConnectProviderInput{} + } + + req := c.newRequest(op, input, &DeleteOpenIDConnectProviderOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteOpenIDConnectProviderRequest{Request: req, Input: input, Copy: c.DeleteOpenIDConnectProviderRequest} +} + +// DeleteOpenIDConnectProviderRequest is the request type for the +// DeleteOpenIDConnectProvider API operation. +type DeleteOpenIDConnectProviderRequest struct { + *aws.Request + Input *DeleteOpenIDConnectProviderInput + Copy func(*DeleteOpenIDConnectProviderInput) DeleteOpenIDConnectProviderRequest +} + +// Send marshals and sends the DeleteOpenIDConnectProvider API request. 
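+//
+// A minimal calling sketch, mirroring the generated example style; the
+// provider ARN below is a placeholder, not a real resource:
+//
+//    input := &DeleteOpenIDConnectProviderInput{
+//        OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/example.com"),
+//    }
+//    req := client.DeleteOpenIDConnectProviderRequest(input)
+//    if _, err := req.Send(context.TODO()); err != nil {
+//        // handle the error; the operation itself is idempotent
+//    }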
+func (r DeleteOpenIDConnectProviderRequest) Send(ctx context.Context) (*DeleteOpenIDConnectProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteOpenIDConnectProviderResponse{ + DeleteOpenIDConnectProviderOutput: r.Request.Data.(*DeleteOpenIDConnectProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteOpenIDConnectProviderResponse is the response type for the +// DeleteOpenIDConnectProvider API operation. +type DeleteOpenIDConnectProviderResponse struct { + *DeleteOpenIDConnectProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteOpenIDConnectProvider request. +func (r *DeleteOpenIDConnectProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go new file mode 100644 index 00000000..30aef52a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicy.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyRequest +type DeletePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to delete. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeletePolicyInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyOutput +type DeletePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeletePolicy = "DeletePolicy" + +// DeletePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified managed policy. +// +// Before you can delete a managed policy, you must first detach the policy +// from all users, groups, and roles that it is attached to. In addition, you +// must delete all the policy's versions. 
The following steps describe the process +// for deleting a managed policy: +// +// * Detach the policy from all users, groups, and roles that the policy +// is attached to, using the DetachUserPolicy, DetachGroupPolicy, or DetachRolePolicy +// API operations. To list all the users, groups, and roles that a policy +// is attached to, use ListEntitiesForPolicy. +// +// * Delete all versions of the policy using DeletePolicyVersion. To list +// the policy's versions, use ListPolicyVersions. You cannot use DeletePolicyVersion +// to delete the version that is marked as the default version. You delete +// the policy's default version in the next step of the process. +// +// * Delete the policy (this automatically deletes the policy's default version) +// using this API. +// +// For information about managed policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using DeletePolicyRequest. +// req := client.DeletePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicy +func (c *Client) DeletePolicyRequest(input *DeletePolicyInput) DeletePolicyRequest { + op := &aws.Operation{ + Name: opDeletePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyInput{} + } + + req := c.newRequest(op, input, &DeletePolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeletePolicyRequest{Request: req, Input: input, Copy: c.DeletePolicyRequest} +} + +// DeletePolicyRequest is the request type for the +// DeletePolicy API operation. +type DeletePolicyRequest struct { + *aws.Request + Input *DeletePolicyInput + Copy func(*DeletePolicyInput) DeletePolicyRequest +} + +// Send marshals and sends the DeletePolicy API request. +func (r DeletePolicyRequest) Send(ctx context.Context) (*DeletePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeletePolicyResponse{ + DeletePolicyOutput: r.Request.Data.(*DeletePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeletePolicyResponse is the response type for the +// DeletePolicy API operation. +type DeletePolicyResponse struct { + *DeletePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeletePolicy request. +func (r *DeletePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go new file mode 100644 index 00000000..d054da11 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeletePolicyVersion.go @@ -0,0 +1,155 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyVersionRequest +type DeletePolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy from which you want to delete + // a version. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy version to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + // + // VersionId is a required field + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s DeletePolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeletePolicyVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeletePolicyVersionInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.VersionId == nil { + invalidParams.Add(aws.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyVersionOutput +type DeletePolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeletePolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeletePolicyVersion = "DeletePolicyVersion" + +// DeletePolicyVersionRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified version from the specified managed policy. +// +// You cannot delete the default version from a policy using this API. To delete +// the default version from a policy, use DeletePolicy. To find out which version +// of a policy is marked as the default version, use ListPolicyVersions. +// +// For information about versions for managed policies, see Versioning for Managed +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// // Example sending a request using DeletePolicyVersionRequest. 
+// req := client.DeletePolicyVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletePolicyVersion +func (c *Client) DeletePolicyVersionRequest(input *DeletePolicyVersionInput) DeletePolicyVersionRequest { + op := &aws.Operation{ + Name: opDeletePolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeletePolicyVersionInput{} + } + + req := c.newRequest(op, input, &DeletePolicyVersionOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeletePolicyVersionRequest{Request: req, Input: input, Copy: c.DeletePolicyVersionRequest} +} + +// DeletePolicyVersionRequest is the request type for the +// DeletePolicyVersion API operation. +type DeletePolicyVersionRequest struct { + *aws.Request + Input *DeletePolicyVersionInput + Copy func(*DeletePolicyVersionInput) DeletePolicyVersionRequest +} + +// Send marshals and sends the DeletePolicyVersion API request. +func (r DeletePolicyVersionRequest) Send(ctx context.Context) (*DeletePolicyVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeletePolicyVersionResponse{ + DeletePolicyVersionOutput: r.Request.Data.(*DeletePolicyVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeletePolicyVersionResponse is the response type for the +// DeletePolicyVersion API operation. +type DeletePolicyVersionResponse struct { + *DeletePolicyVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeletePolicyVersion request. +func (r *DeletePolicyVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go new file mode 100644 index 00000000..ea6fc451 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRole.go @@ -0,0 +1,134 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRoleRequest +type DeleteRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the role to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
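+//
+// Validate can also be called directly to reject bad parameters before any
+// request is built; a sketch with a placeholder role name:
+//
+//    input := &DeleteRoleInput{RoleName: aws.String("example-role")}
+//    if err := input.Validate(); err != nil {
+//        // a missing or too-short field was caught client-side
+//    }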
+func (s *DeleteRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteRoleInput"} + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRoleOutput +type DeleteRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteRole = "DeleteRole" + +// DeleteRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified role. The role must not have any policies attached. +// For more information about roles, go to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Make sure that you do not have any Amazon EC2 instances running with the +// role you are about to delete. Deleting a role or instance profile that is +// associated with a running instance will break any applications running on +// the instance. +// +// // Example sending a request using DeleteRoleRequest. +// req := client.DeleteRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRole +func (c *Client) DeleteRoleRequest(input *DeleteRoleInput) DeleteRoleRequest { + op := &aws.Operation{ + Name: opDeleteRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRoleInput{} + } + + req := c.newRequest(op, input, &DeleteRoleOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteRoleRequest{Request: req, Input: input, Copy: c.DeleteRoleRequest} +} + +// DeleteRoleRequest is the request type for the +// DeleteRole API operation. +type DeleteRoleRequest struct { + *aws.Request + Input *DeleteRoleInput + Copy func(*DeleteRoleInput) DeleteRoleRequest +} + +// Send marshals and sends the DeleteRole API request. +func (r DeleteRoleRequest) Send(ctx context.Context) (*DeleteRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteRoleResponse{ + DeleteRoleOutput: r.Request.Data.(*DeleteRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteRoleResponse is the response type for the +// DeleteRole API operation. +type DeleteRoleResponse struct { + *DeleteRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteRole request. +func (r *DeleteRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go new file mode 100644 index 00000000..c27583f7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePermissionsBoundary.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. 
DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePermissionsBoundaryRequest +type DeleteRolePermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM role from which you want to + // remove the permissions boundary. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteRolePermissionsBoundaryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteRolePermissionsBoundaryInput"} + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePermissionsBoundaryOutput +type DeleteRolePermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteRolePermissionsBoundary = "DeleteRolePermissionsBoundary" + +// DeleteRolePermissionsBoundaryRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the permissions boundary for the specified IAM role. +// +// Deleting the permissions boundary for a role might increase its permissions. +// For example, it might allow anyone who assumes the role to perform all the +// actions granted in its permissions policies. +// +// // Example sending a request using DeleteRolePermissionsBoundaryRequest. +// req := client.DeleteRolePermissionsBoundaryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePermissionsBoundary +func (c *Client) DeleteRolePermissionsBoundaryRequest(input *DeleteRolePermissionsBoundaryInput) DeleteRolePermissionsBoundaryRequest { + op := &aws.Operation{ + Name: opDeleteRolePermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRolePermissionsBoundaryInput{} + } + + req := c.newRequest(op, input, &DeleteRolePermissionsBoundaryOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteRolePermissionsBoundaryRequest{Request: req, Input: input, Copy: c.DeleteRolePermissionsBoundaryRequest} +} + +// DeleteRolePermissionsBoundaryRequest is the request type for the +// DeleteRolePermissionsBoundary API operation. +type DeleteRolePermissionsBoundaryRequest struct { + *aws.Request + Input *DeleteRolePermissionsBoundaryInput + Copy func(*DeleteRolePermissionsBoundaryInput) DeleteRolePermissionsBoundaryRequest +} + +// Send marshals and sends the DeleteRolePermissionsBoundary API request. 
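+//
+// Send accepts any context.Context, so the call can be bounded or cancelled;
+// a sketch with a placeholder role name and an arbitrary timeout:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//    defer cancel()
+//    req := client.DeleteRolePermissionsBoundaryRequest(&DeleteRolePermissionsBoundaryInput{
+//        RoleName: aws.String("example-role"),
+//    })
+//    _, err := req.Send(ctx)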
+func (r DeleteRolePermissionsBoundaryRequest) Send(ctx context.Context) (*DeleteRolePermissionsBoundaryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteRolePermissionsBoundaryResponse{ + DeleteRolePermissionsBoundaryOutput: r.Request.Data.(*DeleteRolePermissionsBoundaryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteRolePermissionsBoundaryResponse is the response type for the +// DeleteRolePermissionsBoundary API operation. +type DeleteRolePermissionsBoundaryResponse struct { + *DeleteRolePermissionsBoundaryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteRolePermissionsBoundary request. +func (r *DeleteRolePermissionsBoundaryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go new file mode 100644 index 00000000..f9f81eb3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteRolePolicy.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePolicyRequest +type DeleteRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the inline policy to delete from the specified IAM role. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the role that the policy is + // embedded in. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
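+//
+// Both PolicyName and RoleName are required and must be at least one
+// character long; a sketch with placeholder names:
+//
+//    input := &DeleteRolePolicyInput{
+//        PolicyName: aws.String("example-inline-policy"),
+//        RoleName:   aws.String("example-role"),
+//    }
+//    err := input.Validate() // nil here, since both required fields are set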
+func (s *DeleteRolePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteRolePolicyInput"} + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePolicyOutput +type DeleteRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteRolePolicy = "DeleteRolePolicy" + +// DeleteRolePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified inline policy that is embedded in the specified IAM +// role. +// +// A role can also have managed policies attached to it. To detach a managed +// policy from a role, use DetachRolePolicy. For more information about policies, +// refer to Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using DeleteRolePolicyRequest. +// req := client.DeleteRolePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteRolePolicy +func (c *Client) DeleteRolePolicyRequest(input *DeleteRolePolicyInput) DeleteRolePolicyRequest { + op := &aws.Operation{ + Name: opDeleteRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteRolePolicyInput{} + } + + req := c.newRequest(op, input, &DeleteRolePolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteRolePolicyRequest{Request: req, Input: input, Copy: c.DeleteRolePolicyRequest} +} + +// DeleteRolePolicyRequest is the request type for the +// DeleteRolePolicy API operation. +type DeleteRolePolicyRequest struct { + *aws.Request + Input *DeleteRolePolicyInput + Copy func(*DeleteRolePolicyInput) DeleteRolePolicyRequest +} + +// Send marshals and sends the DeleteRolePolicy API request. +func (r DeleteRolePolicyRequest) Send(ctx context.Context) (*DeleteRolePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteRolePolicyResponse{ + DeleteRolePolicyOutput: r.Request.Data.(*DeleteRolePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteRolePolicyResponse is the response type for the +// DeleteRolePolicy API operation. +type DeleteRolePolicyResponse struct { + *DeleteRolePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteRolePolicy request. 
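+//
+// A sketch of reading the metadata after a successful call; RequestID is
+// assumed to be populated by the SDK's unmarshal-meta handlers:
+//
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp.SDKResponseMetdata().Request.RequestID)
+//    }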
+func (r *DeleteRolePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go new file mode 100644 index 00000000..d65c1d8d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSAMLProvider.go @@ -0,0 +1,131 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSAMLProviderRequest +type DeleteSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider to delete. + // + // SAMLProviderArn is a required field + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSAMLProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteSAMLProviderInput"} + + if s.SAMLProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSAMLProviderOutput +type DeleteSAMLProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteSAMLProvider = "DeleteSAMLProvider" + +// DeleteSAMLProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes a SAML provider resource in IAM. +// +// Deleting the provider resource from IAM does not update any roles that reference +// the SAML provider resource's ARN as a principal in their trust policies. +// Any attempt to assume a role that references a non-existent provider resource +// ARN fails. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// // Example sending a request using DeleteSAMLProviderRequest. 
+// req := client.DeleteSAMLProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSAMLProvider +func (c *Client) DeleteSAMLProviderRequest(input *DeleteSAMLProviderInput) DeleteSAMLProviderRequest { + op := &aws.Operation{ + Name: opDeleteSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSAMLProviderInput{} + } + + req := c.newRequest(op, input, &DeleteSAMLProviderOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteSAMLProviderRequest{Request: req, Input: input, Copy: c.DeleteSAMLProviderRequest} +} + +// DeleteSAMLProviderRequest is the request type for the +// DeleteSAMLProvider API operation. +type DeleteSAMLProviderRequest struct { + *aws.Request + Input *DeleteSAMLProviderInput + Copy func(*DeleteSAMLProviderInput) DeleteSAMLProviderRequest +} + +// Send marshals and sends the DeleteSAMLProvider API request. +func (r DeleteSAMLProviderRequest) Send(ctx context.Context) (*DeleteSAMLProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteSAMLProviderResponse{ + DeleteSAMLProviderOutput: r.Request.Data.(*DeleteSAMLProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteSAMLProviderResponse is the response type for the +// DeleteSAMLProvider API operation. +type DeleteSAMLProviderResponse struct { + *DeleteSAMLProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteSAMLProvider request. +func (r *DeleteSAMLProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go new file mode 100644 index 00000000..10c5f415 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSSHPublicKey.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSSHPublicKeyRequest +type DeleteSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteSSHPublicKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteSSHPublicKeyInput"} + + if s.SSHPublicKeyId == nil { + invalidParams.Add(aws.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSSHPublicKeyOutput +type DeleteSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteSSHPublicKey = "DeleteSSHPublicKey" + +// DeleteSSHPublicKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified SSH public key. +// +// The SSH public key deleted by this operation is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// // Example sending a request using DeleteSSHPublicKeyRequest. +// req := client.DeleteSSHPublicKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSSHPublicKey +func (c *Client) DeleteSSHPublicKeyRequest(input *DeleteSSHPublicKeyInput) DeleteSSHPublicKeyRequest { + op := &aws.Operation{ + Name: opDeleteSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSSHPublicKeyInput{} + } + + req := c.newRequest(op, input, &DeleteSSHPublicKeyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteSSHPublicKeyRequest{Request: req, Input: input, Copy: c.DeleteSSHPublicKeyRequest} +} + +// DeleteSSHPublicKeyRequest is the request type for the +// DeleteSSHPublicKey API operation. +type DeleteSSHPublicKeyRequest struct { + *aws.Request + Input *DeleteSSHPublicKeyInput + Copy func(*DeleteSSHPublicKeyInput) DeleteSSHPublicKeyRequest +} + +// Send marshals and sends the DeleteSSHPublicKey API request. 
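+//
+// A minimal calling sketch; the key ID and user name are placeholders in the
+// documented formats:
+//
+//    req := client.DeleteSSHPublicKeyRequest(&DeleteSSHPublicKeyInput{
+//        SSHPublicKeyId: aws.String("APKAEIBAERJR2EXAMPLE"),
+//        UserName:       aws.String("example-user"),
+//    })
+//    if _, err := req.Send(context.TODO()); err != nil {
+//        // handle the error
+//    }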
+func (r DeleteSSHPublicKeyRequest) Send(ctx context.Context) (*DeleteSSHPublicKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteSSHPublicKeyResponse{ + DeleteSSHPublicKeyOutput: r.Request.Data.(*DeleteSSHPublicKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteSSHPublicKeyResponse is the response type for the +// DeleteSSHPublicKey API operation. +type DeleteSSHPublicKeyResponse struct { + *DeleteSSHPublicKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteSSHPublicKey request. +func (r *DeleteSSHPublicKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go new file mode 100644 index 00000000..1a74adae --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServerCertificate.go @@ -0,0 +1,143 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServerCertificateRequest +type DeleteServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServerCertificateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteServerCertificateInput"} + + if s.ServerCertificateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServerCertificateOutput +type DeleteServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteServerCertificate = "DeleteServerCertificate" + +// DeleteServerCertificateRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified server certificate. 
+// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic also includes a list of AWS services that +// can use the server certificates that you manage with IAM. +// +// If you are using a server certificate with Elastic Load Balancing, deleting +// the certificate could have implications for your application. If Elastic +// Load Balancing doesn't detect the deletion of bound certificates, it may +// continue to use the certificates. This could cause Elastic Load Balancing +// to stop accepting traffic. We recommend that you remove the reference to +// the certificate from Elastic Load Balancing before using this command to +// delete the certificate. For more information, go to DeleteLoadBalancerListeners +// (https://docs.aws.amazon.com/ElasticLoadBalancing/latest/APIReference/API_DeleteLoadBalancerListeners.html) +// in the Elastic Load Balancing API Reference. +// +// // Example sending a request using DeleteServerCertificateRequest. +// req := client.DeleteServerCertificateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServerCertificate +func (c *Client) DeleteServerCertificateRequest(input *DeleteServerCertificateInput) DeleteServerCertificateRequest { + op := &aws.Operation{ + Name: opDeleteServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServerCertificateInput{} + } + + req := c.newRequest(op, input, &DeleteServerCertificateOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteServerCertificateRequest{Request: req, Input: input, Copy: c.DeleteServerCertificateRequest} +} + +// DeleteServerCertificateRequest is the request type for the +// DeleteServerCertificate API operation. +type DeleteServerCertificateRequest struct { + *aws.Request + Input *DeleteServerCertificateInput + Copy func(*DeleteServerCertificateInput) DeleteServerCertificateRequest +} + +// Send marshals and sends the DeleteServerCertificate API request. +func (r DeleteServerCertificateRequest) Send(ctx context.Context) (*DeleteServerCertificateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteServerCertificateResponse{ + DeleteServerCertificateOutput: r.Request.Data.(*DeleteServerCertificateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteServerCertificateResponse is the response type for the +// DeleteServerCertificate API operation. +type DeleteServerCertificateResponse struct { + *DeleteServerCertificateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteServerCertificate request. 
+func (r *DeleteServerCertificateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go new file mode 100644 index 00000000..d0b02ac6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceLinkedRole.go @@ -0,0 +1,145 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRoleRequest +type DeleteServiceLinkedRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the service-linked role to be deleted. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceLinkedRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceLinkedRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteServiceLinkedRoleInput"} + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRoleResponse +type DeleteServiceLinkedRoleOutput struct { + _ struct{} `type:"structure"` + + // The deletion task identifier that you can use to check the status of the + // deletion. This identifier is returned in the format task/aws-service-role///. + // + // DeletionTaskId is a required field + DeletionTaskId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteServiceLinkedRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteServiceLinkedRole = "DeleteServiceLinkedRole" + +// DeleteServiceLinkedRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Submits a service-linked role deletion request and returns a DeletionTaskId, +// which you can use to check the status of the deletion. Before you call this +// operation, confirm that the role has no active sessions and that any resources +// used by the role in the linked service are deleted. If you call this operation +// more than once for the same service-linked role and an earlier deletion task +// is not complete, then the DeletionTaskId of the earlier request is returned. +// +// If you submit a deletion request for a service-linked role whose linked service +// is still accessing a resource, then the deletion task fails. If it fails, +// the GetServiceLinkedRoleDeletionStatus API operation returns the reason for +// the failure, usually including the resources that must be deleted. To delete +// the service-linked role, you must first remove those resources from the linked +// service and then submit the deletion request again. Resources are specific +// to the service that is linked to the role. 
For more information about removing +// resources from a service, see the AWS documentation (http://docs.aws.amazon.com/) +// for your service. +// +// For more information about service-linked roles, see Roles Terms and Concepts: +// AWS Service-Linked Role (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_terms-and-concepts.html#iam-term-service-linked-role) +// in the IAM User Guide. +// +// // Example sending a request using DeleteServiceLinkedRoleRequest. +// req := client.DeleteServiceLinkedRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceLinkedRole +func (c *Client) DeleteServiceLinkedRoleRequest(input *DeleteServiceLinkedRoleInput) DeleteServiceLinkedRoleRequest { + op := &aws.Operation{ + Name: opDeleteServiceLinkedRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceLinkedRoleInput{} + } + + req := c.newRequest(op, input, &DeleteServiceLinkedRoleOutput{}) + return DeleteServiceLinkedRoleRequest{Request: req, Input: input, Copy: c.DeleteServiceLinkedRoleRequest} +} + +// DeleteServiceLinkedRoleRequest is the request type for the +// DeleteServiceLinkedRole API operation. +type DeleteServiceLinkedRoleRequest struct { + *aws.Request + Input *DeleteServiceLinkedRoleInput + Copy func(*DeleteServiceLinkedRoleInput) DeleteServiceLinkedRoleRequest +} + +// Send marshals and sends the DeleteServiceLinkedRole API request. +func (r DeleteServiceLinkedRoleRequest) Send(ctx context.Context) (*DeleteServiceLinkedRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteServiceLinkedRoleResponse{ + DeleteServiceLinkedRoleOutput: r.Request.Data.(*DeleteServiceLinkedRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteServiceLinkedRoleResponse is the response type for the +// DeleteServiceLinkedRole API operation. +type DeleteServiceLinkedRoleResponse struct { + *DeleteServiceLinkedRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteServiceLinkedRole request. +func (r *DeleteServiceLinkedRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go new file mode 100644 index 00000000..ae863597 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteServiceSpecificCredential.go @@ -0,0 +1,141 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceSpecificCredentialRequest +type DeleteServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the service-specific credential. You can get this + // value by calling ListServiceSpecificCredentials. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // ServiceSpecificCredentialId is a required field + ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the service-specific credential. + // If this value is not specified, then the operation assumes the user whose + // credentials are used to call the operation. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteServiceSpecificCredentialInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteServiceSpecificCredentialInput"} + + if s.ServiceSpecificCredentialId == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceSpecificCredentialId")) + } + if s.ServiceSpecificCredentialId != nil && len(*s.ServiceSpecificCredentialId) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("ServiceSpecificCredentialId", 20)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceSpecificCredentialOutput +type DeleteServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteServiceSpecificCredential = "DeleteServiceSpecificCredential" + +// DeleteServiceSpecificCredentialRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified service-specific credential. +// +// // Example sending a request using DeleteServiceSpecificCredentialRequest. +// req := client.DeleteServiceSpecificCredentialRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteServiceSpecificCredential +func (c *Client) DeleteServiceSpecificCredentialRequest(input *DeleteServiceSpecificCredentialInput) DeleteServiceSpecificCredentialRequest { + op := &aws.Operation{ + Name: opDeleteServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteServiceSpecificCredentialInput{} + } + + req := c.newRequest(op, input, &DeleteServiceSpecificCredentialOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteServiceSpecificCredentialRequest{Request: req, Input: input, Copy: c.DeleteServiceSpecificCredentialRequest} +} + +// DeleteServiceSpecificCredentialRequest is the request type for the +// DeleteServiceSpecificCredential API operation. 
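+//
+// The Copy field re-invokes the client constructor, so it yields a fresh
+// request value for the same input; a sketch for issuing the operation a
+// second time without reusing the already-sent request:
+//
+//    req := client.DeleteServiceSpecificCredentialRequest(input)
+//    // ... req.Send(ctx) ...
+//    retry := req.Copy(req.Input) // a new, unsent request value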
+type DeleteServiceSpecificCredentialRequest struct { + *aws.Request + Input *DeleteServiceSpecificCredentialInput + Copy func(*DeleteServiceSpecificCredentialInput) DeleteServiceSpecificCredentialRequest +} + +// Send marshals and sends the DeleteServiceSpecificCredential API request. +func (r DeleteServiceSpecificCredentialRequest) Send(ctx context.Context) (*DeleteServiceSpecificCredentialResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteServiceSpecificCredentialResponse{ + DeleteServiceSpecificCredentialOutput: r.Request.Data.(*DeleteServiceSpecificCredentialOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteServiceSpecificCredentialResponse is the response type for the +// DeleteServiceSpecificCredential API operation. +type DeleteServiceSpecificCredentialResponse struct { + *DeleteServiceSpecificCredentialOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteServiceSpecificCredential request. +func (r *DeleteServiceSpecificCredentialResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go new file mode 100644 index 00000000..b490ad71 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteSigningCertificate.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSigningCertificateRequest +type DeleteSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate to delete. + // + // The format of this parameter, as described by its regex (http://wikipedia.org/wiki/regex) + // pattern, is a string of characters that can be upper- or lower-cased letters + // or digits. + // + // CertificateId is a required field + CertificateId *string `min:"24" type:"string" required:"true"` + + // The name of the user the signing certificate belongs to. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s DeleteSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
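+//
+// CertificateId must be at least 24 characters, and UserName, when set, must
+// be non-empty; a sketch with a placeholder certificate ID:
+//
+//    input := &DeleteSigningCertificateInput{
+//        CertificateId: aws.String("TA7SMP42TDN5Z26OBPJE7EXAMPLE"),
+//    }
+//    err := input.Validate() // nil; the ID meets the 24-character minimum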
+func (s *DeleteSigningCertificateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteSigningCertificateInput"} + + if s.CertificateId == nil { + invalidParams.Add(aws.NewErrParamRequired("CertificateId")) + } + if s.CertificateId != nil && len(*s.CertificateId) < 24 { + invalidParams.Add(aws.NewErrParamMinLen("CertificateId", 24)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSigningCertificateOutput +type DeleteSigningCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteSigningCertificate = "DeleteSigningCertificate" + +// DeleteSigningCertificateRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes a signing certificate associated with the specified IAM user. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID signing the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated IAM users. +// +// // Example sending a request using DeleteSigningCertificateRequest. +// req := client.DeleteSigningCertificateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteSigningCertificate +func (c *Client) DeleteSigningCertificateRequest(input *DeleteSigningCertificateInput) DeleteSigningCertificateRequest { + op := &aws.Operation{ + Name: opDeleteSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteSigningCertificateInput{} + } + + req := c.newRequest(op, input, &DeleteSigningCertificateOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteSigningCertificateRequest{Request: req, Input: input, Copy: c.DeleteSigningCertificateRequest} +} + +// DeleteSigningCertificateRequest is the request type for the +// DeleteSigningCertificate API operation. +type DeleteSigningCertificateRequest struct { + *aws.Request + Input *DeleteSigningCertificateInput + Copy func(*DeleteSigningCertificateInput) DeleteSigningCertificateRequest +} + +// Send marshals and sends the DeleteSigningCertificate API request. +func (r DeleteSigningCertificateRequest) Send(ctx context.Context) (*DeleteSigningCertificateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteSigningCertificateResponse{ + DeleteSigningCertificateOutput: r.Request.Data.(*DeleteSigningCertificateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteSigningCertificateResponse is the response type for the +// DeleteSigningCertificate API operation. +type DeleteSigningCertificateResponse struct { + *DeleteSigningCertificateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteSigningCertificate request. 
+func (r *DeleteSigningCertificateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go new file mode 100644 index 00000000..c28b5894 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUser.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserRequest +type DeleteUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteUserInput"} + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserOutput +type DeleteUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteUser = "DeleteUser" + +// DeleteUserRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified IAM user. Unlike the AWS Management Console, when you +// delete a user programmatically, you must delete the items attached to the +// user manually, or the deletion fails. For more information, see Deleting +// an IAM User (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_deleting_cli). +// Before attempting to delete a user, remove the following items: +// +// * Password (DeleteLoginProfile) +// +// * Access keys (DeleteAccessKey) +// +// * Signing certificate (DeleteSigningCertificate) +// +// * SSH public key (DeleteSSHPublicKey) +// +// * Git credentials (DeleteServiceSpecificCredential) +// +// * Multi-factor authentication (MFA) device (DeactivateMFADevice, DeleteVirtualMFADevice) +// +// * Inline policies (DeleteUserPolicy) +// +// * Attached managed policies (DetachUserPolicy) +// +// * Group memberships (RemoveUserFromGroup) +// +// // Example sending a request using DeleteUserRequest. 
+// req := client.DeleteUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUser +func (c *Client) DeleteUserRequest(input *DeleteUserInput) DeleteUserRequest { + op := &aws.Operation{ + Name: opDeleteUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserInput{} + } + + req := c.newRequest(op, input, &DeleteUserOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteUserRequest{Request: req, Input: input, Copy: c.DeleteUserRequest} +} + +// DeleteUserRequest is the request type for the +// DeleteUser API operation. +type DeleteUserRequest struct { + *aws.Request + Input *DeleteUserInput + Copy func(*DeleteUserInput) DeleteUserRequest +} + +// Send marshals and sends the DeleteUser API request. +func (r DeleteUserRequest) Send(ctx context.Context) (*DeleteUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteUserResponse{ + DeleteUserOutput: r.Request.Data.(*DeleteUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteUserResponse is the response type for the +// DeleteUser API operation. +type DeleteUserResponse struct { + *DeleteUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteUser request. +func (r *DeleteUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go new file mode 100644 index 00000000..c783b4e6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPermissionsBoundary.go @@ -0,0 +1,129 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPermissionsBoundaryRequest +type DeleteUserPermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM user from which you want to + // remove the permissions boundary. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
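
// Editor's note (not part of the vendored patch): a hedged sketch of the
// teardown order that the DeleteUser doc comment above requires. Only the first
// two prerequisites (login profile and access keys) are shown; the remaining
// items follow the same request pattern. NoSuchEntity handling is omitted for
// brevity, and the package name and user name are hypothetical.
package iamsketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func deleteUserAndPrerequisites(ctx context.Context, svc *iam.Client, user string) error {
	// Password: remove the console login profile first.
	if _, err := svc.DeleteLoginProfileRequest(&iam.DeleteLoginProfileInput{
		UserName: aws.String(user),
	}).Send(ctx); err != nil {
		return err
	}
	// Access keys: list them, then delete each one.
	keys, err := svc.ListAccessKeysRequest(&iam.ListAccessKeysInput{
		UserName: aws.String(user),
	}).Send(ctx)
	if err != nil {
		return err
	}
	for _, md := range keys.AccessKeyMetadata {
		if _, err := svc.DeleteAccessKeyRequest(&iam.DeleteAccessKeyInput{
			UserName:    aws.String(user),
			AccessKeyId: md.AccessKeyId,
		}).Send(ctx); err != nil {
			return err
		}
	}
	// Signing certificates, SSH keys, Git credentials, MFA devices, inline and
	// attached policies, and group memberships would be removed the same way,
	// and only then does the final call succeed.
	_, err = svc.DeleteUserRequest(&iam.DeleteUserInput{
		UserName: aws.String(user),
	}).Send(ctx)
	return err
}
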
+func (s *DeleteUserPermissionsBoundaryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteUserPermissionsBoundaryInput"} + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPermissionsBoundaryOutput +type DeleteUserPermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteUserPermissionsBoundary = "DeleteUserPermissionsBoundary" + +// DeleteUserPermissionsBoundaryRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the permissions boundary for the specified IAM user. +// +// Deleting the permissions boundary for a user might increase its permissions +// by allowing the user to perform all the actions granted in its permissions +// policies. +// +// // Example sending a request using DeleteUserPermissionsBoundaryRequest. +// req := client.DeleteUserPermissionsBoundaryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPermissionsBoundary +func (c *Client) DeleteUserPermissionsBoundaryRequest(input *DeleteUserPermissionsBoundaryInput) DeleteUserPermissionsBoundaryRequest { + op := &aws.Operation{ + Name: opDeleteUserPermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DeleteUserPermissionsBoundaryInput{} + } + + req := c.newRequest(op, input, &DeleteUserPermissionsBoundaryOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DeleteUserPermissionsBoundaryRequest{Request: req, Input: input, Copy: c.DeleteUserPermissionsBoundaryRequest} +} + +// DeleteUserPermissionsBoundaryRequest is the request type for the +// DeleteUserPermissionsBoundary API operation. +type DeleteUserPermissionsBoundaryRequest struct { + *aws.Request + Input *DeleteUserPermissionsBoundaryInput + Copy func(*DeleteUserPermissionsBoundaryInput) DeleteUserPermissionsBoundaryRequest +} + +// Send marshals and sends the DeleteUserPermissionsBoundary API request. +func (r DeleteUserPermissionsBoundaryRequest) Send(ctx context.Context) (*DeleteUserPermissionsBoundaryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DeleteUserPermissionsBoundaryResponse{ + DeleteUserPermissionsBoundaryOutput: r.Request.Data.(*DeleteUserPermissionsBoundaryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DeleteUserPermissionsBoundaryResponse is the response type for the +// DeleteUserPermissionsBoundary API operation. +type DeleteUserPermissionsBoundaryResponse struct { + *DeleteUserPermissionsBoundaryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DeleteUserPermissionsBoundary request. 
+func (r *DeleteUserPermissionsBoundaryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go new file mode 100644 index 00000000..9c7238de --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteUserPolicy.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPolicyRequest +type DeleteUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name identifying the policy document to delete. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name (friendly name, not ARN) identifying the user that the policy is + // embedded in. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteUserPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DeleteUserPolicyInput"} + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPolicyOutput +type DeleteUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDeleteUserPolicy = "DeleteUserPolicy" + +// DeleteUserPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Deletes the specified inline policy that is embedded in the specified IAM +// user. +// +// A user can also have managed policies attached to it. To detach a managed +// policy from a user, use DetachUserPolicy. 
+// For more information about policies,
+// refer to Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using DeleteUserPolicyRequest.
+// req := client.DeleteUserPolicyRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteUserPolicy
+func (c *Client) DeleteUserPolicyRequest(input *DeleteUserPolicyInput) DeleteUserPolicyRequest {
+	op := &aws.Operation{
+		Name:       opDeleteUserPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteUserPolicyInput{}
+	}
+
+	req := c.newRequest(op, input, &DeleteUserPolicyOutput{})
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return DeleteUserPolicyRequest{Request: req, Input: input, Copy: c.DeleteUserPolicyRequest}
+}
+
+// DeleteUserPolicyRequest is the request type for the
+// DeleteUserPolicy API operation.
+type DeleteUserPolicyRequest struct {
+	*aws.Request
+	Input *DeleteUserPolicyInput
+	Copy  func(*DeleteUserPolicyInput) DeleteUserPolicyRequest
+}
+
+// Send marshals and sends the DeleteUserPolicy API request.
+func (r DeleteUserPolicyRequest) Send(ctx context.Context) (*DeleteUserPolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &DeleteUserPolicyResponse{
+		DeleteUserPolicyOutput: r.Request.Data.(*DeleteUserPolicyOutput),
+		response:               &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// DeleteUserPolicyResponse is the response type for the
+// DeleteUserPolicy API operation.
+type DeleteUserPolicyResponse struct {
+	*DeleteUserPolicyOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// DeleteUserPolicy request.
+func (r *DeleteUserPolicyResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go
new file mode 100644
index 00000000..bb1fad9c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DeleteVirtualMFADevice.go
@@ -0,0 +1,132 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/private/protocol"
+	"github.com/aws/aws-sdk-go-v2/private/protocol/query"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteVirtualMFADeviceRequest
+type DeleteVirtualMFADeviceInput struct {
+	_ struct{} `type:"structure"`
+
+	// The serial number that uniquely identifies the MFA device. For virtual MFA
+	// devices, the serial number is the same as the ARN.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces.
+	// You can also include any of the following characters: =,.@:/-
+	//
+	// SerialNumber is a required field
+	SerialNumber *string `min:"9" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteVirtualMFADeviceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteVirtualMFADeviceInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "DeleteVirtualMFADeviceInput"}
+
+	if s.SerialNumber == nil {
+		invalidParams.Add(aws.NewErrParamRequired("SerialNumber"))
+	}
+	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+		invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteVirtualMFADeviceOutput
+type DeleteVirtualMFADeviceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteVirtualMFADeviceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opDeleteVirtualMFADevice = "DeleteVirtualMFADevice"
+
+// DeleteVirtualMFADeviceRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Deletes a virtual MFA device.
+//
+// You must deactivate a user's virtual MFA device before you can delete it.
+// For information about deactivating MFA devices, see DeactivateMFADevice.
+//
+// // Example sending a request using DeleteVirtualMFADeviceRequest.
+// req := client.DeleteVirtualMFADeviceRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeleteVirtualMFADevice
+func (c *Client) DeleteVirtualMFADeviceRequest(input *DeleteVirtualMFADeviceInput) DeleteVirtualMFADeviceRequest {
+	op := &aws.Operation{
+		Name:       opDeleteVirtualMFADevice,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DeleteVirtualMFADeviceInput{}
+	}
+
+	req := c.newRequest(op, input, &DeleteVirtualMFADeviceOutput{})
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return DeleteVirtualMFADeviceRequest{Request: req, Input: input, Copy: c.DeleteVirtualMFADeviceRequest}
+}
+
+// DeleteVirtualMFADeviceRequest is the request type for the
+// DeleteVirtualMFADevice API operation.
+type DeleteVirtualMFADeviceRequest struct {
+	*aws.Request
+	Input *DeleteVirtualMFADeviceInput
+	Copy  func(*DeleteVirtualMFADeviceInput) DeleteVirtualMFADeviceRequest
+}
+
+// Send marshals and sends the DeleteVirtualMFADevice API request.
+func (r DeleteVirtualMFADeviceRequest) Send(ctx context.Context) (*DeleteVirtualMFADeviceResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &DeleteVirtualMFADeviceResponse{
+		DeleteVirtualMFADeviceOutput: r.Request.Data.(*DeleteVirtualMFADeviceOutput),
+		response:                     &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// DeleteVirtualMFADeviceResponse is the response type for the
+// DeleteVirtualMFADevice API operation.
+type DeleteVirtualMFADeviceResponse struct {
+	*DeleteVirtualMFADeviceOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// DeleteVirtualMFADevice request.
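
// Editor's note (not part of the vendored patch): the DeleteVirtualMFADevice
// docs above require the device to be deactivated before deletion. A hedged
// sketch of that two-step flow; DeactivateMFADevice is referenced by the
// generated docs and assumed to exist in this SDK snapshot, and the package
// name is hypothetical.
package iamsketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func removeVirtualMFADevice(ctx context.Context, svc *iam.Client, user, serialARN string) error {
	// Step 1: detach the still-active device from its user.
	if _, err := svc.DeactivateMFADeviceRequest(&iam.DeactivateMFADeviceInput{
		UserName:     aws.String(user),
		SerialNumber: aws.String(serialARN), // for virtual devices this is the ARN
	}).Send(ctx); err != nil {
		return err
	}
	// Step 2: delete the now-inactive virtual device.
	_, err := svc.DeleteVirtualMFADeviceRequest(&iam.DeleteVirtualMFADeviceInput{
		SerialNumber: aws.String(serialARN),
	}).Send(ctx)
	return err
}
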
+func (r *DeleteVirtualMFADeviceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go new file mode 100644 index 00000000..f58bf7f4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachGroupPolicy.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachGroupPolicyRequest +type DetachGroupPolicyInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the IAM group to detach the policy from. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachGroupPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DetachGroupPolicyInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachGroupPolicyOutput +type DetachGroupPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDetachGroupPolicy = "DetachGroupPolicy" + +// DetachGroupPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified managed policy from the specified IAM group. +// +// A group can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteGroupPolicy API. For information about policies, see +// Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. 
+// +// // Example sending a request using DetachGroupPolicyRequest. +// req := client.DetachGroupPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachGroupPolicy +func (c *Client) DetachGroupPolicyRequest(input *DetachGroupPolicyInput) DetachGroupPolicyRequest { + op := &aws.Operation{ + Name: opDetachGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DetachGroupPolicyInput{} + } + + req := c.newRequest(op, input, &DetachGroupPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return DetachGroupPolicyRequest{Request: req, Input: input, Copy: c.DetachGroupPolicyRequest} +} + +// DetachGroupPolicyRequest is the request type for the +// DetachGroupPolicy API operation. +type DetachGroupPolicyRequest struct { + *aws.Request + Input *DetachGroupPolicyInput + Copy func(*DetachGroupPolicyInput) DetachGroupPolicyRequest +} + +// Send marshals and sends the DetachGroupPolicy API request. +func (r DetachGroupPolicyRequest) Send(ctx context.Context) (*DetachGroupPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &DetachGroupPolicyResponse{ + DetachGroupPolicyOutput: r.Request.Data.(*DetachGroupPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// DetachGroupPolicyResponse is the response type for the +// DetachGroupPolicy API operation. +type DetachGroupPolicyResponse struct { + *DetachGroupPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DetachGroupPolicy request. +func (r *DetachGroupPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go new file mode 100644 index 00000000..76220b7e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachRolePolicy.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachRolePolicyRequest +type DetachRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM role to detach the policy from. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
+	// You can also include any of the following characters: _+=,.@-
+	//
+	// RoleName is a required field
+	RoleName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DetachRolePolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DetachRolePolicyInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "DetachRolePolicyInput"}
+
+	if s.PolicyArn == nil {
+		invalidParams.Add(aws.NewErrParamRequired("PolicyArn"))
+	}
+	if s.PolicyArn != nil && len(*s.PolicyArn) < 20 {
+		invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20))
+	}
+
+	if s.RoleName == nil {
+		invalidParams.Add(aws.NewErrParamRequired("RoleName"))
+	}
+	if s.RoleName != nil && len(*s.RoleName) < 1 {
+		invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachRolePolicyOutput
+type DetachRolePolicyOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DetachRolePolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opDetachRolePolicy = "DetachRolePolicy"
+
+// DetachRolePolicyRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Removes the specified managed policy from the specified role.
+//
+// A role can also have inline policies embedded with it. To delete an inline
+// policy, use the DeleteRolePolicy API. For information about policies, see
+// Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using DetachRolePolicyRequest.
+// req := client.DetachRolePolicyRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachRolePolicy
+func (c *Client) DetachRolePolicyRequest(input *DetachRolePolicyInput) DetachRolePolicyRequest {
+	op := &aws.Operation{
+		Name:       opDetachRolePolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DetachRolePolicyInput{}
+	}
+
+	req := c.newRequest(op, input, &DetachRolePolicyOutput{})
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return DetachRolePolicyRequest{Request: req, Input: input, Copy: c.DetachRolePolicyRequest}
+}
+
+// DetachRolePolicyRequest is the request type for the
+// DetachRolePolicy API operation.
+type DetachRolePolicyRequest struct {
+	*aws.Request
+	Input *DetachRolePolicyInput
+	Copy  func(*DetachRolePolicyInput) DetachRolePolicyRequest
+}
+
+// Send marshals and sends the DetachRolePolicy API request.
+func (r DetachRolePolicyRequest) Send(ctx context.Context) (*DetachRolePolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &DetachRolePolicyResponse{
+		DetachRolePolicyOutput: r.Request.Data.(*DetachRolePolicyOutput),
+		response:               &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// DetachRolePolicyResponse is the response type for the
+// DetachRolePolicy API operation.
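
// Editor's note (not part of the vendored patch): the DetachRolePolicy docs
// above distinguish managed policies (detached by ARN) from inline policies
// (deleted by name via DeleteRolePolicy). A hedged sketch of both calls; the
// role, policy name, ARN, and package name are hypothetical placeholders.
package iamsketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func stripRolePolicies(ctx context.Context, svc *iam.Client) error {
	// A managed policy is a standalone resource: detach it, don't delete it.
	if _, err := svc.DetachRolePolicyRequest(&iam.DetachRolePolicyInput{
		RoleName:  aws.String("example-role"),
		PolicyArn: aws.String("arn:aws:iam::123456789012:policy/example-policy"),
	}).Send(ctx); err != nil {
		return err
	}
	// An inline policy is embedded in the role, so it is deleted by name.
	_, err := svc.DeleteRolePolicyRequest(&iam.DeleteRolePolicyInput{
		RoleName:   aws.String("example-role"),
		PolicyName: aws.String("example-inline-policy"),
	}).Send(ctx)
	return err
}
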
+type DetachRolePolicyResponse struct { + *DetachRolePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// DetachRolePolicy request. +func (r *DetachRolePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go new file mode 100644 index 00000000..308da852 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_DetachUserPolicy.go @@ -0,0 +1,149 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachUserPolicyRequest +type DetachUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy you want to detach. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user to detach the policy from. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DetachUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DetachUserPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "DetachUserPolicyInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachUserPolicyOutput +type DetachUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DetachUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opDetachUserPolicy = "DetachUserPolicy" + +// DetachUserPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified managed policy from the specified user. +// +// A user can also have inline policies embedded with it. To delete an inline +// policy, use the DeleteUserPolicy API. 
+// For information about policies, see
+// Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using DetachUserPolicyRequest.
+// req := client.DetachUserPolicyRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DetachUserPolicy
+func (c *Client) DetachUserPolicyRequest(input *DetachUserPolicyInput) DetachUserPolicyRequest {
+	op := &aws.Operation{
+		Name:       opDetachUserPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &DetachUserPolicyInput{}
+	}
+
+	req := c.newRequest(op, input, &DetachUserPolicyOutput{})
+	req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+	req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+	return DetachUserPolicyRequest{Request: req, Input: input, Copy: c.DetachUserPolicyRequest}
+}
+
+// DetachUserPolicyRequest is the request type for the
+// DetachUserPolicy API operation.
+type DetachUserPolicyRequest struct {
+	*aws.Request
+	Input *DetachUserPolicyInput
+	Copy  func(*DetachUserPolicyInput) DetachUserPolicyRequest
+}
+
+// Send marshals and sends the DetachUserPolicy API request.
+func (r DetachUserPolicyRequest) Send(ctx context.Context) (*DetachUserPolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &DetachUserPolicyResponse{
+		DetachUserPolicyOutput: r.Request.Data.(*DetachUserPolicyOutput),
+		response:               &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// DetachUserPolicyResponse is the response type for the
+// DetachUserPolicy API operation.
+type DetachUserPolicyResponse struct {
+	*DetachUserPolicyOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// DetachUserPolicy request.
+func (r *DetachUserPolicyResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go
new file mode 100644
index 00000000..b635b2ac
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_EnableMFADevice.go
@@ -0,0 +1,189 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/private/protocol"
+	"github.com/aws/aws-sdk-go-v2/private/protocol/query"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EnableMFADeviceRequest
+type EnableMFADeviceInput struct {
+	_ struct{} `type:"structure"`
+
+	// An authentication code emitted by the device.
+	//
+	// The format for this parameter is a string of six digits.
+	//
+	// Submit your request immediately after generating the authentication codes.
+	// If you generate the codes and then wait too long to submit the request, the
+	// MFA device successfully associates with the user but the MFA device becomes
+	// out of sync. This happens because time-based one-time passwords (TOTP) expire
+	// after a short period of time.
+	// If this happens, you can resync the device
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_sync.html).
+	//
+	// AuthenticationCode1 is a required field
+	AuthenticationCode1 *string `min:"6" type:"string" required:"true"`
+
+	// A subsequent authentication code emitted by the device.
+	//
+	// The format for this parameter is a string of six digits.
+	//
+	// Submit your request immediately after generating the authentication codes.
+	// If you generate the codes and then wait too long to submit the request, the
+	// MFA device successfully associates with the user but the MFA device becomes
+	// out of sync. This happens because time-based one-time passwords (TOTP) expire
+	// after a short period of time. If this happens, you can resync the device
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_sync.html).
+	//
+	// AuthenticationCode2 is a required field
+	AuthenticationCode2 *string `min:"6" type:"string" required:"true"`
+
+	// The serial number that uniquely identifies the MFA device. For virtual MFA
+	// devices, the serial number is the device ARN.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: =,.@:/-
+	//
+	// SerialNumber is a required field
+	SerialNumber *string `min:"9" type:"string" required:"true"`
+
+	// The name of the IAM user for whom you want to enable the MFA device.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: _+=,.@-
+	//
+	// UserName is a required field
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s EnableMFADeviceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
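
// Editor's note (not part of the vendored patch): a hedged sketch of building
// the EnableMFADevice input described above. The two codes must be consecutive
// six-digit TOTP values from the device, submitted promptly so they do not
// drift out of sync; all names are hypothetical placeholders.
package iamsketch

import (
	"context"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func enableVirtualMFADevice(ctx context.Context, svc *iam.Client, code1, code2 string) error {
	input := &iam.EnableMFADeviceInput{
		UserName:            aws.String("example-user"),
		SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/example-user"),
		AuthenticationCode1: aws.String(code1), // first TOTP code
		AuthenticationCode2: aws.String(code2), // the next consecutive code
	}
	// Client-side checks (required fields, six-digit minimum lengths) run first.
	if err := input.Validate(); err != nil {
		return err
	}
	_, err := svc.EnableMFADeviceRequest(input).Send(ctx)
	return err
}
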
+func (s *EnableMFADeviceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "EnableMFADeviceInput"} + + if s.AuthenticationCode1 == nil { + invalidParams.Add(aws.NewErrParamRequired("AuthenticationCode1")) + } + if s.AuthenticationCode1 != nil && len(*s.AuthenticationCode1) < 6 { + invalidParams.Add(aws.NewErrParamMinLen("AuthenticationCode1", 6)) + } + + if s.AuthenticationCode2 == nil { + invalidParams.Add(aws.NewErrParamRequired("AuthenticationCode2")) + } + if s.AuthenticationCode2 != nil && len(*s.AuthenticationCode2) < 6 { + invalidParams.Add(aws.NewErrParamMinLen("AuthenticationCode2", 6)) + } + + if s.SerialNumber == nil { + invalidParams.Add(aws.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EnableMFADeviceOutput +type EnableMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s EnableMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +const opEnableMFADevice = "EnableMFADevice" + +// EnableMFADeviceRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Enables the specified MFA device and associates it with the specified IAM +// user. When enabled, the MFA device is required for every subsequent login +// by the IAM user associated with the device. +// +// // Example sending a request using EnableMFADeviceRequest. +// req := client.EnableMFADeviceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EnableMFADevice +func (c *Client) EnableMFADeviceRequest(input *EnableMFADeviceInput) EnableMFADeviceRequest { + op := &aws.Operation{ + Name: opEnableMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableMFADeviceInput{} + } + + req := c.newRequest(op, input, &EnableMFADeviceOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return EnableMFADeviceRequest{Request: req, Input: input, Copy: c.EnableMFADeviceRequest} +} + +// EnableMFADeviceRequest is the request type for the +// EnableMFADevice API operation. +type EnableMFADeviceRequest struct { + *aws.Request + Input *EnableMFADeviceInput + Copy func(*EnableMFADeviceInput) EnableMFADeviceRequest +} + +// Send marshals and sends the EnableMFADevice API request. +func (r EnableMFADeviceRequest) Send(ctx context.Context) (*EnableMFADeviceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &EnableMFADeviceResponse{ + EnableMFADeviceOutput: r.Request.Data.(*EnableMFADeviceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// EnableMFADeviceResponse is the response type for the +// EnableMFADevice API operation. 
+type EnableMFADeviceResponse struct { + *EnableMFADeviceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// EnableMFADevice request. +func (r *EnableMFADeviceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go new file mode 100644 index 00000000..c8b755e3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateCredentialReport.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateCredentialReportInput +type GenerateCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GenerateCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// Contains the response to a successful GenerateCredentialReport request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateCredentialReportResponse +type GenerateCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Information about the credential report. + Description *string `type:"string"` + + // Information about the state of the credential report. + State ReportStateType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s GenerateCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +const opGenerateCredentialReport = "GenerateCredentialReport" + +// GenerateCredentialReportRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Generates a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +// +// // Example sending a request using GenerateCredentialReportRequest. +// req := client.GenerateCredentialReportRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateCredentialReport +func (c *Client) GenerateCredentialReportRequest(input *GenerateCredentialReportInput) GenerateCredentialReportRequest { + op := &aws.Operation{ + Name: opGenerateCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateCredentialReportInput{} + } + + req := c.newRequest(op, input, &GenerateCredentialReportOutput{}) + return GenerateCredentialReportRequest{Request: req, Input: input, Copy: c.GenerateCredentialReportRequest} +} + +// GenerateCredentialReportRequest is the request type for the +// GenerateCredentialReport API operation. +type GenerateCredentialReportRequest struct { + *aws.Request + Input *GenerateCredentialReportInput + Copy func(*GenerateCredentialReportInput) GenerateCredentialReportRequest +} + +// Send marshals and sends the GenerateCredentialReport API request. 
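
// Editor's note (not part of the vendored patch): GenerateCredentialReport is
// asynchronous; the State field of the output reports progress. A hedged
// polling sketch, assuming the generated enum constant
// iam.ReportStateTypeComplete and the companion GetCredentialReport operation
// exist in this SDK snapshot; the package name is hypothetical.
package iamsketch

import (
	"context"
	"time"

	"github.com/aws/aws-sdk-go-v2/service/iam"
)

func fetchCredentialReport(ctx context.Context, svc *iam.Client) ([]byte, error) {
	// Re-issuing the request is how callers poll: it keeps returning the
	// report state until IAM marks the report COMPLETE.
	for {
		resp, err := svc.GenerateCredentialReportRequest(&iam.GenerateCredentialReportInput{}).Send(ctx)
		if err != nil {
			return nil, err
		}
		if resp.State == iam.ReportStateTypeComplete {
			break
		}
		time.Sleep(2 * time.Second) // crude fixed backoff, fine for a sketch
	}
	report, err := svc.GetCredentialReportRequest(&iam.GetCredentialReportInput{}).Send(ctx)
	if err != nil {
		return nil, err
	}
	return report.Content, nil // CSV bytes of the report
}
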
+func (r GenerateCredentialReportRequest) Send(ctx context.Context) (*GenerateCredentialReportResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GenerateCredentialReportResponse{ + GenerateCredentialReportOutput: r.Request.Data.(*GenerateCredentialReportOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GenerateCredentialReportResponse is the response type for the +// GenerateCredentialReport API operation. +type GenerateCredentialReportResponse struct { + *GenerateCredentialReportOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GenerateCredentialReport request. +func (r *GenerateCredentialReportResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go new file mode 100644 index 00000000..91482a85 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GenerateServiceLastAccessedDetails.go @@ -0,0 +1,175 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateServiceLastAccessedDetailsRequest +type GenerateServiceLastAccessedDetailsInput struct { + _ struct{} `type:"structure"` + + // The ARN of the IAM resource (user, group, role, or managed policy) used to + // generate information about when the resource was last used in an attempt + // to access an AWS service. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GenerateServiceLastAccessedDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GenerateServiceLastAccessedDetailsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GenerateServiceLastAccessedDetailsInput"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("Arn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateServiceLastAccessedDetailsResponse +type GenerateServiceLastAccessedDetailsOutput struct { + _ struct{} `type:"structure"` + + // The job ID that you can use in the GetServiceLastAccessedDetails or GetServiceLastAccessedDetailsWithEntities + // operations. + JobId *string `min:"36" type:"string"` +} + +// String returns the string representation +func (s GenerateServiceLastAccessedDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +const opGenerateServiceLastAccessedDetails = "GenerateServiceLastAccessedDetails" + +// GenerateServiceLastAccessedDetailsRequest returns a request value for making API operation for +// AWS Identity and Access Management. 
+// +// Generates a request for a report that includes details about when an IAM +// resource (user, group, role, or policy) was last used in an attempt to access +// AWS services. Recent activity usually appears within four hours. IAM reports +// activity for the last 365 days, or less if your Region began supporting this +// feature within the last year. For more information, see Regions Where Data +// Is Tracked (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#access-advisor_tracking-period). +// +// The service last accessed data includes all attempts to access an AWS API, +// not just the successful ones. This includes all attempts that were made using +// the AWS Management Console, the AWS API through any of the SDKs, or any of +// the command line tools. An unexpected entry in the service last accessed +// data does not mean that your account has been compromised, because the request +// might have been denied. Refer to your CloudTrail logs as the authoritative +// source for information about all API calls and whether they were successful +// or denied access. For more information, see Logging IAM Events with CloudTrail +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/cloudtrail-integration.html) +// in the IAM User Guide. +// +// The GenerateServiceLastAccessedDetails operation returns a JobId. Use this +// parameter in the following operations to retrieve the following details from +// your report: +// +// * GetServiceLastAccessedDetails – Use this operation for users, groups, +// roles, or policies to list every AWS service that the resource could access +// using permissions policies. For each service, the response includes information +// about the most recent access attempt. +// +// * GetServiceLastAccessedDetailsWithEntities – Use this operation for +// groups and policies to list information about the associated entities +// (users or roles) that attempted to access a specific AWS service. +// +// To check the status of the GenerateServiceLastAccessedDetails request, use +// the JobId parameter in the same operations and test the JobStatus response +// parameter. +// +// For additional information about the permissions policies that allow an identity +// (user, group, or role) to access specific services, use the ListPoliciesGrantingServiceAccess +// operation. +// +// Service last accessed data does not use other policy types when determining +// whether a resource could access a service. These other policy types include +// resource-based policies, access control lists, AWS Organizations policies, +// IAM permissions boundaries, and AWS STS assume role policies. It only applies +// permissions policy logic. For more about the evaluation of policy types, +// see Evaluating Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// For more information about service last accessed data, see Reducing Policy +// Scope by Viewing User Activity (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html) +// in the IAM User Guide. +// +// // Example sending a request using GenerateServiceLastAccessedDetailsRequest. 
+// req := client.GenerateServiceLastAccessedDetailsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GenerateServiceLastAccessedDetails +func (c *Client) GenerateServiceLastAccessedDetailsRequest(input *GenerateServiceLastAccessedDetailsInput) GenerateServiceLastAccessedDetailsRequest { + op := &aws.Operation{ + Name: opGenerateServiceLastAccessedDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GenerateServiceLastAccessedDetailsInput{} + } + + req := c.newRequest(op, input, &GenerateServiceLastAccessedDetailsOutput{}) + return GenerateServiceLastAccessedDetailsRequest{Request: req, Input: input, Copy: c.GenerateServiceLastAccessedDetailsRequest} +} + +// GenerateServiceLastAccessedDetailsRequest is the request type for the +// GenerateServiceLastAccessedDetails API operation. +type GenerateServiceLastAccessedDetailsRequest struct { + *aws.Request + Input *GenerateServiceLastAccessedDetailsInput + Copy func(*GenerateServiceLastAccessedDetailsInput) GenerateServiceLastAccessedDetailsRequest +} + +// Send marshals and sends the GenerateServiceLastAccessedDetails API request. +func (r GenerateServiceLastAccessedDetailsRequest) Send(ctx context.Context) (*GenerateServiceLastAccessedDetailsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GenerateServiceLastAccessedDetailsResponse{ + GenerateServiceLastAccessedDetailsOutput: r.Request.Data.(*GenerateServiceLastAccessedDetailsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GenerateServiceLastAccessedDetailsResponse is the response type for the +// GenerateServiceLastAccessedDetails API operation. +type GenerateServiceLastAccessedDetailsResponse struct { + *GenerateServiceLastAccessedDetailsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GenerateServiceLastAccessedDetails request. +func (r *GenerateServiceLastAccessedDetailsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go new file mode 100644 index 00000000..b4ce5ca5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccessKeyLastUsed.go @@ -0,0 +1,136 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsedRequest +type GetAccessKeyLastUsedInput struct { + _ struct{} `type:"structure"` + + // The identifier of an access key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. 
+ // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccessKeyLastUsedInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetAccessKeyLastUsedInput"} + + if s.AccessKeyId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(aws.NewErrParamMinLen("AccessKeyId", 16)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetAccessKeyLastUsed request. It is +// also returned as a member of the AccessKeyMetaData structure returned by +// the ListAccessKeys action. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsedResponse +type GetAccessKeyLastUsedOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the last time the access key was used. + AccessKeyLastUsed *AccessKeyLastUsed `type:"structure"` + + // The name of the AWS IAM user that owns this access key. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetAccessKeyLastUsedOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetAccessKeyLastUsed = "GetAccessKeyLastUsed" + +// GetAccessKeyLastUsedRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about when the specified access key was last used. +// The information includes the date and time of last use, along with the AWS +// service and Region that were specified in the last request made with that +// key. +// +// // Example sending a request using GetAccessKeyLastUsedRequest. +// req := client.GetAccessKeyLastUsedRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccessKeyLastUsed +func (c *Client) GetAccessKeyLastUsedRequest(input *GetAccessKeyLastUsedInput) GetAccessKeyLastUsedRequest { + op := &aws.Operation{ + Name: opGetAccessKeyLastUsed, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccessKeyLastUsedInput{} + } + + req := c.newRequest(op, input, &GetAccessKeyLastUsedOutput{}) + return GetAccessKeyLastUsedRequest{Request: req, Input: input, Copy: c.GetAccessKeyLastUsedRequest} +} + +// GetAccessKeyLastUsedRequest is the request type for the +// GetAccessKeyLastUsed API operation. +type GetAccessKeyLastUsedRequest struct { + *aws.Request + Input *GetAccessKeyLastUsedInput + Copy func(*GetAccessKeyLastUsedInput) GetAccessKeyLastUsedRequest +} + +// Send marshals and sends the GetAccessKeyLastUsed API request. +func (r GetAccessKeyLastUsedRequest) Send(ctx context.Context) (*GetAccessKeyLastUsedResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetAccessKeyLastUsedResponse{ + GetAccessKeyLastUsedOutput: r.Request.Data.(*GetAccessKeyLastUsedOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetAccessKeyLastUsedResponse is the response type for the +// GetAccessKeyLastUsed API operation. 
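+//
+// A hypothetical usage sketch, not part of the generated surface: assuming a
+// configured client named `client`, both fields on this response are optional
+// pointers and should be read with nil checks.
+//
+// input := &GetAccessKeyLastUsedInput{AccessKeyId: aws.String("AKIAIOSFODNN7EXAMPLE")}
+// resp, err := client.GetAccessKeyLastUsedRequest(input).Send(context.TODO())
+// if err == nil && resp.UserName != nil && resp.AccessKeyLastUsed != nil {
+//     fmt.Println(*resp.UserName, resp.AccessKeyLastUsed)
+// }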
+type GetAccessKeyLastUsedResponse struct { + *GetAccessKeyLastUsedOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetAccessKeyLastUsed request. +func (r *GetAccessKeyLastUsedResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go new file mode 100644 index 00000000..6ec3f4a5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountAuthorizationDetails.go @@ -0,0 +1,230 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountAuthorizationDetailsRequest +type GetAccountAuthorizationDetailsInput struct { + _ struct{} `type:"structure"` + + // A list of entity types used to filter the results. Only the entities that + // match the types you specify are included in the output. Use the value LocalManagedPolicy + // to include customer managed policies. + // + // The format for this parameter is a comma-separated (if more than one) list + // of strings. Each string value in the list must be one of the valid values + // listed below. + Filter []EntityType `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetAccountAuthorizationDetailsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetAccountAuthorizationDetailsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetAccountAuthorizationDetails request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountAuthorizationDetailsResponse +type GetAccountAuthorizationDetailsOutput struct { + _ struct{} `type:"structure"` + + // A list containing information about IAM groups. 
+ GroupDetailList []GroupDetail `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list containing information about managed policies. + Policies []ManagedPolicyDetail `type:"list"` + + // A list containing information about IAM roles. + RoleDetailList []RoleDetail `type:"list"` + + // A list containing information about IAM users. + UserDetailList []UserDetail `type:"list"` +} + +// String returns the string representation +func (s GetAccountAuthorizationDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetAccountAuthorizationDetails = "GetAccountAuthorizationDetails" + +// GetAccountAuthorizationDetailsRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about all IAM users, groups, roles, and policies in +// your AWS account, including their relationships to one another. Use this +// API to obtain a snapshot of the configuration of IAM permissions (users, +// groups, roles, and policies) in your account. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// You can optionally filter the results using the Filter parameter. You can +// paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using GetAccountAuthorizationDetailsRequest. +// req := client.GetAccountAuthorizationDetailsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountAuthorizationDetails +func (c *Client) GetAccountAuthorizationDetailsRequest(input *GetAccountAuthorizationDetailsInput) GetAccountAuthorizationDetailsRequest { + op := &aws.Operation{ + Name: opGetAccountAuthorizationDetails, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetAccountAuthorizationDetailsInput{} + } + + req := c.newRequest(op, input, &GetAccountAuthorizationDetailsOutput{}) + return GetAccountAuthorizationDetailsRequest{Request: req, Input: input, Copy: c.GetAccountAuthorizationDetailsRequest} +} + +// GetAccountAuthorizationDetailsRequest is the request type for the +// GetAccountAuthorizationDetails API operation. 
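+//
+// A hedged sketch of filtering the report: the doc above names the
+// LocalManagedPolicy filter value; the corresponding EntityType constant name
+// is assumed from this package's enums.
+//
+// input := &GetAccountAuthorizationDetailsInput{
+//     Filter: []EntityType{EntityTypeLocalManagedPolicy},
+// }
+// req := client.GetAccountAuthorizationDetailsRequest(input)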
+type GetAccountAuthorizationDetailsRequest struct {
+	*aws.Request
+	Input *GetAccountAuthorizationDetailsInput
+	Copy  func(*GetAccountAuthorizationDetailsInput) GetAccountAuthorizationDetailsRequest
+}
+
+// Send marshals and sends the GetAccountAuthorizationDetails API request.
+func (r GetAccountAuthorizationDetailsRequest) Send(ctx context.Context) (*GetAccountAuthorizationDetailsResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &GetAccountAuthorizationDetailsResponse{
+		GetAccountAuthorizationDetailsOutput: r.Request.Data.(*GetAccountAuthorizationDetailsOutput),
+		response: &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewGetAccountAuthorizationDetailsPaginator returns a paginator for GetAccountAuthorizationDetails.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next returns false if there are no more
+// pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.GetAccountAuthorizationDetailsRequest(input)
+// p := iam.NewGetAccountAuthorizationDetailsPaginator(req)
+//
+// for p.Next(context.TODO()) {
+//     page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+//     return err
+// }
+//
+func NewGetAccountAuthorizationDetailsPaginator(req GetAccountAuthorizationDetailsRequest) GetAccountAuthorizationDetailsPaginator {
+	return GetAccountAuthorizationDetailsPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *GetAccountAuthorizationDetailsInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// GetAccountAuthorizationDetailsPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type GetAccountAuthorizationDetailsPaginator struct {
+	aws.Pager
+}
+
+func (p *GetAccountAuthorizationDetailsPaginator) CurrentPage() *GetAccountAuthorizationDetailsOutput {
+	return p.Pager.CurrentPage().(*GetAccountAuthorizationDetailsOutput)
+}
+
+// GetAccountAuthorizationDetailsResponse is the response type for the
+// GetAccountAuthorizationDetails API operation.
+type GetAccountAuthorizationDetailsResponse struct {
+	*GetAccountAuthorizationDetailsOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// GetAccountAuthorizationDetails request.
+func (r *GetAccountAuthorizationDetailsResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go
new file mode 100644
index 00000000..741057d9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountPasswordPolicy.go
@@ -0,0 +1,105 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountPasswordPolicyInput +type GetAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Contains the response to a successful GetAccountPasswordPolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountPasswordPolicyResponse +type GetAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the account's password policy. + // + // PasswordPolicy is a required field + PasswordPolicy *PasswordPolicy `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetAccountPasswordPolicy = "GetAccountPasswordPolicy" + +// GetAccountPasswordPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the password policy for the AWS account. For more information about +// using a password policy, go to Managing an IAM Password Policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html). +// +// // Example sending a request using GetAccountPasswordPolicyRequest. +// req := client.GetAccountPasswordPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountPasswordPolicy +func (c *Client) GetAccountPasswordPolicyRequest(input *GetAccountPasswordPolicyInput) GetAccountPasswordPolicyRequest { + op := &aws.Operation{ + Name: opGetAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountPasswordPolicyInput{} + } + + req := c.newRequest(op, input, &GetAccountPasswordPolicyOutput{}) + return GetAccountPasswordPolicyRequest{Request: req, Input: input, Copy: c.GetAccountPasswordPolicyRequest} +} + +// GetAccountPasswordPolicyRequest is the request type for the +// GetAccountPasswordPolicy API operation. +type GetAccountPasswordPolicyRequest struct { + *aws.Request + Input *GetAccountPasswordPolicyInput + Copy func(*GetAccountPasswordPolicyInput) GetAccountPasswordPolicyRequest +} + +// Send marshals and sends the GetAccountPasswordPolicy API request. +func (r GetAccountPasswordPolicyRequest) Send(ctx context.Context) (*GetAccountPasswordPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetAccountPasswordPolicyResponse{ + GetAccountPasswordPolicyOutput: r.Request.Data.(*GetAccountPasswordPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetAccountPasswordPolicyResponse is the response type for the +// GetAccountPasswordPolicy API operation. +type GetAccountPasswordPolicyResponse struct { + *GetAccountPasswordPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetAccountPasswordPolicy request. 
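+//
+// For example (hypothetical; assumes the aws.Request in this SDK snapshot
+// carries a RequestID field):
+//
+// meta := resp.SDKResponseMetdata()
+// fmt.Println(meta.Request.RequestID)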
+func (r *GetAccountPasswordPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go new file mode 100644 index 00000000..7ae1a0b1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetAccountSummary.go @@ -0,0 +1,107 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountSummaryInput +type GetAccountSummaryInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetAccountSummaryInput) String() string { + return awsutil.Prettify(s) +} + +// Contains the response to a successful GetAccountSummary request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountSummaryResponse +type GetAccountSummaryOutput struct { + _ struct{} `type:"structure"` + + // A set of key–value pairs containing information about IAM entity usage + // and IAM quotas. + SummaryMap map[string]int64 `type:"map"` +} + +// String returns the string representation +func (s GetAccountSummaryOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetAccountSummary = "GetAccountSummary" + +// GetAccountSummaryRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about IAM entity usage and IAM quotas in the AWS account. +// +// For information about limitations on IAM entities, see Limitations on IAM +// Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// // Example sending a request using GetAccountSummaryRequest. +// req := client.GetAccountSummaryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetAccountSummary +func (c *Client) GetAccountSummaryRequest(input *GetAccountSummaryInput) GetAccountSummaryRequest { + op := &aws.Operation{ + Name: opGetAccountSummary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetAccountSummaryInput{} + } + + req := c.newRequest(op, input, &GetAccountSummaryOutput{}) + return GetAccountSummaryRequest{Request: req, Input: input, Copy: c.GetAccountSummaryRequest} +} + +// GetAccountSummaryRequest is the request type for the +// GetAccountSummary API operation. +type GetAccountSummaryRequest struct { + *aws.Request + Input *GetAccountSummaryInput + Copy func(*GetAccountSummaryInput) GetAccountSummaryRequest +} + +// Send marshals and sends the GetAccountSummary API request. +func (r GetAccountSummaryRequest) Send(ctx context.Context) (*GetAccountSummaryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetAccountSummaryResponse{ + GetAccountSummaryOutput: r.Request.Data.(*GetAccountSummaryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetAccountSummaryResponse is the response type for the +// GetAccountSummary API operation. 
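+//
+// A hypothetical sketch reading quota entries from SummaryMap; the "Users"
+// key name follows the IAM account summary documentation and is an assumption
+// here. A nil input is accepted, as the request constructor substitutes an
+// empty one.
+//
+// resp, err := client.GetAccountSummaryRequest(nil).Send(context.TODO())
+// if err == nil {
+//     fmt.Println("IAM users in account:", resp.SummaryMap["Users"])
+// }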
+type GetAccountSummaryResponse struct { + *GetAccountSummaryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetAccountSummary request. +func (r *GetAccountSummaryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go new file mode 100644 index 00000000..f45b33c0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForCustomPolicy.go @@ -0,0 +1,145 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForCustomPolicyRequest +type GetContextKeysForCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of policies for which you want the list of context keys referenced + // in those policies. Each document is specified as a string containing the + // complete, valid JSON text of an IAM policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyInputList is a required field + PolicyInputList []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetContextKeysForCustomPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetContextKeysForCustomPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetContextKeysForCustomPolicyInput"} + + if s.PolicyInputList == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyInputList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetContextKeysForPrincipalPolicy or +// GetContextKeysForCustomPolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPolicyResponse +type GetContextKeysForCustomPolicyOutput struct { + _ struct{} `type:"structure"` + + // The list of context keys that are referenced in the input policies. + ContextKeyNames []string `type:"list"` +} + +// String returns the string representation +func (s GetContextKeysForCustomPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetContextKeysForCustomPolicy = "GetContextKeysForCustomPolicy" + +// GetContextKeysForCustomPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Gets a list of all of the context keys referenced in the input policies. +// The policies are supplied as a list of one or more strings. 
To get the context
+// keys from policies associated with an IAM user, group, or role, use GetContextKeysForPrincipalPolicy.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. Context keys can be evaluated
+// by testing against a value specified in an IAM policy. Use GetContextKeysForCustomPolicy
+// to understand what key names and values you must supply when you call SimulateCustomPolicy.
+// Note that all parameters are shown in unencoded form here for clarity but
+// must be URL encoded to be included as a part of a real HTTP request.
+//
+// // Example sending a request using GetContextKeysForCustomPolicyRequest.
+// req := client.GetContextKeysForCustomPolicyRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+//     fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForCustomPolicy
+func (c *Client) GetContextKeysForCustomPolicyRequest(input *GetContextKeysForCustomPolicyInput) GetContextKeysForCustomPolicyRequest {
+	op := &aws.Operation{
+		Name:       opGetContextKeysForCustomPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetContextKeysForCustomPolicyInput{}
+	}
+
+	req := c.newRequest(op, input, &GetContextKeysForCustomPolicyOutput{})
+	return GetContextKeysForCustomPolicyRequest{Request: req, Input: input, Copy: c.GetContextKeysForCustomPolicyRequest}
+}
+
+// GetContextKeysForCustomPolicyRequest is the request type for the
+// GetContextKeysForCustomPolicy API operation.
+type GetContextKeysForCustomPolicyRequest struct {
+	*aws.Request
+	Input *GetContextKeysForCustomPolicyInput
+	Copy  func(*GetContextKeysForCustomPolicyInput) GetContextKeysForCustomPolicyRequest
+}
+
+// Send marshals and sends the GetContextKeysForCustomPolicy API request.
+func (r GetContextKeysForCustomPolicyRequest) Send(ctx context.Context) (*GetContextKeysForCustomPolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &GetContextKeysForCustomPolicyResponse{
+		GetContextKeysForCustomPolicyOutput: r.Request.Data.(*GetContextKeysForCustomPolicyOutput),
+		response: &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// GetContextKeysForCustomPolicyResponse is the response type for the
+// GetContextKeysForCustomPolicy API operation.
+type GetContextKeysForCustomPolicyResponse struct {
+	*GetContextKeysForCustomPolicyOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// GetContextKeysForCustomPolicy request.
+func (r *GetContextKeysForCustomPolicyResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go
new file mode 100644
index 00000000..bbf8088a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetContextKeysForPrincipalPolicy.go
@@ -0,0 +1,168 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPrincipalPolicyRequest
+type GetContextKeysForPrincipalPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// An optional list of additional policies for which you want the list of context
+	// keys that are referenced.
+	//
+	// The regex pattern (http://wikipedia.org/wiki/regex) used to validate this
+	// parameter is a string of characters consisting of the following:
+	//
+	//    * Any printable ASCII character ranging from the space character (\u0020)
+	//    through the end of the ASCII character range
+	//
+	//    * The printable characters in the Basic Latin and Latin-1 Supplement character
+	//    set (through \u00FF)
+	//
+	//    * The special characters tab (\u0009), line feed (\u000A), and carriage
+	//    return (\u000D)
+	PolicyInputList []string `type:"list"`
+
+	// The ARN of a user, group, or role whose policies contain the context keys
+	// that you want listed. If you specify a user, the list includes context keys
+	// that are found in all policies that are attached to the user. The list also
+	// includes all groups that the user is a member of. If you pick a group or
+	// a role, then it includes only those context keys that are found in policies
+	// attached to that entity. Note that all parameters are shown in unencoded
+	// form here for clarity, but must be URL encoded to be included as a part of
+	// a real HTTP request.
+	//
+	// For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
+	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	//
+	// PolicySourceArn is a required field
+	PolicySourceArn *string `min:"20" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPrincipalPolicyInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetContextKeysForPrincipalPolicyInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "GetContextKeysForPrincipalPolicyInput"}
+
+	if s.PolicySourceArn == nil {
+		invalidParams.Add(aws.NewErrParamRequired("PolicySourceArn"))
+	}
+	if s.PolicySourceArn != nil && len(*s.PolicySourceArn) < 20 {
+		invalidParams.Add(aws.NewErrParamMinLen("PolicySourceArn", 20))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Contains the response to a successful GetContextKeysForPrincipalPolicy or
+// GetContextKeysForCustomPolicy request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPolicyResponse
+type GetContextKeysForPrincipalPolicyOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The list of context keys that are referenced in the input policies.
+	ContextKeyNames []string `type:"list"`
+}
+
+// String returns the string representation
+func (s GetContextKeysForPrincipalPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opGetContextKeysForPrincipalPolicy = "GetContextKeysForPrincipalPolicy"
+
+// GetContextKeysForPrincipalPolicyRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Gets a list of all of the context keys referenced in all the IAM policies
+// that are attached to the specified IAM entity. The entity can be an IAM user,
+// group, or role. If you specify a user, then the request also includes all
+// of the policies attached to groups that the user is a member of.
+//
+// You can optionally include a list of one or more additional policies, specified
+// as strings. If you want to include only a list of policies by string, use
+// GetContextKeysForCustomPolicy instead.
+//
+// Note: This API discloses information about the permissions granted to other
+// users. If you do not want users to see other users' permissions, then consider
+// allowing them to use GetContextKeysForCustomPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. Context keys can be evaluated
+// by testing against a value in an IAM policy. Use GetContextKeysForPrincipalPolicy
+// to understand what key names and values you must supply when you call SimulatePrincipalPolicy.
+//
+// // Example sending a request using GetContextKeysForPrincipalPolicyRequest.
+// req := client.GetContextKeysForPrincipalPolicyRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+//     fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetContextKeysForPrincipalPolicy
+func (c *Client) GetContextKeysForPrincipalPolicyRequest(input *GetContextKeysForPrincipalPolicyInput) GetContextKeysForPrincipalPolicyRequest {
+	op := &aws.Operation{
+		Name:       opGetContextKeysForPrincipalPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetContextKeysForPrincipalPolicyInput{}
+	}
+
+	req := c.newRequest(op, input, &GetContextKeysForPrincipalPolicyOutput{})
+	return GetContextKeysForPrincipalPolicyRequest{Request: req, Input: input, Copy: c.GetContextKeysForPrincipalPolicyRequest}
+}
+
+// GetContextKeysForPrincipalPolicyRequest is the request type for the
+// GetContextKeysForPrincipalPolicy API operation.
+type GetContextKeysForPrincipalPolicyRequest struct {
+	*aws.Request
+	Input *GetContextKeysForPrincipalPolicyInput
+	Copy  func(*GetContextKeysForPrincipalPolicyInput) GetContextKeysForPrincipalPolicyRequest
+}
+
+// Send marshals and sends the GetContextKeysForPrincipalPolicy API request.
+func (r GetContextKeysForPrincipalPolicyRequest) Send(ctx context.Context) (*GetContextKeysForPrincipalPolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &GetContextKeysForPrincipalPolicyResponse{
+		GetContextKeysForPrincipalPolicyOutput: r.Request.Data.(*GetContextKeysForPrincipalPolicyOutput),
+		response: &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// GetContextKeysForPrincipalPolicyResponse is the response type for the
+// GetContextKeysForPrincipalPolicy API operation.
+type GetContextKeysForPrincipalPolicyResponse struct {
+	*GetContextKeysForPrincipalPolicyOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// GetContextKeysForPrincipalPolicy request.
+func (r *GetContextKeysForPrincipalPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go new file mode 100644 index 00000000..9bc03a27 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetCredentialReport.go @@ -0,0 +1,114 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetCredentialReportInput +type GetCredentialReportInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCredentialReportInput) String() string { + return awsutil.Prettify(s) +} + +// Contains the response to a successful GetCredentialReport request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetCredentialReportResponse +type GetCredentialReportOutput struct { + _ struct{} `type:"structure"` + + // Contains the credential report. The report is Base64-encoded. + // + // Content is automatically base64 encoded/decoded by the SDK. + Content []byte `type:"blob"` + + // The date and time when the credential report was created, in ISO 8601 date-time + // format (http://www.iso.org/iso/iso8601). + GeneratedTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The format (MIME type) of the credential report. + ReportFormat ReportFormatType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s GetCredentialReportOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetCredentialReport = "GetCredentialReport" + +// GetCredentialReportRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves a credential report for the AWS account. For more information about +// the credential report, see Getting Credential Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html) +// in the IAM User Guide. +// +// // Example sending a request using GetCredentialReportRequest. +// req := client.GetCredentialReportRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetCredentialReport +func (c *Client) GetCredentialReportRequest(input *GetCredentialReportInput) GetCredentialReportRequest { + op := &aws.Operation{ + Name: opGetCredentialReport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCredentialReportInput{} + } + + req := c.newRequest(op, input, &GetCredentialReportOutput{}) + return GetCredentialReportRequest{Request: req, Input: input, Copy: c.GetCredentialReportRequest} +} + +// GetCredentialReportRequest is the request type for the +// GetCredentialReport API operation. +type GetCredentialReportRequest struct { + *aws.Request + Input *GetCredentialReportInput + Copy func(*GetCredentialReportInput) GetCredentialReportRequest +} + +// Send marshals and sends the GetCredentialReport API request. 
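+//
+// Because Content arrives already base64-decoded (see the output type above),
+// the report can be parsed with the standard encoding/csv package; a hedged
+// sketch, where the ReportFormatType constant name is an assumption:
+//
+// resp, err := req.Send(context.TODO())
+// if err == nil && resp.ReportFormat == ReportFormatTypeTextCsv {
+//     rows, _ := csv.NewReader(bytes.NewReader(resp.Content)).ReadAll()
+//     fmt.Println("credential report rows:", len(rows))
+// }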
+func (r GetCredentialReportRequest) Send(ctx context.Context) (*GetCredentialReportResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetCredentialReportResponse{ + GetCredentialReportOutput: r.Request.Data.(*GetCredentialReportOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetCredentialReportResponse is the response type for the +// GetCredentialReport API operation. +type GetCredentialReportResponse struct { + *GetCredentialReportOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetCredentialReport request. +func (r *GetCredentialReportResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go new file mode 100644 index 00000000..dfec85df --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroup.go @@ -0,0 +1,224 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupRequest +type GetGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetGroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetGroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetGroupInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetGroup request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupResponse +type GetGroupOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the group. + // + // Group is a required field + Group *Group `type:"structure" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of users in the group. + // + // Users is a required field + Users []User `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetGroupOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetGroup = "GetGroup" + +// GetGroupRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns a list of IAM users that are in the specified IAM group. You can +// paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using GetGroupRequest. +// req := client.GetGroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroup +func (c *Client) GetGroupRequest(input *GetGroupInput) GetGroupRequest { + op := &aws.Operation{ + Name: opGetGroup, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &GetGroupInput{} + } + + req := c.newRequest(op, input, &GetGroupOutput{}) + return GetGroupRequest{Request: req, Input: input, Copy: c.GetGroupRequest} +} + +// GetGroupRequest is the request type for the +// GetGroup API operation. +type GetGroupRequest struct { + *aws.Request + Input *GetGroupInput + Copy func(*GetGroupInput) GetGroupRequest +} + +// Send marshals and sends the GetGroup API request. 
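+//
+// A hedged sketch of manual pagination using the Marker/IsTruncated contract
+// documented on the input type ("Admins" is a hypothetical group name); the
+// NewGetGroupPaginator helper below is the more idiomatic route.
+//
+// input := &GetGroupInput{GroupName: aws.String("Admins")}
+// for {
+//     resp, err := client.GetGroupRequest(input).Send(context.TODO())
+//     if err != nil {
+//         break
+//     }
+//     for _, u := range resp.Users {
+//         fmt.Println(*u.UserName)
+//     }
+//     if resp.IsTruncated == nil || !*resp.IsTruncated {
+//         break
+//     }
+//     input.Marker = resp.Marker
+// }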
+func (r GetGroupRequest) Send(ctx context.Context) (*GetGroupResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &GetGroupResponse{
+		GetGroupOutput: r.Request.Data.(*GetGroupOutput),
+		response: &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewGetGroupPaginator returns a paginator for GetGroup.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next returns false if there are no more
+// pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.GetGroupRequest(input)
+// p := iam.NewGetGroupPaginator(req)
+//
+// for p.Next(context.TODO()) {
+//     page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+//     return err
+// }
+//
+func NewGetGroupPaginator(req GetGroupRequest) GetGroupPaginator {
+	return GetGroupPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *GetGroupInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// GetGroupPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type GetGroupPaginator struct {
+	aws.Pager
+}
+
+func (p *GetGroupPaginator) CurrentPage() *GetGroupOutput {
+	return p.Pager.CurrentPage().(*GetGroupOutput)
+}
+
+// GetGroupResponse is the response type for the
+// GetGroup API operation.
+type GetGroupResponse struct {
+	*GetGroupOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// GetGroup request.
+func (r *GetGroupResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go
new file mode 100644
index 00000000..05c7d1b5
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetGroupPolicy.go
@@ -0,0 +1,176 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"context"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupPolicyRequest
+type GetGroupPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the group the policy is associated with.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces. You can also include any of the following characters: _+=,.@-
+	//
+	// GroupName is a required field
+	GroupName *string `min:"1" type:"string" required:"true"`
+
+	// The name of the policy document to get.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of upper and lowercase alphanumeric characters
+	// with no spaces.
You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetGroupPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetGroupPolicyInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetGroupPolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupPolicyResponse +type GetGroupPolicyOutput struct { + _ struct{} `type:"structure"` + + // The group the policy is associated with. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The policy document. + // + // IAM stores policies in JSON format. However, resources that were created + // using AWS CloudFormation templates can be formatted in YAML. AWS CloudFormation + // always converts a YAML policy to JSON format before submitting it to IAM. + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetGroupPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetGroupPolicy = "GetGroupPolicy" + +// GetGroupPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the specified inline policy document that is embedded in the specified +// IAM group. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM group can also have managed policies attached to it. To retrieve a +// managed policy document that is attached to a group, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using GetGroupPolicyRequest. 
+// req := client.GetGroupPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetGroupPolicy +func (c *Client) GetGroupPolicyRequest(input *GetGroupPolicyInput) GetGroupPolicyRequest { + op := &aws.Operation{ + Name: opGetGroupPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetGroupPolicyInput{} + } + + req := c.newRequest(op, input, &GetGroupPolicyOutput{}) + return GetGroupPolicyRequest{Request: req, Input: input, Copy: c.GetGroupPolicyRequest} +} + +// GetGroupPolicyRequest is the request type for the +// GetGroupPolicy API operation. +type GetGroupPolicyRequest struct { + *aws.Request + Input *GetGroupPolicyInput + Copy func(*GetGroupPolicyInput) GetGroupPolicyRequest +} + +// Send marshals and sends the GetGroupPolicy API request. +func (r GetGroupPolicyRequest) Send(ctx context.Context) (*GetGroupPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetGroupPolicyResponse{ + GetGroupPolicyOutput: r.Request.Data.(*GetGroupPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetGroupPolicyResponse is the response type for the +// GetGroupPolicy API operation. +type GetGroupPolicyResponse struct { + *GetGroupPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetGroupPolicy request. +func (r *GetGroupPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go new file mode 100644 index 00000000..ac262350 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetInstanceProfile.go @@ -0,0 +1,133 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetInstanceProfileRequest +type GetInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to get information about. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
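+// The SDK runs this validation as part of sending the request; calling it
+// directly is an optional pre-flight check, e.g. (hypothetical):
+//
+// if err := input.Validate(); err != nil {
+//     return err
+// }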
+func (s *GetInstanceProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetInstanceProfileInput"} + + if s.InstanceProfileName == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetInstanceProfile request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetInstanceProfileResponse +type GetInstanceProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the instance profile. + // + // InstanceProfile is a required field + InstanceProfile *InstanceProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetInstanceProfile = "GetInstanceProfile" + +// GetInstanceProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about the specified instance profile, including the +// instance profile's path, GUID, ARN, and role. For more information about +// instance profiles, see About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html) +// in the IAM User Guide. +// +// // Example sending a request using GetInstanceProfileRequest. +// req := client.GetInstanceProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetInstanceProfile +func (c *Client) GetInstanceProfileRequest(input *GetInstanceProfileInput) GetInstanceProfileRequest { + op := &aws.Operation{ + Name: opGetInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetInstanceProfileInput{} + } + + req := c.newRequest(op, input, &GetInstanceProfileOutput{}) + return GetInstanceProfileRequest{Request: req, Input: input, Copy: c.GetInstanceProfileRequest} +} + +// GetInstanceProfileRequest is the request type for the +// GetInstanceProfile API operation. +type GetInstanceProfileRequest struct { + *aws.Request + Input *GetInstanceProfileInput + Copy func(*GetInstanceProfileInput) GetInstanceProfileRequest +} + +// Send marshals and sends the GetInstanceProfile API request. +func (r GetInstanceProfileRequest) Send(ctx context.Context) (*GetInstanceProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetInstanceProfileResponse{ + GetInstanceProfileOutput: r.Request.Data.(*GetInstanceProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetInstanceProfileResponse is the response type for the +// GetInstanceProfile API operation. +type GetInstanceProfileResponse struct { + *GetInstanceProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetInstanceProfile request. 
+func (r *GetInstanceProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go new file mode 100644 index 00000000..81d7d388 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetLoginProfile.go @@ -0,0 +1,132 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetLoginProfileRequest +type GetLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the user whose login profile you want to retrieve. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetLoginProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetLoginProfileInput"} + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetLoginProfile request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetLoginProfileResponse +type GetLoginProfileOutput struct { + _ struct{} `type:"structure"` + + // A structure containing the user name and password create date for the user. + // + // LoginProfile is a required field + LoginProfile *LoginProfile `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetLoginProfile = "GetLoginProfile" + +// GetLoginProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the user name and password-creation date for the specified IAM +// user. If the user has not been assigned a password, the operation returns +// a 404 (NoSuchEntity) error. +// +// // Example sending a request using GetLoginProfileRequest. 
+// req := client.GetLoginProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetLoginProfile +func (c *Client) GetLoginProfileRequest(input *GetLoginProfileInput) GetLoginProfileRequest { + op := &aws.Operation{ + Name: opGetLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetLoginProfileInput{} + } + + req := c.newRequest(op, input, &GetLoginProfileOutput{}) + return GetLoginProfileRequest{Request: req, Input: input, Copy: c.GetLoginProfileRequest} +} + +// GetLoginProfileRequest is the request type for the +// GetLoginProfile API operation. +type GetLoginProfileRequest struct { + *aws.Request + Input *GetLoginProfileInput + Copy func(*GetLoginProfileInput) GetLoginProfileRequest +} + +// Send marshals and sends the GetLoginProfile API request. +func (r GetLoginProfileRequest) Send(ctx context.Context) (*GetLoginProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetLoginProfileResponse{ + GetLoginProfileOutput: r.Request.Data.(*GetLoginProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetLoginProfileResponse is the response type for the +// GetLoginProfile API operation. +type GetLoginProfileResponse struct { + *GetLoginProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetLoginProfile request. +func (r *GetLoginProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go new file mode 100644 index 00000000..b45c6ce8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetOpenIDConnectProvider.go @@ -0,0 +1,145 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOpenIDConnectProviderRequest +type GetOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the OIDC provider resource object in IAM + // to get information for. You can get a list of OIDC provider resource ARNs + // by using the ListOpenIDConnectProviders operation. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetOpenIDConnectProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetOpenIDConnectProviderInput"} + + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetOpenIDConnectProvider request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOpenIDConnectProviderResponse +type GetOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` + + // A list of client IDs (also known as audiences) that are associated with the + // specified IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. + ClientIDList []string `type:"list"` + + // The date and time when the IAM OIDC provider resource object was created + // in the AWS account. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OIDC provider resource object. For more information, see CreateOpenIDConnectProvider. + ThumbprintList []string `type:"list"` + + // The URL that the IAM OIDC provider resource object is associated with. For + // more information, see CreateOpenIDConnectProvider. + Url *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetOpenIDConnectProvider = "GetOpenIDConnectProvider" + +// GetOpenIDConnectProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns information about the specified OpenID Connect (OIDC) provider resource +// object in IAM. +// +// // Example sending a request using GetOpenIDConnectProviderRequest. +// req := client.GetOpenIDConnectProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetOpenIDConnectProvider +func (c *Client) GetOpenIDConnectProviderRequest(input *GetOpenIDConnectProviderInput) GetOpenIDConnectProviderRequest { + op := &aws.Operation{ + Name: opGetOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetOpenIDConnectProviderInput{} + } + + req := c.newRequest(op, input, &GetOpenIDConnectProviderOutput{}) + return GetOpenIDConnectProviderRequest{Request: req, Input: input, Copy: c.GetOpenIDConnectProviderRequest} +} + +// GetOpenIDConnectProviderRequest is the request type for the +// GetOpenIDConnectProvider API operation. +type GetOpenIDConnectProviderRequest struct { + *aws.Request + Input *GetOpenIDConnectProviderInput + Copy func(*GetOpenIDConnectProviderInput) GetOpenIDConnectProviderRequest +} + +// Send marshals and sends the GetOpenIDConnectProvider API request. 
+func (r GetOpenIDConnectProviderRequest) Send(ctx context.Context) (*GetOpenIDConnectProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetOpenIDConnectProviderResponse{ + GetOpenIDConnectProviderOutput: r.Request.Data.(*GetOpenIDConnectProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetOpenIDConnectProviderResponse is the response type for the +// GetOpenIDConnectProvider API operation. +type GetOpenIDConnectProviderResponse struct { + *GetOpenIDConnectProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetOpenIDConnectProvider request. +func (r *GetOpenIDConnectProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go new file mode 100644 index 00000000..f4a6e381 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicy.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyRequest +type GetPolicyInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPolicyInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetPolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyResponse +type GetPolicyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the policy. + Policy *Policy `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetPolicy = "GetPolicy" + +// GetPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about the specified managed policy, including the policy's +// default version and the total number of IAM users, groups, and roles to which +// the policy is attached. To retrieve the list of the specific users, groups, +// and roles that the policy is attached to, use the ListEntitiesForPolicy API. 
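+//
+// As a hypothetical sketch of that follow-up call (the client value and a
+// policyArn *string are assumed here; they are not part of the generated
+// example below), the same request/send pattern applies:
+//
+// listReq := client.ListEntitiesForPolicyRequest(&ListEntitiesForPolicyInput{PolicyArn: policyArn})
+// listResp, err := listReq.Send(context.TODO())
+// if err == nil {
+//     fmt.Println(listResp.PolicyUsers, listResp.PolicyGroups, listResp.PolicyRoles)
+// }
+//
+// Returning to GetPolicy: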
+// This API returns metadata about the policy. To retrieve the actual policy +// document for a specific version of the policy, use GetPolicyVersion. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded with an IAM user, group, or role, +// use the GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using GetPolicyRequest. +// req := client.GetPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicy +func (c *Client) GetPolicyRequest(input *GetPolicyInput) GetPolicyRequest { + op := &aws.Operation{ + Name: opGetPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyInput{} + } + + req := c.newRequest(op, input, &GetPolicyOutput{}) + return GetPolicyRequest{Request: req, Input: input, Copy: c.GetPolicyRequest} +} + +// GetPolicyRequest is the request type for the +// GetPolicy API operation. +type GetPolicyRequest struct { + *aws.Request + Input *GetPolicyInput + Copy func(*GetPolicyInput) GetPolicyRequest +} + +// Send marshals and sends the GetPolicy API request. +func (r GetPolicyRequest) Send(ctx context.Context) (*GetPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPolicyResponse{ + GetPolicyOutput: r.Request.Data.(*GetPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPolicyResponse is the response type for the +// GetPolicy API operation. +type GetPolicyResponse struct { + *GetPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPolicy request. +func (r *GetPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go new file mode 100644 index 00000000..866117c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetPolicyVersion.go @@ -0,0 +1,164 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyVersionRequest +type GetPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the managed policy that you want information + // about. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // Identifies the policy version to retrieve. 
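+ //
+ // For example, a value of "v1" identifies the first version created for a
+ // policy.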
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consists of the lowercase letter 'v' followed + // by one or two digits, and optionally followed by a period '.' and a string + // of letters and digits. + // + // VersionId is a required field + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s GetPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetPolicyVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetPolicyVersionInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.VersionId == nil { + invalidParams.Add(aws.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetPolicyVersion request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyVersionResponse +type GetPolicyVersionOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the policy version. + PolicyVersion *PolicyVersion `type:"structure"` +} + +// String returns the string representation +func (s GetPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetPolicyVersion = "GetPolicyVersion" + +// GetPolicyVersionRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about the specified version of the specified managed +// policy, including the policy document. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// To list the available versions for a policy, use ListPolicyVersions. +// +// This API retrieves information about managed policies. To retrieve information +// about an inline policy that is embedded in a user, group, or role, use the +// GetUserPolicy, GetGroupPolicy, or GetRolePolicy API. +// +// For more information about the types of policies, see Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about managed policy versions, see Versioning for Managed +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) +// in the IAM User Guide. +// +// // Example sending a request using GetPolicyVersionRequest. 
+// req := client.GetPolicyVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetPolicyVersion +func (c *Client) GetPolicyVersionRequest(input *GetPolicyVersionInput) GetPolicyVersionRequest { + op := &aws.Operation{ + Name: opGetPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetPolicyVersionInput{} + } + + req := c.newRequest(op, input, &GetPolicyVersionOutput{}) + return GetPolicyVersionRequest{Request: req, Input: input, Copy: c.GetPolicyVersionRequest} +} + +// GetPolicyVersionRequest is the request type for the +// GetPolicyVersion API operation. +type GetPolicyVersionRequest struct { + *aws.Request + Input *GetPolicyVersionInput + Copy func(*GetPolicyVersionInput) GetPolicyVersionRequest +} + +// Send marshals and sends the GetPolicyVersion API request. +func (r GetPolicyVersionRequest) Send(ctx context.Context) (*GetPolicyVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetPolicyVersionResponse{ + GetPolicyVersionOutput: r.Request.Data.(*GetPolicyVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetPolicyVersionResponse is the response type for the +// GetPolicyVersion API operation. +type GetPolicyVersionResponse struct { + *GetPolicyVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetPolicyVersion request. +func (r *GetPolicyVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go new file mode 100644 index 00000000..850dd812 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRole.go @@ -0,0 +1,138 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRoleRequest +type GetRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM role to get information about. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetRoleInput"} + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetRole request. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRoleResponse +type GetRoleOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the IAM role. + // + // Role is a required field + Role *Role `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetRole = "GetRole" + +// GetRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about the specified role, including the role's path, +// GUID, ARN, and the role's trust policy that grants permission to assume the +// role. For more information about roles, see Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// // Example sending a request using GetRoleRequest. +// req := client.GetRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRole +func (c *Client) GetRoleRequest(input *GetRoleInput) GetRoleRequest { + op := &aws.Operation{ + Name: opGetRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRoleInput{} + } + + req := c.newRequest(op, input, &GetRoleOutput{}) + return GetRoleRequest{Request: req, Input: input, Copy: c.GetRoleRequest} +} + +// GetRoleRequest is the request type for the +// GetRole API operation. +type GetRoleRequest struct { + *aws.Request + Input *GetRoleInput + Copy func(*GetRoleInput) GetRoleRequest +} + +// Send marshals and sends the GetRole API request. +func (r GetRoleRequest) Send(ctx context.Context) (*GetRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetRoleResponse{ + GetRoleOutput: r.Request.Data.(*GetRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetRoleResponse is the response type for the +// GetRole API operation. +type GetRoleResponse struct { + *GetRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetRole request. +func (r *GetRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go new file mode 100644 index 00000000..76e09d02 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetRolePolicy.go @@ -0,0 +1,179 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
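+//
+// The GetRolePolicy operation in this file, like GetRole above, returns a
+// URL-encoded (RFC 3986) policy document. A minimal decoding sketch, assuming
+// net/url's QueryUnescape is an acceptable decoder here and that resp is a
+// *GetRolePolicyResponse:
+//
+// doc, err := url.QueryUnescape(*resp.PolicyDocument)
+// if err == nil {
+//     fmt.Println(doc) // the plain-JSON policy document
+// }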
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRolePolicyRequest +type GetRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role associated with the policy. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetRolePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetRolePolicyInput"} + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetRolePolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRolePolicyResponse +type GetRolePolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // IAM stores policies in JSON format. However, resources that were created + // using AWS CloudFormation templates can be formatted in YAML. AWS CloudFormation + // always converts a YAML policy to JSON format before submitting it to IAM. + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The role the policy is associated with. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetRolePolicy = "GetRolePolicy" + +// GetRolePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the specified inline policy document that is embedded with the +// specified IAM role. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. 
For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM role can also have managed policies attached to it. To retrieve a +// managed policy document that is attached to a role, use GetPolicy to determine +// the policy's default version, then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For more information about roles, see Using Roles to Delegate Permissions +// and Federate Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// // Example sending a request using GetRolePolicyRequest. +// req := client.GetRolePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetRolePolicy +func (c *Client) GetRolePolicyRequest(input *GetRolePolicyInput) GetRolePolicyRequest { + op := &aws.Operation{ + Name: opGetRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetRolePolicyInput{} + } + + req := c.newRequest(op, input, &GetRolePolicyOutput{}) + return GetRolePolicyRequest{Request: req, Input: input, Copy: c.GetRolePolicyRequest} +} + +// GetRolePolicyRequest is the request type for the +// GetRolePolicy API operation. +type GetRolePolicyRequest struct { + *aws.Request + Input *GetRolePolicyInput + Copy func(*GetRolePolicyInput) GetRolePolicyRequest +} + +// Send marshals and sends the GetRolePolicy API request. +func (r GetRolePolicyRequest) Send(ctx context.Context) (*GetRolePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetRolePolicyResponse{ + GetRolePolicyOutput: r.Request.Data.(*GetRolePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetRolePolicyResponse is the response type for the +// GetRolePolicy API operation. +type GetRolePolicyResponse struct { + *GetRolePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetRolePolicy request. +func (r *GetRolePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go new file mode 100644 index 00000000..93a1a2c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSAMLProvider.go @@ -0,0 +1,139 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSAMLProviderRequest +type GetSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider resource object in IAM + // to get information about. 
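+ //
+ // For example, a SAML provider ARN takes the form
+ // arn:aws:iam::123456789012:saml-provider/MyProvider, where the account ID
+ // and provider name shown here are illustrative.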
+ // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // SAMLProviderArn is a required field + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetSAMLProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetSAMLProviderInput"} + + if s.SAMLProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSAMLProvider request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSAMLProviderResponse +type GetSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The XML metadata document that includes information about an identity provider. + SAMLMetadataDocument *string `min:"1000" type:"string"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s GetSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetSAMLProvider = "GetSAMLProvider" + +// GetSAMLProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns the SAML provider metadocument that was uploaded when the IAM SAML +// provider resource object was created or updated. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// // Example sending a request using GetSAMLProviderRequest. +// req := client.GetSAMLProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSAMLProvider +func (c *Client) GetSAMLProviderRequest(input *GetSAMLProviderInput) GetSAMLProviderRequest { + op := &aws.Operation{ + Name: opGetSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSAMLProviderInput{} + } + + req := c.newRequest(op, input, &GetSAMLProviderOutput{}) + return GetSAMLProviderRequest{Request: req, Input: input, Copy: c.GetSAMLProviderRequest} +} + +// GetSAMLProviderRequest is the request type for the +// GetSAMLProvider API operation. +type GetSAMLProviderRequest struct { + *aws.Request + Input *GetSAMLProviderInput + Copy func(*GetSAMLProviderInput) GetSAMLProviderRequest +} + +// Send marshals and sends the GetSAMLProvider API request. 
+func (r GetSAMLProviderRequest) Send(ctx context.Context) (*GetSAMLProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetSAMLProviderResponse{ + GetSAMLProviderOutput: r.Request.Data.(*GetSAMLProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetSAMLProviderResponse is the response type for the +// GetSAMLProvider API operation. +type GetSAMLProviderResponse struct { + *GetSAMLProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetSAMLProvider request. +func (r *GetSAMLProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go new file mode 100644 index 00000000..a818b4ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetSSHPublicKey.go @@ -0,0 +1,160 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSSHPublicKeyRequest +type GetSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // Specifies the public key encoding format to use in the response. To retrieve + // the public key in ssh-rsa format, use SSH. To retrieve the public key in + // PEM format, use PEM. + // + // Encoding is a required field + Encoding EncodingType `type:"string" required:"true" enum:"true"` + + // The unique identifier for the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetSSHPublicKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetSSHPublicKeyInput"} + if len(s.Encoding) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Encoding")) + } + + if s.SSHPublicKeyId == nil { + invalidParams.Add(aws.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetSSHPublicKey request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSSHPublicKeyResponse +type GetSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the SSH public key. + SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s GetSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetSSHPublicKey = "GetSSHPublicKey" + +// GetSSHPublicKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the specified SSH public key, including metadata about the key. +// +// The SSH public key retrieved by this operation is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// // Example sending a request using GetSSHPublicKeyRequest. +// req := client.GetSSHPublicKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetSSHPublicKey +func (c *Client) GetSSHPublicKeyRequest(input *GetSSHPublicKeyInput) GetSSHPublicKeyRequest { + op := &aws.Operation{ + Name: opGetSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSSHPublicKeyInput{} + } + + req := c.newRequest(op, input, &GetSSHPublicKeyOutput{}) + return GetSSHPublicKeyRequest{Request: req, Input: input, Copy: c.GetSSHPublicKeyRequest} +} + +// GetSSHPublicKeyRequest is the request type for the +// GetSSHPublicKey API operation. +type GetSSHPublicKeyRequest struct { + *aws.Request + Input *GetSSHPublicKeyInput + Copy func(*GetSSHPublicKeyInput) GetSSHPublicKeyRequest +} + +// Send marshals and sends the GetSSHPublicKey API request. +func (r GetSSHPublicKeyRequest) Send(ctx context.Context) (*GetSSHPublicKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetSSHPublicKeyResponse{ + GetSSHPublicKeyOutput: r.Request.Data.(*GetSSHPublicKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetSSHPublicKeyResponse is the response type for the +// GetSSHPublicKey API operation. +type GetSSHPublicKeyResponse struct { + *GetSSHPublicKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetSSHPublicKey request. 
+func (r *GetSSHPublicKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go new file mode 100644 index 00000000..8057e6bc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServerCertificate.go @@ -0,0 +1,135 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServerCertificateRequest +type GetServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The name of the server certificate you want to retrieve information about. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServerCertificateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetServerCertificateInput"} + + if s.ServerCertificateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetServerCertificate request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServerCertificateResponse +type GetServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the server certificate. + // + // ServerCertificate is a required field + ServerCertificate *ServerCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetServerCertificate = "GetServerCertificate" + +// GetServerCertificateRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about the specified server certificate stored in IAM. +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic includes a list of AWS services that can +// use the server certificates that you manage with IAM. +// +// // Example sending a request using GetServerCertificateRequest. 
+// req := client.GetServerCertificateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServerCertificate +func (c *Client) GetServerCertificateRequest(input *GetServerCertificateInput) GetServerCertificateRequest { + op := &aws.Operation{ + Name: opGetServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServerCertificateInput{} + } + + req := c.newRequest(op, input, &GetServerCertificateOutput{}) + return GetServerCertificateRequest{Request: req, Input: input, Copy: c.GetServerCertificateRequest} +} + +// GetServerCertificateRequest is the request type for the +// GetServerCertificate API operation. +type GetServerCertificateRequest struct { + *aws.Request + Input *GetServerCertificateInput + Copy func(*GetServerCertificateInput) GetServerCertificateRequest +} + +// Send marshals and sends the GetServerCertificate API request. +func (r GetServerCertificateRequest) Send(ctx context.Context) (*GetServerCertificateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetServerCertificateResponse{ + GetServerCertificateOutput: r.Request.Data.(*GetServerCertificateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetServerCertificateResponse is the response type for the +// GetServerCertificate API operation. +type GetServerCertificateResponse struct { + *GetServerCertificateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetServerCertificate request. +func (r *GetServerCertificateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go new file mode 100644 index 00000000..501e224b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetails.go @@ -0,0 +1,221 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsRequest +type GetServiceLastAccessedDetailsInput struct { + _ struct{} `type:"structure"` + + // The ID of the request generated by the GenerateServiceLastAccessedDetails + // operation. + // + // JobId is a required field + JobId *string `min:"36" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. 
+ // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServiceLastAccessedDetailsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetServiceLastAccessedDetailsInput"} + + if s.JobId == nil { + invalidParams.Add(aws.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(aws.NewErrParamMinLen("JobId", 36)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsResponse +type GetServiceLastAccessedDetailsOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the reason the operation failed. + Error *ErrorDetails `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the generated report job was completed or failed. + // + // This field is null if the job is still in progress, as indicated by a JobStatus + // value of IN_PROGRESS. + // + // JobCompletionDate is a required field + JobCompletionDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the report job was created. + // + // JobCreationDate is a required field + JobCreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The status of the job. + // + // JobStatus is a required field + JobStatus JobStatusType `type:"string" required:"true" enum:"true"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A ServiceLastAccessed object that contains details about the most recent + // attempt to access the service. 
+ // + // ServicesLastAccessed is a required field + ServicesLastAccessed []ServiceLastAccessed `type:"list" required:"true"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetServiceLastAccessedDetails = "GetServiceLastAccessedDetails" + +// GetServiceLastAccessedDetailsRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// After you generate a user, group, role, or policy report using the GenerateServiceLastAccessedDetails +// operation, you can use the JobId parameter in GetServiceLastAccessedDetails. +// This operation retrieves the status of your report job and a list of AWS +// services that the resource (user, group, role, or managed policy) can access. +// +// Service last accessed data does not use other policy types when determining +// whether a resource could access a service. These other policy types include +// resource-based policies, access control lists, AWS Organizations policies, +// IAM permissions boundaries, and AWS STS assume role policies. It only applies +// permissions policy logic. For more about the evaluation of policy types, +// see Evaluating Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// For each service that the resource could access using permissions policies, +// the operation returns details about the most recent access attempt. If there +// was no attempt, the service is listed without details about the most recent +// attempt to access the service. If the operation fails, the GetServiceLastAccessedDetails +// operation returns the reason that it failed. +// +// The GetServiceLastAccessedDetails operation returns a list of services. This +// list includes the number of entities that have attempted to access the service +// and the date and time of the last attempt. It also returns the ARN of the +// following entity, depending on the resource ARN that you used to generate +// the report: +// +// * User – Returns the user ARN that you used to generate the report +// +// * Group – Returns the ARN of the group member (user) that last attempted +// to access the service +// +// * Role – Returns the role ARN that you used to generate the report +// +// * Policy – Returns the ARN of the user or role that last used the policy +// to attempt to access the service +// +// By default, the list is sorted by service namespace. +// +// // Example sending a request using GetServiceLastAccessedDetailsRequest. +// req := client.GetServiceLastAccessedDetailsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetails +func (c *Client) GetServiceLastAccessedDetailsRequest(input *GetServiceLastAccessedDetailsInput) GetServiceLastAccessedDetailsRequest { + op := &aws.Operation{ + Name: opGetServiceLastAccessedDetails, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLastAccessedDetailsInput{} + } + + req := c.newRequest(op, input, &GetServiceLastAccessedDetailsOutput{}) + return GetServiceLastAccessedDetailsRequest{Request: req, Input: input, Copy: c.GetServiceLastAccessedDetailsRequest} +} + +// GetServiceLastAccessedDetailsRequest is the request type for the +// GetServiceLastAccessedDetails API operation. 
+type GetServiceLastAccessedDetailsRequest struct { + *aws.Request + Input *GetServiceLastAccessedDetailsInput + Copy func(*GetServiceLastAccessedDetailsInput) GetServiceLastAccessedDetailsRequest +} + +// Send marshals and sends the GetServiceLastAccessedDetails API request. +func (r GetServiceLastAccessedDetailsRequest) Send(ctx context.Context) (*GetServiceLastAccessedDetailsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetServiceLastAccessedDetailsResponse{ + GetServiceLastAccessedDetailsOutput: r.Request.Data.(*GetServiceLastAccessedDetailsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetServiceLastAccessedDetailsResponse is the response type for the +// GetServiceLastAccessedDetails API operation. +type GetServiceLastAccessedDetailsResponse struct { + *GetServiceLastAccessedDetailsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetServiceLastAccessedDetails request. +func (r *GetServiceLastAccessedDetailsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go new file mode 100644 index 00000000..a89029bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLastAccessedDetailsWithEntities.go @@ -0,0 +1,226 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsWithEntitiesRequest +type GetServiceLastAccessedDetailsWithEntitiesInput struct { + _ struct{} `type:"structure"` + + // The ID of the request generated by the GenerateServiceLastAccessedDetails + // operation. + // + // JobId is a required field + JobId *string `min:"36" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The service namespace for an AWS service. Provide the service namespace to + // learn when the IAM entity last attempted to access the specified service. 
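+ //
+ // For example, iam is the service namespace for AWS Identity and Access
+ // Management and s3 is the service namespace for Amazon S3.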
+ // + // To learn the service namespace for a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. + // + // ServiceNamespace is a required field + ServiceNamespace *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsWithEntitiesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServiceLastAccessedDetailsWithEntitiesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetServiceLastAccessedDetailsWithEntitiesInput"} + + if s.JobId == nil { + invalidParams.Add(aws.NewErrParamRequired("JobId")) + } + if s.JobId != nil && len(*s.JobId) < 36 { + invalidParams.Add(aws.NewErrParamMinLen("JobId", 36)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.ServiceNamespace == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceNamespace")) + } + if s.ServiceNamespace != nil && len(*s.ServiceNamespace) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ServiceNamespace", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsWithEntitiesResponse +type GetServiceLastAccessedDetailsWithEntitiesOutput struct { + _ struct{} `type:"structure"` + + // An EntityDetailsList object that contains details about when an IAM entity + // (user or role) used group or policy permissions in an attempt to access the + // specified AWS service. + // + // EntityDetailsList is a required field + EntityDetailsList []EntityDetails `type:"list" required:"true"` + + // An object that contains details about the reason the operation failed. + Error *ErrorDetails `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the generated report job was completed or failed. + // + // JobCompletionDate is a required field + JobCompletionDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the report job was created. 
+ // + // JobCreationDate is a required field + JobCreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The status of the job. + // + // JobStatus is a required field + JobStatus JobStatusType `type:"string" required:"true" enum:"true"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s GetServiceLastAccessedDetailsWithEntitiesOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetServiceLastAccessedDetailsWithEntities = "GetServiceLastAccessedDetailsWithEntities" + +// GetServiceLastAccessedDetailsWithEntitiesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// After you generate a group or policy report using the GenerateServiceLastAccessedDetails +// operation, you can use the JobId parameter in GetServiceLastAccessedDetailsWithEntities. +// This operation retrieves the status of your report job and a list of entities +// that could have used group or policy permissions to access the specified +// service. +// +// * Group – For a group report, this operation returns a list of users +// in the group that could have used the group’s policies in an attempt +// to access the service. +// +// * Policy – For a policy report, this operation returns a list of entities +// (users or roles) that could have used the policy in an attempt to access +// the service. +// +// You can also use this operation for user or role reports to retrieve details +// about those entities. +// +// If the operation fails, the GetServiceLastAccessedDetailsWithEntities operation +// returns the reason that it failed. +// +// By default, the list of associated entities is sorted by date, with the most +// recent access listed first. +// +// // Example sending a request using GetServiceLastAccessedDetailsWithEntitiesRequest. +// req := client.GetServiceLastAccessedDetailsWithEntitiesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLastAccessedDetailsWithEntities +func (c *Client) GetServiceLastAccessedDetailsWithEntitiesRequest(input *GetServiceLastAccessedDetailsWithEntitiesInput) GetServiceLastAccessedDetailsWithEntitiesRequest { + op := &aws.Operation{ + Name: opGetServiceLastAccessedDetailsWithEntities, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLastAccessedDetailsWithEntitiesInput{} + } + + req := c.newRequest(op, input, &GetServiceLastAccessedDetailsWithEntitiesOutput{}) + return GetServiceLastAccessedDetailsWithEntitiesRequest{Request: req, Input: input, Copy: c.GetServiceLastAccessedDetailsWithEntitiesRequest} +} + +// GetServiceLastAccessedDetailsWithEntitiesRequest is the request type for the +// GetServiceLastAccessedDetailsWithEntities API operation. +type GetServiceLastAccessedDetailsWithEntitiesRequest struct { + *aws.Request + Input *GetServiceLastAccessedDetailsWithEntitiesInput + Copy func(*GetServiceLastAccessedDetailsWithEntitiesInput) GetServiceLastAccessedDetailsWithEntitiesRequest +} + +// Send marshals and sends the GetServiceLastAccessedDetailsWithEntities API request. 
+func (r GetServiceLastAccessedDetailsWithEntitiesRequest) Send(ctx context.Context) (*GetServiceLastAccessedDetailsWithEntitiesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetServiceLastAccessedDetailsWithEntitiesResponse{ + GetServiceLastAccessedDetailsWithEntitiesOutput: r.Request.Data.(*GetServiceLastAccessedDetailsWithEntitiesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetServiceLastAccessedDetailsWithEntitiesResponse is the response type for the +// GetServiceLastAccessedDetailsWithEntities API operation. +type GetServiceLastAccessedDetailsWithEntitiesResponse struct { + *GetServiceLastAccessedDetailsWithEntitiesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetServiceLastAccessedDetailsWithEntities request. +func (r *GetServiceLastAccessedDetailsWithEntitiesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go new file mode 100644 index 00000000..37a592e7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetServiceLinkedRoleDeletionStatus.go @@ -0,0 +1,134 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatusRequest +type GetServiceLinkedRoleDeletionStatusInput struct { + _ struct{} `type:"structure"` + + // The deletion task identifier. This identifier is returned by the DeleteServiceLinkedRole + // operation in the format task/aws-service-role///. + // + // DeletionTaskId is a required field + DeletionTaskId *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetServiceLinkedRoleDeletionStatusInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetServiceLinkedRoleDeletionStatusInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetServiceLinkedRoleDeletionStatusInput"} + + if s.DeletionTaskId == nil { + invalidParams.Add(aws.NewErrParamRequired("DeletionTaskId")) + } + if s.DeletionTaskId != nil && len(*s.DeletionTaskId) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("DeletionTaskId", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatusResponse +type GetServiceLinkedRoleDeletionStatusOutput struct { + _ struct{} `type:"structure"` + + // An object that contains details about the reason the deletion failed. + Reason *DeletionTaskFailureReasonType `type:"structure"` + + // The status of the deletion. 
+ // + // Status is a required field + Status DeletionTaskStatusType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s GetServiceLinkedRoleDeletionStatusOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetServiceLinkedRoleDeletionStatus = "GetServiceLinkedRoleDeletionStatus" + +// GetServiceLinkedRoleDeletionStatusRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the status of your service-linked role deletion. After you use +// the DeleteServiceLinkedRole API operation to submit a service-linked role +// for deletion, you can use the DeletionTaskId parameter in GetServiceLinkedRoleDeletionStatus +// to check the status of the deletion. If the deletion fails, this operation +// returns the reason that it failed, if that information is returned by the +// service. +// +// // Example sending a request using GetServiceLinkedRoleDeletionStatusRequest. +// req := client.GetServiceLinkedRoleDeletionStatusRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetServiceLinkedRoleDeletionStatus +func (c *Client) GetServiceLinkedRoleDeletionStatusRequest(input *GetServiceLinkedRoleDeletionStatusInput) GetServiceLinkedRoleDeletionStatusRequest { + op := &aws.Operation{ + Name: opGetServiceLinkedRoleDeletionStatus, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetServiceLinkedRoleDeletionStatusInput{} + } + + req := c.newRequest(op, input, &GetServiceLinkedRoleDeletionStatusOutput{}) + return GetServiceLinkedRoleDeletionStatusRequest{Request: req, Input: input, Copy: c.GetServiceLinkedRoleDeletionStatusRequest} +} + +// GetServiceLinkedRoleDeletionStatusRequest is the request type for the +// GetServiceLinkedRoleDeletionStatus API operation. +type GetServiceLinkedRoleDeletionStatusRequest struct { + *aws.Request + Input *GetServiceLinkedRoleDeletionStatusInput + Copy func(*GetServiceLinkedRoleDeletionStatusInput) GetServiceLinkedRoleDeletionStatusRequest +} + +// Send marshals and sends the GetServiceLinkedRoleDeletionStatus API request. +func (r GetServiceLinkedRoleDeletionStatusRequest) Send(ctx context.Context) (*GetServiceLinkedRoleDeletionStatusResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetServiceLinkedRoleDeletionStatusResponse{ + GetServiceLinkedRoleDeletionStatusOutput: r.Request.Data.(*GetServiceLinkedRoleDeletionStatusOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetServiceLinkedRoleDeletionStatusResponse is the response type for the +// GetServiceLinkedRoleDeletionStatus API operation. +type GetServiceLinkedRoleDeletionStatusResponse struct { + *GetServiceLinkedRoleDeletionStatusOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetServiceLinkedRoleDeletionStatus request. 
+func (r *GetServiceLinkedRoleDeletionStatusResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go new file mode 100644 index 00000000..d582a3e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUser.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserRequest +type GetUserInput struct { + _ struct{} `type:"structure"` + + // The name of the user to get information about. + // + // This parameter is optional. If it is not included, it defaults to the user + // making the request. This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetUserInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetUser request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserResponse +type GetUserOutput struct { + _ struct{} `type:"structure"` + + // A structure containing details about the IAM user. + // + // Due to a service issue, password last used data does not include password + // use from May 3, 2018 22:50 PDT to May 23, 2018 14:08 PDT. This affects last + // sign-in (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_finding-unused.html) + // dates shown in the IAM console and password last used dates in the IAM credential + // report (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html), + // and returned by this GetUser API. If users signed in during the affected + // time, the password last used date that is returned is the date the user last + // signed in before May 3, 2018. For users that signed in after May 23, 2018 + // 14:08 PDT, the returned password last used date is accurate. + // + // You can use password last used information to identify unused credentials + // for deletion. For example, you might delete users who did not sign in to + // AWS in the last 90 days. In cases like this, we recommend that you adjust + // your evaluation window to include dates after May 23, 2018. Alternatively, + // if your users use access keys to access AWS programmatically you can refer + // to access key last used information because it is accurate for all dates. 
+ // + // User is a required field + User *User `type:"structure" required:"true"` +} + +// String returns the string representation +func (s GetUserOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetUser = "GetUser" + +// GetUserRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves information about the specified IAM user, including the user's +// creation date, path, unique ID, and ARN. +// +// If you do not specify a user name, IAM determines the user name implicitly +// based on the AWS access key ID used to sign the request to this API. +// +// // Example sending a request using GetUserRequest. +// req := client.GetUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUser +func (c *Client) GetUserRequest(input *GetUserInput) GetUserRequest { + op := &aws.Operation{ + Name: opGetUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserInput{} + } + + req := c.newRequest(op, input, &GetUserOutput{}) + return GetUserRequest{Request: req, Input: input, Copy: c.GetUserRequest} +} + +// GetUserRequest is the request type for the +// GetUser API operation. +type GetUserRequest struct { + *aws.Request + Input *GetUserInput + Copy func(*GetUserInput) GetUserRequest +} + +// Send marshals and sends the GetUser API request. +func (r GetUserRequest) Send(ctx context.Context) (*GetUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetUserResponse{ + GetUserOutput: r.Request.Data.(*GetUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetUserResponse is the response type for the +// GetUser API operation. +type GetUserResponse struct { + *GetUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetUser request. +func (r *GetUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go new file mode 100644 index 00000000..00c09bcc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_GetUserPolicy.go @@ -0,0 +1,176 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserPolicyRequest +type GetUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The name of the policy document to get. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user who the policy is associated with. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetUserPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "GetUserPolicyInput"} + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful GetUserPolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserPolicyResponse +type GetUserPolicyOutput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // IAM stores policies in JSON format. However, resources that were created + // using AWS CloudFormation templates can be formatted in YAML. AWS CloudFormation + // always converts a YAML policy to JSON format before submitting it to IAM. + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The user the policy is associated with. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opGetUserPolicy = "GetUserPolicy" + +// GetUserPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves the specified inline policy document that is embedded in the specified +// IAM user. +// +// Policies returned by this API are URL-encoded compliant with RFC 3986 (https://tools.ietf.org/html/rfc3986). +// You can use a URL decoding method to convert the policy back to plain JSON +// text. For example, if you use Java, you can use the decode method of the +// java.net.URLDecoder utility class in the Java SDK. Other languages and SDKs +// provide similar functionality. +// +// An IAM user can also have managed policies attached to it. To retrieve a +// managed policy document that is attached to a user, use GetPolicy to determine +// the policy's default version. Then use GetPolicyVersion to retrieve the policy +// document. +// +// For more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using GetUserPolicyRequest. 
+// req := client.GetUserPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GetUserPolicy +func (c *Client) GetUserPolicyRequest(input *GetUserPolicyInput) GetUserPolicyRequest { + op := &aws.Operation{ + Name: opGetUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetUserPolicyInput{} + } + + req := c.newRequest(op, input, &GetUserPolicyOutput{}) + return GetUserPolicyRequest{Request: req, Input: input, Copy: c.GetUserPolicyRequest} +} + +// GetUserPolicyRequest is the request type for the +// GetUserPolicy API operation. +type GetUserPolicyRequest struct { + *aws.Request + Input *GetUserPolicyInput + Copy func(*GetUserPolicyInput) GetUserPolicyRequest +} + +// Send marshals and sends the GetUserPolicy API request. +func (r GetUserPolicyRequest) Send(ctx context.Context) (*GetUserPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &GetUserPolicyResponse{ + GetUserPolicyOutput: r.Request.Data.(*GetUserPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// GetUserPolicyResponse is the response type for the +// GetUserPolicy API operation. +type GetUserPolicyResponse struct { + *GetUserPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// GetUserPolicy request. +func (r *GetUserPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go new file mode 100644 index 00000000..3afdc9da --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccessKeys.go @@ -0,0 +1,225 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccessKeysRequest +type ListAccessKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccessKeysInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAccessKeysInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAccessKeys request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccessKeysResponse +type ListAccessKeysOutput struct { + _ struct{} `type:"structure"` + + // A list of objects containing metadata about the access keys. + // + // AccessKeyMetadata is a required field + AccessKeyMetadata []AccessKeyMetadata `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAccessKeysOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAccessKeys = "ListAccessKeys" + +// ListAccessKeysRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns information about the access key IDs associated with the specified +// IAM user. If there is none, the operation returns an empty list. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// To ensure the security of your AWS account, the secret access key is accessible +// only during key and user creation. +// +// // Example sending a request using ListAccessKeysRequest. 
+// req := client.ListAccessKeysRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccessKeys +func (c *Client) ListAccessKeysRequest(input *ListAccessKeysInput) ListAccessKeysRequest { + op := &aws.Operation{ + Name: opListAccessKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccessKeysInput{} + } + + req := c.newRequest(op, input, &ListAccessKeysOutput{}) + return ListAccessKeysRequest{Request: req, Input: input, Copy: c.ListAccessKeysRequest} +} + +// ListAccessKeysRequest is the request type for the +// ListAccessKeys API operation. +type ListAccessKeysRequest struct { + *aws.Request + Input *ListAccessKeysInput + Copy func(*ListAccessKeysInput) ListAccessKeysRequest +} + +// Send marshals and sends the ListAccessKeys API request. +func (r ListAccessKeysRequest) Send(ctx context.Context) (*ListAccessKeysResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAccessKeysResponse{ + ListAccessKeysOutput: r.Request.Data.(*ListAccessKeysOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAccessKeysRequestPaginator returns a paginator for ListAccessKeys. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAccessKeysRequest(input) +// p := iam.NewListAccessKeysRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAccessKeysPaginator(req ListAccessKeysRequest) ListAccessKeysPaginator { + return ListAccessKeysPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAccessKeysInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAccessKeysPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListAccessKeysPaginator struct { + aws.Pager +} + +func (p *ListAccessKeysPaginator) CurrentPage() *ListAccessKeysOutput { + return p.Pager.CurrentPage().(*ListAccessKeysOutput) +} + +// ListAccessKeysResponse is the response type for the +// ListAccessKeys API operation. +type ListAccessKeysResponse struct { + *ListAccessKeysOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAccessKeys request. 
+func (r *ListAccessKeysResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go new file mode 100644 index 00000000..f55d60a4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAccountAliases.go @@ -0,0 +1,206 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccountAliasesRequest +type ListAccountAliasesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListAccountAliasesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAccountAliasesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAccountAliasesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAccountAliases request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccountAliasesResponse +type ListAccountAliasesOutput struct { + _ struct{} `type:"structure"` + + // A list of aliases associated with the account. AWS supports only one alias + // per account. + // + // AccountAliases is a required field + AccountAliases []string `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAccountAliasesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAccountAliases = "ListAccountAliases" + +// ListAccountAliasesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the account alias associated with the AWS account (Note: you can have +// only one). For information about using an AWS account alias, see Using an +// Alias for Your AWS Account ID (https://docs.aws.amazon.com/IAM/latest/UserGuide/AccountAlias.html) +// in the IAM User Guide. +// +// // Example sending a request using ListAccountAliasesRequest. +// req := client.ListAccountAliasesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAccountAliases +func (c *Client) ListAccountAliasesRequest(input *ListAccountAliasesInput) ListAccountAliasesRequest { + op := &aws.Operation{ + Name: opListAccountAliases, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAccountAliasesInput{} + } + + req := c.newRequest(op, input, &ListAccountAliasesOutput{}) + return ListAccountAliasesRequest{Request: req, Input: input, Copy: c.ListAccountAliasesRequest} +} + +// ListAccountAliasesRequest is the request type for the +// ListAccountAliases API operation. +type ListAccountAliasesRequest struct { + *aws.Request + Input *ListAccountAliasesInput + Copy func(*ListAccountAliasesInput) ListAccountAliasesRequest +} + +// Send marshals and sends the ListAccountAliases API request. +func (r ListAccountAliasesRequest) Send(ctx context.Context) (*ListAccountAliasesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAccountAliasesResponse{ + ListAccountAliasesOutput: r.Request.Data.(*ListAccountAliasesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAccountAliasesRequestPaginator returns a paginator for ListAccountAliases. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAccountAliasesRequest(input) +// p := iam.NewListAccountAliasesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAccountAliasesPaginator(req ListAccountAliasesRequest) ListAccountAliasesPaginator { + return ListAccountAliasesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAccountAliasesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAccountAliasesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
+type ListAccountAliasesPaginator struct { + aws.Pager +} + +func (p *ListAccountAliasesPaginator) CurrentPage() *ListAccountAliasesOutput { + return p.Pager.CurrentPage().(*ListAccountAliasesOutput) +} + +// ListAccountAliasesResponse is the response type for the +// ListAccountAliases API operation. +type ListAccountAliasesResponse struct { + *ListAccountAliasesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAccountAliases request. +func (r *ListAccountAliasesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go new file mode 100644 index 00000000..42d6aa08 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedGroupPolicies.go @@ -0,0 +1,239 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedGroupPoliciesRequest +type ListAttachedGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name (friendly name, not ARN) of the group to list attached policies + // for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. 
+ PathPrefix *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListAttachedGroupPoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAttachedGroupPoliciesInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAttachedGroupPolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedGroupPoliciesResponse +type ListAttachedGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAttachedGroupPolicies = "ListAttachedGroupPolicies" + +// ListAttachedGroupPoliciesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists all managed policies that are attached to the specified IAM group. +// +// An IAM group can also have inline policies embedded with it. To list the +// inline policies for a group, use the ListGroupPolicies API. For information +// about policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the operation +// returns an empty list. +// +// // Example sending a request using ListAttachedGroupPoliciesRequest. 
+// req := client.ListAttachedGroupPoliciesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedGroupPolicies +func (c *Client) ListAttachedGroupPoliciesRequest(input *ListAttachedGroupPoliciesInput) ListAttachedGroupPoliciesRequest { + op := &aws.Operation{ + Name: opListAttachedGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedGroupPoliciesInput{} + } + + req := c.newRequest(op, input, &ListAttachedGroupPoliciesOutput{}) + return ListAttachedGroupPoliciesRequest{Request: req, Input: input, Copy: c.ListAttachedGroupPoliciesRequest} +} + +// ListAttachedGroupPoliciesRequest is the request type for the +// ListAttachedGroupPolicies API operation. +type ListAttachedGroupPoliciesRequest struct { + *aws.Request + Input *ListAttachedGroupPoliciesInput + Copy func(*ListAttachedGroupPoliciesInput) ListAttachedGroupPoliciesRequest +} + +// Send marshals and sends the ListAttachedGroupPolicies API request. +func (r ListAttachedGroupPoliciesRequest) Send(ctx context.Context) (*ListAttachedGroupPoliciesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAttachedGroupPoliciesResponse{ + ListAttachedGroupPoliciesOutput: r.Request.Data.(*ListAttachedGroupPoliciesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAttachedGroupPoliciesRequestPaginator returns a paginator for ListAttachedGroupPolicies. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAttachedGroupPoliciesRequest(input) +// p := iam.NewListAttachedGroupPoliciesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAttachedGroupPoliciesPaginator(req ListAttachedGroupPoliciesRequest) ListAttachedGroupPoliciesPaginator { + return ListAttachedGroupPoliciesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAttachedGroupPoliciesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAttachedGroupPoliciesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListAttachedGroupPoliciesPaginator struct { + aws.Pager +} + +func (p *ListAttachedGroupPoliciesPaginator) CurrentPage() *ListAttachedGroupPoliciesOutput { + return p.Pager.CurrentPage().(*ListAttachedGroupPoliciesOutput) +} + +// ListAttachedGroupPoliciesResponse is the response type for the +// ListAttachedGroupPolicies API operation. +type ListAttachedGroupPoliciesResponse struct { + *ListAttachedGroupPoliciesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAttachedGroupPolicies request. 
+func (r *ListAttachedGroupPoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go new file mode 100644 index 00000000..e314b370 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedRolePolicies.go @@ -0,0 +1,238 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedRolePoliciesRequest +type ListAttachedRolePoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the role to list attached policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAttachedRolePoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAttachedRolePoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAttachedRolePolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedRolePoliciesResponse +type ListAttachedRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAttachedRolePolicies = "ListAttachedRolePolicies" + +// ListAttachedRolePoliciesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists all managed policies that are attached to the specified IAM role. +// +// An IAM role can also have inline policies embedded with it. To list the inline +// policies for a role, use the ListRolePolicies API. For information about +// policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified role (or none that match the specified path prefix), the operation +// returns an empty list. +// +// // Example sending a request using ListAttachedRolePoliciesRequest. 
+// req := client.ListAttachedRolePoliciesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedRolePolicies +func (c *Client) ListAttachedRolePoliciesRequest(input *ListAttachedRolePoliciesInput) ListAttachedRolePoliciesRequest { + op := &aws.Operation{ + Name: opListAttachedRolePolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedRolePoliciesInput{} + } + + req := c.newRequest(op, input, &ListAttachedRolePoliciesOutput{}) + return ListAttachedRolePoliciesRequest{Request: req, Input: input, Copy: c.ListAttachedRolePoliciesRequest} +} + +// ListAttachedRolePoliciesRequest is the request type for the +// ListAttachedRolePolicies API operation. +type ListAttachedRolePoliciesRequest struct { + *aws.Request + Input *ListAttachedRolePoliciesInput + Copy func(*ListAttachedRolePoliciesInput) ListAttachedRolePoliciesRequest +} + +// Send marshals and sends the ListAttachedRolePolicies API request. +func (r ListAttachedRolePoliciesRequest) Send(ctx context.Context) (*ListAttachedRolePoliciesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAttachedRolePoliciesResponse{ + ListAttachedRolePoliciesOutput: r.Request.Data.(*ListAttachedRolePoliciesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAttachedRolePoliciesRequestPaginator returns a paginator for ListAttachedRolePolicies. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAttachedRolePoliciesRequest(input) +// p := iam.NewListAttachedRolePoliciesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAttachedRolePoliciesPaginator(req ListAttachedRolePoliciesRequest) ListAttachedRolePoliciesPaginator { + return ListAttachedRolePoliciesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAttachedRolePoliciesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAttachedRolePoliciesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListAttachedRolePoliciesPaginator struct { + aws.Pager +} + +func (p *ListAttachedRolePoliciesPaginator) CurrentPage() *ListAttachedRolePoliciesOutput { + return p.Pager.CurrentPage().(*ListAttachedRolePoliciesOutput) +} + +// ListAttachedRolePoliciesResponse is the response type for the +// ListAttachedRolePolicies API operation. +type ListAttachedRolePoliciesResponse struct { + *ListAttachedRolePoliciesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAttachedRolePolicies request. 
+func (r *ListAttachedRolePoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go new file mode 100644 index 00000000..d5c5c1d8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListAttachedUserPolicies.go @@ -0,0 +1,238 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedUserPoliciesRequest +type ListAttachedUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `type:"string"` + + // The name (friendly name, not ARN) of the user to list attached policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListAttachedUserPoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListAttachedUserPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListAttachedUserPolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedUserPoliciesResponse +type ListAttachedUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A list of the attached policies. + AttachedPolicies []AttachedPolicy `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListAttachedUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListAttachedUserPolicies = "ListAttachedUserPolicies" + +// ListAttachedUserPoliciesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists all managed policies that are attached to the specified IAM user. +// +// An IAM user can also have inline policies embedded with it. To list the inline +// policies for a user, use the ListUserPolicies API. For information about +// policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. You +// can use the PathPrefix parameter to limit the list of policies to only those +// matching the specified path prefix. If there are no policies attached to +// the specified group (or none that match the specified path prefix), the operation +// returns an empty list. +// +// // Example sending a request using ListAttachedUserPoliciesRequest. 
+// req := client.ListAttachedUserPoliciesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListAttachedUserPolicies +func (c *Client) ListAttachedUserPoliciesRequest(input *ListAttachedUserPoliciesInput) ListAttachedUserPoliciesRequest { + op := &aws.Operation{ + Name: opListAttachedUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListAttachedUserPoliciesInput{} + } + + req := c.newRequest(op, input, &ListAttachedUserPoliciesOutput{}) + return ListAttachedUserPoliciesRequest{Request: req, Input: input, Copy: c.ListAttachedUserPoliciesRequest} +} + +// ListAttachedUserPoliciesRequest is the request type for the +// ListAttachedUserPolicies API operation. +type ListAttachedUserPoliciesRequest struct { + *aws.Request + Input *ListAttachedUserPoliciesInput + Copy func(*ListAttachedUserPoliciesInput) ListAttachedUserPoliciesRequest +} + +// Send marshals and sends the ListAttachedUserPolicies API request. +func (r ListAttachedUserPoliciesRequest) Send(ctx context.Context) (*ListAttachedUserPoliciesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListAttachedUserPoliciesResponse{ + ListAttachedUserPoliciesOutput: r.Request.Data.(*ListAttachedUserPoliciesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListAttachedUserPoliciesRequestPaginator returns a paginator for ListAttachedUserPolicies. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListAttachedUserPoliciesRequest(input) +// p := iam.NewListAttachedUserPoliciesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListAttachedUserPoliciesPaginator(req ListAttachedUserPoliciesRequest) ListAttachedUserPoliciesPaginator { + return ListAttachedUserPoliciesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListAttachedUserPoliciesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListAttachedUserPoliciesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListAttachedUserPoliciesPaginator struct { + aws.Pager +} + +func (p *ListAttachedUserPoliciesPaginator) CurrentPage() *ListAttachedUserPoliciesOutput { + return p.Pager.CurrentPage().(*ListAttachedUserPoliciesOutput) +} + +// ListAttachedUserPoliciesResponse is the response type for the +// ListAttachedUserPolicies API operation. +type ListAttachedUserPoliciesResponse struct { + *ListAttachedUserPoliciesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListAttachedUserPolicies request. 
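+//
+// A hypothetical sketch draining every page of attached user policies. Note
+// that the paginator constructor defined above is
+// NewListAttachedUserPoliciesPaginator, even though its doc comment refers to
+// a NewListAttachedUserPoliciesRequestPaginator. cfg and userName are assumed,
+// as is the PolicyArn field of AttachedPolicy.
+//
+//    client := iam.New(cfg)
+//    req := client.ListAttachedUserPoliciesRequest(&iam.ListAttachedUserPoliciesInput{
+//        UserName: aws.String(userName),
+//    })
+//    p := iam.NewListAttachedUserPoliciesPaginator(req)
+//    for p.Next(context.TODO()) {
+//        for _, pol := range p.CurrentPage().AttachedPolicies {
+//            fmt.Println(aws.StringValue(pol.PolicyArn))
+//        }
+//    }
+//    if err := p.Err(); err != nil {
+//        return err
+//    }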
+func (r *ListAttachedUserPoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go new file mode 100644 index 00000000..49b6be52 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListEntitiesForPolicy.go @@ -0,0 +1,261 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListEntitiesForPolicyRequest +type ListEntitiesForPolicyInput struct { + _ struct{} `type:"structure"` + + // The entity type to use for filtering the results. + // + // For example, when EntityFilter is Role, only the roles that are attached + // to the specified policy are returned. This parameter is optional. If it is + // not included, all attached entities (users, groups, and roles) are returned. + // The argument for this parameter must be one of the valid values listed below. + EntityFilter EntityType `type:"string" enum:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all entities. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The policy usage method to use for filtering the results. + // + // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. 
+ // To list only the policies used to set permissions boundaries, set the value + // to PermissionsBoundary. + // + // This parameter is optional. If it is not included, all policies are returned. + PolicyUsageFilter PolicyUsageType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListEntitiesForPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListEntitiesForPolicyInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PathPrefix", 1)) + } + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListEntitiesForPolicy request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListEntitiesForPolicyResponse +type ListEntitiesForPolicyOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of IAM groups that the policy is attached to. + PolicyGroups []PolicyGroup `type:"list"` + + // A list of IAM roles that the policy is attached to. + PolicyRoles []PolicyRole `type:"list"` + + // A list of IAM users that the policy is attached to. + PolicyUsers []PolicyUser `type:"list"` +} + +// String returns the string representation +func (s ListEntitiesForPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opListEntitiesForPolicy = "ListEntitiesForPolicy" + +// ListEntitiesForPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists all IAM users, groups, and roles that the specified managed policy +// is attached to. +// +// You can use the optional EntityFilter parameter to limit the results to a +// particular type of entity (users, groups, or roles). For example, to list +// only the roles that are attached to the specified policy, set EntityFilter +// to Role. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListEntitiesForPolicyRequest. 
+// req := client.ListEntitiesForPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListEntitiesForPolicy +func (c *Client) ListEntitiesForPolicyRequest(input *ListEntitiesForPolicyInput) ListEntitiesForPolicyRequest { + op := &aws.Operation{ + Name: opListEntitiesForPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListEntitiesForPolicyInput{} + } + + req := c.newRequest(op, input, &ListEntitiesForPolicyOutput{}) + return ListEntitiesForPolicyRequest{Request: req, Input: input, Copy: c.ListEntitiesForPolicyRequest} +} + +// ListEntitiesForPolicyRequest is the request type for the +// ListEntitiesForPolicy API operation. +type ListEntitiesForPolicyRequest struct { + *aws.Request + Input *ListEntitiesForPolicyInput + Copy func(*ListEntitiesForPolicyInput) ListEntitiesForPolicyRequest +} + +// Send marshals and sends the ListEntitiesForPolicy API request. +func (r ListEntitiesForPolicyRequest) Send(ctx context.Context) (*ListEntitiesForPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListEntitiesForPolicyResponse{ + ListEntitiesForPolicyOutput: r.Request.Data.(*ListEntitiesForPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListEntitiesForPolicyRequestPaginator returns a paginator for ListEntitiesForPolicy. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListEntitiesForPolicyRequest(input) +// p := iam.NewListEntitiesForPolicyRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListEntitiesForPolicyPaginator(req ListEntitiesForPolicyRequest) ListEntitiesForPolicyPaginator { + return ListEntitiesForPolicyPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListEntitiesForPolicyInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListEntitiesForPolicyPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListEntitiesForPolicyPaginator struct { + aws.Pager +} + +func (p *ListEntitiesForPolicyPaginator) CurrentPage() *ListEntitiesForPolicyOutput { + return p.Pager.CurrentPage().(*ListEntitiesForPolicyOutput) +} + +// ListEntitiesForPolicyResponse is the response type for the +// ListEntitiesForPolicy API operation. +type ListEntitiesForPolicyResponse struct { + *ListEntitiesForPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListEntitiesForPolicy request. 
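+//
+// A hypothetical sketch of manual Marker/IsTruncated pagination restricted to
+// roles, as an alternative to the paginator above. policyArn and client are
+// assumed variables; iam.EntityTypeRole and the RoleName field of PolicyRole
+// are assumed names from elsewhere in this package.
+//
+//    input := &iam.ListEntitiesForPolicyInput{
+//        PolicyArn:    aws.String(policyArn),
+//        EntityFilter: iam.EntityTypeRole, // assumed enum constant for "Role"
+//    }
+//    for {
+//        resp, err := client.ListEntitiesForPolicyRequest(input).Send(context.TODO())
+//        if err != nil {
+//            return err
+//        }
+//        for _, role := range resp.PolicyRoles {
+//            fmt.Println(aws.StringValue(role.RoleName))
+//        }
+//        if resp.IsTruncated == nil || !*resp.IsTruncated {
+//            break
+//        }
+//        input.Marker = resp.Marker
+//    }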
+func (r *ListEntitiesForPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go new file mode 100644 index 00000000..6db8b073 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupPolicies.go @@ -0,0 +1,233 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupPoliciesRequest +type ListGroupPoliciesInput struct { + _ struct{} `type:"structure"` + + // The name of the group to list policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListGroupPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupPoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListGroupPoliciesInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListGroupPolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupPoliciesResponse +type ListGroupPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. 
Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy names. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyNames is a required field + PolicyNames []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListGroupPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListGroupPolicies = "ListGroupPolicies" + +// ListGroupPoliciesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the names of the inline policies that are embedded in the specified +// IAM group. +// +// An IAM group can also have managed policies attached to it. To list the managed +// policies that are attached to a group, use ListAttachedGroupPolicies. For +// more information about policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified group, the operation +// returns an empty list. +// +// // Example sending a request using ListGroupPoliciesRequest. +// req := client.ListGroupPoliciesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupPolicies +func (c *Client) ListGroupPoliciesRequest(input *ListGroupPoliciesInput) ListGroupPoliciesRequest { + op := &aws.Operation{ + Name: opListGroupPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupPoliciesInput{} + } + + req := c.newRequest(op, input, &ListGroupPoliciesOutput{}) + return ListGroupPoliciesRequest{Request: req, Input: input, Copy: c.ListGroupPoliciesRequest} +} + +// ListGroupPoliciesRequest is the request type for the +// ListGroupPolicies API operation. +type ListGroupPoliciesRequest struct { + *aws.Request + Input *ListGroupPoliciesInput + Copy func(*ListGroupPoliciesInput) ListGroupPoliciesRequest +} + +// Send marshals and sends the ListGroupPolicies API request. +func (r ListGroupPoliciesRequest) Send(ctx context.Context) (*ListGroupPoliciesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListGroupPoliciesResponse{ + ListGroupPoliciesOutput: r.Request.Data.(*ListGroupPoliciesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListGroupPoliciesRequestPaginator returns a paginator for ListGroupPolicies. 
+// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListGroupPoliciesRequest(input) +// p := iam.NewListGroupPoliciesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListGroupPoliciesPaginator(req ListGroupPoliciesRequest) ListGroupPoliciesPaginator { + return ListGroupPoliciesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListGroupPoliciesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListGroupPoliciesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListGroupPoliciesPaginator struct { + aws.Pager +} + +func (p *ListGroupPoliciesPaginator) CurrentPage() *ListGroupPoliciesOutput { + return p.Pager.CurrentPage().(*ListGroupPoliciesOutput) +} + +// ListGroupPoliciesResponse is the response type for the +// ListGroupPolicies API operation. +type ListGroupPoliciesResponse struct { + *ListGroupPoliciesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListGroupPolicies request. +func (r *ListGroupPoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go new file mode 100644 index 00000000..b1ce3a9d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroups.go @@ -0,0 +1,219 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsRequest +type ListGroupsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. 
For example, the prefix /division_abc/subdivision_xyz/ + // gets all groups whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all groups. This parameter allows (through its regex pattern + // (http://wikipedia.org/wiki/regex)) a string of characters consisting of either + // a forward slash (/) by itself or a string that must begin and end with forward + // slashes. In addition, it can contain any ASCII character from the ! (\u0021) + // through the DEL character (\u007F), including most punctuation characters, + // digits, and upper and lowercased letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListGroupsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListGroupsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListGroups request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsResponse +type ListGroupsOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + // + // Groups is a required field + Groups []Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListGroupsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListGroups = "ListGroups" + +// ListGroupsRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the IAM groups that have the specified path prefix. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListGroupsRequest. 
+// req := client.ListGroupsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroups +func (c *Client) ListGroupsRequest(input *ListGroupsInput) ListGroupsRequest { + op := &aws.Operation{ + Name: opListGroups, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsInput{} + } + + req := c.newRequest(op, input, &ListGroupsOutput{}) + return ListGroupsRequest{Request: req, Input: input, Copy: c.ListGroupsRequest} +} + +// ListGroupsRequest is the request type for the +// ListGroups API operation. +type ListGroupsRequest struct { + *aws.Request + Input *ListGroupsInput + Copy func(*ListGroupsInput) ListGroupsRequest +} + +// Send marshals and sends the ListGroups API request. +func (r ListGroupsRequest) Send(ctx context.Context) (*ListGroupsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListGroupsResponse{ + ListGroupsOutput: r.Request.Data.(*ListGroupsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListGroupsRequestPaginator returns a paginator for ListGroups. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListGroupsRequest(input) +// p := iam.NewListGroupsRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListGroupsPaginator(req ListGroupsRequest) ListGroupsPaginator { + return ListGroupsPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListGroupsInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListGroupsPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListGroupsPaginator struct { + aws.Pager +} + +func (p *ListGroupsPaginator) CurrentPage() *ListGroupsOutput { + return p.Pager.CurrentPage().(*ListGroupsOutput) +} + +// ListGroupsResponse is the response type for the +// ListGroups API operation. +type ListGroupsResponse struct { + *ListGroupsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListGroups request. +func (r *ListGroupsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go new file mode 100644 index 00000000..2146d325 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListGroupsForUser.go @@ -0,0 +1,220 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsForUserRequest +type ListGroupsForUserInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list groups for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListGroupsForUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListGroupsForUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListGroupsForUserInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListGroupsForUser request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsForUserResponse +type ListGroupsForUserOutput struct { + _ struct{} `type:"structure"` + + // A list of groups. + // + // Groups is a required field + Groups []Group `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListGroupsForUserOutput) String() string { + return awsutil.Prettify(s) +} + +const opListGroupsForUser = "ListGroupsForUser" + +// ListGroupsForUserRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the IAM groups that the specified IAM user belongs to. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListGroupsForUserRequest. +// req := client.ListGroupsForUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListGroupsForUser +func (c *Client) ListGroupsForUserRequest(input *ListGroupsForUserInput) ListGroupsForUserRequest { + op := &aws.Operation{ + Name: opListGroupsForUser, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListGroupsForUserInput{} + } + + req := c.newRequest(op, input, &ListGroupsForUserOutput{}) + return ListGroupsForUserRequest{Request: req, Input: input, Copy: c.ListGroupsForUserRequest} +} + +// ListGroupsForUserRequest is the request type for the +// ListGroupsForUser API operation. +type ListGroupsForUserRequest struct { + *aws.Request + Input *ListGroupsForUserInput + Copy func(*ListGroupsForUserInput) ListGroupsForUserRequest +} + +// Send marshals and sends the ListGroupsForUser API request. +func (r ListGroupsForUserRequest) Send(ctx context.Context) (*ListGroupsForUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListGroupsForUserResponse{ + ListGroupsForUserOutput: r.Request.Data.(*ListGroupsForUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListGroupsForUserRequestPaginator returns a paginator for ListGroupsForUser. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListGroupsForUserRequest(input) +// p := iam.NewListGroupsForUserRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListGroupsForUserPaginator(req ListGroupsForUserRequest) ListGroupsForUserPaginator { + return ListGroupsForUserPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListGroupsForUserInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListGroupsForUserPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListGroupsForUserPaginator struct { + aws.Pager +} + +func (p *ListGroupsForUserPaginator) CurrentPage() *ListGroupsForUserOutput { + return p.Pager.CurrentPage().(*ListGroupsForUserOutput) +} + +// ListGroupsForUserResponse is the response type for the +// ListGroupsForUser API operation. 
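+//
+// Because the response embeds the output type, the output's fields are
+// reached directly on the response. A hypothetical sketch; client and
+// userName are assumed, and GroupName is an assumed field of the Group type.
+//
+//    resp, err := client.ListGroupsForUserRequest(&iam.ListGroupsForUserInput{
+//        UserName: aws.String(userName),
+//    }).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    for _, g := range resp.Groups { // promoted from ListGroupsForUserOutput
+//        fmt.Println(aws.StringValue(g.GroupName))
+//    }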
+type ListGroupsForUserResponse struct { + *ListGroupsForUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListGroupsForUser request. +func (r *ListGroupsForUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go new file mode 100644 index 00000000..6fbcec9f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfiles.go @@ -0,0 +1,221 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesRequest +type ListInstanceProfilesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all instance profiles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all instance profiles. This parameter allows (through its regex + // pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting + // of either a forward slash (/) by itself or a string that must begin and end + // with forward slashes. In addition, it can contain any ASCII character from + // the ! (\u0021) through the DEL character (\u007F), including most punctuation + // characters, digits, and upper and lowercased letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListInstanceProfilesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListInstanceProfilesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListInstanceProfiles request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesResponse +type ListInstanceProfilesOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + // + // InstanceProfiles is a required field + InstanceProfiles []InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListInstanceProfiles = "ListInstanceProfiles" + +// ListInstanceProfilesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the instance profiles that have the specified path prefix. If there +// are none, the operation returns an empty list. For more information about +// instance profiles, go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListInstanceProfilesRequest. +// req := client.ListInstanceProfilesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfiles +func (c *Client) ListInstanceProfilesRequest(input *ListInstanceProfilesInput) ListInstanceProfilesRequest { + op := &aws.Operation{ + Name: opListInstanceProfiles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesInput{} + } + + req := c.newRequest(op, input, &ListInstanceProfilesOutput{}) + return ListInstanceProfilesRequest{Request: req, Input: input, Copy: c.ListInstanceProfilesRequest} +} + +// ListInstanceProfilesRequest is the request type for the +// ListInstanceProfiles API operation. 
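+//
+// The Copy field lets the request be re-issued against an independent set of
+// parameters; the paginator's NewRequest hook below uses exactly this so that
+// every page works on its own copy of the original input. A minimal sketch of
+// the same pattern (client is an assumed *Client value):
+//
+//    req := client.ListInstanceProfilesRequest(&iam.ListInstanceProfilesInput{
+//        PathPrefix: aws.String("/application_abc/"),
+//    })
+//    tmp := *req.Input       // shallow copy of the parameters
+//    clone := req.Copy(&tmp) // fresh request over the copied input
+//    _ = clone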
+type ListInstanceProfilesRequest struct { + *aws.Request + Input *ListInstanceProfilesInput + Copy func(*ListInstanceProfilesInput) ListInstanceProfilesRequest +} + +// Send marshals and sends the ListInstanceProfiles API request. +func (r ListInstanceProfilesRequest) Send(ctx context.Context) (*ListInstanceProfilesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListInstanceProfilesResponse{ + ListInstanceProfilesOutput: r.Request.Data.(*ListInstanceProfilesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListInstanceProfilesRequestPaginator returns a paginator for ListInstanceProfiles. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListInstanceProfilesRequest(input) +// p := iam.NewListInstanceProfilesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListInstanceProfilesPaginator(req ListInstanceProfilesRequest) ListInstanceProfilesPaginator { + return ListInstanceProfilesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListInstanceProfilesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListInstanceProfilesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListInstanceProfilesPaginator struct { + aws.Pager +} + +func (p *ListInstanceProfilesPaginator) CurrentPage() *ListInstanceProfilesOutput { + return p.Pager.CurrentPage().(*ListInstanceProfilesOutput) +} + +// ListInstanceProfilesResponse is the response type for the +// ListInstanceProfiles API operation. +type ListInstanceProfilesResponse struct { + *ListInstanceProfilesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListInstanceProfiles request. +func (r *ListInstanceProfilesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go new file mode 100644 index 00000000..17aa5989 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListInstanceProfilesForRole.go @@ -0,0 +1,222 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesForRoleRequest +type ListInstanceProfilesForRoleInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the role to list instance profiles for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListInstanceProfilesForRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListInstanceProfilesForRoleInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListInstanceProfilesForRole request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesForRoleResponse +type ListInstanceProfilesForRoleOutput struct { + _ struct{} `type:"structure"` + + // A list of instance profiles. + // + // InstanceProfiles is a required field + InstanceProfiles []InstanceProfile `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListInstanceProfilesForRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opListInstanceProfilesForRole = "ListInstanceProfilesForRole" + +// ListInstanceProfilesForRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. 
+// +// Lists the instance profiles that have the specified associated IAM role. +// If there are none, the operation returns an empty list. For more information +// about instance profiles, go to About Instance Profiles (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListInstanceProfilesForRoleRequest. +// req := client.ListInstanceProfilesForRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListInstanceProfilesForRole +func (c *Client) ListInstanceProfilesForRoleRequest(input *ListInstanceProfilesForRoleInput) ListInstanceProfilesForRoleRequest { + op := &aws.Operation{ + Name: opListInstanceProfilesForRole, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListInstanceProfilesForRoleInput{} + } + + req := c.newRequest(op, input, &ListInstanceProfilesForRoleOutput{}) + return ListInstanceProfilesForRoleRequest{Request: req, Input: input, Copy: c.ListInstanceProfilesForRoleRequest} +} + +// ListInstanceProfilesForRoleRequest is the request type for the +// ListInstanceProfilesForRole API operation. +type ListInstanceProfilesForRoleRequest struct { + *aws.Request + Input *ListInstanceProfilesForRoleInput + Copy func(*ListInstanceProfilesForRoleInput) ListInstanceProfilesForRoleRequest +} + +// Send marshals and sends the ListInstanceProfilesForRole API request. +func (r ListInstanceProfilesForRoleRequest) Send(ctx context.Context) (*ListInstanceProfilesForRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListInstanceProfilesForRoleResponse{ + ListInstanceProfilesForRoleOutput: r.Request.Data.(*ListInstanceProfilesForRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListInstanceProfilesForRoleRequestPaginator returns a paginator for ListInstanceProfilesForRole. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListInstanceProfilesForRoleRequest(input) +// p := iam.NewListInstanceProfilesForRoleRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListInstanceProfilesForRolePaginator(req ListInstanceProfilesForRoleRequest) ListInstanceProfilesForRolePaginator { + return ListInstanceProfilesForRolePaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListInstanceProfilesForRoleInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListInstanceProfilesForRolePaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
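+//
+// Next forwards its context to each underlying request (see the NewRequest
+// hook above), so one deadline bounds the entire multi-page walk. A
+// hypothetical sketch; req is built as in the example above, and
+// InstanceProfileName is an assumed field of InstanceProfile.
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    p := iam.NewListInstanceProfilesForRolePaginator(req)
+//    for p.Next(ctx) {
+//        for _, ip := range p.CurrentPage().InstanceProfiles {
+//            fmt.Println(aws.StringValue(ip.InstanceProfileName))
+//        }
+//    }
+//    if err := p.Err(); err != nil {
+//        return err
+//    }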
+type ListInstanceProfilesForRolePaginator struct { + aws.Pager +} + +func (p *ListInstanceProfilesForRolePaginator) CurrentPage() *ListInstanceProfilesForRoleOutput { + return p.Pager.CurrentPage().(*ListInstanceProfilesForRoleOutput) +} + +// ListInstanceProfilesForRoleResponse is the response type for the +// ListInstanceProfilesForRole API operation. +type ListInstanceProfilesForRoleResponse struct { + *ListInstanceProfilesForRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListInstanceProfilesForRole request. +func (r *ListInstanceProfilesForRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go new file mode 100644 index 00000000..d5bd9781 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListMFADevices.go @@ -0,0 +1,217 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListMFADevicesRequest +type ListMFADevicesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user whose MFA devices you want to list. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
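+//
+// A short sketch of checking the parameters client-side before sending
+// (the user name is illustrative only):
+//
+// input := &iam.ListMFADevicesInput{UserName: aws.String("alice")}
+// if err := input.Validate(); err != nil {
+// // the parameters would be rejected locally; inspect err
+// }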
+func (s *ListMFADevicesInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "ListMFADevicesInput"}
+ if s.Marker != nil && len(*s.Marker) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("Marker", 1))
+ }
+ if s.MaxItems != nil && *s.MaxItems < 1 {
+ invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1))
+ }
+ if s.UserName != nil && len(*s.UserName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("UserName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Contains the response to a successful ListMFADevices request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListMFADevicesResponse
+type ListMFADevicesOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A flag that indicates whether there are more items to return. If your results
+ // were truncated, you can make a subsequent pagination request using the Marker
+ // request parameter to retrieve more items. Note that IAM might return fewer
+ // than the MaxItems number of results even when there are more results available.
+ // We recommend that you check IsTruncated after every call to ensure that you
+ // receive all your results.
+ IsTruncated *bool `type:"boolean"`
+
+ // A list of MFA devices.
+ //
+ // MFADevices is a required field
+ MFADevices []MFADevice `type:"list" required:"true"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListMFADevicesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opListMFADevices = "ListMFADevices"
+
+// ListMFADevicesRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Lists the MFA devices for an IAM user. If the request includes an IAM user
+// name, then this operation lists all the MFA devices associated with the specified
+// user. If you do not specify a user name, IAM determines the user name implicitly
+// based on the AWS access key ID signing the request for this API.
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+//
+// // Example sending a request using ListMFADevicesRequest.
+// req := client.ListMFADevicesRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListMFADevices
+func (c *Client) ListMFADevicesRequest(input *ListMFADevicesInput) ListMFADevicesRequest {
+ op := &aws.Operation{
+ Name: opListMFADevices,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxItems",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMFADevicesInput{}
+ }
+
+ req := c.newRequest(op, input, &ListMFADevicesOutput{})
+ return ListMFADevicesRequest{Request: req, Input: input, Copy: c.ListMFADevicesRequest}
+}
+
+// ListMFADevicesRequest is the request type for the
+// ListMFADevices API operation.
+type ListMFADevicesRequest struct {
+ *aws.Request
+ Input *ListMFADevicesInput
+ Copy func(*ListMFADevicesInput) ListMFADevicesRequest
+}
+
+// Send marshals and sends the ListMFADevices API request.
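+//
+// A hedged example of sending with a bounded context (construction of
+// client is assumed to happen elsewhere):
+//
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// resp, err := client.ListMFADevicesRequest(&iam.ListMFADevicesInput{}).Send(ctx)
+// if err == nil {
+// fmt.Println(len(resp.MFADevices), "MFA devices")
+// }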
+func (r ListMFADevicesRequest) Send(ctx context.Context) (*ListMFADevicesResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListMFADevicesResponse{
+ ListMFADevicesOutput: r.Request.Data.(*ListMFADevicesOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewListMFADevicesPaginator returns a paginator for ListMFADevices.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false if there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.ListMFADevicesRequest(input)
+// p := iam.NewListMFADevicesPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewListMFADevicesPaginator(req ListMFADevicesRequest) ListMFADevicesPaginator {
+ return ListMFADevicesPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *ListMFADevicesInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// ListMFADevicesPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListMFADevicesPaginator struct {
+ aws.Pager
+}
+
+func (p *ListMFADevicesPaginator) CurrentPage() *ListMFADevicesOutput {
+ return p.Pager.CurrentPage().(*ListMFADevicesOutput)
+}
+
+// ListMFADevicesResponse is the response type for the
+// ListMFADevices API operation.
+type ListMFADevicesResponse struct {
+ *ListMFADevicesOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListMFADevices request.
+func (r *ListMFADevicesResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go
new file mode 100644
index 00000000..29ef400a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListOpenIDConnectProviders.go
@@ -0,0 +1,103 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListOpenIDConnectProvidersRequest
+type ListOpenIDConnectProvidersInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListOpenIDConnectProvidersInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Contains the response to a successful ListOpenIDConnectProviders request.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListOpenIDConnectProvidersResponse
+type ListOpenIDConnectProvidersOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The list of IAM OIDC provider resource objects defined in the AWS account.
+ OpenIDConnectProviderList []OpenIDConnectProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListOpenIDConnectProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +const opListOpenIDConnectProviders = "ListOpenIDConnectProviders" + +// ListOpenIDConnectProvidersRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists information about the IAM OpenID Connect (OIDC) provider resource objects +// defined in the AWS account. +// +// // Example sending a request using ListOpenIDConnectProvidersRequest. +// req := client.ListOpenIDConnectProvidersRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListOpenIDConnectProviders +func (c *Client) ListOpenIDConnectProvidersRequest(input *ListOpenIDConnectProvidersInput) ListOpenIDConnectProvidersRequest { + op := &aws.Operation{ + Name: opListOpenIDConnectProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListOpenIDConnectProvidersInput{} + } + + req := c.newRequest(op, input, &ListOpenIDConnectProvidersOutput{}) + return ListOpenIDConnectProvidersRequest{Request: req, Input: input, Copy: c.ListOpenIDConnectProvidersRequest} +} + +// ListOpenIDConnectProvidersRequest is the request type for the +// ListOpenIDConnectProviders API operation. +type ListOpenIDConnectProvidersRequest struct { + *aws.Request + Input *ListOpenIDConnectProvidersInput + Copy func(*ListOpenIDConnectProvidersInput) ListOpenIDConnectProvidersRequest +} + +// Send marshals and sends the ListOpenIDConnectProviders API request. +func (r ListOpenIDConnectProvidersRequest) Send(ctx context.Context) (*ListOpenIDConnectProvidersResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListOpenIDConnectProvidersResponse{ + ListOpenIDConnectProvidersOutput: r.Request.Data.(*ListOpenIDConnectProvidersOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListOpenIDConnectProvidersResponse is the response type for the +// ListOpenIDConnectProviders API operation. +type ListOpenIDConnectProvidersResponse struct { + *ListOpenIDConnectProvidersOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListOpenIDConnectProviders request. +func (r *ListOpenIDConnectProvidersResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go new file mode 100644 index 00000000..804d4dbb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicies.go @@ -0,0 +1,247 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesRequest +type ListPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. 
Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A flag to filter the results to only the attached policies. + // + // When OnlyAttached is true, the returned list contains only the policies that + // are attached to an IAM user, group, or role. When OnlyAttached is false, + // or when the parameter is not included, all policies are returned. + OnlyAttached *bool `type:"boolean"` + + // The path prefix for filtering the results. This parameter is optional. If + // it is not included, it defaults to a slash (/), listing all policies. This + // parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + PathPrefix *string `type:"string"` + + // The policy usage method to use for filtering the results. + // + // To list only permissions policies, set PolicyUsageFilter to PermissionsPolicy. + // To list only the policies used to set permissions boundaries, set the value + // to PermissionsBoundary. + // + // This parameter is optional. If it is not included, all policies are returned. + PolicyUsageFilter PolicyUsageType `type:"string" enum:"true"` + + // The scope to use for filtering the results. + // + // To list only AWS managed policies, set Scope to AWS. To list only the customer + // managed policies in your AWS account, set Scope to Local. + // + // This parameter is optional. If it is not included, or if it is set to All, + // all policies are returned. + Scope PolicyScopeType `type:"string" enum:"true"` +} + +// String returns the string representation +func (s ListPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListPolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesResponse +type ListPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. 
If your results
+ // were truncated, you can make a subsequent pagination request using the Marker
+ // request parameter to retrieve more items. Note that IAM might return fewer
+ // than the MaxItems number of results even when there are more results available.
+ // We recommend that you check IsTruncated after every call to ensure that you
+ // receive all your results.
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `type:"string"`
+
+ // A list of policies.
+ Policies []Policy `type:"list"`
+}
+
+// String returns the string representation
+func (s ListPoliciesOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opListPolicies = "ListPolicies"
+
+// ListPoliciesRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Lists all the managed policies that are available in your AWS account, including
+// your own customer-defined managed policies and all AWS managed policies.
+//
+// You can filter the list of policies that is returned using the optional OnlyAttached,
+// Scope, and PathPrefix parameters. For example, to list only the customer
+// managed policies in your AWS account, set Scope to Local. To list only AWS
+// managed policies, set Scope to AWS.
+//
+// You can paginate the results using the MaxItems and Marker parameters.
+//
+// For more information about managed policies, see Managed Policies and Inline
+// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using ListPoliciesRequest.
+// req := client.ListPoliciesRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicies
+func (c *Client) ListPoliciesRequest(input *ListPoliciesInput) ListPoliciesRequest {
+ op := &aws.Operation{
+ Name: opListPolicies,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxItems",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPoliciesInput{}
+ }
+
+ req := c.newRequest(op, input, &ListPoliciesOutput{})
+ return ListPoliciesRequest{Request: req, Input: input, Copy: c.ListPoliciesRequest}
+}
+
+// ListPoliciesRequest is the request type for the
+// ListPolicies API operation.
+type ListPoliciesRequest struct {
+ *aws.Request
+ Input *ListPoliciesInput
+ Copy func(*ListPoliciesInput) ListPoliciesRequest
+}
+
+// Send marshals and sends the ListPolicies API request.
+func (r ListPoliciesRequest) Send(ctx context.Context) (*ListPoliciesResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListPoliciesResponse{
+ ListPoliciesOutput: r.Request.Data.(*ListPoliciesOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewListPoliciesPaginator returns a paginator for ListPolicies.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false if there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.ListPoliciesRequest(input)
+// p := iam.NewListPoliciesPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewListPoliciesPaginator(req ListPoliciesRequest) ListPoliciesPaginator {
+ return ListPoliciesPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *ListPoliciesInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// ListPoliciesPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListPoliciesPaginator struct {
+ aws.Pager
+}
+
+func (p *ListPoliciesPaginator) CurrentPage() *ListPoliciesOutput {
+ return p.Pager.CurrentPage().(*ListPoliciesOutput)
+}
+
+// ListPoliciesResponse is the response type for the
+// ListPolicies API operation.
+type ListPoliciesResponse struct {
+ *ListPoliciesOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListPolicies request.
+func (r *ListPoliciesResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go
new file mode 100644
index 00000000..7d92d37f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPoliciesGrantingServiceAccess.go
@@ -0,0 +1,201 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesGrantingServiceAccessRequest
+type ListPoliciesGrantingServiceAccessInput struct {
+ _ struct{} `type:"structure"`
+
+ // The ARN of the IAM identity (user, group, or role) whose policies you want
+ // to list.
+ //
+ // Arn is a required field
+ Arn *string `min:"20" type:"string" required:"true"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // The service namespace for the AWS services whose policies you want to list.
+ //
+ // To learn the service namespace for a service, go to Actions, Resources, and
+ // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html)
+ // in the IAM User Guide. Choose the name of the service to view details for
+ // that service. In the first paragraph, find the service prefix. For example,
+ // (service prefix: a4b). For more information about service namespaces, see
+ // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces)
+ // in the AWS General Reference.
+ // + // ServiceNamespaces is a required field + ServiceNamespaces []string `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s ListPoliciesGrantingServiceAccessInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPoliciesGrantingServiceAccessInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListPoliciesGrantingServiceAccessInput"} + + if s.Arn == nil { + invalidParams.Add(aws.NewErrParamRequired("Arn")) + } + if s.Arn != nil && len(*s.Arn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("Arn", 20)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + + if s.ServiceNamespaces == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceNamespaces")) + } + if s.ServiceNamespaces != nil && len(s.ServiceNamespaces) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ServiceNamespaces", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesGrantingServiceAccessResponse +type ListPoliciesGrantingServiceAccessOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. We recommend that you check IsTruncated + // after every call to ensure that you receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A ListPoliciesGrantingServiceAccess object that contains details about the + // permissions policies attached to the specified identity (user, group, or + // role). + // + // PoliciesGrantingServiceAccess is a required field + PoliciesGrantingServiceAccess []ListPoliciesGrantingServiceAccessEntry `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListPoliciesGrantingServiceAccessOutput) String() string { + return awsutil.Prettify(s) +} + +const opListPoliciesGrantingServiceAccess = "ListPoliciesGrantingServiceAccess" + +// ListPoliciesGrantingServiceAccessRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Retrieves a list of policies that the IAM identity (user, group, or role) +// can use to access each specified service. +// +// This operation does not use other policy types when determining whether a +// resource could access a service. These other policy types include resource-based +// policies, access control lists, AWS Organizations policies, IAM permissions +// boundaries, and AWS STS assume role policies. It only applies permissions +// policy logic. For more about the evaluation of policy types, see Evaluating +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-basics) +// in the IAM User Guide. +// +// The list of policies returned by the operation depends on the ARN of the +// identity that you provide. +// +// * User – The list of policies includes the managed and inline policies +// that are attached to the user directly. 
The list also includes any additional
+// managed and inline policies that are attached to the group to which the
+// user belongs.
+//
+// * Group – The list of policies includes only the managed and inline
+// policies that are attached to the group directly. Policies that are attached
+// to the group’s users are not included.
+//
+// * Role – The list of policies includes only the managed and inline policies
+// that are attached to the role.
+//
+// For each managed policy, this operation returns the ARN and policy name.
+// For each inline policy, it returns the policy name and the entity to which
+// it is attached. Inline policies do not have an ARN. For more information
+// about these policy types, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// Policies that are attached to users and roles as permissions boundaries are
+// not returned. To view which managed policy is currently used to set the permissions
+// boundary for a user or role, use the GetUser or GetRole operations.
+//
+// // Example sending a request using ListPoliciesGrantingServiceAccessRequest.
+// req := client.ListPoliciesGrantingServiceAccessRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesGrantingServiceAccess
+func (c *Client) ListPoliciesGrantingServiceAccessRequest(input *ListPoliciesGrantingServiceAccessInput) ListPoliciesGrantingServiceAccessRequest {
+ op := &aws.Operation{
+ Name: opListPoliciesGrantingServiceAccess,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListPoliciesGrantingServiceAccessInput{}
+ }
+
+ req := c.newRequest(op, input, &ListPoliciesGrantingServiceAccessOutput{})
+ return ListPoliciesGrantingServiceAccessRequest{Request: req, Input: input, Copy: c.ListPoliciesGrantingServiceAccessRequest}
+}
+
+// ListPoliciesGrantingServiceAccessRequest is the request type for the
+// ListPoliciesGrantingServiceAccess API operation.
+type ListPoliciesGrantingServiceAccessRequest struct {
+ *aws.Request
+ Input *ListPoliciesGrantingServiceAccessInput
+ Copy func(*ListPoliciesGrantingServiceAccessInput) ListPoliciesGrantingServiceAccessRequest
+}
+
+// Send marshals and sends the ListPoliciesGrantingServiceAccess API request.
+func (r ListPoliciesGrantingServiceAccessRequest) Send(ctx context.Context) (*ListPoliciesGrantingServiceAccessResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListPoliciesGrantingServiceAccessResponse{
+ ListPoliciesGrantingServiceAccessOutput: r.Request.Data.(*ListPoliciesGrantingServiceAccessOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// ListPoliciesGrantingServiceAccessResponse is the response type for the
+// ListPoliciesGrantingServiceAccess API operation.
+type ListPoliciesGrantingServiceAccessResponse struct {
+ *ListPoliciesGrantingServiceAccessOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListPoliciesGrantingServiceAccess request.
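+//
+// A sketch of reading the service request ID from the metadata (the
+// aws.Response and aws.Request field layout here follows this vendored SDK
+// version and is stated as an assumption):
+//
+// resp, err := client.ListPoliciesGrantingServiceAccessRequest(params).Send(context.TODO())
+// if err == nil {
+// fmt.Println("request id:", resp.SDKResponseMetdata().Request.RequestID)
+// }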
+func (r *ListPoliciesGrantingServiceAccessResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go new file mode 100644 index 00000000..a50c135f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListPolicyVersions.go @@ -0,0 +1,225 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicyVersionsRequest +type ListPolicyVersionsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The Amazon Resource Name (ARN) of the IAM policy for which you want the versions. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPolicyVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPolicyVersionsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListPolicyVersionsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListPolicyVersions request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicyVersionsResponse +type ListPolicyVersionsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. 
If your results
+ // were truncated, you can make a subsequent pagination request using the Marker
+ // request parameter to retrieve more items. Note that IAM might return fewer
+ // than the MaxItems number of results even when there are more results available.
+ // We recommend that you check IsTruncated after every call to ensure that you
+ // receive all your results.
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `type:"string"`
+
+ // A list of policy versions.
+ //
+ // For more information about managed policy versions, see Versioning for Managed
+ // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html)
+ // in the IAM User Guide.
+ Versions []PolicyVersion `type:"list"`
+}
+
+// String returns the string representation
+func (s ListPolicyVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opListPolicyVersions = "ListPolicyVersions"
+
+// ListPolicyVersionsRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Lists information about the versions of the specified managed policy, including
+// the version that is currently set as the policy's default version.
+//
+// For more information about managed policies, see Managed Policies and Inline
+// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using ListPolicyVersionsRequest.
+// req := client.ListPolicyVersionsRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPolicyVersions
+func (c *Client) ListPolicyVersionsRequest(input *ListPolicyVersionsInput) ListPolicyVersionsRequest {
+ op := &aws.Operation{
+ Name: opListPolicyVersions,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxItems",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPolicyVersionsInput{}
+ }
+
+ req := c.newRequest(op, input, &ListPolicyVersionsOutput{})
+ return ListPolicyVersionsRequest{Request: req, Input: input, Copy: c.ListPolicyVersionsRequest}
+}
+
+// ListPolicyVersionsRequest is the request type for the
+// ListPolicyVersions API operation.
+type ListPolicyVersionsRequest struct {
+ *aws.Request
+ Input *ListPolicyVersionsInput
+ Copy func(*ListPolicyVersionsInput) ListPolicyVersionsRequest
+}
+
+// Send marshals and sends the ListPolicyVersions API request.
+func (r ListPolicyVersionsRequest) Send(ctx context.Context) (*ListPolicyVersionsResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListPolicyVersionsResponse{
+ ListPolicyVersionsOutput: r.Request.Data.(*ListPolicyVersionsOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewListPolicyVersionsPaginator returns a paginator for ListPolicyVersions.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false if there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.ListPolicyVersionsRequest(input)
+// p := iam.NewListPolicyVersionsPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewListPolicyVersionsPaginator(req ListPolicyVersionsRequest) ListPolicyVersionsPaginator {
+ return ListPolicyVersionsPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *ListPolicyVersionsInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// ListPolicyVersionsPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListPolicyVersionsPaginator struct {
+ aws.Pager
+}
+
+func (p *ListPolicyVersionsPaginator) CurrentPage() *ListPolicyVersionsOutput {
+ return p.Pager.CurrentPage().(*ListPolicyVersionsOutput)
+}
+
+// ListPolicyVersionsResponse is the response type for the
+// ListPolicyVersions API operation.
+type ListPolicyVersionsResponse struct {
+ *ListPolicyVersionsOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListPolicyVersions request.
+func (r *ListPolicyVersionsResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go
new file mode 100644
index 00000000..36a19b4a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRolePolicies.go
@@ -0,0 +1,228 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolePoliciesRequest
+type ListRolePoliciesInput struct {
+ _ struct{} `type:"structure"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // Use this only when paginating results to indicate the maximum number of items
+ // you want in the response. If additional items exist beyond the maximum you
+ // specify, the IsTruncated response element is true.
+ //
+ // If you do not include this parameter, the number of items defaults to 100.
+ // Note that IAM might return fewer results, even when there are more results
+ // available. In that case, the IsTruncated response element returns true, and
+ // Marker contains a value to include in the subsequent call that tells the
+ // service where to continue from.
+ MaxItems *int64 `min:"1" type:"integer"`
+
+ // The name of the role to list policies for.
+ //
+ // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces.
You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRolePoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRolePoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListRolePolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolePoliciesResponse +type ListRolePoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy names. + // + // PolicyNames is a required field + PolicyNames []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolePoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListRolePolicies = "ListRolePolicies" + +// ListRolePoliciesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the names of the inline policies that are embedded in the specified +// IAM role. +// +// An IAM role can also have managed policies attached to it. To list the managed +// policies that are attached to a role, use ListAttachedRolePolicies. For more +// information about policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified role, the operation +// returns an empty list. +// +// // Example sending a request using ListRolePoliciesRequest. 
+// req := client.ListRolePoliciesRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolePolicies
+func (c *Client) ListRolePoliciesRequest(input *ListRolePoliciesInput) ListRolePoliciesRequest {
+ op := &aws.Operation{
+ Name: opListRolePolicies,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxItems",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListRolePoliciesInput{}
+ }
+
+ req := c.newRequest(op, input, &ListRolePoliciesOutput{})
+ return ListRolePoliciesRequest{Request: req, Input: input, Copy: c.ListRolePoliciesRequest}
+}
+
+// ListRolePoliciesRequest is the request type for the
+// ListRolePolicies API operation.
+type ListRolePoliciesRequest struct {
+ *aws.Request
+ Input *ListRolePoliciesInput
+ Copy func(*ListRolePoliciesInput) ListRolePoliciesRequest
+}
+
+// Send marshals and sends the ListRolePolicies API request.
+func (r ListRolePoliciesRequest) Send(ctx context.Context) (*ListRolePoliciesResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListRolePoliciesResponse{
+ ListRolePoliciesOutput: r.Request.Data.(*ListRolePoliciesOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewListRolePoliciesPaginator returns a paginator for ListRolePolicies.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false if there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.ListRolePoliciesRequest(input)
+// p := iam.NewListRolePoliciesPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewListRolePoliciesPaginator(req ListRolePoliciesRequest) ListRolePoliciesPaginator {
+ return ListRolePoliciesPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *ListRolePoliciesInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// ListRolePoliciesPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListRolePoliciesPaginator struct {
+ aws.Pager
+}
+
+func (p *ListRolePoliciesPaginator) CurrentPage() *ListRolePoliciesOutput {
+ return p.Pager.CurrentPage().(*ListRolePoliciesOutput)
+}
+
+// ListRolePoliciesResponse is the response type for the
+// ListRolePolicies API operation.
+type ListRolePoliciesResponse struct {
+ *ListRolePoliciesOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListRolePolicies request.
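+//
+// The metadata also carries the underlying HTTP response; a sketch of
+// checking the status code (assuming the aws.Request field layout in this
+// vendored SDK version):
+//
+// resp, err := client.ListRolePoliciesRequest(params).Send(context.TODO())
+// if err == nil {
+// fmt.Println("HTTP status:", resp.SDKResponseMetdata().Request.HTTPResponse.StatusCode)
+// }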
+func (r *ListRolePoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go new file mode 100644 index 00000000..be6eb4bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoleTags.go @@ -0,0 +1,170 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoleTagsRequest +type ListRoleTagsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // (Optional) Use this only when paginating results to indicate the maximum + // number of items that you want in the response. If additional items exist + // beyond the maximum that you specify, the IsTruncated response element is + // true. + // + // If you do not include this parameter, it defaults to 100. Note that IAM might + // return fewer results, even when more results are available. In that case, + // the IsTruncated response element returns true, and Marker contains a value + // to include in the subsequent call that tells the service where to continue + // from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM role for which you want to see the list of tags. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListRoleTagsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRoleTagsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRoleTagsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoleTagsResponse +type ListRoleTagsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can use the Marker request parameter to make a subsequent + // pagination request that retrieves more items. 
Note that IAM might return
+ // fewer than the MaxItems number of results even when more results are available.
+ // Check IsTruncated after every call to ensure that you receive all of your
+ // results.
+ IsTruncated *bool `type:"boolean"`
+
+ // When IsTruncated is true, this element is present and contains the value
+ // to use for the Marker parameter in a subsequent pagination request.
+ Marker *string `type:"string"`
+
+ // The list of tags that are currently attached to the role. Each tag consists
+ // of a key name and an associated value. If no tags are attached to the specified
+ // role, the response contains an empty list.
+ //
+ // Tags is a required field
+ Tags []Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s ListRoleTagsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opListRoleTags = "ListRoleTags"
+
+// ListRoleTagsRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Lists the tags that are attached to the specified role. The returned list
+// of tags is sorted by tag key. For more information about tagging, see Tagging
+// IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using ListRoleTagsRequest.
+// req := client.ListRoleTagsRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoleTags
+func (c *Client) ListRoleTagsRequest(input *ListRoleTagsInput) ListRoleTagsRequest {
+ op := &aws.Operation{
+ Name: opListRoleTags,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListRoleTagsInput{}
+ }
+
+ req := c.newRequest(op, input, &ListRoleTagsOutput{})
+ return ListRoleTagsRequest{Request: req, Input: input, Copy: c.ListRoleTagsRequest}
+}
+
+// ListRoleTagsRequest is the request type for the
+// ListRoleTags API operation.
+type ListRoleTagsRequest struct {
+ *aws.Request
+ Input *ListRoleTagsInput
+ Copy func(*ListRoleTagsInput) ListRoleTagsRequest
+}
+
+// Send marshals and sends the ListRoleTags API request.
+func (r ListRoleTagsRequest) Send(ctx context.Context) (*ListRoleTagsResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListRoleTagsResponse{
+ ListRoleTagsOutput: r.Request.Data.(*ListRoleTagsOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// ListRoleTagsResponse is the response type for the
+// ListRoleTags API operation.
+type ListRoleTagsResponse struct {
+ *ListRoleTagsOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListRoleTags request.
+func (r *ListRoleTagsResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go
new file mode 100644
index 00000000..719d2036
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListRoles.go
@@ -0,0 +1,221 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolesRequest +type ListRolesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example, the prefix /application_abc/component_xyz/ + // gets all roles whose path starts with /application_abc/component_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all roles. This parameter allows (through its regex pattern + // (http://wikipedia.org/wiki/regex)) a string of characters consisting of either + // a forward slash (/) by itself or a string that must begin and end with forward + // slashes. In addition, it can contain any ASCII character from the ! (\u0021) + // through the DEL character (\u007F), including most punctuation characters, + // digits, and upper and lowercased letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListRolesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListRolesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListRolesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListRoles request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRolesResponse +type ListRolesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. 
+ IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of roles. + // + // Roles is a required field + Roles []Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListRolesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListRoles = "ListRoles" + +// ListRolesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the IAM roles that have the specified path prefix. If there are none, +// the operation returns an empty list. For more information about roles, go +// to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListRolesRequest. +// req := client.ListRolesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListRoles +func (c *Client) ListRolesRequest(input *ListRolesInput) ListRolesRequest { + op := &aws.Operation{ + Name: opListRoles, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListRolesInput{} + } + + req := c.newRequest(op, input, &ListRolesOutput{}) + return ListRolesRequest{Request: req, Input: input, Copy: c.ListRolesRequest} +} + +// ListRolesRequest is the request type for the +// ListRoles API operation. +type ListRolesRequest struct { + *aws.Request + Input *ListRolesInput + Copy func(*ListRolesInput) ListRolesRequest +} + +// Send marshals and sends the ListRoles API request. +func (r ListRolesRequest) Send(ctx context.Context) (*ListRolesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListRolesResponse{ + ListRolesOutput: r.Request.Data.(*ListRolesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListRolesRequestPaginator returns a paginator for ListRoles. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListRolesRequest(input) +// p := iam.NewListRolesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListRolesPaginator(req ListRolesRequest) ListRolesPaginator { + return ListRolesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListRolesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListRolesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. 
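+//
+// Editor's note, a minimal sketch rather than generated output: the
+// constructor emitted below is NewListRolesPaginator, although the comment
+// above calls it NewListRolesRequestPaginator. Draining every page, assuming
+// an already-configured Client named svc and that Role exposes RoleName:
+//
+//    p := iam.NewListRolesPaginator(svc.ListRolesRequest(&iam.ListRolesInput{}))
+//    for p.Next(context.TODO()) {
+//        for _, role := range p.CurrentPage().Roles {
+//            fmt.Println(*role.RoleName)
+//        }
+//    }
+//    if err := p.Err(); err != nil {
+//        return err
+//    }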
+type ListRolesPaginator struct { + aws.Pager +} + +func (p *ListRolesPaginator) CurrentPage() *ListRolesOutput { + return p.Pager.CurrentPage().(*ListRolesOutput) +} + +// ListRolesResponse is the response type for the +// ListRoles API operation. +type ListRolesResponse struct { + *ListRolesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListRoles request. +func (r *ListRolesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go new file mode 100644 index 00000000..6612471e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSAMLProviders.go @@ -0,0 +1,104 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSAMLProvidersRequest +type ListSAMLProvidersInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ListSAMLProvidersInput) String() string { + return awsutil.Prettify(s) +} + +// Contains the response to a successful ListSAMLProviders request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSAMLProvidersResponse +type ListSAMLProvidersOutput struct { + _ struct{} `type:"structure"` + + // The list of SAML provider resource objects defined in IAM for this AWS account. + SAMLProviderList []SAMLProviderListEntry `type:"list"` +} + +// String returns the string representation +func (s ListSAMLProvidersOutput) String() string { + return awsutil.Prettify(s) +} + +const opListSAMLProviders = "ListSAMLProviders" + +// ListSAMLProvidersRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the SAML provider resource objects defined in IAM in the account. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// // Example sending a request using ListSAMLProvidersRequest. +// req := client.ListSAMLProvidersRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSAMLProviders +func (c *Client) ListSAMLProvidersRequest(input *ListSAMLProvidersInput) ListSAMLProvidersRequest { + op := &aws.Operation{ + Name: opListSAMLProviders, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListSAMLProvidersInput{} + } + + req := c.newRequest(op, input, &ListSAMLProvidersOutput{}) + return ListSAMLProvidersRequest{Request: req, Input: input, Copy: c.ListSAMLProvidersRequest} +} + +// ListSAMLProvidersRequest is the request type for the +// ListSAMLProviders API operation. +type ListSAMLProvidersRequest struct { + *aws.Request + Input *ListSAMLProvidersInput + Copy func(*ListSAMLProvidersInput) ListSAMLProvidersRequest +} + +// Send marshals and sends the ListSAMLProviders API request. 
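+//
+// Editor's sketch (assumes an already-configured Client named svc and that
+// SAMLProviderListEntry exposes an Arn field):
+//
+//    resp, err := svc.ListSAMLProvidersRequest(&iam.ListSAMLProvidersInput{}).Send(context.TODO())
+//    if err != nil {
+//        return err
+//    }
+//    for _, entry := range resp.SAMLProviderList {
+//        fmt.Println(*entry.Arn)
+//    }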
+func (r ListSAMLProvidersRequest) Send(ctx context.Context) (*ListSAMLProvidersResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListSAMLProvidersResponse{ + ListSAMLProvidersOutput: r.Request.Data.(*ListSAMLProvidersOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListSAMLProvidersResponse is the response type for the +// ListSAMLProviders API operation. +type ListSAMLProvidersResponse struct { + *ListSAMLProvidersOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListSAMLProviders request. +func (r *ListSAMLProvidersResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go new file mode 100644 index 00000000..9e1142fa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSSHPublicKeys.go @@ -0,0 +1,222 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSSHPublicKeysRequest +type ListSSHPublicKeysInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user to list SSH public keys for. If none is specified, + // the UserName field is determined implicitly based on the AWS access key used + // to sign the request. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSSHPublicKeysInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
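+//
+// Editor's sketch: Validate runs entirely client-side, so malformed input can
+// be rejected before any request is signed or sent. For example, an empty
+// UserName trips the min-length rule enforced below:
+//
+//    in := &iam.ListSSHPublicKeysInput{UserName: aws.String("")}
+//    if err := in.Validate(); err != nil {
+//        fmt.Println(err) // reports the UserName length violation
+//    }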
+func (s *ListSSHPublicKeysInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListSSHPublicKeysInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListSSHPublicKeys request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSSHPublicKeysResponse +type ListSSHPublicKeysOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of the SSH public keys assigned to IAM user. + SSHPublicKeys []SSHPublicKeyMetadata `type:"list"` +} + +// String returns the string representation +func (s ListSSHPublicKeysOutput) String() string { + return awsutil.Prettify(s) +} + +const opListSSHPublicKeys = "ListSSHPublicKeys" + +// ListSSHPublicKeysRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns information about the SSH public keys associated with the specified +// IAM user. If none exists, the operation returns an empty list. +// +// The SSH public keys returned by this operation are used only for authenticating +// the IAM user to an AWS CodeCommit repository. For more information about +// using SSH keys to authenticate to an AWS CodeCommit repository, see Set up +// AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// Although each user is limited to a small number of keys, you can still paginate +// the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListSSHPublicKeysRequest. 
+// req := client.ListSSHPublicKeysRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSSHPublicKeys +func (c *Client) ListSSHPublicKeysRequest(input *ListSSHPublicKeysInput) ListSSHPublicKeysRequest { + op := &aws.Operation{ + Name: opListSSHPublicKeys, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSSHPublicKeysInput{} + } + + req := c.newRequest(op, input, &ListSSHPublicKeysOutput{}) + return ListSSHPublicKeysRequest{Request: req, Input: input, Copy: c.ListSSHPublicKeysRequest} +} + +// ListSSHPublicKeysRequest is the request type for the +// ListSSHPublicKeys API operation. +type ListSSHPublicKeysRequest struct { + *aws.Request + Input *ListSSHPublicKeysInput + Copy func(*ListSSHPublicKeysInput) ListSSHPublicKeysRequest +} + +// Send marshals and sends the ListSSHPublicKeys API request. +func (r ListSSHPublicKeysRequest) Send(ctx context.Context) (*ListSSHPublicKeysResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListSSHPublicKeysResponse{ + ListSSHPublicKeysOutput: r.Request.Data.(*ListSSHPublicKeysOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListSSHPublicKeysRequestPaginator returns a paginator for ListSSHPublicKeys. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListSSHPublicKeysRequest(input) +// p := iam.NewListSSHPublicKeysRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListSSHPublicKeysPaginator(req ListSSHPublicKeysRequest) ListSSHPublicKeysPaginator { + return ListSSHPublicKeysPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListSSHPublicKeysInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListSSHPublicKeysPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListSSHPublicKeysPaginator struct { + aws.Pager +} + +func (p *ListSSHPublicKeysPaginator) CurrentPage() *ListSSHPublicKeysOutput { + return p.Pager.CurrentPage().(*ListSSHPublicKeysOutput) +} + +// ListSSHPublicKeysResponse is the response type for the +// ListSSHPublicKeys API operation. +type ListSSHPublicKeysResponse struct { + *ListSSHPublicKeysOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListSSHPublicKeys request. 
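+//
+// Editor's sketch; the method below is spelled SDKResponseMetdata as
+// generated. Reading the request ID off the metadata (RequestID is assumed
+// from the aws.Request type):
+//
+//    resp, err := svc.ListSSHPublicKeysRequest(nil).Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp.SDKResponseMetdata().Request.RequestID)
+//    }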
+func (r *ListSSHPublicKeysResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go new file mode 100644 index 00000000..907c3059 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServerCertificates.go @@ -0,0 +1,225 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServerCertificatesRequest +type ListServerCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /company/servercerts + // would get all server certificates for which the path starts with /company/servercerts. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all server certificates. This parameter allows (through its + // regex pattern (http://wikipedia.org/wiki/regex)) a string of characters consisting + // of either a forward slash (/) by itself or a string that must begin and end + // with forward slashes. In addition, it can contain any ASCII character from + // the ! (\u0021) through the DEL character (\u007F), including most punctuation + // characters, digits, and upper and lowercased letters. + PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListServerCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListServerCertificatesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListServerCertificatesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListServerCertificates request. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServerCertificatesResponse +type ListServerCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of server certificates. + // + // ServerCertificateMetadataList is a required field + ServerCertificateMetadataList []ServerCertificateMetadata `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListServerCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListServerCertificates = "ListServerCertificates" + +// ListServerCertificatesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the server certificates stored in IAM that have the specified path +// prefix. If none exist, the operation returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic also includes a list of AWS services that +// can use the server certificates that you manage with IAM. +// +// // Example sending a request using ListServerCertificatesRequest. +// req := client.ListServerCertificatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServerCertificates +func (c *Client) ListServerCertificatesRequest(input *ListServerCertificatesInput) ListServerCertificatesRequest { + op := &aws.Operation{ + Name: opListServerCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListServerCertificatesInput{} + } + + req := c.newRequest(op, input, &ListServerCertificatesOutput{}) + return ListServerCertificatesRequest{Request: req, Input: input, Copy: c.ListServerCertificatesRequest} +} + +// ListServerCertificatesRequest is the request type for the +// ListServerCertificates API operation. +type ListServerCertificatesRequest struct { + *aws.Request + Input *ListServerCertificatesInput + Copy func(*ListServerCertificatesInput) ListServerCertificatesRequest +} + +// Send marshals and sends the ListServerCertificates API request. 
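+//
+// Editor's sketch, filtering on the example prefix from the input docs above
+// (svc is an assumed, already-configured Client):
+//
+//    req := svc.ListServerCertificatesRequest(&iam.ListServerCertificatesInput{
+//        PathPrefix: aws.String("/company/servercerts"),
+//    })
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp.ServerCertificateMetadataList)
+//    }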
+func (r ListServerCertificatesRequest) Send(ctx context.Context) (*ListServerCertificatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListServerCertificatesResponse{ + ListServerCertificatesOutput: r.Request.Data.(*ListServerCertificatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListServerCertificatesRequestPaginator returns a paginator for ListServerCertificates. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListServerCertificatesRequest(input) +// p := iam.NewListServerCertificatesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListServerCertificatesPaginator(req ListServerCertificatesRequest) ListServerCertificatesPaginator { + return ListServerCertificatesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListServerCertificatesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListServerCertificatesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListServerCertificatesPaginator struct { + aws.Pager +} + +func (p *ListServerCertificatesPaginator) CurrentPage() *ListServerCertificatesOutput { + return p.Pager.CurrentPage().(*ListServerCertificatesOutput) +} + +// ListServerCertificatesResponse is the response type for the +// ListServerCertificates API operation. +type ListServerCertificatesResponse struct { + *ListServerCertificatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListServerCertificates request. +func (r *ListServerCertificatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go new file mode 100644 index 00000000..d9bb415b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListServiceSpecificCredentials.go @@ -0,0 +1,133 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServiceSpecificCredentialsRequest +type ListServiceSpecificCredentialsInput struct { + _ struct{} `type:"structure"` + + // Filters the returned results to only those for the specified AWS service. + // If not specified, then AWS returns service-specific credentials for all services. + ServiceName *string `type:"string"` + + // The name of the user whose service-specific credentials you want information + // about. If this value is not specified, then the operation assumes the user + // whose credentials are used to call the operation. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListServiceSpecificCredentialsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListServiceSpecificCredentialsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListServiceSpecificCredentialsInput"} + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServiceSpecificCredentialsResponse +type ListServiceSpecificCredentialsOutput struct { + _ struct{} `type:"structure"` + + // A list of structures that each contain details about a service-specific credential. + ServiceSpecificCredentials []ServiceSpecificCredentialMetadata `type:"list"` +} + +// String returns the string representation +func (s ListServiceSpecificCredentialsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListServiceSpecificCredentials = "ListServiceSpecificCredentials" + +// ListServiceSpecificCredentialsRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns information about the service-specific credentials associated with +// the specified IAM user. If none exists, the operation returns an empty list. +// The service-specific credentials returned by this operation are used only +// for authenticating the IAM user to a specific service. For more information +// about using service-specific credentials to authenticate to an AWS service, +// see Set Up service-specific credentials (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html) +// in the AWS CodeCommit User Guide. +// +// // Example sending a request using ListServiceSpecificCredentialsRequest. +// req := client.ListServiceSpecificCredentialsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListServiceSpecificCredentials +func (c *Client) ListServiceSpecificCredentialsRequest(input *ListServiceSpecificCredentialsInput) ListServiceSpecificCredentialsRequest { + op := &aws.Operation{ + Name: opListServiceSpecificCredentials, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListServiceSpecificCredentialsInput{} + } + + req := c.newRequest(op, input, &ListServiceSpecificCredentialsOutput{}) + return ListServiceSpecificCredentialsRequest{Request: req, Input: input, Copy: c.ListServiceSpecificCredentialsRequest} +} + +// ListServiceSpecificCredentialsRequest is the request type for the +// ListServiceSpecificCredentials API operation. +type ListServiceSpecificCredentialsRequest struct { + *aws.Request + Input *ListServiceSpecificCredentialsInput + Copy func(*ListServiceSpecificCredentialsInput) ListServiceSpecificCredentialsRequest +} + +// Send marshals and sends the ListServiceSpecificCredentials API request. 
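+//
+// Editor's sketch; the CodeCommit service name is illustrative only, and svc
+// is an assumed, already-configured Client:
+//
+//    resp, err := svc.ListServiceSpecificCredentialsRequest(&iam.ListServiceSpecificCredentialsInput{
+//        ServiceName: aws.String("codecommit.amazonaws.com"),
+//    }).Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp.ServiceSpecificCredentials)
+//    }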
+func (r ListServiceSpecificCredentialsRequest) Send(ctx context.Context) (*ListServiceSpecificCredentialsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListServiceSpecificCredentialsResponse{ + ListServiceSpecificCredentialsOutput: r.Request.Data.(*ListServiceSpecificCredentialsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListServiceSpecificCredentialsResponse is the response type for the +// ListServiceSpecificCredentials API operation. +type ListServiceSpecificCredentialsResponse struct { + *ListServiceSpecificCredentialsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListServiceSpecificCredentials request. +func (r *ListServiceSpecificCredentialsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go new file mode 100644 index 00000000..e6556e37 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListSigningCertificates.go @@ -0,0 +1,222 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSigningCertificatesRequest +type ListSigningCertificatesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user whose signing certificates you want to examine. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListSigningCertificatesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListSigningCertificatesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListSigningCertificates request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSigningCertificatesResponse +type ListSigningCertificatesOutput struct { + _ struct{} `type:"structure"` + + // A list of the user's signing certificate information. + // + // Certificates is a required field + Certificates []SigningCertificate `type:"list" required:"true"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` +} + +// String returns the string representation +func (s ListSigningCertificatesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListSigningCertificates = "ListSigningCertificates" + +// ListSigningCertificatesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Returns information about the signing certificates associated with the specified +// IAM user. If none exists, the operation returns an empty list. +// +// Although each user is limited to a small number of signing certificates, +// you can still paginate the results using the MaxItems and Marker parameters. +// +// If the UserName field is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request for this API. This +// operation works for access keys under the AWS account. Consequently, you +// can use this operation to manage AWS account root user credentials even if +// the AWS account has no associated users. +// +// // Example sending a request using ListSigningCertificatesRequest. 
+// req := client.ListSigningCertificatesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListSigningCertificates +func (c *Client) ListSigningCertificatesRequest(input *ListSigningCertificatesInput) ListSigningCertificatesRequest { + op := &aws.Operation{ + Name: opListSigningCertificates, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListSigningCertificatesInput{} + } + + req := c.newRequest(op, input, &ListSigningCertificatesOutput{}) + return ListSigningCertificatesRequest{Request: req, Input: input, Copy: c.ListSigningCertificatesRequest} +} + +// ListSigningCertificatesRequest is the request type for the +// ListSigningCertificates API operation. +type ListSigningCertificatesRequest struct { + *aws.Request + Input *ListSigningCertificatesInput + Copy func(*ListSigningCertificatesInput) ListSigningCertificatesRequest +} + +// Send marshals and sends the ListSigningCertificates API request. +func (r ListSigningCertificatesRequest) Send(ctx context.Context) (*ListSigningCertificatesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListSigningCertificatesResponse{ + ListSigningCertificatesOutput: r.Request.Data.(*ListSigningCertificatesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListSigningCertificatesRequestPaginator returns a paginator for ListSigningCertificates. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. +// req := client.ListSigningCertificatesRequest(input) +// p := iam.NewListSigningCertificatesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListSigningCertificatesPaginator(req ListSigningCertificatesRequest) ListSigningCertificatesPaginator { + return ListSigningCertificatesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListSigningCertificatesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListSigningCertificatesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListSigningCertificatesPaginator struct { + aws.Pager +} + +func (p *ListSigningCertificatesPaginator) CurrentPage() *ListSigningCertificatesOutput { + return p.Pager.CurrentPage().(*ListSigningCertificatesOutput) +} + +// ListSigningCertificatesResponse is the response type for the +// ListSigningCertificates API operation. +type ListSigningCertificatesResponse struct { + *ListSigningCertificatesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListSigningCertificates request. 
+func (r *ListSigningCertificatesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go new file mode 100644 index 00000000..c5aeec82 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserPolicies.go @@ -0,0 +1,227 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserPoliciesRequest +type ListUserPoliciesInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the user to list policies for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUserPoliciesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListUserPoliciesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListUserPolicies request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserPoliciesResponse +type ListUserPoliciesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. 
Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of policy names. + // + // PolicyNames is a required field + PolicyNames []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserPoliciesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListUserPolicies = "ListUserPolicies" + +// ListUserPoliciesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the names of the inline policies embedded in the specified IAM user. +// +// An IAM user can also have managed policies attached to it. To list the managed +// policies that are attached to a user, use ListAttachedUserPolicies. For more +// information about policies, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// You can paginate the results using the MaxItems and Marker parameters. If +// there are no inline policies embedded with the specified user, the operation +// returns an empty list. +// +// // Example sending a request using ListUserPoliciesRequest. +// req := client.ListUserPoliciesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserPolicies +func (c *Client) ListUserPoliciesRequest(input *ListUserPoliciesInput) ListUserPoliciesRequest { + op := &aws.Operation{ + Name: opListUserPolicies, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUserPoliciesInput{} + } + + req := c.newRequest(op, input, &ListUserPoliciesOutput{}) + return ListUserPoliciesRequest{Request: req, Input: input, Copy: c.ListUserPoliciesRequest} +} + +// ListUserPoliciesRequest is the request type for the +// ListUserPolicies API operation. +type ListUserPoliciesRequest struct { + *aws.Request + Input *ListUserPoliciesInput + Copy func(*ListUserPoliciesInput) ListUserPoliciesRequest +} + +// Send marshals and sends the ListUserPolicies API request. +func (r ListUserPoliciesRequest) Send(ctx context.Context) (*ListUserPoliciesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListUserPoliciesResponse{ + ListUserPoliciesOutput: r.Request.Data.(*ListUserPoliciesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// NewListUserPoliciesRequestPaginator returns a paginator for ListUserPolicies. +// Use Next method to get the next page, and CurrentPage to get the current +// response page from the paginator. Next will return false, if there are +// no more pages, or an error was encountered. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over pages. 
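+// (Editor's note: despite the name used in this comment, the constructor
+// generated below is NewListUserPoliciesPaginator.)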
+// req := client.ListUserPoliciesRequest(input) +// p := iam.NewListUserPoliciesRequestPaginator(req) +// +// for p.Next(context.TODO()) { +// page := p.CurrentPage() +// } +// +// if err := p.Err(); err != nil { +// return err +// } +// +func NewListUserPoliciesPaginator(req ListUserPoliciesRequest) ListUserPoliciesPaginator { + return ListUserPoliciesPaginator{ + Pager: aws.Pager{ + NewRequest: func(ctx context.Context) (*aws.Request, error) { + var inCpy *ListUserPoliciesInput + if req.Input != nil { + tmp := *req.Input + inCpy = &tmp + } + + newReq := req.Copy(inCpy) + newReq.SetContext(ctx) + return newReq.Request, nil + }, + }, + } +} + +// ListUserPoliciesPaginator is used to paginate the request. This can be done by +// calling Next and CurrentPage. +type ListUserPoliciesPaginator struct { + aws.Pager +} + +func (p *ListUserPoliciesPaginator) CurrentPage() *ListUserPoliciesOutput { + return p.Pager.CurrentPage().(*ListUserPoliciesOutput) +} + +// ListUserPoliciesResponse is the response type for the +// ListUserPolicies API operation. +type ListUserPoliciesResponse struct { + *ListUserPoliciesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListUserPolicies request. +func (r *ListUserPoliciesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go new file mode 100644 index 00000000..a4b48c59 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUserTags.go @@ -0,0 +1,170 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserTagsRequest +type ListUserTagsInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // (Optional) Use this only when paginating results to indicate the maximum + // number of items that you want in the response. If additional items exist + // beyond the maximum that you specify, the IsTruncated response element is + // true. + // + // If you do not include this parameter, it defaults to 100. Note that IAM might + // return fewer results, even when more results are available. In that case, + // the IsTruncated response element returns true, and Marker contains a value + // to include in the subsequent call that tells the service where to continue + // from. + MaxItems *int64 `min:"1" type:"integer"` + + // The name of the IAM user whose tags you want to see. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: =,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListUserTagsInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUserTagsInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListUserTagsInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserTagsResponse +type ListUserTagsOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can use the Marker request parameter to make a subsequent + // pagination request that retrieves more items. Note that IAM might return + // fewer than the MaxItems number of results even when more results are available. + // Check IsTruncated after every call to ensure that you receive all of your + // results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // The list of tags that are currently attached to the user. Each tag consists + // of a key name and an associated value. If no tags are attached to the specified + // user, the response contains an empty list. + // + // Tags is a required field + Tags []Tag `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUserTagsOutput) String() string { + return awsutil.Prettify(s) +} + +const opListUserTags = "ListUserTags" + +// ListUserTagsRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the tags that are attached to the specified user. The returned list +// of tags is sorted by tag key. For more information about tagging, see Tagging +// IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// // Example sending a request using ListUserTagsRequest. +// req := client.ListUserTagsRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUserTags +func (c *Client) ListUserTagsRequest(input *ListUserTagsInput) ListUserTagsRequest { + op := &aws.Operation{ + Name: opListUserTags, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ListUserTagsInput{} + } + + req := c.newRequest(op, input, &ListUserTagsOutput{}) + return ListUserTagsRequest{Request: req, Input: input, Copy: c.ListUserTagsRequest} +} + +// ListUserTagsRequest is the request type for the +// ListUserTags API operation. 
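+//
+// Editor's sketch: no paginator is generated for ListUserTags, so page
+// manually with Marker/IsTruncated. svc, the user name, and the Tag fields
+// Key/Value are assumptions:
+//
+//    in := &iam.ListUserTagsInput{UserName: aws.String("alice")}
+//    for {
+//        resp, err := svc.ListUserTagsRequest(in).Send(context.TODO())
+//        if err != nil {
+//            return err
+//        }
+//        for _, tag := range resp.Tags {
+//            fmt.Println(*tag.Key, *tag.Value)
+//        }
+//        if resp.IsTruncated == nil || !*resp.IsTruncated {
+//            break
+//        }
+//        in.Marker = resp.Marker
+//    }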
+type ListUserTagsRequest struct { + *aws.Request + Input *ListUserTagsInput + Copy func(*ListUserTagsInput) ListUserTagsRequest +} + +// Send marshals and sends the ListUserTags API request. +func (r ListUserTagsRequest) Send(ctx context.Context) (*ListUserTagsResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ListUserTagsResponse{ + ListUserTagsOutput: r.Request.Data.(*ListUserTagsOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ListUserTagsResponse is the response type for the +// ListUserTags API operation. +type ListUserTagsResponse struct { + *ListUserTagsOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ListUserTags request. +func (r *ListUserTagsResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go new file mode 100644 index 00000000..f6390613 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListUsers.go @@ -0,0 +1,221 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUsersRequest +type ListUsersInput struct { + _ struct{} `type:"structure"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // The path prefix for filtering the results. For example: /division_abc/subdivision_xyz/, + // which would get all user names whose path starts with /division_abc/subdivision_xyz/. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/), listing all user names. This parameter allows (through its regex pattern + // (http://wikipedia.org/wiki/regex)) a string of characters consisting of either + // a forward slash (/) by itself or a string that must begin and end with forward + // slashes. In addition, it can contain any ASCII character from the ! (\u0021) + // through the DEL character (\u007F), including most punctuation characters, + // digits, and upper and lowercased letters. 
+ PathPrefix *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListUsersInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListUsersInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListUsersInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + if s.PathPrefix != nil && len(*s.PathPrefix) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PathPrefix", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListUsers request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUsersResponse +type ListUsersOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // A list of users. + // + // Users is a required field + Users []User `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListUsersOutput) String() string { + return awsutil.Prettify(s) +} + +const opListUsers = "ListUsers" + +// ListUsersRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the IAM users that have the specified path prefix. If no path prefix +// is specified, the operation returns all users in the AWS account. If there +// are none, the operation returns an empty list. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListUsersRequest. +// req := client.ListUsersRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListUsers +func (c *Client) ListUsersRequest(input *ListUsersInput) ListUsersRequest { + op := &aws.Operation{ + Name: opListUsers, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListUsersInput{} + } + + req := c.newRequest(op, input, &ListUsersOutput{}) + return ListUsersRequest{Request: req, Input: input, Copy: c.ListUsersRequest} +} + +// ListUsersRequest is the request type for the +// ListUsers API operation. +type ListUsersRequest struct { + *aws.Request + Input *ListUsersInput + Copy func(*ListUsersInput) ListUsersRequest +} + +// Send marshals and sends the ListUsers API request. 
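+//
+// Editor's sketch using the example path prefix from the input docs above
+// (svc is an assumed, already-configured Client; User is assumed to expose
+// UserName):
+//
+//    resp, err := svc.ListUsersRequest(&iam.ListUsersInput{
+//        PathPrefix: aws.String("/division_abc/subdivision_xyz/"),
+//    }).Send(context.TODO())
+//    if err == nil {
+//        for _, u := range resp.Users {
+//            fmt.Println(*u.UserName)
+//        }
+//    }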
+func (r ListUsersRequest) Send(ctx context.Context) (*ListUsersResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListUsersResponse{
+ ListUsersOutput: r.Request.Data.(*ListUsersOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewListUsersPaginator returns a paginator for ListUsers.
+// Use the Next method to get the next page and CurrentPage to get the current
+// response page from the paginator. Next returns false when there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.ListUsersRequest(input)
+// p := iam.NewListUsersPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewListUsersPaginator(req ListUsersRequest) ListUsersPaginator {
+ return ListUsersPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *ListUsersInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// ListUsersPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListUsersPaginator struct {
+ aws.Pager
+}
+
+func (p *ListUsersPaginator) CurrentPage() *ListUsersOutput {
+ return p.Pager.CurrentPage().(*ListUsersOutput)
+}
+
+// ListUsersResponse is the response type for the
+// ListUsers API operation.
+type ListUsersResponse struct {
+ *ListUsersOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListUsers request.
+func (r *ListUsersResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go
new file mode 100644
index 00000000..6bee2f6f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ListVirtualMFADevices.go
@@ -0,0 +1,213 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListVirtualMFADevicesRequest
+type ListVirtualMFADevicesInput struct {
+ _ struct{} `type:"structure"`
+
+ // The status (Unassigned or Assigned) of the devices to list. If you do not
+ // specify an AssignmentStatus, the operation defaults to Any, which lists both
+ // assigned and unassigned virtual MFA devices.
+ AssignmentStatus AssignmentStatusType `type:"string" enum:"true"`
+
+ // Use this parameter only when paginating results and only after you receive
+ // a response indicating that the results are truncated. Set it to the value
+ // of the Marker element in the response that you received to indicate where
+ // the next call should start.
+ Marker *string `min:"1" type:"string"`
+
+ // Use this only when paginating results to indicate the maximum number of items
+ // you want in the response.
If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListVirtualMFADevicesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ListVirtualMFADevicesInput"} + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful ListVirtualMFADevices request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListVirtualMFADevicesResponse +type ListVirtualMFADevicesOutput struct { + _ struct{} `type:"structure"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. + Marker *string `type:"string"` + + // The list of virtual MFA devices in the current account that match the AssignmentStatus + // value that was passed in the request. + // + // VirtualMFADevices is a required field + VirtualMFADevices []VirtualMFADevice `type:"list" required:"true"` +} + +// String returns the string representation +func (s ListVirtualMFADevicesOutput) String() string { + return awsutil.Prettify(s) +} + +const opListVirtualMFADevices = "ListVirtualMFADevices" + +// ListVirtualMFADevicesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Lists the virtual MFA devices defined in the AWS account by assignment status. +// If you do not specify an assignment status, the operation returns a list +// of all virtual MFA devices. Assignment status can be Assigned, Unassigned, +// or Any. +// +// You can paginate the results using the MaxItems and Marker parameters. +// +// // Example sending a request using ListVirtualMFADevicesRequest. 
+// req := client.ListVirtualMFADevicesRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListVirtualMFADevices
+func (c *Client) ListVirtualMFADevicesRequest(input *ListVirtualMFADevicesInput) ListVirtualMFADevicesRequest {
+ op := &aws.Operation{
+ Name: opListVirtualMFADevices,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ Paginator: &aws.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"Marker"},
+ LimitToken: "MaxItems",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListVirtualMFADevicesInput{}
+ }
+
+ req := c.newRequest(op, input, &ListVirtualMFADevicesOutput{})
+ return ListVirtualMFADevicesRequest{Request: req, Input: input, Copy: c.ListVirtualMFADevicesRequest}
+}
+
+// ListVirtualMFADevicesRequest is the request type for the
+// ListVirtualMFADevices API operation.
+type ListVirtualMFADevicesRequest struct {
+ *aws.Request
+ Input *ListVirtualMFADevicesInput
+ Copy func(*ListVirtualMFADevicesInput) ListVirtualMFADevicesRequest
+}
+
+// Send marshals and sends the ListVirtualMFADevices API request.
+func (r ListVirtualMFADevicesRequest) Send(ctx context.Context) (*ListVirtualMFADevicesResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &ListVirtualMFADevicesResponse{
+ ListVirtualMFADevicesOutput: r.Request.Data.(*ListVirtualMFADevicesOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// NewListVirtualMFADevicesPaginator returns a paginator for ListVirtualMFADevices.
+// Use the Next method to get the next page and CurrentPage to get the current
+// response page from the paginator. Next returns false when there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.ListVirtualMFADevicesRequest(input)
+// p := iam.NewListVirtualMFADevicesPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewListVirtualMFADevicesPaginator(req ListVirtualMFADevicesRequest) ListVirtualMFADevicesPaginator {
+ return ListVirtualMFADevicesPaginator{
+ Pager: aws.Pager{
+ NewRequest: func(ctx context.Context) (*aws.Request, error) {
+ var inCpy *ListVirtualMFADevicesInput
+ if req.Input != nil {
+ tmp := *req.Input
+ inCpy = &tmp
+ }
+
+ newReq := req.Copy(inCpy)
+ newReq.SetContext(ctx)
+ return newReq.Request, nil
+ },
+ },
+ }
+}
+
+// ListVirtualMFADevicesPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type ListVirtualMFADevicesPaginator struct {
+ aws.Pager
+}
+
+func (p *ListVirtualMFADevicesPaginator) CurrentPage() *ListVirtualMFADevicesOutput {
+ return p.Pager.CurrentPage().(*ListVirtualMFADevicesOutput)
+}
+
+// ListVirtualMFADevicesResponse is the response type for the
+// ListVirtualMFADevices API operation.
+type ListVirtualMFADevicesResponse struct {
+ *ListVirtualMFADevicesOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// ListVirtualMFADevices request.
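+//
+// A small sketch of reading the request ID off the metadata; it assumes
+// aws.Request exposes a RequestID field in this SDK snapshot (an
+// assumption, not something this file guarantees):
+//
+// // Hypothetical usage sketch, not generated code.
+// meta := resp.SDKResponseMetdata()
+// fmt.Println(meta.Request.RequestID)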
+func (r *ListVirtualMFADevicesResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go
new file mode 100644
index 00000000..87b6d324
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutGroupPolicy.go
@@ -0,0 +1,187 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+ "context"
+
+ "github.com/aws/aws-sdk-go-v2/aws"
+ "github.com/aws/aws-sdk-go-v2/internal/awsutil"
+ "github.com/aws/aws-sdk-go-v2/private/protocol"
+ "github.com/aws/aws-sdk-go-v2/private/protocol/query"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutGroupPolicyRequest
+type PutGroupPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ // The name of the group to associate the policy with.
+ //
+ // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: _+=,.@-
+ //
+ // GroupName is a required field
+ GroupName *string `min:"1" type:"string" required:"true"`
+
+ // The policy document.
+ //
+ // You must provide policies in JSON format in IAM. However, for AWS CloudFormation
+ // templates formatted in YAML, you can provide the policy in JSON or YAML format.
+ // AWS CloudFormation always converts a YAML policy to JSON format before submitting
+ // it to IAM.
+ //
+ // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this
+ // parameter is a string of characters consisting of the following:
+ //
+ // * Any printable ASCII character ranging from the space character (\u0020)
+ // through the end of the ASCII character range
+ //
+ // * The printable characters in the Basic Latin and Latin-1 Supplement character
+ // set (through \u00FF)
+ //
+ // * The special characters tab (\u0009), line feed (\u000A), and carriage
+ // return (\u000D)
+ //
+ // PolicyDocument is a required field
+ PolicyDocument *string `min:"1" type:"string" required:"true"`
+
+ // The name of the policy document.
+ //
+ // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: _+=,.@-
+ //
+ // PolicyName is a required field
+ PolicyName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutGroupPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
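+//
+// A short sketch of validating an input up front; the request pipeline
+// normally runs this validation as well, so the explicit call is optional.
+// All field values below are illustrative placeholders.
+//
+// // Hypothetical usage sketch, not generated code.
+// in := &iam.PutGroupPolicyInput{
+// GroupName: aws.String("admins"),
+// PolicyName: aws.String("inline-example"),
+// PolicyDocument: aws.String(`{"Version":"2012-10-17","Statement":[]}`),
+// }
+// if err := in.Validate(); err != nil {
+// return err
+// }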
+func (s *PutGroupPolicyInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "PutGroupPolicyInput"}
+
+ if s.GroupName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("GroupName"))
+ }
+ if s.GroupName != nil && len(*s.GroupName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1))
+ }
+
+ if s.PolicyDocument == nil {
+ invalidParams.Add(aws.NewErrParamRequired("PolicyDocument"))
+ }
+ if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("PolicyDocument", 1))
+ }
+
+ if s.PolicyName == nil {
+ invalidParams.Add(aws.NewErrParamRequired("PolicyName"))
+ }
+ if s.PolicyName != nil && len(*s.PolicyName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutGroupPolicyOutput
+type PutGroupPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutGroupPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opPutGroupPolicy = "PutGroupPolicy"
+
+// PutGroupPolicyRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Adds or updates an inline policy document that is embedded in the specified
+// IAM group.
+//
+// A group can also have managed policies attached to it. To attach a managed
+// policy to a group, use AttachGroupPolicy. To create a new managed policy,
+// use CreatePolicy. For information about policies, see Managed Policies and
+// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the IAM User Guide.
+//
+// For information about limits on the number of inline policies that you can
+// embed in a group, see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
+// in the IAM User Guide.
+//
+// Because policy documents can be large, you should use POST rather than GET
+// when calling PutGroupPolicy. For general information about using the Query
+// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using PutGroupPolicyRequest.
+// req := client.PutGroupPolicyRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutGroupPolicy
+func (c *Client) PutGroupPolicyRequest(input *PutGroupPolicyInput) PutGroupPolicyRequest {
+ op := &aws.Operation{
+ Name: opPutGroupPolicy,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &PutGroupPolicyInput{}
+ }
+
+ req := c.newRequest(op, input, &PutGroupPolicyOutput{})
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return PutGroupPolicyRequest{Request: req, Input: input, Copy: c.PutGroupPolicyRequest}
+}
+
+// PutGroupPolicyRequest is the request type for the
+// PutGroupPolicy API operation.
+type PutGroupPolicyRequest struct {
+ *aws.Request
+ Input *PutGroupPolicyInput
+ Copy func(*PutGroupPolicyInput) PutGroupPolicyRequest
+}
+
+// Send marshals and sends the PutGroupPolicy API request.
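+//
+// A sketch of sending with a deadline; it assumes the standard library
+// context and time packages, and reuses the illustrative svc and in values
+// from the sketches above:
+//
+// // Hypothetical usage sketch, not generated code.
+// ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+// defer cancel()
+// if _, err := svc.PutGroupPolicyRequest(in).Send(ctx); err != nil {
+// return err
+// }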
+func (r PutGroupPolicyRequest) Send(ctx context.Context) (*PutGroupPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutGroupPolicyResponse{ + PutGroupPolicyOutput: r.Request.Data.(*PutGroupPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutGroupPolicyResponse is the response type for the +// PutGroupPolicy API operation. +type PutGroupPolicyResponse struct { + *PutGroupPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutGroupPolicy request. +func (r *PutGroupPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go new file mode 100644 index 00000000..6a6b2b07 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePermissionsBoundary.go @@ -0,0 +1,150 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePermissionsBoundaryRequest +type PutRolePermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The ARN of the policy that is used to set the permissions boundary for the + // role. + // + // PermissionsBoundary is a required field + PermissionsBoundary *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM role for which you want to set + // the permissions boundary. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutRolePermissionsBoundaryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutRolePermissionsBoundaryInput"} + + if s.PermissionsBoundary == nil { + invalidParams.Add(aws.NewErrParamRequired("PermissionsBoundary")) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PermissionsBoundary", 20)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePermissionsBoundaryOutput +type PutRolePermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutRolePermissionsBoundary = "PutRolePermissionsBoundary" + +// PutRolePermissionsBoundaryRequest returns a request value for making API operation for +// AWS Identity and Access Management. 
+// +// Adds or updates the policy that is specified as the IAM role's permissions +// boundary. You can use an AWS managed policy or a customer managed policy +// to set the boundary for a role. Use the boundary to control the maximum permissions +// that the role can have. Setting a permissions boundary is an advanced feature +// that can affect the permissions for the role. +// +// You cannot set the boundary for a service-linked role. +// +// Policies used as permissions boundaries do not provide permissions. You must +// also attach a permissions policy to the role. To learn how the effective +// permissions for a role are evaluated, see IAM JSON Policy Evaluation Logic +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html) +// in the IAM User Guide. +// +// // Example sending a request using PutRolePermissionsBoundaryRequest. +// req := client.PutRolePermissionsBoundaryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePermissionsBoundary +func (c *Client) PutRolePermissionsBoundaryRequest(input *PutRolePermissionsBoundaryInput) PutRolePermissionsBoundaryRequest { + op := &aws.Operation{ + Name: opPutRolePermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRolePermissionsBoundaryInput{} + } + + req := c.newRequest(op, input, &PutRolePermissionsBoundaryOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return PutRolePermissionsBoundaryRequest{Request: req, Input: input, Copy: c.PutRolePermissionsBoundaryRequest} +} + +// PutRolePermissionsBoundaryRequest is the request type for the +// PutRolePermissionsBoundary API operation. +type PutRolePermissionsBoundaryRequest struct { + *aws.Request + Input *PutRolePermissionsBoundaryInput + Copy func(*PutRolePermissionsBoundaryInput) PutRolePermissionsBoundaryRequest +} + +// Send marshals and sends the PutRolePermissionsBoundary API request. +func (r PutRolePermissionsBoundaryRequest) Send(ctx context.Context) (*PutRolePermissionsBoundaryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutRolePermissionsBoundaryResponse{ + PutRolePermissionsBoundaryOutput: r.Request.Data.(*PutRolePermissionsBoundaryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutRolePermissionsBoundaryResponse is the response type for the +// PutRolePermissionsBoundary API operation. +type PutRolePermissionsBoundaryResponse struct { + *PutRolePermissionsBoundaryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutRolePermissionsBoundary request. +func (r *PutRolePermissionsBoundaryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go new file mode 100644 index 00000000..789b9906 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutRolePolicy.go @@ -0,0 +1,195 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePolicyRequest +type PutRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the role to associate the policy with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutRolePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutRolePolicyInput"} + + if s.PolicyDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyDocument", 1)) + } + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePolicyOutput +type PutRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutRolePolicy = "PutRolePolicy" + +// PutRolePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Adds or updates an inline policy document that is embedded in the specified +// IAM role. +// +// When you embed an inline policy in a role, the inline policy is used as part +// of the role's access (permissions) policy. The role's trust policy is created +// at the same time as the role, using CreateRole. You can update a role's trust +// policy using UpdateAssumeRolePolicy. For more information about IAM roles, +// go to Using Roles to Delegate Permissions and Federate Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// A role can also have a managed policy attached to it. To attach a managed +// policy to a role, use AttachRolePolicy. To create a new managed policy, use +// CreatePolicy. For information about policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed with a role, see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutRolePolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// // Example sending a request using PutRolePolicyRequest. 
+// req := client.PutRolePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutRolePolicy +func (c *Client) PutRolePolicyRequest(input *PutRolePolicyInput) PutRolePolicyRequest { + op := &aws.Operation{ + Name: opPutRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutRolePolicyInput{} + } + + req := c.newRequest(op, input, &PutRolePolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return PutRolePolicyRequest{Request: req, Input: input, Copy: c.PutRolePolicyRequest} +} + +// PutRolePolicyRequest is the request type for the +// PutRolePolicy API operation. +type PutRolePolicyRequest struct { + *aws.Request + Input *PutRolePolicyInput + Copy func(*PutRolePolicyInput) PutRolePolicyRequest +} + +// Send marshals and sends the PutRolePolicy API request. +func (r PutRolePolicyRequest) Send(ctx context.Context) (*PutRolePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutRolePolicyResponse{ + PutRolePolicyOutput: r.Request.Data.(*PutRolePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutRolePolicyResponse is the response type for the +// PutRolePolicy API operation. +type PutRolePolicyResponse struct { + *PutRolePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutRolePolicy request. +func (r *PutRolePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go new file mode 100644 index 00000000..34325488 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPermissionsBoundary.go @@ -0,0 +1,148 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPermissionsBoundaryRequest +type PutUserPermissionsBoundaryInput struct { + _ struct{} `type:"structure"` + + // The ARN of the policy that is used to set the permissions boundary for the + // user. + // + // PermissionsBoundary is a required field + PermissionsBoundary *string `min:"20" type:"string" required:"true"` + + // The name (friendly name, not ARN) of the IAM user for which you want to set + // the permissions boundary. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPermissionsBoundaryInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
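+//
+// A sketch of the input shape this validation expects; the ARN names the
+// AWS managed PowerUserAccess policy and is used purely as an illustration:
+//
+// // Hypothetical usage sketch, not generated code.
+// in := &iam.PutUserPermissionsBoundaryInput{
+// UserName: aws.String("jane"),
+// PermissionsBoundary: aws.String("arn:aws:iam::aws:policy/PowerUserAccess"),
+// }
+// if err := in.Validate(); err != nil {
+// return err
+// }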
+func (s *PutUserPermissionsBoundaryInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutUserPermissionsBoundaryInput"} + + if s.PermissionsBoundary == nil { + invalidParams.Add(aws.NewErrParamRequired("PermissionsBoundary")) + } + if s.PermissionsBoundary != nil && len(*s.PermissionsBoundary) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PermissionsBoundary", 20)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPermissionsBoundaryOutput +type PutUserPermissionsBoundaryOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPermissionsBoundaryOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutUserPermissionsBoundary = "PutUserPermissionsBoundary" + +// PutUserPermissionsBoundaryRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Adds or updates the policy that is specified as the IAM user's permissions +// boundary. You can use an AWS managed policy or a customer managed policy +// to set the boundary for a user. Use the boundary to control the maximum permissions +// that the user can have. Setting a permissions boundary is an advanced feature +// that can affect the permissions for the user. +// +// Policies that are used as permissions boundaries do not provide permissions. +// You must also attach a permissions policy to the user. To learn how the effective +// permissions for a user are evaluated, see IAM JSON Policy Evaluation Logic +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html) +// in the IAM User Guide. +// +// // Example sending a request using PutUserPermissionsBoundaryRequest. +// req := client.PutUserPermissionsBoundaryRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPermissionsBoundary +func (c *Client) PutUserPermissionsBoundaryRequest(input *PutUserPermissionsBoundaryInput) PutUserPermissionsBoundaryRequest { + op := &aws.Operation{ + Name: opPutUserPermissionsBoundary, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutUserPermissionsBoundaryInput{} + } + + req := c.newRequest(op, input, &PutUserPermissionsBoundaryOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return PutUserPermissionsBoundaryRequest{Request: req, Input: input, Copy: c.PutUserPermissionsBoundaryRequest} +} + +// PutUserPermissionsBoundaryRequest is the request type for the +// PutUserPermissionsBoundary API operation. +type PutUserPermissionsBoundaryRequest struct { + *aws.Request + Input *PutUserPermissionsBoundaryInput + Copy func(*PutUserPermissionsBoundaryInput) PutUserPermissionsBoundaryRequest +} + +// Send marshals and sends the PutUserPermissionsBoundary API request. 
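+//
+// Because the output struct carries no fields, callers typically check only
+// the error from Send; a minimal sketch, with svc, in, and ctx assumed from
+// the sketches above:
+//
+// // Hypothetical usage sketch, not generated code.
+// if _, err := svc.PutUserPermissionsBoundaryRequest(in).Send(ctx); err != nil {
+// return err
+// }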
+func (r PutUserPermissionsBoundaryRequest) Send(ctx context.Context) (*PutUserPermissionsBoundaryResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutUserPermissionsBoundaryResponse{ + PutUserPermissionsBoundaryOutput: r.Request.Data.(*PutUserPermissionsBoundaryOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutUserPermissionsBoundaryResponse is the response type for the +// PutUserPermissionsBoundary API operation. +type PutUserPermissionsBoundaryResponse struct { + *PutUserPermissionsBoundaryOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutUserPermissionsBoundary request. +func (r *PutUserPermissionsBoundaryResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go new file mode 100644 index 00000000..deebd6cd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_PutUserPolicy.go @@ -0,0 +1,189 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPolicyRequest +type PutUserPolicyInput struct { + _ struct{} `type:"structure"` + + // The policy document. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the policy document. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The name of the user to associate the policy with. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. 
You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutUserPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutUserPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "PutUserPolicyInput"} + + if s.PolicyDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyDocument", 1)) + } + + if s.PolicyName == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyName")) + } + if s.PolicyName != nil && len(*s.PolicyName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyName", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPolicyOutput +type PutUserPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutUserPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opPutUserPolicy = "PutUserPolicy" + +// PutUserPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Adds or updates an inline policy document that is embedded in the specified +// IAM user. +// +// An IAM user can also have a managed policy attached to it. To attach a managed +// policy to a user, use AttachUserPolicy. To create a new managed policy, use +// CreatePolicy. For information about policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// For information about limits on the number of inline policies that you can +// embed in a user, see Limitations on IAM Entities (https://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html) +// in the IAM User Guide. +// +// Because policy documents can be large, you should use POST rather than GET +// when calling PutUserPolicy. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// // Example sending a request using PutUserPolicyRequest. 
+// req := client.PutUserPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PutUserPolicy +func (c *Client) PutUserPolicyRequest(input *PutUserPolicyInput) PutUserPolicyRequest { + op := &aws.Operation{ + Name: opPutUserPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PutUserPolicyInput{} + } + + req := c.newRequest(op, input, &PutUserPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return PutUserPolicyRequest{Request: req, Input: input, Copy: c.PutUserPolicyRequest} +} + +// PutUserPolicyRequest is the request type for the +// PutUserPolicy API operation. +type PutUserPolicyRequest struct { + *aws.Request + Input *PutUserPolicyInput + Copy func(*PutUserPolicyInput) PutUserPolicyRequest +} + +// Send marshals and sends the PutUserPolicy API request. +func (r PutUserPolicyRequest) Send(ctx context.Context) (*PutUserPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &PutUserPolicyResponse{ + PutUserPolicyOutput: r.Request.Data.(*PutUserPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// PutUserPolicyResponse is the response type for the +// PutUserPolicy API operation. +type PutUserPolicyResponse struct { + *PutUserPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// PutUserPolicy request. +func (r *PutUserPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go new file mode 100644 index 00000000..e50856ec --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveClientIDFromOpenIDConnectProvider.go @@ -0,0 +1,148 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveClientIDFromOpenIDConnectProviderRequest +type RemoveClientIDFromOpenIDConnectProviderInput struct { + _ struct{} `type:"structure"` + + // The client ID (also known as audience) to remove from the IAM OIDC provider + // resource. For more information about client IDs, see CreateOpenIDConnectProvider. + // + // ClientID is a required field + ClientID *string `min:"1" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource to remove + // the client ID from. You can get a list of OIDC provider ARNs by using the + // ListOpenIDConnectProviders operation. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
+ // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RemoveClientIDFromOpenIDConnectProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RemoveClientIDFromOpenIDConnectProviderInput"} + + if s.ClientID == nil { + invalidParams.Add(aws.NewErrParamRequired("ClientID")) + } + if s.ClientID != nil && len(*s.ClientID) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ClientID", 1)) + } + + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveClientIDFromOpenIDConnectProviderOutput +type RemoveClientIDFromOpenIDConnectProviderOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveClientIDFromOpenIDConnectProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opRemoveClientIDFromOpenIDConnectProvider = "RemoveClientIDFromOpenIDConnectProvider" + +// RemoveClientIDFromOpenIDConnectProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified client ID (also known as audience) from the list of +// client IDs registered for the specified IAM OpenID Connect (OIDC) provider +// resource object. +// +// This operation is idempotent; it does not fail or return an error if you +// try to remove a client ID that does not exist. +// +// // Example sending a request using RemoveClientIDFromOpenIDConnectProviderRequest. +// req := client.RemoveClientIDFromOpenIDConnectProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveClientIDFromOpenIDConnectProvider +func (c *Client) RemoveClientIDFromOpenIDConnectProviderRequest(input *RemoveClientIDFromOpenIDConnectProviderInput) RemoveClientIDFromOpenIDConnectProviderRequest { + op := &aws.Operation{ + Name: opRemoveClientIDFromOpenIDConnectProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveClientIDFromOpenIDConnectProviderInput{} + } + + req := c.newRequest(op, input, &RemoveClientIDFromOpenIDConnectProviderOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return RemoveClientIDFromOpenIDConnectProviderRequest{Request: req, Input: input, Copy: c.RemoveClientIDFromOpenIDConnectProviderRequest} +} + +// RemoveClientIDFromOpenIDConnectProviderRequest is the request type for the +// RemoveClientIDFromOpenIDConnectProvider API operation. 
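+//
+// A sketch of building and sending this request; both field values are
+// illustrative placeholders:
+//
+// // Hypothetical usage sketch, not generated code.
+// req := svc.RemoveClientIDFromOpenIDConnectProviderRequest(&iam.RemoveClientIDFromOpenIDConnectProviderInput{
+// ClientID: aws.String("my-application-id"),
+// OpenIDConnectProviderArn: aws.String("arn:aws:iam::123456789012:oidc-provider/server.example.com"),
+// })
+// _, err := req.Send(context.TODO())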
+type RemoveClientIDFromOpenIDConnectProviderRequest struct { + *aws.Request + Input *RemoveClientIDFromOpenIDConnectProviderInput + Copy func(*RemoveClientIDFromOpenIDConnectProviderInput) RemoveClientIDFromOpenIDConnectProviderRequest +} + +// Send marshals and sends the RemoveClientIDFromOpenIDConnectProvider API request. +func (r RemoveClientIDFromOpenIDConnectProviderRequest) Send(ctx context.Context) (*RemoveClientIDFromOpenIDConnectProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RemoveClientIDFromOpenIDConnectProviderResponse{ + RemoveClientIDFromOpenIDConnectProviderOutput: r.Request.Data.(*RemoveClientIDFromOpenIDConnectProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RemoveClientIDFromOpenIDConnectProviderResponse is the response type for the +// RemoveClientIDFromOpenIDConnectProvider API operation. +type RemoveClientIDFromOpenIDConnectProviderResponse struct { + *RemoveClientIDFromOpenIDConnectProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RemoveClientIDFromOpenIDConnectProvider request. +func (r *RemoveClientIDFromOpenIDConnectProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go new file mode 100644 index 00000000..c74f3977 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveRoleFromInstanceProfile.go @@ -0,0 +1,153 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveRoleFromInstanceProfileRequest +type RemoveRoleFromInstanceProfileInput struct { + _ struct{} `type:"structure"` + + // The name of the instance profile to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The name of the role to remove. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *RemoveRoleFromInstanceProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RemoveRoleFromInstanceProfileInput"} + + if s.InstanceProfileName == nil { + invalidParams.Add(aws.NewErrParamRequired("InstanceProfileName")) + } + if s.InstanceProfileName != nil && len(*s.InstanceProfileName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("InstanceProfileName", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveRoleFromInstanceProfileOutput +type RemoveRoleFromInstanceProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveRoleFromInstanceProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opRemoveRoleFromInstanceProfile = "RemoveRoleFromInstanceProfile" + +// RemoveRoleFromInstanceProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified IAM role from the specified EC2 instance profile. +// +// Make sure that you do not have any Amazon EC2 instances running with the +// role you are about to remove from the instance profile. Removing a role from +// an instance profile that is associated with a running instance might break +// any applications running on the instance. +// +// For more information about IAM roles, go to Working with Roles (https://docs.aws.amazon.com/IAM/latest/UserGuide/WorkingWithRoles.html). +// For more information about instance profiles, go to About Instance Profiles +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/AboutInstanceProfiles.html). +// +// // Example sending a request using RemoveRoleFromInstanceProfileRequest. +// req := client.RemoveRoleFromInstanceProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveRoleFromInstanceProfile +func (c *Client) RemoveRoleFromInstanceProfileRequest(input *RemoveRoleFromInstanceProfileInput) RemoveRoleFromInstanceProfileRequest { + op := &aws.Operation{ + Name: opRemoveRoleFromInstanceProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveRoleFromInstanceProfileInput{} + } + + req := c.newRequest(op, input, &RemoveRoleFromInstanceProfileOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return RemoveRoleFromInstanceProfileRequest{Request: req, Input: input, Copy: c.RemoveRoleFromInstanceProfileRequest} +} + +// RemoveRoleFromInstanceProfileRequest is the request type for the +// RemoveRoleFromInstanceProfile API operation. +type RemoveRoleFromInstanceProfileRequest struct { + *aws.Request + Input *RemoveRoleFromInstanceProfileInput + Copy func(*RemoveRoleFromInstanceProfileInput) RemoveRoleFromInstanceProfileRequest +} + +// Send marshals and sends the RemoveRoleFromInstanceProfile API request. 
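+//
+// A sketch of distinguishing a missing entity from other failures; it
+// assumes the awserr package and the generated ErrCodeNoSuchEntityException
+// constant are available in this SDK snapshot (both are assumptions):
+//
+// // Hypothetical usage sketch, not generated code.
+// if _, err := req.Send(ctx); err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == iam.ErrCodeNoSuchEntityException {
+// // The role or instance profile does not exist; treat as already removed.
+// }
+// }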
+func (r RemoveRoleFromInstanceProfileRequest) Send(ctx context.Context) (*RemoveRoleFromInstanceProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RemoveRoleFromInstanceProfileResponse{ + RemoveRoleFromInstanceProfileOutput: r.Request.Data.(*RemoveRoleFromInstanceProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RemoveRoleFromInstanceProfileResponse is the response type for the +// RemoveRoleFromInstanceProfile API operation. +type RemoveRoleFromInstanceProfileResponse struct { + *RemoveRoleFromInstanceProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RemoveRoleFromInstanceProfile request. +func (r *RemoveRoleFromInstanceProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go new file mode 100644 index 00000000..cd514104 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_RemoveUserFromGroup.go @@ -0,0 +1,144 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveUserFromGroupRequest +type RemoveUserFromGroupInput struct { + _ struct{} `type:"structure"` + + // The name of the group to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The name of the user to remove. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s RemoveUserFromGroupInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
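+//
+// A sketch of removing one user from several groups; each input passes
+// through this validation inside the request pipeline. All names are
+// illustrative placeholders.
+//
+// // Hypothetical usage sketch, not generated code.
+// for _, g := range []string{"admins", "devs"} {
+// in := &iam.RemoveUserFromGroupInput{GroupName: aws.String(g), UserName: aws.String("jane")}
+// if _, err := svc.RemoveUserFromGroupRequest(in).Send(ctx); err != nil {
+// return err
+// }
+// }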
+func (s *RemoveUserFromGroupInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "RemoveUserFromGroupInput"} + + if s.GroupName == nil { + invalidParams.Add(aws.NewErrParamRequired("GroupName")) + } + if s.GroupName != nil && len(*s.GroupName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveUserFromGroupOutput +type RemoveUserFromGroupOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s RemoveUserFromGroupOutput) String() string { + return awsutil.Prettify(s) +} + +const opRemoveUserFromGroup = "RemoveUserFromGroup" + +// RemoveUserFromGroupRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified user from the specified group. +// +// // Example sending a request using RemoveUserFromGroupRequest. +// req := client.RemoveUserFromGroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RemoveUserFromGroup +func (c *Client) RemoveUserFromGroupRequest(input *RemoveUserFromGroupInput) RemoveUserFromGroupRequest { + op := &aws.Operation{ + Name: opRemoveUserFromGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RemoveUserFromGroupInput{} + } + + req := c.newRequest(op, input, &RemoveUserFromGroupOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return RemoveUserFromGroupRequest{Request: req, Input: input, Copy: c.RemoveUserFromGroupRequest} +} + +// RemoveUserFromGroupRequest is the request type for the +// RemoveUserFromGroup API operation. +type RemoveUserFromGroupRequest struct { + *aws.Request + Input *RemoveUserFromGroupInput + Copy func(*RemoveUserFromGroupInput) RemoveUserFromGroupRequest +} + +// Send marshals and sends the RemoveUserFromGroup API request. +func (r RemoveUserFromGroupRequest) Send(ctx context.Context) (*RemoveUserFromGroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &RemoveUserFromGroupResponse{ + RemoveUserFromGroupOutput: r.Request.Data.(*RemoveUserFromGroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// RemoveUserFromGroupResponse is the response type for the +// RemoveUserFromGroup API operation. +type RemoveUserFromGroupResponse struct { + *RemoveUserFromGroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// RemoveUserFromGroup request. 
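+//
+// A hedged sketch of pulling the request ID out of the metadata (resp is a
+// *RemoveUserFromGroupResponse; the Request.RequestID layout is assumed from
+// this SDK version's aws.Response and aws.Request types):
+//
+//    requestID := resp.SDKResponseMetdata().Request.RequestID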
+func (r *RemoveUserFromGroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go new file mode 100644 index 00000000..83d34872 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResetServiceSpecificCredential.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResetServiceSpecificCredentialRequest +type ResetServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the service-specific credential. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // ServiceSpecificCredentialId is a required field + ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"` + + // The name of the IAM user associated with the service-specific credential. + // If this value is not specified, then the operation assumes the user whose + // credentials are used to call the operation. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ResetServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ResetServiceSpecificCredentialInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ResetServiceSpecificCredentialInput"} + + if s.ServiceSpecificCredentialId == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceSpecificCredentialId")) + } + if s.ServiceSpecificCredentialId != nil && len(*s.ServiceSpecificCredentialId) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("ServiceSpecificCredentialId", 20)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResetServiceSpecificCredentialResponse +type ResetServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` + + // A structure with details about the updated service-specific credential, including + // the new password. + // + // This is the only time that you can access the password. You cannot recover + // the password later, but you can reset it again. 
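+	//
+	// A hedged sketch of capturing the regenerated secret immediately (resp is
+	// a *ResetServiceSpecificCredentialResponse; nil checks are elided, and the
+	// ServicePassword field name is assumed from the IAM credential shape):
+	//
+	//    newPassword := *resp.ServiceSpecificCredential.ServicePassword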
+ ServiceSpecificCredential *ServiceSpecificCredential `type:"structure"` +} + +// String returns the string representation +func (s ResetServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +const opResetServiceSpecificCredential = "ResetServiceSpecificCredential" + +// ResetServiceSpecificCredentialRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Resets the password for a service-specific credential. The new password is +// AWS generated and cryptographically strong. It cannot be configured by the +// user. Resetting the password immediately invalidates the previous password +// associated with this user. +// +// // Example sending a request using ResetServiceSpecificCredentialRequest. +// req := client.ResetServiceSpecificCredentialRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResetServiceSpecificCredential +func (c *Client) ResetServiceSpecificCredentialRequest(input *ResetServiceSpecificCredentialInput) ResetServiceSpecificCredentialRequest { + op := &aws.Operation{ + Name: opResetServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResetServiceSpecificCredentialInput{} + } + + req := c.newRequest(op, input, &ResetServiceSpecificCredentialOutput{}) + return ResetServiceSpecificCredentialRequest{Request: req, Input: input, Copy: c.ResetServiceSpecificCredentialRequest} +} + +// ResetServiceSpecificCredentialRequest is the request type for the +// ResetServiceSpecificCredential API operation. +type ResetServiceSpecificCredentialRequest struct { + *aws.Request + Input *ResetServiceSpecificCredentialInput + Copy func(*ResetServiceSpecificCredentialInput) ResetServiceSpecificCredentialRequest +} + +// Send marshals and sends the ResetServiceSpecificCredential API request. +func (r ResetServiceSpecificCredentialRequest) Send(ctx context.Context) (*ResetServiceSpecificCredentialResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ResetServiceSpecificCredentialResponse{ + ResetServiceSpecificCredentialOutput: r.Request.Data.(*ResetServiceSpecificCredentialOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ResetServiceSpecificCredentialResponse is the response type for the +// ResetServiceSpecificCredential API operation. +type ResetServiceSpecificCredentialResponse struct { + *ResetServiceSpecificCredentialOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ResetServiceSpecificCredential request. +func (r *ResetServiceSpecificCredentialResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go new file mode 100644 index 00000000..1956910e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_ResyncMFADevice.go @@ -0,0 +1,177 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResyncMFADeviceRequest +type ResyncMFADeviceInput struct { + _ struct{} `type:"structure"` + + // An authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. + // + // AuthenticationCode1 is a required field + AuthenticationCode1 *string `min:"6" type:"string" required:"true"` + + // A subsequent authentication code emitted by the device. + // + // The format for this parameter is a sequence of six digits. + // + // AuthenticationCode2 is a required field + AuthenticationCode2 *string `min:"6" type:"string" required:"true"` + + // Serial number that uniquely identifies the MFA device. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The name of the user whose MFA device you want to resynchronize. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ResyncMFADeviceInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
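+//
+// A hedged sketch of an input that satisfies these constraints (all values
+// are placeholders; the two codes are consecutive codes from the device):
+//
+//    params := &ResyncMFADeviceInput{
+//        UserName:            aws.String("jane"),
+//        SerialNumber:        aws.String("arn:aws:iam::123456789012:mfa/jane"),
+//        AuthenticationCode1: aws.String("123456"),
+//        AuthenticationCode2: aws.String("654321"),
+//    }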
+func (s *ResyncMFADeviceInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "ResyncMFADeviceInput"} + + if s.AuthenticationCode1 == nil { + invalidParams.Add(aws.NewErrParamRequired("AuthenticationCode1")) + } + if s.AuthenticationCode1 != nil && len(*s.AuthenticationCode1) < 6 { + invalidParams.Add(aws.NewErrParamMinLen("AuthenticationCode1", 6)) + } + + if s.AuthenticationCode2 == nil { + invalidParams.Add(aws.NewErrParamRequired("AuthenticationCode2")) + } + if s.AuthenticationCode2 != nil && len(*s.AuthenticationCode2) < 6 { + invalidParams.Add(aws.NewErrParamMinLen("AuthenticationCode2", 6)) + } + + if s.SerialNumber == nil { + invalidParams.Add(aws.NewErrParamRequired("SerialNumber")) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(aws.NewErrParamMinLen("SerialNumber", 9)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResyncMFADeviceOutput +type ResyncMFADeviceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s ResyncMFADeviceOutput) String() string { + return awsutil.Prettify(s) +} + +const opResyncMFADevice = "ResyncMFADevice" + +// ResyncMFADeviceRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Synchronizes the specified MFA device with its IAM resource object on the +// AWS servers. +// +// For more information about creating and working with virtual MFA devices, +// go to Using a Virtual MFA Device (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_VirtualMFA.html) +// in the IAM User Guide. +// +// // Example sending a request using ResyncMFADeviceRequest. +// req := client.ResyncMFADeviceRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResyncMFADevice +func (c *Client) ResyncMFADeviceRequest(input *ResyncMFADeviceInput) ResyncMFADeviceRequest { + op := &aws.Operation{ + Name: opResyncMFADevice, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &ResyncMFADeviceInput{} + } + + req := c.newRequest(op, input, &ResyncMFADeviceOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return ResyncMFADeviceRequest{Request: req, Input: input, Copy: c.ResyncMFADeviceRequest} +} + +// ResyncMFADeviceRequest is the request type for the +// ResyncMFADevice API operation. +type ResyncMFADeviceRequest struct { + *aws.Request + Input *ResyncMFADeviceInput + Copy func(*ResyncMFADeviceInput) ResyncMFADeviceRequest +} + +// Send marshals and sends the ResyncMFADevice API request. +func (r ResyncMFADeviceRequest) Send(ctx context.Context) (*ResyncMFADeviceResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &ResyncMFADeviceResponse{ + ResyncMFADeviceOutput: r.Request.Data.(*ResyncMFADeviceOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// ResyncMFADeviceResponse is the response type for the +// ResyncMFADevice API operation. 
+type ResyncMFADeviceResponse struct { + *ResyncMFADeviceOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// ResyncMFADevice request. +func (r *ResyncMFADeviceResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go new file mode 100644 index 00000000..441e2ae1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetDefaultPolicyVersion.go @@ -0,0 +1,151 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetDefaultPolicyVersionRequest +type SetDefaultPolicyVersionInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM policy whose default version you + // want to set. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicyArn is a required field + PolicyArn *string `min:"20" type:"string" required:"true"` + + // The version of the policy to set as the default (operative) version. + // + // For more information about managed policy versions, see Versioning for Managed + // Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the IAM User Guide. + // + // VersionId is a required field + VersionId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetDefaultPolicyVersionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SetDefaultPolicyVersionInput"} + + if s.PolicyArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyArn")) + } + if s.PolicyArn != nil && len(*s.PolicyArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyArn", 20)) + } + + if s.VersionId == nil { + invalidParams.Add(aws.NewErrParamRequired("VersionId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetDefaultPolicyVersionOutput +type SetDefaultPolicyVersionOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetDefaultPolicyVersionOutput) String() string { + return awsutil.Prettify(s) +} + +const opSetDefaultPolicyVersion = "SetDefaultPolicyVersion" + +// SetDefaultPolicyVersionRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Sets the specified version of the specified policy as the policy's default +// (operative) version. +// +// This operation affects all users, groups, and roles that the policy is attached +// to. 
To list the users, groups, and roles that the policy is attached to, +// use the ListEntitiesForPolicy API. +// +// For information about managed policies, see Managed Policies and Inline Policies +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the IAM User Guide. +// +// // Example sending a request using SetDefaultPolicyVersionRequest. +// req := client.SetDefaultPolicyVersionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetDefaultPolicyVersion +func (c *Client) SetDefaultPolicyVersionRequest(input *SetDefaultPolicyVersionInput) SetDefaultPolicyVersionRequest { + op := &aws.Operation{ + Name: opSetDefaultPolicyVersion, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetDefaultPolicyVersionInput{} + } + + req := c.newRequest(op, input, &SetDefaultPolicyVersionOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return SetDefaultPolicyVersionRequest{Request: req, Input: input, Copy: c.SetDefaultPolicyVersionRequest} +} + +// SetDefaultPolicyVersionRequest is the request type for the +// SetDefaultPolicyVersion API operation. +type SetDefaultPolicyVersionRequest struct { + *aws.Request + Input *SetDefaultPolicyVersionInput + Copy func(*SetDefaultPolicyVersionInput) SetDefaultPolicyVersionRequest +} + +// Send marshals and sends the SetDefaultPolicyVersion API request. +func (r SetDefaultPolicyVersionRequest) Send(ctx context.Context) (*SetDefaultPolicyVersionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &SetDefaultPolicyVersionResponse{ + SetDefaultPolicyVersionOutput: r.Request.Data.(*SetDefaultPolicyVersionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// SetDefaultPolicyVersionResponse is the response type for the +// SetDefaultPolicyVersion API operation. +type SetDefaultPolicyVersionResponse struct { + *SetDefaultPolicyVersionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// SetDefaultPolicyVersion request. +func (r *SetDefaultPolicyVersionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go new file mode 100644 index 00000000..50a56d0c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SetSecurityTokenServicePreferences.go @@ -0,0 +1,148 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetSecurityTokenServicePreferencesRequest +type SetSecurityTokenServicePreferencesInput struct { + _ struct{} `type:"structure"` + + // The version of the global endpoint token. Version 1 tokens are valid only + // in AWS Regions that are available by default. 
These tokens do not work in + // manually enabled Regions, such as Asia Pacific (Hong Kong). Version 2 tokens + // are valid in all Regions. However, version 2 tokens are longer and might + // affect systems where you temporarily store tokens. + // + // For information, see Activating and Deactivating STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) + // in the IAM User Guide. + // + // GlobalEndpointTokenVersion is a required field + GlobalEndpointTokenVersion GlobalEndpointTokenVersion `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s SetSecurityTokenServicePreferencesInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SetSecurityTokenServicePreferencesInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SetSecurityTokenServicePreferencesInput"} + if len(s.GlobalEndpointTokenVersion) == 0 { + invalidParams.Add(aws.NewErrParamRequired("GlobalEndpointTokenVersion")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetSecurityTokenServicePreferencesOutput +type SetSecurityTokenServicePreferencesOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s SetSecurityTokenServicePreferencesOutput) String() string { + return awsutil.Prettify(s) +} + +const opSetSecurityTokenServicePreferences = "SetSecurityTokenServicePreferences" + +// SetSecurityTokenServicePreferencesRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Sets the specified version of the global endpoint token as the token version +// used for the AWS account. +// +// By default, AWS Security Token Service (STS) is available as a global service, +// and all STS requests go to a single endpoint at https://sts.amazonaws.com. +// AWS recommends using Regional STS endpoints to reduce latency, build in redundancy, +// and increase session token availability. For information about Regional endpoints +// for STS, see AWS Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region) +// in the AWS General Reference. +// +// If you make an STS call to the global endpoint, the resulting session tokens +// might be valid in some Regions but not others. It depends on the version +// that is set in this operation. Version 1 tokens are valid only in AWS Regions +// that are available by default. These tokens do not work in manually enabled +// Regions, such as Asia Pacific (Hong Kong). Version 2 tokens are valid in +// all Regions. However, version 2 tokens are longer and might affect systems +// where you temporarily store tokens. For information, see Activating and Deactivating +// STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// To view the current session token version, see the GlobalEndpointTokenVersion +// entry in the response of the GetAccountSummary operation. +// +// // Example sending a request using SetSecurityTokenServicePreferencesRequest. 
+// req := client.SetSecurityTokenServicePreferencesRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SetSecurityTokenServicePreferences +func (c *Client) SetSecurityTokenServicePreferencesRequest(input *SetSecurityTokenServicePreferencesInput) SetSecurityTokenServicePreferencesRequest { + op := &aws.Operation{ + Name: opSetSecurityTokenServicePreferences, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &SetSecurityTokenServicePreferencesInput{} + } + + req := c.newRequest(op, input, &SetSecurityTokenServicePreferencesOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return SetSecurityTokenServicePreferencesRequest{Request: req, Input: input, Copy: c.SetSecurityTokenServicePreferencesRequest} +} + +// SetSecurityTokenServicePreferencesRequest is the request type for the +// SetSecurityTokenServicePreferences API operation. +type SetSecurityTokenServicePreferencesRequest struct { + *aws.Request + Input *SetSecurityTokenServicePreferencesInput + Copy func(*SetSecurityTokenServicePreferencesInput) SetSecurityTokenServicePreferencesRequest +} + +// Send marshals and sends the SetSecurityTokenServicePreferences API request. +func (r SetSecurityTokenServicePreferencesRequest) Send(ctx context.Context) (*SetSecurityTokenServicePreferencesResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &SetSecurityTokenServicePreferencesResponse{ + SetSecurityTokenServicePreferencesOutput: r.Request.Data.(*SetSecurityTokenServicePreferencesOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// SetSecurityTokenServicePreferencesResponse is the response type for the +// SetSecurityTokenServicePreferences API operation. +type SetSecurityTokenServicePreferencesResponse struct { + *SetSecurityTokenServicePreferencesOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// SetSecurityTokenServicePreferences request. +func (r *SetSecurityTokenServicePreferencesResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go new file mode 100644 index 00000000..7bf0a74f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulateCustomPolicy.go @@ -0,0 +1,373 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulateCustomPolicyRequest +type SimulateCustomPolicyInput struct { + _ struct{} `type:"structure"` + + // A list of names of API operations to evaluate in the simulation. Each operation + // is evaluated against each resource. Each operation must include the service + // identifier, such as iam:CreateUser. This operation does not support using + // wildcards (*) in an action name. 
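+	//
+	// For illustration only, a hedged example value (not from the generated
+	// doc) might look like:
+	//
+	//    ActionNames: []string{"iam:CreateUser", "iam:DeleteUser"}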
+ // + // ActionNames is a required field + ActionNames []string `type:"list" required:"true"` + + // The ARN of the IAM user that you want to use as the simulated caller of the + // API operations. CallerArn is required if you include a ResourcePolicy so + // that the policy's Principal element has a value to use in evaluating the + // policy. + // + // You can specify only the ARN of an IAM user. You cannot specify the ARN of + // an assumed role, federated user, or a service principal. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated in one of the simulated IAM permissions + // policies, the corresponding value is supplied. + ContextEntries []ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // A list of policy documents to include in the simulation. Each document is + // specified as a string containing the complete, valid JSON text of an IAM + // policy. Do not include any resource-based policies in this parameter. Any + // resource-based policy must be submitted with the ResourcePolicy parameter. + // The policies cannot be "scope-down" policies, such as you could include in + // a call to GetFederationToken (https://docs.aws.amazon.com/IAM/latest/APIReference/API_GetFederationToken.html) + // or one of the AssumeRole (https://docs.aws.amazon.com/IAM/latest/APIReference/API_AssumeRole.html) + // API operations. In other words, do not use policies designed to restrict + // what a user can do while using the temporary credentials. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyInputList is a required field + PolicyInputList []string `type:"list" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided, then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. 
+	//
+	// The simulation does not automatically retrieve policies for the specified
+	// resources. If you want to include a resource policy in the simulation, then
+	// you must include the policy as a string in the ResourcePolicy parameter.
+	//
+	// If you include a ResourcePolicy, then it must be applicable to all of the
+	// resources included in the simulation or you receive an invalid input error.
+	//
+	// For more information about ARNs, see Amazon Resource Names (ARNs) and AWS
+	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	ResourceArns []string `type:"list"`
+
+	// Specifies the type of simulation to run. Different API operations that support
+	// resource-based policies require different combinations of resources. By specifying
+	// the type of simulation to run, you enable the policy simulator to enforce
+	// the presence of the required resources to ensure reliable simulation results.
+	// If your simulation does not match one of the following scenarios, then you
+	// can omit this parameter. The following list shows each of the supported scenario
+	// values and the resources that you must define to run the simulation.
+	//
+	// Each of the EC2 scenarios requires that you specify instance, image, and
+	// security-group resources. If your scenario includes an EBS volume, then you
+	// must specify that volume as a resource. If the EC2 scenario includes VPC,
+	// then you must supply the network-interface resource. If it includes an IP
+	// subnet, then you must specify the subnet resource. For more information on
+	// the EC2 scenario options, see Supported Platforms (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html)
+	// in the Amazon EC2 User Guide.
+	//
+	//    * EC2-Classic-InstanceStore instance, image, security-group
+	//
+	//    * EC2-Classic-EBS instance, image, security-group, volume
+	//
+	//    * EC2-VPC-InstanceStore instance, image, security-group, network-interface
+	//
+	//    * EC2-VPC-InstanceStore-Subnet instance, image, security-group, network-interface,
+	//    subnet
+	//
+	//    * EC2-VPC-EBS instance, image, security-group, network-interface, volume
+	//
+	//    * EC2-VPC-EBS-Subnet instance, image, security-group, network-interface,
+	//    subnet, volume
+	ResourceHandlingOption *string `min:"1" type:"string"`
+
+	// An ARN representing the AWS account ID that specifies the owner of any simulated
+	// resource that does not identify its owner in the resource ARN. Examples of
+	// resource ARNs include an S3 bucket or object. If ResourceOwner is specified,
+	// it is also used as the account owner of any ResourcePolicy included in the
+	// simulation. If the ResourceOwner parameter is not specified, then the owner
+	// of the resources and the resource policy defaults to the account of the identity
+	// provided in CallerArn. This parameter is required only if you specify a resource-based
+	// policy and the account that owns the resource is different from the account that
+	// owns the simulated calling user CallerArn.
+	//
+	// The ARN for an account uses the following syntax: arn:aws:iam::AWS-account-ID:root.
+	// For example, to represent the account with the 112233445566 ID, use the following
+	// ARN: arn:aws:iam::112233445566:root.
+	ResourceOwner *string `min:"1" type:"string"`
+
+	// A resource-based policy to include in the simulation provided as a string.
+	// Each resource in the simulation is treated as if it had this policy attached.
+ // You can include only one resource-based policy in a simulation. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulateCustomPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SimulateCustomPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SimulateCustomPolicyInput"} + + if s.ActionNames == nil { + invalidParams.Add(aws.NewErrParamRequired("ActionNames")) + } + if s.CallerArn != nil && len(*s.CallerArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CallerArn", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.PolicyInputList == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyInputList")) + } + if s.ResourceHandlingOption != nil && len(*s.ResourceHandlingOption) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceHandlingOption", 1)) + } + if s.ResourceOwner != nil && len(*s.ResourceOwner) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceOwner", 1)) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.ContextEntries != nil { + for i, v := range s.ContextEntries { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContextEntries", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy +// request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePolicyResponse +type SimulateCustomPolicyOutput struct { + _ struct{} `type:"structure"` + + // The results of the simulation. + EvaluationResults []EvaluationResult `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
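+	//
+	// A hedged sketch of manual pagination with these fields (resp is a prior
+	// *SimulateCustomPolicyResponse and input the original request input; the
+	// NewSimulateCustomPolicyPaginator helper below automates this):
+	//
+	//    for resp.IsTruncated != nil && *resp.IsTruncated {
+	//        input.Marker = resp.Marker
+	//        // rebuild the request from input and Send again
+	//    }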
+	Marker *string `type:"string"`
+}
+
+// String returns the string representation
+func (s SimulateCustomPolicyOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opSimulateCustomPolicy = "SimulateCustomPolicy"
+
+// SimulateCustomPolicyRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Simulate how a set of IAM policies and optionally a resource-based policy
+// works with a list of API operations and AWS resources to determine the policies'
+// effective permissions. The policies are provided as strings.
+//
+// The simulation does not perform the API operations; it only checks the authorization
+// to determine if the simulated policies allow or deny the operations.
+//
+// If you want to simulate existing policies attached to an IAM user, group,
+// or role, use SimulatePrincipalPolicy instead.
+//
+// Context keys are variables maintained by AWS and its services that provide
+// details about the context of an API query request. You can use the Condition
+// element of an IAM policy to evaluate context keys. To get the list of context
+// keys that the policies require for correct simulation, use GetContextKeysForCustomPolicy.
+//
+// If the output is long, you can use the MaxItems and Marker parameters to paginate
+// the results.
+//
+//    // Example sending a request using SimulateCustomPolicyRequest.
+//    req := client.SimulateCustomPolicyRequest(params)
+//    resp, err := req.Send(context.TODO())
+//    if err == nil {
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulateCustomPolicy
+func (c *Client) SimulateCustomPolicyRequest(input *SimulateCustomPolicyInput) SimulateCustomPolicyRequest {
+	op := &aws.Operation{
+		Name:       opSimulateCustomPolicy,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+		Paginator: &aws.Paginator{
+			InputTokens:     []string{"Marker"},
+			OutputTokens:    []string{"Marker"},
+			LimitToken:      "MaxItems",
+			TruncationToken: "IsTruncated",
+		},
+	}
+
+	if input == nil {
+		input = &SimulateCustomPolicyInput{}
+	}
+
+	req := c.newRequest(op, input, &SimulateCustomPolicyOutput{})
+	return SimulateCustomPolicyRequest{Request: req, Input: input, Copy: c.SimulateCustomPolicyRequest}
+}
+
+// SimulateCustomPolicyRequest is the request type for the
+// SimulateCustomPolicy API operation.
+type SimulateCustomPolicyRequest struct {
+	*aws.Request
+	Input *SimulateCustomPolicyInput
+	Copy  func(*SimulateCustomPolicyInput) SimulateCustomPolicyRequest
+}
+
+// Send marshals and sends the SimulateCustomPolicy API request.
+func (r SimulateCustomPolicyRequest) Send(ctx context.Context) (*SimulateCustomPolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &SimulateCustomPolicyResponse{
+		SimulateCustomPolicyOutput: r.Request.Data.(*SimulateCustomPolicyOutput),
+		response:                   &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewSimulateCustomPolicyPaginator returns a paginator for SimulateCustomPolicy.
+// Use the Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false if there are
+// no more pages or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+//    // Example iterating over pages.
+//    req := client.SimulateCustomPolicyRequest(input)
+//    p := iam.NewSimulateCustomPolicyPaginator(req)
+//
+//    for p.Next(context.TODO()) {
+//        page := p.CurrentPage()
+//    }
+//
+//    if err := p.Err(); err != nil {
+//        return err
+//    }
+//
+func NewSimulateCustomPolicyPaginator(req SimulateCustomPolicyRequest) SimulateCustomPolicyPaginator {
+	return SimulateCustomPolicyPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *SimulateCustomPolicyInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// SimulateCustomPolicyPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type SimulateCustomPolicyPaginator struct {
+	aws.Pager
+}
+
+func (p *SimulateCustomPolicyPaginator) CurrentPage() *SimulateCustomPolicyOutput {
+	return p.Pager.CurrentPage().(*SimulateCustomPolicyOutput)
+}
+
+// SimulateCustomPolicyResponse is the response type for the
+// SimulateCustomPolicy API operation.
+type SimulateCustomPolicyResponse struct {
+	*SimulateCustomPolicyOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// SimulateCustomPolicy request.
+func (r *SimulateCustomPolicyResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go
new file mode 100644
index 00000000..390f927f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_SimulatePrincipalPolicy.go
@@ -0,0 +1,394 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePrincipalPolicyRequest
+type SimulatePrincipalPolicyInput struct {
+	_ struct{} `type:"structure"`
+
+	// A list of names of API operations to evaluate in the simulation. Each operation
+	// is evaluated for each resource. Each operation must include the service identifier,
+	// such as iam:CreateUser.
+	//
+	// ActionNames is a required field
+	ActionNames []string `type:"list" required:"true"`
+
+	// The ARN of the IAM user that you want to specify as the simulated caller
+	// of the API operations. If you do not specify a CallerArn, it defaults to
+	// the ARN of the user that you specify in PolicySourceArn, if you specified
+	// a user. If you include both a PolicySourceArn (for example, arn:aws:iam::123456789012:user/David)
+	// and a CallerArn (for example, arn:aws:iam::123456789012:user/Bob), the result
+	// is that you simulate calling the API operations as Bob, as if Bob had David's
+	// policies.
+	//
+	// You can specify only the ARN of an IAM user. You cannot specify the ARN of
+	// an assumed role, federated user, or a service principal.
+	//
+	// CallerArn is required if you include a ResourcePolicy and the PolicySourceArn
+	// is not the ARN for an IAM user. This is required so that the resource-based
+	// policy's Principal element has a value to use in evaluating the policy.
+ // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + CallerArn *string `min:"1" type:"string"` + + // A list of context keys and corresponding values for the simulation to use. + // Whenever a context key is evaluated in one of the simulated IAM permission + // policies, the corresponding value is supplied. + ContextEntries []ContextEntry `type:"list"` + + // Use this parameter only when paginating results and only after you receive + // a response indicating that the results are truncated. Set it to the value + // of the Marker element in the response that you received to indicate where + // the next call should start. + Marker *string `min:"1" type:"string"` + + // Use this only when paginating results to indicate the maximum number of items + // you want in the response. If additional items exist beyond the maximum you + // specify, the IsTruncated response element is true. + // + // If you do not include this parameter, the number of items defaults to 100. + // Note that IAM might return fewer results, even when there are more results + // available. In that case, the IsTruncated response element returns true, and + // Marker contains a value to include in the subsequent call that tells the + // service where to continue from. + MaxItems *int64 `min:"1" type:"integer"` + + // An optional list of additional policy documents to include in the simulation. + // Each document is specified as a string containing the complete, valid JSON + // text of an IAM policy. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + PolicyInputList []string `type:"list"` + + // The Amazon Resource Name (ARN) of a user, group, or role whose policies you + // want to include in the simulation. If you specify a user, group, or role, + // the simulation includes all policies that are associated with that entity. + // If you specify a user, the simulation also includes all policies that are + // attached to any groups the user belongs to. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // PolicySourceArn is a required field + PolicySourceArn *string `min:"20" type:"string" required:"true"` + + // A list of ARNs of AWS resources to include in the simulation. If this parameter + // is not provided, then the value defaults to * (all resources). Each API in + // the ActionNames parameter is evaluated for each resource in this list. The + // simulation determines the access result (allowed or denied) of each combination + // and reports it in the response. + // + // The simulation does not automatically retrieve policies for the specified + // resources. If you want to include a resource policy in the simulation, then + // you must include the policy as a string in the ResourcePolicy parameter. 
+ // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + ResourceArns []string `type:"list"` + + // Specifies the type of simulation to run. Different API operations that support + // resource-based policies require different combinations of resources. By specifying + // the type of simulation to run, you enable the policy simulator to enforce + // the presence of the required resources to ensure reliable simulation results. + // If your simulation does not match one of the following scenarios, then you + // can omit this parameter. The following list shows each of the supported scenario + // values and the resources that you must define to run the simulation. + // + // Each of the EC2 scenarios requires that you specify instance, image, and + // security group resources. If your scenario includes an EBS volume, then you + // must specify that volume as a resource. If the EC2 scenario includes VPC, + // then you must supply the network interface resource. If it includes an IP + // subnet, then you must specify the subnet resource. For more information on + // the EC2 scenario options, see Supported Platforms (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-supported-platforms.html) + // in the Amazon EC2 User Guide. + // + // * EC2-Classic-InstanceStore instance, image, security group + // + // * EC2-Classic-EBS instance, image, security group, volume + // + // * EC2-VPC-InstanceStore instance, image, security group, network interface + // + // * EC2-VPC-InstanceStore-Subnet instance, image, security group, network + // interface, subnet + // + // * EC2-VPC-EBS instance, image, security group, network interface, volume + // + // * EC2-VPC-EBS-Subnet instance, image, security group, network interface, + // subnet, volume + ResourceHandlingOption *string `min:"1" type:"string"` + + // An AWS account ID that specifies the owner of any simulated resource that + // does not identify its owner in the resource ARN. Examples of resource ARNs + // include an S3 bucket or object. If ResourceOwner is specified, it is also + // used as the account owner of any ResourcePolicy included in the simulation. + // If the ResourceOwner parameter is not specified, then the owner of the resources + // and the resource policy defaults to the account of the identity provided + // in CallerArn. This parameter is required only if you specify a resource-based + // policy and account that owns the resource is different from the account that + // owns the simulated calling user CallerArn. + ResourceOwner *string `min:"1" type:"string"` + + // A resource-based policy to include in the simulation provided as a string. + // Each resource in the simulation is treated as if it had this policy attached. + // You can include only one resource-based policy in a simulation. 
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + ResourcePolicy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s SimulatePrincipalPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *SimulatePrincipalPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "SimulatePrincipalPolicyInput"} + + if s.ActionNames == nil { + invalidParams.Add(aws.NewErrParamRequired("ActionNames")) + } + if s.CallerArn != nil && len(*s.CallerArn) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CallerArn", 1)) + } + if s.Marker != nil && len(*s.Marker) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Marker", 1)) + } + if s.MaxItems != nil && *s.MaxItems < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxItems", 1)) + } + + if s.PolicySourceArn == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicySourceArn")) + } + if s.PolicySourceArn != nil && len(*s.PolicySourceArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("PolicySourceArn", 20)) + } + if s.ResourceHandlingOption != nil && len(*s.ResourceHandlingOption) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceHandlingOption", 1)) + } + if s.ResourceOwner != nil && len(*s.ResourceOwner) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourceOwner", 1)) + } + if s.ResourcePolicy != nil && len(*s.ResourcePolicy) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ResourcePolicy", 1)) + } + if s.ContextEntries != nil { + for i, v := range s.ContextEntries { + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "ContextEntries", i), err.(aws.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful SimulatePrincipalPolicy or SimulateCustomPolicy +// request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePolicyResponse +type SimulatePrincipalPolicyOutput struct { + _ struct{} `type:"structure"` + + // The results of the simulation. + EvaluationResults []EvaluationResult `type:"list"` + + // A flag that indicates whether there are more items to return. If your results + // were truncated, you can make a subsequent pagination request using the Marker + // request parameter to retrieve more items. Note that IAM might return fewer + // than the MaxItems number of results even when there are more results available. + // We recommend that you check IsTruncated after every call to ensure that you + // receive all your results. + IsTruncated *bool `type:"boolean"` + + // When IsTruncated is true, this element is present and contains the value + // to use for the Marker parameter in a subsequent pagination request. 
+ Marker *string `type:"string"` +} + +// String returns the string representation +func (s SimulatePrincipalPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opSimulatePrincipalPolicy = "SimulatePrincipalPolicy" + +// SimulatePrincipalPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Simulate how a set of IAM policies attached to an IAM entity works with a +// list of API operations and AWS resources to determine the policies' effective +// permissions. The entity can be an IAM user, group, or role. If you specify +// a user, then the simulation also includes all of the policies that are attached +// to groups that the user belongs to. +// +// You can optionally include a list of one or more additional policies specified +// as strings to include in the simulation. If you want to simulate only policies +// specified as strings, use SimulateCustomPolicy instead. +// +// You can also optionally include one resource-based policy to be evaluated +// with each of the resources included in the simulation. +// +// The simulation does not perform the API operations; it only checks the authorization +// to determine if the simulated policies allow or deny the operations. +// +// Note: This API discloses information about the permissions granted to other +// users. If you do not want users to see other user's permissions, then consider +// allowing them to use SimulateCustomPolicy instead. +// +// Context keys are variables maintained by AWS and its services that provide +// details about the context of an API query request. You can use the Condition +// element of an IAM policy to evaluate context keys. To get the list of context +// keys that the policies require for correct simulation, use GetContextKeysForPrincipalPolicy. +// +// If the output is long, you can use the MaxItems and Marker parameters to +// paginate the results. +// +// // Example sending a request using SimulatePrincipalPolicyRequest. +// req := client.SimulatePrincipalPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SimulatePrincipalPolicy +func (c *Client) SimulatePrincipalPolicyRequest(input *SimulatePrincipalPolicyInput) SimulatePrincipalPolicyRequest { + op := &aws.Operation{ + Name: opSimulatePrincipalPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &aws.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"Marker"}, + LimitToken: "MaxItems", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &SimulatePrincipalPolicyInput{} + } + + req := c.newRequest(op, input, &SimulatePrincipalPolicyOutput{}) + return SimulatePrincipalPolicyRequest{Request: req, Input: input, Copy: c.SimulatePrincipalPolicyRequest} +} + +// SimulatePrincipalPolicyRequest is the request type for the +// SimulatePrincipalPolicy API operation. +type SimulatePrincipalPolicyRequest struct { + *aws.Request + Input *SimulatePrincipalPolicyInput + Copy func(*SimulatePrincipalPolicyInput) SimulatePrincipalPolicyRequest +} + +// Send marshals and sends the SimulatePrincipalPolicy API request. 
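+//
+// A hedged sketch of a minimal call (the ARN and action name are placeholders;
+// client is assumed to be a *Client built elsewhere):
+//
+//    req := client.SimulatePrincipalPolicyRequest(&SimulatePrincipalPolicyInput{
+//        PolicySourceArn: aws.String("arn:aws:iam::123456789012:user/jane"),
+//        ActionNames:     []string{"iam:CreateUser"},
+//    })
+//    resp, err := req.Send(context.TODO())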
+func (r SimulatePrincipalPolicyRequest) Send(ctx context.Context) (*SimulatePrincipalPolicyResponse, error) {
+	r.Request.SetContext(ctx)
+	err := r.Request.Send()
+	if err != nil {
+		return nil, err
+	}
+
+	resp := &SimulatePrincipalPolicyResponse{
+		SimulatePrincipalPolicyOutput: r.Request.Data.(*SimulatePrincipalPolicyOutput),
+		response: &aws.Response{Request: r.Request},
+	}
+
+	return resp, nil
+}
+
+// NewSimulatePrincipalPolicyPaginator returns a paginator for SimulatePrincipalPolicy.
+// Use Next method to get the next page, and CurrentPage to get the current
+// response page from the paginator. Next will return false, if there are
+// no more pages, or an error was encountered.
+//
+// Note: This operation can generate multiple requests to a service.
+//
+// // Example iterating over pages.
+// req := client.SimulatePrincipalPolicyRequest(input)
+// p := iam.NewSimulatePrincipalPolicyPaginator(req)
+//
+// for p.Next(context.TODO()) {
+// page := p.CurrentPage()
+// }
+//
+// if err := p.Err(); err != nil {
+// return err
+// }
+//
+func NewSimulatePrincipalPolicyPaginator(req SimulatePrincipalPolicyRequest) SimulatePrincipalPolicyPaginator {
+	return SimulatePrincipalPolicyPaginator{
+		Pager: aws.Pager{
+			NewRequest: func(ctx context.Context) (*aws.Request, error) {
+				var inCpy *SimulatePrincipalPolicyInput
+				if req.Input != nil {
+					tmp := *req.Input
+					inCpy = &tmp
+				}
+
+				newReq := req.Copy(inCpy)
+				newReq.SetContext(ctx)
+				return newReq.Request, nil
+			},
+		},
+	}
+}
+
+// SimulatePrincipalPolicyPaginator is used to paginate the request. This can be done by
+// calling Next and CurrentPage.
+type SimulatePrincipalPolicyPaginator struct {
+	aws.Pager
+}
+
+func (p *SimulatePrincipalPolicyPaginator) CurrentPage() *SimulatePrincipalPolicyOutput {
+	return p.Pager.CurrentPage().(*SimulatePrincipalPolicyOutput)
+}
+
+// SimulatePrincipalPolicyResponse is the response type for the
+// SimulatePrincipalPolicy API operation.
+type SimulatePrincipalPolicyResponse struct {
+	*SimulatePrincipalPolicyOutput
+
+	response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// SimulatePrincipalPolicy request.
+func (r *SimulatePrincipalPolicyResponse) SDKResponseMetdata() *aws.Response {
+	return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go
new file mode 100644
index 00000000..87e98aa4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagRole.go
@@ -0,0 +1,179 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package iam
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go-v2/aws"
+	"github.com/aws/aws-sdk-go-v2/internal/awsutil"
+	"github.com/aws/aws-sdk-go-v2/private/protocol"
+	"github.com/aws/aws-sdk-go-v2/private/protocol/query"
+)
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagRoleRequest
+type TagRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the role that you want to add tags to.
+	//
+	// This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters that consist of upper and lowercase alphanumeric characters
+	// with no spaces.
You can also include any of the following characters: _+=,.@-
+	//
+	// RoleName is a required field
+	RoleName *string `min:"1" type:"string" required:"true"`
+
+	// The list of tags that you want to attach to the role. Each tag consists of
+	// a key name and an associated value. You can specify this with a JSON string.
+	//
+	// Tags is a required field
+	Tags []Tag `type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s TagRoleInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TagRoleInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "TagRoleInput"}
+
+	if s.RoleName == nil {
+		invalidParams.Add(aws.NewErrParamRequired("RoleName"))
+	}
+	if s.RoleName != nil && len(*s.RoleName) < 1 {
+		invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1))
+	}
+
+	if s.Tags == nil {
+		invalidParams.Add(aws.NewErrParamRequired("Tags"))
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagRoleOutput
+type TagRoleOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s TagRoleOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opTagRole = "TagRole"
+
+// TagRoleRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Adds one or more tags to an IAM role. The role can be a regular role or a
+// service-linked role. If a tag with the same key name already exists, then
+// that tag is overwritten with the new value.
+//
+// A tag consists of a key name and an associated value. By assigning tags to
+// your resources, you can do the following:
+//
+// * Administrative grouping and discovery - Attach tags to resources to
+// aid in organization and search. For example, you could search for all
+// resources with the key name Project and the value MyImportantProject.
+// Or search for all resources with the key name Cost Center and the value
+// 41200.
+//
+// * Access control - Reference tags in IAM user-based and resource-based
+// policies. You can use tags to restrict access to only an IAM user or role
+// that has a specified tag attached. You can also restrict access to only
+// those resources that have a certain tag attached. For examples of policies
+// that show how to use tags to control access, see Control Access Using
+// IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html)
+// in the IAM User Guide.
+//
+// * Cost allocation - Use tags to help track which individuals and teams
+// are using which AWS resources.
+//
+// * If any one of the tags is invalid, or if you exceed the allowed number
+// of tags per role, the entire request fails and no tags are added to the
+// role.
+//
+// * AWS always interprets the tag Value as a single string. If you need
+// to store an array, you can store comma-separated values in the string.
+// However, you must interpret the value in your code.
+//
+// For more information about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+// in the IAM User Guide.
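+//
+// A hedged sketch of building the params for this call (the role name, tag
+// key, and tag value are placeholders; it assumes this package's Tag type
+// with pointer Key and Value fields):
+//
+// params := &iam.TagRoleInput{
+// RoleName: aws.String("MyExampleRole"),
+// Tags: []iam.Tag{{Key: aws.String("Project"), Value: aws.String("MyImportantProject")}},
+// }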
+// +// // Example sending a request using TagRoleRequest. +// req := client.TagRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagRole +func (c *Client) TagRoleRequest(input *TagRoleInput) TagRoleRequest { + op := &aws.Operation{ + Name: opTagRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagRoleInput{} + } + + req := c.newRequest(op, input, &TagRoleOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return TagRoleRequest{Request: req, Input: input, Copy: c.TagRoleRequest} +} + +// TagRoleRequest is the request type for the +// TagRole API operation. +type TagRoleRequest struct { + *aws.Request + Input *TagRoleInput + Copy func(*TagRoleInput) TagRoleRequest +} + +// Send marshals and sends the TagRole API request. +func (r TagRoleRequest) Send(ctx context.Context) (*TagRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagRoleResponse{ + TagRoleOutput: r.Request.Data.(*TagRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagRoleResponse is the response type for the +// TagRole API operation. +type TagRoleResponse struct { + *TagRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagRole request. +func (r *TagRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go new file mode 100644 index 00000000..1b365782 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_TagUser.go @@ -0,0 +1,178 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "fmt" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagUserRequest +type TagUserInput struct { + _ struct{} `type:"structure"` + + // The list of tags that you want to attach to the user. Each tag consists of + // a key name and an associated value. + // + // Tags is a required field + Tags []Tag `type:"list" required:"true"` + + // The name of the user that you want to add tags to. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s TagUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
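+//
+// The SDK runs this validation as part of sending the request, so calling it
+// directly is an optional client-side pre-flight check, e.g. (a sketch):
+//
+// if err := params.Validate(); err != nil {
+// // reject the input before any network call is made
+// }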
+func (s *TagUserInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "TagUserInput"}
+
+	if s.Tags == nil {
+		invalidParams.Add(aws.NewErrParamRequired("Tags"))
+	}
+
+	if s.UserName == nil {
+		invalidParams.Add(aws.NewErrParamRequired("UserName"))
+	}
+	if s.UserName != nil && len(*s.UserName) < 1 {
+		invalidParams.Add(aws.NewErrParamMinLen("UserName", 1))
+	}
+	if s.Tags != nil {
+		for i, v := range s.Tags {
+			if err := v.Validate(); err != nil {
+				invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(aws.ErrInvalidParams))
+			}
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagUserOutput
+type TagUserOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s TagUserOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opTagUser = "TagUser"
+
+// TagUserRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Adds one or more tags to an IAM user. If a tag with the same key name already
+// exists, then that tag is overwritten with the new value.
+//
+// A tag consists of a key name and an associated value. By assigning tags to
+// your resources, you can do the following:
+//
+// * Administrative grouping and discovery - Attach tags to resources to
+// aid in organization and search. For example, you could search for all
+// resources with the key name Project and the value MyImportantProject.
+// Or search for all resources with the key name Cost Center and the value
+// 41200.
+//
+// * Access control - Reference tags in IAM user-based and resource-based
+// policies. You can use tags to restrict access to only an IAM requesting
+// user or to a role that has a specified tag attached. You can also restrict
+// access to only those resources that have a certain tag attached. For examples
+// of policies that show how to use tags to control access, see Control Access
+// Using IAM Tags (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_tags.html)
+// in the IAM User Guide.
+//
+// * Cost allocation - Use tags to help track which individuals and teams
+// are using which AWS resources.
+//
+// * If any one of the tags is invalid, or if you exceed the allowed number
+// of tags per user, the entire request fails and no tags are added to the
+// user.
+//
+// * AWS always interprets the tag Value as a single string. If you need
+// to store an array, you can store comma-separated values in the string.
+// However, you must interpret the value in your code.
+//
+// For more information about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using TagUserRequest.
+// req := client.TagUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/TagUser +func (c *Client) TagUserRequest(input *TagUserInput) TagUserRequest { + op := &aws.Operation{ + Name: opTagUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &TagUserInput{} + } + + req := c.newRequest(op, input, &TagUserOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return TagUserRequest{Request: req, Input: input, Copy: c.TagUserRequest} +} + +// TagUserRequest is the request type for the +// TagUser API operation. +type TagUserRequest struct { + *aws.Request + Input *TagUserInput + Copy func(*TagUserInput) TagUserRequest +} + +// Send marshals and sends the TagUser API request. +func (r TagUserRequest) Send(ctx context.Context) (*TagUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &TagUserResponse{ + TagUserOutput: r.Request.Data.(*TagUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// TagUserResponse is the response type for the +// TagUser API operation. +type TagUserResponse struct { + *TagUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// TagUser request. +func (r *TagUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go new file mode 100644 index 00000000..b7cadb3a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagRole.go @@ -0,0 +1,140 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagRoleRequest +type UntagRoleInput struct { + _ struct{} `type:"structure"` + + // The name of the IAM role from which you want to remove tags. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` + + // A list of key names as a simple array of strings. The tags with matching + // keys are removed from the specified role. + // + // TagKeys is a required field + TagKeys []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UntagRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
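+//
+// For example (a sketch; the role name and tag key are placeholders), a
+// hand-built input can be checked before sending:
+//
+// params := &iam.UntagRoleInput{
+// RoleName: aws.String("MyExampleRole"),
+// TagKeys: []string{"Project"},
+// }
+// err := params.Validate() // nil here, since both required fields are set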
+func (s *UntagRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagRoleInput"} + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagRoleOutput +type UntagRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opUntagRole = "UntagRole" + +// UntagRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified tags from the role. For more information about tagging, +// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// // Example sending a request using UntagRoleRequest. +// req := client.UntagRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagRole +func (c *Client) UntagRoleRequest(input *UntagRoleInput) UntagRoleRequest { + op := &aws.Operation{ + Name: opUntagRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagRoleInput{} + } + + req := c.newRequest(op, input, &UntagRoleOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UntagRoleRequest{Request: req, Input: input, Copy: c.UntagRoleRequest} +} + +// UntagRoleRequest is the request type for the +// UntagRole API operation. +type UntagRoleRequest struct { + *aws.Request + Input *UntagRoleInput + Copy func(*UntagRoleInput) UntagRoleRequest +} + +// Send marshals and sends the UntagRole API request. +func (r UntagRoleRequest) Send(ctx context.Context) (*UntagRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagRoleResponse{ + UntagRoleOutput: r.Request.Data.(*UntagRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagRoleResponse is the response type for the +// UntagRole API operation. +type UntagRoleResponse struct { + *UntagRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagRole request. +func (r *UntagRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go new file mode 100644 index 00000000..a616adf2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UntagUser.go @@ -0,0 +1,140 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagUserRequest +type UntagUserInput struct { + _ struct{} `type:"structure"` + + // A list of key names as a simple array of strings. The tags with matching + // keys are removed from the specified user. + // + // TagKeys is a required field + TagKeys []string `type:"list" required:"true"` + + // The name of the IAM user from which you want to remove tags. + // + // This parameter accepts (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that consist of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: =,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UntagUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UntagUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UntagUserInput"} + + if s.TagKeys == nil { + invalidParams.Add(aws.NewErrParamRequired("TagKeys")) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagUserOutput +type UntagUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UntagUserOutput) String() string { + return awsutil.Prettify(s) +} + +const opUntagUser = "UntagUser" + +// UntagUserRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Removes the specified tags from the user. For more information about tagging, +// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// +// // Example sending a request using UntagUserRequest. +// req := client.UntagUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UntagUser +func (c *Client) UntagUserRequest(input *UntagUserInput) UntagUserRequest { + op := &aws.Operation{ + Name: opUntagUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UntagUserInput{} + } + + req := c.newRequest(op, input, &UntagUserOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UntagUserRequest{Request: req, Input: input, Copy: c.UntagUserRequest} +} + +// UntagUserRequest is the request type for the +// UntagUser API operation. +type UntagUserRequest struct { + *aws.Request + Input *UntagUserInput + Copy func(*UntagUserInput) UntagUserRequest +} + +// Send marshals and sends the UntagUser API request. 
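+//
+// As the body below shows, a non-nil error yields a nil response, so a nil
+// error guarantees the returned *UntagUserResponse is usable.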
+func (r UntagUserRequest) Send(ctx context.Context) (*UntagUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UntagUserResponse{ + UntagUserOutput: r.Request.Data.(*UntagUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UntagUserResponse is the response type for the +// UntagUser API operation. +type UntagUserResponse struct { + *UntagUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UntagUser request. +func (r *UntagUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go new file mode 100644 index 00000000..8bf41394 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccessKey.go @@ -0,0 +1,159 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccessKeyRequest +type UpdateAccessKeyInput struct { + _ struct{} `type:"structure"` + + // The access key ID of the secret access key you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The status you want to assign to the secret access key. Active means that + // the key can be used for API calls to AWS, while Inactive means that the key + // cannot be used. + // + // Status is a required field + Status StatusType `type:"string" required:"true" enum:"true"` + + // The name of the user whose key you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateAccessKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
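+//
+// Note that Status is an enum-backed string type rather than a pointer, so
+// the required-field check below is a zero-length test instead of a nil test.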
+func (s *UpdateAccessKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateAccessKeyInput"} + + if s.AccessKeyId == nil { + invalidParams.Add(aws.NewErrParamRequired("AccessKeyId")) + } + if s.AccessKeyId != nil && len(*s.AccessKeyId) < 16 { + invalidParams.Add(aws.NewErrParamMinLen("AccessKeyId", 16)) + } + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccessKeyOutput +type UpdateAccessKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccessKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateAccessKey = "UpdateAccessKey" + +// UpdateAccessKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Changes the status of the specified access key from Active to Inactive, or +// vice versa. This operation can be used to disable a user's key as part of +// a key rotation workflow. +// +// If the UserName is not specified, the user name is determined implicitly +// based on the AWS access key ID used to sign the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// For information about rotating keys, see Managing Keys and Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingCredentials.html) +// in the IAM User Guide. +// +// // Example sending a request using UpdateAccessKeyRequest. +// req := client.UpdateAccessKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccessKey +func (c *Client) UpdateAccessKeyRequest(input *UpdateAccessKeyInput) UpdateAccessKeyRequest { + op := &aws.Operation{ + Name: opUpdateAccessKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccessKeyInput{} + } + + req := c.newRequest(op, input, &UpdateAccessKeyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateAccessKeyRequest{Request: req, Input: input, Copy: c.UpdateAccessKeyRequest} +} + +// UpdateAccessKeyRequest is the request type for the +// UpdateAccessKey API operation. +type UpdateAccessKeyRequest struct { + *aws.Request + Input *UpdateAccessKeyInput + Copy func(*UpdateAccessKeyInput) UpdateAccessKeyRequest +} + +// Send marshals and sends the UpdateAccessKey API request. +func (r UpdateAccessKeyRequest) Send(ctx context.Context) (*UpdateAccessKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateAccessKeyResponse{ + UpdateAccessKeyOutput: r.Request.Data.(*UpdateAccessKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateAccessKeyResponse is the response type for the +// UpdateAccessKey API operation. 
+type UpdateAccessKeyResponse struct { + *UpdateAccessKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateAccessKey request. +func (r *UpdateAccessKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go new file mode 100644 index 00000000..11f77f58 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAccountPasswordPolicy.go @@ -0,0 +1,204 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccountPasswordPolicyRequest +type UpdateAccountPasswordPolicyInput struct { + _ struct{} `type:"structure"` + + // Allows all IAM users in your account to use the AWS Management Console to + // change their own passwords. For more information, see Letting IAM Users Change + // Their Own Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/HowToPwdIAMUser.html) + // in the IAM User Guide. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that IAM users in the account do + // not automatically have permissions to change their own password. + AllowUsersToChangePassword *bool `type:"boolean"` + + // Prevents IAM users from setting a new password after their password has expired. + // The IAM user cannot be accessed until an administrator resets the password. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that IAM users can change their + // passwords after they expire and continue to sign in as the user. + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of 0. The result is that IAM user passwords never expire. + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // The minimum number of characters allowed in an IAM user password. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of 6. + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of 0. The result is that IAM users are not prevented from + // reusing previous passwords. + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether IAM user passwords must contain at least one lowercase + // character from the ISO basic Latin alphabet (a to z). + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one lowercase character. 
+ RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one numeric character + // (0 to 9). + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one numeric character. + RequireNumbers *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one of the following + // non-alphanumeric characters: + // + // ! @ # $ % ^ & * ( ) _ + - = [ ] { } | ' + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one symbol character. + RequireSymbols *bool `type:"boolean"` + + // Specifies whether IAM user passwords must contain at least one uppercase + // character from the ISO basic Latin alphabet (A to Z). + // + // If you do not specify a value for this parameter, then the operation uses + // the default value of false. The result is that passwords do not require at + // least one uppercase character. + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAccountPasswordPolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateAccountPasswordPolicyInput"} + if s.MaxPasswordAge != nil && *s.MaxPasswordAge < 1 { + invalidParams.Add(aws.NewErrParamMinValue("MaxPasswordAge", 1)) + } + if s.MinimumPasswordLength != nil && *s.MinimumPasswordLength < 6 { + invalidParams.Add(aws.NewErrParamMinValue("MinimumPasswordLength", 6)) + } + if s.PasswordReusePrevention != nil && *s.PasswordReusePrevention < 1 { + invalidParams.Add(aws.NewErrParamMinValue("PasswordReusePrevention", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccountPasswordPolicyOutput +type UpdateAccountPasswordPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAccountPasswordPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateAccountPasswordPolicy = "UpdateAccountPasswordPolicy" + +// UpdateAccountPasswordPolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Updates the password policy settings for the AWS account. +// +// * This operation does not support partial updates. No parameters are required, +// but if you do not specify a parameter, that parameter's value reverts +// to its default value. See the Request Parameters section for each parameter's +// default value. Also note that some parameters do not allow the default +// parameter to be explicitly set. Instead, to invoke the default value, +// do not include that parameter when you invoke the operation. +// +// For more information about using a password policy, see Managing an IAM Password +// Policy (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingPasswordPolicies.html) +// in the IAM User Guide. +// +// // Example sending a request using UpdateAccountPasswordPolicyRequest. 
+// req := client.UpdateAccountPasswordPolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAccountPasswordPolicy +func (c *Client) UpdateAccountPasswordPolicyRequest(input *UpdateAccountPasswordPolicyInput) UpdateAccountPasswordPolicyRequest { + op := &aws.Operation{ + Name: opUpdateAccountPasswordPolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAccountPasswordPolicyInput{} + } + + req := c.newRequest(op, input, &UpdateAccountPasswordPolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateAccountPasswordPolicyRequest{Request: req, Input: input, Copy: c.UpdateAccountPasswordPolicyRequest} +} + +// UpdateAccountPasswordPolicyRequest is the request type for the +// UpdateAccountPasswordPolicy API operation. +type UpdateAccountPasswordPolicyRequest struct { + *aws.Request + Input *UpdateAccountPasswordPolicyInput + Copy func(*UpdateAccountPasswordPolicyInput) UpdateAccountPasswordPolicyRequest +} + +// Send marshals and sends the UpdateAccountPasswordPolicy API request. +func (r UpdateAccountPasswordPolicyRequest) Send(ctx context.Context) (*UpdateAccountPasswordPolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateAccountPasswordPolicyResponse{ + UpdateAccountPasswordPolicyOutput: r.Request.Data.(*UpdateAccountPasswordPolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateAccountPasswordPolicyResponse is the response type for the +// UpdateAccountPasswordPolicy API operation. +type UpdateAccountPasswordPolicyResponse struct { + *UpdateAccountPasswordPolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateAccountPasswordPolicy request. +func (r *UpdateAccountPasswordPolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go new file mode 100644 index 00000000..2dfa421f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateAssumeRolePolicy.go @@ -0,0 +1,160 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAssumeRolePolicyRequest +type UpdateAssumeRolePolicyInput struct { + _ struct{} `type:"structure"` + + // The policy that grants an entity permission to assume the role. + // + // You must provide policies in JSON format in IAM. However, for AWS CloudFormation + // templates formatted in YAML, you can provide the policy in JSON or YAML format. + // AWS CloudFormation always converts a YAML policy to JSON format before submitting + // it to IAM. 
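+	//
+	// For illustration only, a minimal assume-role trust policy might look like
+	// the following sketch (the service principal is a placeholder):
+	//
+	// {"Version": "2012-10-17", "Statement": [{"Effect": "Allow",
+	// "Principal": {"Service": "ec2.amazonaws.com"}, "Action": "sts:AssumeRole"}]}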
+ // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PolicyDocument is a required field + PolicyDocument *string `min:"1" type:"string" required:"true"` + + // The name of the role to update with the new policy. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateAssumeRolePolicyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateAssumeRolePolicyInput"} + + if s.PolicyDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("PolicyDocument")) + } + if s.PolicyDocument != nil && len(*s.PolicyDocument) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PolicyDocument", 1)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAssumeRolePolicyOutput +type UpdateAssumeRolePolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateAssumeRolePolicyOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateAssumeRolePolicy = "UpdateAssumeRolePolicy" + +// UpdateAssumeRolePolicyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Updates the policy that grants an IAM entity permission to assume a role. +// This is typically referred to as the "role trust policy". For more information +// about roles, go to Using Roles to Delegate Permissions and Federate Identities +// (https://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html). +// +// // Example sending a request using UpdateAssumeRolePolicyRequest. 
+// req := client.UpdateAssumeRolePolicyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateAssumeRolePolicy +func (c *Client) UpdateAssumeRolePolicyRequest(input *UpdateAssumeRolePolicyInput) UpdateAssumeRolePolicyRequest { + op := &aws.Operation{ + Name: opUpdateAssumeRolePolicy, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateAssumeRolePolicyInput{} + } + + req := c.newRequest(op, input, &UpdateAssumeRolePolicyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateAssumeRolePolicyRequest{Request: req, Input: input, Copy: c.UpdateAssumeRolePolicyRequest} +} + +// UpdateAssumeRolePolicyRequest is the request type for the +// UpdateAssumeRolePolicy API operation. +type UpdateAssumeRolePolicyRequest struct { + *aws.Request + Input *UpdateAssumeRolePolicyInput + Copy func(*UpdateAssumeRolePolicyInput) UpdateAssumeRolePolicyRequest +} + +// Send marshals and sends the UpdateAssumeRolePolicy API request. +func (r UpdateAssumeRolePolicyRequest) Send(ctx context.Context) (*UpdateAssumeRolePolicyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateAssumeRolePolicyResponse{ + UpdateAssumeRolePolicyOutput: r.Request.Data.(*UpdateAssumeRolePolicyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateAssumeRolePolicyResponse is the response type for the +// UpdateAssumeRolePolicy API operation. +type UpdateAssumeRolePolicyResponse struct { + *UpdateAssumeRolePolicyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateAssumeRolePolicy request. +func (r *UpdateAssumeRolePolicyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go new file mode 100644 index 00000000..3e6b07c1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateGroup.go @@ -0,0 +1,163 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateGroupRequest +type UpdateGroupInput struct { + _ struct{} `type:"structure"` + + // Name of the IAM group to update. If you're changing the name of the group, + // this is the original name. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // New name for the IAM group. Only include this if changing the group's name. + // + // IAM user, group, role, and policy names must be unique within the account. 
+	// Names are not distinguished by case. For example, you cannot create resources
+	// named both "MyResource" and "myresource".
+	NewGroupName *string `min:"1" type:"string"`
+
+	// New path for the IAM group. Only include this if changing the group's path.
+	//
+	// This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+	// a string of characters consisting of either a forward slash (/) by itself
+	// or a string that must begin and end with forward slashes. In addition, it
+	// can contain any ASCII character from the ! (\u0021) through the DEL character
+	// (\u007F), including most punctuation characters, digits, and upper and lowercased
+	// letters.
+	NewPath *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateGroupInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateGroupInput) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "UpdateGroupInput"}
+
+	if s.GroupName == nil {
+		invalidParams.Add(aws.NewErrParamRequired("GroupName"))
+	}
+	if s.GroupName != nil && len(*s.GroupName) < 1 {
+		invalidParams.Add(aws.NewErrParamMinLen("GroupName", 1))
+	}
+	if s.NewGroupName != nil && len(*s.NewGroupName) < 1 {
+		invalidParams.Add(aws.NewErrParamMinLen("NewGroupName", 1))
+	}
+	if s.NewPath != nil && len(*s.NewPath) < 1 {
+		invalidParams.Add(aws.NewErrParamMinLen("NewPath", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateGroupOutput
+type UpdateGroupOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateGroupOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+const opUpdateGroup = "UpdateGroup"
+
+// UpdateGroupRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Updates the name and/or the path of the specified IAM group.
+//
+// You should understand the implications of changing a group's path or name.
+// For more information, see Renaming Users and Groups (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_WorkingWithGroupsAndUsers.html)
+// in the IAM User Guide.
+//
+// The person making the request (the principal) must have permission to change
+// the group with both the old name and the new name. For example, to change
+// the group named Managers to MGRs, the principal must have a policy that allows
+// them to update both groups. If the principal has permission to update the
+// Managers group, but not the MGRs group, then the update fails. For more information
+// about permissions, see Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html).
+//
+// // Example sending a request using UpdateGroupRequest.
+// req := client.UpdateGroupRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateGroup +func (c *Client) UpdateGroupRequest(input *UpdateGroupInput) UpdateGroupRequest { + op := &aws.Operation{ + Name: opUpdateGroup, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateGroupInput{} + } + + req := c.newRequest(op, input, &UpdateGroupOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateGroupRequest{Request: req, Input: input, Copy: c.UpdateGroupRequest} +} + +// UpdateGroupRequest is the request type for the +// UpdateGroup API operation. +type UpdateGroupRequest struct { + *aws.Request + Input *UpdateGroupInput + Copy func(*UpdateGroupInput) UpdateGroupRequest +} + +// Send marshals and sends the UpdateGroup API request. +func (r UpdateGroupRequest) Send(ctx context.Context) (*UpdateGroupResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateGroupResponse{ + UpdateGroupOutput: r.Request.Data.(*UpdateGroupOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateGroupResponse is the response type for the +// UpdateGroup API operation. +type UpdateGroupResponse struct { + *UpdateGroupOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateGroup request. +func (r *UpdateGroupResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go new file mode 100644 index 00000000..155e9840 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateLoginProfile.go @@ -0,0 +1,158 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateLoginProfileRequest +type UpdateLoginProfileInput struct { + _ struct{} `type:"structure"` + + // The new password for the specified IAM user. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // However, the format can be further restricted by the account administrator + // by setting a password policy on the AWS account. For more information, see + // UpdateAccountPasswordPolicy. + Password *string `min:"1" type:"string"` + + // Allows this new password to be used only once by requiring the specified + // IAM user to set a new password on next sign-in. 
+ PasswordResetRequired *bool `type:"boolean"` + + // The name of the user whose password you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateLoginProfileInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateLoginProfileInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateLoginProfileInput"} + if s.Password != nil && len(*s.Password) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Password", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateLoginProfileOutput +type UpdateLoginProfileOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateLoginProfileOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateLoginProfile = "UpdateLoginProfile" + +// UpdateLoginProfileRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Changes the password for the specified IAM user. +// +// IAM users can change their own passwords by calling ChangePassword. For more +// information about modifying passwords, see Managing Passwords (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_ManagingLogins.html) +// in the IAM User Guide. +// +// // Example sending a request using UpdateLoginProfileRequest. +// req := client.UpdateLoginProfileRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateLoginProfile +func (c *Client) UpdateLoginProfileRequest(input *UpdateLoginProfileInput) UpdateLoginProfileRequest { + op := &aws.Operation{ + Name: opUpdateLoginProfile, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateLoginProfileInput{} + } + + req := c.newRequest(op, input, &UpdateLoginProfileOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateLoginProfileRequest{Request: req, Input: input, Copy: c.UpdateLoginProfileRequest} +} + +// UpdateLoginProfileRequest is the request type for the +// UpdateLoginProfile API operation. +type UpdateLoginProfileRequest struct { + *aws.Request + Input *UpdateLoginProfileInput + Copy func(*UpdateLoginProfileInput) UpdateLoginProfileRequest +} + +// Send marshals and sends the UpdateLoginProfile API request. 
+func (r UpdateLoginProfileRequest) Send(ctx context.Context) (*UpdateLoginProfileResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateLoginProfileResponse{ + UpdateLoginProfileOutput: r.Request.Data.(*UpdateLoginProfileOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateLoginProfileResponse is the response type for the +// UpdateLoginProfile API operation. +type UpdateLoginProfileResponse struct { + *UpdateLoginProfileOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateLoginProfile request. +func (r *UpdateLoginProfileResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go new file mode 100644 index 00000000..ba42b0e1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateOpenIDConnectProviderThumbprint.go @@ -0,0 +1,154 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateOpenIDConnectProviderThumbprintRequest +type UpdateOpenIDConnectProviderThumbprintInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the IAM OIDC provider resource object for + // which you want to update the thumbprint. You can get a list of OIDC provider + // ARNs by using the ListOpenIDConnectProviders operation. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // OpenIDConnectProviderArn is a required field + OpenIDConnectProviderArn *string `min:"20" type:"string" required:"true"` + + // A list of certificate thumbprints that are associated with the specified + // IAM OpenID Connect provider. For more information, see CreateOpenIDConnectProvider. + // + // ThumbprintList is a required field + ThumbprintList []string `type:"list" required:"true"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *UpdateOpenIDConnectProviderThumbprintInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateOpenIDConnectProviderThumbprintInput"} + + if s.OpenIDConnectProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("OpenIDConnectProviderArn")) + } + if s.OpenIDConnectProviderArn != nil && len(*s.OpenIDConnectProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("OpenIDConnectProviderArn", 20)) + } + + if s.ThumbprintList == nil { + invalidParams.Add(aws.NewErrParamRequired("ThumbprintList")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateOpenIDConnectProviderThumbprintOutput +type UpdateOpenIDConnectProviderThumbprintOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateOpenIDConnectProviderThumbprintOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateOpenIDConnectProviderThumbprint = "UpdateOpenIDConnectProviderThumbprint" + +// UpdateOpenIDConnectProviderThumbprintRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Replaces the existing list of server certificate thumbprints associated with +// an OpenID Connect (OIDC) provider resource object with a new list of thumbprints. +// +// The list that you pass with this operation completely replaces the existing +// list of thumbprints. (The lists are not merged.) +// +// Typically, you need to update a thumbprint only when the identity provider's +// certificate changes, which occurs rarely. However, if the provider's certificate +// does change, any attempt to assume an IAM role that specifies the OIDC provider +// as a principal fails until the certificate thumbprint is updated. +// +// Trust for the OIDC provider is derived from the provider's certificate and +// is validated by the thumbprint. Therefore, it is best to limit access to +// the UpdateOpenIDConnectProviderThumbprint operation to highly privileged +// users. +// +// // Example sending a request using UpdateOpenIDConnectProviderThumbprintRequest. +// req := client.UpdateOpenIDConnectProviderThumbprintRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateOpenIDConnectProviderThumbprint +func (c *Client) UpdateOpenIDConnectProviderThumbprintRequest(input *UpdateOpenIDConnectProviderThumbprintInput) UpdateOpenIDConnectProviderThumbprintRequest { + op := &aws.Operation{ + Name: opUpdateOpenIDConnectProviderThumbprint, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateOpenIDConnectProviderThumbprintInput{} + } + + req := c.newRequest(op, input, &UpdateOpenIDConnectProviderThumbprintOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateOpenIDConnectProviderThumbprintRequest{Request: req, Input: input, Copy: c.UpdateOpenIDConnectProviderThumbprintRequest} +} + +// UpdateOpenIDConnectProviderThumbprintRequest is the request type for the +// UpdateOpenIDConnectProviderThumbprint API operation. 
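+//
+// A hedged note, inferred from the struct below: the Copy field points back at
+// the client's request constructor, so req.Copy(otherParams) builds a fresh,
+// independent request for the same operation (otherParams is a placeholder).
+//
+// req := client.UpdateOpenIDConnectProviderThumbprintRequest(params)
+// retry := req.Copy(otherParams)
+// _, err := retry.Send(context.TODO())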
+type UpdateOpenIDConnectProviderThumbprintRequest struct { + *aws.Request + Input *UpdateOpenIDConnectProviderThumbprintInput + Copy func(*UpdateOpenIDConnectProviderThumbprintInput) UpdateOpenIDConnectProviderThumbprintRequest +} + +// Send marshals and sends the UpdateOpenIDConnectProviderThumbprint API request. +func (r UpdateOpenIDConnectProviderThumbprintRequest) Send(ctx context.Context) (*UpdateOpenIDConnectProviderThumbprintResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateOpenIDConnectProviderThumbprintResponse{ + UpdateOpenIDConnectProviderThumbprintOutput: r.Request.Data.(*UpdateOpenIDConnectProviderThumbprintOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateOpenIDConnectProviderThumbprintResponse is the response type for the +// UpdateOpenIDConnectProviderThumbprint API operation. +type UpdateOpenIDConnectProviderThumbprintResponse struct { + *UpdateOpenIDConnectProviderThumbprintOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateOpenIDConnectProviderThumbprint request. +func (r *UpdateOpenIDConnectProviderThumbprintResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go new file mode 100644 index 00000000..d8b69e73 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRole.go @@ -0,0 +1,142 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleRequest +type UpdateRoleInput struct { + _ struct{} `type:"structure"` + + // The new description that you want to apply to the specified role. + Description *string `type:"string"` + + // The maximum session duration (in seconds) that you want to set for the specified + // role. If you do not specify a value for this setting, the default maximum + // of one hour is applied. This setting can have a value from 1 hour to 12 hours. + // + // Anyone who assumes the role from the AWS CLI or API can use the DurationSeconds + // API parameter or the duration-seconds CLI parameter to request a longer session. + // The MaxSessionDuration setting determines the maximum duration that can be + // requested using the DurationSeconds parameter. If users don't specify a value + // for the DurationSeconds parameter, their security credentials are valid for + // one hour by default. This applies when you use the AssumeRole* API operations + // or the assume-role* CLI operations but does not apply when you use those + // operations to create a console URL. For more information, see Using IAM Roles + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html) in the + // IAM User Guide. + MaxSessionDuration *int64 `min:"3600" type:"integer"` + + // The name of the role that you want to modify. 
+ // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoleInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoleInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRoleInput"} + if s.MaxSessionDuration != nil && *s.MaxSessionDuration < 3600 { + invalidParams.Add(aws.NewErrParamMinValue("MaxSessionDuration", 3600)) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleResponse +type UpdateRoleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoleOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateRole = "UpdateRole" + +// UpdateRoleRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Updates the description or maximum session duration setting of a role. +// +// // Example sending a request using UpdateRoleRequest. +// req := client.UpdateRoleRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRole +func (c *Client) UpdateRoleRequest(input *UpdateRoleInput) UpdateRoleRequest { + op := &aws.Operation{ + Name: opUpdateRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRoleInput{} + } + + req := c.newRequest(op, input, &UpdateRoleOutput{}) + return UpdateRoleRequest{Request: req, Input: input, Copy: c.UpdateRoleRequest} +} + +// UpdateRoleRequest is the request type for the +// UpdateRole API operation. +type UpdateRoleRequest struct { + *aws.Request + Input *UpdateRoleInput + Copy func(*UpdateRoleInput) UpdateRoleRequest +} + +// Send marshals and sends the UpdateRole API request. +func (r UpdateRoleRequest) Send(ctx context.Context) (*UpdateRoleResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRoleResponse{ + UpdateRoleOutput: r.Request.Data.(*UpdateRoleOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRoleResponse is the response type for the +// UpdateRole API operation. +type UpdateRoleResponse struct { + *UpdateRoleOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRole request. +func (r *UpdateRoleResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go new file mode 100644 index 00000000..18a0de54 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateRoleDescription.go @@ -0,0 +1,135 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleDescriptionRequest +type UpdateRoleDescriptionInput struct { + _ struct{} `type:"structure"` + + // The new description that you want to apply to the specified role. + // + // Description is a required field + Description *string `type:"string" required:"true"` + + // The name of the role that you want to modify. + // + // RoleName is a required field + RoleName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateRoleDescriptionInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateRoleDescriptionInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateRoleDescriptionInput"} + + if s.Description == nil { + invalidParams.Add(aws.NewErrParamRequired("Description")) + } + + if s.RoleName == nil { + invalidParams.Add(aws.NewErrParamRequired("RoleName")) + } + if s.RoleName != nil && len(*s.RoleName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("RoleName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleDescriptionResponse +type UpdateRoleDescriptionOutput struct { + _ struct{} `type:"structure"` + + // A structure that contains details about the modified role. + Role *Role `type:"structure"` +} + +// String returns the string representation +func (s UpdateRoleDescriptionOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateRoleDescription = "UpdateRoleDescription" + +// UpdateRoleDescriptionRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Use UpdateRole instead. +// +// Modifies only the description of a role. This operation performs the same +// function as the Description parameter in the UpdateRole operation. +// +// // Example sending a request using UpdateRoleDescriptionRequest. +// req := client.UpdateRoleDescriptionRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateRoleDescription +func (c *Client) UpdateRoleDescriptionRequest(input *UpdateRoleDescriptionInput) UpdateRoleDescriptionRequest { + op := &aws.Operation{ + Name: opUpdateRoleDescription, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateRoleDescriptionInput{} + } + + req := c.newRequest(op, input, &UpdateRoleDescriptionOutput{}) + return UpdateRoleDescriptionRequest{Request: req, Input: input, Copy: c.UpdateRoleDescriptionRequest} +} + +// UpdateRoleDescriptionRequest is the request type for the +// UpdateRoleDescription API operation. +type UpdateRoleDescriptionRequest struct { + *aws.Request + Input *UpdateRoleDescriptionInput + Copy func(*UpdateRoleDescriptionInput) UpdateRoleDescriptionRequest +} + +// Send marshals and sends the UpdateRoleDescription API request. 
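+//
+// A hedged sketch (names are illustrative): the response embeds
+// UpdateRoleDescriptionOutput, so the modified role is readable directly from
+// the value returned by Send.
+//
+// req := client.UpdateRoleDescriptionRequest(&iam.UpdateRoleDescriptionInput{
+// RoleName: aws.String("example-role"),
+// Description: aws.String("updated description"),
+// })
+// if resp, err := req.Send(context.TODO()); err == nil && resp.Role != nil {
+// fmt.Println(*resp.Role.RoleName)
+// }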
+func (r UpdateRoleDescriptionRequest) Send(ctx context.Context) (*UpdateRoleDescriptionResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateRoleDescriptionResponse{ + UpdateRoleDescriptionOutput: r.Request.Data.(*UpdateRoleDescriptionOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateRoleDescriptionResponse is the response type for the +// UpdateRoleDescription API operation. +type UpdateRoleDescriptionResponse struct { + *UpdateRoleDescriptionOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateRoleDescription request. +func (r *UpdateRoleDescriptionResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go new file mode 100644 index 00000000..8a3731f2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSAMLProvider.go @@ -0,0 +1,146 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSAMLProviderRequest +type UpdateSAMLProviderInput struct { + _ struct{} `type:"structure"` + + // An XML document generated by an identity provider (IdP) that supports SAML + // 2.0. The document includes the issuer's name, expiration information, and + // keys that can be used to validate the SAML authentication response (assertions) + // that are received from the IdP. You must generate the metadata document using + // the identity management software that is used as your organization's IdP. + // + // SAMLMetadataDocument is a required field + SAMLMetadataDocument *string `min:"1000" type:"string" required:"true"` + + // The Amazon Resource Name (ARN) of the SAML provider to update. + // + // For more information about ARNs, see Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // SAMLProviderArn is a required field + SAMLProviderArn *string `min:"20" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSAMLProviderInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSAMLProviderInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateSAMLProviderInput"} + + if s.SAMLMetadataDocument == nil { + invalidParams.Add(aws.NewErrParamRequired("SAMLMetadataDocument")) + } + if s.SAMLMetadataDocument != nil && len(*s.SAMLMetadataDocument) < 1000 { + invalidParams.Add(aws.NewErrParamMinLen("SAMLMetadataDocument", 1000)) + } + + if s.SAMLProviderArn == nil { + invalidParams.Add(aws.NewErrParamRequired("SAMLProviderArn")) + } + if s.SAMLProviderArn != nil && len(*s.SAMLProviderArn) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("SAMLProviderArn", 20)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UpdateSAMLProvider request. 
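+//
+// A hedged usage note: on success the updated provider's ARN can be read back
+// from this output via the response returned by Send.
+//
+// resp, err := client.UpdateSAMLProviderRequest(params).Send(context.TODO())
+// if err == nil && resp.SAMLProviderArn != nil {
+// fmt.Println(*resp.SAMLProviderArn)
+// }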
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSAMLProviderResponse +type UpdateSAMLProviderOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider that was updated. + SAMLProviderArn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s UpdateSAMLProviderOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateSAMLProvider = "UpdateSAMLProvider" + +// UpdateSAMLProviderRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Updates the metadata document for an existing SAML provider resource object. +// +// This operation requires Signature Version 4 (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). +// +// // Example sending a request using UpdateSAMLProviderRequest. +// req := client.UpdateSAMLProviderRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSAMLProvider +func (c *Client) UpdateSAMLProviderRequest(input *UpdateSAMLProviderInput) UpdateSAMLProviderRequest { + op := &aws.Operation{ + Name: opUpdateSAMLProvider, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSAMLProviderInput{} + } + + req := c.newRequest(op, input, &UpdateSAMLProviderOutput{}) + return UpdateSAMLProviderRequest{Request: req, Input: input, Copy: c.UpdateSAMLProviderRequest} +} + +// UpdateSAMLProviderRequest is the request type for the +// UpdateSAMLProvider API operation. +type UpdateSAMLProviderRequest struct { + *aws.Request + Input *UpdateSAMLProviderInput + Copy func(*UpdateSAMLProviderInput) UpdateSAMLProviderRequest +} + +// Send marshals and sends the UpdateSAMLProvider API request. +func (r UpdateSAMLProviderRequest) Send(ctx context.Context) (*UpdateSAMLProviderResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateSAMLProviderResponse{ + UpdateSAMLProviderOutput: r.Request.Data.(*UpdateSAMLProviderOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateSAMLProviderResponse is the response type for the +// UpdateSAMLProvider API operation. +type UpdateSAMLProviderResponse struct { + *UpdateSAMLProviderOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateSAMLProvider request. +func (r *UpdateSAMLProviderResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go new file mode 100644 index 00000000..36708136 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSSHPublicKey.go @@ -0,0 +1,163 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSSHPublicKeyRequest +type UpdateSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status to assign to the SSH public key. Active means that the key can + // be used for authentication with an AWS CodeCommit repository. Inactive means + // that the key cannot be used. + // + // Status is a required field + Status StatusType `type:"string" required:"true" enum:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateSSHPublicKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateSSHPublicKeyInput"} + + if s.SSHPublicKeyId == nil { + invalidParams.Add(aws.NewErrParamRequired("SSHPublicKeyId")) + } + if s.SSHPublicKeyId != nil && len(*s.SSHPublicKeyId) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("SSHPublicKeyId", 20)) + } + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSSHPublicKeyOutput +type UpdateSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateSSHPublicKey = "UpdateSSHPublicKey" + +// UpdateSSHPublicKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Sets the status of an IAM user's SSH public key to active or inactive. SSH +// public keys that are inactive cannot be used for authentication. This operation +// can be used to disable a user's SSH public key as part of a key rotation +// work flow. +// +// The SSH public key affected by this operation is used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. 
For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// // Example sending a request using UpdateSSHPublicKeyRequest. +// req := client.UpdateSSHPublicKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSSHPublicKey +func (c *Client) UpdateSSHPublicKeyRequest(input *UpdateSSHPublicKeyInput) UpdateSSHPublicKeyRequest { + op := &aws.Operation{ + Name: opUpdateSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSSHPublicKeyInput{} + } + + req := c.newRequest(op, input, &UpdateSSHPublicKeyOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateSSHPublicKeyRequest{Request: req, Input: input, Copy: c.UpdateSSHPublicKeyRequest} +} + +// UpdateSSHPublicKeyRequest is the request type for the +// UpdateSSHPublicKey API operation. +type UpdateSSHPublicKeyRequest struct { + *aws.Request + Input *UpdateSSHPublicKeyInput + Copy func(*UpdateSSHPublicKeyInput) UpdateSSHPublicKeyRequest +} + +// Send marshals and sends the UpdateSSHPublicKey API request. +func (r UpdateSSHPublicKeyRequest) Send(ctx context.Context) (*UpdateSSHPublicKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateSSHPublicKeyResponse{ + UpdateSSHPublicKeyOutput: r.Request.Data.(*UpdateSSHPublicKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateSSHPublicKeyResponse is the response type for the +// UpdateSSHPublicKey API operation. +type UpdateSSHPublicKeyResponse struct { + *UpdateSSHPublicKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateSSHPublicKey request. +func (r *UpdateSSHPublicKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go new file mode 100644 index 00000000..70fd606a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServerCertificate.go @@ -0,0 +1,173 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServerCertificateRequest +type UpdateServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The new path for the server certificate. Include this only if you are updating + // the server certificate's path. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. 
In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + NewPath *string `min:"1" type:"string"` + + // The new name for the server certificate. Include this only if you are updating + // the server certificate's name. The name of the certificate cannot contain + // any spaces. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + NewServerCertificateName *string `min:"1" type:"string"` + + // The name of the server certificate that you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateServerCertificateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateServerCertificateInput"} + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewPath", 1)) + } + if s.NewServerCertificateName != nil && len(*s.NewServerCertificateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewServerCertificateName", 1)) + } + + if s.ServerCertificateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServerCertificateOutput +type UpdateServerCertificateOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateServerCertificate = "UpdateServerCertificate" + +// UpdateServerCertificateRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Updates the name and/or the path of the specified server certificate stored +// in IAM. +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic also includes a list of AWS services that +// can use the server certificates that you manage with IAM. +// +// You should understand the implications of changing a server certificate's +// path or name. For more information, see Renaming a Server Certificate (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts) +// in the IAM User Guide. 
+//
+// The person making the request (the principal) must have permission to change
+// the server certificate with the old name and the new name. For example, to
+// change the certificate named ProductionCert to ProdCert, the principal must
+// have a policy that allows them to update both certificates. If the principal
+// has permission to update the ProductionCert certificate, but not the ProdCert
+// certificate, then the update fails. For more information about permissions,
+// see Access Management (https://docs.aws.amazon.com/IAM/latest/UserGuide/access.html)
+// in the IAM User Guide.
+//
+// // Example sending a request using UpdateServerCertificateRequest.
+// req := client.UpdateServerCertificateRequest(params)
+// resp, err := req.Send(context.TODO())
+// if err == nil {
+// fmt.Println(resp)
+// }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServerCertificate
+func (c *Client) UpdateServerCertificateRequest(input *UpdateServerCertificateInput) UpdateServerCertificateRequest {
+ op := &aws.Operation{
+ Name: opUpdateServerCertificate,
+ HTTPMethod: "POST",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &UpdateServerCertificateInput{}
+ }
+
+ req := c.newRequest(op, input, &UpdateServerCertificateOutput{})
+ req.Handlers.Unmarshal.Remove(query.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ return UpdateServerCertificateRequest{Request: req, Input: input, Copy: c.UpdateServerCertificateRequest}
+}
+
+// UpdateServerCertificateRequest is the request type for the
+// UpdateServerCertificate API operation.
+type UpdateServerCertificateRequest struct {
+ *aws.Request
+ Input *UpdateServerCertificateInput
+ Copy func(*UpdateServerCertificateInput) UpdateServerCertificateRequest
+}
+
+// Send marshals and sends the UpdateServerCertificate API request.
+func (r UpdateServerCertificateRequest) Send(ctx context.Context) (*UpdateServerCertificateResponse, error) {
+ r.Request.SetContext(ctx)
+ err := r.Request.Send()
+ if err != nil {
+ return nil, err
+ }
+
+ resp := &UpdateServerCertificateResponse{
+ UpdateServerCertificateOutput: r.Request.Data.(*UpdateServerCertificateOutput),
+ response: &aws.Response{Request: r.Request},
+ }
+
+ return resp, nil
+}
+
+// UpdateServerCertificateResponse is the response type for the
+// UpdateServerCertificate API operation.
+type UpdateServerCertificateResponse struct {
+ *UpdateServerCertificateOutput
+
+ response *aws.Response
+}
+
+// SDKResponseMetdata returns the response metadata for the
+// UpdateServerCertificate request.
+func (r *UpdateServerCertificateResponse) SDKResponseMetdata() *aws.Response {
+ return r.response
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go
new file mode 100644
index 00000000..4e1a845c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateServiceSpecificCredential.go
@@ -0,0 +1,151 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+ +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServiceSpecificCredentialRequest +type UpdateServiceSpecificCredentialInput struct { + _ struct{} `type:"structure"` + + // The unique identifier of the service-specific credential. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. + // + // ServiceSpecificCredentialId is a required field + ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"` + + // The status to be assigned to the service-specific credential. + // + // Status is a required field + Status StatusType `type:"string" required:"true" enum:"true"` + + // The name of the IAM user associated with the service-specific credential. + // If you do not specify this value, then the operation assumes the user whose + // credentials are used to call the operation. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UpdateServiceSpecificCredentialInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateServiceSpecificCredentialInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateServiceSpecificCredentialInput"} + + if s.ServiceSpecificCredentialId == nil { + invalidParams.Add(aws.NewErrParamRequired("ServiceSpecificCredentialId")) + } + if s.ServiceSpecificCredentialId != nil && len(*s.ServiceSpecificCredentialId) < 20 { + invalidParams.Add(aws.NewErrParamMinLen("ServiceSpecificCredentialId", 20)) + } + if len(s.Status) == 0 { + invalidParams.Add(aws.NewErrParamRequired("Status")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServiceSpecificCredentialOutput +type UpdateServiceSpecificCredentialOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateServiceSpecificCredentialOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateServiceSpecificCredential = "UpdateServiceSpecificCredential" + +// UpdateServiceSpecificCredentialRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Sets the status of a service-specific credential to Active or Inactive. Service-specific +// credentials that are inactive cannot be used for authentication to the service. +// This operation can be used to disable a user's service-specific credential +// as part of a credential rotation work flow. +// +// // Example sending a request using UpdateServiceSpecificCredentialRequest. 
+// req := client.UpdateServiceSpecificCredentialRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateServiceSpecificCredential +func (c *Client) UpdateServiceSpecificCredentialRequest(input *UpdateServiceSpecificCredentialInput) UpdateServiceSpecificCredentialRequest { + op := &aws.Operation{ + Name: opUpdateServiceSpecificCredential, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateServiceSpecificCredentialInput{} + } + + req := c.newRequest(op, input, &UpdateServiceSpecificCredentialOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateServiceSpecificCredentialRequest{Request: req, Input: input, Copy: c.UpdateServiceSpecificCredentialRequest} +} + +// UpdateServiceSpecificCredentialRequest is the request type for the +// UpdateServiceSpecificCredential API operation. +type UpdateServiceSpecificCredentialRequest struct { + *aws.Request + Input *UpdateServiceSpecificCredentialInput + Copy func(*UpdateServiceSpecificCredentialInput) UpdateServiceSpecificCredentialRequest +} + +// Send marshals and sends the UpdateServiceSpecificCredential API request. +func (r UpdateServiceSpecificCredentialRequest) Send(ctx context.Context) (*UpdateServiceSpecificCredentialResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateServiceSpecificCredentialResponse{ + UpdateServiceSpecificCredentialOutput: r.Request.Data.(*UpdateServiceSpecificCredentialOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateServiceSpecificCredentialResponse is the response type for the +// UpdateServiceSpecificCredential API operation. +type UpdateServiceSpecificCredentialResponse struct { + *UpdateServiceSpecificCredentialOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateServiceSpecificCredential request. +func (r *UpdateServiceSpecificCredentialResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go new file mode 100644 index 00000000..b786a1b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateSigningCertificate.go @@ -0,0 +1,156 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSigningCertificateRequest +type UpdateSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The ID of the signing certificate you want to update. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters that can consist of any upper or lowercased letter + // or digit. 
+ //
+ // CertificateId is a required field
+ CertificateId *string `min:"24" type:"string" required:"true"`
+
+ // The status you want to assign to the certificate. Active means that the certificate
+ // can be used for API calls to AWS. Inactive means that the certificate cannot
+ // be used.
+ //
+ // Status is a required field
+ Status StatusType `type:"string" required:"true" enum:"true"`
+
+ // The name of the IAM user the signing certificate belongs to.
+ //
+ // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex))
+ // a string of characters consisting of upper and lowercase alphanumeric characters
+ // with no spaces. You can also include any of the following characters: _+=,.@-
+ UserName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s UpdateSigningCertificateInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UpdateSigningCertificateInput) Validate() error {
+ invalidParams := aws.ErrInvalidParams{Context: "UpdateSigningCertificateInput"}
+
+ if s.CertificateId == nil {
+ invalidParams.Add(aws.NewErrParamRequired("CertificateId"))
+ }
+ if s.CertificateId != nil && len(*s.CertificateId) < 24 {
+ invalidParams.Add(aws.NewErrParamMinLen("CertificateId", 24))
+ }
+ if len(s.Status) == 0 {
+ invalidParams.Add(aws.NewErrParamRequired("Status"))
+ }
+ if s.UserName != nil && len(*s.UserName) < 1 {
+ invalidParams.Add(aws.NewErrParamMinLen("UserName", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSigningCertificateOutput
+type UpdateSigningCertificateOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s UpdateSigningCertificateOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+const opUpdateSigningCertificate = "UpdateSigningCertificate"
+
+// UpdateSigningCertificateRequest returns a request value for making API operation for
+// AWS Identity and Access Management.
+//
+// Changes the status of the specified user signing certificate from active
+// to disabled, or vice versa. This operation can be used to disable an IAM
+// user's signing certificate as part of a certificate rotation work flow.
+//
+// If the UserName field is not specified, the user name is determined implicitly
+// based on the AWS access key ID used to sign the request. This operation works
+// for access keys under the AWS account. Consequently, you can use this operation
+// to manage AWS account root user credentials even if the AWS account has no
+// associated users.
+//
+// // Example sending a request using UpdateSigningCertificateRequest.
+// req := client.UpdateSigningCertificateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateSigningCertificate +func (c *Client) UpdateSigningCertificateRequest(input *UpdateSigningCertificateInput) UpdateSigningCertificateRequest { + op := &aws.Operation{ + Name: opUpdateSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateSigningCertificateInput{} + } + + req := c.newRequest(op, input, &UpdateSigningCertificateOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateSigningCertificateRequest{Request: req, Input: input, Copy: c.UpdateSigningCertificateRequest} +} + +// UpdateSigningCertificateRequest is the request type for the +// UpdateSigningCertificate API operation. +type UpdateSigningCertificateRequest struct { + *aws.Request + Input *UpdateSigningCertificateInput + Copy func(*UpdateSigningCertificateInput) UpdateSigningCertificateRequest +} + +// Send marshals and sends the UpdateSigningCertificate API request. +func (r UpdateSigningCertificateRequest) Send(ctx context.Context) (*UpdateSigningCertificateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateSigningCertificateResponse{ + UpdateSigningCertificateOutput: r.Request.Data.(*UpdateSigningCertificateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateSigningCertificateResponse is the response type for the +// UpdateSigningCertificate API operation. +type UpdateSigningCertificateResponse struct { + *UpdateSigningCertificateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateSigningCertificate request. +func (r *UpdateSigningCertificateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go new file mode 100644 index 00000000..a90137ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UpdateUser.go @@ -0,0 +1,165 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" + "github.com/aws/aws-sdk-go-v2/private/protocol" + "github.com/aws/aws-sdk-go-v2/private/protocol/query" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateUserRequest +type UpdateUserInput struct { + _ struct{} `type:"structure"` + + // New path for the IAM user. Include this parameter only if you're changing + // the user's path. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + NewPath *string `min:"1" type:"string"` + + // New name for the user. 
Include this parameter only if you're changing the + // user's name. + // + // IAM user, group, role, and policy names must be unique within the account. + // Names are not distinguished by case. For example, you cannot create resources + // named both "MyResource" and "myresource". + NewUserName *string `min:"1" type:"string"` + + // Name of the user to update. If you're changing the name of the user, this + // is the original user name. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UpdateUserInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UpdateUserInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UpdateUserInput"} + if s.NewPath != nil && len(*s.NewPath) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewPath", 1)) + } + if s.NewUserName != nil && len(*s.NewUserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("NewUserName", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateUserOutput +type UpdateUserOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s UpdateUserOutput) String() string { + return awsutil.Prettify(s) +} + +const opUpdateUser = "UpdateUser" + +// UpdateUserRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Updates the name and/or the path of the specified IAM user. +// +// You should understand the implications of changing an IAM user's path or +// name. For more information, see Renaming an IAM User (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_manage.html#id_users_renaming) +// and Renaming an IAM Group (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_groups_manage_rename.html) +// in the IAM User Guide. +// +// To change a user name, the requester must have appropriate permissions on +// both the source object and the target object. For example, to change Bob +// to Robert, the entity making the request must have permission on Bob and +// Robert, or must have permission on all (*). For more information about permissions, +// see Permissions and Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html). +// +// // Example sending a request using UpdateUserRequest. 
+// req := client.UpdateUserRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UpdateUser +func (c *Client) UpdateUserRequest(input *UpdateUserInput) UpdateUserRequest { + op := &aws.Operation{ + Name: opUpdateUser, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateUserInput{} + } + + req := c.newRequest(op, input, &UpdateUserOutput{}) + req.Handlers.Unmarshal.Remove(query.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return UpdateUserRequest{Request: req, Input: input, Copy: c.UpdateUserRequest} +} + +// UpdateUserRequest is the request type for the +// UpdateUser API operation. +type UpdateUserRequest struct { + *aws.Request + Input *UpdateUserInput + Copy func(*UpdateUserInput) UpdateUserRequest +} + +// Send marshals and sends the UpdateUser API request. +func (r UpdateUserRequest) Send(ctx context.Context) (*UpdateUserResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UpdateUserResponse{ + UpdateUserOutput: r.Request.Data.(*UpdateUserOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UpdateUserResponse is the response type for the +// UpdateUser API operation. +type UpdateUserResponse struct { + *UpdateUserOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UpdateUser request. +func (r *UpdateUserResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go new file mode 100644 index 00000000..e71ce84a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSSHPublicKey.go @@ -0,0 +1,161 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSSHPublicKeyRequest +type UploadSSHPublicKeyInput struct { + _ struct{} `type:"structure"` + + // The SSH public key. The public key must be encoded in ssh-rsa format or PEM + // format. The minimum bit-length of the public key is 2048 bits. For example, + // you can generate a 2048-bit key, and the resulting PEM file is 1679 bytes + // long. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // SSHPublicKeyBody is a required field + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The name of the IAM user to associate the SSH public key with. 
+ // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadSSHPublicKeyInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UploadSSHPublicKeyInput"} + + if s.SSHPublicKeyBody == nil { + invalidParams.Add(aws.NewErrParamRequired("SSHPublicKeyBody")) + } + if s.SSHPublicKeyBody != nil && len(*s.SSHPublicKeyBody) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("SSHPublicKeyBody", 1)) + } + + if s.UserName == nil { + invalidParams.Add(aws.NewErrParamRequired("UserName")) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UploadSSHPublicKey request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSSHPublicKeyResponse +type UploadSSHPublicKeyOutput struct { + _ struct{} `type:"structure"` + + // Contains information about the SSH public key. + SSHPublicKey *SSHPublicKey `type:"structure"` +} + +// String returns the string representation +func (s UploadSSHPublicKeyOutput) String() string { + return awsutil.Prettify(s) +} + +const opUploadSSHPublicKey = "UploadSSHPublicKey" + +// UploadSSHPublicKeyRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Uploads an SSH public key and associates it with the specified IAM user. +// +// The SSH public key uploaded by this operation can be used only for authenticating +// the associated IAM user to an AWS CodeCommit repository. For more information +// about using SSH keys to authenticate to an AWS CodeCommit repository, see +// Set up AWS CodeCommit for SSH Connections (https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-credentials-ssh.html) +// in the AWS CodeCommit User Guide. +// +// // Example sending a request using UploadSSHPublicKeyRequest. +// req := client.UploadSSHPublicKeyRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSSHPublicKey +func (c *Client) UploadSSHPublicKeyRequest(input *UploadSSHPublicKeyInput) UploadSSHPublicKeyRequest { + op := &aws.Operation{ + Name: opUploadSSHPublicKey, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSSHPublicKeyInput{} + } + + req := c.newRequest(op, input, &UploadSSHPublicKeyOutput{}) + return UploadSSHPublicKeyRequest{Request: req, Input: input, Copy: c.UploadSSHPublicKeyRequest} +} + +// UploadSSHPublicKeyRequest is the request type for the +// UploadSSHPublicKey API operation. +type UploadSSHPublicKeyRequest struct { + *aws.Request + Input *UploadSSHPublicKeyInput + Copy func(*UploadSSHPublicKeyInput) UploadSSHPublicKeyRequest +} + +// Send marshals and sends the UploadSSHPublicKey API request. 
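+//
+// A hedged sketch (key material and names are placeholders): uploading a key
+// and reading the generated key ID back from the embedded output.
+//
+// req := client.UploadSSHPublicKeyRequest(&iam.UploadSSHPublicKeyInput{
+// UserName: aws.String("alice"),
+// SSHPublicKeyBody: aws.String("ssh-rsa AAAAB3Nza... user@example.com"),
+// })
+// if resp, err := req.Send(context.TODO()); err == nil && resp.SSHPublicKey != nil {
+// fmt.Println(*resp.SSHPublicKey.SSHPublicKeyId)
+// }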
+func (r UploadSSHPublicKeyRequest) Send(ctx context.Context) (*UploadSSHPublicKeyResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UploadSSHPublicKeyResponse{ + UploadSSHPublicKeyOutput: r.Request.Data.(*UploadSSHPublicKeyOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UploadSSHPublicKeyResponse is the response type for the +// UploadSSHPublicKey API operation. +type UploadSSHPublicKeyResponse struct { + *UploadSSHPublicKeyOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UploadSSHPublicKey request. +func (r *UploadSSHPublicKeyResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go new file mode 100644 index 00000000..af7916d6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadServerCertificate.go @@ -0,0 +1,243 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadServerCertificateRequest +type UploadServerCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the certificate chain. This is typically a concatenation + // of the PEM-encoded public key certificates of the chain. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + CertificateChain *string `min:"1" type:"string"` + + // The path for the server certificate. For more information about paths, see + // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the IAM User Guide. + // + // This parameter is optional. If it is not included, it defaults to a slash + // (/). This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of either a forward slash (/) by itself + // or a string that must begin and end with forward slashes. 
In addition, it + // can contain any ASCII character from the ! (\u0021) through the DEL character + // (\u007F), including most punctuation characters, digits, and upper and lowercased + // letters. + // + // If you are uploading a server certificate specifically for use with Amazon + // CloudFront distributions, you must specify a path using the path parameter. + // The path must begin with /cloudfront and must include a trailing slash (for + // example, /cloudfront/test/). + Path *string `min:"1" type:"string"` + + // The contents of the private key in PEM-encoded format. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // PrivateKey is a required field + PrivateKey *string `min:"1" type:"string" required:"true"` + + // The name for the server certificate. Do not include the path in this value. + // The name of the certificate cannot contain any spaces. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadServerCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadServerCertificateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UploadServerCertificateInput"} + + if s.CertificateBody == nil { + invalidParams.Add(aws.NewErrParamRequired("CertificateBody")) + } + if s.CertificateBody != nil && len(*s.CertificateBody) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CertificateBody", 1)) + } + if s.CertificateChain != nil && len(*s.CertificateChain) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CertificateChain", 1)) + } + if s.Path != nil && len(*s.Path) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Path", 1)) + } + + if s.PrivateKey == nil { + invalidParams.Add(aws.NewErrParamRequired("PrivateKey")) + } + if s.PrivateKey != nil && len(*s.PrivateKey) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("PrivateKey", 1)) + } + + if s.ServerCertificateName == nil { + invalidParams.Add(aws.NewErrParamRequired("ServerCertificateName")) + } + if s.ServerCertificateName != nil && len(*s.ServerCertificateName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("ServerCertificateName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UploadServerCertificate request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadServerCertificateResponse +type UploadServerCertificateOutput struct { + _ struct{} `type:"structure"` + + // The meta information of the uploaded server certificate without its certificate + // body, certificate chain, and private key. 
+ ServerCertificateMetadata *ServerCertificateMetadata `type:"structure"` +} + +// String returns the string representation +func (s UploadServerCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +const opUploadServerCertificate = "UploadServerCertificate" + +// UploadServerCertificateRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Uploads a server certificate entity for the AWS account. The server certificate +// entity includes a public key certificate, a private key, and an optional +// certificate chain, which should all be PEM-encoded. +// +// We recommend that you use AWS Certificate Manager (https://docs.aws.amazon.com/acm/) +// to provision, manage, and deploy your server certificates. With ACM you can +// request a certificate, deploy it to AWS resources, and let ACM handle certificate +// renewals for you. Certificates provided by ACM are free. For more information +// about using ACM, see the AWS Certificate Manager User Guide (https://docs.aws.amazon.com/acm/latest/userguide/). +// +// For more information about working with server certificates, see Working +// with Server Certificates (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html) +// in the IAM User Guide. This topic includes a list of AWS services that can +// use the server certificates that you manage with IAM. +// +// For information about the number of server certificates you can upload, see +// Limitations on IAM Entities and Objects (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html) +// in the IAM User Guide. +// +// Because the body of the public key certificate, private key, and the certificate +// chain can be large, you should use POST rather than GET when calling UploadServerCertificate. +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Calling the API by Making HTTP Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html) +// in the IAM User Guide. +// +// // Example sending a request using UploadServerCertificateRequest. +// req := client.UploadServerCertificateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadServerCertificate +func (c *Client) UploadServerCertificateRequest(input *UploadServerCertificateInput) UploadServerCertificateRequest { + op := &aws.Operation{ + Name: opUploadServerCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadServerCertificateInput{} + } + + req := c.newRequest(op, input, &UploadServerCertificateOutput{}) + return UploadServerCertificateRequest{Request: req, Input: input, Copy: c.UploadServerCertificateRequest} +} + +// UploadServerCertificateRequest is the request type for the +// UploadServerCertificate API operation. +type UploadServerCertificateRequest struct { + *aws.Request + Input *UploadServerCertificateInput + Copy func(*UploadServerCertificateInput) UploadServerCertificateRequest +} + +// Send marshals and sends the UploadServerCertificate API request. 
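+//
+// Because Send takes a context.Context, the call can be bounded; a hedged
+// sketch (req is a previously built UploadServerCertificateRequest, and the
+// Arn field on ServerCertificateMetadata is an assumption about api_types.go):
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    resp, err := req.Send(ctx)
+//    if err == nil && resp.ServerCertificateMetadata != nil {
+//        fmt.Println(aws.StringValue(resp.ServerCertificateMetadata.Arn))
+//    }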
+func (r UploadServerCertificateRequest) Send(ctx context.Context) (*UploadServerCertificateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UploadServerCertificateResponse{ + UploadServerCertificateOutput: r.Request.Data.(*UploadServerCertificateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UploadServerCertificateResponse is the response type for the +// UploadServerCertificate API operation. +type UploadServerCertificateResponse struct { + *UploadServerCertificateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UploadServerCertificate request. +func (r *UploadServerCertificateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go new file mode 100644 index 00000000..9db43af4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_op_UploadSigningCertificate.go @@ -0,0 +1,165 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSigningCertificateRequest +type UploadSigningCertificateInput struct { + _ struct{} `type:"structure"` + + // The contents of the signing certificate. + // + // The regex pattern (http://wikipedia.org/wiki/regex) used to validate this + // parameter is a string of characters consisting of the following: + // + // * Any printable ASCII character ranging from the space character (\u0020) + // through the end of the ASCII character range + // + // * The printable characters in the Basic Latin and Latin-1 Supplement character + // set (through \u00FF) + // + // * The special characters tab (\u0009), line feed (\u000A), and carriage + // return (\u000D) + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The name of the user the signing certificate is for. + // + // This parameter allows (through its regex pattern (http://wikipedia.org/wiki/regex)) + // a string of characters consisting of upper and lowercase alphanumeric characters + // with no spaces. You can also include any of the following characters: _+=,.@- + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s UploadSigningCertificateInput) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadSigningCertificateInput) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "UploadSigningCertificateInput"} + + if s.CertificateBody == nil { + invalidParams.Add(aws.NewErrParamRequired("CertificateBody")) + } + if s.CertificateBody != nil && len(*s.CertificateBody) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("CertificateBody", 1)) + } + if s.UserName != nil && len(*s.UserName) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("UserName", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains the response to a successful UploadSigningCertificate request. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSigningCertificateResponse +type UploadSigningCertificateOutput struct { + _ struct{} `type:"structure"` + + // Information about the certificate. + // + // Certificate is a required field + Certificate *SigningCertificate `type:"structure" required:"true"` +} + +// String returns the string representation +func (s UploadSigningCertificateOutput) String() string { + return awsutil.Prettify(s) +} + +const opUploadSigningCertificate = "UploadSigningCertificate" + +// UploadSigningCertificateRequest returns a request value for making API operation for +// AWS Identity and Access Management. +// +// Uploads an X.509 signing certificate and associates it with the specified +// IAM user. Some AWS services use X.509 signing certificates to validate requests +// that are signed with a corresponding private key. When you upload the certificate, +// its default status is Active. +// +// If the UserName is not specified, the IAM user name is determined implicitly +// based on the AWS access key ID used to sign the request. This operation works +// for access keys under the AWS account. Consequently, you can use this operation +// to manage AWS account root user credentials even if the AWS account has no +// associated users. +// +// Because the body of an X.509 certificate can be large, you should use POST +// rather than GET when calling UploadSigningCertificate. For information about +// setting up signatures and authorization through the API, go to Signing AWS +// API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about using the Query +// API with IAM, go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in the IAM User Guide. +// +// // Example sending a request using UploadSigningCertificateRequest. +// req := client.UploadSigningCertificateRequest(params) +// resp, err := req.Send(context.TODO()) +// if err == nil { +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UploadSigningCertificate +func (c *Client) UploadSigningCertificateRequest(input *UploadSigningCertificateInput) UploadSigningCertificateRequest { + op := &aws.Operation{ + Name: opUploadSigningCertificate, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UploadSigningCertificateInput{} + } + + req := c.newRequest(op, input, &UploadSigningCertificateOutput{}) + return UploadSigningCertificateRequest{Request: req, Input: input, Copy: c.UploadSigningCertificateRequest} +} + +// UploadSigningCertificateRequest is the request type for the +// UploadSigningCertificate API operation. +type UploadSigningCertificateRequest struct { + *aws.Request + Input *UploadSigningCertificateInput + Copy func(*UploadSigningCertificateInput) UploadSigningCertificateRequest +} + +// Send marshals and sends the UploadSigningCertificate API request. 
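+//
+// A hedged sketch of reading the result (resp is a previously obtained
+// *UploadSigningCertificateResponse; the CertificateId field on
+// SigningCertificate is an assumption about api_types.go):
+//
+//    if resp.Certificate != nil {
+//        fmt.Println(aws.StringValue(resp.Certificate.CertificateId))
+//    }
+//    // The underlying *aws.Response is available via resp.SDKResponseMetdata().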
+func (r UploadSigningCertificateRequest) Send(ctx context.Context) (*UploadSigningCertificateResponse, error) { + r.Request.SetContext(ctx) + err := r.Request.Send() + if err != nil { + return nil, err + } + + resp := &UploadSigningCertificateResponse{ + UploadSigningCertificateOutput: r.Request.Data.(*UploadSigningCertificateOutput), + response: &aws.Response{Request: r.Request}, + } + + return resp, nil +} + +// UploadSigningCertificateResponse is the response type for the +// UploadSigningCertificate API operation. +type UploadSigningCertificateResponse struct { + *UploadSigningCertificateOutput + + response *aws.Response +} + +// SDKResponseMetdata returns the response metadata for the +// UploadSigningCertificate request. +func (r *UploadSigningCertificateResponse) SDKResponseMetdata() *aws.Response { + return r.response +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_types.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_types.go new file mode 100644 index 00000000..97e8434c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_types.go @@ -0,0 +1,1939 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/internal/awsutil" +) + +var _ aws.Config +var _ = awsutil.Prettify + +// Contains information about an AWS access key. +// +// This data type is used as a response element in the CreateAccessKey and ListAccessKeys +// operations. +// +// The SecretAccessKey value is returned only in response to CreateAccessKey. +// You can get a secret access key only when you first create an access key; +// you cannot recover the secret access key later. If you lose a secret access +// key, you must create a new access key. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AccessKey +type AccessKey struct { + _ struct{} `type:"structure"` + + // The ID for this access key. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date when the access key was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The secret key used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The status of the access key. Active means that the key is valid for API + // calls, while Inactive means it is not. + // + // Status is a required field + Status StatusType `type:"string" required:"true" enum:"true"` + + // The name of the IAM user that the access key is associated with. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s AccessKey) String() string { + return awsutil.Prettify(s) +} + +// Contains information about the last time an AWS access key was used since +// IAM began tracking this information on April 22, 2015. +// +// This data type is used as a response element in the GetAccessKeyLastUsed +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AccessKeyLastUsed +type AccessKeyLastUsed struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the access key was most recently used. 
This field is null in the following + // situations: + // + // * The user does not have an access key. + // + // * An access key exists but has not been used since IAM began tracking + // this information. + // + // * There is no sign-in data associated with the user + // + // LastUsedDate is a required field + LastUsedDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The AWS region where this access key was most recently used. The value for + // this field is "N/A" in the following situations: + // + // * The user does not have an access key. + // + // * An access key exists but has not been used since IAM began tracking + // this information. + // + // * There is no sign-in data associated with the user + // + // For more information about AWS regions, see Regions and Endpoints (https://docs.aws.amazon.com/general/latest/gr/rande.html) + // in the Amazon Web Services General Reference. + // + // Region is a required field + Region *string `type:"string" required:"true"` + + // The name of the AWS service with which this access key was most recently + // used. The value of this field is "N/A" in the following situations: + // + // * The user does not have an access key. + // + // * An access key exists but has not been used since IAM started tracking + // this information. + // + // * There is no sign-in data associated with the user + // + // ServiceName is a required field + ServiceName *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s AccessKeyLastUsed) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an AWS access key, without its secret key. +// +// This data type is used as a response element in the ListAccessKeys operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AccessKeyMetadata +type AccessKeyMetadata struct { + _ struct{} `type:"structure"` + + // The ID for this access key. + AccessKeyId *string `min:"16" type:"string"` + + // The date when the access key was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The status of the access key. Active means that the key is valid for API + // calls; Inactive means it is not. + Status StatusType `type:"string" enum:"true"` + + // The name of the IAM user that the key is associated with. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s AccessKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an attached permissions boundary. +// +// An attached permissions boundary is a managed policy that has been attached +// to a user or role to set the permissions boundary. +// +// For more information about permissions boundaries, see Permissions Boundaries +// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) +// in the IAM User Guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachedPermissionsBoundary +type AttachedPermissionsBoundary struct { + _ struct{} `type:"structure"` + + // The ARN of the policy used to set the permissions boundary for the user or + // role. + PermissionsBoundaryArn *string `min:"20" type:"string"` + + // The permissions boundary usage type that indicates what type of IAM resource + // is used as the permissions boundary for an entity. This data type can only + // have a value of Policy. 
+	PermissionsBoundaryType PermissionsBoundaryAttachmentType `type:"string" enum:"true"`
+}
+
+// String returns the string representation
+func (s AttachedPermissionsBoundary) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about an attached policy.
+//
+// An attached policy is a managed policy that has been attached to a user,
+// group, or role. This data type is used as a response element in the ListAttachedGroupPolicies,
+// ListAttachedRolePolicies, ListAttachedUserPolicies, and GetAccountAuthorizationDetails
+// operations.
+//
+// For more information about managed policies, refer to Managed Policies and
+// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html)
+// in the Using IAM guide.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/AttachedPolicy
+type AttachedPolicy struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+	//
+	// For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS
+	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	PolicyArn *string `min:"20" type:"string"`
+
+	// The friendly name of the attached policy.
+	PolicyName *string `min:"1" type:"string"`
+}
+
+// String returns the string representation
+func (s AttachedPolicy) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about a condition context key. It includes the name
+// of the key and specifies the value (or values, if the context key supports
+// multiple values) to use in the simulation. This information is used when
+// evaluating the Condition elements of the input policies.
+//
+// This data type is used as an input parameter to SimulateCustomPolicy and
+// SimulatePrincipalPolicy .
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ContextEntry
+type ContextEntry struct {
+	_ struct{} `type:"structure"`
+
+	// The full name of a condition context key, including the service prefix. For
+	// example, aws:SourceIp or s3:VersionId.
+	ContextKeyName *string `min:"5" type:"string"`
+
+	// The data type of the value (or values) specified in the ContextKeyValues
+	// parameter.
+	ContextKeyType ContextKeyTypeEnum `type:"string" enum:"true"`
+
+	// The value (or values, if the condition context key supports multiple values)
+	// to provide to the simulation when the key is referenced by a Condition element
+	// in an input policy.
+	ContextKeyValues []string `type:"list"`
+}
+
+// String returns the string representation
+func (s ContextEntry) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ContextEntry) Validate() error {
+	invalidParams := aws.ErrInvalidParams{Context: "ContextEntry"}
+	if s.ContextKeyName != nil && len(*s.ContextKeyName) < 5 {
+		invalidParams.Add(aws.NewErrParamMinLen("ContextKeyName", 5))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// The reason that the service-linked role deletion failed.
+//
+// This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus
+// operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/DeletionTaskFailureReasonType +type DeletionTaskFailureReasonType struct { + _ struct{} `type:"structure"` + + // A short description of the reason that the service-linked role deletion failed. + Reason *string `type:"string"` + + // A list of objects that contains details about the service-linked role deletion + // failure, if that information is returned by the service. If the service-linked + // role has active sessions or if any resources that were used by the role have + // not been deleted from the linked service, the role can't be deleted. This + // parameter includes a list of the resources that are associated with the role + // and the region in which the resources are being used. + RoleUsageList []RoleUsageType `type:"list"` +} + +// String returns the string representation +func (s DeletionTaskFailureReasonType) String() string { + return awsutil.Prettify(s) +} + +// An object that contains details about when the IAM entities (users or roles) +// were last used in an attempt to access the specified AWS service. +// +// This data type is a response element in the GetServiceLastAccessedDetailsWithEntities +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EntityDetails +type EntityDetails struct { + _ struct{} `type:"structure"` + + // The EntityInfo object that contains details about the entity (user or role). + // + // EntityInfo is a required field + EntityInfo *EntityInfo `type:"structure" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the authenticated entity last attempted to access AWS. AWS does not + // report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticated *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s EntityDetails) String() string { + return awsutil.Prettify(s) +} + +// Contains details about the specified entity (user or role). +// +// This data type is an element of the EntityDetails object. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EntityInfo +type EntityInfo struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The identifier of the entity (user or role). + // + // Id is a required field + Id *string `min:"16" type:"string" required:"true"` + + // The name of the entity (user or role). + // + // Name is a required field + Name *string `min:"1" type:"string" required:"true"` + + // The path to the entity (user or role). For more information about paths, + // see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The type of entity (user or role). 
+	//
+	// Type is a required field
+	Type PolicyOwnerEntityType `type:"string" required:"true" enum:"true"`
+}
+
+// String returns the string representation
+func (s EntityInfo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about the reason that the operation failed.
+//
+// This data type is used as a response element in the GetServiceLastAccessedDetails
+// operation and the GetServiceLastAccessedDetailsWithEntities operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ErrorDetails
+type ErrorDetails struct {
+	_ struct{} `type:"structure"`
+
+	// The error code associated with the operation failure.
+	//
+	// Code is a required field
+	Code *string `type:"string" required:"true"`
+
+	// Detailed information about the reason that the operation failed.
+	//
+	// Message is a required field
+	Message *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDetails) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains the results of a simulation.
+//
+// This data type is used by the return parameter of SimulateCustomPolicy and
+// SimulatePrincipalPolicy .
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/EvaluationResult
+type EvaluationResult struct {
+	_ struct{} `type:"structure"`
+
+	// The name of the API operation tested on the indicated resource.
+	//
+	// EvalActionName is a required field
+	EvalActionName *string `min:"3" type:"string" required:"true"`
+
+	// The result of the simulation.
+	//
+	// EvalDecision is a required field
+	EvalDecision PolicyEvaluationDecisionType `type:"string" required:"true" enum:"true"`
+
+	// Additional details about the results of the evaluation decision. When there
+	// are both IAM policies and resource policies, this parameter explains how
+	// each set of policies contributes to the final evaluation decision. When simulating
+	// cross-account access to a resource, both the resource-based policy and the
+	// caller's IAM policy must grant access. See How IAM Roles Differ from Resource-based
+	// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_compare-resource-policies.html)
+	EvalDecisionDetails map[string]PolicyEvaluationDecisionType `type:"map"`
+
+	// The ARN of the resource that the indicated API operation was tested on.
+	EvalResourceName *string `min:"1" type:"string"`
+
+	// A list of the statements in the input policies that determine the result
+	// for this scenario. Remember that even if multiple statements allow the operation
+	// on the resource, if only one statement denies that operation, then the explicit
+	// deny overrides any allow. In addition, the deny statement is the only entry
+	// included in the result.
+	MatchedStatements []Statement `type:"list"`
+
+	// A list of context keys that are required by the included input policies but
+	// that were not provided by one of the input parameters. This list is used
+	// when the resource in a simulation is "*", either explicitly, or when the
+	// ResourceArns parameter is blank. If you include a list of resources, then any
+	// missing context values are instead included under the ResourceSpecificResults
+	// section. To discover the context keys used by a set of policies, you can
+	// call GetContextKeysForCustomPolicy or GetContextKeysForPrincipalPolicy.
+ MissingContextValues []string `type:"list"` + + // A structure that details how AWS Organizations and its service control policies + // affect the results of the simulation. Only applies if the simulated user's + // account is part of an organization. + OrganizationsDecisionDetail *OrganizationsDecisionDetail `type:"structure"` + + // The individual results of the simulation of the API operation specified in + // EvalActionName on each resource. + ResourceSpecificResults []ResourceSpecificResult `type:"list"` +} + +// String returns the string representation +func (s EvaluationResult) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an IAM group entity. +// +// This data type is used as a response element in the following operations: +// +// * CreateGroup +// +// * GetGroup +// +// * ListGroups +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/Group +type Group struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the group. For more information + // about ARNs and how to use them in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // GroupId is a required field + GroupId *string `min:"16" type:"string" required:"true"` + + // The friendly name that identifies the group. + // + // GroupName is a required field + GroupName *string `min:"1" type:"string" required:"true"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s Group) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an IAM group, including all of the group's policies. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/GroupDetail +type GroupDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // A list of the managed policies attached to the group. + AttachedManagedPolicies []AttachedPolicy `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the group was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The stable and unique string identifying the group. 
For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + GroupId *string `min:"16" type:"string"` + + // The friendly name that identifies the group. + GroupName *string `min:"1" type:"string"` + + // A list of the inline policies embedded in the group. + GroupPolicyList []PolicyDetail `type:"list"` + + // The path to the group. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GroupDetail) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an instance profile. +// +// This data type is used as a response element in the following operations: +// +// * CreateInstanceProfile +// +// * GetInstanceProfile +// +// * ListInstanceProfiles +// +// * ListInstanceProfilesForRole +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/InstanceProfile +type InstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the instance profile. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date when the instance profile was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The stable and unique string identifying the instance profile. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // InstanceProfileId is a required field + InstanceProfileId *string `min:"16" type:"string" required:"true"` + + // The name identifying the instance profile. + // + // InstanceProfileName is a required field + InstanceProfileName *string `min:"1" type:"string" required:"true"` + + // The path to the instance profile. For more information about paths, see IAM + // Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` + + // The role associated with the instance profile. + // + // Roles is a required field + Roles []Role `type:"list" required:"true"` +} + +// String returns the string representation +func (s InstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// Contains details about the permissions policies that are attached to the +// specified identity (user, group, or role). +// +// This data type is used as a response element in the ListPoliciesGrantingServiceAccess +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ListPoliciesGrantingServiceAccessEntry +type ListPoliciesGrantingServiceAccessEntry struct { + _ struct{} `type:"structure"` + + // The PoliciesGrantingServiceAccess object that contains details about the + // policy. + Policies []PolicyGrantingServiceAccess `type:"list"` + + // The namespace of the service that was accessed. 
+ // + // To learn the service namespace of a service, go to Actions, Resources, and + // Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html) + // in the IAM User Guide. Choose the name of the service to view details for + // that service. In the first paragraph, find the service prefix. For example, + // (service prefix: a4b). For more information about service namespaces, see + // AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) + // in the AWS General Reference. + ServiceNamespace *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s ListPoliciesGrantingServiceAccessEntry) String() string { + return awsutil.Prettify(s) +} + +// Contains the user name and password create date for a user. +// +// This data type is used as a response element in the CreateLoginProfile and +// GetLoginProfile operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/LoginProfile +type LoginProfile struct { + _ struct{} `type:"structure"` + + // The date when the password for the user was created. + // + // CreateDate is a required field + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // Specifies whether the user is required to set a new password on next sign-in. + PasswordResetRequired *bool `type:"boolean"` + + // The name of the user, which can be used for signing in to the AWS Management + // Console. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s LoginProfile) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an MFA device. +// +// This data type is used as a response element in the ListMFADevices operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/MFADevice +type MFADevice struct { + _ struct{} `type:"structure"` + + // The date when the MFA device was enabled for the user. + // + // EnableDate is a required field + EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The serial number that uniquely identifies the MFA device. For virtual MFA + // devices, the serial number is the device ARN. + // + // SerialNumber is a required field + SerialNumber *string `min:"9" type:"string" required:"true"` + + // The user with whom the MFA device is associated. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s MFADevice) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a managed policy, including the policy's ARN, +// versions, and the number of principal entities (users, groups, and roles) +// that the policy is attached to. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +// +// For more information about managed policies, see Managed Policies and Inline +// Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ManagedPolicyDetail +type ManagedPolicyDetail struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. 
+ // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of principal entities (users, groups, and roles) that the policy + // is attached to. + AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default (operative) + // version. + // + // For more information about policy versions, see Versioning for Managed Policies + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-versions.html) + // in the Using IAM guide. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The number of entities (users and roles) for which the policy is used as + // the permissions boundary. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundaryUsageCount *int64 `type:"integer"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // A list containing information about the versions of the policy. + PolicyVersionList []PolicyVersion `type:"list"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ManagedPolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// Contains the Amazon Resource Name (ARN) for an IAM OpenID Connect provider. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/OpenIDConnectProviderListEntry +type OpenIDConnectProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. 
+ Arn *string `min:"20" type:"string"` +} + +// String returns the string representation +func (s OpenIDConnectProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// Contains information about AWS Organizations's effect on a policy simulation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/OrganizationsDecisionDetail +type OrganizationsDecisionDetail struct { + _ struct{} `type:"structure"` + + // Specifies whether the simulated operation is allowed by the AWS Organizations + // service control policies that impact the simulated user's account. + AllowedByOrganizations *bool `type:"boolean"` +} + +// String returns the string representation +func (s OrganizationsDecisionDetail) String() string { + return awsutil.Prettify(s) +} + +// Contains information about the account password policy. +// +// This data type is used as a response element in the GetAccountPasswordPolicy +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PasswordPolicy +type PasswordPolicy struct { + _ struct{} `type:"structure"` + + // Specifies whether IAM users are allowed to change their own password. + AllowUsersToChangePassword *bool `type:"boolean"` + + // Indicates whether passwords in the account expire. Returns true if MaxPasswordAge + // contains a value greater than 0. Returns false if MaxPasswordAge is 0 or + // not present. + ExpirePasswords *bool `type:"boolean"` + + // Specifies whether IAM users are prevented from setting a new password after + // their password has expired. + HardExpiry *bool `type:"boolean"` + + // The number of days that an IAM user password is valid. + MaxPasswordAge *int64 `min:"1" type:"integer"` + + // Minimum length to require for IAM user passwords. + MinimumPasswordLength *int64 `min:"6" type:"integer"` + + // Specifies the number of previous passwords that IAM users are prevented from + // reusing. + PasswordReusePrevention *int64 `min:"1" type:"integer"` + + // Specifies whether to require lowercase characters for IAM user passwords. + RequireLowercaseCharacters *bool `type:"boolean"` + + // Specifies whether to require numbers for IAM user passwords. + RequireNumbers *bool `type:"boolean"` + + // Specifies whether to require symbols for IAM user passwords. + RequireSymbols *bool `type:"boolean"` + + // Specifies whether to require uppercase characters for IAM user passwords. + RequireUppercaseCharacters *bool `type:"boolean"` +} + +// String returns the string representation +func (s PasswordPolicy) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a managed policy. +// +// This data type is used as a response element in the CreatePolicy, GetPolicy, +// and ListPolicies operations. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/Policy +type Policy struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + Arn *string `min:"20" type:"string"` + + // The number of entities (users, groups, and roles) that the policy is attached + // to. 
+ AttachmentCount *int64 `type:"integer"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The identifier for the version of the policy that is set as the default version. + DefaultVersionId *string `type:"string"` + + // A friendly description of the policy. + // + // This element is included in the response to the GetPolicy operation. It is + // not included in the response to the ListPolicies operation. + Description *string `type:"string"` + + // Specifies whether the policy can be attached to an IAM user, group, or role. + IsAttachable *bool `type:"boolean"` + + // The path to the policy. + // + // For more information about paths, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `type:"string"` + + // The number of entities (users and roles) for which the policy is used to + // set the permissions boundary. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundaryUsageCount *int64 `type:"integer"` + + // The stable and unique string identifying the policy. + // + // For more information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + PolicyId *string `min:"16" type:"string"` + + // The friendly name (not ARN) identifying the policy. + PolicyName *string `min:"1" type:"string"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy was last updated. + // + // When a policy has only one version, this field contains the date and time + // when the policy was created. When a policy has more than one version, this + // field contains the date and time when the most recent policy version was + // created. + UpdateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s Policy) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an IAM policy, including the policy document. +// +// This data type is used as a response element in the GetAccountAuthorizationDetails +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PolicyDetail +type PolicyDetail struct { + _ struct{} `type:"structure"` + + // The policy document. + PolicyDocument *string `min:"1" type:"string"` + + // The name of the policy. + PolicyName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyDetail) String() string { + return awsutil.Prettify(s) +} + +// Contains details about the permissions policies that are attached to the +// specified identity (user, group, or role). +// +// This data type is an element of the ListPoliciesGrantingServiceAccessEntry +// object. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PolicyGrantingServiceAccess +type PolicyGrantingServiceAccess struct { + _ struct{} `type:"structure"` + + // The name of the entity (user or role) to which the inline policy is attached. + // + // This field is null for managed policies. 
For more information about these + // policy types, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) + // in the IAM User Guide. + EntityName *string `min:"1" type:"string"` + + // The type of entity (user or role) that used the policy to access the service + // to which the inline policy is attached. + // + // This field is null for managed policies. For more information about these + // policy types, see Managed Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) + // in the IAM User Guide. + EntityType PolicyOwnerEntityType `type:"string" enum:"true"` + + // The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources. + // + // For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS + // Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) + // in the AWS General Reference. + PolicyArn *string `min:"20" type:"string"` + + // The policy name. + // + // PolicyName is a required field + PolicyName *string `min:"1" type:"string" required:"true"` + + // The policy type. For more information about these policy types, see Managed + // Policies and Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_managed-vs-inline.html) + // in the IAM User Guide. + // + // PolicyType is a required field + PolicyType PolicyType `type:"string" required:"true" enum:"true"` +} + +// String returns the string representation +func (s PolicyGrantingServiceAccess) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a group that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// operation. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PolicyGroup +type PolicyGroup struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the group. For more information + // about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + GroupId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the group. + GroupName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyGroup) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a role that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// operation. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PolicyRole +type PolicyRole struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. 
+ RoleId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the role. + RoleName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyRole) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a user that a managed policy is attached to. +// +// This data type is used as a response element in the ListEntitiesForPolicy +// operation. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PolicyUser +type PolicyUser struct { + _ struct{} `type:"structure"` + + // The stable and unique string identifying the user. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in the IAM User Guide. + UserId *string `min:"16" type:"string"` + + // The name (friendly name, not ARN) identifying the user. + UserName *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s PolicyUser) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a version of a managed policy. +// +// This data type is used as a response element in the CreatePolicyVersion, +// GetPolicyVersion, ListPolicyVersions, and GetAccountAuthorizationDetails +// operations. +// +// For more information about managed policies, refer to Managed Policies and +// Inline Policies (https://docs.aws.amazon.com/IAM/latest/UserGuide/policies-managed-vs-inline.html) +// in the Using IAM guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/PolicyVersion +type PolicyVersion struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the policy version was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The policy document. + // + // The policy document is returned in the response to the GetPolicyVersion and + // GetAccountAuthorizationDetails operations. It is not returned in the response + // to the CreatePolicyVersion or ListPolicyVersions operations. + // + // The policy document returned in this structure is URL-encoded compliant with + // RFC 3986 (https://tools.ietf.org/html/rfc3986). You can use a URL decoding + // method to convert the policy back to plain JSON text. For example, if you + // use Java, you can use the decode method of the java.net.URLDecoder utility + // class in the Java SDK. Other languages and SDKs provide similar functionality. + Document *string `min:"1" type:"string"` + + // Specifies whether the policy version is set as the policy's default version. + IsDefaultVersion *bool `type:"boolean"` + + // The identifier for the policy version. + // + // Policy version identifiers always begin with v (always lowercase). When a + // policy is created, the first policy version is v1. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s PolicyVersion) String() string { + return awsutil.Prettify(s) +} + +// Contains the row and column of a location of a Statement element in a policy +// document. +// +// This data type is used as a member of the Statement type. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/Position
+type Position struct {
+	_ struct{} `type:"structure"`
+
+	// The column in the line containing the specified position in the document.
+	Column *int64 `type:"integer"`
+
+	// The line containing the specified position in the document.
+	Line *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Position) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains the result of the simulation of a single API operation call on a
+// single resource.
+//
+// This data type is used by a member of the EvaluationResult data type.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ResourceSpecificResult
+type ResourceSpecificResult struct {
+	_ struct{} `type:"structure"`
+
+	// Additional details about the results of the evaluation decision. When there
+	// are both IAM policies and resource policies, this parameter explains how
+	// each set of policies contributes to the final evaluation decision. When simulating
+	// cross-account access to a resource, both the resource-based policy and the
+	// caller's IAM policy must grant access.
+	EvalDecisionDetails map[string]PolicyEvaluationDecisionType `type:"map"`
+
+	// The result of the simulation of the simulated API operation on the resource
+	// specified in EvalResourceName.
+	//
+	// EvalResourceDecision is a required field
+	EvalResourceDecision PolicyEvaluationDecisionType `type:"string" required:"true" enum:"true"`
+
+	// The name of the simulated resource, in Amazon Resource Name (ARN) format.
+	//
+	// EvalResourceName is a required field
+	EvalResourceName *string `min:"1" type:"string" required:"true"`
+
+	// A list of the statements in the input policies that determine the result
+	// for this part of the simulation. Remember that even if multiple statements
+	// allow the operation on the resource, if any statement denies that operation,
+	// then the explicit deny overrides any allow. In addition, the deny statement
+	// is the only entry included in the result.
+	MatchedStatements []Statement `type:"list"`
+
+	// A list of context keys that are required by the included input policies but
+	// that were not provided by one of the input parameters. This list is used
+	// when a list of ARNs is included in the ResourceArns parameter instead of
+	// "*". If you do not specify individual resources, by setting ResourceArns
+	// to "*" or by not including the ResourceArns parameter, then any missing context
+	// values are instead included under the EvaluationResults section. To discover
+	// the context keys used by a set of policies, you can call GetContextKeysForCustomPolicy
+	// or GetContextKeysForPrincipalPolicy.
+	MissingContextValues []string `type:"list"`
+}
+
+// String returns the string representation
+func (s ResourceSpecificResult) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about an IAM role. This structure is returned as a response
+// element in several API operations that interact with roles.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/Role
+type Role struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN) specifying the role. For more information
+	// about ARNs and how to use them in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the IAM User Guide.
+	//
+	// Arn is a required field
+	Arn *string `min:"20" type:"string" required:"true"`
+
+	// The policy that grants an entity permission to assume the role.
+	AssumeRolePolicyDocument *string `min:"1" type:"string"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the role was created.
+	//
+	// CreateDate is a required field
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// A description of the role that you provide.
+	Description *string `type:"string"`
+
+	// The maximum session duration (in seconds) for the specified role. Anyone
+	// who uses the AWS CLI or API to assume the role can specify the duration
+	// using the optional DurationSeconds API parameter or duration-seconds CLI
+	// parameter.
+	MaxSessionDuration *int64 `min:"3600" type:"integer"`
+
+	// The path to the role. For more information about paths, see IAM Identifiers
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	//
+	// Path is a required field
+	Path *string `min:"1" type:"string" required:"true"`
+
+	// The ARN of the policy used to set the permissions boundary for the role.
+	//
+	// For more information about permissions boundaries, see Permissions Boundaries
+	// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)
+	// in the IAM User Guide.
+	PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"`
+
+	// The stable and unique string identifying the role. For more information about
+	// IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	//
+	// RoleId is a required field
+	RoleId *string `min:"16" type:"string" required:"true"`
+
+	// The friendly name that identifies the role.
+	//
+	// RoleName is a required field
+	RoleName *string `min:"1" type:"string" required:"true"`
+
+	// A list of tags that are attached to the specified role. For more information
+	// about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+	// in the IAM User Guide.
+	Tags []Tag `type:"list"`
+}
+
+// String returns the string representation
+func (s Role) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about an IAM role, including all of the role's policies.
+//
+// This data type is used as a response element in the GetAccountAuthorizationDetails
+// operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RoleDetail
+type RoleDetail struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+	//
+	// For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS
+	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
+	Arn *string `min:"20" type:"string"`
+
+	// The trust policy that grants permission to assume the role.
+	AssumeRolePolicyDocument *string `min:"1" type:"string"`
+
+	// A list of managed policies attached to the role. These policies are the role's
+	// access (permissions) policies.
+	AttachedManagedPolicies []AttachedPolicy `type:"list"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the role was created.
+ CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // A list of instance profiles that contain this role. + InstanceProfileList []InstanceProfile `type:"list"` + + // The path to the role. For more information about paths, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + Path *string `min:"1" type:"string"` + + // The ARN of the policy used to set the permissions boundary for the role. + // + // For more information about permissions boundaries, see Permissions Boundaries + // for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html) + // in the IAM User Guide. + PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"` + + // The stable and unique string identifying the role. For more information about + // IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + RoleId *string `min:"16" type:"string"` + + // The friendly name that identifies the role. + RoleName *string `min:"1" type:"string"` + + // A list of inline policies embedded in the role. These policies are the role's + // access (permissions) policies. + RolePolicyList []PolicyDetail `type:"list"` + + // A list of tags that are attached to the specified role. For more information + // about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) + // in the IAM User Guide. + Tags []Tag `type:"list"` +} + +// String returns the string representation +func (s RoleDetail) String() string { + return awsutil.Prettify(s) +} + +// An object that contains details about how a service-linked role is used, +// if that information is returned by the service. +// +// This data type is used as a response element in the GetServiceLinkedRoleDeletionStatus +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/RoleUsageType +type RoleUsageType struct { + _ struct{} `type:"structure"` + + // The name of the region where the service-linked role is being used. + Region *string `min:"1" type:"string"` + + // The name of the resource that is using the service-linked role. + Resources []string `type:"list"` +} + +// String returns the string representation +func (s RoleUsageType) String() string { + return awsutil.Prettify(s) +} + +// Contains the list of SAML providers for this account. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SAMLProviderListEntry +type SAMLProviderListEntry struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the SAML provider. + Arn *string `min:"20" type:"string"` + + // The date and time when the SAML provider was created. + CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The expiration date and time for the SAML provider. + ValidUntil *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SAMLProviderListEntry) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an SSH public key. +// +// This data type is used as a response element in the GetSSHPublicKey and UploadSSHPublicKey +// operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SSHPublicKey +type SSHPublicKey struct { + _ struct{} `type:"structure"` + + // The MD5 message digest of the SSH public key. 
+ // + // Fingerprint is a required field + Fingerprint *string `min:"48" type:"string" required:"true"` + + // The SSH public key. + // + // SSHPublicKeyBody is a required field + SSHPublicKeyBody *string `min:"1" type:"string" required:"true"` + + // The unique identifier for the SSH public key. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means that the key can be used for + // authentication with an AWS CodeCommit repository. Inactive means that the + // key cannot be used. + // + // Status is a required field + Status StatusType `type:"string" required:"true" enum:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the IAM user associated with the SSH public key. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKey) String() string { + return awsutil.Prettify(s) +} + +// Contains information about an SSH public key, without the key's body or fingerprint. +// +// This data type is used as a response element in the ListSSHPublicKeys operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SSHPublicKeyMetadata +type SSHPublicKeyMetadata struct { + _ struct{} `type:"structure"` + + // The unique identifier for the SSH public key. + // + // SSHPublicKeyId is a required field + SSHPublicKeyId *string `min:"20" type:"string" required:"true"` + + // The status of the SSH public key. Active means that the key can be used for + // authentication with an AWS CodeCommit repository. Inactive means that the + // key cannot be used. + // + // Status is a required field + Status StatusType `type:"string" required:"true" enum:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the SSH public key was uploaded. + // + // UploadDate is a required field + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The name of the IAM user associated with the SSH public key. + // + // UserName is a required field + UserName *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s SSHPublicKeyMetadata) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a server certificate. +// +// This data type is used as a response element in the GetServerCertificate +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ServerCertificate +type ServerCertificate struct { + _ struct{} `type:"structure"` + + // The contents of the public key certificate. + // + // CertificateBody is a required field + CertificateBody *string `min:"1" type:"string" required:"true"` + + // The contents of the public key certificate chain. + CertificateChain *string `min:"1" type:"string"` + + // The meta information of the server certificate, such as its name, path, ID, + // and ARN. 
+ // + // ServerCertificateMetadata is a required field + ServerCertificateMetadata *ServerCertificateMetadata `type:"structure" required:"true"` +} + +// String returns the string representation +func (s ServerCertificate) String() string { + return awsutil.Prettify(s) +} + +// Contains information about a server certificate without its certificate body, +// certificate chain, and private key. +// +// This data type is used as a response element in the UploadServerCertificate +// and ListServerCertificates operations. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ServerCertificateMetadata +type ServerCertificateMetadata struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) specifying the server certificate. For more + // information about ARNs and how to use them in policies, see IAM Identifiers + // (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date on which the certificate is set to expire. + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The path to the server certificate. For more information about paths, see + // IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Path is a required field + Path *string `min:"1" type:"string" required:"true"` + + // The stable and unique string identifying the server certificate. For more + // information about IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // ServerCertificateId is a required field + ServerCertificateId *string `min:"16" type:"string" required:"true"` + + // The name that identifies the server certificate. + // + // ServerCertificateName is a required field + ServerCertificateName *string `min:"1" type:"string" required:"true"` + + // The date when the server certificate was uploaded. + UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s ServerCertificateMetadata) String() string { + return awsutil.Prettify(s) +} + +// Contains details about the most recent attempt to access the service. +// +// This data type is used as a response element in the GetServiceLastAccessedDetails +// operation. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ServiceLastAccessed +type ServiceLastAccessed struct { + _ struct{} `type:"structure"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when an authenticated entity most recently attempted to access the service. + // AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). + LastAuthenticated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The ARN of the authenticated entity (user or role) that last attempted to + // access the service. AWS does not report unauthenticated requests. + // + // This field is null if no IAM entities attempted to access the service within + // the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period). 
+	LastAuthenticatedEntity *string `min:"20" type:"string"`
+
+	// The name of the service in which access was attempted.
+	//
+	// ServiceName is a required field
+	ServiceName *string `type:"string" required:"true"`
+
+	// The namespace of the service in which access was attempted.
+	//
+	// To learn the service namespace of a service, go to Actions, Resources, and
+	// Condition Keys for AWS Services (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_actions-resources-contextkeys.html)
+	// in the IAM User Guide. Choose the name of the service to view details for
+	// that service. In the first paragraph, find the service prefix. For example,
+	// (service prefix: a4b). For more information about service namespaces, see
+	// AWS Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces)
+	// in the AWS General Reference.
+	//
+	// ServiceNamespace is a required field
+	ServiceNamespace *string `min:"1" type:"string" required:"true"`
+
+	// The total number of authenticated entities that have attempted to access
+	// the service.
+	//
+	// This field is null if no IAM entities attempted to access the service within
+	// the reporting period (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_access-advisor.html#service-last-accessed-reporting-period).
+	TotalAuthenticatedEntities *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s ServiceLastAccessed) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains the details of a service-specific credential.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ServiceSpecificCredential
+type ServiceSpecificCredential struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the service-specific credential was created.
+	//
+	// CreateDate is a required field
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The name of the service associated with the service-specific credential.
+	//
+	// ServiceName is a required field
+	ServiceName *string `type:"string" required:"true"`
+
+	// The generated password for the service-specific credential.
+	//
+	// ServicePassword is a required field
+	ServicePassword *string `type:"string" required:"true"`
+
+	// The unique identifier for the service-specific credential.
+	//
+	// ServiceSpecificCredentialId is a required field
+	ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"`
+
+	// The generated user name for the service-specific credential. This value is
+	// generated by combining the IAM user's name with the ID number of the AWS
+	// account, as in jane-at-123456789012, for example. This value cannot be
+	// configured by the user.
+	//
+	// ServiceUserName is a required field
+	ServiceUserName *string `min:"17" type:"string" required:"true"`
+
+	// The status of the service-specific credential. Active means that the key
+	// is valid for API calls, while Inactive means it is not.
+	//
+	// Status is a required field
+	Status StatusType `type:"string" required:"true" enum:"true"`
+
+	// The name of the IAM user associated with the service-specific credential.
+	//
+	// UserName is a required field
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ServiceSpecificCredential) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains additional details about a service-specific credential.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/ServiceSpecificCredentialMetadata
+type ServiceSpecificCredentialMetadata struct {
+	_ struct{} `type:"structure"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the service-specific credential was created.
+	//
+	// CreateDate is a required field
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The name of the service associated with the service-specific credential.
+	//
+	// ServiceName is a required field
+	ServiceName *string `type:"string" required:"true"`
+
+	// The unique identifier for the service-specific credential.
+	//
+	// ServiceSpecificCredentialId is a required field
+	ServiceSpecificCredentialId *string `min:"20" type:"string" required:"true"`
+
+	// The generated user name for the service-specific credential.
+	//
+	// ServiceUserName is a required field
+	ServiceUserName *string `min:"17" type:"string" required:"true"`
+
+	// The status of the service-specific credential. Active means that the key
+	// is valid for API calls, while Inactive means it is not.
+	//
+	// Status is a required field
+	Status StatusType `type:"string" required:"true" enum:"true"`
+
+	// The name of the IAM user associated with the service-specific credential.
+	//
+	// UserName is a required field
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ServiceSpecificCredentialMetadata) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about an X.509 signing certificate.
+//
+// This data type is used as a response element in the UploadSigningCertificate
+// and ListSigningCertificates operations.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/SigningCertificate
+type SigningCertificate struct {
+	_ struct{} `type:"structure"`
+
+	// The contents of the signing certificate.
+	//
+	// CertificateBody is a required field
+	CertificateBody *string `min:"1" type:"string" required:"true"`
+
+	// The ID for the signing certificate.
+	//
+	// CertificateId is a required field
+	CertificateId *string `min:"24" type:"string" required:"true"`
+
+	// The status of the signing certificate. Active means that the key is valid
+	// for API calls, while Inactive means it is not.
+	//
+	// Status is a required field
+	Status StatusType `type:"string" required:"true" enum:"true"`
+
+	// The date when the signing certificate was uploaded.
+	UploadDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The name of the user the signing certificate is associated with.
+	//
+	// UserName is a required field
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s SigningCertificate) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains a reference to a Statement element in a policy document that determines
+// the result of the simulation.
+//
+// This data type is used by the MatchedStatements member of the EvaluationResult
+// type.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/Statement +type Statement struct { + _ struct{} `type:"structure"` + + // The row and column of the end of a Statement in an IAM policy. + EndPosition *Position `type:"structure"` + + // The identifier of the policy that was provided as an input. + SourcePolicyId *string `type:"string"` + + // The type of the policy. + SourcePolicyType PolicySourceType `type:"string" enum:"true"` + + // The row and column of the beginning of the Statement in an IAM policy. + StartPosition *Position `type:"structure"` +} + +// String returns the string representation +func (s Statement) String() string { + return awsutil.Prettify(s) +} + +// A structure that represents user-provided metadata that can be associated +// with a resource such as an IAM user or role. For more information about tagging, +// see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html) +// in the IAM User Guide. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // The key name that can be used to look up or retrieve the associated value. + // For example, Department or Cost Center are common choices. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // The value associated with this tag. For example, tags with a key name of + // Department could have values such as Human Resources, Accounting, and Support. + // Tags with a key name of Cost Center might have values that consist of the + // number associated with the different cost centers in your company. Typically, + // many resources have tags with the same key name but with different values. + // + // AWS always interprets the tag Value as a single string. If you need to store + // an array, you can store comma-separated values in the string. However, you + // must interpret the value in your code. + // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := aws.ErrInvalidParams{Context: "Tag"} + + if s.Key == nil { + invalidParams.Add(aws.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(aws.NewErrParamMinLen("Key", 1)) + } + + if s.Value == nil { + invalidParams.Add(aws.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// Contains information about an IAM user entity. +// +// This data type is used as a response element in the following operations: +// +// * CreateUser +// +// * GetUser +// +// * ListUsers +// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/User +type User struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) that identifies the user. For more information + // about ARNs and how to use ARNs in policies, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) + // in the Using IAM guide. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601), + // when the user was created. 
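Since AWS always interprets a tag Value as one opaque string (see the Tag docs above), array-like values have to be packed and unpacked by the caller. A minimal sketch, assuming hypothetical comma-separated cost-center values (this snippet is illustrative and not part of the vendored file):

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Hypothetical tag value holding several cost centers in one string,
    	// since AWS interprets a tag Value as a single string.
    	value := "1234,5678,9012"

    	// Interpreting the packed value is up to the caller; here we split on
    	// commas and trim any stray whitespace.
    	for _, cc := range strings.Split(value, ",") {
    		fmt.Println(strings.TrimSpace(cc))
    	}
    }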
+	//
+	// CreateDate is a required field
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the user's password was last used to sign in to an AWS website. For
+	// a list of AWS websites that capture a user's last sign-in time, see the Credential
+	// Reports (https://docs.aws.amazon.com/IAM/latest/UserGuide/credential-reports.html)
+	// topic in the Using IAM guide. If a password is used more than once in a five-minute
+	// span, only the first use is returned in this field. If the field is null
+	// (no value), then it indicates that the user never signed in with a password.
+	// This can be because:
+	//
+	// * The user never had a password.
+	//
+	// * A password exists but has not been used since IAM started tracking this
+	// information on October 20, 2014.
+	//
+	// A null value does not mean that the user never had a password. Also, if the
+	// user does not currently have a password, but had one in the past, then this
+	// field contains the date and time the most recent password was used.
+	//
+	// This value is returned only in the GetUser and ListUsers operations.
+	PasswordLastUsed *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// The path to the user. For more information about paths, see IAM Identifiers
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	//
+	// Path is a required field
+	Path *string `min:"1" type:"string" required:"true"`
+
+	// The ARN of the policy used to set the permissions boundary for the user.
+	//
+	// For more information about permissions boundaries, see Permissions Boundaries
+	// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)
+	// in the IAM User Guide.
+	PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"`
+
+	// A list of tags that are associated with the specified user. For more information
+	// about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+	// in the IAM User Guide.
+	Tags []Tag `type:"list"`
+
+	// The stable and unique string identifying the user. For more information about
+	// IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	//
+	// UserId is a required field
+	UserId *string `min:"16" type:"string" required:"true"`
+
+	// The friendly name identifying the user.
+	//
+	// UserName is a required field
+	UserName *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s User) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about an IAM user, including all the user's policies
+// and all the IAM groups the user is in.
+//
+// This data type is used as a response element in the GetAccountAuthorizationDetails
+// operation.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/UserDetail
+type UserDetail struct {
+	_ struct{} `type:"structure"`
+
+	// The Amazon Resource Name (ARN). ARNs are unique identifiers for AWS resources.
+	//
+	// For more information about ARNs, go to Amazon Resource Names (ARNs) and AWS
+	// Service Namespaces (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
+	// in the AWS General Reference.
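PasswordLastUsed above is a nullable *time.Time with the subtle semantics described in its docs, so callers should nil-check before dereferencing. A minimal sketch, assuming a User value already fetched via GetUser or ListUsers (the helper name is hypothetical; this snippet is illustrative and not part of the vendored file):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go-v2/service/iam"
    )

    // describePasswordUse is a hypothetical helper showing how to interpret
    // the nullable PasswordLastUsed field on a User.
    func describePasswordUse(u iam.User) string {
    	if u.PasswordLastUsed == nil {
    		// Null means no password sign-in has been recorded; per the field
    		// docs, it does not prove the user never had a password.
    		return "no recorded password sign-in"
    	}
    	return fmt.Sprintf("password last used %s", u.PasswordLastUsed.Format("2006-01-02"))
    }

    func main() {
    	fmt.Println(describePasswordUse(iam.User{}))
    }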
+	Arn *string `min:"20" type:"string"`
+
+	// A list of the managed policies attached to the user.
+	AttachedManagedPolicies []AttachedPolicy `type:"list"`
+
+	// The date and time, in ISO 8601 date-time format (http://www.iso.org/iso/iso8601),
+	// when the user was created.
+	CreateDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// A list of IAM groups that the user is in.
+	GroupList []string `type:"list"`
+
+	// The path to the user. For more information about paths, see IAM Identifiers
+	// (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	Path *string `min:"1" type:"string"`
+
+	// The ARN of the policy used to set the permissions boundary for the user.
+	//
+	// For more information about permissions boundaries, see Permissions Boundaries
+	// for IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_boundaries.html)
+	// in the IAM User Guide.
+	PermissionsBoundary *AttachedPermissionsBoundary `type:"structure"`
+
+	// A list of tags that are associated with the specified user. For more information
+	// about tagging, see Tagging IAM Identities (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_tags.html)
+	// in the IAM User Guide.
+	Tags []Tag `type:"list"`
+
+	// The stable and unique string identifying the user. For more information about
+	// IDs, see IAM Identifiers (https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html)
+	// in the Using IAM guide.
+	UserId *string `min:"16" type:"string"`
+
+	// The friendly name identifying the user.
+	UserName *string `min:"1" type:"string"`
+
+	// A list of the inline policies embedded in the user.
+	UserPolicyList []PolicyDetail `type:"list"`
+}
+
+// String returns the string representation
+func (s UserDetail) String() string {
+	return awsutil.Prettify(s)
+}
+
+// Contains information about a virtual MFA device.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/iam-2010-05-08/VirtualMFADevice
+type VirtualMFADevice struct {
+	_ struct{} `type:"structure"`
+
+	// The base32 seed defined as specified in RFC3548 (https://tools.ietf.org/html/rfc3548.txt).
+	// The Base32StringSeed is base64-encoded.
+	//
+	// Base32StringSeed is automatically base64 encoded/decoded by the SDK.
+	Base32StringSeed []byte `type:"blob"`
+
+	// The date and time on which the virtual MFA device was enabled.
+	EnableDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+	// A QR code PNG image that encodes otpauth://totp/$virtualMFADeviceName@$AccountName?secret=$Base32String
+	// where $virtualMFADeviceName is one of the create call arguments. AccountName
+	// is the user name if set (otherwise, the account ID), and Base32String is
+	// the seed in base32 format. The Base32String value is base64-encoded.
+	//
+	// QRCodePNG is automatically base64 encoded/decoded by the SDK.
+	QRCodePNG []byte `type:"blob"`
+
+	// The serial number associated with VirtualMFADevice.
+	//
+	// SerialNumber is a required field
+	SerialNumber *string `min:"9" type:"string" required:"true"`
+
+	// The IAM user associated with this virtual MFA device.
+ User *User `type:"structure"` +} + +// String returns the string representation +func (s VirtualMFADevice) String() string { + return awsutil.Prettify(s) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_waiters.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_waiters.go new file mode 100644 index 00000000..d40fabd6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/api_waiters.go @@ -0,0 +1,186 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package iam + +import ( + "context" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" +) + +// WaitUntilInstanceProfileExists uses the IAM API operation +// GetInstanceProfile to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilInstanceProfileExists(ctx context.Context, input *GetInstanceProfileInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilInstanceProfileExists", + MaxAttempts: 40, + Delay: aws.ConstantWaiterDelay(1 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.StatusWaiterMatch, + Expected: 200, + }, + { + State: aws.RetryWaiterState, + Matcher: aws.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *GetInstanceProfileInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.GetInstanceProfileRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) + + return w.Wait(ctx) +} + +// WaitUntilPolicyExists uses the IAM API operation +// GetPolicy to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilPolicyExists(ctx context.Context, input *GetPolicyInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilPolicyExists", + MaxAttempts: 20, + Delay: aws.ConstantWaiterDelay(1 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.StatusWaiterMatch, + Expected: 200, + }, + { + State: aws.RetryWaiterState, + Matcher: aws.ErrorWaiterMatch, + Expected: "NoSuchEntity", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *GetPolicyInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.GetPolicyRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) + + return w.Wait(ctx) +} + +// WaitUntilRoleExists uses the IAM API operation +// GetRole to wait for a condition to be met before returning. 
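A minimal sketch of driving one of these generated waiters (not part of the vendored file; the config-loading pattern follows the iamiface docs further below, and the role name and timeout here are hypothetical):

    package main

    import (
    	"context"
    	"time"

    	"github.com/aws/aws-sdk-go-v2/aws"
    	"github.com/aws/aws-sdk-go-v2/aws/external"
    	"github.com/aws/aws-sdk-go-v2/service/iam"
    )

    func main() {
    	cfg, err := external.LoadDefaultAWSConfig()
    	if err != nil {
    		panic("failed to load config, " + err.Error())
    	}
    	svc := iam.New(cfg)

    	// Bound the total wait; the waiter retries GetRole on NoSuchEntity
    	// until the role is visible or the context expires.
    	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
    	defer cancel()

    	if err := svc.WaitUntilRoleExists(ctx, &iam.GetRoleInput{
    		RoleName: aws.String("my-example-role"), // hypothetical role name
    	}); err != nil {
    		panic("role never became visible: " + err.Error())
    	}
    }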
+// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilRoleExists(ctx context.Context, input *GetRoleInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilRoleExists", + MaxAttempts: 20, + Delay: aws.ConstantWaiterDelay(1 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.StatusWaiterMatch, + Expected: 200, + }, + { + State: aws.RetryWaiterState, + Matcher: aws.ErrorWaiterMatch, + Expected: "NoSuchEntity", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *GetRoleInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.GetRoleRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) + + return w.Wait(ctx) +} + +// WaitUntilUserExists uses the IAM API operation +// GetUser to wait for a condition to be met before returning. +// If the condition is not met within the max attempt window, an error will +// be returned. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *Client) WaitUntilUserExists(ctx context.Context, input *GetUserInput, opts ...aws.WaiterOption) error { + w := aws.Waiter{ + Name: "WaitUntilUserExists", + MaxAttempts: 20, + Delay: aws.ConstantWaiterDelay(1 * time.Second), + Acceptors: []aws.WaiterAcceptor{ + { + State: aws.SuccessWaiterState, + Matcher: aws.StatusWaiterMatch, + Expected: 200, + }, + { + State: aws.RetryWaiterState, + Matcher: aws.ErrorWaiterMatch, + Expected: "NoSuchEntity", + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []aws.Option) (*aws.Request, error) { + var inCpy *GetUserInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req := c.GetUserRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req.Request, nil + }, + } + w.ApplyOptions(opts...) + + return w.Wait(ctx) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/iamiface/interface.go b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/iamiface/interface.go new file mode 100644 index 00000000..9e569920 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/aws/aws-sdk-go-v2/service/iam/iamiface/interface.go @@ -0,0 +1,352 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package iamiface provides an interface to enable mocking the AWS Identity and Access Management service client +// for testing your code. +// +// It is important to note that this interface will have breaking changes +// when the service model is updated and adds new API operations, paginators, +// and waiters. +package iamiface + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/iam" +) + +// ClientAPI provides an interface to enable mocking the +// iam.Client methods. 
This makes it easier to unit test code
+// that calls out to the SDK's service client.
+//
+// The best way to use this interface is to stub out the SDK's service client's
+// calls for unit testing your code with the SDK, without needing to inject
+// custom request handlers into the SDK's request pipeline.
+//
+//    // myFunc uses an SDK service client to make a request to
+//    // IAM.
+//    func myFunc(svc iamiface.ClientAPI) bool {
+//        // Make svc.AddClientIDToOpenIDConnectProvider request
+//    }
+//
+//    func main() {
+//        cfg, err := external.LoadDefaultAWSConfig()
+//        if err != nil {
+//            panic("failed to load config, " + err.Error())
+//        }
+//
+//        svc := iam.New(cfg)
+//
+//        myFunc(svc)
+//    }
+//
+// In your _test.go file:
+//
+//    // Define a mock struct to be used in your unit tests of myFunc.
+//    type mockClientClient struct {
+//        iamiface.ClientAPI
+//    }
+//    func (m *mockClientClient) AddClientIDToOpenIDConnectProvider(input *iam.AddClientIDToOpenIDConnectProviderInput) (*iam.AddClientIDToOpenIDConnectProviderOutput, error) {
+//        // mock response/functionality
+//    }
+//
+//    func TestMyFunc(t *testing.T) {
+//        // Setup Test
+//        mockSvc := &mockClientClient{}
+//
+//        myFunc(mockSvc)
+//
+//        // Verify myFunc's functionality
+//    }
+//
+// It is important to note that this interface will have breaking changes
+// when the service model is updated and adds new API operations, paginators,
+// and waiters. It's suggested to use the pattern above for testing, or to use
+// tooling to generate mocks that satisfy the interfaces.
+type ClientAPI interface {
+	AddClientIDToOpenIDConnectProviderRequest(*iam.AddClientIDToOpenIDConnectProviderInput) iam.AddClientIDToOpenIDConnectProviderRequest
+
+	AddRoleToInstanceProfileRequest(*iam.AddRoleToInstanceProfileInput) iam.AddRoleToInstanceProfileRequest
+
+	AddUserToGroupRequest(*iam.AddUserToGroupInput) iam.AddUserToGroupRequest
+
+	AttachGroupPolicyRequest(*iam.AttachGroupPolicyInput) iam.AttachGroupPolicyRequest
+
+	AttachRolePolicyRequest(*iam.AttachRolePolicyInput) iam.AttachRolePolicyRequest
+
+	AttachUserPolicyRequest(*iam.AttachUserPolicyInput) iam.AttachUserPolicyRequest
+
+	ChangePasswordRequest(*iam.ChangePasswordInput) iam.ChangePasswordRequest
+
+	CreateAccessKeyRequest(*iam.CreateAccessKeyInput) iam.CreateAccessKeyRequest
+
+	CreateAccountAliasRequest(*iam.CreateAccountAliasInput) iam.CreateAccountAliasRequest
+
+	CreateGroupRequest(*iam.CreateGroupInput) iam.CreateGroupRequest
+
+	CreateInstanceProfileRequest(*iam.CreateInstanceProfileInput) iam.CreateInstanceProfileRequest
+
+	CreateLoginProfileRequest(*iam.CreateLoginProfileInput) iam.CreateLoginProfileRequest
+
+	CreateOpenIDConnectProviderRequest(*iam.CreateOpenIDConnectProviderInput) iam.CreateOpenIDConnectProviderRequest
+
+	CreatePolicyRequest(*iam.CreatePolicyInput) iam.CreatePolicyRequest
+
+	CreatePolicyVersionRequest(*iam.CreatePolicyVersionInput) iam.CreatePolicyVersionRequest
+
+	CreateRoleRequest(*iam.CreateRoleInput) iam.CreateRoleRequest
+
+	CreateSAMLProviderRequest(*iam.CreateSAMLProviderInput) iam.CreateSAMLProviderRequest
+
+	CreateServiceLinkedRoleRequest(*iam.CreateServiceLinkedRoleInput) iam.CreateServiceLinkedRoleRequest
+
+	CreateServiceSpecificCredentialRequest(*iam.CreateServiceSpecificCredentialInput) iam.CreateServiceSpecificCredentialRequest
+
+	CreateUserRequest(*iam.CreateUserInput) iam.CreateUserRequest
+
+	CreateVirtualMFADeviceRequest(*iam.CreateVirtualMFADeviceInput) iam.CreateVirtualMFADeviceRequest
+
+	
DeactivateMFADeviceRequest(*iam.DeactivateMFADeviceInput) iam.DeactivateMFADeviceRequest + + DeleteAccessKeyRequest(*iam.DeleteAccessKeyInput) iam.DeleteAccessKeyRequest + + DeleteAccountAliasRequest(*iam.DeleteAccountAliasInput) iam.DeleteAccountAliasRequest + + DeleteAccountPasswordPolicyRequest(*iam.DeleteAccountPasswordPolicyInput) iam.DeleteAccountPasswordPolicyRequest + + DeleteGroupRequest(*iam.DeleteGroupInput) iam.DeleteGroupRequest + + DeleteGroupPolicyRequest(*iam.DeleteGroupPolicyInput) iam.DeleteGroupPolicyRequest + + DeleteInstanceProfileRequest(*iam.DeleteInstanceProfileInput) iam.DeleteInstanceProfileRequest + + DeleteLoginProfileRequest(*iam.DeleteLoginProfileInput) iam.DeleteLoginProfileRequest + + DeleteOpenIDConnectProviderRequest(*iam.DeleteOpenIDConnectProviderInput) iam.DeleteOpenIDConnectProviderRequest + + DeletePolicyRequest(*iam.DeletePolicyInput) iam.DeletePolicyRequest + + DeletePolicyVersionRequest(*iam.DeletePolicyVersionInput) iam.DeletePolicyVersionRequest + + DeleteRoleRequest(*iam.DeleteRoleInput) iam.DeleteRoleRequest + + DeleteRolePermissionsBoundaryRequest(*iam.DeleteRolePermissionsBoundaryInput) iam.DeleteRolePermissionsBoundaryRequest + + DeleteRolePolicyRequest(*iam.DeleteRolePolicyInput) iam.DeleteRolePolicyRequest + + DeleteSAMLProviderRequest(*iam.DeleteSAMLProviderInput) iam.DeleteSAMLProviderRequest + + DeleteSSHPublicKeyRequest(*iam.DeleteSSHPublicKeyInput) iam.DeleteSSHPublicKeyRequest + + DeleteServerCertificateRequest(*iam.DeleteServerCertificateInput) iam.DeleteServerCertificateRequest + + DeleteServiceLinkedRoleRequest(*iam.DeleteServiceLinkedRoleInput) iam.DeleteServiceLinkedRoleRequest + + DeleteServiceSpecificCredentialRequest(*iam.DeleteServiceSpecificCredentialInput) iam.DeleteServiceSpecificCredentialRequest + + DeleteSigningCertificateRequest(*iam.DeleteSigningCertificateInput) iam.DeleteSigningCertificateRequest + + DeleteUserRequest(*iam.DeleteUserInput) iam.DeleteUserRequest + + DeleteUserPermissionsBoundaryRequest(*iam.DeleteUserPermissionsBoundaryInput) iam.DeleteUserPermissionsBoundaryRequest + + DeleteUserPolicyRequest(*iam.DeleteUserPolicyInput) iam.DeleteUserPolicyRequest + + DeleteVirtualMFADeviceRequest(*iam.DeleteVirtualMFADeviceInput) iam.DeleteVirtualMFADeviceRequest + + DetachGroupPolicyRequest(*iam.DetachGroupPolicyInput) iam.DetachGroupPolicyRequest + + DetachRolePolicyRequest(*iam.DetachRolePolicyInput) iam.DetachRolePolicyRequest + + DetachUserPolicyRequest(*iam.DetachUserPolicyInput) iam.DetachUserPolicyRequest + + EnableMFADeviceRequest(*iam.EnableMFADeviceInput) iam.EnableMFADeviceRequest + + GenerateCredentialReportRequest(*iam.GenerateCredentialReportInput) iam.GenerateCredentialReportRequest + + GenerateServiceLastAccessedDetailsRequest(*iam.GenerateServiceLastAccessedDetailsInput) iam.GenerateServiceLastAccessedDetailsRequest + + GetAccessKeyLastUsedRequest(*iam.GetAccessKeyLastUsedInput) iam.GetAccessKeyLastUsedRequest + + GetAccountAuthorizationDetailsRequest(*iam.GetAccountAuthorizationDetailsInput) iam.GetAccountAuthorizationDetailsRequest + + GetAccountPasswordPolicyRequest(*iam.GetAccountPasswordPolicyInput) iam.GetAccountPasswordPolicyRequest + + GetAccountSummaryRequest(*iam.GetAccountSummaryInput) iam.GetAccountSummaryRequest + + GetContextKeysForCustomPolicyRequest(*iam.GetContextKeysForCustomPolicyInput) iam.GetContextKeysForCustomPolicyRequest + + GetContextKeysForPrincipalPolicyRequest(*iam.GetContextKeysForPrincipalPolicyInput) iam.GetContextKeysForPrincipalPolicyRequest + + 
GetCredentialReportRequest(*iam.GetCredentialReportInput) iam.GetCredentialReportRequest + + GetGroupRequest(*iam.GetGroupInput) iam.GetGroupRequest + + GetGroupPolicyRequest(*iam.GetGroupPolicyInput) iam.GetGroupPolicyRequest + + GetInstanceProfileRequest(*iam.GetInstanceProfileInput) iam.GetInstanceProfileRequest + + GetLoginProfileRequest(*iam.GetLoginProfileInput) iam.GetLoginProfileRequest + + GetOpenIDConnectProviderRequest(*iam.GetOpenIDConnectProviderInput) iam.GetOpenIDConnectProviderRequest + + GetPolicyRequest(*iam.GetPolicyInput) iam.GetPolicyRequest + + GetPolicyVersionRequest(*iam.GetPolicyVersionInput) iam.GetPolicyVersionRequest + + GetRoleRequest(*iam.GetRoleInput) iam.GetRoleRequest + + GetRolePolicyRequest(*iam.GetRolePolicyInput) iam.GetRolePolicyRequest + + GetSAMLProviderRequest(*iam.GetSAMLProviderInput) iam.GetSAMLProviderRequest + + GetSSHPublicKeyRequest(*iam.GetSSHPublicKeyInput) iam.GetSSHPublicKeyRequest + + GetServerCertificateRequest(*iam.GetServerCertificateInput) iam.GetServerCertificateRequest + + GetServiceLastAccessedDetailsRequest(*iam.GetServiceLastAccessedDetailsInput) iam.GetServiceLastAccessedDetailsRequest + + GetServiceLastAccessedDetailsWithEntitiesRequest(*iam.GetServiceLastAccessedDetailsWithEntitiesInput) iam.GetServiceLastAccessedDetailsWithEntitiesRequest + + GetServiceLinkedRoleDeletionStatusRequest(*iam.GetServiceLinkedRoleDeletionStatusInput) iam.GetServiceLinkedRoleDeletionStatusRequest + + GetUserRequest(*iam.GetUserInput) iam.GetUserRequest + + GetUserPolicyRequest(*iam.GetUserPolicyInput) iam.GetUserPolicyRequest + + ListAccessKeysRequest(*iam.ListAccessKeysInput) iam.ListAccessKeysRequest + + ListAccountAliasesRequest(*iam.ListAccountAliasesInput) iam.ListAccountAliasesRequest + + ListAttachedGroupPoliciesRequest(*iam.ListAttachedGroupPoliciesInput) iam.ListAttachedGroupPoliciesRequest + + ListAttachedRolePoliciesRequest(*iam.ListAttachedRolePoliciesInput) iam.ListAttachedRolePoliciesRequest + + ListAttachedUserPoliciesRequest(*iam.ListAttachedUserPoliciesInput) iam.ListAttachedUserPoliciesRequest + + ListEntitiesForPolicyRequest(*iam.ListEntitiesForPolicyInput) iam.ListEntitiesForPolicyRequest + + ListGroupPoliciesRequest(*iam.ListGroupPoliciesInput) iam.ListGroupPoliciesRequest + + ListGroupsRequest(*iam.ListGroupsInput) iam.ListGroupsRequest + + ListGroupsForUserRequest(*iam.ListGroupsForUserInput) iam.ListGroupsForUserRequest + + ListInstanceProfilesRequest(*iam.ListInstanceProfilesInput) iam.ListInstanceProfilesRequest + + ListInstanceProfilesForRoleRequest(*iam.ListInstanceProfilesForRoleInput) iam.ListInstanceProfilesForRoleRequest + + ListMFADevicesRequest(*iam.ListMFADevicesInput) iam.ListMFADevicesRequest + + ListOpenIDConnectProvidersRequest(*iam.ListOpenIDConnectProvidersInput) iam.ListOpenIDConnectProvidersRequest + + ListPoliciesRequest(*iam.ListPoliciesInput) iam.ListPoliciesRequest + + ListPoliciesGrantingServiceAccessRequest(*iam.ListPoliciesGrantingServiceAccessInput) iam.ListPoliciesGrantingServiceAccessRequest + + ListPolicyVersionsRequest(*iam.ListPolicyVersionsInput) iam.ListPolicyVersionsRequest + + ListRolePoliciesRequest(*iam.ListRolePoliciesInput) iam.ListRolePoliciesRequest + + ListRoleTagsRequest(*iam.ListRoleTagsInput) iam.ListRoleTagsRequest + + ListRolesRequest(*iam.ListRolesInput) iam.ListRolesRequest + + ListSAMLProvidersRequest(*iam.ListSAMLProvidersInput) iam.ListSAMLProvidersRequest + + ListSSHPublicKeysRequest(*iam.ListSSHPublicKeysInput) iam.ListSSHPublicKeysRequest + + 
ListServerCertificatesRequest(*iam.ListServerCertificatesInput) iam.ListServerCertificatesRequest + + ListServiceSpecificCredentialsRequest(*iam.ListServiceSpecificCredentialsInput) iam.ListServiceSpecificCredentialsRequest + + ListSigningCertificatesRequest(*iam.ListSigningCertificatesInput) iam.ListSigningCertificatesRequest + + ListUserPoliciesRequest(*iam.ListUserPoliciesInput) iam.ListUserPoliciesRequest + + ListUserTagsRequest(*iam.ListUserTagsInput) iam.ListUserTagsRequest + + ListUsersRequest(*iam.ListUsersInput) iam.ListUsersRequest + + ListVirtualMFADevicesRequest(*iam.ListVirtualMFADevicesInput) iam.ListVirtualMFADevicesRequest + + PutGroupPolicyRequest(*iam.PutGroupPolicyInput) iam.PutGroupPolicyRequest + + PutRolePermissionsBoundaryRequest(*iam.PutRolePermissionsBoundaryInput) iam.PutRolePermissionsBoundaryRequest + + PutRolePolicyRequest(*iam.PutRolePolicyInput) iam.PutRolePolicyRequest + + PutUserPermissionsBoundaryRequest(*iam.PutUserPermissionsBoundaryInput) iam.PutUserPermissionsBoundaryRequest + + PutUserPolicyRequest(*iam.PutUserPolicyInput) iam.PutUserPolicyRequest + + RemoveClientIDFromOpenIDConnectProviderRequest(*iam.RemoveClientIDFromOpenIDConnectProviderInput) iam.RemoveClientIDFromOpenIDConnectProviderRequest + + RemoveRoleFromInstanceProfileRequest(*iam.RemoveRoleFromInstanceProfileInput) iam.RemoveRoleFromInstanceProfileRequest + + RemoveUserFromGroupRequest(*iam.RemoveUserFromGroupInput) iam.RemoveUserFromGroupRequest + + ResetServiceSpecificCredentialRequest(*iam.ResetServiceSpecificCredentialInput) iam.ResetServiceSpecificCredentialRequest + + ResyncMFADeviceRequest(*iam.ResyncMFADeviceInput) iam.ResyncMFADeviceRequest + + SetDefaultPolicyVersionRequest(*iam.SetDefaultPolicyVersionInput) iam.SetDefaultPolicyVersionRequest + + SetSecurityTokenServicePreferencesRequest(*iam.SetSecurityTokenServicePreferencesInput) iam.SetSecurityTokenServicePreferencesRequest + + SimulateCustomPolicyRequest(*iam.SimulateCustomPolicyInput) iam.SimulateCustomPolicyRequest + + SimulatePrincipalPolicyRequest(*iam.SimulatePrincipalPolicyInput) iam.SimulatePrincipalPolicyRequest + + TagRoleRequest(*iam.TagRoleInput) iam.TagRoleRequest + + TagUserRequest(*iam.TagUserInput) iam.TagUserRequest + + UntagRoleRequest(*iam.UntagRoleInput) iam.UntagRoleRequest + + UntagUserRequest(*iam.UntagUserInput) iam.UntagUserRequest + + UpdateAccessKeyRequest(*iam.UpdateAccessKeyInput) iam.UpdateAccessKeyRequest + + UpdateAccountPasswordPolicyRequest(*iam.UpdateAccountPasswordPolicyInput) iam.UpdateAccountPasswordPolicyRequest + + UpdateAssumeRolePolicyRequest(*iam.UpdateAssumeRolePolicyInput) iam.UpdateAssumeRolePolicyRequest + + UpdateGroupRequest(*iam.UpdateGroupInput) iam.UpdateGroupRequest + + UpdateLoginProfileRequest(*iam.UpdateLoginProfileInput) iam.UpdateLoginProfileRequest + + UpdateOpenIDConnectProviderThumbprintRequest(*iam.UpdateOpenIDConnectProviderThumbprintInput) iam.UpdateOpenIDConnectProviderThumbprintRequest + + UpdateRoleRequest(*iam.UpdateRoleInput) iam.UpdateRoleRequest + + UpdateRoleDescriptionRequest(*iam.UpdateRoleDescriptionInput) iam.UpdateRoleDescriptionRequest + + UpdateSAMLProviderRequest(*iam.UpdateSAMLProviderInput) iam.UpdateSAMLProviderRequest + + UpdateSSHPublicKeyRequest(*iam.UpdateSSHPublicKeyInput) iam.UpdateSSHPublicKeyRequest + + UpdateServerCertificateRequest(*iam.UpdateServerCertificateInput) iam.UpdateServerCertificateRequest + + UpdateServiceSpecificCredentialRequest(*iam.UpdateServiceSpecificCredentialInput) iam.UpdateServiceSpecificCredentialRequest + + 
UpdateSigningCertificateRequest(*iam.UpdateSigningCertificateInput) iam.UpdateSigningCertificateRequest + + UpdateUserRequest(*iam.UpdateUserInput) iam.UpdateUserRequest + + UploadSSHPublicKeyRequest(*iam.UploadSSHPublicKeyInput) iam.UploadSSHPublicKeyRequest + + UploadServerCertificateRequest(*iam.UploadServerCertificateInput) iam.UploadServerCertificateRequest + + UploadSigningCertificateRequest(*iam.UploadSigningCertificateInput) iam.UploadSigningCertificateRequest + + WaitUntilInstanceProfileExists(context.Context, *iam.GetInstanceProfileInput, ...aws.WaiterOption) error + + WaitUntilPolicyExists(context.Context, *iam.GetPolicyInput, ...aws.WaiterOption) error + + WaitUntilRoleExists(context.Context, *iam.GetRoleInput, ...aws.WaiterOption) error + + WaitUntilUserExists(context.Context, *iam.GetUserInput, ...aws.WaiterOption) error +} + +var _ ClientAPI = (*iam.Client)(nil) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 00000000..339177be --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/elastic/beats/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 00000000..d7d14f8e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. 
It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. 
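The three constructors above differ only in the invariant ƒ they install; observing values and querying quantiles work the same way for each. A minimal usage sketch, assuming the upstream import path github.com/beorn7/perks/quantile (the path this file is vendored from) and illustrative target values:

package main

import (
	"fmt"
	"math/rand"

	"github.com/beorn7/perks/quantile"
)

func main() {
	// Track the 50th, 90th and 99th percentiles, each with its own
	// absolute error, as documented for NewTargeted above.
	s := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})

	// Observe a stream of values; Insert buffers them and flushes
	// into the compressed summary once the buffer fills.
	for i := 0; i < 100000; i++ {
		s.Insert(rand.NormFloat64())
	}

	fmt.Println("p50:", s.Query(0.50)) // ~0 for a standard normal
	fmt.Println("p99:", s.Query(0.99)) // ~2.33
}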
+func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? 
+ } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. + copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go new file mode 100644 index 00000000..a6f0febe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/common/v1/common.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type LibraryInfo_Language int32 + +const ( + LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0 + LibraryInfo_CPP LibraryInfo_Language = 1 + LibraryInfo_C_SHARP LibraryInfo_Language = 2 + LibraryInfo_ERLANG LibraryInfo_Language = 3 + LibraryInfo_GO_LANG LibraryInfo_Language = 4 + LibraryInfo_JAVA LibraryInfo_Language = 5 + LibraryInfo_NODE_JS LibraryInfo_Language = 6 + LibraryInfo_PHP LibraryInfo_Language = 7 + LibraryInfo_PYTHON LibraryInfo_Language = 8 + LibraryInfo_RUBY LibraryInfo_Language = 9 + LibraryInfo_WEB_JS LibraryInfo_Language = 10 +) + +var LibraryInfo_Language_name = map[int32]string{ + 0: "LANGUAGE_UNSPECIFIED", + 1: "CPP", + 2: "C_SHARP", + 3: "ERLANG", + 4: "GO_LANG", + 5: "JAVA", + 6: "NODE_JS", + 7: "PHP", + 8: "PYTHON", + 9: "RUBY", + 10: "WEB_JS", +} + +var LibraryInfo_Language_value = map[string]int32{ + "LANGUAGE_UNSPECIFIED": 0, + "CPP": 1, + "C_SHARP": 2, + "ERLANG": 3, + "GO_LANG": 4, + "JAVA": 5, + "NODE_JS": 6, + "PHP": 7, + "PYTHON": 8, + "RUBY": 9, + "WEB_JS": 10, +} + +func (x LibraryInfo_Language) String() string { + return proto.EnumName(LibraryInfo_Language_name, int32(x)) +} + +func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2, 0} +} + +// Identifier metadata of the Node that produces the span or tracing data. +// Note, this is not the metadata about the Node or service that is described by associated spans. +// In the future we plan to extend the identifier proto definition to support +// additional information (e.g cloud id, etc.) +type Node struct { + // Identifier that uniquely identifies a process within a VM/container. + Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // Information on the OpenCensus Library that initiates the stream. + LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"` + // Additional information on service. + ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"` + // Additional attributes. 
+ Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{0} +} + +func (m *Node) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Node.Unmarshal(m, b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Node.Marshal(b, m, deterministic) +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return xxx_messageInfo_Node.Size(m) +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *Node) GetIdentifier() *ProcessIdentifier { + if m != nil { + return m.Identifier + } + return nil +} + +func (m *Node) GetLibraryInfo() *LibraryInfo { + if m != nil { + return m.LibraryInfo + } + return nil +} + +func (m *Node) GetServiceInfo() *ServiceInfo { + if m != nil { + return m.ServiceInfo + } + return nil +} + +func (m *Node) GetAttributes() map[string]string { + if m != nil { + return m.Attributes + } + return nil +} + +// Identifier that uniquely identifies a process within a VM/container. +type ProcessIdentifier struct { + // The host name. Usually refers to the machine/container name. + // For example: os.Hostname() in Go, socket.gethostname() in Python. + HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"` + // Process id. + Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"` + // Start time of this ProcessIdentifier. Represented in epoch time. 
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} } +func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) } +func (*ProcessIdentifier) ProtoMessage() {} +func (*ProcessIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{1} +} + +func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b) +} +func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic) +} +func (m *ProcessIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProcessIdentifier.Merge(m, src) +} +func (m *ProcessIdentifier) XXX_Size() int { + return xxx_messageInfo_ProcessIdentifier.Size(m) +} +func (m *ProcessIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo + +func (m *ProcessIdentifier) GetHostName() string { + if m != nil { + return m.HostName + } + return "" +} + +func (m *ProcessIdentifier) GetPid() uint32 { + if m != nil { + return m.Pid + } + return 0 +} + +func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +// Information on OpenCensus Library. +type LibraryInfo struct { + // Language of OpenCensus Library. + Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"` + // Version of Agent exporter of Library. + ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"` + // Version of OpenCensus Library. 
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LibraryInfo) Reset() { *m = LibraryInfo{} } +func (m *LibraryInfo) String() string { return proto.CompactTextString(m) } +func (*LibraryInfo) ProtoMessage() {} +func (*LibraryInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{2} +} + +func (m *LibraryInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LibraryInfo.Unmarshal(m, b) +} +func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic) +} +func (m *LibraryInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_LibraryInfo.Merge(m, src) +} +func (m *LibraryInfo) XXX_Size() int { + return xxx_messageInfo_LibraryInfo.Size(m) +} +func (m *LibraryInfo) XXX_DiscardUnknown() { + xxx_messageInfo_LibraryInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo + +func (m *LibraryInfo) GetLanguage() LibraryInfo_Language { + if m != nil { + return m.Language + } + return LibraryInfo_LANGUAGE_UNSPECIFIED +} + +func (m *LibraryInfo) GetExporterVersion() string { + if m != nil { + return m.ExporterVersion + } + return "" +} + +func (m *LibraryInfo) GetCoreLibraryVersion() string { + if m != nil { + return m.CoreLibraryVersion + } + return "" +} + +// Additional service information. +type ServiceInfo struct { + // Name of the service. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceInfo) Reset() { *m = ServiceInfo{} } +func (m *ServiceInfo) String() string { return proto.CompactTextString(m) } +func (*ServiceInfo) ProtoMessage() {} +func (*ServiceInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_126c72ed8a252c84, []int{3} +} + +func (m *ServiceInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceInfo.Unmarshal(m, b) +} +func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic) +} +func (m *ServiceInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceInfo.Merge(m, src) +} +func (m *ServiceInfo) XXX_Size() int { + return xxx_messageInfo_ServiceInfo.Size(m) +} +func (m *ServiceInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo + +func (m *ServiceInfo) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func init() { + proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value) + proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry") + proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier") + proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo") + proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84) +} + 
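Before the raw descriptor below, a brief illustration: a hedged sketch of how a client of this generated package could populate a Node. The field values are invented; the import path is the gen-go layout this vendor tree already uses.

package main

import (
	"fmt"

	commonpb "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
)

func main() {
	node := &commonpb.Node{
		Identifier: &commonpb.ProcessIdentifier{
			HostName: "web-01", // illustrative machine/container name
			Pid:      4242,     // illustrative process id
		},
		LibraryInfo: &commonpb.LibraryInfo{
			Language:           commonpb.LibraryInfo_GO_LANG,
			CoreLibraryVersion: "0.2.1", // illustrative version
		},
		ServiceInfo: &commonpb.ServiceInfo{Name: "checkout"},
		Attributes:  map[string]string{"deployment": "canary"},
	}
	fmt.Println(node.String())
}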
+var fileDescriptor_126c72ed8a252c84 = []byte{ + // 618 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x6e, 0xda, 0x4e, + 0x14, 0xc7, 0x7f, 0xc6, 0x24, 0x81, 0xe7, 0x5f, 0x13, 0x77, 0x94, 0x05, 0x4a, 0x17, 0xa5, 0x74, + 0x93, 0x2e, 0xb0, 0x9b, 0x44, 0xaa, 0xaa, 0x4a, 0x5d, 0x18, 0xe2, 0x26, 0x44, 0x11, 0x58, 0x26, + 0xa1, 0x4a, 0x37, 0x96, 0x21, 0x83, 0x33, 0x2a, 0x9e, 0x41, 0xe3, 0x31, 0x2a, 0x27, 0xe8, 0x09, + 0xda, 0x03, 0xf4, 0x50, 0x3d, 0x44, 0x4f, 0x51, 0xcd, 0x8c, 0x01, 0xab, 0x59, 0x90, 0xdd, 0xfb, + 0xf3, 0xfd, 0x7e, 0x9e, 0xf5, 0xe6, 0xc9, 0xd0, 0x66, 0x73, 0x4c, 0x27, 0x98, 0x66, 0x79, 0xe6, + 0xce, 0x39, 0x13, 0xcc, 0x8d, 0x13, 0x4c, 0x85, 0x3b, 0x61, 0x69, 0xca, 0xa8, 0xbb, 0x38, 0x29, + 0x22, 0x47, 0x35, 0x51, 0x73, 0x23, 0xd7, 0x15, 0x47, 0xc9, 0x9d, 0x42, 0xb4, 0x38, 0x39, 0x7a, + 0x99, 0x30, 0x96, 0xcc, 0xb0, 0x86, 0x8d, 0xf3, 0xa9, 0x2b, 0x48, 0x8a, 0x33, 0x11, 0xa7, 0x73, + 0x6d, 0x68, 0xfd, 0x34, 0xa1, 0xda, 0x67, 0xf7, 0x18, 0x0d, 0x01, 0xc8, 0x3d, 0xa6, 0x82, 0x4c, + 0x09, 0xe6, 0x0d, 0xa3, 0x69, 0x1c, 0x5b, 0xa7, 0x67, 0xce, 0xb6, 0x01, 0x4e, 0xc0, 0xd9, 0x04, + 0x67, 0x59, 0x6f, 0x6d, 0x0d, 0x4b, 0x18, 0x14, 0xc0, 0xff, 0x33, 0x32, 0xe6, 0x31, 0x5f, 0x46, + 0x84, 0x4e, 0x59, 0xa3, 0xa2, 0xb0, 0xed, 0xed, 0xd8, 0x6b, 0xed, 0xea, 0xd1, 0x29, 0x0b, 0xad, + 0xd9, 0x26, 0x91, 0xc4, 0x0c, 0xf3, 0x05, 0x99, 0x60, 0x4d, 0x34, 0x9f, 0x4a, 0x1c, 0x6a, 0x97, + 0x26, 0x66, 0x9b, 0x04, 0x8d, 0x00, 0x62, 0x21, 0x38, 0x19, 0xe7, 0x02, 0x67, 0x8d, 0x6a, 0xd3, + 0x3c, 0xb6, 0x4e, 0xdf, 0x6d, 0xe7, 0xc9, 0xa5, 0x39, 0xde, 0xda, 0xe8, 0x53, 0xc1, 0x97, 0x61, + 0x89, 0x74, 0xf4, 0x11, 0x0e, 0xfe, 0x69, 0x23, 0x1b, 0xcc, 0xaf, 0x78, 0xa9, 0x96, 0x5b, 0x0f, + 0x65, 0x88, 0x0e, 0x61, 0x67, 0x11, 0xcf, 0x72, 0xac, 0x36, 0x53, 0x0f, 0x75, 0xf2, 0xa1, 0xf2, + 0xde, 0x68, 0x7d, 0x37, 0xe0, 0xf9, 0xa3, 0xe5, 0xa2, 0x17, 0x50, 0x7f, 0x60, 0x99, 0x88, 0x68, + 0x9c, 0xe2, 0x82, 0x53, 0x93, 0x85, 0x7e, 0x9c, 0x62, 0x89, 0x9f, 0x93, 0x7b, 0x85, 0x7a, 0x16, + 0xca, 0x10, 0x75, 0xe1, 0x20, 0x13, 0x31, 0x17, 0xd1, 0xfa, 0xd9, 0x8b, 0x85, 0x1d, 0x39, 0xfa, + 0x30, 0x9c, 0xd5, 0x61, 0x38, 0x37, 0x2b, 0x45, 0xb8, 0xaf, 0x2c, 0xeb, 0xbc, 0xf5, 0xbb, 0x02, + 0x56, 0xe9, 0x3d, 0x50, 0x08, 0xb5, 0x59, 0x4c, 0x93, 0x3c, 0x4e, 0xf4, 0x27, 0xec, 0x3f, 0x65, + 0x5d, 0x25, 0x80, 0x73, 0x5d, 0xb8, 0xc3, 0x35, 0x07, 0xbd, 0x01, 0x1b, 0x7f, 0x9b, 0x33, 0x2e, + 0x30, 0x8f, 0x16, 0x98, 0x67, 0x84, 0xd1, 0x62, 0x25, 0x07, 0xab, 0xfa, 0x48, 0x97, 0xd1, 0x5b, + 0x38, 0x9c, 0x30, 0x8e, 0xa3, 0xd5, 0x61, 0xad, 0xe4, 0xa6, 0x92, 0x23, 0xd9, 0x2b, 0x86, 0x15, + 0x8e, 0xd6, 0x0f, 0x03, 0x6a, 0xab, 0x99, 0xa8, 0x01, 0x87, 0xd7, 0x5e, 0xff, 0xe2, 0xd6, 0xbb, + 0xf0, 0xa3, 0xdb, 0xfe, 0x30, 0xf0, 0xbb, 0xbd, 0x4f, 0x3d, 0xff, 0xdc, 0xfe, 0x0f, 0xed, 0x81, + 0xd9, 0x0d, 0x02, 0xdb, 0x40, 0x16, 0xec, 0x75, 0xa3, 0xe1, 0xa5, 0x17, 0x06, 0x76, 0x05, 0x01, + 0xec, 0xfa, 0xa1, 0x74, 0xd8, 0xa6, 0x6c, 0x5c, 0x0c, 0x22, 0x95, 0x54, 0x51, 0x0d, 0xaa, 0x57, + 0xde, 0xc8, 0xb3, 0x77, 0x64, 0xb9, 0x3f, 0x38, 0xf7, 0xa3, 0xab, 0xa1, 0xbd, 0x2b, 0x29, 0xc1, + 0x65, 0x60, 0xef, 0x49, 0x63, 0x70, 0x77, 0x73, 0x39, 0xe8, 0xdb, 0x35, 0xa9, 0x0d, 0x6f, 0x3b, + 0x77, 0x76, 0x5d, 0x56, 0x3f, 0xfb, 0x1d, 0x29, 0x85, 0xd6, 0x2b, 0xb0, 0x4a, 0x57, 0x89, 0x10, + 0x54, 0x4b, 0xcf, 0xaa, 0xe2, 0xce, 0x2f, 0x03, 0x5e, 0x13, 0xb6, 0x75, 0xbd, 0x1d, 0xab, 0xab, + 0xc2, 0x40, 0x36, 0x03, 0xe3, 0x4b, 0x2f, 0x21, 0xe2, 0x21, 0x1f, 0x4b, 0x81, 0xab, 0x7d, 0x6d, + 0x42, 0x33, 0xc1, 0xf3, 
0x14, 0x53, 0x11, 0x0b, 0xc2, 0xa8, 0xbb, 0x41, 0xb6, 0xf5, 0x9f, 0x26, + 0xc1, 0xb4, 0x9d, 0x3c, 0xfa, 0xe1, 0xfc, 0xa9, 0x34, 0x07, 0x73, 0x4c, 0xbb, 0x7a, 0xb8, 0xe2, + 0x3b, 0x9e, 0x1a, 0xae, 0x27, 0x3a, 0xa3, 0x93, 0xf1, 0xae, 0x02, 0x9c, 0xfd, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x53, 0x74, 0x5e, 0xbe, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go new file mode 100644 index 00000000..5f222b47 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go @@ -0,0 +1,275 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/metrics/v1/metrics_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ExportMetricsServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportMetricsServiceRequest message has a different Node (e.g. + // when the same RPC is used to send Metrics from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of metrics that belong to the last received Node. + Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"` + // The resource for the metrics in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known + // at all or when all sent metrics have an explicit resource set. 
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} } +func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceRequest) ProtoMessage() {} +func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{0} +} + +func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b) +} +func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src) +} +func (m *ExportMetricsServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceRequest.Size(m) +} +func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo + +func (m *ExportMetricsServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric { + if m != nil { + return m.Metrics + } + return nil +} + +func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportMetricsServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} } +func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportMetricsServiceResponse) ProtoMessage() {} +func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_47e253a956287d04, []int{1} +} + +func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b) +} +func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src) +} +func (m *ExportMetricsServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportMetricsServiceResponse.Size(m) +} +func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest") + proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04) +} + +var fileDescriptor_47e253a956287d04 = []byte{ + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x4a, 0xc3, 0x40, + 0x14, 0x86, 0x9d, 0x56, 0xaa, 0x4c, 0xc1, 0x45, 0xdc, 0x94, 0x2a, 0xd2, 0x56, 0x91, 0x8a, 0x64, + 0x62, 0xea, 0x42, 0x10, 0x54, 0xac, 0xb8, 0x11, 0xd4, 0x12, 0xc1, 0x85, 0x1b, 0x69, 0xd3, 0x47, + 0xcc, 0x22, 0x33, 0x71, 0x66, 0x12, 0xbc, 0x85, 0x77, 0x70, 0xef, 0x8d, 0x3c, 0x81, 0xa7, 0x90, + 0xe4, 0x4d, 0x5a, 0x4a, 0x8c, 0x05, 0x77, 0x8f, 0xe4, 0xff, 0xfe, 0xf7, 0xff, 0x33, 0x43, 0x4f, + 0x44, 0x0c, 0xdc, 0x07, 0xae, 0x12, 0xe5, 0xc4, 0x52, 0x68, 0xe1, 0x8c, 0x03, 0xe0, 0xda, 0x89, + 0x40, 0xcb, 0xd0, 0x57, 0x4e, 0xea, 0x16, 0xe3, 0xb3, 0x02, 0x99, 0x86, 0x3e, 0xb0, 0x5c, 0x66, + 0x75, 0xe7, 0x20, 0x7e, 0x61, 0x39, 0xc8, 0x8c, 0x9a, 0xa5, 0x6e, 0xdb, 0xae, 0xf0, 0xf6, 0x45, + 0x14, 0x09, 0x9e, 0x59, 0xe3, 0x84, 0x7c, 0xfb, 0xa0, 0x24, 0x2f, 0x87, 0x30, 0xd2, 0xc3, 0x92, + 0x54, 0x82, 0x12, 0x89, 0xf4, 0x21, 0xd3, 0x16, 0x33, 0x8a, 0x7b, 0x5f, 0x84, 0x6e, 0x5d, 0xbf, + 0xc5, 0x42, 0xea, 0x5b, 0x34, 0x79, 0xc0, 0x22, 0x1e, 0xbc, 0x26, 0xa0, 0xb4, 0x75, 0x4a, 0x57, + 0xb9, 0x98, 0x42, 0x8b, 0x74, 0x48, 0xbf, 0x39, 0xd8, 0x67, 0x15, 0xc5, 0x4c, 0xd6, 0xd4, 0x65, + 0x77, 0x62, 0x0a, 0x5e, 0xce, 0x58, 0x67, 0x74, 0xcd, 0x24, 0x6b, 0xd5, 0x3a, 0xf5, 0x7e, 0x73, + 0xb0, 0x5b, 0xc6, 0xe7, 0x27, 0xc2, 0x30, 0x80, 0x57, 0x30, 0xd6, 0x90, 0xae, 0x17, 0x61, 0x5b, + 0xf5, 0xaa, 0xf5, 0xb3, 0x3a, 0xa9, 0xcb, 0x3c, 0x33, 0x7b, 0x33, 0xae, 0xb7, 0x43, 0xb7, 0x7f, + 0x6f, 0xa7, 0x62, 0xc1, 0x15, 0x0c, 0x3e, 0x08, 0xdd, 0x58, 0xfc, 0x65, 0xbd, 0x13, 0xda, 0x40, + 0xc6, 0x3a, 0x67, 0x4b, 0xef, 0x91, 0xfd, 0x71, 0x78, 0xed, 0x8b, 0x7f, 0xf3, 0x18, 0xaf, 0xb7, + 0xd2, 0x27, 0x47, 0x64, 0xf8, 0x49, 0xe8, 0x5e, 0x28, 0x96, 0x7b, 0x0d, 0x37, 0x17, 0x6d, 0x46, + 0x99, 0x6a, 0x44, 0x9e, 0x6e, 0x82, 0x50, 0xbf, 0x24, 0x93, 0xec, 0x92, 0x1c, 0x34, 0xb0, 0x43, + 0xae, 0xb4, 0x4c, 0x22, 0xe0, 0x7a, 0xac, 0x43, 0xc1, 0x9d, 0xb9, 0xb7, 0x8d, 0x4f, 0x26, 0x00, + 0x6e, 0x07, 0xe5, 0xf7, 0xfe, 0x5d, 0xeb, 0xde, 0xc7, 0xc0, 0xaf, 0x30, 0x46, 0xbe, 0x80, 0x5d, + 0xe6, 0x31, 0xcc, 0x6a, 0xf6, 0xe8, 0x4e, 0x1a, 0xb9, 0xc5, 0xf1, 0x4f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x19, 0x28, 0xa4, 0x50, 0x3f, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MetricsServiceClient is the client API for MetricsService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MetricsServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) +} + +type metricsServiceClient struct { + cc *grpc.ClientConn +} + +func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient { + return &metricsServiceClient{cc} +} + +func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...) 
+ if err != nil { + return nil, err + } + x := &metricsServiceExportClient{stream} + return x, nil +} + +type MetricsService_ExportClient interface { + Send(*ExportMetricsServiceRequest) error + Recv() (*ExportMetricsServiceResponse, error) + grpc.ClientStream +} + +type metricsServiceExportClient struct { + grpc.ClientStream +} + +func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) { + m := new(ExportMetricsServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// MetricsServiceServer is the server API for MetricsService service. +type MetricsServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(MetricsService_ExportServer) error +} + +// UnimplementedMetricsServiceServer can be embedded to have forward compatible implementations. +type UnimplementedMetricsServiceServer struct { +} + +func (*UnimplementedMetricsServiceServer) Export(srv MetricsService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) { + s.RegisterService(&_MetricsService_serviceDesc, srv) +} + +func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream}) +} + +type MetricsService_ExportServer interface { + Send(*ExportMetricsServiceResponse) error + Recv() (*ExportMetricsServiceRequest, error) + grpc.ServerStream +} + +type metricsServiceExportServer struct { + grpc.ServerStream +} + +func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) { + m := new(ExportMetricsServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _MetricsService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService", + HandlerType: (*MetricsServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Export", + Handler: _MetricsService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto", +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go new file mode 100644 index 00000000..158c1608 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.gw.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opencensus/proto/agent/metrics/v1/metrics_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
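+
+For this service that means mapping HTTP POSTs matching the "/v1/metrics"
+pattern registered below onto the bidirectional Export stream: request
+bodies are decoded as JSON and the response stream is forwarded back to
+the HTTP caller.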
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_MetricsService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client MetricsServiceClient, req *http.Request, pathParams map[string]string) (MetricsService_ExportClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Export(ctx) + if err != nil { + grpclog.Infof("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq ExportMetricsServiceRequest + err := dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Infof("Failed to decode request: %v", err) + return err + } + if err := stream.Send(&protoReq); err != nil { + grpclog.Infof("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Infof("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Infof("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Infof("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +// RegisterMetricsServiceHandlerFromEndpoint is same as RegisterMetricsServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterMetricsServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterMetricsServiceHandler(ctx, mux, conn) +} + +// RegisterMetricsServiceHandler registers the http handlers for service MetricsService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterMetricsServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterMetricsServiceHandlerClient(ctx, mux, NewMetricsServiceClient(conn)) +} + +// RegisterMetricsServiceHandlerClient registers the http handlers for service MetricsService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "MetricsServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "MetricsServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) 
then it will be up to the passed in +// "MetricsServiceClient" to call the correct interceptors. +func RegisterMetricsServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client MetricsServiceClient) error { + + mux.Handle("POST", pattern_MetricsService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_MetricsService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_MetricsService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_MetricsService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "metrics"}, "")) +) + +var ( + forward_MetricsService_Export_0 = runtime.ForwardResponseStream +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go new file mode 100644 index 00000000..a0a3504d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go @@ -0,0 +1,457 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1" + v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type CurrentLibraryConfig struct { + // This is required only in the first message on the stream or if the + // previous sent CurrentLibraryConfig message has a different Node (e.g. + // when the same RPC is used to configure multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Current configuration. 
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} } +func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*CurrentLibraryConfig) ProtoMessage() {} +func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{0} +} + +func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b) +} +func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic) +} +func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_CurrentLibraryConfig.Merge(m, src) +} +func (m *CurrentLibraryConfig) XXX_Size() int { + return xxx_messageInfo_CurrentLibraryConfig.Size(m) +} +func (m *CurrentLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo + +func (m *CurrentLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type UpdatedLibraryConfig struct { + // This field is ignored when the RPC is used to configure only one Application. + // This is required only in the first message on the stream or if the + // previous sent UpdatedLibraryConfig message has a different Node. + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // Requested updated configuration. + Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} } +func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) } +func (*UpdatedLibraryConfig) ProtoMessage() {} +func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{1} +} + +func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b) +} +func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic) +} +func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src) +} +func (m *UpdatedLibraryConfig) XXX_Size() int { + return xxx_messageInfo_UpdatedLibraryConfig.Size(m) +} +func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() { + xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo + +func (m *UpdatedLibraryConfig) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig { + if m != nil { + return m.Config + } + return nil +} + +type ExportTraceServiceRequest struct { + // This is required only in the first message on the stream or if the + // previous sent ExportTraceServiceRequest message has a different Node (e.g. 
+ // when the same RPC is used to send Spans from multiple Applications). + Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"` + // A list of Spans that belong to the last received Node. + Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // The resource for the spans in this message that do not have an explicit + // resource set. + // If unset, the most recently set resource in the RPC stream applies. It is + // valid to never be set within a stream, e.g. when no resource info is known. + Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} } +func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceRequest) ProtoMessage() {} +func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{2} +} + +func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b) +} +func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src) +} +func (m *ExportTraceServiceRequest) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceRequest.Size(m) +} +func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo + +func (m *ExportTraceServiceRequest) GetNode() *v1.Node { + if m != nil { + return m.Node + } + return nil +} + +func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span { + if m != nil { + return m.Spans + } + return nil +} + +func (m *ExportTraceServiceRequest) GetResource() *v12.Resource { + if m != nil { + return m.Resource + } + return nil +} + +type ExportTraceServiceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} } +func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) } +func (*ExportTraceServiceResponse) ProtoMessage() {} +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_7027f99caf7ac6a5, []int{3} +} + +func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b) +} +func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic) +} +func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src) +} +func (m *ExportTraceServiceResponse) XXX_Size() int { + return xxx_messageInfo_ExportTraceServiceResponse.Size(m) +} +func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig") + proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig") + proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest") + proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse") +} + +func init() { + proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5) +} + +var fileDescriptor_7027f99caf7ac6a5 = []byte{ + // 442 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x94, 0xcf, 0xaa, 0xd4, 0x30, + 0x14, 0xc6, 0x4d, 0xaf, 0x16, 0xc9, 0x75, 0x63, 0x71, 0x51, 0x8b, 0x30, 0x97, 0x82, 0x32, 0xa0, + 0x4d, 0xed, 0x5c, 0xee, 0xe6, 0x0a, 0x82, 0x33, 0x08, 0x2e, 0x44, 0x2f, 0x1d, 0x75, 0xe1, 0x66, + 0xe8, 0xb4, 0xc7, 0xda, 0xc5, 0x24, 0x31, 0x49, 0x8b, 0x82, 0x7b, 0xf7, 0x2e, 0x7c, 0x03, 0x5f, + 0xc8, 0xc7, 0xf0, 0x29, 0xa4, 0x39, 0x9d, 0x3f, 0x3a, 0x53, 0x0b, 0xba, 0xb9, 0xbb, 0x43, 0xf3, + 0xfd, 0xbe, 0xf3, 0x25, 0x39, 0x29, 0x3d, 0x15, 0x12, 0x78, 0x0e, 0x5c, 0xd7, 0x3a, 0x96, 0x4a, + 0x18, 0x11, 0x67, 0x25, 0x70, 0x13, 0x1b, 0x95, 0xe5, 0x10, 0x37, 0x09, 0x16, 0x0b, 0x0d, 0xaa, + 0xa9, 0x72, 0x60, 0x56, 0xe2, 0x8d, 0xb6, 0x10, 0x7e, 0x61, 0x16, 0x62, 0x56, 0xcb, 0x9a, 0x24, + 0x88, 0x7a, 0x5c, 0x73, 0xb1, 0x5a, 0x09, 0xde, 0xda, 0x62, 0x85, 0x74, 0x70, 0x7f, 0x4f, 0xae, + 0x40, 0x8b, 0x5a, 0x61, 0x82, 0x75, 0xdd, 0x89, 0xef, 0xee, 0x89, 0x7f, 0xcf, 0xda, 0xc9, 0x1e, + 0x0c, 0xc8, 0x16, 0xb9, 0xe0, 0xef, 0xaa, 0x12, 0xd5, 0xe1, 0x57, 0x42, 0x6f, 0xcd, 0x6a, 0xa5, + 0x80, 0x9b, 0xe7, 0xd5, 0x52, 0x65, 0xea, 0xd3, 0xcc, 0x2e, 0x7b, 0xe7, 0xf4, 0x2a, 0x17, 0x05, + 0xf8, 0xe4, 0x84, 0x8c, 0x8f, 0x27, 0xf7, 0x58, 0xcf, 0xce, 0xbb, 0xed, 0x34, 0x09, 0x7b, 0x21, + 0x0a, 0x48, 0x2d, 0xe3, 0x3d, 0xa6, 0x2e, 0x36, 0xf1, 0x9d, 0x3e, 0x7a, 0x7d, 0x62, 0xec, 0x55, + 0x5b, 0x60, 0xcf, 0xb4, 0xa3, 0x6c, 0xa8, 0xd7, 0xb2, 0xc8, 0x0c, 0x14, 0x97, 0x27, 0xd4, 0x0f, + 0x42, 0x6f, 0x3f, 0xfd, 0x28, 0x85, 0x32, 0x76, 0x75, 0x8e, 0x83, 0x91, 0xc2, 0x87, 0x1a, 0xb4, + 0xf9, 0xaf, 0x64, 0x67, 0xf4, 0x9a, 0x96, 0x19, 0xd7, 0xbe, 0x73, 0x72, 0x34, 0x3e, 0x9e, 0x8c, + 0xfe, 0x12, 0x6c, 0x2e, 0x33, 0x9e, 0xa2, 0xda, 0x9b, 0xd2, 0xeb, 0xeb, 0x09, 0xf1, 0x8f, 0xfa, + 0xda, 0x6e, 0x66, 0xa8, 0x49, 0x58, 0xda, 0xd5, 0xe9, 0x86, 0x0b, 0xef, 0xd0, 0xe0, 0xd0, 0x9e, + 0xb4, 0x14, 0x5c, 0xc3, 0xe4, 0x9b, 0x43, 0x6f, 0xec, 0x2e, 0x78, 0x9f, 0xa9, 0xdb, 0xdd, 0xc4, + 0x19, 0x1b, 0x78, 0x0a, 0xec, 0xd0, 0x54, 0x05, 0xc3, 0xd8, 0xa1, 0x7b, 0x0f, 0xaf, 0x8c, 0xc9, + 0x43, 0xe2, 0x7d, 0x21, 0xd4, 0xc5, 0xb4, 0xde, 0xf9, 0xa0, 0x4f, 0xef, 0x55, 0x05, 0x8f, 0xfe, + 0x89, 0xc5, 0x23, 0xc1, 0x24, 0xd3, 0xef, 0x84, 0x86, 0x95, 0x18, 0xf2, 0x99, 0xde, 0xdc, 0xb5, + 0xb8, 0x68, 0x15, 0x17, 0xe4, 0xed, 0xb3, 0xb2, 0x32, 0xef, 0xeb, 0x65, 0x3b, 0x0a, 0x31, 0xc2, + 0x51, 0xc5, 0xb5, 0x51, 0xf5, 0x0a, 0xb8, 0xc9, 0x4c, 0x25, 0x78, 0xbc, 0xf5, 0x8d, 0xf0, 0x05, + 0x97, 0xc0, 0xa3, 0xf2, 0xcf, 0x3f, 0xd4, 0x4f, 0x67, 0xf4, 0x52, 0x02, 0x9f, 0x61, 0x00, 0x6b, + 0xcf, 0x9e, 0xd8, 0x00, 0xb6, 0x2d, 0x7b, 0x93, 0x2c, 0x5d, 0x8b, 0x9f, 0xfe, 0x0a, 0x00, 0x00, + 0xff, 0xff, 0x65, 0x76, 0xd7, 0xb9, 0xed, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type TraceServiceClient interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) +} + +type traceServiceClient struct { + cc *grpc.ClientConn +} + +func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...) + if err != nil { + return nil, err + } + x := &traceServiceConfigClient{stream} + return x, nil +} + +type TraceService_ConfigClient interface { + Send(*CurrentLibraryConfig) error + Recv() (*UpdatedLibraryConfig, error) + grpc.ClientStream +} + +type traceServiceConfigClient struct { + grpc.ClientStream +} + +func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) { + m := new(UpdatedLibraryConfig) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) { + stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...) + if err != nil { + return nil, err + } + x := &traceServiceExportClient{stream} + return x, nil +} + +type TraceService_ExportClient interface { + Send(*ExportTraceServiceRequest) error + Recv() (*ExportTraceServiceResponse, error) + grpc.ClientStream +} + +type traceServiceExportClient struct { + grpc.ClientStream +} + +func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) { + m := new(ExportTraceServiceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// TraceServiceServer is the server API for TraceService service. +type TraceServiceServer interface { + // After initialization, this RPC must be kept alive for the entire life of + // the application. The agent pushes configs down to applications via a + // stream. + Config(TraceService_ConfigServer) error + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(TraceService_ExportServer) error +} + +// UnimplementedTraceServiceServer can be embedded to have forward compatible implementations. 
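+//
+// Purely as an illustration (this sketch is not part of the generated file;
+// the exportOnlyServer name is hypothetical and it assumes the io package is
+// imported), a server that overrides only Export could look like:
+//
+//	type exportOnlyServer struct {
+//		UnimplementedTraceServiceServer
+//	}
+//
+//	func (s *exportOnlyServer) Export(stream TraceService_ExportServer) error {
+//		for {
+//			req, err := stream.Recv()
+//			if err == io.EOF {
+//				return nil // the client closed its side of the stream
+//			}
+//			if err != nil {
+//				return err
+//			}
+//			_ = req.GetSpans() // process the received spans here
+//		}
+//	}
+//
+// Calls to Config would fall through to the embedded implementation and
+// return codes.Unimplemented.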
+type UnimplementedTraceServiceServer struct { +} + +func (*UnimplementedTraceServiceServer) Config(srv TraceService_ConfigServer) error { + return status.Errorf(codes.Unimplemented, "method Config not implemented") +} +func (*UnimplementedTraceServiceServer) Export(srv TraceService_ExportServer) error { + return status.Errorf(codes.Unimplemented, "method Export not implemented") +} + +func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream}) +} + +type TraceService_ConfigServer interface { + Send(*UpdatedLibraryConfig) error + Recv() (*CurrentLibraryConfig, error) + grpc.ServerStream +} + +type traceServiceConfigServer struct { + grpc.ServerStream +} + +func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) { + m := new(CurrentLibraryConfig) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream}) +} + +type TraceService_ExportServer interface { + Send(*ExportTraceServiceResponse) error + Recv() (*ExportTraceServiceRequest, error) + grpc.ServerStream +} + +type traceServiceExportServer struct { + grpc.ServerStream +} + +func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) { + m := new(ExportTraceServiceRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opencensus.proto.agent.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Config", + Handler: _TraceService_Config_Handler, + ServerStreams: true, + ClientStreams: true, + }, + { + StreamName: "Export", + Handler: _TraceService_Export_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto", +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go new file mode 100644 index 00000000..334331b0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opencensus/proto/agent/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
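+
+For the TraceService, the Export stream is registered below as an HTTP
+endpoint at POST /v1/trace.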
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) { + var metadata runtime.ServerMetadata + stream, err := client.Export(ctx) + if err != nil { + grpclog.Infof("Failed to start streaming: %v", err) + return nil, metadata, err + } + dec := marshaler.NewDecoder(req.Body) + handleSend := func() error { + var protoReq ExportTraceServiceRequest + err := dec.Decode(&protoReq) + if err == io.EOF { + return err + } + if err != nil { + grpclog.Infof("Failed to decode request: %v", err) + return err + } + if err := stream.Send(&protoReq); err != nil { + grpclog.Infof("Failed to send request: %v", err) + return err + } + return nil + } + if err := handleSend(); err != nil { + if cerr := stream.CloseSend(); cerr != nil { + grpclog.Infof("Failed to terminate client stream: %v", cerr) + } + if err == io.EOF { + return stream, metadata, nil + } + return nil, metadata, err + } + go func() { + for { + if err := handleSend(); err != nil { + break + } + } + if err := stream.CloseSend(); err != nil { + grpclog.Infof("Failed to terminate client stream: %v", err) + } + }() + header, err := stream.Header() + if err != nil { + grpclog.Infof("Failed to get header from client: %v", err) + return nil, metadata, err + } + metadata.HeaderMD = header + return stream, metadata, nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
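+//
+// For reference, a minimal wiring sketch (the endpoint address, port, and the
+// log import are illustrative assumptions, not part of this file):
+//
+//	mux := runtime.NewServeMux()
+//	opts := []grpc.DialOption{grpc.WithInsecure()}
+//	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:55678", opts); err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(http.ListenAndServe(":8080", mux))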
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "")) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseStream +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go new file mode 100644 index 00000000..466b2342 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go @@ -0,0 +1,1127 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/metrics/v1/metrics.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// The kind of metric. It describes how the data is reported. +// +// A gauge is an instantaneous measurement of a value. +// +// A cumulative measurement is a value accumulated over a time interval. In +// a time series, cumulative measurements should have the same start time, +// increasing values and increasing end times, until an event resets the +// cumulative value to zero and sets a new start time for the following +// points. +type MetricDescriptor_Type int32 + +const ( + // Do not use this default value. + MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0 + // Integer gauge. The value can go both up and down. + MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1 + // Floating point gauge. The value can go both up and down. + MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2 + // Distribution gauge measurement. The count and sum can go both up and + // down. Recorded values are always >= 0. + // Used in scenarios like a snapshot of time the current items in a queue + // have spent there. 
+ MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3 + // Integer cumulative measurement. The value cannot decrease, if resets + // then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4 + // Floating point cumulative measurement. The value cannot decrease, if + // resets then the start_time should also be reset. Recorded values are + // always >= 0. + MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5 + // Distribution cumulative measurement. The count and sum cannot decrease, + // if resets then the start_time should also be reset. + MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6 + // Some frameworks implemented Histograms as a summary of observations + // (usually things like request durations and response sizes). While it + // also provides a total count of observations and a sum of all observed + // values, it calculates configurable percentiles over a sliding time + // window. This is not recommended, since it cannot be aggregated. + MetricDescriptor_SUMMARY MetricDescriptor_Type = 7 +) + +var MetricDescriptor_Type_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "GAUGE_INT64", + 2: "GAUGE_DOUBLE", + 3: "GAUGE_DISTRIBUTION", + 4: "CUMULATIVE_INT64", + 5: "CUMULATIVE_DOUBLE", + 6: "CUMULATIVE_DISTRIBUTION", + 7: "SUMMARY", +} + +var MetricDescriptor_Type_value = map[string]int32{ + "UNSPECIFIED": 0, + "GAUGE_INT64": 1, + "GAUGE_DOUBLE": 2, + "GAUGE_DISTRIBUTION": 3, + "CUMULATIVE_INT64": 4, + "CUMULATIVE_DOUBLE": 5, + "CUMULATIVE_DISTRIBUTION": 6, + "SUMMARY": 7, +} + +func (x MetricDescriptor_Type) String() string { + return proto.EnumName(MetricDescriptor_Type_name, int32(x)) +} + +func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1, 0} +} + +// Defines a Metric which has one or more timeseries. +type Metric struct { + // The descriptor of the Metric. + // TODO(issue #152): consider only sending the name of descriptor for + // optimization. + MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"` + // One or more timeseries for a single metric, where each timeseries has + // one or more points. + Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"` + // The resource for the metric. If unset, it may be set to a default value + // provided for a sequence of messages in an RPC stream. 
+ Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{0} +} + +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (m *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(m, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetMetricDescriptor() *MetricDescriptor { + if m != nil { + return m.MetricDescriptor + } + return nil +} + +func (m *Metric) GetTimeseries() []*TimeSeries { + if m != nil { + return m.Timeseries + } + return nil +} + +func (m *Metric) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +// Defines a metric type and its schema. +type MetricDescriptor struct { + // The metric type, including its DNS name prefix. It must be unique. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A detailed description of the metric, which can be used in documentation. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + // The unit in which the metric value is reported. Follows the format + // described by http://unitsofmeasure.org/ucum.html. + Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"` + Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"` + // The label keys associated with the metric descriptor. 
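+	//
+	// Illustrative note: these keys pair positionally with the LabelValues of
+	// each TimeSeries, e.g. LabelKeys [{Key: "method"}] would pair with
+	// LabelValues such as [{Value: "GET", HasValue: true}].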
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} } +func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) } +func (*MetricDescriptor) ProtoMessage() {} +func (*MetricDescriptor) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{1} +} + +func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b) +} +func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic) +} +func (m *MetricDescriptor) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricDescriptor.Merge(m, src) +} +func (m *MetricDescriptor) XXX_Size() int { + return xxx_messageInfo_MetricDescriptor.Size(m) +} +func (m *MetricDescriptor) XXX_DiscardUnknown() { + xxx_messageInfo_MetricDescriptor.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo + +func (m *MetricDescriptor) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MetricDescriptor) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *MetricDescriptor) GetUnit() string { + if m != nil { + return m.Unit + } + return "" +} + +func (m *MetricDescriptor) GetType() MetricDescriptor_Type { + if m != nil { + return m.Type + } + return MetricDescriptor_UNSPECIFIED +} + +func (m *MetricDescriptor) GetLabelKeys() []*LabelKey { + if m != nil { + return m.LabelKeys + } + return nil +} + +// Defines a label key associated with a metric descriptor. +type LabelKey struct { + // The key for the label. + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + // A human-readable description of what this label key represents. + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelKey) Reset() { *m = LabelKey{} } +func (m *LabelKey) String() string { return proto.CompactTextString(m) } +func (*LabelKey) ProtoMessage() {} +func (*LabelKey) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{2} +} + +func (m *LabelKey) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelKey.Unmarshal(m, b) +} +func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic) +} +func (m *LabelKey) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelKey.Merge(m, src) +} +func (m *LabelKey) XXX_Size() int { + return xxx_messageInfo_LabelKey.Size(m) +} +func (m *LabelKey) XXX_DiscardUnknown() { + xxx_messageInfo_LabelKey.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelKey proto.InternalMessageInfo + +func (m *LabelKey) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *LabelKey) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +// A collection of data points that describes the time-varying values +// of a metric. +type TimeSeries struct { + // Must be present for cumulative metrics. The time when the cumulative value + // was reset to zero. Exclusive. 
The cumulative value is over the time interval + // (start_timestamp, timestamp]. If not specified, the backend can use the + // previous recorded value. + StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"` + // The set of label values that uniquely identify this timeseries. Applies to + // all points. The order of label values must match that of label keys in the + // metric descriptor. + LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"` + // The data points of this timeseries. Point.value type MUST match the + // MetricDescriptor.type. + Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TimeSeries) Reset() { *m = TimeSeries{} } +func (m *TimeSeries) String() string { return proto.CompactTextString(m) } +func (*TimeSeries) ProtoMessage() {} +func (*TimeSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{3} +} + +func (m *TimeSeries) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TimeSeries.Unmarshal(m, b) +} +func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic) +} +func (m *TimeSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_TimeSeries.Merge(m, src) +} +func (m *TimeSeries) XXX_Size() int { + return xxx_messageInfo_TimeSeries.Size(m) +} +func (m *TimeSeries) XXX_DiscardUnknown() { + xxx_messageInfo_TimeSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_TimeSeries proto.InternalMessageInfo + +func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp { + if m != nil { + return m.StartTimestamp + } + return nil +} + +func (m *TimeSeries) GetLabelValues() []*LabelValue { + if m != nil { + return m.LabelValues + } + return nil +} + +func (m *TimeSeries) GetPoints() []*Point { + if m != nil { + return m.Points + } + return nil +} + +type LabelValue struct { + // The value for the label. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // If false the value field is ignored and considered not set. + // This is used to differentiate a missing label from an empty string. 
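+	//
+	// For example, {Value: "", HasValue: true} is an explicitly empty label,
+	// while {HasValue: false} means the label is not set at all.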
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelValue) Reset() { *m = LabelValue{} } +func (m *LabelValue) String() string { return proto.CompactTextString(m) } +func (*LabelValue) ProtoMessage() {} +func (*LabelValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{4} +} + +func (m *LabelValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelValue.Unmarshal(m, b) +} +func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic) +} +func (m *LabelValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelValue.Merge(m, src) +} +func (m *LabelValue) XXX_Size() int { + return xxx_messageInfo_LabelValue.Size(m) +} +func (m *LabelValue) XXX_DiscardUnknown() { + xxx_messageInfo_LabelValue.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelValue proto.InternalMessageInfo + +func (m *LabelValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *LabelValue) GetHasValue() bool { + if m != nil { + return m.HasValue + } + return false +} + +// A timestamped measurement. +type Point struct { + // The moment when this point was recorded. Inclusive. + // If not specified, the timestamp will be decided by the backend. + Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // The actual point value. + // + // Types that are valid to be assigned to Value: + // *Point_Int64Value + // *Point_DoubleValue + // *Point_DistributionValue + // *Point_SummaryValue + Value isPoint_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Point) Reset() { *m = Point{} } +func (m *Point) String() string { return proto.CompactTextString(m) } +func (*Point) ProtoMessage() {} +func (*Point) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{5} +} + +func (m *Point) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Point.Unmarshal(m, b) +} +func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Point.Marshal(b, m, deterministic) +} +func (m *Point) XXX_Merge(src proto.Message) { + xxx_messageInfo_Point.Merge(m, src) +} +func (m *Point) XXX_Size() int { + return xxx_messageInfo_Point.Size(m) +} +func (m *Point) XXX_DiscardUnknown() { + xxx_messageInfo_Point.DiscardUnknown(m) +} + +var xxx_messageInfo_Point proto.InternalMessageInfo + +func (m *Point) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +type isPoint_Value interface { + isPoint_Value() +} + +type Point_Int64Value struct { + Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"` +} + +type Point_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type Point_DistributionValue struct { + DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"` +} + +type Point_SummaryValue struct { + SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"` +} + +func (*Point_Int64Value) isPoint_Value() {} + +func (*Point_DoubleValue) 
isPoint_Value() {} + +func (*Point_DistributionValue) isPoint_Value() {} + +func (*Point_SummaryValue) isPoint_Value() {} + +func (m *Point) GetValue() isPoint_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Point) GetInt64Value() int64 { + if x, ok := m.GetValue().(*Point_Int64Value); ok { + return x.Int64Value + } + return 0 +} + +func (m *Point) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*Point_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (m *Point) GetDistributionValue() *DistributionValue { + if x, ok := m.GetValue().(*Point_DistributionValue); ok { + return x.DistributionValue + } + return nil +} + +func (m *Point) GetSummaryValue() *SummaryValue { + if x, ok := m.GetValue().(*Point_SummaryValue); ok { + return x.SummaryValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Point) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Point_Int64Value)(nil), + (*Point_DoubleValue)(nil), + (*Point_DistributionValue)(nil), + (*Point_SummaryValue)(nil), + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type DistributionValue struct { + // The number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"` + // The sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"` + // Don't change bucket boundaries within a TimeSeries if your backend doesn't + // support this. + // TODO(issue #152): consider not required to send bucket options for + // optimization. + BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"` + // If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. 
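+	//
+	// For example (illustrative numbers): with Count = 10, the bucket counts
+	// could be [2, 5, 3], whose sum is 10 as required.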
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue) Reset() { *m = DistributionValue{} } +func (m *DistributionValue) String() string { return proto.CompactTextString(m) } +func (*DistributionValue) ProtoMessage() {} +func (*DistributionValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6} +} + +func (m *DistributionValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue.Unmarshal(m, b) +} +func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic) +} +func (m *DistributionValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue.Merge(m, src) +} +func (m *DistributionValue) XXX_Size() int { + return xxx_messageInfo_DistributionValue.Size(m) +} +func (m *DistributionValue) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue proto.InternalMessageInfo + +func (m *DistributionValue) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue) GetSum() float64 { + if m != nil { + return m.Sum + } + return 0 +} + +func (m *DistributionValue) GetSumOfSquaredDeviation() float64 { + if m != nil { + return m.SumOfSquaredDeviation + } + return 0 +} + +func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions { + if m != nil { + return m.BucketOptions + } + return nil +} + +func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket { + if m != nil { + return m.Buckets + } + return nil +} + +// A Distribution may optionally contain a histogram of the values in the +// population. The bucket boundaries for that histogram are described by +// BucketOptions. +// +// If bucket_options has no type, then there is no histogram associated with +// the Distribution. 
+type DistributionValue_BucketOptions struct { + // Types that are valid to be assigned to Type: + // *DistributionValue_BucketOptions_Explicit_ + Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} } +func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions) ProtoMessage() {} +func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0} +} + +func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src) +} +func (m *DistributionValue_BucketOptions) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions.Size(m) +} +func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo + +type isDistributionValue_BucketOptions_Type interface { + isDistributionValue_BucketOptions_Type() +} + +type DistributionValue_BucketOptions_Explicit_ struct { + Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"` +} + +func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {} + +func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit { + if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok { + return x.Explicit + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*DistributionValue_BucketOptions_Explicit_)(nil), + } +} + +// Specifies a set of buckets with arbitrary upper-bounds. +// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket +// index i are: +// +// [0, bucket_bounds[i]) for i == 0 +// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1 +// [bucket_bounds[i], +infinity) for i == N-1 +type DistributionValue_BucketOptions_Explicit struct { + // The values must be strictly increasing and > 0. 
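+	//
+	// Worked example: Bounds = [1, 5, 10] defines four buckets:
+	// [0, 1), [1, 5), [5, 10), and [10, +infinity).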
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_BucketOptions_Explicit) Reset() { + *m = DistributionValue_BucketOptions_Explicit{} +} +func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {} +func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0} +} + +func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int { + return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m) +} +func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo + +func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 { + if m != nil { + return m.Bounds + } + return nil +} + +type DistributionValue_Bucket struct { + // The number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"` + // If the distribution does not have a histogram, then omit this field. 
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} } +func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Bucket) ProtoMessage() {} +func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 1} +} + +func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b) +} +func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Bucket.Merge(m, src) +} +func (m *DistributionValue_Bucket) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Bucket.Size(m) +} +func (m *DistributionValue_Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo + +func (m *DistributionValue_Bucket) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar { + if m != nil { + return m.Exemplar + } + return nil +} + +// Exemplars are example points that may be used to annotate aggregated +// Distribution values. They are metadata that gives information about a +// particular value added to a Distribution bucket. +type DistributionValue_Exemplar struct { + // Value of the exemplar point. It determines which bucket the exemplar + // belongs to. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + // The observation (sampling) time of the above value. + Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // Contextual information about the example value. 
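+	//
+	// Illustrative sketch with hypothetical keys: an exemplar might carry
+	// {"trace_id": "4bf92f3577b34da6"} so the sampled value can be linked
+	// back to the trace it was drawn from.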
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} } +func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) } +func (*DistributionValue_Exemplar) ProtoMessage() {} +func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{6, 2} +} + +func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b) +} +func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic) +} +func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) { + xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src) +} +func (m *DistributionValue_Exemplar) XXX_Size() int { + return xxx_messageInfo_DistributionValue_Exemplar.Size(m) +} +func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() { + xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m) +} + +var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo + +func (m *DistributionValue_Exemplar) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *DistributionValue_Exemplar) GetAttachments() map[string]string { + if m != nil { + return m.Attachments + } + return nil +} + +// The start_timestamp only applies to the count and sum in the SummaryValue. +type SummaryValue struct { + // The total number of recorded values since start_time. Optional since + // some systems don't expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The total sum of recorded values since start_time. Optional since some + // systems don't expose this. If count is zero then this field must be zero. + // This field must be unset if the sum is not available. + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // Values calculated over an arbitrary time window. 
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue) Reset() { *m = SummaryValue{} } +func (m *SummaryValue) String() string { return proto.CompactTextString(m) } +func (*SummaryValue) ProtoMessage() {} +func (*SummaryValue) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7} +} + +func (m *SummaryValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue.Unmarshal(m, b) +} +func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic) +} +func (m *SummaryValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue.Merge(m, src) +} +func (m *SummaryValue) XXX_Size() int { + return xxx_messageInfo_SummaryValue.Size(m) +} +func (m *SummaryValue) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue proto.InternalMessageInfo + +func (m *SummaryValue) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +// The values in this message can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type SummaryValue_Snapshot struct { + // The number of values in the snapshot. Optional since some systems don't + // expose this. + Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"` + // The sum of values in the snapshot. Optional since some systems don't + // expose this. If count is zero then this field must be zero or not set + // (if not supported). + Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"` + // A list of values at different percentiles of the distribution calculated + // from the current snapshot. The percentiles must be strictly increasing. 
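+	//
+	// For example (illustrative values): {Percentile: 50, Value: 0.012},
+	// {Percentile: 95, Value: 0.087}, {Percentile: 99, Value: 0.231}.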
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} } +func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot) ProtoMessage() {} +func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0} +} + +func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src) +} +func (m *SummaryValue_Snapshot) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot.Size(m) +} +func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value { + if m != nil { + return m.Count + } + return nil +} + +func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue { + if m != nil { + return m.Sum + } + return nil +} + +func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile { + if m != nil { + return m.PercentileValues + } + return nil +} + +// Represents the value at a given percentile of a distribution. +type SummaryValue_Snapshot_ValueAtPercentile struct { + // The percentile of a distribution. Must be in the interval + // (0.0, 100.0]. + Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"` + // The value at the given percentile of a distribution. 
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() { + *m = SummaryValue_Snapshot_ValueAtPercentile{} +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) } +func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {} +func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) { + return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0} +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int { + return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m) +} +func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() { + xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m) +} + +var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 { + if m != nil { + return m.Percentile + } + return 0 +} + +func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value) + proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric") + proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor") + proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey") + proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries") + proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue") + proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point") + proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue") + proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions") + proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit") + proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket") + proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry") + proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue") + proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot") + proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile") +} + +func init() { + 
proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a) +} + +var fileDescriptor_0ee3deb72053811a = []byte{ + // 1118 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5, + 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xd2, 0xf5, 0xa8, 0xed, 0xdf, 0x72, 0xfe, 0x0a, 0x61, + 0x11, 0x90, 0x0a, 0x65, 0xad, 0x98, 0xd2, 0x56, 0x15, 0x2a, 0x8a, 0x63, 0x37, 0x31, 0x24, 0xb1, + 0x35, 0xb6, 0x23, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x0d, 0xf8, + 0x05, 0x78, 0x02, 0xc4, 0x35, 0xb7, 0x88, 0xe7, 0xe0, 0x8a, 0x27, 0xe0, 0x15, 0xb8, 0x41, 0xbc, + 0x01, 0xda, 0x99, 0xd9, 0x8f, 0xc4, 0x60, 0xea, 0x22, 0x71, 0x77, 0xe6, 0xcc, 0x39, 0xbf, 0xfd, + 0x9d, 0xcf, 0x1d, 0x78, 0xe4, 0xf9, 0xc4, 0xb5, 0x88, 0x4b, 0x43, 0x5a, 0xf7, 0x03, 0x8f, 0x79, + 0x75, 0x87, 0xb0, 0xc0, 0xb6, 0x68, 0xfd, 0x66, 0x3f, 0x16, 0x0d, 0x7e, 0x81, 0xb6, 0x52, 0x53, + 0xa1, 0x31, 0xe2, 0xfb, 0x9b, 0xfd, 0xda, 0x3b, 0x97, 0x9e, 0x77, 0x39, 0x25, 0x02, 0x63, 0x1c, + 0x5e, 0xd4, 0x99, 0xed, 0x10, 0xca, 0x4c, 0xc7, 0x17, 0xb6, 0xb5, 0xed, 0xbb, 0x06, 0x5f, 0x07, + 0xa6, 0xef, 0x93, 0x40, 0x62, 0xd5, 0x3e, 0x9a, 0x23, 0x12, 0x10, 0xea, 0x85, 0x81, 0x45, 0x22, + 0x26, 0xb1, 0x2c, 0x8c, 0xf5, 0x3f, 0x14, 0x28, 0x9e, 0xf2, 0x8f, 0xa3, 0x57, 0x50, 0x11, 0x34, + 0x46, 0x13, 0x42, 0xad, 0xc0, 0xf6, 0x99, 0x17, 0x54, 0x95, 0x1d, 0x65, 0x57, 0x6d, 0xec, 0x19, + 0x0b, 0x18, 0x1b, 0xc2, 0xbf, 0x95, 0x38, 0x61, 0xcd, 0xb9, 0xa3, 0x41, 0x47, 0x00, 0x3c, 0x0c, + 0x12, 0xd8, 0x84, 0x56, 0x73, 0x3b, 0xf9, 0x5d, 0xb5, 0xf1, 0xe1, 0x42, 0xd0, 0x81, 0xed, 0x90, + 0x3e, 0x37, 0xc7, 0x19, 0x57, 0xd4, 0x84, 0x52, 0x1c, 0x41, 0x35, 0xcf, 0xb9, 0x7d, 0x30, 0x0f, + 0x93, 0xc4, 0x78, 0xb3, 0x6f, 0x60, 0x29, 0xe3, 0xc4, 0x4f, 0xff, 0x3e, 0x0f, 0xda, 0x5d, 0xce, + 0x08, 0x41, 0xc1, 0x35, 0x1d, 0xc2, 0x03, 0x5e, 0xc7, 0x5c, 0x46, 0x3b, 0xa0, 0xc6, 0xa9, 0xb0, + 0x3d, 0xb7, 0x9a, 0xe3, 0x57, 0x59, 0x55, 0xe4, 0x15, 0xba, 0x36, 0xe3, 0x54, 0xd6, 0x31, 0x97, + 0xd1, 0x4b, 0x28, 0xb0, 0x99, 0x4f, 0xaa, 0x85, 0x1d, 0x65, 0x77, 0xb3, 0xd1, 0x58, 0x2a, 0x75, + 0xc6, 0x60, 0xe6, 0x13, 0xcc, 0xfd, 0x51, 0x0b, 0x60, 0x6a, 0x8e, 0xc9, 0x74, 0x74, 0x4d, 0x66, + 0xb4, 0xba, 0xca, 0x73, 0xf6, 0xfe, 0x42, 0xb4, 0x93, 0xc8, 0xfc, 0x0b, 0x32, 0xc3, 0xeb, 0x53, + 0x29, 0x51, 0xfd, 0x47, 0x05, 0x0a, 0x11, 0x28, 0xba, 0x07, 0xea, 0xf0, 0xac, 0xdf, 0x6b, 0x1f, + 0x76, 0x5e, 0x76, 0xda, 0x2d, 0x6d, 0x25, 0x52, 0x1c, 0x1d, 0x0c, 0x8f, 0xda, 0xa3, 0xce, 0xd9, + 0xe0, 0xc9, 0x63, 0x4d, 0x41, 0x1a, 0x94, 0x85, 0xa2, 0xd5, 0x1d, 0x36, 0x4f, 0xda, 0x5a, 0x0e, + 0x3d, 0x04, 0x24, 0x35, 0x9d, 0xfe, 0x00, 0x77, 0x9a, 0xc3, 0x41, 0xa7, 0x7b, 0xa6, 0xe5, 0xd1, + 0x7d, 0xd0, 0x0e, 0x87, 0xa7, 0xc3, 0x93, 0x83, 0x41, 0xe7, 0x3c, 0xf6, 0x2f, 0xa0, 0x07, 0x50, + 0xc9, 0x68, 0x25, 0xc8, 0x2a, 0xda, 0x82, 0xff, 0x65, 0xd5, 0x59, 0xa4, 0x22, 0x52, 0x61, 0xad, + 0x3f, 0x3c, 0x3d, 0x3d, 0xc0, 0x5f, 0x6a, 0x6b, 0xfa, 0x0b, 0x28, 0xc5, 0x21, 0x20, 0x0d, 0xf2, + 0xd7, 0x64, 0x26, 0xcb, 0x11, 0x89, 0xff, 0x5c, 0x0d, 0xfd, 0x57, 0x05, 0x20, 0xed, 0x1b, 0x74, + 0x08, 0xf7, 0x28, 0x33, 0x03, 0x36, 0x4a, 0x26, 0x48, 0xb6, 0x73, 0xcd, 0x10, 0x23, 0x64, 0xc4, + 0x23, 0xc4, 0xbb, 0x8d, 0x5b, 0xe0, 0x4d, 0xee, 0x92, 0x9c, 0xd1, 0xe7, 0x50, 0x16, 0x55, 0xb8, + 0x31, 0xa7, 0xe1, 0x1b, 0xf6, 0x2e, 0x0f, 0xe2, 0x3c, 0xb2, 0xc7, 0xea, 0x34, 0x91, 0x29, 0x7a, + 0x0e, 0x45, 0xdf, 0xb3, 0x5d, 0x46, 0xab, 0x79, 0x8e, 0xa2, 0x2f, 0x44, 0xe9, 0x45, 0xa6, 0x58, + 0x7a, 0xe8, 0x9f, 
0x01, 0xa4, 0xb0, 0xe8, 0x3e, 0xac, 0x72, 0x3e, 0x32, 0x3f, 0xe2, 0x80, 0xb6, + 0x60, 0xfd, 0xca, 0xa4, 0x82, 0x29, 0xcf, 0x4f, 0x09, 0x97, 0xae, 0x4c, 0xca, 0x5d, 0xf4, 0x9f, + 0x73, 0xb0, 0xca, 0x21, 0xd1, 0x33, 0x58, 0x5f, 0x26, 0x23, 0xa9, 0x31, 0x7a, 0x17, 0x54, 0xdb, + 0x65, 0x4f, 0x1e, 0x67, 0x3e, 0x91, 0x3f, 0x5e, 0xc1, 0xc0, 0x95, 0x82, 0xd9, 0x7b, 0x50, 0x9e, + 0x78, 0xe1, 0x78, 0x4a, 0xa4, 0x4d, 0x34, 0x19, 0xca, 0xf1, 0x0a, 0x56, 0x85, 0x56, 0x18, 0x8d, + 0x00, 0x4d, 0x6c, 0xca, 0x02, 0x7b, 0x1c, 0x46, 0x85, 0x93, 0xa6, 0x05, 0x4e, 0xc5, 0x58, 0x98, + 0x94, 0x56, 0xc6, 0x8d, 0x63, 0x1d, 0xaf, 0xe0, 0xca, 0xe4, 0xae, 0x12, 0xf5, 0x60, 0x83, 0x86, + 0x8e, 0x63, 0x06, 0x33, 0x89, 0xbd, 0xca, 0xb1, 0x1f, 0x2d, 0xc4, 0xee, 0x0b, 0x8f, 0x18, 0xb6, + 0x4c, 0x33, 0xe7, 0xe6, 0x9a, 0xcc, 0xb8, 0xfe, 0x4b, 0x11, 0x2a, 0x73, 0x2c, 0xa2, 0x82, 0x58, + 0x5e, 0xe8, 0x32, 0x9e, 0xcf, 0x3c, 0x16, 0x87, 0xa8, 0x89, 0x69, 0xe8, 0xf0, 0x3c, 0x29, 0x38, + 0x12, 0xd1, 0x53, 0xa8, 0xd2, 0xd0, 0x19, 0x79, 0x17, 0x23, 0xfa, 0x3a, 0x34, 0x03, 0x32, 0x19, + 0x4d, 0xc8, 0x8d, 0x6d, 0xf2, 0x8e, 0xe6, 0xa9, 0xc2, 0x0f, 0x68, 0xe8, 0x74, 0x2f, 0xfa, 0xe2, + 0xb6, 0x15, 0x5f, 0x22, 0x0b, 0x36, 0xc7, 0xa1, 0x75, 0x4d, 0xd8, 0xc8, 0xe3, 0xcd, 0x4e, 0x65, + 0xba, 0x3e, 0x5d, 0x2e, 0x5d, 0x46, 0x93, 0x83, 0x74, 0x05, 0x06, 0xde, 0x18, 0x67, 0x8f, 0xa8, + 0x0b, 0x6b, 0x42, 0x11, 0xef, 0x9b, 0x4f, 0xde, 0x0a, 0x1d, 0xc7, 0x28, 0xb5, 0x1f, 0x14, 0xd8, + 0xb8, 0xf5, 0x45, 0x64, 0x41, 0x89, 0x7c, 0xe3, 0x4f, 0x6d, 0xcb, 0x66, 0xb2, 0xf7, 0xda, 0xff, + 0x26, 0x02, 0xa3, 0x2d, 0xc1, 0x8e, 0x57, 0x70, 0x02, 0x5c, 0xd3, 0xa1, 0x14, 0xeb, 0xd1, 0x43, + 0x28, 0x8e, 0xbd, 0xd0, 0x9d, 0xd0, 0xaa, 0xb2, 0x93, 0xdf, 0x55, 0xb0, 0x3c, 0x35, 0x8b, 0x62, + 0x4d, 0xd7, 0x28, 0x14, 0x05, 0xe2, 0xdf, 0xd4, 0xb0, 0x1f, 0x11, 0x26, 0x8e, 0x3f, 0x35, 0x03, + 0x5e, 0x48, 0xb5, 0xf1, 0x74, 0x49, 0xc2, 0x6d, 0xe9, 0x8e, 0x13, 0xa0, 0xda, 0xb7, 0xb9, 0x88, + 0xa1, 0x38, 0xdc, 0x1e, 0x66, 0x25, 0x1e, 0xe6, 0x5b, 0x53, 0x9a, 0x5b, 0x66, 0x4a, 0xbf, 0x02, + 0xd5, 0x64, 0xcc, 0xb4, 0xae, 0x1c, 0x92, 0xee, 0x9a, 0xe3, 0xb7, 0x24, 0x6d, 0x1c, 0xa4, 0x50, + 0x6d, 0x97, 0x05, 0x33, 0x9c, 0x05, 0xaf, 0xbd, 0x00, 0xed, 0xae, 0xc1, 0x5f, 0xac, 0xee, 0x24, + 0xc2, 0x5c, 0x66, 0x5d, 0x3d, 0xcf, 0x3d, 0x53, 0xf4, 0xdf, 0xf3, 0x50, 0xce, 0xce, 0x1d, 0xda, + 0xcf, 0x16, 0x41, 0x6d, 0x6c, 0xcd, 0x85, 0xdc, 0x49, 0x76, 0x4d, 0x5c, 0x21, 0x23, 0x9d, 0x32, + 0xb5, 0xf1, 0xff, 0x39, 0x87, 0x56, 0xba, 0x78, 0xc4, 0x0c, 0x9e, 0x41, 0x89, 0xba, 0xa6, 0x4f, + 0xaf, 0x3c, 0x26, 0xdf, 0x10, 0x8d, 0x37, 0xde, 0x0b, 0x46, 0x5f, 0x7a, 0xe2, 0x04, 0xa3, 0xf6, + 0x53, 0x0e, 0x4a, 0xb1, 0xfa, 0xbf, 0xe0, 0xff, 0x1a, 0x2a, 0x3e, 0x09, 0x2c, 0xe2, 0x32, 0x3b, + 0x5e, 0xb3, 0x71, 0x95, 0x5b, 0xcb, 0x07, 0x62, 0xf0, 0xe3, 0x01, 0xeb, 0x25, 0x90, 0x58, 0x4b, + 0xe1, 0xc5, 0x9f, 0xab, 0xd6, 0x81, 0xca, 0x9c, 0x19, 0xda, 0x06, 0x48, 0x0d, 0x65, 0xf3, 0x66, + 0x34, 0xb7, 0xab, 0x1e, 0xf7, 0x75, 0xf3, 0x3b, 0x05, 0xb6, 0x6d, 0x6f, 0x11, 0xcf, 0x66, 0x59, + 0x3c, 0x8b, 0x68, 0x2f, 0xba, 0xe8, 0x29, 0xaf, 0x5a, 0x97, 0x36, 0xbb, 0x0a, 0xc7, 0x86, 0xe5, + 0x39, 0x75, 0xe1, 0xb3, 0x67, 0xbb, 0x94, 0x05, 0x61, 0xd4, 0x74, 0x7c, 0x3d, 0xd6, 0x53, 0xb8, + 0x3d, 0xf1, 0xe6, 0xbd, 0x24, 0xee, 0xde, 0x65, 0xf6, 0x0d, 0xfe, 0x5b, 0x6e, 0xab, 0xeb, 0x13, + 0xf7, 0x50, 0x7c, 0x93, 0x43, 0xcb, 0xe7, 0x17, 0x35, 0xce, 0xf7, 0xc7, 0x45, 0xee, 0xf6, 0xf1, + 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd0, 0xb4, 0x8d, 0xc7, 0x0b, 0x00, 0x00, +} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go new file mode 100644 index 00000000..5dba6a2a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go @@ -0,0 +1,100 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/resource/v1/resource.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Resource information. +type Resource struct { + // Type identifier for the resource. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Set of labels that describe the resource. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (m *Resource) String() string { return proto.CompactTextString(m) } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { + return fileDescriptor_584700775a2fc762, []int{0} +} + +func (m *Resource) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Resource.Unmarshal(m, b) +} +func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Resource.Marshal(b, m, deterministic) +} +func (m *Resource) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resource.Merge(m, src) +} +func (m *Resource) XXX_Size() int { + return xxx_messageInfo_Resource.Size(m) +} +func (m *Resource) XXX_DiscardUnknown() { + xxx_messageInfo_Resource.DiscardUnknown(m) +} + +var xxx_messageInfo_Resource proto.InternalMessageInfo + +func (m *Resource) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Resource) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func init() { + proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource") + proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry") +} + +func init() { + proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762) +} + +var fileDescriptor_584700775a2fc762 = []byte{ + // 251 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd, + 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d, + 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08, + 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe, + 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 
0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98, + 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, + 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25, + 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3, + 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12, + 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0x69, 0x06, 0x23, 0x97, 0x7c, 0x66, 0x3e, + 0x5e, 0xbb, 0x9d, 0x78, 0x61, 0x96, 0x07, 0x80, 0xa4, 0x02, 0x18, 0xa3, 0x5c, 0xd3, 0x33, 0x4b, + 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x21, 0xba, 0x74, 0x33, 0xf3, 0x8a, 0x4b, 0x8a, + 0x4a, 0x73, 0x53, 0xf3, 0x4a, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0xf4, 0x11, 0x06, 0xea, 0x42, 0x42, + 0x32, 0x3d, 0x35, 0x4f, 0x37, 0x1d, 0x25, 0x40, 0x5f, 0x31, 0xc9, 0xf8, 0x17, 0xa4, 0xe6, 0x39, + 0x43, 0xac, 0x05, 0x9b, 0x8d, 0xf0, 0x66, 0x98, 0x61, 0x12, 0x1b, 0x58, 0xa3, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xcf, 0x32, 0xff, 0x46, 0x96, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go new file mode 100644 index 00000000..2f4ab19b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go @@ -0,0 +1,1553 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace.proto + +package v1 + +import ( + fmt "fmt" + v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + wrappers "github.com/golang/protobuf/ptypes/wrappers" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SERVER Span_SpanKind = 1 + // Indicates that the span covers the client-side wrapper around an RPC or + // other remote request. + Span_CLIENT Span_SpanKind = 2 +) + +var Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SERVER", + 2: "CLIENT", +} + +var Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SERVER": 1, + "CLIENT": 2, +} + +func (x Span_SpanKind) String() string { + return proto.EnumName(Span_SpanKind_name, int32(x)) +} + +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0} +} + +// Indicates whether the message was sent or received. +type Span_TimeEvent_MessageEvent_Type int32 + +const ( + // Unknown event type. 
+ Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0 + // Indicates a sent message. + Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1 + // Indicates a received message. + Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2 +) + +var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "SENT", + 2: "RECEIVED", +} + +var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "SENT": 1, + "RECEIVED": 2, +} + +func (x Span_TimeEvent_MessageEvent_Type) String() string { + return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x)) +} + +func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0} +} + +// The relationship of the current span relative to the linked span: child, +// parent, or unspecified. +type Span_Link_Type int32 + +const ( + // The relationship of the two spans is unknown, or known but other + // than parent-child. + Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0 + // The linked span is a child of the current span. + Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1 + // The linked span is a parent of the current span. + Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2 +) + +var Span_Link_Type_name = map[int32]string{ + 0: "TYPE_UNSPECIFIED", + 1: "CHILD_LINKED_SPAN", + 2: "PARENT_LINKED_SPAN", +} + +var Span_Link_Type_value = map[string]int32{ + "TYPE_UNSPECIFIED": 0, + "CHILD_LINKED_SPAN": 1, + "PARENT_LINKED_SPAN": 2, +} + +func (x Span_Link_Type) String() string { + return proto.EnumName(Span_Link_Type_name, int32(x)) +} + +func (Span_Link_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0} +} + +// A span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace. And form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next id is 17. +// TODO(bdrutu): Add an example. +type Span struct { + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The Tracestate on the span. + Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. 
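+ //
+ // Illustrative sketch (not part of the generated code): since the
+ // generated accessors below are nil-safe, a consumer can detect a root
+ // span by checking for an empty parent ID:
+ //
+ //     if len(span.GetParentSpanId()) == 0 {
+ //         // span is a root span of its trace
+ //     }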
+ ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to a non-empty string.
+ // When a null or empty string is received, the receiver may use the
+ // string "name" as a replacement. There might be smarter algorithms
+ // implemented by the receiver to fix the empty span name.
+ //
+ // This field is required.
+ Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+ // The start time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ //
+ // This field is semantically required. When not set on receive, the
+ // receiver should set it to the value of the end_time field if that was
+ // set, or to the current time if neither was set. It is important to
+ // keep end_time > start_time for consistency.
+ //
+ // This field is required.
+ StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The end time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ //
+ // This field is semantically required. When not set on receive, the
+ // receiver should set it to the start_time value. It is important to
+ // keep end_time > start_time for consistency.
+ //
+ // This field is required.
+ EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // A set of attributes on the span.
+ Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ // A stack trace captured at the start of the span.
+ StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
+ // The included time events.
+ TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
+ // The included links.
+ Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
+ // An optional final status for this span. Semantically, when Status
+ // isn't set, it means the span ended without errors and Status.Ok
+ // (code = 0) is assumed.
+ Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
+ // An optional resource that is associated with this span. If not set, this span
+ // should be part of a batch that does include the resource information, unless resource
+ // information is unknown.
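+ //
+ // Illustrative sketch (not generated code): a consumer might fall back to
+ // a batch-level resource (batchResource here is a hypothetical variable)
+ // when this field is unset:
+ //
+ //     res := span.GetResource()
+ //     if res == nil {
+ //         res = batchResource
+ //     }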
+ Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"` + // A highly recommended but not required flag that identifies when a + // trace crosses a process boundary. True when the parent_span belongs + // to the same process as the current span. This flag is most commonly + // used to indicate the need to adjust time as clocks in different + // processes may not be synchronized. + SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"` + // An optional number of child spans that were generated while this span + // was active. If set, allows an implementation to detect missing child spans. + ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span) Reset() { *m = Span{} } +func (m *Span) String() string { return proto.CompactTextString(m) } +func (*Span) ProtoMessage() {} +func (*Span) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0} +} + +func (m *Span) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span.Unmarshal(m, b) +} +func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span.Marshal(b, m, deterministic) +} +func (m *Span) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span.Merge(m, src) +} +func (m *Span) XXX_Size() int { + return xxx_messageInfo_Span.Size(m) +} +func (m *Span) XXX_DiscardUnknown() { + xxx_messageInfo_Span.DiscardUnknown(m) +} + +var xxx_messageInfo_Span proto.InternalMessageInfo + +func (m *Span) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +func (m *Span) GetParentSpanId() []byte { + if m != nil { + return m.ParentSpanId + } + return nil +} + +func (m *Span) GetName() *TruncatableString { + if m != nil { + return m.Name + } + return nil +} + +func (m *Span) GetKind() Span_SpanKind { + if m != nil { + return m.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (m *Span) GetStartTime() *timestamp.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *Span) GetEndTime() *timestamp.Timestamp { + if m != nil { + return m.EndTime + } + return nil +} + +func (m *Span) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span) GetStackTrace() *StackTrace { + if m != nil { + return m.StackTrace + } + return nil +} + +func (m *Span) GetTimeEvents() *Span_TimeEvents { + if m != nil { + return m.TimeEvents + } + return nil +} + +func (m *Span) GetLinks() *Span_Links { + if m != nil { + return m.Links + } + return nil +} + +func (m *Span) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Span) GetResource() *v1.Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue { + if m != nil { + return m.SameProcessAsParentSpan + } + return nil +} + +func (m *Span) GetChildSpanCount() *wrappers.UInt32Value { + if m != nil { + return m.ChildSpanCount + } + return nil +} + +// This field 
conveys information about the request's position in multiple distributed tracing graphs.
+// It is a list of Tracestate.Entry with a maximum of 32 members in the list.
+//
+// See https://github.com/w3c/distributed-tracing for more details about this field.
+type Span_Tracestate struct {
+ // A list of entries that represent the Tracestate.
+ Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} }
+func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) }
+func (*Span_Tracestate) ProtoMessage() {}
+func (*Span_Tracestate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
+}
+
+func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b)
+}
+func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic)
+}
+func (m *Span_Tracestate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Tracestate.Merge(m, src)
+}
+func (m *Span_Tracestate) XXX_Size() int {
+ return xxx_messageInfo_Span_Tracestate.Size(m)
+}
+func (m *Span_Tracestate) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Tracestate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo
+
+func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry {
+ if m != nil {
+  return m.Entries
+ }
+ return nil
+}
+
+type Span_Tracestate_Entry struct {
+ // The key must begin with a lowercase letter, and can only contain
+ // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes
+ // '-', asterisks '*', and forward slashes '/'.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value is an opaque string of up to 256 printable ASCII (RFC 0020)
+ // characters (i.e., the range 0x20 to 0x7E) except ',' and '='.
+ // Note that this also excludes tabs, newlines, carriage returns, etc.
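+ //
+ // e.g. (illustrative, not generated code):
+ //
+ //     entry := &Span_Tracestate_Entry{Key: "vendorname", Value: "opaque-value"}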
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} } +func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) } +func (*Span_Tracestate_Entry) ProtoMessage() {} +func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0} +} + +func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b) +} +func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic) +} +func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src) +} +func (m *Span_Tracestate_Entry) XXX_Size() int { + return xxx_messageInfo_Span_Tracestate_Entry.Size(m) +} +func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo + +func (m *Span_Tracestate_Entry) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Span_Tracestate_Entry) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// A set of attributes, each with a key and a value. +type Span_Attributes struct { + // The set of attributes. The value can be a string, an integer, a double + // or the Boolean values `true` or `false`. Note, global attributes like + // server name can be set as tags using resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // The number of attributes that were discarded. Attributes can be discarded + // because their keys are too long or because there are too many attributes. + // If this value is 0, then no attributes were dropped. 
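+ //
+ // Illustrative sketch (not generated code): a consumer could track
+ // attribute loss across spans with a hypothetical counter, relying on the
+ // nil-safe generated getters:
+ //
+ //     droppedTotal += int64(span.GetAttributes().GetDroppedAttributesCount())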
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Attributes) Reset() { *m = Span_Attributes{} } +func (m *Span_Attributes) String() string { return proto.CompactTextString(m) } +func (*Span_Attributes) ProtoMessage() {} +func (*Span_Attributes) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 1} +} + +func (m *Span_Attributes) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Attributes.Unmarshal(m, b) +} +func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic) +} +func (m *Span_Attributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Attributes.Merge(m, src) +} +func (m *Span_Attributes) XXX_Size() int { + return xxx_messageInfo_Span_Attributes.Size(m) +} +func (m *Span_Attributes) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Attributes.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo + +func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue { + if m != nil { + return m.AttributeMap + } + return nil +} + +func (m *Span_Attributes) GetDroppedAttributesCount() int32 { + if m != nil { + return m.DroppedAttributesCount + } + return 0 +} + +// A time-stamped annotation or message event in the Span. +type Span_TimeEvent struct { + // The time the event occurred. + Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + // A `TimeEvent` can contain either an `Annotation` object or a + // `MessageEvent` object, but not both. 
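+ //
+ // Illustrative sketch (not generated code): the concrete variant is
+ // recovered with a type switch on the oneof wrapper types defined below:
+ //
+ //     switch v := ev.Value.(type) {
+ //     case *Span_TimeEvent_Annotation_:
+ //         _ = v.Annotation
+ //     case *Span_TimeEvent_MessageEvent_:
+ //         _ = v.MessageEvent
+ //     }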
+ // + // Types that are valid to be assigned to Value: + // *Span_TimeEvent_Annotation_ + // *Span_TimeEvent_MessageEvent_ + Value isSpan_TimeEvent_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} } +func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent) ProtoMessage() {} +func (*Span_TimeEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2} +} + +func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent.Merge(m, src) +} +func (m *Span_TimeEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent.Size(m) +} +func (m *Span_TimeEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp { + if m != nil { + return m.Time + } + return nil +} + +type isSpan_TimeEvent_Value interface { + isSpan_TimeEvent_Value() +} + +type Span_TimeEvent_Annotation_ struct { + Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"` +} + +type Span_TimeEvent_MessageEvent_ struct { + MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"` +} + +func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {} + +func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {} + +func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation { + if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok { + return x.Annotation + } + return nil +} + +func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent { + if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok { + return x.MessageEvent + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Span_TimeEvent_Annotation_)(nil), + (*Span_TimeEvent_MessageEvent_)(nil), + } +} + +// A text annotation with a set of attributes. +type Span_TimeEvent_Annotation struct { + // A user-supplied message describing the event. + Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // A set of attributes on the annotation. 
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} } +func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_Annotation) ProtoMessage() {} +func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0} +} + +func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b) +} +func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src) +} +func (m *Span_TimeEvent_Annotation) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m) +} +func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo + +func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString { + if m != nil { + return m.Description + } + return nil +} + +func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// An event describing a message sent/received between Spans. +type Span_TimeEvent_MessageEvent struct { + // The type of MessageEvent. Indicates whether the message was sent or + // received. + Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"` + // An identifier for the MessageEvent's message that can be used to match + // SENT and RECEIVED MessageEvents. For example, this field could + // represent a sequence ID for a streaming RPC. It is recommended to be + // unique within a Span. + Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"` + // The number of uncompressed bytes sent or received. + UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"` + // The number of compressed bytes sent or received. If zero, assumed to + // be the same size as uncompressed. 
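+ //
+ // For example (illustrative, not generated code):
+ //
+ //     size := ev.GetCompressedSize()
+ //     if size == 0 {
+ //         size = ev.GetUncompressedSize()
+ //     }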
+ CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} } +func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvent_MessageEvent) ProtoMessage() {} +func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1} +} + +func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src) +} +func (m *Span_TimeEvent_MessageEvent) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m) +} +func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo + +func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type { + if m != nil { + return m.Type + } + return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED +} + +func (m *Span_TimeEvent_MessageEvent) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 { + if m != nil { + return m.UncompressedSize + } + return 0 +} + +func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 { + if m != nil { + return m.CompressedSize + } + return 0 +} + +// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation +// on the span, consisting of either user-supplied key-value pairs, or +// details of a message sent/received between Spans. +type Span_TimeEvents struct { + // A collection of `TimeEvent`s. + TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"` + // The number of dropped annotations in all the included time events. + // If the value is 0, then no annotations were dropped. + DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"` + // The number of dropped message events in all the included time events. + // If the value is 0, then no message events were dropped. 
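+ //
+ // Illustrative sketch (not generated code): the total number of dropped
+ // time events is the sum of both counters:
+ //
+ //     dropped := te.GetDroppedAnnotationsCount() + te.GetDroppedMessageEventsCount()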
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} } +func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) } +func (*Span_TimeEvents) ProtoMessage() {} +func (*Span_TimeEvents) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 3} +} + +func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b) +} +func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic) +} +func (m *Span_TimeEvents) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_TimeEvents.Merge(m, src) +} +func (m *Span_TimeEvents) XXX_Size() int { + return xxx_messageInfo_Span_TimeEvents.Size(m) +} +func (m *Span_TimeEvents) XXX_DiscardUnknown() { + xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo + +func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent { + if m != nil { + return m.TimeEvent + } + return nil +} + +func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 { + if m != nil { + return m.DroppedAnnotationsCount + } + return 0 +} + +func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 { + if m != nil { + return m.DroppedMessageEventsCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The relationship of the current span relative to the linked span. + Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"` + // A set of attributes on the link. + Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"` + // The Tracestate associated with the link. 
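+ //
+ // Illustrative sketch (not generated code): a link to a span in a parent
+ // trace might be built as follows, where parentTraceID and parentSpanID
+ // are hypothetical 16- and 8-byte identifiers:
+ //
+ //     link := &Span_Link{
+ //         TraceId: parentTraceID,
+ //         SpanId:  parentSpanID,
+ //         Type:    Span_Link_PARENT_LINKED_SPAN,
+ //     }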
+ Tracestate *Span_Tracestate `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Link) Reset() { *m = Span_Link{} } +func (m *Span_Link) String() string { return proto.CompactTextString(m) } +func (*Span_Link) ProtoMessage() {} +func (*Span_Link) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 4} +} + +func (m *Span_Link) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Link.Unmarshal(m, b) +} +func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) +} +func (m *Span_Link) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Link.Merge(m, src) +} +func (m *Span_Link) XXX_Size() int { + return xxx_messageInfo_Span_Link.Size(m) +} +func (m *Span_Link) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Link.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Link proto.InternalMessageInfo + +func (m *Span_Link) GetTraceId() []byte { + if m != nil { + return m.TraceId + } + return nil +} + +func (m *Span_Link) GetSpanId() []byte { + if m != nil { + return m.SpanId + } + return nil +} + +func (m *Span_Link) GetType() Span_Link_Type { + if m != nil { + return m.Type + } + return Span_Link_TYPE_UNSPECIFIED +} + +func (m *Span_Link) GetAttributes() *Span_Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Span_Link) GetTracestate() *Span_Tracestate { + if m != nil { + return m.Tracestate + } + return nil +} + +// A collection of links, which are references from this span to a span +// in the same or different trace. +type Span_Links struct { + // A collection of links. + Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"` + // The number of dropped links after the maximum size was enforced. If + // this value is 0, then no links were dropped. + DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Span_Links) Reset() { *m = Span_Links{} } +func (m *Span_Links) String() string { return proto.CompactTextString(m) } +func (*Span_Links) ProtoMessage() {} +func (*Span_Links) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{0, 5} +} + +func (m *Span_Links) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Span_Links.Unmarshal(m, b) +} +func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic) +} +func (m *Span_Links) XXX_Merge(src proto.Message) { + xxx_messageInfo_Span_Links.Merge(m, src) +} +func (m *Span_Links) XXX_Size() int { + return xxx_messageInfo_Span_Links.Size(m) +} +func (m *Span_Links) XXX_DiscardUnknown() { + xxx_messageInfo_Span_Links.DiscardUnknown(m) +} + +var xxx_messageInfo_Span_Links proto.InternalMessageInfo + +func (m *Span_Links) GetLink() []*Span_Link { + if m != nil { + return m.Link + } + return nil +} + +func (m *Span_Links) GetDroppedLinksCount() int32 { + if m != nil { + return m.DroppedLinksCount + } + return 0 +} + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. 
This proto's fields +// are a subset of those of +// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto), +// which is used by [gRPC](https://github.com/grpc). +type Status struct { + // The status code. This is optional field. It is safe to assume 0 (OK) + // when not set. + Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + // A developer-facing error message, which should be in English. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{1} +} + +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +// The value of an Attribute. +type AttributeValue struct { + // The type of the value. + // + // Types that are valid to be assigned to Value: + // *AttributeValue_StringValue + // *AttributeValue_IntValue + // *AttributeValue_BoolValue + // *AttributeValue_DoubleValue + Value isAttributeValue_Value `protobuf_oneof:"value"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AttributeValue) Reset() { *m = AttributeValue{} } +func (m *AttributeValue) String() string { return proto.CompactTextString(m) } +func (*AttributeValue) ProtoMessage() {} +func (*AttributeValue) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{2} +} + +func (m *AttributeValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AttributeValue.Unmarshal(m, b) +} +func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic) +} +func (m *AttributeValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributeValue.Merge(m, src) +} +func (m *AttributeValue) XXX_Size() int { + return xxx_messageInfo_AttributeValue.Size(m) +} +func (m *AttributeValue) XXX_DiscardUnknown() { + xxx_messageInfo_AttributeValue.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributeValue proto.InternalMessageInfo + +type isAttributeValue_Value interface { + isAttributeValue_Value() +} + +type AttributeValue_StringValue struct { + StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AttributeValue_IntValue struct { + IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AttributeValue_BoolValue struct { + BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type 
AttributeValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +func (*AttributeValue_StringValue) isAttributeValue_Value() {} + +func (*AttributeValue_IntValue) isAttributeValue_Value() {} + +func (*AttributeValue_BoolValue) isAttributeValue_Value() {} + +func (*AttributeValue_DoubleValue) isAttributeValue_Value() {} + +func (m *AttributeValue) GetValue() isAttributeValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *AttributeValue) GetStringValue() *TruncatableString { + if x, ok := m.GetValue().(*AttributeValue_StringValue); ok { + return x.StringValue + } + return nil +} + +func (m *AttributeValue) GetIntValue() int64 { + if x, ok := m.GetValue().(*AttributeValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (m *AttributeValue) GetBoolValue() bool { + if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *AttributeValue) GetDoubleValue() float64 { + if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*AttributeValue) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*AttributeValue_StringValue)(nil), + (*AttributeValue_IntValue)(nil), + (*AttributeValue_BoolValue)(nil), + (*AttributeValue_DoubleValue)(nil), + } +} + +// The call stack which originated this span. +type StackTrace struct { + // Stack frames in this stack trace. + StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"` + // The hash ID is used to conserve network bandwidth for duplicate + // stack traces within a single trace. + // + // Often multiple spans will have identical stack traces. + // The first occurrence of a stack trace should contain both + // `stack_frames` and a value in `stack_trace_hash_id`. + // + // Subsequent spans within the same request can refer + // to that stack trace by setting only `stack_trace_hash_id`. + // + // TODO: describe how to deal with the case where stack_trace_hash_id is + // zero because it was not set. 
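+ //
+ // Illustrative sketch (not generated code): once the first span has sent
+ // the full frames, later spans may carry only the hash (firstHash is a
+ // hypothetical uint64 taken from the first occurrence):
+ //
+ //     later := &StackTrace{StackTraceHashId: firstHash}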
+ StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace) Reset() { *m = StackTrace{} } +func (m *StackTrace) String() string { return proto.CompactTextString(m) } +func (*StackTrace) ProtoMessage() {} +func (*StackTrace) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3} +} + +func (m *StackTrace) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace.Unmarshal(m, b) +} +func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic) +} +func (m *StackTrace) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace.Merge(m, src) +} +func (m *StackTrace) XXX_Size() int { + return xxx_messageInfo_StackTrace.Size(m) +} +func (m *StackTrace) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace proto.InternalMessageInfo + +func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames { + if m != nil { + return m.StackFrames + } + return nil +} + +func (m *StackTrace) GetStackTraceHashId() uint64 { + if m != nil { + return m.StackTraceHashId + } + return 0 +} + +// A single stack frame in a stack trace. +type StackTrace_StackFrame struct { + // The fully-qualified name that uniquely identifies the function or + // method that is active in this frame. + FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"` + // An un-mangled function name, if `function_name` is + // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can + // be fully qualified. + OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"` + // The name of the source file where the function call appears. + FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + // The line number in `file_name` where the function call appears. + LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"` + // The column number where the function call appears, if available. + // This is important in JavaScript because of its anonymous functions. + ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"` + // The binary module from where the code was loaded. + LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"` + // The version of the deployed source code. 
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} } +func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrame) ProtoMessage() {} +func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 0} +} + +func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b) +} +func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrame.Merge(m, src) +} +func (m *StackTrace_StackFrame) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrame.Size(m) +} +func (m *StackTrace_StackFrame) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo + +func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString { + if m != nil { + return m.FunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString { + if m != nil { + return m.OriginalFunctionName + } + return nil +} + +func (m *StackTrace_StackFrame) GetFileName() *TruncatableString { + if m != nil { + return m.FileName + } + return nil +} + +func (m *StackTrace_StackFrame) GetLineNumber() int64 { + if m != nil { + return m.LineNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetColumnNumber() int64 { + if m != nil { + return m.ColumnNumber + } + return 0 +} + +func (m *StackTrace_StackFrame) GetLoadModule() *Module { + if m != nil { + return m.LoadModule + } + return nil +} + +func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString { + if m != nil { + return m.SourceVersion + } + return nil +} + +// A collection of stack frames, which can be truncated. +type StackTrace_StackFrames struct { + // Stack frames in this call stack. + Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"` + // The number of stack frames that were dropped because there + // were too many stack frames. + // If this value is 0, then no stack frames were dropped. 
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} } +func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) } +func (*StackTrace_StackFrames) ProtoMessage() {} +func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{3, 1} +} + +func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b) +} +func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic) +} +func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) { + xxx_messageInfo_StackTrace_StackFrames.Merge(m, src) +} +func (m *StackTrace_StackFrames) XXX_Size() int { + return xxx_messageInfo_StackTrace_StackFrames.Size(m) +} +func (m *StackTrace_StackFrames) XXX_DiscardUnknown() { + xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m) +} + +var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo + +func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame { + if m != nil { + return m.Frame + } + return nil +} + +func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 { + if m != nil { + return m.DroppedFramesCount + } + return 0 +} + +// A description of a binary module. +type Module struct { + // TODO: document the meaning of this field. + // For example: main binary, kernel modules, and dynamic libraries + // such as libc.so, sharedlib.so. + Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"` + // A unique identifier for the module, usually a hash of its + // contents. + BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Module) Reset() { *m = Module{} } +func (m *Module) String() string { return proto.CompactTextString(m) } +func (*Module) ProtoMessage() {} +func (*Module) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{4} +} + +func (m *Module) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Module.Unmarshal(m, b) +} +func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Module.Marshal(b, m, deterministic) +} +func (m *Module) XXX_Merge(src proto.Message) { + xxx_messageInfo_Module.Merge(m, src) +} +func (m *Module) XXX_Size() int { + return xxx_messageInfo_Module.Size(m) +} +func (m *Module) XXX_DiscardUnknown() { + xxx_messageInfo_Module.DiscardUnknown(m) +} + +var xxx_messageInfo_Module proto.InternalMessageInfo + +func (m *Module) GetModule() *TruncatableString { + if m != nil { + return m.Module + } + return nil +} + +func (m *Module) GetBuildId() *TruncatableString { + if m != nil { + return m.BuildId + } + return nil +} + +// A string that might be shortened to a specified length. +type TruncatableString struct { + // The shortened string. For example, if the original string was 500 bytes long and + // the limit of the string was 128 bytes, then this value contains the first 128 + // bytes of the 500-byte string. 
Note that truncation always happens on a + // character boundary, to ensure that a truncated string is still valid UTF-8. + // Because it may contain multi-byte characters, the size of the truncated string + // may be less than the truncation limit. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + // The number of bytes removed from the original string. If this + // value is 0, then the string was not shortened. + TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TruncatableString) Reset() { *m = TruncatableString{} } +func (m *TruncatableString) String() string { return proto.CompactTextString(m) } +func (*TruncatableString) ProtoMessage() {} +func (*TruncatableString) Descriptor() ([]byte, []int) { + return fileDescriptor_8ea38bbb821bf584, []int{5} +} + +func (m *TruncatableString) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TruncatableString.Unmarshal(m, b) +} +func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic) +} +func (m *TruncatableString) XXX_Merge(src proto.Message) { + xxx_messageInfo_TruncatableString.Merge(m, src) +} +func (m *TruncatableString) XXX_Size() int { + return xxx_messageInfo_TruncatableString.Size(m) +} +func (m *TruncatableString) XXX_DiscardUnknown() { + xxx_messageInfo_TruncatableString.DiscardUnknown(m) +} + +var xxx_messageInfo_TruncatableString proto.InternalMessageInfo + +func (m *TruncatableString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +func (m *TruncatableString) GetTruncatedByteCount() int32 { + if m != nil { + return m.TruncatedByteCount + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value) + proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value) + proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span") + proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate") + proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry") + proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes") + proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry") + proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent") + proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation") + proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent") + proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents") + proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link") + proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links") + proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status") + proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue") + 
proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace") + proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame") + proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames") + proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module") + proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584) +} + +var fileDescriptor_8ea38bbb821bf584 = []byte{ + // 1581 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xdb, 0x6e, 0x1b, 0x41, + 0x19, 0xce, 0xfa, 0xec, 0xdf, 0x8e, 0xeb, 0x4c, 0xd3, 0x74, 0x63, 0x0a, 0x0d, 0x6e, 0x0b, 0x29, + 0x25, 0x9b, 0x26, 0x2d, 0x55, 0x8f, 0x2a, 0x71, 0xe2, 0x60, 0x37, 0xa9, 0xeb, 0x8e, 0xdd, 0x88, + 0x83, 0xd0, 0x6a, 0xed, 0x9d, 0x38, 0x4b, 0xec, 0xd9, 0x65, 0x77, 0x36, 0x28, 0x7d, 0x01, 0x84, + 0xe0, 0x86, 0x0b, 0xc4, 0x0b, 0x70, 0xc1, 0xeb, 0x20, 0xee, 0x79, 0x00, 0x24, 0x9e, 0x80, 0x1b, + 0x34, 0x33, 0x7b, 0x72, 0xd2, 0x26, 0xc6, 0xbd, 0xb1, 0xe6, 0xf0, 0x7f, 0xdf, 0x3f, 0xff, 0xcc, + 0x7f, 0x5a, 0xc3, 0x03, 0xdb, 0x21, 0x74, 0x48, 0xa8, 0xe7, 0x7b, 0x9b, 0x8e, 0x6b, 0x33, 0x7b, + 0x93, 0xb9, 0xc6, 0x90, 0x6c, 0x9e, 0x6d, 0xc9, 0x81, 0x26, 0x16, 0xd1, 0x6a, 0x2c, 0x26, 0x57, + 0x34, 0xb9, 0x7b, 0xb6, 0x55, 0x7b, 0x74, 0x89, 0xc1, 0x25, 0x9e, 0xed, 0xbb, 0x92, 0x24, 0x1c, + 0x4b, 0x54, 0xed, 0xee, 0xc8, 0xb6, 0x47, 0x63, 0x22, 0x05, 0x07, 0xfe, 0xf1, 0x26, 0xb3, 0x26, + 0xc4, 0x63, 0xc6, 0xc4, 0x09, 0x04, 0xbe, 0x77, 0x51, 0xe0, 0x77, 0xae, 0xe1, 0x38, 0xc4, 0x0d, + 0xd4, 0xd6, 0xff, 0xbc, 0x02, 0x99, 0x9e, 0x63, 0x50, 0xb4, 0x0a, 0x05, 0x71, 0x04, 0xdd, 0x32, + 0x55, 0x65, 0x4d, 0x59, 0x2f, 0xe3, 0xbc, 0x98, 0xb7, 0x4d, 0x74, 0x1b, 0xf2, 0x9e, 0x63, 0x50, + 0xbe, 0x93, 0x12, 0x3b, 0x39, 0x3e, 0x6d, 0x9b, 0xe8, 0x1d, 0x80, 0x90, 0xf1, 0x98, 0xc1, 0x88, + 0x7a, 0x63, 0x4d, 0x59, 0x2f, 0x6d, 0xff, 0x48, 0xfb, 0xaa, 0x69, 0x1a, 0x57, 0xa4, 0xf5, 0x23, + 0x04, 0x4e, 0xa0, 0xd1, 0x7d, 0xa8, 0x38, 0x86, 0x4b, 0x28, 0xd3, 0x43, 0x5d, 0x69, 0xa1, 0xab, + 0x2c, 0x57, 0x7b, 0x52, 0xe3, 0x4f, 0x21, 0x43, 0x8d, 0x09, 0x51, 0x33, 0x42, 0xd7, 0x8f, 0xaf, + 0xd0, 0xd5, 0x77, 0x7d, 0x3a, 0x34, 0x98, 0x31, 0x18, 0x93, 0x1e, 0x73, 0x2d, 0x3a, 0xc2, 0x02, + 0x89, 0x5e, 0x43, 0xe6, 0xd4, 0xa2, 0xa6, 0x5a, 0x59, 0x53, 0xd6, 0x2b, 0xdb, 0xeb, 0xd7, 0x9d, + 0x96, 0xff, 0x1c, 0x58, 0xd4, 0xc4, 0x02, 0x85, 0x5e, 0x00, 0x78, 0xcc, 0x70, 0x99, 0xce, 0xef, + 0x59, 0xcd, 0x8a, 0x53, 0xd4, 0x34, 0x79, 0xc7, 0x5a, 0x78, 0xc7, 0x5a, 0x3f, 0x7c, 0x04, 0x5c, + 0x14, 0xd2, 0x7c, 0x8e, 0x7e, 0x02, 0x05, 0x42, 0x4d, 0x09, 0xcc, 0x5d, 0x0b, 0xcc, 0x13, 0x6a, + 0x0a, 0xd8, 0x3b, 0x00, 0x83, 0x31, 0xd7, 0x1a, 0xf8, 0x8c, 0x78, 0x6a, 0x7e, 0xb6, 0x3b, 0xde, + 0x89, 0x10, 0x38, 0x81, 0x46, 0xfb, 0x50, 0xf2, 0x98, 0x31, 0x3c, 0xd5, 0x85, 0xb4, 0x5a, 0x10, + 0x64, 0x0f, 0xae, 0x22, 0xe3, 0xd2, 0xe2, 0xc1, 0x30, 0x78, 0xd1, 0x18, 0x1d, 0x40, 0x89, 0x9b, + 0xa1, 0x93, 0x33, 0x42, 0x99, 0xa7, 0x16, 0x67, 0x7c, 0x78, 0x6b, 0x42, 0x9a, 0x02, 0x81, 0x81, + 0x45, 0x63, 0xf4, 0x0a, 0xb2, 0x63, 0x8b, 0x9e, 0x7a, 0x2a, 0x5c, 0x7f, 0x1c, 0x4e, 0x73, 0xc8, + 0x85, 0xb1, 0xc4, 0xa0, 0x17, 0x90, 0xe3, 0xee, 0xe3, 0x7b, 0x6a, 0x49, 0xa0, 0xbf, 0x7f, 0xb5, + 0x31, 0xcc, 0xf7, 0x70, 0x00, 0x40, 0x0d, 0x28, 0x84, 0xc1, 0xa4, 0x56, 0x05, 0xf8, 0x07, 0x97, + 0xc1, 0x51, 0xb8, 0x9d, 0x6d, 0x69, 
0x38, 0x18, 0xe3, 0x08, 0x87, 0x7e, 0x0e, 0xdf, 0xf1, 0x8c, + 0x09, 0xd1, 0x1d, 0xd7, 0x1e, 0x12, 0xcf, 0xd3, 0x0d, 0x4f, 0x4f, 0x38, 0xb1, 0x5a, 0xfe, 0xca, + 0x33, 0x37, 0x6c, 0x7b, 0x7c, 0x64, 0x8c, 0x7d, 0x82, 0x6f, 0x73, 0x78, 0x57, 0xa2, 0x77, 0xbc, + 0x6e, 0xe4, 0xea, 0x68, 0x1f, 0xaa, 0xc3, 0x13, 0x6b, 0x6c, 0xca, 0x68, 0x18, 0xda, 0x3e, 0x65, + 0xea, 0xa2, 0xa0, 0xbb, 0x73, 0x89, 0xee, 0x53, 0x9b, 0xb2, 0x27, 0xdb, 0x92, 0xb0, 0x22, 0x50, + 0x9c, 0x62, 0x97, 0x63, 0x6a, 0x7f, 0x50, 0x00, 0xe2, 0x88, 0x43, 0xef, 0x20, 0x4f, 0x28, 0x73, + 0x2d, 0xe2, 0xa9, 0xca, 0x5a, 0x7a, 0xbd, 0xb4, 0xfd, 0x78, 0xf6, 0x70, 0xd5, 0x9a, 0x94, 0xb9, + 0xe7, 0x38, 0x24, 0xa8, 0x6d, 0x42, 0x56, 0xac, 0xa0, 0x2a, 0xa4, 0x4f, 0xc9, 0xb9, 0xc8, 0x1a, + 0x45, 0xcc, 0x87, 0x68, 0x19, 0xb2, 0x67, 0xfc, 0x38, 0x22, 0x5f, 0x14, 0xb1, 0x9c, 0xd4, 0xfe, + 0x92, 0x02, 0x88, 0x3d, 0x13, 0x19, 0xb0, 0x18, 0xf9, 0xa6, 0x3e, 0x31, 0x9c, 0xe0, 0x44, 0xaf, + 0x67, 0x77, 0xee, 0x78, 0xf8, 0xde, 0x70, 0xe4, 0xe9, 0xca, 0x46, 0x62, 0x09, 0x3d, 0x07, 0xd5, + 0x74, 0x6d, 0xc7, 0x21, 0xa6, 0x1e, 0x87, 0x41, 0x70, 0x9b, 0xfc, 0x68, 0x59, 0xbc, 0x12, 0xec, + 0xc7, 0xa4, 0xf2, 0xde, 0x7e, 0x03, 0x4b, 0x97, 0xc8, 0xbf, 0x60, 0xe8, 0xdb, 0xa4, 0xa1, 0xa5, + 0xed, 0x87, 0x57, 0x9c, 0x3d, 0xa2, 0x93, 0x0f, 0x25, 0x71, 0x2f, 0x53, 0xcf, 0x95, 0xda, 0xdf, + 0xb2, 0x50, 0x8c, 0x82, 0x03, 0x69, 0x90, 0x11, 0x39, 0x42, 0xb9, 0x36, 0x47, 0x08, 0x39, 0x74, + 0x04, 0x60, 0x50, 0x6a, 0x33, 0x83, 0x59, 0x36, 0x0d, 0xce, 0xf1, 0x74, 0xe6, 0x58, 0xd4, 0x76, + 0x22, 0x6c, 0x6b, 0x01, 0x27, 0x98, 0xd0, 0xaf, 0x61, 0x71, 0x42, 0x3c, 0xcf, 0x18, 0x05, 0x71, + 0x2e, 0xf2, 0x71, 0x69, 0xfb, 0xd9, 0xec, 0xd4, 0xef, 0x25, 0x5c, 0x4c, 0x5a, 0x0b, 0xb8, 0x3c, + 0x49, 0xcc, 0x6b, 0x7f, 0x57, 0x00, 0x62, 0xdd, 0xa8, 0x03, 0x25, 0x93, 0x78, 0x43, 0xd7, 0x72, + 0x84, 0x19, 0xca, 0x1c, 0xf9, 0x3d, 0x49, 0x70, 0x21, 0x6d, 0xa6, 0xbe, 0x25, 0x6d, 0xd6, 0xfe, + 0xab, 0x40, 0x39, 0x69, 0x0b, 0xfa, 0x00, 0x19, 0x76, 0xee, 0xc8, 0x27, 0xaa, 0x6c, 0xbf, 0x9a, + 0xef, 0x46, 0xb4, 0xfe, 0xb9, 0x43, 0xb0, 0x20, 0x42, 0x15, 0x48, 0x05, 0xc5, 0x35, 0x83, 0x53, + 0x96, 0x89, 0x1e, 0xc1, 0x92, 0x4f, 0x87, 0xf6, 0xc4, 0x71, 0x89, 0xe7, 0x11, 0x53, 0xf7, 0xac, + 0xcf, 0x44, 0xdc, 0x7f, 0x06, 0x57, 0x93, 0x1b, 0x3d, 0xeb, 0x33, 0x41, 0x3f, 0x84, 0x1b, 0x17, + 0x45, 0x33, 0x42, 0xb4, 0x32, 0x2d, 0x58, 0x7f, 0x0a, 0x19, 0xae, 0x13, 0x2d, 0x43, 0xb5, 0xff, + 0x8b, 0x6e, 0x53, 0xff, 0xd4, 0xe9, 0x75, 0x9b, 0xbb, 0xed, 0xfd, 0x76, 0x73, 0xaf, 0xba, 0x80, + 0x0a, 0x90, 0xe9, 0x35, 0x3b, 0xfd, 0xaa, 0x82, 0xca, 0x50, 0xc0, 0xcd, 0xdd, 0x66, 0xfb, 0xa8, + 0xb9, 0x57, 0x4d, 0x35, 0xf2, 0x81, 0x8b, 0xd7, 0xfe, 0xc9, 0x53, 0x49, 0x9c, 0xb7, 0x5b, 0x00, + 0x71, 0x11, 0x08, 0x62, 0xf7, 0xe1, 0xcc, 0x57, 0x81, 0x8b, 0x51, 0x09, 0x40, 0x2f, 0x61, 0x35, + 0x8a, 0xd2, 0xc8, 0x23, 0xa6, 0xc3, 0xf4, 0x76, 0x18, 0xa6, 0xf1, 0xbe, 0x88, 0x53, 0xf4, 0x16, + 0xee, 0x84, 0xd8, 0x29, 0x6f, 0x0d, 0xe1, 0x69, 0x01, 0x0f, 0xf9, 0x93, 0xf7, 0x1f, 0x04, 0xfa, + 0xbf, 0x52, 0x90, 0xe1, 0x25, 0x65, 0xae, 0x06, 0xe8, 0x4d, 0xe0, 0x08, 0x69, 0xe1, 0x08, 0x0f, + 0x67, 0x29, 0x5d, 0xc9, 0x67, 0x9f, 0x76, 0xd2, 0xcc, 0x37, 0xd5, 0xf6, 0xe9, 0x5e, 0x2c, 0xfb, + 0x2d, 0xbd, 0x58, 0xfd, 0xe0, 0x4a, 0x47, 0xb9, 0x05, 0x4b, 0xbb, 0xad, 0xf6, 0xe1, 0x9e, 0x7e, + 0xd8, 0xee, 0x1c, 0x34, 0xf7, 0xf4, 0x5e, 0x77, 0xa7, 0x53, 0x55, 0xd0, 0x0a, 0xa0, 0xee, 0x0e, + 0x6e, 0x76, 0xfa, 0x53, 0xeb, 0xa9, 0xda, 0x6f, 0x21, 0x2b, 0x4a, 0x36, 0x7a, 0x0e, 0x19, 0x5e, + 0xb4, 0x03, 0x57, 0xb9, 0x3f, 0xcb, 0x65, 0x61, 0x81, 0x40, 
0x1a, 0xdc, 0x0c, 0x1f, 0x59, 0x94, + 0xfd, 0x29, 0xd7, 0x58, 0x0a, 0xb6, 0x84, 0x12, 0xf1, 0xa6, 0xf5, 0x37, 0x50, 0x08, 0xfb, 0x36, + 0xb4, 0x0a, 0xb7, 0xf8, 0x41, 0xf4, 0x83, 0x76, 0x67, 0xef, 0x82, 0x21, 0x00, 0xb9, 0x5e, 0x13, + 0x1f, 0x35, 0x71, 0x55, 0xe1, 0xe3, 0xdd, 0xc3, 0x36, 0xf7, 0xff, 0x54, 0xfd, 0x19, 0xe4, 0x64, + 0xaf, 0x80, 0x10, 0x64, 0x86, 0xb6, 0x29, 0x03, 0x3d, 0x8b, 0xc5, 0x18, 0xa9, 0x90, 0x0f, 0x3c, + 0x2d, 0xa8, 0x6e, 0xe1, 0xb4, 0xfe, 0x0f, 0x05, 0x2a, 0xd3, 0x59, 0x1e, 0x7d, 0x84, 0xb2, 0x27, + 0xb2, 0x93, 0x2e, 0xcb, 0xc4, 0x1c, 0x79, 0xad, 0xb5, 0x80, 0x4b, 0x92, 0x43, 0x52, 0x7e, 0x17, + 0x8a, 0x16, 0x65, 0x7a, 0x5c, 0x76, 0xd2, 0xad, 0x05, 0x5c, 0xb0, 0x28, 0x93, 0xdb, 0x77, 0x01, + 0x06, 0xb6, 0x3d, 0x0e, 0xf6, 0xb9, 0x63, 0x16, 0x5a, 0x0b, 0xb8, 0x38, 0x08, 0x5b, 0x0e, 0x74, + 0x0f, 0xca, 0xa6, 0xed, 0x0f, 0xc6, 0x24, 0x10, 0xe1, 0x6e, 0xa7, 0x70, 0x25, 0x72, 0x55, 0x08, + 0x45, 0x41, 0x5f, 0xff, 0x63, 0x0e, 0x20, 0xee, 0x02, 0x51, 0x9f, 0xdb, 0xc3, 0x3b, 0xc8, 0x63, + 0xd7, 0x98, 0x88, 0x26, 0x82, 0xdb, 0xb3, 0x35, 0x53, 0x0b, 0x29, 0x87, 0xfb, 0x02, 0x88, 0x65, + 0x23, 0x2a, 0x27, 0x68, 0x03, 0x6e, 0x26, 0xfa, 0x52, 0xfd, 0xc4, 0xf0, 0x4e, 0xf4, 0x28, 0x1f, + 0x56, 0xe3, 0xc6, 0xb3, 0x65, 0x78, 0x27, 0x6d, 0xb3, 0xf6, 0x9f, 0x74, 0x70, 0x26, 0x01, 0x47, + 0x1f, 0x61, 0xf1, 0xd8, 0xa7, 0x43, 0x9e, 0x14, 0x74, 0xf1, 0x71, 0x30, 0x4f, 0xf1, 0x28, 0x87, + 0x14, 0x1d, 0x4e, 0x39, 0x80, 0x15, 0xdb, 0xb5, 0x46, 0x16, 0x35, 0xc6, 0xfa, 0x34, 0x77, 0x6a, + 0x0e, 0xee, 0xe5, 0x90, 0x6b, 0x3f, 0xa9, 0xa3, 0x0d, 0xc5, 0x63, 0x6b, 0x4c, 0x24, 0x6d, 0x7a, + 0x0e, 0xda, 0x02, 0x87, 0x0b, 0xaa, 0xbb, 0x50, 0x1a, 0x5b, 0x94, 0xe8, 0xd4, 0x9f, 0x0c, 0x88, + 0x2b, 0x5e, 0x34, 0x8d, 0x81, 0x2f, 0x75, 0xc4, 0x0a, 0xba, 0x07, 0x8b, 0x43, 0x7b, 0xec, 0x4f, + 0x68, 0x28, 0x92, 0x15, 0x22, 0x65, 0xb9, 0x18, 0x08, 0x35, 0xa0, 0x34, 0xb6, 0x0d, 0x53, 0x9f, + 0xd8, 0xa6, 0x3f, 0x0e, 0xbf, 0x51, 0xae, 0x6a, 0xa8, 0xdf, 0x0b, 0x41, 0x0c, 0x1c, 0x25, 0xc7, + 0xa8, 0x07, 0x15, 0xd9, 0x1a, 0xeb, 0x67, 0xc4, 0xf5, 0x78, 0x25, 0xcf, 0xcf, 0x61, 0xd9, 0xa2, + 0xe4, 0x38, 0x92, 0x14, 0xb5, 0xdf, 0x2b, 0x50, 0x4a, 0xf8, 0x0e, 0xda, 0x87, 0xac, 0x70, 0xbf, + 0x59, 0x5a, 0xd8, 0x2f, 0x79, 0x1f, 0x96, 0x70, 0xf4, 0x18, 0x96, 0xc3, 0xb4, 0x22, 0xdd, 0x79, + 0x2a, 0xaf, 0xa0, 0x60, 0x4f, 0x2a, 0x95, 0x89, 0xe5, 0xaf, 0x0a, 0xe4, 0x02, 0x4b, 0xf7, 0x20, + 0x17, 0x5c, 0xd4, 0x3c, 0xee, 0x16, 0x60, 0xd1, 0xcf, 0xa0, 0x30, 0xf0, 0x79, 0x9b, 0x1f, 0xb8, + 0xfb, 0xff, 0xcb, 0x93, 0x17, 0xe8, 0xb6, 0x59, 0xff, 0x15, 0x2c, 0x5d, 0xda, 0x8d, 0xdb, 0x70, + 0x25, 0xd1, 0x86, 0x73, 0xb3, 0x99, 0x14, 0x25, 0xa6, 0x3e, 0x38, 0x67, 0x64, 0xda, 0xec, 0x68, + 0xaf, 0x71, 0xce, 0x88, 0x30, 0xbb, 0xf1, 0x27, 0x05, 0xee, 0x58, 0xf6, 0xd7, 0x0f, 0xd6, 0x90, + 0x9f, 0x18, 0x5d, 0xbe, 0xd8, 0x55, 0x7e, 0xd9, 0x18, 0x59, 0xec, 0xc4, 0x1f, 0x68, 0x43, 0x7b, + 0xb2, 0x29, 0xe5, 0x37, 0x2c, 0xea, 0x31, 0xd7, 0x9f, 0x10, 0x2a, 0x8b, 0xf7, 0x66, 0x4c, 0xb5, + 0x21, 0xff, 0xe3, 0x18, 0x11, 0xba, 0x31, 0x8a, 0xff, 0x2c, 0xf9, 0x77, 0x6a, 0xf5, 0x83, 0x43, + 0xe8, 0xae, 0xd4, 0x26, 0x88, 0x65, 0xb1, 0xd2, 0x8e, 0xb6, 0x06, 0x39, 0x01, 0x79, 0xf2, 0xbf, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x56, 0xb6, 0xfd, 0x6c, 0x11, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go new file mode 100644 index 
00000000..02538778 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go @@ -0,0 +1,359 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: opencensus/proto/trace/v1/trace_config.proto + +package v1 + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +var ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", +} + +var ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, +} + +func (x ConstantSampler_ConstantDecision) String() string { + return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x)) +} + +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + // The global default sampler used to make decisions on span sampling. + // + // Types that are valid to be assigned to Sampler: + // *TraceConfig_ProbabilitySampler + // *TraceConfig_ConstantSampler + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. + MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"` + // The global default max number of message events per span. + MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"` + // The global default max number of link entries per span. 
+ MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TraceConfig) Reset() { *m = TraceConfig{} } +func (m *TraceConfig) String() string { return proto.CompactTextString(m) } +func (*TraceConfig) ProtoMessage() {} +func (*TraceConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{0} +} + +func (m *TraceConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TraceConfig.Unmarshal(m, b) +} +func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic) +} +func (m *TraceConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TraceConfig.Merge(m, src) +} +func (m *TraceConfig) XXX_Size() int { + return xxx_messageInfo_TraceConfig.Size(m) +} +func (m *TraceConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TraceConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TraceConfig proto.InternalMessageInfo + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ProbabilitySampler struct { + ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"` +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` +} + +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` +} + +func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler { + if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok { + return x.ProbabilitySampler + } + return nil +} + +func (m *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (m *TraceConfig) GetMaxNumberOfAttributes() int64 { + if m != nil { + return m.MaxNumberOfAttributes + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 { + if m != nil { + return m.MaxNumberOfAnnotations + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 { + if m != nil { + return m.MaxNumberOfMessageEvents + } + return 0 +} + +func (m *TraceConfig) GetMaxNumberOfLinks() int64 { + if m != nil { + return m.MaxNumberOfLinks + } + return 0 +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*TraceConfig) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*TraceConfig_ProbabilitySampler)(nil), + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } +} + +// Sampler that tries to uniformly sample traces with a given probability. 
+// The probability of sampling a trace is equal to that of the specified probability. +type ProbabilitySampler struct { + // The desired probability of sampling. Must be within [0.0, 1.0]. + SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} } +func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) } +func (*ProbabilitySampler) ProtoMessage() {} +func (*ProbabilitySampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{1} +} + +func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b) +} +func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic) +} +func (m *ProbabilitySampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProbabilitySampler.Merge(m, src) +} +func (m *ProbabilitySampler) XXX_Size() int { + return xxx_messageInfo_ProbabilitySampler.Size(m) +} +func (m *ProbabilitySampler) XXX_DiscardUnknown() { + xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo + +func (m *ProbabilitySampler) GetSamplingProbability() float64 { + if m != nil { + return m.SamplingProbability + } + return 0 +} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConstantSampler) Reset() { *m = ConstantSampler{} } +func (m *ConstantSampler) String() string { return proto.CompactTextString(m) } +func (*ConstantSampler) ProtoMessage() {} +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{2} +} + +func (m *ConstantSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConstantSampler.Unmarshal(m, b) +} +func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic) +} +func (m *ConstantSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConstantSampler.Merge(m, src) +} +func (m *ConstantSampler) XXX_Size() int { + return xxx_messageInfo_ConstantSampler.Size(m) +} +func (m *ConstantSampler) XXX_DiscardUnknown() { + xxx_messageInfo_ConstantSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo + +func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if m != nil { + return m.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + // Rate per second. 
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} } +func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) } +func (*RateLimitingSampler) ProtoMessage() {} +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return fileDescriptor_5359209b41ff50c5, []int{3} +} + +func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b) +} +func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic) +} +func (m *RateLimitingSampler) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimitingSampler.Merge(m, src) +} +func (m *RateLimitingSampler) XXX_Size() int { + return xxx_messageInfo_RateLimitingSampler.Size(m) +} +func (m *RateLimitingSampler) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo + +func (m *RateLimitingSampler) GetQps() int64 { + if m != nil { + return m.Qps + } + return 0 +} + +func init() { + proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value) + proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig") + proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler") + proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler") + proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler") +} + +func init() { + proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5) +} + +var fileDescriptor_5359209b41ff50c5 = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0x97, 0x76, 0x6c, 0xec, 0x9b, 0xb6, 0x05, 0x57, 0x43, 0xa9, 0xb4, 0xc3, 0x94, 0x0b, + 0x13, 0x22, 0x09, 0x1d, 0x07, 0x84, 0x90, 0x90, 0xda, 0x6e, 0x15, 0x87, 0xd2, 0x56, 0xd9, 0x44, + 0x05, 0x97, 0xe0, 0x64, 0x6e, 0xb0, 0x68, 0xec, 0x60, 0x3b, 0xd5, 0x78, 0x0d, 0xce, 0x3c, 0x04, + 0xcf, 0xc5, 0x53, 0xa0, 0x3a, 0x21, 0x49, 0xdb, 0x6d, 0xe2, 0x96, 0xef, 0xfb, 0x7f, 0xbf, 0x9f, + 0xad, 0xd8, 0x86, 0x17, 0x3c, 0x25, 0x2c, 0x22, 0x4c, 0x66, 0xd2, 0x4b, 0x05, 0x57, 0xdc, 0x53, + 0x02, 0x47, 0xc4, 0x5b, 0x74, 0xf2, 0x8f, 0x20, 0xe2, 0x6c, 0x46, 0x63, 0x57, 0x67, 0xa8, 0x5d, + 0x4d, 0xe7, 0x1d, 0x57, 0x0f, 0xb9, 0x8b, 0x8e, 0xfd, 0x6b, 0x1b, 0xf6, 0xaf, 0x97, 0x45, 0x5f, + 0x03, 0xe8, 0x0b, 0xb4, 0x52, 0xc1, 0x43, 0x1c, 0xd2, 0x39, 0x55, 0x3f, 0x02, 0x89, 0x93, 0x74, + 0x4e, 0x84, 0x65, 0x9c, 0x1a, 0x67, 0xfb, 0xe7, 0x8e, 0x7b, 0xaf, 0xc8, 0x9d, 0x54, 0xd4, 0x55, + 0x0e, 0xbd, 0xdf, 0xf2, 0x51, 0xba, 0xd1, 0x45, 0x53, 0x30, 0x23, 0xce, 0xa4, 0xc2, 0x4c, 0x95, + 0xfa, 0x86, 0xd6, 0x3f, 0x7f, 0x40, 0xdf, 0x2f, 0x90, 0xca, 0x7d, 0x14, 0xad, 0xb6, 0xd0, 0x0d, + 0x1c, 0x0b, 0xac, 0x48, 0x30, 0xa7, 0x09, 0x55, 0x94, 0xc5, 0xa5, 0xbd, 0xa9, 0xed, 0xee, 0x03, + 0x76, 0x1f, 0x2b, 0x32, 0x2c, 0xb0, 0x6a, 0x85, 0x96, 0xd8, 0x6c, 0xa3, 0xd7, 0x60, 0x25, 0xf8, + 0x36, 0x60, 0x59, 0x12, 0x12, 0x11, 0xf0, 0x59, 0x80, 0x95, 0x12, 0x34, 0xcc, 0x14, 0x91, 0xd6, + 
0xf6, 0xa9, 0x71, 0xd6, 0xf4, 0x8f, 0x13, 0x7c, 0x3b, 0xd2, 0xf1, 0x78, 0xd6, 0x2d, 0x43, 0xf4, + 0x06, 0xda, 0x6b, 0x20, 0x63, 0x5c, 0x61, 0x45, 0x39, 0x93, 0xd6, 0x23, 0x4d, 0x3e, 0xad, 0x93, + 0x55, 0x8a, 0xde, 0xc1, 0xc9, 0x2a, 0x9a, 0x10, 0x29, 0x71, 0x4c, 0x02, 0xb2, 0x20, 0x4c, 0x49, + 0x6b, 0x47, 0xd3, 0x56, 0x8d, 0xfe, 0x90, 0x0f, 0x5c, 0xea, 0x1c, 0x39, 0xd0, 0x5a, 0xe5, 0xe7, + 0x94, 0x7d, 0x93, 0xd6, 0xae, 0xc6, 0xcc, 0x1a, 0x36, 0x5c, 0xf6, 0x7b, 0x7b, 0xb0, 0x5b, 0xfc, + 0x3a, 0x7b, 0x00, 0x68, 0xf3, 0x60, 0xd1, 0x4b, 0x68, 0xe9, 0x01, 0xca, 0xe2, 0x5a, 0xaa, 0x2f, + 0x89, 0xe1, 0xdf, 0x15, 0xd9, 0xbf, 0x0d, 0x38, 0x5a, 0x3b, 0x42, 0x34, 0x85, 0xc7, 0x37, 0x24, + 0xa2, 0x92, 0x72, 0xa6, 0xd1, 0xc3, 0xf3, 0xb7, 0xff, 0x7f, 0x01, 0xca, 0xfa, 0xa2, 0x50, 0xf8, + 0xa5, 0xcc, 0xbe, 0x00, 0x73, 0x3d, 0x45, 0x87, 0x00, 0xdd, 0xe1, 0xb4, 0xfb, 0xe9, 0x2a, 0x18, + 0x0f, 0x06, 0xe6, 0x16, 0x3a, 0x80, 0xbd, 0x7f, 0xf5, 0xc8, 0x34, 0xd0, 0x13, 0x38, 0x28, 0xca, + 0x49, 0xd7, 0xbf, 0x1c, 0x5d, 0x9b, 0x0d, 0xfb, 0x19, 0xb4, 0xee, 0xb8, 0x16, 0xc8, 0x84, 0xe6, + 0xf7, 0x54, 0xea, 0x0d, 0x37, 0xfd, 0xe5, 0x67, 0xef, 0xa7, 0x01, 0x27, 0x94, 0xdf, 0xbf, 0xf5, + 0x9e, 0x59, 0x7b, 0x60, 0x93, 0x65, 0x34, 0x31, 0x3e, 0xf7, 0x62, 0xaa, 0xbe, 0x66, 0xa1, 0x1b, + 0xf1, 0xc4, 0xcb, 0x29, 0x87, 0x32, 0xa9, 0x44, 0x96, 0x10, 0x96, 0x1f, 0xbb, 0x57, 0x09, 0x9d, + 0xfc, 0x89, 0xc7, 0x84, 0x39, 0x71, 0xf5, 0xd2, 0xff, 0x34, 0xda, 0xe3, 0x94, 0xb0, 0x7e, 0xbe, + 0xa6, 0x16, 0xbb, 0x7a, 0x25, 0xf7, 0x63, 0x27, 0xdc, 0xd1, 0xc8, 0xab, 0xbf, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x50, 0x0c, 0xfe, 0x32, 0x29, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/LICENSE new file mode 100644 index 00000000..bc5841fa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2014 CloudFlare Inc. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
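The generated trace_config types above are plain Go structs, with one wrapper struct per `oneof` sampler choice. As a hedged sketch (the import path below is the canonical upstream one; the vendored path in this tree differs, and the alias `tracev1` is our own), a consumer could select a probability sampler and read it back through the generated nil-safe getter:

```go
package main

import (
	"fmt"

	tracev1 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
)

func main() {
	// Wrap the ProbabilitySampler in its generated oneof wrapper type.
	cfg := &tracev1.TraceConfig{
		Sampler: &tracev1.TraceConfig_ProbabilitySampler{
			ProbabilitySampler: &tracev1.ProbabilitySampler{SamplingProbability: 0.25},
		},
		MaxNumberOfAttributes: 32,
	}

	// Generated getters unwrap the oneof and are safe when another
	// sampler (or none) was set: they simply return nil.
	if ps := cfg.GetProbabilitySampler(); ps != nil {
		fmt.Println("sampling probability:", ps.SamplingProbability)
	}
}
```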
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/api/api.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/api/api.go
new file mode 100644
index 00000000..98b0ec46
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/api/api.go
@@ -0,0 +1,231 @@
+// Package api implements an HTTP-based API and server for CFSSL.
+package api
+
+import (
+	"encoding/json"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/log"
+)
+
+// Handler is an interface providing a generic mechanism for handling HTTP requests.
+type Handler interface {
+	Handle(w http.ResponseWriter, r *http.Request) error
+}
+
+// HTTPHandler is a wrapper that encapsulates the Handler interface as an http.Handler.
+// HTTPHandler also enforces that the Handler only responds to requests with registered HTTP methods.
+type HTTPHandler struct {
+	Handler          // CFSSL handler
+	Methods []string // The associated HTTP methods
+}
+
+// HandlerFunc is similar to the http.HandlerFunc type; it serves as
+// an adapter allowing the use of ordinary functions as Handlers. If
+// f is a function with the appropriate signature, HandlerFunc(f) is a
+// Handler object that calls f.
+type HandlerFunc func(http.ResponseWriter, *http.Request) error
+
+// Handle calls f(w, r).
+func (f HandlerFunc) Handle(w http.ResponseWriter, r *http.Request) error {
+	w.Header().Set("Content-Type", "application/json")
+	return f(w, r)
+}
+
+// HandleError is the centralised error handling and reporting.
+func HandleError(w http.ResponseWriter, err error) (code int) {
+	if err == nil {
+		return http.StatusOK
+	}
+	msg := err.Error()
+	httpCode := http.StatusInternalServerError
+
+	// If it is recognized as an HTTPError emitted from cfssl,
+	// we rewrite the status code accordingly. If it is a
+	// cfssl error, set the HTTP status to StatusBadRequest.
+	switch err := err.(type) {
+	case *errors.HTTPError:
+		httpCode = err.StatusCode
+		code = err.StatusCode
+	case *errors.Error:
+		httpCode = http.StatusBadRequest
+		code = err.ErrorCode
+		msg = err.Message
+	}
+
+	response := NewErrorResponse(msg, code)
+	jsonMessage, err := json.Marshal(response)
+	if err != nil {
+		log.Errorf("Failed to marshal JSON: %v", err)
+	} else {
+		msg = string(jsonMessage)
+	}
+	http.Error(w, msg, httpCode)
+	return code
+}
+
+// ServeHTTP encapsulates the call to the underlying Handler to handle the request
+// and return the response with the proper HTTP status code.
+func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	var err error
+	var match bool
+	// Throw 405 when requested with an unsupported verb.
+	for _, m := range h.Methods {
+		if m == r.Method {
+			match = true
+		}
+	}
+	if match {
+		err = h.Handle(w, r)
+	} else {
+		err = errors.NewMethodNotAllowed(r.Method)
+	}
+	status := HandleError(w, err)
+	log.Infof("%s - \"%s %s\" %d", r.RemoteAddr, r.Method, r.URL, status)
+}
+
+// readRequestBlob reads a JSON-blob-encoded request body in the form
+// map[string]string and returns the decoded map and any error that
+// occurred.
+func readRequestBlob(r *http.Request) (map[string]string, error) {
+	var blob map[string]string
+
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, err
+	}
+	r.Body.Close()
+
+	err = json.Unmarshal(body, &blob)
+	if err != nil {
+		return nil, err
+	}
+	return blob, nil
+}
+
+// ProcessRequestOneOf reads a JSON blob for the request and makes
+// sure it contains one of a set of keywords. For example, a request
+// might have the ('foo' && 'bar') keys, OR it might have the 'baz'
+// key. In either case, we want to accept the request; however, if
+// none of these sets shows up, the request is a bad request, and it
+// should be returned.
+func ProcessRequestOneOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
+	blob, err := readRequestBlob(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var matched []string
+	for _, set := range keywordSets {
+		if matchKeywords(blob, set) {
+			if matched != nil {
+				return nil, nil, errors.NewBadRequestString("mismatched parameters")
+			}
+			matched = set
+		}
+	}
+	if matched == nil {
+		return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
+	}
+	return blob, matched, nil
+}
+
+// ProcessRequestFirstMatchOf reads a JSON blob for the request and returns
+// the first match of a set of keywords. For example, a request
+// might have one of the following combinations: (foo=1, bar=2), (foo=1), and (bar=2).
+// By giving a specific ordering of those combinations, we could decide how to accept
+// the request.
+func ProcessRequestFirstMatchOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) {
+	blob, err := readRequestBlob(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	for _, set := range keywordSets {
+		if matchKeywords(blob, set) {
+			return blob, set, nil
+		}
+	}
+	return nil, nil, errors.NewBadRequestString("no valid parameter sets found")
+}
+
+func matchKeywords(blob map[string]string, keywords []string) bool {
+	for _, keyword := range keywords {
+		if _, ok := blob[keyword]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// ResponseMessage implements the standard for response errors and
+// messages. A message has a code and a string message.
+type ResponseMessage struct {
+	Code    int    `json:"code"`
+	Message string `json:"message"`
+}
+
+// Response implements the CloudFlare standard for API
+// responses.
+type Response struct {
+	Success  bool              `json:"success"`
+	Result   interface{}       `json:"result"`
+	Errors   []ResponseMessage `json:"errors"`
+	Messages []ResponseMessage `json:"messages"`
+}
+
+// NewSuccessResponse is a shortcut for creating new successful API
+// responses.
+func NewSuccessResponse(result interface{}) Response {
+	return Response{
+		Success:  true,
+		Result:   result,
+		Errors:   []ResponseMessage{},
+		Messages: []ResponseMessage{},
+	}
+}
+
+// NewSuccessResponseWithMessage is a shortcut for creating new successful API
+// responses that include a message.
+func NewSuccessResponseWithMessage(result interface{}, message string, code int) Response {
+	return Response{
+		Success:  true,
+		Result:   result,
+		Errors:   []ResponseMessage{},
+		Messages: []ResponseMessage{{code, message}},
+	}
+}
+
+// NewErrorResponse is a shortcut for creating an error response for a
+// single error.
+func NewErrorResponse(message string, code int) Response {
+	return Response{
+		Success:  false,
+		Result:   nil,
+		Errors:   []ResponseMessage{{code, message}},
+		Messages: []ResponseMessage{},
+	}
+}
+
+// SendResponse builds a response from the result, sets the JSON
+// header, and writes to the http.ResponseWriter.
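+//
+// For illustration, sending a result of map[string]string{"status": "ok"}
+// writes a JSON envelope shaped like:
+//
+//	{"success":true,"result":{"status":"ok"},"errors":[],"messages":[]}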
+func SendResponse(w http.ResponseWriter, result interface{}) error { + response := NewSuccessResponse(result) + w.Header().Set("Content-Type", "application/json") + enc := json.NewEncoder(w) + err := enc.Encode(response) + return err +} + +// SendResponseWithMessage builds a response from the result and the +// provided message, sets the JSON header, and writes to the +// http.ResponseWriter. +func SendResponseWithMessage(w http.ResponseWriter, result interface{}, message string, code int) error { + response := NewSuccessResponseWithMessage(result, message, code) + w.Header().Set("Content-Type", "application/json") + enc := json.NewEncoder(w) + err := enc.Encode(response) + return err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/auth/auth.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/auth/auth.go new file mode 100644 index 00000000..db2197bb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/auth/auth.go @@ -0,0 +1,94 @@ +// Package auth implements an interface for providing CFSSL +// authentication. This is meant to authenticate a client CFSSL to a +// remote CFSSL in order to prevent unauthorised use of the signature +// capabilities. This package provides both the interface and a +// standard HMAC-based implementation. +package auth + +import ( + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io/ioutil" + "os" + "strings" +) + +// An AuthenticatedRequest contains a request and authentication +// token. The Provider may determine whether to validate the timestamp +// and remote address. +type AuthenticatedRequest struct { + // An Authenticator decides whether to use this field. + Timestamp int64 `json:"timestamp,omitempty"` + RemoteAddress []byte `json:"remote_address,omitempty"` + Token []byte `json:"token"` + Request []byte `json:"request"` +} + +// A Provider can generate tokens from a request and verify a +// request. The handling of additional authentication data (such as +// the IP address) is handled by the concrete type, as is any +// serialisation and state-keeping. +type Provider interface { + Token(req []byte) (token []byte, err error) + Verify(aReq *AuthenticatedRequest) bool +} + +// Standard implements an HMAC-SHA-256 authentication provider. It may +// be supplied additional data at creation time that will be used as +// request || additional-data with the HMAC. +type Standard struct { + key []byte + ad []byte +} + +// New generates a new standard authentication provider from the key +// and additional data. The additional data will be used when +// generating a new token. +func New(key string, ad []byte) (*Standard, error) { + if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 { + switch splitKey[0] { + case "env": + key = os.Getenv(splitKey[1]) + case "file": + data, err := ioutil.ReadFile(splitKey[1]) + if err != nil { + return nil, err + } + key = strings.TrimSpace(string(data)) + default: + return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0]) + } + } + + keyBytes, err := hex.DecodeString(key) + if err != nil { + return nil, err + } + + return &Standard{keyBytes, ad}, nil +} + +// Token generates a new authentication token from the request. +func (p Standard) Token(req []byte) (token []byte, err error) { + h := hmac.New(sha256.New, p.key) + h.Write(req) + h.Write(p.ad) + return h.Sum(nil), nil +} + +// Verify determines whether an authenticated request is valid. 
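+//
+// As a hedged usage sketch (the hex key below is a placeholder, not a real
+// secret), a provider created with New can verify a token it issued:
+//
+//	provider, err := auth.New("000102030405060708090a0b0c0d0e0f", nil)
+//	if err != nil {
+//		// handle error
+//	}
+//	token, _ := provider.Token(reqBytes)
+//	ok := provider.Verify(&auth.AuthenticatedRequest{Token: token, Request: reqBytes})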
+func (p Standard) Verify(ad *AuthenticatedRequest) bool { + if ad == nil { + return false + } + + // Standard token generation returns no error. + token, _ := p.Token(ad.Request) + if len(ad.Token) != len(token) { + return false + } + + return hmac.Equal(token, ad.Token) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/certdb/README.md b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/certdb/README.md new file mode 100644 index 00000000..fbd941e3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/certdb/README.md @@ -0,0 +1,75 @@ +# certdb usage + +Using a database enables additional functionality for existing commands when a +db config is provided: + + - `sign` and `gencert` add a certificate to the certdb after signing it + - `serve` enables database functionality for the sign and revoke endpoints + +A database is required for the following: + + - `revoke` marks certificates revoked in the database with an optional reason + - `ocsprefresh` refreshes the table of cached OCSP responses + - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format + +## Setup/Migration + +This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends. +Currently supported: + - MySQL in mysql + - PostgreSQL in pg + - SQLite in sqlite + +### Get goose + + go get bitbucket.org/liamstask/goose/cmd/goose + +### Use goose to start and terminate a MySQL DB +To start a MySQL using goose: + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up + +To tear down a MySQL DB using goose + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down + +Note: the administration of MySQL DB is not included. We assume +the databases being connected to are already created and access control +is properly handled. + +### Use goose to start and terminate a PostgreSQL DB +To start a PostgreSQL using goose: + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up + +To tear down a PostgreSQL DB using goose + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down + +Note: the administration of PostgreSQL DB is not included. We assume +the databases being connected to are already created and access control +is properly handled. + +### Use goose to start and terminate a SQLite DB +To start a SQLite DB using goose: + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up + +To tear down a SQLite DB using goose + + goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down + +## CFSSL Configuration + +Several cfssl commands take a -db-config flag. Create a file with a +JSON dictionary: + + {"driver":"sqlite3","data_source":"certs.db"} + +or + + {"driver":"postgres","data_source":"postgres://user:password@host/db"} + +or + + {"driver":"mysql","data_source":"user:password@tcp(hostname:3306)/db?parseTime=true"} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/certdb/certdb.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/certdb/certdb.go new file mode 100644 index 00000000..dc8c856c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/certdb/certdb.go @@ -0,0 +1,42 @@ +package certdb + +import ( + "time" +) + +// CertificateRecord encodes a certificate and its metadata +// that will be recorded in a database. 
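+//
+// A hedged illustration of a record as an application might construct it
+// (all field values below are made up):
+//
+//	rec := certdb.CertificateRecord{
+//		Serial:  "1234567890",
+//		AKI:     "deadbeef",
+//		CALabel: "intermediate",
+//		Status:  "good",
+//		Expiry:  time.Now().Add(8760 * time.Hour),
+//		PEM:     "-----BEGIN CERTIFICATE-----\n...",
+//	}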
+type CertificateRecord struct {
+	Serial    string    `db:"serial_number"`
+	AKI       string    `db:"authority_key_identifier"`
+	CALabel   string    `db:"ca_label"`
+	Status    string    `db:"status"`
+	Reason    int       `db:"reason"`
+	Expiry    time.Time `db:"expiry"`
+	RevokedAt time.Time `db:"revoked_at"`
+	PEM       string    `db:"pem"`
+}
+
+// OCSPRecord encodes an OCSP response body and its metadata
+// that will be recorded in a database.
+type OCSPRecord struct {
+	Serial string    `db:"serial_number"`
+	AKI    string    `db:"authority_key_identifier"`
+	Body   string    `db:"body"`
+	Expiry time.Time `db:"expiry"`
+}
+
+// Accessor abstracts the CRUD of certdb objects from a DB.
+type Accessor interface {
+	InsertCertificate(cr CertificateRecord) error
+	GetCertificate(serial, aki string) ([]CertificateRecord, error)
+	GetUnexpiredCertificates() ([]CertificateRecord, error)
+	GetRevokedAndUnexpiredCertificates() ([]CertificateRecord, error)
+	GetRevokedAndUnexpiredCertificatesByLabel(label string) ([]CertificateRecord, error)
+	RevokeCertificate(serial, aki string, reasonCode int) error
+	InsertOCSP(rr OCSPRecord) error
+	GetOCSP(serial, aki string) ([]OCSPRecord, error)
+	GetUnexpiredOCSPs() ([]OCSPRecord, error)
+	UpdateOCSP(serial, aki, body string, expiry time.Time) error
+	UpsertOCSP(serial, aki, body string, expiry time.Time) error
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/config/config.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/config/config.go
new file mode 100644
index 00000000..376d8af2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/config/config.go
@@ -0,0 +1,710 @@
+// Package config contains the configuration logic for CFSSL.
+package config
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/cloudflare/cfssl/auth"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/cloudflare/cfssl/log"
+	ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
+	"github.com/zmap/zlint/lints"
+)
+
+// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
+// not present in a SigningProfile, all of these fields may be copied from the
+// CSR into the signed certificate. If a CSRWhitelist *is* present in a
+// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
+// be copied from the CSR to the signed certificate. Note that some of these
+// fields, like Subject, can be provided or partially provided through the API.
+// Since API clients are expected to be trusted, but CSRs are not, fields
+// provided through the API are not subject to whitelisting through this
+// mechanism.
+type CSRWhitelist struct {
+	Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool
+	DNSNames, IPAddresses, EmailAddresses, URIs                bool
+}
+
+// OID is our own version of asn1's ObjectIdentifier, so we can define a custom
+// JSON marshal / unmarshal.
+type OID asn1.ObjectIdentifier
+
+// CertificatePolicy represents the ASN.1 PolicyInformation structure from
+// https://tools.ietf.org/html/rfc3280.html#page-106.
+// Valid values of Type are "id-qt-unotice" and "id-qt-cps".
+type CertificatePolicy struct {
+	ID         OID
+	Qualifiers []CertificatePolicyQualifier
+}
+
+// CertificatePolicyQualifier represents a single qualifier from an ASN.1
+// PolicyInformation structure.
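+//
+// For example (the OID and URL below are purely illustrative), a policy
+// carrying a CPS pointer qualifier could be expressed as:
+//
+//	CertificatePolicy{
+//		ID:         OID{2, 5, 29, 32, 0}, // anyPolicy, for illustration only
+//		Qualifiers: []CertificatePolicyQualifier{{Type: "id-qt-cps", Value: "https://example.com/cps"}},
+//	}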
+type CertificatePolicyQualifier struct {
+	Type  string
+	Value string
+}
+
+// AuthRemote is an authenticated remote signer.
+type AuthRemote struct {
+	RemoteName  string `json:"remote"`
+	AuthKeyName string `json:"auth_key"`
+}
+
+// CAConstraint specifies various CA constraints on the signed certificate.
+// CAConstraint is verified against (and overrides) the CA
+// extensions in the given CSR.
+type CAConstraint struct {
+	IsCA           bool `json:"is_ca"`
+	MaxPathLen     int  `json:"max_path_len"`
+	MaxPathLenZero bool `json:"max_path_len_zero"`
+}
+
+// A SigningProfile stores information that the CA needs to enforce
+// signature policy.
+type SigningProfile struct {
+	Usage               []string     `json:"usages"`
+	IssuerURL           []string     `json:"issuer_urls"`
+	OCSP                string       `json:"ocsp_url"`
+	CRL                 string       `json:"crl_url"`
+	CAConstraint        CAConstraint `json:"ca_constraint"`
+	OCSPNoCheck         bool         `json:"ocsp_no_check"`
+	ExpiryString        string       `json:"expiry"`
+	BackdateString      string       `json:"backdate"`
+	AuthKeyName         string       `json:"auth_key"`
+	PrevAuthKeyName     string       `json:"prev_auth_key"` // to support key rotation
+	RemoteName          string       `json:"remote"`
+	NotBefore           time.Time    `json:"not_before"`
+	NotAfter            time.Time    `json:"not_after"`
+	NameWhitelistString string       `json:"name_whitelist"`
+	AuthRemote          AuthRemote   `json:"auth_remote"`
+	CTLogServers        []string     `json:"ct_log_servers"`
+	AllowedExtensions   []OID        `json:"allowed_extensions"`
+	CertStore           string       `json:"cert_store"`
+	// LintErrLevel controls preissuance linting for the signing profile.
+	// 0 = no linting is performed [default]
+	// 2..3 = reserved
+	// 4 = all lint results except pass are considered errors
+	// 5 = all lint results except pass and notice are considered errors
+	// 6 = all lint results except pass, notice and warn are considered errors
+	// 7 = all lint results except pass, notice, warn and error are considered errors.
+	LintErrLevel lints.LintStatus `json:"lint_error_level"`
+	// IgnoredLints lists zlint lint names to ignore. Any lint results from
+	// matching lints will be ignored no matter what the configured LintErrLevel
+	// is.
+	IgnoredLints []string `json:"ignored_lints"`
+
+	Policies                    []CertificatePolicy
+	Expiry                      time.Duration
+	Backdate                    time.Duration
+	Provider                    auth.Provider
+	PrevProvider                auth.Provider // to support key rotation
+	RemoteProvider              auth.Provider
+	RemoteServer                string
+	RemoteCAs                   *x509.CertPool
+	ClientCert                  *tls.Certificate
+	CSRWhitelist                *CSRWhitelist
+	NameWhitelist               *regexp.Regexp
+	ExtensionWhitelist          map[string]bool
+	ClientProvidesSerialNumbers bool
+	// IgnoredLintsMap is a bool map created from IgnoredLints when the profile is
+	// loaded. It facilitates set membership testing.
+	IgnoredLintsMap map[string]bool
+}
+
+// UnmarshalJSON unmarshals a JSON string into an OID.
+func (oid *OID) UnmarshalJSON(data []byte) (err error) {
+	if data[0] != '"' || data[len(data)-1] != '"' {
+		return errors.New("OID JSON string not wrapped in quotes: " + string(data))
+	}
+	data = data[1 : len(data)-1]
+	parsedOid, err := parseObjectIdentifier(string(data))
+	if err != nil {
+		return err
+	}
+	*oid = OID(parsedOid)
+	return
+}
+
+// MarshalJSON marshals an oid into a JSON string.
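+// For example, OID{1, 2, 840, 113549} marshals to the JSON string "1.2.840.113549".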
+func (oid OID) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil
+}
+
+func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) {
+	validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString)
+	if err != nil {
+		return
+	}
+	if !validOID {
+		err = errors.New("Invalid OID")
+		return
+	}
+
+	segments := strings.Split(oidString, ".")
+	oid = make(asn1.ObjectIdentifier, len(segments))
+	for i, intString := range segments {
+		oid[i], err = strconv.Atoi(intString)
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+const timeFormat = "2006-01-02T15:04:05"
+
+// populate is used to fill in the fields that are not in JSON.
+//
+// First, the ExpiryString parameter is needed to parse
+// expiration timestamps from JSON. The JSON decoder is not able to
+// decode a string time duration to a time.Duration, so this is called
+// when loading the configuration to properly parse and fill out the
+// Expiry parameter.
+// This function is also used to create references to the auth key
+// and default remote for the profile.
+// It returns nil if ExpiryString is a valid representation of a
+// time.Duration and the AuthKeyString and RemoteName point to
+// valid objects; it returns an error otherwise.
+func (p *SigningProfile) populate(cfg *Config) error {
+	if p == nil {
+		return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile"))
+	}
+
+	var err error
+	if p.RemoteName == "" && p.AuthRemote.RemoteName == "" {
+		log.Debugf("parse expiry in profile")
+		if p.ExpiryString == "" {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string"))
+		}
+
+		dur, err := time.ParseDuration(p.ExpiryString)
+		if err != nil {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+		}
+
+		log.Debugf("expiry is valid")
+		p.Expiry = dur
+
+		if p.BackdateString != "" {
+			dur, err = time.ParseDuration(p.BackdateString)
+			if err != nil {
+				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+			}
+
+			p.Backdate = dur
+		}
+
+		if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err)
+		}
+
+		if len(p.Policies) > 0 {
+			for _, policy := range p.Policies {
+				for _, qualifier := range policy.Qualifiers {
+					if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" {
+						return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+							errors.New("invalid policy qualifier type"))
+					}
+				}
+			}
+		}
+	} else if p.RemoteName != "" {
+		log.Debug("match remote in profile to remotes section")
+		if p.AuthRemote.RemoteName != "" {
+			log.Error("profile has both a remote and an auth remote specified")
+			return cferr.New(cferr.PolicyError, cferr.InvalidPolicy)
+		}
+		if remote := cfg.Remotes[p.RemoteName]; remote != "" {
+			if err := p.updateRemote(remote); err != nil {
+				return err
+			}
+		} else {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to find remote in remotes section"))
+		}
+	} else {
+		log.Debug("match auth remote in profile to remotes section")
+		if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" {
+			if err := p.updateRemote(remote); err != nil {
+				return err
+			}
+		} else {
+			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
+				errors.New("failed to find remote in remotes section"))
+		}
+	}
+
+	if p.AuthKeyName != "" {
+		log.Debug("match auth key in profile to auth_keys section")
+		if key, ok :=
cfg.AuthKeys[p.AuthKeyName]; ok { + if key.Type == "standard" { + p.Provider, err = auth.New(key.Key, nil) + if err != nil { + log.Debugf("failed to create new standard auth provider: %v", err) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to create new standard auth provider")) + } + } else { + log.Debugf("unknown authentication type %v", key.Type) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("unknown authentication type")) + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find auth_key in auth_keys section")) + } + } + + if p.PrevAuthKeyName != "" { + log.Debug("match previous auth key in profile to auth_keys section") + if key, ok := cfg.AuthKeys[p.PrevAuthKeyName]; ok { + if key.Type == "standard" { + p.PrevProvider, err = auth.New(key.Key, nil) + if err != nil { + log.Debugf("failed to create new standard auth provider: %v", err) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to create new standard auth provider")) + } + } else { + log.Debugf("unknown authentication type %v", key.Type) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("unknown authentication type")) + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find prev_auth_key in auth_keys section")) + } + } + + if p.AuthRemote.AuthKeyName != "" { + log.Debug("match auth remote key in profile to auth_keys section") + if key, ok := cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true { + if key.Type == "standard" { + p.RemoteProvider, err = auth.New(key.Key, nil) + if err != nil { + log.Debugf("failed to create new standard auth provider: %v", err) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to create new standard auth provider")) + } + } else { + log.Debugf("unknown authentication type %v", key.Type) + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("unknown authentication type")) + } + } else { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to find auth_remote's auth_key in auth_keys section")) + } + } + + if p.NameWhitelistString != "" { + log.Debug("compiling whitelist regular expression") + rule, err := regexp.Compile(p.NameWhitelistString) + if err != nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to compile name whitelist section")) + } + p.NameWhitelist = rule + } + + p.ExtensionWhitelist = map[string]bool{} + for _, oid := range p.AllowedExtensions { + p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true + } + + p.IgnoredLintsMap = map[string]bool{} + for _, lintName := range p.IgnoredLints { + p.IgnoredLintsMap[lintName] = true + } + + return nil +} + +// updateRemote takes a signing profile and initializes the remote server object +// to the hostname:port combination sent by remote. 
+func (p *SigningProfile) updateRemote(remote string) error {
+	if remote != "" {
+		p.RemoteServer = remote
+	}
+	return nil
+}
+
+// OverrideRemotes takes a signing configuration and updates the remote server object
+// to the hostname:port combination sent by remote
+func (p *Signing) OverrideRemotes(remote string) error {
+	if remote != "" {
+		var err error
+		for _, profile := range p.Profiles {
+			err = profile.updateRemote(remote)
+			if err != nil {
+				return err
+			}
+		}
+		err = p.Default.updateRemote(remote)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// SetClientCertKeyPairFromFile updates the properties to set client certificates for mutually
+// authenticated TLS remote requests
+func (p *Signing) SetClientCertKeyPairFromFile(certFile string, keyFile string) error {
+	if certFile != "" && keyFile != "" {
+		cert, err := helpers.LoadClientCertificate(certFile, keyFile)
+		if err != nil {
+			return err
+		}
+		for _, profile := range p.Profiles {
+			profile.ClientCert = cert
+		}
+		p.Default.ClientCert = cert
+	}
+	return nil
+}
+
+// SetRemoteCAsFromFile reads root CAs from file and updates the properties to set remote CAs for TLS
+// remote requests
+func (p *Signing) SetRemoteCAsFromFile(caFile string) error {
+	if caFile != "" {
+		remoteCAs, err := helpers.LoadPEMCertPool(caFile)
+		if err != nil {
+			return err
+		}
+		p.SetRemoteCAs(remoteCAs)
+	}
+	return nil
+}
+
+// SetRemoteCAs updates the properties to set remote CAs for TLS
+// remote requests
+func (p *Signing) SetRemoteCAs(remoteCAs *x509.CertPool) {
+	for _, profile := range p.Profiles {
+		profile.RemoteCAs = remoteCAs
+	}
+	p.Default.RemoteCAs = remoteCAs
+}
+
+// NeedsRemoteSigner returns true if one of the profiles has a remote set
+func (p *Signing) NeedsRemoteSigner() bool {
+	for _, profile := range p.Profiles {
+		if profile.RemoteServer != "" {
+			return true
+		}
+	}
+
+	if p.Default.RemoteServer != "" {
+		return true
+	}
+
+	return false
+}
+
+// NeedsLocalSigner returns true if one of the profiles does not have a remote set
+func (p *Signing) NeedsLocalSigner() bool {
+	for _, profile := range p.Profiles {
+		if profile.RemoteServer == "" {
+			return true
+		}
+	}
+
+	if p.Default.RemoteServer == "" {
+		return true
+	}
+
+	return false
+}
+
+// Usages parses the list of key uses in the profile, translating them
+// to a list of X.509 key usages and extended key usages. The unknown
+// uses are collected into a slice that is also returned.
+func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
+	for _, keyUse := range p.Usage {
+		if kuse, ok := KeyUsage[keyUse]; ok {
+			ku |= kuse
+		} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
+			eku = append(eku, ekuse)
+		} else {
+			unk = append(unk, keyUse)
+		}
+	}
+	return
+}
+
+// A valid profile must be a valid local profile or a valid remote profile.
+// A valid local profile has defined at least key usages to be used, and a
+// valid local default profile has defined at least a default expiration.
+// A valid remote profile (default or not) has a remote signer initialized.
+// In addition, a remote profile must have a valid auth provider if an auth
+// key is defined. A valid profile must not include a lint_error_level outside of
+// [0,8).
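+//
+// For illustration only (an editorial sketch, not upstream documentation):
+// a minimal local default profile that satisfies these rules could look
+// like the following hypothetical JSON:
+//
+//	{
+//	  "signing": {
+//	    "default": {
+//	      "expiry": "8760h",
+//	      "usages": ["signing", "key encipherment", "server auth"]
+//	    }
+//	  }
+//	}
+//
+// The default profile must define an expiry; non-default local profiles
+// must list at least one usage that resolves via the KeyUsage or
+// ExtKeyUsage maps below.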
+func (p *SigningProfile) validProfile(isDefault bool) bool { + if p == nil { + return false + } + + if p.AuthRemote.RemoteName == "" && p.AuthRemote.AuthKeyName != "" { + log.Debugf("invalid auth remote profile: no remote signer specified") + return false + } + + if p.RemoteName != "" { + log.Debugf("validate remote profile") + + if p.RemoteServer == "" { + log.Debugf("invalid remote profile: no remote signer specified") + return false + } + + if p.AuthKeyName != "" && p.Provider == nil { + log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set") + return false + } + + if p.AuthRemote.RemoteName != "" { + log.Debugf("invalid remote profile: auth remote is also specified") + return false + } + } else if p.AuthRemote.RemoteName != "" { + log.Debugf("validate auth remote profile") + if p.RemoteServer == "" { + log.Debugf("invalid auth remote profile: no remote signer specified") + return false + } + + if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil { + log.Debugf("invalid auth remote profile: no auth key is defined") + return false + } + } else { + log.Debugf("validate local profile") + if !isDefault { + if len(p.Usage) == 0 { + log.Debugf("invalid local profile: no usages specified") + return false + } else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) { + log.Debugf("invalid local profile: no valid usages") + return false + } + } else { + if p.Expiry == 0 { + log.Debugf("invalid local profile: no expiry set") + return false + } + } + } + + if p.LintErrLevel < 0 || p.LintErrLevel >= 8 { + log.Debugf("invalid profile: lint_error_level outside of range [0,8)") + return false + } + + log.Debugf("profile is valid") + return true +} + +// This checks if the SigningProfile object contains configurations that are only effective with a local signer +// which has access to CA private key. +func (p *SigningProfile) hasLocalConfig() bool { + if p.Usage != nil || + p.IssuerURL != nil || + p.OCSP != "" || + p.ExpiryString != "" || + p.BackdateString != "" || + p.CAConstraint.IsCA != false || + !p.NotBefore.IsZero() || + !p.NotAfter.IsZero() || + p.NameWhitelistString != "" || + len(p.CTLogServers) != 0 { + return true + } + return false +} + +// warnSkippedSettings prints a log warning message about skipped settings +// in a SigningProfile, usually due to remote signer. +func (p *Signing) warnSkippedSettings() { + const warningMessage = `The configuration value by "usages", "issuer_urls", "ocsp_url", "crl_url", "ca_constraint", "expiry", "backdate", "not_before", "not_after", "cert_store" and "ct_log_servers" are skipped` + if p == nil { + return + } + + if (p.Default.RemoteName != "" || p.Default.AuthRemote.RemoteName != "") && p.Default.hasLocalConfig() { + log.Warning("default profile points to a remote signer: ", warningMessage) + } + + for name, profile := range p.Profiles { + if (profile.RemoteName != "" || profile.AuthRemote.RemoteName != "") && profile.hasLocalConfig() { + log.Warningf("Profiles[%s] points to a remote signer: %s", name, warningMessage) + } + } +} + +// Signing codifies the signature configuration policy for a CA. +type Signing struct { + Profiles map[string]*SigningProfile `json:"profiles"` + Default *SigningProfile `json:"default"` +} + +// Config stores configuration information for the CA. 
+type Config struct { + Signing *Signing `json:"signing"` + OCSP *ocspConfig.Config `json:"ocsp"` + AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"` + Remotes map[string]string `json:"remotes,omitempty"` +} + +// Valid ensures that Config is a valid configuration. It should be +// called immediately after parsing a configuration file. +func (c *Config) Valid() bool { + return c.Signing.Valid() +} + +// Valid checks the signature policies, ensuring they are valid +// policies. A policy is valid if it has defined at least key usages +// to be used, and a valid default profile has defined at least a +// default expiration. +func (p *Signing) Valid() bool { + if p == nil { + return false + } + + log.Debugf("validating configuration") + if !p.Default.validProfile(true) { + log.Debugf("default profile is invalid") + return false + } + + for _, sp := range p.Profiles { + if !sp.validProfile(false) { + log.Debugf("invalid profile") + return false + } + } + + p.warnSkippedSettings() + + return true +} + +// KeyUsage contains a mapping of string names to key usages. +var KeyUsage = map[string]x509.KeyUsage{ + "signing": x509.KeyUsageDigitalSignature, + "digital signature": x509.KeyUsageDigitalSignature, + "content commitment": x509.KeyUsageContentCommitment, + "key encipherment": x509.KeyUsageKeyEncipherment, + "key agreement": x509.KeyUsageKeyAgreement, + "data encipherment": x509.KeyUsageDataEncipherment, + "cert sign": x509.KeyUsageCertSign, + "crl sign": x509.KeyUsageCRLSign, + "encipher only": x509.KeyUsageEncipherOnly, + "decipher only": x509.KeyUsageDecipherOnly, +} + +// ExtKeyUsage contains a mapping of string names to extended key +// usages. +var ExtKeyUsage = map[string]x509.ExtKeyUsage{ + "any": x509.ExtKeyUsageAny, + "server auth": x509.ExtKeyUsageServerAuth, + "client auth": x509.ExtKeyUsageClientAuth, + "code signing": x509.ExtKeyUsageCodeSigning, + "email protection": x509.ExtKeyUsageEmailProtection, + "s/mime": x509.ExtKeyUsageEmailProtection, + "ipsec end system": x509.ExtKeyUsageIPSECEndSystem, + "ipsec tunnel": x509.ExtKeyUsageIPSECTunnel, + "ipsec user": x509.ExtKeyUsageIPSECUser, + "timestamping": x509.ExtKeyUsageTimeStamping, + "ocsp signing": x509.ExtKeyUsageOCSPSigning, + "microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto, + "netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto, +} + +// An AuthKey contains an entry for a key used for authentication. +type AuthKey struct { + // Type contains information needed to select the appropriate + // constructor. For example, "standard" for HMAC-SHA-256, + // "standard-ip" for HMAC-SHA-256 incorporating the client's + // IP. + Type string `json:"type"` + // Key contains the key information, such as a hex-encoded + // HMAC key. + Key string `json:"key"` +} + +// DefaultConfig returns a default configuration specifying basic key +// usage and a 1 year expiration time. The key usages chosen are +// signing, key encipherment, client auth and server auth. +func DefaultConfig() *SigningProfile { + d := helpers.OneYear + return &SigningProfile{ + Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, + Expiry: d, + ExpiryString: "8760h", + } +} + +// LoadFile attempts to load the configuration file stored at the path +// and returns the configuration. On error, it returns nil. 
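+//
+// A usage sketch (editorial; assumes the package is imported as
+// "github.com/cloudflare/cfssl/config" and that "ca-config.json" is a
+// hypothetical file on disk):
+//
+//	cfg, err := config.LoadFile("ca-config.json")
+//	if err != nil {
+//		log.Fatalf("invalid signing configuration: %v", err)
+//	}
+//	serverProfile := cfg.Signing.Profiles["server"] // nil if undefined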
+func LoadFile(path string) (*Config, error) { + log.Debugf("loading configuration file from %s", path) + if path == "" { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path")) + } + + body, err := ioutil.ReadFile(path) + if err != nil { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file")) + } + + return LoadConfig(body) +} + +// LoadConfig attempts to load the configuration from a byte slice. +// On error, it returns nil. +func LoadConfig(config []byte) (*Config, error) { + var cfg = &Config{} + err := json.Unmarshal(config, &cfg) + if err != nil { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, + errors.New("failed to unmarshal configuration: "+err.Error())) + } + + if cfg.Signing == nil { + return nil, errors.New("No \"signing\" field present") + } + + if cfg.Signing.Default == nil { + log.Debugf("no default given: using default config") + cfg.Signing.Default = DefaultConfig() + } else { + if err := cfg.Signing.Default.populate(cfg); err != nil { + return nil, err + } + } + + for k := range cfg.Signing.Profiles { + if err := cfg.Signing.Profiles[k].populate(cfg); err != nil { + return nil, err + } + } + + if !cfg.Valid() { + return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration")) + } + + log.Debugf("configuration ok") + return cfg, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go new file mode 100644 index 00000000..d57daf51 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go @@ -0,0 +1,188 @@ +// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically +// used to package certificates and CRLs. Using openssl, every certificate converted +// to PKCS #7 format from another encoding such as PEM conforms to this implementation. +// reference: https://www.openssl.org/docs/man1.1.0/apps/crl2pkcs7.html +// +// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315 +// +// The full pkcs#7 cryptographic message syntax allows for cryptographic enhancements, +// for example data can be encrypted and signed and then packaged through pkcs#7 to be +// sent over a network and then verified and decrypted. It is asn1, and the type of +// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is: +// +// ContentInfo ::= SEQUENCE { +// contentType ContentType, +// content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL +// } +// +// There are 6 possible ContentTypes, data, signedData, envelopedData, +// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, Data, and encrypted +// Data are implemented, as the degenerate case of signedData without a signature is the typical +// format for transferring certificates and CRLS, and Data and encryptedData are used in PKCS #12 +// formats. +// The ContentType signedData has the form: +// +// +// signedData ::= SEQUENCE { +// version Version, +// digestAlgorithms DigestAlgorithmIdentifiers, +// contentInfo ContentInfo, +// certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL +// crls [1] IMPLICIT CertificateRevocationLists OPTIONAL, +// signerInfos SignerInfos +// } +// +// As of yet signerInfos and digestAlgorithms are not parsed, as they are not relevant to +// this system's use of PKCS #7 data. 
Version is an integer type, note that PKCS #7 is +// recursive, this second layer of ContentInfo is similar ignored for our degenerate +// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices +// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting +// of any number of extended certificates is not yet supported in this implementation. +// +// The ContentType Data is simply a raw octet string and is parsed directly into a Go []byte slice. +// +// The ContentType encryptedData is the most complicated and its form can be gathered by +// the go type below. It essentially contains a raw octet string of encrypted data and an +// algorithm identifier for use in decrypting this data. +package pkcs7 + +import ( + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + + cferr "github.com/cloudflare/cfssl/errors" +) + +// Types used for asn1 Unmarshaling. + +type signedData struct { + Version int + DigestAlgorithms asn1.RawValue + ContentInfo asn1.RawValue + Certificates asn1.RawValue `asn1:"optional" asn1:"tag:0"` + Crls asn1.RawValue `asn1:"optional"` + SignerInfos asn1.RawValue +} + +type initPKCS7 struct { + Raw asn1.RawContent + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +// Object identifier strings of the three implemented PKCS7 types. +const ( + ObjIDData = "1.2.840.113549.1.7.1" + ObjIDSignedData = "1.2.840.113549.1.7.2" + ObjIDEncryptedData = "1.2.840.113549.1.7.6" +) + +// PKCS7 represents the ASN1 PKCS #7 Content type. It contains one of three +// possible types of Content objects, as denoted by the object identifier in +// the ContentInfo field, the other two being nil. SignedData +// is the degenerate SignedData Content info without signature used +// to hold certificates and crls. Data is raw bytes, and EncryptedData +// is as defined in PKCS #7 standard. +type PKCS7 struct { + Raw asn1.RawContent + ContentInfo string + Content Content +} + +// Content implements three of the six possible PKCS7 data types. Only one is non-nil. +type Content struct { + Data []byte + SignedData SignedData + EncryptedData EncryptedData +} + +// SignedData defines the typical carrier of certificates and crls. +type SignedData struct { + Raw asn1.RawContent + Version int + Certificates []*x509.Certificate + Crl *pkix.CertificateList +} + +// Data contains raw bytes. Used as a subtype in PKCS12. +type Data struct { + Bytes []byte +} + +// EncryptedData contains encrypted data. Used as a subtype in PKCS12. +type EncryptedData struct { + Raw asn1.RawContent + Version int + EncryptedContentInfo EncryptedContentInfo +} + +// EncryptedContentInfo is a subtype of PKCS7EncryptedData. +type EncryptedContentInfo struct { + Raw asn1.RawContent + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +// ParsePKCS7 attempts to parse the DER encoded bytes of a +// PKCS7 structure. 
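+//
+// A usage sketch (editorial; assumes der holds the DER bytes of a
+// certificates-only PKCS #7 blob, e.g. the output of openssl crl2pkcs7):
+//
+//	msg, err := ParsePKCS7(der)
+//	if err != nil {
+//		return err
+//	}
+//	if msg.ContentInfo == "SignedData" {
+//		for _, cert := range msg.Content.SignedData.Certificates {
+//			fmt.Println(cert.Subject.CommonName)
+//		}
+//	}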
+func ParsePKCS7(raw []byte) (msg *PKCS7, err error) { + + var pkcs7 initPKCS7 + _, err = asn1.Unmarshal(raw, &pkcs7) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + + msg = new(PKCS7) + msg.Raw = pkcs7.Raw + msg.ContentInfo = pkcs7.ContentType.String() + switch { + case msg.ContentInfo == ObjIDData: + msg.ContentInfo = "Data" + _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + case msg.ContentInfo == ObjIDSignedData: + msg.ContentInfo = "SignedData" + var signedData signedData + _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + if len(signedData.Certificates.Bytes) != 0 { + msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + } + if len(signedData.Crls.Bytes) != 0 { + msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + } + msg.Content.SignedData.Version = signedData.Version + msg.Content.SignedData.Raw = pkcs7.Content.Bytes + case msg.ContentInfo == ObjIDEncryptedData: + msg.ContentInfo = "EncryptedData" + var encryptedData EncryptedData + _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + if encryptedData.Version != 0 { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0")) + } + msg.Content.EncryptedData = encryptedData + + default: + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data")) + } + + return msg, nil + +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/csr/csr.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/csr/csr.go new file mode 100644 index 00000000..94f05d9b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/csr/csr.go @@ -0,0 +1,430 @@ +// Package csr implements certificate requests for CFSSL. +package csr + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "net" + "net/mail" + "net/url" + "strings" + + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/log" +) + +const ( + curveP256 = 256 + curveP384 = 384 + curveP521 = 521 +) + +// A Name contains the SubjectInfo fields. +type Name struct { + C string `json:"C,omitempty" yaml:"C,omitempty"` // Country + ST string `json:"ST,omitempty" yaml:"ST,omitempty"` // State + L string `json:"L,omitempty" yaml:"L,omitempty"` // Locality + O string `json:"O,omitempty" yaml:"O,omitempty"` // OrganisationName + OU string `json:"OU,omitempty" yaml:"OU,omitempty"` // OrganisationalUnitName + SerialNumber string `json:"SerialNumber,omitempty" yaml:"SerialNumber,omitempty"` +} + +// A KeyRequest contains the algorithm and key size for a new private key. 
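+//
+// In a JSON certificate request this corresponds to (illustrative values
+// only, an editorial sketch):
+//
+//	"key": {
+//	  "algo": "ecdsa",
+//	  "size": 256
+//	}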
+type KeyRequest struct { + A string `json:"algo" yaml:"algo"` + S int `json:"size" yaml:"size"` +} + +// NewKeyRequest returns a default KeyRequest. +func NewKeyRequest() *KeyRequest { + return &KeyRequest{"ecdsa", curveP256} +} + +// Algo returns the requested key algorithm represented as a string. +func (kr *KeyRequest) Algo() string { + return kr.A +} + +// Size returns the requested key size. +func (kr *KeyRequest) Size() int { + return kr.S +} + +// Generate generates a key as specified in the request. Currently, +// only ECDSA and RSA are supported. +func (kr *KeyRequest) Generate() (crypto.PrivateKey, error) { + log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size()) + switch kr.Algo() { + case "rsa": + if kr.Size() < 2048 { + return nil, errors.New("RSA key is too weak") + } + if kr.Size() > 8192 { + return nil, errors.New("RSA key size too large") + } + return rsa.GenerateKey(rand.Reader, kr.Size()) + case "ecdsa": + var curve elliptic.Curve + switch kr.Size() { + case curveP256: + curve = elliptic.P256() + case curveP384: + curve = elliptic.P384() + case curveP521: + curve = elliptic.P521() + default: + return nil, errors.New("invalid curve") + } + return ecdsa.GenerateKey(curve, rand.Reader) + default: + return nil, errors.New("invalid algorithm") + } +} + +// SigAlgo returns an appropriate X.509 signature algorithm given the +// key request's type and size. +func (kr *KeyRequest) SigAlgo() x509.SignatureAlgorithm { + switch kr.Algo() { + case "rsa": + switch { + case kr.Size() >= 4096: + return x509.SHA512WithRSA + case kr.Size() >= 3072: + return x509.SHA384WithRSA + case kr.Size() >= 2048: + return x509.SHA256WithRSA + default: + return x509.SHA1WithRSA + } + case "ecdsa": + switch kr.Size() { + case curveP521: + return x509.ECDSAWithSHA512 + case curveP384: + return x509.ECDSAWithSHA384 + case curveP256: + return x509.ECDSAWithSHA256 + default: + return x509.ECDSAWithSHA1 + } + default: + return x509.UnknownSignatureAlgorithm + } +} + +// CAConfig is a section used in the requests initialising a new CA. +type CAConfig struct { + PathLength int `json:"pathlen" yaml:"pathlen"` + PathLenZero bool `json:"pathlenzero" yaml:"pathlenzero"` + Expiry string `json:"expiry" yaml:"expiry"` + Backdate string `json:"backdate" yaml:"backdate"` +} + +// A CertificateRequest encapsulates the API interface to the +// certificate request functionality. +type CertificateRequest struct { + CN string `json:"CN" yaml:"CN"` + Names []Name `json:"names" yaml:"names"` + Hosts []string `json:"hosts" yaml:"hosts"` + KeyRequest *KeyRequest `json:"key,omitempty" yaml:"key,omitempty"` + CA *CAConfig `json:"ca,omitempty" yaml:"ca,omitempty"` + SerialNumber string `json:"serialnumber,omitempty" yaml:"serialnumber,omitempty"` +} + +// New returns a new, empty CertificateRequest with a +// KeyRequest. +func New() *CertificateRequest { + return &CertificateRequest{ + KeyRequest: NewKeyRequest(), + } +} + +// appendIf appends to a if s is not an empty string. +func appendIf(s string, a *[]string) { + if s != "" { + *a = append(*a, s) + } +} + +// Name returns the PKIX name for the request. 
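+//
+// For example (editorial sketch):
+//
+//	cr := &CertificateRequest{
+//		CN:    "example.com",
+//		Names: []Name{{C: "US", O: "Example Co"}},
+//	}
+//	name := cr.Name()
+//	// name.CommonName == "example.com"
+//	// name.Country == []string{"US"}, name.Organization == []string{"Example Co"}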
+func (cr *CertificateRequest) Name() pkix.Name {
+	var name pkix.Name
+	name.CommonName = cr.CN
+
+	for _, n := range cr.Names {
+		appendIf(n.C, &name.Country)
+		appendIf(n.ST, &name.Province)
+		appendIf(n.L, &name.Locality)
+		appendIf(n.O, &name.Organization)
+		appendIf(n.OU, &name.OrganizationalUnit)
+	}
+	name.SerialNumber = cr.SerialNumber
+	return name
+}
+
+// BasicConstraints CSR information RFC 5280, 4.2.1.9
+type BasicConstraints struct {
+	IsCA       bool `asn1:"optional"`
+	MaxPathLen int  `asn1:"optional,default:-1"`
+}
+
+// ParseRequest takes a certificate request and generates a key and
+// CSR from it. It does no validation -- caveat emptor. It will,
+// however, fail if the key request is not valid (i.e., an unsupported
+// curve or RSA key size). The lack of validation was specifically
+// chosen to allow the end user to define a policy and validate the
+// request appropriately before calling this function.
+func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) {
+	log.Info("received CSR")
+	if req.KeyRequest == nil {
+		req.KeyRequest = NewKeyRequest()
+	}
+
+	log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size())
+	priv, err := req.KeyRequest.Generate()
+	if err != nil {
+		err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err)
+		return
+	}
+
+	switch priv := priv.(type) {
+	case *rsa.PrivateKey:
+		key = x509.MarshalPKCS1PrivateKey(priv)
+		block := pem.Block{
+			Type:  "RSA PRIVATE KEY",
+			Bytes: key,
+		}
+		key = pem.EncodeToMemory(&block)
+	case *ecdsa.PrivateKey:
+		key, err = x509.MarshalECPrivateKey(priv)
+		if err != nil {
+			err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err)
+			return
+		}
+		block := pem.Block{
+			Type:  "EC PRIVATE KEY",
+			Bytes: key,
+		}
+		key = pem.EncodeToMemory(&block)
+	default:
+		panic("Generate should have failed to produce a valid key.")
+	}
+
+	csr, err = Generate(priv.(crypto.Signer), req)
+	if err != nil {
+		log.Errorf("failed to generate a CSR: %v", err)
+		err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err)
+	}
+	return
+}
+
+// ExtractCertificateRequest extracts a CertificateRequest from
+// x509.Certificate. It is intended to be used for generating a new certificate
+// from an existing certificate. For a root certificate, the CA expiry
+// length is calculated as the duration between cert.NotAfter and cert.NotBefore.
+func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest {
+	req := New()
+	req.CN = cert.Subject.CommonName
+	req.Names = getNames(cert.Subject)
+	req.Hosts = getHosts(cert)
+	req.SerialNumber = cert.Subject.SerialNumber
+
+	if cert.IsCA {
+		req.CA = new(CAConfig)
+		// CA expiry length is calculated based on the input cert
+		// issue date and expiry date.
+		req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
+		req.CA.PathLength = cert.MaxPathLen
+		req.CA.PathLenZero = cert.MaxPathLenZero
+	}
+
+	return req
+}
+
+func getHosts(cert *x509.Certificate) []string {
+	var hosts []string
+	for _, ip := range cert.IPAddresses {
+		hosts = append(hosts, ip.String())
+	}
+	for _, dns := range cert.DNSNames {
+		hosts = append(hosts, dns)
+	}
+	for _, email := range cert.EmailAddresses {
+		hosts = append(hosts, email)
+	}
+	for _, uri := range cert.URIs {
+		hosts = append(hosts, uri.String())
+	}
+
+	return hosts
+}
+
+// getNames returns an array of Names from the certificate.
+// It only cares about Country, Organization, OrganizationalUnit, Locality, Province.
+func getNames(sub pkix.Name) []Name {
+	// anonymous func for finding the max of a list of integers
+	max := func(v1 int, vn ...int) (max int) {
+		max = v1
+		for i := 0; i < len(vn); i++ {
+			if vn[i] > max {
+				max = vn[i]
+			}
+		}
+		return max
+	}
+
+	nc := len(sub.Country)
+	norg := len(sub.Organization)
+	nou := len(sub.OrganizationalUnit)
+	nl := len(sub.Locality)
+	np := len(sub.Province)
+
+	n := max(nc, norg, nou, nl, np)
+
+	names := make([]Name, n)
+	for i := range names {
+		if i < nc {
+			names[i].C = sub.Country[i]
+		}
+		if i < norg {
+			names[i].O = sub.Organization[i]
+		}
+		if i < nou {
+			names[i].OU = sub.OrganizationalUnit[i]
+		}
+		if i < nl {
+			names[i].L = sub.Locality[i]
+		}
+		if i < np {
+			names[i].ST = sub.Province[i]
+		}
+	}
+	return names
+}
+
+// A Generator is responsible for validating certificate requests.
+type Generator struct {
+	Validator func(*CertificateRequest) error
+}
+
+// ProcessRequest validates and processes the incoming request. It is
+// a wrapper around a validator and the ParseRequest function.
+func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
+
+	log.Info("generate received request")
+	err = g.Validator(req)
+	if err != nil {
+		log.Warningf("invalid request: %v", err)
+		return nil, nil, err
+	}
+
+	csr, key, err = ParseRequest(req)
+	if err != nil {
+		return nil, nil, err
+	}
+	return
+}
+
+// IsNameEmpty returns true if the name has no identifying information in it.
+func IsNameEmpty(n Name) bool {
+	empty := func(s string) bool { return strings.TrimSpace(s) == "" }
+
+	if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
+		return true
+	}
+	return false
+}
+
+// Regenerate uses the provided CSR as a template for signing a new
+// CSR using priv.
+func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
+	req, extra, err := helpers.ParseCSR(csr)
+	if err != nil {
+		return nil, err
+	} else if len(extra) > 0 {
+		return nil, errors.New("csr: trailing data in certificate request")
+	}
+
+	return x509.CreateCertificateRequest(rand.Reader, req, priv)
+}
+
+// Generate creates a new CSR from a CertificateRequest structure and
+// an existing key. The KeyRequest field is ignored.
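+//
+// A usage sketch (editorial; assumes priv is an existing crypto.Signer,
+// e.g. one returned by helpers.ParsePrivateKeyPEM):
+//
+//	req := &CertificateRequest{CN: "example.com", Hosts: []string{"example.com"}}
+//	csrPEM, err := Generate(priv, req)
+//	if err != nil {
+//		return err
+//	}
+//	// csrPEM holds a PEM-encoded "CERTIFICATE REQUEST" block.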
+func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) { + sigAlgo := helpers.SignerAlgo(priv) + if sigAlgo == x509.UnknownSignatureAlgorithm { + return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable) + } + + var tpl = x509.CertificateRequest{ + Subject: req.Name(), + SignatureAlgorithm: sigAlgo, + } + + for i := range req.Hosts { + if ip := net.ParseIP(req.Hosts[i]); ip != nil { + tpl.IPAddresses = append(tpl.IPAddresses, ip) + } else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil { + tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address) + } else if uri, err := url.ParseRequestURI(req.Hosts[i]); err == nil && uri != nil { + tpl.URIs = append(tpl.URIs, uri) + } else { + tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i]) + } + } + + if req.CA != nil { + err = appendCAInfoToCSR(req.CA, &tpl) + if err != nil { + err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err) + return + } + } + + csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv) + if err != nil { + log.Errorf("failed to generate a CSR: %v", err) + err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err) + return + } + block := pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: csr, + } + + log.Info("encoded CSR") + csr = pem.EncodeToMemory(&block) + return +} + +// appendCAInfoToCSR appends CAConfig BasicConstraint extension to a CSR +func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error { + pathlen := reqConf.PathLength + if pathlen == 0 && !reqConf.PathLenZero { + pathlen = -1 + } + val, err := asn1.Marshal(BasicConstraints{true, pathlen}) + + if err != nil { + return err + } + + csr.ExtraExtensions = []pkix.Extension{ + { + Id: asn1.ObjectIdentifier{2, 5, 29, 19}, + Value: val, + Critical: true, + }, + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/doc.go new file mode 100644 index 00000000..1910e266 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/doc.go @@ -0,0 +1,46 @@ +/* +Package errors provides error types returned in CF SSL. + +1. Type Error is intended for errors produced by CF SSL packages. +It formats to a json object that consists of an error message and a 4-digit code for error reasoning. + +Example: {"code":1002, "message": "Failed to decode certificate"} + +The index of codes are listed below: + 1XXX: CertificateError + 1000: Unknown + 1001: ReadFailed + 1002: DecodeFailed + 1003: ParseFailed + 1100: SelfSigned + 12XX: VerifyFailed + 121X: CertificateInvalid + 1210: NotAuthorizedToSign + 1211: Expired + 1212: CANotAuthorizedForThisName + 1213: TooManyIntermediates + 1214: IncompatibleUsage + 1220: UnknownAuthority + 2XXX: PrivatekeyError + 2000: Unknown + 2001: ReadFailed + 2002: DecodeFailed + 2003: ParseFailed + 2100: Encrypted + 2200: NotRSA + 2300: KeyMismatch + 2400: GenerationFailed + 2500: Unavailable + 3XXX: IntermediatesError + 4XXX: RootError + 5XXX: PolicyError + 5100: NoKeyUsages + 5200: InvalidPolicy + 5300: InvalidRequest + 5400: UnknownProfile + 6XXX: DialError + +2. Type HttpError is intended for CF SSL API to consume. It contains a HTTP status code that will be read and returned +by the API server. 
+*/ +package errors diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/error.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/error.go new file mode 100644 index 00000000..9715a7cf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/error.go @@ -0,0 +1,438 @@ +package errors + +import ( + "crypto/x509" + "encoding/json" + "fmt" +) + +// Error is the error type usually returned by functions in CF SSL package. +// It contains a 4-digit error code where the most significant digit +// describes the category where the error occurred and the rest 3 digits +// describe the specific error reason. +type Error struct { + ErrorCode int `json:"code"` + Message string `json:"message"` +} + +// Category is the most significant digit of the error code. +type Category int + +// Reason is the last 3 digits of the error code. +type Reason int + +const ( + // Success indicates no error occurred. + Success Category = 1000 * iota // 0XXX + + // CertificateError indicates a fault in a certificate. + CertificateError // 1XXX + + // PrivateKeyError indicates a fault in a private key. + PrivateKeyError // 2XXX + + // IntermediatesError indicates a fault in an intermediate. + IntermediatesError // 3XXX + + // RootError indicates a fault in a root. + RootError // 4XXX + + // PolicyError indicates an error arising from a malformed or + // non-existent policy, or a breach of policy. + PolicyError // 5XXX + + // DialError indicates a network fault. + DialError // 6XXX + + // APIClientError indicates a problem with the API client. + APIClientError // 7XXX + + // OCSPError indicates a problem with OCSP signing + OCSPError // 8XXX + + // CSRError indicates a problem with CSR parsing + CSRError // 9XXX + + // CTError indicates a problem with the certificate transparency process + CTError // 10XXX + + // CertStoreError indicates a problem with the certificate store + CertStoreError // 11XXX +) + +// None is a non-specified error. +const ( + None Reason = iota +) + +// Warning code for a success +const ( + BundleExpiringBit int = 1 << iota // 0x01 + BundleNotUbiquitousBit // 0x02 +) + +// Parsing errors +const ( + Unknown Reason = iota // X000 + ReadFailed // X001 + DecodeFailed // X002 + ParseFailed // X003 +) + +// The following represent certificate non-parsing errors, and must be +// specified along with CertificateError. +const ( + // SelfSigned indicates that a certificate is self-signed and + // cannot be used in the manner being attempted. + SelfSigned Reason = 100 * (iota + 1) // Code 11XX + + // VerifyFailed is an X.509 verification failure. The least two + // significant digits of 12XX is determined as the actual x509 + // error is examined. + VerifyFailed // Code 12XX + + // BadRequest indicates that the certificate request is invalid. + BadRequest // Code 13XX + + // MissingSerial indicates that the profile specified + // 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial + // number. + MissingSerial // Code 14XX +) + +const ( + certificateInvalid = 10 * (iota + 1) //121X + unknownAuthority //122x +) + +// The following represent private-key non-parsing errors, and must be +// specified with PrivateKeyError. +const ( + // Encrypted indicates that the private key is a PKCS #8 encrypted + // private key. At this time, CFSSL does not support decrypting + // these keys. 
+	Encrypted Reason = 100 * (iota + 1) //21XX
+
+	// NotRSAOrECC indicates that the key is not an RSA or ECC
+	// private key; these are the only two private key types supported
+	// at this time by CFSSL.
+	NotRSAOrECC //22XX
+
+	// KeyMismatch indicates that the private key does not match
+	// the public key or certificate being presented with the key.
+	KeyMismatch //23XX
+
+	// GenerationFailed indicates that a private key could not
+	// be generated.
+	GenerationFailed //24XX
+
+	// Unavailable indicates that a private key mechanism (such as
+	// PKCS #11) was requested but support for that mechanism is
+	// not available.
+	Unavailable
+)
+
+// The following are policy-related non-parsing errors, and must be
+// specified along with PolicyError.
+const (
+	// NoKeyUsages indicates that the profile does not permit any
+	// key usages for the certificate.
+	NoKeyUsages Reason = 100 * (iota + 1) // 51XX
+
+	// InvalidPolicy indicates that the policy being requested is not
+	// a valid policy or does not exist.
+	InvalidPolicy // 52XX
+
+	// InvalidRequest indicates a certificate request violated the
+	// constraints of the policy being applied to the request.
+	InvalidRequest // 53XX
+
+	// UnknownProfile indicates that the profile does not exist.
+	UnknownProfile // 54XX
+
+	UnmatchedWhitelist // 55xx
+)
+
+// The following are API client related errors, and should be
+// specified with APIClientError.
+const (
+	// AuthenticationFailure occurs when the client is unable
+	// to obtain an authentication token for the request.
+	AuthenticationFailure Reason = 100 * (iota + 1)
+
+	// JSONError wraps an encoding/json error.
+	JSONError
+
+	// IOError wraps an io/ioutil error.
+	IOError
+
+	// ClientHTTPError wraps a net/http error.
+	ClientHTTPError
+
+	// ServerRequestFailed covers any other failures from the API
+	// client.
+	ServerRequestFailed
+)
+
+// The following are OCSP related errors, and should be
+// specified with OCSPError
+const (
+	// IssuerMismatch occurs when the certificate in the OCSP signing
+	// request was not issued by the CA that this responder responds for.
+	IssuerMismatch Reason = 100 * (iota + 1) // 81XX
+
+	// InvalidStatus occurs when the OCSP signing requests includes an
+	// invalid value for the certificate status.
+	InvalidStatus
+)
+
+// Certificate transparency related errors specified with CTError
+const (
+	// PrecertSubmissionFailed occurs when submitting a precertificate to
+	// a log server fails
+	PrecertSubmissionFailed = 100 * (iota + 1)
+	// CTClientConstructionFailed occurs when the construction of a new
+	// github.com/google/certificate-transparency client fails.
+	CTClientConstructionFailed
+	// PrecertMissingPoison occurs when a precert is passed to SignFromPrecert
+	// and is missing the CT poison extension.
+	PrecertMissingPoison
+	// PrecertInvalidPoison occurs when a precert is passed to SignFromPrecert
+	// and has an invalid CT poison extension value or the extension is not
+	// critical.
+	PrecertInvalidPoison
+)
+
+// Certificate persistence related errors specified with CertStoreError
+const (
+	// InsertionFailed occurs when a SQL insert query fails to complete.
+	InsertionFailed = 100 * (iota + 1)
+	// RecordNotFound occurs when a SQL query targeting one unique
+	// record fails to update the specified row in the table.
+	RecordNotFound
+)
+
+// The error interface implementation, which formats to a JSON object string.
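+//
+// For example (editorial sketch), New(CertificateError, ParseFailed)
+// composes code 1000 + 3 and marshals as:
+//
+//	{"code":1003,"message":"Failed to parse certificate"}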
+func (e *Error) Error() string { + marshaled, err := json.Marshal(e) + if err != nil { + panic(err) + } + return string(marshaled) + +} + +// New returns an error that contains an error code and message derived from +// the given category, reason. Currently, to avoid confusion, it is not +// allowed to create an error of category Success +func New(category Category, reason Reason) *Error { + errorCode := int(category) + int(reason) + var msg string + switch category { + case OCSPError: + switch reason { + case ReadFailed: + msg = "No certificate provided" + case IssuerMismatch: + msg = "Certificate not issued by this issuer" + case InvalidStatus: + msg = "Invalid revocation status" + } + case CertificateError: + switch reason { + case Unknown: + msg = "Unknown certificate error" + case ReadFailed: + msg = "Failed to read certificate" + case DecodeFailed: + msg = "Failed to decode certificate" + case ParseFailed: + msg = "Failed to parse certificate" + case SelfSigned: + msg = "Certificate is self signed" + case VerifyFailed: + msg = "Unable to verify certificate" + case BadRequest: + msg = "Invalid certificate request" + case MissingSerial: + msg = "Missing serial number in request" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.", + reason)) + + } + case PrivateKeyError: + switch reason { + case Unknown: + msg = "Unknown private key error" + case ReadFailed: + msg = "Failed to read private key" + case DecodeFailed: + msg = "Failed to decode private key" + case ParseFailed: + msg = "Failed to parse private key" + case Encrypted: + msg = "Private key is encrypted." + case NotRSAOrECC: + msg = "Private key algorithm is not RSA or ECC" + case KeyMismatch: + msg = "Private key does not match public key" + case GenerationFailed: + msg = "Failed to new private key" + case Unavailable: + msg = "Private key is unavailable" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.", + reason)) + } + case IntermediatesError: + switch reason { + case Unknown: + msg = "Unknown intermediate certificate error" + case ReadFailed: + msg = "Failed to read intermediate certificate" + case DecodeFailed: + msg = "Failed to decode intermediate certificate" + case ParseFailed: + msg = "Failed to parse intermediate certificate" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.", + reason)) + } + case RootError: + switch reason { + case Unknown: + msg = "Unknown root certificate error" + case ReadFailed: + msg = "Failed to read root certificate" + case DecodeFailed: + msg = "Failed to decode root certificate" + case ParseFailed: + msg = "Failed to parse root certificate" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.", + reason)) + } + case PolicyError: + switch reason { + case Unknown: + msg = "Unknown policy error" + case NoKeyUsages: + msg = "Invalid policy: no key usage available" + case InvalidPolicy: + msg = "Invalid or unknown policy" + case InvalidRequest: + msg = "Policy violation request" + case UnknownProfile: + msg = "Unknown policy profile" + case UnmatchedWhitelist: + msg = "Request does not match policy whitelist" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.", + reason)) + } + case DialError: + switch reason { + case Unknown: + msg = "Failed to dial remote server" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.", + 
reason)) + } + case APIClientError: + switch reason { + case AuthenticationFailure: + msg = "API client authentication failure" + case JSONError: + msg = "API client JSON config error" + case ClientHTTPError: + msg = "API client HTTP error" + case IOError: + msg = "API client IO error" + case ServerRequestFailed: + msg = "API client error: Server request failed" + default: + panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.", + reason)) + } + case CSRError: + switch reason { + case Unknown: + msg = "CSR parsing failed due to unknown error" + case ReadFailed: + msg = "CSR file read failed" + case ParseFailed: + msg = "CSR Parsing failed" + case DecodeFailed: + msg = "CSR Decode failed" + case BadRequest: + msg = "CSR Bad request" + default: + panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason)) + } + case CTError: + switch reason { + case Unknown: + msg = "Certificate transparency parsing failed due to unknown error" + case PrecertSubmissionFailed: + msg = "Certificate transparency precertificate submission failed" + case PrecertMissingPoison: + msg = "Precertificate is missing CT poison extension" + case PrecertInvalidPoison: + msg = "Precertificate contains an invalid CT poison extension" + default: + panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason)) + } + case CertStoreError: + switch reason { + case Unknown: + msg = "Certificate store action failed due to unknown error" + default: + panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason)) + } + + default: + panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", + category)) + } + return &Error{ErrorCode: errorCode, Message: msg} +} + +// Wrap returns an error that contains the given error and an error code derived from +// the given category, reason and the error. Currently, to avoid confusion, it is not +// allowed to create an error of category Success +func Wrap(category Category, reason Reason, err error) *Error { + errorCode := int(category) + int(reason) + if err == nil { + panic("Wrap needs a supplied error to initialize.") + } + + // do not double wrap a error + switch err.(type) { + case *Error: + panic("Unable to wrap a wrapped error.") + } + + switch category { + case CertificateError: + // given VerifyFailed , report the status with more detailed status code + // for some certificate errors we care. + if reason == VerifyFailed { + switch errorType := err.(type) { + case x509.CertificateInvalidError: + errorCode += certificateInvalid + int(errorType.Reason) + case x509.UnknownAuthorityError: + errorCode += unknownAuthority + } + } + case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError, + APIClientError, CSRError, CTError, CertStoreError, OCSPError: + // no-op, just use the error + default: + panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", + category)) + } + + return &Error{ErrorCode: errorCode, Message: err.Error()} + +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/http.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/http.go new file mode 100644 index 00000000..c9c0a39c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/errors/http.go @@ -0,0 +1,47 @@ +package errors + +import ( + "errors" + "net/http" +) + +// HTTPError is an augmented error with a HTTP status code. 
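+//
+// A usage sketch (editorial): API handlers can return these so the server
+// layer can map the failure to the right HTTP status code:
+//
+//	if req.CN == "" {
+//		return errors.NewBadRequestMissingParameter("CN") // HTTP 400
+//	}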
+type HTTPError struct { + StatusCode int + error +} + +// Error implements the error interface. +func (e *HTTPError) Error() string { + return e.error.Error() +} + +// NewMethodNotAllowed returns an appropriate error in the case that +// an HTTP client uses an invalid method (i.e. a GET in place of a POST) +// on an API endpoint. +func NewMethodNotAllowed(method string) *HTTPError { + return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)} +} + +// NewBadRequest creates a HttpError with the given error and error code 400. +func NewBadRequest(err error) *HTTPError { + return &HTTPError{http.StatusBadRequest, err} +} + +// NewBadRequestString returns a HttpError with the supplied message +// and error code 400. +func NewBadRequestString(s string) *HTTPError { + return NewBadRequest(errors.New(s)) +} + +// NewBadRequestMissingParameter returns a 400 HttpError as a required +// parameter is missing in the HTTP request. +func NewBadRequestMissingParameter(s string) *HTTPError { + return NewBadRequestString(`Missing parameter "` + s + `"`) +} + +// NewBadRequestUnwantedParameter returns a 400 HttpError as a unnecessary +// parameter is present in the HTTP request. +func NewBadRequestUnwantedParameter(s string) *HTTPError { + return NewBadRequestString(`Unwanted parameter "` + s + `"`) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go new file mode 100644 index 00000000..812664ce --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go @@ -0,0 +1,48 @@ +// Package derhelpers implements common functionality +// on DER encoded data +package derhelpers + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + + cferr "github.com/cloudflare/cfssl/errors" + "golang.org/x/crypto/ed25519" +) + +// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, ECDSA, or Ed25519 DER-encoded +// private key. The key must not be in PEM format. +func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) { + generalKey, err := x509.ParsePKCS8PrivateKey(keyDER) + if err != nil { + generalKey, err = x509.ParsePKCS1PrivateKey(keyDER) + if err != nil { + generalKey, err = x509.ParseECPrivateKey(keyDER) + if err != nil { + generalKey, err = ParseEd25519PrivateKey(keyDER) + if err != nil { + // We don't include the actual error into + // the final error. The reason might be + // we don't want to leak any info about + // the private key. 
+				return nil, cferr.New(cferr.PrivateKeyError,
+					cferr.ParseFailed)
+			}
+		}
+	}
+
+	switch generalKey.(type) {
+	case *rsa.PrivateKey:
+		return generalKey.(*rsa.PrivateKey), nil
+	case *ecdsa.PrivateKey:
+		return generalKey.(*ecdsa.PrivateKey), nil
+	case ed25519.PrivateKey:
+		return generalKey.(ed25519.PrivateKey), nil
+	}
+
+	// should never reach here
+	return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/ed25519.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/ed25519.go
new file mode 100644
index 00000000..9220f3e5
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/derhelpers/ed25519.go
@@ -0,0 +1,133 @@
+package derhelpers
+
+import (
+	"crypto"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+
+	"golang.org/x/crypto/ed25519"
+)
+
+var errEd25519WrongID = errors.New("incorrect object identifier")
+var errEd25519WrongKeyType = errors.New("incorrect key type")
+
+// ed25519OID is the OID for the Ed25519 signature scheme: see
+// https://datatracker.ietf.org/doc/draft-ietf-curdle-pkix-04.
+var ed25519OID = asn1.ObjectIdentifier{1, 3, 101, 112}
+
+// subjectPublicKeyInfo reflects the ASN.1 object defined in the X.509 standard.
+//
+// This is defined in crypto/x509 as "publicKeyInfo".
+type subjectPublicKeyInfo struct {
+	Algorithm pkix.AlgorithmIdentifier
+	PublicKey asn1.BitString
+}
+
+// MarshalEd25519PublicKey creates a DER-encoded SubjectPublicKeyInfo for an
+// ed25519 public key, as defined in
+// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04. This is analogous to
+// MarshalPKIXPublicKey in crypto/x509, which doesn't currently support Ed25519.
+func MarshalEd25519PublicKey(pk crypto.PublicKey) ([]byte, error) {
+	pub, ok := pk.(ed25519.PublicKey)
+	if !ok {
+		return nil, errEd25519WrongKeyType
+	}
+
+	spki := subjectPublicKeyInfo{
+		Algorithm: pkix.AlgorithmIdentifier{
+			Algorithm: ed25519OID,
+		},
+		PublicKey: asn1.BitString{
+			BitLength: len(pub) * 8,
+			Bytes:     pub,
+		},
+	}
+
+	return asn1.Marshal(spki)
+}
+
+// ParseEd25519PublicKey returns the Ed25519 public key encoded by the input.
+func ParseEd25519PublicKey(der []byte) (crypto.PublicKey, error) {
+	var spki subjectPublicKeyInfo
+	if rest, err := asn1.Unmarshal(der, &spki); err != nil {
+		return nil, err
+	} else if len(rest) > 0 {
+		return nil, errors.New("SubjectPublicKeyInfo too long")
+	}
+
+	if !spki.Algorithm.Algorithm.Equal(ed25519OID) {
+		return nil, errEd25519WrongID
+	}
+
+	if spki.PublicKey.BitLength != ed25519.PublicKeySize*8 {
+		return nil, errors.New("SubjectPublicKeyInfo PublicKey length mismatch")
+	}
+
+	return ed25519.PublicKey(spki.PublicKey.Bytes), nil
+}
+
+// oneAsymmetricKey reflects the ASN.1 structure for storing private keys in
+// https://tools.ietf.org/html/draft-ietf-curdle-pkix-04, excluding the optional
+// fields, which we don't use here.
+//
+// This is identical to pkcs8 in crypto/x509.
+type oneAsymmetricKey struct {
+	Version    int
+	Algorithm  pkix.AlgorithmIdentifier
+	PrivateKey []byte
+}
+
+// curvePrivateKey is the inner type of the PrivateKey field of
+// oneAsymmetricKey.
+type curvePrivateKey []byte
+
+// MarshalEd25519PrivateKey returns a DER encoding of the input private key as
+// specified in https://tools.ietf.org/html/draft-ietf-curdle-pkix-04.
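+//
+// A round-trip sketch (editorial; ed25519 here is golang.org/x/crypto/ed25519
+// and rand is crypto/rand):
+//
+//	_, priv, _ := ed25519.GenerateKey(rand.Reader)
+//	der, err := MarshalEd25519PrivateKey(priv)
+//	if err != nil {
+//		return err
+//	}
+//	key, err := ParseEd25519PrivateKey(der)
+//	// key.(ed25519.PrivateKey) is equivalent to priv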
+func MarshalEd25519PrivateKey(sk crypto.PrivateKey) ([]byte, error) { + priv, ok := sk.(ed25519.PrivateKey) + if !ok { + return nil, errEd25519WrongKeyType + } + + // Marshal the innter CurvePrivateKey. + curvePrivateKey, err := asn1.Marshal(priv.Seed()) + if err != nil { + return nil, err + } + + // Marshal the OneAsymmetricKey. + asym := oneAsymmetricKey{ + Version: 0, + Algorithm: pkix.AlgorithmIdentifier{ + Algorithm: ed25519OID, + }, + PrivateKey: curvePrivateKey, + } + return asn1.Marshal(asym) +} + +// ParseEd25519PrivateKey returns the Ed25519 private key encoded by the input. +func ParseEd25519PrivateKey(der []byte) (crypto.PrivateKey, error) { + asym := new(oneAsymmetricKey) + if rest, err := asn1.Unmarshal(der, asym); err != nil { + return nil, err + } else if len(rest) > 0 { + return nil, errors.New("OneAsymmetricKey too long") + } + + // Check that the key type is correct. + if !asym.Algorithm.Algorithm.Equal(ed25519OID) { + return nil, errEd25519WrongID + } + + // Unmarshal the inner CurvePrivateKey. + seed := new(curvePrivateKey) + if rest, err := asn1.Unmarshal(asym.PrivateKey, seed); err != nil { + return nil, err + } else if len(rest) > 0 { + return nil, errors.New("CurvePrivateKey too long") + } + + return ed25519.NewKeyFromSeed(*seed), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/helpers.go new file mode 100644 index 00000000..fdc2440d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/helpers/helpers.go @@ -0,0 +1,590 @@ +// Package helpers implements utility functionality common to many +// CFSSL packages. +package helpers + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "io/ioutil" + "os" + + "github.com/google/certificate-transparency-go" + cttls "github.com/google/certificate-transparency-go/tls" + ctx509 "github.com/google/certificate-transparency-go/x509" + "golang.org/x/crypto/ocsp" + + "strings" + "time" + + "github.com/cloudflare/cfssl/crypto/pkcs7" + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers/derhelpers" + "github.com/cloudflare/cfssl/log" + "golang.org/x/crypto/pkcs12" +) + +// OneYear is a time.Duration representing a year's worth of seconds. +const OneYear = 8760 * time.Hour + +// OneDay is a time.Duration representing a day's worth of seconds. +const OneDay = 24 * time.Hour + +// InclusiveDate returns the time.Time representation of a date - 1 +// nanosecond. This allows time.After to be used inclusively. +func InclusiveDate(year int, month time.Month, day int) time.Time { + return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond) +} + +// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop +// issuing certificates valid for more than 5 years. +var Jul2012 = InclusiveDate(2012, time.July, 01) + +// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop +// issuing certificates valid for more than 39 months. 
+var Apr2015 = InclusiveDate(2015, time.April, 01) + +// KeyLength returns the bit size of ECDSA or RSA PublicKey +func KeyLength(key interface{}) int { + if key == nil { + return 0 + } + if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok { + return ecdsaKey.Curve.Params().BitSize + } else if rsaKey, ok := key.(*rsa.PublicKey); ok { + return rsaKey.N.BitLen() + } + + return 0 +} + +// ExpiryTime returns the time when the certificate chain is expired. +func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) { + if len(chain) == 0 { + return + } + + notAfter = chain[0].NotAfter + for _, cert := range chain { + if notAfter.After(cert.NotAfter) { + notAfter = cert.NotAfter + } + } + return +} + +// MonthsValid returns the number of months for which a certificate is valid. +func MonthsValid(c *x509.Certificate) int { + issued := c.NotBefore + expiry := c.NotAfter + years := (expiry.Year() - issued.Year()) + months := years*12 + int(expiry.Month()) - int(issued.Month()) + + // Round up if valid for less than a full month + if expiry.Day() > issued.Day() { + months++ + } + return months +} + +// ValidExpiry determines if a certificate is valid for an acceptable +// length of time per the CA/Browser Forum baseline requirements. +// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf +func ValidExpiry(c *x509.Certificate) bool { + issued := c.NotBefore + + var maxMonths int + switch { + case issued.After(Apr2015): + maxMonths = 39 + case issued.After(Jul2012): + maxMonths = 60 + case issued.Before(Jul2012): + maxMonths = 120 + } + + if MonthsValid(c) > maxMonths { + return false + } + return true +} + +// SignatureString returns the TLS signature string corresponding to +// an X509 signature algorithm. +func SignatureString(alg x509.SignatureAlgorithm) string { + switch alg { + case x509.MD2WithRSA: + return "MD2WithRSA" + case x509.MD5WithRSA: + return "MD5WithRSA" + case x509.SHA1WithRSA: + return "SHA1WithRSA" + case x509.SHA256WithRSA: + return "SHA256WithRSA" + case x509.SHA384WithRSA: + return "SHA384WithRSA" + case x509.SHA512WithRSA: + return "SHA512WithRSA" + case x509.DSAWithSHA1: + return "DSAWithSHA1" + case x509.DSAWithSHA256: + return "DSAWithSHA256" + case x509.ECDSAWithSHA1: + return "ECDSAWithSHA1" + case x509.ECDSAWithSHA256: + return "ECDSAWithSHA256" + case x509.ECDSAWithSHA384: + return "ECDSAWithSHA384" + case x509.ECDSAWithSHA512: + return "ECDSAWithSHA512" + default: + return "Unknown Signature" + } +} + +// HashAlgoString returns the hash algorithm name contains in the signature +// method. 
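+//
+// For example (editorial sketch):
+//
+//	SignatureString(x509.ECDSAWithSHA256) // "ECDSAWithSHA256"
+//	HashAlgoString(x509.ECDSAWithSHA256)  // "SHA256"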
+func HashAlgoString(alg x509.SignatureAlgorithm) string {
+	switch alg {
+	case x509.MD2WithRSA:
+		return "MD2"
+	case x509.MD5WithRSA:
+		return "MD5"
+	case x509.SHA1WithRSA:
+		return "SHA1"
+	case x509.SHA256WithRSA:
+		return "SHA256"
+	case x509.SHA384WithRSA:
+		return "SHA384"
+	case x509.SHA512WithRSA:
+		return "SHA512"
+	case x509.DSAWithSHA1:
+		return "SHA1"
+	case x509.DSAWithSHA256:
+		return "SHA256"
+	case x509.ECDSAWithSHA1:
+		return "SHA1"
+	case x509.ECDSAWithSHA256:
+		return "SHA256"
+	case x509.ECDSAWithSHA384:
+		return "SHA384"
+	case x509.ECDSAWithSHA512:
+		return "SHA512"
+	default:
+		return "Unknown Hash Algorithm"
+	}
+}
+
+// StringTLSVersion returns the underlying enum value for a human-readable TLS
+// version name; it defaults to the current Go default of TLS 1.0.
+func StringTLSVersion(version string) uint16 {
+	switch version {
+	case "1.2":
+		return tls.VersionTLS12
+	case "1.1":
+		return tls.VersionTLS11
+	default:
+		return tls.VersionTLS10
+	}
+}
+
+// EncodeCertificatesPEM encodes a number of x509 certificates to PEM
+func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
+	var buffer bytes.Buffer
+	for _, cert := range certs {
+		pem.Encode(&buffer, &pem.Block{
+			Type:  "CERTIFICATE",
+			Bytes: cert.Raw,
+		})
+	}
+
+	return buffer.Bytes()
+}
+
+// EncodeCertificatePEM encodes a single x509 certificate to PEM
+func EncodeCertificatePEM(cert *x509.Certificate) []byte {
+	return EncodeCertificatesPEM([]*x509.Certificate{cert})
+}
+
+// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and
+// returns them; it can also handle PEM-encoded PKCS #7 structures.
+func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
+	var certs []*x509.Certificate
+	var err error
+	certsPEM = bytes.TrimSpace(certsPEM)
+	for len(certsPEM) > 0 {
+		var cert []*x509.Certificate
+		cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
+		if err != nil {
+			return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+		} else if cert == nil {
+			break
+		}
+
+		certs = append(certs, cert...)
+	}
+	if len(certsPEM) > 0 {
+		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	}
+	return certs, nil
+}
+
+// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
+// either PKCS #7, PKCS #12, or raw x509.
+func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
+	certsDER = bytes.TrimSpace(certsDER)
+	pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
+	if err != nil {
+		var pkcs12data interface{}
+		certs = make([]*x509.Certificate, 1)
+		pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
+		if err != nil {
+			certs, err = x509.ParseCertificates(certsDER)
+			if err != nil {
+				return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+			}
+		} else {
+			key = pkcs12data.(crypto.Signer)
+		}
+	} else {
+		if pkcs7data.ContentInfo != "SignedData" {
+			return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
+		}
+		certs = pkcs7data.Content.SignedData.Certificates
+	}
+	if certs == nil {
+		return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	}
+	return certs, key, nil
+}
+
+// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks whether it is self-signed.
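A hedged usage sketch for bundle parsing with these helpers (the ca-bundle.pem path is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	pemBytes, err := ioutil.ReadFile("ca-bundle.pem") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	certs, err := helpers.ParseCertificatesPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	// ExpiryTime reports the earliest NotAfter across the chain.
	fmt.Println("bundle expires:", helpers.ExpiryTime(certs))
}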
+func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+	cert, err := ParseCertificatePEM(certPEM)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
+	}
+	return cert, nil
+}
+
+// ParseCertificatePEM parses and returns a PEM-encoded certificate;
+// it can also handle PEM-encoded PKCS #7 structures.
+func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
+	certPEM = bytes.TrimSpace(certPEM)
+	cert, rest, err := ParseOneCertificateFromPEM(certPEM)
+	if err != nil {
+		// Log the actual parsing error but throw a default parse error message.
+		log.Debugf("Certificate parsing error: %v", err)
+		return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
+	} else if cert == nil {
+		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
+	} else if len(rest) > 0 {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object"))
+	} else if len(cert) > 1 {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate"))
+	}
+	return cert[0], nil
+}
+
+// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object,
+// either a raw x509 certificate or a PKCS #7 structure possibly containing
+// multiple certificates, from the top of certsPEM, which itself may
+// contain multiple PEM encoded certificate objects.
+func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) {
+	block, rest := pem.Decode(certsPEM)
+	if block == nil {
+		return nil, rest, nil
+	}
+
+	cert, err := x509.ParseCertificate(block.Bytes)
+	if err != nil {
+		pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes)
+		if err != nil {
+			return nil, rest, err
+		}
+		if pkcs7data.ContentInfo != "SignedData" {
+			return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing")
+		}
+		certs := pkcs7data.Content.SignedData.Certificates
+		if certs == nil {
+			return nil, rest, errors.New("PKCS #7 structure contains no certificates")
+		}
+		return certs, rest, nil
+	}
+	var certs = []*x509.Certificate{cert}
+	return certs, rest, nil
+}
+
+// LoadPEMCertPool loads a pool of PEM certificates from file.
+func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) {
+	if certsFile == "" {
+		return nil, nil
+	}
+	pemCerts, err := ioutil.ReadFile(certsFile)
+	if err != nil {
+		return nil, err
+	}
+
+	return PEMToCertPool(pemCerts)
+}
+
+// PEMToCertPool converts PEM certificates to a CertPool.
+func PEMToCertPool(pemCerts []byte) (*x509.CertPool, error) {
+	if len(pemCerts) == 0 {
+		return nil, nil
+	}
+
+	certPool := x509.NewCertPool()
+	if !certPool.AppendCertsFromPEM(pemCerts) {
+		return nil, errors.New("failed to load cert pool")
+	}
+
+	return certPool, nil
+}
+
+// ParsePrivateKeyPEM parses and returns a PEM-encoded private
+// key. The private key may be either an unencrypted PKCS#8, PKCS#1,
+// or elliptic private key.
+func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) {
+	return ParsePrivateKeyPEMWithPassword(keyPEM, nil)
+}
+
+// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private
+// key. The private key may be a potentially encrypted PKCS#8, PKCS#1,
+// or elliptic private key.
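A usage sketch for the key-parsing helpers (the ca-key.pem path is illustrative):

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	keyPEM, err := ioutil.ReadFile("ca-key.pem") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	// ParsePrivateKeyPEM handles unencrypted PKCS#8, PKCS#1, and EC keys;
	// for encrypted PEM, pass the passphrase to ParsePrivateKeyPEMWithPassword.
	key, err := helpers.ParsePrivateKeyPEM(keyPEM)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed key, public part: %T\n", key.Public())
}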
+func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) {
+	keyDER, err := GetKeyDERFromPEM(keyPEM, password)
+	if err != nil {
+		return nil, err
+	}
+
+	return derhelpers.ParsePrivateKeyDER(keyDER)
+}
+
+// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes.
+func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) {
+	keyDER, _ := pem.Decode(in)
+	if keyDER != nil {
+		if procType, ok := keyDER.Headers["Proc-Type"]; ok {
+			if strings.Contains(procType, "ENCRYPTED") {
+				if password != nil {
+					return x509.DecryptPEMBlock(keyDER, password)
+				}
+				return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted)
+			}
+		}
+		return keyDER.Bytes, nil
+	}
+
+	return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed)
+}
+
+// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request.
+func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) {
+	in = bytes.TrimSpace(in)
+	p, rest := pem.Decode(in)
+	if p != nil {
+		if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" {
+			return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest)
+		}
+
+		csr, err = x509.ParseCertificateRequest(p.Bytes)
+	} else {
+		csr, err = x509.ParseCertificateRequest(in)
+	}
+
+	if err != nil {
+		return nil, rest, err
+	}
+
+	err = csr.CheckSignature()
+	if err != nil {
+		return nil, rest, err
+	}
+
+	return csr, rest, nil
+}
+
+// ParseCSRPEM parses a PEM-encoded certificate signing request.
+// It does not check the signature. This is useful for dumping data from a CSR
+// locally.
+func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) {
+	block, _ := pem.Decode([]byte(csrPEM))
+	if block == nil {
+		return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed)
+	}
+	csrObject, err := x509.ParseCertificateRequest(block.Bytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return csrObject, nil
+}
+
+// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer.
+func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+	switch pub := priv.Public().(type) {
+	case *rsa.PublicKey:
+		bitLength := pub.N.BitLen()
+		switch {
+		case bitLength >= 4096:
+			return x509.SHA512WithRSA
+		case bitLength >= 3072:
+			return x509.SHA384WithRSA
+		case bitLength >= 2048:
+			return x509.SHA256WithRSA
+		default:
+			return x509.SHA1WithRSA
+		}
+	case *ecdsa.PublicKey:
+		switch pub.Curve {
+		case elliptic.P521():
+			return x509.ECDSAWithSHA512
+		case elliptic.P384():
+			return x509.ECDSAWithSHA384
+		case elliptic.P256():
+			return x509.ECDSAWithSHA256
+		default:
+			return x509.ECDSAWithSHA1
+		}
+	default:
+		return x509.UnknownSignatureAlgorithm
+	}
+}
+
+// LoadClientCertificate loads a key/certificate pair from PEM files.
+func LoadClientCertificate(certFile string, keyFile string) (*tls.Certificate, error) {
+	if certFile != "" && keyFile != "" {
+		cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+		if err != nil {
+			log.Criticalf("Unable to read client certificate from file: %s or key from file: %s", certFile, keyFile)
+			return nil, err
+		}
+		log.Debug("Client certificate loaded ")
+		return &cert, nil
+	}
+	return nil, nil
+}
+
+// CreateTLSConfig creates a tls.Config object from certs and roots
+func CreateTLSConfig(remoteCAs *x509.CertPool, cert *tls.Certificate) *tls.Config {
+	var certs []tls.Certificate
+	if cert != nil {
+		certs = []tls.Certificate{*cert}
+	}
+	return &tls.Config{
+		Certificates: certs,
+		RootCAs:      remoteCAs,
+	}
+}
+
+// SerializeSCTList serializes a list of SCTs.
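The pool, client-certificate, and TLS-config helpers compose into a client-side tls.Config; a minimal sketch, assuming placeholder file names:

package main

import (
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	roots, err := helpers.LoadPEMCertPool("roots.pem") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	cert, err := helpers.LoadClientCertificate("client.pem", "client-key.pem") // placeholder paths
	if err != nil {
		log.Fatal(err)
	}
	tlsConfig := helpers.CreateTLSConfig(roots, cert)
	_ = tlsConfig // hand to tls.Dial, an http.Transport, etc.
}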
+func SerializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) { + list := ctx509.SignedCertificateTimestampList{} + for _, sct := range sctList { + sctBytes, err := cttls.Marshal(sct) + if err != nil { + return nil, err + } + list.SCTList = append(list.SCTList, ctx509.SerializedSCT{Val: sctBytes}) + } + return cttls.Marshal(list) +} + +// DeserializeSCTList deserializes a list of SCTs. +func DeserializeSCTList(serializedSCTList []byte) ([]ct.SignedCertificateTimestamp, error) { + var sctList ctx509.SignedCertificateTimestampList + rest, err := cttls.Unmarshal(serializedSCTList, &sctList) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, errors.New("serialized SCT list contained trailing garbage")) + } + list := make([]ct.SignedCertificateTimestamp, len(sctList.SCTList)) + for i, serializedSCT := range sctList.SCTList { + var sct ct.SignedCertificateTimestamp + rest, err := cttls.Unmarshal(serializedSCT.Val, &sct) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, errors.New("serialized SCT contained trailing garbage")) + } + list[i] = sct + } + return list, nil +} + +// SCTListFromOCSPResponse extracts the SCTList from an ocsp.Response, +// returning an empty list if the SCT extension was not found or could not be +// unmarshalled. +func SCTListFromOCSPResponse(response *ocsp.Response) ([]ct.SignedCertificateTimestamp, error) { + // This loop finds the SCTListExtension in the OCSP response. + var SCTListExtension, ext pkix.Extension + for _, ext = range response.Extensions { + // sctExtOid is the ObjectIdentifier of a Signed Certificate Timestamp. + sctExtOid := asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 5} + if ext.Id.Equal(sctExtOid) { + SCTListExtension = ext + break + } + } + + // This code block extracts the sctList from the SCT extension. + var sctList []ct.SignedCertificateTimestamp + var err error + if numBytes := len(SCTListExtension.Value); numBytes != 0 { + var serializedSCTList []byte + rest := make([]byte, numBytes) + copy(rest, SCTListExtension.Value) + for len(rest) != 0 { + rest, err = asn1.Unmarshal(rest, &serializedSCTList) + if err != nil { + return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) + } + } + sctList, err = DeserializeSCTList(serializedSCTList) + } + return sctList, err +} + +// ReadBytes reads a []byte either from a file or an environment variable. +// If valFile has a prefix of 'env:', the []byte is read from the environment +// using the subsequent name. If the prefix is 'file:' the []byte is read from +// the subsequent file. If no prefix is provided, valFile is assumed to be a +// file path. 
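ReadBytes, documented above, accepts three source forms; a small illustrative sketch (the paths and the CA_KEY variable are made up):

package main

import (
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	for _, src := range []string{
		"ca-key.pem",      // bare path: read the file directly
		"file:ca-key.pem", // explicit file: prefix
		"env:CA_KEY",      // read from the CA_KEY environment variable
	} {
		if _, err := helpers.ReadBytes(src); err != nil {
			log.Printf("%s: %v", src, err)
		}
	}
}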
+func ReadBytes(valFile string) ([]byte, error) { + switch splitVal := strings.SplitN(valFile, ":", 2); len(splitVal) { + case 1: + return ioutil.ReadFile(valFile) + case 2: + switch splitVal[0] { + case "env": + return []byte(os.Getenv(splitVal[1])), nil + case "file": + return ioutil.ReadFile(splitVal[1]) + default: + return nil, fmt.Errorf("unknown prefix: %s", splitVal[0]) + } + default: + return nil, fmt.Errorf("multiple prefixes: %s", + strings.Join(splitVal[:len(splitVal)-1], ", ")) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/info/info.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/info/info.go new file mode 100644 index 00000000..926a411f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/info/info.go @@ -0,0 +1,15 @@ +// Package info contains the definitions for the info endpoint +package info + +// Req is the request struct for an info API request. +type Req struct { + Label string `json:"label"` + Profile string `json:"profile"` +} + +// Resp is the response for an Info API request. +type Resp struct { + Certificate string `json:"certificate"` + Usage []string `json:"usages"` + ExpiryString string `json:"expiry"` +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/initca/initca.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/initca/initca.go new file mode 100644 index 00000000..2cdc0925 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/initca/initca.go @@ -0,0 +1,249 @@ +// Package initca contains code to initialise a certificate authority, +// generating a new root key and certificate. +package initca + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "time" + + "github.com/cloudflare/cfssl/config" + "github.com/cloudflare/cfssl/csr" + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/log" + "github.com/cloudflare/cfssl/signer" + "github.com/cloudflare/cfssl/signer/local" +) + +// validator contains the default validation logic for certificate +// authority certificates. The only requirement here is that the +// certificate have a non-empty subject field. +func validator(req *csr.CertificateRequest) error { + if req.CN != "" { + return nil + } + + if len(req.Names) == 0 { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information")) + } + + for i := range req.Names { + if csr.IsNameEmpty(req.Names[i]) { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information")) + } + } + + return nil +} + +// New creates a new root certificate from the certificate request. 
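A minimal CA bootstrap sketch using this package (the subject and file names are illustrative, and csr.NewBasicKeyRequest is assumed to be the default key-request constructor in this vendored cfssl revision):

package main

import (
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/initca"
)

func main() {
	req := &csr.CertificateRequest{
		CN:         "Example Root CA",         // illustrative subject
		KeyRequest: csr.NewBasicKeyRequest(), // assumed default constructor
	}
	cert, _, key, err := initca.New(req)
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("ca.pem", cert, 0644); err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("ca-key.pem", key, 0600); err != nil {
		log.Fatal(err)
	}
}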
+func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) {
+	policy := CAPolicy()
+	if req.CA != nil {
+		if req.CA.Expiry != "" {
+			policy.Default.ExpiryString = req.CA.Expiry
+			policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+			if err != nil {
+				return
+			}
+		}
+
+		if req.CA.Backdate != "" {
+			policy.Default.Backdate, err = time.ParseDuration(req.CA.Backdate)
+			if err != nil {
+				return
+			}
+		}
+
+		policy.Default.CAConstraint.MaxPathLen = req.CA.PathLength
+		if req.CA.PathLength != 0 && req.CA.PathLenZero {
+			log.Infof("ignore invalid 'pathlenzero' value")
+		} else {
+			policy.Default.CAConstraint.MaxPathLenZero = req.CA.PathLenZero
+		}
+	}
+
+	g := &csr.Generator{Validator: validator}
+	csrPEM, key, err = g.ProcessRequest(req)
+	if err != nil {
+		log.Errorf("failed to process request: %v", err)
+		key = nil
+		return
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEM(key)
+	if err != nil {
+		log.Errorf("failed to parse private key: %v", err)
+		return
+	}
+
+	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), policy)
+	if err != nil {
+		log.Errorf("failed to create signer: %v", err)
+		return
+	}
+
+	signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)}
+	cert, err = s.Sign(signReq)
+	return
+}
+
+// NewFromPEM creates a new root certificate from the key file passed in.
+func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) {
+	privData, err := helpers.ReadBytes(keyFile)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEM(privData)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return NewFromSigner(req, priv)
+}
+
+// RenewFromPEM re-creates a root certificate from the CA cert and key
+// files. The resulting root certificate will have the input CA certificate
+// as the template and have the same expiry length. E.g. if the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and will expire in one year as well.
+func RenewFromPEM(caFile, keyFile string) ([]byte, error) {
+	caBytes, err := helpers.ReadBytes(caFile)
+	if err != nil {
+		return nil, err
+	}
+
+	ca, err := helpers.ParseCertificatePEM(caBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	keyBytes, err := helpers.ReadBytes(keyFile)
+	if err != nil {
+		return nil, err
+	}
+
+	key, err := helpers.ParsePrivateKeyPEM(keyBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	return RenewFromSigner(ca, key)
+}
+
+// NewFromSigner creates a new root certificate from a crypto.Signer.
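Renewal keeps the original validity length but restarts the clock; an illustrative sketch with placeholder paths:

package main

import (
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/initca"
)

func main() {
	// Re-sign ca.pem with ca-key.pem, keeping the original validity length.
	certPEM, err := initca.RenewFromPEM("ca.pem", "ca-key.pem") // illustrative paths
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("ca-renewed.pem", certPEM, 0644); err != nil {
		log.Fatal(err)
	}
}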
+func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) {
+	policy := CAPolicy()
+	if req.CA != nil {
+		if req.CA.Expiry != "" {
+			policy.Default.ExpiryString = req.CA.Expiry
+			policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry)
+			if err != nil {
+				return nil, nil, err
+			}
+		}
+
+		policy.Default.CAConstraint.MaxPathLen = req.CA.PathLength
+		if req.CA.PathLength != 0 && req.CA.PathLenZero {
+			log.Infof("ignore invalid 'pathlenzero' value")
+		} else {
+			policy.Default.CAConstraint.MaxPathLenZero = req.CA.PathLenZero
+		}
+	}
+
+	csrPEM, err = csr.Generate(priv, req)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), policy)
+	if err != nil {
+		log.Errorf("failed to create signer: %v", err)
+		return
+	}
+
+	signReq := signer.SignRequest{Request: string(csrPEM)}
+	cert, err = s.Sign(signReq)
+	return
+}
+
+// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer.
+// The resulting root certificate will have the CA certificate
+// as the template and have the same expiry length. E.g. if the existing CA
+// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate
+// will be valid from now and will expire in one year as well.
+func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) {
+	if !ca.IsCA {
+		return nil, errors.New("input certificate is not a CA cert")
+	}
+
+	// matching certificate public key vs private key
+	switch {
+	case ca.PublicKeyAlgorithm == x509.RSA:
+		var rsaPublicKey *rsa.PublicKey
+		var ok bool
+		if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+		if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+	case ca.PublicKeyAlgorithm == x509.ECDSA:
+		var ecdsaPublicKey *ecdsa.PublicKey
+		var ok bool
+		if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+		if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 {
+			return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch)
+		}
+	default:
+		return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC)
+	}
+
+	req := csr.ExtractCertificateRequest(ca)
+	cert, _, err := NewFromSigner(req, priv)
+	return cert, err
+}
+
+// CAPolicy contains the CA issuing policy as default policy.
+var CAPolicy = func() *config.Signing {
+	return &config.Signing{
+		Default: &config.SigningProfile{
+			Usage:        []string{"cert sign", "crl sign"},
+			ExpiryString: "43800h",
+			Expiry:       5 * helpers.OneYear,
+			CAConstraint: config.CAConstraint{IsCA: true},
+		},
+	}
+}
+
+// Update copies the CA certificate, updates the NotBefore and
+// NotAfter fields, and then re-signs the certificate.
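Because CAPolicy is a package-level variable rather than a constant, callers can, in principle, swap in their own default issuing policy before calling New; a hedged sketch:

package main

import (
	"github.com/cloudflare/cfssl/config"
	"github.com/cloudflare/cfssl/helpers"
	"github.com/cloudflare/cfssl/initca"
)

func init() {
	// Shorten the default root lifetime to one year. Assumption: replacing
	// this hook before calling initca.New is an intended extension point.
	initca.CAPolicy = func() *config.Signing {
		return &config.Signing{
			Default: &config.SigningProfile{
				Usage:        []string{"cert sign", "crl sign"},
				ExpiryString: "8760h",
				Expiry:       helpers.OneYear,
				CAConstraint: config.CAConstraint{IsCA: true},
			},
		}
	}
}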
+func Update(ca *x509.Certificate, priv crypto.Signer) (cert []byte, err error) { + copy, err := x509.ParseCertificate(ca.Raw) + if err != nil { + return + } + + validity := ca.NotAfter.Sub(ca.NotBefore) + copy.NotBefore = time.Now().Round(time.Minute).Add(-5 * time.Minute) + copy.NotAfter = copy.NotBefore.Add(validity) + cert, err = x509.CreateCertificate(rand.Reader, copy, copy, priv.Public(), priv) + if err != nil { + return + } + + cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert}) + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/log/log.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/log/log.go new file mode 100644 index 00000000..956c9d46 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/log/log.go @@ -0,0 +1,162 @@ +// Package log implements a wrapper around the Go standard library's +// logging package. Clients should set the current log level; only +// messages below that level will actually be logged. For example, if +// Level is set to LevelWarning, only log messages at the Warning, +// Error, and Critical levels will be logged. +package log + +import ( + "fmt" + "log" + "os" +) + +// The following constants represent logging levels in increasing levels of seriousness. +const ( + // LevelDebug is the log level for Debug statements. + LevelDebug = iota + // LevelInfo is the log level for Info statements. + LevelInfo + // LevelWarning is the log level for Warning statements. + LevelWarning + // LevelError is the log level for Error statements. + LevelError + // LevelCritical is the log level for Critical statements. + LevelCritical + // LevelFatal is the log level for Fatal statements. + LevelFatal +) + +var levelPrefix = [...]string{ + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarning: "WARNING", + LevelError: "ERROR", + LevelCritical: "CRITICAL", + LevelFatal: "FATAL", +} + +// Level stores the current logging level. +var Level = LevelInfo + +// SyslogWriter specifies the necessary methods for an alternate output +// destination passed in via SetLogger. +// +// SyslogWriter is satisfied by *syslog.Writer. +type SyslogWriter interface { + Debug(string) + Info(string) + Warning(string) + Err(string) + Crit(string) + Emerg(string) +} + +// syslogWriter stores the SetLogger() parameter. +var syslogWriter SyslogWriter + +// SetLogger sets the output used for output by this package. +// A *syslog.Writer is a good choice for the logger parameter. +// Call with a nil parameter to revert to default behavior. +func SetLogger(logger SyslogWriter) { + syslogWriter = logger +} + +func print(l int, msg string) { + if l >= Level { + if syslogWriter != nil { + switch l { + case LevelDebug: + syslogWriter.Debug(msg) + case LevelInfo: + syslogWriter.Info(msg) + case LevelWarning: + syslogWriter.Warning(msg) + case LevelError: + syslogWriter.Err(msg) + case LevelCritical: + syslogWriter.Crit(msg) + case LevelFatal: + syslogWriter.Emerg(msg) + } + } else { + log.Printf("[%s] %s", levelPrefix[l], msg) + } + } +} + +func outputf(l int, format string, v []interface{}) { + print(l, fmt.Sprintf(format, v...)) +} + +func output(l int, v []interface{}) { + print(l, fmt.Sprint(v...)) +} + +// Fatalf logs a formatted message at the "fatal" level and then exits. The +// arguments are handled in the same manner as fmt.Printf. 
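A short sketch of the intended usage pattern: set Level once, optionally attach a syslog writer (a *syslog.Writer satisfies SyslogWriter), then log through the package-level helpers:

package main

import (
	"log/syslog"

	"github.com/cloudflare/cfssl/log"
)

func main() {
	log.Level = log.LevelWarning // suppress Debug and Info output

	// Optionally mirror output to syslog instead of the standard logger.
	if w, err := syslog.New(syslog.LOG_WARNING, "cfssl"); err == nil {
		log.SetLogger(w)
	}

	log.Debug("not printed")
	log.Warningf("printed at level %d", log.LevelWarning)
}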
+func Fatalf(format string, v ...interface{}) { + outputf(LevelFatal, format, v) + os.Exit(1) +} + +// Fatal logs its arguments at the "fatal" level and then exits. +func Fatal(v ...interface{}) { + output(LevelFatal, v) + os.Exit(1) +} + +// Criticalf logs a formatted message at the "critical" level. The +// arguments are handled in the same manner as fmt.Printf. +func Criticalf(format string, v ...interface{}) { + outputf(LevelCritical, format, v) +} + +// Critical logs its arguments at the "critical" level. +func Critical(v ...interface{}) { + output(LevelCritical, v) +} + +// Errorf logs a formatted message at the "error" level. The arguments +// are handled in the same manner as fmt.Printf. +func Errorf(format string, v ...interface{}) { + outputf(LevelError, format, v) +} + +// Error logs its arguments at the "error" level. +func Error(v ...interface{}) { + output(LevelError, v) +} + +// Warningf logs a formatted message at the "warning" level. The +// arguments are handled in the same manner as fmt.Printf. +func Warningf(format string, v ...interface{}) { + outputf(LevelWarning, format, v) +} + +// Warning logs its arguments at the "warning" level. +func Warning(v ...interface{}) { + output(LevelWarning, v) +} + +// Infof logs a formatted message at the "info" level. The arguments +// are handled in the same manner as fmt.Printf. +func Infof(format string, v ...interface{}) { + outputf(LevelInfo, format, v) +} + +// Info logs its arguments at the "info" level. +func Info(v ...interface{}) { + output(LevelInfo, v) +} + +// Debugf logs a formatted message at the "debug" level. The arguments +// are handled in the same manner as fmt.Printf. +func Debugf(format string, v ...interface{}) { + outputf(LevelDebug, format, v) +} + +// Debug logs its arguments at the "debug" level. +func Debug(v ...interface{}) { + output(LevelDebug, v) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go new file mode 100644 index 00000000..a19b113d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/ocsp/config/config.go @@ -0,0 +1,13 @@ +// Package config in the ocsp directory provides configuration data for an OCSP +// signer. +package config + +import "time" + +// Config contains configuration information required to set up an OCSP signer. +type Config struct { + CACertFile string + ResponderCertFile string + KeyFile string + Interval time.Duration +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/signer/local/local.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/signer/local/local.go new file mode 100644 index 00000000..247b72c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/signer/local/local.go @@ -0,0 +1,673 @@ +// Package local implements certificate signature functionality for CFSSL. 
+package local + +import ( + "bytes" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net" + "net/http" + "net/mail" + "net/url" + "os" + + "github.com/cloudflare/cfssl/certdb" + "github.com/cloudflare/cfssl/config" + cferr "github.com/cloudflare/cfssl/errors" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/info" + "github.com/cloudflare/cfssl/log" + "github.com/cloudflare/cfssl/signer" + "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" + + zx509 "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint" + "github.com/zmap/zlint/lints" + "golang.org/x/net/context" +) + +// Signer contains a signer that uses the standard library to +// support both ECDSA and RSA CA keys. +type Signer struct { + ca *x509.Certificate + priv crypto.Signer + // lintPriv is generated randomly when pre-issuance linting is configured and + // used to sign TBSCertificates for linting. + lintPriv crypto.Signer + policy *config.Signing + sigAlgo x509.SignatureAlgorithm + dbAccessor certdb.Accessor +} + +// NewSigner creates a new Signer directly from a +// private key and certificate, with optional policy. +func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) { + if policy == nil { + policy = &config.Signing{ + Profiles: map[string]*config.SigningProfile{}, + Default: config.DefaultConfig()} + } + + if !policy.Valid() { + return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) + } + + var lintPriv crypto.Signer + // If there is at least one profile (including the default) that configures + // pre-issuance linting then generate the one-off lintPriv key. + for _, profile := range policy.Profiles { + if profile.LintErrLevel > 0 || policy.Default.LintErrLevel > 0 { + // In the future there may be demand for specifying the type of signer used + // for pre-issuance linting in configuration. For now we assume that signing + // with a randomly generated P-256 ECDSA private key is acceptable for all cases + // where linting is requested. + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + return nil, cferr.New(cferr.PrivateKeyError, cferr.GenerationFailed) + } + lintPriv = k + break + } + } + + return &Signer{ + ca: cert, + priv: priv, + lintPriv: lintPriv, + sigAlgo: sigAlgo, + policy: policy, + }, nil +} + +// NewSignerFromFile generates a new local signer from a caFile +// and a caKey file, both PEM encoded. 
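Putting the pieces together, a typical construction-and-signing flow might look like the following sketch (paths and hostnames are placeholders; a nil policy falls back to config.DefaultConfig() inside NewSigner):

package main

import (
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/signer"
	"github.com/cloudflare/cfssl/signer/local"
)

func main() {
	s, err := local.NewSignerFromFile("ca.pem", "ca-key.pem", nil) // placeholder paths
	if err != nil {
		log.Fatal(err)
	}

	csrPEM, err := ioutil.ReadFile("server.csr") // placeholder CSR
	if err != nil {
		log.Fatal(err)
	}
	certPEM, err := s.Sign(signer.SignRequest{
		Request: string(csrPEM),
		Hosts:   []string{"example.com"}, // placeholder SAN
	})
	if err != nil {
		log.Fatal(err)
	}
	if err := ioutil.WriteFile("server.pem", certPEM, 0644); err != nil {
		log.Fatal(err)
	}
}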
+func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) {
+	log.Debug("Loading CA: ", caFile)
+	ca, err := helpers.ReadBytes(caFile)
+	if err != nil {
+		return nil, err
+	}
+	log.Debug("Loading CA key: ", caKeyFile)
+	cakey, err := helpers.ReadBytes(caKeyFile)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err)
+	}
+
+	parsedCa, err := helpers.ParseCertificatePEM(ca)
+	if err != nil {
+		return nil, err
+	}
+
+	strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD")
+	password := []byte(strPassword)
+	if strPassword == "" {
+		password = nil
+	}
+
+	priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password)
+	if err != nil {
+		log.Debugf("Malformed private key %v", err)
+		return nil, err
+	}
+
+	return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy)
+}
+
+// LintError is an error type returned when pre-issuance linting is configured
+// in a signing profile and a TBS Certificate fails linting. It wraps the
+// concrete zlint LintResults so that callers can further inspect the cause of
+// the failing lints.
+type LintError struct {
+	ErrorResults map[string]lints.LintResult
+}
+
+func (e *LintError) Error() string {
+	return fmt.Sprintf("pre-issuance linting found %d error results",
+		len(e.ErrorResults))
+}
+
+// lint performs pre-issuance linting of a given TBS certificate template when
+// the provided errLevel is > 0. Any lint results with a status higher than the
+// errLevel that aren't created by a lint in the ignoreMap will result in
+// a LintError being returned to the caller. Note that the template is provided
+// by-value and not by-reference. This is important as the lint function needs
+// to mutate the template's signature algorithm to match the lintPriv.
+func (s *Signer) lint(template x509.Certificate, errLevel lints.LintStatus, ignoreMap map[string]bool) error {
+	// Always return nil when linting is disabled (lints.Reserved == 0).
+	if errLevel == lints.Reserved {
+		return nil
+	}
+	// Without a lintPriv key to use to sign the tbsCertificate we can't lint it.
+	if s.lintPriv == nil {
+		return cferr.New(cferr.PrivateKeyError, cferr.Unavailable)
+	}
+
+	// The template's SignatureAlgorithm must be mutated to match the lintPriv or
+	// x509.CreateCertificate will error because of the mismatch. At the time of
+	// writing s.lintPriv is always an ECDSA private key. This switch will need to
+	// be expanded if the lint key type is made configurable.
+ switch s.lintPriv.(type) { + case *ecdsa.PrivateKey: + template.SignatureAlgorithm = x509.ECDSAWithSHA256 + default: + return cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch) + } + + prelintBytes, err := x509.CreateCertificate(rand.Reader, &template, s.ca, template.PublicKey, s.lintPriv) + if err != nil { + return cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) + } + prelintCert, err := zx509.ParseCertificate(prelintBytes) + if err != nil { + return cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + errorResults := map[string]lints.LintResult{} + results := zlint.LintCertificate(prelintCert) + for name, res := range results.Results { + if ignoreMap[name] { + continue + } + if res.Status > errLevel { + errorResults[name] = *res + } + } + if len(errorResults) > 0 { + return &LintError{ + ErrorResults: errorResults, + } + } + return nil +} + +func (s *Signer) sign(template *x509.Certificate, lintErrLevel lints.LintStatus, lintIgnore map[string]bool) (cert []byte, err error) { + var initRoot bool + if s.ca == nil { + if !template.IsCA { + err = cferr.New(cferr.PolicyError, cferr.InvalidRequest) + return + } + template.DNSNames = nil + template.EmailAddresses = nil + template.URIs = nil + s.ca = template + initRoot = true + } + + if err := s.lint(*template, lintErrLevel, lintIgnore); err != nil { + return nil, err + } + + derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) + } + if initRoot { + s.ca, err = x509.ParseCertificate(derBytes) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) + } + } + + cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) + log.Infof("signed certificate with serial number %d", template.SerialNumber) + return +} + +// replaceSliceIfEmpty replaces the contents of replaced with newContents if +// the slice referenced by replaced is empty +func replaceSliceIfEmpty(replaced, newContents *[]string) { + if len(*replaced) == 0 { + *replaced = *newContents + } +} + +// PopulateSubjectFromCSR has functionality similar to Name, except +// it fills the fields of the resulting pkix.Name with req's if the +// subject's corresponding fields are empty +func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name { + // if no subject, use req + if s == nil { + return req + } + + name := s.Name() + + if name.CommonName == "" { + name.CommonName = req.CommonName + } + + replaceSliceIfEmpty(&name.Country, &req.Country) + replaceSliceIfEmpty(&name.Province, &req.Province) + replaceSliceIfEmpty(&name.Locality, &req.Locality) + replaceSliceIfEmpty(&name.Organization, &req.Organization) + replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit) + if name.SerialNumber == "" { + name.SerialNumber = req.SerialNumber + } + return name +} + +// OverrideHosts fills template's IPAddresses, EmailAddresses, DNSNames, and URIs with the +// content of hosts, if it is not nil. 
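OverrideHosts classifies each entry by attempting IP, then email, then URI parsing, falling back to a DNS name; a small illustrative sketch:

package main

import (
	"crypto/x509"
	"fmt"

	"github.com/cloudflare/cfssl/signer/local"
)

func main() {
	tmpl := &x509.Certificate{}
	local.OverrideHosts(tmpl, []string{
		"10.0.0.1",            // -> IPAddresses
		"admin@example.com",   // -> EmailAddresses
		"https://example.com", // -> URIs
		"example.com",         // -> DNSNames (nothing else parses)
	})
	fmt.Println(tmpl.IPAddresses, tmpl.EmailAddresses, tmpl.URIs, tmpl.DNSNames)
}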
+func OverrideHosts(template *x509.Certificate, hosts []string) { + if hosts != nil { + template.IPAddresses = []net.IP{} + template.EmailAddresses = []string{} + template.DNSNames = []string{} + template.URIs = []*url.URL{} + } + + for i := range hosts { + if ip := net.ParseIP(hosts[i]); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil { + template.EmailAddresses = append(template.EmailAddresses, email.Address) + } else if uri, err := url.ParseRequestURI(hosts[i]); err == nil && uri != nil { + template.URIs = append(template.URIs, uri) + } else { + template.DNSNames = append(template.DNSNames, hosts[i]) + } + } + +} + +// Sign signs a new certificate based on the PEM-encoded client +// certificate or certificate request with the signing profile, +// specified by profileName. +func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) { + profile, err := signer.Profile(s, req.Profile) + if err != nil { + return + } + + block, _ := pem.Decode([]byte(req.Request)) + if block == nil { + return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed) + } + + if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" { + return nil, cferr.Wrap(cferr.CSRError, + cferr.BadRequest, errors.New("not a csr")) + } + + csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes) + if err != nil { + return nil, err + } + + // Copy out only the fields from the CSR authorized by policy. + safeTemplate := x509.Certificate{} + // If the profile contains no explicit whitelist, assume that all fields + // should be copied from the CSR. + if profile.CSRWhitelist == nil { + safeTemplate = *csrTemplate + } else { + if profile.CSRWhitelist.Subject { + safeTemplate.Subject = csrTemplate.Subject + } + if profile.CSRWhitelist.PublicKeyAlgorithm { + safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm + } + if profile.CSRWhitelist.PublicKey { + safeTemplate.PublicKey = csrTemplate.PublicKey + } + if profile.CSRWhitelist.SignatureAlgorithm { + safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm + } + if profile.CSRWhitelist.DNSNames { + safeTemplate.DNSNames = csrTemplate.DNSNames + } + if profile.CSRWhitelist.IPAddresses { + safeTemplate.IPAddresses = csrTemplate.IPAddresses + } + if profile.CSRWhitelist.EmailAddresses { + safeTemplate.EmailAddresses = csrTemplate.EmailAddresses + } + if profile.CSRWhitelist.URIs { + safeTemplate.URIs = csrTemplate.URIs + } + } + + if req.CRLOverride != "" { + safeTemplate.CRLDistributionPoints = []string{req.CRLOverride} + } + + if safeTemplate.IsCA { + if !profile.CAConstraint.IsCA { + log.Error("local signer policy disallows issuing CA certificate") + return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest) + } + + if s.ca != nil && s.ca.MaxPathLen > 0 { + if safeTemplate.MaxPathLen >= s.ca.MaxPathLen { + log.Error("local signer certificate disallows CA MaxPathLen extending") + // do not sign a cert with pathlen > current + return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest) + } + } else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero { + log.Error("local signer certificate disallows issuing CA certificate") + // signer has pathlen of 0, do not sign more intermediate CAs + return nil, cferr.New(cferr.PolicyError, cferr.InvalidRequest) + } + } + + OverrideHosts(&safeTemplate, req.Hosts) + safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject) + + // If there is a 
whitelist, ensure that both the Common Name and SAN DNSNames match + if profile.NameWhitelist != nil { + if safeTemplate.Subject.CommonName != "" { + if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + for _, name := range safeTemplate.DNSNames { + if profile.NameWhitelist.Find([]byte(name)) == nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + for _, name := range safeTemplate.EmailAddresses { + if profile.NameWhitelist.Find([]byte(name)) == nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + for _, name := range safeTemplate.URIs { + if profile.NameWhitelist.Find([]byte(name.String())) == nil { + return nil, cferr.New(cferr.PolicyError, cferr.UnmatchedWhitelist) + } + } + } + + if profile.ClientProvidesSerialNumbers { + if req.Serial == nil { + return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial) + } + safeTemplate.SerialNumber = req.Serial + } else { + // RFC 5280 4.1.2.2: + // Certificate users MUST be able to handle serialNumber + // values up to 20 octets. Conforming CAs MUST NOT use + // serialNumber values longer than 20 octets. + // + // If CFSSL is providing the serial numbers, it makes + // sense to use the max supported size. + serialNumber := make([]byte, 20) + _, err = io.ReadFull(rand.Reader, serialNumber) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) + } + + // SetBytes interprets buf as the bytes of a big-endian + // unsigned integer. The leading byte should be masked + // off to ensure it isn't negative. + serialNumber[0] &= 0x7F + + safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber) + } + + if len(req.Extensions) > 0 { + for _, ext := range req.Extensions { + oid := asn1.ObjectIdentifier(ext.ID) + if !profile.ExtensionWhitelist[oid.String()] { + return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) + } + + rawValue, err := hex.DecodeString(ext.Value) + if err != nil { + return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err) + } + + safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{ + Id: oid, + Critical: ext.Critical, + Value: rawValue, + }) + } + } + + var distPoints = safeTemplate.CRLDistributionPoints + err = signer.FillTemplate(&safeTemplate, s.policy.Default, profile, req.NotBefore, req.NotAfter) + if err != nil { + return nil, err + } + if distPoints != nil && len(distPoints) > 0 { + safeTemplate.CRLDistributionPoints = distPoints + } + + var certTBS = safeTemplate + + if len(profile.CTLogServers) > 0 || req.ReturnPrecert { + // Add a poison extension which prevents validation + var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}} + var poisonedPreCert = certTBS + poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension) + cert, err = s.sign(&poisonedPreCert, profile.LintErrLevel, profile.IgnoredLintsMap) + if err != nil { + return + } + + if req.ReturnPrecert { + return cert, nil + } + + derCert, _ := pem.Decode(cert) + prechain := []ct.ASN1Cert{{Data: derCert.Bytes}, {Data: s.ca.Raw}} + var sctList []ct.SignedCertificateTimestamp + + for _, server := range profile.CTLogServers { + log.Infof("submitting poisoned precertificate to %s", server) + ctclient, err := client.New(server, nil, jsonclient.Options{}) + if err != nil { + return nil, cferr.Wrap(cferr.CTError, 
cferr.PrecertSubmissionFailed, err)
+			}
+			var resp *ct.SignedCertificateTimestamp
+			ctx := context.Background()
+			resp, err = ctclient.AddPreChain(ctx, prechain)
+			if err != nil {
+				return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err)
+			}
+			sctList = append(sctList, *resp)
+		}
+
+		var serializedSCTList []byte
+		serializedSCTList, err = helpers.SerializeSCTList(sctList)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+		}
+
+		// Serialize again as an octet string before embedding
+		serializedSCTList, err = asn1.Marshal(serializedSCTList)
+		if err != nil {
+			return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+		}
+
+		var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList}
+		certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension)
+	}
+
+	var signedCert []byte
+	signedCert, err = s.sign(&certTBS, profile.LintErrLevel, profile.IgnoredLintsMap)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the AKI from signedCert. This is required to support Go 1.9+.
+	// In prior versions of Go, x509.CreateCertificate updated the
+	// AuthorityKeyId of certTBS.
+	parsedCert, _ := helpers.ParseCertificatePEM(signedCert)
+
+	if s.dbAccessor != nil {
+		var certRecord = certdb.CertificateRecord{
+			Serial: certTBS.SerialNumber.String(),
+			// this relies on the specific behavior of x509.CreateCertificate
+			// which sets the AuthorityKeyId from the signer's SubjectKeyId
+			AKI:     hex.EncodeToString(parsedCert.AuthorityKeyId),
+			CALabel: req.Label,
+			Status:  "good",
+			Expiry:  certTBS.NotAfter,
+			PEM:     string(signedCert),
+		}
+
+		err = s.dbAccessor.InsertCertificate(certRecord)
+		if err != nil {
+			return nil, err
+		}
+		log.Debug("saved certificate with serial number ", certTBS.SerialNumber)
+	}
+
+	return signedCert, nil
+}
+
+// SignFromPrecert creates and signs a certificate from an existing precertificate
+// that was previously signed by Signer.ca and inserts the provided SCTs into the
+// new certificate. The resulting certificate will be an exact copy of the precert
+// except for the removal of the poison extension and the addition of the SCT list
+// extension. SignFromPrecert does not verify that the contents of the certificate
+// still match the signing profile of the signer; it only requires that the precert
+// was previously signed by the Signer's CA. Similarly, any linting configured
+// by the profile used to sign the precert will not be re-applied to the final
+// cert and must be done separately by the caller.
+func (s *Signer) SignFromPrecert(precert *x509.Certificate, scts []ct.SignedCertificateTimestamp) ([]byte, error) {
+	// Verify certificate was signed by s.ca
+	if err := precert.CheckSignatureFrom(s.ca); err != nil {
+		return nil, err
+	}
+
+	// Verify certificate is a precert
+	isPrecert := false
+	poisonIndex := 0
+	for i, ext := range precert.Extensions {
+		if ext.Id.Equal(signer.CTPoisonOID) {
+			if !ext.Critical {
+				return nil, cferr.New(cferr.CTError, cferr.PrecertInvalidPoison)
+			}
+			// Check extension contains ASN.1 NULL
+			if bytes.Compare(ext.Value, []byte{0x05, 0x00}) != 0 {
+				return nil, cferr.New(cferr.CTError, cferr.PrecertInvalidPoison)
+			}
+			isPrecert = true
+			poisonIndex = i
+			break
+		}
+	}
+	if !isPrecert {
+		return nil, cferr.New(cferr.CTError, cferr.PrecertMissingPoison)
+	}
+
+	// Serialize SCTs into list format and create extension
+	serializedList, err := helpers.SerializeSCTList(scts)
+	if err != nil {
+		return nil, err
+	}
+	// Serialize again as an octet string before embedding
+	serializedList, err = asn1.Marshal(serializedList)
+	if err != nil {
+		return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err)
+	}
+	sctExt := pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedList}
+
+	// Create the new tbsCert from precert. Do explicit copies of any slices so that we don't
+	// use memory that may be altered by us or the caller at a later stage.
+	tbsCert := x509.Certificate{
+		SignatureAlgorithm:          precert.SignatureAlgorithm,
+		PublicKeyAlgorithm:          precert.PublicKeyAlgorithm,
+		PublicKey:                   precert.PublicKey,
+		Version:                     precert.Version,
+		SerialNumber:                precert.SerialNumber,
+		Issuer:                      precert.Issuer,
+		Subject:                     precert.Subject,
+		NotBefore:                   precert.NotBefore,
+		NotAfter:                    precert.NotAfter,
+		KeyUsage:                    precert.KeyUsage,
+		BasicConstraintsValid:       precert.BasicConstraintsValid,
+		IsCA:                        precert.IsCA,
+		MaxPathLen:                  precert.MaxPathLen,
+		MaxPathLenZero:              precert.MaxPathLenZero,
+		PermittedDNSDomainsCritical: precert.PermittedDNSDomainsCritical,
+	}
+	if len(precert.Extensions) > 0 {
+		tbsCert.ExtraExtensions = make([]pkix.Extension, len(precert.Extensions))
+		copy(tbsCert.ExtraExtensions, precert.Extensions)
+	}
+
+	// Remove the poison extension from ExtraExtensions
+	tbsCert.ExtraExtensions = append(tbsCert.ExtraExtensions[:poisonIndex], tbsCert.ExtraExtensions[poisonIndex+1:]...)
+	// Insert the SCT list extension
+	tbsCert.ExtraExtensions = append(tbsCert.ExtraExtensions, sctExt)
+
+	// Sign the tbsCert. Linting is always disabled because there is no way for
+	// this API to know the correct lint settings to use because there is no
+	// reference to the signing profile of the precert available.
+	return s.sign(&tbsCert, 0, nil)
+}
+
+// Info returns a populated info.Resp struct or an error.
+func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) {
+	cert, err := s.Certificate(req.Label, req.Profile)
+	if err != nil {
+		return
+	}
+
+	profile, err := signer.Profile(s, req.Profile)
+	if err != nil {
+		return
+	}
+
+	resp = new(info.Resp)
+	if cert.Raw != nil {
+		resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})))
+	}
+	resp.Usage = profile.Usage
+	resp.ExpiryString = profile.ExpiryString
+
+	return
+}
+
+// SigAlgo returns the signer's signature algorithm.
+func (s *Signer) SigAlgo() x509.SignatureAlgorithm {
+	return s.sigAlgo
+}
+
+// Certificate returns the signer's certificate.
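A sketch of querying a constructed signer for its info-endpoint data (the printInfo wrapper and package name are hypothetical; an empty Req selects the default profile):

package signerutil

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/info"
	"github.com/cloudflare/cfssl/signer/local"
)

// printInfo assumes the caller already built a *local.Signer.
func printInfo(s *local.Signer) {
	resp, err := s.Info(info.Req{}) // empty label/profile: default profile
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("usages:", resp.Usage)
	fmt.Println("expiry:", resp.ExpiryString)
	fmt.Println(resp.Certificate)
}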
+func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) {
+	cert := *s.ca
+	return &cert, nil
+}
+
+// SetPolicy sets the signer's signature policy.
+func (s *Signer) SetPolicy(policy *config.Signing) {
+	s.policy = policy
+}
+
+// SetDBAccessor sets the signer's cert db accessor.
+func (s *Signer) SetDBAccessor(dba certdb.Accessor) {
+	s.dbAccessor = dba
+}
+
+// GetDBAccessor returns the signer's cert db accessor.
+func (s *Signer) GetDBAccessor() certdb.Accessor {
+	return s.dbAccessor
+}
+
+// SetReqModifier does nothing for local.
+func (s *Signer) SetReqModifier(func(*http.Request, []byte)) {
+	// noop
+}
+
+// Policy returns the signer's policy.
+func (s *Signer) Policy() *config.Signing {
+	return s.policy
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/signer/signer.go b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/signer/signer.go
new file mode 100644
index 00000000..88000ab1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/cloudflare/cfssl/signer/signer.go
@@ -0,0 +1,438 @@
+// Package signer implements certificate signature functionality for CFSSL.
+package signer
+
+import (
+	"crypto"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	"crypto/sha1"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+	"math/big"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/cloudflare/cfssl/certdb"
+	"github.com/cloudflare/cfssl/config"
+	"github.com/cloudflare/cfssl/csr"
+	cferr "github.com/cloudflare/cfssl/errors"
+	"github.com/cloudflare/cfssl/info"
+)
+
+// Subject contains the information that should be used to override the
+// subject information when signing a certificate.
+type Subject struct {
+	CN           string
+	Names        []csr.Name `json:"names"`
+	SerialNumber string
+}
+
+// Extension represents a raw extension to be included in the certificate. The
+// "value" field must be hex encoded.
+type Extension struct {
+	ID       config.OID `json:"id"`
+	Critical bool       `json:"critical"`
+	Value    string     `json:"value"`
+}
+
+// SignRequest stores a signature request, which contains the hostname,
+// the CSR, optional subject information, and the signature profile.
+//
+// Extensions provided in the signRequest are copied into the certificate, as
+// long as they are in the ExtensionWhitelist for the signer's policy.
+// Extensions requested in the CSR are ignored, except for those processed by
+// ParseCertificateRequest (mainly subjectAltName).
+type SignRequest struct {
+	Hosts       []string    `json:"hosts"`
+	Request     string      `json:"certificate_request"`
+	Subject     *Subject    `json:"subject,omitempty"`
+	Profile     string      `json:"profile"`
+	CRLOverride string      `json:"crl_override"`
+	Label       string      `json:"label"`
+	Serial      *big.Int    `json:"serial,omitempty"`
+	Extensions  []Extension `json:"extensions,omitempty"`
+	// If provided, NotBefore will be used without modification (except
+	// for canonicalization) as the value of the notBefore field of the
+	// certificate. In particular no backdating adjustment will be made
+	// when NotBefore is provided.
+	NotBefore time.Time
+	// If provided, NotAfter will be used without modification (except
+	// for canonicalization) as the value of the notAfter field of the
+	// certificate.
+	NotAfter time.Time
+	// If ReturnPrecert is true a certificate with the CT poison extension
+	// will be returned from the Signer instead of attempting to retrieve
+	// SCTs and populate the tbsCert with them itself.
This precert can then
+	// be passed to SignFromPrecert with the SCTs in order to create a
+	// valid certificate.
+	ReturnPrecert bool
+}
+
+// appendIf appends s to a if s is not an empty string.
+func appendIf(s string, a *[]string) {
+	if s != "" {
+		*a = append(*a, s)
+	}
+}
+
+// Name returns the PKIX name for the subject.
+func (s *Subject) Name() pkix.Name {
+	var name pkix.Name
+	name.CommonName = s.CN
+
+	for _, n := range s.Names {
+		appendIf(n.C, &name.Country)
+		appendIf(n.ST, &name.Province)
+		appendIf(n.L, &name.Locality)
+		appendIf(n.O, &name.Organization)
+		appendIf(n.OU, &name.OrganizationalUnit)
+	}
+	name.SerialNumber = s.SerialNumber
+	return name
+}
+
+// SplitHosts takes a comma-separated list of hosts and returns a slice
+// with the hosts split.
+func SplitHosts(hostList string) []string {
+	if hostList == "" {
+		return nil
+	}
+
+	return strings.Split(hostList, ",")
+}
+
+// A Signer contains a CA's certificate and private key for signing
+// certificates, a Signing policy to refer to and a SignatureAlgorithm.
+type Signer interface {
+	Info(info.Req) (*info.Resp, error)
+	Policy() *config.Signing
+	SetDBAccessor(certdb.Accessor)
+	GetDBAccessor() certdb.Accessor
+	SetPolicy(*config.Signing)
+	SigAlgo() x509.SignatureAlgorithm
+	Sign(req SignRequest) (cert []byte, err error)
+	SetReqModifier(func(*http.Request, []byte))
+}
+
+// Profile gets the specific profile from the signer
+func Profile(s Signer, profile string) (*config.SigningProfile, error) {
+	var p *config.SigningProfile
+	policy := s.Policy()
+	if policy != nil && policy.Profiles != nil && profile != "" {
+		p = policy.Profiles[profile]
+	}
+
+	if p == nil && policy != nil {
+		p = policy.Default
+	}
+
+	if p == nil {
+		return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
+	}
+	return p, nil
+}
+
+// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
+// the CA's private key.
+func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
+	pub := priv.Public()
+	switch pub := pub.(type) {
+	case *rsa.PublicKey:
+		keySize := pub.N.BitLen()
+		switch {
+		case keySize >= 4096:
+			return x509.SHA512WithRSA
+		case keySize >= 3072:
+			return x509.SHA384WithRSA
+		case keySize >= 2048:
+			return x509.SHA256WithRSA
+		default:
+			return x509.SHA1WithRSA
+		}
+	case *ecdsa.PublicKey:
+		switch pub.Curve {
+		case elliptic.P256():
+			return x509.ECDSAWithSHA256
+		case elliptic.P384():
+			return x509.ECDSAWithSHA384
+		case elliptic.P521():
+			return x509.ECDSAWithSHA512
+		default:
+			return x509.ECDSAWithSHA1
+		}
+	default:
+		return x509.UnknownSignatureAlgorithm
+	}
+}
+
+// ParseCertificateRequest takes an incoming certificate request and
+// builds a certificate template from it.
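DefaultSigAlgo keys the choice off the CA key's type and size, as the switch above shows; a quick illustrative check:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"

	"github.com/cloudflare/cfssl/signer"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// A P-384 key maps to ECDSAWithSHA384.
	fmt.Println(signer.DefaultSigAlgo(key))
}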
+func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) { + csrv, err := x509.ParseCertificateRequest(csrBytes) + if err != nil { + err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) + return + } + + err = csrv.CheckSignature() + if err != nil { + err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err) + return + } + + template = &x509.Certificate{ + Subject: csrv.Subject, + PublicKeyAlgorithm: csrv.PublicKeyAlgorithm, + PublicKey: csrv.PublicKey, + SignatureAlgorithm: s.SigAlgo(), + DNSNames: csrv.DNSNames, + IPAddresses: csrv.IPAddresses, + EmailAddresses: csrv.EmailAddresses, + URIs: csrv.URIs, + } + + for _, val := range csrv.Extensions { + // Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9) + // extension and append to template if necessary + if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) { + var constraints csr.BasicConstraints + var rest []byte + + if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil { + return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) + } else if len(rest) != 0 { + return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints")) + } + + template.BasicConstraintsValid = true + template.IsCA = constraints.IsCA + template.MaxPathLen = constraints.MaxPathLen + template.MaxPathLenZero = template.MaxPathLen == 0 + } + } + + return +} + +type subjectPublicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + SubjectPublicKey asn1.BitString +} + +// ComputeSKI derives an SKI from the certificate's public key in a +// standard manner. This is done by computing the SHA-1 digest of the +// SubjectPublicKeyInfo component of the certificate. +func ComputeSKI(template *x509.Certificate) ([]byte, error) { + pub := template.PublicKey + encodedPub, err := x509.MarshalPKIXPublicKey(pub) + if err != nil { + return nil, err + } + + var subPKI subjectPublicKeyInfo + _, err = asn1.Unmarshal(encodedPub, &subPKI) + if err != nil { + return nil, err + } + + pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) + return pubHash[:], nil +} + +// FillTemplate is a utility function that tries to load as much of +// the certificate template as possible from the profiles and current +// template. It fills in the key uses, expiration, revocation URLs +// and SKI. +func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile, notBefore time.Time, notAfter time.Time) error { + ski, err := ComputeSKI(template) + if err != nil { + return err + } + + var ( + eku []x509.ExtKeyUsage + ku x509.KeyUsage + backdate time.Duration + expiry time.Duration + crlURL, ocspURL string + issuerURL = profile.IssuerURL + ) + + // The third value returned from Usages is a list of unknown key usages. + // This should be used when validating the profile at load, and isn't used + // here. 
+ ku, eku, _ = profile.Usages() + if profile.IssuerURL == nil { + issuerURL = defaultProfile.IssuerURL + } + + if ku == 0 && len(eku) == 0 { + return cferr.New(cferr.PolicyError, cferr.NoKeyUsages) + } + + if expiry = profile.Expiry; expiry == 0 { + expiry = defaultProfile.Expiry + } + + if crlURL = profile.CRL; crlURL == "" { + crlURL = defaultProfile.CRL + } + if ocspURL = profile.OCSP; ocspURL == "" { + ocspURL = defaultProfile.OCSP + } + + if notBefore.IsZero() { + if !profile.NotBefore.IsZero() { + notBefore = profile.NotBefore + } else { + if backdate = profile.Backdate; backdate == 0 { + backdate = -5 * time.Minute + } else { + backdate = -1 * profile.Backdate + } + notBefore = time.Now().Round(time.Minute).Add(backdate) + } + } + notBefore = notBefore.UTC() + + if notAfter.IsZero() { + if !profile.NotAfter.IsZero() { + notAfter = profile.NotAfter + } else { + notAfter = notBefore.Add(expiry) + } + } + notAfter = notAfter.UTC() + + template.NotBefore = notBefore + template.NotAfter = notAfter + template.KeyUsage = ku + template.ExtKeyUsage = eku + template.BasicConstraintsValid = true + template.IsCA = profile.CAConstraint.IsCA + if template.IsCA { + template.MaxPathLen = profile.CAConstraint.MaxPathLen + if template.MaxPathLen == 0 { + template.MaxPathLenZero = profile.CAConstraint.MaxPathLenZero + } + template.DNSNames = nil + template.EmailAddresses = nil + template.URIs = nil + } + template.SubjectKeyId = ski + + if ocspURL != "" { + template.OCSPServer = []string{ocspURL} + } + if crlURL != "" { + template.CRLDistributionPoints = []string{crlURL} + } + + if len(issuerURL) != 0 { + template.IssuingCertificateURL = issuerURL + } + if len(profile.Policies) != 0 { + err = addPolicies(template, profile.Policies) + if err != nil { + return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) + } + } + if profile.OCSPNoCheck { + ocspNoCheckExtension := pkix.Extension{ + Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5}, + Critical: false, + Value: []byte{0x05, 0x00}, + } + template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension) + } + + return nil +} + +type policyInformation struct { + PolicyIdentifier asn1.ObjectIdentifier + Qualifiers []interface{} `asn1:"tag:optional,omitempty"` +} + +type cpsPolicyQualifier struct { + PolicyQualifierID asn1.ObjectIdentifier + Qualifier string `asn1:"tag:optional,ia5"` +} + +type userNotice struct { + ExplicitText string `asn1:"tag:optional,utf8"` +} +type userNoticePolicyQualifier struct { + PolicyQualifierID asn1.ObjectIdentifier + Qualifier userNotice +} + +var ( + // Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents: + // iso(1) identified-organization(3) dod(6) internet(1) security(5) + // mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1) + iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} + // iso(1) identified-organization(3) dod(6) internet(1) security(5) + // mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2) + iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2} + + // CTPoisonOID is the object ID of the critical poison extension for precertificates + // https://tools.ietf.org/html/rfc6962#page-9 + CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} + + // SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension + // https://tools.ietf.org/html/rfc6962#page-14 + SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} +) + +// addPolicies adds Certificate Policies and optional Policy 
Qualifiers to a +// certificate, based on the input config. Go's x509 library allows setting +// Certificate Policies easily, but does not support nested Policy Qualifiers +// under those policies. So we need to construct the ASN.1 structure ourselves. +func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error { + asn1PolicyList := []policyInformation{} + + for _, policy := range policies { + pi := policyInformation{ + // The PolicyIdentifier is an OID assigned to a given issuer. + PolicyIdentifier: asn1.ObjectIdentifier(policy.ID), + } + for _, qualifier := range policy.Qualifiers { + switch qualifier.Type { + case "id-qt-unotice": + pi.Qualifiers = append(pi.Qualifiers, + userNoticePolicyQualifier{ + PolicyQualifierID: iDQTUserNotice, + Qualifier: userNotice{ + ExplicitText: qualifier.Value, + }, + }) + case "id-qt-cps": + pi.Qualifiers = append(pi.Qualifiers, + cpsPolicyQualifier{ + PolicyQualifierID: iDQTCertificationPracticeStatement, + Qualifier: qualifier.Value, + }) + default: + return errors.New("Invalid qualifier type in Policies " + qualifier.Type) + } + } + asn1PolicyList = append(asn1PolicyList, pi) + } + + asn1Bytes, err := asn1.Marshal(asn1PolicyList) + if err != nil { + return err + } + + template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ + Id: asn1.ObjectIdentifier{2, 5, 29, 32}, + Critical: false, + Value: asn1Bytes, + }) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/containerd/continuity/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/containerd/continuity/LICENSE new file mode 100644 index 00000000..584149b6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/containerd/continuity/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/elastic/beats/vendor/github.com/containerd/continuity/pathdriver/path_driver.go new file mode 100644 index 00000000..b0d5a6b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/containerd/continuity/pathdriver/path_driver.go @@ -0,0 +1,101 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package pathdriver + +import ( + "path/filepath" +) + +// PathDriver provides all of the path manipulation functions in a common +// interface. The context should call these and never use the `filepath` +// package or any other package to manipulate paths. 
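+//
+// Illustrative sketch (an addition, not upstream continuity code): callers
+// take a PathDriver value rather than calling filepath directly, so path
+// semantics can be swapped per context, e.g.
+//
+//	func normalize(d PathDriver, p string) string {
+//		return d.ToSlash(d.Clean(p))
+//	}
+//
+// with LocalPathDriver (below) supplying plain host-filesystem behavior.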
+type PathDriver interface {
+	Join(paths ...string) string
+	IsAbs(path string) bool
+	Rel(base, target string) (string, error)
+	Base(path string) string
+	Dir(path string) string
+	Clean(path string) string
+	Split(path string) (dir, file string)
+	Separator() byte
+	Abs(path string) (string, error)
+	Walk(string, filepath.WalkFunc) error
+	FromSlash(path string) string
+	ToSlash(path string) string
+	Match(pattern, name string) (matched bool, err error)
+}
+
+// pathDriver is a simple default implementation that calls the filepath package.
+type pathDriver struct{}
+
+// LocalPathDriver is the exported pathDriver struct for convenience.
+var LocalPathDriver PathDriver = &pathDriver{}
+
+func (*pathDriver) Join(paths ...string) string {
+	return filepath.Join(paths...)
+}
+
+func (*pathDriver) IsAbs(path string) bool {
+	return filepath.IsAbs(path)
+}
+
+func (*pathDriver) Rel(base, target string) (string, error) {
+	return filepath.Rel(base, target)
+}
+
+func (*pathDriver) Base(path string) string {
+	return filepath.Base(path)
+}
+
+func (*pathDriver) Dir(path string) string {
+	return filepath.Dir(path)
+}
+
+func (*pathDriver) Clean(path string) string {
+	return filepath.Clean(path)
+}
+
+func (*pathDriver) Split(path string) (dir, file string) {
+	return filepath.Split(path)
+}
+
+func (*pathDriver) Separator() byte {
+	return filepath.Separator
+}
+
+func (*pathDriver) Abs(path string) (string, error) {
+	return filepath.Abs(path)
+}
+
+// Note that filepath.Walk calls os.Stat, so if the context wants
+// to call Driver.Stat() for Walk, they need to create a new struct that
+// overrides this method.
+func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
+	return filepath.Walk(root, walkFn)
+}
+
+func (*pathDriver) FromSlash(path string) string {
+	return filepath.FromSlash(path)
+}
+
+func (*pathDriver) ToSlash(path string) string {
+	return filepath.ToSlash(path)
+}
+
+func (*pathDriver) Match(pattern, name string) (bool, error) {
+	return filepath.Match(pattern, name)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/NOTICE new file mode 100644 index 00000000..b39ddfa5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/NOTICE @@ -0,0 +1,5 @@ +CoreOS Project +Copyright 2014 CoreOS, Inc + +This product includes software developed at CoreOS, Inc. +(http://www.coreos.com/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/confchange.go b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/confchange.go new file mode 100644 index 00000000..46a7a702 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/confchange.go @@ -0,0 +1,170 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" +) + +// ConfChangeI abstracts over ConfChangeV2 and (legacy) ConfChange to allow +// treating them in a unified manner. +type ConfChangeI interface { + AsV2() ConfChangeV2 + AsV1() (ConfChange, bool) +} + +// MarshalConfChange calls Marshal on the underlying ConfChange or ConfChangeV2 +// and returns the result along with the corresponding EntryType. +func MarshalConfChange(c ConfChangeI) (EntryType, []byte, error) { + var typ EntryType + var ccdata []byte + var err error + if ccv1, ok := c.AsV1(); ok { + typ = EntryConfChange + ccdata, err = ccv1.Marshal() + } else { + ccv2 := c.AsV2() + typ = EntryConfChangeV2 + ccdata, err = ccv2.Marshal() + } + return typ, ccdata, err +} + +// AsV2 returns a V2 configuration change carrying out the same operation. +func (c ConfChange) AsV2() ConfChangeV2 { + return ConfChangeV2{ + Changes: []ConfChangeSingle{{ + Type: c.Type, + NodeID: c.NodeID, + }}, + Context: c.Context, + } +} + +// AsV1 returns the ConfChange and true. +func (c ConfChange) AsV1() (ConfChange, bool) { + return c, true +} + +// AsV2 is the identity. +func (c ConfChangeV2) AsV2() ConfChangeV2 { return c } + +// AsV1 returns ConfChange{} and false. +func (c ConfChangeV2) AsV1() (ConfChange, bool) { return ConfChange{}, false } + +// EnterJoint returns two bools. The second bool is true if and only if this +// config change will use Joint Consensus, which is the case if it contains more +// than one change or if the use of Joint Consensus was requested explicitly. +// The first bool can only be true if second one is, and indicates whether the +// Joint State will be left automatically. +func (c *ConfChangeV2) EnterJoint() (autoLeave bool, ok bool) { + // NB: in theory, more config changes could qualify for the "simple" + // protocol but it depends on the config on top of which the changes apply. + // For example, adding two learners is not OK if both nodes are part of the + // base config (i.e. two voters are turned into learners in the process of + // applying the conf change). In practice, these distinctions should not + // matter, so we keep it simple and use Joint Consensus liberally. + if c.Transition != ConfChangeTransitionAuto || len(c.Changes) > 1 { + // Use Joint Consensus. + var autoLeave bool + switch c.Transition { + case ConfChangeTransitionAuto: + autoLeave = true + case ConfChangeTransitionJointImplicit: + autoLeave = true + case ConfChangeTransitionJointExplicit: + default: + panic(fmt.Sprintf("unknown transition: %+v", c)) + } + return autoLeave, true + } + return false, false +} + +// LeaveJoint is true if the configuration change leaves a joint configuration. +// This is the case if the ConfChangeV2 is zero, with the possible exception of +// the Context field. +func (c *ConfChangeV2) LeaveJoint() bool { + cpy := *c + cpy.Context = nil + return proto.Equal(&cpy, &ConfChangeV2{}) +} + +// ConfChangesFromString parses a Space-delimited sequence of operations into a +// slice of ConfChangeSingle. 
The supported operations are: +// - vn: make n a voter, +// - ln: make n a learner, +// - rn: remove n, and +// - un: update n. +func ConfChangesFromString(s string) ([]ConfChangeSingle, error) { + var ccs []ConfChangeSingle + toks := strings.Split(strings.TrimSpace(s), " ") + if toks[0] == "" { + toks = nil + } + for _, tok := range toks { + if len(tok) < 2 { + return nil, fmt.Errorf("unknown token %s", tok) + } + var cc ConfChangeSingle + switch tok[0] { + case 'v': + cc.Type = ConfChangeAddNode + case 'l': + cc.Type = ConfChangeAddLearnerNode + case 'r': + cc.Type = ConfChangeRemoveNode + case 'u': + cc.Type = ConfChangeUpdateNode + default: + return nil, fmt.Errorf("unknown input: %s", tok) + } + id, err := strconv.ParseUint(tok[1:], 10, 64) + if err != nil { + return nil, err + } + cc.NodeID = id + ccs = append(ccs, cc) + } + return ccs, nil +} + +// ConfChangesToString is the inverse to ConfChangesFromString. +func ConfChangesToString(ccs []ConfChangeSingle) string { + var buf strings.Builder + for i, cc := range ccs { + if i > 0 { + buf.WriteByte(' ') + } + switch cc.Type { + case ConfChangeAddNode: + buf.WriteByte('v') + case ConfChangeAddLearnerNode: + buf.WriteByte('l') + case ConfChangeRemoveNode: + buf.WriteByte('r') + case ConfChangeUpdateNode: + buf.WriteByte('u') + default: + buf.WriteString("unknown") + } + fmt.Fprintf(&buf, "%d", cc.NodeID) + } + return buf.String() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/confstate.go b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/confstate.go new file mode 100644 index 00000000..4bda9321 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/confstate.go @@ -0,0 +1,45 @@ +// Copyright 2019 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package raftpb + +import ( + "fmt" + "reflect" + "sort" +) + +// Equivalent returns a nil error if the inputs describe the same configuration. +// On mismatch, returns a descriptive error showing the differences. +func (cs ConfState) Equivalent(cs2 ConfState) error { + cs1 := cs + orig1, orig2 := cs1, cs2 + s := func(sl *[]uint64) { + *sl = append([]uint64(nil), *sl...) 
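+		// Appending onto a nil slice copies the backing array, so the sort
+		// below cannot reorder the caller's ConfState; the unsorted inputs
+		// are preserved in orig1/orig2 for the error message.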
+ sort.Slice(*sl, func(i, j int) bool { return (*sl)[i] < (*sl)[j] }) + } + + for _, cs := range []*ConfState{&cs1, &cs2} { + s(&cs.Voters) + s(&cs.Learners) + s(&cs.VotersOutgoing) + s(&cs.LearnersNext) + cs.XXX_unrecognized = nil + } + + if !reflect.DeepEqual(cs1, cs2) { + return fmt.Errorf("ConfStates not equivalent after sorting:\n%+#v\n%+#v\nInputs were:\n%+#v\n%+#v", cs1, cs2, orig1, orig2) + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go new file mode 100644 index 00000000..fcf259c8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go @@ -0,0 +1,2646 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: raft.proto + +/* + Package raftpb is a generated protocol buffer package. + + It is generated from these files: + raft.proto + + It has these top-level messages: + Entry + SnapshotMetadata + Snapshot + Message + HardState + ConfState + ConfChange + ConfChangeSingle + ConfChangeV2 +*/ +package raftpb + +import ( + "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + _ "github.com/gogo/protobuf/gogoproto" + + io "io" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type EntryType int32 + +const ( + EntryNormal EntryType = 0 + EntryConfChange EntryType = 1 + EntryConfChangeV2 EntryType = 2 +) + +var EntryType_name = map[int32]string{ + 0: "EntryNormal", + 1: "EntryConfChange", + 2: "EntryConfChangeV2", +} +var EntryType_value = map[string]int32{ + "EntryNormal": 0, + "EntryConfChange": 1, + "EntryConfChangeV2": 2, +} + +func (x EntryType) Enum() *EntryType { + p := new(EntryType) + *p = x + return p +} +func (x EntryType) String() string { + return proto.EnumName(EntryType_name, int32(x)) +} +func (x *EntryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") + if err != nil { + return err + } + *x = EntryType(value) + return nil +} +func (EntryType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type MessageType int32 + +const ( + MsgHup MessageType = 0 + MsgBeat MessageType = 1 + MsgProp MessageType = 2 + MsgApp MessageType = 3 + MsgAppResp MessageType = 4 + MsgVote MessageType = 5 + MsgVoteResp MessageType = 6 + MsgSnap MessageType = 7 + MsgHeartbeat MessageType = 8 + MsgHeartbeatResp MessageType = 9 + MsgUnreachable MessageType = 10 + MsgSnapStatus MessageType = 11 + MsgCheckQuorum MessageType = 12 + MsgTransferLeader MessageType = 13 + MsgTimeoutNow MessageType = 14 + MsgReadIndex MessageType = 15 + MsgReadIndexResp MessageType = 16 + MsgPreVote MessageType = 17 + MsgPreVoteResp MessageType = 18 +) + +var MessageType_name = map[int32]string{ + 0: "MsgHup", + 1: "MsgBeat", + 2: "MsgProp", + 3: "MsgApp", + 4: "MsgAppResp", + 5: "MsgVote", + 6: "MsgVoteResp", + 7: "MsgSnap", + 8: "MsgHeartbeat", + 9: "MsgHeartbeatResp", + 10: "MsgUnreachable", + 11: "MsgSnapStatus", + 12: "MsgCheckQuorum", + 13: "MsgTransferLeader", + 14: "MsgTimeoutNow", + 15: 
"MsgReadIndex", + 16: "MsgReadIndexResp", + 17: "MsgPreVote", + 18: "MsgPreVoteResp", +} +var MessageType_value = map[string]int32{ + "MsgHup": 0, + "MsgBeat": 1, + "MsgProp": 2, + "MsgApp": 3, + "MsgAppResp": 4, + "MsgVote": 5, + "MsgVoteResp": 6, + "MsgSnap": 7, + "MsgHeartbeat": 8, + "MsgHeartbeatResp": 9, + "MsgUnreachable": 10, + "MsgSnapStatus": 11, + "MsgCheckQuorum": 12, + "MsgTransferLeader": 13, + "MsgTimeoutNow": 14, + "MsgReadIndex": 15, + "MsgReadIndexResp": 16, + "MsgPreVote": 17, + "MsgPreVoteResp": 18, +} + +func (x MessageType) Enum() *MessageType { + p := new(MessageType) + *p = x + return p +} +func (x MessageType) String() string { + return proto.EnumName(MessageType_name, int32(x)) +} +func (x *MessageType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") + if err != nil { + return err + } + *x = MessageType(value) + return nil +} +func (MessageType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +// ConfChangeTransition specifies the behavior of a configuration change with +// respect to joint consensus. +type ConfChangeTransition int32 + +const ( + // Automatically use the simple protocol if possible, otherwise fall back + // to ConfChangeJointImplicit. Most applications will want to use this. + ConfChangeTransitionAuto ConfChangeTransition = 0 + // Use joint consensus unconditionally, and transition out of them + // automatically (by proposing a zero configuration change). + // + // This option is suitable for applications that want to minimize the time + // spent in the joint configuration and do not store the joint configuration + // in the state machine (outside of InitialState). + ConfChangeTransitionJointImplicit ConfChangeTransition = 1 + // Use joint consensus and remain in the joint configuration until the + // application proposes a no-op configuration change. This is suitable for + // applications that want to explicitly control the transitions, for example + // to use a custom payload (via the Context field). 
+ ConfChangeTransitionJointExplicit ConfChangeTransition = 2 +) + +var ConfChangeTransition_name = map[int32]string{ + 0: "ConfChangeTransitionAuto", + 1: "ConfChangeTransitionJointImplicit", + 2: "ConfChangeTransitionJointExplicit", +} +var ConfChangeTransition_value = map[string]int32{ + "ConfChangeTransitionAuto": 0, + "ConfChangeTransitionJointImplicit": 1, + "ConfChangeTransitionJointExplicit": 2, +} + +func (x ConfChangeTransition) Enum() *ConfChangeTransition { + p := new(ConfChangeTransition) + *p = x + return p +} +func (x ConfChangeTransition) String() string { + return proto.EnumName(ConfChangeTransition_name, int32(x)) +} +func (x *ConfChangeTransition) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeTransition_value, data, "ConfChangeTransition") + if err != nil { + return err + } + *x = ConfChangeTransition(value) + return nil +} +func (ConfChangeTransition) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type ConfChangeType int32 + +const ( + ConfChangeAddNode ConfChangeType = 0 + ConfChangeRemoveNode ConfChangeType = 1 + ConfChangeUpdateNode ConfChangeType = 2 + ConfChangeAddLearnerNode ConfChangeType = 3 +) + +var ConfChangeType_name = map[int32]string{ + 0: "ConfChangeAddNode", + 1: "ConfChangeRemoveNode", + 2: "ConfChangeUpdateNode", + 3: "ConfChangeAddLearnerNode", +} +var ConfChangeType_value = map[string]int32{ + "ConfChangeAddNode": 0, + "ConfChangeRemoveNode": 1, + "ConfChangeUpdateNode": 2, + "ConfChangeAddLearnerNode": 3, +} + +func (x ConfChangeType) Enum() *ConfChangeType { + p := new(ConfChangeType) + *p = x + return p +} +func (x ConfChangeType) String() string { + return proto.EnumName(ConfChangeType_name, int32(x)) +} +func (x *ConfChangeType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") + if err != nil { + return err + } + *x = ConfChangeType(value) + return nil +} +func (ConfChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type Entry struct { + Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` + Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` + Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` + Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Entry) Reset() { *m = Entry{} } +func (m *Entry) String() string { return proto.CompactTextString(m) } +func (*Entry) ProtoMessage() {} +func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type SnapshotMetadata struct { + ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"` + Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` + Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } +func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } +func (*SnapshotMetadata) ProtoMessage() {} +func (*SnapshotMetadata) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type Snapshot struct { + Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` + Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return 
proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type Message struct { + Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` + To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` + From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` + Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` + LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` + Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` + Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` + Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` + Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` + Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` + RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` + Context []byte `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Message) Reset() { *m = Message{} } +func (m *Message) String() string { return proto.CompactTextString(m) } +func (*Message) ProtoMessage() {} +func (*Message) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type HardState struct { + Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` + Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` + Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *HardState) Reset() { *m = HardState{} } +func (m *HardState) String() string { return proto.CompactTextString(m) } +func (*HardState) ProtoMessage() {} +func (*HardState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ConfState struct { + // The voters in the incoming config. (If the configuration is not joint, + // then the outgoing config is empty). + Voters []uint64 `protobuf:"varint,1,rep,name=voters" json:"voters,omitempty"` + // The learners in the incoming config. + Learners []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"` + // The voters in the outgoing config. + VotersOutgoing []uint64 `protobuf:"varint,3,rep,name=voters_outgoing,json=votersOutgoing" json:"voters_outgoing,omitempty"` + // The nodes that will become learners when the outgoing config is removed. + // These nodes are necessarily currently in nodes_joint (or they would have + // been added to the incoming config right away). + LearnersNext []uint64 `protobuf:"varint,4,rep,name=learners_next,json=learnersNext" json:"learners_next,omitempty"` + // If set, the config is joint and Raft will automatically transition into + // the final config (i.e. remove the outgoing config) when this is safe. 
+ AutoLeave bool `protobuf:"varint,5,opt,name=auto_leave,json=autoLeave" json:"auto_leave"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfState) Reset() { *m = ConfState{} } +func (m *ConfState) String() string { return proto.CompactTextString(m) } +func (*ConfState) ProtoMessage() {} +func (*ConfState) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ConfChange struct { + Type ConfChangeType `protobuf:"varint,2,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,3,opt,name=node_id,json=nodeId" json:"node_id"` + Context []byte `protobuf:"bytes,4,opt,name=context" json:"context,omitempty"` + // NB: this is used only by etcd to thread through a unique identifier. + // Ideally it should really use the Context instead. No counterpart to + // this field exists in ConfChangeV2. + ID uint64 `protobuf:"varint,1,opt,name=id" json:"id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChange) Reset() { *m = ConfChange{} } +func (m *ConfChange) String() string { return proto.CompactTextString(m) } +func (*ConfChange) ProtoMessage() {} +func (*ConfChange) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// ConfChangeSingle is an individual configuration change operation. Multiple +// such operations can be carried out atomically via a ConfChangeV2. +type ConfChangeSingle struct { + Type ConfChangeType `protobuf:"varint,1,opt,name=type,enum=raftpb.ConfChangeType" json:"type"` + NodeID uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId" json:"node_id"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeSingle) Reset() { *m = ConfChangeSingle{} } +func (m *ConfChangeSingle) String() string { return proto.CompactTextString(m) } +func (*ConfChangeSingle) ProtoMessage() {} +func (*ConfChangeSingle) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// ConfChangeV2 messages initiate configuration changes. They support both the +// simple "one at a time" membership change protocol and full Joint Consensus +// allowing for arbitrary changes in membership. +// +// The supplied context is treated as an opaque payload and can be used to +// attach an action on the state machine to the application of the config change +// proposal. Note that contrary to Joint Consensus as outlined in the Raft +// paper[1], configuration changes become active when they are *applied* to the +// state machine (not when they are appended to the log). +// +// The simple protocol can be used whenever only a single change is made. +// +// Non-simple changes require the use of Joint Consensus, for which two +// configuration changes are run. The first configuration change specifies the +// desired changes and transitions the Raft group into the joint configuration, +// in which quorum requires a majority of both the pre-changes and post-changes +// configuration. Joint Consensus avoids entering fragile intermediate +// configurations that could compromise survivability. For example, without the +// use of Joint Consensus and running across three availability zones with a +// replication factor of three, it is not possible to replace a voter without +// entering an intermediate configuration that does not survive the outage of +// one availability zone. +// +// The provided ConfChangeTransition specifies how (and whether) Joint Consensus +// is used, and assigns the task of leaving the joint configuration either to +// Raft or the application. 
Leaving the joint configuration is accomplished by +// proposing a ConfChangeV2 with only and optionally the Context field +// populated. +// +// For details on Raft membership changes, see: +// +// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf +type ConfChangeV2 struct { + Transition ConfChangeTransition `protobuf:"varint,1,opt,name=transition,enum=raftpb.ConfChangeTransition" json:"transition"` + Changes []ConfChangeSingle `protobuf:"bytes,2,rep,name=changes" json:"changes"` + Context []byte `protobuf:"bytes,3,opt,name=context" json:"context,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ConfChangeV2) Reset() { *m = ConfChangeV2{} } +func (m *ConfChangeV2) String() string { return proto.CompactTextString(m) } +func (*ConfChangeV2) ProtoMessage() {} +func (*ConfChangeV2) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +func init() { + proto.RegisterType((*Entry)(nil), "raftpb.Entry") + proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") + proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") + proto.RegisterType((*Message)(nil), "raftpb.Message") + proto.RegisterType((*HardState)(nil), "raftpb.HardState") + proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") + proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") + proto.RegisterType((*ConfChangeSingle)(nil), "raftpb.ConfChangeSingle") + proto.RegisterType((*ConfChangeV2)(nil), "raftpb.ConfChangeV2") + proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) + proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) + proto.RegisterEnum("raftpb.ConfChangeTransition", ConfChangeTransition_name, ConfChangeTransition_value) + proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) +} +func (m *Entry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Entry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if m.Data != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ConfState.Size())) + n1, err := m.ConfState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, 
error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Metadata.Size())) + n2, err := m.Metadata.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Message) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Message) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.To)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.From)) + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x28 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm)) + dAtA[i] = 0x30 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Index)) + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x40 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Snapshot.Size())) + n3, err := m.Snapshot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + dAtA[i] = 0x50 + i++ + if m.Reject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x58 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint)) + if m.Context != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *HardState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HardState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Term)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Vote)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Commit)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Voters) > 0 { + for _, num := range m.Voters { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.Learners) > 0 { + for _, num := range m.Learners { + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.VotersOutgoing) > 0 { + for _, num := range m.VotersOutgoing { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + if len(m.LearnersNext) > 0 { + for _, num := range m.LearnersNext { + dAtA[i] = 0x20 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + dAtA[i] = 
0x28 + i++ + if m.AutoLeave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.Context != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeSingle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeSingle) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *ConfChangeV2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfChangeV2) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Context != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Context))) + i += copy(dAtA[i:], m.Context) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Entry) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Index)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *SnapshotMetadata) Size() (n int) { + var l int + _ = l + l = m.ConfState.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 1 + sovRaft(uint64(m.Index)) + n += 1 + sovRaft(uint64(m.Term)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Metadata.Size() + n += 1 + l + sovRaft(uint64(l)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Message) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.To)) + n += 1 + 
sovRaft(uint64(m.From)) + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.LogTerm)) + n += 1 + sovRaft(uint64(m.Index)) + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + n += 1 + sovRaft(uint64(m.Commit)) + l = m.Snapshot.Size() + n += 1 + l + sovRaft(uint64(l)) + n += 2 + n += 1 + sovRaft(uint64(m.RejectHint)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *HardState) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Term)) + n += 1 + sovRaft(uint64(m.Vote)) + n += 1 + sovRaft(uint64(m.Commit)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfState) Size() (n int) { + var l int + _ = l + if len(m.Voters) > 0 { + for _, e := range m.Voters { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.Learners) > 0 { + for _, e := range m.Learners { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.VotersOutgoing) > 0 { + for _, e := range m.VotersOutgoing { + n += 1 + sovRaft(uint64(e)) + } + } + if len(m.LearnersNext) > 0 { + for _, e := range m.LearnersNext { + n += 1 + sovRaft(uint64(e)) + } + } + n += 2 + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChange) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.ID)) + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeSingle) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Type)) + n += 1 + sovRaft(uint64(m.NodeID)) + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ConfChangeV2) Size() (n int) { + var l int + _ = l + n += 1 + sovRaft(uint64(m.Transition)) + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if m.Context != nil { + l = len(m.Context) + n += 1 + l + sovRaft(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Entry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Entry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (EntryType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Message) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Message: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (MessageType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + m.To = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.To |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + m.From = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.From |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) + } + m.LogTerm = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LogTerm |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, Entry{}) + if err := 
m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Reject = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) + } + m.RejectHint = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RejectHint |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HardState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HardState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) + } + m.Term = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Term |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) + } + m.Vote = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Vote |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) + } + m.Commit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Commit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Voters = append(m.Voters, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Voters", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Learners = append(m.Learners, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType) + } + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } 
+ if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.VotersOutgoing = append(m.VotersOutgoing, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field VotersOutgoing", wireType) + } + case 4: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.LearnersNext = append(m.LearnersNext, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field LearnersNext", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutoLeave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AutoLeave = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChangeSingle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChangeSingle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChangeSingle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (ConfChangeType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + m.NodeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfChangeV2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfChangeV2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfChangeV2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Transition", wireType) + } + m.Transition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Transition |= (ConfChangeTransition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, ConfChangeSingle{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...) + if m.Context == nil { + m.Context = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1009 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0xcd, 0x6e, 0xe3, 0x36, + 0x17, 0xb5, 0x64, 0xc5, 0x3f, 0xd7, 0x8e, 0xc3, 0xdc, 0xc9, 0x37, 0x20, 0x82, 0xc0, 0xe3, 0xcf, + 0xd3, 0x62, 0x8c, 0x14, 0x93, 0x16, 0x5e, 0x14, 0x45, 0x77, 0xf9, 0x19, 0x20, 0x29, 0xe2, 0x74, + 0xea, 0x64, 0xb2, 0x28, 0x50, 0x04, 0x8c, 0x45, 0x2b, 0x6a, 0x2d, 0x51, 0xa0, 0xe8, 0x34, 0xd9, + 0x14, 0x45, 0x9f, 0xa2, 0x9b, 0xd9, 0xf6, 0x01, 0xfa, 0x14, 0x59, 0x0e, 0xd0, 0xfd, 0xa0, 0x93, + 0xbe, 0x48, 0x41, 0x8a, 0xb2, 0x65, 0x27, 0x98, 0x45, 0x77, 0xe4, 0x39, 0x87, 0xf7, 0x9e, 0x7b, + 0x79, 0x45, 0x01, 0x48, 0x36, 0x56, 0x3b, 0x89, 0x14, 0x4a, 0x60, 0x45, 0xaf, 0x93, 0xcb, 0xcd, + 0x8d, 0x40, 0x04, 0xc2, 0x40, 0x9f, 0xeb, 0x55, 0xc6, 0x76, 0x7f, 0x81, 0x95, 0x57, 0xb1, 0x92, + 0xb7, 0xf8, 0x19, 0x78, 0x67, 0xb7, 0x09, 0xa7, 0x4e, 0xc7, 0xe9, 0xb5, 0xfa, 0xeb, 0x3b, 0xd9, + 0xa9, 0x1d, 0x43, 0x6a, 0x62, 0xcf, 0xbb, 0x7b, 0xff, 0xac, 0x34, 0x34, 0x22, 0xa4, 0xe0, 0x9d, + 0x71, 0x19, 0x51, 0xb7, 0xe3, 0xf4, 0xbc, 0x19, 0xc3, 0x65, 0x84, 0x9b, 0xb0, 0x72, 0x14, 0xfb, + 0xfc, 0x86, 0x96, 0x0b, 0x54, 0x06, 0x21, 0x82, 0x77, 0xc0, 0x14, 0xa3, 0x5e, 0xc7, 0xe9, 0x35, + 0x87, 0x66, 0xdd, 0xfd, 0xd5, 0x01, 0x72, 0x1a, 0xb3, 0x24, 0xbd, 0x12, 0x6a, 0xc0, 0x15, 0xf3, + 0x99, 0x62, 0xf8, 0x25, 0xc0, 0x48, 0xc4, 0xe3, 0x8b, 0x54, 0x31, 0x95, 0x39, 0x6a, 0xcc, 0x1d, + 0xed, 0x8b, 0x78, 
0x7c, 0xaa, 0x09, 0x1b, 0xbc, 0x3e, 0xca, 0x01, 0x9d, 0x3c, 0x34, 0xc9, 0x8b, + 0xbe, 0x32, 0x48, 0x5b, 0x56, 0xda, 0x72, 0xd1, 0x97, 0x41, 0xba, 0xdf, 0x43, 0x2d, 0x77, 0xa0, + 0x2d, 0x6a, 0x07, 0x26, 0x67, 0x73, 0x68, 0xd6, 0xf8, 0x35, 0xd4, 0x22, 0xeb, 0xcc, 0x04, 0x6e, + 0xf4, 0x69, 0xee, 0x65, 0xd9, 0xb9, 0x8d, 0x3b, 0xd3, 0x77, 0xdf, 0x96, 0xa1, 0x3a, 0xe0, 0x69, + 0xca, 0x02, 0x8e, 0x2f, 0xc1, 0x53, 0xf3, 0x0e, 0x3f, 0xc9, 0x63, 0x58, 0xba, 0xd8, 0x63, 0x2d, + 0xc3, 0x0d, 0x70, 0x95, 0x58, 0xa8, 0xc4, 0x55, 0x42, 0x97, 0x31, 0x96, 0x62, 0xa9, 0x0c, 0x8d, + 0xcc, 0x0a, 0xf4, 0x96, 0x0b, 0xc4, 0x36, 0x54, 0x27, 0x22, 0x30, 0x17, 0xb6, 0x52, 0x20, 0x73, + 0x70, 0xde, 0xb6, 0xca, 0xc3, 0xb6, 0xbd, 0x84, 0x2a, 0x8f, 0x95, 0x0c, 0x79, 0x4a, 0xab, 0x9d, + 0x72, 0xaf, 0xd1, 0x5f, 0x5d, 0x98, 0x8c, 0x3c, 0x94, 0xd5, 0xe0, 0x16, 0x54, 0x46, 0x22, 0x8a, + 0x42, 0x45, 0x6b, 0x85, 0x58, 0x16, 0xc3, 0x3e, 0xd4, 0x52, 0xdb, 0x31, 0x5a, 0x37, 0x9d, 0x24, + 0xcb, 0x9d, 0xcc, 0x3b, 0x98, 0xeb, 0x74, 0x44, 0xc9, 0x7f, 0xe4, 0x23, 0x45, 0xa1, 0xe3, 0xf4, + 0x6a, 0x79, 0xc4, 0x0c, 0xc3, 0x4f, 0x00, 0xb2, 0xd5, 0x61, 0x18, 0x2b, 0xda, 0x28, 0xe4, 0x2c, + 0xe0, 0x48, 0xa1, 0x3a, 0x12, 0xb1, 0xe2, 0x37, 0x8a, 0x36, 0xcd, 0xc5, 0xe6, 0xdb, 0xee, 0x0f, + 0x50, 0x3f, 0x64, 0xd2, 0xcf, 0xc6, 0x27, 0xef, 0xa0, 0xf3, 0xa0, 0x83, 0x14, 0xbc, 0x6b, 0xa1, + 0xf8, 0xe2, 0xbc, 0x6b, 0xa4, 0x50, 0x70, 0xf9, 0x61, 0xc1, 0xdd, 0x3f, 0x1d, 0xa8, 0xcf, 0xe6, + 0x15, 0x9f, 0x42, 0x45, 0x9f, 0x91, 0x29, 0x75, 0x3a, 0xe5, 0x9e, 0x37, 0xb4, 0x3b, 0xdc, 0x84, + 0xda, 0x84, 0x33, 0x19, 0x6b, 0xc6, 0x35, 0xcc, 0x6c, 0x8f, 0x2f, 0x60, 0x2d, 0x53, 0x5d, 0x88, + 0xa9, 0x0a, 0x44, 0x18, 0x07, 0xb4, 0x6c, 0x24, 0xad, 0x0c, 0xfe, 0xd6, 0xa2, 0xf8, 0x1c, 0x56, + 0xf3, 0x43, 0x17, 0xb1, 0xae, 0xd4, 0x33, 0xb2, 0x66, 0x0e, 0x9e, 0xf0, 0x1b, 0x85, 0xcf, 0x01, + 0xd8, 0x54, 0x89, 0x8b, 0x09, 0x67, 0xd7, 0xdc, 0x0c, 0x43, 0xde, 0xd0, 0xba, 0xc6, 0x8f, 0x35, + 0xdc, 0x7d, 0xeb, 0x00, 0x68, 0xd3, 0xfb, 0x57, 0x2c, 0x0e, 0xf4, 0x47, 0xe5, 0x86, 0xbe, 0xed, + 0x09, 0x68, 0xed, 0xfd, 0xfb, 0x67, 0xee, 0xd1, 0xc1, 0xd0, 0x0d, 0x7d, 0xfc, 0xc2, 0x8e, 0xb4, + 0x6b, 0x46, 0xfa, 0x69, 0xf1, 0x13, 0xcd, 0x4e, 0x3f, 0x98, 0xea, 0x17, 0x50, 0x8d, 0x85, 0xcf, + 0x2f, 0x42, 0xdf, 0x36, 0xac, 0x65, 0x43, 0x56, 0x4e, 0x84, 0xcf, 0x8f, 0x0e, 0x86, 0x15, 0x4d, + 0x1f, 0xf9, 0xc5, 0x3b, 0xf3, 0x16, 0xef, 0x2c, 0x02, 0x32, 0x4f, 0x70, 0x1a, 0xc6, 0xc1, 0x84, + 0xcf, 0x8c, 0x38, 0xff, 0xc5, 0x88, 0xfb, 0x31, 0x23, 0xdd, 0x3f, 0x1c, 0x68, 0xce, 0xe3, 0x9c, + 0xf7, 0x71, 0x0f, 0x40, 0x49, 0x16, 0xa7, 0xa1, 0x0a, 0x45, 0x6c, 0x33, 0x6e, 0x3d, 0x92, 0x71, + 0xa6, 0xc9, 0x27, 0x72, 0x7e, 0x0a, 0xbf, 0x82, 0xea, 0xc8, 0xa8, 0xb2, 0x1b, 0x2f, 0x3c, 0x29, + 0xcb, 0xa5, 0xe5, 0x5f, 0x98, 0x95, 0x17, 0xfb, 0x52, 0x5e, 0xe8, 0xcb, 0xf6, 0x21, 0xd4, 0x67, + 0xaf, 0x35, 0xae, 0x41, 0xc3, 0x6c, 0x4e, 0x84, 0x8c, 0xd8, 0x84, 0x94, 0xf0, 0x09, 0xac, 0x19, + 0x60, 0x1e, 0x9f, 0x38, 0xf8, 0x3f, 0x58, 0x5f, 0x02, 0xcf, 0xfb, 0xc4, 0xdd, 0xfe, 0xcb, 0x85, + 0x46, 0xe1, 0x59, 0x42, 0x80, 0xca, 0x20, 0x0d, 0x0e, 0xa7, 0x09, 0x29, 0x61, 0x03, 0xaa, 0x83, + 0x34, 0xd8, 0xe3, 0x4c, 0x11, 0xc7, 0x6e, 0x5e, 0x4b, 0x91, 0x10, 0xd7, 0xaa, 0x76, 0x93, 0x84, + 0x94, 0xb1, 0x05, 0x90, 0xad, 0x87, 0x3c, 0x4d, 0x88, 0x67, 0x85, 0xe7, 0x42, 0x71, 0xb2, 0xa2, + 0xbd, 0xd9, 0x8d, 0x61, 0x2b, 0x96, 0xd5, 0x4f, 0x00, 0xa9, 0x22, 0x81, 0xa6, 0x4e, 0xc6, 0x99, + 0x54, 0x97, 0x3a, 0x4b, 0x0d, 0x37, 0x80, 0x14, 0x11, 0x73, 0xa8, 0x8e, 0x08, 0xad, 0x41, 0x1a, + 0xbc, 0x89, 0x25, 0x67, 0xa3, 0x2b, 0x76, 
0x39, 0xe1, 0x04, 0x70, 0x1d, 0x56, 0x6d, 0x20, 0xfd, + 0xc5, 0x4d, 0x53, 0xd2, 0xb0, 0xb2, 0xfd, 0x2b, 0x3e, 0xfa, 0xe9, 0xbb, 0xa9, 0x90, 0xd3, 0x88, + 0x34, 0x75, 0xd9, 0x83, 0x34, 0x30, 0x17, 0x34, 0xe6, 0xf2, 0x98, 0x33, 0x9f, 0x4b, 0xb2, 0x6a, + 0x4f, 0x9f, 0x85, 0x11, 0x17, 0x53, 0x75, 0x22, 0x7e, 0x26, 0x2d, 0x6b, 0x66, 0xc8, 0x99, 0x6f, + 0x7e, 0x61, 0x64, 0xcd, 0x9a, 0x99, 0x21, 0xc6, 0x0c, 0xb1, 0xf5, 0xbe, 0x96, 0xdc, 0x94, 0xb8, + 0x6e, 0xb3, 0xda, 0xbd, 0xd1, 0xe0, 0xf6, 0x6f, 0x0e, 0x6c, 0x3c, 0x36, 0x1e, 0xb8, 0x05, 0xf4, + 0x31, 0x7c, 0x77, 0xaa, 0x04, 0x29, 0xe1, 0xa7, 0xf0, 0xff, 0xc7, 0xd8, 0x6f, 0x44, 0x18, 0xab, + 0xa3, 0x28, 0x99, 0x84, 0xa3, 0x50, 0x5f, 0xc5, 0xc7, 0x64, 0xaf, 0x6e, 0xac, 0xcc, 0xdd, 0xbe, + 0x85, 0xd6, 0xe2, 0x47, 0xa1, 0x9b, 0x31, 0x47, 0x76, 0x7d, 0x5f, 0x8f, 0x3f, 0x29, 0x21, 0x2d, + 0x9a, 0x1d, 0xf2, 0x48, 0x5c, 0x73, 0xc3, 0x38, 0x8b, 0xcc, 0x9b, 0xc4, 0x67, 0x2a, 0x63, 0xdc, + 0xc5, 0x42, 0x76, 0x7d, 0xff, 0x38, 0x7b, 0x7b, 0x0c, 0x5b, 0xde, 0xa3, 0x77, 0x1f, 0xda, 0xa5, + 0x77, 0x1f, 0xda, 0xa5, 0xbb, 0xfb, 0xb6, 0xf3, 0xee, 0xbe, 0xed, 0xfc, 0x7d, 0xdf, 0x76, 0x7e, + 0xff, 0xa7, 0x5d, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, 0x87, 0x11, 0x6d, 0xd6, 0xaf, 0x08, 0x00, + 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto new file mode 100644 index 00000000..23d62ec2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto @@ -0,0 +1,177 @@ +syntax = "proto2"; +package raftpb; + +import "gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.sizer_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.goproto_getters_all) = false; +option (gogoproto.goproto_enum_prefix_all) = false; + +enum EntryType { + EntryNormal = 0; + EntryConfChange = 1; // corresponds to pb.ConfChange + EntryConfChangeV2 = 2; // corresponds to pb.ConfChangeV2 +} + +message Entry { + optional uint64 Term = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional uint64 Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations + optional EntryType Type = 1 [(gogoproto.nullable) = false]; + optional bytes Data = 4; +} + +message SnapshotMetadata { + optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; + optional uint64 index = 2 [(gogoproto.nullable) = false]; + optional uint64 term = 3 [(gogoproto.nullable) = false]; +} + +message Snapshot { + optional bytes data = 1; + optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; +} + +enum MessageType { + MsgHup = 0; + MsgBeat = 1; + MsgProp = 2; + MsgApp = 3; + MsgAppResp = 4; + MsgVote = 5; + MsgVoteResp = 6; + MsgSnap = 7; + MsgHeartbeat = 8; + MsgHeartbeatResp = 9; + MsgUnreachable = 10; + MsgSnapStatus = 11; + MsgCheckQuorum = 12; + MsgTransferLeader = 13; + MsgTimeoutNow = 14; + MsgReadIndex = 15; + MsgReadIndexResp = 16; + MsgPreVote = 17; + MsgPreVoteResp = 18; +} + +message Message { + optional MessageType type = 1 [(gogoproto.nullable) = false]; + optional uint64 to = 2 [(gogoproto.nullable) = false]; + optional uint64 from = 3 [(gogoproto.nullable) = false]; + optional uint64 term = 4 [(gogoproto.nullable) = false]; + optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; + optional uint64 index = 6 [(gogoproto.nullable) = false]; + repeated Entry entries = 7 [(gogoproto.nullable) = 
false];
+  optional uint64 commit     = 8  [(gogoproto.nullable) = false];
+  optional Snapshot snapshot = 9  [(gogoproto.nullable) = false];
+  optional bool reject       = 10 [(gogoproto.nullable) = false];
+  optional uint64 rejectHint = 11 [(gogoproto.nullable) = false];
+  optional bytes context     = 12;
+}
+
+message HardState {
+  optional uint64 term   = 1 [(gogoproto.nullable) = false];
+  optional uint64 vote   = 2 [(gogoproto.nullable) = false];
+  optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+// ConfChangeTransition specifies the behavior of a configuration change with
+// respect to joint consensus.
+enum ConfChangeTransition {
+  // Automatically use the simple protocol if possible, otherwise fall back
+  // to ConfChangeJointImplicit. Most applications will want to use this.
+  ConfChangeTransitionAuto          = 0;
+  // Use joint consensus unconditionally, and transition out of it
+  // automatically (by proposing a zero configuration change).
+  //
+  // This option is suitable for applications that want to minimize the time
+  // spent in the joint configuration and do not store the joint configuration
+  // in the state machine (outside of InitialState).
+  ConfChangeTransitionJointImplicit = 1;
+  // Use joint consensus and remain in the joint configuration until the
+  // application proposes a no-op configuration change. This is suitable for
+  // applications that want to explicitly control the transitions, for example
+  // to use a custom payload (via the Context field).
+  ConfChangeTransitionJointExplicit = 2;
+}
+
+message ConfState {
+  // The voters in the incoming config. (If the configuration is not joint,
+  // then the outgoing config is empty).
+  repeated uint64 voters = 1;
+  // The learners in the incoming config.
+  repeated uint64 learners = 2;
+  // The voters in the outgoing config.
+  repeated uint64 voters_outgoing = 3;
+  // The nodes that will become learners when the outgoing config is removed.
+  // These nodes are necessarily currently in nodes_joint (or they would have
+  // been added to the incoming config right away).
+  repeated uint64 learners_next = 4;
+  // If set, the config is joint and Raft will automatically transition into
+  // the final config (i.e. remove the outgoing config) when this is safe.
+  optional bool auto_leave = 5 [(gogoproto.nullable) = false];
+}
+
+enum ConfChangeType {
+  ConfChangeAddNode        = 0;
+  ConfChangeRemoveNode     = 1;
+  ConfChangeUpdateNode     = 2;
+  ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+  optional ConfChangeType type    = 2 [(gogoproto.nullable) = false];
+  optional uint64         node_id = 3 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID" ];
+  optional bytes          context = 4;
+
+  // NB: this is used only by etcd to thread through a unique identifier.
+  // Ideally it should really use the Context instead. No counterpart to
+  // this field exists in ConfChangeV2.
+  optional uint64 id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID" ];
+}
+
+// ConfChangeSingle is an individual configuration change operation. Multiple
+// such operations can be carried out atomically via a ConfChangeV2.
+message ConfChangeSingle {
+  optional ConfChangeType type    = 1 [(gogoproto.nullable) = false];
+  optional uint64         node_id = 2 [(gogoproto.nullable) = false, (gogoproto.customname) = "NodeID"];
+}
+
+// ConfChangeV2 messages initiate configuration changes. They support both the
+// simple "one at a time" membership change protocol and full Joint Consensus
+// allowing for arbitrary changes in membership.
+//
+// The supplied context is treated as an opaque payload and can be used to
+// attach an action on the state machine to the application of the config change
+// proposal. Note that contrary to Joint Consensus as outlined in the Raft
+// paper[1], configuration changes become active when they are *applied* to the
+// state machine (not when they are appended to the log).
+//
+// The simple protocol can be used whenever only a single change is made.
+//
+// Non-simple changes require the use of Joint Consensus, for which two
+// configuration changes are run. The first configuration change specifies the
+// desired changes and transitions the Raft group into the joint configuration,
+// in which quorum requires a majority of both the pre-changes and post-changes
+// configuration. Joint Consensus avoids entering fragile intermediate
+// configurations that could compromise survivability. For example, without the
+// use of Joint Consensus and running across three availability zones with a
+// replication factor of three, it is not possible to replace a voter without
+// entering an intermediate configuration that does not survive the outage of
+// one availability zone.
+//
+// The provided ConfChangeTransition specifies how (and whether) Joint Consensus
+// is used, and assigns the task of leaving the joint configuration either to
+// Raft or the application. Leaving the joint configuration is accomplished by
+// proposing a ConfChangeV2 with only and optionally the Context field
+// populated.
+//
+// For details on Raft membership changes, see:
+//
+// [1]: https://github.com/ongardie/dissertation/blob/master/online-trim.pdf
+message ConfChangeV2 {
+  optional ConfChangeTransition transition = 1 [(gogoproto.nullable) = false];
+  repeated ConfChangeSingle     changes    = 2 [(gogoproto.nullable) = false];
+  optional bytes                context    = 3;
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml b/vendor/github.com/elastic/beats/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml
deleted file mode 100644
index 2ae5456d..00000000
--- a/vendor/github.com/elastic/beats/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml
+++ /dev/null
@@ -1,48 +0,0 @@
-version: 1.0.{build}
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\denisenkom\go-mssqldb
-
-environment:
-  GOPATH: c:\gopath
-  HOST: localhost
-  SQLUSER: sa
-  SQLPASSWORD: Password12!
-  DATABASE: test
-  GOVERSION: 110
-  matrix:
-    - GOVERSION: 18
-      SQLINSTANCE: SQL2016
-    - GOVERSION: 19
-      SQLINSTANCE: SQL2016
-    - GOVERSION: 110
-      SQLINSTANCE: SQL2016
-    - SQLINSTANCE: SQL2014
-    - SQLINSTANCE: SQL2012SP1
-    - SQLINSTANCE: SQL2008R2SP2
-
-install:
-  - set GOROOT=c:\go%GOVERSION%
-  - set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH%
-  - go version
-  - go env
-  - go get -u cloud.google.com/go/civil
-
-build_script:
-  - go build
-
-before_test:
-  # setup SQL Server
-  - ps: |
-      $instanceName = $env:SQLINSTANCE
-      Start-Service "MSSQL`$$instanceName"
-      Start-Service "SQLBrowser"
-  - sqlcmd -S "(local)\%SQLINSTANCE%" -Q "Use [master]; CREATE DATABASE test;"
-  - sqlcmd -S "(local)\%SQLINSTANCE%" -h -1 -Q "set nocount on; Select @@version"
-  - pip install codecov
-
-
-test_script:
-  - go test -race -cpu 4 -coverprofile=coverage.txt -covermode=atomic
-  - codecov -f coverage.txt
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/LICENSE
new file mode 100644
index 00000000..df83a9c2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/LICENSE
@@ -0,0 +1,8 @@
+Copyright (c) 2012 Dave Grijalva
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
new file mode 100644
index 00000000..7fc1f793
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md
@@ -0,0 +1,97 @@
+## Migration Guide from v2 -> v3
+
+Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code.
+
+### `Token.Claims` is now an interface type
+
+The most requested feature from the 2.0 version of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`.
+
+`MapClaims` is an alias for `map[string]interface{}` with built-in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property.
+
+The old example for parsing a token looked like this:
+
+```go
+    if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+        fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+    }
+```
+
+is now directly mapped to:
+
+```go
+    if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil {
+        claims := token.Claims.(jwt.MapClaims)
+        fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+    }
+```
+
+`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type:
+
+```go
+    type MyCustomClaims struct {
+        User string
+        *StandardClaims
+    }
+
+    if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil {
+        claims := token.Claims.(*MyCustomClaims)
+        fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt)
+    }
+```
+
+### `ParseFromRequest` has been moved
+
+To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatures have also been augmented to receive a new argument: `Extractor`.
+
+`Extractors` do the work of picking the token string out of a request. The interface is simple and composable.
+
+This simple parsing example:
+
+```go
+    if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil {
+        fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"])
+    }
+```
+
+is directly mapped to:
+
+```go
+    if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil {
+        claims := token.Claims.(jwt.MapClaims)
+        fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"])
+    }
+```
+
+There are several concrete `Extractor` types provided for your convenience:
+
+* `HeaderExtractor` will search a list of headers until one contains content.
+* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content.
+* `MultiExtractor` will try a list of `Extractors` in order until one returns content.
+* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token.
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): the `Authorization` header and the `access_token` argument.
+* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header.
+
+
+### RSA signing methods no longer accept `[]byte` keys
+
+Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse.
+
+To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types.
+
+```go
+	func keyLookupFunc(token *jwt.Token) (interface{}, error) {
+		// Don't forget to validate the alg is what you expect:
+		if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok {
+			return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"])
+		}
+
+		// Look up key
+		key, err := lookupPublicKey(token.Header["kid"])
+		if err != nil {
+			return nil, err
+		}
+
+		// Unpack key from PEM encoded PKCS8
+		return jwt.ParseRSAPublicKeyFromPEM(key)
+	}
+```
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/README.md
new file mode 100644
index 00000000..d7749077
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/README.md
@@ -0,0 +1,104 @@
+# jwt-go
+
+[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go)
+[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go)
+
+A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html)
+
+**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency management tool to pin to version 3.
+
+**SECURITY NOTICE:** Some older versions of Go have a security issue in the crypto/elliptic package. The recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail.
+
+**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided.
+
+## What the heck is a JWT?
+
+JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens.
+
+In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in OAuth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way.
+
+The first part is called the header. It contains the information necessary for verifying the last part, the signature. For example, which signing method and which key were used.
+
+The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) for information about reserved keys and the proper way to add your own.
+
+## What's in the box?
+
+This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.
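+
+As a quick orientation, here is a minimal sketch of signing and then verifying a token with `HS256`; the secret and claim values are placeholders:
+
+```go
+package main
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/dgrijalva/jwt-go"
+)
+
+func main() {
+	secret := []byte("placeholder-secret") // placeholder key; HMAC methods take []byte
+
+	// Generation side: build a token with map-style claims and sign it.
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
+		"user": "alice",
+		"exp":  time.Now().Add(time.Hour).Unix(),
+	})
+	signed, err := token.SignedString(secret)
+	if err != nil {
+		panic(err)
+	}
+
+	// Consumption side: parse it back, validating the alg as advised above.
+	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
+		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
+		}
+		return secret, nil
+	})
+	if err == nil && parsed.Valid {
+		claims := parsed.Claims.(jwt.MapClaims)
+		fmt.Println("user:", claims["user"])
+	}
+}
+```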
+
+## Examples
+
+See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage:
+
+* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac)
+* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac)
+* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples)
+
+## Extensions
+
+This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
+
+Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
+
+## Compliance
+
+This library was last reviewed to comply with [RFC 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences:
+
+* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key.
+
+## Project Status & Versioning
+
+This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason).
+
+This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases).
+
+While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use the alternative package `gopkg.in/dgrijalva/jwt-go.v3` instead; it will do the right thing with respect to semantic versioning.
+
+**BREAKING CHANGES:**
+* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code.
+
+## Usage Tips
+
+### Signing vs Encryption
+
+A token is simply a JSON object that is signed by its author. This tells you exactly two things about the data:
+
+* The author of the token was in possession of the signing secret
+* The data has not been modified since it was signed
+
+It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library.
+
+### Choosing a Signing Method
+
+There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric.
+
+Symmetric signing methods, such as HMAC, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret.
+They are also slightly computationally faster to use, though this is rarely enough to matter. Symmetric signing methods work best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation.
+
+Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification.
+
+### Signing Methods and Key Types
+
+Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones:
+
+* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expects `[]byte` values for signing and validation
+* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expects `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation
+* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expects `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation
+
+### JWT and OAuth
+
+It's worth mentioning that OAuth and JWT are not the same thing. A JWT is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication.
+
+Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
+
+* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc.), you are using OAuth.
+* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
+* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
+
+### Troubleshooting
+
+This library uses descriptive error messages whenever possible. If you are not getting the expected result, have a look at the errors. The most common place people get stuck is providing the correct type of key to the parser. See the above section on signing methods and key types.
+
+## More
+
+Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
+
+The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
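+
+As a companion to the key-type list above, here is a short sketch of the asymmetric path: loading a PEM-encoded RSA private key and signing with `RS256`. The file path and claim values are illustrative assumptions only:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+	"time"
+
+	"github.com/dgrijalva/jwt-go"
+)
+
+func main() {
+	// Hypothetical path to a PEM-encoded PKCS1 or PKCS8 RSA private key.
+	pemBytes, err := ioutil.ReadFile("testdata/sample_key.pem")
+	if err != nil {
+		panic(err)
+	}
+
+	// RSA methods require an *rsa.PrivateKey for signing; raw []byte keys are rejected.
+	key, err := jwt.ParseRSAPrivateKeyFromPEM(pemBytes)
+	if err != nil {
+		panic(err)
+	}
+
+	token := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.StandardClaims{
+		Issuer:    "example-issuer",
+		ExpiresAt: time.Now().Add(time.Hour).Unix(),
+	})
+	signed, err := token.SignedString(key)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(signed)
+}
+```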
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 00000000..63702983 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. 
+* Updated/fixed some documentation
+* Added a more helpful error message when trying to parse tokens that begin with `BEARER `
+
+#### 2.4.0
+
+* Added new type, Parser, to allow for configuration of various parsing parameters
+	* You can now specify a list of valid signing methods. Anything outside this set will be rejected.
+	* You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON
+* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go)
+* Fixed some bugs with ECDSA parsing
+
+#### 2.3.0
+
+* Added support for ECDSA signing methods
+* Added support for RSA PSS signing methods (requires go v1.4)
+
+#### 2.2.0
+
+* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic.
+
+#### 2.1.0
+
+Backwards compatible API change that was missed in 2.0.0.
+
+* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte`
+
+#### 2.0.0
+
+There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change.
+
+The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibility has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`.
+
+It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`.
+
+* **Compatibility Breaking Changes**
+	* `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct`
+	* `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct`
+	* `KeyFunc` now returns `interface{}` instead of `[]byte`
+	* `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key
+	* `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key
+* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type.
+	* Added public package global `SigningMethodHS256`
+	* Added public package global `SigningMethodHS384`
+	* Added public package global `SigningMethodHS512`
+* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type.
+	* Added public package global `SigningMethodRS256`
+	* Added public package global `SigningMethodRS384`
+	* Added public package global `SigningMethodRS512`
+* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged.
+* Refactored the RSA implementation to be easier to read
+* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM`
+
+#### 1.0.2
+
+* Fixed bug in parsing public keys from certificates
+* Added more tests around the parsing of keys for RS256
+* Code refactoring in RS256 implementation.
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 00000000..f0228f02 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 00000000..a86dc1a3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 00000000..f9773812 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the 
key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
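+		// The concatenated r||s value is what gets base64url-encoded below as
+		// the token's signature segment (the JWS serialization for ECDSA).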
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000..d19624b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000..1c93024a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // 
stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 00000000..addbe5d4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
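+// It returns the base64url-encoded HMAC of the signing string.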
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 00000000..291213c4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
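+// Callers that need a claim to be present can instead use the individual
+// Verify* helpers with req set to true.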
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 00000000..f04d189d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000..d6901d9a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
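+// Note that claims validation is skipped here as well; nothing beyond decoding is checked.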
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 00000000..e4caf1ca --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. 
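+// (That is, the key argument must be an *rsa.PublicKey; any other type returns ErrInvalidKeyType.)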
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 00000000..10ee9db8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher 
+ if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 00000000..a5ababf9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + 
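+			// Fall back to keys distributed inside an X.509 certificate:
+			// use the certificate's embedded public key.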
parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000..ed1f212b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000..d637e086 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. 
Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
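The jwt-go files vendored above (`signing_method.go`, `token.go`) form a small extension surface: an implementation registers an `alg` factory with `RegisterSigningMethod`, and the parser later looks the method up again through `GetSigningMethod` when it reads a token's `alg` header. The sketch below is a minimal, illustrative round trip of that flow, not part of the vendored code: `noopMethod` is a hypothetical method that emits a constant "signature" instead of doing real cryptography, and `jwt.ErrSignatureInvalid` is assumed from the package's `errors.go`, which this diff does not show.

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// noopMethod is a hypothetical SigningMethod used only for illustration;
// it produces a fixed "signature" rather than doing real cryptography.
type noopMethod struct{}

func (noopMethod) Alg() string { return "NOOP" }

func (noopMethod) Sign(signingString string, key interface{}) (string, error) {
	return jwt.EncodeSegment([]byte("sig")), nil
}

func (noopMethod) Verify(signingString, signature string, key interface{}) error {
	if signature != jwt.EncodeSegment([]byte("sig")) {
		return jwt.ErrSignatureInvalid // assumed from the package's errors.go (not shown in this diff)
	}
	return nil
}

func init() {
	// As the comment on RegisterSigningMethod suggests, registration is
	// typically done in the implementation's init().
	jwt.RegisterSigningMethod("NOOP", func() jwt.SigningMethod { return noopMethod{} })
}

func main() {
	// Build and sign a token with the registered method.
	token := jwt.NewWithClaims(noopMethod{}, jwt.MapClaims{"sub": "example"})
	signed, err := token.SignedString(nil) // noopMethod ignores the key
	if err != nil {
		panic(err)
	}

	// Parse resolves "NOOP" via GetSigningMethod from the token header's
	// "alg" field, then calls Verify with the key returned by the Keyfunc.
	parsed, err := jwt.Parse(signed, func(*jwt.Token) (interface{}, error) { return nil, nil })
	fmt.Println(parsed.Valid, err)
}
```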
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/README.md new file mode 100644 index 00000000..8ece2800 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/README.md @@ -0,0 +1,66 @@ +# utfbom [![Godoc](https://godoc.org/github.com/dimchansky/utfbom?status.png)](https://godoc.org/github.com/dimchansky/utfbom) [![License](https://img.shields.io/:license-apache-blue.svg)](https://opensource.org/licenses/Apache-2.0) [![Build Status](https://travis-ci.org/dimchansky/utfbom.svg?branch=master)](https://travis-ci.org/dimchansky/utfbom) [![Go Report Card](https://goreportcard.com/badge/github.com/dimchansky/utfbom)](https://goreportcard.com/report/github.com/dimchansky/utfbom) [![Coverage Status](https://coveralls.io/repos/github/dimchansky/utfbom/badge.svg?branch=master)](https://coveralls.io/github/dimchansky/utfbom?branch=master) + +The package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. It can also return the encoding detected by the BOM. + +## Installation + + go get -u github.com/dimchansky/utfbom + +## Example + +```go +package main + +import ( + "bytes" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" +) + +func main() { + trySkip([]byte("\xEF\xBB\xBFhello")) + trySkip([]byte("hello")) +} + +func trySkip(byteData []byte) { + fmt.Println("Input:", byteData) + + // just skip BOM + output, err := ioutil.ReadAll(utfbom.SkipOnly(bytes.NewReader(byteData))) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM skipping", output) + + // skip BOM and detect encoding + sr, enc := utfbom.Skip(bytes.NewReader(byteData)) + fmt.Printf("Detected encoding: %s\n", enc) + output, err = ioutil.ReadAll(sr) + if err != nil { + fmt.Println(err) + return + } + fmt.Println("ReadAll with BOM detection and skipping", output) + fmt.Println() +} +``` + +Output: + +``` +$ go run main.go +Input: [239 187 191 104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: UTF8 +ReadAll with BOM detection and skipping [104 101 108 108 111] + +Input: [104 101 108 108 111] +ReadAll with BOM skipping [104 101 108 108 111] +Detected encoding: Unknown +ReadAll with BOM detection and skipping [104 101 108 108 111] +``` + + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/go.mod new file mode 100644 index 00000000..4b9ecc6f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/go.mod @@ -0,0 +1 @@ +module github.com/dimchansky/utfbom \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/utfbom.go new file mode 100644 index 00000000..77a303e5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/dimchansky/utfbom/utfbom.go @@ -0,0 +1,192 @@ +// Package utfbom implements the detection of the BOM (Unicode Byte Order Mark) and removing as necessary. +// It wraps an io.Reader object, creating another object (Reader) that also implements the io.Reader +// interface but provides automatic BOM checking and removing as necessary. +package utfbom + +import ( + "errors" + "io" +) + +// Encoding is type alias for detected UTF encoding. 
+type Encoding int + +// Constants to identify detected UTF encodings. +const ( + // Unknown encoding, returned when no BOM was detected + Unknown Encoding = iota + + // UTF8, BOM bytes: EF BB BF + UTF8 + + // UTF-16, big-endian, BOM bytes: FE FF + UTF16BigEndian + + // UTF-16, little-endian, BOM bytes: FF FE + UTF16LittleEndian + + // UTF-32, big-endian, BOM bytes: 00 00 FE FF + UTF32BigEndian + + // UTF-32, little-endian, BOM bytes: FF FE 00 00 + UTF32LittleEndian +) + +// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface. +func (e Encoding) String() string { + switch e { + case UTF8: + return "UTF8" + case UTF16BigEndian: + return "UTF16BigEndian" + case UTF16LittleEndian: + return "UTF16LittleEndian" + case UTF32BigEndian: + return "UTF32BigEndian" + case UTF32LittleEndian: + return "UTF32LittleEndian" + default: + return "Unknown" + } +} + +const maxConsecutiveEmptyReads = 100 + +// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +// It also returns the encoding detected by the BOM. +// If the detected encoding is not needed, you can call the SkipOnly function. +func Skip(rd io.Reader) (*Reader, Encoding) { + // Is it already a Reader? + b, ok := rd.(*Reader) + if ok { + return b, Unknown + } + + enc, left, err := detectUtf(rd) + return &Reader{ + rd: rd, + buf: left, + err: err, + }, enc +} + +// SkipOnly creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary. +func SkipOnly(rd io.Reader) *Reader { + r, _ := Skip(rd) + return r +} + +// Reader implements automatic BOM (Unicode Byte Order Mark) checking and +// removing as necessary for an io.Reader object. +type Reader struct { + rd io.Reader // reader provided by the client + buf []byte // buffered data + err error // last error +} + +// Read is an implementation of io.Reader interface. +// The bytes are taken from the underlying Reader, but it checks for BOMs, removing them as necessary. 
+func (r *Reader) Read(p []byte) (n int, err error) { + if len(p) == 0 { + return 0, nil + } + + if r.buf == nil { + if r.err != nil { + return 0, r.readErr() + } + + return r.rd.Read(p) + } + + // copy as much as we can + n = copy(p, r.buf) + r.buf = nilIfEmpty(r.buf[n:]) + return n, nil +} + +func (r *Reader) readErr() error { + err := r.err + r.err = nil + return err +} + +var errNegativeRead = errors.New("utfbom: reader returned negative count from Read") + +func detectUtf(rd io.Reader) (enc Encoding, buf []byte, err error) { + buf, err = readBOM(rd) + + if len(buf) >= 4 { + if isUTF32BigEndianBOM4(buf) { + return UTF32BigEndian, nilIfEmpty(buf[4:]), err + } + if isUTF32LittleEndianBOM4(buf) { + return UTF32LittleEndian, nilIfEmpty(buf[4:]), err + } + } + + if len(buf) > 2 && isUTF8BOM3(buf) { + return UTF8, nilIfEmpty(buf[3:]), err + } + + if (err != nil && err != io.EOF) || (len(buf) < 2) { + return Unknown, nilIfEmpty(buf), err + } + + if isUTF16BigEndianBOM2(buf) { + return UTF16BigEndian, nilIfEmpty(buf[2:]), err + } + if isUTF16LittleEndianBOM2(buf) { + return UTF16LittleEndian, nilIfEmpty(buf[2:]), err + } + + return Unknown, nilIfEmpty(buf), err +} + +func readBOM(rd io.Reader) (buf []byte, err error) { + const maxBOMSize = 4 + var bom [maxBOMSize]byte // used to read BOM + + // read as many bytes as possible + for nEmpty, n := 0, 0; err == nil && len(buf) < maxBOMSize; buf = bom[:len(buf)+n] { + if n, err = rd.Read(bom[len(buf):]); n < 0 { + panic(errNegativeRead) + } + if n > 0 { + nEmpty = 0 + } else { + nEmpty++ + if nEmpty >= maxConsecutiveEmptyReads { + err = io.ErrNoProgress + } + } + } + return +} + +func isUTF32BigEndianBOM4(buf []byte) bool { + return buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0xFE && buf[3] == 0xFF +} + +func isUTF32LittleEndianBOM4(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE && buf[2] == 0x00 && buf[3] == 0x00 +} + +func isUTF8BOM3(buf []byte) bool { + return buf[0] == 0xEF && buf[1] == 0xBB && buf[2] == 0xBF +} + +func isUTF16BigEndianBOM2(buf []byte) bool { + return buf[0] == 0xFE && buf[1] == 0xFF +} + +func isUTF16LittleEndianBOM2(buf []byte) bool { + return buf[0] == 0xFF && buf[1] == 0xFE +} + +func nilIfEmpty(buf []byte) (res []byte) { + if len(buf) > 0 { + res = buf + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 00000000..4c35b879 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,267 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. 
+func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returns the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// sets the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often capitalized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the error's purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition.
+ HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. +func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr := daErr.(type) { + case ErrorCode: + err = daErr.WithDetail(nil) + case Error: + err = daErr + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was set up and the Message field was left + // unset (meaning it's "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Errors w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Errors w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/handler.go new file mode 100644 index 00000000..ebb9ce92 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/handler.go @@ -0,0 +1,40 @@ +package errcode + +import ( + "encoding/json" + "net/http" +) + +// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err +// and sets the content-type header to 'application/json'. It will handle +// ErrorCoder and Errors, and if necessary will create an envelope.
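+//
+// Illustrative usage sketch (hypothetical, not part of the vendored source;
+// `w` is assumed to be the surrounding handler's http.ResponseWriter):
+//
+//	err := errcode.ErrorCodeDenied.WithMessage("pull access denied")
+//	_ = errcode.ServeJSON(w, err)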
+func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 00000000..d1e8826c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/LICENSE index 9c8e20ab..6d8d58fb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/LICENSE +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/LICENSE @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2013-2017 Docker, Inc. + Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/README.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/README.md index bb881325..f136c343 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/README.md @@ -10,7 +10,7 @@ It consists of various components in this repository: - `client/` The Go client used by the command-line client. It can also be used by third-party Go programs. - `daemon/` The daemon, which serves the API. -## Swagger definition +## Swagger definition The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. 
This definition can be used to: @@ -20,7 +20,7 @@ The API is defined by the [Swagger](http://swagger.io/specification/) definition ## Updating the API documentation -The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation. +The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation. The file is split into two main sections: @@ -29,9 +29,9 @@ The file is split into two main sections: To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section. -There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919) +There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919). -`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing. +`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing. ## Viewing the API documentation diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common.go index 859daf60..aa146cda 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common.go @@ -1,65 +1,11 @@ -package api - -import ( - "encoding/json" - "encoding/pem" - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/libtrust" -) +package api // import "github.com/docker/docker/api" // Common constants for daemon and client. const ( // DefaultVersion of Current REST API - DefaultVersion string = "1.31" + DefaultVersion = "1.40" // NoBaseImageSpecifier is the symbol used by the FROM // command to specify that no base image is to be used. 
- NoBaseImageSpecifier string = "scratch" + NoBaseImageSpecifier = "scratch" ) - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "") - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) - } - encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) - if err != nil { - return nil, fmt.Errorf("Error serializing key: %s", err) - } - if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} - -func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { - if ext == ".json" || ext == ".jwk" { - encoded, err = json.Marshal(key) - if err != nil { - return nil, fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - pemBlock, err := key.PEMBlock() - if err != nil { - return nil, fmt.Errorf("unable to encode private key PEM: %s", err) - } - encoded = pem.EncodeToMemory(pemBlock) - } - return -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_unix.go index 081e61c4..504b0c90 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_unix.go @@ -1,6 +1,6 @@ // +build !windows -package api +package api // import "github.com/docker/docker/api" // MinVersion represents Minimum REST API version supported -const MinVersion string = "1.12" +const MinVersion = "1.12" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_windows.go index a6268a4f..590ba547 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/common_windows.go @@ -1,4 +1,4 @@ -package api +package api // import "github.com/docker/docker/api" // MinVersion represents Minimum REST API version supported // Technically the first daemon API version released on Windows is v1.25 in diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/names.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/names.go deleted file mode 100644 index f147d1f4..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/names.go +++ /dev/null @@ -1,9 +0,0 @@ -package api - -import "regexp" - -// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. -const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` - -// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. 
-var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/swagger.yaml index 41f4248a..6e0bc25b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/swagger.yaml +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/swagger.yaml @@ -19,10 +19,10 @@ produces: consumes: - "application/json" - "text/plain" -basePath: "/v1.31" +basePath: "/v1.40" info: title: "Docker Engine API" - version: "1.31" + version: "1.40" x-logo: url: "https://docs.docker.com/images/logo-docker-main.png" description: | @@ -42,33 +42,26 @@ info: # Versioning - The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break. + The API is usually changed in each release, so API calls are versioned to + ensure that clients don't break. To lock to a specific version of the API, + you prefix the URL with its version, for example, call `/v1.30/info` to use + the v1.30 version of the `/info` endpoint. If the API version specified in + the URL is not supported by the daemon, a HTTP `400 Bad Request` error message + is returned. - For Docker Engine 17.06, the API version is 1.30. To lock to this version, you prefix the URL with `/v1.30`. For example, calling `/info` is the same as calling `/v1.30/info`. + If you omit the version-prefix, the current version of the API (v1.40) is used. + For example, calling `/info` is the same as calling `/v1.40/info`. Using the + API without a version-prefix is deprecated and will be removed in a future release. - Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine. + Engine releases in the near future should support this version of the API, + so your client will continue to work even if it is talking to a newer Engine. - In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker. + The API uses an open schema model, which means server may add extra properties + to responses. Likewise, the server will ignore any extra query parameters and + request body properties. When you write clients, you need to ignore additional + properties in responses to ensure they do not break when talking to newer + daemons. - The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. - - This documentation is for version 1.31 of the API. 
Use this table to find documentation for previous versions of the API: - - Docker version | API version | Changes - ----------------|-------------|--------- - 17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes) - 17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes) - 17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes) - 17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes) - 1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes) - 1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes) - 1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes) - 1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes) - 1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes) - 1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes) - 1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes) - 1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes) - 1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes) # Authentication @@ -143,6 +136,10 @@ tags: x-displayName: "Secrets" description: | Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. + - name: "Config" + x-displayName: "Configs" + description: | + Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work. # System things - name: "Plugin" x-displayName: "Plugins" @@ -158,6 +155,7 @@ definitions: IP: type: "string" format: "ip-address" + description: "Host IP address that the container's port is mapped to" PrivatePort: type: "integer" format: "uint16" @@ -170,7 +168,7 @@ definitions: Type: type: "string" x-nullable: false - enum: ["tcp", "udp"] + enum: ["tcp", "udp", "sctp"] example: PrivatePort: 8080 PublicPort: 80 @@ -212,6 +210,43 @@ definitions: PathInContainer: "/dev/deviceName" CgroupPermissions: "mrw" + DeviceRequest: + type: "object" + description: "A request for devices to be sent to device drivers" + properties: + Driver: + type: "string" + example: "nvidia" + Count: + type: "integer" + example: -1 + DeviceIDs: + type: "array" + items: + type: "string" + example: + - "0" + - "1" + - "GPU-fef8089b-4820-abfc-e83e-94318197576e" + Capabilities: + description: | + A list of capabilities; an OR list of AND lists of capabilities. 
+ type: "array" + items: + type: "array" + items: + type: "string" + example: + # gpu AND nvidia AND compute + - ["gpu", "nvidia", "compute"] + Options: + description: | + Driver-specific options, specified as a key/value pairs. These options + are passed directly to the driver. + type: "object" + additionalProperties: + type: "string" + ThrottleDevice: type: "object" properties: @@ -240,11 +275,13 @@ definitions: - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container. type: "string" enum: - "bind" - "volume" - "tmpfs" + - "npipe" ReadOnly: description: "Whether the mount should be read-only." type: "boolean" @@ -257,6 +294,7 @@ definitions: properties: Propagation: description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + type: "string" enum: - "private" - "rprivate" @@ -264,6 +302,10 @@ definitions: - "rshared" - "slave" - "rslave" + NonRecursive: + description: "Disable recursive bind mount." + type: "boolean" + default: false VolumeOptions: description: "Optional configuration for the `volume` type." type: "object" @@ -300,6 +342,7 @@ definitions: Mode: description: "The permission mode for the tmpfs mount in an integer." type: "integer" + RestartPolicy: description: | The behavior to apply when the container exits. The default is not to restart. @@ -334,6 +377,7 @@ definitions: Memory: description: "Memory limit in bytes." type: "integer" + format: "int64" default: 0 # Applicable to UNIX platforms CgroupParent: @@ -399,6 +443,7 @@ definitions: CpusetCpus: description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" type: "string" + example: "0-3" CpusetMems: description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." type: "string" @@ -413,14 +458,20 @@ definitions: items: type: "string" example: "c 13:* rwm" - DiskQuota: - description: "Disk limit (in bytes)." - type: "integer" - format: "int64" + DeviceRequests: + description: "a list of requests for devices to be sent to device drivers" + type: "array" + items: + $ref: "#/definitions/DeviceRequest" KernelMemory: description: "Kernel memory limit in bytes." type: "integer" format: "int64" + example: 209715200 + KernelMemoryTCP: + description: "Hard limit for kernel TCP buffer memory (in bytes)." + type: "integer" + format: "int64" MemoryReservation: description: "Memory soft limit in bytes." type: "integer" @@ -442,10 +493,16 @@ definitions: OomKillDisable: description: "Disable OOM Killer for the container." type: "boolean" + Init: + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + type: "boolean" + x-nullable: true PidsLimit: - description: "Tune a container's pids limit. Set -1 for unlimited." + description: | + Tune a container's PIDs limit. Set `0` or `-1` for unlimited, or `null` to not change. type: "integer" format: "int64" + x-nullable: true Ulimits: description: | A list of resource limits to set in the container. 
For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" @@ -493,14 +550,16 @@ definitions: NanoCPUs: type: "integer" format: "int64" + example: 4000000000 MemoryBytes: type: "integer" format: "int64" + example: 8272408576 GenericResources: $ref: "#/definitions/GenericResources" GenericResources: - description: "User defined Resources, can be either Integer resources (e.g: SSD=3) or String resources (e.g: GPU={UUID1, UUID2})" + description: "User-defined resources can be either Integer resources (e.g., `SSD=3`) or String resources (e.g., `GPU=UUID1`)" type: "array" items: type: "object" @@ -520,6 +579,16 @@ definitions: Value: type: "integer" format: "int64" + example: + - DiscreteResourceSpec: + Kind: "SSD" + Value: 3 + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID1" + - NamedResourceSpec: + Kind: "GPU" + Value: "UUID2" HealthConfig: description: "A test to perform to check that the container is healthy." @@ -562,7 +631,7 @@ definitions: A list of volume bindings for this container. Each volume binding is a string in one of these forms: - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `host-src:container-dest:ro` to make the bind mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. items: @@ -595,17 +664,7 @@ definitions: description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:<name|id>`. Any other value is taken as a custom network's name to which this container should connect." PortBindings: - type: "object" - description: "A map of exposed container ports and the host port they should map to." - additionalProperties: - type: "object" - properties: - HostIp: - type: "string" - description: "The host IP address" - HostPort: - type: "string" - description: "The host port number, as a string" + $ref: "#/definitions/PortMap" RestartPolicy: $ref: "#/definitions/RestartPolicy" AutoRemove: @@ -626,14 +685,22 @@ definitions: $ref: "#/definitions/Mount" # Applicable to UNIX platforms + Capabilities: + type: "array" + description: | + A list of kernel capabilities to be available for container (this overrides the default set). + + Conflicts with options 'CapAdd' and 'CapDrop' + items: + type: "string" CapAdd: type: "array" - description: "A list of kernel capabilities to add to the container." + description: "A list of kernel capabilities to add to the container. Conflicts with option 'Capabilities'" items: type: "string" CapDrop: type: "array" - description: "A list of kernel capabilities to drop from the container." + description: "A list of kernel capabilities to drop from the container. Conflicts with option 'Capabilities'" items: type: "string" Dns: @@ -664,7 +731,17 @@ definitions: type: "string" IpcMode: type: "string" - description: "IPC namespace to use for the container." + description: | + IPC sharing mode for the container.
Possible values are: + + - `"none"`: own private IPC namespace, with /dev/shm not mounted + - `"private"`: own private IPC namespace + - `"shareable"`: own private IPC namespace, with a possibility to share it with other containers + - `"container:<name|id>"`: join another (shareable) container's IPC namespace + - `"host"`: use the host system's IPC namespace + + If not specified, daemon default is used, which can either be `"private"` + or `"shareable"`, depending on daemon version and configuration. Cgroup: type: "string" description: "Cgroup to use for the container." @@ -676,6 +753,7 @@ definitions: OomScoreAdj: type: "integer" description: "An integer value containing the score given to the container in order to tune OOM killer preferences." + example: 500 PidMode: type: "string" description: | @@ -688,7 +766,15 @@ definitions: description: "Gives the container full access to the host." PublishAllPorts: type: "boolean" - description: "Allocates a random host port for all of a container's exposed ports." + description: | + Allocates an ephemeral host port for all of a container's + exposed ports. + + Ports are de-allocated when the container stops and allocated when the container starts. + The allocated port might be changed when restarting the container. + + The port is selected from the ephemeral port range that depends on the kernel. + For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`. ReadonlyRootfs: type: "boolean" description: "Mount the container's root filesystem as read only." @@ -745,6 +831,16 @@ definitions: - "default" - "process" - "hyperv" + MaskedPaths: + type: "array" + description: "The list of paths to be masked inside the container (this overrides the default set of paths)" + items: + type: "string" + ReadonlyPaths: + type: "array" + description: "The list of paths to be set as read-only inside the container (this overrides the default set of paths)" + items: + type: "string" ContainerConfig: description: "Configuration for a container that is portable between hosts" @@ -775,7 +871,7 @@ definitions: description: | An object mapping ports to an empty object in the form: - `{"<port>/<tcp|udp>": {}}` + `{"<port>/<tcp|udp>": {}}` type: "object" additionalProperties: type: "object" @@ -802,9 +898,7 @@ definitions: type: "string" Cmd: description: "Command to run specified as a string or an array of strings." - type: - - "array" - - "string" + type: "array" items: type: "string" Healthcheck: @@ -818,12 +912,11 @@ definitions: Volumes: description: "An object mapping mount point paths inside the container to empty objects." type: "object" - properties: - additionalProperties: - type: "object" - enum: - - {} - default: {} + additionalProperties: + type: "object" + enum: + - {} + default: {} WorkingDir: description: "The working directory for commands to run in." type: "string" @@ -832,9 +925,7 @@ definitions: The entry point for the container as a string or an array of strings. If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). - type: - - "array" - - "string" + type: "array" items: type: "string" NetworkDisabled: @@ -867,26 +958,223 @@ definitions: items: type: "string" - NetworkConfig: - description: "TODO: check is correct" + NetworkSettings: + description: "NetworkSettings exposes the network settings in the API" type: "object" properties: Bridge: + description: Name of the network's bridge (for example, `docker0`).
type: "string" - Gateway: + example: "docker0" + SandboxID: + description: SandboxID uniquely represents a container's network stack. type: "string" - Address: + example: "9d12daf2c33f5959c8bf90aa513e4f65b561738661003029ec84830cd503a0c3" + HairpinMode: + description: | + Indicates if hairpin NAT should be enabled on the virtual interface. + type: "boolean" + example: false + LinkLocalIPv6Address: + description: IPv6 unicast address using the link-local prefix. type: "string" - IPPrefixLen: + example: "fe80::42:acff:fe11:1" + LinkLocalIPv6PrefixLen: + description: Prefix length of the IPv6 unicast address. type: "integer" - MacAddress: - type: "string" - PortMapping: - type: "string" + example: "64" Ports: + $ref: "#/definitions/PortMap" + SandboxKey: + description: SandboxKey identifies the sandbox + type: "string" + example: "/var/run/docker/netns/8ab54b426c38" + + # TODO is SecondaryIPAddresses actually used? + SecondaryIPAddresses: + description: "" type: "array" items: - $ref: "#/definitions/Port" + $ref: "#/definitions/Address" + x-nullable: true + + # TODO is SecondaryIPv6Addresses actually used? + SecondaryIPv6Addresses: + description: "" + type: "array" + items: + $ref: "#/definitions/Address" + x-nullable: true + + # TODO properties below are part of DefaultNetworkSettings, which is + # marked as deprecated since Docker 1.9 and to be removed in Docker v17.12 + EndpointID: + description: | + EndpointID uniquely represents a service endpoint in a Sandbox. + +
<p><br /></p>
+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" + Gateway: + description: | + Gateway address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.1" + GlobalIPv6Address: + description: | + Global IPv6 address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8::5689" + GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 64 + IPAddress: + description: | + IPv4 address for the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "172.17.0.4" + IPPrefixLen: + description: | + Mask length of the IPv4 address. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "integer" + example: 16 + IPv6Gateway: + description: | + IPv6 gateway address for this network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "2001:db8:2::100" + MacAddress: + description: | + MAC address for the container on the default "bridge" network. + +


+ + > **Deprecated**: This field is only propagated when attached to the + > default "bridge" network. Use the information from the "bridge" + > network inside the `Networks` map instead, which contains the same + > information. This field was deprecated in Docker 1.9 and is scheduled + > to be removed in Docker 17.12.0 + type: "string" + example: "02:42:ac:11:00:04" + Networks: + description: | + Information about all networks that the container is connected to. + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + + Address: + description: Address represents an IPv4 or IPv6 IP address. + type: "object" + properties: + Addr: + description: IP address. + type: "string" + PrefixLen: + description: Mask length of the IP address. + type: "integer" + + PortMap: + description: | + PortMap describes the mapping of container ports to host ports, using the + container's port-number and protocol as key in the format `/`, + for example, `80/udp`. + + If a container's port is mapped for multiple protocols, separate entries + are added to the mapping table. + type: "object" + additionalProperties: + type: "array" + x-nullable: true + items: + $ref: "#/definitions/PortBinding" + example: + "443/tcp": + - HostIp: "127.0.0.1" + HostPort: "4443" + "80/tcp": + - HostIp: "0.0.0.0" + HostPort: "80" + - HostIp: "0.0.0.0" + HostPort: "8080" + "80/udp": + - HostIp: "0.0.0.0" + HostPort: "80" + "53/udp": + - HostIp: "0.0.0.0" + HostPort: "53" + "2377/tcp": null + + PortBinding: + description: | + PortBinding represents a binding between a host IP address and a host + port. + type: "object" + properties: + HostIp: + description: "Host IP address that the container's port is mapped to." + type: "string" + example: "127.0.0.1" + HostPort: + description: "Host port number that the container's port is mapped to." + type: "string" + example: "4443" GraphDriverData: description: "Information about a container's graph driver." @@ -1244,11 +1532,10 @@ definitions: type: "string" Options: description: "Driver-specific options, specified as a map." - type: "array" - items: - type: "object" - additionalProperties: - type: "string" + type: "object" + additionalProperties: + type: "string" + NetworkContainer: type: "object" properties: @@ -1280,10 +1567,48 @@ definitions: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" + aux: + $ref: "#/definitions/ImageID" + + BuildCache: + type: "object" + properties: + ID: + type: "string" + Parent: + type: "string" + Type: + type: "string" + Description: + type: "string" + InUse: + type: "boolean" + Shared: + type: "boolean" + Size: + type: "integer" + CreatedAt: + type: "integer" + LastUsedAt: + type: "integer" + x-nullable: true + UsageCount: + type: "integer" + + ImageID: + type: "object" + description: "Image ID or Digest" + properties: + ID: + type: "string" + example: + ID: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" CreateImageInfo: type: "object" properties: + id: + type: "string" error: type: "string" status: @@ -1304,6 +1629,7 @@ definitions: type: "string" progressDetail: $ref: "#/definitions/ProgressDetail" + ErrorDetail: type: "object" properties: @@ -1311,12 +1637,13 @@ definitions: type: "integer" message: type: "string" + ProgressDetail: type: "object" properties: - code: + current: type: "integer" - message: + total: type: "integer" ErrorResponse: @@ -1345,45 +1672,102 @@ definitions: description: "Configuration for a network endpoint." 
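# A minimal sketch of a PortMap value (see the PortMap and PortBinding
# definitions above); the container port and host ports are illustrative:
#
#   {
#     "80/tcp": [
#       { "HostIp": "0.0.0.0", "HostPort": "8080" },
#       { "HostIp": "0.0.0.0", "HostPort": "8081" }
#     ]
#   }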
type: "object" properties: + # Configurations IPAMConfig: - description: "IPAM configurations for the endpoint" - type: "object" - properties: - IPv4Address: - type: "string" - IPv6Address: - type: "string" - LinkLocalIPs: - type: "array" - items: - type: "string" + $ref: "#/definitions/EndpointIPAMConfig" Links: type: "array" items: type: "string" + example: + - "container_1" + - "container_2" Aliases: type: "array" items: type: "string" + example: + - "server_x" + - "server_y" + + # Operational data NetworkID: + description: | + Unique ID of the network. type: "string" + example: "08754567f1f40222263eab4102e1c733ae697e8e354aa9cd6e18d7402835292a" EndpointID: + description: | + Unique ID for the service endpoint in a Sandbox. type: "string" + example: "b88f5b905aabf2893f3cbc4ee42d1ea7980bbc0a92e2c8922b1e1795298afb0b" Gateway: + description: | + Gateway address for this network. type: "string" + example: "172.17.0.1" IPAddress: + description: | + IPv4 address. type: "string" + example: "172.17.0.4" IPPrefixLen: + description: | + Mask length of the IPv4 address. type: "integer" + example: 16 IPv6Gateway: + description: | + IPv6 gateway address. type: "string" + example: "2001:db8:2::100" GlobalIPv6Address: + description: | + Global IPv6 address. type: "string" + example: "2001:db8::5689" GlobalIPv6PrefixLen: + description: | + Mask length of the global IPv6 address. type: "integer" format: "int64" + example: 64 MacAddress: + description: | + MAC address for the endpoint on this network. type: "string" + example: "02:42:ac:11:00:04" + DriverOpts: + description: | + DriverOpts is a mapping of driver options and values. These options + are passed directly to the driver and are driver specific. + type: "object" + x-nullable: true + additionalProperties: + type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + + EndpointIPAMConfig: + description: | + EndpointIPAMConfig represents an endpoint's IPAM configuration. + type: "object" + x-nullable: true + properties: + IPv4Address: + type: "string" + example: "172.20.30.33" + IPv6Address: + type: "string" + example: "2001:db8:abcd::3033" + LinkLocalIPs: + type: "array" + items: + type: "string" + example: + - "169.254.34.68" + - "fe80::3468" PluginMount: type: "object" @@ -1420,6 +1804,7 @@ definitions: example: - "rbind" - "rw" + PluginDevice: type: "object" required: [Name, Description, Settable, Path] @@ -1566,6 +1951,13 @@ definitions: type: "string" x-nullable: false example: "plugins.sock" + ProtocolScheme: + type: "string" + example: "some.protocol/v1.0" + description: "Protocol to use for clients connecting to the plugin." + enum: + - "" + - "moby.plugins.http/v1" Entrypoint: type: "array" items: @@ -1691,7 +2083,8 @@ definitions: properties: Index: type: "integer" - format: "int64" + format: "uint64" + example: 373531 NodeSpec: type: "object" @@ -1699,6 +2092,7 @@ definitions: Name: description: "Name for the node." type: "string" + example: "my-node" Labels: description: "User-defined key/value metadata." type: "object" @@ -1710,6 +2104,7 @@ definitions: enum: - "worker" - "manager" + example: "manager" Availability: description: "Availability of the node." 
type: "string" @@ -1717,125 +2112,140 @@ definitions: - "active" - "pause" - "drain" + example: "active" example: Availability: "active" Name: "node-name" Role: "manager" Labels: foo: "bar" + Node: type: "object" properties: ID: type: "string" + example: "24ifsmvkjbyhk" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: + description: | + Date and time at which the node was added to the swarm in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: + description: | + Date and time at which the node was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/NodeSpec" Description: - type: "object" - properties: - Hostname: - type: "string" - Platform: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" - Resources: - $ref: "#/definitions/ResourceObject" - Engine: - type: "object" - properties: - EngineVersion: - type: "string" - Labels: - type: "object" - additionalProperties: - type: "string" - Plugins: - type: "array" - items: - type: "object" - properties: - Type: - type: "string" - Name: - type: "string" - TLSInfo: - $ref: "#/definitions/SwarmSpec" - example: - ID: "24ifsmvkjbyhk" - Version: - Index: 8 - CreatedAt: "2016-06-07T20:31:11.853781916Z" - UpdatedAt: "2016-06-07T20:31:11.999868824Z" - Spec: - Name: "my-node" - Role: "manager" - Availability: "active" - Labels: - foo: "bar" - Description: - Hostname: "bf3067039e47" - Platform: - Architecture: "x86_64" - OS: "linux" - Resources: - NanoCPUs: 4000000000 - MemoryBytes: 8272408576 - GenericResources: - - DiscreteResourceSpec: - Kind: "SSD" - Value: 3 - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID1" - - NamedResourceSpec: - Kind: "GPU" - Value: "UUID2" - Engine: - EngineVersion: "17.04.0" - Labels: - foo: "bar" - Plugins: - - Type: "Volume" - Name: "local" - - Type: "Network" - Name: "bridge" - - Type: "Network" - Name: "null" - - Type: "Network" - Name: "overlay" + $ref: "#/definitions/NodeDescription" Status: - State: "ready" - Addr: "172.17.0.2" + $ref: "#/definitions/NodeStatus" ManagerStatus: - Leader: true - Reachability: "reachable" - Addr: "172.17.0.2:2377" + $ref: "#/definitions/ManagerStatus" + + NodeDescription: + description: | + NodeDescription encapsulates the properties of the Node as reported by the + agent. 
+ type: "object" + properties: + Hostname: + type: "string" + example: "bf3067039e47" + Platform: + $ref: "#/definitions/Platform" + Resources: + $ref: "#/definitions/ResourceObject" + Engine: + $ref: "#/definitions/EngineDescription" TLSInfo: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + $ref: "#/definitions/TLSInfo" + + Platform: + description: | + Platform represents the platform (Arch/OS). + type: "object" + properties: + Architecture: + description: | + Architecture represents the hardware architecture (for example, + `x86_64`). + type: "string" + example: "x86_64" + OS: + description: | + OS represents the Operating System (for example, `linux` or `windows`). + type: "string" + example: "linux" + + EngineDescription: + description: "EngineDescription provides information about an engine." + type: "object" + properties: + EngineVersion: + type: "string" + example: "17.06.0" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + foo: "bar" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + - Type: "Log" + Name: "awslogs" + - Type: "Log" + Name: "fluentd" + - Type: "Log" + Name: "gcplogs" + - Type: "Log" + Name: "gelf" + - Type: "Log" + Name: "journald" + - Type: "Log" + Name: "json-file" + - Type: "Log" + Name: "logentries" + - Type: "Log" + Name: "splunk" + - Type: "Log" + Name: "syslog" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "host" + - Type: "Network" + Name: "ipvlan" + - Type: "Network" + Name: "macvlan" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + - Type: "Volume" + Name: "local" + - Type: "Volume" + Name: "localhost:5000/vieux/sshfs:latest" + - Type: "Volume" + Name: "vieux/sshfs:latest" + TLSInfo: description: "Information about the issuer of leaf TLS certificates and the trusted root CA certificate" type: "object" @@ -1863,6 +2273,64 @@ definitions: -----END CERTIFICATE----- CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + + NodeStatus: + description: | + NodeStatus represents the status of a node. + + It provides the current status of the node, as seen by the manager. + type: "object" + properties: + State: + $ref: "#/definitions/NodeState" + Message: + type: "string" + example: "" + Addr: + description: "IP address of the node." + type: "string" + example: "172.17.0.2" + + NodeState: + description: "NodeState represents the state of a node." + type: "string" + enum: + - "unknown" + - "down" + - "ready" + - "disconnected" + example: "ready" + + ManagerStatus: + description: | + ManagerStatus represents the status of a manager. 
+ + It provides the current status of a node's manager component, if the node + is a manager. + x-nullable: true + type: "object" + properties: + Leader: + type: "boolean" + default: false + example: true + Reachability: + $ref: "#/definitions/Reachability" + Addr: + description: | + The IP address and port at which the manager is reachable. + type: "string" + example: "10.0.0.46:2377" + + Reachability: + description: "Reachability represents the reachability of a node." + type: "string" + enum: + - "unknown" + - "unreachable" + - "reachable" + example: "reachable" + SwarmSpec: description: "User modifiable swarm configuration." type: "object" @@ -1870,19 +2338,25 @@ definitions: Name: description: "Name of the swarm." type: "string" + example: "default" Labels: description: "User-defined key/value metadata." type: "object" additionalProperties: type: "string" + example: + com.example.corp.type: "production" + com.example.corp.department: "engineering" Orchestration: description: "Orchestration configuration." type: "object" + x-nullable: true properties: TaskHistoryRetentionLimit: description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." type: "integer" format: "int64" + example: 10 Raft: description: "Raft configuration." type: "object" @@ -1890,43 +2364,51 @@ definitions: SnapshotInterval: description: "The number of log entries between snapshots." type: "integer" - format: "int64" + format: "uint64" + example: 10000 KeepOldSnapshots: description: "The number of snapshots to keep beyond the current snapshot." type: "integer" - format: "int64" + format: "uint64" LogEntriesForSlowFollowers: description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." type: "integer" - format: "int64" + format: "uint64" + example: 500 ElectionTick: description: | The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" + example: 3 HeartbeatTick: description: | The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. type: "integer" + example: 1 Dispatcher: description: "Dispatcher configuration." type: "object" + x-nullable: true properties: HeartbeatPeriod: description: "The delay for an agent to send a heartbeat to the dispatcher." type: "integer" format: "int64" + example: 5000000000 CAConfig: description: "CA configuration." type: "object" + x-nullable: true properties: NodeCertExpiry: description: "The duration node certificates are issued for." type: "integer" format: "int64" + example: 7776000000000000 ExternalCAs: description: "Configuration for forwarding signing requests to an external certificate authority." type: "array" @@ -1958,6 +2440,8 @@ definitions: type: "string" ForceRotate: description: "An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified in `SigningCACert` and `SigningCAKey`" + format: "uint64" + type: "integer" EncryptionConfig: description: "Parameters related to encryption-at-rest." 
type: "object" @@ -1965,57 +2449,65 @@ definitions: AutoLockManagers: description: "If set, generate a key and use it to lock data stored on the managers." type: "boolean" + example: false TaskDefaults: description: "Defaults for creating tasks in this cluster." type: "object" properties: LogDriver: description: | - The log driver to use for tasks created in the orchestrator if unspecified by a service. + The log driver to use for tasks created in the orchestrator if + unspecified by a service. - Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + Updating this value only affects new tasks. Existing tasks continue + to use their previously configured log driver until recreated. type: "object" properties: Name: + description: | + The log driver to use as a default for new tasks. type: "string" + example: "json-file" Options: + description: | + Driver-specific options for the selectd log driver, specified + as key/value pairs. type: "object" additionalProperties: type: "string" - example: - Name: "default" - Orchestration: - TaskHistoryRetentionLimit: 10 - Raft: - SnapshotInterval: 10000 - LogEntriesForSlowFollowers: 500 - HeartbeatTick: 1 - ElectionTick: 3 - Dispatcher: - HeartbeatPeriod: 5000000000 - CAConfig: - NodeCertExpiry: 7776000000000000 - JoinTokens: - Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" - Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" - EncryptionConfig: - AutoLockManagers: false + example: + "max-file": "10" + "max-size": "100m" + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but # without `JoinTokens`. ClusterInfo: + description: | + ClusterInfo represents information about the swarm as is returned by the + "/info" endpoint. Join-tokens are not included. + x-nullable: true type: "object" properties: ID: description: "The ID of the swarm." type: "string" + example: "abajmipo7b4xz5ip2nrla6b11" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: + description: | + Date and time at which the swarm was initialised in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" + example: "2016-08-18T10:44:24.496525531Z" UpdatedAt: + description: | + Date and time at which the swarm was last updated in + [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds. type: "string" format: "dateTime" + example: "2017-08-09T07:09:37.632105588Z" Spec: $ref: "#/definitions/SwarmSpec" TLSInfo: @@ -2023,13 +2515,73 @@ definitions: RootRotationInProgress: description: "Whether there is currently a root CA rotation in progress for the swarm" type: "boolean" + example: false + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + If no port is set or is set to 0, the default port (4789) is used. + type: "integer" + format: "uint32" + default: 4789 + example: 4789 + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope networks. 
+ type: "array" + items: + type: "string" + format: "CIDR" + example: ["10.10.0.0/16", "20.20.0.0/16"] + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the default subnet pool + type: "integer" + format: "uint32" + maximum: 29 + default: 24 + example: 24 + + JoinTokens: + description: | + JoinTokens contains the tokens workers and managers need to join the swarm. + type: "object" + properties: + Worker: + description: | + The token workers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: + description: | + The token managers can use to join the swarm. + type: "string" + example: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + + Swarm: + type: "object" + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + $ref: "#/definitions/JoinTokens" + TaskSpec: description: "User modifiable task configuration." type: "object" properties: PluginSpec: type: "object" - description: "Invalid when specified with `ContainerSpec`." + description: | + Plugin spec for the service. *(Experimental release only.)* + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. properties: Name: description: "The name or 'alias' to use for the plugin." @@ -2056,7 +2608,15 @@ definitions: type: "string" ContainerSpec: type: "object" - description: "Invalid when specified with `PluginSpec`." + description: | + Container spec for the service. + +


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. properties: Image: description: "The image name to use for the container" @@ -2103,8 +2663,20 @@ definitions: type: "object" description: "CredentialSpec for managed service account (Windows only)" properties: + Config: + type: "string" + example: "0bt9dmxjvjiqermk6xrop3ekq" + description: | + Load credential spec from a Swarm Config with the given ID. + The specified config must also be present in the Configs field with the Runtime property set. + +


+ + + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. File: type: "string" + example: "spec.json" description: | Load credential spec from this file. The file is read by the daemon, and must be present in the `CredentialSpecs` subdirectory in the docker data directory, which defaults to @@ -2114,7 +2686,7 @@ definitions:


- > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. Registry: type: "string" description: | @@ -2126,7 +2698,7 @@ definitions:


- > **Note**: `CredentialSpec.File` and `CredentialSpec.Registry` are mutually exclusive. + > **Note**: `CredentialSpec.File`, `CredentialSpec.Registry`, and `CredentialSpec.Config` are mutually exclusive. SELinuxContext: type: "object" description: "SELinux labels of the container" @@ -2172,10 +2744,12 @@ definitions: Hosts: type: "array" description: | - A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. - The format of extra hosts on swarmkit is specified in: - http://man7.org/linux/man-pages/man5/hosts.5.html - IP_address canonical_hostname [aliases...] + A list of hostname/IP mappings to add to the container's `hosts` + file. The format of extra hosts is specified in the + [hosts(5)](http://man7.org/linux/man-pages/man5/hosts.5.html) + man page: + + IP_address canonical_hostname [aliases...] items: type: "string" DNSConfig: @@ -2235,7 +2809,12 @@ definitions: type: "object" properties: File: - description: "File represents a specific target that is backed by a file." + description: | + File represents a specific target that is backed by a file. + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive type: "object" properties: Name: @@ -2251,6 +2830,14 @@ definitions: description: "Mode represents the FileMode of the file." type: "integer" format: "uint32" + Runtime: + description: | + Runtime represents a target that is not mounted into the container but is used by the task + +


+ + > **Note**: `Configs.File` and `Configs.Runtime` are mutually exclusive + type: "object" ConfigID: description: "ConfigID represents the ID of the specific config that we're referencing." type: "string" @@ -2259,7 +2846,45 @@ definitions: ConfigName is the name of the config that this references, but this is just provided for lookup/display purposes. The config in the reference will be identified by its ID. type: "string" + Isolation: + type: "string" + description: "Isolation technology of the containers running the service. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + Init: + description: "Run an init inside the container that forwards signals and reaps processes. This field is omitted if empty, and the default (as configured on the daemon) is used." + type: "boolean" + x-nullable: true + Sysctls: + description: | + Set kernel namespaced parameters (sysctls) in the container. + The Sysctls option on services accepts the same sysctls that are + supported on containers. Note that while the same sysctls are + supported, no guarantees or checks are made about their + suitability for a clustered environment, and it's up to the user + to determine whether a given sysctl will work properly in a + Service. + type: "object" + additionalProperties: + type: "string" + NetworkAttachmentSpec: + description: | + Read-only spec type for non-swarm containers attached to swarm overlay + networks.


+ + > **Note**: ContainerSpec, NetworkAttachmentSpec, and PluginSpec are + > mutually exclusive. PluginSpec is only used when the Runtime field + > is set to `plugin`. NetworkAttachmentSpec is used when the Runtime + > field is set to `attachment`. + type: "object" + properties: + ContainerID: + description: "ID of the container represented by this task" + type: "string" Resources: description: "Resource requirements which apply to each individual container created as part of the service." type: "object" @@ -2303,6 +2928,10 @@ definitions: type: "array" items: type: "string" + example: + - "node.hostname!=node3.corp.example.com" + - "node.role!=manager" + - "node.labels.type==production" Preferences: description: "Preferences provide a way to make the scheduler aware of factors such as topology. They are provided in order from highest to lowest precedence." type: "array" @@ -2315,16 +2944,25 @@ definitions: SpreadDescriptor: description: "label descriptor, such as engine.labels.az" type: "string" + example: + - Spread: + SpreadDescriptor: "node.labels.datacenter" + - Spread: + SpreadDescriptor: "node.labels.rack" + MaxReplicas: + description: "Maximum number of replicas for per node (default value is 0, which is unlimited)" + type: "integer" + format: "int64" + default: 0 Platforms: - description: "An array of supported platforms." + description: | + Platforms stores all the platforms that the service's image can + run on. This field is used in the platform filter for scheduling. + If empty, then the platform filter is off, meaning there are no + scheduling restrictions. type: "array" items: - type: "object" - properties: - Architecture: - type: "string" - OS: - type: "string" + $ref: "#/definitions/Platform" ForceUpdate: description: "A counter that triggers an update even if no relevant parameters have been changed." type: "integer" @@ -2352,6 +2990,7 @@ definitions: type: "object" additionalProperties: type: "string" + TaskState: type: "string" enum: @@ -2368,6 +3007,9 @@ definitions: - "shutdown" - "failed" - "rejected" + - "remove" + - "orphaned" + Task: type: "object" properties: @@ -2491,6 +3133,7 @@ definitions: - NamedResourceSpec: Kind: "GPU" Value: "UUID2" + ServiceSpec: description: "User modifiable configuration for a service." properties: @@ -2595,6 +3238,7 @@ definitions: type: "string" EndpointSpec: $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: type: "object" properties: @@ -2605,12 +3249,32 @@ definitions: enum: - "tcp" - "udp" + - "sctp" TargetPort: description: "The port inside the container." type: "integer" PublishedPort: description: "The port on the swarm hosts." type: "integer" + PublishMode: + description: | + The mode in which port is published. + +


+ + - "ingress" makes the target port accessible on on every node, + regardless of whether there is a task for the service running on + that node or not. + - "host" bypasses the routing mesh and publish the port directly on + the swarm node where that service is running. + + type: "string" + enum: + - "ingress" + - "host" + default: "ingress" + example: "ingress" + EndpointSpec: description: "Properties that can be configured to access and load balance a service." type: "object" @@ -2628,6 +3292,7 @@ definitions: type: "array" items: $ref: "#/definitions/EndpointPortConfig" + Service: type: "object" properties: @@ -2740,6 +3405,7 @@ definitions: - NetworkID: "4qvuz4ko70xaltuqbt8956gd1" Addr: "10.255.0.3/16" + ImageDeleteResponseItem: type: "object" properties: @@ -2749,6 +3415,7 @@ definitions: Deleted: description: "The image ID of an image that was deleted" type: "string" + ServiceUpdateResponse: type: "object" properties: @@ -2759,6 +3426,7 @@ definitions: type: "string" example: Warning: "unable to pin image doesnotexist:latest to digest: image library/doesnotexist:latest not found" + ContainerSummary: type: "array" items: @@ -2827,6 +3495,27 @@ definitions: type: "array" items: $ref: "#/definitions/Mount" + + Driver: + description: "Driver represents a driver (network, logging, secrets)." + type: "object" + required: [Name] + properties: + Name: + description: "Name of the driver." + type: "string" + x-nullable: false + example: "some-driver" + Options: + description: "Key/value map of driver-specific options." + type: "object" + x-nullable: false + additionalProperties: + type: "string" + example: + OptionA: "value for driver-specific option A" + OptionB: "value for driver-specific option B" + SecretSpec: type: "object" properties: @@ -2838,26 +3527,48 @@ definitions: type: "object" additionalProperties: type: "string" + example: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" Data: - description: "Base64-url-safe-encoded secret data" - type: "array" - items: - type: "string" + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + data to store as secret. + + This field is only used to _create_ a secret, and is not returned by + other endpoints. + type: "string" + example: "" + Driver: + description: "Name of the secrets driver used to fetch the secret's value from an external secret store" + $ref: "#/definitions/Driver" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + Secret: type: "object" properties: ID: type: "string" + example: "blt1owaxmitz71s9v5zh81zun" Version: $ref: "#/definitions/ObjectVersion" CreatedAt: type: "string" format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" UpdatedAt: type: "string" format: "dateTime" + example: "2017-07-20T13:55:28.678958722Z" Spec: $ref: "#/definitions/SecretSpec" + ConfigSpec: type: "object" properties: @@ -2870,10 +3581,18 @@ definitions: additionalProperties: type: "string" Data: - description: "Base64-url-safe-encoded config data" - type: "array" - items: - type: "string" + description: | + Base64-url-safe-encoded ([RFC 4648](https://tools.ietf.org/html/rfc4648#section-3.2)) + config data. + type: "string" + Templating: + description: | + Templating driver, if applicable + + Templating controls whether and how to evaluate the config payload as + a template. 
If no driver is set, no templating is used. + $ref: "#/definitions/Driver" + Config: type: "object" properties: @@ -2890,6 +3609,804 @@ definitions: Spec: $ref: "#/definitions/ConfigSpec" + SystemInfo: + type: "object" + properties: + ID: + description: | + Unique identifier of the daemon. + +


+ + > **Note**: The format of the ID itself is not part of the API, and + > should not be considered stable. + type: "string" + example: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + Containers: + description: "Total number of containers on the host." + type: "integer" + example: 14 + ContainersRunning: + description: | + Number of containers with status `"running"`. + type: "integer" + example: 3 + ContainersPaused: + description: | + Number of containers with status `"paused"`. + type: "integer" + example: 1 + ContainersStopped: + description: | + Number of containers with status `"stopped"`. + type: "integer" + example: 10 + Images: + description: | + Total number of images on the host. + + Both _tagged_ and _untagged_ (dangling) images are counted. + type: "integer" + example: 508 + Driver: + description: "Name of the storage driver in use." + type: "string" + example: "overlay2" + DriverStatus: + description: | + Information specific to the storage driver, provided as + "label" / "value" pairs. + + This information is provided by the storage driver, and formatted + in a way consistent with the output of `docker info` on the command + line. + +


+ + > **Note**: The information returned in this field, including the + > formatting of values and labels, should not be considered stable, + > and may change without notice. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Backing Filesystem", "extfs"] + - ["Supports d_type", "true"] + - ["Native Overlay Diff", "true"] + DockerRootDir: + description: | + Root directory of persistent Docker state. + + Defaults to `/var/lib/docker` on Linux, and `C:\ProgramData\docker` + on Windows. + type: "string" + example: "/var/lib/docker" + SystemStatus: + description: | + Status information about this node (standalone Swarm API). + +


+ + > **Note**: The information returned in this field is only propagated + > by the Swarm standalone API, and is empty (`null`) when using + > built-in swarm mode. + type: "array" + items: + type: "array" + items: + type: "string" + example: + - ["Role", "primary"] + - ["State", "Healthy"] + - ["Strategy", "spread"] + - ["Filters", "health, port, containerslots, dependency, affinity, constraint, whitelist"] + - ["Nodes", "2"] + - [" swarm-agent-00", "192.168.99.102:2376"] + - [" └ ID", "5CT6:FBGO:RVGO:CZL4:PB2K:WCYN:2JSV:KSHH:GGFW:QOPG:6J5Q:IOZ2|192.168.99.102:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "1 (1 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:03:46Z"] + - [" └ ServerVersion", "17.06.0-ce"] + - [" swarm-manager", "192.168.99.101:2376"] + - [" └ ID", "TAMD:7LL3:SEF7:LW2W:4Q2X:WVFH:RTXX:JSYS:XY2P:JEHL:ZMJK:JGIW|192.168.99.101:2376"] + - [" └ Status", "Healthy"] + - [" └ Containers", "2 (2 Running, 0 Paused, 0 Stopped)"] + - [" └ Reserved CPUs", "0 / 1"] + - [" └ Reserved Memory", "0 B / 1.021 GiB"] + - [" └ Labels", "kernelversion=4.4.74-boot2docker, operatingsystem=Boot2Docker 17.06.0-ce (TCL 7.2); HEAD : 0672754 - Thu Jun 29 00:06:31 UTC 2017, ostype=linux, provider=virtualbox, storagedriver=aufs"] + - [" └ UpdatedAt", "2017-08-09T10:04:11Z"] + - [" └ ServerVersion", "17.06.0-ce"] + Plugins: + $ref: "#/definitions/PluginsInfo" + MemoryLimit: + description: "Indicates if the host has memory limit support enabled." + type: "boolean" + example: true + SwapLimit: + description: "Indicates if the host has memory swap limit support enabled." + type: "boolean" + example: true + KernelMemory: + description: "Indicates if the host has kernel memory limit support enabled." + type: "boolean" + example: true + CpuCfsPeriod: + description: "Indicates if CPU CFS(Completely Fair Scheduler) period is supported by the host." + type: "boolean" + example: true + CpuCfsQuota: + description: "Indicates if CPU CFS(Completely Fair Scheduler) quota is supported by the host." + type: "boolean" + example: true + CPUShares: + description: "Indicates if CPU Shares limiting is supported by the host." + type: "boolean" + example: true + CPUSet: + description: | + Indicates if CPUsets (cpuset.cpus, cpuset.mems) are supported by the host. + + See [cpuset(7)](https://www.kernel.org/doc/Documentation/cgroup-v1/cpusets.txt) + type: "boolean" + example: true + PidsLimit: + description: "Indicates if the host kernel has PID limit support enabled." + type: "boolean" + example: true + OomKillDisable: + description: "Indicates if OOM killer disable is supported on the host." + type: "boolean" + IPv4Forwarding: + description: "Indicates IPv4 forwarding is enabled." + type: "boolean" + example: true + BridgeNfIptables: + description: "Indicates if `bridge-nf-call-iptables` is available on the host." + type: "boolean" + example: true + BridgeNfIp6tables: + description: "Indicates if `bridge-nf-call-ip6tables` is available on the host." + type: "boolean" + example: true + Debug: + description: "Indicates if the daemon is running in debug-mode / with debug-level logging enabled." + type: "boolean" + example: true + NFd: + description: | + The total number of file Descriptors in use by the daemon process. 
+ + This information is only returned if debug-mode is enabled. + type: "integer" + example: 64 + NGoroutines: + description: | + The number of goroutines that currently exist. + + This information is only returned if debug-mode is enabled. + type: "integer" + example: 174 + SystemTime: + description: | + Current system-time in [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) + format with nano-seconds. + type: "string" + example: "2017-08-08T20:28:29.06202363Z" + LoggingDriver: + description: | + The logging driver to use as a default for new containers. + type: "string" + CgroupDriver: + description: | + The driver to use for managing cgroups. + type: "string" + enum: ["cgroupfs", "systemd", "none"] + default: "cgroupfs" + example: "cgroupfs" + NEventsListener: + description: "Number of event listeners subscribed." + type: "integer" + example: 30 + KernelVersion: + description: | + Kernel version of the host. + + On Linux, this information obtained from `uname`. On Windows this + information is queried from the HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\ + registry value, for example _"10.0 14393 (14393.1198.amd64fre.rs1_release_sec.170427-1353)"_. + type: "string" + example: "4.9.38-moby" + OperatingSystem: + description: | + Name of the host's operating system, for example: "Ubuntu 16.04.2 LTS" + or "Windows Server 2016 Datacenter" + type: "string" + example: "Alpine Linux v3.5" + OSType: + description: | + Generic type of the operating system of the host, as returned by the + Go runtime (`GOOS`). + + Currently returned values are "linux" and "windows". A full list of + possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "linux" + Architecture: + description: | + Hardware architecture of the host, as returned by the Go runtime + (`GOARCH`). + + A full list of possible values can be found in the [Go documentation](https://golang.org/doc/install/source#environment). + type: "string" + example: "x86_64" + NCPU: + description: | + The number of logical CPUs usable by the daemon. + + The number of available CPUs is checked by querying the operating + system when the daemon starts. Changes to operating system CPU + allocation after the daemon is started are not reflected. + type: "integer" + example: 4 + MemTotal: + description: | + Total amount of physical memory available on the host, in kilobytes (kB). + type: "integer" + format: "int64" + example: 2095882240 + + IndexServerAddress: + description: | + Address / URL of the index server that is used for image search, + and as a default for user authentication for Docker Hub and Docker Cloud. + default: "https://index.docker.io/v1/" + type: "string" + example: "https://index.docker.io/v1/" + RegistryConfig: + $ref: "#/definitions/RegistryServiceConfig" + GenericResources: + $ref: "#/definitions/GenericResources" + HttpProxy: + description: | + HTTP-proxy configured for the daemon. This value is obtained from the + [`HTTP_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "http://xxxxx:xxxxx@proxy.corp.example.com:8080" + HttpsProxy: + description: | + HTTPS-proxy configured for the daemon. 
This value is obtained from the + [`HTTPS_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) environment variable. + Credentials ([user info component](https://tools.ietf.org/html/rfc3986#section-3.2.1)) in the proxy URL + are masked in the API response. + + Containers do not automatically inherit this configuration. + type: "string" + example: "https://xxxxx:xxxxx@proxy.corp.example.com:4443" + NoProxy: + description: | + Comma-separated list of domain extensions for which no proxy should be + used. This value is obtained from the [`NO_PROXY`](https://www.gnu.org/software/wget/manual/html_node/Proxies.html) + environment variable. + + Containers do not automatically inherit this configuration. + type: "string" + example: "*.local, 169.254/16" + Name: + description: "Hostname of the host." + type: "string" + example: "node5.corp.example.com" + Labels: + description: | + User-defined labels (key/value metadata) as set on the daemon. + +


+ + > **Note**: When part of a Swarm, nodes can both have _daemon_ labels, + > set through the daemon configuration, and _node_ labels, set from a + > manager node in the Swarm. Node labels are not included in this + > field. Node labels can be retrieved using the `/nodes/(id)` endpoint + > on a manager node in the Swarm. + type: "array" + items: + type: "string" + example: ["storage=ssd", "production"] + ExperimentalBuild: + description: | + Indicates if experimental features are enabled on the daemon. + type: "boolean" + example: true + ServerVersion: + description: | + Version string of the daemon. + + > **Note**: the [standalone Swarm API](https://docs.docker.com/swarm/swarm-api/) + > returns the Swarm version instead of the daemon version, for example + > `swarm/1.2.8`. + type: "string" + example: "17.06.0-ce" + ClusterStore: + description: | + URL of the distributed storage backend. + + + The storage backend is used for multihost networking (to store + network and endpoint information) and by the node discovery mechanism. + +


+ + > **Note**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "consul://consul.corp.example.com:8600/some/path" + ClusterAdvertise: + description: | + The network endpoint that the Engine advertises for the purpose of + node discovery. ClusterAdvertise is a `host:port` combination on which + the daemon is reachable by other hosts. + +


+ + > **Note**: This field is only propagated when using standalone Swarm + > mode, and overlay networking using an external k/v store. Overlay + > networks with Swarm mode enabled use the built-in raft store, and + > this field will be empty. + type: "string" + example: "node5.corp.example.com:8000" + Runtimes: + description: | + List of [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtimes configured on the daemon. Keys hold the "name" used to + reference the runtime. + + The Docker daemon relies on an OCI compliant runtime (invoked via the + `containerd` daemon) as its interface to the Linux kernel namespaces, + cgroups, and SELinux. + + The default runtime is `runc`, and automatically configured. Additional + runtimes can be configured by the user and will be listed here. + type: "object" + additionalProperties: + $ref: "#/definitions/Runtime" + default: + runc: + path: "runc" + example: + runc: + path: "runc" + runc-master: + path: "/go/bin/runc" + custom: + path: "/usr/local/bin/my-oci-runtime" + runtimeArgs: ["--debug", "--systemd-cgroup=false"] + DefaultRuntime: + description: | + Name of the default OCI runtime that is used when starting containers. + + The default can be overridden per-container at create time. + type: "string" + default: "runc" + example: "runc" + Swarm: + $ref: "#/definitions/SwarmInfo" + LiveRestoreEnabled: + description: | + Indicates if live restore is enabled. + + If enabled, containers are kept running when the daemon is shutdown + or upon daemon start if running containers are detected. + type: "boolean" + default: false + example: false + Isolation: + description: | + Represents the isolation technology to use as a default for containers. + The supported values are platform-specific. + + If no isolation value is specified on daemon start, on Windows client, + the default is `hyperv`, and on Windows server, the default is `process`. + + This option is currently not used on other platforms. + default: "default" + type: "string" + enum: + - "default" + - "hyperv" + - "process" + InitBinary: + description: | + Name and, optional, path of the `docker-init` binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "docker-init" + ContainerdCommit: + $ref: "#/definitions/Commit" + RuncCommit: + $ref: "#/definitions/Commit" + InitCommit: + $ref: "#/definitions/Commit" + SecurityOptions: + description: | + List of security features that are enabled on the daemon, such as + apparmor, seccomp, SELinux, user-namespaces (userns), and rootless. + + Additional configuration options for each security feature may + be present, and are included as a comma-separated list of key/value + pairs. + type: "array" + items: + type: "string" + example: + - "name=apparmor" + - "name=seccomp,profile=default" + - "name=selinux" + - "name=userns" + - "name=rootless" + ProductLicense: + description: | + Reports a summary of the product license on the daemon. + + If a commercial license has been applied to the daemon, information + such as number of nodes, and expiration are included. + type: "string" + example: "Community Engine" + Warnings: + description: | + List of warnings / informational messages about missing features, or + issues related to the daemon configuration. + + These messages can be printed by the client as information to the user. 
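# A trimmed sketch of a SystemInfo document as typically returned by
# `GET /info`; only a few of the fields defined above are shown, and the
# values are taken from the field examples for illustration:
#
#   {
#     "ID": "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS",
#     "Containers": 14,
#     "Driver": "overlay2",
#     "ServerVersion": "17.06.0-ce",
#     "Warnings": ["WARNING: No memory limit support"]
#   }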
+ type: "array" + items: + type: "string" + example: + - "WARNING: No memory limit support" + - "WARNING: bridge-nf-call-iptables is disabled" + - "WARNING: bridge-nf-call-ip6tables is disabled" + + + # PluginsInfo is a temp struct holding Plugins name + # registered with docker daemon. It is used by Info struct + PluginsInfo: + description: | + Available plugins per type. + +


+ + > **Note**: Only unmanaged (V1) plugins are included in this list. + > V1 plugins are "lazily" loaded, and are not returned in this list + > if there is no resource using the plugin. + type: "object" + properties: + Volume: + description: "Names of available volume-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["local"] + Network: + description: "Names of available network-drivers, and network-driver plugins." + type: "array" + items: + type: "string" + example: ["bridge", "host", "ipvlan", "macvlan", "null", "overlay"] + Authorization: + description: "Names of available authorization plugins." + type: "array" + items: + type: "string" + example: ["img-authz-plugin", "hbm"] + Log: + description: "Names of available logging-drivers, and logging-driver plugins." + type: "array" + items: + type: "string" + example: ["awslogs", "fluentd", "gcplogs", "gelf", "journald", "json-file", "logentries", "splunk", "syslog"] + + + RegistryServiceConfig: + description: | + RegistryServiceConfig stores daemon registry services configuration. + type: "object" + x-nullable: true + properties: + AllowNondistributableArtifactsCIDRs: + description: | + List of IP ranges to which nondistributable artifacts can be pushed, + using the CIDR syntax [RFC 4632](https://tools.ietf.org/html/4632). + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration override this behavior, and enables the daemon to + push nondistributable artifacts to all registries whose resolved IP + address is within the subnet described by the CIDR syntax. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. + + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + AllowNondistributableArtifactsHostnames: + description: | + List of registry hostnames to which nondistributable artifacts can be + pushed, using the format `[:]` or `[:]`. + + Some images (for example, Windows base images) contain artifacts + whose distribution is restricted by license. When these images are + pushed to a registry, restricted artifacts are not included. + + This configuration override this behavior for the specified + registries. + + This option is useful when pushing images containing + nondistributable artifacts to a registry on an air-gapped network so + hosts on that network can pull the images without connecting to + another server. + + > **Warning**: Nondistributable artifacts typically have restrictions + > on how and where they can be distributed and shared. Only use this + > feature to push artifacts to private registries and ensure that you + > are in compliance with any terms that cover redistributing + > nondistributable artifacts. 
+ type: "array" + items: + type: "string" + example: ["registry.internal.corp.example.com:3000", "[2001:db8:a0b:12f0::1]:443"] + InsecureRegistryCIDRs: + description: | + List of IP ranges of insecure registries, using the CIDR syntax + ([RFC 4632](https://tools.ietf.org/html/4632)). Insecure registries + accept un-encrypted (HTTP) and/or untrusted (HTTPS with certificates + from unknown CAs) communication. + + By default, local registries (`127.0.0.0/8`) are configured as + insecure. All other registries are secure. Communicating with an + insecure registry is not possible if the daemon assumes that registry + is secure. + + This configuration override this behavior, insecure communication with + registries whose resolved IP address is within the subnet described by + the CIDR syntax. + + Registries can also be marked insecure by hostname. Those registries + are listed under `IndexConfigs` and have their `Secure` field set to + `false`. + + > **Warning**: Using this option can be useful when running a local + > registry, but introduces security vulnerabilities. This option + > should therefore ONLY be used for testing purposes. For increased + > security, users should add their CA to their system's list of trusted + > CAs instead of enabling this option. + type: "array" + items: + type: "string" + example: ["::1/128", "127.0.0.0/8"] + IndexConfigs: + type: "object" + additionalProperties: + $ref: "#/definitions/IndexInfo" + example: + "127.0.0.1:5000": + "Name": "127.0.0.1:5000" + "Mirrors": [] + "Secure": false + "Official": false + "[2001:db8:a0b:12f0::1]:80": + "Name": "[2001:db8:a0b:12f0::1]:80" + "Mirrors": [] + "Secure": false + "Official": false + "docker.io": + Name: "docker.io" + Mirrors: ["https://hub-mirror.corp.example.com:5000/"] + Secure: true + Official: true + "registry.internal.corp.example.com:3000": + Name: "registry.internal.corp.example.com:3000" + Mirrors: [] + Secure: false + Official: false + Mirrors: + description: | + List of registry URLs that act as a mirror for the official + (`docker.io`) registry. + + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://[2001:db8:a0b:12f0::1]/" + + IndexInfo: + description: + IndexInfo contains information about a registry. + type: "object" + x-nullable: true + properties: + Name: + description: | + Name of the registry, such as "docker.io". + type: "string" + example: "docker.io" + Mirrors: + description: | + List of mirrors, expressed as URIs. + type: "array" + items: + type: "string" + example: + - "https://hub-mirror.corp.example.com:5000/" + - "https://registry-2.docker.io/" + - "https://registry-3.docker.io/" + Secure: + description: | + Indicates if the registry is part of the list of insecure + registries. + + If `false`, the registry is insecure. Insecure registries accept + un-encrypted (HTTP) and/or untrusted (HTTPS with certificates from + unknown CAs) communication. + + > **Warning**: Insecure registries can be useful when running a local + > registry. However, because its use creates security vulnerabilities + > it should ONLY be enabled for testing purposes. For increased + > security, users should add their CA to their system's list of + > trusted CAs instead of enabling this option. 
+ type: "boolean" + example: true + Official: + description: | + Indicates whether this is an official registry (i.e., Docker Hub / docker.io) + type: "boolean" + example: true + + Runtime: + description: | + Runtime describes an [OCI compliant](https://github.com/opencontainers/runtime-spec) + runtime. + + The runtime is invoked by the daemon via the `containerd` daemon. OCI + runtimes act as an interface to the Linux kernel namespaces, cgroups, + and SELinux. + type: "object" + properties: + path: + description: | + Name and, optional, path, of the OCI executable binary. + + If the path is omitted, the daemon searches the host's `$PATH` for the + binary and uses the first result. + type: "string" + example: "/usr/local/bin/my-oci-runtime" + runtimeArgs: + description: | + List of command-line arguments to pass to the runtime when invoked. + type: "array" + x-nullable: true + items: + type: "string" + example: ["--debug", "--systemd-cgroup=false"] + + Commit: + description: | + Commit holds the Git-commit (SHA1) that a binary was built from, as + reported in the version-string of external tools, such as `containerd`, + or `runC`. + type: "object" + properties: + ID: + description: "Actual commit ID of external tool." + type: "string" + example: "cfb82a876ecc11b5ca0977d1733adbe58599088a" + Expected: + description: | + Commit ID of external tool expected by dockerd as set at build time. + type: "string" + example: "2d41c047c83e09a6d61d464906feb2a2f3c52aa4" + + SwarmInfo: + description: | + Represents generic information about swarm. + type: "object" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + default: "" + example: "k67qz4598weg5unwwffg6z1m1" + NodeAddr: + description: | + IP address at which this node can be reached by other nodes in the + swarm. + type: "string" + default: "" + example: "10.0.0.46" + LocalNodeState: + $ref: "#/definitions/LocalNodeState" + ControlAvailable: + type: "boolean" + default: false + example: true + Error: + type: "string" + default: "" + RemoteManagers: + description: | + List of ID's and addresses of other managers in the swarm. + type: "array" + default: null + x-nullable: true + items: + $ref: "#/definitions/PeerNode" + example: + - NodeID: "71izy0goik036k48jg985xnds" + Addr: "10.0.0.158:2377" + - NodeID: "79y6h1o4gv8n120drcprv5nmc" + Addr: "10.0.0.159:2377" + - NodeID: "k67qz4598weg5unwwffg6z1m1" + Addr: "10.0.0.46:2377" + Nodes: + description: "Total number of nodes in the swarm." + type: "integer" + x-nullable: true + example: 4 + Managers: + description: "Total number of managers in the swarm." + type: "integer" + x-nullable: true + example: 3 + Cluster: + $ref: "#/definitions/ClusterInfo" + + LocalNodeState: + description: "Current local status of this node." + type: "string" + default: "" + enum: + - "" + - "inactive" + - "pending" + - "active" + - "error" + - "locked" + example: "active" + + PeerNode: + description: "Represents a peer-node in the swarm" + properties: + NodeID: + description: "Unique identifier of for this node in the swarm." + type: "string" + Addr: + description: | + IP address and ports at which this node can be reached. + type: "string" + paths: /containers/json: get: @@ -3091,9 +4608,9 @@ paths: parameters: - name: "name" in: "query" - description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`." 
type: "string" - pattern: "/?[a-zA-Z0-9_-]+" + pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$" - name: "body" in: "body" description: "Container to create" @@ -3178,7 +4695,7 @@ paths: OomKillDisable: false OomScoreAdj: 500 PidMode: "" - PidsLimit: -1 + PidsLimit: 0 PortBindings: 22/tcp: - HostPort: "11022" @@ -3238,6 +4755,8 @@ paths: description: "Container created successfully" schema: type: "object" + title: "ContainerCreateResponse" + description: "OK response to ContainerCreate operation" required: [Id, Warnings] properties: Id: @@ -3265,10 +4784,6 @@ paths: examples: application/json: message: "No such container: c2ada9df5af8" - 406: - description: "impossible to attach" - schema: - $ref: "#/definitions/ErrorResponse" 409: description: "conflict" schema: @@ -3290,6 +4805,7 @@ paths: description: "no error" schema: type: "object" + title: "ContainerInspectResponse" properties: Id: description: "The ID of the container" @@ -3379,7 +4895,11 @@ paths: AppArmorProfile: type: "string" ExecIDs: - type: "string" + description: "IDs of exec instances that are running in the container." + type: "array" + items: + type: "string" + x-nullable: true HostConfig: $ref: "#/definitions/HostConfig" GraphDriver: @@ -3399,7 +4919,7 @@ paths: Config: $ref: "#/definitions/ContainerConfig" NetworkSettings: - $ref: "#/definitions/NetworkConfig" + $ref: "#/definitions/NetworkSettings" examples: application/json: AppArmorProfile: "" @@ -3436,6 +4956,9 @@ paths: StopTimeout: 10 Created: "2015-01-06T15:47:31.485331387Z" Driver: "devicemapper" + ExecIDs: + - "b35395de42bc8abd327f9dd65d913b9ba28c74d2f0734eeeae84fa1c616a0fca" + - "3fc1232e5cd20c8de182ed81178503dc6437f4e7ef12b52cc5e8de020652f1c4" HostConfig: MaximumIOps: 0 MaximumIOBps: 0 @@ -3498,8 +5021,6 @@ paths: LinkLocalIPv6Address: "" LinkLocalIPv6PrefixLen: 0 SandboxKey: "" - SecondaryIPAddresses: null - SecondaryIPv6Addresses: null EndpointID: "" Gateway: "" GlobalIPv6Address: "" @@ -3576,6 +5097,8 @@ paths: description: "no error" schema: type: "object" + title: "ContainerTopResponse" + description: "OK response to ContainerTop operation" properties: Titles: description: "The ps column titles" @@ -3651,15 +5174,15 @@ paths: Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. operationId: "ContainerLogs" responses: - 101: - description: "logs returned as a stream" + 200: + description: | + logs returned as a stream in response body. + For the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + Note that unlike the attach endpoint, the logs endpoint does not upgrade the connection and does not + set Content-Type. schema: type: "string" format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" 404: description: "no such container" schema: @@ -3679,10 +5202,7 @@ paths: type: "string" - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." 
type: "boolean" default: false - name: "stdout" @@ -3700,6 +5220,11 @@ paths: description: "Only return logs since this time, as a UNIX timestamp" type: "integer" default: 0 + - name: "until" + in: "query" + description: "Only return logs before this time, as a UNIX timestamp" + type: "integer" + default: 0 - name: "timestamps" in: "query" description: "Add timestamps to every log line" @@ -3731,6 +5256,8 @@ paths: items: type: "object" x-go-name: "ContainerChangeResponseItem" + title: "ContainerChangeResponseItem" + description: "change item in response to ContainerChanges operation" required: [Path, Kind] properties: Path: @@ -3804,9 +5331,9 @@ paths: This endpoint returns a live stream of a container’s resource usage statistics. - The `precpu_stats` is the CPU statistic of last read, which is used - for calculating the CPU usage percentage. It is not the same as the - `cpu_stats` field. + The `precpu_stats` is the CPU statistic of the *previous* read, and is + used to calculate the CPU usage percentage. It is not an exact copy + of the `cpu_stats` field. If either `precpu_stats.online_cpus` or `cpu_stats.online_cpus` is nil then for compatibility with older daemons the length of the @@ -3936,7 +5463,7 @@ paths: /containers/{id}/resize: post: summary: "Resize a container TTY" - description: "Resize the TTY for a container. You must restart the container for the resize to take effect." + description: "Resize the TTY for a container." operationId: "ContainerResize" consumes: - "application/octet-stream" @@ -4081,6 +5608,13 @@ paths: examples: application/json: message: "No such container: c2ada9df5af8" + 409: + description: "container is not running" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "Container d37cde0fe4ad63c3a7252023b2f9800282894247d145cb5933ddf6e52cc03a28 is not running" 500: description: "server error" schema: @@ -4109,6 +5643,8 @@ paths: description: "The container has been updated." schema: type: "object" + title: "ContainerUpdateResponse" + description: "OK response to ContainerUpdate operation" properties: Warnings: type: "array" @@ -4462,12 +5998,21 @@ paths: description: "The container has exit." schema: type: "object" + title: "ContainerWaitResponse" + description: "OK response to ContainerWait operation" required: [StatusCode] properties: StatusCode: description: "Exit code of the container" type: "integer" x-nullable: false + Error: + description: "container waiting error, if any" + type: "object" + properties: + Message: + description: "Details of an error" + type: "string" 404: description: "no such container" schema: @@ -4553,7 +6098,7 @@ paths: headers: X-Docker-Container-Path-Stat: type: "string" - description: "TODO" + description: "A base64 - encoded JSON object with some filesystem header information about the path" 400: description: "Bad parameter" schema: @@ -4672,12 +6217,17 @@ paths: in: "query" description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." type: "string" + - name: "copyUIDGID" + in: "query" + description: "If “1”, “true”, then it will copy UID/GID maps to the dest file or dir" + type: "string" - name: "inputStream" in: "body" required: true description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
            schema:
              type: "string"
+             format: "binary"
      tags: ["Container"]
  /containers/prune:
    post:
@@ -4700,6 +6250,7 @@
          description: "No error"
          schema:
            type: "object"
+           title: "ContainerPruneResponse"
            properties:
              ContainersDeleted:
                description: "Container IDs that were deleted"
@@ -4879,8 +6430,19 @@
          type: "integer"
        - name: "buildargs"
          in: "query"
-         description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)"
-         type: "integer"
+         description: >
+           JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker
+           uses the buildargs as the environment context for commands run via the `Dockerfile` RUN
+           instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for
+           passing secret values.
+
+
+           For example, the build arg `FOO=bar` would become `{"FOO":"bar"}` in JSON. This would result in
+           the query parameter `buildargs={"FOO":"bar"}`. Note that `{"FOO":"bar"}` should be URI component encoded.
+
+
+           [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)
+         type: "string"
        - name: "shmsize"
          in: "query"
          description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB."
@@ -4928,6 +6490,21 @@
            Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
          type: "string"
+       - name: "platform"
+         in: "query"
+         description: "Platform in the format os[/arch[/variant]]"
+         type: "string"
+         default: ""
+       - name: "target"
+         in: "query"
+         description: "Target build stage"
+         type: "string"
+         default: ""
+       - name: "outputs"
+         in: "query"
+         description: "BuildKit output configuration"
+         type: "string"
+         default: ""
      responses:
        200:
          description: "no error"
          schema:
@@ -4946,12 +6523,41 @@
      produces:
        - "application/json"
      operationId: "BuildPrune"
+     parameters:
+       - name: "keep-storage"
+         in: "query"
+         description: "Amount of disk space in bytes to keep for cache"
+         type: "integer"
+         format: "int64"
+       - name: "all"
+         in: "query"
+         type: "boolean"
+         description: "Remove all types of build cache"
+       - name: "filters"
+         in: "query"
+         type: "string"
+         description: |
+           A JSON encoded value of the filters (a `map[string][]string`) to process on the list of build cache objects. Available filters:
+           - `until=`: duration relative to daemon's time, during which build cache was not used, in Go's duration format (e.g., '24h')
+           - `id=`
+           - `parent=`
+           - `type=`
+           - `description=`
+           - `inuse`
+           - `shared`
+           - `private`
      responses:
        200:
          description: "No error"
          schema:
            type: "object"
+           title: "BuildPruneResponse"
            properties:
+             CachesDeleted:
+               type: "array"
+               items:
+                 description: "ID of build cache object"
+                 type: "string"
              SpaceReclaimed:
                description: "Disk space reclaimed in bytes"
                type: "integer"
@@ -5009,6 +6615,11 @@
          in: "header"
          description: "A base64-encoded auth configuration.
[See the authentication section for details.](#section/Authentication)" type: "string" + - name: "platform" + in: "query" + description: "Platform in the format os[/arch[/variant]]" + type: "string" + default: "" tags: ["Image"] /images/{name}/json: get: @@ -5131,6 +6742,8 @@ paths: items: type: "object" x-go-name: HistoryResponseItem + title: "HistoryResponseItem" + description: "individual image layer information in response to ImageHistory operation" required: [Id, Created, CreatedBy, Tags, Size, Comment] properties: Id: @@ -5337,6 +6950,7 @@ paths: type: "array" items: type: "object" + title: "ImageSearchResponseItem" properties: description: type: "string" @@ -5412,6 +7026,7 @@ paths: description: "No error" schema: type: "object" + title: "ImagePruneResponse" properties: ImagesDeleted: description: "Images that were deleted" @@ -5439,6 +7054,7 @@ paths: description: "An identity token was generated successfully." schema: type: "object" + title: "SystemAuthResponse" required: [Status] properties: Status: @@ -5476,209 +7092,7 @@ paths: 200: description: "No error" schema: - type: "object" - properties: - Architecture: - type: "string" - Containers: - type: "integer" - ContainersRunning: - type: "integer" - ContainersStopped: - type: "integer" - ContainersPaused: - type: "integer" - CpuCfsPeriod: - type: "boolean" - CpuCfsQuota: - type: "boolean" - Debug: - type: "boolean" - DiscoveryBackend: - type: "string" - DockerRootDir: - type: "string" - Driver: - type: "string" - DriverStatus: - type: "array" - items: - type: "array" - items: - type: "string" - SystemStatus: - type: "array" - items: - type: "array" - items: - type: "string" - Plugins: - type: "object" - properties: - Volume: - type: "array" - items: - type: "string" - Network: - type: "array" - items: - type: "string" - Log: - type: "array" - items: - type: "string" - ExperimentalBuild: - type: "boolean" - HttpProxy: - type: "string" - HttpsProxy: - type: "string" - ID: - type: "string" - IPv4Forwarding: - type: "boolean" - Images: - type: "integer" - IndexServerAddress: - type: "string" - InitPath: - type: "string" - InitSha1: - type: "string" - KernelVersion: - type: "string" - Labels: - type: "array" - items: - type: "string" - MemTotal: - type: "integer" - GenericResources: - $ref: "#/definitions/GenericResources" - MemoryLimit: - type: "boolean" - NCPU: - type: "integer" - NEventsListener: - type: "integer" - NFd: - type: "integer" - NGoroutines: - type: "integer" - Name: - type: "string" - NoProxy: - type: "string" - OomKillDisable: - type: "boolean" - OSType: - type: "string" - OomScoreAdj: - type: "integer" - OperatingSystem: - type: "string" - RegistryConfig: - type: "object" - properties: - IndexConfigs: - type: "object" - additionalProperties: - type: "object" - properties: - Mirrors: - type: "array" - items: - type: "string" - Name: - type: "string" - Official: - type: "boolean" - Secure: - type: "boolean" - InsecureRegistryCIDRs: - type: "array" - items: - type: "string" - SwapLimit: - type: "boolean" - SystemTime: - type: "string" - ServerVersion: - type: "string" - examples: - application/json: - Architecture: "x86_64" - ClusterStore: "etcd://localhost:2379" - CgroupDriver: "cgroupfs" - Containers: 11 - ContainersRunning: 7 - ContainersStopped: 3 - ContainersPaused: 1 - CpuCfsPeriod: true - CpuCfsQuota: true - Debug: false - DockerRootDir: "/var/lib/docker" - Driver: "btrfs" - DriverStatus: - - - - "" - ExperimentalBuild: false - HttpProxy: "http://test:test@localhost:8080" - HttpsProxy: 
"https://test:test@localhost:8080" - ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" - IPv4Forwarding: true - Images: 16 - IndexServerAddress: "https://index.docker.io/v1/" - InitPath: "/usr/bin/docker" - InitSha1: "" - KernelMemory: true - KernelVersion: "3.12.0-1-amd64" - Labels: - - "storage=ssd" - MemTotal: 2099236864 - MemoryLimit: true - NCPU: 1 - NEventsListener: 0 - NFd: 11 - NGoroutines: 21 - Name: "prod-server-42" - NoProxy: "9.81.1.160" - OomKillDisable: true - OSType: "linux" - OperatingSystem: "Boot2Docker" - Plugins: - Volume: - - "local" - Network: - - "null" - - "host" - - "bridge" - RegistryConfig: - IndexConfigs: - docker.io: - Name: "docker.io" - Official: true - Secure: true - InsecureRegistryCIDRs: - - "127.0.0.0/8" - SecurityOptions: - - Key: "Name" - Value: "seccomp" - - Key: "Profile" - Value: "default" - - Key: "Name" - Value: "apparmor" - - Key: "Name" - Value: "selinux" - - Key: "Name" - Value: "userns" - ServerVersion: "1.9.0" - SwapLimit: false - SystemStatus: - - - - "State" - - "Healthy" - SystemTime: "2015-03-10T11:11:23.730591467-07:00" + $ref: "#/definitions/SystemInfo" 500: description: "Server error" schema: @@ -5695,7 +7109,30 @@ paths: description: "no error" schema: type: "object" + title: "SystemVersionResponse" properties: + Platform: + type: "object" + required: [Name] + properties: + Name: + type: "string" + Components: + type: "array" + items: + type: "object" + x-go-name: ComponentVersion + required: [Name, Version] + properties: + Name: + type: "string" + Version: + type: "string" + x-nullable: false + Details: + type: "object" + x-nullable: true + Version: type: "string" ApiVersion: @@ -5749,9 +7186,57 @@ paths: API-Version: type: "string" description: "Max API Version the server supports" + BuildKit-Version: + type: "string" + description: "Default version of docker image builder" Docker-Experimental: type: "boolean" description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + headers: + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" + tags: ["System"] + head: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." + operationId: "SystemPingHead" + produces: ["text/plain"] + responses: + 200: + description: "no error" + schema: + type: "string" + example: "(empty)" + headers: + API-Version: + type: "string" + description: "Max API Version the server supports" + BuildKit-Version: + type: "string" + description: "Default version of docker image builder" + Docker-Experimental: + type: "boolean" + description: "If the server is running with experimental mode enabled" + Cache-Control: + type: "string" + default: "no-cache, no-store, must-revalidate" + Pragma: + type: "string" + default: "no-cache" 500: description: "server error" schema: @@ -5825,7 +7310,7 @@ paths: Various objects within Docker report events when something happens to them. 
- Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` + Containers report these events: `attach`, `commit`, `copy`, `create`, `destroy`, `detach`, `die`, `exec_create`, `exec_detach`, `exec_start`, `exec_die`, `export`, `health_status`, `kill`, `oom`, `pause`, `rename`, `resize`, `restart`, `start`, `stop`, `top`, `unpause`, and `update` Images report these events: `delete`, `import`, `load`, `pull`, `push`, `save`, `tag`, and `untag` @@ -5851,6 +7336,7 @@ paths: description: "no error" schema: type: "object" + title: "SystemEventsResponse" properties: Type: description: "The type of object emitting the event" @@ -5909,16 +7395,20 @@ paths: description: | A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters: + - `config=` config name or ID - `container=` container name or ID - `daemon=` daemon name or ID - `event=` event type - `image=` image name or ID - `label=` image or container label - `network=` network name or ID + - `node=` node ID - `plugin`= plugin name or ID - - `scope`= local or swarm - - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service` or `secret` - - `volume=` volume name or ID + - `scope`= local or swarm + - `secret=` secret name or ID + - `service=` service name or ID + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config` + - `volume=` volume name type: "string" tags: ["System"] /system/df: @@ -5930,6 +7420,7 @@ paths: description: "no error" schema: type: "object" + title: "SystemDataUsageResponse" properties: LayersSize: type: "integer" @@ -5946,6 +7437,10 @@ paths: type: "array" items: $ref: "#/definitions/Volume" + BuildCache: + type: "array" + items: + $ref: "#/definitions/BuildCache" example: LayersSize: 1092588 Images: @@ -6187,6 +7682,9 @@ paths: User: type: "string" description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + WorkingDir: + type: "string" + description: "The working directory for the exec process inside the container." example: AttachStdin: false AttachStdout: true @@ -6285,7 +7783,12 @@ paths: description: "No error" schema: type: "object" + title: "ExecInspectResponse" properties: + CanRemove: + type: "boolean" + DetachKeys: + type: "string" ID: type: "string" Running: @@ -6351,6 +7854,8 @@ paths: description: "Summary volume data that matches the query" schema: type: "object" + title: "VolumeListResponse" + description: "Volume list response" required: [Volumes, Warnings] properties: Volumes: @@ -6427,6 +7932,8 @@ paths: description: "Volume configuration" schema: type: "object" + description: "Volume configuration" + title: "VolumeConfig" properties: Name: description: "The new volume's name. If not specified, Docker generates a name." @@ -6532,6 +8039,7 @@ paths: description: "No error" schema: type: "object" + title: "VolumePruneResponse" properties: VolumesDeleted: description: "Volumes that were deleted" @@ -6626,6 +8134,10 @@ paths: description: | JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. 
Available filters: + - `dangling=` When set to `true` (or `1`), returns all + networks that are not in use by a container. When set to `false` + (or `0`), only networks that are in use by one or more + containers are returned. - `driver=` Matches a network's driver. - `id=` Matches all or part of a network ID. - `label=` or `label==` of a network label. @@ -6710,6 +8222,7 @@ paths: description: "No error" schema: type: "object" + title: "NetworkCreateResponse" properties: Id: description: "The ID of the created network." @@ -6810,7 +8323,7 @@ paths: summary: "Connect a container to a network" operationId: "NetworkConnect" consumes: - - "application/octet-stream" + - "application/json" responses: 200: description: "No error" @@ -6912,6 +8425,7 @@ paths: description: "No error" schema: type: "object" + title: "NetworkPruneResponse" properties: NetworksDeleted: description: "Networks that were deleted" @@ -6963,6 +8477,7 @@ paths: items: description: "Describes a permission the user has to accept upon installing the plugin." type: "object" + title: "PluginPrivilegeItem" properties: Name: type: "string" @@ -7340,6 +8855,7 @@ paths: - `label=` - `membership=`(`accepted`|`pending`)` - `name=` + - `node.label=` - `role=`(`manager`|`worker`)` type: "string" tags: ["Node"] @@ -7449,60 +8965,7 @@ paths: 200: description: "no error" schema: - allOf: - - $ref: "#/definitions/ClusterInfo" - - type: "object" - properties: - JoinTokens: - description: "The tokens workers and managers need to join the swarm." - type: "object" - properties: - Worker: - description: "The token workers can use to join the swarm." - type: "string" - Manager: - description: "The token managers can use to join the swarm." - type: "string" - example: - CreatedAt: "2016-08-15T16:00:20.349727406Z" - Spec: - Dispatcher: - HeartbeatPeriod: 5000000000 - Orchestration: - TaskHistoryRetentionLimit: 10 - CAConfig: - NodeCertExpiry: 7776000000000000 - Raft: - LogEntriesForSlowFollowers: 500 - HeartbeatTick: 1 - SnapshotInterval: 10000 - ElectionTick: 3 - TaskDefaults: {} - EncryptionConfig: - AutoLockManagers: false - Name: "default" - JoinTokens: - Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" - Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" - ID: "70ilmkj2f6sp2137c753w2nmt" - UpdatedAt: "2016-08-15T16:32:09.623207604Z" - Version: - Index: 51 - RootRotationInProgress: false - TLSInfo: - TrustRoot: | - -----BEGIN CERTIFICATE----- - MIIBajCCARCgAwIBAgIUbYqrLSOSQHoxD8CwG6Bi2PJi9c8wCgYIKoZIzj0EAwIw - EzERMA8GA1UEAxMIc3dhcm0tY2EwHhcNMTcwNDI0MjE0MzAwWhcNMzcwNDE5MjE0 - MzAwWjATMREwDwYDVQQDEwhzd2FybS1jYTBZMBMGByqGSM49AgEGCCqGSM49AwEH - A0IABJk/VyMPYdaqDXJb/VXh5n/1Yuv7iNrxV3Qb3l06XD46seovcDWs3IZNV1lf - 3Skyr0ofcchipoiHkXBODojJydSjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMB - Af8EBTADAQH/MB0GA1UdDgQWBBRUXxuRcnFjDfR/RIAUQab8ZV/n4jAKBggqhkjO - PQQDAgNIADBFAiAy+JTe6Uc3KyLCMiqGl2GyWGQqQDEcO3/YG36x7om65AIhAJvz - pxv6zFeVEkAEEkqIYi0omA9+CjanB/6Bz4n1uw8H - -----END CERTIFICATE----- - CertIssuerSubject: "MBMxETAPBgNVBAMTCHN3YXJtLWNh" - CertIssuerPublicKey: "MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEmT9XIw9h1qoNclv9VeHmf/Vi6/uI2vFXdBveXTpcPjqx6i9wNazchk1XWV/dKTKvSh9xyGKmiIeRcE4OiMnJ1A==" + $ref: "#/definitions/Swarm" 404: description: "no such swarm" schema: @@ -7565,14 +9028,36 @@ paths: nodes in order to reach the containers running on this node. Using this parameter it is possible to separate the container data traffic from the management traffic of the cluster. 
type: "string" + DataPathPort: + description: | + DataPathPort specifies the data path port number for data traffic. + Acceptable port range is 1024 to 49151. + if no port is set or is set to 0, default port 4789 will be used. + type: "integer" + format: "uint32" + DefaultAddrPool: + description: | + Default Address Pool specifies default subnet pools for global scope networks. + type: "array" + items: + type: "string" + example: ["10.10.0.0/16", "20.20.0.0/16"] ForceNewCluster: description: "Force creation of a new swarm." type: "boolean" + SubnetSize: + description: | + SubnetSize specifies the subnet size of the networks created from the default subnet pool + type: "integer" + format: "uint32" Spec: $ref: "#/definitions/SwarmSpec" example: ListenAddr: "0.0.0.0:2377" AdvertiseAddr: "192.168.1.1:2377" + DataPathPort: 4789 + DefaultAddrPool: ["10.10.0.0/8", "20.20.0.0/8"] + SubnetSize: 24 ForceNewCluster: false Spec: Orchestration: {} @@ -7627,7 +9112,9 @@ paths: type: "string" RemoteAddrs: description: "Addresses of manager nodes already participating in the swarm." - type: "string" + type: "array" + items: + type: "string" JoinToken: description: "Secret token for joining this swarm." type: "string" @@ -7718,6 +9205,7 @@ paths: description: "no error" schema: type: "object" + title: "UnlockKeyResponse" properties: UnlockKey: description: "The swarm's unlock key." @@ -7809,6 +9297,7 @@ paths: description: "no error" schema: type: "object" + title: "ServiceCreateResponse" properties: ID: description: "The ID of the created service." @@ -8052,7 +9541,10 @@ paths: - name: "version" in: "query" - description: "The version number of the service object being updated. This is required to avoid conflicting writes." + description: "The version number of the service object being updated. + This is required to avoid conflicting writes. + This version number should be the value as currently set on the service *before* the update. + You can find the current version by calling `GET /services/{id}`" required: true type: "integer" - name: "registryAuthFrom" @@ -8078,23 +9570,16 @@ paths: get: summary: "Get service logs" description: | - Get `stdout` and `stderr` logs from a service. + Get `stdout` and `stderr` logs from a service. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "ServiceLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" responses: - 101: - description: "logs returned as a stream" + 200: + description: "logs returned as a stream in response body" schema: type: "string" format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" 404: description: "no such service" schema: @@ -8123,10 +9608,7 @@ paths: default: false - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" @@ -8335,23 +9817,16 @@ paths: get: summary: "Get task logs" description: | - Get `stdout` and `stderr` logs from a task. 
+ Get `stdout` and `stderr` logs from a task. See also [`/containers/{id}/logs`](#operation/ContainerLogs). - **Note**: This endpoint works only for services with the `json-file` or `journald` logging drivers. + **Note**: This endpoint works only for services with the `local`, `json-file` or `journald` logging drivers. operationId: "TaskLogs" - produces: - - "application/vnd.docker.raw-stream" - - "application/json" responses: - 101: - description: "logs returned as a stream" + 200: + description: "logs returned as a stream in response body" schema: type: "string" format: "binary" - 200: - description: "logs returned as a string in response body" - schema: - type: "string" 404: description: "no such task" schema: @@ -8380,10 +9855,7 @@ paths: default: false - name: "follow" in: "query" - description: | - Return the logs as a stream. - - This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/ContainerAttach). + description: "Keep connection after returning logs." type: "boolean" default: false - name: "stdout" @@ -8411,6 +9883,7 @@ paths: description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." type: "string" default: "all" + tags: ["Task"] /secrets: get: summary: "List secrets" @@ -8425,6 +9898,20 @@ paths: items: $ref: "#/definitions/Secret" example: + - ID: "blt1owaxmitz71s9v5zh81zun" + Version: + Index: 85 + CreatedAt: "2017-07-20T13:55:28.678958722Z" + UpdatedAt: "2017-07-20T13:55:28.678958722Z" + Spec: + Name: "mysql-passwd" + Labels: + some.label: "some.value" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" - ID: "ktnbjxoalbkvbvedmg1urrz8h" Version: Index: 11 @@ -8432,6 +9919,8 @@ paths: UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" + Labels: + foo: "bar" 500: description: "server error" schema: @@ -8464,13 +9953,7 @@ paths: 201: description: "no error" schema: - type: "object" - properties: - ID: - description: "The ID of the created secret." - type: "string" - example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" + $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: @@ -8495,6 +9978,11 @@ paths: Labels: foo: "bar" Data: "VEhJUyBJUyBOT1QgQSBSRUFMIENFUlRJRklDQVRFCg==" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" tags: ["Secret"] /secrets/{id}: get: @@ -8516,6 +10004,14 @@ paths: UpdatedAt: "2016-11-05T01:20:17.327670065Z" Spec: Name: "app-dev.crt" + Labels: + foo: "bar" + Driver: + Name: "secret-bucket" + Options: + OptionA: "value for driver option A" + OptionB: "value for driver option B" + 404: description: "secret not found" schema: @@ -8656,13 +10152,7 @@ paths: 201: description: "no error" schema: - type: "object" - properties: - ID: - description: "The ID of the created config." 
- type: "string" - example: - ID: "ktnbjxoalbkvbvedmg1urrz8h" + $ref: "#/definitions/IdResponse" 409: description: "name conflicts with an existing object" schema: @@ -8808,6 +10298,7 @@ paths: schema: type: "object" x-go-name: DistributionInspect + title: "DistributionInspectResponse" required: [Descriptor, Platforms] properties: Descriptor: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/auth.go index 056af6b8..ddf15bb1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/auth.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/auth.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" // AuthConfig contains authorization information for connecting to a Registry type AuthConfig struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go index 931ae10a..bf3463b9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -1,4 +1,4 @@ -package blkiodev +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" import "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/client.go index 0ce2c943..4b9f5028 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/client.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "bufio" @@ -7,7 +7,7 @@ import ( "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" - "github.com/docker/go-units" + units "github.com/docker/go-units" ) // CheckpointCreateOptions holds parameters to create a checkpoint from a container @@ -74,6 +74,7 @@ type ContainerLogsOptions struct { ShowStdout bool ShowStderr bool Since string + Until string Timestamps bool Follow bool Tail string @@ -178,8 +179,35 @@ type ImageBuildOptions struct { SecurityOpt []string ExtraHosts []string // List of extra hosts Target string + SessionID string + Platform string + // Version specifies the version of the unerlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput } +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit = "2" +) + // ImageBuildResponse holds information // returned by a server after building // an image. 
@@ -190,7 +218,8 @@ type ImageBuildResponse struct { // ImageCreateOptions holds information to create images. type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. } // ImageImportSource holds source information for ImageImport @@ -201,9 +230,10 @@ type ImageImportSource struct { // ImageImportOptions holds information to import images from the client host. type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image } // ImageListOptions holds parameters to filter the list of images with. @@ -224,6 +254,7 @@ type ImagePullOptions struct { All bool RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry PrivilegeFunc RequestPrivilegeFunc + Platform string } // RequestPrivilegeFunc is a function interface that diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/configs.go index 20c19f21..178e911a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/configs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/configs.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "github.com/docker/docker/api/types/container" @@ -25,19 +25,6 @@ type ContainerRmConfig struct { ForceRemove, RemoveVolume, RemoveLink bool } -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. -type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - // ExecConfig is a small subset of the Config struct that holds the configuration // for the exec feature of docker. 
type ExecConfig struct { @@ -50,6 +37,7 @@ type ExecConfig struct { Detach bool // Execute in detach mode DetachKeys string // Escape keys for detach Env []string // Environment variables + WorkingDir string // Working directory Cmd []string // Execution commands and args } @@ -67,3 +55,10 @@ type PluginEnableConfig struct { type PluginDisableConfig struct { ForceDisable bool } + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/config.go index 55a03fc9..f767195b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/config.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/config.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" import ( "time" @@ -54,7 +54,7 @@ type Config struct { Env []string // List of environment variable to set in the container Cmd strslice.StrSlice // Command to run when starting the container Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). Image string // Name of the image as it was passed by the operator (e.g. 
could be symbolic) Volumes map[string]struct{} // List of volumes (mounts) used for the container WorkingDir string // Current directory (PWD) in the command will be launched diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_changes.go index 767945a5..222d1410 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_changes.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,7 +7,7 @@ package container // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerChangeResponseItem container change response item +// ContainerChangeResponseItem change item in response to ContainerChanges operation // swagger:model ContainerChangeResponseItem type ContainerChangeResponseItem struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_create.go index c95023b8..1ec9c372 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,7 +7,7 @@ package container // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerCreateCreatedBody container create created body +// ContainerCreateCreatedBody OK response to ContainerCreate operation // swagger:model ContainerCreateCreatedBody type ContainerCreateCreatedBody struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_top.go index 78bc37ee..f8a60668 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_top.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,7 +7,7 @@ package container // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerTopOKBody container top o k body +// ContainerTopOKBody OK response to ContainerTop operation // swagger:model ContainerTopOKBody type ContainerTopOKBody struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_update.go index 
2339366f..33addedf 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_update.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,7 +7,7 @@ package container // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerUpdateOKBody container update o k body +// ContainerUpdateOKBody OK response to ContainerUpdate operation // swagger:model ContainerUpdateOKBody type ContainerUpdateOKBody struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_wait.go index 77ecdbaf..94b6a20e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_wait.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,10 +7,22 @@ package container // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// ContainerWaitOKBody container wait o k body +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} + +// ContainerWaitOKBody OK response to ContainerWait operation // swagger:model ContainerWaitOKBody type ContainerWaitOKBody struct { + // error + // Required: true + Error *ContainerWaitOKBodyError `json:"Error"` + // Exit code of the container // Required: true StatusCode int64 `json:"StatusCode"` diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/host_config.go index 9fea9eb0..c3de3d97 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" import ( "strings" @@ -20,44 +20,70 @@ func (i Isolation) IsDefault() bool { return strings.ToLower(string(i)) == "default" || string(i) == "" } +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +const ( + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV 
is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") +) + // IpcMode represents the container ipc stack. type IpcMode string -// IsPrivate indicates whether the container uses its private ipc stack. +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) + return n == "private" } -// IsHost indicates whether the container uses the host's ipc stack. +// IsHost indicates whether the container shares the host's ipc namespace. func (n IpcMode) IsHost() bool { return n == "host" } -// IsContainer indicates whether the container uses a container's ipc stack. +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == "shareable" +} + +// IsContainer indicates whether the container uses another container's ipc namespace. func (n IpcMode) IsContainer() bool { parts := strings.SplitN(string(n), ":", 2) return len(parts) > 1 && parts[0] == "container" } -// Valid indicates whether the ipc stack is valid. +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == "none" +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() } // Container returns the name of the container ipc stack is going to be used. func (n IpcMode) Container() string { parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { + if len(parts) > 1 && parts[0] == "container" { return parts[1] } return "" @@ -218,6 +244,16 @@ func (n PidMode) Container() string { return "" } +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + // DeviceMapping represents the device mapping between the host and the container. 
type DeviceMapping struct { PathOnHost string @@ -301,13 +337,14 @@ type Resources struct { CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container DeviceCgroupRules []string // List of rule to be added to the device cgroup - DiskQuota int64 // Disk limit (in bytes) + DeviceRequests []DeviceRequest // List of device requests for device drivers KernelMemory int64 // Kernel memory limit (in bytes) + KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) MemoryReservation int64 // Memory soft limit (in bytes) MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap MemorySwappiness *int64 // Tuning container memory swappiness behaviour OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. Ulimits []*units.Ulimit // List of ulimits to be set in the container // Applicable to Windows @@ -343,9 +380,10 @@ type HostConfig struct { // Applicable to UNIX platforms CapAdd strslice.StrSlice // List of kernel capabilities to add to the container CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS server to lookup - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + Capabilities []string `json:"Capabilities"` // List of kernel capabilities to be available for container (this overrides the default set) + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for ExtraHosts []string // List of extra hosts GroupAdd []string // List of additional groups that the container process will run as IpcMode IpcMode // IPC namespace to use for the container @@ -375,6 +413,12 @@ type HostConfig struct { // Mounts specs used by the container Mounts []mount.Mount `json:",omitempty"` + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + // Run a custom init inside the container, if null, use the daemon's configured settings Init *bool `json:",omitempty"` } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go index 2d664d1c..cf6fdf44 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -1,6 +1,6 @@ // +build !windows -package container +package container // import "github.com/docker/docker/api/types/container" // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go index 469923f7..99f803a5 
100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -1,8 +1,4 @@ -package container - -import ( - "strings" -) +package container // import "github.com/docker/docker/api/types/container" // IsBridge indicates whether container uses the bridge network stack // in windows it is given the name NAT @@ -21,16 +17,6 @@ func (n NetworkMode) IsUserDefined() bool { return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() } -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - // IsValid indicates if an isolation technology is valid func (i Isolation) IsValid() bool { return i.IsDefault() || i.IsHyperV() || i.IsProcess() diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/waitcondition.go index 64820fe3..cd8311f9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/waitcondition.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -1,4 +1,4 @@ -package container +package container // import "github.com/docker/docker/api/types/container" // WaitCondition is a type used to specify a container state for which // to wait. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/events/events.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/events/events.go index 5f5f5403..027c6edb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/events/events.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/events/events.go @@ -1,4 +1,4 @@ -package events +package events // import "github.com/docker/docker/api/types/events" const ( // ContainerEventType is the event type that containers generate @@ -19,6 +19,8 @@ const ( NodeEventType = "node" // SecretEventType is the event type that secrets generate SecretEventType = "secret" + // ConfigEventType is the event type that configs generate + ConfigEventType = "config" ) // Actor describes something that generates events, diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/filters/parse.go index beec3d49..1f75403f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/filters/parse.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -1,126 +1,111 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters +/*Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" import ( "encoding/json" - "errors" - "fmt" "regexp" "strings" "github.com/docker/docker/api/types/versions" ) -// Args stores filter arguments as map key:{map key: bool}. 
-// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} +// Args stores a mapping of keys to a set of multiple values. type Args struct { fields map[string]map[string]bool } -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string } -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} } -// ErrBadFormat is an error returned in case of bad format for a filter. -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} -// ToParam packs the Args into a string for easy transport from client to server. -func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte{}, nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { if a.Len() == 0 { return "", nil } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil + buf, err := json.Marshal(a) + return string(buf), err } -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. 
+// +// Deprecated: Use ToJSON func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space if a.Len() == 0 { return "", nil } - // for daemons older than v1.10, filter must be of the form map[string][]string - var buf []byte - var err error if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err } - if err != nil { - return "", err - } - return string(buf), nil + + return ToJSON(a) } -// FromParam unpacks the filter Args. -func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil } - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil } - return Args{m}, nil + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, err + } + + args.fields = deprecatedArgs(deprecated) + return args, nil } -// Get returns the list of values associates with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + if len(raw) == 0 { + return nil + } + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] if values == nil { return make([]string, 0) } @@ -131,37 +116,34 @@ func (filters Args) Get(field string) []string { return slice } -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true } else { - filters.fields[name] = map[string]bool{value: true} + args.fields[key] = map[string]bool{value: true} } } -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - if len(filters.fields[name]) == 0 { - delete(filters.fields, name) +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) } } } -// Len returns the number of fields in the arguments. 
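The hunks above replace the old ParseFlag/ToParam/FromParam helpers with the Arg/NewArgs constructors and the ToJSON/FromJSON codec. A minimal round-trip sketch of the new surface, using only the API shown in this diff (the label and name keys are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Build a filter set with the constructor-style API.
	args := filters.NewArgs(filters.Arg("label", "env=prod"))
	args.Add("name", "web")

	// ToJSON supersedes the removed ToParam helper.
	encoded, err := filters.ToJSON(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(encoded)

	// FromJSON supersedes FromParam; it still falls back to the legacy
	// map[string][]string slice encoding used by older clients.
	decoded, err := filters.FromJSON(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Println(decoded.Get("label")) // [env=prod]
}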
-func (filters Args) Len() int { - return len(filters.fields) +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) } -// MatchKVList returns true if the values for the specified field matches the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. +func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { @@ -172,8 +154,8 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return false } - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) v, ok := sources[testKV[0]] if !ok { @@ -187,16 +169,13 @@ func (filters Args) MatchKVList(field string, sources map[string]string) bool { return true } -// Match returns true if the values for the specified field matches the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { return true } - fieldValues := filters.fields[field] + fieldValues := args.fields[field] for name2match := range fieldValues { match, err := regexp.MatchString(name2match, source) if err != nil { @@ -209,9 +188,9 @@ func (filters Args) Match(field, source string) bool { return false } -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] +// ExactMatch returns true if the source matches exactly one of the values. +func (args Args) ExactMatch(key, source string) bool { + fieldValues, ok := args.fields[key] //do not filter if there is no filter set or cannot determine filter if !ok || len(fieldValues) == 0 { return true @@ -221,14 +200,15 @@ func (filters Args) ExactMatch(field, source string) bool { return fieldValues[source] } -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one. -func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] +// UniqueExactMatch returns true if there is only one value and the source +// matches exactly the value. 
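MatchKVList and Match keep their semantics but now read as set operations over the renamed Args receiver. A short sketch of the difference between the two, with illustrative label data:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(filters.Arg("label", "env=prod"))

	// MatchKVList splits each stored value on "=" and requires every
	// resulting pair to be present in the source map.
	fmt.Println(args.MatchKVList("label", map[string]string{
		"env":  "prod",
		"tier": "web", // extra source pairs are fine
	})) // true

	// Match short-circuits on an exact hit, then falls back to treating
	// each stored value as a regular expression.
	fmt.Println(args.Match("label", "env=prod")) // true
}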
+func (args Args) UniqueExactMatch(key, source string) bool { + fieldValues := args.fields[key] //do not filter if there is no filter set or cannot determine filter if len(fieldValues) == 0 { return true } - if len(filters.fields[field]) != 1 { + if len(args.fields[key]) != 1 { return false } @@ -236,14 +216,14 @@ func (filters Args) UniqueExactMatch(field, source string) bool { return fieldValues[source] } -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { +// FuzzyMatch returns true if the source matches exactly one value, or the +// source has one of the values as a prefix. +func (args Args) FuzzyMatch(key, source string) bool { + if args.ExactMatch(key, source) { return true } - fieldValues := filters.fields[field] + fieldValues := args.fields[key] for prefix := range fieldValues { if strings.HasPrefix(source, prefix) { return true @@ -252,30 +232,39 @@ func (filters Args) FuzzyMatch(field, source string) bool { return false } -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] +// Contains returns true if the key exists in the mapping +func (args Args) Contains(field string) bool { + _, ok := args.fields[field] return ok } -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { +type invalidFilter string + +func (e invalidFilter) Error() string { + return "Invalid filter '" + string(e) + "'" +} + +func (invalidFilter) InvalidParameter() {} + +// Validate compared the set of accepted keys against the keys in the mapping. +// An error is returned if any mapping keys are not in the accepted set. +func (args Args) Validate(accepted map[string]bool) error { + for name := range args.fields { if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) + return invalidFilter(name) } } return nil } -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. -func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. +func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { return nil } - for v := range filters.fields[field] { + for v := range args.fields[field] { if err := op(v); err != nil { return err } @@ -283,6 +272,22 @@ func (filters Args) WalkValues(field string, op func(value string) error) error return nil } +// Clone returns a copy of args. 
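Include is renamed to Contains, and Validate now returns a typed invalidFilter error (implementing InvalidParameter) instead of a fmt.Errorf value. A hedged sketch of the call-site pattern; the accepted-key set here is illustrative, not one defined by this package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

// acceptedKeys is an illustrative allow-list a caller might define.
var acceptedKeys = map[string]bool{"name": true, "label": true}

func main() {
	args := filters.NewArgs(filters.Arg("name", "web"))

	// Unknown keys now surface as a typed invalid-parameter error.
	if err := args.Validate(acceptedKeys); err != nil {
		panic(err)
	}

	// Contains replaces the old Include method.
	if args.Contains("name") {
		// WalkValues visits every value stored under the key and stops
		// at the first error returned by the callback.
		_ = args.WalkValues("name", func(v string) error {
			fmt.Println("filtering on name:", v)
			return nil
		})
	}
}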
+func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + func deprecatedArgs(d map[string][]string) map[string]map[string]bool { m := map[string]map[string]bool{} for k, v := range d { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/image/image_history.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/image/image_history.go index 0dd30c72..b5a7a0c4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/image/image_history.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/image/image_history.go @@ -1,4 +1,4 @@ -package image +package image // import "github.com/docker/docker/api/types/image" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,7 +7,7 @@ package image // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// HistoryResponseItem history response item +// HistoryResponseItem individual image layer information in response to ImageHistory operation // swagger:model HistoryResponseItem type HistoryResponseItem struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/mount/mount.go index 2744f85d..ab4446b3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/api/types/mount" import ( "os" @@ -15,6 +15,8 @@ const ( TypeVolume Type = "volume" // TypeTmpfs is the type for mounting tmpfs TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" ) // Mount represents a mount (volume). @@ -65,7 +67,7 @@ var Propagations = []Propagation{ type Consistency string const ( - // ConsistencyFull guarantees bind-mount-like consistency + // ConsistencyFull guarantees bind mount-like consistency ConsistencyFull Consistency = "consistent" // ConsistencyCached mounts can cache read data and FS structure ConsistencyCached Consistency = "cached" @@ -77,7 +79,8 @@ const ( // BindOptions defines options specific to mounts of type "bind". type BindOptions struct { - Propagation Propagation `json:",omitempty"` + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` } // VolumeOptions represents the options for a mount of type volume. 
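The mount.go hunk above introduces TypeNamedPipe for Windows named pipes and a NonRecursive flag on BindOptions. A minimal sketch of both knobs; the paths are placeholders:

package main

import "github.com/docker/docker/api/types/mount"

func main() {
	// Windows named-pipe mount using the new "npipe" type.
	pipe := mount.Mount{
		Type:   mount.TypeNamedPipe,
		Source: `\\.\pipe\docker_engine`, // placeholder pipe name
		Target: `\\.\pipe\docker_engine`,
	}

	// Bind mount that opts out of the default recursive (rbind) behavior.
	bind := mount.Mount{
		Type:        mount.TypeBind,
		Source:      "/src", // placeholder host path
		Target:      "/dst",
		BindOptions: &mount.BindOptions{NonRecursive: true},
	}
	_, _ = pipe, bind
}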
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/network/network.go index 7c7dbacc..71e97338 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/network/network.go @@ -1,4 +1,8 @@ -package network +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/errdefs" +) // Address represents an IP address type Address struct { @@ -106,3 +110,18 @@ type NetworkingConfig struct { type ConfigReference struct { Network string } + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return errdefs.InvalidParameter(filter.Validate(acceptedFilters)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin.go index ed3c2c26..abae48b9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin.go @@ -11,7 +11,7 @@ type Plugin struct { // Required: true Config PluginConfig `json:"Config"` - // True when the plugin is running. False when the plugin is not running, only installed. + // True if the plugin is running. False if the plugin is not running, only installed. // Required: true Enabled bool `json:"Enabled"` @@ -121,6 +121,9 @@ type PluginConfigArgs struct { // swagger:model PluginConfigInterface type PluginConfigInterface struct { + // Protocol to use for clients connecting to the plugin. 
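The network.go hunk above adds ValidateFilters, which checks a filters.Args against the package's accepted-key set and wraps failures via errdefs.InvalidParameter (which is expected to pass a nil error through unchanged). A sketch of the intended call site, using one of the accepted keys:

package main

import (
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	args := filters.NewArgs(filters.Arg("driver", "bridge"))

	// nil for accepted keys; a key outside the accepted set comes back
	// as an invalid-parameter error.
	if err := network.ValidateFilters(args); err != nil {
		panic(err)
	}
}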
+ ProtocolScheme string `json:"ProtocolScheme,omitempty"` + // socket // Required: true Socket string `json:"Socket"` diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin_responses.go index 1c6461f2..60d1fb5a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin_responses.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( "encoding/json" @@ -9,14 +9,6 @@ import ( // PluginsListResponse contains the response for the Engine API type PluginsListResponse []*Plugin -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - // UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { versionIndex := len(p) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/port.go index ad52d46d..d9123474 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/port.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/port.go @@ -7,7 +7,7 @@ package types // swagger:model Port type Port struct { - // IP + // Host IP address that the container's port is mapped to IP string `json:"IP,omitempty"` // Port on the container diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/authenticate.go index 42cac443..f0a2113e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/authenticate.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -1,4 +1,4 @@ -package registry +package registry // import "github.com/docker/docker/api/types/registry" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/registry.go index b98a943a..8789ad3b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/registry.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -1,4 +1,4 @@ -package registry +package registry // import "github.com/docker/docker/api/types/registry" import ( "encoding/json" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/seccomp.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/seccomp.go index 7d62c9a4..2259c6be 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/seccomp.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/seccomp.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" // Seccomp represents the config for a seccomp profile for syscall restriction. 
type Seccomp struct { @@ -77,8 +77,9 @@ type Arg struct { // Filter is used to conditionally apply Seccomp rules type Filter struct { - Caps []string `json:"caps,omitempty"` - Arches []string `json:"arches,omitempty"` + Caps []string `json:"caps,omitempty"` + Arches []string `json:"arches,omitempty"` + MinKernel string `json:"minKernel,omitempty"` } // Syscall is used to match a group of syscalls in Seccomp diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/stats.go index 7ca76a5b..20daebed 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/stats.go @@ -1,6 +1,6 @@ // Package types is used for API stability in the types and response to the // consumers of the API stats endpoint. -package types +package types // import "github.com/docker/docker/api/types" import "time" @@ -120,7 +120,7 @@ type NetworkStats struct { RxBytes uint64 `json:"rx_bytes"` // Packets received. Windows and Linux. RxPackets uint64 `json:"rx_packets"` - // Received errors. Not used on Windows. Note that we dont `omitempty` this + // Received errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. RxErrors uint64 `json:"rx_errors"` // Incoming packets dropped. Windows and Linux. @@ -129,7 +129,7 @@ type NetworkStats struct { TxBytes uint64 `json:"tx_bytes"` // Packets sent. Windows and Linux. TxPackets uint64 `json:"tx_packets"` - // Sent errors. Not used on Windows. Note that we dont `omitempty` this + // Sent errors. Not used on Windows. Note that we don't `omitempty` this // field as it is expected in the >=v1.21 API stats structure. TxErrors uint64 `json:"tx_errors"` // Outgoing packets dropped. Windows and Linux. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/strslice/strslice.go index bad493fb..82921ceb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/strslice/strslice.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/strslice/strslice.go @@ -1,4 +1,4 @@ -package strslice +package strslice // import "github.com/docker/docker/api/types/strslice" import "encoding/json" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/common.go index 54af82b3..ef020f45 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/common.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "time" @@ -20,7 +20,7 @@ type Annotations struct { Labels map[string]string `json:"Labels"` } -// Driver represents a driver (network, logging). +// Driver represents a driver (network, logging, secrets backend). 
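The seccomp hunk above lets a Filter carry a MinKernel constraint alongside Caps and Arches, so a rule can be scoped to kernels at or above a given version. A hedged sketch of the wire form; the capability and version values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	f := types.Filter{
		Caps:      []string{"CAP_SYS_ADMIN"},
		Arches:    []string{"amd64"},
		MinKernel: "4.8", // apply only on kernel 4.8 and newer
	}
	out, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
	// {"caps":["CAP_SYS_ADMIN"],"arches":["amd64"],"minKernel":"4.8"}
}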
type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/config.go index 0fb021ce..16202ccc 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/config.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "os" @@ -13,6 +13,10 @@ type Config struct { type ConfigSpec struct { Annotations Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` } // ConfigReferenceFileTarget is a file target in a config reference @@ -23,9 +27,14 @@ type ConfigReferenceFileTarget struct { Mode os.FileMode } +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose. +type ConfigReferenceRuntimeTarget struct{} + // ConfigReference is a reference to a config in swarm type ConfigReference struct { - File *ConfigReferenceFileTarget + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` ConfigID string ConfigName string } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/container.go index 6f8b45f6..48190c17 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import ( "time" @@ -33,6 +33,7 @@ type SELinuxContext struct { // CredentialSpec for managed service account (Windows only) type CredentialSpec struct { + Config string File string Registry string } @@ -55,6 +56,7 @@ type ContainerSpec struct { User string `json:",omitempty"` Groups []string `json:",omitempty"` Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` StopSignal string `json:",omitempty"` TTY bool `json:",omitempty"` OpenStdin bool `json:",omitempty"` @@ -65,8 +67,10 @@ type ContainerSpec struct { // The format of extra hosts on swarmkit is specified in: // http://man7.org/linux/man-pages/man5/hosts.5.html // IP_address canonical_hostname [aliases...] 
- Hosts []string `json:",omitempty"` - DNSConfig *DNSConfig `json:",omitempty"` - Secrets []*SecretReference `json:",omitempty"` - Configs []*ConfigReference `json:",omitempty"` + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/network.go index 97c484e1..98ef3284 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import ( "github.com/docker/docker/api/types/network" @@ -62,6 +62,8 @@ const ( PortConfigProtocolTCP PortConfigProtocol = "tcp" // PortConfigProtocolUDP UDP PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" ) // EndpointVirtualIP represents the virtual ip of a port. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/node.go index 28c6851e..1e30f5fa 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" // Node represents a node. 
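The ContainerSpec hunk above picks up Init, Isolation, and Sysctls (plus a Config field on CredentialSpec). A minimal sketch of the new fields; the image and sysctl values are placeholders, and the Isolation constants are assumed to come from the container package as in this generation of the Docker API:

package main

import (
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/swarm"
)

func main() {
	useInit := true
	spec := swarm.ContainerSpec{
		Image:     "nginx:alpine", // placeholder image
		Init:      &useInit,       // run an init process inside the container
		Isolation: container.IsolationProcess,
		Sysctls:   map[string]string{"net.core.somaxconn": "1024"},
	}
	_ = spec
}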
type Node struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime.go index c4c731dc..0c77403c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" // RuntimeType is the type of runtime used for the TaskSpec type RuntimeType string @@ -11,9 +11,17 @@ const ( RuntimeContainer RuntimeType = "container" // RuntimePlugin is the plugin based runtime RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" // RuntimeURLContainer is the proto url for the container type RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" // RuntimeURLPlugin is the proto url for the plugin type RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" ) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 00000000..98c2806c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 00000000..1fdc9b04 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,712 @@ +// Code generated by protoc-gen-gogo. +// source: plugin.proto +// DO NOT EDIT! + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. 
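The runtime.go hunk above adds the "attachment" runtime and its NetworkAttachmentSpec, and the generated code below defines the PluginSpec payload for plugin-runtime services. A sketch of building and proto-encoding a PluginSpec with the Marshal method the generated file provides (the plugin name and privilege are placeholders):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm/runtime"
)

func main() {
	spec := &runtime.PluginSpec{
		Name:   "example/sshfs", // placeholder plugin name
		Remote: "example/sshfs:latest",
		Privileges: []*runtime.PluginPrivilege{{
			Name:        "network",
			Description: "permission to access the host network",
			Value:       []string{"host"},
		}},
		Disabled: false,
	}
	raw, err := spec.Marshal() // gogo-generated protobuf encoding
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded PluginSpec: %d bytes\n", len(raw))
}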
+type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += 
copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Plugin(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Plugin(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 196 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0xc8, 0x29, 0x4d, + 0xcf, 0xcc, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x57, 0x6a, 0x63, 0xe4, 0xe2, 0x0a, 0x00, 0x0b, + 0x04, 
0x17, 0xa4, 0x26, 0x0b, 0x09, 0x71, 0xb1, 0xe4, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x06, 0x81, 0xd9, 0x42, 0x62, 0x5c, 0x6c, 0x45, 0xa9, 0xb9, 0xf9, 0x25, 0xa9, 0x12, + 0x4c, 0x60, 0x51, 0x28, 0x4f, 0xc8, 0x80, 0x8b, 0xab, 0xa0, 0x28, 0xb3, 0x2c, 0x33, 0x27, 0x35, + 0x3d, 0xb5, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x40, 0x0f, 0x62, 0x58, 0x00, 0x4c, + 0x22, 0x08, 0x49, 0x8d, 0x90, 0x14, 0x17, 0x47, 0x4a, 0x66, 0x71, 0x62, 0x52, 0x4e, 0x6a, 0x8a, + 0x04, 0x8b, 0x02, 0xa3, 0x06, 0x47, 0x10, 0x9c, 0xaf, 0x14, 0xcb, 0xc5, 0x8f, 0xa6, 0x15, 0xab, + 0x63, 0x14, 0xb8, 0xb8, 0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0x32, 0xf3, 0xf3, 0xa0, + 0x2e, 0x42, 0x16, 0x12, 0x12, 0xe1, 0x62, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0xbb, 0x88, 0x33, + 0x08, 0xc2, 0x71, 0xe2, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, + 0x18, 0x93, 0xd8, 0xc0, 0x9e, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x84, 0xad, 0x79, + 0x0c, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 00000000..6d63b778 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/secret.go index fdb23888..d5213ec9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/secret.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "os" @@ -12,7 +12,12 @@ type Secret struct { // SecretSpec represents a secret specification from a secret in swarm type SecretSpec struct { Annotations - Data []byte `json:",omitempty"` + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. 
+ Templating *Driver `json:",omitempty"` } // SecretReferenceFileTarget is a file target in a secret reference diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/service.go index fa31a7ec..abf192e7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/service.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -1,4 +1,4 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" import "time" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/swarm.go index 5b74f14b..b25f9996 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -1,8 +1,10 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "time" +) -// ClusterInfo represents info about the cluster for outputing in "info" +// ClusterInfo represents info about the cluster for outputting in "info" // it contains the same information as "Swarm", but without the JoinTokens type ClusterInfo struct { ID string @@ -10,6 +12,9 @@ type ClusterInfo struct { Spec Spec TLSInfo TLSInfo RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 } // Swarm represents a swarm. @@ -149,10 +154,13 @@ type InitRequest struct { ListenAddr string AdvertiseAddr string DataPathAddr string + DataPathPort uint32 ForceNewCluster bool Spec Spec AutoLockManagers bool Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 } // JoinRequest is the request used to join a swarm. @@ -201,6 +209,8 @@ type Info struct { Managers int `json:",omitempty"` Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` } // Peer represents a peer. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/task.go index a598a79d..d5a57df5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -1,6 +1,10 @@ -package swarm +package swarm // import "github.com/docker/docker/api/types/swarm" -import "time" +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) // TaskState represents the state of a task. type TaskState string @@ -32,6 +36,10 @@ const ( TaskStateFailed TaskState = "failed" // TaskStateRejected REJECTED TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" ) // Task represents a task. @@ -47,11 +55,19 @@ type Task struct { Status TaskStatus `json:",omitempty"` DesiredState TaskState `json:",omitempty"` NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` } // TaskSpec represents the spec of a task. 
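The swarm.go hunk above threads the new address-pool settings through ClusterInfo and InitRequest. A sketch of an init request exercising them; the addresses and pool values are illustrative:

package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	req := swarm.InitRequest{
		ListenAddr:      "0.0.0.0:2377",
		AdvertiseAddr:   "192.168.1.10:2377", // placeholder address
		DefaultAddrPool: []string{"10.20.0.0/16"},
		SubnetSize:      24,   // carve /24 networks out of the pool
		DataPathPort:    4789, // VXLAN data-path port
	}
	_ = req
}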
type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + Resources *ResourceRequirements `json:",omitempty"` RestartPolicy *RestartPolicy `json:",omitempty"` Placement *Placement `json:",omitempty"` @@ -71,8 +87,34 @@ type TaskSpec struct { // Resources represents resources (CPU/Memory). type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` } // ResourceRequirements represents resources requirements. @@ -85,6 +127,7 @@ type ResourceRequirements struct { type Placement struct { Constraints []string `json:",omitempty"` Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` // Platforms stores all the platforms that the image can run on. // This field is used in the platform filter for scheduling. If empty, @@ -127,19 +170,19 @@ const ( // TaskStatus represents the status of a task. type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus ContainerStatus `json:",omitempty"` - PortStatus PortStatus `json:",omitempty"` + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` } // ContainerStatus represents the status of a container. 
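TaskSpec above now takes a *ContainerSpec, mutually exclusive with PluginSpec and NetworkAttachmentSpec, and Resources can carry user-defined GenericResources. A hedged sketch combining the two; the image and the GPU resource kind are illustrative, and ResourceRequirements is assumed to expose Limits/Reservations as *Resources as in this Docker API generation:

package main

import "github.com/docker/docker/api/types/swarm"

func main() {
	task := swarm.TaskSpec{
		// ContainerSpec is now a pointer; leave it nil when PluginSpec or
		// NetworkAttachmentSpec selects a different runtime.
		ContainerSpec: &swarm.ContainerSpec{Image: "cuda-app:latest"}, // placeholder
		Resources: &swarm.ResourceRequirements{
			Reservations: &swarm.Resources{
				GenericResources: []swarm.GenericResource{{
					DiscreteResourceSpec: &swarm.DiscreteGenericResource{
						Kind:  "GPU", // example user-defined resource kind
						Value: 2,
					},
				}},
			},
		},
	}
	_ = task
}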
type ContainerStatus struct { - ContainerID string `json:",omitempty"` - PID int `json:",omitempty"` - ExitCode int `json:",omitempty"` + ContainerID string + PID int + ExitCode int } // PortStatus represents the port status of a task's host ports whose diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/duration_convert.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/duration_convert.go index 63e1eec1..84b6f073 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/duration_convert.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/duration_convert.go @@ -1,4 +1,4 @@ -package time +package time // import "github.com/docker/docker/api/types/time" import ( "strconv" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/timestamp.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/timestamp.go index 9aa9702d..ea3495ef 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/timestamp.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/time/timestamp.go @@ -1,4 +1,4 @@ -package time +package time // import "github.com/docker/docker/api/types/time" import ( "fmt" @@ -29,10 +29,8 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } var format string - var parseInLocation bool - // if the string has a Z or a + or three dashes use parse otherwise use parseinlocation - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) + parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) if strings.Contains(value, ".") { if parseInLocation { @@ -84,11 +82,14 @@ func GetTimestamp(value string, reference time.Time) (string, error) { } if err != nil { - // if there is a `-` then it's an RFC3339 like timestamp otherwise assume unixtimestamp + // if there is a `-` then it's an RFC3339 like timestamp if strings.Contains(value, "-") { return "", err // was probably an RFC3339 like timestamp but the parser failed with an error } - return value, nil // unixtimestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) + if _, _, err := parseTimestamp(value); err != nil { + return "", fmt.Errorf("failed to parse value as time or duration: %q", value) + } + return value, nil // unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) } return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil @@ -106,6 +107,10 @@ func ParseTimestamps(value string, def int64) (int64, int64, error) { if value == "" { return def, 0, nil } + return parseTimestamp(value) +} + +func parseTimestamp(value string) (int64, int64, error) { sa := strings.SplitN(value, ".", 2) s, err := strconv.ParseInt(sa[0], 10, 64) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/types.go index c905466e..a39ffcb7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/types.go @@ -1,4 +1,4 @@ -package types +package types // import "github.com/docker/docker/api/types" import ( 
"errors" @@ -45,6 +45,12 @@ type ImageInspect struct { VirtualSize int64 GraphDriver GraphDriverData RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` } // Container contains response of Engine API: @@ -96,14 +102,27 @@ type ContainerStats struct { // Ping contains response of Engine API: // GET "/_ping" type Ping struct { - APIVersion string - OSType string - Experimental bool + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` } // Version contains response of Engine API: // GET "/version" type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + Version string APIVersion string `json:"ApiVersion"` MinAPIVersion string `json:"MinAPIVersion,omitempty"` @@ -139,10 +158,12 @@ type Info struct { MemoryLimit bool SwapLimit bool KernelMemory bool + KernelMemoryTCP bool CPUCfsPeriod bool `json:"CpuCfsPeriod"` CPUCfsQuota bool `json:"CpuCfsQuota"` CPUShares bool CPUSet bool + PidsLimit bool IPv4Forwarding bool BridgeNfIptables bool BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` @@ -162,6 +183,7 @@ type Info struct { RegistryConfig *registry.ServiceConfig NCPU int MemTotal int64 + GenericResources []swarm.GenericResource DockerRootDir string HTTPProxy string `json:"HttpProxy"` HTTPSProxy string `json:"HttpsProxy"` @@ -185,6 +207,8 @@ type Info struct { RuncCommit Commit InitCommit Commit SecurityOptions []string + ProductLicense string `json:",omitempty"` + Warnings []string } // KeyValue holds a key/value pair @@ -320,6 +344,7 @@ type ContainerJSONBase struct { Name string RestartCount int Driver string + Platform string MountLabel string ProcessLabel string AppArmorProfile string @@ -468,6 +493,12 @@ type NetworkDisconnect struct { Force bool } +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + // Checkpoint represents the details of a checkpoint type Checkpoint struct { Name string // Name is the name of the checkpoint @@ -482,10 +513,12 @@ type Runtime struct { // DiskUsage contains response of Engine API: // GET "/system/df" type DiskUsage struct { - LayersSize int64 - Images []*ImageSummary - Containers []*Container - Volumes []*Volume + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuildCache []*BuildCache + BuilderSize int64 // deprecated } // ContainersPruneReport contains the response for Engine API: @@ -509,6 +542,13 @@ type ImagesPruneReport struct { SpaceReclaimed uint64 } +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} + // NetworksPruneReport contains the response for Engine API: // POST "/networks/prune" type NetworksPruneReport struct { @@ -552,3 +592,24 @@ type PushResult struct { type BuildResult struct { ID string } + +// BuildCache contains information about a build cache record +type BuildCache struct { + ID string + Parent string + Type string + Description string + InUse bool 
+ Shared bool + Size int64 + CreatedAt time.Time + LastUsedAt *time.Time + UsageCount int +} + +// BuildCachePruneOptions hold parameters to prune the build cache +type BuildCachePruneOptions struct { + All bool + KeepStorage int64 + Filters filters.Args +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/versions/compare.go index 611d4fed..8ccb0aa9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/versions/compare.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/versions/compare.go @@ -1,4 +1,4 @@ -package versions +package versions // import "github.com/docker/docker/api/types/versions" import ( "strconv" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume.go index a69b0cfb..b5ee96a5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume.go @@ -7,7 +7,7 @@ package types // swagger:model Volume type Volume struct { - // Time volume was created. + // Date/Time the volume was created. CreatedAt string `json:"CreatedAt,omitempty"` // Name of the volume driver used by the volume. @@ -47,15 +47,23 @@ type Volume struct { UsageData *VolumeUsageData `json:"UsageData,omitempty"` } -// VolumeUsageData volume usage data +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// // swagger:model VolumeUsageData type VolumeUsageData struct { - // The number of containers referencing this volume. + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // // Required: true RefCount int64 `json:"RefCount"` - // The disk space used by the volume (local driver only) + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // // Required: true Size int64 `json:"Size"` } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volume_create.go similarity index 81% rename from vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volumes_create.go rename to vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volume_create.go index 9f70e43c..0c3772d3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volumes_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volume_create.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -7,9 +7,9 @@ package volume // See hack/generate-swagger-api.sh // ---------------------------------------------------------------------------- -// VolumesCreateBody volumes create body -// swagger:model VolumesCreateBody -type VolumesCreateBody struct { +// VolumeCreateBody Volume configuration +// swagger:model VolumeCreateBody +type VolumeCreateBody struct { // Name of the volume driver to use. // Required: true diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volume_list.go similarity index 75% rename from vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volumes_list.go rename to vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volume_list.go index 833dad93..45c3c1c9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volumes_list.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/api/types/volume/volume_list.go @@ -1,4 +1,4 @@ -package volume +package volume // import "github.com/docker/docker/api/types/volume" // ---------------------------------------------------------------------------- // DO NOT EDIT THIS FILE @@ -9,9 +9,9 @@ package volume import "github.com/docker/docker/api/types" -// VolumesListOKBody volumes list o k body -// swagger:model VolumesListOKBody -type VolumesListOKBody struct { +// VolumeListOKBody Volume list response +// swagger:model VolumeListOKBody +type VolumeListOKBody struct { // List of volumes // Required: true diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/README.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/README.md index 059dfb3c..992f1811 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/README.md @@ -16,7 +16,7 @@ import ( ) func main() { - cli, err := client.NewEnvClient() + cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/build_cancel.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/build_cancel.go new file mode 100644 index 00000000..3aae43e3 --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/build_cancel.go @@ -0,0 +1,16 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" +) + +// BuildCancel requests the daemon to cancel ongoing build request +func (cli *Client) BuildCancel(ctx context.Context, id string) error { + query := url.Values{} + query.Set("id", id) + + serverResp, err := cli.post(ctx, "/build/cancel", query, nil, nil) + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/build_prune.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/build_prune.go new file mode 100644 index 00000000..397d67cd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/build_prune.go @@ -0,0 +1,45 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "fmt" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/pkg/errors" +) + +// BuildCachePrune requests the daemon to delete unused cache data +func (cli *Client) BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) { + if err := cli.NewVersionError("1.31", "build prune"); err != nil { + return nil, err + } + + report := types.BuildCachePruneReport{} + + query := url.Values{} + if opts.All { + query.Set("all", "1") + } + query.Set("keep-storage", fmt.Sprintf("%d", opts.KeepStorage)) + filters, err := filters.ToJSON(opts.Filters) + if err != nil { + return nil, errors.Wrap(err, "prune could not marshal filters option") + } + query.Set("filters", filters) + + serverResp, err := cli.post(ctx, "/build/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) + + if err != nil { + return nil, err + } + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return nil, fmt.Errorf("Error retrieving disk usage: %v", err) + } + + return &report, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_create.go index 0effe498..921024fe 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_create.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointCreate creates a checkpoint from the given container with the given name diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_delete.go index e6e75588..54f55fa7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointDelete deletes the checkpoint with the given name from the given container diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_list.go index ffe44bc9..66d46dd1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" - "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // CheckpointList returns the checkpoints of the given container in the docker host @@ -19,14 +18,11 @@ func (cli *Client) CheckpointList(ctx context.Context, container string, options } resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) + defer ensureReaderClosed(resp) if err != nil { - if resp.statusCode == http.StatusNotFound { - return checkpoints, containerNotFoundError{container} - } - return checkpoints, err + return checkpoints, wrapResponseError(err, resp, "container", container) } err = json.NewDecoder(resp.body).Decode(&checkpoints) - ensureReaderClosed(resp) return checkpoints, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client.go index df3698ad..b63d4d6d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client.go @@ -1,10 +1,6 @@ /* Package client is a Go client for the Docker Engine API. -The "docker" command uses this package to communicate with the daemon. It can also -be used by your own Go applications to do anything the command-line interface does -- running containers, pulling images, managing swarms, etc. - For more information about the Engine API, see the documentation: https://docs.docker.com/engine/reference/api/ @@ -27,7 +23,7 @@ For example, to list running containers (the equivalent of "docker ps"): ) func main() { - cli, err := client.NewEnvClient() + cli, err := client.NewClientWithOpts(client.FromEnv) if err != nil { panic(err) } @@ -43,21 +39,27 @@ For example, to list running containers (the equivalent of "docker ps"): } */ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "fmt" + "net" "net/http" "net/url" - "os" - "path/filepath" + "path" "strings" "github.com/docker/docker/api" + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/versions" "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" + "github.com/pkg/errors" ) +// ErrRedirect is the error returned by checkRedirect when the request is non-GET. +var ErrRedirect = errors.New("unexpected redirect in response") + // Client is the API client that performs all operations // against a docker server. type Client struct { @@ -79,156 +81,186 @@ type Client struct { customHTTPHeaders map[string]string // manualOverride is set to true when the version was set by users. manualOverride bool + + // negotiateVersion indicates if the client should automatically negotiate + // the API version to use when making requests. API version negotiation is + // performed on the first request, after which negotiated is set to "true" + // so that subsequent requests do not re-negotiate. 
+ negotiateVersion bool + + // negotiated indicates that API version negotiation took place + negotiated bool } -// NewEnvClient initializes a new API client based on environment variables. -// Use DOCKER_HOST to set the url to the docker server. -// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest. -// Use DOCKER_CERT_PATH to load the TLS certificates from. -// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default. -func NewEnvClient() (*Client, error) { - var client *http.Client - if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" { - options := tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "", - } - tlsc, err := tlsconfig.Client(options) - if err != nil { - return nil, err - } - - client = &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: tlsc, - }, - } +// CheckRedirect specifies the policy for dealing with redirect responses: +// If the request is non-GET return `ErrRedirect`. Otherwise use the last response. +// +// Go 1.8 changes behavior for HTTP redirects (specifically 301, 307, and 308) in the client . +// The Docker client (and by extension docker API client) can be made to send a request +// like POST /containers//start where what would normally be in the name section of the URL is empty. +// This triggers an HTTP 301 from the daemon. +// In go 1.8 this 301 will be converted to a GET request, and ends up getting a 404 from the daemon. +// This behavior change manifests in the client in that before the 301 was not followed and +// the client did not generate an error, but now results in a message like Error response from daemon: page not found. +func CheckRedirect(req *http.Request, via []*http.Request) error { + if via[0].Method == http.MethodGet { + return http.ErrUseLastResponse } - - host := os.Getenv("DOCKER_HOST") - if host == "" { - host = DefaultDockerHost - } - version := os.Getenv("DOCKER_API_VERSION") - if version == "" { - version = api.DefaultVersion - } - - cli, err := NewClient(host, version, client, nil) - if err != nil { - return cli, err - } - if os.Getenv("DOCKER_API_VERSION") != "" { - cli.manualOverride = true - } - return cli, nil + return ErrRedirect } -// NewClient initializes a new API client for the given host and API version. -// It uses the given http client as transport. +// NewClientWithOpts initializes a new API client with default values. It takes functors +// to modify values when creating it, like `NewClientWithOpts(WithVersion(…))` // It also initializes the custom http headers to add to each request. // // It won't send any version information if the version number is empty. It is // highly recommended that you set a version or your client may break if the // server is upgraded. 
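A minimal usage sketch of the options-based constructor described above, assuming the FromEnv and WithAPIVersionNegotiation options exported by this package (placeholder program, not part of the vendored diff):

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honors DOCKER_HOST, DOCKER_API_VERSION, DOCKER_CERT_PATH and
	// DOCKER_TLS_VERIFY; WithAPIVersionNegotiation sets the negotiateVersion
	// flag shown above so the version is picked on the first request.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	fmt.Println("client version:", cli.ClientVersion(), "server version:", ping.APIVersion)
}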
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { - proto, addr, basePath, err := ParseHost(host) +func NewClientWithOpts(ops ...Opt) (*Client, error) { + client, err := defaultHTTPClient(DefaultDockerHost) if err != nil { return nil, err } + c := &Client{ + host: DefaultDockerHost, + version: api.DefaultVersion, + client: client, + proto: defaultProto, + addr: defaultAddr, + } - if client != nil { - if _, ok := client.Transport.(*http.Transport); !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - } else { - transport := new(http.Transport) - sockets.ConfigureTransport(transport, proto, addr) - client = &http.Client{ - Transport: transport, + for _, op := range ops { + if err := op(c); err != nil { + return nil, err } } - scheme := "http" - tlsConfig := resolveTLSConfig(client.Transport) - if tlsConfig != nil { - // TODO(stevvooe): This isn't really the right way to write clients in Go. - // `NewClient` should probably only take an `*http.Client` and work from there. - // Unfortunately, the model of having a host-ish/url-thingy as the connection - // string has us confusing protocol and transport layers. We continue doing - // this to avoid breaking existing clients but this should be addressed. - scheme = "https" + if _, ok := c.client.Transport.(http.RoundTripper); !ok { + return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport) + } + if c.scheme == "" { + c.scheme = "http" + + tlsConfig := resolveTLSConfig(c.client.Transport) + if tlsConfig != nil { + // TODO(stevvooe): This isn't really the right way to write clients in Go. + // `NewClient` should probably only take an `*http.Client` and work from there. + // Unfortunately, the model of having a host-ish/url-thingy as the connection + // string has us confusing protocol and transport layers. We continue doing + // this to avoid breaking existing clients but this should be addressed. + c.scheme = "https" + } } - return &Client{ - scheme: scheme, - host: host, - proto: proto, - addr: addr, - basePath: basePath, - client: client, - version: version, - customHTTPHeaders: httpHeaders, + return c, nil +} + +func defaultHTTPClient(host string) (*http.Client, error) { + url, err := ParseHostURL(host) + if err != nil { + return nil, err + } + transport := new(http.Transport) + sockets.ConfigureTransport(transport, url.Scheme, url.Host) + return &http.Client{ + Transport: transport, + CheckRedirect: CheckRedirect, }, nil } -// Close ensures that transport.Client is closed -// especially needed while using NewClient with *http.Client = nil -// for example -// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +// Close the transport used by the client func (cli *Client) Close() error { - if t, ok := cli.client.Transport.(*http.Transport); ok { t.CloseIdleConnections() } - return nil } // getAPIPath returns the versioned request path to call the api. // It appends the query parameters to the path if they are not empty. 
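Hosts and versions can also be pinned explicitly instead of read from the environment; a short sketch assuming the WithHost and WithVersion options (the address below is a placeholder for a real daemon):

package main

import "github.com/docker/docker/client"

func main() {
	// Each Opt mutates the partially constructed Client; the first failing
	// option aborts construction. Pinning a version sets manualOverride, so
	// no API version negotiation is performed later.
	cli, err := client.NewClientWithOpts(
		client.WithHost("tcp://127.0.0.1:2375"), // placeholder daemon address
		client.WithVersion("1.39"),
	)
	if err != nil {
		panic(err)
	}
	defer cli.Close()
}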
-func (cli *Client) getAPIPath(p string, query url.Values) string { +func (cli *Client) getAPIPath(ctx context.Context, p string, query url.Values) string { var apiPath string + if cli.negotiateVersion && !cli.negotiated { + cli.NegotiateAPIVersion(ctx) + } if cli.version != "" { v := strings.TrimPrefix(cli.version, "v") - apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p) + apiPath = path.Join(cli.basePath, "/v"+v, p) } else { - apiPath = fmt.Sprintf("%s%s", cli.basePath, p) + apiPath = path.Join(cli.basePath, p) } - - u := &url.URL{ - Path: apiPath, - } - if len(query) > 0 { - u.RawQuery = query.Encode() - } - return u.String() + return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String() } -// ClientVersion returns the version string associated with this -// instance of the Client. Note that this value can be changed -// via the DOCKER_API_VERSION env var. -// This operation doesn't acquire a mutex. +// ClientVersion returns the API version used by this client. func (cli *Client) ClientVersion() string { return cli.version } -// UpdateClientVersion updates the version string associated with this -// instance of the Client. This operation doesn't acquire a mutex. -func (cli *Client) UpdateClientVersion(v string) { +// NegotiateAPIVersion queries the API and updates the version to match the +// API version. Any errors are silently ignored. If a manual override is in place, +// either through the `DOCKER_API_VERSION` environment variable, or if the client +// was initialized with a fixed version (`opts.WithVersion(xx)`), no negotiation +// will be performed. +func (cli *Client) NegotiateAPIVersion(ctx context.Context) { if !cli.manualOverride { - cli.version = v + ping, _ := cli.Ping(ctx) + cli.negotiateAPIVersionPing(ping) } - } -// ParseHost verifies that the given host strings is valid. -func ParseHost(host string) (string, string, string, error) { +// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion +// if the ping version is less than the default version. If a manual override is +// in place, either through the `DOCKER_API_VERSION` environment variable, or if +// the client was initialized with a fixed version (`opts.WithVersion(xx)`), no +// negotiation is performed. +func (cli *Client) NegotiateAPIVersionPing(p types.Ping) { + if !cli.manualOverride { + cli.negotiateAPIVersionPing(p) + } +} + +// negotiateAPIVersionPing queries the API and updates the version to match the +// API version. Any errors are silently ignored. +func (cli *Client) negotiateAPIVersionPing(p types.Ping) { + // try the latest version before versioning headers existed + if p.APIVersion == "" { + p.APIVersion = "1.24" + } + + // if the client is not initialized with a version, start with the latest supported version + if cli.version == "" { + cli.version = api.DefaultVersion + } + + // if server version is lower than the client version, downgrade + if versions.LessThan(p.APIVersion, cli.version) { + cli.version = p.APIVersion + } + + // Store the results, so that automatic API version negotiation (if enabled) + // won't be performed on the next request. 
+ if cli.negotiateVersion { + cli.negotiated = true + } +} + +// DaemonHost returns the host address used by the client +func (cli *Client) DaemonHost() string { + return cli.host +} + +// HTTPClient returns a copy of the HTTP client bound to the server +func (cli *Client) HTTPClient() *http.Client { + return &*cli.client +} + +// ParseHostURL parses a url string, validates the string is a host url, and +// returns the parsed URL +func ParseHostURL(host string) (*url.URL, error) { protoAddrParts := strings.SplitN(host, "://", 2) if len(protoAddrParts) == 1 { - return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host) + return nil, fmt.Errorf("unable to parse docker host `%s`", host) } var basePath string @@ -236,16 +268,19 @@ func ParseHost(host string) (string, string, string, error) { if proto == "tcp" { parsed, err := url.Parse("tcp://" + addr) if err != nil { - return "", "", "", err + return nil, err } addr = parsed.Host basePath = parsed.Path } - return proto, addr, basePath, nil + return &url.URL{ + Scheme: proto, + Host: addr, + Path: basePath, + }, nil } -// CustomHTTPHeaders returns the custom http headers associated with this -// instance of the Client. This operation doesn't acquire a mutex. +// CustomHTTPHeaders returns the custom http headers stored by the client. func (cli *Client) CustomHTTPHeaders() map[string]string { m := make(map[string]string) for k, v := range cli.customHTTPHeaders { @@ -254,8 +289,21 @@ func (cli *Client) CustomHTTPHeaders() map[string]string { return m } -// SetCustomHTTPHeaders updates the custom http headers associated with this -// instance of the Client. This operation doesn't acquire a mutex. +// SetCustomHTTPHeaders that will be set on every HTTP request made by the client. +// Deprecated: use WithHTTPHeaders when creating the client. func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) { cli.customHTTPHeaders = headers } + +// Dialer returns a dialer for a raw stream connection, with HTTP/1.1 header, that can be used for proxying the daemon connection. +// Used by `docker dial-stdio` (docker/cli#889). +func (cli *Client) Dialer() func(context.Context) (net.Conn, error) { + return func(ctx context.Context) (net.Conn, error) { + if transport, ok := cli.client.Transport.(*http.Transport); ok { + if transport.DialContext != nil && transport.TLSClientConfig == nil { + return transport.DialContext(ctx, cli.proto, cli.addr) + } + } + return fallbackDial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_deprecated.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_deprecated.go new file mode 100644 index 00000000..54cdfc29 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_deprecated.go @@ -0,0 +1,23 @@ +package client + +import "net/http" + +// NewClient initializes a new API client for the given host and API version. +// It uses the given http client as transport. +// It also initializes the custom http headers to add to each request. +// +// It won't send any version information if the version number is empty. It is +// highly recommended that you set a version or your client may break if the +// server is upgraded. 
+// Deprecated: use NewClientWithOpts +func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) { + return NewClientWithOpts(WithHost(host), WithVersion(version), WithHTTPClient(client), WithHTTPHeaders(httpHeaders)) +} + +// NewEnvClient initializes a new API client based on environment variables. +// See FromEnv for a list of support environment variables. +// +// Deprecated: use NewClientWithOpts(FromEnv) +func NewEnvClient() (*Client, error) { + return NewClientWithOpts(FromEnv) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_unix.go index 03e136d7..3d24470b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_unix.go @@ -1,6 +1,9 @@ -// +build linux freebsd solaris openbsd darwin netbsd +// +build linux freebsd openbsd darwin -package client +package client // import "github.com/docker/docker/client" // DefaultDockerHost defines os specific default if DOCKER_HOST is unset const DefaultDockerHost = "unix:///var/run/docker.sock" + +const defaultProto = "unix" +const defaultAddr = "/var/run/docker.sock" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_windows.go index 07c0c7a7..c649e544 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/client_windows.go @@ -1,4 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" // DefaultDockerHost defines os specific default if DOCKER_HOST is unset const DefaultDockerHost = "npipe:////./pipe/docker_engine" + +const defaultProto = "npipe" +const defaultAddr = "//./pipe/docker_engine" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_create.go new file mode 100644 index 00000000..ee7d411d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_create.go @@ -0,0 +1,25 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigCreate creates a new Config. 
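The shims above keep the old constructors compiling; a migration sketch, with names purely illustrative:

package main

import "github.com/docker/docker/client"

func main() {
	// Old style, still working through the deprecation shim above.
	oldCli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	oldCli.Close()

	// Equivalent replacement using the functional-options constructor.
	newCli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	newCli.Close()
}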
+func (cli *Client) ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) { + var response types.ConfigCreateResponse + if err := cli.NewVersionError("1.30", "config create"); err != nil { + return response, err + } + resp, err := cli.post(ctx, "/configs/create", nil, config, nil) + defer ensureReaderClosed(resp) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + return response, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_inspect.go new file mode 100644 index 00000000..7d0ce3e1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_inspect.go @@ -0,0 +1,36 @@ +package client // import "github.com/docker/docker/client" + +import ( + "bytes" + "context" + "encoding/json" + "io/ioutil" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigInspectWithRaw returns the config information with raw data +func (cli *Client) ConfigInspectWithRaw(ctx context.Context, id string) (swarm.Config, []byte, error) { + if id == "" { + return swarm.Config{}, nil, objectNotFoundError{object: "config", id: id} + } + if err := cli.NewVersionError("1.30", "config inspect"); err != nil { + return swarm.Config{}, nil, err + } + resp, err := cli.get(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + if err != nil { + return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id) + } + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Config{}, nil, err + } + + var config swarm.Config + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&config) + + return config, body, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_list.go new file mode 100644 index 00000000..565acc6e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_list.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" +) + +// ConfigList returns the list of configs. 
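A combined sketch of ConfigCreate and ConfigList; the config name is a placeholder, and a swarm must be initialized (and the API at least 1.30, as the NewVersionError guards enforce) for these endpoints to succeed:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Create a swarm config from an inline payload.
	created, err := cli.ConfigCreate(ctx, swarm.ConfigSpec{
		Annotations: swarm.Annotations{Name: "app-config"}, // placeholder name
		Data:        []byte("key=value\n"),
	})
	if err != nil {
		panic(err)
	}

	// List configs filtered by that name.
	fl := filters.NewArgs()
	fl.Add("name", "app-config")
	configs, err := cli.ConfigList(ctx, types.ConfigListOptions{Filters: fl})
	if err != nil {
		panic(err)
	}
	fmt.Println("created", created.ID, "- matching configs:", len(configs))
}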
+func (cli *Client) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { + if err := cli.NewVersionError("1.30", "config list"); err != nil { + return nil, err + } + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToJSON(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/configs", query, nil) + defer ensureReaderClosed(resp) + if err != nil { + return nil, err + } + + var configs []swarm.Config + err = json.NewDecoder(resp.body).Decode(&configs) + return configs, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_remove.go new file mode 100644 index 00000000..a708fcae --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_remove.go @@ -0,0 +1,13 @@ +package client // import "github.com/docker/docker/client" + +import "context" + +// ConfigRemove removes a Config. +func (cli *Client) ConfigRemove(ctx context.Context, id string) error { + if err := cli.NewVersionError("1.30", "config remove"); err != nil { + return err + } + resp, err := cli.delete(ctx, "/configs/"+id, nil, nil) + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "config", id) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_update.go new file mode 100644 index 00000000..39e59cf8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/config_update.go @@ -0,0 +1,21 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "net/url" + "strconv" + + "github.com/docker/docker/api/types/swarm" +) + +// ConfigUpdate attempts to update a Config +func (cli *Client) ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error { + if err := cli.NewVersionError("1.30", "config update"); err != nil { + return err + } + query := url.Values{} + query.Set("version", strconv.FormatUint(version.Index, 10)) + resp, err := cli.post(ctx, "/configs/"+id+"/update", query, config, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_attach.go index eea46821..88ba1ef6 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_attach.go @@ -1,16 +1,36 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerAttach attaches a connection to a container in the server. // It returns a types.HijackedConnection with the hijacked connection // and a reader to get output. It's up to the caller to close // the hijacked connection by calling types.HijackedResponse.Close. +// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers.
+// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { query := url.Values{} if options.Stream { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_commit.go index 531d796e..2966e88c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_commit.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "errors" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerCommit applies changes into a container and creates a new tagged image. @@ -39,17 +39,17 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option for _, change := range options.Changes { query.Add("changes", change) } - if options.Pause != true { + if !options.Pause { query.Set("pause", "0") } var response types.IDResponse resp, err := cli.post(ctx, "/commit", query, options.Config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_copy.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_copy.go index 8380eeab..bb278bf7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_copy.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_copy.go @@ -1,6 +1,7 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "fmt" @@ -10,8 +11,6 @@ import ( "path/filepath" "strings" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) @@ -20,32 +19,38 @@ func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path stri query := url.Values{} query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) + urlStr := "/containers/" + containerID + "/archive" response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, err - } defer ensureReaderClosed(response) + if err != nil { + return types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+path) + } return getContainerPathStatFromHeader(response.header) } // CopyToContainer copies content into the container filesystem. 
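A sketch of consuming the multiplexed attach stream documented above with stdcopy.StdCopy; the container name is a placeholder and the container must not use a TTY for the 8-byte frame headers to be present:

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	resp, err := cli.ContainerAttach(context.Background(), "mycontainer", types.ContainerAttachOptions{
		Stream: true,
		Stdout: true,
		Stderr: true,
	})
	if err != nil {
		panic(err)
	}
	defer resp.Close()

	// StdCopy splits the framed stream back into stdout and stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader); err != nil {
		panic(err)
	}
}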
-func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { +// Note that `content` must be a Reader for a TAR archive +func (cli *Client) CopyToContainer(ctx context.Context, containerID, dstPath string, content io.Reader, options types.CopyToContainerOptions) error { query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. + query.Set("path", filepath.ToSlash(dstPath)) // Normalize the paths used in the API. // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. if !options.AllowOverwriteDirWithFile { query.Set("noOverwriteDirNonDir", "true") } - apiPath := fmt.Sprintf("/containers/%s/archive", container) + if options.CopyUIDGID { + query.Set("copyUIDGID", "true") + } + + apiPath := "/containers/" + containerID + "/archive" response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return err - } defer ensureReaderClosed(response) + if err != nil { + return wrapResponseError(err, response, "container:path", containerID+":"+dstPath) + } + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior if response.statusCode != http.StatusOK { return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } @@ -54,17 +59,18 @@ func (cli *Client) CopyToContainer(ctx context.Context, container, path string, } // CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { +// for a TAR archive to manipulate it in the host. It's up to the caller to close the reader. +func (cli *Client) CopyFromContainer(ctx context.Context, containerID, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { query := make(url.Values, 1) query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. 
- apiPath := fmt.Sprintf("/containers/%s/archive", container) + apiPath := "/containers/" + containerID + "/archive" response, err := cli.get(ctx, apiPath, query, nil) if err != nil { - return nil, types.ContainerPathStat{}, err + return nil, types.ContainerPathStat{}, wrapResponseError(err, response, "container:path", containerID+":"+srcPath) } + // TODO this code converts non-error status-codes (e.g., "204 No Content") into an error; verify if this is the desired behavior if response.statusCode != http.StatusOK { return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_create.go index 6841b0b2..5b795e0c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_create.go @@ -1,14 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" - "strings" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) type configWrapper struct { @@ -43,14 +42,11 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config } serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) + defer ensureReaderClosed(serverResp) if err != nil { - if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} - } return response, err } err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_diff.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_diff.go index 884dc9fe..29dac849 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_diff.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_diff.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerDiff shows differences in a container filesystem since it was started. 
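Since CopyToContainer consumes a TAR stream, as noted above, here is a sketch that wraps a single file in an in-memory archive before uploading it; the container name and destination path are placeholders:

package main

import (
	"archive/tar"
	"bytes"
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// Wrap one file in a tar archive, since the API only accepts archives.
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	data := []byte("hello\n")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(data))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(data); err != nil {
		panic(err)
	}
	tw.Close()

	err = cli.CopyToContainer(context.Background(), "mycontainer", "/tmp", &buf,
		types.CopyToContainerOptions{})
	if err != nil {
		panic(err)
	}
}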
@@ -13,11 +13,11 @@ func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]con var changes []container.ContainerChangeResponseItem serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return changes, err } err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) return changes, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_exec.go index 0665c54f..e3ee755b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_exec.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerExecCreate creates a new exec configuration to run an exec process. @@ -16,11 +16,11 @@ func (cli *Client) ContainerExecCreate(ctx context.Context, container string, co } resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } @@ -35,7 +35,7 @@ func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config // It returns a types.HijackedConnection with the hijacked connection // and a reader to get output. It's up to the caller to close // the hijacked connection by calling types.HijackedResponse.Close.
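A sketch of the exec flow, using the types.ExecStartCheck parameter that the next hunk introduces for ContainerExecAttach; the container name and command are placeholders:

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Create the exec instance inside a running container.
	idResp, err := cli.ContainerExecCreate(ctx, "mycontainer", types.ExecConfig{
		Cmd:          []string{"ls", "-l", "/"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		panic(err)
	}

	// Attaching both starts the exec process and hijacks the connection
	// carrying its (multiplexed) output.
	hijacked, err := cli.ContainerExecAttach(ctx, idResp.ID, types.ExecStartCheck{})
	if err != nil {
		panic(err)
	}
	defer hijacked.Close()
	stdcopy.StdCopy(os.Stdout, os.Stderr, hijacked.Reader)
}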
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { +func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) { headers := map[string][]string{"Content-Type": {"application/json"}} return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_export.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_export.go index 52194f3d..d0c0a5cb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_export.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_export.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ContainerExport retrieves the raw contents of a container diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_inspect.go index 17f18097..c496bcff 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_inspect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_inspect.go @@ -1,46 +1,45 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerInspect returns the container information. func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { + if containerID == "" { + return types.ContainerJSON{}, objectNotFoundError{object: "container", id: containerID} + } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err + return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID) } var response types.ContainerJSON err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } // ContainerInspectWithRaw returns the container information and its raw representation. 
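With inspect errors now wrapped through objectNotFoundError and wrapResponseError, callers can test for missing objects; a sketch assuming the package's IsErrNotFound helper:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// An empty or unknown ID surfaces as a typed not-found error.
	_, err = cli.ContainerInspect(context.Background(), "no-such-container")
	if client.IsErrNotFound(err) {
		fmt.Println("container does not exist")
	} else if err != nil {
		panic(err)
	}
}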
func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { + if containerID == "" { + return types.ContainerJSON{}, nil, objectNotFoundError{object: "container", id: containerID} + } query := url.Values{} if getSize { query.Set("size", "1") } serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err - } defer ensureReaderClosed(serverResp) + if err != nil { + return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID) + } body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_kill.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_kill.go index 29f80c73..4d6f1d23 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_kill.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_kill.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerKill terminates the container process but does not remove the container from the docker host. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_list.go index 43989121..1e7a63a9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strconv" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainerList returns the list of containers in the docker host. @@ -45,12 +45,12 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis } resp, err := cli.get(ctx, "/containers/json", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var containers []types.Container err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) return containers, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_logs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_logs.go index 69056b63..5b6541f0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_logs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_logs.go @@ -1,18 +1,38 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" + "github.com/pkg/errors" ) // ContainerLogs returns the logs generated by a container in an io.ReadCloser. // It's up to the caller to close the stream. 
+// +// The stream format on the response will be in one of two formats: +// +// If the container is using a TTY, there is only a single stream (stdout), and +// data is copied directly from the container output stream, no extra +// multiplexing or headers. +// +// If the container is *not* using a TTY, streams for stdout and stderr are +// multiplexed. +// The format of the multiplexed stream is as follows: +// +// [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4}[]byte{OUTPUT} +// +// STREAM_TYPE can be 1 for stdout and 2 for stderr +// +// SIZE1, SIZE2, SIZE3, and SIZE4 are four bytes of uint32 encoded as big endian. +// This is the size of OUTPUT. +// +// You can use github.com/docker/docker/pkg/stdcopy.StdCopy to demultiplex this +// stream. func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { query := url.Values{} if options.ShowStdout { @@ -26,11 +46,19 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options if options.Since != "" { ts, err := timetypes.GetTimestamp(options.Since, time.Now()) if err != nil { - return nil, err + return nil, errors.Wrap(err, `invalid value for "since"`) } query.Set("since", ts) } + if options.Until != "" { + ts, err := timetypes.GetTimestamp(options.Until, time.Now()) + if err != nil { + return nil, errors.Wrap(err, `invalid value for "until"`) + } + query.Set("until", ts) + } + if options.Timestamps { query.Set("timestamps", "1") } @@ -46,7 +74,7 @@ func (cli *Client) ContainerLogs(ctx context.Context, container string, options resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) if err != nil { - return nil, err + return nil, wrapResponseError(err, resp, "container", container) } return resp.body, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_pause.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_pause.go index 412067a7..5e7271a3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_pause.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_pause.go @@ -1,6 +1,6 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerPause pauses the main process of a given container without terminating it. 
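A sketch of the extended ContainerLogs options, including the Until parameter added in this diff; the container name is a placeholder, and Since/Until accept RFC3339 timestamps or relative durations such as "10m":

package main

import (
	"context"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	rc, err := cli.ContainerLogs(context.Background(), "mycontainer", types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Since:      "10m",
		Until:      "1m", // new option shown above
		Timestamps: true,
	})
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// For non-TTY containers the stream is multiplexed; demultiplex it.
	stdcopy.StdCopy(os.Stdout, os.Stderr, rc)
}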
func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_prune.go index b5821708..04383dea 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ContainersPrune requests the daemon to delete unused data @@ -23,10 +23,10 @@ func (cli *Client) ContainersPrune(ctx context.Context, pruneFilters filters.Arg } serverResp, err := cli.post(ctx, "/containers/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_remove.go index 3a79590c..df81461b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_remove.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerRemove kills and removes a container from the docker host. @@ -22,6 +22,6 @@ func (cli *Client) ContainerRemove(ctx context.Context, containerID string, opti } resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) - return err + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "container", containerID) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_rename.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_rename.go index 0e718da7..240fdf55 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_rename.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_rename.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // ContainerRename changes the name of a given container. 
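A sketch of ContainersPrune with a filter, assuming the daemon understands the standard "until" prune filter; error handling is trimmed to panics for brevity:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}

	// Prune stopped containers older than 24 hours.
	pruneFilters := filters.NewArgs()
	pruneFilters.Add("until", "24h")

	report, err := cli.ContainersPrune(context.Background(), pruneFilters)
	if err != nil {
		panic(err)
	}
	fmt.Printf("deleted %d containers, reclaimed %d bytes\n",
		len(report.ContainersDeleted), report.SpaceReclaimed)
}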
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_resize.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_resize.go index 66c3cc19..a9d4c0c7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_resize.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_resize.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerResize changes the size of the tty for a container. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_restart.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_restart.go index 74d7455f..41e42196 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_restart.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_restart.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) // ContainerRestart stops and starts a container again. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_start.go index b1f08de4..c2e0b15d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_start.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stats.go index 4758c66e..6ef44c77 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stats.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ContainerStats returns near realtime stats for a given container. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stop.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stop.go index b5418ae8..629d7ab6 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stop.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_stop.go @@ -1,15 +1,20 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "time" timetypes "github.com/docker/docker/api/types/time" - "golang.org/x/net/context" ) -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. +// ContainerStop stops a container. 
In case the container fails to stop +// gracefully within a time frame specified by the timeout argument, +// it is forcefully terminated (killed). +// +// If the timeout is nil, the container's StopTimeout value is used, if set, +// otherwise the engine default. A negative timeout value can be specified, +// meaning no timeout, i.e. no forceful termination is performed. func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { query := url.Values{} if timeout != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_top.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_top.go index 9689123a..a5b78999 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_top.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_top.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "strings" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerTop shows process information from within a container. @@ -18,11 +18,11 @@ func (cli *Client) ContainerTop(ctx context.Context, containerID string, argumen } resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) + defer ensureReaderClosed(resp) if err != nil { return response, err } err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_unpause.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_unpause.go index 5c762112..1d8f8731 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_unpause.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_unpause.go @@ -1,6 +1,6 @@ -package client +package client // import "github.com/docker/docker/client" -import "golang.org/x/net/context" +import "context" // ContainerUnpause resumes the process execution within a container func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_update.go index 5082f22d..6917cf9f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_update.go @@ -1,22 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/container" - "golang.org/x/net/context" ) // ContainerUpdate updates resources of a container func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) + defer ensureReaderClosed(serverResp) if err != nil { return response, err } err = json.NewDecoder(serverResp.body).Decode(&response) - - ensureReaderClosed(serverResp) return response, err } diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_wait.go index 93212c70..6ab8c1da 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/container_wait.go @@ -1,26 +1,83 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" - - "golang.org/x/net/context" + "net/url" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/versions" ) -// ContainerWait pauses execution until a container exits. -// It returns the API status code as response of its readiness. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) +// ContainerWait waits until the specified container is in a certain state +// indicated by the given condition, either "not-running" (default), +// "next-exit", or "removed". +// +// If this client's API version is before 1.30, condition is ignored and +// ContainerWait will return immediately with the two channels, as the server +// will wait as if the condition were "not-running". +// +// If this client's API version is at least 1.30, ContainerWait blocks until +// the request has been acknowledged by the server (with a response header), +// then returns two channels on which the caller can wait for the exit status +// of the container or an error if there was a problem either beginning the +// wait request or in getting the response. This allows the caller to +// synchronize ContainerWait with other calls, such as specifying a +// "next-exit" condition before issuing a ContainerStart request. +func (cli *Client) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.ContainerWaitOKBody, <-chan error) { + if versions.LessThan(cli.ClientVersion(), "1.30") { + return cli.legacyContainerWait(ctx, containerID) + } + + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error, 1) + + query := url.Values{} + query.Set("condition", string(condition)) + + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil) if err != nil { - return -1, err - } - defer ensureReaderClosed(resp) - - var res container.ContainerWaitOKBody - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err + defer ensureReaderClosed(resp) + errC <- err + return resultC, errC } - return res.StatusCode, nil + go func() { + defer ensureReaderClosed(resp) + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC +} + +// legacyContainerWait returns immediately and doesn't have an option to wait +// until the container is removed. 
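Editor's note: the rewritten ContainerWait above returns a result channel and an error channel instead of blocking for a status code, so the wait can be registered before the container is started. A minimal sketch of the intended call pattern with a hypothetical container ID; it also exercises the pointer-based grace period that the new ContainerStop documentation describes (nil means the container's own StopTimeout):

package main

import (
	"context"
	"log"
	"time"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Register the wait before starting the container so a fast exit
	// cannot slip between the two calls.
	statusCh, errCh := cli.ContainerWait(ctx, "my-container", container.WaitConditionNextExit)

	if err := cli.ContainerStart(ctx, "my-container", types.ContainerStartOptions{}); err != nil {
		log.Fatal(err)
	}

	// Give the container ten seconds to stop gracefully before it is killed;
	// passing nil instead would use the container's configured StopTimeout.
	timeout := 10 * time.Second
	if err := cli.ContainerStop(ctx, "my-container", &timeout); err != nil {
		log.Fatal(err)
	}

	select {
	case err := <-errCh:
		log.Fatal(err)
	case status := <-statusCh:
		log.Printf("container exited with status %d", status.StatusCode)
	}
}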
+func (cli *Client) legacyContainerWait(ctx context.Context, containerID string) (<-chan container.ContainerWaitOKBody, <-chan error) { + resultC := make(chan container.ContainerWaitOKBody) + errC := make(chan error) + + go func() { + resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) + if err != nil { + errC <- err + return + } + defer ensureReaderClosed(resp) + + var res container.ContainerWaitOKBody + if err := json.NewDecoder(resp.body).Decode(&res); err != nil { + errC <- err + return + } + + resultC <- res + }() + + return resultC, errC } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/disk_usage.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/disk_usage.go index 03c80b39..354cd369 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/disk_usage.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/disk_usage.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // DiskUsage requests the current data usage from the daemon @@ -13,10 +13,10 @@ func (cli *Client) DiskUsage(ctx context.Context) (types.DiskUsage, error) { var du types.DiskUsage serverResp, err := cli.get(ctx, "/system/df", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return du, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&du); err != nil { return du, fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/distribution_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/distribution_inspect.go new file mode 100644 index 00000000..f4e3794c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/distribution_inspect.go @@ -0,0 +1,38 @@ +package client // import "github.com/docker/docker/client" + +import ( + "context" + "encoding/json" + "net/url" + + registrytypes "github.com/docker/docker/api/types/registry" +) + +// DistributionInspect returns the image digest with full Manifest +func (cli *Client) DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registrytypes.DistributionInspect, error) { + // Contact the registry to retrieve digest and platform information + var distributionInspect registrytypes.DistributionInspect + if image == "" { + return distributionInspect, objectNotFoundError{object: "distribution", id: image} + } + + if err := cli.NewVersionError("1.30", "distribution inspect"); err != nil { + return distributionInspect, err + } + var headers map[string][]string + + if encodedRegistryAuth != "" { + headers = map[string][]string{ + "X-Registry-Auth": {encodedRegistryAuth}, + } + } + + resp, err := cli.get(ctx, "/distribution/"+image+"/json", url.Values{}, headers) + defer ensureReaderClosed(resp) + if err != nil { + return distributionInspect, err + } + + err = json.NewDecoder(resp.body).Decode(&distributionInspect) + return distributionInspect, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/errors.go index 4f767bd8..001c1028 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/errors.go +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/errors.go @@ -1,9 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( "fmt" + "net/http" "github.com/docker/docker/api/types/versions" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" ) @@ -31,100 +33,43 @@ func ErrorConnectionFailed(host string) error { return errConnectionFailed{host: host} } +// Deprecated: use the errdefs.NotFound() interface instead. Kept for backward compatibility type notFound interface { error - NotFound() bool // Is the error a NotFound error + NotFound() bool } -// IsErrNotFound returns true if the error is caused with an -// object (image, container, network, volume, …) is not found in the docker host. +// IsErrNotFound returns true if the error is a NotFound error, which is returned +// by the API when some object is not found. func IsErrNotFound(err error) bool { - te, ok := err.(notFound) - return ok && te.NotFound() + if _, ok := err.(notFound); ok { + return ok + } + return errdefs.IsNotFound(err) } -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string +type objectNotFoundError struct { + object string + id string } -// NotFound indicates that this error type is of NotFound -func (e imageNotFoundError) NotFound() bool { - return true +func (e objectNotFoundError) NotFound() {} + +func (e objectNotFoundError) Error() string { + return fmt.Sprintf("Error: No such %s: %s", e.object, e.id) } -// Error returns a string representation of an imageNotFoundError -func (e imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", e.imageID) -} - -// IsErrImageNotFound returns true if the error is caused -// when an image is not found in the docker host. -func IsErrImageNotFound(err error) bool { - return IsErrNotFound(err) -} - -// containerNotFoundError implements an error returned when a container is not in the docker host. -type containerNotFoundError struct { - containerID string -} - -// NotFound indicates that this error type is of NotFound -func (e containerNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - -// IsErrContainerNotFound returns true if the error is caused -// when a container is not found in the docker host. -func IsErrContainerNotFound(err error) bool { - return IsErrNotFound(err) -} - -// networkNotFoundError implements an error returned when a network is not in the docker host. -type networkNotFoundError struct { - networkID string -} - -// NotFound indicates that this error type is of NotFound -func (e networkNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - -// IsErrNetworkNotFound returns true if the error is caused -// when a network is not found in the docker host. -func IsErrNetworkNotFound(err error) bool { - return IsErrNotFound(err) -} - -// volumeNotFoundError implements an error returned when a volume is not in the docker host. 
-type volumeNotFoundError struct { - volumeID string -} - -// NotFound indicates that this error type is of NotFound -func (e volumeNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a volumeNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - -// IsErrVolumeNotFound returns true if the error is caused -// when a volume is not found in the docker host. -func IsErrVolumeNotFound(err error) bool { - return IsErrNotFound(err) +func wrapResponseError(err error, resp serverResponse, object, id string) error { + switch { + case err == nil: + return nil + case resp.statusCode == http.StatusNotFound: + return objectNotFoundError{object: object, id: id} + case resp.statusCode == http.StatusNotImplemented: + return errdefs.NotImplemented(err) + default: + return err + } } // unauthorizedError represents an authorization error in a remote registry. @@ -140,74 +85,10 @@ func (u unauthorizedError) Error() string { // IsErrUnauthorized returns true if the error is caused // when a remote registry authentication fails func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok -} - -// nodeNotFoundError implements an error returned when a node is not found. -type nodeNotFoundError struct { - nodeID string -} - -// Error returns a string representation of a nodeNotFoundError -func (e nodeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such node: %s", e.nodeID) -} - -// NotFound indicates that this error type is of NotFound -func (e nodeNotFoundError) NotFound() bool { - return true -} - -// IsErrNodeNotFound returns true if the error is caused -// when a node is not found. -func IsErrNodeNotFound(err error) bool { - _, ok := err.(nodeNotFoundError) - return ok -} - -// serviceNotFoundError implements an error returned when a service is not found. -type serviceNotFoundError struct { - serviceID string -} - -// Error returns a string representation of a serviceNotFoundError -func (e serviceNotFoundError) Error() string { - return fmt.Sprintf("Error: No such service: %s", e.serviceID) -} - -// NotFound indicates that this error type is of NotFound -func (e serviceNotFoundError) NotFound() bool { - return true -} - -// IsErrServiceNotFound returns true if the error is caused -// when a service is not found. -func IsErrServiceNotFound(err error) bool { - _, ok := err.(serviceNotFoundError) - return ok -} - -// taskNotFoundError implements an error returned when a task is not found. -type taskNotFoundError struct { - taskID string -} - -// Error returns a string representation of a taskNotFoundError -func (e taskNotFoundError) Error() string { - return fmt.Sprintf("Error: No such task: %s", e.taskID) -} - -// NotFound indicates that this error type is of NotFound -func (e taskNotFoundError) NotFound() bool { - return true -} - -// IsErrTaskNotFound returns true if the error is caused -// when a task is not found. 
-func IsErrTaskNotFound(err error) bool { - _, ok := err.(taskNotFoundError) - return ok + if _, ok := err.(unauthorizedError); ok { + return ok + } + return errdefs.IsUnauthorized(err) } type pluginPermissionDenied struct { @@ -225,54 +106,33 @@ func IsErrPluginPermissionDenied(err error) bool { return ok } +type notImplementedError struct { + message string +} + +func (e notImplementedError) Error() string { + return e.message +} + +func (e notImplementedError) NotImplemented() bool { + return true +} + +// IsErrNotImplemented returns true if the error is a NotImplemented error. +// This is returned by the API when a requested feature has not been +// implemented. +func IsErrNotImplemented(err error) bool { + if _, ok := err.(notImplementedError); ok { + return ok + } + return errdefs.IsNotImplemented(err) +} + // NewVersionError returns an error if the APIVersion required // if less than the current supported version func (cli *Client) NewVersionError(APIrequired, feature string) error { - if versions.LessThan(cli.version, APIrequired) { + if cli.version != "" && versions.LessThan(cli.version, APIrequired) { return fmt.Errorf("%q requires API version %s, but the Docker daemon API version is %s", feature, APIrequired, cli.version) } return nil } - -// secretNotFoundError implements an error returned when a secret is not found. -type secretNotFoundError struct { - name string -} - -// Error returns a string representation of a secretNotFoundError -func (e secretNotFoundError) Error() string { - return fmt.Sprintf("Error: no such secret: %s", e.name) -} - -// NotFound indicates that this error type is of NotFound -func (e secretNotFoundError) NotFound() bool { - return true -} - -// IsErrSecretNotFound returns true if the error is caused -// when a secret is not found. -func IsErrSecretNotFound(err error) bool { - _, ok := err.(secretNotFoundError) - return ok -} - -// pluginNotFoundError implements an error returned when a plugin is not in the docker host. -type pluginNotFoundError struct { - name string -} - -// NotFound indicates that this error type is of NotFound -func (e pluginNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a pluginNotFoundError -func (e pluginNotFoundError) Error() string { - return fmt.Sprintf("Error: No such plugin: %s", e.name) -} - -// IsErrPluginNotFound returns true if the error is caused -// when a plugin is not found in the docker host. 
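Editor's note: with the per-object sentinel types removed, the helpers in errors.go are rewritten on top of the errdefs package, so IsErrNotFound and friends recognize both the client's own objectNotFoundError and errdefs-style errors from elsewhere in the stack. A small sketch of the resulting error triage; the image name is hypothetical:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	_, _, err = cli.ImageInspectWithRaw(context.Background(), "no-such-image")
	switch {
	case client.IsErrNotFound(err):
		// Matches the legacy notFound interface as well as errdefs.IsNotFound.
		log.Println("image is not present on this host")
	case client.IsErrNotImplemented(err):
		log.Println("endpoint not supported by this daemon")
	case err != nil:
		log.Fatal(err)
	}
}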
-func IsErrPluginNotFound(err error) bool { - return IsErrNotFound(err) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/events.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/events.go index af47aefa..6e565389 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/events.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/events.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/hijack.go index 74c53f52..e9c9a752 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/hijack.go @@ -1,37 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "bufio" + "context" "crypto/tls" - "errors" "fmt" "net" "net/http" "net/http/httputil" "net/url" - "strings" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/pkg/tlsconfig" "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" + "github.com/pkg/errors" ) -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - // postHijacked sends a POST request and hijacks the connection. func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { bodyEncoded, err := encodeData(body) @@ -39,23 +23,53 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{}, err } - apiPath := cli.getAPIPath(path, query) + apiPath := cli.getAPIPath(ctx, path, query) req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err } req = cli.addHeaders(req, headers) + conn, err := cli.setupHijackConn(ctx, req, "tcp") + if err != nil { + return types.HijackedResponse{}, err + } + + return types.HijackedResponse{Conn: conn, Reader: bufio.NewReader(conn)}, err +} + +// DialHijack returns a hijacked connection with negotiated protocol proto. +func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) { + req, err := http.NewRequest("POST", url, nil) + if err != nil { + return nil, err + } + req = cli.addHeaders(req, meta) + + return cli.setupHijackConn(ctx, req, proto) +} + +// fallbackDial is used when WithDialer() was not called. +// See cli.Dialer(). 
+func fallbackDial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { + if tlsConfig != nil && proto != "unix" && proto != "npipe" { + return tls.Dial(proto, addr, tlsConfig) + } + if proto == "npipe" { + return sockets.DialPipe(addr, 32*time.Second) + } + return net.Dial(proto, addr) +} + +func (cli *Client) setupHijackConn(ctx context.Context, req *http.Request, proto string) (net.Conn, error) { req.Host = cli.addr req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") + req.Header.Set("Upgrade", proto) - conn, err := dial(cli.proto, cli.addr, resolveTLSConfig(cli.client.Transport)) + dialer := cli.Dialer() + conn, err := dialer(ctx) if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - return types.HijackedResponse{}, err + return nil, errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") } // When we set up a TCP connection for hijack, there could be long periods @@ -72,106 +86,58 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu defer clientconn.Close() // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/cryptor/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout + resp, err := clientconn.Do(req) + if err != httputil.ErrPersistEOF { + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusSwitchingProtocols { + resp.Body.Close() + return nil, fmt.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode) } } - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. 
Setting TCP KeepAlive on the socket connection will prohibit - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - config = tlsconfig.Clone(config) - config.ServerName = hostname - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() + c, br := clientconn.Hijack() + if br.Buffered() > 0 { + // If there is buffered content, wrap the connection. We return an + // object that implements CloseWrite iff the underlying connection + // implements it. + if _, ok := c.(types.CloseWriter); ok { + c = &hijackedConnCloseWriter{&hijackedConn{c, br}} + } else { + c = &hijackedConn{c, br} + } } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel + br.Reset(nil) } - if err != nil { - rawConn.Close() - return nil, err - } - - // This is Docker difference with standard's crypto/tls package: returned a - // wrapper which holds both the TLS and raw connections. - return &tlsClientCon{conn, rawConn}, nil + return c, nil } -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't Go standard's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) +// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case +// that a) there was already buffered data in the http layer when Hijack() was +// called, and b) the underlying net.Conn does *not* implement CloseWrite(). +// hijackedConn does not implement CloseWrite() either. +type hijackedConn struct { + net.Conn + r *bufio.Reader +} + +func (c *hijackedConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +// hijackedConnCloseWriter is a hijackedConn which additionally implements +// CloseWrite(). It is returned by setupHijackConn in the case that a) there +// was already buffered data in the http layer when Hijack() was called, and b) +// the underlying net.Conn *does* implement CloseWrite(). 
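Editor's note: the hijackedConn/hijackedConnCloseWriter pair above preserves data already buffered by the HTTP layer without losing the half-close capability of the underlying connection. A sketch of why that matters to callers (container ID hypothetical): types.HijackedResponse.CloseWrite only works when the wrapped net.Conn still advertises CloseWrite.

package main

import (
	"context"
	"io"
	"log"
	"os"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	resp, err := cli.ContainerAttach(context.Background(), "my-container",
		types.ContainerAttachOptions{Stream: true, Stdin: true, Stdout: true})
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Close()

	// Write to stdin, then half-close the write side so the container sees
	// EOF while we keep reading its output over the same hijacked connection.
	if _, err := io.WriteString(resp.Conn, "exit\n"); err != nil {
		log.Fatal(err)
	}
	if err := resp.CloseWrite(); err != nil {
		log.Fatal(err)
	}
	io.Copy(os.Stdout, resp.Reader)
}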
+type hijackedConnCloseWriter struct { + *hijackedConn +} + +var _ types.CloseWriter = &hijackedConnCloseWriter{} + +func (c *hijackedConnCloseWriter) CloseWrite() error { + conn := c.Conn.(types.CloseWriter) + return conn.CloseWrite() } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_build.go index bb69143e..8fcf9950 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_build.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/base64" "encoding/json" "io" "net/http" "net/url" "strconv" - - "golang.org/x/net/context" + "strings" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -29,6 +29,7 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return types.ImageBuildResponse{}, err } headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) + headers.Set("Content-Type", "application/x-tar") serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) @@ -120,6 +121,26 @@ func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (ur return query, err } query.Set("cachefrom", string(cacheFromJSON)) + if options.SessionID != "" { + query.Set("session", options.SessionID) + } + if options.Platform != "" { + if err := cli.NewVersionError("1.32", "platform"); err != nil { + return query, err + } + query.Set("platform", strings.ToLower(options.Platform)) + } + if options.BuildID != "" { + query.Set("buildid", options.BuildID) + } + query.Set("version", string(options.Version)) + if options.Outputs != nil { + outputsJSON, err := json.Marshal(options.Outputs) + if err != nil { + return query, err + } + query.Set("outputs", string(outputsJSON)) + } return query, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_create.go index 4436abb0..23938047 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_create.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" + "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" @@ -21,6 +21,9 @@ func (cli *Client) ImageCreate(ctx context.Context, parentReference string, opti query := url.Values{} query.Set("fromImage", reference.FamiliarName(ref)) query.Set("tag", getAPITagFromNamedRef(ref)) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) if err != nil { return nil, err diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_history.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_history.go index 7b4babcb..b5bea10d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_history.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_history.go @@ -1,22 +1,22 @@ -package client 
+package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/image" - "golang.org/x/net/context" ) // ImageHistory returns the changes in an image in history format. func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]image.HistoryResponseItem, error) { var history []image.HistoryResponseItem serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return history, err } err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) return history, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_import.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_import.go index d7dedd82..c2972ea9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_import.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_import.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" + "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" @@ -25,6 +25,9 @@ func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSour query.Set("repo", ref) query.Set("tag", options.Tag) query.Set("message", options.Message) + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } for _, change := range options.Changes { query.Add("changes", change) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_inspect.go index b3a64ce2..1eb8dce0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_inspect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_inspect.go @@ -1,25 +1,24 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageInspectWithRaw returns the image information and its raw representation. 
func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) { - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err + if imageID == "" { + return types.ImageInspect{}, nil, objectNotFoundError{object: "image", id: imageID} } + serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil) defer ensureReaderClosed(serverResp) + if err != nil { + return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID) + } body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_list.go index f26464f6..4fa8c006 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // ImageList returns a list of images in the docker host. @@ -35,11 +35,11 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } serverResp, err := cli.get(ctx, "/images/json", query, nil) + defer ensureReaderClosed(serverResp) if err != nil { return images, err } err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) return images, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_load.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_load.go index 77aaf1af..91016e49 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_load.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_load.go @@ -1,11 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_prune.go index 5ef98b7f..56af6d7f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // ImagesPrune requests the daemon to delete unused data @@ -23,10 +23,10 @@ func (cli *Client) ImagesPrune(ctx context.Context, pruneFilters filters.Args) ( } serverResp, err := cli.post(ctx, "/images/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, 
fmt.Errorf("Error retrieving disk usage: %v", err) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_pull.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_pull.go index a72b9bf7..a2397559 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_pull.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_pull.go @@ -1,14 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" - "net/http" "net/url" - - "golang.org/x/net/context" + "strings" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // ImagePull requests the docker host to pull an image from a remote registry. @@ -30,9 +30,12 @@ func (cli *Client) ImagePull(ctx context.Context, refStr string, options types.I if !options.All { query.Set("tag", getAPITagFromNamedRef(ref)) } + if options.Platform != "" { + query.Set("platform", strings.ToLower(options.Platform)) + } resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_push.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_push.go index 410d2fb9..49d412ee 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_push.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_push.go @@ -1,15 +1,14 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "errors" "io" - "net/http" "net/url" - "golang.org/x/net/context" - "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" ) // ImagePush requests the docker host to push an image to a remote registry. @@ -37,7 +36,7 @@ func (cli *Client) ImagePush(ctx context.Context, image string, options types.Im query.Set("tag", tag) resp, err := cli.tryImagePush(ctx, name, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return nil, privilegeErr diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_remove.go index 6921209e..84a41af0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_remove.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_remove.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ImageRemove removes an image from the docker host. 
@@ -19,13 +19,13 @@ func (cli *Client) ImageRemove(ctx context.Context, imageID string, options type query.Set("noprune", "1") } + var dels []types.ImageDeleteResponseItem resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) + defer ensureReaderClosed(resp) if err != nil { - return nil, err + return dels, wrapResponseError(err, resp, "image", imageID) } - var dels []types.ImageDeleteResponseItem err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) return dels, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_save.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_save.go index ecac880a..d1314e4b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_save.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_save.go @@ -1,10 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" - - "golang.org/x/net/context" ) // ImageSave retrieves one or more images from the docker host as an io.ReadCloser. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_search.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_search.go index b0fcd5c2..82955a74 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_search.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_search.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" - "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" + "github.com/docker/docker/errdefs" ) // ImageSearch makes the docker host to search by a term in a remote registry. 
@@ -21,7 +21,7 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I query.Set("limit", fmt.Sprintf("%d", options.Limit)) if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return results, err } @@ -29,7 +29,8 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + defer ensureReaderClosed(resp) + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { return results, privilegeErr @@ -41,7 +42,6 @@ func (cli *Client) ImageSearch(ctx context.Context, term string, options types.I } err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) return results, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_tag.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_tag.go index 8924f71e..5652bfc2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_tag.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/image_tag.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/distribution/reference" "github.com/pkg/errors" - "golang.org/x/net/context" ) // ImageTag tags an image in the docker host diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/info.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/info.go index ac079612..c856704e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/info.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/info.go @@ -1,22 +1,22 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // Info returns information about the docker server. 
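Editor's note: ImageSearch now serializes its filters with filters.ToJSON (replacing the deprecated ToParam) and, like the pull/push paths, retries on errdefs-style unauthorized errors. A sketch of a filtered search with hypothetical term and filter values:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	f := filters.NewArgs()
	f.Add("is-official", "true")

	// The filters travel to the daemon as the JSON produced by filters.ToJSON.
	results, err := cli.ImageSearch(context.Background(), "nginx",
		types.ImageSearchOptions{Limit: 5, Filters: f})
	if err != nil {
		log.Fatal(err)
	}
	for _, r := range results {
		log.Printf("%s (%d stars)", r.Name, r.StarCount)
	}
}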
func (cli *Client) Info(ctx context.Context) (types.Info, error) { var info types.Info serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) + defer ensureReaderClosed(serverResp) if err != nil { return info, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { return info, fmt.Errorf("Error reading remote info: %v", err) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface.go index 8dbe4300..cde64be4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface.go @@ -1,24 +1,28 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" + "net" + "net/http" "time" "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" + containertypes "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/events" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/image" - "github.com/docker/docker/api/types/network" + networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // CommonAPIClient is the common methods between stable and experimental versions of APIClient. type CommonAPIClient interface { + ConfigAPIClient ContainerAPIClient + DistributionAPIClient ImageAPIClient NodeAPIClient NetworkAPIClient @@ -29,17 +33,23 @@ type CommonAPIClient interface { SystemAPIClient VolumeAPIClient ClientVersion() string + DaemonHost() string + HTTPClient() *http.Client ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) + NegotiateAPIVersion(ctx context.Context) + NegotiateAPIVersionPing(types.Ping) + DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) + Dialer() func(context.Context) (net.Conn, error) + Close() error } // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) - ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) + ContainerCreate(ctx context.Context, config *containertypes.Config, hostConfig *containertypes.HostConfig, networkingConfig *networktypes.NetworkingConfig, containerName string) (containertypes.ContainerCreateCreatedBody, error) + ContainerDiff(ctx context.Context, container string) ([]containertypes.ContainerChangeResponseItem, error) + ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) 
(types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error @@ -59,18 +69,25 @@ type ContainerAPIClient interface { ContainerStats(ctx context.Context, container string, stream bool) (types.ContainerStats, error) ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (container.ContainerTopOKBody, error) + ContainerTop(ctx context.Context, container string, arguments []string) (containertypes.ContainerTopOKBody, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) - ContainerWait(ctx context.Context, container string) (int64, error) + ContainerUpdate(ctx context.Context, container string, updateConfig containertypes.UpdateConfig) (containertypes.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string, condition containertypes.WaitCondition) (<-chan containertypes.ContainerWaitOKBody, <-chan error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, pruneFilters filters.Args) (types.ContainersPruneReport, error) } +// DistributionAPIClient defines API client methods for the registry +type DistributionAPIClient interface { + DistributionInspect(ctx context.Context, image, encodedRegistryAuth string) (registry.DistributionInspect, error) +} + // ImageAPIClient defines API client methods for the images type ImageAPIClient interface { ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) + BuildCachePrune(ctx context.Context, opts types.BuildCachePruneOptions) (*types.BuildCachePruneReport, error) + BuildCancel(ctx context.Context, id string) error ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) ImageHistory(ctx context.Context, image string) ([]image.HistoryResponseItem, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) @@ -88,13 +105,13 @@ type ImageAPIClient interface { // NetworkAPIClient defines API client methods for the networks type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error + NetworkConnect(ctx context.Context, network, container string, config *networktypes.EndpointSettings) error NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, []byte, error) + NetworkDisconnect(ctx context.Context, network, container string, force bool) error + NetworkInspect(ctx context.Context, network string, options 
types.NetworkInspectOptions) (types.NetworkResource, error) + NetworkInspectWithRaw(ctx context.Context, network string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error + NetworkRemove(ctx context.Context, network string) error NetworksPrune(ctx context.Context, pruneFilter filters.Args) (types.NetworksPruneReport, error) } @@ -155,10 +172,10 @@ type SystemAPIClient interface { // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) + VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, pruneFilter filters.Args) (types.VolumesPruneReport, error) } @@ -171,3 +188,12 @@ type SecretAPIClient interface { SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error } + +// ConfigAPIClient defines API client methods for configs +type ConfigAPIClient interface { + ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) + ConfigCreate(ctx context.Context, config swarm.ConfigSpec) (types.ConfigCreateResponse, error) + ConfigRemove(ctx context.Context, id string) error + ConfigInspectWithRaw(ctx context.Context, name string) (swarm.Config, []byte, error) + ConfigUpdate(ctx context.Context, id string, version swarm.Version, config swarm.ConfigSpec) error +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_experimental.go index 51da98ec..402ffb51 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_experimental.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) type apiClientExperimental interface { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_stable.go index cc90a3cb..5502cd74 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_stable.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/interface_stable.go @@ -1,4 +1,4 @@ -package client +package client // import "github.com/docker/docker/client" // APIClient is an interface that clients that talk with a docker server must implement. 
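Editor's note: CommonAPIClient grows NegotiateAPIVersion, DaemonHost, Dialer, DialHijack and Close, which is what lets callers stop hard-coding an API version. A sketch, assuming the NewClientWithOpts/FromEnv constructor present in this vendored client:

package main

import (
	"context"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		log.Fatal(err)
	}

	// Ping the daemon once and downgrade the client's API version if the
	// daemon reports an older maximum.
	cli.NegotiateAPIVersion(context.Background())
	log.Println("using API version", cli.ClientVersion())
}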
type APIClient interface { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/login.go index 79219ff5..f0585206 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/login.go @@ -1,29 +1,25 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" - "net/http" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/registry" - "golang.org/x/net/context" ) // RegistryLogin authenticates the docker server with a given docker registry. // It returns unauthorizedError when the authentication fails. func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) + defer ensureReaderClosed(resp) - if resp.statusCode == http.StatusUnauthorized { - return registry.AuthenticateOKBody{}, unauthorizedError{err} - } if err != nil { return registry.AuthenticateOKBody{}, err } var response registry.AuthenticateOKBody err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go index c022c17b..57189461 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go @@ -1,9 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/network" - "golang.org/x/net/context" ) // NetworkConnect connects a container to an existent network in the docker host. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go index 4067a541..278d9383 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // NetworkCreate creates a new network in the docker host. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go
index c022c17b..57189461 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_connect.go
@@ -1,9 +1,10 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
+
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/network"
-	"golang.org/x/net/context"
 )
 
 // NetworkConnect connects a container to an existent network in the docker host.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go
index 4067a541..278d9383 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_create.go
@@ -1,10 +1,10 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 
 	"github.com/docker/docker/api/types"
-	"golang.org/x/net/context"
 )
 
 // NetworkCreate creates a new network in the docker host.
@@ -15,11 +15,11 @@ func (cli *Client) NetworkCreate(ctx context.Context, name string, options types
 	}
 	var response types.NetworkCreateResponse
 	serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil)
+	defer ensureReaderClosed(serverResp)
 	if err != nil {
 		return response, err
 	}
 
-	json.NewDecoder(serverResp.body).Decode(&response)
-	ensureReaderClosed(serverResp)
+	err = json.NewDecoder(serverResp.body).Decode(&response)
 	return response, err
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_disconnect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_disconnect.go
index 24b58e3c..dd156766 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_disconnect.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_disconnect.go
@@ -1,8 +1,9 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
+
 	"github.com/docker/docker/api/types"
-	"golang.org/x/net/context"
 )
 
 // NetworkDisconnect disconnects a container from an existent network in the docker host.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_inspect.go
index 72423040..89a05b30 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_inspect.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_inspect.go
@@ -1,41 +1,43 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
-	"golang.org/x/net/context"
 )
 
 // NetworkInspect returns the information for a specific network configured in the docker host.
-func (cli *Client) NetworkInspect(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, error) {
-	networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, verbose)
+func (cli *Client) NetworkInspect(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, error) {
+	networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID, options)
 	return networkResource, err
 }
 
 // NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation.
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, verbose bool) (types.NetworkResource, []byte, error) {
+func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string, options types.NetworkInspectOptions) (types.NetworkResource, []byte, error) {
+	if networkID == "" {
+		return types.NetworkResource{}, nil, objectNotFoundError{object: "network", id: networkID}
+	}
 	var (
 		networkResource types.NetworkResource
 		resp            serverResponse
 		err             error
 	)
 	query := url.Values{}
-	if verbose {
+	if options.Verbose {
 		query.Set("verbose", "true")
 	}
-	resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
-	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return networkResource, nil, networkNotFoundError{networkID}
-		}
-		return networkResource, nil, err
+	if options.Scope != "" {
+		query.Set("scope", options.Scope)
 	}
+	resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
 	defer ensureReaderClosed(resp)
+	if err != nil {
+		return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
+	}
 
 	body, err := ioutil.ReadAll(resp.body)
 	if err != nil {
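Call sites written against the old `verbose bool` parameter need a small migration to the new options struct, which also exposes the `scope` query parameter. A sketch of the new call shape, assuming the package's `NewClientWithOpts` constructor:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// The old call shape was NetworkInspect(ctx, "bridge", true); the verbose
	// flag and the new scope filter now travel in one options struct.
	res, err := cli.NetworkInspect(context.Background(), "bridge", types.NetworkInspectOptions{
		Verbose: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Name, res.Scope)
}
```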
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_list.go
index e566a93e..7130c136 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_list.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_list.go
@@ -1,12 +1,12 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	"golang.org/x/net/context"
 )
 
 // NetworkList returns the list of networks configured in the docker host.
@@ -22,10 +22,10 @@ func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOpt
 	}
 	var networkResources []types.NetworkResource
 	resp, err := cli.get(ctx, "/networks", query, nil)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return networkResources, err
 	}
 	err = json.NewDecoder(resp.body).Decode(&networkResources)
-	ensureReaderClosed(resp)
 	return networkResources, err
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_prune.go
index 7352a7f0..cebb1882 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_prune.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_prune.go
@@ -1,12 +1,12 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	"golang.org/x/net/context"
 )
 
 // NetworksPrune requests the daemon to delete unused networks
@@ -23,10 +23,10 @@ func (cli *Client) NetworksPrune(ctx context.Context, pruneFilters filters.Args)
 	}
 
 	serverResp, err := cli.post(ctx, "/networks/prune", query, nil, nil)
+	defer ensureReaderClosed(serverResp)
 	if err != nil {
 		return report, err
 	}
-	defer ensureReaderClosed(serverResp)
 
 	if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil {
 		return report, fmt.Errorf("Error retrieving network prune report: %v", err)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_remove.go
index 6bd67489..e71b16d8 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_remove.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/network_remove.go
@@ -1,10 +1,10 @@
-package client
+package client // import "github.com/docker/docker/client"
 
-import "golang.org/x/net/context"
+import "context"
 
 // NetworkRemove removes an existent network from the docker host.
 func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
 	resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
-	ensureReaderClosed(resp)
-	return err
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "network", networkID)
 }
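Because `NetworkRemove` now funnels failures through `wrapResponseError`, a missing network comes back as a typed not-found error, which makes idempotent cleanup straightforward. A sketch; the `removeIfPresent` helper is hypothetical:

```go
package main

import (
	"context"

	"github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

// removeIfPresent is a hypothetical cleanup helper: a "no such network"
// response is treated as success instead of being propagated.
func removeIfPresent(ctx context.Context, cli *client.Client, networkID string) error {
	err := cli.NetworkRemove(ctx, networkID)
	if errdefs.IsNotFound(err) {
		return nil // already gone
	}
	return err
}
```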
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_inspect.go
index abf505d2..d296c9fd 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_inspect.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_inspect.go
@@ -1,25 +1,24 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // NodeInspectWithRaw returns the node information.
 func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
-	serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
-	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return swarm.Node{}, nil, nodeNotFoundError{nodeID}
-		}
-		return swarm.Node{}, nil, err
+	if nodeID == "" {
+		return swarm.Node{}, nil, objectNotFoundError{object: "node", id: nodeID}
 	}
+	serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
 	defer ensureReaderClosed(serverResp)
+	if err != nil {
+		return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
+	}
 
 	body, err := ioutil.ReadAll(serverResp.body)
 	if err != nil {
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_list.go
index 3e8440f0..c212906b 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_list.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_list.go
@@ -1,13 +1,13 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // NodeList returns the list of nodes.
@@ -15,7 +15,7 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions)
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 
 		if err != nil {
 			return nil, err
@@ -25,12 +25,12 @@ func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions)
 	}
 
 	resp, err := cli.get(ctx, "/nodes", query, nil)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return nil, err
 	}
 
 	var nodes []swarm.Node
 	err = json.NewDecoder(resp.body).Decode(&nodes)
-	ensureReaderClosed(resp)
 	return nodes, err
 }
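`filters.ToParam` is replaced by `filters.ToJSON` throughout these list endpoints; callers that build filter arguments themselves are unaffected, since the `Args` API is unchanged and the serialization happens inside the client. A sketch of the usual call shape, assuming `NewClientWithOpts`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// The client serializes these with filters.ToJSON internally.
	args := filters.NewArgs()
	args.Add("role", "manager")
	nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{Filters: args})
	if err != nil {
		panic(err)
	}
	fmt.Println("managers:", len(nodes))
}
```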
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_remove.go
index 0a77f3d5..03ab8780 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_remove.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_remove.go
@@ -1,11 +1,10 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
-
-	"golang.org/x/net/context"
 )
 
 // NodeRemove removes a Node.
@@ -16,6 +15,6 @@ func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.
 	}
 
 	resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
-	ensureReaderClosed(resp)
-	return err
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "node", nodeID)
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_update.go
index 3ca97602..de32a617 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_update.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/node_update.go
@@ -1,11 +1,11 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"net/url"
 	"strconv"
 
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // NodeUpdate updates a Node.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/options.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/options.go
new file mode 100644
index 00000000..6f77f095
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/options.go
@@ -0,0 +1,172 @@
+package client
+
+import (
+	"context"
+	"net"
+	"net/http"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
+	"github.com/pkg/errors"
+)
+
+// Opt is a configuration option to initialize a client
+type Opt func(*Client) error
+
+// FromEnv configures the client with values from environment variables.
+//
+// Supported environment variables:
+// DOCKER_HOST to set the url to the docker server.
+// DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
+// DOCKER_CERT_PATH to load the TLS certificates from.
+// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
+func FromEnv(c *Client) error {
+	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
+		options := tlsconfig.Options{
+			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
+			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
+			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
+			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
+		}
+		tlsc, err := tlsconfig.Client(options)
+		if err != nil {
+			return err
+		}
+
+		c.client = &http.Client{
+			Transport:     &http.Transport{TLSClientConfig: tlsc},
+			CheckRedirect: CheckRedirect,
+		}
+	}
+
+	if host := os.Getenv("DOCKER_HOST"); host != "" {
+		if err := WithHost(host)(c); err != nil {
+			return err
+		}
+	}
+
+	if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
+		if err := WithVersion(version)(c); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// WithDialer applies the dialer.DialContext to the client transport. This can be
+// used to set the Timeout and KeepAlive settings of the client.
+// Deprecated: use WithDialContext
+func WithDialer(dialer *net.Dialer) Opt {
+	return WithDialContext(dialer.DialContext)
+}
+
+// WithDialContext applies the dialer to the client transport. This can be
+// used to set the Timeout and KeepAlive settings of the client.
+func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
+	return func(c *Client) error {
+		if transport, ok := c.client.Transport.(*http.Transport); ok {
+			transport.DialContext = dialContext
+			return nil
+		}
+		return errors.Errorf("cannot apply dialer to transport: %T", c.client.Transport)
+	}
+}
+
+// WithHost overrides the client host with the specified one.
+func WithHost(host string) Opt {
+	return func(c *Client) error {
+		hostURL, err := ParseHostURL(host)
+		if err != nil {
+			return err
+		}
+		c.host = host
+		c.proto = hostURL.Scheme
+		c.addr = hostURL.Host
+		c.basePath = hostURL.Path
+		if transport, ok := c.client.Transport.(*http.Transport); ok {
+			return sockets.ConfigureTransport(transport, c.proto, c.addr)
+		}
+		return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
+	}
+}
+
+// WithHTTPClient overrides the client http client with the specified one
+func WithHTTPClient(client *http.Client) Opt {
+	return func(c *Client) error {
+		if client != nil {
+			c.client = client
+		}
+		return nil
+	}
+}
+
+// WithTimeout configures the time limit for requests made by the HTTP client
+func WithTimeout(timeout time.Duration) Opt {
+	return func(c *Client) error {
+		c.client.Timeout = timeout
+		return nil
+	}
+}
+
+// WithHTTPHeaders overrides the client default http headers
+func WithHTTPHeaders(headers map[string]string) Opt {
+	return func(c *Client) error {
+		c.customHTTPHeaders = headers
+		return nil
+	}
+}
+
+// WithScheme overrides the client scheme with the specified one
+func WithScheme(scheme string) Opt {
+	return func(c *Client) error {
+		c.scheme = scheme
+		return nil
+	}
+}
+
+// WithTLSClientConfig applies a tls config to the client transport.
+func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
+	return func(c *Client) error {
+		opts := tlsconfig.Options{
+			CAFile:             cacertPath,
+			CertFile:           certPath,
+			KeyFile:            keyPath,
+			ExclusiveRootPools: true,
+		}
+		config, err := tlsconfig.Client(opts)
+		if err != nil {
+			return errors.Wrap(err, "failed to create tls config")
+		}
+		if transport, ok := c.client.Transport.(*http.Transport); ok {
+			transport.TLSClientConfig = config
+			return nil
+		}
+		return errors.Errorf("cannot apply tls config to transport: %T", c.client.Transport)
+	}
+}
+
+// WithVersion overrides the client version with the specified one. If an empty
+// version is specified, the value will be ignored to allow version negotiation.
+func WithVersion(version string) Opt {
+	return func(c *Client) error {
+		if version != "" {
+			c.version = version
+			c.manualOverride = true
+		}
+		return nil
+	}
+}
+
+// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
+// With this option enabled, the client automatically negotiates the API version
+// to use when making requests. API version negotiation is performed on the first
+// request; subsequent requests will not re-negotiate.
+func WithAPIVersionNegotiation() Opt {
+	return func(c *Client) error {
+		c.negotiateVersion = true
+		return nil
+	}
+}
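The new options.go introduces the functional-options pattern for client construction; `Opt` values compose left to right, so later options override earlier ones. A minimal sketch, assuming the `NewClientWithOpts` constructor from the same package consumes these options:

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/client"
)

func main() {
	// Environment first, then explicit overrides; later options win.
	cli, err := client.NewClientWithOpts(
		client.FromEnv,
		client.WithTimeout(30*time.Second),
		client.WithHTTPHeaders(map[string]string{"User-Agent": "example-tool/0.1"}), // placeholder UA
		client.WithAPIVersionNegotiation(), // negotiate on the first request
	)
	if err != nil {
		panic(err)
	}
	fmt.Println("client ready, version:", cli.ClientVersion())
}
```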
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/ping.go
index d6212ef8..90f39ec1 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/ping.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/ping.go
@@ -1,32 +1,66 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
-	"fmt"
+	"context"
+	"net/http"
+	"path"
 
 	"github.com/docker/docker/api/types"
-	"golang.org/x/net/context"
+	"github.com/docker/docker/errdefs"
 )
 
-// Ping pings the server and returns the value of the "Docker-Experimental", "OS-Type" & "API-Version" headers
+// Ping pings the server and returns the value of the "Docker-Experimental",
+// "Builder-Version", "OS-Type" & "API-Version" headers. It attempts to use
+// a HEAD request on the endpoint, but falls back to GET if HEAD is not supported
+// by the daemon.
 func (cli *Client) Ping(ctx context.Context) (types.Ping, error) {
 	var ping types.Ping
-	req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil)
+
+	// Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
+	// because ping requests are used during API version negotiation, so we want
+	// to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
+	req, err := cli.buildRequest("HEAD", path.Join(cli.basePath, "/_ping"), nil, nil)
 	if err != nil {
 		return ping, err
 	}
 	serverResp, err := cli.doRequest(ctx, req)
+	if err == nil {
+		defer ensureReaderClosed(serverResp)
+		switch serverResp.statusCode {
+		case http.StatusOK, http.StatusInternalServerError:
+			// Server handled the request, so parse the response
+			return parsePingResponse(cli, serverResp)
+		}
+	} else if IsErrConnectionFailed(err) {
+		return ping, err
+	}
+
+	req, err = cli.buildRequest("GET", path.Join(cli.basePath, "/_ping"), nil, nil)
 	if err != nil {
 		return ping, err
 	}
+	serverResp, err = cli.doRequest(ctx, req)
 	defer ensureReaderClosed(serverResp)
+	if err != nil {
+		return ping, err
+	}
+	return parsePingResponse(cli, serverResp)
+}
 
-	ping.APIVersion = serverResp.header.Get("API-Version")
-
-	if serverResp.header.Get("Docker-Experimental") == "true" {
+func parsePingResponse(cli *Client, resp serverResponse) (types.Ping, error) {
+	var ping types.Ping
+	if resp.header == nil {
+		err := cli.checkResponseErr(resp)
+		return ping, errdefs.FromStatusCode(err, resp.statusCode)
+	}
+	ping.APIVersion = resp.header.Get("API-Version")
+	ping.OSType = resp.header.Get("OSType")
+	if resp.header.Get("Docker-Experimental") == "true" {
 		ping.Experimental = true
 	}
-
-	ping.OSType = serverResp.header.Get("OSType")
-
-	return ping, nil
+	if bv := resp.header.Get("Builder-Version"); bv != "" {
+		ping.BuilderVersion = types.BuilderVersion(bv)
+	}
+	err := cli.checkResponseErr(resp)
+	return ping, errdefs.FromStatusCode(err, resp.statusCode)
}
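The reworked `Ping` issues a HEAD request first and falls back to GET, and now reports the builder version alongside the other negotiated headers. A sketch of reading the result, assuming `NewClientWithOpts`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	ping, err := cli.Ping(context.Background())
	if err != nil {
		panic(err)
	}
	// All four values are parsed from response headers by parsePingResponse.
	fmt.Println("API version: ", ping.APIVersion)
	fmt.Println("OS type:     ", ping.OSType)
	fmt.Println("experimental:", ping.Experimental)
	fmt.Println("builder:     ", ping.BuilderVersion)
}
```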
"context" "net/url" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginDisable disables a plugin diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_enable.go index 95517c4b..736da48b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_enable.go @@ -1,11 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "strconv" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginEnable enables a plugin diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_inspect.go index 89f39ee2..81b89732 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -1,26 +1,25 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // PluginInspectWithRaw inspects an existing plugin func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) { + if name == "" { + return nil, nil, objectNotFoundError{object: "plugin", id: name} + } resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil) + defer ensureReaderClosed(resp) if err != nil { - if resp.statusCode == http.StatusNotFound { - return nil, nil, pluginNotFoundError{name} - } - return nil, nil, err + return nil, nil, wrapResponseError(err, resp, "plugin", name) } - defer ensureReaderClosed(resp) body, err := ioutil.ReadAll(resp.body) if err != nil { return nil, nil, err diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_install.go index ce3e0506..012afe61 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_install.go @@ -1,15 +1,15 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "io" - "net/http" "net/url" "github.com/docker/distribution/reference" "github.com/docker/docker/api/types" + "github.com/docker/docker/errdefs" "github.com/pkg/errors" - "golang.org/x/net/context" ) // PluginInstall installs a plugin @@ -78,7 +78,7 @@ func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, privileg func (cli *Client) checkPluginPermissions(ctx context.Context, query url.Values, options types.PluginInstallOptions) (types.PluginPrivileges, error) { resp, err := cli.tryPluginPrivileges(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { + if errdefs.IsUnauthorized(err) && options.PrivilegeFunc != nil { // todo: do inspect before to check existing name before checking privileges newAuthHeader, privilegeErr := options.PrivilegeFunc() if privilegeErr != nil { diff --git 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_list.go
index 3acde3b9..8285cecd 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_list.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_list.go
@@ -1,12 +1,12 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	"golang.org/x/net/context"
 )
 
 // PluginList returns the installed plugins
@@ -22,11 +22,11 @@ func (cli *Client) PluginList(ctx context.Context, filter filters.Args) (types.P
 		query.Set("filters", filterJSON)
 	}
 	resp, err := cli.get(ctx, "/plugins", query, nil)
+	defer ensureReaderClosed(resp)
 	if err != nil {
-		return plugins, err
+		return plugins, wrapResponseError(err, resp, "plugin", "")
 	}
 
 	err = json.NewDecoder(resp.body).Decode(&plugins)
-	ensureReaderClosed(resp)
 	return plugins, err
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_push.go
index 1e5f9632..d20bfe84 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_push.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_push.go
@@ -1,9 +1,8 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"io"
-
-	"golang.org/x/net/context"
 )
 
 // PluginPush pushes a plugin to a registry
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_remove.go
index b017e4d3..51ca1040 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_remove.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_remove.go
@@ -1,10 +1,10 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
-	"golang.org/x/net/context"
 )
 
 // PluginRemove removes a plugin
@@ -15,6 +15,6 @@ func (cli *Client) PluginRemove(ctx context.Context, name string, options types.
 	}
 
 	resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
-	ensureReaderClosed(resp)
-	return err
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "plugin", name)
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_set.go
index 3260d2a9..dcf5752c 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_set.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_set.go
@@ -1,7 +1,7 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
-	"golang.org/x/net/context"
+	"context"
 )
 
 // PluginSet modifies settings for an existing plugin
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_upgrade.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_upgrade.go
index 24293c50..115cea94 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_upgrade.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/plugin_upgrade.go
@@ -1,18 +1,20 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
-	"fmt"
+	"context"
 	"io"
 	"net/url"
 
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/pkg/errors"
-	"golang.org/x/net/context"
 )
 
 // PluginUpgrade upgrades a plugin
 func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types.PluginInstallOptions) (rc io.ReadCloser, err error) {
+	if err := cli.NewVersionError("1.26", "plugin upgrade"); err != nil {
+		return nil, err
+	}
 	query := url.Values{}
 	if _, err := reference.ParseNormalizedNamed(options.RemoteRef); err != nil {
 		return nil, errors.Wrap(err, "invalid remote reference")
@@ -33,5 +35,5 @@ func (cli *Client) PluginUpgrade(ctx context.Context, name string, options types
 
 func (cli *Client) tryPluginUpgrade(ctx context.Context, query url.Values, privileges types.PluginPrivileges, name, registryAuth string) (serverResponse, error) {
 	headers := map[string][]string{"X-Registry-Auth": {registryAuth}}
-	return cli.post(ctx, fmt.Sprintf("/plugins/%s/upgrade", name), query, privileges, headers)
+	return cli.post(ctx, "/plugins/"+name+"/upgrade", query, privileges, headers)
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/request.go
index 6457b316..3078335e 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/request.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/request.go
@@ -1,7 +1,8 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"fmt"
 	"io"
@@ -14,9 +15,8 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/versions"
+	"github.com/docker/docker/errdefs"
 	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-	"golang.org/x/net/context/ctxhttp"
 )
 
 // serverResponse is a wrapper for http API responses.
@@ -24,6 +24,7 @@ type serverResponse struct {
 	body       io.ReadCloser
 	header     http.Header
 	statusCode int
+	reqURL     *url.URL
 }
 
 // head sends an http request to the docker API using the method HEAD.
@@ -114,24 +115,30 @@ func (cli *Client) buildRequest(method, path string, body io.Reader, headers hea
 }
 
 func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) {
-	req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers)
+	req, err := cli.buildRequest(method, cli.getAPIPath(ctx, path, query), body, headers)
 	if err != nil {
 		return serverResponse{}, err
 	}
-	return cli.doRequest(ctx, req)
+	resp, err := cli.doRequest(ctx, req)
+	if err != nil {
+		return resp, errdefs.FromStatusCode(err, resp.statusCode)
+	}
+	err = cli.checkResponseErr(resp)
+	return resp, errdefs.FromStatusCode(err, resp.statusCode)
 }
 
 func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) {
-	serverResp := serverResponse{statusCode: -1}
+	serverResp := serverResponse{statusCode: -1, reqURL: req.URL}
 
-	resp, err := ctxhttp.Do(ctx, cli.client, req)
+	req = req.WithContext(ctx)
+	resp, err := cli.client.Do(req)
 	if err != nil {
 		if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
 			return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
 		}
 
 		if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
-			return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings: %v", err)
+			return serverResp, errors.Wrap(err, "The server probably has client authentication (--tlsverify) enabled. Please check your TLS client certification settings")
 		}
 
 		// Don't decorate context sentinel errors; users may be comparing to
@@ -179,37 +186,56 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
 	if resp != nil {
 		serverResp.statusCode = resp.StatusCode
+		serverResp.body = resp.Body
+		serverResp.header = resp.Header
 	}
-
-	if serverResp.statusCode < 200 || serverResp.statusCode >= 400 {
-		body, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return serverResp, err
-		}
-		if len(body) == 0 {
-			return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL)
-		}
-
-		var errorMessage string
-		if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) &&
-			resp.Header.Get("Content-Type") == "application/json" {
-			var errorResponse types.ErrorResponse
-			if err := json.Unmarshal(body, &errorResponse); err != nil {
-				return serverResp, fmt.Errorf("Error reading JSON: %v", err)
-			}
-			errorMessage = errorResponse.Message
-		} else {
-			errorMessage = string(body)
-		}
-
-		return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage))
-	}
-
-	serverResp.body = resp.Body
-	serverResp.header = resp.Header
 	return serverResp, nil
 }
 
+func (cli *Client) checkResponseErr(serverResp serverResponse) error {
+	if serverResp.statusCode >= 200 && serverResp.statusCode < 400 {
+		return nil
+	}
+
+	var body []byte
+	var err error
+	if serverResp.body != nil {
+		bodyMax := 1 * 1024 * 1024 // 1 MiB
+		bodyR := &io.LimitedReader{
+			R: serverResp.body,
+			N: int64(bodyMax),
+		}
+		body, err = ioutil.ReadAll(bodyR)
+		if err != nil {
+			return err
+		}
+		if bodyR.N == 0 {
+			return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version",
+				http.StatusText(serverResp.statusCode), bodyMax, serverResp.reqURL)
+		}
+	}
+	if len(body) == 0 {
+		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+	}
+
+	var ct string
+	if serverResp.header != nil {
+		ct = serverResp.header.Get("Content-Type")
+	}
+
+	var errorMessage string
+	if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && ct == "application/json" {
+		var errorResponse types.ErrorResponse
+		if err := json.Unmarshal(body, &errorResponse); err != nil {
+			return errors.Wrap(err, "Error reading JSON")
+		}
+		errorMessage = strings.TrimSpace(errorResponse.Message)
+	} else {
+		errorMessage = strings.TrimSpace(string(body))
+	}
+
+	return errors.Wrap(errors.New(errorMessage), "Error response from daemon")
+}
+
 func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request {
 	// Add CLI Config's HTTP Headers BEFORE we set the Docker headers
 	// then the user can't change OUR headers
@@ -239,9 +265,9 @@ func encodeData(data interface{}) (*bytes.Buffer, error) {
 }
 
 func ensureReaderClosed(response serverResponse) {
-	if body := response.body; body != nil {
 		// Drain up to 512 bytes and close the body to let the Transport reuse the connection
-		io.CopyN(ioutil.Discard, body, 512)
+	if response.body != nil {
+		io.CopyN(ioutil.Discard, response.body, 512)
 		response.body.Close()
 	}
 }
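Two details in the request.go rework are worth noting: error bodies are capped at 1 MiB before being turned into messages, and `ensureReaderClosed` drains at most 512 bytes so the underlying keep-alive connection can be reused. The drain-and-close idiom is general net/http practice rather than anything specific to this client; a standalone sketch with a placeholder URL:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func main() {
	resp, err := http.Get("http://example.com/") // placeholder URL
	if err != nil {
		panic(err)
	}
	defer func() {
		// Drain a bounded amount, then close: leaving unread bytes on the
		// body prevents the Transport from reusing the connection.
		io.CopyN(ioutil.Discard, resp.Body, 512)
		resp.Body.Close()
	}()
	fmt.Println("status:", resp.Status)
}
```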
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_create.go
index b5325a56..fd5b9141 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_create.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_create.go
@@ -1,22 +1,25 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // SecretCreate creates a new Secret.
 func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) {
 	var response types.SecretCreateResponse
+	if err := cli.NewVersionError("1.25", "secret create"); err != nil {
+		return response, err
+	}
 	resp, err := cli.post(ctx, "/secrets/create", nil, secret, nil)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return response, err
 	}
 
 	err = json.NewDecoder(resp.body).Decode(&response)
-	ensureReaderClosed(resp)
 	return response, err
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_inspect.go
index f7745761..d093916c 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_inspect.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_inspect.go
@@ -1,25 +1,27 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // SecretInspectWithRaw returns the secret information with raw data
 func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) {
-	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
-	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return swarm.Secret{}, nil, secretNotFoundError{id}
-		}
+	if err := cli.NewVersionError("1.25", "secret inspect"); err != nil {
 		return swarm.Secret{}, nil, err
 	}
+	if id == "" {
+		return swarm.Secret{}, nil, objectNotFoundError{object: "secret", id: id}
+	}
+	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
 	defer ensureReaderClosed(resp)
+	if err != nil {
+		return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
+	}
 
 	body, err := ioutil.ReadAll(resp.body)
 	if err != nil {
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_list.go
index 7e9d5ec1..a0289c9f 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_list.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_list.go
@@ -1,21 +1,24 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // SecretList returns the list of secrets.
 func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) {
+	if err := cli.NewVersionError("1.25", "secret list"); err != nil {
+		return nil, err
+	}
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return nil, err
 		}
@@ -24,12 +27,12 @@ func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptio
 	}
 
 	resp, err := cli.get(ctx, "/secrets", query, nil)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return nil, err
 	}
 
 	var secrets []swarm.Secret
 	err = json.NewDecoder(resp.body).Decode(&secrets)
-	ensureReaderClosed(resp)
 	return secrets, err
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_remove.go
index 1955b988..c16f5558 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_remove.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_remove.go
@@ -1,10 +1,13 @@
-package client
+package client // import "github.com/docker/docker/client"
 
-import "golang.org/x/net/context"
+import "context"
 
 // SecretRemove removes a Secret.
 func (cli *Client) SecretRemove(ctx context.Context, id string) error {
+	if err := cli.NewVersionError("1.25", "secret remove"); err != nil {
+		return err
+	}
 	resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
-	ensureReaderClosed(resp)
-	return err
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "secret", id)
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_update.go
index 42cdbbe1..164256bb 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_update.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/secret_update.go
@@ -1,15 +1,18 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"net/url"
 	"strconv"
 
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
-// SecretUpdate attempts to updates a Secret
+// SecretUpdate attempts to update a Secret
 func (cli *Client) SecretUpdate(ctx context.Context, id string, version swarm.Version, secret swarm.SecretSpec) error {
+	if err := cli.NewVersionError("1.25", "secret update"); err != nil {
+		return err
+	}
 	query := url.Values{}
 	query.Set("version", strconv.FormatUint(version.Index, 10))
 	resp, err := cli.post(ctx, "/secrets/"+id+"/update", query, secret, nil)
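Every secret entry point is now guarded by `NewVersionError`, so calls against a daemon older than API 1.25 fail fast on the client side instead of producing a confusing 404 from the server. A sketch of observing that guard by pinning an old version:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// Pinning an old API version here makes the client-side guard fire.
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.24"))
	if err != nil {
		panic(err)
	}
	_, err = cli.SecretList(context.Background(), types.SecretListOptions{})
	if err != nil {
		// Expected: an error explaining "secret list" requires API >= 1.25.
		fmt.Println("client", cli.ClientVersion(), "->", err)
	}
}
```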
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_create.go
index 3d1be225..620fc6cf 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_create.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_create.go
@@ -1,30 +1,166 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
+	"fmt"
+	"strings"
 
+	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
 )
 
 // ServiceCreate creates a new Service.
 func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) {
-	var headers map[string][]string
+	var distErr error
+
+	headers := map[string][]string{
+		"version": {cli.version},
+	}
 
 	if options.EncodedRegistryAuth != "" {
-		headers = map[string][]string{
-			"X-Registry-Auth": {options.EncodedRegistryAuth},
+		headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
+	}
+
+	// Make sure containerSpec is not nil when no runtime is set or the runtime is set to container
+	if service.TaskTemplate.ContainerSpec == nil && (service.TaskTemplate.Runtime == "" || service.TaskTemplate.Runtime == swarm.RuntimeContainer) {
+		service.TaskTemplate.ContainerSpec = &swarm.ContainerSpec{}
+	}
+
+	if err := validateServiceSpec(service); err != nil {
+		return types.ServiceCreateResponse{}, err
+	}
+
+	// ensure that the image is tagged
+	var imgPlatforms []swarm.Platform
+	if service.TaskTemplate.ContainerSpec != nil {
+		if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+			service.TaskTemplate.ContainerSpec.Image = taggedImg
 		}
+		if options.QueryRegistry {
+			var img string
+			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+			if img != "" {
+				service.TaskTemplate.ContainerSpec.Image = img
+			}
+		}
+	}
+
+	// ensure that the image is tagged
+	if service.TaskTemplate.PluginSpec != nil {
+		if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+			service.TaskTemplate.PluginSpec.Remote = taggedImg
+		}
+		if options.QueryRegistry {
+			var img string
+			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+			if img != "" {
+				service.TaskTemplate.PluginSpec.Remote = img
+			}
+		}
+	}
+
+	if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+		service.TaskTemplate.Placement = &swarm.Placement{}
+	}
+	if len(imgPlatforms) > 0 {
+		service.TaskTemplate.Placement.Platforms = imgPlatforms
 	}
 
 	var response types.ServiceCreateResponse
 	resp, err := cli.post(ctx, "/services/create", nil, service, headers)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return response, err
 	}
 
 	err = json.NewDecoder(resp.body).Decode(&response)
-	ensureReaderClosed(resp)
+
+	if distErr != nil {
+		response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+	}
+
 	return response, err
 }
+
+func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
+	distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
+	var platforms []swarm.Platform
+	if err != nil {
+		return "", nil, err
+	}
+
+	imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+
+	if len(distributionInspect.Platforms) > 0 {
+		platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
+		for _, p := range distributionInspect.Platforms {
+			// clear architecture field for arm. This is a temporary patch to address
+			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
+			// image manifests report "arm" as the architecture, the node reports
+			// something like "armv7l" (includes the variant), which causes arm images
+			// to stop working with swarm mode. This patch removes the architecture
+			// constraint for arm images to ensure tasks get scheduled.
+			arch := p.Architecture
+			if strings.ToLower(arch) == "arm" {
+				arch = ""
+			}
+			platforms = append(platforms, swarm.Platform{
+				Architecture: arch,
+				OS:           p.OS,
+			})
+		}
+	}
+	return imageWithDigest, platforms, err
+}
+
+// imageWithDigestString takes an image string and a digest, and updates
+// the image string if it didn't originally contain a digest. It returns
+// an empty string if there are no updates.
+func imageWithDigestString(image string, dgst digest.Digest) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		if _, isCanonical := namedRef.(reference.Canonical); !isCanonical {
+			// ensure that image gets a default tag if none is provided
+			img, err := reference.WithDigest(namedRef, dgst)
+			if err == nil {
+				return reference.FamiliarString(img)
+			}
+		}
+	}
+	return ""
+}
+
+// imageWithTagString takes an image string, and returns a tagged image
+// string, adding a 'latest' tag if one was not provided. It returns an
+// empty string if a canonical reference was provided
+func imageWithTagString(image string) string {
+	namedRef, err := reference.ParseNormalizedNamed(image)
+	if err == nil {
+		return reference.FamiliarString(reference.TagNameOnly(namedRef))
+	}
+	return ""
+}
+
+// digestWarning constructs a formatted warning string using the
+// image name that could not be pinned by digest. The formatting
+// is hardcoded, but could me made smarter in the future
+func digestWarning(image string) string {
+	return fmt.Sprintf("image %s could not be accessed on a registry to record\nits digest. Each node will access %s independently,\npossibly leading to different nodes running different\nversions of the image.\n", image, image)
+}
+
+func validateServiceSpec(s swarm.ServiceSpec) error {
+	if s.TaskTemplate.ContainerSpec != nil && s.TaskTemplate.PluginSpec != nil {
+		return errors.New("must not specify both a container spec and a plugin spec in the task template")
+	}
+	if s.TaskTemplate.PluginSpec != nil && s.TaskTemplate.Runtime != swarm.RuntimePlugin {
+		return errors.New("mismatched runtime with plugin spec")
+	}
+	if s.TaskTemplate.ContainerSpec != nil && (s.TaskTemplate.Runtime != "" && s.TaskTemplate.Runtime != swarm.RuntimeContainer) {
+		return errors.New("mismatched runtime with container spec")
+	}
+	return nil
+}
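Putting the create path together: with `QueryRegistry` set, the client pins the image to a digest via `DistributionInspect` and, when the registry is unreachable, records a warning instead of failing the create. A sketch, assuming `NewClientWithOpts`; the service name and image are examples:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	replicas := uint64(2)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			// ContainerSpec is a pointer here; ServiceCreate fills in an
			// empty one for the container runtime if it is left nil.
			ContainerSpec: &swarm.ContainerSpec{Image: "nginx:1.17"},
		},
		Mode: swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
	}
	resp, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{
		QueryRegistry: true, // resolve and pin the image digest at create time
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("created service", resp.ID)
	for _, w := range resp.Warnings {
		fmt.Println("warning:", w) // e.g. the digestWarning text from above
	}
}
```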
 func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string, opts types.ServiceInspectOptions) (swarm.Service, []byte, error) {
+	if serviceID == "" {
+		return swarm.Service{}, nil, objectNotFoundError{object: "service", id: serviceID}
+	}
 	query := url.Values{}
 	query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
 	serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
-	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return swarm.Service{}, nil, serviceNotFoundError{serviceID}
-		}
-		return swarm.Service{}, nil, err
-	}
 	defer ensureReaderClosed(serverResp)
+	if err != nil {
+		return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
+	}
 
 	body, err := ioutil.ReadAll(serverResp.body)
 	if err != nil {
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_list.go
index c29e6d40..64d35e71 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_list.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_list.go
@@ -1,13 +1,13 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
 // ServiceList returns the list of services.
@@ -15,7 +15,7 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return nil, err
 		}
@@ -24,12 +24,12 @@ func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOpt
 	}
 
 	resp, err := cli.get(ctx, "/services", query, nil)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return nil, err
 	}
 
 	var services []swarm.Service
 	err = json.NewDecoder(resp.body).Decode(&services)
-	ensureReaderClosed(resp)
 	return services, err
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_logs.go
index 24384e3e..906fd405 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_logs.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_logs.go
@@ -1,14 +1,14 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"io"
 	"net/url"
 	"time"
 
-	"golang.org/x/net/context"
-
 	"github.com/docker/docker/api/types"
 	timetypes "github.com/docker/docker/api/types/time"
+	"github.com/pkg/errors"
 )
 
 // ServiceLogs returns the logs generated by a service in an io.ReadCloser.
@@ -26,7 +26,7 @@ func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options ty
 	if options.Since != "" {
 		ts, err := timetypes.GetTimestamp(options.Since, time.Now())
 		if err != nil {
-			return nil, err
+			return nil, errors.Wrap(err, `invalid value for "since"`)
 		}
 		query.Set("since", ts)
 	}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_remove.go
index a9331f92..953a2adf 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_remove.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_remove.go
@@ -1,10 +1,10 @@
-package client
+package client // import "github.com/docker/docker/client"
 
-import "golang.org/x/net/context"
+import "context"
 
 // ServiceRemove kills and removes a service.
 func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
 	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
-	ensureReaderClosed(resp)
-	return err
+	defer ensureReaderClosed(resp)
+	return wrapResponseError(err, resp, "service", serviceID)
 }
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_update.go
index 873a1e05..cd0f59e2 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_update.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/service_update.go
@@ -1,26 +1,30 @@
-package client
+package client // import "github.com/docker/docker/client"
 
 import (
+	"context"
 	"encoding/json"
 	"net/url"
 	"strconv"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/swarm"
-	"golang.org/x/net/context"
 )
 
-// ServiceUpdate updates a Service.
+// ServiceUpdate updates a Service. The version number is required to avoid conflicting writes.
+// It should be the value as set *before* the update. You can find this value in the Meta field
+// of swarm.Service, which can be found using ServiceInspectWithRaw.
 func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) (types.ServiceUpdateResponse, error) {
 	var (
-		headers map[string][]string
 		query   = url.Values{}
+		distErr error
 	)
 
+	headers := map[string][]string{
+		"version": {cli.version},
+	}
+
 	if options.EncodedRegistryAuth != "" {
-		headers = map[string][]string{
-			"X-Registry-Auth": {options.EncodedRegistryAuth},
-		}
+		headers["X-Registry-Auth"] = []string{options.EncodedRegistryAuth}
 	}
 
 	if options.RegistryAuthFrom != "" {
@@ -33,13 +37,58 @@ func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version
 
 	query.Set("version", strconv.FormatUint(version.Index, 10))
 
+	if err := validateServiceSpec(service); err != nil {
+		return types.ServiceUpdateResponse{}, err
+	}
+
+	var imgPlatforms []swarm.Platform
+	// ensure that the image is tagged
+	if service.TaskTemplate.ContainerSpec != nil {
+		if taggedImg := imageWithTagString(service.TaskTemplate.ContainerSpec.Image); taggedImg != "" {
+			service.TaskTemplate.ContainerSpec.Image = taggedImg
+		}
+		if options.QueryRegistry {
+			var img string
+			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.ContainerSpec.Image, options.EncodedRegistryAuth)
+			if img != "" {
+				service.TaskTemplate.ContainerSpec.Image = img
+			}
+		}
+	}
+
+	// ensure that the image is tagged
+	if service.TaskTemplate.PluginSpec != nil {
+		if taggedImg := imageWithTagString(service.TaskTemplate.PluginSpec.Remote); taggedImg != "" {
+			service.TaskTemplate.PluginSpec.Remote = taggedImg
+		}
+		if options.QueryRegistry {
+			var img string
+			img, imgPlatforms, distErr = imageDigestAndPlatforms(ctx, cli, service.TaskTemplate.PluginSpec.Remote, options.EncodedRegistryAuth)
+			if img != "" {
+				service.TaskTemplate.PluginSpec.Remote = img
+			}
+		}
+	}
+
+	if service.TaskTemplate.Placement == nil && len(imgPlatforms) > 0 {
+		service.TaskTemplate.Placement = &swarm.Placement{}
+	}
+	if len(imgPlatforms) > 0 {
+		service.TaskTemplate.Placement.Platforms = imgPlatforms
+	}
+
 	var response types.ServiceUpdateResponse
 	resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers)
+	defer ensureReaderClosed(resp)
 	if err != nil {
 		return response, err
 	}
 
 	err = json.NewDecoder(resp.body).Decode(&response)
-	ensureReaderClosed(resp)
+
+	if distErr != nil {
+		response.Warnings = append(response.Warnings, digestWarning(service.TaskTemplate.ContainerSpec.Image))
+	}
+
 	return response, err
 }
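The documented update flow is read-modify-write: inspect the service, mutate the returned spec, and pass back the version you read so the daemon can reject conflicting writers. A sketch, assuming `NewClientWithOpts` and a replicated service named "web":

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	svc, _, err := cli.ServiceInspectWithRaw(ctx, "web", types.ServiceInspectOptions{})
	if err != nil {
		panic(err)
	}
	// Mutate the spec we just read (assumes a replicated-mode service)...
	replicas := uint64(5)
	svc.Spec.Mode.Replicated.Replicas = &replicas
	// ...and send it back with the pre-update version (promoted from swarm.Meta).
	resp, err := cli.ServiceUpdate(ctx, svc.ID, svc.Version, svc.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		panic(err)
	}
	for _, w := range resp.Warnings {
		fmt.Println("warning:", w)
	}
}
```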
func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return types.SwarmUnlockKeyResponse{}, err } var response types.SwarmUnlockKeyResponse err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_init.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_init.go index 9e65e1cc..da3c1637 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_init.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_init.go @@ -1,21 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmInit initializes the swarm. func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) + defer ensureReaderClosed(serverResp) if err != nil { return "", err } var response string err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_inspect.go index 77e72f84..b52b67a8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_inspect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_inspect.go @@ -1,21 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmInspect inspects the swarm. func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { serverResp, err := cli.get(ctx, "/swarm", nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return swarm.Swarm{}, err } var response swarm.Swarm err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) return response, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_join.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_join.go index 19e5192b..a1cf0455 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_join.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_join.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmJoin joins the swarm. 
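The ServiceUpdate hunk above tightens the documented contract: callers must pass the version counter read *before* the update so the daemon can reject conflicting writes. A minimal sketch of that inspect-then-update flow against this client API follows; the `bumpReplicas` helper, the `"my-service"` ID, and the assumption that this vintage of the client exposes `NewClientWithOpts`/`FromEnv` are illustrative, not part of the diff:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

// bumpReplicas is a hypothetical helper showing the flow the ServiceUpdate
// doc comment describes: read the service, mutate its spec, send the
// pre-update version back with the update.
func bumpReplicas(ctx context.Context, cli *client.Client, serviceID string, replicas uint64) error {
	// The service's embedded Meta carries the version counter that
	// ServiceUpdate uses to detect conflicting writes.
	service, _, err := cli.ServiceInspectWithRaw(ctx, serviceID, types.ServiceInspectOptions{})
	if err != nil {
		return err
	}

	spec := service.Spec
	if spec.Mode.Replicated != nil {
		spec.Mode.Replicated.Replicas = &replicas
	}

	// Pass the version as it was *before* the update; a concurrent writer
	// bumps the index and this call fails instead of clobbering its change.
	resp, err := cli.ServiceUpdate(ctx, serviceID, service.Version, spec, types.ServiceUpdateOptions{})
	if err != nil {
		return err
	}
	for _, warning := range resp.Warnings {
		fmt.Println("warning:", warning)
	}
	return nil
}

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	if err := bumpReplicas(context.Background(), cli, "my-service", 3); err != nil {
		panic(err)
	}
}
```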
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_leave.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_leave.go index 3a205cf3..90ca84b3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_leave.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_leave.go @@ -1,9 +1,8 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" - - "golang.org/x/net/context" ) // SwarmLeave leaves the swarm. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_unlock.go index 9ee441fe..d2412f7d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_unlock.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -1,8 +1,9 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" + "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmUnlock unlocks a locked swarm. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_update.go index 7245fd4e..56a5bea7 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/swarm_update.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "fmt" "net/url" "strconv" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // SwarmUpdate updates the swarm. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_inspect.go index bc8058fc..44d40ba5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_inspect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_inspect.go @@ -1,26 +1,24 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types/swarm" - - "golang.org/x/net/context" ) // TaskInspectWithRaw returns the task information and its raw representation.
func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) { - serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Task{}, nil, taskNotFoundError{taskID} - } - return swarm.Task{}, nil, err + if taskID == "" { + return swarm.Task{}, nil, objectNotFoundError{object: "task", id: taskID} } + serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil) defer ensureReaderClosed(serverResp) + if err != nil { + return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID) + } body, err := ioutil.ReadAll(serverResp.body) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_list.go index 66324da9..4869b444 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_list.go @@ -1,13 +1,13 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/swarm" - "golang.org/x/net/context" ) // TaskList returns the list of tasks. @@ -15,7 +15,7 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) query := url.Values{} if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) + filterJSON, err := filters.ToJSON(options.Filters) if err != nil { return nil, err } @@ -24,12 +24,12 @@ func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) } resp, err := cli.get(ctx, "/tasks", query, nil) + defer ensureReaderClosed(resp) if err != nil { return nil, err } var tasks []swarm.Task err = json.NewDecoder(resp.body).Decode(&tasks) - ensureReaderClosed(resp) return tasks, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_logs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_logs.go index 2ed19543..6222fab5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_logs.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/task_logs.go @@ -1,12 +1,11 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "io" "net/url" "time" - "golang.org/x/net/context" - "github.com/docker/docker/api/types" timetypes "github.com/docker/docker/api/types/time" ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/transport.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/transport.go index 401ab15d..55413443 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/transport.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/transport.go @@ -1,18 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( "crypto/tls" "net/http" ) -// transportFunc allows us to inject a mock transport for testing. We define it -// here so we can detect the tlsconfig and return nil for only this type. 
-type transportFunc func(*http.Request) (*http.Response, error) - -func (tf transportFunc) RoundTrip(req *http.Request) (*http.Response, error) { - return tf(req) -} - // resolveTLSConfig attempts to resolve the TLS configuration from the // RoundTripper. func resolveTLSConfig(transport http.RoundTripper) *tls.Config { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/utils.go index 23d520ec..7f3ff44e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/utils.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/utils.go @@ -1,9 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( - "github.com/docker/docker/api/types/filters" "net/url" "regexp" + + "github.com/docker/docker/api/types/filters" ) var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) @@ -23,7 +24,7 @@ func getDockerOS(serverHeader string) string { func getFiltersQuery(f filters.Args) (url.Values, error) { query := url.Values{} if f.Len() > 0 { - filterJSON, err := filters.ToParam(f) + filterJSON, err := filters.ToJSON(f) if err != nil { return query, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/version.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/version.go index 933ceb4a..8f17ff4e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/version.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/version.go @@ -1,21 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // ServerVersion returns information about the docker client and server host. func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { resp, err := cli.get(ctx, "/version", nil, nil) + defer ensureReaderClosed(resp) if err != nil { return types.Version{}, err } var server types.Version err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) return server, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_create.go index 9620c87c..92761b3c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_create.go @@ -1,21 +1,21 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "github.com/docker/docker/api/types" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeCreate creates a volume in the docker host.
-func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumeCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) + defer ensureReaderClosed(resp) if err != nil { return volume, err } err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) return volume, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_inspect.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_inspect.go index 3860e9b2..e20b2c67 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_inspect.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_inspect.go @@ -1,13 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( "bytes" + "context" "encoding/json" "io/ioutil" - "net/http" "github.com/docker/docker/api/types" - "golang.org/x/net/context" ) // VolumeInspect returns the information about a specific volume in the docker host. @@ -18,15 +17,16 @@ func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Vo // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { + if volumeID == "" { + return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID} + } + var volume types.Volume resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, nil, volumeNotFoundError{volumeID} - } - return volume, nil, err - } defer ensureReaderClosed(resp) + if err != nil { + return volume, nil, wrapResponseError(err, resp, "volume", volumeID) + } body, err := ioutil.ReadAll(resp.body) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_list.go index 32247ce1..2380d563 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_list.go @@ -1,17 +1,17 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "net/url" "github.com/docker/docker/api/types/filters" volumetypes "github.com/docker/docker/api/types/volume" - "golang.org/x/net/context" ) // VolumeList returns the volumes configured in the docker host. 
-func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { - var volumes volumetypes.VolumesListOKBody +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumeListOKBody, error) { + var volumes volumetypes.VolumeListOKBody query := url.Values{} if filter.Len() > 0 { @@ -22,11 +22,11 @@ func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumet query.Set("filters", filterJSON) } resp, err := cli.get(ctx, "/volumes", query, nil) + defer ensureReaderClosed(resp) if err != nil { return volumes, err } err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) return volumes, err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_prune.go index 2e7fea77..6e324708 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_prune.go @@ -1,12 +1,12 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "encoding/json" "fmt" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" - "golang.org/x/net/context" ) // VolumesPrune requests the daemon to delete unused data @@ -23,10 +23,10 @@ func (cli *Client) VolumesPrune(ctx context.Context, pruneFilters filters.Args) } serverResp, err := cli.post(ctx, "/volumes/prune", query, nil, nil) + defer ensureReaderClosed(serverResp) if err != nil { return report, err } - defer ensureReaderClosed(serverResp) if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { return report, fmt.Errorf("Error retrieving volume prune report: %v", err) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_remove.go index 6c26575b..79decdaf 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/client/volume_remove.go @@ -1,10 +1,10 @@ -package client +package client // import "github.com/docker/docker/client" import ( + "context" "net/url" "github.com/docker/docker/api/types/versions" - "golang.org/x/net/context" ) // VolumeRemove removes a volume from the docker host. 
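One pattern repeats across all of these client hunks: `ensureReaderClosed` moves from the tail of each function into a `defer` placed immediately after the request, *before* the error check, so every return path (transport error, decode error, success) drains and closes the response body and the keep-alive connection can be reused. A standalone sketch of the same ordering with plain `net/http`; the `closeBody` and `getJSON` helpers are illustrative stand-ins under stated assumptions, not the client's real internals:

```go
package main

import (
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"net/http"
)

// closeBody approximates what ensureReaderClosed does in these hunks:
// drain a bounded amount and close, so the connection can be reused.
// It must tolerate a nil response, because a deferred call runs even
// when the request itself failed.
func closeBody(resp *http.Response) {
	if resp != nil && resp.Body != nil {
		io.CopyN(ioutil.Discard, resp.Body, 512)
		resp.Body.Close()
	}
}

// getJSON mirrors the ordering the diff converges on: defer the cleanup
// right after the call, *before* checking err.
func getJSON(ctx context.Context, url string, out interface{}) error {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req.WithContext(ctx))
	defer closeBody(resp) // runs on the error path and the decode path alike
	if err != nil {
		return err
	}
	return json.NewDecoder(resp.Body).Decode(out)
}

func main() {
	var v interface{}
	_ = getJSON(context.Background(), "http://example.com/", &v)
}
```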
@@ -16,6 +16,6 @@ func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool } } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) - ensureReaderClosed(resp) - return err + defer ensureReaderClosed(resp) + return wrapResponseError(err, resp, "volume", volumeID) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/config.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/config.go new file mode 100644 index 00000000..16b3475a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/config.go @@ -0,0 +1,78 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// ConfigFromGRPC converts a grpc Config to a Config. +func ConfigFromGRPC(s *swarmapi.Config) swarmtypes.Config { + config := swarmtypes.Config{ + ID: s.ID, + Spec: swarmtypes.ConfigSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + }, + } + + config.Version.Index = s.Meta.Version.Index + // Meta + config.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + config.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + if s.Spec.Templating != nil { + config.Spec.Templating = &types.Driver{ + Name: s.Spec.Templating.Name, + Options: s.Spec.Templating.Options, + } + } + + return config +} + +// ConfigSpecToGRPC converts Config to a grpc Config. +func ConfigSpecToGRPC(s swarmtypes.ConfigSpec) swarmapi.ConfigSpec { + spec := swarmapi.ConfigSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + } + + if s.Templating != nil { + spec.Templating = &swarmapi.Driver{ + Name: s.Templating.Name, + Options: s.Templating.Options, + } + } + + return spec +} + +// ConfigReferencesFromGRPC converts a slice of grpc ConfigReference to ConfigReference +func ConfigReferencesFromGRPC(s []*swarmapi.ConfigReference) []*swarmtypes.ConfigReference { + refs := []*swarmtypes.ConfigReference{} + + for _, r := range s { + ref := &swarmtypes.ConfigReference{ + ConfigID: r.ConfigID, + ConfigName: r.ConfigName, + } + + if t, ok := r.Target.(*swarmapi.ConfigReference_File); ok { + ref.File = &swarmtypes.ConfigReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/container.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/container.go new file mode 100644 index 00000000..c3eb28df --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/container.go @@ -0,0 +1,466 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "fmt" + "strings" + + "github.com/docker/docker/api/types/container" + mounttypes "github.com/docker/docker/api/types/mount" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +func containerSpecFromGRPC(c *swarmapi.ContainerSpec) *types.ContainerSpec { + if c == nil { + 
return nil + } + containerSpec := &types.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesFromGRPC(c.Secrets), + Configs: configReferencesFromGRPC(c.Configs), + Isolation: IsolationFromGRPC(c.Isolation), + Init: initFromGRPC(c.Init), + Sysctls: c.Sysctls, + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &types.DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &types.Privileges{} + + if c.Privileges.CredentialSpec != nil { + containerSpec.Privileges.CredentialSpec = credentialSpecFromGRPC(c.Privileges.CredentialSpec) + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &types.SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + // Mounts + for _, m := range c.Mounts { + mount := mounttypes.Mount{ + Target: m.Target, + Source: m.Source, + Type: mounttypes.Type(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), + ReadOnly: m.ReadOnly, + } + + if m.BindOptions != nil { + mount.BindOptions = &mounttypes.BindOptions{ + Propagation: mounttypes.Propagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &mounttypes.VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &mounttypes.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &mounttypes.TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.StopGracePeriod != nil { + grace, _ := gogotypes.DurationFromProto(c.StopGracePeriod) + containerSpec.StopGracePeriod = &grace + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigFromGRPC(c.Healthcheck) + } + + return containerSpec +} + +func initFromGRPC(v *gogotypes.BoolValue) *bool { + if v == nil { + return nil + } + value := v.GetValue() + return &value +} + +func initToGRPC(v *bool) *gogotypes.BoolValue { + if v == nil { + return nil + } + return &gogotypes.BoolValue{Value: *v} +} + +func secretReferencesToGRPC(sr []*types.SecretReference) []*swarmapi.SecretReference { + refs := make([]*swarmapi.SecretReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.SecretReference{ + SecretID: s.SecretID, + SecretName: s.SecretName, + } + if s.File != nil { + ref.Target = &swarmapi.SecretReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs +} + +func secretReferencesFromGRPC(sr []*swarmapi.SecretReference) []*types.SecretReference { + refs := make([]*types.SecretReference, 0, len(sr)) + for _, s := range sr { + target := s.GetFile() + if 
target == nil { + // not a file target + logrus.Warnf("secret target not a file: secret=%s", s.SecretID) + continue + } + refs = append(refs, &types.SecretReference{ + File: &types.SecretReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + }, + SecretID: s.SecretID, + SecretName: s.SecretName, + }) + } + + return refs +} + +func configReferencesToGRPC(sr []*types.ConfigReference) ([]*swarmapi.ConfigReference, error) { + refs := make([]*swarmapi.ConfigReference, 0, len(sr)) + for _, s := range sr { + ref := &swarmapi.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + switch { + case s.Runtime == nil && s.File == nil: + return nil, errors.New("either File or Runtime should be set") + case s.Runtime != nil && s.File != nil: + return nil, errors.New("cannot specify both File and Runtime") + case s.Runtime != nil: + // Runtime target was added in API v1.40 and takes precedence over + // File target. However, File and Runtime targets are mutually exclusive, + // so we should never have both. + ref.Target = &swarmapi.ConfigReference_Runtime{ + Runtime: &swarmapi.RuntimeTarget{}, + } + case s.File != nil: + ref.Target = &swarmapi.ConfigReference_File{ + File: &swarmapi.FileTarget{ + Name: s.File.Name, + UID: s.File.UID, + GID: s.File.GID, + Mode: s.File.Mode, + }, + } + } + + refs = append(refs, ref) + } + + return refs, nil +} + +func configReferencesFromGRPC(sr []*swarmapi.ConfigReference) []*types.ConfigReference { + refs := make([]*types.ConfigReference, 0, len(sr)) + for _, s := range sr { + + r := &types.ConfigReference{ + ConfigID: s.ConfigID, + ConfigName: s.ConfigName, + } + if target := s.GetRuntime(); target != nil { + r.Runtime = &types.ConfigReferenceRuntimeTarget{} + } else if target := s.GetFile(); target != nil { + r.File = &types.ConfigReferenceFileTarget{ + Name: target.Name, + UID: target.UID, + GID: target.GID, + Mode: target.Mode, + } + } else { + // not a file target + logrus.Warnf("config target not known: config=%s", s.ConfigID) + continue + } + refs = append(refs, r) + } + + return refs +} + +func containerToGRPC(c *types.ContainerSpec) (*swarmapi.ContainerSpec, error) { + containerSpec := &swarmapi.ContainerSpec{ + Image: c.Image, + Labels: c.Labels, + Command: c.Command, + Args: c.Args, + Hostname: c.Hostname, + Env: c.Env, + Dir: c.Dir, + User: c.User, + Groups: c.Groups, + StopSignal: c.StopSignal, + TTY: c.TTY, + OpenStdin: c.OpenStdin, + ReadOnly: c.ReadOnly, + Hosts: c.Hosts, + Secrets: secretReferencesToGRPC(c.Secrets), + Isolation: isolationToGRPC(c.Isolation), + Init: initToGRPC(c.Init), + Sysctls: c.Sysctls, + } + + if c.DNSConfig != nil { + containerSpec.DNSConfig = &swarmapi.ContainerSpec_DNSConfig{ + Nameservers: c.DNSConfig.Nameservers, + Search: c.DNSConfig.Search, + Options: c.DNSConfig.Options, + } + } + + if c.StopGracePeriod != nil { + containerSpec.StopGracePeriod = gogotypes.DurationProto(*c.StopGracePeriod) + } + + // Privileges + if c.Privileges != nil { + containerSpec.Privileges = &swarmapi.Privileges{} + + if c.Privileges.CredentialSpec != nil { + cs, err := credentialSpecToGRPC(c.Privileges.CredentialSpec) + if err != nil { + return nil, errors.Wrap(err, "invalid CredentialSpec") + } + containerSpec.Privileges.CredentialSpec = cs + } + + if c.Privileges.SELinuxContext != nil { + containerSpec.Privileges.SELinuxContext = &swarmapi.Privileges_SELinuxContext{ + Disable: c.Privileges.SELinuxContext.Disable, + User: c.Privileges.SELinuxContext.User, + Type: 
c.Privileges.SELinuxContext.Type, + Role: c.Privileges.SELinuxContext.Role, + Level: c.Privileges.SELinuxContext.Level, + } + } + } + + if c.Configs != nil { + configs, err := configReferencesToGRPC(c.Configs) + if err != nil { + return nil, errors.Wrap(err, "invalid Config") + } + containerSpec.Configs = configs + } + + // Mounts + for _, m := range c.Mounts { + mount := swarmapi.Mount{ + Target: m.Target, + Source: m.Source, + ReadOnly: m.ReadOnly, + } + + if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { + mount.Type = swarmapi.Mount_MountType(mountType) + } else if string(m.Type) != "" { + return nil, fmt.Errorf("invalid MountType: %q", m.Type) + } + + if m.BindOptions != nil { + if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { + mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} + } else if string(m.BindOptions.Propagation) != "" { + return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) + } + + if m.BindOptions.NonRecursive { + // TODO(AkihiroSuda): NonRecursive is unsupported for Swarm-mode now because of mutual vendoring + // across moby and swarmkit. Will be available soon after the moby PR gets merged. + return nil, fmt.Errorf("invalid NonRecursive: %q", m.BindOptions.Propagation) + } + } + + if m.VolumeOptions != nil { + mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ + NoCopy: m.VolumeOptions.NoCopy, + Labels: m.VolumeOptions.Labels, + } + if m.VolumeOptions.DriverConfig != nil { + mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ + Name: m.VolumeOptions.DriverConfig.Name, + Options: m.VolumeOptions.DriverConfig.Options, + } + } + } + + if m.TmpfsOptions != nil { + mount.TmpfsOptions = &swarmapi.Mount_TmpfsOptions{ + SizeBytes: m.TmpfsOptions.SizeBytes, + Mode: m.TmpfsOptions.Mode, + } + } + + containerSpec.Mounts = append(containerSpec.Mounts, mount) + } + + if c.Healthcheck != nil { + containerSpec.Healthcheck = healthConfigToGRPC(c.Healthcheck) + } + + return containerSpec, nil +} + +func credentialSpecFromGRPC(c *swarmapi.Privileges_CredentialSpec) *types.CredentialSpec { + cs := &types.CredentialSpec{} + switch c.Source.(type) { + case *swarmapi.Privileges_CredentialSpec_Config: + cs.Config = c.GetConfig() + case *swarmapi.Privileges_CredentialSpec_File: + cs.File = c.GetFile() + case *swarmapi.Privileges_CredentialSpec_Registry: + cs.Registry = c.GetRegistry() + } + return cs +} + +func credentialSpecToGRPC(c *types.CredentialSpec) (*swarmapi.Privileges_CredentialSpec, error) { + var opts []string + + if c.Config != "" { + opts = append(opts, `"config"`) + } + if c.File != "" { + opts = append(opts, `"file"`) + } + if c.Registry != "" { + opts = append(opts, `"registry"`) + } + l := len(opts) + switch { + case l == 0: + return nil, errors.New(`must either provide "file", "registry", or "config" for credential spec`) + case l == 2: + return nil, fmt.Errorf("cannot specify both %s and %s credential specs", opts[0], opts[1]) + case l > 2: + return nil, fmt.Errorf("cannot specify both %s, and %s credential specs", strings.Join(opts[:l-1], ", "), opts[l-1]) + } + + spec := &swarmapi.Privileges_CredentialSpec{} + switch { + case c.Config != "": + spec.Source = &swarmapi.Privileges_CredentialSpec_Config{ + Config: c.Config, + } + case c.File != "": + spec.Source = &swarmapi.Privileges_CredentialSpec_File{ + File: c.File, + } + case c.Registry != "": 
+ spec.Source = &swarmapi.Privileges_CredentialSpec_Registry{ + Registry: c.Registry, + } + } + + return spec, nil +} + +func healthConfigFromGRPC(h *swarmapi.HealthConfig) *container.HealthConfig { + interval, _ := gogotypes.DurationFromProto(h.Interval) + timeout, _ := gogotypes.DurationFromProto(h.Timeout) + startPeriod, _ := gogotypes.DurationFromProto(h.StartPeriod) + return &container.HealthConfig{ + Test: h.Test, + Interval: interval, + Timeout: timeout, + Retries: int(h.Retries), + StartPeriod: startPeriod, + } +} + +func healthConfigToGRPC(h *container.HealthConfig) *swarmapi.HealthConfig { + return &swarmapi.HealthConfig{ + Test: h.Test, + Interval: gogotypes.DurationProto(h.Interval), + Timeout: gogotypes.DurationProto(h.Timeout), + Retries: int32(h.Retries), + StartPeriod: gogotypes.DurationProto(h.StartPeriod), + } +} + +// IsolationFromGRPC converts a swarm api container isolation to a moby isolation representation +func IsolationFromGRPC(i swarmapi.ContainerSpec_Isolation) container.Isolation { + switch i { + case swarmapi.ContainerIsolationHyperV: + return container.IsolationHyperV + case swarmapi.ContainerIsolationProcess: + return container.IsolationProcess + case swarmapi.ContainerIsolationDefault: + return container.IsolationDefault + } + return container.IsolationEmpty +} + +func isolationToGRPC(i container.Isolation) swarmapi.ContainerSpec_Isolation { + if i.IsHyperV() { + return swarmapi.ContainerIsolationHyperV + } + if i.IsProcess() { + return swarmapi.ContainerIsolationProcess + } + return swarmapi.ContainerIsolationDefault +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/network.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/network.go new file mode 100644 index 00000000..34660fc4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/network.go @@ -0,0 +1,240 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "strings" + + basictypes "github.com/docker/docker/api/types" + networktypes "github.com/docker/docker/api/types/network" + types "github.com/docker/docker/api/types/swarm" + netconst "github.com/docker/libnetwork/datastore" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +func networkAttachmentFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { + if na != nil { + return types.NetworkAttachment{ + Network: networkFromGRPC(na.Network), + Addresses: na.Addresses, + } + } + return types.NetworkAttachment{} +} + +func networkFromGRPC(n *swarmapi.Network) types.Network { + if n != nil { + network := types.Network{ + ID: n.ID, + Spec: types.NetworkSpec{ + IPv6Enabled: n.Spec.Ipv6Enabled, + Internal: n.Spec.Internal, + Attachable: n.Spec.Attachable, + Ingress: IsIngressNetwork(n), + IPAMOptions: ipamFromGRPC(n.Spec.IPAM), + Scope: netconst.SwarmScope, + }, + IPAMOptions: ipamFromGRPC(n.IPAM), + } + + if n.Spec.GetNetwork() != "" { + network.Spec.ConfigFrom = &networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + + // Meta + network.Version.Index = n.Meta.Version.Index + network.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + network.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + network.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //DriverConfiguration + if n.Spec.DriverConfig != nil { + network.Spec.DriverConfiguration = 
&types.Driver{ + Name: n.Spec.DriverConfig.Name, + Options: n.Spec.DriverConfig.Options, + } + } + + //DriverState + if n.DriverState != nil { + network.DriverState = types.Driver{ + Name: n.DriverState.Name, + Options: n.DriverState.Options, + } + } + + return network + } + return types.Network{} +} + +func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { + var ipam *types.IPAMOptions + if i != nil { + ipam = &types.IPAMOptions{} + if i.Driver != nil { + ipam.Driver.Name = i.Driver.Name + ipam.Driver.Options = i.Driver.Options + } + + for _, config := range i.Configs { + ipam.Configs = append(ipam.Configs, types.IPAMConfig{ + Subnet: config.Subnet, + Range: config.Range, + Gateway: config.Gateway, + }) + } + } + return ipam +} + +func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { + var endpointSpec *types.EndpointSpec + if es != nil { + endpointSpec = &types.EndpointSpec{} + endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) + + for _, portState := range es.Ports { + endpointSpec.Ports = append(endpointSpec.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + } + return endpointSpec +} + +func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { + endpoint := types.Endpoint{} + if e != nil { + if espec := endpointSpecFromGRPC(e.Spec); espec != nil { + endpoint.Spec = *espec + } + + for _, portState := range e.Ports { + endpoint.Ports = append(endpoint.Ports, swarmPortConfigToAPIPortConfig(portState)) + } + + for _, v := range e.VirtualIPs { + endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ + NetworkID: v.NetworkID, + Addr: v.Addr}) + } + + } + + return endpoint +} + +func swarmPortConfigToAPIPortConfig(portConfig *swarmapi.PortConfig) types.PortConfig { + return types.PortConfig{ + Name: portConfig.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portConfig.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(portConfig.PublishMode)])), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + } +} + +// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. +func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { + spec := n.Spec + var ipam networktypes.IPAM + if n.IPAM != nil { + if n.IPAM.Driver != nil { + ipam.Driver = n.IPAM.Driver.Name + ipam.Options = n.IPAM.Driver.Options + } + ipam.Config = make([]networktypes.IPAMConfig, 0, len(n.IPAM.Configs)) + for _, ic := range n.IPAM.Configs { + ipamConfig := networktypes.IPAMConfig{ + Subnet: ic.Subnet, + IPRange: ic.Range, + Gateway: ic.Gateway, + AuxAddress: ic.Reserved, + } + ipam.Config = append(ipam.Config, ipamConfig) + } + } + + nr := basictypes.NetworkResource{ + ID: n.ID, + Name: n.Spec.Annotations.Name, + Scope: netconst.SwarmScope, + EnableIPv6: spec.Ipv6Enabled, + IPAM: ipam, + Internal: spec.Internal, + Attachable: spec.Attachable, + Ingress: IsIngressNetwork(&n), + Labels: n.Spec.Annotations.Labels, + } + nr.Created, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + + if n.Spec.GetNetwork() != "" { + nr.ConfigFrom = networktypes.ConfigReference{ + Network: n.Spec.GetNetwork(), + } + } + + if n.DriverState != nil { + nr.Driver = n.DriverState.Name + nr.Options = n.DriverState.Options + } + + return nr +} + +// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. 
+func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { + ns := swarmapi.NetworkSpec{ + Annotations: swarmapi.Annotations{ + Name: create.Name, + Labels: create.Labels, + }, + DriverConfig: &swarmapi.Driver{ + Name: create.Driver, + Options: create.Options, + }, + Ipv6Enabled: create.EnableIPv6, + Internal: create.Internal, + Attachable: create.Attachable, + Ingress: create.Ingress, + } + if create.IPAM != nil { + driver := create.IPAM.Driver + if driver == "" { + driver = "default" + } + ns.IPAM = &swarmapi.IPAMOptions{ + Driver: &swarmapi.Driver{ + Name: driver, + Options: create.IPAM.Options, + }, + } + ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) + for _, ipamConfig := range create.IPAM.Config { + ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ + Subnet: ipamConfig.Subnet, + Range: ipamConfig.IPRange, + Gateway: ipamConfig.Gateway, + }) + } + ns.IPAM.Configs = ipamSpec + } + if create.ConfigFrom != nil { + ns.ConfigFrom = &swarmapi.NetworkSpec_Network{ + Network: create.ConfigFrom.Network, + } + } + return ns +} + +// IsIngressNetwork checks whether the swarm network is an ingress network +func IsIngressNetwork(n *swarmapi.Network) bool { + if n.Spec.Ingress { + return true + } + // Check for a legacy-defined ingress network + _, ok := n.Spec.Annotations.Labels["com.docker.swarm.internal"] + return ok && n.Spec.Annotations.Name == "ingress" +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/node.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/node.go new file mode 100644 index 00000000..00636b6a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/node.go @@ -0,0 +1,94 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// NodeFromGRPC converts a grpc Node to a Node.
+func NodeFromGRPC(n swarmapi.Node) types.Node { + node := types.Node{ + ID: n.ID, + Spec: types.NodeSpec{ + Role: types.NodeRole(strings.ToLower(n.Spec.DesiredRole.String())), + Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), + }, + Status: types.NodeStatus{ + State: types.NodeState(strings.ToLower(n.Status.State.String())), + Message: n.Status.Message, + Addr: n.Status.Addr, + }, + } + + // Meta + node.Version.Index = n.Meta.Version.Index + node.CreatedAt, _ = gogotypes.TimestampFromProto(n.Meta.CreatedAt) + node.UpdatedAt, _ = gogotypes.TimestampFromProto(n.Meta.UpdatedAt) + + //Annotations + node.Spec.Annotations = annotationsFromGRPC(n.Spec.Annotations) + + //Description + if n.Description != nil { + node.Description.Hostname = n.Description.Hostname + if n.Description.Platform != nil { + node.Description.Platform.Architecture = n.Description.Platform.Architecture + node.Description.Platform.OS = n.Description.Platform.OS + } + if n.Description.Resources != nil { + node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs + node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes + node.Description.Resources.GenericResources = GenericResourcesFromGRPC(n.Description.Resources.Generic) + } + if n.Description.Engine != nil { + node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion + node.Description.Engine.Labels = n.Description.Engine.Labels + for _, plugin := range n.Description.Engine.Plugins { + node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) + } + } + if n.Description.TLSInfo != nil { + node.Description.TLSInfo.TrustRoot = string(n.Description.TLSInfo.TrustRoot) + node.Description.TLSInfo.CertIssuerPublicKey = n.Description.TLSInfo.CertIssuerPublicKey + node.Description.TLSInfo.CertIssuerSubject = n.Description.TLSInfo.CertIssuerSubject + } + } + + //Manager + if n.ManagerStatus != nil { + node.ManagerStatus = &types.ManagerStatus{ + Leader: n.ManagerStatus.Leader, + Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), + Addr: n.ManagerStatus.Addr, + } + } + + return node +} + +// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. 
+func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { + spec := swarmapi.NodeSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + } + if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { + spec.DesiredRole = swarmapi.NodeRole(role) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) + } + + if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { + spec.Availability = swarmapi.NodeSpec_Availability(availability) + } else { + return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) + } + + return spec, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go new file mode 100644 index 00000000..d0e5ac45 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/secret.go @@ -0,0 +1,80 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + swarmtypes "github.com/docker/docker/api/types/swarm" + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// SecretFromGRPC converts a grpc Secret to a Secret. +func SecretFromGRPC(s *swarmapi.Secret) swarmtypes.Secret { + secret := swarmtypes.Secret{ + ID: s.ID, + Spec: swarmtypes.SecretSpec{ + Annotations: annotationsFromGRPC(s.Spec.Annotations), + Data: s.Spec.Data, + Driver: driverFromGRPC(s.Spec.Driver), + }, + } + + secret.Version.Index = s.Meta.Version.Index + // Meta + secret.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + secret.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + if s.Spec.Templating != nil { + secret.Spec.Templating = &types.Driver{ + Name: s.Spec.Templating.Name, + Options: s.Spec.Templating.Options, + } + } + + return secret +} + +// SecretSpecToGRPC converts Secret to a grpc Secret. 
+func SecretSpecToGRPC(s swarmtypes.SecretSpec) swarmapi.SecretSpec { + spec := swarmapi.SecretSpec{ + Annotations: swarmapi.Annotations{ + Name: s.Name, + Labels: s.Labels, + }, + Data: s.Data, + Driver: driverToGRPC(s.Driver), + } + + if s.Templating != nil { + spec.Templating = &swarmapi.Driver{ + Name: s.Templating.Name, + Options: s.Templating.Options, + } + } + + return spec +} + +// SecretReferencesFromGRPC converts a slice of grpc SecretReference to SecretReference +func SecretReferencesFromGRPC(s []*swarmapi.SecretReference) []*swarmtypes.SecretReference { + refs := []*swarmtypes.SecretReference{} + + for _, r := range s { + ref := &swarmtypes.SecretReference{ + SecretID: r.SecretID, + SecretName: r.SecretName, + } + + if t, ok := r.Target.(*swarmapi.SecretReference_File); ok { + ref.File = &swarmtypes.SecretReferenceFileTarget{ + Name: t.File.Name, + UID: t.File.UID, + GID: t.File.GID, + Mode: t.File.Mode, + } + } + + refs = append(refs, ref) + } + + return refs +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/service.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/service.go new file mode 100644 index 00000000..2b723425 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/service.go @@ -0,0 +1,641 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + "github.com/docker/docker/api/types/swarm/runtime" + "github.com/docker/docker/pkg/namesgenerator" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/genericresource" + "github.com/gogo/protobuf/proto" + gogotypes "github.com/gogo/protobuf/types" + "github.com/pkg/errors" +) + +var ( + // ErrUnsupportedRuntime is returned when the runtime is not supported by the daemon + ErrUnsupportedRuntime = errors.New("unsupported runtime") + // ErrMismatchedRuntime is returned when the runtime does not match the provided spec + ErrMismatchedRuntime = errors.New("mismatched Runtime and *Spec fields") +) + +// ServiceFromGRPC converts a grpc Service to a Service.
+func ServiceFromGRPC(s swarmapi.Service) (types.Service, error) { + curSpec, err := serviceSpecFromGRPC(&s.Spec) + if err != nil { + return types.Service{}, err + } + prevSpec, err := serviceSpecFromGRPC(s.PreviousSpec) + if err != nil { + return types.Service{}, err + } + service := types.Service{ + ID: s.ID, + Spec: *curSpec, + PreviousSpec: prevSpec, + + Endpoint: endpointFromGRPC(s.Endpoint), + } + + // Meta + service.Version.Index = s.Meta.Version.Index + service.CreatedAt, _ = gogotypes.TimestampFromProto(s.Meta.CreatedAt) + service.UpdatedAt, _ = gogotypes.TimestampFromProto(s.Meta.UpdatedAt) + + // UpdateStatus + if s.UpdateStatus != nil { + service.UpdateStatus = &types.UpdateStatus{} + switch s.UpdateStatus.State { + case swarmapi.UpdateStatus_UPDATING: + service.UpdateStatus.State = types.UpdateStateUpdating + case swarmapi.UpdateStatus_PAUSED: + service.UpdateStatus.State = types.UpdateStatePaused + case swarmapi.UpdateStatus_COMPLETED: + service.UpdateStatus.State = types.UpdateStateCompleted + case swarmapi.UpdateStatus_ROLLBACK_STARTED: + service.UpdateStatus.State = types.UpdateStateRollbackStarted + case swarmapi.UpdateStatus_ROLLBACK_PAUSED: + service.UpdateStatus.State = types.UpdateStateRollbackPaused + case swarmapi.UpdateStatus_ROLLBACK_COMPLETED: + service.UpdateStatus.State = types.UpdateStateRollbackCompleted + } + + startedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.StartedAt) + if !startedAt.IsZero() && startedAt.Unix() != 0 { + service.UpdateStatus.StartedAt = &startedAt + } + + completedAt, _ := gogotypes.TimestampFromProto(s.UpdateStatus.CompletedAt) + if !completedAt.IsZero() && completedAt.Unix() != 0 { + service.UpdateStatus.CompletedAt = &completedAt + } + + service.UpdateStatus.Message = s.UpdateStatus.Message + } + + return service, nil +} + +func serviceSpecFromGRPC(spec *swarmapi.ServiceSpec) (*types.ServiceSpec, error) { + if spec == nil { + return nil, nil + } + + serviceNetworks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) + for _, n := range spec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + serviceNetworks = append(serviceNetworks, netConfig) + + } + + taskTemplate, err := taskSpecFromGRPC(spec.Task) + if err != nil { + return nil, err + } + + switch t := spec.Task.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container: + containerConfig := t.Container + taskTemplate.ContainerSpec = containerSpecFromGRPC(containerConfig) + taskTemplate.Runtime = types.RuntimeContainer + case *swarmapi.TaskSpec_Generic: + switch t.Generic.Kind { + case string(types.RuntimePlugin): + taskTemplate.Runtime = types.RuntimePlugin + default: + return nil, fmt.Errorf("unknown task runtime type: %s", t.Generic.Payload.TypeUrl) + } + + default: + return nil, fmt.Errorf("error creating service; unsupported runtime %T", t) + } + + convertedSpec := &types.ServiceSpec{ + Annotations: annotationsFromGRPC(spec.Annotations), + TaskTemplate: taskTemplate, + Networks: serviceNetworks, + EndpointSpec: endpointSpecFromGRPC(spec.Endpoint), + } + + // UpdateConfig + convertedSpec.UpdateConfig = updateConfigFromGRPC(spec.Update) + convertedSpec.RollbackConfig = updateConfigFromGRPC(spec.Rollback) + + // Mode + switch t := spec.GetMode().(type) { + case *swarmapi.ServiceSpec_Global: + convertedSpec.Mode.Global = &types.GlobalService{} + case *swarmapi.ServiceSpec_Replicated: + convertedSpec.Mode.Replicated = &types.ReplicatedService{ + Replicas: &t.Replicated.Replicas, + } 
+ } + + return convertedSpec, nil +} + +// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. +func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { + name := s.Name + if name == "" { + name = namesgenerator.GetRandomName(0) + } + + serviceNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.Networks)) + for _, n := range s.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + serviceNetworks = append(serviceNetworks, netConfig) + } + + taskNetworks := make([]*swarmapi.NetworkAttachmentConfig, 0, len(s.TaskTemplate.Networks)) + for _, n := range s.TaskTemplate.Networks { + netConfig := &swarmapi.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverAttachmentOpts: n.DriverOpts} + taskNetworks = append(taskNetworks, netConfig) + + } + + spec := swarmapi.ServiceSpec{ + Annotations: swarmapi.Annotations{ + Name: name, + Labels: s.Labels, + }, + Task: swarmapi.TaskSpec{ + Resources: resourcesToGRPC(s.TaskTemplate.Resources), + LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), + Networks: taskNetworks, + ForceUpdate: s.TaskTemplate.ForceUpdate, + }, + Networks: serviceNetworks, + } + + switch s.TaskTemplate.Runtime { + case types.RuntimeContainer, "": // if empty runtime default to container + if s.TaskTemplate.ContainerSpec != nil { + containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} + } else { + // If the ContainerSpec is nil, we can't set the task runtime + return swarmapi.ServiceSpec{}, ErrMismatchedRuntime + } + case types.RuntimePlugin: + if s.TaskTemplate.PluginSpec != nil { + if s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, errors.New("plugins must not use replicated mode") + } + + s.Mode.Global = &types.GlobalService{} // must always be global + + pluginSpec, err := proto.Marshal(s.TaskTemplate.PluginSpec) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Runtime = &swarmapi.TaskSpec_Generic{ + Generic: &swarmapi.GenericRuntimeSpec{ + Kind: string(types.RuntimePlugin), + Payload: &gogotypes.Any{ + TypeUrl: string(types.RuntimeURLPlugin), + Value: pluginSpec, + }, + }, + } + } else { + return swarmapi.ServiceSpec{}, ErrMismatchedRuntime + } + case types.RuntimeNetworkAttachment: + // NOTE(dperny) I'm leaving this case here for completeness. The actual + // code is left out deliberately, as we should refuse to parse a + // Network Attachment runtime; it will cause weird behavior all over + // the system if we do. Instead, fallthrough and return + // ErrUnsupportedRuntime if we get one. 
+ fallthrough + default: + return swarmapi.ServiceSpec{}, ErrUnsupportedRuntime + } + + restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Task.Restart = restartPolicy + + if s.TaskTemplate.Placement != nil { + var preferences []*swarmapi.PlacementPreference + for _, pref := range s.TaskTemplate.Placement.Preferences { + if pref.Spread != nil { + preferences = append(preferences, &swarmapi.PlacementPreference{ + Preference: &swarmapi.PlacementPreference_Spread{ + Spread: &swarmapi.SpreadOver{ + SpreadDescriptor: pref.Spread.SpreadDescriptor, + }, + }, + }) + } + } + var platforms []*swarmapi.Platform + for _, plat := range s.TaskTemplate.Placement.Platforms { + platforms = append(platforms, &swarmapi.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + spec.Task.Placement = &swarmapi.Placement{ + Constraints: s.TaskTemplate.Placement.Constraints, + Preferences: preferences, + MaxReplicas: s.TaskTemplate.Placement.MaxReplicas, + Platforms: platforms, + } + } + + spec.Update, err = updateConfigToGRPC(s.UpdateConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + spec.Rollback, err = updateConfigToGRPC(s.RollbackConfig) + if err != nil { + return swarmapi.ServiceSpec{}, err + } + + if s.EndpointSpec != nil { + if s.EndpointSpec.Mode != "" && + s.EndpointSpec.Mode != types.ResolutionModeVIP && + s.EndpointSpec.Mode != types.ResolutionModeDNSRR { + return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) + } + + spec.Endpoint = &swarmapi.EndpointSpec{} + + spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) + + for _, portConfig := range s.EndpointSpec.Ports { + spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ + Name: portConfig.Name, + Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), + PublishMode: swarmapi.PortConfig_PublishMode(swarmapi.PortConfig_PublishMode_value[strings.ToUpper(string(portConfig.PublishMode))]), + TargetPort: portConfig.TargetPort, + PublishedPort: portConfig.PublishedPort, + }) + } + } + + // Mode + if s.Mode.Global != nil && s.Mode.Replicated != nil { + return swarmapi.ServiceSpec{}, fmt.Errorf("cannot specify both replicated mode and global mode") + } + + if s.Mode.Global != nil { + spec.Mode = &swarmapi.ServiceSpec_Global{ + Global: &swarmapi.GlobalService{}, + } + } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, + } + } else { + spec.Mode = &swarmapi.ServiceSpec_Replicated{ + Replicated: &swarmapi.ReplicatedService{Replicas: 1}, + } + } + + return spec, nil +} + +func annotationsFromGRPC(ann swarmapi.Annotations) types.Annotations { + a := types.Annotations{ + Name: ann.Name, + Labels: ann.Labels, + } + + if a.Labels == nil { + a.Labels = make(map[string]string) + } + + return a +} + +// GenericResourcesFromGRPC converts a GRPC GenericResource to a GenericResource +func GenericResourcesFromGRPC(genericRes []*swarmapi.GenericResource) []types.GenericResource { + var generic []types.GenericResource + for _, res := range genericRes { + var current types.GenericResource + + switch r := res.Resource.(type) { + case *swarmapi.GenericResource_DiscreteResourceSpec: + 
current.DiscreteResourceSpec = &types.DiscreteGenericResource{ + Kind: r.DiscreteResourceSpec.Kind, + Value: r.DiscreteResourceSpec.Value, + } + case *swarmapi.GenericResource_NamedResourceSpec: + current.NamedResourceSpec = &types.NamedGenericResource{ + Kind: r.NamedResourceSpec.Kind, + Value: r.NamedResourceSpec.Value, + } + } + + generic = append(generic, current) + } + + return generic +} + +func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { + var resources *types.ResourceRequirements + if res != nil { + resources = &types.ResourceRequirements{} + if res.Limits != nil { + resources.Limits = &types.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + resources.Reservations = &types.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + GenericResources: GenericResourcesFromGRPC(res.Reservations.Generic), + } + } + } + + return resources +} + +// GenericResourcesToGRPC converts a GenericResource to a GRPC GenericResource +func GenericResourcesToGRPC(genericRes []types.GenericResource) []*swarmapi.GenericResource { + var generic []*swarmapi.GenericResource + for _, res := range genericRes { + var r *swarmapi.GenericResource + + if res.DiscreteResourceSpec != nil { + r = genericresource.NewDiscrete(res.DiscreteResourceSpec.Kind, res.DiscreteResourceSpec.Value) + } else if res.NamedResourceSpec != nil { + r = genericresource.NewString(res.NamedResourceSpec.Kind, res.NamedResourceSpec.Value) + } + + generic = append(generic, r) + } + + return generic +} + +func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { + var reqs *swarmapi.ResourceRequirements + if res != nil { + reqs = &swarmapi.ResourceRequirements{} + if res.Limits != nil { + reqs.Limits = &swarmapi.Resources{ + NanoCPUs: res.Limits.NanoCPUs, + MemoryBytes: res.Limits.MemoryBytes, + } + } + if res.Reservations != nil { + reqs.Reservations = &swarmapi.Resources{ + NanoCPUs: res.Reservations.NanoCPUs, + MemoryBytes: res.Reservations.MemoryBytes, + Generic: GenericResourcesToGRPC(res.Reservations.GenericResources), + } + + } + } + return reqs +} + +func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { + var rp *types.RestartPolicy + if p != nil { + rp = &types.RestartPolicy{} + + switch p.Condition { + case swarmapi.RestartOnNone: + rp.Condition = types.RestartPolicyConditionNone + case swarmapi.RestartOnFailure: + rp.Condition = types.RestartPolicyConditionOnFailure + case swarmapi.RestartOnAny: + rp.Condition = types.RestartPolicyConditionAny + default: + rp.Condition = types.RestartPolicyConditionAny + } + + if p.Delay != nil { + delay, _ := gogotypes.DurationFromProto(p.Delay) + rp.Delay = &delay + } + if p.Window != nil { + window, _ := gogotypes.DurationFromProto(p.Window) + rp.Window = &window + } + + rp.MaxAttempts = &p.MaxAttempts + } + return rp +} + +func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { + var rp *swarmapi.RestartPolicy + if p != nil { + rp = &swarmapi.RestartPolicy{} + + switch p.Condition { + case types.RestartPolicyConditionNone: + rp.Condition = swarmapi.RestartOnNone + case types.RestartPolicyConditionOnFailure: + rp.Condition = swarmapi.RestartOnFailure + case types.RestartPolicyConditionAny: + rp.Condition = swarmapi.RestartOnAny + default: + if string(p.Condition) != "" { + return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) + } + rp.Condition = 
swarmapi.RestartOnAny + } + + if p.Delay != nil { + rp.Delay = gogotypes.DurationProto(*p.Delay) + } + if p.Window != nil { + rp.Window = gogotypes.DurationProto(*p.Window) + } + if p.MaxAttempts != nil { + rp.MaxAttempts = *p.MaxAttempts + + } + } + return rp, nil +} + +func placementFromGRPC(p *swarmapi.Placement) *types.Placement { + if p == nil { + return nil + } + r := &types.Placement{ + Constraints: p.Constraints, + MaxReplicas: p.MaxReplicas, + } + + for _, pref := range p.Preferences { + if spread := pref.GetSpread(); spread != nil { + r.Preferences = append(r.Preferences, types.PlacementPreference{ + Spread: &types.SpreadOver{ + SpreadDescriptor: spread.SpreadDescriptor, + }, + }) + } + } + + for _, plat := range p.Platforms { + r.Platforms = append(r.Platforms, types.Platform{ + Architecture: plat.Architecture, + OS: plat.OS, + }) + } + + return r +} + +func driverFromGRPC(p *swarmapi.Driver) *types.Driver { + if p == nil { + return nil + } + + return &types.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func driverToGRPC(p *types.Driver) *swarmapi.Driver { + if p == nil { + return nil + } + + return &swarmapi.Driver{ + Name: p.Name, + Options: p.Options, + } +} + +func updateConfigFromGRPC(updateConfig *swarmapi.UpdateConfig) *types.UpdateConfig { + if updateConfig == nil { + return nil + } + + converted := &types.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + converted.Delay = updateConfig.Delay + if updateConfig.Monitor != nil { + converted.Monitor, _ = gogotypes.DurationFromProto(updateConfig.Monitor) + } + + switch updateConfig.FailureAction { + case swarmapi.UpdateConfig_PAUSE: + converted.FailureAction = types.UpdateFailureActionPause + case swarmapi.UpdateConfig_CONTINUE: + converted.FailureAction = types.UpdateFailureActionContinue + case swarmapi.UpdateConfig_ROLLBACK: + converted.FailureAction = types.UpdateFailureActionRollback + } + + switch updateConfig.Order { + case swarmapi.UpdateConfig_STOP_FIRST: + converted.Order = types.UpdateOrderStopFirst + case swarmapi.UpdateConfig_START_FIRST: + converted.Order = types.UpdateOrderStartFirst + } + + return converted +} + +func updateConfigToGRPC(updateConfig *types.UpdateConfig) (*swarmapi.UpdateConfig, error) { + if updateConfig == nil { + return nil, nil + } + + converted := &swarmapi.UpdateConfig{ + Parallelism: updateConfig.Parallelism, + Delay: updateConfig.Delay, + MaxFailureRatio: updateConfig.MaxFailureRatio, + } + + switch updateConfig.FailureAction { + case types.UpdateFailureActionPause, "": + converted.FailureAction = swarmapi.UpdateConfig_PAUSE + case types.UpdateFailureActionContinue: + converted.FailureAction = swarmapi.UpdateConfig_CONTINUE + case types.UpdateFailureActionRollback: + converted.FailureAction = swarmapi.UpdateConfig_ROLLBACK + default: + return nil, fmt.Errorf("unrecognized update failure action %s", updateConfig.FailureAction) + } + if updateConfig.Monitor != 0 { + converted.Monitor = gogotypes.DurationProto(updateConfig.Monitor) + } + + switch updateConfig.Order { + case types.UpdateOrderStopFirst, "": + converted.Order = swarmapi.UpdateConfig_STOP_FIRST + case types.UpdateOrderStartFirst: + converted.Order = swarmapi.UpdateConfig_START_FIRST + default: + return nil, fmt.Errorf("unrecognized update order %s", updateConfig.Order) + } + + return converted, nil +} + +func networkAttachmentSpecFromGRPC(attachment swarmapi.NetworkAttachmentSpec) *types.NetworkAttachmentSpec { + return &types.NetworkAttachmentSpec{ + 
ContainerID: attachment.ContainerID, + } +} + +func taskSpecFromGRPC(taskSpec swarmapi.TaskSpec) (types.TaskSpec, error) { + taskNetworks := make([]types.NetworkAttachmentConfig, 0, len(taskSpec.Networks)) + for _, n := range taskSpec.Networks { + netConfig := types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases, DriverOpts: n.DriverAttachmentOpts} + taskNetworks = append(taskNetworks, netConfig) + } + + t := types.TaskSpec{ + Resources: resourcesFromGRPC(taskSpec.Resources), + RestartPolicy: restartPolicyFromGRPC(taskSpec.Restart), + Placement: placementFromGRPC(taskSpec.Placement), + LogDriver: driverFromGRPC(taskSpec.LogDriver), + Networks: taskNetworks, + ForceUpdate: taskSpec.ForceUpdate, + } + + switch taskSpec.GetRuntime().(type) { + case *swarmapi.TaskSpec_Container, nil: + c := taskSpec.GetContainer() + if c != nil { + t.ContainerSpec = containerSpecFromGRPC(c) + } + case *swarmapi.TaskSpec_Generic: + g := taskSpec.GetGeneric() + if g != nil { + switch g.Kind { + case string(types.RuntimePlugin): + var p runtime.PluginSpec + if err := proto.Unmarshal(g.Payload.Value, &p); err != nil { + return t, errors.Wrap(err, "error unmarshalling plugin spec") + } + t.PluginSpec = &p + } + } + case *swarmapi.TaskSpec_Attachment: + a := taskSpec.GetAttachment() + if a != nil { + t.NetworkAttachmentSpec = networkAttachmentSpecFromGRPC(*a) + } + t.Runtime = types.RuntimeNetworkAttachment + } + + return t, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go new file mode 100644 index 00000000..bdad2a5e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/swarm.go @@ -0,0 +1,150 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "fmt" + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca" + gogotypes "github.com/gogo/protobuf/types" +) + +// SwarmFromGRPC converts a grpc Cluster to a Swarm. 
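+//
+// Illustrative use only (editorial, not part of the vendored file; cluster
+// stands in for any swarmapi.Cluster):
+//
+//	sw := SwarmFromGRPC(cluster)
+//	fmt.Println(sw.ID, sw.JoinTokens.Worker, sw.Spec.Raft.SnapshotInterval)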
+func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { + swarm := types.Swarm{ + ClusterInfo: types.ClusterInfo{ + ID: c.ID, + Spec: types.Spec{ + Orchestration: types.OrchestrationConfig{ + TaskHistoryRetentionLimit: &c.Spec.Orchestration.TaskHistoryRetentionLimit, + }, + Raft: types.RaftConfig{ + SnapshotInterval: c.Spec.Raft.SnapshotInterval, + KeepOldSnapshots: &c.Spec.Raft.KeepOldSnapshots, + LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, + HeartbeatTick: int(c.Spec.Raft.HeartbeatTick), + ElectionTick: int(c.Spec.Raft.ElectionTick), + }, + EncryptionConfig: types.EncryptionConfig{ + AutoLockManagers: c.Spec.EncryptionConfig.AutoLockManagers, + }, + CAConfig: types.CAConfig{ + // do not include the signing CA cert or key (it should already be redacted via the swarm APIs) - + // the key because it's secret, and the cert because otherwise doing a get + update on the spec + // can cause issues because the key would be missing and the cert wouldn't + ForceRotate: c.Spec.CAConfig.ForceRotate, + }, + }, + TLSInfo: types.TLSInfo{ + TrustRoot: string(c.RootCA.CACert), + }, + RootRotationInProgress: c.RootCA.RootRotation != nil, + DefaultAddrPool: c.DefaultAddressPool, + SubnetSize: c.SubnetSize, + DataPathPort: c.VXLANUDPPort, + }, + JoinTokens: types.JoinTokens{ + Worker: c.RootCA.JoinTokens.Worker, + Manager: c.RootCA.JoinTokens.Manager, + }, + } + + issuerInfo, err := ca.IssuerFromAPIRootCA(&c.RootCA) + if err == nil && issuerInfo != nil { + swarm.TLSInfo.CertIssuerSubject = issuerInfo.Subject + swarm.TLSInfo.CertIssuerPublicKey = issuerInfo.PublicKey + } + + heartbeatPeriod, _ := gogotypes.DurationFromProto(c.Spec.Dispatcher.HeartbeatPeriod) + swarm.Spec.Dispatcher.HeartbeatPeriod = heartbeatPeriod + + swarm.Spec.CAConfig.NodeCertExpiry, _ = gogotypes.DurationFromProto(c.Spec.CAConfig.NodeCertExpiry) + + for _, ca := range c.Spec.CAConfig.ExternalCAs { + swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ + Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), + URL: ca.URL, + Options: ca.Options, + CACert: string(ca.CACert), + }) + } + + // Meta + swarm.Version.Index = c.Meta.Version.Index + swarm.CreatedAt, _ = gogotypes.TimestampFromProto(c.Meta.CreatedAt) + swarm.UpdatedAt, _ = gogotypes.TimestampFromProto(c.Meta.UpdatedAt) + + // Annotations + swarm.Spec.Annotations = annotationsFromGRPC(c.Spec.Annotations) + + return swarm +} + +// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. +func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { + return MergeSwarmSpecToGRPC(s, swarmapi.ClusterSpec{}) +} + +// MergeSwarmSpecToGRPC merges a Spec with an initial grpc ClusterSpec +func MergeSwarmSpecToGRPC(s types.Spec, spec swarmapi.ClusterSpec) (swarmapi.ClusterSpec, error) { + // We take the initSpec (either created from scratch, or returned by swarmkit), + // and will only change the value if the one taken from types.Spec is not nil or 0. + // In other words, if the value taken from types.Spec is nil or 0, we will maintain the status quo. 
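+	// Editorial sketch of the merge semantics (not upstream code; current is
+	// a placeholder for an existing swarmapi.ClusterSpec):
+	//
+	//	var s types.Spec
+	//	s.Raft.SnapshotInterval = 10000
+	//	merged, _ := MergeSwarmSpecToGRPC(s, current)
+	//
+	// merged keeps current's values for the zero-valued fields of s; note
+	// that ForceRotate and AutoLockManagers below are copied unconditionally.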
+ if s.Annotations.Name != "" { + spec.Annotations.Name = s.Annotations.Name + } + if len(s.Annotations.Labels) != 0 { + spec.Annotations.Labels = s.Annotations.Labels + } + + if s.Orchestration.TaskHistoryRetentionLimit != nil { + spec.Orchestration.TaskHistoryRetentionLimit = *s.Orchestration.TaskHistoryRetentionLimit + } + if s.Raft.SnapshotInterval != 0 { + spec.Raft.SnapshotInterval = s.Raft.SnapshotInterval + } + if s.Raft.KeepOldSnapshots != nil { + spec.Raft.KeepOldSnapshots = *s.Raft.KeepOldSnapshots + } + if s.Raft.LogEntriesForSlowFollowers != 0 { + spec.Raft.LogEntriesForSlowFollowers = s.Raft.LogEntriesForSlowFollowers + } + if s.Raft.HeartbeatTick != 0 { + spec.Raft.HeartbeatTick = uint32(s.Raft.HeartbeatTick) + } + if s.Raft.ElectionTick != 0 { + spec.Raft.ElectionTick = uint32(s.Raft.ElectionTick) + } + if s.Dispatcher.HeartbeatPeriod != 0 { + spec.Dispatcher.HeartbeatPeriod = gogotypes.DurationProto(s.Dispatcher.HeartbeatPeriod) + } + if s.CAConfig.NodeCertExpiry != 0 { + spec.CAConfig.NodeCertExpiry = gogotypes.DurationProto(s.CAConfig.NodeCertExpiry) + } + if s.CAConfig.SigningCACert != "" { + spec.CAConfig.SigningCACert = []byte(s.CAConfig.SigningCACert) + } + if s.CAConfig.SigningCAKey != "" { + // do propagate the signing CA key here because we want to provide it TO the swarm APIs + spec.CAConfig.SigningCAKey = []byte(s.CAConfig.SigningCAKey) + } + spec.CAConfig.ForceRotate = s.CAConfig.ForceRotate + + for _, ca := range s.CAConfig.ExternalCAs { + protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] + if !ok { + return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) + } + spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ + Protocol: swarmapi.ExternalCA_CAProtocol(protocol), + URL: ca.URL, + Options: ca.Options, + CACert: []byte(ca.CACert), + }) + } + + spec.EncryptionConfig.AutoLockManagers = s.EncryptionConfig.AutoLockManagers + + return spec, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/task.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/task.go new file mode 100644 index 00000000..72e2805e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/daemon/cluster/convert/task.go @@ -0,0 +1,69 @@ +package convert // import "github.com/docker/docker/daemon/cluster/convert" + +import ( + "strings" + + types "github.com/docker/docker/api/types/swarm" + swarmapi "github.com/docker/swarmkit/api" + gogotypes "github.com/gogo/protobuf/types" +) + +// TaskFromGRPC converts a grpc Task to a Task. 
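+//
+// Editorial example (not part of the vendored file): enum-like fields come
+// back lower-cased, so a task reported as RUNNING converts as
+//
+//	task, _ := TaskFromGRPC(t)
+//	// task.Status.State == types.TaskState("running")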
+func TaskFromGRPC(t swarmapi.Task) (types.Task, error) { + containerStatus := t.Status.GetContainer() + taskSpec, err := taskSpecFromGRPC(t.Spec) + if err != nil { + return types.Task{}, err + } + task := types.Task{ + ID: t.ID, + Annotations: annotationsFromGRPC(t.Annotations), + ServiceID: t.ServiceID, + Slot: int(t.Slot), + NodeID: t.NodeID, + Spec: taskSpec, + Status: types.TaskStatus{ + State: types.TaskState(strings.ToLower(t.Status.State.String())), + Message: t.Status.Message, + Err: t.Status.Err, + }, + DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), + GenericResources: GenericResourcesFromGRPC(t.AssignedGenericResources), + } + + // Meta + task.Version.Index = t.Meta.Version.Index + task.CreatedAt, _ = gogotypes.TimestampFromProto(t.Meta.CreatedAt) + task.UpdatedAt, _ = gogotypes.TimestampFromProto(t.Meta.UpdatedAt) + + task.Status.Timestamp, _ = gogotypes.TimestampFromProto(t.Status.Timestamp) + + if containerStatus != nil { + task.Status.ContainerStatus = &types.ContainerStatus{ + ContainerID: containerStatus.ContainerID, + PID: int(containerStatus.PID), + ExitCode: int(containerStatus.ExitCode), + } + } + + // NetworksAttachments + for _, na := range t.Networks { + task.NetworksAttachments = append(task.NetworksAttachments, networkAttachmentFromGRPC(na)) + } + + if t.Status.PortStatus == nil { + return task, nil + } + + for _, p := range t.Status.PortStatus.Ports { + task.Status.PortStatus.Ports = append(task.Status.PortStatus.Ports, types.PortConfig{ + Name: p.Name, + Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(p.Protocol)])), + PublishMode: types.PortConfigPublishMode(strings.ToLower(swarmapi.PortConfig_PublishMode_name[int32(p.PublishMode)])), + TargetPort: p.TargetPort, + PublishedPort: p.PublishedPort, + }) + } + + return task, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 00000000..61e7456b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. 
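+//
+// Editorial sketch of how these marker interfaces pair with the helper
+// constructors and Is* predicates defined alongside them in this package:
+//
+//	err := System(errors.New("mount failed"))
+//	IsSystem(err) // true, even through Cause() chains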
+type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 00000000..c211f174 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). +package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 00000000..c9916e01 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,227 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +// Unauthorized is a helper 
to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + 
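+	// Editorial sketch (not upstream): a timed-out context classifies as a
+	// deadline error:
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
+	//	defer cancel()
+	//	<-ctx.Done()
+	//	err := FromContext(ctx) // err satisfies ErrDeadline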
if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 00000000..ac9bf6d3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,172 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "fmt" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetHTTPErrorStatusCode retrieves status code from error message. +func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + + // Stop right there + // Are you sure you should be adding a new error class here? Do one of the existing ones work? + + // Note that the below functions are already checking the error causal chain for matches. + switch { + case IsNotFound(err): + statusCode = http.StatusNotFound + case IsInvalidParameter(err): + statusCode = http.StatusBadRequest + case IsConflict(err): + statusCode = http.StatusConflict + case IsUnauthorized(err): + statusCode = http.StatusUnauthorized + case IsUnavailable(err): + statusCode = http.StatusServiceUnavailable + case IsForbidden(err): + statusCode = http.StatusForbidden + case IsNotModified(err): + statusCode = http.StatusNotModified + case IsNotImplemented(err): + statusCode = http.StatusNotImplemented + case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): + statusCode = http.StatusInternalServerError + default: + statusCode = statusCodeFromGRPCError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + statusCode = statusCodeFromDistributionError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + if e, ok := err.(causer); ok { + return GetHTTPErrorStatusCode(e.Cause()) + } + + logrus.WithFields(logrus.Fields{ + "module": "api", + "error_type": fmt.Sprintf("%T", err), + }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +// FromStatusCode creates an errdef error, based on the provided HTTP status-code +func FromStatusCode(err error, statusCode int) error { + if err == nil { + return err + } + switch statusCode { + case http.StatusNotFound: + err = NotFound(err) + case http.StatusBadRequest: + err = InvalidParameter(err) + case http.StatusConflict: + err = Conflict(err) + case http.StatusUnauthorized: + err = Unauthorized(err) + case http.StatusServiceUnavailable: + err = Unavailable(err) + case http.StatusForbidden: + err = Forbidden(err) + case http.StatusNotModified: + err = NotModified(err) + case http.StatusNotImplemented: + err = NotImplemented(err) + case http.StatusInternalServerError: + if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { + err = System(err) + } + default: + logrus.WithFields(logrus.Fields{ + "module": "api", + "status_code": fmt.Sprintf("%d", statusCode), + }).Debugf("FIXME: 
Got a status-code for which error does not match any expected type!!!: %d", statusCode)
+
+		switch {
+		case statusCode >= 200 && statusCode < 400:
+			// 2xx and 3xx responses carry no error; leave err as-is
+		case statusCode >= 400 && statusCode < 500:
+			err = InvalidParameter(err)
+		case statusCode >= 500 && statusCode < 600:
+			err = System(err)
+		default:
+			err = Unknown(err)
+		}
+	}
+	return err
+}
+
+// statusCodeFromGRPCError returns status code according to gRPC error
+func statusCodeFromGRPCError(err error) int {
+	switch status.Code(err) {
+	case codes.InvalidArgument: // code 3
+		return http.StatusBadRequest
+	case codes.NotFound: // code 5
+		return http.StatusNotFound
+	case codes.AlreadyExists: // code 6
+		return http.StatusConflict
+	case codes.PermissionDenied: // code 7
+		return http.StatusForbidden
+	case codes.FailedPrecondition: // code 9
+		return http.StatusBadRequest
+	case codes.Unauthenticated: // code 16
+		return http.StatusUnauthorized
+	case codes.OutOfRange: // code 11
+		return http.StatusBadRequest
+	case codes.Unimplemented: // code 12
+		return http.StatusNotImplemented
+	case codes.Unavailable: // code 14
+		return http.StatusServiceUnavailable
+	default:
+		if e, ok := err.(causer); ok {
+			return statusCodeFromGRPCError(e.Cause())
+		}
+		// codes.Canceled(1)
+		// codes.Unknown(2)
+		// codes.DeadlineExceeded(4)
+		// codes.ResourceExhausted(8)
+		// codes.Aborted(10)
+		// codes.Internal(13)
+		// codes.DataLoss(15)
+		return http.StatusInternalServerError
+	}
+}
+
+// statusCodeFromDistributionError returns status code according to registry errcode
+// code is loosely based on errcode.ServeJSON() in docker/distribution
+func statusCodeFromDistributionError(err error) int {
+	switch errs := err.(type) {
+	case errcode.Errors:
+		if len(errs) < 1 {
+			return http.StatusInternalServerError
+		}
+		if _, ok := errs[0].(errcode.ErrorCoder); ok {
+			return statusCodeFromDistributionError(errs[0])
+		}
+	case errcode.ErrorCoder:
+		return errs.ErrorCode().Descriptor().HTTPStatusCode
+	default:
+		if e, ok := err.(causer); ok {
+			return statusCodeFromDistributionError(e.Cause())
+		}
+	}
+	return http.StatusInternalServerError
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/is.go
new file mode 100644
index 00000000..3abf07d0
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/errdefs/is.go
@@ -0,0 +1,107 @@
+package errdefs // import "github.com/docker/docker/errdefs"
+
+type causer interface {
+	Cause() error
+}
+
+func getImplementer(err error) error {
+	switch e := err.(type) {
+	case
+		ErrNotFound,
+		ErrInvalidParameter,
+		ErrConflict,
+		ErrUnauthorized,
+		ErrUnavailable,
+		ErrForbidden,
+		ErrSystem,
+		ErrNotModified,
+		ErrNotImplemented,
+		ErrCancelled,
+		ErrDeadline,
+		ErrDataLoss,
+		ErrUnknown:
+		return err
+	case causer:
+		return getImplementer(e.Cause())
+	default:
+		return err
+	}
+}
+
+// IsNotFound returns if the passed in error is an ErrNotFound
+func IsNotFound(err error) bool {
+	_, ok := getImplementer(err).(ErrNotFound)
+	return ok
+}
+
+// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter
+func IsInvalidParameter(err error) bool {
+	_, ok := getImplementer(err).(ErrInvalidParameter)
+	return ok
+}
+
+// IsConflict returns if the passed in error is an ErrConflict
+func IsConflict(err error) bool {
+	_, ok := getImplementer(err).(ErrConflict)
+	return ok
+}
+
+// IsUnauthorized returns if the passed in error is an
ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/address_pools.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/address_pools.go new file mode 100644 index 00000000..9b27a628 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/address_pools.go @@ -0,0 +1,84 @@ +package opts + +import ( + "encoding/csv" + "encoding/json" + "fmt" + "strconv" + "strings" + + types "github.com/docker/libnetwork/ipamutils" +) + +// PoolsOpt is a Value type for parsing the default address pools definitions +type PoolsOpt struct { + values []*types.NetworkToSplit +} + +// UnmarshalJSON fills values structure info from JSON input +func (p *PoolsOpt) UnmarshalJSON(raw []byte) error { + return json.Unmarshal(raw, &(p.values)) +} + +// Set predefined pools +func (p *PoolsOpt) Set(value string) error { + csvReader := csv.NewReader(strings.NewReader(value)) + fields, err := csvReader.Read() + if err != nil { + return err + } + + poolsDef := types.NetworkToSplit{} + + for _, field := range fields { + parts := strings.SplitN(field, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid field '%s' must be a key=value pair", field) + } + + key := strings.ToLower(parts[0]) + value := strings.ToLower(parts[1]) + + switch key { + case "base": + poolsDef.Base = value + case "size": + size, err := strconv.Atoi(value) + if err != nil { + return fmt.Errorf("invalid size value: %q (must be integer): %v", value, err) + } + poolsDef.Size = size + default: + return fmt.Errorf("unexpected key '%s' in '%s'", key, field) + } + } + + p.values = append(p.values, &poolsDef) + + return nil +} + +// Type returns the type of this option +func (p *PoolsOpt) Type() string { + return "pool-options" +} + +// String returns a string repr of this option +func (p *PoolsOpt) String() string { + var pools []string + for _, pool := range p.values { + repr := 
fmt.Sprintf("%s %d", pool.Base, pool.Size) + pools = append(pools, repr) + } + return strings.Join(pools, ", ") +} + +// Value returns the mounts +func (p *PoolsOpt) Value() []*types.NetworkToSplit { + return p.values +} + +// Name returns the flag name of this option +func (p *PoolsOpt) Name() string { + return "default-address-pools" +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/env.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/env.go index e6ddd733..f6e5e907 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/env.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/env.go @@ -1,10 +1,12 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" "os" "runtime" "strings" + + "github.com/pkg/errors" ) // ValidateEnv validates an environment variable and returns it. @@ -18,7 +20,7 @@ import ( func ValidateEnv(val string) (string, error) { arr := strings.Split(val, "=") if arr[0] == "" { - return "", fmt.Errorf("invalid environment variable: %s", val) + return "", errors.Errorf("invalid environment variable: %s", val) } if len(arr) > 1 { return val, nil diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts.go index 594cccf2..a6f2662d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts.go @@ -1,11 +1,14 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" "net" "net/url" + "path/filepath" "strconv" "strings" + + "github.com/docker/docker/pkg/homedir" ) var ( @@ -29,9 +32,9 @@ var ( // ValidateHost validates that the specified string is a valid host and returns it. func ValidateHost(val string) (string, error) { host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost + // The empty string means default and is not handled by parseDaemonHost if host != "" { - _, err := parseDockerDaemonHost(host) + _, err := parseDaemonHost(host) if err != nil { return val, err } @@ -41,18 +44,26 @@ func ValidateHost(val string) (string, error) { return val, nil } -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { +// ParseHost and set defaults for a Daemon host string. +// defaultToTLS is preferred over defaultToUnixXDG. +func ParseHost(defaultToTLS, defaultToUnixXDG bool, val string) (string, error) { host := strings.TrimSpace(val) if host == "" { if defaultToTLS { host = DefaultTLSHost + } else if defaultToUnixXDG { + runtimeDir, err := homedir.GetRuntimeDir() + if err != nil { + return "", err + } + socket := filepath.Join(runtimeDir, "docker.sock") + host = "unix://" + socket } else { host = DefaultHost } } else { var err error - host, err = parseDockerDaemonHost(host) + host, err = parseDaemonHost(host) if err != nil { return val, err } @@ -60,9 +71,9 @@ func ParseHost(defaultToTLS bool, val string) (string, error) { return host, nil } -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. +// parseDaemonHost parses the specified address and returns an address that will be used as the host. // Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. 
-func parseDockerDaemonHost(addr string) (string, error) { +func parseDaemonHost(addr string) (string, error) { addrParts := strings.SplitN(addr, "://", 2) if len(addrParts) == 1 && addrParts[0] != "" { addrParts = []string{"tcp", addrParts[0]} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_unix.go index 611407a9..9d5bb645 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_unix.go @@ -1,6 +1,6 @@ // +build !windows -package opts +package opts // import "github.com/docker/docker/opts" import "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_windows.go index 7c239e00..906eba53 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/hosts_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package opts +package opts // import "github.com/docker/docker/opts" // DefaultHost constant defines the default host string used by docker on Windows var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ip.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ip.go index fb03b501..cfbff3a9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ip.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ip.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" @@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { } // Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. +// string is not parsable as an IP address it returns an error. 
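+//
+// Editorial example (not part of the vendored file):
+//
+//	var ip net.IP
+//	o := NewIPOpt(&ip, "127.0.0.1")
+//	if err := o.Set("::1"); err == nil {
+//		// ip now holds the IPv6 loopback address
+//	}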
func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/mount.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/mount.go deleted file mode 100644 index d4ccf838..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/mount.go +++ /dev/null @@ -1,173 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "strconv" - "strings" - - mounttypes "github.com/docker/docker/api/types/mount" - "github.com/docker/go-units" -) - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []mounttypes.Mount -} - -// Set a new mount value -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := mounttypes.Mount{} - - volumeOptions := func() *mounttypes.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &mounttypes.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &mounttypes.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *mounttypes.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(mounttypes.BindOptions) - } - return mount.BindOptions - } - - tmpfsOptions := func() *mounttypes.TmpfsOptions { - if mount.TmpfsOptions == nil { - mount.TmpfsOptions = new(mounttypes.TmpfsOptions) - } - return mount.TmpfsOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = mounttypes.TypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = mounttypes.Type(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "consistency": - mount.Consistency = mounttypes.Consistency(strings.ToLower(value)) - case "bind-propagation": - bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for volume-nocopy: %s", value) - } - case "volume-label": - setValueOnMap(volumeOptions().Labels, value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - case "tmpfs-size": - sizeBytes, err := units.RAMInBytes(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().SizeBytes = sizeBytes - case 
"tmpfs-mode": - ui64, err := strconv.ParseUint(value, 8, 32) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - tmpfsOptions().Mode = os.FileMode(ui64) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume { - return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type) - } - if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type) - } - if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs { - return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []mounttypes.Mount { - return m.values -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts.go index f76f3080..de8aacb8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts.go @@ -1,15 +1,13 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" - "math/big" "net" "path" "regexp" "strings" - "github.com/docker/docker/api/types/filters" - units "github.com/docker/go-units" + "github.com/docker/go-units" ) var ( @@ -54,7 +52,7 @@ func (opts *ListOpts) Set(value string) error { } value = v } - (*opts.values) = append((*opts.values), value) + *opts.values = append(*opts.values, value) return nil } @@ -62,7 +60,7 @@ func (opts *ListOpts) Set(value string) error { func (opts *ListOpts) Delete(key string) { for i, k := range *opts.values { if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) + *opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...) return } } @@ -80,7 +78,7 @@ func (opts *ListOpts) GetMap() map[string]struct{} { // GetAll returns the values of slice. func (opts *ListOpts) GetAll() []string { - return (*opts.values) + return *opts.values } // GetAllOrEmpty returns the values of the slice @@ -105,7 +103,7 @@ func (opts *ListOpts) Get(key string) bool { // Len returns the amount of element in the slice. func (opts *ListOpts) Len() int { - return len((*opts.values)) + return len(*opts.values) } // Type returns a string name for this Option type @@ -179,7 +177,7 @@ func (opts *MapOpts) GetAll() map[string]string { } func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) + return fmt.Sprintf("%v", opts.values) } // Type returns a string name for this Option type @@ -236,15 +234,6 @@ func ValidateIPAddress(val string) (string, error) { return "", fmt.Errorf("%s is not an ip address", val) } -// ValidateMACAddress validates a MAC address. 
-func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -} - // ValidateDNSSearch validates domain for resolvconf search configuration. // A zero length domain is represented by a dot (.). func ValidateDNSSearch(val string) (string, error) { @@ -274,112 +263,14 @@ func ValidateLabel(val string) (string, error) { return val, nil } -// ValidateSysctl validates a sysctl and returns it. -func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, +// ValidateSingleGenericResource validates that a single entry in the +// generic resource list is valid. +// i.e 'GPU=UID1' is valid however 'GPU:UID1' or 'UID1' isn't +func ValidateSingleGenericResource(val string) (string, error) { + if strings.Count(val, "=") < 1 { + return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val) } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} - -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} - -func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) - if err != nil { - return "invalid filters" - } - return repr -} - -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} - -// NanoCPUs is a type for fixed point fractional number. 
-type NanoCPUs int64 - -// String returns the string format of the number -func (c *NanoCPUs) String() string { - if *c == 0 { - return "" - } - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -// Set sets the value of the NanoCPU by passing a string -func (c *NanoCPUs) Set(value string) error { - cpus, err := ParseCPUs(value) - *c = NanoCPUs(cpus) - return err -} - -// Type returns the type -func (c *NanoCPUs) Type() string { - return "decimal" -} - -// Value returns the value in int64 -func (c *NanoCPUs) Value() int64 { - return int64(*c) -} - -// ParseCPUs takes a string ratio and returns an integer value of nano cpus -func ParseCPUs(value string) (int64, error) { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return 0, fmt.Errorf("failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return 0, fmt.Errorf("value is too precise") - } - return nano.Num().Int64(), nil + return val, nil } // ParseLink parses and validates the specified string as a link format (name:alias) @@ -404,12 +295,6 @@ func ParseLink(val string) (string, string, error) { return arr[0], arr[1], nil } -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - _, _, err := ParseLink(val) - return val, err -} - // MemBytes is a type for human readable memory bytes (like 128M, 2g, etc) type MemBytes int64 @@ -450,39 +335,3 @@ func (m *MemBytes) UnmarshalJSON(s []byte) error { *m = MemBytes(val) return err } - -// MemSwapBytes is a type for human readable memory bytes (like 128M, 2g, etc). -// It differs from MemBytes in that -1 is valid and the default. -type MemSwapBytes int64 - -// Set sets the value of the MemSwapBytes by passing a string -func (m *MemSwapBytes) Set(value string) error { - if value == "-1" { - *m = MemSwapBytes(-1) - return nil - } - val, err := units.RAMInBytes(value) - *m = MemSwapBytes(val) - return err -} - -// Type returns the type -func (m *MemSwapBytes) Type() string { - return "bytes" -} - -// Value returns the value in int64 -func (m *MemSwapBytes) Value() int64 { - return int64(*m) -} - -func (m *MemSwapBytes) String() string { - b := MemBytes(*m) - return b.String() -} - -// UnmarshalJSON is the customized unmarshaler for MemSwapBytes -func (m *MemSwapBytes) UnmarshalJSON(s []byte) error { - b := MemBytes(*m) - return b.UnmarshalJSON(s) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_unix.go index 2766a43a..0c32367c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_unix.go @@ -1,6 +1,6 @@ // +build !windows -package opts +package opts // import "github.com/docker/docker/opts" // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
dockerd -H tcp://:8080 const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_windows.go index 98b7251a..0e1b6c6d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/opts_windows.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" // TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. // @jhowardmsft, @swernli. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/port.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/port.go deleted file mode 100644 index 152683c9..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/port.go +++ /dev/null @@ -1,162 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/api/types/swarm" - "github.com/docker/go-connections/nat" -) - -const ( - portOptTargetPort = "target" - portOptPublishedPort = "published" - portOptProtocol = "protocol" - portOptMode = "mode" -) - -// PortOpt represents a port config in swarm mode. -type PortOpt struct { - ports []swarm.PortConfig -} - -// Set a new port value -func (p *PortOpt) Set(value string) error { - longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value) - if err != nil { - return err - } - if longSyntax { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - pConfig := swarm.PortConfig{} - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid field %s", field) - } - - key := strings.ToLower(parts[0]) - value := strings.ToLower(parts[1]) - - switch key { - case portOptProtocol: - if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) { - return fmt.Errorf("invalid protocol value %s", value) - } - - pConfig.Protocol = swarm.PortConfigProtocol(value) - case portOptMode: - if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) { - return fmt.Errorf("invalid publish mode value %s", value) - } - - pConfig.PublishMode = swarm.PortConfigPublishMode(value) - case portOptTargetPort: - tPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.TargetPort = uint32(tPort) - case portOptPublishedPort: - pPort, err := strconv.ParseUint(value, 10, 16) - if err != nil { - return err - } - - pConfig.PublishedPort = uint32(pPort) - default: - return fmt.Errorf("invalid field key %s", key) - } - } - - if pConfig.TargetPort == 0 { - return fmt.Errorf("missing mandatory field %q", portOptTargetPort) - } - - if pConfig.PublishMode == "" { - pConfig.PublishMode = swarm.PortConfigPublishModeIngress - } - - if pConfig.Protocol == "" { - pConfig.Protocol = swarm.PortConfigProtocolTCP - } - - p.ports = append(p.ports, pConfig) - } else { - // short syntax - portConfigs := []swarm.PortConfig{} - ports, portBindingMap, err := nat.ParsePortSpecs([]string{value}) - if err != nil { - return err - } - for _, portBindings := range portBindingMap { - for _, portBinding := range portBindings { - if portBinding.HostIP != "" { - return fmt.Errorf("HostIP is not supported.") - } - } - } - - for 
port := range ports { - portConfig, err := ConvertPortToPortConfig(port, portBindingMap) - if err != nil { - return err - } - portConfigs = append(portConfigs, portConfig...) - } - p.ports = append(p.ports, portConfigs...) - } - return nil -} - -// Type returns the type of this option -func (p *PortOpt) Type() string { - return "port" -} - -// String returns a string repr of this option -func (p *PortOpt) String() string { - ports := []string{} - for _, port := range p.ports { - repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode) - ports = append(ports, repr) - } - return strings.Join(ports, ", ") -} - -// Value returns the ports -func (p *PortOpt) Value() []swarm.PortConfig { - return p.ports -} - -// ConvertPortToPortConfig converts ports to the swarm type -func ConvertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) ([]swarm.PortConfig, error) { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16) - if err != nil && binding.HostPort != "" { - return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port()) - } - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? - Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - PublishMode: swarm.PortConfigPublishModeIngress, - }) - } - return ports, nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/quotedstring.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/quotedstring.go index fb1e5374..6c889070 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/quotedstring.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/quotedstring.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" // QuotedString is a string that may have extra quotes around the value. The // quotes are stripped from the value. 
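For reference, a minimal standalone sketch of the behavior QuotedString documents above: one pair of matching single or double quotes is stripped from the value. The trimQuotes body below is an assumption inferred from the doc comment, not a copy of the vendored implementation.

package main

import "fmt"

// trimQuotes strips one pair of matching leading/trailing quotes, if present.
func trimQuotes(value string) string {
	if len(value) >= 2 {
		first, last := value[0], value[len(value)-1]
		if first == last && (first == '"' || first == '\'') {
			return value[1 : len(value)-1]
		}
	}
	return value
}

func main() {
	fmt.Println(trimQuotes(`"hello"`)) // hello
	fmt.Println(trimQuotes("'hi'"))    // hi
	fmt.Println(trimQuotes("plain"))   // plain
}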
@@ -18,7 +18,7 @@ func (s *QuotedString) Type() string { } func (s *QuotedString) String() string { - return string(*s.value) + return *s.value } func trimQuotes(value string) string { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/runtime.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/runtime.go index 4361b3ce..4b9babf0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/runtime.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/runtime.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/secret.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/secret.go deleted file mode 100644 index 56ed29eb..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/secret.go +++ /dev/null @@ -1,103 +0,0 @@ -package opts - -import ( - "encoding/csv" - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - - swarmtypes "github.com/docker/docker/api/types/swarm" -) - -// SecretOpt is a Value type for parsing secrets -type SecretOpt struct { - values []*swarmtypes.SecretReference -} - -// Set a new secret value -func (o *SecretOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - options := &swarmtypes.SecretReference{ - File: &swarmtypes.SecretReferenceFileTarget{ - UID: "0", - GID: "0", - Mode: 0444, - }, - } - - // support a simple syntax of --secret foo - if len(fields) == 1 { - options.File.Name = fields[0] - options.SecretName = fields[0] - o.values = append(o.values, options) - return nil - } - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "source", "src": - options.SecretName = value - case "target": - tDir, _ := filepath.Split(value) - if tDir != "" { - return fmt.Errorf("target must not be a path") - } - options.File.Name = value - case "uid": - options.File.UID = value - case "gid": - options.File.GID = value - case "mode": - m, err := strconv.ParseUint(value, 0, 32) - if err != nil { - return fmt.Errorf("invalid mode specified: %v", err) - } - - options.File.Mode = os.FileMode(m) - default: - return fmt.Errorf("invalid field in secret request: %s", key) - } - } - - if options.SecretName == "" { - return fmt.Errorf("source is required") - } - - o.values = append(o.values, options) - return nil -} - -// Type returns the type of this option -func (o *SecretOpt) Type() string { - return "secret" -} - -// String returns a string repr of this option -func (o *SecretOpt) String() string { - secrets := []string{} - for _, secret := range o.values { - repr := fmt.Sprintf("%s -> %s", secret.SecretName, secret.File.Name) - secrets = append(secrets, repr) - } - return strings.Join(secrets, ", ") -} - -// Value returns the secret requests -func (o *SecretOpt) Value() []*swarmtypes.SecretReference { - return o.values -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/throttledevice.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/throttledevice.go deleted file mode 100644 index 65dd3ebf..00000000 --- 
a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/throttledevice.go +++ /dev/null @@ -1,111 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. -func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - throttledevice = append(throttledevice, opt.values...)
- - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "list" -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ulimit.go index 5adfe308..0e2a3623 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ulimit.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/ulimit.go @@ -1,4 +1,4 @@ -package opts +package opts // import "github.com/docker/docker/opts" import ( "fmt" @@ -55,3 +55,27 @@ func (o *UlimitOpt) GetList() []*units.Ulimit { func (o *UlimitOpt) Type() string { return "ulimit" } + +// NamedUlimitOpt defines a named map of Ulimits +type NamedUlimitOpt struct { + name string + UlimitOpt +} + +var _ NamedOption = &NamedUlimitOpt{} + +// NewNamedUlimitOpt creates a new NamedUlimitOpt +func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt { + if ref == nil { + ref = &map[string]*units.Ulimit{} + } + return &NamedUlimitOpt{ + name: name, + UlimitOpt: *NewUlimitOpt(ref), + } +} + +// Name returns the option name +func (o *NamedUlimitOpt) Name() string { + return o.name +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/weightdevice.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/weightdevice.go deleted file mode 100644 index 7e3d064f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/docker/api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. -func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. 
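The removed ValidateWeightDevice above encodes a small contract: a "/dev/..." path, a colon, and a weight that is either 0 or within [10, 1000]. A self-contained restatement of those checks for reference; validateWeightDevice here is a local illustration, not the vendored function.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// validateWeightDevice mirrors the checks in the removed validator:
// a "/dev/..." path, a colon, and a weight of 0 or within [10, 1000].
func validateWeightDevice(val string) (string, uint16, error) {
	split := strings.SplitN(val, ":", 2)
	if len(split) != 2 || !strings.HasPrefix(split[0], "/dev/") {
		return "", 0, fmt.Errorf("bad format: %s", val)
	}
	w, err := strconv.ParseUint(split[1], 10, 0)
	if err != nil || (w > 0 && (w < 10 || w > 1000)) {
		return "", 0, fmt.Errorf("invalid weight for device: %s", val)
	}
	return split[0], uint16(w), nil
}

func main() {
	fmt.Println(validateWeightDevice("/dev/sda:500")) // /dev/sda 500 <nil>
	fmt.Println(validateWeightDevice("/dev/sda:5"))   // rejected: below 10
}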
-func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "list" -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index a129e654..34f1c726 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -1,4 +1,4 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "errors" @@ -13,7 +13,7 @@ import ( "github.com/sirupsen/logrus" ) -// PatternMatcher allows checking paths agaist a list of patterns +// PatternMatcher allows checking paths against a list of patterns type PatternMatcher struct { patterns []*Pattern exclusions bool @@ -106,7 +106,7 @@ func (pm *PatternMatcher) Patterns() []*Pattern { return pm.patterns } -// Pattern defines a single regexp used used to filter file paths. +// Pattern defines a single regexp used to filter file paths. type Pattern struct { cleanedPattern string dirs []string diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go index ccd648fa..e40cc271 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go @@ -1,4 +1,4 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "os" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. 
-// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go index 9e0e97bd..565396f1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -1,6 +1,6 @@ // +build linux freebsd -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" import ( "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go index 5ec21cac..3f1ebb65 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go @@ -1,4 +1,4 @@ -package fileutils +package fileutils // import "github.com/docker/docker/pkg/fileutils" // GetTotalUsedFds Returns the number of used File Descriptors. Not supported // on Windows. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go index 012fe52a..47ecd0c0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -1,9 +1,10 @@ -// +build linux - -package homedir +package homedir // import "github.com/docker/docker/pkg/homedir" import ( + "errors" "os" + "path/filepath" + "strings" "github.com/docker/docker/pkg/idtools" ) @@ -21,3 +22,88 @@ func GetStatic() (string, error) { } return usr.Home, nil } + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go index 6b96b856..f0a363de 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -1,6 +1,6 @@ // +build !linux -package homedir +package homedir // import "github.com/docker/docker/pkg/homedir" import ( "errors" @@ -11,3 +11,23 @@ import ( func GetStatic() (string, error) { return "", errors.New("homedir.GetStatic() is not supported on this system") } + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. 
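The linux implementations added above follow a common XDG base-directory pattern: honor the environment override, fall back to a $HOME-derived default, and fail only when neither is available. A standalone sketch of the same fallback order for GetDataHome:

package main

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
)

// dataHome mirrors the fallback order of the new homedir.GetDataHome:
// prefer $XDG_DATA_HOME, else derive $HOME/.local/share, else fail.
func dataHome() (string, error) {
	if xdg := os.Getenv("XDG_DATA_HOME"); xdg != "" {
		return xdg, nil
	}
	home := os.Getenv("HOME")
	if home == "" {
		return "", errors.New("could not get either XDG_DATA_HOME or HOME")
	}
	return filepath.Join(home, ".local", "share"), nil
}

func main() {
	dir, err := dataHome()
	fmt.Println(dir, err)
}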
+func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go similarity index 75% rename from vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir.go rename to vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go index 8154e83f..d85e1244 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -1,8 +1,9 @@ -package homedir +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" import ( "os" - "runtime" "github.com/opencontainers/runc/libcontainer/user" ) @@ -10,9 +11,6 @@ import ( // Key returns the env var name for the user's home dir based on // the platform being run on func Key() string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } return "HOME" } @@ -21,7 +19,7 @@ func Key() string { // Returned path should be used with "path/filepath" to form new paths. func Get() string { home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { + if home == "" { if u, err := user.CurrentUser(); err == nil { return u.Home } @@ -32,8 +30,5 @@ func Get() string { // GetShortcutString returns the string that is shortcut to user's home directory // in the native shell of the platform running on. func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } return "~" } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 00000000..2f81813b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
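Before this refactor, homedir selected the environment variable with a runtime.GOOS check, as seen in the removed lines above; the split into homedir_unix.go and homedir_windows.go moves that decision to build time. The old behavior, condensed for reference:

package main

import (
	"fmt"
	"os"
	"runtime"
)

// homeKey reproduces the pre-refactor runtime check: USERPROFILE on
// Windows, HOME everywhere else.
func homeKey() string {
	if runtime.GOOS == "windows" {
		return "USERPROFILE"
	}
	return "HOME"
}

func main() {
	fmt.Println(homeKey(), "=", os.Getenv(homeKey()))
}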
+func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools.go index 6bca4662..b3af7a42 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools.go @@ -1,10 +1,9 @@ -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bufio" "fmt" "os" - "sort" "strconv" "strings" ) @@ -30,56 +29,50 @@ func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" + subuidFileName = "/etc/subuid" + subgidFileName = "/etc/subgid" ) -// MkdirAllAs creates a directory (include any along the path) and then modifies +// MkdirAllAndChown creates a directory (include any along the path) and then modifies // ownership to the requested uid/gid. If the directory already exists, this // function will still change ownership to the requested uid/gid pair. -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) +func MkdirAllAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, true) } -// MkdirAllNewAs creates a directory (include any along the path) and then modifies +// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. +// If the directory already exists, this function still changes ownership. +// Note that unlike os.Mkdir(), this function does not return IsExist error +// in case path already exists. +func MkdirAndChown(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, false, true) +} + +// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies // ownership ONLY of newly created directories to the requested uid/gid. If the // directories along the path exist, no change of ownership will be performed -func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, false) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) +func MkdirAllAndChownNew(path string, mode os.FileMode, owner Identity) error { + return mkdirAs(path, mode, owner, true, false) } // GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
// If the maps are empty, then the root uid/gid will default to "real" 0/0 func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - - if uidMap != nil { - xUID, err := ToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - uid = xUID + uid, err := toHost(0, uidMap) + if err != nil { + return -1, -1, err } - if gidMap != nil { - xGID, err := ToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - gid = xGID + gid, err := toHost(0, gidMap) + if err != nil { + return -1, -1, err } return uid, gid, nil } -// ToContainer takes an id mapping, and uses it to translate a +// toContainer takes an id mapping, and uses it to translate a // host ID to the remapped ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id -func ToContainer(hostID int, idMap []IDMap) (int, error) { +func toContainer(hostID int, idMap []IDMap) (int, error) { if idMap == nil { return hostID, nil } @@ -92,10 +85,10 @@ func ToContainer(hostID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID) } -// ToHost takes an id mapping and a remapped ID, and translates the +// toHost takes an id mapping and a remapped ID, and translates the // ID to the mapped host ID. If no map is provided, then the translation // assumes a 1-to-1 mapping and returns the passed in id # -func ToHost(contID int, idMap []IDMap) (int, error) { +func toHost(contID int, idMap []IDMap) (int, error) { if idMap == nil { return contID, nil } @@ -108,33 +101,107 @@ func ToHost(contID int, idMap []IDMap) (int, error) { return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID) } -// CreateIDMappings takes a requested user and group name and +// Identity is either a UID and GID pair or a SID (but not both) +type Identity struct { + UID int + GID int + SID string +} + +// IdentityMapping contains a mappings of UIDs and GIDs +type IdentityMapping struct { + uids []IDMap + gids []IDMap +} + +// NewIdentityMapping takes a requested user and group name and // using the data from /etc/sub{uid,gid} ranges, creates the // proper uid and gid remapping ranges for that user/group pair -func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) { +func NewIdentityMapping(username, groupname string) (*IdentityMapping, error) { subuidRanges, err := parseSubuid(username) if err != nil { - return nil, nil, err + return nil, err } subgidRanges, err := parseSubgid(groupname) if err != nil { - return nil, nil, err + return nil, err } if len(subuidRanges) == 0 { - return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username) + return nil, fmt.Errorf("No subuid ranges found for user %q", username) } if len(subgidRanges) == 0 { - return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname) + return nil, fmt.Errorf("No subgid ranges found for group %q", groupname) } - return createIDMap(subuidRanges), createIDMap(subgidRanges), nil + return &IdentityMapping{ + uids: createIDMap(subuidRanges), + gids: createIDMap(subgidRanges), + }, nil +} + +// NewIDMappingsFromMaps creates a new mapping from two slices +// Deprecated: this is a temporary shim while transitioning to IDMapping +func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IdentityMapping { + return &IdentityMapping{uids: uids, gids: gids} +} + +// RootPair returns a uid and gid pair for the root user. 
The error is ignored +// because a root user always exists, and the defaults are correct when the uid +// and gid maps are empty. +func (i *IdentityMapping) RootPair() Identity { + uid, gid, _ := GetRootUIDGID(i.uids, i.gids) + return Identity{UID: uid, GID: gid} +} + +// ToHost returns the host UID and GID for the container uid, gid. +// Remapping is only performed if the ids aren't already the remapped root ids +func (i *IdentityMapping) ToHost(pair Identity) (Identity, error) { + var err error + target := i.RootPair() + + if pair.UID != target.UID { + target.UID, err = toHost(pair.UID, i.uids) + if err != nil { + return target, err + } + } + + if pair.GID != target.GID { + target.GID, err = toHost(pair.GID, i.gids) + } + return target, err +} + +// ToContainer returns the container UID and GID for the host uid and gid +func (i *IdentityMapping) ToContainer(pair Identity) (int, int, error) { + uid, err := toContainer(pair.UID, i.uids) + if err != nil { + return -1, -1, err + } + gid, err := toContainer(pair.GID, i.gids) + return uid, gid, err +} + +// Empty returns true if there are no id mappings +func (i *IdentityMapping) Empty() bool { + return len(i.uids) == 0 && len(i.gids) == 0 +} + +// UIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) UIDs() []IDMap { + return i.uids +} + +// GIDs return the UID mapping +// TODO: remove this once everything has been refactored to use pairs +func (i *IdentityMapping) GIDs() []IDMap { + return i.gids } func createIDMap(subidRanges ranges) []IDMap { idMap := []IDMap{} - // sort the ranges by lowest ID first - sort.Sort(subidRanges) containerID := 0 for _, idrange := range subidRanges { idMap = append(idMap, IDMap{ diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go index 7c7e82ae..fb239743 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go @@ -1,6 +1,6 @@ // +build !windows -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "bytes" @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" "sync" + "syscall" "github.com/docker/docker/pkg/system" "github.com/opencontainers/runc/libcontainer/user" @@ -20,20 +21,29 @@ var ( getentCmd string ) -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { // make an array containing the original path asked for, plus (for mkAll == true) // all path components leading up to the complete path that don't exist before we MkdirAll // so that we can chown all of them properly at the end. 
If chownExisting is false, we won't // chown the full directory path if it exists + var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { + + stat, err := system.Stat(path) + if err == nil { + if !stat.IsDir() { + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + if !chownExisting { + return nil + } + // short-circuit--we were called with an existing directory and chown was requested - return os.Chown(path, ownerUID, ownerGID) - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil + return lazyChown(path, owner.UID, owner.GID, stat) + } + + if os.IsNotExist(err) { + paths = []string{path} } if mkAll { @@ -49,7 +59,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown paths = append(paths, dirPath) } } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { + if err := system.MkdirAll(path, mode, ""); err != nil { return err } } else { @@ -60,7 +70,7 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // even if it existed, we will chown the requested path + any subpaths that // didn't exist when we called MkdirAll for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { + if err := lazyChown(pathComponent, owner.UID, owner.GID, nil); err != nil { return err } } @@ -69,15 +79,15 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, uid, gid int) bool { +func CanAccess(path string, pair Identity) bool { statInfo, err := system.Stat(path) if err != nil { return false } fileMode := os.FileMode(statInfo.Mode()) permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(uid), - statInfo.GID() == uint32(gid), permBits) + return accessible(statInfo.UID() == uint32(pair.UID), + statInfo.GID() == uint32(pair.GID), permBits) } func accessible(isOwner, isGroup bool, perms os.FileMode) bool { @@ -202,3 +212,20 @@ func callGetent(args string) (io.Reader, error) { } return bytes.NewReader(out), nil } + +// lazyChown performs a chown only if the uid/gid don't match what's requested +// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the +// dir is on an NFS share, so don't call chown unless we absolutely must. 
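The comment above explains the motivation for lazyChown: Chown can fail on some filesystems (e.g. NFS) even when ownership already matches, so the syscall is skipped when stat shows the requested owner. A stdlib-only sketch of the same idea; chownIfNeeded is a local illustration using os.Stat in place of the vendored system.Stat wrapper.

// +build !windows

package main

import (
	"fmt"
	"os"
	"syscall"
)

// chownIfNeeded skips the Chown syscall when the file already has the
// requested owner, avoiding spurious errors on filesystems such as NFS.
func chownIfNeeded(path string, uid, gid int) error {
	fi, err := os.Stat(path)
	if err != nil {
		return err
	}
	if st, ok := fi.Sys().(*syscall.Stat_t); ok &&
		st.Uid == uint32(uid) && st.Gid == uint32(gid) {
		return nil // already owned as requested; no syscall needed
	}
	return os.Chown(path, uid, gid)
}

func main() {
	fmt.Println(chownIfNeeded("/tmp", os.Getuid(), os.Getgid()))
}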
+func lazyChown(p string, uid, gid int, stat *system.StatT) error { + if stat == nil { + var err error + stat, err = system.Stat(p) + if err != nil { + return err + } + } + if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { + return nil + } + return os.Chown(p, uid, gid) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go index 49f67e78..4ae38a1b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "os" @@ -8,10 +6,12 @@ import ( "github.com/docker/docker/pkg/system" ) -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { +// This is currently a wrapper around MkdirAll, however, since currently +// permissions aren't set through this path, the identity isn't utilized. +// Ownership is handled elsewhere, but in the future could be support here +// too. +func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting bool) error { + if err := system.MkdirAll(path, mode, ""); err != nil { return err } return nil @@ -20,6 +20,6 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown // CanAccess takes a valid (existing) directory and a uid, gid pair and determines // if that uid, gid pair has access (execute bit) to the directory // Windows does not require/support this function, so always return true -func CanAccess(path string, uid, gid int) bool { +func CanAccess(path string, identity Identity) bool { return true } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go index 9da7975e..6272c5a4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go @@ -1,4 +1,4 @@ -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go index d98b354c..e7c4d631 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go index 9703ecbd..903ac450 100644 --- 
a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/idtools/utils_unix.go @@ -1,6 +1,6 @@ // +build !windows -package idtools +package idtools // import "github.com/docker/docker/pkg/idtools" import ( "fmt" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/buffer.go index 3d737b3e..466f7929 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/buffer.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "errors" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go index 72a04f34..d4bbf3c9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "errors" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go index a56c4626..534d66ac 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/multireader.go deleted file mode 100644 index edb043dd..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/multireader.go +++ /dev/null @@ -1,224 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - 
r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - - var offsetTo int64 - - for _, rdr := range r.readers { - size, err := getReadSeekerSize(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo+size > offset { - return rdr, offset - offsetTo, nil - } - if rdr == r.readers[len(r.readers)-1] { - return rdr, offsetTo + offset, nil - } - offsetTo += size - } - - return nil, 0, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - // make sure all readers are at 0 - r.Seek(0, os.SEEK_SET) - } - - bLen := int64(len(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bLen) - if err != nil && err != io.EOF { - return -1, err - } - bLen -= readBytes - - if bLen == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. 
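The removed getReadSeekerSize above uses a save/measure/restore idiom worth noting: remember the current offset, seek to the end to learn the size, then restore the offset so the caller's position is unchanged. The same pattern, restated with the modern io.Seek* constants:

package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeOf restates the save/measure/restore pattern of the removed helper:
// save the current offset, seek to the end to learn the size, seek back.
func sizeOf(rs io.ReadSeeker) (int64, error) {
	cur, err := rs.Seek(0, io.SeekCurrent)
	if err != nil {
		return -1, err
	}
	size, err := rs.Seek(0, io.SeekEnd)
	if err != nil {
		return -1, err
	}
	if _, err := rs.Seek(cur, io.SeekStart); err != nil {
		return -1, err
	}
	return size, nil
}

func main() {
	r := strings.NewReader("hello, world")
	fmt.Println(sizeOf(r)) // 12 <nil>
}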
-func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/readers.go index 63f3c07f..1f657bd3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -1,25 +1,28 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( + "context" "crypto/sha256" "encoding/hex" "io" - - "golang.org/x/net/context" ) -type readCloserWrapper struct { +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { io.Reader closer func() error } -func (r *readCloserWrapper) Close() error { +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { return r.closer() } // NewReadCloserWrapper returns a new io.ReadCloser. func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ + return &ReadCloserWrapper{ Reader: r, closer: closer, } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go index 1539ad21..dc894f91 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go @@ -1,6 +1,6 @@ // +build !windows -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import "io/ioutil" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go index c258e5fd..ecaba2e3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io/ioutil" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go index 52a4901a..91b8d182 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import ( "io" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writers.go index ccc7f9c2..61c67949 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writers.go +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/ioutils/writers.go @@ -1,4 +1,4 @@ -package ioutils +package ioutils // import "github.com/docker/docker/pkg/ioutils" import "io" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c311..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. -func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. 
-// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. -func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index 0ba716f2..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. -func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 21173381..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. 
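The deleted time_marshalling.go below pre-wraps RFC3339Nano in quotes so a single time.Format call yields a JSON-ready string, and rejects years outside [0,9999] because RFC 3339 requires exactly four year digits. A runnable condensation of that approach:

package main

import (
	"errors"
	"fmt"
	"time"
)

// jsonFormat wraps RFC3339Nano in quotes so Format emits a string that can
// be embedded in JSON output directly, with no extra allocation.
const jsonFormat = `"` + time.RFC3339Nano + `"`

func fastTimeMarshalJSON(t time.Time) (string, error) {
	if y := t.Year(); y < 0 || y >= 10000 {
		// RFC 3339 requires exactly four year digits.
		return "", errors.New("year outside of range [0,9999]")
	}
	return t.Format(jsonFormat), nil
}

func main() {
	s, err := fastTimeMarshalJSON(time.Date(2019, time.November, 1, 12, 0, 0, 0, time.UTC))
	fmt.Println(s, err) // "2019-11-01T12:00:00Z" <nil>
}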
-package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC339Nano because we want one - // that pads the nano seconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/longpath/longpath.go index 9b15bfff..4177affb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/longpath/longpath.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -2,7 +2,7 @@ // in Windows, which are expected to be prepended with `\\?\` and followed by either // a drive letter, a UNC server\share, or a volume identifier. -package longpath +package longpath // import "github.com/docker/docker/pkg/longpath" import ( "strings" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags.go index 607dbed4..ffd47331 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "fmt" @@ -135,15 +135,3 @@ func parseOptions(options string) (int, string) { } return flag, strings.Join(data, ",") } - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go index 5f76f331..ef35ef90 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_freebsd.go @@ -1,6 +1,6 @@ // +build freebsd,cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" /* #include diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_linux.go index 25f46618..a1b199a3 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_linux.go @@ -1,87 +1,87 @@ -package mount +package mount // import 
"github.com/docker/docker/pkg/mount" import ( - "syscall" + "golang.org/x/sys/unix" ) const ( // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY + RDONLY = unix.MS_RDONLY // NOSUID will not allow set-user-identifier or set-group-identifier bits to // take effect. - NOSUID = syscall.MS_NOSUID + NOSUID = unix.MS_NOSUID // NODEV will not interpret character or block special devices on the file // system. - NODEV = syscall.MS_NODEV + NODEV = unix.MS_NODEV // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC + NOEXEC = unix.MS_NOEXEC // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS + SYNCHRONOUS = unix.MS_SYNCHRONOUS // DIRSYNC will force all directory updates within the file system to be done // synchronously. This affects the following system calls: create, link, // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC + DIRSYNC = unix.MS_DIRSYNC // REMOUNT will attempt to remount an already-mounted file system. This is // commonly used to change the mount flags for a file system, especially to // make a readonly file system writeable. It does not change device or mount // point. - REMOUNT = syscall.MS_REMOUNT + REMOUNT = unix.MS_REMOUNT // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK + MANDLOCK = unix.MS_MANDLOCK // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME + NOATIME = unix.MS_NOATIME // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME + NODIRATIME = unix.MS_NODIRATIME // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND + BIND = unix.MS_BIND // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC + RBIND = unix.MS_BIND | unix.MS_REC // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE + UNBINDABLE = unix.MS_UNBINDABLE // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC + RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE + PRIVATE = unix.MS_PRIVATE // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC + RPRIVATE = unix.MS_PRIVATE | unix.MS_REC // SLAVE creates a mount which receives propagation from its master, but not // vice versa. - SLAVE = syscall.MS_SLAVE + SLAVE = unix.MS_SLAVE // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC + RSLAVE = unix.MS_SLAVE | unix.MS_REC // SHARED creates a mount which provides the ability to create mirrors of // that mount such that mounts and unmounts within any of the mirrors // propagate to the other mirrors. - SHARED = syscall.MS_SHARED + SHARED = unix.MS_SHARED // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC + RSHARED = unix.MS_SHARED | unix.MS_REC // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME + RELATIME = unix.MS_RELATIME // STRICTATIME allows to explicitly request full atime updates. This makes // it possible for the kernel to default to relatime or noatime but still // allow userspace to override it. 
- STRICTATIME = syscall.MS_STRICTATIME + STRICTATIME = unix.MS_STRICTATIME - mntDetach = syscall.MNT_DETACH + mntDetach = unix.MNT_DETACH ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go index 9ed741e3..cc6c4759 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/flags_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!freebsd freebsd,!cgo solaris,!cgo +// +build !linux,!freebsd freebsd,!cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" // These flags are unsupported. const ( diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mount.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mount.go index c9fdfd69..4afd63c4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mount.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mount.go @@ -1,30 +1,100 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "sort" + "strconv" "strings" + + "github.com/sirupsen/logrus" ) -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() +// mountError records an error from mount or unmount operation +type mountError struct { + op string + source, target string + flags uintptr + data string + err error +} + +func (e *mountError) Error() string { + out := e.op + " " + + if e.source != "" { + out += e.source + ":" + e.target + } else { + out += e.target + } + + if e.flags != uintptr(0) { + out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) + } + if e.data != "" { + out += ", data: " + e.data + } + + out += ": " + e.err.Error() + return out +} + +// Cause returns the underlying cause of the error +func (e *mountError) Cause() error { + return e.err +} + +// FilterFunc is a type defining a callback function +// to filter out unwanted entries. It takes a pointer +// to an Info struct (not fully populated, currently +// only Mountpoint is filled in), and returns two booleans: +// - skip: true if the entry should be skipped +// - stop: true if parsing should be stopped after the entry +type FilterFunc func(*Info) (skip, stop bool) + +// PrefixFilter discards all entries whose mount points +// do not start with a prefix specified +func PrefixFilter(prefix string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(m.Mountpoint, prefix) + return skip, false + } +} + +// SingleEntryFilter looks for a specific entry +func SingleEntryFilter(mp string) FilterFunc { + return func(m *Info) (bool, bool) { + if m.Mountpoint == mp { + return false, true // don't skip, stop now + } + return true, false // skip, keep going + } +} + +// ParentsFilter returns all entries whose mount points +// can be parents of a path specified, discarding others. +// For example, given `/var/lib/docker/something`, entries +// like `/var/lib/docker`, `/var` and `/` are returned. 
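Together, FilterFunc and these helpers push matching into the mount-table parser instead of post-filtering the full slice (ParentsFilter's body follows just below). A usage sketch of the new API, assuming the package is imported under its vendored path and using /var/lib/docker as an illustrative prefix:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// PrefixFilter makes the parser skip entries outside /var/lib/docker.
	infos, err := mount.GetMounts(mount.PrefixFilter("/var/lib/docker"))
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range infos {
		fmt.Println(m.Mountpoint, m.Fstype, m.Source)
	}

	// SingleEntryFilter stops the scan at the first match; the reworked
	// Mounted() relies on this to avoid reading the rest of the table.
	root, err := mount.GetMounts(mount.SingleEntryFilter("/"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("root is a mount point:", len(root) > 0)
}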
+func ParentsFilter(path string) FilterFunc { + return func(m *Info) (bool, bool) { + skip := !strings.HasPrefix(path, m.Mountpoint) + return skip, false + } +} + +// GetMounts retrieves a list of mounts for the current running process, +// with an optional filter applied (use nil for no filter). +func GetMounts(f FilterFunc) ([]*Info, error) { + return parseMountTable(f) } // Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. +// On Linux it looks at /proc/self/mountinfo. func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() + entries, err := GetMounts(SingleEntryFilter(mountpoint)) if err != nil { return false, err } - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil + return len(entries) > 0, nil } // Mount will mount filesystem according to the specified configuration, on the @@ -53,34 +123,37 @@ func ForceMount(device, target, mType, options string) error { // Unmount lazily unmounts a filesystem on supported platforms, otherwise // does a normal unmount. func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err - } return unmount(target, mntDetach) } // RecursiveUnmount unmounts the target and all mounts underneath, starting with // the deepest mount first. func RecursiveUnmount(target string) error { - mounts, err := GetMounts() + mounts, err := parseMountTable(PrefixFilter(target)) if err != nil { return err } // Make the deepest mount be first - sort.Sort(sort.Reverse(byMountpoint(mounts))) + sort.Slice(mounts, func(i, j int) bool { + return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) + }) for i, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, target) { - continue - } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { - if mounted, err := Mounted(m.Mountpoint); err != nil || mounted { - return err + logrus.Debugf("Trying to unmount %s", m.Mountpoint) + err = unmount(m.Mountpoint, mntDetach) + if err != nil { + if i == len(mounts)-1 { // last mount + if mounted, e := Mounted(m.Mountpoint); e != nil || mounted { + return err + } + } else { + // This is some submount, we can ignore this error for now, the final unmount will fail if this is a real problem + logrus.WithError(err).Warnf("Failed to unmount submount %s", m.Mountpoint) } - // Ignore errors for submounts and continue trying to unmount others - // The final unmount should fail if there ane any submounts remaining } + + logrus.Debugf("Unmounted %s", m.Mountpoint) } return nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go index bb870e6f..09ad3606 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_freebsd.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" /* #include @@ -11,7 +11,6 @@ package mount import "C" import ( - "fmt" "strings" "syscall" "unsafe" @@ -48,12 +47,13 @@ func mount(device, target, mType string, flag uintptr, data string) error { } if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) -
return fmt.Errorf("Failed to call nmount: %s", reason) + return &mountError{ + op: "mount", + source: device, + target: target, + flags: flag, + err: syscall.Errno(errno), + } } return nil } - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go index 3ef2ce6f..a0a1ad23 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_linux.go @@ -1,18 +1,18 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( - "syscall" + "golang.org/x/sys/unix" ) const ( // ptypes is the set propagation types. - ptypes = syscall.MS_SHARED | syscall.MS_PRIVATE | syscall.MS_SLAVE | syscall.MS_UNBINDABLE + ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE // pflags is the full set valid flags for a change propagation call. - pflags = ptypes | syscall.MS_REC | syscall.MS_SILENT + pflags = ptypes | unix.MS_REC | unix.MS_SILENT // broflags is the combination of bind and read only - broflags = syscall.MS_BIND | syscall.MS_RDONLY + broflags = unix.MS_BIND | unix.MS_RDONLY ) // isremount returns true if either device name or flags identify a remount request, false otherwise. @@ -20,7 +20,7 @@ func isremount(device string, flags uintptr) bool { switch { // We treat device "" and "none" as a remount request to provide compatibility with // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. - case flags&syscall.MS_REMOUNT != 0, device == "", device == "none": + case flags&unix.MS_REMOUNT != 0, device == "", device == "none": return true default: return false @@ -29,28 +29,45 @@ func isremount(device string, flags uintptr) bool { func mount(device, target, mType string, flags uintptr, data string) error { oflags := flags &^ ptypes - if !isremount(device, flags) { - // Initial call applying all non-propagation flags. - if err := syscall.Mount(device, target, mType, oflags, data); err != nil { - return err + if !isremount(device, flags) || data != "" { + // Initial call applying all non-propagation flags for mount + // or remount with changed data + if err := unix.Mount(device, target, mType, oflags, data); err != nil { + return &mountError{ + op: "mount", + source: device, + target: target, + flags: oflags, + data: data, + err: err, + } } } if flags&ptypes != 0 { // Change the propagation type. - if err := syscall.Mount("", target, "", flags&pflags, ""); err != nil { - return err + if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { + return &mountError{ + op: "remount", + target: target, + flags: flags & pflags, + err: err, + } } } if oflags&broflags == broflags { // Remount the bind to apply read only. 
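The remount dance above is exactly what a read-only bind mount exercises: a first call applies MS_BIND, then a second ORs in MS_REMOUNT to enforce MS_RDONLY. A sketch of that path through the public API (requires root; both paths are hypothetical):

package main

import (
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// "bind,ro" parses to MS_BIND|MS_RDONLY, so mount() performs the bind
	// first and then the remount-ro step shown above.
	if err := mount.Mount("/srv/data", "/mnt/ro", "none", "bind,ro"); err != nil {
		// Failures now arrive as *mountError, whose Error() reports the op,
		// source, target and hex flags in a single message.
		log.Fatal(err)
	}
}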
- return syscall.Mount("", target, "", oflags|syscall.MS_REMOUNT, "") + if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { + return &mountError{ + op: "remount-ro", + target: target, + flags: oflags | unix.MS_REMOUNT, + err: err, + } + + } } return nil } - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go deleted file mode 100644 index c684aa81..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "golang.org/x/sys/unix" - "unsafe" -) - -// #include - // #include - // #include - // int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { - // return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go index a2a3bb45..c3e5aec2 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mounter_unsupported.go @@ -1,11 +1,7 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo +// +build !linux,!freebsd freebsd,!cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" func mount(device, target, mType string, flag uintptr, data string) error { panic("Not implemented") } - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo.go index ff4cc1d8..ecd03fc0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" // Info reveals information about a particular mounted filesystem. This // struct is populated from the content in the /proc/<pid>/mountinfo file. @@ -38,17 +38,3 @@ type Info struct { // VfsOpts represents per super block options.
VfsOpts string } - -type byMountpoint []*Info - -func (by byMountpoint) Len() int { - return len(by) -} - -func (by byMountpoint) Less(i, j int) bool { - return by[i].Mountpoint < by[j].Mountpoint -} - -func (by byMountpoint) Swap(i, j int) { - by[i], by[j] = by[j], by[i] -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go index 4f32edcd..36c89dc1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_freebsd.go @@ -1,4 +1,4 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" /* #include @@ -15,7 +15,7 @@ import ( // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts. -func parseMountTable() ([]*Info, error) { +func parseMountTable(filter FilterFunc) ([]*Info, error) { var rawEntries *C.struct_statfs count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) @@ -32,10 +32,24 @@ func parseMountTable() ([]*Info, error) { var out []*Info for _, entry := range entries { var mountinfo Info + var skip, stop bool mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) + + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(&mountinfo) + if skip { + continue + } + } + mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) + out = append(out, &mountinfo) + if stop { + break + } } return out, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go index be69fee1..fe6e3ddb 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_linux.go @@ -1,84 +1,133 @@ -// +build linux - -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "bufio" "fmt" "io" "os" + "strconv" "strings" + + "github.com/pkg/errors" ) -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) +func parseInfoFile(r io.Reader, filter FilterFunc) ([]*Info, error) { + s := bufio.NewScanner(r) + out := []*Info{} + var err error + for s.Scan() { + if err = s.Err(); err != nil { + return nil, err + } + /* + See http://man7.org/linux/man-pages/man5/proc.5.html - (1) mount ID: unique
identifier of the mount (may be reused after umount) + (2) parent ID: ID of parent (or of self for the top of the mount tree) + (3) major:minor: value of st_dev for files on filesystem + (4) root: root of the mount within the filesystem + (5) mount point: mount point relative to the process's root + (6) mount options: per mount options + (7) optional fields: zero or more fields of the form "tag[:value]" + (8) separator: marks the end of the optional fields + (9) filesystem type: name of filesystem of the form "type[.subtype]" + (10) mount source: filesystem specific information or "none" + (11) super options: per super block options + */ + + text := s.Text() + fields := strings.Split(text, " ") + numFields := len(fields) + if numFields < 10 { + // should be at least 10 fields + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields (%d)", text, numFields) + } + + p := &Info{} + // ignore any numbers parsing errors, as there should not be any + p.ID, _ = strconv.Atoi(fields[0]) + p.Parent, _ = strconv.Atoi(fields[1]) + mm := strings.Split(fields[2], ":") + if len(mm) != 2 { + return nil, fmt.Errorf("Parsing '%s' failed: unexpected major:minor pair %s", text, mm) + } + p.Major, _ = strconv.Atoi(mm[0]) + p.Minor, _ = strconv.Atoi(mm[1]) + + p.Root, err = strconv.Unquote(`"` + fields[3] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote root field", fields[3]) + } + + p.Mountpoint, err = strconv.Unquote(`"` + fields[4] + `"`) + if err != nil { + return nil, errors.Wrapf(err, "Parsing '%s' failed: unable to unquote mount point field", fields[4]) + } + p.Opts = fields[5] + + var skip, stop bool + if filter != nil { + // filter out entries we're not interested in + skip, stop = filter(p) + if skip { + continue + } + } + + // zero or more optional fields, followed by the separator (-) + i := 6 + for ; i < numFields && fields[i] != "-"; i++ { + switch i { + case 6: + p.Optional = fields[6] + default: + /* NOTE there might be more optional fields before the separator, such as + fields[7]...fields[N] (where N < sepIndex), although + as of Linux kernel 4.15 the only known ones are + mount propagation flags in fields[6]. The correct + behavior is to ignore any unknown optional fields. + */ + break + } + } + if i == numFields { + return nil, fmt.Errorf("Parsing '%s' failed: missing separator ('-')", text) + } + + // There should be 3 fields after the separator... + if i+4 > numFields { + return nil, fmt.Errorf("Parsing '%s' failed: not enough fields after a separator", text) + } + // ... but in Linux <= 3.9 mounting a cifs with spaces in a share name + // (like "//serv/My Documents") _may_ end up having a space in the last field + // of mountinfo (like "unc=//serv/My Documents"). Since kernel 3.10-rc1, cifs + // option unc= is ignored, so a space should not appear. In here we ignore + // those "extra" fields caused by extra spaces.
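The field layout documented in the comment above can be checked by hand; a standalone sketch that splits the sample mountinfo line the same way parseInfoFile does:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample line from the comment above; fields are space-separated, with
	// optional fields running until the "-" separator.
	line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
	fields := strings.Split(line, " ")

	// Walk past the optional fields to the separator, as the parser does.
	i := 6
	for i < len(fields) && fields[i] != "-" {
		i++
	}

	fmt.Println("mountpoint:", fields[4]) // /mnt2
	fmt.Println("opts:", fields[5])       // rw,noatime
	fmt.Println("fstype:", fields[i+1])   // ext3
	fmt.Println("source:", fields[i+2])   // /dev/root
	fmt.Println("vfs opts:", fields[i+3]) // rw,errors=continue
}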
+ p.Fstype = fields[i+1] + p.Source = fields[i+2] + p.VfsOpts = fields[i+3] + + out = append(out, p) + if stop { + break + } + } + return out, nil +} // Parse /proc/self/mountinfo because comparing Dev and ino does not work from // bind mounts -func parseMountTable() ([]*Info, error) { +func parseMountTable(filter FilterFunc) ([]*Info, error) { f, err := os.Open("/proc/self/mountinfo") if err != nil { return nil, err } defer f.Close() - return parseInfoFile(f) -} - -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = bufio.NewScanner(r) - out = []*Info{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - // Safe as mountinfo encodes mountpoints with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - if optionalFields != "-" { - p.Optional = optionalFields - } - - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") - out = append(out, p) - } - return out, nil + return parseInfoFile(f, filter) } // PidMountInfo collects the mounts for a specific process ID. If the process @@ -91,5 +140,5 @@ func PidMountInfo(pid int) ([]*Info, error) { } defer f.Close() - return parseInfoFile(f) + return parseInfoFile(f, nil) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source = C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go index 7fbcf192..fd16d3ed 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_unsupported.go @@ -1,12 +1,12 @@ -// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo +// +build !windows,!linux,!freebsd freebsd,!cgo -package mount +package mount // import "github.com/docker/docker/pkg/mount" import ( "fmt" "runtime" ) -func parseMountTable() ([]*Info, 
error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go index dab8a37e..27e0f697 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/mountinfo_windows.go @@ -1,6 +1,6 @@ -package mount +package mount // import "github.com/docker/docker/pkg/mount" -func parseMountTable() ([]*Info, error) { +func parseMountTable(f FilterFunc) ([]*Info, error) { // Do NOT return an error! return nil, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go index 8ceec84b..8a100f0b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_linux.go @@ -1,6 +1,4 @@ -// +build linux - -package mount +package mount // import "github.com/docker/docker/pkg/mount" // MakeShared ensures a mounted filesystem has the SHARED mount option enabled. // See the supported options in flags.go for further reference. @@ -50,18 +48,22 @@ func MakeRUnbindable(mountPoint string) error { return ensureMountedAs(mountPoint, "runbindable") } -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) +// MakeMount ensures that the file or directory given is a mount point, +// bind mounting it to itself in case it is not. +func MakeMount(mnt string) error { + mounted, err := Mounted(mnt) if err != nil { return err } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } + if mounted { + return nil } - if _, err = Mounted(mountPoint); err != nil { + + return Mount(mnt, mnt, "none", "bind") +} + +func ensureMountedAs(mountPoint, options string) error { + if err := MakeMount(mountPoint); err != nil { return err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go deleted file mode 100644 index 09f6b03c..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/sharedsubtree_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build solaris - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference.
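MakeMount gives ensureMountedAs a single primitive: self-bind only when the path is not yet a mount point, then change propagation on it. A usage sketch (requires root; /data is a hypothetical path):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Bind /data onto itself only if it is not already a mount point.
	if err := mount.MakeMount("/data"); err != nil {
		log.Fatal(err)
	}
	mounted, err := mount.Mounted("/data")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("mount point:", mounted) // true

	// With a mount point guaranteed, propagation changes apply cleanly.
	if err := mount.MakeRShared("/data"); err != nil {
		log.Fatal(err)
	}
}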
-func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - // TODO: Solaris does not support bind mounts. - // Evaluate lofs and also look at the relevant - // mount flags to be supported. - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go new file mode 100644 index 00000000..4be42768 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/unmount_unix.go @@ -0,0 +1,22 @@ +// +build !windows + +package mount // import "github.com/docker/docker/pkg/mount" + +import "golang.org/x/sys/unix" + +func unmount(target string, flags int) error { + err := unix.Unmount(target, flags) + if err == nil || err == unix.EINVAL { + // Ignore "not mounted" error here. Note the same error + // can be returned if flags are invalid, so this code + // assumes that the flags value is always correct. 
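Because EINVAL from a non-mounted target is mapped to nil here, Unmount becomes idempotent. A sketch of the resulting contract (requires root; /mnt/ro is a hypothetical path):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/mount"
)

func main() {
	// The second call reaches unix.Unmount on an already-unmounted target,
	// gets EINVAL, and the handling above turns that into a nil error.
	fmt.Println(mount.Unmount("/mnt/ro")) // <nil>
	fmt.Println(mount.Unmount("/mnt/ro")) // <nil>
}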
+ return nil + } + + return &mountError{ + op: "umount", + target: target, + flags: uintptr(flags), + err: err, + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go new file mode 100644 index 00000000..a88ad357 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/mount/unmount_unsupported.go @@ -0,0 +1,7 @@ +// +build windows + +package mount // import "github.com/docker/docker/pkg/mount" + +func unmount(target string, flag int) error { + panic("Not implemented") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go new file mode 100644 index 00000000..05eac680 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go @@ -0,0 +1,853 @@ +package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator" + +import ( + "fmt" + "math/rand" +) + +var ( + left = [...]string{ + "admiring", + "adoring", + "affectionate", + "agitated", + "amazing", + "angry", + "awesome", + "beautiful", + "blissful", + "bold", + "boring", + "brave", + "busy", + "charming", + "clever", + "cocky", + "cool", + "compassionate", + "competent", + "condescending", + "confident", + "cranky", + "crazy", + "dazzling", + "determined", + "distracted", + "dreamy", + "eager", + "ecstatic", + "elastic", + "elated", + "elegant", + "eloquent", + "epic", + "exciting", + "fervent", + "festive", + "flamboyant", + "focused", + "friendly", + "frosty", + "funny", + "gallant", + "gifted", + "goofy", + "gracious", + "great", + "happy", + "hardcore", + "heuristic", + "hopeful", + "hungry", + "infallible", + "inspiring", + "interesting", + "intelligent", + "jolly", + "jovial", + "keen", + "kind", + "laughing", + "loving", + "lucid", + "magical", + "mystifying", + "modest", + "musing", + "naughty", + "nervous", + "nice", + "nifty", + "nostalgic", + "objective", + "optimistic", + "peaceful", + "pedantic", + "pensive", + "practical", + "priceless", + "quirky", + "quizzical", + "recursing", + "relaxed", + "reverent", + "romantic", + "sad", + "serene", + "sharp", + "silly", + "sleepy", + "stoic", + "strange", + "stupefied", + "suspicious", + "sweet", + "tender", + "thirsty", + "trusting", + "unruffled", + "upbeat", + "vibrant", + "vigilant", + "vigorous", + "wizardly", + "wonderful", + "xenodochial", + "youthful", + "zealous", + "zen", + } + + // Docker, starting from 0.7.x, generates names from notable scientists and hackers. + // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. + right = [...]string{ + // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB + "albattani", + + // Frances E. Allen, became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. 
https://en.wikipedia.org/wiki/Frances_E._Allen + "allen", + + // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida + "almeida", + + // Kathleen Antonelli, American computer programmer and one of the six original programmers of the ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "antonelli", + + // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi + "agnesi", + + // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes + "archimedes", + + // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli + "ardinghelli", + + // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata + "aryabhata", + + // Wanda Austin - Wanda Austin is the President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin + "austin", + + // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. + "babbage", + + // Stefan Banach - Polish mathematician, was one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach + "banach", + + // Buckaroo Banzai and his mentor Dr. Hikita perfected the "oscillation overthruster", a device that allows one to pass through solid matter. - https://en.wikipedia.org/wiki/The_Adventures_of_Buckaroo_Banzai_Across_the_8th_Dimension + "banzai", + + // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen + "bardeen", + + // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik + "bartik", + + // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi + "bassi", + + // Hugh Beaver, British engineer, founder of the Guinness Book of World Records https://en.wikipedia.org/wiki/Hugh_Beaver + "beaver", + + // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell + "bell", + + // Karl Friedrich Benz - a German automobile engineer. Inventor of the first practical motorcar. https://en.wikipedia.org/wiki/Karl_Benz + "benz", + + // Homi J Bhabha - was an Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme"- https://en.wikipedia.org/wiki/Homi_J._Bhabha + "bhabha", + + // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus + "bhaskara", + + // Sue Black - British computer scientist and campaigner. She has been instrumental in saving Bletchley Park, the site of World War II codebreaking - https://en.wikipedia.org/wiki/Sue_Black_(computer_scientist) + "black", + + // Elizabeth Helen Blackburn - Australian-American Nobel laureate; best known for co-discovering telomerase.
https://en.wikipedia.org/wiki/Elizabeth_Blackburn + "blackburn", + + // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell + "blackwell", + + // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. + "bohr", + + // Kathleen Booth, she's credited with writing the first assembly language. https://en.wikipedia.org/wiki/Kathleen_Booth + "booth", + + // Anita Borg - Anita Borg was the founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg + "borg", + + // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose + "bose", + + // Katherine Louise Bouman is an imaging scientist and Assistant Professor of Computer Science at the California Institute of Technology. She researches computational methods for imaging, and developed an algorithm that made possible the first visualization of a black hole using the Event Horizon Telescope. - https://en.wikipedia.org/wiki/Katie_Bouman + "bouman", + + // Evelyn Boyd Granville - She was one of the first African-American women to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville + "boyd", + + // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero + "brahmagupta", + + // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain + "brattain", + + // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) + "brown", + + // Linda Brown Buck - American biologist and Nobel laureate best known for her genetic and molecular analyses of the mechanisms of smell. https://en.wikipedia.org/wiki/Linda_B._Buck + "buck", + + // Dame Susan Jocelyn Bell Burnell - Northern Irish astrophysicist who discovered radio pulsars and was the first to analyse them. https://en.wikipedia.org/wiki/Jocelyn_Bell_Burnell + "burnell", + + // Annie Jump Cannon - pioneering female astronomer who classified hundreds of thousands of stars and created the system we use to understand stars today. https://en.wikipedia.org/wiki/Annie_Jump_Cannon + "cannon", + + // Rachel Carson - American marine biologist and conservationist, her book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson + "carson", + + // Dame Mary Lucy Cartwright - British mathematician who was one of the first to study what is now known as chaos theory. Also known for Cartwright's theorem which finds applications in signal processing. https://en.wikipedia.org/wiki/Mary_Cartwright + "cartwright", + + // Vinton Gray Cerf - American Internet pioneer, recognised as one of "the fathers of the Internet". With Robert Elliot Kahn, he designed TCP and IP, the primary data communication protocols of the Internet and other computer networks. https://en.wikipedia.org/wiki/Vint_Cerf + "cerf", + + // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars.
He won the Nobel Prize for Physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar + "chandrasekhar", + + // Sergey Alexeyevich Chaplygin (Russian: Серге́й Алексе́евич Чаплы́гин; April 5, 1869 – October 8, 1942) was a Russian and Soviet physicist, mathematician, and mechanical engineer. He is known for mathematical formulas such as Chaplygin's equation and for a hypothetical substance in cosmology called Chaplygin gas, named after him. https://en.wikipedia.org/wiki/Sergey_Chaplygin + "chaplygin", + + // Émilie du Châtelet - French natural philosopher, mathematician, physicist, and author during the early 1730s, known for her translation of and commentary on Isaac Newton's book Principia containing basic laws of physics. https://en.wikipedia.org/wiki/%C3%89milie_du_Ch%C3%A2telet + "chatelet", + + // Asima Chatterjee was an Indian organic chemist noted for her research on vinca alkaloids, development of drugs for treatment of epilepsy and malaria - https://en.wikipedia.org/wiki/Asima_Chatterjee + "chatterjee", + + // Pafnuty Chebyshev - Russian mathematician. He is known for his works on probability, statistics, mechanics, analytical geometry and number theory https://en.wikipedia.org/wiki/Pafnuty_Chebyshev + "chebyshev", + + // Bram Cohen - American computer programmer and author of the BitTorrent peer-to-peer protocol. https://en.wikipedia.org/wiki/Bram_Cohen + "cohen", + + // David Lee Chaum - American computer scientist and cryptographer. Known for his seminal contributions in the field of anonymous communication. https://en.wikipedia.org/wiki/David_Chaum + "chaum", + + // Joan Clarke - Bletchley Park code breaker during the Second World War who pioneered techniques that remained top secret for decades. Also an accomplished numismatist https://en.wikipedia.org/wiki/Joan_Clarke + "clarke", + + // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden + "colden", + + // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori + "cori", + + // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray + "cray", + + // This entry reflects a husband and wife team who worked together: + // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran + // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran + "curran", + + // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. + "curie", + + // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. + "darwin", + + // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. + "davinci", + + // A. K. (Alexander Keewatin) Dewdney, Canadian mathematician, computer scientist, author and filmmaker. Contributor to Scientific American's "Computer Recreations" from 1984 to 1991. Author of Core War (program), The Planiverse, The Armchair Universe, The Magic Machine, The New Turing Omnibus, and more.
https://en.wikipedia.org/wiki/Alexander_Dewdney + "dewdney", + + // Satish Dhawan - Indian mathematician and aerospace engineer, known for leading the successful and indigenous development of the Indian space programme. https://en.wikipedia.org/wiki/Satish_Dhawan + "dhawan", + + // Bailey Whitfield Diffie - American cryptographer and one of the pioneers of public-key cryptography. https://en.wikipedia.org/wiki/Whitfield_Diffie + "diffie", + + // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. + "dijkstra", + + // Paul Adrien Maurice Dirac - English theoretical physicist who made fundamental contributions to the early development of both quantum mechanics and quantum electrodynamics. https://en.wikipedia.org/wiki/Paul_Dirac + "dirac", + + // Agnes Meyer Driscoll - American cryptanalyst during World Wars I and II who successfully cryptanalysed a number of Japanese ciphers. She was also the co-developer of one of the cipher machines of the US Navy, the CM. https://en.wikipedia.org/wiki/Agnes_Meyer_Driscoll + "driscoll", + + // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs) serving as CEO of Palm, Inc. and co-founding Handspring. https://en.wikipedia.org/wiki/Donna_Dubinsky + "dubinsky", + + // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley + "easley", + + // Thomas Alva Edison, prolific inventor https://en.wikipedia.org/wiki/Thomas_Edison + "edison", + + // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein + "einstein", + + // Alexandra Asanovna Elbakyan (Russian: Алекса́ндра Аса́новна Элбакя́н) is a Kazakhstani graduate student, computer programmer, internet pirate in hiding, and the creator of the site Sci-Hub. Nature has listed her in 2016 in the top ten people that mattered in science, and Ars Technica has compared her to Aaron Swartz. - https://en.wikipedia.org/wiki/Alexandra_Elbakyan + "elbakyan", + + // Taher A. ElGamal - Egyptian cryptographer best known for the ElGamal discrete log cryptosystem and the ElGamal digital signature scheme. https://en.wikipedia.org/wiki/Taher_Elgamal + "elgamal", + + // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion + "elion", + + // James Henry Ellis - British engineer and cryptographer employed by the GCHQ. Best known for conceiving for the first time, the idea of public-key cryptography. https://en.wikipedia.org/wiki/James_H._Ellis + "ellis", + + // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart + "engelbart", + + // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid + "euclid", + + // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler + "euler", + + // Michael Faraday - British scientist who contributed to the study of electromagnetism and electrochemistry. https://en.wikipedia.org/wiki/Michael_Faraday + "faraday", + + // Horst Feistel - German-born American cryptographer who was one of the earliest non-government researchers to study the design and theory of block ciphers. Co-developer of DES and Lucifer. 
Feistel networks, a symmetric structure used in the construction of block ciphers are named after him. https://en.wikipedia.org/wiki/Horst_Feistel + "feistel", + + // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat + "fermat", + + // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. + "fermi", + + // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman + "feynman", + + // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. + "franklin", + + // Yuri Alekseyevich Gagarin - Soviet pilot and cosmonaut, best known as the first human to journey into outer space. https://en.wikipedia.org/wiki/Yuri_Gagarin + "gagarin", + + // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei + "galileo", + + // Évariste Galois - French mathematician whose work laid the foundations of Galois theory and group theory, two major branches of abstract algebra, and the subfield of Galois connections, all while still in his late teens. https://en.wikipedia.org/wiki/%C3%89variste_Galois + "galois", + + // Kadambini Ganguly - Indian physician, known for being the first South Asian female physician, trained in western medicine, to graduate in South Asia. https://en.wikipedia.org/wiki/Kadambini_Ganguly + "ganguly", + + // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates + "gates", + + // Johann Carl Friedrich Gauss - German mathematician who made significant contributions to many fields, including number theory, algebra, statistics, analysis, differential geometry, geodesy, geophysics, mechanics, electrostatics, magnetic fields, astronomy, matrix theory, and optics. https://en.wikipedia.org/wiki/Carl_Friedrich_Gauss + "gauss", + + // Marie-Sophie Germain - French mathematician, physicist and philosopher. Known for her work on elasticity theory, number theory and philosophy. https://en.wikipedia.org/wiki/Sophie_Germain + "germain", + + // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) + "goldberg", + + // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine + "goldstine", + + // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser + "goldwasser", + + // James Golick, all around gangster. + "golick", + + // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall + "goodall", + + // Stephen Jay Gould was an American paleontologist, evolutionary biologist, and historian of science. He is most famous for the theory of punctuated equilibrium - https://en.wikipedia.org/wiki/Stephen_Jay_Gould + "gould", + + // Carolyn Widney Greider - American molecular biologist and joint winner of the 2009 Nobel Prize for Physiology or Medicine for the discovery of telomerase.
https://en.wikipedia.org/wiki/Carol_W._Greider + "greider", + + // Alexander Grothendieck - German-born French mathematician who became a leading figure in the creation of modern algebraic geometry. https://en.wikipedia.org/wiki/Alexander_Grothendieck + "grothendieck", + + // Lois Haibt - American computer scientist, part of the team at IBM that developed FORTRAN - https://en.wikipedia.org/wiki/Lois_Haibt + "haibt", + + // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) + "hamilton", + + // Caroline Harriet Haslett - English electrical engineer, electricity industry administrator and champion of women's rights. Co-author of British Standard 1363 that specifies AC power plugs and sockets used across the United Kingdom (which is widely considered as one of the safest designs). https://en.wikipedia.org/wiki/Caroline_Haslett + "haslett", + + // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking + "hawking", + + // Martin Edward Hellman - American cryptologist, best known for his invention of public-key cryptography in co-operation with Whitfield Diffie and Ralph Merkle. https://en.wikipedia.org/wiki/Martin_Hellman + "hellman", + + // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg + "heisenberg", + + // Grete Hermann was a German philosopher noted for her philosophical work on the foundations of quantum mechanics. https://en.wikipedia.org/wiki/Grete_Hermann + "hermann", + + // Caroline Lucretia Herschel - German astronomer and discoverer of several comets. https://en.wikipedia.org/wiki/Caroline_Herschel + "herschel", + + // Heinrich Rudolf Hertz - German physicist who first conclusively proved the existence of the electromagnetic waves. https://en.wikipedia.org/wiki/Heinrich_Hertz + "hertz", + + // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD + "heyrovsky", + + // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin + "hodgkin", + + // Douglas R. Hofstadter is an American professor of cognitive science and author of the Pulitzer Prize and American Book Award-winning work Goedel, Escher, Bach: An Eternal Golden Braid in 1979. A mind-bending work which coined Hofstadter's Law: "It always takes longer than you expect, even when you take into account Hofstadter's Law." https://en.wikipedia.org/wiki/Douglas_Hofstadter + "hofstadter", + + // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover + "hoover", + + // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. 
https://en.wikipedia.org/wiki/Grace_Hopper + "hopper", + + // Frances Hugle was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle + "hugle", + + // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia + "hypatia", + + // Teruko Ishizaka - Japanese scientist and immunologist who co-discovered the antibody class Immunoglobulin E. https://en.wikipedia.org/wiki/Teruko_Ishizaka + "ishizaka", + + // Mary Jackson, American mathematician and aerospace engineer who earned the highest title within NASA's engineering department - https://en.wikipedia.org/wiki/Mary_Jackson_(engineer) + "jackson", + + // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil + "jang", + + // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik + "jennings", + + // Mary Lou Jepsen was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen + "jepsen", + + // Katherine Coleman Goble Johnson - American physicist and mathematician who contributed to NASA. https://en.wikipedia.org/wiki/Katherine_Johnson + "johnson", + + // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie + "joliot", + + // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones + "jones", + + // A. P. J. Abdul Kalam - Indian scientist known as the Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam + "kalam", + + // Sergey Petrovich Kapitsa (Russian: Серге́й Петро́вич Капи́ца; 14 February 1928 – 14 August 2012) was a Russian physicist and demographer. He was best known as host of the popular and long-running Russian scientific TV show, Evident, but Incredible. His father was the Nobel laureate Soviet-era physicist Pyotr Kapitsa, and his brother was the geographer and Antarctic explorer Andrey Kapitsa. - https://en.wikipedia.org/wiki/Sergey_Kapitsa + "kapitsa", + + // Susan Kare created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare + "kare", + + // Mstislav Keldysh - a Soviet scientist in the field of mathematics and mechanics, academician of the USSR Academy of Sciences (1946), President of the USSR Academy of Sciences (1961–1975), three times Hero of Socialist Labor (1956, 1961, 1971), fellow of the Royal Society of Edinburgh (1968). https://en.wikipedia.org/wiki/Mstislav_Keldysh + "keldysh", + + // Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965.
https://en.wikipedia.org/wiki/Mary_Kenneth_Keller + "keller", + + // Johannes Kepler, German astronomer known for his three laws of planetary motion - https://en.wikipedia.org/wiki/Johannes_Kepler + "kepler", + + // Omar Khayyam - Persian mathematician, astronomer and poet. Known for his work on the classification and solution of cubic equations, for his contribution to the understanding of Euclid's fifth postulate and for computing the length of a year very accurately. https://en.wikipedia.org/wiki/Omar_Khayyam + "khayyam", + + // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize for Physiology or Medicine - https://en.wikipedia.org/wiki/Har_Gobind_Khorana + "khorana", + + // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby + "kilby", + + // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch + "kirch", + + // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system. https://en.wikipedia.org/wiki/Donald_Knuth + "knuth", + + // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya + "kowalevski", + + // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande + "lalande", + + // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr + "lamarr", + + // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport + "lamport", + + // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey + "leakey", + + // Henrietta Swan Leavitt - American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt + "leavitt", + + // Esther Miriam Zimmer Lederberg - American microbiologist and a pioneer of bacterial genetics. https://en.wikipedia.org/wiki/Esther_Lederberg + "lederberg", + + // Inge Lehmann - Danish seismologist and geophysicist. Known for discovering in 1936 that the Earth has a solid inner core inside a molten outer core. https://en.wikipedia.org/wiki/Inge_Lehmann + "lehmann", + + // Daniel Lewin - mathematician, Akamai co-founder, soldier, and 9/11 victim. Developed optimization techniques for routing traffic on the internet. Died attempting to stop the 9/11 hijackers. https://en.wikipedia.org/wiki/Daniel_Lewin + "lewin", + + // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum + "lichterman", + + // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Award in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov + "liskov", + + // Ada Lovelace wrote the first algorithm intended to be carried out by a machine.
https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) + "lovelace", + + // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re + "lumiere", + + // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) + "mahavira", + + // Lynn Margulis (b. Lynn Petra Alexander) - an American evolutionary theorist and biologist, science author, educator, and popularizer, and the primary modern proponent for the significance of symbiosis in evolution. - https://en.wikipedia.org/wiki/Lynn_Margulis + "margulis", + + // Yukihiro Matsumoto - Japanese computer scientist and software programmer best known as the chief designer of the Ruby programming language. https://en.wikipedia.org/wiki/Yukihiro_Matsumoto + "matsumoto", + + // James Clerk Maxwell - Scottish physicist, best known for his formulation of electromagnetic theory. https://en.wikipedia.org/wiki/James_Clerk_Maxwell + "maxwell", + + // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer + "mayer", + + // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) + "mccarthy", + + // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock + "mcclintock", + + // Anne Laura Dorinthea McLaren - British developmental biologist whose work helped lead to human in-vitro fertilisation. https://en.wikipedia.org/wiki/Anne_McLaren + "mclaren", + + // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean + "mclean", + + // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli + "mcnulty", + + // Gregor Johann Mendel - Czech scientist and founder of genetics. https://en.wikipedia.org/wiki/Gregor_Mendel + "mendel", + + // Dmitri Mendeleev - a chemist and inventor. He formulated the Periodic Law, created a farsighted version of the periodic table of elements, and used it to correct the properties of some already discovered elements and also to predict the properties of eight elements yet to be discovered. https://en.wikipedia.org/wiki/Dmitri_Mendeleev + "mendeleev", + + // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner + "meitner", + + // Carla Meninsky was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky + "meninsky", + + // Ralph C. Merkle - American computer scientist, known for devising Merkle's puzzles - one of the very first schemes for public-key cryptography. Also, inventor of Merkle trees and co-inventor of the Merkle-Damgård construction for building collision-resistant cryptographic hash functions and the Merkle-Hellman knapsack cryptosystem.
https://en.wikipedia.org/wiki/Ralph_Merkle + "merkle", + + // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf + "mestorf", + + // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of MIT's AI Lab, won the Turing Award in 1969. https://en.wikipedia.org/wiki/Marvin_Minsky + "minsky", + + // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani + "mirzakhani", + + // Gordon Earle Moore - American engineer, Silicon Valley founding father, author of Moore's law. https://en.wikipedia.org/wiki/Gordon_Moore + "moore", + + // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse + "morse", + + // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock + "murdock", + + // May-Britt Moser - Nobel prize-winning neuroscientist who contributed to the discovery of grid cells in the brain. https://en.wikipedia.org/wiki/May-Britt_Moser + "moser", + + // John Napier of Merchiston - Scottish landowner known as an astronomer, mathematician and physicist. Best known for his discovery of logarithms. https://en.wikipedia.org/wiki/John_Napier + "napier", + + // John Forbes Nash, Jr. - American mathematician who made fundamental contributions to game theory, differential geometry, and the study of partial differential equations. https://en.wikipedia.org/wiki/John_Forbes_Nash_Jr. + "nash", + + // John von Neumann - today's computer architectures are based on the von Neumann architecture. https://en.wikipedia.org/wiki/Von_Neumann_architecture + "neumann", + + // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton + "newton", + + // Florence Nightingale, more prominently known as a nurse, was also the first female member of the Royal Statistical Society and a pioneer in statistical graphics https://en.wikipedia.org/wiki/Florence_Nightingale#Statistics_and_sanitary_reform + "nightingale", + + // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel + "nobel", + + // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether + "noether", + + // Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 + "northcutt", + + // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce + "noyce", + + // Panini - Ancient Indian linguist and grammarian from the 4th century BCE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems + "panini", + + // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 + "pare", + + // Blaise Pascal, French mathematician, physicist, and inventor - https://en.wikipedia.org/wiki/Blaise_Pascal + "pascal", + + // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur.
+ "pasteur", + + // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin + "payne", + + // Radia Perlman is a software designer and network engineer and most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman + "perlman", + + // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike + "pike", + + // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 + "poincare", + + // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras + "poitras", + + // Tat’yana Avenirovna Proskuriakova (Russian: Татья́на Авени́ровна Проскуряко́ва) (January 23 [O.S. January 10] 1909 – August 30, 1985) was a Russian-American Mayanist scholar and archaeologist who contributed significantly to the deciphering of Maya hieroglyphs, the writing system of the pre-Columbian Maya civilization of Mesoamerica. https://en.wikipedia.org/wiki/Tatiana_Proskouriakoff + "proskuriakova", + + // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy + "ptolemy", + + // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman + "raman", + + // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions. - https://en.wikipedia.org/wiki/Srinivasa_Ramanujan + "ramanujan", + + // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride + "ride", + + // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) + "montalcini", + + // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie + "ritchie", + + // Ida Rhodes - American pioneer in computer programming, designed the first computer used for Social Security. https://en.wikipedia.org/wiki/Ida_Rhodes + "rhodes", + + // Julia Hall Bowman Robinson - American mathematician renowned for her contributions to the fields of computability theory and computational complexity theory. https://en.wikipedia.org/wiki/Julia_Robinson + "robinson", + + // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). 
https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen + "roentgen", + + // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin + "rosalind", + + // Vera Rubin - American astronomer who pioneered work on galaxy rotation rates. https://en.wikipedia.org/wiki/Vera_Rubin + "rubin", + + // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha + "saha", + + // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet + "sammet", + + // Mildred Sanderson - American mathematician best known for Sanderson's theorem concerning modular invariants. https://en.wikipedia.org/wiki/Mildred_Sanderson + "sanderson", + + // Satoshi Nakamoto is the name used by the unknown person or group of people who developed bitcoin, authored the bitcoin white paper, and created and deployed bitcoin's original reference implementation. https://en.wikipedia.org/wiki/Satoshi_Nakamoto + "satoshi", + + // Adi Shamir - Israeli cryptographer whose numerous inventions and contributions to cryptography include the Feige-Fiat-Shamir identification scheme, the Rivest Shamir Adleman (RSA) public-key cryptosystem, Shamir's secret sharing scheme, the breaking of the Merkle-Hellman cryptosystem, the TWINKLE and TWIRL factoring devices and the discovery of differential cryptanalysis (with Eli Biham). https://en.wikipedia.org/wiki/Adi_Shamir + "shamir", + + // Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) + "shannon", + + // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) + "shaw", + + // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley + "shirley", + + // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley + "shockley", + + // Lina Solomonovna Stern (or Shtern; Russian: Лина Соломоновна Штерн; 26 August 1878 – 7 March 1968) was a Soviet biochemist, physiologist and humanist whose medical discoveries saved thousands of lives at the fronts of World War II. She is best known for her pioneering work on the blood–brain barrier, which she described as the hemato-encephalic barrier in 1921. https://en.wikipedia.org/wiki/Lina_Stern + "shtern", + + // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi + "sinoussi", + + // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton + "snyder", + + // Cynthia Solomon - Pioneer in the fields of artificial intelligence, computer science and educational computing. Known for creation of Logo, an educational programming language. https://en.wikipedia.org/wiki/Cynthia_Solomon + "solomon", + + // Frances Spence - one of the original programmers of the ENIAC.
https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence + "spence", + + // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman + "stallman", + + // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker + "stonebraker", + + // Ivan Edward Sutherland - American computer scientist and Internet pioneer, widely regarded as the father of computer graphics. https://en.wikipedia.org/wiki/Ivan_Sutherland + "sutherland", + + // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson + "swanson", + + // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. https://en.wikiquote.org/wiki/Aaron_Swartz + "swartz", + + // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles + "swirles", + + // Helen Brooke Taussig - American cardiologist and founder of the field of paediatric cardiology. https://en.wikipedia.org/wiki/Helen_B._Taussig + "taussig", + + // Valentina Tereshkova is a Russian engineer, cosmonaut and politician. She was the first woman to fly in space, in 1963. In 2013, at the age of 76, she offered to go on a one-way mission to Mars. https://en.wikipedia.org/wiki/Valentina_Tereshkova + "tereshkova", + + // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla + "tesla", + + // Marie Tharp - American geologist and oceanic cartographer who co-created the first scientific map of the Atlantic Ocean floor. Her work led to the acceptance of the theories of plate tectonics and continental drift. https://en.wikipedia.org/wiki/Marie_Tharp + "tharp", + + // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson + "thompson", + + // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds + "torvalds", + + // Youyou Tu - Chinese pharmaceutical chemist and educator known for discovering artemisinin and dihydroartemisinin, used to treat malaria, which has saved millions of lives. Joint winner of the 2015 Nobel Prize in Physiology or Medicine. https://en.wikipedia.org/wiki/Tu_Youyou + "tu", + + // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. + "turing", + + // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions + "varahamihira", + + // Dorothy Vaughan was a NASA mathematician and computer programmer on the SCOUT launch vehicle program that put America's first satellites into space - https://en.wikipedia.org/wiki/Dorothy_Vaughan + "vaughan", + + // Sir Mokshagundam Visvesvaraya - notable Indian engineer who was awarded the Indian Republic's highest honour, the Bharat Ratna, in 1955.
His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya + "visvesvaraya", + + // Christiane Nüsslein-Volhard - German biologist, won the Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard + "volhard", + + // Cédric Villani - French mathematician, won the Fields Medal, the Fermat Prize and the Poincaré Prize for his work in differential geometry and statistical mechanics. https://en.wikipedia.org/wiki/C%C3%A9dric_Villani + "villani", + + // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer + "wescoff", + + // Sylvia B. Wilbur - British computer scientist who helped develop the ARPANET, was one of the first to exchange email in the UK and a leading researcher in computer-supported collaborative work. https://en.wikipedia.org/wiki/Sylvia_Wilbur + "wilbur", + + // Andrew Wiles - Notable British mathematician who proved the enigmatic Fermat's Last Theorem - https://en.wikipedia.org/wiki/Andrew_Wiles + "wiles", + + // Roberta Williams did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams + "williams", + + // Malcolm John Williamson - British mathematician and cryptographer employed by GCHQ. Developed in 1974 what is now known as Diffie-Hellman key exchange (Diffie and Hellman first published the scheme in 1976). https://en.wikipedia.org/wiki/Malcolm_J._Williamson + "williamson", + + // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson + "wilson", + + // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing + "wing", + + // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak + "wozniak", + + // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers + "wright", + + // Chien-Shiung Wu - Chinese-American experimental physicist who made significant contributions to nuclear physics. https://en.wikipedia.org/wiki/Chien-Shiung_Wu + "wu", + + // Rosalyn Sussman Yalow - American medical physicist and co-winner of the 1977 Nobel Prize in Physiology or Medicine for the development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow + "yalow", + + // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath + "yonath", + + // Nikolay Yegorovich Zhukovsky (Russian: Никола́й Его́рович Жуко́вский, January 17 1847 – March 17, 1921) was a Russian scientist, mathematician and engineer, and a founding father of modern aero- and hydrodynamics. Whereas contemporary scientists scoffed at the idea of human flight, Zhukovsky was the first to undertake the study of airflow. He is often called the Father of Russian Aviation.
https://en.wikipedia.org/wiki/Nikolay_Yegorovich_Zhukovsky + "zhukovsky", + } +) + +// GetRandomName generates a random name from the list of adjectives and surnames in this package +// formatted as "adjective_surname". For example 'focused_turing'. If retry is non-zero, a random +// integer between 0 and 9 will be added to the end of the name, e.g. `focused_turing3` +func GetRandomName(retry int) string { +begin: + name := fmt.Sprintf("%s_%s", left[rand.Intn(len(left))], right[rand.Intn(len(right))]) + if name == "boring_wozniak" /* Steve Wozniak is not boring */ { + goto begin + } + + if retry > 0 { + name = fmt.Sprintf("%s%d", name, rand.Intn(10)) + } + return name +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go index a018a203..8f6e0a73 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/stdcopy/stdcopy.go @@ -1,4 +1,4 @@ -package stdcopy +package stdcopy // import "github.com/docker/docker/pkg/stdcopy" import ( "bytes" @@ -21,7 +21,7 @@ const ( // Stderr represents standard error steam type. Stderr // Systemerr represents errors originating from the system that make it - // into the the multiplexed stream. + // into the multiplexed stream. Systemerr stdWriterPrefixLen = 8 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/args_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/args_windows.go new file mode 100644 index 00000000..b7c9487a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/args_windows.go @@ -0,0 +1,16 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "strings" + + "golang.org/x/sys/windows" +) + +// EscapeArgs makes a Windows-style escaped command line from a set of arguments +func EscapeArgs(args []string) string { + escapedArgs := make([]string, len(args)) + for i, a := range args { + escapedArgs[i] = windows.EscapeArg(a) + } + return strings.Join(escapedArgs, " ") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes.go index 056d1995..c26a4e24 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" @@ -27,9 +27,5 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error { } // Take platform specific action for setting create time.
- if err := setCTime(name, mtime); err != nil { - return err - } - - return nil + return setCTime(name, mtime) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go index 09d58bcb..259138a4 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "time" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go index 45428c14..d3a115ff 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/chtimes_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" import ( "time" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/errors.go index 28831898..2573d716 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/errors.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/errors.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "errors" @@ -7,4 +7,7 @@ import ( var ( // ErrNotSupportedPlatform means the platform is not supported. ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") + + // ErrNotSupportedOperatingSystem means the operating system is not supported. + ErrNotSupportedOperatingSystem = errors.New("operating system is not supported") ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/events_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/events_windows.go deleted file mode 100644 index 192e3678..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/events_windows.go +++ /dev/null @@ -1,85 +0,0 @@ -package system - -// This file implements syscalls for Win32 events which are not implemented -// in golang. - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - procCreateEvent = modkernel32.NewProc("CreateEventW") - procOpenEvent = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") -) - -// CreateEvent implements win32 CreateEventW func in golang. It will create an event object. 
-func CreateEvent(eventAttributes *windows.SecurityAttributes, manualReset bool, initialState bool, name string) (handle windows.Handle, err error) { - namep, _ := windows.UTF16PtrFromString(name) - var _p1 uint32 - if manualReset { - _p1 = 1 - } - var _p2 uint32 - if initialState { - _p2 = 1 - } - r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = e1 - } - return -} - -// OpenEvent implements win32 OpenEventW func in golang. It opens an event object. -func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle windows.Handle, err error) { - namep, _ := windows.UTF16PtrFromString(name) - var _p1 uint32 - if inheritHandle { - _p1 = 1 - } - r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep))) - use(unsafe.Pointer(namep)) - handle = windows.Handle(r0) - if handle == windows.InvalidHandle { - err = e1 - } - return -} - -// SetEvent implements win32 SetEvent func in golang. -func SetEvent(handle windows.Handle) (err error) { - return setResetPulse(handle, procSetEvent) -} - -// ResetEvent implements win32 ResetEvent func in golang. -func ResetEvent(handle windows.Handle) (err error) { - return setResetPulse(handle, procResetEvent) -} - -// PulseEvent implements win32 PulseEvent func in golang. -func PulseEvent(handle windows.Handle) (err error) { - return setResetPulse(handle, procPulseEvent) -} - -func setResetPulse(handle windows.Handle, proc *windows.LazyProc) (err error) { - r0, _, _ := proc.Call(uintptr(handle)) - if r0 != 0 { - err = syscall.Errno(r0) - } - return -} - -var temp unsafe.Pointer - -// use ensures a variable is kept alive without the GC freeing while still needed -func use(p unsafe.Pointer) { - temp = p -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/exitcode.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/exitcode.go index 60f0514b..4ba8fe35 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/exitcode.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/exitcode.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "fmt" @@ -17,17 +17,3 @@ func GetExitCode(err error) (int, error) { } return exitCode, fmt.Errorf("failed to get exit code") } - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. 
- // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys.go index 102565f7..adeb1630 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "io/ioutil" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys_windows.go index a61b53d0..3049ff38 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/filesys_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" @@ -20,8 +18,6 @@ import ( const ( // SddlAdministratorsLocalSystem is local administrators plus NT AUTHORITY\System SddlAdministratorsLocalSystem = "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" - // SddlNtvmAdministratorsLocalSystem is NT VIRTUAL MACHINE\Virtual Machines plus local administrators plus NT AUTHORITY\System - SddlNtvmAdministratorsLocalSystem = "D:P(A;OICI;GA;;;S-1-5-83-0)(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)" ) // MkdirAllWithACL is a wrapper for MkdirAll that creates a directory diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init.go index 17935088..a17597aa 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_unix.go new file mode 100644 index 00000000..c2bb0f4c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_unix.go @@ -0,0 +1,12 @@ +// +build !windows + +package system // import "github.com/docker/docker/pkg/system" + +// InitLCOW does nothing since LCOW is a windows only feature +func InitLCOW(experimental bool) { +} + +// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. +func ContainerdRuntimeSupported(_ bool, _ string) bool { + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_windows.go index 019c6644..f303aa90 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/init_windows.go @@ -1,17 +1,41 @@ -package system +package system // import "github.com/docker/docker/pkg/system" -import "os" +import ( + "os" -// LCOWSupported determines if Linux Containers on Windows are supported. 
-// Note: This feature is in development (06/17) and enabled through an -// environment variable. At a future time, it will be enabled based -// on build number. @jhowardmsft -var lcowSupported = false + "github.com/Microsoft/hcsshim/osversion" + "github.com/sirupsen/logrus" +) -func init() { - // LCOW initialization - if os.Getenv("LCOW_SUPPORTED") != "" { +var ( + // lcowSupported determines if Linux Containers on Windows are supported. + lcowSupported = false + + // containerdRuntimeSupported determines if ContainerD should be the runtime. + // As of March 2019, this is an experimental feature. + containerdRuntimeSupported = false +) + +// InitLCOW sets whether LCOW is supported or not. Requires RS5+ +func InitLCOW(experimental bool) { + v := GetOSVersion() + if experimental && v.Build >= osversion.RS5 { lcowSupported = true } - +} + +// InitContainerdRuntime sets whether to use ContainerD for runtime +// on Windows. This is an experimental feature still in development, and +// also requires an environment variable to be set (so as not to turn the +// feature on from simply experimental, which would also mean LCOW). +func InitContainerdRuntime(experimental bool, cdPath string) { + if experimental && len(cdPath) > 0 && len(os.Getenv("DOCKER_WINDOWS_CONTAINERD_RUNTIME")) > 0 { + logrus.Warnf("Using ContainerD runtime. This feature is experimental") + containerdRuntimeSupported = true + } +} + +// ContainerdRuntimeSupported returns true if the use of ContainerD runtime is supported. +func ContainerdRuntimeSupported() bool { + return containerdRuntimeSupported } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow.go new file mode 100644 index 00000000..5be3e218 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow.go @@ -0,0 +1,32 @@ +package system // import "github.com/docker/docker/pkg/system" + +import ( + "runtime" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// IsOSSupported determines if an operating system is supported by the host +func IsOSSupported(os string) bool { + if strings.EqualFold(runtime.GOOS, os) { + return true + } + if LCOWSupported() && strings.EqualFold(os, "linux") { + return true + } + return false +} + +// ValidatePlatform determines if a platform structure is valid. +// TODO This is a temporary windows-only function, should be replaced by +// comparison of worker capabilities +func ValidatePlatform(platform specs.Platform) error { + if runtime.GOOS == "windows" { + if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) { + return errors.Errorf("unsupported os %s", platform.OS) + } + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_unix.go index cff33bb4..26397fb8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_unix.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" // LCOWSupported returns true if Linux containers on Windows are supported.
func LCOWSupported() bool { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_windows.go index e54d01e6..f0139df8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lcow_windows.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" // LCOWSupported returns true if Linux containers on Windows are supported. func LCOWSupported() bool { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_unix.go index bd23c4d5..de5a1c0f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_unix.go @@ -1,8 +1,9 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( + "os" "syscall" ) @@ -13,7 +14,7 @@ import ( func Lstat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Lstat(path, s); err != nil { - return nil, err + return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} } return fromStatT(s) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_windows.go index e51df0da..359c791d 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/lstat_windows.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "os" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo.go index 3b6e947e..6667eb84 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" // MemInfo contains memory statistics of the host system. 
type MemInfo struct { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go index 385f1d5e..d79e8b07 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "bufio" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go deleted file mode 100644 index 925776e7..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,129 +0,0 @@ -// +build solaris,cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo CFLAGS: -std=c99 -// #cgo LDFLAGS: -lkstat -// #include -// #include -// #include -// #include -// #include -// #include -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("error getting system memory info %v\n", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go index 3ce019df..56f44942 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_unsupported.go @@ -1,6 +1,6 @@ -// +build !linux,!windows,!solaris +// +build !linux,!windows -package system +package system // import "github.com/docker/docker/pkg/system" // ReadMemInfo is not supported on platforms other than linux and windows. func ReadMemInfo() (*MemInfo, error) { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go index 883944a4..6ed93f2f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/meminfo_windows.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "unsafe" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod.go index af79a653..b132482e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "golang.org/x/sys/unix" @@ -18,5 +18,5 @@ func Mknod(path string, mode uint32, dev int) error { // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, // then the top 12 bits of the minor. 
func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) + return uint32(unix.Mkdev(uint32(major), uint32(minor))) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod_windows.go index 2e863c02..ec89d7a1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/mknod_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" // Mknod is not implemented on Windows. func Mknod(path string, mode uint32, dev int) error { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path.go index f634a6be..a3d957af 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path.go @@ -1,15 +1,22 @@ -package system +package system // import "github.com/docker/docker/pkg/system" -import "runtime" +import ( + "fmt" + "path/filepath" + "runtime" + "strings" + + "github.com/containerd/continuity/pathdriver" +) const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" // DefaultPathEnv is unix style list of directories to search for // executables. Each directory is separated from the next by a colon // ':' character . -func DefaultPathEnv(platform string) string { +func DefaultPathEnv(os string) string { if runtime.GOOS == "windows" { - if platform != runtime.GOOS && LCOWSupported() { + if os != runtime.GOOS { return defaultUnixPathEnv } // Deliberately empty on Windows containers on Windows as the default path will be set by @@ -19,3 +26,35 @@ func DefaultPathEnv(platform string) string { return defaultUnixPathEnv } + +// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, +// is the system drive. +// On Linux: this is a no-op. +// On Windows: this does the following: +// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. +// This is used, for example, when validating a user provided path in docker cp. +// If a drive letter is supplied, it must be the system drive. The drive letter +// is always removed. Also, it translates it to OS semantics (IOW / to \). We +// need the path in this syntax so that it can ultimately be concatenated with +// a Windows long-path which doesn't support drive-letters.
Examples: +// C: --> Fail +// C:\ --> \ +// a --> a +// /a --> \a +// d:\ --> Fail +func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) { + if runtime.GOOS != "windows" || LCOWSupported() { + return path, nil + } + + if len(path) == 2 && string(path[1]) == ":" { + return "", fmt.Errorf("No relative path specified in %q", path) + } + if !driver.IsAbs(path) || len(path) < 2 { + return filepath.FromSlash(path), nil + } + if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { + return "", fmt.Errorf("The specified path is not on the system drive (C:)") + } + return filepath.FromSlash(path[2:]), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_unix.go index f3762e69..b0b93196 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_unix.go @@ -1,9 +1,10 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. +// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { return path, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_windows.go index aab89152..188f2c29 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/path_windows.go @@ -1,33 +1,24 @@ -// +build windows +package system // import "github.com/docker/docker/pkg/system" -package system +import "syscall" -import ( - "fmt" - "path/filepath" - "strings" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("No relative path specified in %q", path) +// GetLongPathName converts Windows short pathnames to full pathnames. +// For example C:\Users\ADMIN~1 --> C:\Users\Administrator. 
+// It is a no-op on non-Windows platforms +func GetLongPathName(path string) (string, error) { + // See https://groups.google.com/forum/#!topic/golang-dev/1tufzkruoTg + p := syscall.StringToUTF16(path) + b := p // GetLongPathName says we can reuse buffer + n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil + if n > uint32(len(b)) { + b = make([]uint16, n) + _, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) + if err != nil { + return "", err + } } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("The specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil + return syscall.UTF16ToString(b), nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_unix.go index 26c8b42c..0195a891 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_unix.go @@ -1,6 +1,6 @@ -// +build linux freebsd solaris darwin +// +build linux freebsd darwin -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_windows.go new file mode 100644 index 00000000..4e70c97b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/process_windows.go @@ -0,0 +1,18 @@ +package system // import "github.com/docker/docker/pkg/system" + +import "os" + +// IsProcessAlive returns true if process with a given pid is running. +func IsProcessAlive(pid int) bool { + _, err := os.FindProcess(pid) + + return err == nil +} + +// KillProcess force-stops a process. 
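+// It looks up the process with os.FindProcess and ignores any error from +// Kill; note that on Windows os.FindProcess fails for a pid that is not +// running, which is also what makes IsProcessAlive above work.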
+func KillProcess(pid int) { + p, err := os.FindProcess(pid) + if err == nil { + p.Kill() + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/rm.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/rm.go index 101b569a..b3109918 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/rm.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/rm.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" @@ -26,7 +26,7 @@ func EnsureRemoveAll(dir string) error { // track retries exitOnErr := make(map[string]int) - maxRetry := 5 + maxRetry := 50 // Attempt to unmount anything beneath this dir first mount.RecursiveUnmount(dir) @@ -34,7 +34,7 @@ func EnsureRemoveAll(dir string) error { for { err := os.RemoveAll(dir) if err == nil { - return err + return nil } pe, ok := err.(*os.PathError) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_darwin.go index 715f05b9..c1c0ee9f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_darwin.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_darwin.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go index 715f05b9..c1c0ee9f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_freebsd.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_linux.go index 66bf6e28..98c9eb18 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_linux.go @@ -1,14 +1,14 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" // fromStatT converts a syscall.Stat_t type to a system.Stat_t type func fromStatT(s *syscall.Stat_t) (*StatT, error) { return &StatT{size: s.Size, - mode: uint32(s.Mode), + mode: s.Mode, uid: s.Uid, gid: s.Gid, - rdev: uint64(s.Rdev), + rdev: s.Rdev, mtim: s.Mtim}, nil } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_netbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_netbsd.go deleted file mode 100644 index 715f05b9..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_netbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go index b607dea9..756b92d1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_openbsd.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_solaris.go index b607dea9..756b92d1 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_solaris.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_unix.go index 91c7d121..86bb6dd5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_unix.go @@ -1,8 +1,9 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( + "os" "syscall" ) @@ -47,6 +48,11 @@ func (s StatT) Mtim() syscall.Timespec { return s.mtim } +// IsDir reports whether s describes a directory. +func (s StatT) IsDir() bool { + return s.mode&syscall.S_IFDIR != 0 +} + // Stat takes a path to a file and returns // a system.StatT type pertaining to that file. 
// @@ -54,7 +60,7 @@ func (s StatT) Mtim() syscall.Timespec { func Stat(path string) (*StatT, error) { s := &syscall.Stat_t{} if err := syscall.Stat(path, s); err != nil { - return nil, err + return nil, &os.PathError{Op: "Stat", Path: path, Err: err} } return fromStatT(s) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_windows.go index 6c639726..b2456cb8 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/stat_windows.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "os" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_unix.go index 49dbdd37..919a412a 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_unix.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_unix.go @@ -1,6 +1,6 @@ // +build linux freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import "golang.org/x/sys/unix" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index 23e9b207..4ae92fa6 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -1,16 +1,63 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( + "fmt" + "syscall" "unsafe" "github.com/sirupsen/logrus" "golang.org/x/sys/windows" ) +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + PROCESS_TRUST_LABEL_SECURITY_INFORMATION = 0x00000080 + ACCESS_FILTER_SECURITY_INFORMATION = 0x00000100 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +const ( + SE_UNKNOWN_OBJECT_TYPE = iota + SE_FILE_OBJECT + SE_SERVICE + SE_PRINTER + SE_REGISTRY_KEY + SE_LMSHARE + SE_KERNEL_OBJECT + SE_WINDOW_OBJECT + SE_DS_OBJECT + SE_DS_OBJECT_ALL + SE_PROVIDER_DEFINED_OBJECT + SE_WMIGUID_OBJECT + SE_REGISTRY_WOW64_32KEY +) + +const ( + SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" +) + +const ( + ContainerAdministratorSidString = "S-1-5-93-2-1" + ContainerUserSidString = "S-1-5-93-2-2" +) + var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") + ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") + modadvapi32 = windows.NewLazySystemDLL("advapi32.dll") + procGetVersionExW = modkernel32.NewProc("GetVersionExW") + procGetProductInfo = 
modkernel32.NewProc("GetProductInfo") + procSetNamedSecurityInfo = modadvapi32.NewProc("SetNamedSecurityInfoW") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") ) // OSVersion is a wrapper for Windows version information @@ -53,6 +100,10 @@ func GetOSVersion() OSVersion { return osv } +func (osv OSVersion) ToString() string { + return fmt.Sprintf("%d.%d.%d", osv.MajorVersion, osv.MinorVersion, osv.Build) +} + // IsWindowsClient returns true if the SKU is client // @engine maintainers - this function should not be removed or modified as it // is used to enforce licensing restrictions on Windows. @@ -120,3 +171,23 @@ func HasWin32KSupport() bool { // APIs. return ntuserApiset.Load() == nil } + +func SetNamedSecurityInfo(objectName *uint16, objectType uint32, securityInformation uint32, sidOwner *windows.SID, sidGroup *windows.SID, dacl *byte, sacl *byte) (result error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfo.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(sidOwner)), uintptr(unsafe.Pointer(sidGroup)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + result = syscall.Errno(r0) + } + return +} + +func GetSecurityDescriptorDacl(securityDescriptor *byte, daclPresent *uint32, dacl **byte, daclDefaulted *uint32) (result error) { + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(securityDescriptor)), uintptr(unsafe.Pointer(daclPresent)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclDefaulted)), 0, 0) + if r1 == 0 { + if e1 != 0 { + result = syscall.Errno(e1) + } else { + result = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask.go index 5a10eda5..9912a2ba 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask.go @@ -1,6 +1,6 @@ // +build !windows -package system +package system // import "github.com/docker/docker/pkg/system" import ( "golang.org/x/sys/unix" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask_windows.go index 13f1de17..fc62388c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/umask_windows.go @@ -1,6 +1,4 @@ -// +build windows - -package system +package system // import "github.com/docker/docker/pkg/system" // Umask is not supported on the windows platform. 
func Umask(newmask int) (oldmask int, err error) { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go index 6a775243..ed1b9fad 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_freebsd.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_linux.go index edc588a6..0afe8545 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_linux.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import ( "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go index 13971454..095e072e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/utimes_unsupported.go @@ -1,6 +1,6 @@ // +build !linux,!freebsd -package system +package system // import "github.com/docker/docker/pkg/system" import "syscall" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go index 98b111be..66d4895b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_linux.go @@ -1,4 +1,4 @@ -package system +package system // import "github.com/docker/docker/pkg/system" import "golang.org/x/sys/unix" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go index 0114f222..d780a90c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/system/xattrs_unsupported.go @@ -1,6 +1,6 @@ // +build !linux -package system +package system // import "github.com/docker/docker/pkg/system" // Lgetxattr is not supported on platforms other than linux. func Lgetxattr(path string, attr string) ([]byte, error) { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go deleted file mode 100644 index e4dec3a5..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.8 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. 
-func Clone(c *tls.Config) *tls.Config { - return c.Clone() -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go deleted file mode 100644 index 0b816650..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go16.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build go1.6,!go1.7 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.6 that doesn't include this method in stdlib. -func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go deleted file mode 100644 index 0d5b448f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/docker/docker/pkg/tlsconfig/tlsconfig_clone_go17.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.7,!go1.8 - -package tlsconfig - -import "crypto/tls" - -// Clone returns a clone of tls.Config. This function is provided for -// compatibility for go1.7 that doesn't include this method in stdlib. -func Clone(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/CONTRIBUTING.md new file mode 100644 index 00000000..d813af77 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/CONTRIBUTING.md @@ -0,0 +1,70 @@ +# Contributing to Docker open source projects + +Want to hack on go-events? Awesome! Here are instructions to get you started. + +go-events is part of the [Docker](https://www.docker.com) project, and +follows the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. 
+ +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith <joe.smith@email.com> + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/LICENSE new file mode 100644 index 00000000..6d630cf5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/MAINTAINERS b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/MAINTAINERS new file mode 100644 index 00000000..e414d82e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/MAINTAINERS @@ -0,0 +1,46 @@ +# go-events maintainers file +# +# This file describes who runs the docker/go-events project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aaronlehmann", + "aluzzardi", + "lk4d4", + "stevvooe", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aaronlehmann] + Name = "Aaron Lehmann" + Email = "aaron.lehmann@docker.com" + GitHub = "aaronlehmann" + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.lk4d4] + Name = "Alexander Morozov" + Email = "lk4d4@docker.com" + GitHub = "lk4d4" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/README.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/README.md new file mode 100644 index 00000000..0acafc27 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/README.md @@ -0,0 +1,117 @@ +# Docker Events Package + +[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) +[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) + +The Docker `events` package implements a composable event distribution package +for Go. + +Originally created to implement the [notifications in Docker Registry +2](https://github.com/docker/distribution/blob/master/docs/notifications.md), +we've found the pattern to be useful in other applications. This package is +most of the same code with slightly updated interfaces. Much of the internals +have been made available. + +## Usage + +The `events` package centers around a `Sink` type. Events are written with +calls to `Sink.Write(event Event)`. Sinks can be wired up in various +configurations to achieve interesting behavior. + +The canonical example is that employed by the +[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) +package. Let's say we have a type `httpSink` where we'd like to queue +notifications. As a rule, it should send a single http request and return an +error if it fails: + +```go +func (h *httpSink) Write(event Event) error { + p, err := json.Marshal(event) + if err != nil { + return err + } + body := bytes.NewReader(p) + resp, err := h.client.Post(h.url, "application/json", body) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode != 200 { + return errors.New("unexpected status") + } + + return nil +} + +// implement (*httpSink).Close() +``` + +With just that, we can start using components from this package.
One can call +`(*httpSink).Write` to send events as the body of a post request to a +configured URL. + +### Retries + +HTTP can be unreliable. The first feature we'd like is to have some retry: + +```go +hs := newHTTPSink(/*...*/) +retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) +``` + +We now have a sink that will retry events against the `httpSink` until they +succeed. The retry will back off for one second after 5 consecutive failures +using the breaker strategy. + +### Queues + +This isn't quite enough. We want a sink that doesn't block while we are +waiting for events to be sent. Let's add a `Queue`: + +```go +queue := NewQueue(retry) +``` + +Now, we have an unbounded queue that will work through all events sent with +`(*Queue).Write`. Events can be added asynchronously to the queue without +blocking the current execution path. This is ideal for use in an http request. + +### Broadcast + +It usually turns out that you want to send to more than one listener. We can +use `Broadcaster` to support this: + +```go +var broadcast = NewBroadcaster() // make it available somewhere in your application. +broadcast.Add(queue) // add your queue! +broadcast.Add(queue2) // and another! +``` + +With the above, we can now call `broadcast.Write` in our http handlers and have +all the events distributed to each queue. Because the events are queued, no +listener blocks another. + +### Extending + +For the most part, the above is sufficient for a lot of applications. However, +extending the above functionality can be done by implementing your own `Sink`. The +behavior and semantics of the sink can be completely dependent on the +application requirements. The interface is provided below for reference: + +```go +type Sink interface { + Write(Event) error + Close() error +} +``` + +Application behavior can be controlled by how `Write` behaves. The examples +above are designed to queue the message and return as quickly as possible. +Other implementations may block until the event is committed to durable +storage. + +## Copyright and license + +Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, +Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/broadcast.go new file mode 100644 index 00000000..5120078d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/broadcast.go @@ -0,0 +1,178 @@ +package events + +import ( + "fmt" + "sync" + + "github.com/sirupsen/logrus" +) + +// Broadcaster sends events to multiple, reliable Sinks. The goal of this +// component is to dispatch events to configured endpoints. Reliability can be +// provided by wrapping incoming sinks. +type Broadcaster struct { + sinks []Sink + events chan Event + adds chan configureRequest + removes chan configureRequest + + shutdown chan struct{} + closed chan struct{} + once sync.Once +} + +// NewBroadcaster returns a Broadcaster that dispatches events to one or more +// sinks. The +// broadcaster behavior will be affected by the properties of the sink. +// Generally, the sink should accept all messages and deal with reliability on +// its own. Use of Queue and RetryingSink is appropriate here.
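+// +// A minimal usage sketch (illustrative only; q1 and q2 are hypothetical +// sinks, e.g. Queues wrapping retrying sinks): +// +//	b := NewBroadcaster(q1) +//	_ = b.Add(q2)   // register a second sink +//	_ = b.Write(ev) // ev is fanned out to q1 and q2 +//	_ = b.Close()   // flush and close the underlying sinks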
+func NewBroadcaster(sinks ...Sink) *Broadcaster { + b := Broadcaster{ + sinks: sinks, + events: make(chan Event), + adds: make(chan configureRequest), + removes: make(chan configureRequest), + shutdown: make(chan struct{}), + closed: make(chan struct{}), + } + + // Start the broadcaster + go b.run() + + return &b +} + +// Write accepts an event to be dispatched to all sinks. This method will never +// fail and should never block (hopefully!). The caller cedes the memory to the +// broadcaster and should not modify it after calling write. +func (b *Broadcaster) Write(event Event) error { + select { + case b.events <- event: + case <-b.closed: + return ErrSinkClosed + } + return nil +} + +// Add the sink to the broadcaster. +// +// The provided sink must be comparable with equality. Typically, this just +// works with a regular pointer type. +func (b *Broadcaster) Add(sink Sink) error { + return b.configure(b.adds, sink) +} + +// Remove the provided sink. +func (b *Broadcaster) Remove(sink Sink) error { + return b.configure(b.removes, sink) +} + +type configureRequest struct { + sink Sink + response chan error +} + +func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { + response := make(chan error, 1) + + for { + select { + case ch <- configureRequest{ + sink: sink, + response: response}: + ch = nil + case err := <-response: + return err + case <-b.closed: + return ErrSinkClosed + } + } +} + +// Close the broadcaster, ensuring that all messages are flushed to the +// underlying sink before returning. +func (b *Broadcaster) Close() error { + b.once.Do(func() { + close(b.shutdown) + }) + + <-b.closed + return nil +} + +// run is the main broadcast loop, started when the broadcaster is created. +// Under normal conditions, it waits for events on the event channel. After +// Close is called, this goroutine will exit. +func (b *Broadcaster) run() { + defer close(b.closed) + remove := func(target Sink) { + for i, sink := range b.sinks { + if sink == target { + b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) + break + } + } + } + + for { + select { + case event := <-b.events: + for _, sink := range b.sinks { + if err := sink.Write(event); err != nil { + if err == ErrSinkClosed { + // remove closed sinks + remove(sink) + continue + } + logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: dropping event") + } + } + case request := <-b.adds: + // while we have to iterate for add/remove, common iteration for + // send is faster against slice. + + var found bool + for _, sink := range b.sinks { + if request.sink == sink { + found = true + break + } + } + + if !found { + b.sinks = append(b.sinks, request.sink) + } + // b.sinks[request.sink] = struct{}{} + request.response <- nil + case request := <-b.removes: + remove(request.sink) + request.response <- nil + case <-b.shutdown: + // close all the underlying sinks + for _, sink := range b.sinks { + if err := sink.Close(); err != nil && err != ErrSinkClosed { + logrus.WithField("events.sink", sink).WithError(err). + Errorf("broadcaster: closing sink failed") + } + } + return + } + } +} + +func (b *Broadcaster) String() string { + // Serialize copy of this broadcaster without the sync.Once, to avoid + // a data race. 
+ + b2 := map[string]interface{}{ + "sinks": b.sinks, + "events": b.events, + "adds": b.adds, + "removes": b.removes, + + "shutdown": b.shutdown, + "closed": b.closed, + } + + return fmt.Sprint(b2) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/channel.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/channel.go new file mode 100644 index 00000000..802cf51f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/channel.go @@ -0,0 +1,61 @@ +package events + +import ( + "fmt" + "sync" +) + +// Channel provides a sink that can be listened on. The writer and channel +// listener must operate in separate goroutines. +// +// Consumers should listen on Channel.C until Closed is closed. +type Channel struct { + C chan Event + + closed chan struct{} + once sync.Once +} + +// NewChannel returns a channel. If buffer is zero, the channel is +// unbuffered. +func NewChannel(buffer int) *Channel { + return &Channel{ + C: make(chan Event, buffer), + closed: make(chan struct{}), + } +} + +// Done returns a channel that will always proceed once the sink is closed. +func (ch *Channel) Done() chan struct{} { + return ch.closed +} + +// Write the event to the channel. Must be called in a separate goroutine from +// the listener. +func (ch *Channel) Write(event Event) error { + select { + case ch.C <- event: + return nil + case <-ch.closed: + return ErrSinkClosed + } +} + +// Close the channel sink. +func (ch *Channel) Close() error { + ch.once.Do(func() { + close(ch.closed) + }) + + return nil +} + +func (ch *Channel) String() string { + // Serialize a copy of the Channel that doesn't contain the sync.Once, + // to avoid a data race. + ch2 := map[string]interface{}{ + "C": ch.C, + "closed": ch.closed, + } + return fmt.Sprint(ch2) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/errors.go new file mode 100644 index 00000000..56db7c25 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/errors.go @@ -0,0 +1,10 @@ +package events + +import "fmt" + +var ( + // ErrSinkClosed is returned if a write is issued to a sink that has been + // closed. If encountered, the error should be considered terminal and + // retries will not be successful. + ErrSinkClosed = fmt.Errorf("events: sink closed") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/event.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/event.go new file mode 100644 index 00000000..f0f1d9ea --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/event.go @@ -0,0 +1,15 @@ +package events + +// Event marks items that can be sent as events. +type Event interface{} + +// Sink accepts and sends events. +type Sink interface { + // Write an event to the Sink. If no error is returned, the caller will + // assume that all events have been committed to the sink. If an error is + // received, the caller may retry sending the event. + Write(event Event) error + + // Close the sink, possibly waiting for pending events to flush. 
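+	// Note: the implementations in this package (Broadcaster, Channel, +	// Filter, Queue, RetryingSink) make Close safe to call more than once, +	// but the interface itself does not require idempotency.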
+	Close() error +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/filter.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/filter.go new file mode 100644 index 00000000..e6c0eb69 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/filter.go @@ -0,0 +1,52 @@ +package events + +// Matcher matches events. +type Matcher interface { + Match(event Event) bool +} + +// MatcherFunc implements matcher with just a function. +type MatcherFunc func(event Event) bool + +// Match calls the wrapped function. +func (fn MatcherFunc) Match(event Event) bool { + return fn(event) +} + +// Filter provides an event sink that sends only events that are accepted by a +// Matcher. No methods on filter are goroutine safe. +type Filter struct { + dst Sink + matcher Matcher + closed bool +} + +// NewFilter returns a new filter that will send events to dst that return +// true for Matcher. +func NewFilter(dst Sink, matcher Matcher) Sink { + return &Filter{dst: dst, matcher: matcher} +} + +// Write an event to the filter. +func (f *Filter) Write(event Event) error { + if f.closed { + return ErrSinkClosed + } + + if f.matcher.Match(event) { + return f.dst.Write(event) + } + + return nil +} + +// Close the filter and allow no more events to pass through. +func (f *Filter) Close() error { + // TODO(stevvooe): Not all sinks should have Close. + if f.closed { + return nil + } + + f.closed = true + return f.dst.Close() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/queue.go new file mode 100644 index 00000000..4bb770af --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/queue.go @@ -0,0 +1,111 @@ +package events + +import ( + "container/list" + "sync" + + "github.com/sirupsen/logrus" +) + +// Queue accepts all messages into a queue for asynchronous consumption +// by a sink. It is unbounded and thread safe but the sink must be reliable or +// events will be dropped. +type Queue struct { + dst Sink + events *list.List + cond *sync.Cond + mu sync.Mutex + closed bool +} + +// NewQueue returns a queue to the provided Sink dst. +func NewQueue(dst Sink) *Queue { + eq := Queue{ + dst: dst, + events: list.New(), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed. +func (eq *Queue) Write(event Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return ErrSinkClosed + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Close shuts down the event queue, flushing pending events before closing +// the destination sink. +func (eq *Queue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *Queue) run() { + for { + event := eq.next() + + if event == nil { + return // a nil event means the queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate.
+ // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return an event. When closed, nil will be returned. +func (eq *Queue) next() Event { + eq.mu.Lock() + defer eq.mu.Unlock() + + for eq.events.Len() < 1 { + if eq.closed { + eq.cond.Broadcast() + return nil + } + + eq.cond.Wait() + } + + front := eq.events.Front() + block := front.Value.(Event) + eq.events.Remove(front) + + return block +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/retry.go new file mode 100644 index 00000000..b7f0a542 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-events/retry.go @@ -0,0 +1,260 @@ +package events + +import ( + "fmt" + "math/rand" + "sync" + "sync/atomic" + "time" + + "github.com/sirupsen/logrus" +) + +// RetryingSink retries the write until success or an ErrSinkClosed is +// returned. The underlying sink must have a nonzero probability of succeeding +// or the sink will +// block. Retry is configured with a RetryStrategy. Concurrent calls to a +// retrying sink are serialized through the sink, meaning that if one is +// in-flight, another will not proceed. +type RetryingSink struct { + sink Sink + strategy RetryStrategy + closed chan struct{} + once sync.Once +} + +// NewRetryingSink returns a sink that will retry writes to a sink, backing +// off on failure. The provided RetryStrategy adjusts the retry and backoff +// behavior. +func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink { + rs := &RetryingSink{ + sink: sink, + strategy: strategy, + closed: make(chan struct{}), + } + + return rs +} + +// Write attempts to flush the events to the downstream sink until it succeeds +// or the sink is closed. +func (rs *RetryingSink) Write(event Event) error { + logger := logrus.WithField("event", event) + +retry: + select { + case <-rs.closed: + return ErrSinkClosed + default: + } + + if backoff := rs.strategy.Proceed(event); backoff > 0 { + select { + case <-time.After(backoff): + // TODO(stevvooe): This branch holds up the next try. Before, we + // would simply break to the "retry" label and then possibly wait + // again. However, this requires all retry strategies to have a + // large probability of probing the sink for success, rather than + // just backing off and sending the request. + case <-rs.closed: + return ErrSinkClosed + } + } + + if err := rs.sink.Write(event); err != nil { + if err == ErrSinkClosed { + // terminal! + return err + } + + logger := logger.WithError(err) // shadow!! + + if rs.strategy.Failure(event, err) { + logger.Errorf("retryingsink: dropped event") + return nil + } + + logger.Errorf("retryingsink: error writing event, retrying") + goto retry + } + + rs.strategy.Success(event) + return nil +} + +// Close closes the sink and the underlying sink. +func (rs *RetryingSink) Close() error { + rs.once.Do(func() { + close(rs.closed) + }) + + return nil +} + +func (rs *RetryingSink) String() string { + // Serialize a copy of the RetryingSink without the sync.Once, to avoid + // a data race.
+	rs2 := map[string]interface{}{ + "sink": rs.sink, + "strategy": rs.strategy, + "closed": rs.closed, + } + return fmt.Sprint(rs2) +} + +// RetryStrategy defines a strategy for retrying event sink writes. +// +// All methods should be goroutine safe. +type RetryStrategy interface { + // Proceed is called before every event send. If proceed returns a + // positive, non-zero duration, the retryer will back off by the provided + // duration. + // + // An event is provided, but may be ignored. + Proceed(event Event) time.Duration + + // Failure reports a failure to the strategy. If this method returns true, + // the event should be dropped. + Failure(event Event, err error) bool + + // Success should be called when an event is sent successfully. + Success(event Event) +} + +// Breaker implements a circuit breaker retry strategy. +// +// The current implementation never drops events. +type Breaker struct { + threshold int + recent int + last time.Time + backoff time.Duration // time after which we retry after failure. + mu sync.Mutex +} + +var _ RetryStrategy = &Breaker{} + +// NewBreaker returns a breaker that will back off after the threshold has been +// tripped. A Breaker is thread safe and may be shared by many goroutines. +func NewBreaker(threshold int, backoff time.Duration) *Breaker { + return &Breaker{ + threshold: threshold, + backoff: backoff, + } +} + +// Proceed checks the failures against the threshold. +func (b *Breaker) Proceed(event Event) time.Duration { + b.mu.Lock() + defer b.mu.Unlock() + + if b.recent < b.threshold { + return 0 + } + + return b.last.Add(b.backoff).Sub(time.Now()) +} + +// Success resets the breaker. +func (b *Breaker) Success(event Event) { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent = 0 + b.last = time.Time{} +} + +// Failure records the failure and latest failure time. +func (b *Breaker) Failure(event Event, err error) bool { + b.mu.Lock() + defer b.mu.Unlock() + + b.recent++ + b.last = time.Now().UTC() + return false // never drop events. +} + +var ( + // DefaultExponentialBackoffConfig provides a default configuration for + // exponential backoff. + DefaultExponentialBackoffConfig = ExponentialBackoffConfig{ + Base: time.Second, + Factor: time.Second, + Max: 20 * time.Second, + } +) + +// ExponentialBackoffConfig configures backoff parameters. +// +// Note that these parameters operate on the upper bound for choosing a random +// value. For example, at Base=1s, a random value in [0,1s) will be chosen for +// the backoff value. +type ExponentialBackoffConfig struct { + // Base is the minimum bound for backing off after failure. + Base time.Duration + + // Factor sets the amount of time by which the backoff grows with each + // failure. + Factor time.Duration + + // Max is the absolute maximum bound for a single backoff. + Max time.Duration +} + +// ExponentialBackoff implements random backoff with exponentially increasing +// bounds as the number of consecutive failures increases. +type ExponentialBackoff struct { + failures uint64 // consecutive failure counter (needs to be 64-bit aligned) + config ExponentialBackoffConfig +} + +// NewExponentialBackoff returns an exponential backoff strategy with the +// desired config. Zero-valued Factor and Max fields fall back to the defaults. +func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff { + return &ExponentialBackoff{ + config: config, + } +} + +// Proceed returns the next randomly bound exponential backoff time.
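+// +// Illustration (not in the upstream source): with +// DefaultExponentialBackoffConfig (Base=1s, Factor=1s, Max=20s), the bound +// after n consecutive failures is 1s + 1s*2^(n-1), capped at Max, and the +// returned duration is drawn uniformly from [0, bound).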
+func (b *ExponentialBackoff) Proceed(event Event) time.Duration { + return b.backoff(atomic.LoadUint64(&b.failures)) +} + +// Success resets the failures counter. +func (b *ExponentialBackoff) Success(event Event) { + atomic.StoreUint64(&b.failures, 0) +} + +// Failure increments the failure counter. +func (b *ExponentialBackoff) Failure(event Event, err error) bool { + atomic.AddUint64(&b.failures, 1) + return false +} + +// backoff calculates the amount of time to wait based on the number of +// consecutive failures. +func (b *ExponentialBackoff) backoff(failures uint64) time.Duration { + if failures <= 0 { + // proceed normally when there are no failures. + return 0 + } + + factor := b.config.Factor + if factor <= 0 { + factor = DefaultExponentialBackoffConfig.Factor + } + + backoff := b.config.Base + factor*time.Duration(1<<(failures-1)) + + max := b.config.Max + if max <= 0 { + max = DefaultExponentialBackoffConfig.Max + } + + if backoff > max || backoff < 0 { + backoff = max + } + + // Choose a uniformly distributed value from [0, backoff). + return time.Duration(rand.Int63n(int64(backoff))) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/CONTRIBUTING.md new file mode 100644 index 00000000..b8a512c3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. 
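For orientation while reviewing the vendored retry.go above, the following is a minimal, self-contained sketch of how its pieces compose. It is illustrative only and not part of the vendored sources: `flakySink` is a hypothetical type standing in for any implementation of the package's `Sink` interface, while `NewRetryingSink`, `NewExponentialBackoff`, and `DefaultExponentialBackoffConfig` come straight from the file above (imported here under its upstream path).

```go
package main

import (
	"errors"
	"fmt"

	events "github.com/docker/go-events"
)

// flakySink is a hypothetical Sink that fails its first two writes, to
// exercise the retry/backoff path of RetryingSink.
type flakySink struct {
	remainingFailures int
}

func (s *flakySink) Write(event events.Event) error {
	if s.remainingFailures > 0 {
		s.remainingFailures--
		return errors.New("transient write failure")
	}
	fmt.Println("delivered:", event)
	return nil
}

func (s *flakySink) Close() error { return nil }

func main() {
	// Wrap the flaky sink; failed writes back off by a random duration with
	// exponentially growing bounds (Base=1s, Factor=1s, Max=20s by default).
	sink := events.NewRetryingSink(
		&flakySink{remainingFailures: 2},
		events.NewExponentialBackoff(events.DefaultExponentialBackoffConfig),
	)
	defer sink.Close()

	// Write blocks until the event is delivered or the sink is closed.
	if err := sink.Write("hello"); err != nil {
		fmt.Println("write failed:", err)
	}
}
```

Because `ExponentialBackoff.Failure` always returns false, `Write` here retries until the flaky sink finally accepts the event; a strategy that returned true from `Failure` would instead cause `RetryingSink.Write` to drop it.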
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/LICENSE new file mode 100644 index 00000000..8f3fee62 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. 
if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. 
Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/NOTICE new file mode 100644 index 00000000..8915f027 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/README.md
new file mode 100644
index 00000000..a9e947cb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/README.md
@@ -0,0 +1,91 @@
+# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
+
+This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+## Best Practices
+
+This package is meant to be used for collecting metrics in Docker projects.
+It is not meant to be used as a replacement for the prometheus client but to help enforce consistent naming across metrics collected.
+If you have not already read the prometheus best practices around naming and labels you can read the page [here](https://prometheus.io/docs/practices/naming/).
+
+The following are a few Docker specific rules that will help you name and work with metrics in your project.
+
+1. Namespace and Subsystem
+
+This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
+
+```go
+ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
+	"version": dockerversion.Version,
+	"commit":  dockerversion.GitCommit,
+})
+```
+
+In the example above we are creating metrics for the Docker engine's daemon package.
+`engine` would be the namespace in this example where `daemon` is the subsystem or package where we are collecting the metrics.
+
+A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
+
+2. Declaring your Metrics
+
+Try to keep all your metric declarations in one file.
+This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
+
+3. Use labels instead of multiple metrics
+
+Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
+If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.
+
+
+```go
+containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
+```
+
+The last parameter is the label name or key.
+When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
+
+```go
+containerActions.WithValues("create").UpdateSince(start)
+```
+
+4. Always use a unit
+
+The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
+For a timer, the standard unit is seconds and a counter's standard unit is a total.
+For gauges you must provide the unit.
+This package provides a standard set of units for use within the Docker projects.
+
+```go
+Nanoseconds Unit = "nanoseconds"
+Seconds     Unit = "seconds"
+Bytes       Unit = "bytes"
+Total       Unit = "total"
+```
+
+If you need to use a unit but it is not defined in the package please open a PR to add it but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.
+
+## Docs
+
+Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
+
+## HTTP Metrics
+
+To instrument an http handler, you can wrap the code like this:
+
+```go
+namespace := metrics.NewNamespace("docker_distribution", "http", nil)
+httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
+metrics.Register(namespace)
+instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
+```
+Note: the `handler` const label is set from the handler name passed to `NewDefaultHttpMetrics`.
+
+## Additional Metrics
+
+Additional metrics are also defined here that are not available in the prometheus client.
+If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
+
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/counter.go
new file mode 100644
index 00000000..fe36316a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/counter.go
@@ -0,0 +1,52 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Counter is a metric that can only increment its current count
+type Counter interface {
+	// Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
+	//
+	// If len(vs) == 0, increments the counter by 1.
+	Inc(vs ...float64)
+}
+
+// LabeledCounter is a counter that must have labels populated before use.
+type LabeledCounter interface {
+	WithValues(vs ...string) Counter
+}
+
+type labeledCounter struct {
+	pc *prometheus.CounterVec
+}
+
+func (lc *labeledCounter) WithValues(vs ...string) Counter {
+	return &counter{pc: lc.pc.WithLabelValues(vs...)}
+}
+
+func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
+	lc.pc.Describe(ch)
+}
+
+func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
+	lc.pc.Collect(ch)
+}
+
+type counter struct {
+	pc prometheus.Counter
+}
+
+func (c *counter) Inc(vs ...float64) {
+	if len(vs) == 0 {
+		c.pc.Inc()
+	}
+
+	c.pc.Add(sumFloat64(vs...))
+}
+
+func (c *counter) Describe(ch chan<- *prometheus.Desc) {
+	c.pc.Describe(ch)
+}
+
+func (c *counter) Collect(ch chan<- prometheus.Metric) {
+	c.pc.Collect(ch)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/docs.go
new file mode 100644
index 00000000..8fbdfc69
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/docs.go
@@ -0,0 +1,3 @@
+// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+package metrics
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/gauge.go
new file mode 100644
index 00000000..74296e87
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/gauge.go
@@ -0,0 +1,72 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Gauge is a metric that allows incrementing and decrementing a value
+type Gauge interface {
+	Inc(...float64)
+	Dec(...float64)
+
+	// Add adds the provided value to the gauge's current value
+	Add(float64)
+
+	// Set replaces the gauge's current value with the provided value
+	Set(float64)
+}
+
+// LabeledGauge describes a gauge that must have values populated before use.
+type LabeledGauge interface {
+	WithValues(labels ...string) Gauge
+}
+
+type labeledGauge struct {
+	pg *prometheus.GaugeVec
+}
+
+func (lg *labeledGauge) WithValues(labels ...string) Gauge {
+	return &gauge{pg: lg.pg.WithLabelValues(labels...)}
+}
+
+func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
+	lg.pg.Describe(c)
+}
+
+func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
+	lg.pg.Collect(c)
+}
+
+type gauge struct {
+	pg prometheus.Gauge
+}
+
+func (g *gauge) Inc(vs ...float64) {
+	if len(vs) == 0 {
+		g.pg.Inc()
+	}
+
+	g.Add(sumFloat64(vs...))
+}
+
+func (g *gauge) Dec(vs ...float64) {
+	if len(vs) == 0 {
+		g.pg.Dec()
+	}
+
+	g.Add(-sumFloat64(vs...))
+}
+
+func (g *gauge) Add(v float64) {
+	g.pg.Add(v)
+}
+
+func (g *gauge) Set(v float64) {
+	g.pg.Set(v)
+}
+
+func (g *gauge) Describe(c chan<- *prometheus.Desc) {
+	g.pg.Describe(c)
+}
+
+func (g *gauge) Collect(c chan<- prometheus.Metric) {
+	g.pg.Collect(c)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/go.mod
new file mode 100644
index 00000000..7e328f0c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/go.mod
@@ -0,0 +1,5 @@
+module github.com/docker/go-metrics
+
+go 1.11
+
+require github.com/prometheus/client_golang v1.1.0
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/go.sum
new file mode 100644
index 00000000..b8fb9d07
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/go.sum
@@ -0,0 +1,67 @@
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
+github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
+github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-logfmt/logfmt v0.4.0/go.mod
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0 h1:BQ53HtBmfOitExawJ6LokA4x8ov/z0SYYb0+HxJfRI8= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3 
h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/handler.go new file mode 100644 index 00000000..05601e9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/handler.go @@ -0,0 +1,74 @@ +package metrics + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +// HTTPHandlerOpts describes a set of configurable options of http metrics +type HTTPHandlerOpts struct { + DurationBuckets []float64 + RequestSizeBuckets []float64 + ResponseSizeBuckets []float64 +} + +const ( + InstrumentHandlerResponseSize = iota + InstrumentHandlerRequestSize + InstrumentHandlerDuration + InstrumentHandlerCounter + InstrumentHandlerInFlight +) + +type HTTPMetric struct { + prometheus.Collector + handlerType int +} + +var ( + defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60} + defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) //1K to 4G + defaultResponseSizeBuckets = defaultRequestSizeBuckets +) + +// Handler returns the global http.Handler that provides the prometheus +// metrics format on GET requests. This handler is no longer instrumented. 
+func Handler() http.Handler {
+	return promhttp.Handler()
+}
+
+func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
+	return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
+}
+
+func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
+	var handler http.Handler
+	handler = http.HandlerFunc(handlerFunc)
+	for _, metric := range metrics {
+		switch metric.handlerType {
+		case InstrumentHandlerResponseSize:
+			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+				handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
+			}
+		case InstrumentHandlerRequestSize:
+			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+				handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
+			}
+		case InstrumentHandlerDuration:
+			if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+				handler = promhttp.InstrumentHandlerDuration(collector, handler)
+			}
+		case InstrumentHandlerCounter:
+			if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
+				handler = promhttp.InstrumentHandlerCounter(collector, handler)
+			}
+		case InstrumentHandlerInFlight:
+			if collector, ok := metric.Collector.(prometheus.Gauge); ok {
+				handler = promhttp.InstrumentHandlerInFlight(collector, handler)
+			}
+		}
+	}
+	return handler.ServeHTTP
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/helpers.go
new file mode 100644
index 00000000..68b7f51b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/helpers.go
@@ -0,0 +1,10 @@
+package metrics
+
+func sumFloat64(vs ...float64) float64 {
+	var sum float64
+	for _, v := range vs {
+		sum += v
+	}
+
+	return sum
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/namespace.go
new file mode 100644
index 00000000..79831545
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/namespace.go
@@ -0,0 +1,315 @@
+package metrics
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+type Labels map[string]string
+
+// NewNamespace returns a namespace that is responsible for managing a collection of
+// metrics for a particular namespace and subsystem
+//
+// labels allows const labels to be added to all metrics created in this namespace
+// and are commonly used for data like application version and git commit
+func NewNamespace(name, subsystem string, labels Labels) *Namespace {
+	if labels == nil {
+		labels = make(map[string]string)
+	}
+	return &Namespace{
+		name:      name,
+		subsystem: subsystem,
+		labels:    labels,
+	}
+}
+
+// Namespace describes a set of metrics that share a namespace and subsystem.
+type Namespace struct {
+	name      string
+	subsystem string
+	labels    Labels
+	mu        sync.Mutex
+	metrics   []prometheus.Collector
+}
+
+// WithConstLabels returns a namespace with the provided set of labels merged
+// with the existing constant labels on the namespace.
+//
+// Only metrics created with the returned namespace will get the new constant
+// labels. The returned namespace must be registered separately.
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
+func mergeLabels(lbs ...Labels) Labels { + merged := make(Labels) + + for _, target := range lbs { + for k, v := range target { + merged[k] = v + } + } + + return merged +} + +func makeName(name string, unit Unit) string { + if unit == "" { + return name + } + + return fmt.Sprintf("%s_%s", name, unit) +} + +func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: defaultDurationBuckets, + RequestSizeBuckets: defaultResponseSizeBuckets, + ResponseSizeBuckets: defaultResponseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric { + return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{ + DurationBuckets: durationBuckets, + RequestSizeBuckets: requestSizeBuckets, + ResponseSizeBuckets: responseSizeBuckets, + }) +} + +func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric { + var httpMetrics []*HTTPMetric + inFlightMetric := n.NewInFlightGaugeMetric(handlerName) + requestTotalMetric := n.NewRequestTotalMetric(handlerName) + requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets) + requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets) + responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets) + httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric) + return httpMetrics +} + +func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "in_flight_requests", + Help: "The in-flight HTTP requests", + ConstLabels: prometheus.Labels(labels), + }) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerInFlight, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric { + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + metric := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: prometheus.Labels(labels), + }, + []string{"code", "method"}, + ) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerCounter, + } + n.Add(httpMetric) + return httpMetric +} +func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("DurationBuckets must be provided") + } + labels := prometheus.Labels(n.labels) + labels["handler"] = handlerName + opts := prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: "request_duration_seconds", + Help: "The HTTP request latencies in seconds.", + Buckets: buckets, + ConstLabels: prometheus.Labels(labels), + } + metric := prometheus.NewHistogramVec(opts, []string{"method"}) + httpMetric := &HTTPMetric{ + Collector: metric, + handlerType: InstrumentHandlerDuration, + } + n.Add(httpMetric) + return httpMetric +} + +func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric { + if len(buckets) == 0 { + panic("RequestSizeBuckets must be provided") + } + labels := 
prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	opts := prometheus.HistogramOpts{
+		Namespace:   n.name,
+		Subsystem:   n.subsystem,
+		Name:        "request_size_bytes",
+		Help:        "The HTTP request sizes in bytes.",
+		Buckets:     buckets,
+		ConstLabels: prometheus.Labels(labels),
+	}
+	metric := prometheus.NewHistogramVec(opts, []string{})
+	httpMetric := &HTTPMetric{
+		Collector:   metric,
+		handlerType: InstrumentHandlerRequestSize,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
+
+func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+	if len(buckets) == 0 {
+		panic("ResponseSizeBuckets must be provided")
+	}
+	labels := prometheus.Labels(n.labels)
+	labels["handler"] = handlerName
+	opts := prometheus.HistogramOpts{
+		Namespace:   n.name,
+		Subsystem:   n.subsystem,
+		Name:        "response_size_bytes",
+		Help:        "The HTTP response sizes in bytes.",
+		Buckets:     buckets,
+		ConstLabels: prometheus.Labels(labels),
+	}
+	metrics := prometheus.NewHistogramVec(opts, []string{})
+	httpMetric := &HTTPMetric{
+		Collector:   metrics,
+		handlerType: InstrumentHandlerResponseSize,
+	}
+	n.Add(httpMetric)
+	return httpMetric
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/register.go
new file mode 100644
index 00000000..708358df
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/register.go
@@ -0,0 +1,15 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Register adds all the metrics in the provided namespace to the global
+// metrics registry
+func Register(n *Namespace) {
+	prometheus.MustRegister(n)
+}
+
+// Deregister removes all the metrics in the provided namespace from the
+// global metrics registry
+func Deregister(n *Namespace) {
+	prometheus.Unregister(n)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/timer.go
new file mode 100644
index 00000000..824c9873
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/timer.go
@@ -0,0 +1,85 @@
+package metrics
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// StartTimer begins a timer observation at the callsite. When the target
+// operation is completed, the caller should call the returned done func().
+func StartTimer(timer Timer) (done func()) {
+	start := time.Now()
+	return func() {
+		timer.Update(time.Since(start))
+	}
+}
+
+// Timer is a metric that allows collecting the duration of an action in seconds
+type Timer interface {
+	// Update records an observation, duration, and converts to the target
+	// units.
+	Update(duration time.Duration)
+
+	// UpdateSince will add the duration from the provided starting time to the
+	// timer's summary with the precision that was used in the creation of the timer
+	UpdateSince(time.Time)
+}
+
+// LabeledTimer is a timer that must have label values populated before use.
+type LabeledTimer interface { + WithValues(labels ...string) *labeledTimerObserver +} + +type labeledTimer struct { + m *prometheus.HistogramVec +} + +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} +} + +func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { + lt.m.Describe(c) +} + +func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { + lt.m.Collect(c) +} + +type timer struct { + m prometheus.Observer +} + +func (t *timer) Update(duration time.Duration) { + t.m.Observe(duration.Seconds()) +} + +func (t *timer) UpdateSince(since time.Time) { + t.m.Observe(time.Since(since).Seconds()) +} + +func (t *timer) Describe(c chan<- *prometheus.Desc) { + c <- t.m.(prometheus.Metric).Desc() +} + +func (t *timer) Collect(c chan<- prometheus.Metric) { + // Are there any observers that don't implement Collector? It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. + t.m.(prometheus.Collector).Collect(c) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/unit.go new file mode 100644 index 00000000..c96622f9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/go-metrics/unit.go @@ -0,0 +1,12 @@ +package metrics + +// Unit represents the type or precision of a metric that is appended to +// the metrics fully qualified name +type Unit string + +const ( + Nanoseconds Unit = "nanoseconds" + Seconds Unit = "seconds" + Bytes Unit = "bytes" + Total Unit = "total" +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/LICENSE.code b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/LICENSE.code new file mode 100644 index 00000000..34c4ea7c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/LICENSE.docs b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/LICENSE.docs new file mode 100644 index 00000000..e26cd4fc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. 
Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. 
For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. 
For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. 
a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. 
+Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/MAINTAINERS b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/MAINTAINERS new file mode 100644 index 00000000..4a8bbc61 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/MAINTAINERS @@ -0,0 +1,40 @@ +# Libkv maintainers file +# +# This file describes who runs the docker/libkv project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "aluzzardi", + "sanimej", + "vieux", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.aluzzardi] + Name = "Andrea Luzzardi" + Email = "al@docker.com" + GitHub = "aluzzardi" + + [people.sanimej] + Name = "Santhosh Manohar" + Email = "santhosh@docker.com" + GitHub = "sanimej" + + [people.vieux] + Name = "Victor Vieux" + Email = "vieux@docker.com" + GitHub = "vieux" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/README.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/README.md new file mode 100644 index 00000000..ff2cc446 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/README.md @@ -0,0 +1,107 @@ +# libkv + +[![GoDoc](https://godoc.org/github.com/docker/libkv?status.png)](https://godoc.org/github.com/docker/libkv) +[![Build Status](https://travis-ci.org/docker/libkv.svg?branch=master)](https://travis-ci.org/docker/libkv) +[![Coverage Status](https://coveralls.io/repos/docker/libkv/badge.svg)](https://coveralls.io/r/docker/libkv) +[![Go Report Card](https://goreportcard.com/badge/github.com/docker/libkv)](https://goreportcard.com/report/github.com/docker/libkv) + +`libkv` provides a `Go` native library to store metadata. + +The goal of `libkv` is to abstract common store operations for multiple distributed and/or local Key/Value store backends. + +For example, you can use it to store your metadata or for service discovery to register machines and endpoints inside your cluster. + +You can also easily implement a generic *Leader Election* on top of it (see the [docker/leadership](https://github.com/docker/leadership) repository). 
+
+As of now, `libkv` offers support for `Consul`, `Etcd`, `Zookeeper` (**Distributed** store) and `BoltDB` (**Local** store).
+
+## Usage
+
+`libkv` is meant to be used as an abstraction layer over existing distributed Key/Value stores. It is especially useful if you plan to support `consul`, `etcd` and `zookeeper` using the same codebase.
+
+It is ideal if you are writing something in Go that should support:
+
+- A simple metadata storage, distributed or local
+- A lightweight discovery service for your nodes
+- A distributed lock mechanism
+
+You can find usage examples for `libkv` in `docs/examples.go`. Optionally, you can also take a look at the `docker/swarm` or `docker/libnetwork` repositories, which use `docker/libkv` for all the use cases listed above.
+
+## Supported versions
+
+`libkv` supports:
+- Consul versions >= `0.5.1`, because it uses Sessions with `Delete` behavior for the use of `TTLs` (mimics zookeeper's Ephemeral node support). If you don't plan to use `TTLs`, you can use Consul version `0.4.0+`.
+- Etcd versions >= `2.0`, because it uses the new `coreos/etcd/client`; this might change in the future as support for `APIv3` comes along and adds more capabilities.
+- Zookeeper versions >= `3.4.5`. This might work with earlier versions, but that remains untested as of now.
+- Boltdb, which shouldn't be subject to any version dependencies.
+
+## Interface
+
+A **storage backend** in `libkv` should implement (fully or partially) this interface:
+
+```go
+type Store interface {
+	Put(key string, value []byte, options *WriteOptions) error
+	Get(key string) (*KVPair, error)
+	Delete(key string) error
+	Exists(key string) (bool, error)
+	Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error)
+	WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error)
+	NewLock(key string, options *LockOptions) (Locker, error)
+	List(directory string) ([]*KVPair, error)
+	DeleteTree(directory string) error
+	AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error)
+	AtomicDelete(key string, previous *KVPair) (bool, error)
+	Close()
+}
+```
+
+## Compatibility matrix
+
+Backend drivers in `libkv` are generally divided between **local drivers** and **distributed drivers**. Distributed backends offer enhanced capabilities like `Watches` and/or distributed `Locks`.
+
+Local drivers are usually used to complement the distributed drivers, storing information that only needs to be available locally.
+
+| Calls                 | Consul | Etcd | Zookeeper | BoltDB |
+|-----------------------|:------:|:----:|:---------:|:------:|
+| Put                   |   X    |  X   |     X     |   X    |
+| Get                   |   X    |  X   |     X     |   X    |
+| Delete                |   X    |  X   |     X     |   X    |
+| Exists                |   X    |  X   |     X     |   X    |
+| Watch                 |   X    |  X   |     X     |        |
+| WatchTree             |   X    |  X   |     X     |        |
+| NewLock (Lock/Unlock) |   X    |  X   |     X     |        |
+| List                  |   X    |  X   |     X     |   X    |
+| DeleteTree            |   X    |  X   |     X     |   X    |
+| AtomicPut             |   X    |  X   |     X     |   X    |
+| Close                 |   X    |  X   |     X     |   X    |
+
+## Limitations
+
+Distributed Key/Value stores often have different concepts for managing and formatting keys and their associated values. Even though `libkv` tries to abstract those stores to provide some consistency, in some cases this can't be applied easily.
+
+Please refer to `docs/compatibility.md` for the special cases of cross-backend compatibility.
+
+Other than those special cases, you should expect the same experience for basic operations like `Get`/`Put`, etc.
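+
+For the distributed backends in the matrix above, watches are the main extra capability. A short sketch of consuming `Watch` from the interface shown earlier, assuming a `store.Store` created as in the first sketch; the `watchKey` helper is illustrative, not upstream code:
+
+```go
+package kvexample
+
+import (
+	"log"
+
+	"github.com/docker/libkv/store"
+)
+
+// watchKey streams updates for a single key until the returned stop
+// function is called. Events arrive on the channel returned by Watch.
+func watchKey(kv store.Store, key string) (stop func(), err error) {
+	stopCh := make(chan struct{})
+	events, err := kv.Watch(key, stopCh)
+	if err != nil {
+		return nil, err
+	}
+	go func() {
+		for pair := range events {
+			log.Printf("update: %s = %q", pair.Key, pair.Value)
+		}
+	}()
+	return func() { close(stopCh) }, nil
+}
+```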
+
+Calls like `WatchTree` may return different events (or a different number of events) depending on the backend; for now, `Etcd` and `Consul` will likely return more events than `Zookeeper`, and you should triage them properly. You should still be able to use watches interchangeably across backends (see the **docker/leadership** repository or the **pkg/discovery/kv** package in **docker/docker**).
+
+## TLS
+
+Only `Consul` and `etcd` support TLS; you should build and provide your own `config.TLS` object to feed the client. Support is planned for `zookeeper`.
+
+## Roadmap
+
+- Make the API nicer to use (using `options`)
+- Provide more options (`consistency` for example)
+- Improve performance (remove extra `Get`/`List` operations)
+- Better key formatting
+- New backends?
+
+## Contributing
+
+Want to hack on libkv? [Docker's contribution guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply.
+
+## Copyright and license
+
+Copyright © 2014-2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/libkv.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/libkv.go
new file mode 100644
index 00000000..bdb8c752
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/libkv.go
@@ -0,0 +1,40 @@
+package libkv
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/docker/libkv/store"
+)
+
+// Initialize creates a new Store object, initializing the client
+type Initialize func(addrs []string, options *store.Config) (store.Store, error)
+
+var (
+	// Backend initializers
+	initializers = make(map[store.Backend]Initialize)
+
+	supportedBackend = func() string {
+		keys := make([]string, 0, len(initializers))
+		for k := range initializers {
+			keys = append(keys, string(k))
+		}
+		sort.Strings(keys)
+		return strings.Join(keys, ", ")
+	}()
+)
+
+// NewStore creates an instance of store
+func NewStore(backend store.Backend, addrs []string, options *store.Config) (store.Store, error) {
+	if init, exists := initializers[backend]; exists {
+		return init(addrs, options)
+	}
+
+	return nil, fmt.Errorf("%s %s", store.ErrBackendNotSupported.Error(), supportedBackend)
+}
+
+// AddStore adds a new store backend to libkv
+func AddStore(store store.Backend, init Initialize) {
+	initializers[store] = init
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/store/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/store/helpers.go
new file mode 100644
index 00000000..0fb74c9a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/store/helpers.go
@@ -0,0 +1,47 @@
+package store
+
+import (
+	"strings"
+)
+
+// CreateEndpoints creates a list of endpoints given the right scheme
+func CreateEndpoints(addrs []string, scheme string) (entries []string) {
+	for _, addr := range addrs {
+		entries = append(entries, scheme+"://"+addr)
+	}
+	return entries
+}
+
+// Normalize the key for each store to the form:
+//
+// /path/to/key
+//
+func Normalize(key string) string {
+	return "/" + join(SplitKey(key))
+}
+
+// 
GetDirectory gets the full directory part of
+// the key to the form:
+//
+// /path/to/
+//
+func GetDirectory(key string) string {
+	parts := SplitKey(key)
+	parts = parts[:len(parts)-1]
+	return "/" + join(parts)
+}
+
+// SplitKey splits the key to extract path information
+func SplitKey(key string) (path []string) {
+	if strings.Contains(key, "/") {
+		path = strings.Split(key, "/")
+	} else {
+		path = []string{key}
+	}
+	return path
+}
+
+// join the path parts with '/'
+func join(parts []string) string {
+	return strings.Join(parts, "/")
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/store/store.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/store/store.go
new file mode 100644
index 00000000..7a4850c0
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libkv/store/store.go
@@ -0,0 +1,132 @@
+package store
+
+import (
+	"crypto/tls"
+	"errors"
+	"time"
+)
+
+// Backend represents a KV Store Backend
+type Backend string
+
+const (
+	// CONSUL backend
+	CONSUL Backend = "consul"
+	// ETCD backend
+	ETCD Backend = "etcd"
+	// ZK backend
+	ZK Backend = "zk"
+	// BOLTDB backend
+	BOLTDB Backend = "boltdb"
+)
+
+var (
+	// ErrBackendNotSupported is thrown when the backend k/v store is not supported by libkv
+	ErrBackendNotSupported = errors.New("Backend storage not supported yet, please choose one of")
+	// ErrCallNotSupported is thrown when a method is not implemented/supported by the current backend
+	ErrCallNotSupported = errors.New("The current call is not supported with this backend")
+	// ErrNotReachable is thrown when the API cannot be reached for issuing common store operations
+	ErrNotReachable = errors.New("Api not reachable")
+	// ErrCannotLock is thrown when there is an error acquiring a lock on a key
+	ErrCannotLock = errors.New("Error acquiring the lock")
+	// ErrKeyModified is thrown during an atomic operation if the index does not match the one in the store
+	ErrKeyModified = errors.New("Unable to complete atomic operation, key modified")
+	// ErrKeyNotFound is thrown when the key is not found in the store during a Get operation
+	ErrKeyNotFound = errors.New("Key not found in store")
+	// ErrPreviousNotSpecified is thrown when the previous value is not specified for an atomic operation
+	ErrPreviousNotSpecified = errors.New("Previous K/V pair should be provided for the Atomic operation")
+	// ErrKeyExists is thrown when the previous value exists in the case of an AtomicPut
+	ErrKeyExists = errors.New("Previous K/V pair exists, cannot complete Atomic operation")
+)
+
+// Config contains the options for a storage client
+type Config struct {
+	ClientTLS         *ClientTLSConfig
+	TLS               *tls.Config
+	ConnectionTimeout time.Duration
+	Bucket            string
+	PersistConnection bool
+	Username          string
+	Password          string
+}
+
+// ClientTLSConfig contains data for a Client TLS configuration in the form
+// the etcd client wants it. Eventually we'll adapt it for ZK and Consul.
+type ClientTLSConfig struct {
+	CertFile   string
+	KeyFile    string
+	CACertFile string
+}
+
+// Store represents the backend K/V storage
+// Each store should support every call listed
+// here. 
Or it couldn't be implemented as a K/V +// backend for libkv +type Store interface { + // Put a value at the specified key + Put(key string, value []byte, options *WriteOptions) error + + // Get a value given its key + Get(key string) (*KVPair, error) + + // Delete the value at the specified key + Delete(key string) error + + // Verify if a Key exists in the store + Exists(key string) (bool, error) + + // Watch for changes on a key + Watch(key string, stopCh <-chan struct{}) (<-chan *KVPair, error) + + // WatchTree watches for changes on child nodes under + // a given directory + WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*KVPair, error) + + // NewLock creates a lock for a given key. + // The returned Locker is not held and must be acquired + // with `.Lock`. The Value is optional. + NewLock(key string, options *LockOptions) (Locker, error) + + // List the content of a given prefix + List(directory string) ([]*KVPair, error) + + // DeleteTree deletes a range of keys under a given directory + DeleteTree(directory string) error + + // Atomic CAS operation on a single value. + // Pass previous = nil to create a new key. + AtomicPut(key string, value []byte, previous *KVPair, options *WriteOptions) (bool, *KVPair, error) + + // Atomic delete of a single value + AtomicDelete(key string, previous *KVPair) (bool, error) + + // Close the store connection + Close() +} + +// KVPair represents {Key, Value, Lastindex} tuple +type KVPair struct { + Key string + Value []byte + LastIndex uint64 +} + +// WriteOptions contains optional request parameters +type WriteOptions struct { + IsDir bool + TTL time.Duration +} + +// LockOptions contains optional request parameters +type LockOptions struct { + Value []byte // Optional, value to associate with the lock + TTL time.Duration // Optional, expiration ttl associated with the lock + RenewLock chan struct{} // Optional, chan used to control and stop the session ttl renewal for the lock +} + +// Locker provides locking mechanism on top of the store. +// Similar to `sync.Lock` except it may return errors. +type Locker interface { + Lock(stopChan chan struct{}) (<-chan struct{}, error) + Unlock() error +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/LICENSE new file mode 100644 index 00000000..e06d2081 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
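The `Store` interface vendored above is the contract that the libnetwork `datastore` package (added below) builds on: `Get` returns a `*KVPair` carrying the backend's `LastIndex`, and `AtomicPut` succeeds only while that index is still current, failing with `ErrKeyModified` otherwise. A minimal check-and-set sketch against that interface, assuming only the vendored API shown above; the `casUpdate` helper and its `update` callback are illustrative, not part of the vendored code:

    import "github.com/docker/libkv/store"

    // casUpdate retries an optimistic read-modify-write until it writes over
    // an unmodified key. Get yields the last-seen KVPair; handing that pair
    // back to AtomicPut makes the write conditional on LastIndex, and a nil
    // previous pair asks AtomicPut to create the key instead.
    func casUpdate(kv store.Store, key string, update func(old []byte) []byte) error {
        for {
            prev, err := kv.Get(key)
            if err != nil && err != store.ErrKeyNotFound {
                return err
            }
            var old []byte
            if prev != nil {
                old = prev.Value
            }
            _, _, err = kv.AtomicPut(key, update(old), prev, nil)
            if err == store.ErrKeyModified || err == store.ErrKeyExists {
                continue // a concurrent writer won the race; re-read and retry
            }
            return err
        }
    }

This is the same pattern `datastore.PutObjectAtomic` below implements for `KVObject`s, where a `store.ErrKeyExists` from the backend is likewise surfaced as `ErrKeyModified`.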
+ diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/cache.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/cache.go new file mode 100644 index 00000000..49839ae8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/cache.go @@ -0,0 +1,178 @@ +package datastore + +import ( + "errors" + "fmt" + "sync" + + "github.com/docker/libkv/store" +) + +type kvMap map[string]KVObject + +type cache struct { + sync.Mutex + kmm map[string]kvMap + ds *datastore +} + +func newCache(ds *datastore) *cache { + return &cache{kmm: make(map[string]kvMap), ds: ds} +} + +func (c *cache) kmap(kvObject KVObject) (kvMap, error) { + var err error + + c.Lock() + keyPrefix := Key(kvObject.KeyPrefix()...) + kmap, ok := c.kmm[keyPrefix] + c.Unlock() + + if ok { + return kmap, nil + } + + kmap = kvMap{} + + // Bail out right away if the kvObject does not implement KVConstructor + ctor, ok := kvObject.(KVConstructor) + if !ok { + return nil, errors.New("error while populating kmap, object does not implement KVConstructor interface") + } + + kvList, err := c.ds.store.List(keyPrefix) + if err != nil { + if err == store.ErrKeyNotFound { + // If the store doesn't have anything then there is nothing to + // populate in the cache. Just bail out. + goto out + } + + return nil, fmt.Errorf("error while populating kmap: %v", err) + } + + for _, kvPair := range kvList { + // Ignore empty kvPair values + if len(kvPair.Value) == 0 { + continue + } + + dstO := ctor.New() + err = dstO.SetValue(kvPair.Value) + if err != nil { + return nil, err + } + + // Make sure the object has a correct view of the DB index in + // case we need to modify it and update the DB. + dstO.SetIndex(kvPair.LastIndex) + + kmap[Key(dstO.Key()...)] = dstO + } + +out: + // There may multiple go routines racing to fill the + // cache. The one which places the kmap in c.kmm first + // wins. The others should just use what the first populated. + c.Lock() + kmapNew, ok := c.kmm[keyPrefix] + if ok { + c.Unlock() + return kmapNew, nil + } + + c.kmm[keyPrefix] = kmap + c.Unlock() + + return kmap, nil +} + +func (c *cache) add(kvObject KVObject, atomic bool) error { + kmap, err := c.kmap(kvObject) + if err != nil { + return err + } + + c.Lock() + // If atomic is true, cache needs to maintain its own index + // for atomicity and the add needs to be atomic. + if atomic { + if prev, ok := kmap[Key(kvObject.Key()...)]; ok { + if prev.Index() != kvObject.Index() { + c.Unlock() + return ErrKeyModified + } + } + + // Increment index + index := kvObject.Index() + index++ + kvObject.SetIndex(index) + } + + kmap[Key(kvObject.Key()...)] = kvObject + c.Unlock() + return nil +} + +func (c *cache) del(kvObject KVObject, atomic bool) error { + kmap, err := c.kmap(kvObject) + if err != nil { + return err + } + + c.Lock() + // If atomic is true, cache needs to maintain its own index + // for atomicity and del needs to be atomic. 
+ if atomic { + if prev, ok := kmap[Key(kvObject.Key()...)]; ok { + if prev.Index() != kvObject.Index() { + c.Unlock() + return ErrKeyModified + } + } + } + + delete(kmap, Key(kvObject.Key()...)) + c.Unlock() + return nil +} + +func (c *cache) get(key string, kvObject KVObject) error { + kmap, err := c.kmap(kvObject) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + + o, ok := kmap[Key(kvObject.Key()...)] + if !ok { + return ErrKeyNotFound + } + + ctor, ok := o.(KVConstructor) + if !ok { + return errors.New("kvobject does not implement KVConstructor interface. could not get object") + } + + return ctor.CopyTo(kvObject) +} + +func (c *cache) list(kvObject KVObject) ([]KVObject, error) { + kmap, err := c.kmap(kvObject) + if err != nil { + return nil, err + } + + c.Lock() + defer c.Unlock() + + var kvol []KVObject + for _, v := range kmap { + kvol = append(kvol, v) + } + + return kvol, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/datastore.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/datastore.go new file mode 100644 index 00000000..e35dc43b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/datastore.go @@ -0,0 +1,660 @@ +package datastore + +import ( + "fmt" + "log" + "reflect" + "strings" + "sync" + "time" + + "github.com/docker/libkv" + "github.com/docker/libkv/store" + "github.com/docker/libnetwork/discoverapi" + "github.com/docker/libnetwork/types" +) + +//DataStore exported +type DataStore interface { + // GetObject gets data from datastore and unmarshals to the specified object + GetObject(key string, o KVObject) error + // PutObject adds a new Record based on an object into the datastore + PutObject(kvObject KVObject) error + // PutObjectAtomic provides an atomic add and update operation for a Record + PutObjectAtomic(kvObject KVObject) error + // DeleteObject deletes a record + DeleteObject(kvObject KVObject) error + // DeleteObjectAtomic performs an atomic delete operation + DeleteObjectAtomic(kvObject KVObject) error + // DeleteTree deletes a record + DeleteTree(kvObject KVObject) error + // Watchable returns whether the store is watchable or not + Watchable() bool + // Watch for changes on a KVObject + Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) + // RestartWatch retriggers stopped Watches + RestartWatch() + // Active returns if the store is active + Active() bool + // List returns of a list of KVObjects belonging to the parent + // key. 
The caller must pass a KVObject of the same type as + // the objects that need to be listed + List(string, KVObject) ([]KVObject, error) + // Map returns a Map of KVObjects + Map(key string, kvObject KVObject) (map[string]KVObject, error) + // Scope returns the scope of the store + Scope() string + // KVStore returns access to the KV Store + KVStore() store.Store + // Close closes the data store + Close() +} + +// ErrKeyModified is raised for an atomic update when the update is working on a stale state +var ( + ErrKeyModified = store.ErrKeyModified + ErrKeyNotFound = store.ErrKeyNotFound +) + +type datastore struct { + scope string + store store.Store + cache *cache + watchCh chan struct{} + active bool + sequential bool + sync.Mutex +} + +// KVObject is Key/Value interface used by objects to be part of the DataStore +type KVObject interface { + // Key method lets an object provide the Key to be used in KV Store + Key() []string + // KeyPrefix method lets an object return immediate parent key that can be used for tree walk + KeyPrefix() []string + // Value method lets an object marshal its content to be stored in the KV store + Value() []byte + // SetValue is used by the datastore to set the object's value when loaded from the data store. + SetValue([]byte) error + // Index method returns the latest DB Index as seen by the object + Index() uint64 + // SetIndex method allows the datastore to store the latest DB Index into the object + SetIndex(uint64) + // True if the object exists in the datastore, false if it hasn't been stored yet. + // When SetIndex() is called, the object has been stored. + Exists() bool + // DataScope indicates the storage scope of the KV object + DataScope() string + // Skip provides a way for a KV Object to avoid persisting it in the KV Store + Skip() bool +} + +// KVConstructor interface defines methods which can construct a KVObject from another. +type KVConstructor interface { + // New returns a new object which is created based on the + // source object + New() KVObject + // CopyTo deep copies the contents of the implementing object + // to the passed destination object + CopyTo(KVObject) error +} + +// ScopeCfg represents Datastore configuration. +type ScopeCfg struct { + Client ScopeClientCfg +} + +// ScopeClientCfg represents Datastore Client-only mode configuration +type ScopeClientCfg struct { + Provider string + Address string + Config *store.Config +} + +const ( + // LocalScope indicates to store the KV object in local datastore such as boltdb + LocalScope = "local" + // GlobalScope indicates to store the KV object in global datastore such as consul/etcd/zookeeper + GlobalScope = "global" + // SwarmScope is not indicating a datastore location. It is defined here + // along with the other two scopes just for consistency. 
+ SwarmScope = "swarm" + defaultPrefix = "/var/lib/docker/network/files" +) + +const ( + // NetworkKeyPrefix is the prefix for network key in the kv store + NetworkKeyPrefix = "network" + // EndpointKeyPrefix is the prefix for endpoint key in the kv store + EndpointKeyPrefix = "endpoint" +) + +var ( + defaultScopes = makeDefaultScopes() +) + +func makeDefaultScopes() map[string]*ScopeCfg { + def := make(map[string]*ScopeCfg) + def[LocalScope] = &ScopeCfg{ + Client: ScopeClientCfg{ + Provider: string(store.BOLTDB), + Address: defaultPrefix + "/local-kv.db", + Config: &store.Config{ + Bucket: "libnetwork", + ConnectionTimeout: time.Minute, + }, + }, + } + + return def +} + +var defaultRootChain = []string{"docker", "network", "v1.0"} +var rootChain = defaultRootChain + +// DefaultScopes returns a map of default scopes and its config for clients to use. +func DefaultScopes(dataDir string) map[string]*ScopeCfg { + if dataDir != "" { + defaultScopes[LocalScope].Client.Address = dataDir + "/network/files/local-kv.db" + return defaultScopes + } + + defaultScopes[LocalScope].Client.Address = defaultPrefix + "/local-kv.db" + return defaultScopes +} + +// IsValid checks if the scope config has valid configuration. +func (cfg *ScopeCfg) IsValid() bool { + if cfg == nil || + strings.TrimSpace(cfg.Client.Provider) == "" || + strings.TrimSpace(cfg.Client.Address) == "" { + return false + } + + return true +} + +//Key provides convenient method to create a Key +func Key(key ...string) string { + keychain := append(rootChain, key...) + str := strings.Join(keychain, "/") + return str + "/" +} + +//ParseKey provides convenient method to unpack the key to complement the Key function +func ParseKey(key string) ([]string, error) { + chain := strings.Split(strings.Trim(key, "/"), "/") + + // The key must at least be equal to the rootChain in order to be considered as valid + if len(chain) <= len(rootChain) || !reflect.DeepEqual(chain[0:len(rootChain)], rootChain) { + return nil, types.BadRequestErrorf("invalid Key : %s", key) + } + return chain[len(rootChain):], nil +} + +// newClient used to connect to KV Store +func newClient(scope string, kv string, addr string, config *store.Config, cached bool) (DataStore, error) { + + if cached && scope != LocalScope { + return nil, fmt.Errorf("caching supported only for scope %s", LocalScope) + } + sequential := false + if scope == LocalScope { + sequential = true + } + + if config == nil { + config = &store.Config{} + } + + var addrs []string + + if kv == string(store.BOLTDB) { + // Parse file path + addrs = strings.Split(addr, ",") + } else { + // Parse URI + parts := strings.SplitN(addr, "/", 2) + addrs = strings.Split(parts[0], ",") + + // Add the custom prefix to the root chain + if len(parts) == 2 { + rootChain = append([]string{parts[1]}, defaultRootChain...) 
+ } + } + + store, err := libkv.NewStore(store.Backend(kv), addrs, config) + if err != nil { + return nil, err + } + + ds := &datastore{scope: scope, store: store, active: true, watchCh: make(chan struct{}), sequential: sequential} + if cached { + ds.cache = newCache(ds) + } + + return ds, nil +} + +// NewDataStore creates a new instance of LibKV data store +func NewDataStore(scope string, cfg *ScopeCfg) (DataStore, error) { + if cfg == nil || cfg.Client.Provider == "" || cfg.Client.Address == "" { + c, ok := defaultScopes[scope] + if !ok || c.Client.Provider == "" || c.Client.Address == "" { + return nil, fmt.Errorf("unexpected scope %s without configuration passed", scope) + } + + cfg = c + } + + var cached bool + if scope == LocalScope { + cached = true + } + + return newClient(scope, cfg.Client.Provider, cfg.Client.Address, cfg.Client.Config, cached) +} + +// NewDataStoreFromConfig creates a new instance of LibKV data store starting from the datastore config data +func NewDataStoreFromConfig(dsc discoverapi.DatastoreConfigData) (DataStore, error) { + var ( + ok bool + sCfgP *store.Config + ) + + sCfgP, ok = dsc.Config.(*store.Config) + if !ok && dsc.Config != nil { + return nil, fmt.Errorf("cannot parse store configuration: %v", dsc.Config) + } + + scopeCfg := &ScopeCfg{ + Client: ScopeClientCfg{ + Address: dsc.Address, + Provider: dsc.Provider, + Config: sCfgP, + }, + } + + ds, err := NewDataStore(dsc.Scope, scopeCfg) + if err != nil { + return nil, fmt.Errorf("failed to construct datastore client from datastore configuration %v: %v", dsc, err) + } + + return ds, err +} + +func (ds *datastore) Close() { + ds.store.Close() +} + +func (ds *datastore) Scope() string { + return ds.scope +} + +func (ds *datastore) Active() bool { + return ds.active +} + +func (ds *datastore) Watchable() bool { + return ds.scope != LocalScope +} + +func (ds *datastore) Watch(kvObject KVObject, stopCh <-chan struct{}) (<-chan KVObject, error) { + sCh := make(chan struct{}) + + ctor, ok := kvObject.(KVConstructor) + if !ok { + return nil, fmt.Errorf("error watching object type %T, object does not implement KVConstructor interface", kvObject) + } + + kvpCh, err := ds.store.Watch(Key(kvObject.Key()...), sCh) + if err != nil { + return nil, err + } + + kvoCh := make(chan KVObject) + + go func() { + retry_watch: + var err error + + // Make sure to get a new instance of watch channel + ds.Lock() + watchCh := ds.watchCh + ds.Unlock() + + loop: + for { + select { + case <-stopCh: + close(sCh) + return + case kvPair := <-kvpCh: + // If the backend KV store gets reset libkv's go routine + // for the watch can exit resulting in a nil value in + // channel. 
+ if kvPair == nil { + ds.Lock() + ds.active = false + ds.Unlock() + break loop + } + + dstO := ctor.New() + + if err = dstO.SetValue(kvPair.Value); err != nil { + log.Printf("Could not unmarshal kvpair value = %s", string(kvPair.Value)) + break + } + + dstO.SetIndex(kvPair.LastIndex) + kvoCh <- dstO + } + } + + // Wait on watch channel for a re-trigger when datastore becomes active + <-watchCh + + kvpCh, err = ds.store.Watch(Key(kvObject.Key()...), sCh) + if err != nil { + log.Printf("Could not watch the key %s in store: %v", Key(kvObject.Key()...), err) + } + + goto retry_watch + }() + + return kvoCh, nil +} + +func (ds *datastore) RestartWatch() { + ds.Lock() + defer ds.Unlock() + + ds.active = true + watchCh := ds.watchCh + ds.watchCh = make(chan struct{}) + close(watchCh) +} + +func (ds *datastore) KVStore() store.Store { + return ds.store +} + +// PutObjectAtomic adds a new Record based on an object into the datastore +func (ds *datastore) PutObjectAtomic(kvObject KVObject) error { + var ( + previous *store.KVPair + pair *store.KVPair + err error + ) + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if kvObject == nil { + return types.BadRequestErrorf("invalid KV Object : nil") + } + + kvObjValue := kvObject.Value() + + if kvObjValue == nil { + return types.BadRequestErrorf("invalid KV Object with a nil Value for key %s", Key(kvObject.Key()...)) + } + + if kvObject.Skip() { + goto add_cache + } + + if kvObject.Exists() { + previous = &store.KVPair{Key: Key(kvObject.Key()...), LastIndex: kvObject.Index()} + } else { + previous = nil + } + + _, pair, err = ds.store.AtomicPut(Key(kvObject.Key()...), kvObjValue, previous, nil) + if err != nil { + if err == store.ErrKeyExists { + return ErrKeyModified + } + return err + } + + kvObject.SetIndex(pair.LastIndex) + +add_cache: + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + return ds.cache.add(kvObject, kvObject.Skip()) + } + + return nil +} + +// PutObject adds a new Record based on an object into the datastore +func (ds *datastore) PutObject(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if kvObject == nil { + return types.BadRequestErrorf("invalid KV Object : nil") + } + + if kvObject.Skip() { + goto add_cache + } + + if err := ds.putObjectWithKey(kvObject, kvObject.Key()...); err != nil { + return err + } + +add_cache: + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + return ds.cache.add(kvObject, kvObject.Skip()) + } + + return nil +} + +func (ds *datastore) putObjectWithKey(kvObject KVObject, key ...string) error { + kvObjValue := kvObject.Value() + + if kvObjValue == nil { + return types.BadRequestErrorf("invalid KV Object with a nil Value for key %s", Key(kvObject.Key()...)) + } + return ds.store.Put(Key(key...), kvObjValue, nil) +} + +// GetObject returns a record matching the key +func (ds *datastore) GetObject(key string, o KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if ds.cache != nil { + return ds.cache.get(key, o) + } + + kvPair, err := ds.store.Get(key) + if err != nil { + return err + } + + if err := o.SetValue(kvPair.Value); err != nil { + return err + } + + // Make sure the object has a correct view of the DB index in + // case we need to modify it and update the DB. 
+ o.SetIndex(kvPair.LastIndex) + return nil +} + +func (ds *datastore) ensureParent(parent string) error { + exists, err := ds.store.Exists(parent) + if err != nil { + return err + } + if exists { + return nil + } + return ds.store.Put(parent, []byte{}, &store.WriteOptions{IsDir: true}) +} + +func (ds *datastore) List(key string, kvObject KVObject) ([]KVObject, error) { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if ds.cache != nil { + return ds.cache.list(kvObject) + } + + var kvol []KVObject + cb := func(key string, val KVObject) { + kvol = append(kvol, val) + } + err := ds.iterateKVPairsFromStore(key, kvObject, cb) + if err != nil { + return nil, err + } + return kvol, nil +} + +func (ds *datastore) iterateKVPairsFromStore(key string, kvObject KVObject, callback func(string, KVObject)) error { + // Bail out right away if the kvObject does not implement KVConstructor + ctor, ok := kvObject.(KVConstructor) + if !ok { + return fmt.Errorf("error listing objects, object does not implement KVConstructor interface") + } + + // Make sure the parent key exists + if err := ds.ensureParent(key); err != nil { + return err + } + + kvList, err := ds.store.List(key) + if err != nil { + return err + } + + for _, kvPair := range kvList { + if len(kvPair.Value) == 0 { + continue + } + + dstO := ctor.New() + if err := dstO.SetValue(kvPair.Value); err != nil { + return err + } + + // Make sure the object has a correct view of the DB index in + // case we need to modify it and update the DB. + dstO.SetIndex(kvPair.LastIndex) + callback(kvPair.Key, dstO) + } + + return nil +} + +func (ds *datastore) Map(key string, kvObject KVObject) (map[string]KVObject, error) { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + kvol := make(map[string]KVObject) + cb := func(key string, val KVObject) { + // Trim the leading & trailing "/" to make it consistent across all stores + kvol[strings.Trim(key, "/")] = val + } + err := ds.iterateKVPairsFromStore(key, kvObject, cb) + if err != nil { + return nil, err + } + return kvol, nil +} + +// DeleteObject unconditionally deletes a record from the store +func (ds *datastore) DeleteObject(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + // cleanup the cache first + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + ds.cache.del(kvObject, kvObject.Skip()) + } + + if kvObject.Skip() { + return nil + } + + return ds.store.Delete(Key(kvObject.Key()...)) +} + +// DeleteObjectAtomic performs atomic delete on a record +func (ds *datastore) DeleteObjectAtomic(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + if kvObject == nil { + return types.BadRequestErrorf("invalid KV Object : nil") + } + + previous := &store.KVPair{Key: Key(kvObject.Key()...), LastIndex: kvObject.Index()} + + if kvObject.Skip() { + goto del_cache + } + + if _, err := ds.store.AtomicDelete(Key(kvObject.Key()...), previous); err != nil { + if err == store.ErrKeyExists { + return ErrKeyModified + } + return err + } + +del_cache: + // cleanup the cache only if AtomicDelete went through successfully + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. 
+ return ds.cache.del(kvObject, kvObject.Skip()) + } + + return nil +} + +// DeleteTree unconditionally deletes a record from the store +func (ds *datastore) DeleteTree(kvObject KVObject) error { + if ds.sequential { + ds.Lock() + defer ds.Unlock() + } + + // cleanup the cache first + if ds.cache != nil { + // If persistent store is skipped, sequencing needs to + // happen in cache. + ds.cache.del(kvObject, kvObject.Skip()) + } + + if kvObject.Skip() { + return nil + } + + return ds.store.DeleteTree(Key(kvObject.KeyPrefix()...)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/mock_store.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/mock_store.go new file mode 100644 index 00000000..215cc4fd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/datastore/mock_store.go @@ -0,0 +1,129 @@ +package datastore + +import ( + "errors" + + "github.com/docker/libkv/store" + "github.com/docker/libnetwork/types" +) + +var ( + // ErrNotImplemented exported + ErrNotImplemented = errors.New("Functionality not implemented") +) + +// MockData exported +type MockData struct { + Data []byte + Index uint64 +} + +// MockStore exported +type MockStore struct { + db map[string]*MockData +} + +// NewMockStore creates a Map backed Datastore that is useful for mocking +func NewMockStore() *MockStore { + db := make(map[string]*MockData) + return &MockStore{db} +} + +// Get the value at "key", returns the last modified index +// to use in conjunction to CAS calls +func (s *MockStore) Get(key string) (*store.KVPair, error) { + mData := s.db[key] + if mData == nil { + return nil, nil + } + return &store.KVPair{Value: mData.Data, LastIndex: mData.Index}, nil + +} + +// Put a value at "key" +func (s *MockStore) Put(key string, value []byte, options *store.WriteOptions) error { + mData := s.db[key] + if mData == nil { + mData = &MockData{value, 0} + } + mData.Index = mData.Index + 1 + s.db[key] = mData + return nil +} + +// Delete a value at "key" +func (s *MockStore) Delete(key string) error { + delete(s.db, key) + return nil +} + +// Exists checks that the key exists inside the store +func (s *MockStore) Exists(key string) (bool, error) { + _, ok := s.db[key] + return ok, nil +} + +// List gets a range of values at "directory" +func (s *MockStore) List(prefix string) ([]*store.KVPair, error) { + return nil, ErrNotImplemented +} + +// DeleteTree deletes a range of values at "directory" +func (s *MockStore) DeleteTree(prefix string) error { + delete(s.db, prefix) + return nil +} + +// Watch a single key for modifications +func (s *MockStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { + return nil, ErrNotImplemented +} + +// WatchTree triggers a watch on a range of values at "directory" +func (s *MockStore) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { + return nil, ErrNotImplemented +} + +// NewLock exposed +func (s *MockStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { + return nil, ErrNotImplemented +} + +// AtomicPut put a value at "key" if the key has not been +// modified in the meantime, throws an error if this is the case +func (s *MockStore) AtomicPut(key string, newValue []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { + mData := s.db[key] + + if previous == nil { + if mData != nil { + return false, nil, types.BadRequestErrorf("atomic put failed because key 
exists") + } // Else OK. + } else { + if mData == nil { + return false, nil, types.BadRequestErrorf("atomic put failed because key exists") + } + if mData != nil && mData.Index != previous.LastIndex { + return false, nil, types.BadRequestErrorf("atomic put failed due to mismatched Index") + } // Else OK. + } + err := s.Put(key, newValue, nil) + if err != nil { + return false, nil, err + } + return true, &store.KVPair{Key: key, Value: newValue, LastIndex: s.db[key].Index}, nil +} + +// AtomicDelete deletes a value at "key" if the key has not +// been modified in the meantime, throws an error if this is the case +func (s *MockStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { + mData := s.db[key] + if mData != nil && mData.Index != previous.LastIndex { + return false, types.BadRequestErrorf("atomic delete failed due to mismatched Index") + } + return true, s.Delete(key) +} + +// Close closes the client connection +func (s *MockStore) Close() { + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go new file mode 100644 index 00000000..7ac36155 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/discoverapi/discoverapi.go @@ -0,0 +1,60 @@ +package discoverapi + +// Discover is an interface to be implemented by the component interested in receiving discover events +// like new node joining the cluster or datastore updates +type Discover interface { + // DiscoverNew is a notification for a new discovery event, Example:a new node joining a cluster + DiscoverNew(dType DiscoveryType, data interface{}) error + + // DiscoverDelete is a notification for a discovery delete event, Example:a node leaving a cluster + DiscoverDelete(dType DiscoveryType, data interface{}) error +} + +// DiscoveryType represents the type of discovery element the DiscoverNew function is invoked on +type DiscoveryType int + +const ( + // NodeDiscovery represents Node join/leave events provided by discovery + NodeDiscovery = iota + 1 + // DatastoreConfig represents an add/remove datastore event + DatastoreConfig + // EncryptionKeysConfig represents the initial key(s) for performing datapath encryption + EncryptionKeysConfig + // EncryptionKeysUpdate represents an update to the datapath encryption key(s) + EncryptionKeysUpdate +) + +// NodeDiscoveryData represents the structure backing the node discovery data json string +type NodeDiscoveryData struct { + Address string + BindAddress string + Self bool +} + +// DatastoreConfigData is the data for the datastore update event message +type DatastoreConfigData struct { + Scope string + Provider string + Address string + Config interface{} +} + +// DriverEncryptionConfig contains the initial datapath encryption key(s) +// Key in first position is the primary key, the one to be used in tx. +// Original key and tag types are []byte and uint64 +type DriverEncryptionConfig struct { + Keys [][]byte + Tags []uint64 +} + +// DriverEncryptionUpdate carries an update to the encryption key(s) as: +// a new key and/or set a primary key and/or a removal of an existing key. 
+// Original key and tag types are []byte and uint64 +type DriverEncryptionUpdate struct { + Key []byte + Tag uint64 + Primary []byte + PrimaryTag uint64 + Prune []byte + PruneTag uint64 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/ipamutils/utils.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/ipamutils/utils.go new file mode 100644 index 00000000..3fd37cd8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/ipamutils/utils.go @@ -0,0 +1,135 @@ +// Package ipamutils provides utility functions for ipam management +package ipamutils + +import ( + "fmt" + "net" + "sync" +) + +var ( + // PredefinedLocalScopeDefaultNetworks contains a list of 31 IPv4 private networks with host size 16 and 12 + // (172.17-31.x.x/16, 192.168.x.x/20) which do not overlap with the networks in `PredefinedGlobalScopeDefaultNetworks` + PredefinedLocalScopeDefaultNetworks []*net.IPNet + // PredefinedGlobalScopeDefaultNetworks contains a list of 64K IPv4 private networks with host size 8 + // (10.x.x.x/24) which do not overlap with the networks in `PredefinedLocalScopeDefaultNetworks` + PredefinedGlobalScopeDefaultNetworks []*net.IPNet + mutex sync.Mutex + localScopeDefaultNetworks = []*NetworkToSplit{{"172.17.0.0/16", 16}, {"172.18.0.0/16", 16}, {"172.19.0.0/16", 16}, + {"172.20.0.0/14", 16}, {"172.24.0.0/14", 16}, {"172.28.0.0/14", 16}, + {"192.168.0.0/16", 20}} + globalScopeDefaultNetworks = []*NetworkToSplit{{"10.0.0.0/8", 24}} +) + +// NetworkToSplit represent a network that has to be split in chunks with mask length Size. +// Each subnet in the set is derived from the Base pool. Base is to be passed +// in CIDR format. +// Example: a Base "10.10.0.0/16 with Size 24 will define the set of 256 +// 10.10.[0-255].0/24 address pools +type NetworkToSplit struct { + Base string `json:"base"` + Size int `json:"size"` +} + +func init() { + var err error + if PredefinedGlobalScopeDefaultNetworks, err = splitNetworks(globalScopeDefaultNetworks); err != nil { + //we are going to panic in case of error as we should never get into this state + panic("InitAddressPools failed to initialize the global scope default address pool") + } + + if PredefinedLocalScopeDefaultNetworks, err = splitNetworks(localScopeDefaultNetworks); err != nil { + //we are going to panic in case of error as we should never get into this state + panic("InitAddressPools failed to initialize the local scope default address pool") + } +} + +// configDefaultNetworks configures local as well global default pool based on input +func configDefaultNetworks(defaultAddressPool []*NetworkToSplit, result *[]*net.IPNet) error { + mutex.Lock() + defer mutex.Unlock() + defaultNetworks, err := splitNetworks(defaultAddressPool) + if err != nil { + return err + } + *result = defaultNetworks + return nil +} + +// GetGlobalScopeDefaultNetworks returns PredefinedGlobalScopeDefaultNetworks +func GetGlobalScopeDefaultNetworks() []*net.IPNet { + mutex.Lock() + defer mutex.Unlock() + return PredefinedGlobalScopeDefaultNetworks +} + +// GetLocalScopeDefaultNetworks returns PredefinedLocalScopeDefaultNetworks +func GetLocalScopeDefaultNetworks() []*net.IPNet { + mutex.Lock() + defer mutex.Unlock() + return PredefinedLocalScopeDefaultNetworks +} + +// ConfigGlobalScopeDefaultNetworks configures global default pool. 
+// Ideally this will be called from SwarmKit as part of swarm init
+func ConfigGlobalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
+    if defaultAddressPool == nil {
+        defaultAddressPool = globalScopeDefaultNetworks
+    }
+    return configDefaultNetworks(defaultAddressPool, &PredefinedGlobalScopeDefaultNetworks)
+}
+
+// ConfigLocalScopeDefaultNetworks configures local default pool.
+// Ideally this will be called during libnetwork init
+func ConfigLocalScopeDefaultNetworks(defaultAddressPool []*NetworkToSplit) error {
+    if defaultAddressPool == nil {
+        return nil
+    }
+    return configDefaultNetworks(defaultAddressPool, &PredefinedLocalScopeDefaultNetworks)
+}
+
+// splitNetworks takes a slice of networks, split them accordingly and returns them
+func splitNetworks(list []*NetworkToSplit) ([]*net.IPNet, error) {
+    localPools := make([]*net.IPNet, 0, len(list))
+
+    for _, p := range list {
+        _, b, err := net.ParseCIDR(p.Base)
+        if err != nil {
+            return nil, fmt.Errorf("invalid base pool %q: %v", p.Base, err)
+        }
+        ones, _ := b.Mask.Size()
+        if p.Size <= 0 || p.Size < ones {
+            return nil, fmt.Errorf("invalid pools size: %d", p.Size)
+        }
+        localPools = append(localPools, splitNetwork(p.Size, b)...)
+    }
+    return localPools, nil
+}
+
+func splitNetwork(size int, base *net.IPNet) []*net.IPNet {
+    one, bits := base.Mask.Size()
+    mask := net.CIDRMask(size, bits)
+    n := 1 << uint(size-one)
+    s := uint(bits - size)
+    list := make([]*net.IPNet, 0, n)
+
+    for i := 0; i < n; i++ {
+        ip := copyIP(base.IP)
+        addIntToIP(ip, uint(i<<s))
+        list = append(list, &net.IPNet{IP: ip, Mask: mask})
+    }
+    return list
+}
+
+func copyIP(from net.IP) net.IP {
+    ip := make([]byte, len(from))
+    copy(ip, from)
+    return ip
+}
+
+func addIntToIP(array []byte, ordinal uint) {
+    for i := len(array) - 1; i >= 0; i-- {
+        array[i] |= (byte)(ordinal & 0xff)
+        ordinal >>= 8
+    }
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/types/types.go
new file mode 100644
index 00000000..db1960c1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/libnetwork/types/types.go
@@ -0,0 +1,653 @@
+// Package types contains types that are common across libnetwork project
+package types
+
+import (
+    "bytes"
+    "fmt"
+    "net"
+    "strconv"
+    "strings"
+
+    "github.com/ishidawataru/sctp"
+)
+
+// constants for the IP address type
+const (
+    IP = iota // IPv4 and IPv6
+    IPv4
+    IPv6
+)
+
+// EncryptionKey is the libnetwork representation of the key distributed by the lead
+// manager.
+type EncryptionKey struct { + Subsystem string + Algorithm int32 + Key []byte + LamportTime uint64 +} + +// UUID represents a globally unique ID of various resources like network and endpoint +type UUID string + +// QosPolicy represents a quality of service policy on an endpoint +type QosPolicy struct { + MaxEgressBandwidth uint64 +} + +// TransportPort represents a local Layer 4 endpoint +type TransportPort struct { + Proto Protocol + Port uint16 +} + +// Equal checks if this instance of Transportport is equal to the passed one +func (t *TransportPort) Equal(o *TransportPort) bool { + if t == o { + return true + } + + if o == nil { + return false + } + + if t.Proto != o.Proto || t.Port != o.Port { + return false + } + + return true +} + +// GetCopy returns a copy of this TransportPort structure instance +func (t *TransportPort) GetCopy() TransportPort { + return TransportPort{Proto: t.Proto, Port: t.Port} +} + +// String returns the TransportPort structure in string form +func (t *TransportPort) String() string { + return fmt.Sprintf("%s/%d", t.Proto.String(), t.Port) +} + +// FromString reads the TransportPort structure from string +func (t *TransportPort) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) == 2 { + t.Proto = ParseProtocol(ps[0]) + if p, err := strconv.ParseUint(ps[1], 10, 16); err == nil { + t.Port = uint16(p) + return nil + } + } + return BadRequestErrorf("invalid format for transport port: %s", s) +} + +// PortBinding represents a port binding between the container and the host +type PortBinding struct { + Proto Protocol + IP net.IP + Port uint16 + HostIP net.IP + HostPort uint16 + HostPortEnd uint16 +} + +// HostAddr returns the host side transport address +func (p PortBinding) HostAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case TCP: + return &net.TCPAddr{IP: p.HostIP, Port: int(p.HostPort)}, nil + case SCTP: + return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.HostIP}}, Port: int(p.HostPort)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// ContainerAddr returns the container side transport address +func (p PortBinding) ContainerAddr() (net.Addr, error) { + switch p.Proto { + case UDP: + return &net.UDPAddr{IP: p.IP, Port: int(p.Port)}, nil + case TCP: + return &net.TCPAddr{IP: p.IP, Port: int(p.Port)}, nil + case SCTP: + return &sctp.SCTPAddr{IPAddrs: []net.IPAddr{{IP: p.IP}}, Port: int(p.Port)}, nil + default: + return nil, ErrInvalidProtocolBinding(p.Proto.String()) + } +} + +// GetCopy returns a copy of this PortBinding structure instance +func (p *PortBinding) GetCopy() PortBinding { + return PortBinding{ + Proto: p.Proto, + IP: GetIPCopy(p.IP), + Port: p.Port, + HostIP: GetIPCopy(p.HostIP), + HostPort: p.HostPort, + HostPortEnd: p.HostPortEnd, + } +} + +// String returns the PortBinding structure in string form +func (p *PortBinding) String() string { + ret := fmt.Sprintf("%s/", p.Proto) + if p.IP != nil { + ret += p.IP.String() + } + ret = fmt.Sprintf("%s:%d/", ret, p.Port) + if p.HostIP != nil { + ret += p.HostIP.String() + } + ret = fmt.Sprintf("%s:%d", ret, p.HostPort) + return ret +} + +// FromString reads the PortBinding structure from string s. +// String s is a triple of "protocol/containerIP:port/hostIP:port" +// containerIP and hostIP can be in dotted decimal ("192.0.2.1") or IPv6 ("2001:db8::68") form. +// Zoned addresses ("169.254.0.23%eth0" or "fe80::1ff:fe23:4567:890a%eth0") are not supported. 
+// If string s is incorrectly formatted or the IP addresses or ports cannot be parsed, FromString +// returns an error. +func (p *PortBinding) FromString(s string) error { + ps := strings.Split(s, "/") + if len(ps) != 3 { + return BadRequestErrorf("invalid format for port binding: %s", s) + } + + p.Proto = ParseProtocol(ps[0]) + + var err error + if p.IP, p.Port, err = parseIPPort(ps[1]); err != nil { + return BadRequestErrorf("failed to parse Container IP/Port in port binding: %s", err.Error()) + } + + if p.HostIP, p.HostPort, err = parseIPPort(ps[2]); err != nil { + return BadRequestErrorf("failed to parse Host IP/Port in port binding: %s", err.Error()) + } + + return nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + hoststr, portstr, err := net.SplitHostPort(s) + if err != nil { + return nil, 0, err + } + + ip := net.ParseIP(hoststr) + if ip == nil { + return nil, 0, BadRequestErrorf("invalid ip: %s", hoststr) + } + + port, err := strconv.ParseUint(portstr, 10, 16) + if err != nil { + return nil, 0, BadRequestErrorf("invalid port: %s", portstr) + } + + return ip, uint16(port), nil +} + +// Equal checks if this instance of PortBinding is equal to the passed one +func (p *PortBinding) Equal(o *PortBinding) bool { + if p == o { + return true + } + + if o == nil { + return false + } + + if p.Proto != o.Proto || p.Port != o.Port || + p.HostPort != o.HostPort || p.HostPortEnd != o.HostPortEnd { + return false + } + + if p.IP != nil { + if !p.IP.Equal(o.IP) { + return false + } + } else { + if o.IP != nil { + return false + } + } + + if p.HostIP != nil { + if !p.HostIP.Equal(o.HostIP) { + return false + } + } else { + if o.HostIP != nil { + return false + } + } + + return true +} + +// ErrInvalidProtocolBinding is returned when the port binding protocol is not valid. 
+type ErrInvalidProtocolBinding string + +func (ipb ErrInvalidProtocolBinding) Error() string { + return fmt.Sprintf("invalid transport protocol: %s", string(ipb)) +} + +const ( + // ICMP is for the ICMP ip protocol + ICMP = 1 + // TCP is for the TCP ip protocol + TCP = 6 + // UDP is for the UDP ip protocol + UDP = 17 + // SCTP is for the SCTP ip protocol + SCTP = 132 +) + +// Protocol represents an IP protocol number +type Protocol uint8 + +func (p Protocol) String() string { + switch p { + case ICMP: + return "icmp" + case TCP: + return "tcp" + case UDP: + return "udp" + case SCTP: + return "sctp" + default: + return fmt.Sprintf("%d", p) + } +} + +// ParseProtocol returns the respective Protocol type for the passed string +func ParseProtocol(s string) Protocol { + switch strings.ToLower(s) { + case "icmp": + return ICMP + case "udp": + return UDP + case "tcp": + return TCP + case "sctp": + return SCTP + default: + return 0 + } +} + +// GetMacCopy returns a copy of the passed MAC address +func GetMacCopy(from net.HardwareAddr) net.HardwareAddr { + if from == nil { + return nil + } + to := make(net.HardwareAddr, len(from)) + copy(to, from) + return to +} + +// GetIPCopy returns a copy of the passed IP address +func GetIPCopy(from net.IP) net.IP { + if from == nil { + return nil + } + to := make(net.IP, len(from)) + copy(to, from) + return to +} + +// GetIPNetCopy returns a copy of the passed IP Network +func GetIPNetCopy(from *net.IPNet) *net.IPNet { + if from == nil { + return nil + } + bm := make(net.IPMask, len(from.Mask)) + copy(bm, from.Mask) + return &net.IPNet{IP: GetIPCopy(from.IP), Mask: bm} +} + +// GetIPNetCanonical returns the canonical form for the passed network +func GetIPNetCanonical(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + c := GetIPNetCopy(nw) + c.IP = c.IP.Mask(nw.Mask) + return c +} + +// CompareIPNet returns equal if the two IP Networks are equal +func CompareIPNet(a, b *net.IPNet) bool { + if a == b { + return true + } + if a == nil || b == nil { + return false + } + return a.IP.Equal(b.IP) && bytes.Equal(a.Mask, b.Mask) +} + +// GetMinimalIP returns the address in its shortest form +// If ip contains an IPv4-mapped IPv6 address, the 4-octet form of the IPv4 address will be returned. +// Otherwise ip is returned unchanged. +func GetMinimalIP(ip net.IP) net.IP { + if ip != nil && ip.To4() != nil { + return ip.To4() + } + return ip +} + +// GetMinimalIPNet returns a copy of the passed IP Network with congruent ip and mask notation +func GetMinimalIPNet(nw *net.IPNet) *net.IPNet { + if nw == nil { + return nil + } + if len(nw.IP) == 16 && nw.IP.To4() != nil { + m := nw.Mask + if len(m) == 16 { + m = m[12:16] + } + return &net.IPNet{IP: nw.IP.To4(), Mask: m} + } + return nw +} + +// IsIPNetValid returns true if the ipnet is a valid network/mask +// combination. Otherwise returns false. +func IsIPNetValid(nw *net.IPNet) bool { + return nw.String() != "0.0.0.0/0" +} + +var v4inV6MaskPrefix = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} + +// compareIPMask checks if the passed ip and mask are semantically compatible. +// It returns the byte indexes for the address and mask so that caller can +// do bitwise operations without modifying address representation. 
+func compareIPMask(ip net.IP, mask net.IPMask) (is int, ms int, err error) { + // Find the effective starting of address and mask + if len(ip) == net.IPv6len && ip.To4() != nil { + is = 12 + } + if len(ip[is:]) == net.IPv4len && len(mask) == net.IPv6len && bytes.Equal(mask[:12], v4inV6MaskPrefix) { + ms = 12 + } + // Check if address and mask are semantically compatible + if len(ip[is:]) != len(mask[ms:]) { + err = fmt.Errorf("ip and mask are not compatible: (%#v, %#v)", ip, mask) + } + return +} + +// GetHostPartIP returns the host portion of the ip address identified by the mask. +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetHostPartIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective starting of address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute host portion ip address because %s", err) + } + + // Compute host portion + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] &= ^mask[ms+i] + } + + return out, nil +} + +// GetBroadcastIP returns the broadcast ip address for the passed network (ip and mask). +// IP address representation is not modified. If address and mask are not compatible +// an error is returned. +func GetBroadcastIP(ip net.IP, mask net.IPMask) (net.IP, error) { + // Find the effective starting of address and mask + is, ms, err := compareIPMask(ip, mask) + if err != nil { + return nil, fmt.Errorf("cannot compute broadcast ip address because %s", err) + } + + // Compute broadcast address + out := GetIPCopy(ip) + for i := 0; i < len(mask[ms:]); i++ { + out[is+i] |= ^mask[ms+i] + } + + return out, nil +} + +// ParseCIDR returns the *net.IPNet represented by the passed CIDR notation +func ParseCIDR(cidr string) (n *net.IPNet, e error) { + var i net.IP + if i, n, e = net.ParseCIDR(cidr); e == nil { + n.IP = i + } + return +} + +const ( + // NEXTHOP indicates a StaticRoute with an IP next hop. + NEXTHOP = iota + + // CONNECTED indicates a StaticRoute with an interface for directly connected peers. + CONNECTED +) + +// StaticRoute is a statically-provisioned IP route. +type StaticRoute struct { + Destination *net.IPNet + + RouteType int // NEXT_HOP or CONNECTED + + // NextHop will be resolved by the kernel (i.e. as a loose hop). 
+ NextHop net.IP +} + +// GetCopy returns a copy of this StaticRoute structure +func (r *StaticRoute) GetCopy() *StaticRoute { + d := GetIPNetCopy(r.Destination) + nh := GetIPCopy(r.NextHop) + return &StaticRoute{Destination: d, + RouteType: r.RouteType, + NextHop: nh, + } +} + +// InterfaceStatistics represents the interface's statistics +type InterfaceStatistics struct { + RxBytes uint64 + RxPackets uint64 + RxErrors uint64 + RxDropped uint64 + TxBytes uint64 + TxPackets uint64 + TxErrors uint64 + TxDropped uint64 +} + +func (is *InterfaceStatistics) String() string { + return fmt.Sprintf("\nRxBytes: %d, RxPackets: %d, RxErrors: %d, RxDropped: %d, TxBytes: %d, TxPackets: %d, TxErrors: %d, TxDropped: %d", + is.RxBytes, is.RxPackets, is.RxErrors, is.RxDropped, is.TxBytes, is.TxPackets, is.TxErrors, is.TxDropped) +} + +/****************************** + * Well-known Error Interfaces + ******************************/ + +// MaskableError is an interface for errors which can be ignored by caller +type MaskableError interface { + // Maskable makes implementer into MaskableError type + Maskable() +} + +// RetryError is an interface for errors which might get resolved through retry +type RetryError interface { + // Retry makes implementer into RetryError type + Retry() +} + +// BadRequestError is an interface for errors originated by a bad request +type BadRequestError interface { + // BadRequest makes implementer into BadRequestError type + BadRequest() +} + +// NotFoundError is an interface for errors raised because a needed resource is not available +type NotFoundError interface { + // NotFound makes implementer into NotFoundError type + NotFound() +} + +// ForbiddenError is an interface for errors which denote a valid request that cannot be honored +type ForbiddenError interface { + // Forbidden makes implementer into ForbiddenError type + Forbidden() +} + +// NoServiceError is an interface for errors returned when the required service is not available +type NoServiceError interface { + // NoService makes implementer into NoServiceError type + NoService() +} + +// TimeoutError is an interface for errors raised because of timeout +type TimeoutError interface { + // Timeout makes implementer into TimeoutError type + Timeout() +} + +// NotImplementedError is an interface for errors raised because of requested functionality is not yet implemented +type NotImplementedError interface { + // NotImplemented makes implementer into NotImplementedError type + NotImplemented() +} + +// InternalError is an interface for errors raised because of an internal error +type InternalError interface { + // Internal makes implementer into InternalError type + Internal() +} + +/****************************** + * Well-known Error Formatters + ******************************/ + +// BadRequestErrorf creates an instance of BadRequestError +func BadRequestErrorf(format string, params ...interface{}) error { + return badRequest(fmt.Sprintf(format, params...)) +} + +// NotFoundErrorf creates an instance of NotFoundError +func NotFoundErrorf(format string, params ...interface{}) error { + return notFound(fmt.Sprintf(format, params...)) +} + +// ForbiddenErrorf creates an instance of ForbiddenError +func ForbiddenErrorf(format string, params ...interface{}) error { + return forbidden(fmt.Sprintf(format, params...)) +} + +// NoServiceErrorf creates an instance of NoServiceError +func NoServiceErrorf(format string, params ...interface{}) error { + return noService(fmt.Sprintf(format, params...)) +} + +// NotImplementedErrorf 
creates an instance of NotImplementedError +func NotImplementedErrorf(format string, params ...interface{}) error { + return notImpl(fmt.Sprintf(format, params...)) +} + +// TimeoutErrorf creates an instance of TimeoutError +func TimeoutErrorf(format string, params ...interface{}) error { + return timeout(fmt.Sprintf(format, params...)) +} + +// InternalErrorf creates an instance of InternalError +func InternalErrorf(format string, params ...interface{}) error { + return internal(fmt.Sprintf(format, params...)) +} + +// InternalMaskableErrorf creates an instance of InternalError and MaskableError +func InternalMaskableErrorf(format string, params ...interface{}) error { + return maskInternal(fmt.Sprintf(format, params...)) +} + +// RetryErrorf creates an instance of RetryError +func RetryErrorf(format string, params ...interface{}) error { + return retry(fmt.Sprintf(format, params...)) +} + +/*********************** + * Internal Error Types + ***********************/ +type badRequest string + +func (br badRequest) Error() string { + return string(br) +} +func (br badRequest) BadRequest() {} + +type maskBadRequest string + +type notFound string + +func (nf notFound) Error() string { + return string(nf) +} +func (nf notFound) NotFound() {} + +type forbidden string + +func (frb forbidden) Error() string { + return string(frb) +} +func (frb forbidden) Forbidden() {} + +type noService string + +func (ns noService) Error() string { + return string(ns) +} +func (ns noService) NoService() {} + +type maskNoService string + +type timeout string + +func (to timeout) Error() string { + return string(to) +} +func (to timeout) Timeout() {} + +type notImpl string + +func (ni notImpl) Error() string { + return string(ni) +} +func (ni notImpl) NotImplemented() {} + +type internal string + +func (nt internal) Error() string { + return string(nt) +} +func (nt internal) Internal() {} + +type maskInternal string + +func (mnt maskInternal) Error() string { + return string(mnt) +} +func (mnt maskInternal) Internal() {} +func (mnt maskInternal) Maskable() {} + +type retry string + +func (r retry) Error() string { + return string(r) +} +func (r retry) Retry() {} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/LICENSE new file mode 100644 index 00000000..e2db6ed1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2016-2018 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/README.md b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/README.md new file mode 100644 index 00000000..a7ec3fc5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/README.md @@ -0,0 +1,24 @@ +### Notice + +Do not change .pb.go files directly. 
You need to change the corresponding .proto files and run the following command to regenerate the .pb.go files. +``` +$ make generate +``` + +Click [here](https://github.com/google/protobuf) for more information about protobuf. + +The `api.pb.txt` file contains merged descriptors of all defined services and messages. +Definitions present here are considered frozen after the release. + +At release time, the current `api.pb.txt` file will be moved into place to +freeze the API changes for the minor version. For example, when 1.0.0 is +released, `api.pb.txt` should be moved to `1.0.txt`. Notice that we leave off +the patch number, since the API will be completely locked down for a given +patch series. + +We may find that by default, protobuf descriptors are too noisy to lock down +API changes. In that case, we may filter out certain fields in the descriptors, +possibly regenerating for old versions. + +This process is similar to the [process used to ensure backwards compatibility +in Go](https://github.com/golang/go/tree/master/api). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/ca.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/ca.pb.go new file mode 100644 index 00000000..6f8e35c1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/ca.pb.go @@ -0,0 +1,2340 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/ca.proto + +/* + Package api is a generated protocol buffer package. + + It is generated from these files: + github.com/docker/swarmkit/api/ca.proto + github.com/docker/swarmkit/api/control.proto + github.com/docker/swarmkit/api/dispatcher.proto + github.com/docker/swarmkit/api/health.proto + github.com/docker/swarmkit/api/logbroker.proto + github.com/docker/swarmkit/api/objects.proto + github.com/docker/swarmkit/api/raft.proto + github.com/docker/swarmkit/api/resource.proto + github.com/docker/swarmkit/api/snapshot.proto + github.com/docker/swarmkit/api/specs.proto + github.com/docker/swarmkit/api/types.proto + github.com/docker/swarmkit/api/watch.proto + + It has these top-level messages: + NodeCertificateStatusRequest + NodeCertificateStatusResponse + IssueNodeCertificateRequest + IssueNodeCertificateResponse + GetRootCACertificateRequest + GetRootCACertificateResponse + GetUnlockKeyRequest + GetUnlockKeyResponse + GetNodeRequest + GetNodeResponse + ListNodesRequest + ListNodesResponse + UpdateNodeRequest + UpdateNodeResponse + RemoveNodeRequest + RemoveNodeResponse + GetTaskRequest + GetTaskResponse + RemoveTaskRequest + RemoveTaskResponse + ListTasksRequest + ListTasksResponse + CreateServiceRequest + CreateServiceResponse + GetServiceRequest + GetServiceResponse + UpdateServiceRequest + UpdateServiceResponse + RemoveServiceRequest + RemoveServiceResponse + ListServicesRequest + ListServicesResponse + ListServiceStatusesRequest + ListServiceStatusesResponse + CreateNetworkRequest + CreateNetworkResponse + GetNetworkRequest + GetNetworkResponse + RemoveNetworkRequest + RemoveNetworkResponse + ListNetworksRequest + ListNetworksResponse + GetClusterRequest + GetClusterResponse + ListClustersRequest + ListClustersResponse + KeyRotation + UpdateClusterRequest + UpdateClusterResponse + GetSecretRequest + GetSecretResponse + UpdateSecretRequest + UpdateSecretResponse + ListSecretsRequest + ListSecretsResponse + CreateSecretRequest + CreateSecretResponse + RemoveSecretRequest + RemoveSecretResponse + GetConfigRequest + GetConfigResponse + 
UpdateConfigRequest + UpdateConfigResponse + ListConfigsRequest + ListConfigsResponse + CreateConfigRequest + CreateConfigResponse + RemoveConfigRequest + RemoveConfigResponse + CreateExtensionRequest + CreateExtensionResponse + RemoveExtensionRequest + RemoveExtensionResponse + GetExtensionRequest + GetExtensionResponse + CreateResourceRequest + CreateResourceResponse + RemoveResourceRequest + RemoveResourceResponse + UpdateResourceRequest + UpdateResourceResponse + GetResourceRequest + GetResourceResponse + ListResourcesRequest + ListResourcesResponse + SessionRequest + SessionMessage + HeartbeatRequest + HeartbeatResponse + UpdateTaskStatusRequest + UpdateTaskStatusResponse + TasksRequest + TasksMessage + AssignmentsRequest + Assignment + AssignmentChange + AssignmentsMessage + HealthCheckRequest + HealthCheckResponse + LogSubscriptionOptions + LogSelector + LogContext + LogAttr + LogMessage + SubscribeLogsRequest + SubscribeLogsMessage + ListenSubscriptionsRequest + SubscriptionMessage + PublishLogsMessage + PublishLogsResponse + Meta + Node + Service + Endpoint + Task + NetworkAttachment + Network + Cluster + Secret + Config + Resource + Extension + RaftMember + JoinRequest + JoinResponse + LeaveRequest + LeaveResponse + ProcessRaftMessageRequest + ProcessRaftMessageResponse + StreamRaftMessageRequest + StreamRaftMessageResponse + ResolveAddressRequest + ResolveAddressResponse + InternalRaftRequest + StoreAction + AttachNetworkRequest + AttachNetworkResponse + DetachNetworkRequest + DetachNetworkResponse + StoreSnapshot + ClusterSnapshot + Snapshot + NodeSpec + ServiceSpec + ReplicatedService + GlobalService + TaskSpec + ResourceReference + GenericRuntimeSpec + NetworkAttachmentSpec + ContainerSpec + EndpointSpec + NetworkSpec + ClusterSpec + SecretSpec + ConfigSpec + Version + IndexEntry + Annotations + NamedGenericResource + DiscreteGenericResource + GenericResource + Resources + ResourceRequirements + Platform + PluginDescription + EngineDescription + NodeDescription + NodeTLSInfo + RaftMemberStatus + NodeStatus + Image + Mount + RestartPolicy + UpdateConfig + UpdateStatus + ContainerStatus + PortStatus + TaskStatus + NetworkAttachmentConfig + IPAMConfig + PortConfig + Driver + IPAMOptions + Peer + WeightedPeer + IssuanceStatus + AcceptancePolicy + ExternalCA + CAConfig + OrchestrationConfig + TaskDefaults + DispatcherConfig + RaftConfig + EncryptionConfig + SpreadOver + PlacementPreference + Placement + JoinTokens + RootCA + Certificate + EncryptionKey + ManagerStatus + FileTarget + RuntimeTarget + SecretReference + ConfigReference + BlacklistedCertificate + HealthConfig + MaybeEncryptedRecord + RootRotation + Privileges + Object + SelectBySlot + SelectByCustom + SelectBy + WatchRequest + WatchMessage +*/ +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type NodeCertificateStatusRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *NodeCertificateStatusRequest) Reset() { *m = NodeCertificateStatusRequest{} } +func (*NodeCertificateStatusRequest) ProtoMessage() {} +func (*NodeCertificateStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{0} } + +type NodeCertificateStatusResponse struct { + Status *IssuanceStatus `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + Certificate *Certificate `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` +} + +func (m *NodeCertificateStatusResponse) Reset() { *m = NodeCertificateStatusResponse{} } +func (*NodeCertificateStatusResponse) ProtoMessage() {} +func (*NodeCertificateStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{1} } + +type IssueNodeCertificateRequest struct { + // DEPRECATED: Role is now selected based on which secret is matched. + Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // CSR is the certificate signing request. + CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"` + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + Token string `protobuf:"bytes,3,opt,name=token,proto3" json:"token,omitempty"` + // Availability allows a user to control the current scheduling status of a node + Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *IssueNodeCertificateRequest) Reset() { *m = IssueNodeCertificateRequest{} } +func (*IssueNodeCertificateRequest) ProtoMessage() {} +func (*IssueNodeCertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{2} } + +type IssueNodeCertificateResponse struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeMembership NodeSpec_Membership `protobuf:"varint,2,opt,name=node_membership,json=nodeMembership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"node_membership,omitempty"` +} + +func (m *IssueNodeCertificateResponse) Reset() { *m = IssueNodeCertificateResponse{} } +func (*IssueNodeCertificateResponse) ProtoMessage() {} +func (*IssueNodeCertificateResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{3} } + +type GetRootCACertificateRequest struct { +} + +func (m *GetRootCACertificateRequest) Reset() { *m = GetRootCACertificateRequest{} } +func (*GetRootCACertificateRequest) ProtoMessage() {} +func (*GetRootCACertificateRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{4} } + +type GetRootCACertificateResponse struct { + Certificate []byte `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate,omitempty"` +} + +func (m *GetRootCACertificateResponse) Reset() { *m = GetRootCACertificateResponse{} } +func (*GetRootCACertificateResponse) ProtoMessage() {} +func (*GetRootCACertificateResponse) Descriptor() ([]byte, []int) { 
return fileDescriptorCa, []int{5} } + +type GetUnlockKeyRequest struct { +} + +func (m *GetUnlockKeyRequest) Reset() { *m = GetUnlockKeyRequest{} } +func (*GetUnlockKeyRequest) ProtoMessage() {} +func (*GetUnlockKeyRequest) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{6} } + +type GetUnlockKeyResponse struct { + UnlockKey []byte `protobuf:"bytes,1,opt,name=unlock_key,json=unlockKey,proto3" json:"unlock_key,omitempty"` + Version Version `protobuf:"bytes,2,opt,name=version" json:"version"` +} + +func (m *GetUnlockKeyResponse) Reset() { *m = GetUnlockKeyResponse{} } +func (*GetUnlockKeyResponse) ProtoMessage() {} +func (*GetUnlockKeyResponse) Descriptor() ([]byte, []int) { return fileDescriptorCa, []int{7} } + +func init() { + proto.RegisterType((*NodeCertificateStatusRequest)(nil), "docker.swarmkit.v1.NodeCertificateStatusRequest") + proto.RegisterType((*NodeCertificateStatusResponse)(nil), "docker.swarmkit.v1.NodeCertificateStatusResponse") + proto.RegisterType((*IssueNodeCertificateRequest)(nil), "docker.swarmkit.v1.IssueNodeCertificateRequest") + proto.RegisterType((*IssueNodeCertificateResponse)(nil), "docker.swarmkit.v1.IssueNodeCertificateResponse") + proto.RegisterType((*GetRootCACertificateRequest)(nil), "docker.swarmkit.v1.GetRootCACertificateRequest") + proto.RegisterType((*GetRootCACertificateResponse)(nil), "docker.swarmkit.v1.GetRootCACertificateResponse") + proto.RegisterType((*GetUnlockKeyRequest)(nil), "docker.swarmkit.v1.GetUnlockKeyRequest") + proto.RegisterType((*GetUnlockKeyResponse)(nil), "docker.swarmkit.v1.GetUnlockKeyResponse") +} + +type authenticatedWrapperCAServer struct { + local CAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperCAServer(local CAServer, authorize func(context.Context, []string) error) CAServer { + return &authenticatedWrapperCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + return p.local.GetRootCACertificate(ctx, r) +} + +func (p *authenticatedWrapperCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) +} + +type authenticatedWrapperNodeCAServer struct { + local NodeCAServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperNodeCAServer(local NodeCAServer, authorize func(context.Context, []string) error) NodeCAServer { + return &authenticatedWrapperNodeCAServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + return p.local.IssueNodeCertificate(ctx, r) +} + +func (p *authenticatedWrapperNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + return p.local.NodeCertificateStatus(ctx, r) +} + +func (m *NodeCertificateStatusRequest) Copy() *NodeCertificateStatusRequest { + if m == nil { + return nil + } + o := &NodeCertificateStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusRequest) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusRequest) + *m = *o +} + +func (m *NodeCertificateStatusResponse) Copy() *NodeCertificateStatusResponse 
{ + if m == nil { + return nil + } + o := &NodeCertificateStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *NodeCertificateStatusResponse) CopyFrom(src interface{}) { + + o := src.(*NodeCertificateStatusResponse) + *m = *o + if o.Status != nil { + m.Status = &IssuanceStatus{} + deepcopy.Copy(m.Status, o.Status) + } + if o.Certificate != nil { + m.Certificate = &Certificate{} + deepcopy.Copy(m.Certificate, o.Certificate) + } +} + +func (m *IssueNodeCertificateRequest) Copy() *IssueNodeCertificateRequest { + if m == nil { + return nil + } + o := &IssueNodeCertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateRequest) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateRequest) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } +} + +func (m *IssueNodeCertificateResponse) Copy() *IssueNodeCertificateResponse { + if m == nil { + return nil + } + o := &IssueNodeCertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *IssueNodeCertificateResponse) CopyFrom(src interface{}) { + + o := src.(*IssueNodeCertificateResponse) + *m = *o +} + +func (m *GetRootCACertificateRequest) Copy() *GetRootCACertificateRequest { + if m == nil { + return nil + } + o := &GetRootCACertificateRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateRequest) CopyFrom(src interface{}) {} +func (m *GetRootCACertificateResponse) Copy() *GetRootCACertificateResponse { + if m == nil { + return nil + } + o := &GetRootCACertificateResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetRootCACertificateResponse) CopyFrom(src interface{}) { + + o := src.(*GetRootCACertificateResponse) + *m = *o + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *GetUnlockKeyRequest) Copy() *GetUnlockKeyRequest { + if m == nil { + return nil + } + o := &GetUnlockKeyRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyRequest) CopyFrom(src interface{}) {} +func (m *GetUnlockKeyResponse) Copy() *GetUnlockKeyResponse { + if m == nil { + return nil + } + o := &GetUnlockKeyResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetUnlockKeyResponse) CopyFrom(src interface{}) { + + o := src.(*GetUnlockKeyResponse) + *m = *o + if o.UnlockKey != nil { + m.UnlockKey = make([]byte, len(o.UnlockKey)) + copy(m.UnlockKey, o.UnlockKey) + } + deepcopy.Copy(&m.Version, &o.Version) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for CA service + +type CAClient interface { + GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. 
+	GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error)
+}
+
+type cAClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewCAClient(cc *grpc.ClientConn) CAClient {
+	return &cAClient{cc}
+}
+
+func (c *cAClient) GetRootCACertificate(ctx context.Context, in *GetRootCACertificateRequest, opts ...grpc.CallOption) (*GetRootCACertificateResponse, error) {
+	out := new(GetRootCACertificateResponse)
+	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetRootCACertificate", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *cAClient) GetUnlockKey(ctx context.Context, in *GetUnlockKeyRequest, opts ...grpc.CallOption) (*GetUnlockKeyResponse, error) {
+	out := new(GetUnlockKeyResponse)
+	err := grpc.Invoke(ctx, "/docker.swarmkit.v1.CA/GetUnlockKey", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for CA service
+
+type CAServer interface {
+	GetRootCACertificate(context.Context, *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error)
+	// GetUnlockKey returns the current unlock key for the cluster for the role of the client
+	// asking.
+	GetUnlockKey(context.Context, *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error)
+}
+
+func RegisterCAServer(s *grpc.Server, srv CAServer) {
+	s.RegisterService(&_CA_serviceDesc, srv)
+}
+
+func _CA_GetRootCACertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetRootCACertificateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CAServer).GetRootCACertificate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/docker.swarmkit.v1.CA/GetRootCACertificate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CAServer).GetRootCACertificate(ctx, req.(*GetRootCACertificateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _CA_GetUnlockKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetUnlockKeyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(CAServer).GetUnlockKey(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/docker.swarmkit.v1.CA/GetUnlockKey",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(CAServer).GetUnlockKey(ctx, req.(*GetUnlockKeyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _CA_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "docker.swarmkit.v1.CA",
+	HandlerType: (*CAServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetRootCACertificate",
+			Handler:    _CA_GetRootCACertificate_Handler,
+		},
+		{
+			MethodName: "GetUnlockKey",
+			Handler:    _CA_GetUnlockKey_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/docker/swarmkit/api/ca.proto",
+}
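// ---------------------------------------------------------------------------
// Editor's illustrative sketch — not part of the generated ca.pb.go. A
// minimal caller of the generated CA client defined above: dial a manager,
// wrap the connection with NewCAClient, and fetch the root CA certificate.
// The address, the 5-second timeout, and the extra imports ("fmt", "log",
// "time") are assumptions for illustration; grpc.Dial and grpc.WithInsecure
// come from the grpc package this file already imports, and NewCAClient,
// GetRootCACertificate, and GetRootCACertificateRequest are defined above.
//
//	conn, err := grpc.Dial("manager.example.com:4242", grpc.WithInsecure())
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer conn.Close()
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//
//	resp, err := NewCAClient(conn).GetRootCACertificate(ctx, &GetRootCACertificateRequest{})
//	if err != nil {
//		log.Fatal(err)
//	}
//	fmt.Printf("root CA certificate: %d bytes\n", len(resp.Certificate))
// ---------------------------------------------------------------------------
+
+// Client API for NodeCA service
+
+type NodeCAClient interface {
+	IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error)
+	NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error)
+}
+
+type nodeCAClient struct {
+	cc *grpc.ClientConn
+}
+
+func 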
NewNodeCAClient(cc *grpc.ClientConn) NodeCAClient { + return &nodeCAClient{cc} +} + +func (c *nodeCAClient) IssueNodeCertificate(ctx context.Context, in *IssueNodeCertificateRequest, opts ...grpc.CallOption) (*IssueNodeCertificateResponse, error) { + out := new(IssueNodeCertificateResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *nodeCAClient) NodeCertificateStatus(ctx context.Context, in *NodeCertificateStatusRequest, opts ...grpc.CallOption) (*NodeCertificateStatusResponse, error) { + out := new(NodeCertificateStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for NodeCA service + +type NodeCAServer interface { + IssueNodeCertificate(context.Context, *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) + NodeCertificateStatus(context.Context, *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) +} + +func RegisterNodeCAServer(s *grpc.Server, srv NodeCAServer) { + s.RegisterService(&_NodeCA_serviceDesc, srv) +} + +func _NodeCA_IssueNodeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IssueNodeCertificateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/IssueNodeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).IssueNodeCertificate(ctx, req.(*IssueNodeCertificateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _NodeCA_NodeCertificateStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NodeCertificateStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.NodeCA/NodeCertificateStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(NodeCAServer).NodeCertificateStatus(ctx, req.(*NodeCertificateStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _NodeCA_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.NodeCA", + HandlerType: (*NodeCAServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IssueNodeCertificate", + Handler: _NodeCA_IssueNodeCertificate_Handler, + }, + { + MethodName: "NodeCertificateStatus", + Handler: _NodeCA_NodeCertificateStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/ca.proto", +} + +func (m *NodeCertificateStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], 
m.NodeID) + } + return i, nil +} + +func (m *NodeCertificateStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCertificateStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Certificate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Certificate.Size())) + n2, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *IssueNodeCertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + if len(m.Token) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + } + if m.Availability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Availability)) + } + return i, nil +} + +func (m *IssueNodeCertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssueNodeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeMembership != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.NodeMembership)) + } + return i, nil +} + +func (m *GetRootCACertificateRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetRootCACertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetRootCACertificateResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Certificate) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + return i, nil +} + +func (m *GetUnlockKeyRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetUnlockKeyResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetUnlockKeyResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.UnlockKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintCa(dAtA, i, uint64(len(m.UnlockKey))) + i += copy(dAtA[i:], m.UnlockKey) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintCa(dAtA, i, uint64(m.Version.Size())) + n3, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func encodeVarintCa(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyCAServer struct { + local CAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyCAServer(local CAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) CAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyCAServer) GetRootCACertificate(ctx context.Context, r *GetRootCACertificateRequest) (*GetRootCACertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetRootCACertificate(modCtx, r) + if err != 
nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetRootCACertificate(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetRootCACertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyCAServer) GetUnlockKey(ctx context.Context, r *GetUnlockKeyRequest) (*GetUnlockKeyResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewCAClient(conn).GetUnlockKey(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetUnlockKey(ctx, r) + } + return nil, err + } + return NewCAClient(conn).GetUnlockKey(modCtx, r) + } + return resp, err +} + +type raftProxyNodeCAServer struct { + local NodeCAServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyNodeCAServer(local NodeCAServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) NodeCAServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyNodeCAServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyNodeCAServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyNodeCAServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if 
err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyNodeCAServer) IssueNodeCertificate(ctx context.Context, r *IssueNodeCertificateRequest) (*IssueNodeCertificateResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.IssueNodeCertificate(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).IssueNodeCertificate(modCtx, r) + } + return resp, err +} + +func (p *raftProxyNodeCAServer) NodeCertificateStatus(ctx context.Context, r *NodeCertificateStatusRequest) (*NodeCertificateStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.NodeCertificateStatus(ctx, r) + } + return nil, err + } + return NewNodeCAClient(conn).NodeCertificateStatus(modCtx, r) + } + return resp, err +} + +func (m *NodeCertificateStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *NodeCertificateStatusResponse) Size() (n int) { + var l int + _ = l + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovCa(uint64(l)) + } + if m.Certificate != nil { + l = m.Certificate.Size() + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m *IssueNodeCertificateRequest) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovCa(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = len(m.Token) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.Availability != 0 { + n += 1 + sovCa(uint64(m.Availability)) + } + return n +} + +func (m *IssueNodeCertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + if m.NodeMembership != 0 { + n += 1 + sovCa(uint64(m.NodeMembership)) + } + return n +} + +func (m *GetRootCACertificateRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetRootCACertificateResponse) Size() (n int) { + var l int + _ = l + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + return n +} + +func (m 
*GetUnlockKeyRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetUnlockKeyResponse) Size() (n int) { + var l int + _ = l + l = len(m.UnlockKey) + if l > 0 { + n += 1 + l + sovCa(uint64(l)) + } + l = m.Version.Size() + n += 1 + l + sovCa(uint64(l)) + return n +} + +func sovCa(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozCa(x uint64) (n int) { + return sovCa(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *NodeCertificateStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCertificateStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCertificateStatusResponse{`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "IssuanceStatus", "IssuanceStatus", 1) + `,`, + `Certificate:` + strings.Replace(fmt.Sprintf("%v", this.Certificate), "Certificate", "Certificate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateRequest{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Availability:` + fmt.Sprintf("%v", this.Availability) + `,`, + `}`, + }, "") + return s +} +func (this *IssueNodeCertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssueNodeCertificateResponse{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeMembership:` + fmt.Sprintf("%v", this.NodeMembership) + `,`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateRequest{`, + `}`, + }, "") + return s +} +func (this *GetRootCACertificateResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetRootCACertificateResponse{`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyRequest{`, + `}`, + }, "") + return s +} +func (this *GetUnlockKeyResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetUnlockKeyResponse{`, + `UnlockKey:` + fmt.Sprintf("%v", this.UnlockKey) + `,`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringCa(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeCertificateStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeCertificateStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeCertificateStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &IssuanceStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Certificate == nil { + m.Certificate = &Certificate{} + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateRequest) Unmarshal(dAtA []byte) error { 
+ l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssueNodeCertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...) + if m.CSR == nil { + m.CSR = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssueNodeCertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: IssueNodeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeMembership", wireType) + } + m.NodeMembership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeMembership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetRootCACertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetRootCACertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetRootCACertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) + if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetUnlockKeyResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetUnlockKeyResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetUnlockKeyResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKey = append(m.UnlockKey[:0], dAtA[iNdEx:postIndex]...) 
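+ // append reuses m.UnlockKey's backing array when it has capacity; the nil check that follows normalizes an empty field to a non-nil empty slice.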
+ if m.UnlockKey == nil { + m.UnlockKey = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCa + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCa + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCa(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthCa + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCa(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthCa + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCa + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipCa(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthCa = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCa = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/ca.proto", fileDescriptorCa) } + +var fileDescriptorCa = []byte{ + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xee, 0xba, 0xfd, 0xd3, 0xbf, 0xd3, 0xd0, 0xa2, 0xa5, 0x95, 0x4c, 0x9a, 0x3a, 0x95, 0x39, + 0xb4, 0x20, 0x61, 0xb7, 0x01, 0x09, 0x09, 0x2e, 0x24, 0x41, 0xaa, 0x2a, 0x54, 0x84, 0xb6, 0x82, + 0x6b, 0xe5, 0x38, 0xdb, 0x74, 0x15, 0xc7, 0x6b, 0xbc, 0xeb, 0x42, 0x6e, 0x48, 0x20, 0xde, 0x00, + 0xc1, 0x89, 0x47, 0xe0, 0x39, 0x2a, 0x4e, 0x48, 0x5c, 0x38, 0x55, 0xd4, 0x0f, 0xc0, 0x33, 0x20, + 
0xaf, 0x6d, 0x9a, 0xb4, 0x4e, 0x5a, 0x4e, 0xf1, 0xce, 0x7c, 0xdf, 0x37, 0x33, 0xdf, 0x4e, 0x16, + 0xd6, 0xbb, 0x4c, 0x1e, 0x46, 0x6d, 0xcb, 0xe5, 0x7d, 0xbb, 0xc3, 0xdd, 0x1e, 0x0d, 0x6d, 0xf1, + 0xda, 0x09, 0xfb, 0x3d, 0x26, 0x6d, 0x27, 0x60, 0xb6, 0xeb, 0x58, 0x41, 0xc8, 0x25, 0xc7, 0x38, + 0xcd, 0x5a, 0x79, 0xd6, 0x3a, 0xda, 0xaa, 0xdc, 0xb9, 0x84, 0x2c, 0x07, 0x01, 0x15, 0x29, 0xff, + 0x52, 0xac, 0x08, 0xa8, 0x9b, 0x63, 0x97, 0xba, 0xbc, 0xcb, 0xd5, 0xa7, 0x9d, 0x7c, 0x65, 0xd1, + 0x07, 0x13, 0x14, 0x14, 0xa2, 0x1d, 0x1d, 0xd8, 0x81, 0x17, 0x75, 0x99, 0x9f, 0xfd, 0xa4, 0x44, + 0xb3, 0x05, 0xd5, 0x67, 0xbc, 0x43, 0x5b, 0x34, 0x94, 0xec, 0x80, 0xb9, 0x8e, 0xa4, 0x7b, 0xd2, + 0x91, 0x91, 0x20, 0xf4, 0x55, 0x44, 0x85, 0xc4, 0xb7, 0x60, 0xd6, 0xe7, 0x1d, 0xba, 0xcf, 0x3a, + 0x3a, 0x5a, 0x43, 0x1b, 0x73, 0x4d, 0x88, 0x4f, 0x6a, 0xa5, 0x84, 0xb2, 0xf3, 0x84, 0x94, 0x92, + 0xd4, 0x4e, 0xc7, 0xfc, 0x82, 0x60, 0x75, 0x8c, 0x8a, 0x08, 0xb8, 0x2f, 0x28, 0x7e, 0x08, 0x25, + 0xa1, 0x22, 0x4a, 0x65, 0xbe, 0x6e, 0x5a, 0x17, 0x2d, 0xb3, 0x76, 0x84, 0x88, 0x1c, 0xdf, 0xcd, + 0xb9, 0x19, 0x03, 0x37, 0x60, 0xde, 0x3d, 0x13, 0xd6, 0x35, 0x25, 0x50, 0x2b, 0x12, 0x18, 0xaa, + 0x4f, 0x86, 0x39, 0xe6, 0x0f, 0x04, 0x2b, 0x89, 0x3a, 0x3d, 0xd7, 0x65, 0x3e, 0xe5, 0x7d, 0x98, + 0x09, 0xb9, 0x47, 0x55, 0x73, 0x0b, 0xf5, 0x6a, 0x91, 0x76, 0xc2, 0x24, 0xdc, 0xa3, 0x4d, 0x4d, + 0x47, 0x44, 0xa1, 0xf1, 0x4d, 0x98, 0x76, 0x45, 0xa8, 0x1a, 0x2a, 0x37, 0x67, 0xe3, 0x93, 0xda, + 0x74, 0x6b, 0x8f, 0x90, 0x24, 0x86, 0x97, 0xe0, 0x3f, 0xc9, 0x7b, 0xd4, 0xd7, 0xa7, 0x13, 0xd3, + 0x48, 0x7a, 0xc0, 0xbb, 0x50, 0x76, 0x8e, 0x1c, 0xe6, 0x39, 0x6d, 0xe6, 0x31, 0x39, 0xd0, 0x67, + 0x54, 0xb9, 0xdb, 0xe3, 0xca, 0xed, 0x05, 0xd4, 0xb5, 0x1a, 0x43, 0x04, 0x32, 0x42, 0x37, 0x3f, + 0x22, 0xa8, 0x16, 0x4f, 0x95, 0xb9, 0x7e, 0x95, 0xcb, 0xc3, 0xcf, 0x61, 0x51, 0x81, 0xfa, 0xb4, + 0xdf, 0xa6, 0xa1, 0x38, 0x64, 0x81, 0x9a, 0x68, 0xa1, 0xbe, 0x3e, 0xb1, 0xaf, 0xdd, 0xbf, 0x70, + 0xb2, 0x90, 0xf0, 0xcf, 0xce, 0xe6, 0x2a, 0xac, 0x6c, 0x53, 0x49, 0x38, 0x97, 0xad, 0xc6, 0x45, + 0xb3, 0xcd, 0xc7, 0x50, 0x2d, 0x4e, 0x67, 0x5d, 0xaf, 0x8d, 0xde, 0x77, 0xd2, 0x79, 0x79, 0xf4, + 0x3a, 0x97, 0xe1, 0xc6, 0x36, 0x95, 0x2f, 0x7c, 0x8f, 0xbb, 0xbd, 0xa7, 0x74, 0x90, 0x0b, 0x87, + 0xb0, 0x34, 0x1a, 0xce, 0x04, 0x57, 0x01, 0x22, 0x15, 0xdc, 0xef, 0xd1, 0x41, 0xa6, 0x37, 0x17, + 0xe5, 0x30, 0xfc, 0x08, 0x66, 0x8f, 0x68, 0x28, 0x18, 0xf7, 0xb3, 0xdd, 0x5a, 0x29, 0x1a, 0xfc, + 0x65, 0x0a, 0x69, 0xce, 0x1c, 0x9f, 0xd4, 0xa6, 0x48, 0xce, 0xa8, 0xbf, 0xd7, 0x40, 0x6b, 0x35, + 0xf0, 0x3b, 0xa4, 0x6a, 0x5f, 0x18, 0x0a, 0xdb, 0x45, 0x5a, 0x13, 0xdc, 0xa9, 0x6c, 0x5e, 0x9d, + 0x90, 0x8e, 0x67, 0xfe, 0xff, 0xed, 0xeb, 0xef, 0xcf, 0x9a, 0x76, 0x1d, 0xe1, 0x37, 0x50, 0x1e, + 0x36, 0x00, 0xaf, 0x8f, 0xd1, 0x3a, 0xef, 0x5c, 0x65, 0xe3, 0x72, 0x60, 0x56, 0x6c, 0x59, 0x15, + 0x5b, 0x84, 0x6b, 0x0a, 0x79, 0xb7, 0xef, 0xf8, 0x4e, 0x97, 0x86, 0xf5, 0x4f, 0x1a, 0xa8, 0xbd, + 0xca, 0xac, 0x28, 0xda, 0xca, 0x62, 0x2b, 0x26, 0xfc, 0x2b, 0x8b, 0xad, 0x98, 0xb4, 0xf0, 0x43, + 0x56, 0x7c, 0x40, 0xb0, 0x5c, 0xf8, 0x24, 0xe1, 0xcd, 0x71, 0x6b, 0x3d, 0xee, 0x0d, 0xac, 0x6c, + 0xfd, 0x03, 0xe3, 0x7c, 0x23, 0x4d, 0xfd, 0xf8, 0xd4, 0x98, 0xfa, 0x79, 0x6a, 0x4c, 0xbd, 0x8d, + 0x0d, 0x74, 0x1c, 0x1b, 0xe8, 0x7b, 0x6c, 0xa0, 0x5f, 0xb1, 0x81, 0xda, 0x25, 0xf5, 0x02, 0xdf, + 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xda, 0xca, 0xba, 0x67, 0x06, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/ca.proto 
b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/ca.proto new file mode 100644 index 00000000..e26c8f35 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/ca.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/docker/swarmkit/api/specs.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// CA defines the RPC methods for requesting certificates from a CA. + +service CA { + rpc GetRootCACertificate(GetRootCACertificateRequest) returns (GetRootCACertificateResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; + // GetUnlockKey returns the current unlock key for the cluster for the role of the client + // asking. + rpc GetUnlockKey(GetUnlockKeyRequest) returns (GetUnlockKeyResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: ["swarm-manager"] }; + }; +} + +service NodeCA { + rpc IssueNodeCertificate(IssueNodeCertificateRequest) returns (IssueNodeCertificateResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; + rpc NodeCertificateStatus(NodeCertificateStatusRequest) returns (NodeCertificateStatusResponse) { + option (docker.protobuf.plugin.tls_authorization) = { insecure: true }; + }; +} + +message NodeCertificateStatusRequest { + string node_id = 1; +} + +message NodeCertificateStatusResponse { + IssuanceStatus status = 1; + Certificate certificate = 2; +} + +message IssueNodeCertificateRequest { + // DEPRECATED: Role is now selected based on which secret is matched. + NodeRole role = 1 [deprecated=true]; + + // CSR is the certificate signing request. + bytes csr = 2 [(gogoproto.customname) = "CSR"]; + + // Token represents a user-provided string that is necessary for new + // nodes to join the cluster + string token = 3; + + // Availability allows a user to control the current scheduling status of a node + NodeSpec.Availability availability = 4; +} + +message IssueNodeCertificateResponse { + string node_id = 1; + NodeSpec.Membership node_membership = 2; +} + +message GetRootCACertificateRequest {} + +message GetRootCACertificateResponse { + bytes certificate = 1; +} + +message GetUnlockKeyRequest {} + +message GetUnlockKeyResponse { + bytes unlock_key = 1; + Version version = 2 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/control.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/control.pb.go new file mode 100644 index 00000000..64facf19 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/control.pb.go @@ -0,0 +1,20801 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: github.com/docker/swarmkit/api/control.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf4 "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type UpdateServiceRequest_Rollback int32 + +const ( + // This is not a rollback. The spec field of the request will + // be honored. + UpdateServiceRequest_NONE UpdateServiceRequest_Rollback = 0 + // Roll back the service - get spec from the service's + // previous_spec. + UpdateServiceRequest_PREVIOUS UpdateServiceRequest_Rollback = 1 +) + +var UpdateServiceRequest_Rollback_name = map[int32]string{ + 0: "NONE", + 1: "PREVIOUS", +} +var UpdateServiceRequest_Rollback_value = map[string]int32{ + "NONE": 0, + "PREVIOUS": 1, +} + +func (x UpdateServiceRequest_Rollback) String() string { + return proto.EnumName(UpdateServiceRequest_Rollback_name, int32(x)) +} +func (UpdateServiceRequest_Rollback) EnumDescriptor() ([]byte, []int) { + return fileDescriptorControl, []int{18, 0} +} + +type GetNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (m *GetNodeRequest) Reset() { *m = GetNodeRequest{} } +func (*GetNodeRequest) ProtoMessage() {} +func (*GetNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{0} } + +type GetNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *GetNodeResponse) Reset() { *m = GetNodeResponse{} } +func (*GetNodeResponse) ProtoMessage() {} +func (*GetNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{1} } + +type ListNodesRequest struct { + Filters *ListNodesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNodesRequest) Reset() { *m = ListNodesRequest{} } +func (*ListNodesRequest) ProtoMessage() {} +func (*ListNodesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{2} } + +type ListNodesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + // Labels refers to engine labels, which are labels set by the user on the + // node and reported back to the managers + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NodeLabels are labels set on the node object on the managers. 
+ NodeLabels map[string]string `protobuf:"bytes,7,rep,name=node_labels,json=nodeLabels" json:"node_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Memberships []NodeSpec_Membership `protobuf:"varint,4,rep,name=memberships,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"memberships,omitempty"` + Roles []NodeRole `protobuf:"varint,5,rep,name=roles,enum=docker.swarmkit.v1.NodeRole" json:"roles,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,6,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListNodesRequest_Filters) Reset() { *m = ListNodesRequest_Filters{} } +func (*ListNodesRequest_Filters) ProtoMessage() {} +func (*ListNodesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{2, 0} +} + +type ListNodesResponse struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` +} + +func (m *ListNodesResponse) Reset() { *m = ListNodesResponse{} } +func (*ListNodesResponse) ProtoMessage() {} +func (*ListNodesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{3} } + +// UpdateNodeRequest requests an update to the specified node. This may be used +// to request a new availability for a node, such as PAUSE. Invalid updates +// will be denied and cause an error. +type UpdateNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + NodeVersion *Version `protobuf:"bytes,2,opt,name=node_version,json=nodeVersion" json:"node_version,omitempty"` + Spec *NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateNodeRequest) Reset() { *m = UpdateNodeRequest{} } +func (*UpdateNodeRequest) ProtoMessage() {} +func (*UpdateNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{4} } + +type UpdateNodeResponse struct { + Node *Node `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *UpdateNodeResponse) Reset() { *m = UpdateNodeResponse{} } +func (*UpdateNodeResponse) ProtoMessage() {} +func (*UpdateNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{5} } + +// RemoveNodeRequest requests to delete the specified node from store. 
+type RemoveNodeRequest struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Force bool `protobuf:"varint,2,opt,name=force,proto3" json:"force,omitempty"` +} + +func (m *RemoveNodeRequest) Reset() { *m = RemoveNodeRequest{} } +func (*RemoveNodeRequest) ProtoMessage() {} +func (*RemoveNodeRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{6} } + +type RemoveNodeResponse struct { +} + +func (m *RemoveNodeResponse) Reset() { *m = RemoveNodeResponse{} } +func (*RemoveNodeResponse) ProtoMessage() {} +func (*RemoveNodeResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{7} } + +type GetTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *GetTaskRequest) Reset() { *m = GetTaskRequest{} } +func (*GetTaskRequest) ProtoMessage() {} +func (*GetTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{8} } + +type GetTaskResponse struct { + Task *Task `protobuf:"bytes,1,opt,name=task" json:"task,omitempty"` +} + +func (m *GetTaskResponse) Reset() { *m = GetTaskResponse{} } +func (*GetTaskResponse) ProtoMessage() {} +func (*GetTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{9} } + +type RemoveTaskRequest struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *RemoveTaskRequest) Reset() { *m = RemoveTaskRequest{} } +func (*RemoveTaskRequest) ProtoMessage() {} +func (*RemoveTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{10} } + +type RemoveTaskResponse struct { +} + +func (m *RemoveTaskResponse) Reset() { *m = RemoveTaskResponse{} } +func (*RemoveTaskResponse) ProtoMessage() {} +func (*RemoveTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{11} } + +type ListTasksRequest struct { + Filters *ListTasksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListTasksRequest) Reset() { *m = ListTasksRequest{} } +func (*ListTasksRequest) ProtoMessage() {} +func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{12} } + +type ListTasksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + ServiceIDs []string `protobuf:"bytes,4,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,5,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + DesiredStates []TaskState `protobuf:"varint,6,rep,name=desired_states,json=desiredStates,enum=docker.swarmkit.v1.TaskState" json:"desired_states,omitempty"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,7,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,9,rep,name=runtimes" json:"runtimes,omitempty"` + // UpToDate matches tasks that are consistent with the current + // service definition. + // Note: this is intended for internal status reporting rather + // than being exposed to users. It may be removed in the future. 
+ UpToDate bool `protobuf:"varint,8,opt,name=up_to_date,json=upToDate,proto3" json:"up_to_date,omitempty"` +} + +func (m *ListTasksRequest_Filters) Reset() { *m = ListTasksRequest_Filters{} } +func (*ListTasksRequest_Filters) ProtoMessage() {} +func (*ListTasksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{12, 0} +} + +type ListTasksResponse struct { + Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *ListTasksResponse) Reset() { *m = ListTasksResponse{} } +func (*ListTasksResponse) ProtoMessage() {} +func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{13} } + +type CreateServiceRequest struct { + Spec *ServiceSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateServiceRequest) Reset() { *m = CreateServiceRequest{} } +func (*CreateServiceRequest) ProtoMessage() {} +func (*CreateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{14} } + +type CreateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *CreateServiceResponse) Reset() { *m = CreateServiceResponse{} } +func (*CreateServiceResponse) ProtoMessage() {} +func (*CreateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{15} } + +type GetServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + InsertDefaults bool `protobuf:"varint,2,opt,name=insert_defaults,json=insertDefaults,proto3" json:"insert_defaults,omitempty"` +} + +func (m *GetServiceRequest) Reset() { *m = GetServiceRequest{} } +func (*GetServiceRequest) ProtoMessage() {} +func (*GetServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{16} } + +type GetServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *GetServiceResponse) Reset() { *m = GetServiceResponse{} } +func (*GetServiceResponse) ProtoMessage() {} +func (*GetServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{17} } + +type UpdateServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + ServiceVersion *Version `protobuf:"bytes,2,opt,name=service_version,json=serviceVersion" json:"service_version,omitempty"` + Spec *ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rollback may be set to PREVIOUS to request a rollback (the service's + // spec will be set to the value of its previous_spec field). In this + // case, the spec field of this request is ignored. 
+ Rollback UpdateServiceRequest_Rollback `protobuf:"varint,4,opt,name=rollback,proto3,enum=docker.swarmkit.v1.UpdateServiceRequest_Rollback" json:"rollback,omitempty"` +} + +func (m *UpdateServiceRequest) Reset() { *m = UpdateServiceRequest{} } +func (*UpdateServiceRequest) ProtoMessage() {} +func (*UpdateServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{18} } + +type UpdateServiceResponse struct { + Service *Service `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` +} + +func (m *UpdateServiceResponse) Reset() { *m = UpdateServiceResponse{} } +func (*UpdateServiceResponse) ProtoMessage() {} +func (*UpdateServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{19} } + +type RemoveServiceRequest struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` +} + +func (m *RemoveServiceRequest) Reset() { *m = RemoveServiceRequest{} } +func (*RemoveServiceRequest) ProtoMessage() {} +func (*RemoveServiceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{20} } + +type RemoveServiceResponse struct { +} + +func (m *RemoveServiceResponse) Reset() { *m = RemoveServiceResponse{} } +func (*RemoveServiceResponse) ProtoMessage() {} +func (*RemoveServiceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{21} } + +type ListServicesRequest struct { + Filters *ListServicesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListServicesRequest) Reset() { *m = ListServicesRequest{} } +func (*ListServicesRequest) ProtoMessage() {} +func (*ListServicesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{22} } + +type ListServicesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Runtimes []string `protobuf:"bytes,5,rep,name=runtimes" json:"runtimes,omitempty"` +} + +func (m *ListServicesRequest_Filters) Reset() { *m = ListServicesRequest_Filters{} } +func (*ListServicesRequest_Filters) ProtoMessage() {} +func (*ListServicesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{22, 0} +} + +type ListServicesResponse struct { + Services []*Service `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` +} + +func (m *ListServicesResponse) Reset() { *m = ListServicesResponse{} } +func (*ListServicesResponse) ProtoMessage() {} +func (*ListServicesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{23} } + +// ListServiceStatusesRequest is a request to get the aggregate status of a +// service by computing the number of running vs desired tasks. It includes +// only a service ID. +type ListServiceStatusesRequest struct { + // Services is a list of service IDs to get statuses for. 
+ Services []string `protobuf:"bytes,1,rep,name=services" json:"services,omitempty"` +} + +func (m *ListServiceStatusesRequest) Reset() { *m = ListServiceStatusesRequest{} } +func (*ListServiceStatusesRequest) ProtoMessage() {} +func (*ListServiceStatusesRequest) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{24} +} + +// ListServiceStatusesResponse is a response containing the aggregate status of +// a service, formed by computing the number of running vs desired tasks. The +// values returned are only valid for the point in time at which the request is +// made. +type ListServiceStatusesResponse struct { + Statuses []*ListServiceStatusesResponse_ServiceStatus `protobuf:"bytes,1,rep,name=statuses" json:"statuses,omitempty"` +} + +func (m *ListServiceStatusesResponse) Reset() { *m = ListServiceStatusesResponse{} } +func (*ListServiceStatusesResponse) ProtoMessage() {} +func (*ListServiceStatusesResponse) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{25} +} + +type ListServiceStatusesResponse_ServiceStatus struct { + // ServiceID is the ID of the service this status describes + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // DesiredTasks is the number of tasks desired to be running according to the + // service definition at request time. It is a uint64 because that is what + // the replicas field on the service spec is + DesiredTasks uint64 `protobuf:"varint,2,opt,name=desired_tasks,json=desiredTasks,proto3" json:"desired_tasks,omitempty"` + // RunningTasks is the number of tasks currently in the Running state at + // request time. This may be larger than desired tasks if, for example, a + // service has been scaled down. + RunningTasks uint64 `protobuf:"varint,3,opt,name=running_tasks,json=runningTasks,proto3" json:"running_tasks,omitempty"` +} + +func (m *ListServiceStatusesResponse_ServiceStatus) Reset() { + *m = ListServiceStatusesResponse_ServiceStatus{} +} +func (*ListServiceStatusesResponse_ServiceStatus) ProtoMessage() {} +func (*ListServiceStatusesResponse_ServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{25, 0} +} + +type CreateNetworkRequest struct { + Spec *NetworkSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateNetworkRequest) Reset() { *m = CreateNetworkRequest{} } +func (*CreateNetworkRequest) ProtoMessage() {} +func (*CreateNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{26} } + +type CreateNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *CreateNetworkResponse) Reset() { *m = CreateNetworkResponse{} } +func (*CreateNetworkResponse) ProtoMessage() {} +func (*CreateNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{27} } + +type GetNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *GetNetworkRequest) Reset() { *m = GetNetworkRequest{} } +func (*GetNetworkRequest) ProtoMessage() {} +func (*GetNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{28} } + +type GetNetworkResponse struct { + Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` +} + +func (m *GetNetworkResponse) Reset() { *m = GetNetworkResponse{} } +func 
(*GetNetworkResponse) ProtoMessage() {} +func (*GetNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{29} } + +type RemoveNetworkRequest struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + NetworkID string `protobuf:"bytes,2,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` +} + +func (m *RemoveNetworkRequest) Reset() { *m = RemoveNetworkRequest{} } +func (*RemoveNetworkRequest) ProtoMessage() {} +func (*RemoveNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{30} } + +type RemoveNetworkResponse struct { +} + +func (m *RemoveNetworkResponse) Reset() { *m = RemoveNetworkResponse{} } +func (*RemoveNetworkResponse) ProtoMessage() {} +func (*RemoveNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{31} } + +type ListNetworksRequest struct { + Filters *ListNetworksRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListNetworksRequest) Reset() { *m = ListNetworksRequest{} } +func (*ListNetworksRequest) ProtoMessage() {} +func (*ListNetworksRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{32} } + +type ListNetworksRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListNetworksRequest_Filters) Reset() { *m = ListNetworksRequest_Filters{} } +func (*ListNetworksRequest_Filters) ProtoMessage() {} +func (*ListNetworksRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{32, 0} +} + +type ListNetworksResponse struct { + Networks []*Network `protobuf:"bytes,1,rep,name=networks" json:"networks,omitempty"` +} + +func (m *ListNetworksResponse) Reset() { *m = ListNetworksResponse{} } +func (*ListNetworksResponse) ProtoMessage() {} +func (*ListNetworksResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{33} } + +type GetClusterRequest struct { + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{34} } + +type GetClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *GetClusterResponse) Reset() { *m = GetClusterResponse{} } +func (*GetClusterResponse) ProtoMessage() {} +func (*GetClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{35} } + +type ListClustersRequest struct { + Filters *ListClustersRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{36} } + +type ListClustersRequest_Filters struct { + Names []string 
`protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // NamePrefixes matches all objects with the given prefixes + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListClustersRequest_Filters) Reset() { *m = ListClustersRequest_Filters{} } +func (*ListClustersRequest_Filters) ProtoMessage() {} +func (*ListClustersRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{36, 0} +} + +type ListClustersResponse struct { + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{37} } + +// KeyRotation tells UpdateCluster what items to rotate +type KeyRotation struct { + // WorkerJoinToken tells UpdateCluster to rotate the worker secret token. + WorkerJoinToken bool `protobuf:"varint,1,opt,name=worker_join_token,json=workerJoinToken,proto3" json:"worker_join_token,omitempty"` + // ManagerJoinToken tells UpdateCluster to rotate the manager secret token. + ManagerJoinToken bool `protobuf:"varint,2,opt,name=manager_join_token,json=managerJoinToken,proto3" json:"manager_join_token,omitempty"` + // ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key + ManagerUnlockKey bool `protobuf:"varint,3,opt,name=manager_unlock_key,json=managerUnlockKey,proto3" json:"manager_unlock_key,omitempty"` +} + +func (m *KeyRotation) Reset() { *m = KeyRotation{} } +func (*KeyRotation) ProtoMessage() {} +func (*KeyRotation) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{38} } + +type UpdateClusterRequest struct { + // ClusterID is the cluster ID to update. + ClusterID string `protobuf:"bytes,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"` + // ClusterVersion is the version of the cluster being updated. + ClusterVersion *Version `protobuf:"bytes,2,opt,name=cluster_version,json=clusterVersion" json:"cluster_version,omitempty"` + // Spec is the new spec to apply to the cluster. + Spec *ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` + // Rotation contains flags for join token and unlock key rotation + Rotation KeyRotation `protobuf:"bytes,4,opt,name=rotation" json:"rotation"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{39} } + +type UpdateClusterResponse struct { + Cluster *Cluster `protobuf:"bytes,1,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *UpdateClusterResponse) Reset() { *m = UpdateClusterResponse{} } +func (*UpdateClusterResponse) ProtoMessage() {} +func (*UpdateClusterResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{40} } + +// GetSecretRequest is the request to get a `Secret` object given a secret id. 
+type GetSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *GetSecretRequest) Reset() { *m = GetSecretRequest{} } +func (*GetSecretRequest) ProtoMessage() {} +func (*GetSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{41} } + +// GetSecretResponse contains the Secret corresponding to the id in +// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret` +// object should be nil instead of actually containing the secret bytes. +type GetSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *GetSecretResponse) Reset() { *m = GetSecretResponse{} } +func (*GetSecretResponse) ProtoMessage() {} +func (*GetSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{42} } + +type UpdateSecretRequest struct { + // SecretID is the secret ID to update. + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` + // SecretVersion is the version of the secret being updated. + SecretVersion *Version `protobuf:"bytes,2,opt,name=secret_version,json=secretVersion" json:"secret_version,omitempty"` + // Spec is the new spec to apply to the Secret + // Only some fields are allowed to be updated. + Spec *SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateSecretRequest) Reset() { *m = UpdateSecretRequest{} } +func (*UpdateSecretRequest) ProtoMessage() {} +func (*UpdateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{43} } + +type UpdateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *UpdateSecretResponse) Reset() { *m = UpdateSecretResponse{} } +func (*UpdateSecretResponse) ProtoMessage() {} +func (*UpdateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{44} } + +// ListSecretRequest is the request to list all non-internal secrets in the secret store, +// or all secrets filtered by (name or name prefix or id prefix) and labels. +type ListSecretsRequest struct { + Filters *ListSecretsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListSecretsRequest) Reset() { *m = ListSecretsRequest{} } +func (*ListSecretsRequest) ProtoMessage() {} +func (*ListSecretsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{45} } + +type ListSecretsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListSecretsRequest_Filters) Reset() { *m = ListSecretsRequest_Filters{} } +func (*ListSecretsRequest_Filters) ProtoMessage() {} +func (*ListSecretsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{45, 0} +} + +// ListSecretResponse contains a list of all the secrets that match the name or +// name prefix filters provided in `ListSecretRequest`. 
The `Secret.Spec.Data` +// field in each `Secret` object should be nil instead of actually containing +// the secret bytes. +type ListSecretsResponse struct { + Secrets []*Secret `protobuf:"bytes,1,rep,name=secrets" json:"secrets,omitempty"` +} + +func (m *ListSecretsResponse) Reset() { *m = ListSecretsResponse{} } +func (*ListSecretsResponse) ProtoMessage() {} +func (*ListSecretsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{46} } + +// CreateSecretRequest specifies a new secret (it will not update an existing +// secret) to create. +type CreateSecretRequest struct { + Spec *SecretSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateSecretRequest) Reset() { *m = CreateSecretRequest{} } +func (*CreateSecretRequest) ProtoMessage() {} +func (*CreateSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{47} } + +// CreateSecretResponse contains the newly created `Secret` corresponding to the +// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead +// of actually containing the secret bytes. +type CreateSecretResponse struct { + Secret *Secret `protobuf:"bytes,1,opt,name=secret" json:"secret,omitempty"` +} + +func (m *CreateSecretResponse) Reset() { *m = CreateSecretResponse{} } +func (*CreateSecretResponse) ProtoMessage() {} +func (*CreateSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{48} } + +// RemoveSecretRequest contains the ID of the secret that should be removed. This +// removes all versions of the secret. +type RemoveSecretRequest struct { + SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"` +} + +func (m *RemoveSecretRequest) Reset() { *m = RemoveSecretRequest{} } +func (*RemoveSecretRequest) ProtoMessage() {} +func (*RemoveSecretRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{49} } + +// RemoveSecretResponse is an empty object indicating the successful removal of +// a secret. +type RemoveSecretResponse struct { +} + +func (m *RemoveSecretResponse) Reset() { *m = RemoveSecretResponse{} } +func (*RemoveSecretResponse) ProtoMessage() {} +func (*RemoveSecretResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{50} } + +// GetConfigRequest is the request to get a `Config` object given a config id. +type GetConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *GetConfigRequest) Reset() { *m = GetConfigRequest{} } +func (*GetConfigRequest) ProtoMessage() {} +func (*GetConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{51} } + +// GetConfigResponse contains the Config corresponding to the id in +// `GetConfigRequest`. +type GetConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *GetConfigResponse) Reset() { *m = GetConfigResponse{} } +func (*GetConfigResponse) ProtoMessage() {} +func (*GetConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{52} } + +type UpdateConfigRequest struct { + // ConfigID is the config ID to update. + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // ConfigVersion is the version of the config being updated. 
+ ConfigVersion *Version `protobuf:"bytes,2,opt,name=config_version,json=configVersion" json:"config_version,omitempty"` + // Spec is the new spec to apply to the Config + // Only some fields are allowed to be updated. + Spec *ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *UpdateConfigRequest) Reset() { *m = UpdateConfigRequest{} } +func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{53} } + +type UpdateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *UpdateConfigResponse) Reset() { *m = UpdateConfigResponse{} } +func (*UpdateConfigResponse) ProtoMessage() {} +func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{54} } + +// ListConfigRequest is the request to list all configs in the config store, +// or all configs filtered by (name or name prefix or id prefix) and labels. +type ListConfigsRequest struct { + Filters *ListConfigsRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListConfigsRequest) Reset() { *m = ListConfigsRequest{} } +func (*ListConfigsRequest) ProtoMessage() {} +func (*ListConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{55} } + +type ListConfigsRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` +} + +func (m *ListConfigsRequest_Filters) Reset() { *m = ListConfigsRequest_Filters{} } +func (*ListConfigsRequest_Filters) ProtoMessage() {} +func (*ListConfigsRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{55, 0} +} + +// ListConfigResponse contains a list of all the configs that match the name or +// name prefix filters provided in `ListConfigRequest`. +type ListConfigsResponse struct { + Configs []*Config `protobuf:"bytes,1,rep,name=configs" json:"configs,omitempty"` +} + +func (m *ListConfigsResponse) Reset() { *m = ListConfigsResponse{} } +func (*ListConfigsResponse) ProtoMessage() {} +func (*ListConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{56} } + +// CreateConfigRequest specifies a new config (it will not update an existing +// config) to create. +type CreateConfigRequest struct { + Spec *ConfigSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` +} + +func (m *CreateConfigRequest) Reset() { *m = CreateConfigRequest{} } +func (*CreateConfigRequest) ProtoMessage() {} +func (*CreateConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{57} } + +// CreateConfigResponse contains the newly created `Config` corresponding to the +// name in `CreateConfigRequest`. 
+type CreateConfigResponse struct { + Config *Config `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` +} + +func (m *CreateConfigResponse) Reset() { *m = CreateConfigResponse{} } +func (*CreateConfigResponse) ProtoMessage() {} +func (*CreateConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{58} } + +// RemoveConfigRequest contains the ID of the config that should be removed. This +// removes all versions of the config. +type RemoveConfigRequest struct { + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` +} + +func (m *RemoveConfigRequest) Reset() { *m = RemoveConfigRequest{} } +func (*RemoveConfigRequest) ProtoMessage() {} +func (*RemoveConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{59} } + +// RemoveConfigResponse is an empty object indicating the successful removal of +// a config. +type RemoveConfigResponse struct { +} + +func (m *RemoveConfigResponse) Reset() { *m = RemoveConfigResponse{} } +func (*RemoveConfigResponse) ProtoMessage() {} +func (*RemoveConfigResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{60} } + +// CreateExtensionRequest creates a new extension as specified by the provided +// parameters. +type CreateExtensionRequest struct { + Annotations *Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *CreateExtensionRequest) Reset() { *m = CreateExtensionRequest{} } +func (*CreateExtensionRequest) ProtoMessage() {} +func (*CreateExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{61} } + +// CreateExtensionResponse contains the newly created `Extension` corresponding +// to the parameters in the CreateExtensionRequest. +type CreateExtensionResponse struct { + Extension *Extension `protobuf:"bytes,1,opt,name=extension" json:"extension,omitempty"` +} + +func (m *CreateExtensionResponse) Reset() { *m = CreateExtensionResponse{} } +func (*CreateExtensionResponse) ProtoMessage() {} +func (*CreateExtensionResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{62} } + +// RemoveExtensionRequest contains the ID of the extension that should be removed. This +// removes all versions of the extension. +type RemoveExtensionRequest struct { + ExtensionID string `protobuf:"bytes,1,opt,name=extension_id,json=extensionId,proto3" json:"extension_id,omitempty"` +} + +func (m *RemoveExtensionRequest) Reset() { *m = RemoveExtensionRequest{} } +func (*RemoveExtensionRequest) ProtoMessage() {} +func (*RemoveExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{63} } + +// RemoveExtensionResponse is an empty object indicating the successful removal +// of an extension. +type RemoveExtensionResponse struct { +} + +func (m *RemoveExtensionResponse) Reset() { *m = RemoveExtensionResponse{} } +func (*RemoveExtensionResponse) ProtoMessage() {} +func (*RemoveExtensionResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{64} } + +// GetExtensionRequest is the request to get an Extension object given an extension id. 
+type GetExtensionRequest struct { + ExtensionID string `protobuf:"bytes,1,opt,name=extension_id,json=extensionId,proto3" json:"extension_id,omitempty"` +} + +func (m *GetExtensionRequest) Reset() { *m = GetExtensionRequest{} } +func (*GetExtensionRequest) ProtoMessage() {} +func (*GetExtensionRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{65} } + +// GetExtensionResponse contains the Extension corresponding to the id in +// `GetExtensionRequest`. +type GetExtensionResponse struct { + Extension *Extension `protobuf:"bytes,1,opt,name=extension" json:"extension,omitempty"` +} + +func (m *GetExtensionResponse) Reset() { *m = GetExtensionResponse{} } +func (*GetExtensionResponse) ProtoMessage() {} +func (*GetExtensionResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{66} } + +// CreateResourceRequest creates a new resource specified by the included +// resource object. An existing resource will not be updated. +type CreateResourceRequest struct { + Annotations *Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations,omitempty"` + Kind string `protobuf:"bytes,2,opt,name=kind,proto3" json:"kind,omitempty"` + Payload *google_protobuf4.Any `protobuf:"bytes,3,opt,name=payload" json:"payload,omitempty"` +} + +func (m *CreateResourceRequest) Reset() { *m = CreateResourceRequest{} } +func (*CreateResourceRequest) ProtoMessage() {} +func (*CreateResourceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{67} } + +// CreateResourceResponse contains the newly created `Resource` corresponding +// to the resource in the CreateResourceRequest. +type CreateResourceResponse struct { + Resource *Resource `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *CreateResourceResponse) Reset() { *m = CreateResourceResponse{} } +func (*CreateResourceResponse) ProtoMessage() {} +func (*CreateResourceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{68} } + +// RemoveResourceRequest contains the ID of the resource that should be removed. This +// removes all versions of the resource. +type RemoveResourceRequest struct { + ResourceID string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` +} + +func (m *RemoveResourceRequest) Reset() { *m = RemoveResourceRequest{} } +func (*RemoveResourceRequest) ProtoMessage() {} +func (*RemoveResourceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{69} } + +// RemoveResourceResponse is an empty object indicating the successful removal +// of a resource. +type RemoveResourceResponse struct { +} + +func (m *RemoveResourceResponse) Reset() { *m = RemoveResourceResponse{} } +func (*RemoveResourceResponse) ProtoMessage() {} +func (*RemoveResourceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{70} } + +// UpdateResourceRequest updates the resource specified by the given resource object. +type UpdateResourceRequest struct { + ResourceID string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` + ResourceVersion *Version `protobuf:"bytes,2,opt,name=resource_version,json=resourceVersion" json:"resource_version,omitempty"` + // Annotations describes the annotations to update. If the Annotations should + // be unchanged, then this field should be left empty. Note that the name of + // a Resource cannot be changed, only its labels. 
+ Annotations *Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations,omitempty"` + // Payload describes the new payload of the resource. If the Payload should + // be unchanged, then this field should be left empty. + Payload *google_protobuf4.Any `protobuf:"bytes,4,opt,name=payload" json:"payload,omitempty"` +} + +func (m *UpdateResourceRequest) Reset() { *m = UpdateResourceRequest{} } +func (*UpdateResourceRequest) ProtoMessage() {} +func (*UpdateResourceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{71} } + +type UpdateResourceResponse struct { + Resource *Resource `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *UpdateResourceResponse) Reset() { *m = UpdateResourceResponse{} } +func (*UpdateResourceResponse) ProtoMessage() {} +func (*UpdateResourceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{72} } + +// GetResourceRequest is the request to get a Resource object given a resource id. +type GetResourceRequest struct { + ResourceID string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"` +} + +func (m *GetResourceRequest) Reset() { *m = GetResourceRequest{} } +func (*GetResourceRequest) ProtoMessage() {} +func (*GetResourceRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{73} } + +// GetResourceResponse contains the Resource corresponding to the id in +// `GetResourceRequest`. +type GetResourceResponse struct { + Resource *Resource `protobuf:"bytes,1,opt,name=resource" json:"resource,omitempty"` +} + +func (m *GetResourceResponse) Reset() { *m = GetResourceResponse{} } +func (*GetResourceResponse) ProtoMessage() {} +func (*GetResourceResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{74} } + +// ListResourcesRequest is the request to list all resources in the raft store, +// or all resources filtered by (name or name prefix or id prefix), labels and extension. +type ListResourcesRequest struct { + Filters *ListResourcesRequest_Filters `protobuf:"bytes,1,opt,name=filters" json:"filters,omitempty"` +} + +func (m *ListResourcesRequest) Reset() { *m = ListResourcesRequest{} } +func (*ListResourcesRequest) ProtoMessage() {} +func (*ListResourcesRequest) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{75} } + +type ListResourcesRequest_Filters struct { + Names []string `protobuf:"bytes,1,rep,name=names" json:"names,omitempty"` + IDPrefixes []string `protobuf:"bytes,2,rep,name=id_prefixes,json=idPrefixes" json:"id_prefixes,omitempty"` + Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + NamePrefixes []string `protobuf:"bytes,4,rep,name=name_prefixes,json=namePrefixes" json:"name_prefixes,omitempty"` + Kind string `protobuf:"bytes,5,opt,name=kind,proto3" json:"kind,omitempty"` +} + +func (m *ListResourcesRequest_Filters) Reset() { *m = ListResourcesRequest_Filters{} } +func (*ListResourcesRequest_Filters) ProtoMessage() {} +func (*ListResourcesRequest_Filters) Descriptor() ([]byte, []int) { + return fileDescriptorControl, []int{75, 0} +} + +// ListResourcesResponse contains a list of all the resources that match the name or +// name prefix filters provided in `ListResourcesRequest`. 
+type ListResourcesResponse struct { + Resources []*Resource `protobuf:"bytes,1,rep,name=resources" json:"resources,omitempty"` +} + +func (m *ListResourcesResponse) Reset() { *m = ListResourcesResponse{} } +func (*ListResourcesResponse) ProtoMessage() {} +func (*ListResourcesResponse) Descriptor() ([]byte, []int) { return fileDescriptorControl, []int{76} } + +func init() { + proto.RegisterType((*GetNodeRequest)(nil), "docker.swarmkit.v1.GetNodeRequest") + proto.RegisterType((*GetNodeResponse)(nil), "docker.swarmkit.v1.GetNodeResponse") + proto.RegisterType((*ListNodesRequest)(nil), "docker.swarmkit.v1.ListNodesRequest") + proto.RegisterType((*ListNodesRequest_Filters)(nil), "docker.swarmkit.v1.ListNodesRequest.Filters") + proto.RegisterType((*ListNodesResponse)(nil), "docker.swarmkit.v1.ListNodesResponse") + proto.RegisterType((*UpdateNodeRequest)(nil), "docker.swarmkit.v1.UpdateNodeRequest") + proto.RegisterType((*UpdateNodeResponse)(nil), "docker.swarmkit.v1.UpdateNodeResponse") + proto.RegisterType((*RemoveNodeRequest)(nil), "docker.swarmkit.v1.RemoveNodeRequest") + proto.RegisterType((*RemoveNodeResponse)(nil), "docker.swarmkit.v1.RemoveNodeResponse") + proto.RegisterType((*GetTaskRequest)(nil), "docker.swarmkit.v1.GetTaskRequest") + proto.RegisterType((*GetTaskResponse)(nil), "docker.swarmkit.v1.GetTaskResponse") + proto.RegisterType((*RemoveTaskRequest)(nil), "docker.swarmkit.v1.RemoveTaskRequest") + proto.RegisterType((*RemoveTaskResponse)(nil), "docker.swarmkit.v1.RemoveTaskResponse") + proto.RegisterType((*ListTasksRequest)(nil), "docker.swarmkit.v1.ListTasksRequest") + proto.RegisterType((*ListTasksRequest_Filters)(nil), "docker.swarmkit.v1.ListTasksRequest.Filters") + proto.RegisterType((*ListTasksResponse)(nil), "docker.swarmkit.v1.ListTasksResponse") + proto.RegisterType((*CreateServiceRequest)(nil), "docker.swarmkit.v1.CreateServiceRequest") + proto.RegisterType((*CreateServiceResponse)(nil), "docker.swarmkit.v1.CreateServiceResponse") + proto.RegisterType((*GetServiceRequest)(nil), "docker.swarmkit.v1.GetServiceRequest") + proto.RegisterType((*GetServiceResponse)(nil), "docker.swarmkit.v1.GetServiceResponse") + proto.RegisterType((*UpdateServiceRequest)(nil), "docker.swarmkit.v1.UpdateServiceRequest") + proto.RegisterType((*UpdateServiceResponse)(nil), "docker.swarmkit.v1.UpdateServiceResponse") + proto.RegisterType((*RemoveServiceRequest)(nil), "docker.swarmkit.v1.RemoveServiceRequest") + proto.RegisterType((*RemoveServiceResponse)(nil), "docker.swarmkit.v1.RemoveServiceResponse") + proto.RegisterType((*ListServicesRequest)(nil), "docker.swarmkit.v1.ListServicesRequest") + proto.RegisterType((*ListServicesRequest_Filters)(nil), "docker.swarmkit.v1.ListServicesRequest.Filters") + proto.RegisterType((*ListServicesResponse)(nil), "docker.swarmkit.v1.ListServicesResponse") + proto.RegisterType((*ListServiceStatusesRequest)(nil), "docker.swarmkit.v1.ListServiceStatusesRequest") + proto.RegisterType((*ListServiceStatusesResponse)(nil), "docker.swarmkit.v1.ListServiceStatusesResponse") + proto.RegisterType((*ListServiceStatusesResponse_ServiceStatus)(nil), "docker.swarmkit.v1.ListServiceStatusesResponse.ServiceStatus") + proto.RegisterType((*CreateNetworkRequest)(nil), "docker.swarmkit.v1.CreateNetworkRequest") + proto.RegisterType((*CreateNetworkResponse)(nil), "docker.swarmkit.v1.CreateNetworkResponse") + proto.RegisterType((*GetNetworkRequest)(nil), "docker.swarmkit.v1.GetNetworkRequest") + proto.RegisterType((*GetNetworkResponse)(nil), "docker.swarmkit.v1.GetNetworkResponse") 
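+	// NOTE (editorial sketch, not part of the generated code): these
+	// RegisterType calls make each message resolvable by its fully qualified
+	// name through the golang/protobuf registry, which is what enables Any
+	// payloads and dynamic lookups. For example (assuming the standard
+	// github.com/golang/protobuf/proto and reflect imports):
+	//
+	//	t := proto.MessageType("docker.swarmkit.v1.GetNodeRequest") // reflect.Type of *GetNodeRequest
+	//	msg := reflect.New(t.Elem()).Interface().(proto.Message)   // fresh, zero-valued message
+	//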
+ proto.RegisterType((*RemoveNetworkRequest)(nil), "docker.swarmkit.v1.RemoveNetworkRequest") + proto.RegisterType((*RemoveNetworkResponse)(nil), "docker.swarmkit.v1.RemoveNetworkResponse") + proto.RegisterType((*ListNetworksRequest)(nil), "docker.swarmkit.v1.ListNetworksRequest") + proto.RegisterType((*ListNetworksRequest_Filters)(nil), "docker.swarmkit.v1.ListNetworksRequest.Filters") + proto.RegisterType((*ListNetworksResponse)(nil), "docker.swarmkit.v1.ListNetworksResponse") + proto.RegisterType((*GetClusterRequest)(nil), "docker.swarmkit.v1.GetClusterRequest") + proto.RegisterType((*GetClusterResponse)(nil), "docker.swarmkit.v1.GetClusterResponse") + proto.RegisterType((*ListClustersRequest)(nil), "docker.swarmkit.v1.ListClustersRequest") + proto.RegisterType((*ListClustersRequest_Filters)(nil), "docker.swarmkit.v1.ListClustersRequest.Filters") + proto.RegisterType((*ListClustersResponse)(nil), "docker.swarmkit.v1.ListClustersResponse") + proto.RegisterType((*KeyRotation)(nil), "docker.swarmkit.v1.KeyRotation") + proto.RegisterType((*UpdateClusterRequest)(nil), "docker.swarmkit.v1.UpdateClusterRequest") + proto.RegisterType((*UpdateClusterResponse)(nil), "docker.swarmkit.v1.UpdateClusterResponse") + proto.RegisterType((*GetSecretRequest)(nil), "docker.swarmkit.v1.GetSecretRequest") + proto.RegisterType((*GetSecretResponse)(nil), "docker.swarmkit.v1.GetSecretResponse") + proto.RegisterType((*UpdateSecretRequest)(nil), "docker.swarmkit.v1.UpdateSecretRequest") + proto.RegisterType((*UpdateSecretResponse)(nil), "docker.swarmkit.v1.UpdateSecretResponse") + proto.RegisterType((*ListSecretsRequest)(nil), "docker.swarmkit.v1.ListSecretsRequest") + proto.RegisterType((*ListSecretsRequest_Filters)(nil), "docker.swarmkit.v1.ListSecretsRequest.Filters") + proto.RegisterType((*ListSecretsResponse)(nil), "docker.swarmkit.v1.ListSecretsResponse") + proto.RegisterType((*CreateSecretRequest)(nil), "docker.swarmkit.v1.CreateSecretRequest") + proto.RegisterType((*CreateSecretResponse)(nil), "docker.swarmkit.v1.CreateSecretResponse") + proto.RegisterType((*RemoveSecretRequest)(nil), "docker.swarmkit.v1.RemoveSecretRequest") + proto.RegisterType((*RemoveSecretResponse)(nil), "docker.swarmkit.v1.RemoveSecretResponse") + proto.RegisterType((*GetConfigRequest)(nil), "docker.swarmkit.v1.GetConfigRequest") + proto.RegisterType((*GetConfigResponse)(nil), "docker.swarmkit.v1.GetConfigResponse") + proto.RegisterType((*UpdateConfigRequest)(nil), "docker.swarmkit.v1.UpdateConfigRequest") + proto.RegisterType((*UpdateConfigResponse)(nil), "docker.swarmkit.v1.UpdateConfigResponse") + proto.RegisterType((*ListConfigsRequest)(nil), "docker.swarmkit.v1.ListConfigsRequest") + proto.RegisterType((*ListConfigsRequest_Filters)(nil), "docker.swarmkit.v1.ListConfigsRequest.Filters") + proto.RegisterType((*ListConfigsResponse)(nil), "docker.swarmkit.v1.ListConfigsResponse") + proto.RegisterType((*CreateConfigRequest)(nil), "docker.swarmkit.v1.CreateConfigRequest") + proto.RegisterType((*CreateConfigResponse)(nil), "docker.swarmkit.v1.CreateConfigResponse") + proto.RegisterType((*RemoveConfigRequest)(nil), "docker.swarmkit.v1.RemoveConfigRequest") + proto.RegisterType((*RemoveConfigResponse)(nil), "docker.swarmkit.v1.RemoveConfigResponse") + proto.RegisterType((*CreateExtensionRequest)(nil), "docker.swarmkit.v1.CreateExtensionRequest") + proto.RegisterType((*CreateExtensionResponse)(nil), "docker.swarmkit.v1.CreateExtensionResponse") + proto.RegisterType((*RemoveExtensionRequest)(nil), 
"docker.swarmkit.v1.RemoveExtensionRequest") + proto.RegisterType((*RemoveExtensionResponse)(nil), "docker.swarmkit.v1.RemoveExtensionResponse") + proto.RegisterType((*GetExtensionRequest)(nil), "docker.swarmkit.v1.GetExtensionRequest") + proto.RegisterType((*GetExtensionResponse)(nil), "docker.swarmkit.v1.GetExtensionResponse") + proto.RegisterType((*CreateResourceRequest)(nil), "docker.swarmkit.v1.CreateResourceRequest") + proto.RegisterType((*CreateResourceResponse)(nil), "docker.swarmkit.v1.CreateResourceResponse") + proto.RegisterType((*RemoveResourceRequest)(nil), "docker.swarmkit.v1.RemoveResourceRequest") + proto.RegisterType((*RemoveResourceResponse)(nil), "docker.swarmkit.v1.RemoveResourceResponse") + proto.RegisterType((*UpdateResourceRequest)(nil), "docker.swarmkit.v1.UpdateResourceRequest") + proto.RegisterType((*UpdateResourceResponse)(nil), "docker.swarmkit.v1.UpdateResourceResponse") + proto.RegisterType((*GetResourceRequest)(nil), "docker.swarmkit.v1.GetResourceRequest") + proto.RegisterType((*GetResourceResponse)(nil), "docker.swarmkit.v1.GetResourceResponse") + proto.RegisterType((*ListResourcesRequest)(nil), "docker.swarmkit.v1.ListResourcesRequest") + proto.RegisterType((*ListResourcesRequest_Filters)(nil), "docker.swarmkit.v1.ListResourcesRequest.Filters") + proto.RegisterType((*ListResourcesResponse)(nil), "docker.swarmkit.v1.ListResourcesResponse") + proto.RegisterEnum("docker.swarmkit.v1.UpdateServiceRequest_Rollback", UpdateServiceRequest_Rollback_name, UpdateServiceRequest_Rollback_value) +} + +type authenticatedWrapperControlServer struct { + local ControlServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperControlServer(local ControlServer, authorize func(context.Context, []string) error) ControlServer { + return &authenticatedWrapperControlServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) 
(*RemoveTaskResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListServiceStatuses(ctx context.Context, r *ListServiceStatusesRequest) (*ListServiceStatusesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListServiceStatuses(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateCluster(ctx context.Context, r 
*UpdateClusterRequest) (*UpdateClusterResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateConfig(ctx context.Context, r *CreateConfigRequest) (*CreateConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetExtension(ctx context.Context, r *GetExtensionRequest) (*GetExtensionResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetExtension(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateExtension(ctx context.Context, r *CreateExtensionRequest) (*CreateExtensionResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateExtension(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveExtension(ctx context.Context, r 
*RemoveExtensionRequest) (*RemoveExtensionResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveExtension(ctx, r) +} + +func (p *authenticatedWrapperControlServer) GetResource(ctx context.Context, r *GetResourceRequest) (*GetResourceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.GetResource(ctx, r) +} + +func (p *authenticatedWrapperControlServer) UpdateResource(ctx context.Context, r *UpdateResourceRequest) (*UpdateResourceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateResource(ctx, r) +} + +func (p *authenticatedWrapperControlServer) ListResources(ctx context.Context, r *ListResourcesRequest) (*ListResourcesResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ListResources(ctx, r) +} + +func (p *authenticatedWrapperControlServer) CreateResource(ctx context.Context, r *CreateResourceRequest) (*CreateResourceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.CreateResource(ctx, r) +} + +func (p *authenticatedWrapperControlServer) RemoveResource(ctx context.Context, r *RemoveResourceRequest) (*RemoveResourceResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.RemoveResource(ctx, r) +} + +func (m *GetNodeRequest) Copy() *GetNodeRequest { + if m == nil { + return nil + } + o := &GetNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeRequest) CopyFrom(src interface{}) { + + o := src.(*GetNodeRequest) + *m = *o +} + +func (m *GetNodeResponse) Copy() *GetNodeResponse { + if m == nil { + return nil + } + o := &GetNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNodeResponse) CopyFrom(src interface{}) { + + o := src.(*GetNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *ListNodesRequest) Copy() *ListNodesRequest { + if m == nil { + return nil + } + o := &ListNodesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNodesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNodesRequest_Filters) Copy() *ListNodesRequest_Filters { + if m == nil { + return nil + } + o := &ListNodesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNodesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NodeLabels != nil { + m.NodeLabels = make(map[string]string, len(o.NodeLabels)) + for k, v := range o.NodeLabels { + m.NodeLabels[k] = v + } + } + + if o.Memberships != nil { + m.Memberships = make([]NodeSpec_Membership, len(o.Memberships)) + copy(m.Memberships, o.Memberships) + } + + if o.Roles != nil { + m.Roles = make([]NodeRole, len(o.Roles)) + copy(m.Roles, o.Roles) + } + + if o.NamePrefixes != 
nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNodesResponse) Copy() *ListNodesResponse { + if m == nil { + return nil + } + o := &ListNodesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNodesResponse) CopyFrom(src interface{}) { + + o := src.(*ListNodesResponse) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + +} + +func (m *UpdateNodeRequest) Copy() *UpdateNodeRequest { + if m == nil { + return nil + } + o := &UpdateNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateNodeRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeRequest) + *m = *o + if o.NodeVersion != nil { + m.NodeVersion = &Version{} + deepcopy.Copy(m.NodeVersion, o.NodeVersion) + } + if o.Spec != nil { + m.Spec = &NodeSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateNodeResponse) Copy() *UpdateNodeResponse { + if m == nil { + return nil + } + o := &UpdateNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateNodeResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateNodeResponse) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *RemoveNodeRequest) Copy() *RemoveNodeRequest { + if m == nil { + return nil + } + o := &RemoveNodeRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNodeRequest) + *m = *o +} + +func (m *RemoveNodeResponse) Copy() *RemoveNodeResponse { + if m == nil { + return nil + } + o := &RemoveNodeResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNodeResponse) CopyFrom(src interface{}) {} +func (m *GetTaskRequest) Copy() *GetTaskRequest { + if m == nil { + return nil + } + o := &GetTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskRequest) CopyFrom(src interface{}) { + + o := src.(*GetTaskRequest) + *m = *o +} + +func (m *GetTaskResponse) Copy() *GetTaskResponse { + if m == nil { + return nil + } + o := &GetTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetTaskResponse) CopyFrom(src interface{}) { + + o := src.(*GetTaskResponse) + *m = *o + if o.Task != nil { + m.Task = &Task{} + deepcopy.Copy(m.Task, o.Task) + } +} + +func (m *RemoveTaskRequest) Copy() *RemoveTaskRequest { + if m == nil { + return nil + } + o := &RemoveTaskRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveTaskRequest) + *m = *o +} + +func (m *RemoveTaskResponse) Copy() *RemoveTaskResponse { + if m == nil { + return nil + } + o := &RemoveTaskResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveTaskResponse) CopyFrom(src interface{}) {} +func (m *ListTasksRequest) Copy() *ListTasksRequest { + if m == nil { + return nil + } + o := &ListTasksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListTasksRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListTasksRequest_Filters) Copy() *ListTasksRequest_Filters { + if m == nil { + return nil + } + o := &ListTasksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListTasksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if 
o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.DesiredStates != nil { + m.DesiredStates = make([]TaskState, len(o.DesiredStates)) + copy(m.DesiredStates, o.DesiredStates) + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListTasksResponse) Copy() *ListTasksResponse { + if m == nil { + return nil + } + o := &ListTasksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListTasksResponse) CopyFrom(src interface{}) { + + o := src.(*ListTasksResponse) + *m = *o + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + +} + +func (m *CreateServiceRequest) Copy() *CreateServiceRequest { + if m == nil { + return nil + } + o := &CreateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*CreateServiceRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ServiceSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateServiceResponse) Copy() *CreateServiceResponse { + if m == nil { + return nil + } + o := &CreateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*CreateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *GetServiceRequest) Copy() *GetServiceRequest { + if m == nil { + return nil + } + o := &GetServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceRequest) CopyFrom(src interface{}) { + + o := src.(*GetServiceRequest) + *m = *o +} + +func (m *GetServiceResponse) Copy() *GetServiceResponse { + if m == nil { + return nil + } + o := &GetServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetServiceResponse) CopyFrom(src interface{}) { + + o := src.(*GetServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *UpdateServiceRequest) Copy() *UpdateServiceRequest { + if m == nil { + return nil + } + o := &UpdateServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceRequest) + *m = *o + if o.ServiceVersion != nil { + m.ServiceVersion = &Version{} + deepcopy.Copy(m.ServiceVersion, o.ServiceVersion) + } + if o.Spec != nil { + m.Spec = &ServiceSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateServiceResponse) Copy() *UpdateServiceResponse { + if m == nil { + return nil + } + o := &UpdateServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateServiceResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateServiceResponse) + *m = *o + if o.Service != nil { + m.Service = &Service{} + deepcopy.Copy(m.Service, o.Service) + } +} + +func (m *RemoveServiceRequest) Copy() *RemoveServiceRequest { + if m == nil { + return nil + } + o := 
&RemoveServiceRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveServiceRequest) + *m = *o +} + +func (m *RemoveServiceResponse) Copy() *RemoveServiceResponse { + if m == nil { + return nil + } + o := &RemoveServiceResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveServiceResponse) CopyFrom(src interface{}) {} +func (m *ListServicesRequest) Copy() *ListServicesRequest { + if m == nil { + return nil + } + o := &ListServicesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListServicesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListServicesRequest_Filters) Copy() *ListServicesRequest_Filters { + if m == nil { + return nil + } + o := &ListServicesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListServicesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + + if o.Runtimes != nil { + m.Runtimes = make([]string, len(o.Runtimes)) + copy(m.Runtimes, o.Runtimes) + } + +} + +func (m *ListServicesResponse) Copy() *ListServicesResponse { + if m == nil { + return nil + } + o := &ListServicesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListServicesResponse) CopyFrom(src interface{}) { + + o := src.(*ListServicesResponse) + *m = *o + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + +} + +func (m *ListServiceStatusesRequest) Copy() *ListServiceStatusesRequest { + if m == nil { + return nil + } + o := &ListServiceStatusesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListServiceStatusesRequest) CopyFrom(src interface{}) { + + o := src.(*ListServiceStatusesRequest) + *m = *o + if o.Services != nil { + m.Services = make([]string, len(o.Services)) + copy(m.Services, o.Services) + } + +} + +func (m *ListServiceStatusesResponse) Copy() *ListServiceStatusesResponse { + if m == nil { + return nil + } + o := &ListServiceStatusesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListServiceStatusesResponse) CopyFrom(src interface{}) { + + o := src.(*ListServiceStatusesResponse) + *m = *o + if o.Statuses != nil { + m.Statuses = make([]*ListServiceStatusesResponse_ServiceStatus, len(o.Statuses)) + for i := range m.Statuses { + m.Statuses[i] = &ListServiceStatusesResponse_ServiceStatus{} + deepcopy.Copy(m.Statuses[i], o.Statuses[i]) + } + } + +} + +func (m *ListServiceStatusesResponse_ServiceStatus) Copy() *ListServiceStatusesResponse_ServiceStatus { + if m == nil { + return nil + } + o := &ListServiceStatusesResponse_ServiceStatus{} + o.CopyFrom(m) + return o +} + +func (m *ListServiceStatusesResponse_ServiceStatus) CopyFrom(src interface{}) { + + o := src.(*ListServiceStatusesResponse_ServiceStatus) + *m = *o +} + +func (m *CreateNetworkRequest) Copy() *CreateNetworkRequest { + if m == nil { + 
return nil + } + o := &CreateNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkRequest) + *m = *o + if o.Spec != nil { + m.Spec = &NetworkSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateNetworkResponse) Copy() *CreateNetworkResponse { + if m == nil { + return nil + } + o := &CreateNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*CreateNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *GetNetworkRequest) Copy() *GetNetworkRequest { + if m == nil { + return nil + } + o := &GetNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*GetNetworkRequest) + *m = *o +} + +func (m *GetNetworkResponse) Copy() *GetNetworkResponse { + if m == nil { + return nil + } + o := &GetNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*GetNetworkResponse) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } +} + +func (m *RemoveNetworkRequest) Copy() *RemoveNetworkRequest { + if m == nil { + return nil + } + o := &RemoveNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveNetworkRequest) + *m = *o +} + +func (m *RemoveNetworkResponse) Copy() *RemoveNetworkResponse { + if m == nil { + return nil + } + o := &RemoveNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveNetworkResponse) CopyFrom(src interface{}) {} +func (m *ListNetworksRequest) Copy() *ListNetworksRequest { + if m == nil { + return nil + } + o := &ListNetworksRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListNetworksRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListNetworksRequest_Filters) Copy() *ListNetworksRequest_Filters { + if m == nil { + return nil + } + o := &ListNetworksRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListNetworksRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListNetworksResponse) Copy() *ListNetworksResponse { + if m == nil { + return nil + } + o := &ListNetworksResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListNetworksResponse) CopyFrom(src interface{}) { + + o := src.(*ListNetworksResponse) + *m = *o + if o.Networks != nil { + m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + +} + +func (m *GetClusterRequest) Copy() *GetClusterRequest { + if m == nil { + return nil + } + o := &GetClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterRequest) CopyFrom(src 
interface{}) { + + o := src.(*GetClusterRequest) + *m = *o +} + +func (m *GetClusterResponse) Copy() *GetClusterResponse { + if m == nil { + return nil + } + o := &GetClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetClusterResponse) CopyFrom(src interface{}) { + + o := src.(*GetClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *ListClustersRequest) Copy() *ListClustersRequest { + if m == nil { + return nil + } + o := &ListClustersRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListClustersRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListClustersRequest_Filters) Copy() *ListClustersRequest_Filters { + if m == nil { + return nil + } + o := &ListClustersRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListClustersRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListClustersResponse) Copy() *ListClustersResponse { + if m == nil { + return nil + } + o := &ListClustersResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListClustersResponse) CopyFrom(src interface{}) { + + o := src.(*ListClustersResponse) + *m = *o + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + +} + +func (m *KeyRotation) Copy() *KeyRotation { + if m == nil { + return nil + } + o := &KeyRotation{} + o.CopyFrom(m) + return o +} + +func (m *KeyRotation) CopyFrom(src interface{}) { + + o := src.(*KeyRotation) + *m = *o +} + +func (m *UpdateClusterRequest) Copy() *UpdateClusterRequest { + if m == nil { + return nil + } + o := &UpdateClusterRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterRequest) + *m = *o + if o.ClusterVersion != nil { + m.ClusterVersion = &Version{} + deepcopy.Copy(m.ClusterVersion, o.ClusterVersion) + } + if o.Spec != nil { + m.Spec = &ClusterSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } + deepcopy.Copy(&m.Rotation, &o.Rotation) +} + +func (m *UpdateClusterResponse) Copy() *UpdateClusterResponse { + if m == nil { + return nil + } + o := &UpdateClusterResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateClusterResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateClusterResponse) + *m = *o + if o.Cluster != nil { + m.Cluster = &Cluster{} + deepcopy.Copy(m.Cluster, o.Cluster) + } +} + +func (m *GetSecretRequest) Copy() *GetSecretRequest { + if m == nil { + return nil + } + o := &GetSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetSecretRequest) CopyFrom(src interface{}) { + + o := src.(*GetSecretRequest) + *m = *o +} + +func (m *GetSecretResponse) Copy() *GetSecretResponse { + if m == nil { + return nil + } + o := &GetSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m 
*GetSecretResponse) CopyFrom(src interface{}) { + + o := src.(*GetSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *UpdateSecretRequest) Copy() *UpdateSecretRequest { + if m == nil { + return nil + } + o := &UpdateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretRequest) + *m = *o + if o.SecretVersion != nil { + m.SecretVersion = &Version{} + deepcopy.Copy(m.SecretVersion, o.SecretVersion) + } + if o.Spec != nil { + m.Spec = &SecretSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateSecretResponse) Copy() *UpdateSecretResponse { + if m == nil { + return nil + } + o := &UpdateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *ListSecretsRequest) Copy() *ListSecretsRequest { + if m == nil { + return nil + } + o := &ListSecretsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListSecretsRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListSecretsRequest_Filters) Copy() *ListSecretsRequest_Filters { + if m == nil { + return nil + } + o := &ListSecretsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListSecretsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListSecretsResponse) Copy() *ListSecretsResponse { + if m == nil { + return nil + } + o := &ListSecretsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListSecretsResponse) CopyFrom(src interface{}) { + + o := src.(*ListSecretsResponse) + *m = *o + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + +} + +func (m *CreateSecretRequest) Copy() *CreateSecretRequest { + if m == nil { + return nil + } + o := &CreateSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretRequest) CopyFrom(src interface{}) { + + o := src.(*CreateSecretRequest) + *m = *o + if o.Spec != nil { + m.Spec = &SecretSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateSecretResponse) Copy() *CreateSecretResponse { + if m == nil { + return nil + } + o := &CreateSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateSecretResponse) CopyFrom(src interface{}) { + + o := src.(*CreateSecretResponse) + *m = *o + if o.Secret != nil { + m.Secret = &Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *RemoveSecretRequest) Copy() *RemoveSecretRequest { + if m == nil { + return nil + } + o := &RemoveSecretRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveSecretRequest) + 
*m = *o +} + +func (m *RemoveSecretResponse) Copy() *RemoveSecretResponse { + if m == nil { + return nil + } + o := &RemoveSecretResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveSecretResponse) CopyFrom(src interface{}) {} +func (m *GetConfigRequest) Copy() *GetConfigRequest { + if m == nil { + return nil + } + o := &GetConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigRequest) CopyFrom(src interface{}) { + + o := src.(*GetConfigRequest) + *m = *o +} + +func (m *GetConfigResponse) Copy() *GetConfigResponse { + if m == nil { + return nil + } + o := &GetConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetConfigResponse) CopyFrom(src interface{}) { + + o := src.(*GetConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *UpdateConfigRequest) Copy() *UpdateConfigRequest { + if m == nil { + return nil + } + o := &UpdateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigRequest) + *m = *o + if o.ConfigVersion != nil { + m.ConfigVersion = &Version{} + deepcopy.Copy(m.ConfigVersion, o.ConfigVersion) + } + if o.Spec != nil { + m.Spec = &ConfigSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *UpdateConfigResponse) Copy() *UpdateConfigResponse { + if m == nil { + return nil + } + o := &UpdateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *ListConfigsRequest) Copy() *ListConfigsRequest { + if m == nil { + return nil + } + o := &ListConfigsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListConfigsRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListConfigsRequest_Filters) Copy() *ListConfigsRequest_Filters { + if m == nil { + return nil + } + o := &ListConfigsRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListConfigsRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListConfigsResponse) Copy() *ListConfigsResponse { + if m == nil { + return nil + } + o := &ListConfigsResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListConfigsResponse) CopyFrom(src interface{}) { + + o := src.(*ListConfigsResponse) + *m = *o + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *CreateConfigRequest) Copy() *CreateConfigRequest { + if m == nil { + return nil + } + o := &CreateConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigRequest) CopyFrom(src interface{}) { + + o := src.(*CreateConfigRequest) + *m = *o + if o.Spec != nil { + m.Spec = &ConfigSpec{} + 
deepcopy.Copy(m.Spec, o.Spec) + } +} + +func (m *CreateConfigResponse) Copy() *CreateConfigResponse { + if m == nil { + return nil + } + o := &CreateConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateConfigResponse) CopyFrom(src interface{}) { + + o := src.(*CreateConfigResponse) + *m = *o + if o.Config != nil { + m.Config = &Config{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *RemoveConfigRequest) Copy() *RemoveConfigRequest { + if m == nil { + return nil + } + o := &RemoveConfigRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveConfigRequest) + *m = *o +} + +func (m *RemoveConfigResponse) Copy() *RemoveConfigResponse { + if m == nil { + return nil + } + o := &RemoveConfigResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveConfigResponse) CopyFrom(src interface{}) {} +func (m *CreateExtensionRequest) Copy() *CreateExtensionRequest { + if m == nil { + return nil + } + o := &CreateExtensionRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateExtensionRequest) CopyFrom(src interface{}) { + + o := src.(*CreateExtensionRequest) + *m = *o + if o.Annotations != nil { + m.Annotations = &Annotations{} + deepcopy.Copy(m.Annotations, o.Annotations) + } +} + +func (m *CreateExtensionResponse) Copy() *CreateExtensionResponse { + if m == nil { + return nil + } + o := &CreateExtensionResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateExtensionResponse) CopyFrom(src interface{}) { + + o := src.(*CreateExtensionResponse) + *m = *o + if o.Extension != nil { + m.Extension = &Extension{} + deepcopy.Copy(m.Extension, o.Extension) + } +} + +func (m *RemoveExtensionRequest) Copy() *RemoveExtensionRequest { + if m == nil { + return nil + } + o := &RemoveExtensionRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveExtensionRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveExtensionRequest) + *m = *o +} + +func (m *RemoveExtensionResponse) Copy() *RemoveExtensionResponse { + if m == nil { + return nil + } + o := &RemoveExtensionResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveExtensionResponse) CopyFrom(src interface{}) {} +func (m *GetExtensionRequest) Copy() *GetExtensionRequest { + if m == nil { + return nil + } + o := &GetExtensionRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetExtensionRequest) CopyFrom(src interface{}) { + + o := src.(*GetExtensionRequest) + *m = *o +} + +func (m *GetExtensionResponse) Copy() *GetExtensionResponse { + if m == nil { + return nil + } + o := &GetExtensionResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetExtensionResponse) CopyFrom(src interface{}) { + + o := src.(*GetExtensionResponse) + *m = *o + if o.Extension != nil { + m.Extension = &Extension{} + deepcopy.Copy(m.Extension, o.Extension) + } +} + +func (m *CreateResourceRequest) Copy() *CreateResourceRequest { + if m == nil { + return nil + } + o := &CreateResourceRequest{} + o.CopyFrom(m) + return o +} + +func (m *CreateResourceRequest) CopyFrom(src interface{}) { + + o := src.(*CreateResourceRequest) + *m = *o + if o.Annotations != nil { + m.Annotations = &Annotations{} + deepcopy.Copy(m.Annotations, o.Annotations) + } + if o.Payload != nil { + m.Payload = &google_protobuf4.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *CreateResourceResponse) Copy() *CreateResourceResponse { + if m == nil { + return nil + } + o := &CreateResourceResponse{} + o.CopyFrom(m) + return o +} + +func (m *CreateResourceResponse) CopyFrom(src interface{}) { + + o := 
src.(*CreateResourceResponse) + *m = *o + if o.Resource != nil { + m.Resource = &Resource{} + deepcopy.Copy(m.Resource, o.Resource) + } +} + +func (m *RemoveResourceRequest) Copy() *RemoveResourceRequest { + if m == nil { + return nil + } + o := &RemoveResourceRequest{} + o.CopyFrom(m) + return o +} + +func (m *RemoveResourceRequest) CopyFrom(src interface{}) { + + o := src.(*RemoveResourceRequest) + *m = *o +} + +func (m *RemoveResourceResponse) Copy() *RemoveResourceResponse { + if m == nil { + return nil + } + o := &RemoveResourceResponse{} + o.CopyFrom(m) + return o +} + +func (m *RemoveResourceResponse) CopyFrom(src interface{}) {} +func (m *UpdateResourceRequest) Copy() *UpdateResourceRequest { + if m == nil { + return nil + } + o := &UpdateResourceRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateResourceRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateResourceRequest) + *m = *o + if o.ResourceVersion != nil { + m.ResourceVersion = &Version{} + deepcopy.Copy(m.ResourceVersion, o.ResourceVersion) + } + if o.Annotations != nil { + m.Annotations = &Annotations{} + deepcopy.Copy(m.Annotations, o.Annotations) + } + if o.Payload != nil { + m.Payload = &google_protobuf4.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *UpdateResourceResponse) Copy() *UpdateResourceResponse { + if m == nil { + return nil + } + o := &UpdateResourceResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateResourceResponse) CopyFrom(src interface{}) { + + o := src.(*UpdateResourceResponse) + *m = *o + if o.Resource != nil { + m.Resource = &Resource{} + deepcopy.Copy(m.Resource, o.Resource) + } +} + +func (m *GetResourceRequest) Copy() *GetResourceRequest { + if m == nil { + return nil + } + o := &GetResourceRequest{} + o.CopyFrom(m) + return o +} + +func (m *GetResourceRequest) CopyFrom(src interface{}) { + + o := src.(*GetResourceRequest) + *m = *o +} + +func (m *GetResourceResponse) Copy() *GetResourceResponse { + if m == nil { + return nil + } + o := &GetResourceResponse{} + o.CopyFrom(m) + return o +} + +func (m *GetResourceResponse) CopyFrom(src interface{}) { + + o := src.(*GetResourceResponse) + *m = *o + if o.Resource != nil { + m.Resource = &Resource{} + deepcopy.Copy(m.Resource, o.Resource) + } +} + +func (m *ListResourcesRequest) Copy() *ListResourcesRequest { + if m == nil { + return nil + } + o := &ListResourcesRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListResourcesRequest) CopyFrom(src interface{}) { + + o := src.(*ListResourcesRequest) + *m = *o + if o.Filters != nil { + m.Filters = &ListResourcesRequest_Filters{} + deepcopy.Copy(m.Filters, o.Filters) + } +} + +func (m *ListResourcesRequest_Filters) Copy() *ListResourcesRequest_Filters { + if m == nil { + return nil + } + o := &ListResourcesRequest_Filters{} + o.CopyFrom(m) + return o +} + +func (m *ListResourcesRequest_Filters) CopyFrom(src interface{}) { + + o := src.(*ListResourcesRequest_Filters) + *m = *o + if o.Names != nil { + m.Names = make([]string, len(o.Names)) + copy(m.Names, o.Names) + } + + if o.IDPrefixes != nil { + m.IDPrefixes = make([]string, len(o.IDPrefixes)) + copy(m.IDPrefixes, o.IDPrefixes) + } + + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.NamePrefixes != nil { + m.NamePrefixes = make([]string, len(o.NamePrefixes)) + copy(m.NamePrefixes, o.NamePrefixes) + } + +} + +func (m *ListResourcesResponse) Copy() *ListResourcesResponse { + if m == nil { + return nil + } + o := 
&ListResourcesResponse{} + o.CopyFrom(m) + return o +} + +func (m *ListResourcesResponse) CopyFrom(src interface{}) { + + o := src.(*ListResourcesResponse) + *m = *o + if o.Resources != nil { + m.Resources = make([]*Resource, len(o.Resources)) + for i := range m.Resources { + m.Resources[i] = &Resource{} + deepcopy.Copy(m.Resources[i], o.Resources[i]) + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Control service + +type ControlClient interface { + GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) + ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) + UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) + RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) + GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) + ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) + RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) + GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) + ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) + CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) + UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) + RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) + // ListServiceStatuses returns a `ListServiceStatusesResponse` with the + // status of the requested services, formed by computing the number of + // running vs desired tasks. It is provided as a shortcut or helper method, + // which allows a client to avoid having to calculate this value by listing + // all Tasks. If any service requested does not exist, it will be returned + // but with empty status values. 
+ ListServiceStatuses(ctx context.Context, in *ListServiceStatusesRequest, opts ...grpc.CallOption) (*ListServiceStatusesResponse, error) + GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) + ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) + CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) + RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. + GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) + // UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same + // id as `UpdateSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) + // ListSecrets returns a `ListSecretsResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, any id in + // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) + // CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based + // on the provided `CreateSecretRequest.SecretSpec`. + // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, + // or if the secret data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) + // RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. + // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. + // - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails.
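The comments above spell out a status-code contract (`NotFound`, `InvalidArgument`) rather than sentinel error values, so callers are expected to branch on `codes` constants. A hedged sketch of that pattern, assuming it sits in the same package as this generated code; `getSecretOrNil` and its semantics are illustrative, not part of the API:

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// getSecretOrNil treats NotFound as "absent" instead of a hard failure.
func getSecretOrNil(ctx context.Context, client ControlClient, secretID string) (*GetSecretResponse, error) {
	resp, err := client.GetSecret(ctx, &GetSecretRequest{SecretID: secretID})
	switch status.Code(err) {
	case codes.OK:
		return resp, nil
	case codes.NotFound:
		return nil, nil // no such secret; the caller decides what that means
	default:
		return nil, err // InvalidArgument, transport failures, etc.
	}
}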
+ GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) + // UpdateConfig returns an `UpdateConfigResponse` with a `Config` with the same + // id as `UpdateConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `UpdateConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails. + UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) + // ListConfigs returns a `ListConfigsResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) + // CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. + // - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) + // GetExtension returns a `GetExtensionResponse` with an `Extension` with the same + // id as `GetExtensionRequest.ExtensionId` + // - Returns `NotFound` if the Extension with the given id is not found. + // - Returns `InvalidArgument` if the `GetExtensionRequest.ExtensionId` is empty. + // - Returns an error if the get fails. + GetExtension(ctx context.Context, in *GetExtensionRequest, opts ...grpc.CallOption) (*GetExtensionResponse, error) + // CreateExtension creates an `Extension` based on the provided `CreateExtensionRequest.Extension` + // and returns a `CreateExtensionResponse`. + // - Returns `InvalidArgument` if the `CreateExtensionRequest.Extension` is malformed, + // or fails validation. + // - Returns an error if the creation fails. + CreateExtension(ctx context.Context, in *CreateExtensionRequest, opts ...grpc.CallOption) (*CreateExtensionResponse, error) + // RemoveExtension removes the extension referenced by `RemoveExtensionRequest.ID`. + // - Returns `InvalidArgument` if `RemoveExtensionRequest.ExtensionId` is empty. + // - Returns `NotFound` if an extension named `RemoveExtensionRequest.ExtensionId` is not found. + // - Returns an error if the deletion fails. + RemoveExtension(ctx context.Context, in *RemoveExtensionRequest, opts ...grpc.CallOption) (*RemoveExtensionResponse, error) + // GetResource returns a `GetResourceResponse` with a `Resource` with the same + // id as `GetResourceRequest.Resource` + // - Returns `NotFound` if the Resource with the given id is not found. + // - Returns `InvalidArgument` if the `GetResourceRequest.Resource` is empty. + // - Returns an error if getting fails.
+ GetResource(ctx context.Context, in *GetResourceRequest, opts ...grpc.CallOption) (*GetResourceResponse, error) + // UpdateResource updates the resource with the given `UpdateResourceRequest.Resource.Id` using the given `UpdateResourceRequest.Resource` and returns an `UpdateResourceResponse`. + // - Returns `NotFound` if the Resource with the given `UpdateResourceRequest.Resource.Id` is not found. + // - Returns `InvalidArgument` if the `UpdateResourceRequest.Resource.Id` is empty. + // - Returns an error if updating fails. + UpdateResource(ctx context.Context, in *UpdateResourceRequest, opts ...grpc.CallOption) (*UpdateResourceResponse, error) + // ListResources returns a `ListResourcesResponse` with a list of `Resource`s stored in the raft store, + // or all resources matching any name in `ListResourcesRequest.Names`, any + // name prefix in `ListResourcesRequest.NamePrefixes`, any id in + // `ListResourcesRequest.ResourceIDs`, or any id prefix in `ListResourcesRequest.IDPrefixes`, or an + // extension name equal to `ListResourcesRequest.Extension`. + // - Returns an error if listing fails. + ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) + // CreateResource returns a `CreateResourceResponse` after creating a `Resource` based + // on the provided `CreateResourceRequest.Resource`. + // - Returns `InvalidArgument` if the `CreateResourceRequest.Resource` is malformed, + // or if the resource data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateResource(ctx context.Context, in *CreateResourceRequest, opts ...grpc.CallOption) (*CreateResourceResponse, error) + // RemoveResource removes the `Resource` referenced by `RemoveResourceRequest.ResourceID`. + // - Returns `InvalidArgument` if `RemoveResourceRequest.ResourceID` is empty. + // - Returns `NotFound` if a resource named `RemoveResourceRequest.ResourceID` is not found. + // - Returns an error if the deletion fails. + RemoveResource(ctx context.Context, in *RemoveResourceRequest, opts ...grpc.CallOption) (*RemoveResourceResponse, error) +} + +type controlClient struct { + cc *grpc.ClientConn +} + +func NewControlClient(cc *grpc.ClientConn) ControlClient { + return &controlClient{cc} +} + +func (c *controlClient) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) { + out := new(GetNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNodes(ctx context.Context, in *ListNodesRequest, opts ...grpc.CallOption) (*ListNodesResponse, error) { + out := new(ListNodesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNodes", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateNode(ctx context.Context, in *UpdateNodeRequest, opts ...grpc.CallOption) (*UpdateNodeResponse, error) { + out := new(UpdateNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateNode", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNode(ctx context.Context, in *RemoveNodeRequest, opts ...grpc.CallOption) (*RemoveNodeResponse, error) { + out := new(RemoveNodeResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNode", in, out, c.cc, opts...)
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetTask(ctx context.Context, in *GetTaskRequest, opts ...grpc.CallOption) (*GetTaskResponse, error) { + out := new(GetTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListTasks(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) { + out := new(ListTasksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListTasks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveTask(ctx context.Context, in *RemoveTaskRequest, opts ...grpc.CallOption) (*RemoveTaskResponse, error) { + out := new(RemoveTaskResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveTask", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetService(ctx context.Context, in *GetServiceRequest, opts ...grpc.CallOption) (*GetServiceResponse, error) { + out := new(GetServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListServices(ctx context.Context, in *ListServicesRequest, opts ...grpc.CallOption) (*ListServicesResponse, error) { + out := new(ListServicesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListServices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateService(ctx context.Context, in *CreateServiceRequest, opts ...grpc.CallOption) (*CreateServiceResponse, error) { + out := new(CreateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateService(ctx context.Context, in *UpdateServiceRequest, opts ...grpc.CallOption) (*UpdateServiceResponse, error) { + out := new(UpdateServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveService(ctx context.Context, in *RemoveServiceRequest, opts ...grpc.CallOption) (*RemoveServiceResponse, error) { + out := new(RemoveServiceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveService", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListServiceStatuses(ctx context.Context, in *ListServiceStatusesRequest, opts ...grpc.CallOption) (*ListServiceStatusesResponse, error) { + out := new(ListServiceStatusesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListServiceStatuses", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetNetwork(ctx context.Context, in *GetNetworkRequest, opts ...grpc.CallOption) (*GetNetworkResponse, error) { + out := new(GetNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetNetwork", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListNetworks(ctx context.Context, in *ListNetworksRequest, opts ...grpc.CallOption) (*ListNetworksResponse, error) { + out := new(ListNetworksResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListNetworks", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateNetwork(ctx context.Context, in *CreateNetworkRequest, opts ...grpc.CallOption) (*CreateNetworkResponse, error) { + out := new(CreateNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveNetwork(ctx context.Context, in *RemoveNetworkRequest, opts ...grpc.CallOption) (*RemoveNetworkResponse, error) { + out := new(RemoveNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*GetClusterResponse, error) { + out := new(GetClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*UpdateClusterResponse, error) { + out := new(UpdateClusterResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetSecret(ctx context.Context, in *GetSecretRequest, opts ...grpc.CallOption) (*GetSecretResponse, error) { + out := new(GetSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateSecret(ctx context.Context, in *UpdateSecretRequest, opts ...grpc.CallOption) (*UpdateSecretResponse, error) { + out := new(UpdateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListSecrets(ctx context.Context, in *ListSecretsRequest, opts ...grpc.CallOption) (*ListSecretsResponse, error) { + out := new(ListSecretsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListSecrets", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateSecret(ctx context.Context, in *CreateSecretRequest, opts ...grpc.CallOption) (*CreateSecretResponse, error) { + out := new(CreateSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateSecret", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveSecret(ctx context.Context, in *RemoveSecretRequest, opts ...grpc.CallOption) (*RemoveSecretResponse, error) { + out := new(RemoveSecretResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveSecret", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + out := new(GetConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + out := new(UpdateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListConfigs(ctx context.Context, in *ListConfigsRequest, opts ...grpc.CallOption) (*ListConfigsResponse, error) { + out := new(ListConfigsResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateConfig(ctx context.Context, in *CreateConfigRequest, opts ...grpc.CallOption) (*CreateConfigResponse, error) { + out := new(CreateConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveConfig(ctx context.Context, in *RemoveConfigRequest, opts ...grpc.CallOption) (*RemoveConfigResponse, error) { + out := new(RemoveConfigResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetExtension(ctx context.Context, in *GetExtensionRequest, opts ...grpc.CallOption) (*GetExtensionResponse, error) { + out := new(GetExtensionResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetExtension", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateExtension(ctx context.Context, in *CreateExtensionRequest, opts ...grpc.CallOption) (*CreateExtensionResponse, error) { + out := new(CreateExtensionResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateExtension", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveExtension(ctx context.Context, in *RemoveExtensionRequest, opts ...grpc.CallOption) (*RemoveExtensionResponse, error) { + out := new(RemoveExtensionResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveExtension", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) GetResource(ctx context.Context, in *GetResourceRequest, opts ...grpc.CallOption) (*GetResourceResponse, error) { + out := new(GetResourceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/GetResource", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) UpdateResource(ctx context.Context, in *UpdateResourceRequest, opts ...grpc.CallOption) (*UpdateResourceResponse, error) { + out := new(UpdateResourceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/UpdateResource", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) ListResources(ctx context.Context, in *ListResourcesRequest, opts ...grpc.CallOption) (*ListResourcesResponse, error) { + out := new(ListResourcesResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/ListResources", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) CreateResource(ctx context.Context, in *CreateResourceRequest, opts ...grpc.CallOption) (*CreateResourceResponse, error) { + out := new(CreateResourceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/CreateResource", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *controlClient) RemoveResource(ctx context.Context, in *RemoveResourceRequest, opts ...grpc.CallOption) (*RemoveResourceResponse, error) { + out := new(RemoveResourceResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Control/RemoveResource", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Control service + +type ControlServer interface { + GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) + ListNodes(context.Context, *ListNodesRequest) (*ListNodesResponse, error) + UpdateNode(context.Context, *UpdateNodeRequest) (*UpdateNodeResponse, error) + RemoveNode(context.Context, *RemoveNodeRequest) (*RemoveNodeResponse, error) + GetTask(context.Context, *GetTaskRequest) (*GetTaskResponse, error) + ListTasks(context.Context, *ListTasksRequest) (*ListTasksResponse, error) + RemoveTask(context.Context, *RemoveTaskRequest) (*RemoveTaskResponse, error) + GetService(context.Context, *GetServiceRequest) (*GetServiceResponse, error) + ListServices(context.Context, *ListServicesRequest) (*ListServicesResponse, error) + CreateService(context.Context, *CreateServiceRequest) (*CreateServiceResponse, error) + UpdateService(context.Context, *UpdateServiceRequest) (*UpdateServiceResponse, error) + RemoveService(context.Context, *RemoveServiceRequest) (*RemoveServiceResponse, error) + // ListServiceStatuses returns a `ListServiceStatusesResponse` with the + // status of the requested services, formed by computing the number of + // running vs desired tasks. It is provided as a shortcut or helper method, + // which allows a client to avoid having to calculate this value by listing + // all Tasks. If any service requested does not exist, it will be returned + // but with empty status values. 
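Every client stub above is the same thin wrapper: allocate the response, call `grpc.Invoke` with a hard-coded `/docker.swarmkit.v1.Control/<Method>` name, and return. The only wiring left to a caller is dialing a connection and handing it to `NewControlClient`. A sketch under assumed values; the address is a placeholder, and this presumes the same package as the generated code:

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func listNodesExample() {
	// Placeholder target; a swarmkit manager exposes the control API on
	// whatever listener it was configured with.
	conn, err := grpc.Dial("127.0.0.1:4242", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := NewControlClient(conn)

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// This goes through grpc.Invoke with "/docker.swarmkit.v1.Control/ListNodes".
	resp, err := client.ListNodes(ctx, &ListNodesRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("cluster has %d nodes", len(resp.Nodes))
}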
+ ListServiceStatuses(context.Context, *ListServiceStatusesRequest) (*ListServiceStatusesResponse, error) + GetNetwork(context.Context, *GetNetworkRequest) (*GetNetworkResponse, error) + ListNetworks(context.Context, *ListNetworksRequest) (*ListNetworksResponse, error) + CreateNetwork(context.Context, *CreateNetworkRequest) (*CreateNetworkResponse, error) + RemoveNetwork(context.Context, *RemoveNetworkRequest) (*RemoveNetworkResponse, error) + GetCluster(context.Context, *GetClusterRequest) (*GetClusterResponse, error) + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + UpdateCluster(context.Context, *UpdateClusterRequest) (*UpdateClusterResponse, error) + // GetSecret returns a `GetSecretResponse` with a `Secret` with the same + // id as `GetSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty. + // - Returns an error if getting fails. + GetSecret(context.Context, *GetSecretRequest) (*GetSecretResponse, error) + // UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same + // id as `UpdateSecretRequest.SecretID` + // - Returns `NotFound` if the Secret with the given id is not found. + // - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty. + // - Returns an error if updating fails. + UpdateSecret(context.Context, *UpdateSecretRequest) (*UpdateSecretResponse, error) + // ListSecrets returns a `ListSecretsResponse` with a list of all non-internal `Secret`s being + // managed, or all secrets matching any name in `ListSecretsRequest.Names`, any + // name prefix in `ListSecretsRequest.NamePrefixes`, any id in + // `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListSecrets(context.Context, *ListSecretsRequest) (*ListSecretsResponse, error) + // CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based + // on the provided `CreateSecretRequest.SecretSpec`. + // - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed, + // or if the secret data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateSecret(context.Context, *CreateSecretRequest) (*CreateSecretResponse, error) + // RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`. + // - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty. + // - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveSecret(context.Context, *RemoveSecretRequest) (*RemoveSecretResponse, error) + // GetConfig returns a `GetConfigResponse` with a `Config` with the same + // id as `GetConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty. + // - Returns an error if getting fails. + GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) + // UpdateConfig returns an `UpdateConfigResponse` with a `Config` with the same + // id as `UpdateConfigRequest.ConfigID` + // - Returns `NotFound` if the Config with the given id is not found. + // - Returns `InvalidArgument` if the `UpdateConfigRequest.ConfigID` is empty. + // - Returns an error if updating fails.
+ UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + // ListConfigs returns a `ListConfigsResponse` with a list of `Config`s being + // managed, or all configs matching any name in `ListConfigsRequest.Names`, any + // name prefix in `ListConfigsRequest.NamePrefixes`, any id in + // `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`. + // - Returns an error if listing fails. + ListConfigs(context.Context, *ListConfigsRequest) (*ListConfigsResponse, error) + // CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based + // on the provided `CreateConfigRequest.ConfigSpec`. + // - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed, + // or if the config data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateConfig(context.Context, *CreateConfigRequest) (*CreateConfigResponse, error) + // RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`. + // - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty. + // - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found. + // - Returns an error if the deletion fails. + RemoveConfig(context.Context, *RemoveConfigRequest) (*RemoveConfigResponse, error) + // GetExtension returns a `GetExtensionResponse` with an `Extension` with the same + // id as `GetExtensionRequest.ExtensionId` + // - Returns `NotFound` if the Extension with the given id is not found. + // - Returns `InvalidArgument` if the `GetExtensionRequest.ExtensionId` is empty. + // - Returns an error if the get fails. + GetExtension(context.Context, *GetExtensionRequest) (*GetExtensionResponse, error) + // CreateExtension creates an `Extension` based on the provided `CreateExtensionRequest.Extension` + // and returns a `CreateExtensionResponse`. + // - Returns `InvalidArgument` if the `CreateExtensionRequest.Extension` is malformed, + // or fails validation. + // - Returns an error if the creation fails. + CreateExtension(context.Context, *CreateExtensionRequest) (*CreateExtensionResponse, error) + // RemoveExtension removes the extension referenced by `RemoveExtensionRequest.ID`. + // - Returns `InvalidArgument` if `RemoveExtensionRequest.ExtensionId` is empty. + // - Returns `NotFound` if an extension named `RemoveExtensionRequest.ExtensionId` is not found. + // - Returns an error if the deletion fails. + RemoveExtension(context.Context, *RemoveExtensionRequest) (*RemoveExtensionResponse, error) + // GetResource returns a `GetResourceResponse` with a `Resource` with the same + // id as `GetResourceRequest.Resource` + // - Returns `NotFound` if the Resource with the given id is not found. + // - Returns `InvalidArgument` if the `GetResourceRequest.Resource` is empty. + // - Returns an error if getting fails. + GetResource(context.Context, *GetResourceRequest) (*GetResourceResponse, error) + // UpdateResource updates the resource with the given `UpdateResourceRequest.Resource.Id` using the given `UpdateResourceRequest.Resource` and returns an `UpdateResourceResponse`. + // - Returns `NotFound` if the Resource with the given `UpdateResourceRequest.Resource.Id` is not found. + // - Returns `InvalidArgument` if the `UpdateResourceRequest.Resource.Id` is empty. + // - Returns an error if updating fails.
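Each `_Control_*_Handler` generated below follows one template: decode the request, then either call the server method directly or, when a `grpc.UnaryServerInterceptor` is installed, route the call through it with the method's full name carried in `grpc.UnaryServerInfo`. That is the hook for cross-cutting concerns; a minimal, illustrative interceptor (not part of this file) could look like:

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

// loggingInterceptor times every unary RPC; info.FullMethod carries the
// "/docker.swarmkit.v1.Control/<Method>" string baked into each handler.
func loggingInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req)
	log.Printf("%s took %s err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

// Wiring sketch, with srv standing in for any ControlServer implementation:
//   s := grpc.NewServer(grpc.UnaryInterceptor(loggingInterceptor))
//   RegisterControlServer(s, srv)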
+ UpdateResource(context.Context, *UpdateResourceRequest) (*UpdateResourceResponse, error) + // ListResources returns a `ListResourcesResponse` with a list of `Resource`s stored in the raft store, + // or all resources matching any name in `ListResourcesRequest.Names`, any + // name prefix in `ListResourcesRequest.NamePrefixes`, any id in + // `ListResourcesRequest.ResourceIDs`, or any id prefix in `ListResourcesRequest.IDPrefixes`, or an + // extension name equal to `ListResourcesRequest.Extension`. + // - Returns an error if listing fails. + ListResources(context.Context, *ListResourcesRequest) (*ListResourcesResponse, error) + // CreateResource returns a `CreateResourceResponse` after creating a `Resource` based + // on the provided `CreateResourceRequest.Resource`. + // - Returns `InvalidArgument` if the `CreateResourceRequest.Resource` is malformed, + // or if the resource data is too long or contains invalid characters. + // - Returns an error if the creation fails. + CreateResource(context.Context, *CreateResourceRequest) (*CreateResourceResponse, error) + // RemoveResource removes the `Resource` referenced by `RemoveResourceRequest.ResourceID`. + // - Returns `InvalidArgument` if `RemoveResourceRequest.ResourceID` is empty. + // - Returns `NotFound` if a resource named `RemoveResourceRequest.ResourceID` is not found. + // - Returns an error if the deletion fails. + RemoveResource(context.Context, *RemoveResourceRequest) (*RemoveResourceResponse, error) +} + +func RegisterControlServer(s *grpc.Server, srv ControlServer) { + s.RegisterService(&_Control_serviceDesc, srv) +} + +func _Control_GetNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNode(ctx, req.(*GetNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListNodes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNodesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNodes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNodes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNodes(ctx, req.(*ListNodesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateNode_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateNode(ctx, req.(*UpdateNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNode_Handler(srv
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNodeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNode(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNode", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNode(ctx, req.(*RemoveNodeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetTask(ctx, req.(*GetTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListTasks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTasksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListTasks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListTasks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListTasks(ctx, req.(*ListTasksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveTask_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveTaskRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveTask(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveTask", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveTask(ctx, req.(*RemoveTaskRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetService(ctx, req.(*GetServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListServices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListServices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/docker.swarmkit.v1.Control/ListServices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListServices(ctx, req.(*ListServicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateService(ctx, req.(*CreateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateService(ctx, req.(*UpdateServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveService_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveService(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveService", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveService(ctx, req.(*RemoveServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListServiceStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListServiceStatusesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListServiceStatuses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListServiceStatuses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListServiceStatuses(ctx, req.(*ListServiceStatusesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetNetwork(ctx, req.(*GetNetworkRequest)) + } + return interceptor(ctx, 
in, info, handler) +} + +func _Control_ListNetworks_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListNetworksRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListNetworks(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListNetworks", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListNetworks(ctx, req.(*ListNetworksRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateNetwork(ctx, req.(*CreateNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveNetwork(ctx, req.(*RemoveNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + 
} + if interceptor == nil { + return srv.(ControlServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetSecret(ctx, req.(*GetSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateSecret(ctx, req.(*UpdateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListSecrets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSecretsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListSecrets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListSecrets", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListSecrets(ctx, req.(*ListSecretsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateSecret(ctx, req.(*CreateSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveSecret_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveSecretRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveSecret(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveSecret", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ControlServer).RemoveSecret(ctx, req.(*RemoveSecretRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/ListConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListConfigs(ctx, req.(*ListConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateConfig(ctx, req.(*CreateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveConfig(ctx, req.(*RemoveConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(GetExtensionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetExtension(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetExtension", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetExtension(ctx, req.(*GetExtensionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateExtensionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateExtension(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateExtension", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateExtension(ctx, req.(*CreateExtensionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveExtension_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveExtensionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveExtension(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveExtension", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveExtension(ctx, req.(*RemoveExtensionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_GetResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).GetResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/GetResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).GetResource(ctx, req.(*GetResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_UpdateResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).UpdateResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/UpdateResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).UpdateResource(ctx, req.(*UpdateResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_ListResources_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListResourcesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).ListResources(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/docker.swarmkit.v1.Control/ListResources", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).ListResources(ctx, req.(*ListResourcesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_CreateResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).CreateResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/CreateResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).CreateResource(ctx, req.(*CreateResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Control_RemoveResource_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RemoveResourceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ControlServer).RemoveResource(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Control/RemoveResource", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ControlServer).RemoveResource(ctx, req.(*RemoveResourceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Control_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Control", + HandlerType: (*ControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetNode", + Handler: _Control_GetNode_Handler, + }, + { + MethodName: "ListNodes", + Handler: _Control_ListNodes_Handler, + }, + { + MethodName: "UpdateNode", + Handler: _Control_UpdateNode_Handler, + }, + { + MethodName: "RemoveNode", + Handler: _Control_RemoveNode_Handler, + }, + { + MethodName: "GetTask", + Handler: _Control_GetTask_Handler, + }, + { + MethodName: "ListTasks", + Handler: _Control_ListTasks_Handler, + }, + { + MethodName: "RemoveTask", + Handler: _Control_RemoveTask_Handler, + }, + { + MethodName: "GetService", + Handler: _Control_GetService_Handler, + }, + { + MethodName: "ListServices", + Handler: _Control_ListServices_Handler, + }, + { + MethodName: "CreateService", + Handler: _Control_CreateService_Handler, + }, + { + MethodName: "UpdateService", + Handler: _Control_UpdateService_Handler, + }, + { + MethodName: "RemoveService", + Handler: _Control_RemoveService_Handler, + }, + { + MethodName: "ListServiceStatuses", + Handler: _Control_ListServiceStatuses_Handler, + }, + { + MethodName: "GetNetwork", + Handler: _Control_GetNetwork_Handler, + }, + { + MethodName: "ListNetworks", + Handler: _Control_ListNetworks_Handler, + }, + { + MethodName: "CreateNetwork", + Handler: _Control_CreateNetwork_Handler, + }, + { + MethodName: "RemoveNetwork", + Handler: _Control_RemoveNetwork_Handler, + }, + { + MethodName: "GetCluster", + Handler: _Control_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _Control_ListClusters_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _Control_UpdateCluster_Handler, + }, + { + MethodName: "GetSecret", + Handler: _Control_GetSecret_Handler, + }, + { + MethodName: "UpdateSecret", + Handler: _Control_UpdateSecret_Handler, + }, + { + MethodName: "ListSecrets", + Handler: 
_Control_ListSecrets_Handler, + }, + { + MethodName: "CreateSecret", + Handler: _Control_CreateSecret_Handler, + }, + { + MethodName: "RemoveSecret", + Handler: _Control_RemoveSecret_Handler, + }, + { + MethodName: "GetConfig", + Handler: _Control_GetConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _Control_UpdateConfig_Handler, + }, + { + MethodName: "ListConfigs", + Handler: _Control_ListConfigs_Handler, + }, + { + MethodName: "CreateConfig", + Handler: _Control_CreateConfig_Handler, + }, + { + MethodName: "RemoveConfig", + Handler: _Control_RemoveConfig_Handler, + }, + { + MethodName: "GetExtension", + Handler: _Control_GetExtension_Handler, + }, + { + MethodName: "CreateExtension", + Handler: _Control_CreateExtension_Handler, + }, + { + MethodName: "RemoveExtension", + Handler: _Control_RemoveExtension_Handler, + }, + { + MethodName: "GetResource", + Handler: _Control_GetResource_Handler, + }, + { + MethodName: "UpdateResource", + Handler: _Control_UpdateResource_Handler, + }, + { + MethodName: "ListResources", + Handler: _Control_ListResources_Handler, + }, + { + MethodName: "CreateResource", + Handler: _Control_CreateResource_Handler, + }, + { + MethodName: "RemoveResource", + Handler: _Control_RemoveResource_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/control.proto", +} + +func (m *GetNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + return i, nil +} + +func (m *GetNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n1, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *ListNodesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n2, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *ListNodesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + 
i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Memberships) > 0 { + for _, num := range m.Memberships { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.Roles) > 0 { + for _, num := range m.Roles { + dAtA[i] = 0x28 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeLabels) > 0 { + for k, _ := range m.NodeLabels { + dAtA[i] = 0x3a + i++ + v := m.NodeLabels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *ListNodesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNodesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.NodeVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.NodeVersion.Size())) + n3, err := m.NodeVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Node.Size())) + n5, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 
0, err + } + i += n5 + } + return i, nil +} + +func (m *RemoveNodeRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if m.Force { + dAtA[i] = 0x10 + i++ + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *RemoveNodeResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNodeResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *GetTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} + +func (m *RemoveTaskRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *RemoveTaskResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveTaskResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n7, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *ListTasksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
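The `_Control_*_Handler` functions and the `_Control_serviceDesc` table earlier in this hunk all follow one generated shape: decode the wire bytes into the method's request type, call the service directly when no interceptor is installed, and otherwise hand the interceptor a closure plus the method's metadata. The following is a minimal, self-contained sketch of that dispatch pattern — it is not part of the diff, and the `unaryInfo`/`serve`/`getNodeRequest` names are stand-ins for the real `grpc` types used above:

```go
package main

import (
	"context"
	"fmt"
)

// Minimal stand-ins for the grpc types the generated handlers use.
type unaryInfo struct{ FullMethod string }
type unaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
type interceptor func(ctx context.Context, req interface{}, info *unaryInfo, h unaryHandler) (interface{}, error)

type getNodeRequest struct{ NodeID string }

// serve mirrors the generated pattern: with no interceptor the request goes
// straight to the method; otherwise the interceptor wraps a closure that
// performs the typed call, receiving the full method name for logging/auth.
func serve(ctx context.Context, in *getNodeRequest, ic interceptor) (interface{}, error) {
	method := func(ctx context.Context, req interface{}) (interface{}, error) {
		return "node:" + req.(*getNodeRequest).NodeID, nil
	}
	if ic == nil {
		return method(ctx, in)
	}
	info := &unaryInfo{FullMethod: "/docker.swarmkit.v1.Control/GetNode"}
	return ic(ctx, in, info, method)
}

func main() {
	logging := func(ctx context.Context, req interface{}, info *unaryInfo, h unaryHandler) (interface{}, error) {
		fmt.Println("calling", info.FullMethod) // interceptor sees every call
		return h(ctx, req)
	}
	out, _ := serve(context.Background(), &getNodeRequest{NodeID: "n1"}, logging)
	fmt.Println(out) // node:n1
}
```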
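The `MarshalTo` bodies above and below are hand-unrolled protobuf wire encoding: each literal like `0xa`, `0x12`, `0x1a` is a field key byte (`field_number << 3 | wire_type`, with wire type 2 meaning length-delimited), followed by a varint length and the raw bytes; the inline `for l >= 1<<7` loops are just `encodeVarintControl` specialized for string lengths. A standalone sketch of that byte layout, with illustrative names that are not from swarmkit:

```go
package main

import "fmt"

// encodeVarint appends v in base-128 varint form, low 7 bits first,
// mirroring the encodeVarintControl helper in the generated code.
func encodeVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// appendStringField writes field number 1 with wire type 2: the same
// key byte (0x0a), varint length, and payload that the generated
// code emits for fields like GetNodeRequest.NodeID.
func appendStringField(buf []byte, s string) []byte {
	buf = append(buf, 0x0a) // key: (1 << 3) | 2
	buf = encodeVarint(buf, uint64(len(s)))
	return append(buf, s...)
}

func main() {
	msg := appendStringField(nil, "node-1")
	fmt.Printf("% x\n", msg) // 0a 06 6e 6f 64 65 2d 31
}
```

The map-field loops above (for `Labels`) use the same building block twice: each entry is itself a length-delimited submessage holding a key field (`0xa`) and a value field (`0x12`), which is why the generated code computes `mapSize` before writing the entry.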
+func (m *ListTasksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DesiredStates) > 0 { + for _, num := range m.DesiredStates { + dAtA[i] = 0x30 + i++ + i = encodeVarintControl(dAtA, i, uint64(num)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.UpToDate { + dAtA[i] = 0x40 + i++ + if m.UpToDate { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} + +func (m *CreateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *CreateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n9, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} + +func (m *GetServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.InsertDefaults { + dAtA[i] = 0x10 + i++ + if m.InsertDefaults { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *GetServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n10, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} + +func (m *UpdateServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.ServiceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ServiceVersion.Size())) + n11, err := m.ServiceVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.Rollback != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rollback)) + } + return i, nil +} + +func (m *UpdateServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Service.Size())) + n13, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} + +func (m *RemoveServiceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], 
m.ServiceID) + } + return i, nil +} + +func (m *RemoveServiceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveServiceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListServicesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n14, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} + +func (m *ListServicesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListServicesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServicesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListServiceStatusesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServiceStatusesRequest) MarshalTo(dAtA []byte) (int, error) 
{ + var i int + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for _, s := range m.Services { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListServiceStatusesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServiceStatusesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Statuses) > 0 { + for _, msg := range m.Statuses { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListServiceStatusesResponse_ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListServiceStatusesResponse_ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.DesiredTasks != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.DesiredTasks)) + } + if m.RunningTasks != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.RunningTasks)) + } + return i, nil +} + +func (m *CreateNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n15, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *CreateNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n16, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *GetNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *GetNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *GetNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Network.Size())) + n17, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *RemoveNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.NetworkID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + return i, nil +} + +func (m *RemoveNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ListNetworksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n18, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func (m *ListNetworksRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListNetworksResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListNetworksResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *GetClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + return i, nil +} + +func (m *GetClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n19, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + return i, nil +} + +func (m *ListClustersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n20, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + return i, nil +} + +func (m *ListClustersRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m 
*ListClustersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListClustersResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *KeyRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WorkerJoinToken { + dAtA[i] = 0x8 + i++ + if m.WorkerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerJoinToken { + dAtA[i] = 0x10 + i++ + if m.ManagerJoinToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ManagerUnlockKey { + dAtA[i] = 0x18 + i++ + if m.ManagerUnlockKey { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *UpdateClusterRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ClusterID))) + i += copy(dAtA[i:], m.ClusterID) + } + if m.ClusterVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ClusterVersion.Size())) + n21, err := m.ClusterVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Rotation.Size())) + n23, err := m.Rotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil +} + +func (m *UpdateClusterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateClusterResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Cluster != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Cluster.Size())) + n24, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} + +func (m *GetSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *GetSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n25, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *UpdateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if m.SecretVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.SecretVersion.Size())) + n26, err := m.SecretVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n27, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + return i, nil +} + +func (m *UpdateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n28, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} + +func (m *ListSecretsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n29, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *ListSecretsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 
+ i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListSecretsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListSecretsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n30, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + return i, nil +} + +func (m *CreateSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Secret.Size())) + n31, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *RemoveSecretRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + return i, nil +} + +func (m *RemoveSecretResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveSecretResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *GetConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*GetConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n32, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *UpdateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if m.ConfigVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ConfigVersion.Size())) + n33, err := m.ConfigVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + } + if m.Spec != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil +} + +func (m *UpdateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n35, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + return i, nil +} + +func (m *ListConfigsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n36, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + return i, nil +} + +func (m *ListConfigsRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += 
copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *ListConfigsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListConfigsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *CreateConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + } + return i, nil +} + +func (m *CreateConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Config.Size())) + n38, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + return i, nil +} + +func (m *RemoveConfigRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + return i, nil +} + +func (m *RemoveConfigResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveConfigResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *CreateExtensionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateExtensionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Annotations != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Annotations.Size())) + n39, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func (m *CreateExtensionResponse) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateExtensionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Extension != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Extension.Size())) + n40, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + } + return i, nil +} + +func (m *RemoveExtensionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveExtensionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExtensionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ExtensionID))) + i += copy(dAtA[i:], m.ExtensionID) + } + return i, nil +} + +func (m *RemoveExtensionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveExtensionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *GetExtensionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetExtensionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ExtensionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ExtensionID))) + i += copy(dAtA[i:], m.ExtensionID) + } + return i, nil +} + +func (m *GetExtensionResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetExtensionResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Extension != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Extension.Size())) + n41, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} + +func (m *CreateResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Annotations != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Annotations.Size())) + n42, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + if len(m.Kind) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Payload.Size())) + n43, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + return i, nil +} + +func (m *CreateResourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*CreateResourceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Resource != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Resource.Size())) + n44, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + return i, nil +} + +func (m *RemoveResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ResourceID))) + i += copy(dAtA[i:], m.ResourceID) + } + return i, nil +} + +func (m *RemoveResourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveResourceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *UpdateResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ResourceID))) + i += copy(dAtA[i:], m.ResourceID) + } + if m.ResourceVersion != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.ResourceVersion.Size())) + n45, err := m.ResourceVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.Annotations != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Annotations.Size())) + n46, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + if m.Payload != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Payload.Size())) + n47, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + return i, nil +} + +func (m *UpdateResourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateResourceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Resource != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Resource.Size())) + n48, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + return i, nil +} + +func (m *GetResourceRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetResourceRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ResourceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.ResourceID))) + i += copy(dAtA[i:], m.ResourceID) + } + return i, nil +} + +func (m *GetResourceResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *GetResourceResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Resource != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Resource.Size())) + n49, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + return i, nil +} + +func (m *ListResourcesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Filters != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(m.Filters.Size())) + n50, err := m.Filters.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + return i, nil +} + +func (m *ListResourcesRequest_Filters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesRequest_Filters) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x1a + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + i = encodeVarintControl(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintControl(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Kind) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintControl(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + return i, nil +} + +func (m *ListResourcesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListResourcesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0xa + i++ + i = encodeVarintControl(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintControl(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyControlServer struct { + local ControlServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func 
NewRaftProxyControlServer(local ControlServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ControlServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyControlServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyControlServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyControlServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyControlServer) GetNode(ctx context.Context, r *GetNodeRequest) (*GetNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNodes(ctx context.Context, r *ListNodesRequest) (*ListNodesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNodes(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := 
NewControlClient(conn).ListNodes(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNodes(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNodes(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateNode(ctx context.Context, r *UpdateNodeRequest) (*UpdateNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNode(ctx context.Context, r *RemoveNodeRequest) (*RemoveNodeResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNode(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNode(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNode(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetTask(ctx context.Context, r *GetTaskRequest) (*GetTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListTasks(ctx context.Context, r *ListTasksRequest) (*ListTasksResponse, error) { + + conn, err := 
p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListTasks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListTasks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListTasks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListTasks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveTask(ctx context.Context, r *RemoveTaskRequest) (*RemoveTaskResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveTask(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveTask(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveTask(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetService(ctx context.Context, r *GetServiceRequest) (*GetServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListServices(ctx context.Context, r *ListServicesRequest) (*ListServicesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListServices(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListServices(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + 
} + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListServices(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListServices(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateService(ctx context.Context, r *CreateServiceRequest) (*CreateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateService(ctx context.Context, r *UpdateServiceRequest) (*UpdateServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveService(ctx context.Context, r *RemoveServiceRequest) (*RemoveServiceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveService(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveService(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveService(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveService(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListServiceStatuses(ctx context.Context, r *ListServiceStatusesRequest) (*ListServiceStatusesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) 
+ if err != nil { + return nil, err + } + return p.local.ListServiceStatuses(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListServiceStatuses(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListServiceStatuses(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListServiceStatuses(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetNetwork(ctx context.Context, r *GetNetworkRequest) (*GetNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListNetworks(ctx context.Context, r *ListNetworksRequest) (*ListNetworksResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListNetworks(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListNetworks(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListNetworks(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateNetwork(ctx context.Context, r *CreateNetworkRequest) (*CreateNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == 
raftselector.ErrIsLeader { + return p.local.CreateNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveNetwork(ctx context.Context, r *RemoveNetworkRequest) (*RemoveNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveNetwork(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetCluster(ctx context.Context, r *GetClusterRequest) (*GetClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetCluster(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListClusters(ctx context.Context, r *ListClustersRequest) (*ListClustersResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListClusters(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListClusters(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListClusters(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListClusters(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateCluster(ctx context.Context, r *UpdateClusterRequest) (*UpdateClusterResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + modCtx, 
err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateCluster(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateCluster(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateCluster(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetSecret(ctx context.Context, r *GetSecretRequest) (*GetSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateSecret(ctx context.Context, r *UpdateSecretRequest) (*UpdateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListSecrets(ctx context.Context, r *ListSecretsRequest) (*ListSecretsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListSecrets(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListSecrets(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListSecrets(modCtx, r) + } + return 
resp, err +} + +func (p *raftProxyControlServer) CreateSecret(ctx context.Context, r *CreateSecretRequest) (*CreateSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveSecret(ctx context.Context, r *RemoveSecretRequest) (*RemoveSecretResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveSecret(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveSecret(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveSecret(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetConfig(ctx context.Context, r *GetConfigRequest) (*GetConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateConfig(ctx context.Context, r *UpdateConfigRequest) (*UpdateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateConfig(modCtx, r) + if err != nil { + if 
!strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListConfigs(ctx context.Context, r *ListConfigsRequest) (*ListConfigsResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListConfigs(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListConfigs(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListConfigs(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateConfig(ctx context.Context, r *CreateConfigRequest) (*CreateConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveConfig(ctx context.Context, r *RemoveConfigRequest) (*RemoveConfigResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveConfig(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveConfig(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveConfig(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetExtension(ctx context.Context, r *GetExtensionRequest) (*GetExtensionResponse, error) { + + conn, 
err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetExtension(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetExtension(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetExtension(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetExtension(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateExtension(ctx context.Context, r *CreateExtensionRequest) (*CreateExtensionResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateExtension(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateExtension(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateExtension(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateExtension(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveExtension(ctx context.Context, r *RemoveExtensionRequest) (*RemoveExtensionResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveExtension(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveExtension(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveExtension(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveExtension(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) GetResource(ctx context.Context, r *GetResourceRequest) (*GetResourceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.GetResource(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).GetResource(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is 
unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.GetResource(ctx, r) + } + return nil, err + } + return NewControlClient(conn).GetResource(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) UpdateResource(ctx context.Context, r *UpdateResourceRequest) (*UpdateResourceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateResource(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).UpdateResource(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateResource(ctx, r) + } + return nil, err + } + return NewControlClient(conn).UpdateResource(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) ListResources(ctx context.Context, r *ListResourcesRequest) (*ListResourcesResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ListResources(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).ListResources(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ListResources(ctx, r) + } + return nil, err + } + return NewControlClient(conn).ListResources(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) CreateResource(ctx context.Context, r *CreateResourceRequest) (*CreateResourceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.CreateResource(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).CreateResource(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.CreateResource(ctx, r) + } + return nil, err + } + return NewControlClient(conn).CreateResource(modCtx, r) + } + return resp, err +} + +func (p *raftProxyControlServer) RemoveResource(ctx context.Context, r *RemoveResourceRequest) (*RemoveResourceResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil 
{ + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.RemoveResource(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewControlClient(conn).RemoveResource(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.RemoveResource(ctx, r) + } + return nil, err + } + return NewControlClient(conn).RemoveResource(modCtx, r) + } + return resp, err +} + +func (m *GetNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNodesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.Memberships) > 0 { + for _, e := range m.Memberships { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.Roles) > 0 { + for _, e := range m.Roles { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.NodeLabels) > 0 { + for k, v := range m.NodeLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ListNodesResponse) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *UpdateNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.NodeVersion != nil { + l = m.NodeVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateNodeResponse) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNodeRequest) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Force { + n += 2 + } + return n +} + +func (m *RemoveNodeResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + 
+func (m *GetTaskResponse) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskRequest) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveTaskResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListTasksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListTasksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.DesiredStates) > 0 { + for _, e := range m.DesiredStates { + n += 1 + sovControl(uint64(e)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if m.UpToDate { + n += 2 + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListTasksResponse) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateServiceRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.InsertDefaults { + n += 2 + } + return n +} + +func (m *GetServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ServiceVersion != nil { + l = m.ServiceVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Rollback != 0 { + n += 1 + sovControl(uint64(m.Rollback)) + } + return n +} + +func (m *UpdateServiceResponse) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveServiceResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListServicesRequest) Size() (n int) { + var l 
int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListServicesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Runtimes) > 0 { + for _, s := range m.Runtimes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServicesResponse) Size() (n int) { + var l int + _ = l + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServiceStatusesRequest) Size() (n int) { + var l int + _ = l + if len(m.Services) > 0 { + for _, s := range m.Services { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServiceStatusesResponse) Size() (n int) { + var l int + _ = l + if len(m.Statuses) > 0 { + for _, e := range m.Statuses { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListServiceStatusesResponse_ServiceStatus) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.DesiredTasks != 0 { + n += 1 + sovControl(uint64(m.DesiredTasks)) + } + if m.RunningTasks != 0 { + n += 1 + sovControl(uint64(m.RunningTasks)) + } + return n +} + +func (m *CreateNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetNetworkResponse) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ListNetworksRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListNetworksRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + 
sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListNetworksResponse) Size() (n int) { + var l int + _ = l + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *GetClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListClustersRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListClustersResponse) Size() (n int) { + var l int + _ = l + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *KeyRotation) Size() (n int) { + var l int + _ = l + if m.WorkerJoinToken { + n += 2 + } + if m.ManagerJoinToken { + n += 2 + } + if m.ManagerUnlockKey { + n += 2 + } + return n +} + +func (m *UpdateClusterRequest) Size() (n int) { + var l int + _ = l + l = len(m.ClusterID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ClusterVersion != nil { + l = m.ClusterVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = m.Rotation.Size() + n += 1 + l + sovControl(uint64(l)) + return n +} + +func (m *UpdateClusterResponse) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.SecretVersion != nil { + l = m.SecretVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest) Size() (n int) { + var l int + _ 
= l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListSecretsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListSecretsResponse) Size() (n int) { + var l int + _ = l + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateSecretRequest) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateSecretResponse) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretRequest) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveSecretResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ConfigVersion != nil { + l = m.ConfigVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListConfigsRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *ListConfigsResponse) Size() (n int) { + var l int + _ = l + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func (m *CreateConfigRequest) Size() (n int) { + var l int + _ = l + 
if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateConfigResponse) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigRequest) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveConfigResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *CreateExtensionRequest) Size() (n int) { + var l int + _ = l + if m.Annotations != nil { + l = m.Annotations.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateExtensionResponse) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveExtensionRequest) Size() (n int) { + var l int + _ = l + l = len(m.ExtensionID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveExtensionResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *GetExtensionRequest) Size() (n int) { + var l int + _ = l + l = len(m.ExtensionID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetExtensionResponse) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateResourceRequest) Size() (n int) { + var l int + _ = l + if m.Annotations != nil { + l = m.Annotations.Size() + n += 1 + l + sovControl(uint64(l)) + } + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *CreateResourceResponse) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveResourceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ResourceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *RemoveResourceResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *UpdateResourceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ResourceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.ResourceVersion != nil { + l = m.ResourceVersion.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Annotations != nil { + l = m.Annotations.Size() + n += 1 + l + sovControl(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *UpdateResourceResponse) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetResourceRequest) Size() (n int) { + var l int + _ = l + l = len(m.ResourceID) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *GetResourceResponse) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListResourcesRequest) Size() (n int) { + var l int + _ = l + if m.Filters != nil { + l = m.Filters.Size() + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListResourcesRequest_Filters) Size() (n int) { + var l int + _ = l + if len(m.Names) > 0 
{ + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.IDPrefixes) > 0 { + for _, s := range m.IDPrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovControl(uint64(len(k))) + 1 + len(v) + sovControl(uint64(len(v))) + n += mapEntrySize + 1 + sovControl(uint64(mapEntrySize)) + } + } + if len(m.NamePrefixes) > 0 { + for _, s := range m.NamePrefixes { + l = len(s) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + return n +} + +func (m *ListResourcesResponse) Size() (n int) { + var l int + _ = l + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + return n +} + +func sovControl(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozControl(x uint64) (n int) { + return sovControl(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *GetNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNodesRequest_Filters", "ListNodesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForNodeLabels := make([]string, 0, len(this.NodeLabels)) + for k, _ := range this.NodeLabels { + keysForNodeLabels = append(keysForNodeLabels, k) + } + sortkeys.Strings(keysForNodeLabels) + mapStringForNodeLabels := "map[string]string{" + for _, k := range keysForNodeLabels { + mapStringForNodeLabels += fmt.Sprintf("%v: %v,", k, this.NodeLabels[k]) + } + mapStringForNodeLabels += "}" + s := strings.Join([]string{`&ListNodesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Memberships:` + fmt.Sprintf("%v", this.Memberships) + `,`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `NodeLabels:` + mapStringForNodeLabels + `,`, + `}`, + }, "") + return s +} +func (this *ListNodesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNodesResponse{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeRequest{`, + `NodeID:` 
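// [Editor's note: illustrative standalone sketch, not part of the vendored
// file. It demonstrates the two helpers defined just above: sovControl
// counts how many base-128 varint bytes a value needs, and sozControl
// applies zigzag encoding ((x << 1) ^ (x >> 63)) so small negative values
// stay small on the wire. Names ending in "Sketch" are hypothetical.]
package main

import "fmt"

// sovSketch: byte length of the varint encoding of x.
func sovSketch(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// zigzagSketch maps signed ints to unsigned ones the way sozControl does:
// 0, -1, 1, -2, ... become 0, 1, 2, 3, ... before varint encoding.
func zigzagSketch(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63))
}

func main() {
	fmt.Println(sovSketch(127), sovSketch(128))    // 1 2 (7 bits per byte)
	fmt.Println(zigzagSketch(-1), zigzagSketch(1)) // 1 2
	fmt.Println(sovSketch(zigzagSketch(-64)))      // -64 -> 127 -> 1 byte
}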
+ fmt.Sprintf("%v", this.NodeID) + `,`, + `NodeVersion:` + strings.Replace(fmt.Sprintf("%v", this.NodeVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NodeSpec", "NodeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateNodeResponse{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeRequest{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Force:` + fmt.Sprintf("%v", this.Force) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNodeResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNodeResponse{`, + `}`, + }, "") + return s +} +func (this *GetTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *GetTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetTaskResponse{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskRequest{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveTaskResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveTaskResponse{`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListTasksRequest_Filters", "ListTasksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListTasksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `DesiredStates:` + fmt.Sprintf("%v", this.DesiredStates) + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `UpToDate:` + fmt.Sprintf("%v", this.UpToDate) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListTasksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListTasksResponse{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CreateServiceRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `InsertDefaults:` + fmt.Sprintf("%v", this.InsertDefaults) + `,`, + `}`, + }, "") + return s +} +func (this *GetServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `ServiceVersion:` + strings.Replace(fmt.Sprintf("%v", this.ServiceVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `Rollback:` + fmt.Sprintf("%v", this.Rollback) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateServiceResponse{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceRequest{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveServiceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveServiceResponse{`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListServicesRequest_Filters", "ListServicesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListServicesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `Runtimes:` + fmt.Sprintf("%v", this.Runtimes) + `,`, + `}`, + }, "") + return s +} +func (this *ListServicesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServicesResponse{`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) 
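// [Editor's note: illustrative standalone sketch, not part of the vendored
// file. The String() methods above print label maps by first sorting the
// keys (via gogo/protobuf's sortkeys.Strings) so debug output is
// deterministic, since Go's native map iteration order is randomized. This
// version, with a hypothetical name, uses the standard library's
// sort.Strings instead.]
package main

import (
	"fmt"
	"sort"
	"strings"
)

// formatLabelsSketch renders a map the way the generated String() methods
// do: "map[string]string{k1: v1,k2: v2,}" with keys in sorted order.
func formatLabelsSketch(labels map[string]string) string {
	keys := make([]string, 0, len(labels))
	for k := range labels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var b strings.Builder
	b.WriteString("map[string]string{")
	for _, k := range keys {
		fmt.Fprintf(&b, "%v: %v,", k, labels[k])
	}
	b.WriteString("}")
	return b.String()
}

func main() {
	fmt.Println(formatLabelsSketch(map[string]string{"b": "2", "a": "1"}))
	// map[string]string{a: 1,b: 2,}
}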
+ `,`, + `}`, + }, "") + return s +} +func (this *ListServiceStatusesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServiceStatusesRequest{`, + `Services:` + fmt.Sprintf("%v", this.Services) + `,`, + `}`, + }, "") + return s +} +func (this *ListServiceStatusesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServiceStatusesResponse{`, + `Statuses:` + strings.Replace(fmt.Sprintf("%v", this.Statuses), "ListServiceStatusesResponse_ServiceStatus", "ListServiceStatusesResponse_ServiceStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListServiceStatusesResponse_ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListServiceStatusesResponse_ServiceStatus{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `DesiredTasks:` + fmt.Sprintf("%v", this.DesiredTasks) + `,`, + `RunningTasks:` + fmt.Sprintf("%v", this.RunningTasks) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "NetworkSpec", "NetworkSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *GetNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetNetworkResponse{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkRequest{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveNetworkResponse{`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListNetworksRequest_Filters", "ListNetworksRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListNetworksRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels 
+ `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListNetworksResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListNetworksResponse{`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `}`, + }, "") + return s +} +func (this *GetClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListClustersRequest_Filters", "ListClustersRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListClustersRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListClustersResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListClustersResponse{`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *KeyRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyRotation{`, + `WorkerJoinToken:` + fmt.Sprintf("%v", this.WorkerJoinToken) + `,`, + `ManagerJoinToken:` + fmt.Sprintf("%v", this.ManagerJoinToken) + `,`, + `ManagerUnlockKey:` + fmt.Sprintf("%v", this.ManagerUnlockKey) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterRequest{`, + `ClusterID:` + fmt.Sprintf("%v", this.ClusterID) + `,`, + `ClusterVersion:` + strings.Replace(fmt.Sprintf("%v", this.ClusterVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ClusterSpec", "ClusterSpec", 1) + `,`, + `Rotation:` + strings.Replace(strings.Replace(this.Rotation.String(), "KeyRotation", "KeyRotation", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateClusterResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateClusterResponse{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&GetSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *GetSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretVersion:` + strings.Replace(fmt.Sprintf("%v", this.SecretVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListSecretsRequest_Filters", "ListSecretsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListSecretsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListSecretsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListSecretsResponse{`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "SecretSpec", "SecretSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateSecretResponse{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretRequest{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveSecretResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveSecretResponse{`, + `}`, + }, "") + return s +} +func (this *GetConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + 
`,`, + `}`, + }, "") + return s +} +func (this *GetConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigVersion:` + strings.Replace(fmt.Sprintf("%v", this.ConfigVersion), "Version", "Version", 1) + `,`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListConfigsRequest_Filters", "ListConfigsRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListConfigsRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `}`, + }, "") + return s +} +func (this *ListConfigsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListConfigsResponse{`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigRequest{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "ConfigSpec", "ConfigSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateConfigResponse{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigRequest{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveConfigResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveConfigResponse{`, + `}`, + }, "") + return s +} +func (this *CreateExtensionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateExtensionRequest{`, + `Annotations:` + strings.Replace(fmt.Sprintf("%v", this.Annotations), "Annotations", "Annotations", 1) + `,`, + `Description:` + 
fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func (this *CreateExtensionResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateExtensionResponse{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveExtensionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveExtensionRequest{`, + `ExtensionID:` + fmt.Sprintf("%v", this.ExtensionID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveExtensionResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveExtensionResponse{`, + `}`, + }, "") + return s +} +func (this *GetExtensionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetExtensionRequest{`, + `ExtensionID:` + fmt.Sprintf("%v", this.ExtensionID) + `,`, + `}`, + }, "") + return s +} +func (this *GetExtensionResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetExtensionResponse{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateResourceRequest{`, + `Annotations:` + strings.Replace(fmt.Sprintf("%v", this.Annotations), "Annotations", "Annotations", 1) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf4.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CreateResourceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateResourceResponse{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveResourceRequest{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `}`, + }, "") + return s +} +func (this *RemoveResourceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RemoveResourceResponse{`, + `}`, + }, "") + return s +} +func (this *UpdateResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateResourceRequest{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `ResourceVersion:` + strings.Replace(fmt.Sprintf("%v", this.ResourceVersion), "Version", "Version", 1) + `,`, + `Annotations:` + strings.Replace(fmt.Sprintf("%v", this.Annotations), "Annotations", "Annotations", 1) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf4.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateResourceResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateResourceResponse{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *GetResourceRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetResourceRequest{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `}`, + }, "") + return s +} +func (this *GetResourceResponse) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetResourceResponse{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListResourcesRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListResourcesRequest{`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "ListResourcesRequest_Filters", "ListResourcesRequest_Filters", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListResourcesRequest_Filters) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&ListResourcesRequest_Filters{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `IDPrefixes:` + fmt.Sprintf("%v", this.IDPrefixes) + `,`, + `Labels:` + mapStringForLabels + `,`, + `NamePrefixes:` + fmt.Sprintf("%v", this.NamePrefixes) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *ListResourcesResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListResourcesResponse{`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringControl(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *GetNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
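// [Editor's note: illustrative standalone sketch, not part of the vendored
// file. Every Unmarshal method above begins each field the same way: read
// one varint "key", then split it into the field number (key >> 3) and the
// wire type (key & 0x7); wire type 2 means a length-delimited payload
// follows. The helper below mirrors that inner loop with hypothetical
// names.]
package main

import (
	"errors"
	"fmt"
)

// readVarintSketch decodes a base-128 varint from dAtA starting at i,
// returning the value and the index just past it, as the generated
// shift-accumulate loops do.
func readVarintSketch(dAtA []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflow") // ErrIntOverflowControl's role
		}
		if i >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[i]
		i++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return v, i, nil
		}
	}
}

func main() {
	// Field 1, wire type 2 (length-delimited), then a 6-byte string: this is
	// how a GetNodeRequest{NodeID: "node-1"} looks on the wire.
	msg := []byte{0x0A, 0x06, 'n', 'o', 'd', 'e', '-', '1'}
	key, i, _ := readVarintSketch(msg, 0)
	fmt.Println(key>>3, key&0x7) // 1 2
	l, i, _ := readVarintSketch(msg, i)
	fmt.Println(string(msg[i : i+int(l)])) // node-1
}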
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNodesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType == 0 { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Memberships = append(m.Memberships, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Memberships", wireType) + } + case 5: + if wireType == 0 { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Roles = append(m.Roles, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
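// [Editor's note: illustrative standalone sketch, not part of the vendored
// file. The Memberships/Roles cases above accept a repeated enum in either
// wire form: wire type 0 (one varint per element) or wire type 2 (a
// "packed" run of varints behind a single length prefix). This decoder,
// with a hypothetical name and the bounds/overflow checks elided for
// brevity, shows the packed branch.]
package main

import "fmt"

// decodePackedSketch reads consecutive varints out of a packed payload,
// the same shift-accumulate loop the generated code repeats inline.
func decodePackedSketch(payload []byte) []uint64 {
	var out []uint64
	for i := 0; i < len(payload); {
		var v uint64
		for shift := uint(0); ; shift += 7 {
			b := payload[i]
			i++
			v |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		out = append(out, v)
	}
	return out
}

func main() {
	// Three small enum values packed behind one length prefix elsewhere:
	fmt.Println(decodePackedSketch([]byte{0x00, 0x01, 0x02})) // [0 1 2]
}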
intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeLabels == nil { + m.NodeLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeLabels[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNodesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNodesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNodesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeVersion == nil { + m.NodeVersion = &Version{} + } + if err := m.NodeVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NodeSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNodeResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNodeResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNodeResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetTaskResponse) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Task == nil { + m.Task = &Task{} + } + if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveTaskResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveTaskResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListTasksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType == 0 { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DesiredStates = append(m.DesiredStates, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredStates", wireType) + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpToDate", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.UpToDate = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListTasksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex 
:= iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsertDefaults", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.InsertDefaults = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceVersion == nil { + m.ServiceVersion = &Version{} + } + if err := m.ServiceVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ServiceSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType) + } + m.Rollback = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rollback |= (UpdateServiceRequest_Rollback(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &Service{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveServiceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveServiceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveServiceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveServiceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveServiceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveServiceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServicesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServicesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListServicesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Runtimes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Runtimes = append(m.Runtimes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServicesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServicesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServicesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServiceStatusesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServiceStatusesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServiceStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = 
append(m.Services, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServiceStatusesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListServiceStatusesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListServiceStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Statuses = append(m.Statuses, &ListServiceStatusesResponse_ServiceStatus{}) + if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListServiceStatusesResponse_ServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field DesiredTasks", wireType) + } + m.DesiredTasks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredTasks |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunningTasks", wireType) + } + m.RunningTasks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RunningTasks |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &NetworkSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNetworksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNetworksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListNetworksRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListNetworksResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListNetworksResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListNetworksResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterRequest: illegal tag %d (wire 
type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetClusterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetClusterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListClustersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListClustersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListClustersRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListClustersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListClustersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListClustersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyRotation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyRotation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyRotation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkerJoinToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.WorkerJoinToken = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerJoinToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ManagerJoinToken = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerUnlockKey", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ManagerUnlockKey = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateClusterRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateClusterRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateClusterRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClusterVersion == nil { + m.ClusterVersion = &Version{} + } + if err := m.ClusterVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ClusterSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rotation", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rotation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateClusterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateClusterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateClusterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cluster == nil { + m.Cluster = &Cluster{} + } + if err := m.Cluster.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: GetSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretVersion == nil { + m.SecretVersion = &Version{} + } + if err := m.SecretVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + 
} + if m.Filters == nil { + m.Filters = &ListSecretsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListSecretsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListSecretsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListSecretsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { 
+ return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &SecretSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &Secret{} + } 
+ if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveSecretResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveSecretResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveSecretResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + 
if wireType == 4 { + return fmt.Errorf("proto: GetConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) 
+ } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigVersion == nil { + m.ConfigVersion = &Version{} + } + if err := m.ConfigVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListConfigsRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListConfigsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListConfigsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListConfigsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &ConfigSpec{} + } + 
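+	// Editorial note (annotation, not upstream code): embedded messages use
+	// wire type 2 (length-delimited), so the decoder first reads a varint
+	// byte length, then recursively unmarshals just the sub-slice
+	// dAtA[iNdEx:postIndex] into the freshly allocated field.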
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &Config{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + 
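+	// Editorial note (annotation, not upstream code): unrecognized field
+	// numbers fall through to this default arm, where skipControl computes
+	// how many bytes the unknown field occupies so the decoder can step over
+	// it. This is what gives protobuf its forward compatibility: fields added
+	// by a newer peer are skipped rather than treated as errors.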
if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveConfigResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveConfigResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveConfigResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateExtensionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateExtensionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateExtensionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = &Annotations{} + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateExtensionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateExtensionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateExtensionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extension == nil { + m.Extension = &Extension{} + } + if err := m.Extension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveExtensionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveExtensionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveExtensionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtensionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveExtensionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveExtensionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveExtensionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetExtensionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetExtensionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetExtensionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtensionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtensionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetExtensionResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetExtensionResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetExtensionResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extension == nil { + m.Extension = &Extension{} + } + if err := m.Extension.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateResourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateResourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = &Annotations{} + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf4.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
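+	// Editorial note (annotation, not upstream code): this guard rejects
+	// negative skip lengths (the sign of an overflowed varint); together with
+	// the iNdEx+skippy > l bound checked just below, it ensures truncated or
+	// hostile input fails with an error instead of an out-of-range slice
+	// panic.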
return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateResourceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateResourceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &Resource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveResourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveResourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return 
nil +} +func (m *RemoveResourceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveResourceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateResourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateResourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceVersion == nil { + m.ResourceVersion = &Version{} + } + if err := m.ResourceVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = &Annotations{} + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf4.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateResourceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateResourceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &Resource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResourceRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResourceRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
GetResourceRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetResourceResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetResourceResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetResourceResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &Resource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListResourcesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListResourcesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListResourcesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Filters == nil { + m.Filters = &ListResourcesRequest_Filters{} + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListResourcesRequest_Filters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Filters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Filters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IDPrefixes = append(m.IDPrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + 
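+	// Editorial note (annotation, not upstream code): proto3
+	// map<string,string> fields arrive on the wire as repeated nested entry
+	// messages, each carrying the key as field 1 and the value as field 2.
+	// This inner loop decodes one entry; unknown entry fields are skipped
+	// with skipControl, bounded by postIndex rather than l so a malformed
+	// entry cannot read past its enclosing message.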
entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthControl + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefixes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NamePrefixes = append(m.NamePrefixes, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListResourcesResponse) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListResourcesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListResourcesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipControl(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthControl + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowControl + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipControl(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthControl = fmt.Errorf("proto: negative length found during unmarshaling") + 
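+	// Editorial note (annotation, not upstream code): these are the two
+	// sentinel errors shared by all decoders in this file —
+	// ErrInvalidLengthControl flags negative or out-of-range length prefixes,
+	// while ErrIntOverflowControl flags varints longer than ten bytes
+	// (shift >= 64 in the decode loops above).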
ErrIntOverflowControl = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/control.proto", fileDescriptorControl) } + +var fileDescriptorControl = []byte{ + // 2744 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0x49, 0x6f, 0x1b, 0xc9, + 0x15, 0x36, 0x29, 0x4a, 0x22, 0x1f, 0xb5, 0xb9, 0x2c, 0xdb, 0x1c, 0xda, 0x91, 0x8c, 0xf6, 0x46, + 0x3b, 0x0e, 0x39, 0x43, 0x67, 0x32, 0x8e, 0x67, 0x9c, 0xc4, 0xb2, 0xbc, 0xd0, 0x8b, 0x6c, 0xb4, + 0x64, 0x23, 0x39, 0x04, 0x02, 0x45, 0x96, 0x34, 0x6d, 0x52, 0xdd, 0x4c, 0x77, 0xd3, 0x63, 0x21, + 0x48, 0x90, 0xc5, 0xc1, 0x9c, 0x72, 0x0c, 0x10, 0xe4, 0x10, 0x20, 0xa7, 0x00, 0x39, 0xe4, 0x90, + 0x53, 0x0e, 0xf9, 0x01, 0x46, 0x4e, 0x39, 0xe6, 0xa4, 0x64, 0x04, 0x04, 0xc8, 0x29, 0x7f, 0x21, + 0x83, 0xda, 0x7a, 0x29, 0x56, 0x2f, 0x5c, 0x00, 0xcf, 0x49, 0x62, 0xf5, 0xf7, 0xea, 0xbd, 0x7a, + 0xef, 0xf5, 0x57, 0xd5, 0xef, 0x15, 0x5c, 0xdb, 0x33, 0xdc, 0x4f, 0xfb, 0x3b, 0xd5, 0x96, 0xb5, + 0x5f, 0x6b, 0x5b, 0xad, 0x0e, 0xb6, 0x6b, 0xce, 0x67, 0x4d, 0x7b, 0xbf, 0x63, 0xb8, 0xb5, 0x66, + 0xcf, 0xa8, 0xb5, 0x2c, 0xd3, 0xb5, 0xad, 0x6e, 0xb5, 0x67, 0x5b, 0xae, 0x85, 0x10, 0x83, 0x54, + 0x05, 0xa4, 0xfa, 0xea, 0x83, 0xf2, 0xd5, 0x84, 0x19, 0x9c, 0x1e, 0x6e, 0x39, 0x4c, 0xbe, 0x9c, + 0xa4, 0xcd, 0xda, 0x79, 0x89, 0x5b, 0xae, 0x40, 0x27, 0xcd, 0xec, 0x1e, 0xf4, 0xb0, 0xc0, 0x2e, + 0xef, 0x59, 0x7b, 0x16, 0xfd, 0xb7, 0x46, 0xfe, 0xe3, 0xa3, 0xef, 0xed, 0x59, 0xd6, 0x5e, 0x17, + 0xd7, 0xe8, 0xaf, 0x9d, 0xfe, 0x6e, 0xad, 0x69, 0x1e, 0xf0, 0x47, 0x1f, 0xc5, 0x4c, 0xee, 0xc1, + 0x7b, 0xdd, 0xfe, 0x9e, 0x61, 0xf2, 0x3f, 0x4c, 0x50, 0xfb, 0x10, 0x16, 0xee, 0x63, 0x77, 0xc3, + 0x6a, 0x63, 0x1d, 0xff, 0xa8, 0x8f, 0x1d, 0x17, 0x9d, 0x87, 0x59, 0xd3, 0x6a, 0xe3, 0x6d, 0xa3, + 0x5d, 0xca, 0x9c, 0xcb, 0x54, 0x0a, 0x6b, 0x70, 0x74, 0xb8, 0x3a, 0x43, 0x10, 0x8d, 0x75, 0x7d, + 0x86, 0x3c, 0x6a, 0xb4, 0xb5, 0xef, 0xc2, 0xa2, 0x27, 0xe6, 0xf4, 0x2c, 0xd3, 0xc1, 0xe8, 0x1a, + 0xe4, 0xc8, 0x43, 0x2a, 0x54, 0xac, 0x97, 0xaa, 0x83, 0xce, 0xad, 0x52, 0x3c, 0x45, 0x69, 0x6f, + 0xa6, 0x61, 0xe9, 0xb1, 0xe1, 0xd0, 0x29, 0x1c, 0xa1, 0xfa, 0x1e, 0xcc, 0xee, 0x1a, 0x5d, 0x17, + 0xdb, 0x0e, 0x9f, 0xe5, 0x9a, 0x6a, 0x16, 0x59, 0xac, 0x7a, 0x8f, 0xc9, 0xe8, 0x42, 0xb8, 0xfc, + 0xc7, 0x1c, 0xcc, 0xf2, 0x41, 0xb4, 0x0c, 0xd3, 0x66, 0x73, 0x1f, 0x93, 0x19, 0xa7, 0x2a, 0x05, + 0x9d, 0xfd, 0x40, 0x35, 0x28, 0x1a, 0xed, 0xed, 0x9e, 0x8d, 0x77, 0x8d, 0xd7, 0xd8, 0x29, 0x65, + 0xc9, 0xb3, 0xb5, 0x85, 0xa3, 0xc3, 0x55, 0x68, 0xac, 0x3f, 0xe3, 0xa3, 0x3a, 0x18, 0x6d, 0xf1, + 0x3f, 0x7a, 0x06, 0x33, 0xdd, 0xe6, 0x0e, 0xee, 0x3a, 0xa5, 0xa9, 0x73, 0x53, 0x95, 0x62, 0xfd, + 0xc6, 0x30, 0x96, 0x55, 0x1f, 0x53, 0xd1, 0xbb, 0xa6, 0x6b, 0x1f, 0xe8, 0x7c, 0x1e, 0xf4, 0x04, + 0x8a, 0xfb, 0x78, 0x7f, 0x07, 0xdb, 0xce, 0xa7, 0x46, 0xcf, 0x29, 0xe5, 0xce, 0x4d, 0x55, 0x16, + 0xea, 0x97, 0xa3, 0xdc, 0xb6, 0xd9, 0xc3, 0xad, 0xea, 0x13, 0x0f, 0xbf, 0x96, 0x5d, 0x3a, 0xa6, + 0x07, 0xe5, 0xd1, 0xb7, 0x60, 0xda, 0xb6, 0xba, 0xd8, 0x29, 0x4d, 0xd3, 0x89, 0xce, 0x46, 0xfa, + 0xdf, 0xea, 0x62, 0x2a, 0xcd, 0xe0, 0xe8, 0x3c, 0xcc, 0x13, 0x97, 0xf8, 0xbe, 0x98, 0xa1, 0x7e, + 0x9a, 0x23, 0x83, 0xde, 0xea, 0x7f, 0x08, 0x45, 0x9a, 0x13, 0xdc, 0x05, 0xb3, 0xd4, 0x05, 0x9f, + 0x0c, 0xe5, 0x02, 0x32, 0x18, 0x74, 0x03, 0x98, 0xde, 0x40, 0xf9, 0xdb, 0x50, 0x0c, 0x3c, 0x42, + 0x4b, 0x30, 0xd5, 0xc1, 0x07, 0x2c, 0xfb, 0x74, 0xf2, 0x2f, 0x09, 0xe2, 0xab, 0x66, 0xb7, 0x8f, + 0x4b, 0x59, 0x3a, 0xc6, 0x7e, 0xdc, 0xcc, 0xde, 0xc8, 
0x94, 0x6f, 0xc1, 0xa2, 0x34, 0xf3, 0x30, + 0xe2, 0xda, 0x1d, 0x38, 0x1e, 0xb0, 0x98, 0x67, 0x72, 0x15, 0xa6, 0x89, 0x71, 0x2c, 0x65, 0xe2, + 0x52, 0x99, 0xc1, 0xb4, 0x3f, 0x65, 0xe0, 0xf8, 0xf3, 0x5e, 0xbb, 0xe9, 0xe2, 0x61, 0xdf, 0x23, + 0xf4, 0x1d, 0x98, 0xa3, 0xa0, 0x57, 0xd8, 0x76, 0x0c, 0xcb, 0xa4, 0x06, 0x16, 0xeb, 0x67, 0x54, + 0x1a, 0x5f, 0x30, 0x88, 0x4e, 0x23, 0xc1, 0x7f, 0xa0, 0xf7, 0x21, 0x47, 0x18, 0xa9, 0x34, 0x45, + 0xe5, 0xce, 0xc6, 0x65, 0x8f, 0x4e, 0x91, 0xda, 0x1a, 0xa0, 0xa0, 0xad, 0x23, 0xbd, 0xbc, 0x1b, + 0x70, 0x5c, 0xc7, 0xfb, 0xd6, 0xab, 0xe1, 0xd7, 0xbb, 0x0c, 0xd3, 0xbb, 0x96, 0xdd, 0x62, 0x91, + 0xc8, 0xeb, 0xec, 0x87, 0xb6, 0x0c, 0x28, 0x38, 0x1f, 0xb3, 0x89, 0x53, 0xd3, 0x56, 0xd3, 0xe9, + 0x04, 0x54, 0xb8, 0x4d, 0xa7, 0x23, 0xa9, 0x20, 0x08, 0xa2, 0x82, 0x3c, 0xf2, 0xa8, 0x89, 0x89, + 0xf9, 0xab, 0x23, 0x0f, 0xe3, 0x56, 0x47, 0xf1, 0x14, 0xa5, 0xdd, 0x10, 0xab, 0x1b, 0x5a, 0xb5, + 0xb7, 0x8e, 0xa0, 0x76, 0xed, 0x6f, 0x39, 0x46, 0x75, 0x64, 0x70, 0x04, 0xaa, 0x0b, 0x8a, 0x0d, + 0x52, 0xdd, 0xbf, 0xa6, 0xde, 0x1d, 0xd5, 0xa9, 0x2c, 0x53, 0x52, 0x5d, 0x0d, 0x8a, 0x0e, 0xb6, + 0x5f, 0x19, 0x2d, 0x92, 0x1d, 0x8c, 0xea, 0xb8, 0x09, 0x9b, 0x6c, 0xb8, 0xb1, 0xee, 0xe8, 0xc0, + 0x21, 0x8d, 0xb6, 0x83, 0x2e, 0x41, 0x9e, 0xe7, 0x12, 0xe3, 0xb3, 0xc2, 0x5a, 0xf1, 0xe8, 0x70, + 0x75, 0x96, 0x25, 0x93, 0xa3, 0xcf, 0xb2, 0x6c, 0x72, 0xd0, 0x03, 0x58, 0x68, 0x63, 0xc7, 0xb0, + 0x71, 0x7b, 0xdb, 0x71, 0x9b, 0x2e, 0x67, 0xaf, 0x85, 0xfa, 0xd7, 0xa2, 0x42, 0xbc, 0x49, 0x50, + 0x94, 0xfe, 0xe6, 0xb9, 0x20, 0x1d, 0x51, 0xd0, 0xe0, 0xac, 0x82, 0x06, 0xcf, 0x02, 0xf4, 0x7b, + 0xdb, 0xae, 0xb5, 0x4d, 0xde, 0x9f, 0x52, 0x9e, 0xa6, 0x70, 0xbe, 0xdf, 0xdb, 0xb2, 0xd6, 0x9b, + 0x2e, 0x46, 0x65, 0xc8, 0xdb, 0x7d, 0xd3, 0x35, 0x48, 0x04, 0x0a, 0x54, 0xda, 0xfb, 0x3d, 0x06, + 0xc3, 0x09, 0x8a, 0xe2, 0xce, 0xf6, 0x29, 0x8a, 0xe4, 0x5c, 0x2c, 0x45, 0xd1, 0x24, 0x64, 0x30, + 0xed, 0x11, 0x2c, 0xdf, 0xb1, 0x71, 0xd3, 0xc5, 0xdc, 0xe1, 0x22, 0x0d, 0xaf, 0x73, 0xfe, 0x60, + 0x39, 0xb8, 0xaa, 0x9a, 0x86, 0x4b, 0x04, 0x28, 0x64, 0x03, 0x4e, 0x4a, 0x93, 0x71, 0xab, 0x3e, + 0x84, 0x59, 0x1e, 0x44, 0x3e, 0xe1, 0x99, 0x98, 0x09, 0x75, 0x81, 0xd5, 0x5e, 0xc2, 0xf1, 0xfb, + 0xd8, 0x95, 0x2c, 0xbb, 0x06, 0xe0, 0xe7, 0x0c, 0x7f, 0xe7, 0xe6, 0x8f, 0x0e, 0x57, 0x0b, 0x5e, + 0xca, 0xe8, 0x05, 0x2f, 0x63, 0xd0, 0x65, 0x58, 0x34, 0x4c, 0x07, 0xdb, 0xee, 0x76, 0x1b, 0xef, + 0x36, 0xfb, 0x5d, 0xd7, 0xe1, 0x0c, 0xb3, 0xc0, 0x86, 0xd7, 0xf9, 0xa8, 0xf6, 0x08, 0x50, 0x50, + 0xd7, 0x78, 0x86, 0xff, 0x25, 0x0b, 0xcb, 0x8c, 0x4c, 0xc7, 0x32, 0x7e, 0x1d, 0x16, 0x05, 0x7a, + 0x88, 0x7d, 0x60, 0x81, 0xcb, 0x88, 0xad, 0xe0, 0x7a, 0x68, 0x2b, 0x48, 0x17, 0x4a, 0xf4, 0x04, + 0xf2, 0xb6, 0xd5, 0xed, 0xee, 0x34, 0x5b, 0x9d, 0x52, 0xee, 0x5c, 0xa6, 0xb2, 0x50, 0xff, 0x40, + 0x25, 0xa8, 0x5a, 0x64, 0x55, 0xe7, 0x82, 0xba, 0x37, 0x85, 0xa6, 0x41, 0x5e, 0x8c, 0xa2, 0x3c, + 0xe4, 0x36, 0x9e, 0x6e, 0xdc, 0x5d, 0x3a, 0x86, 0xe6, 0x20, 0xff, 0x4c, 0xbf, 0xfb, 0xa2, 0xf1, + 0xf4, 0xf9, 0xe6, 0x52, 0x86, 0x64, 0x8f, 0x34, 0xdd, 0x78, 0x41, 0x58, 0x87, 0x65, 0x46, 0xba, + 0xe3, 0xc4, 0x40, 0x3b, 0x0d, 0x27, 0xa5, 0x59, 0x38, 0x7b, 0xbf, 0x99, 0x82, 0x13, 0xe4, 0xfd, + 0xe3, 0xe3, 0x1e, 0x81, 0x37, 0x64, 0x02, 0xaf, 0x45, 0xd1, 0xa4, 0x24, 0x39, 0xc8, 0xe1, 0x7f, + 0xc8, 0x4e, 0x9c, 0xc3, 0x37, 0x25, 0x0e, 0xff, 0x78, 0x48, 0xe3, 0x94, 0x34, 0x3e, 0xc0, 0x91, + 0x39, 0x05, 0x47, 0x06, 0x59, 0x70, 0x7a, 0x72, 0x2c, 0xf8, 0x14, 0x96, 0xc3, 0xe6, 0xf2, 0xa4, + 0xf9, 0x08, 0xf2, 0x3c, 0x88, 0x82, 0x0b, 0x63, 0xb3, 0xc6, 0x03, 0x6b, 0x37, 
0xa0, 0x1c, 0x98, + 0x90, 0xec, 0x02, 0x7d, 0xc7, 0x8f, 0x6e, 0x59, 0x9a, 0xb6, 0x10, 0x90, 0xfc, 0x45, 0x16, 0xce, + 0x28, 0x45, 0xb9, 0x49, 0x3f, 0x80, 0xbc, 0xc3, 0xc7, 0xb8, 0x49, 0xb7, 0x12, 0xbc, 0x2f, 0x4f, + 0x51, 0x0d, 0x8d, 0xeb, 0xde, 0x74, 0xe5, 0xcf, 0x33, 0x30, 0x1f, 0x7a, 0x36, 0x24, 0xd3, 0x9c, + 0x07, 0xb1, 0xed, 0x6d, 0xb3, 0xed, 0x83, 0xf8, 0x39, 0xa7, 0xcf, 0xf1, 0x41, 0xba, 0xc7, 0x10, + 0x90, 0xdd, 0x37, 0x4d, 0xc3, 0xdc, 0xe3, 0xa0, 0x29, 0x06, 0xe2, 0x83, 0x5b, 0xe1, 0x0d, 0x65, + 0x03, 0xbb, 0x9f, 0x59, 0x76, 0x67, 0x88, 0x0d, 0x85, 0x4b, 0xa8, 0x36, 0x14, 0x6f, 0x32, 0x9f, + 0x12, 0x4c, 0x36, 0x14, 0x47, 0x09, 0x42, 0x4a, 0x60, 0xb5, 0xe7, 0x74, 0x43, 0x91, 0x2c, 0x43, + 0x90, 0x23, 0x89, 0xca, 0xd3, 0x8d, 0xfe, 0x4f, 0xbc, 0xc7, 0x65, 0x88, 0xf7, 0xb2, 0xbe, 0xf7, + 0xb8, 0x2c, 0xf1, 0x1e, 0x07, 0x34, 0xda, 0x7c, 0xef, 0x98, 0x90, 0x8d, 0xdf, 0x17, 0xb4, 0x35, + 0x71, 0x33, 0x3d, 0x2a, 0x93, 0x2c, 0xd5, 0xfe, 0x9b, 0x65, 0x54, 0xc6, 0xc7, 0x47, 0xa0, 0x32, + 0x49, 0x72, 0x90, 0xca, 0x7e, 0xf5, 0x0e, 0xa9, 0x2c, 0xc2, 0xb8, 0x91, 0xa9, 0x6c, 0x02, 0x74, + 0xe5, 0x9b, 0xe4, 0xd3, 0x15, 0x0f, 0x54, 0x2c, 0x5d, 0x89, 0xc8, 0x79, 0x60, 0xed, 0x36, 0x4d, + 0xe9, 0x3b, 0xdd, 0xbe, 0xe3, 0x62, 0x3b, 0xb0, 0xc5, 0xb5, 0xd8, 0x88, 0xf4, 0xf2, 0x73, 0x1c, + 0xc9, 0x0b, 0x0e, 0xf0, 0xd2, 0xd7, 0x9b, 0xc2, 0x4f, 0x5f, 0x0e, 0x89, 0x4b, 0x5f, 0x21, 0x25, + 0xb0, 0x5e, 0x2e, 0xf1, 0x07, 0x23, 0xe4, 0x92, 0x24, 0xf9, 0xd5, 0xca, 0xa5, 0x08, 0xe3, 0xde, + 0x65, 0x2e, 0xf9, 0x26, 0xf9, 0xb9, 0xc4, 0xa3, 0x11, 0x9b, 0x4b, 0x22, 0x74, 0x1e, 0x58, 0xfb, + 0x4d, 0x06, 0x8a, 0x8f, 0xf0, 0x81, 0x6e, 0xb9, 0x4d, 0x97, 0x9c, 0x1c, 0xaf, 0xc2, 0x71, 0x92, + 0x64, 0xd8, 0xde, 0x7e, 0x69, 0x19, 0xe6, 0xb6, 0x6b, 0x75, 0xb0, 0x49, 0x4d, 0xcb, 0xeb, 0x8b, + 0xec, 0xc1, 0x43, 0xcb, 0x30, 0xb7, 0xc8, 0x30, 0xba, 0x06, 0x68, 0xbf, 0x69, 0x36, 0xf7, 0xc2, + 0x60, 0x76, 0xd6, 0x5e, 0xe2, 0x4f, 0x94, 0xe8, 0xbe, 0xd9, 0xb5, 0x5a, 0x9d, 0x6d, 0xb2, 0xea, + 0xa9, 0x10, 0xfa, 0x39, 0x7d, 0xf0, 0x08, 0x1f, 0x90, 0x8d, 0x95, 0x1f, 0xa7, 0xc7, 0xc9, 0x73, + 0x72, 0x9c, 0x16, 0xe8, 0x61, 0x8e, 0xd3, 0x5c, 0x66, 0x88, 0xe3, 0x34, 0xd7, 0x1e, 0x38, 0x4e, + 0xdf, 0x26, 0xc7, 0x69, 0xe6, 0x55, 0x7a, 0x9c, 0x8e, 0x10, 0x0c, 0x38, 0x7f, 0x2d, 0xf7, 0xf6, + 0x70, 0xf5, 0x98, 0xee, 0x89, 0xf9, 0xc7, 0xe3, 0x09, 0xbd, 0xa8, 0xb7, 0x60, 0x89, 0x7e, 0xf0, + 0xb4, 0x6c, 0xec, 0x0a, 0x7f, 0x5e, 0x81, 0x82, 0x43, 0x07, 0x7c, 0x77, 0xce, 0x1d, 0x1d, 0xae, + 0xe6, 0x19, 0xaa, 0xb1, 0x4e, 0x0e, 0x3b, 0xf4, 0xbf, 0xb6, 0x76, 0x9f, 0x7f, 0x9b, 0x31, 0x71, + 0x6e, 0x4a, 0x1d, 0x66, 0x18, 0x80, 0x5b, 0x52, 0x56, 0x1f, 0xb9, 0xa8, 0x0c, 0x47, 0x6a, 0x7f, + 0xcd, 0xc0, 0x09, 0x71, 0xee, 0x1f, 0xcd, 0x16, 0xb4, 0x06, 0x0b, 0x1c, 0x3a, 0x44, 0x5c, 0xe7, + 0x99, 0x88, 0x08, 0x6b, 0x3d, 0x14, 0xd6, 0x95, 0x68, 0xc3, 0x03, 0xc7, 0x93, 0x87, 0xfe, 0x57, + 0xde, 0xd8, 0x6e, 0xf8, 0x4f, 0x16, 0x10, 0x3b, 0xf9, 0x91, 0x9f, 0x1e, 0x6d, 0x3e, 0x90, 0x69, + 0xb3, 0x1a, 0x7d, 0x64, 0x0c, 0x0a, 0x0e, 0xb2, 0xe6, 0x9b, 0xc9, 0xb3, 0xa6, 0x2e, 0xb1, 0xe6, + 0xcd, 0xe1, 0x6c, 0x7b, 0x27, 0xa4, 0xf9, 0x48, 0x7c, 0xb5, 0x71, 0x8b, 0x78, 0xc8, 0xbe, 0x49, + 0xbe, 0x31, 0xe9, 0x10, 0xa7, 0xcc, 0xb8, 0x98, 0x09, 0xa8, 0xd6, 0x80, 0x13, 0xa2, 0xe0, 0x11, + 0x4c, 0xdd, 0x7a, 0xe8, 0xac, 0x9b, 0x3a, 0x97, 0xc2, 0x53, 0x8d, 0x91, 0x4b, 0xdf, 0x83, 0x13, + 0xe2, 0x9b, 0x75, 0xc4, 0xb7, 0xfb, 0x94, 0xff, 0xed, 0x1c, 0xb4, 0x86, 0x93, 0xc6, 0x1d, 0xcb, + 0xdc, 0x35, 0xf6, 0x02, 0xd3, 0xb6, 0xe8, 0x80, 0x34, 0x2d, 0x43, 0x91, 0x69, 0xd9, 0x63, 0x8f, + 0x34, 
0x84, 0xb8, 0xbf, 0x42, 0x06, 0x88, 0x5b, 0x21, 0x97, 0xe1, 0xc8, 0x00, 0x69, 0x8c, 0x6a, + 0x0b, 0x21, 0x0d, 0x0e, 0x1d, 0x86, 0x34, 0x98, 0xc8, 0x10, 0xa4, 0xc1, 0x34, 0xab, 0x48, 0x63, + 0x02, 0x6e, 0x10, 0xa4, 0xc1, 0x86, 0x47, 0x20, 0x8d, 0xb0, 0xe0, 0x57, 0x8b, 0x34, 0xd4, 0xb6, + 0xbd, 0x4b, 0xd2, 0xf0, 0x2c, 0xf2, 0x49, 0x83, 0x05, 0x22, 0x96, 0x34, 0x78, 0xcc, 0x04, 0xd4, + 0x27, 0x8d, 0x70, 0xea, 0xa6, 0x20, 0x0d, 0x55, 0x2e, 0x85, 0xa7, 0x1a, 0x23, 0x97, 0x3c, 0xd2, + 0x18, 0xf9, 0xed, 0xf6, 0x48, 0x23, 0x6c, 0x8d, 0xf6, 0x13, 0x38, 0xc5, 0xac, 0xbc, 0xfb, 0xda, + 0xc5, 0x26, 0x7d, 0x8f, 0xf8, 0xe4, 0xb7, 0xa1, 0xd8, 0x34, 0x4d, 0x7e, 0xc2, 0x71, 0xe2, 0x6a, + 0x03, 0xb7, 0x7d, 0x98, 0x1e, 0x94, 0x41, 0xe7, 0xa0, 0xd8, 0xc6, 0x4e, 0xcb, 0x36, 0x7a, 0xae, + 0x78, 0x87, 0x0b, 0x7a, 0x70, 0x48, 0x7b, 0x01, 0xa7, 0x07, 0xd4, 0x73, 0x3f, 0x7d, 0x0c, 0x05, + 0x2c, 0x06, 0xb9, 0x76, 0x65, 0x87, 0xc0, 0x97, 0xf4, 0xf1, 0xda, 0x63, 0x38, 0xc5, 0x96, 0x3b, + 0xb0, 0xac, 0x3a, 0xcc, 0x79, 0x30, 0xdf, 0x6d, 0x8b, 0x47, 0x87, 0xab, 0x45, 0x0f, 0xdb, 0x58, + 0xd7, 0x8b, 0x1e, 0xa8, 0xd1, 0xd6, 0xde, 0x83, 0xd3, 0x03, 0xb3, 0x71, 0xff, 0x35, 0xe0, 0xc4, + 0x7d, 0xec, 0x4e, 0x44, 0xcb, 0x26, 0x2c, 0x87, 0xa7, 0x9a, 0x84, 0x23, 0x7e, 0x9f, 0x11, 0x65, + 0x1a, 0x1d, 0x3b, 0x56, 0xdf, 0xf6, 0x4b, 0xad, 0x13, 0x88, 0x2f, 0x82, 0x5c, 0xc7, 0x30, 0x79, + 0x71, 0x43, 0xa7, 0xff, 0xa3, 0x2a, 0xcc, 0xf6, 0x9a, 0x07, 0x5d, 0xab, 0xd9, 0xe6, 0xcc, 0xbb, + 0x5c, 0x65, 0x37, 0x20, 0xaa, 0xe2, 0x4a, 0x43, 0xf5, 0xb6, 0x79, 0xa0, 0x0b, 0x90, 0xa6, 0x8b, + 0x04, 0xf4, 0xed, 0xe3, 0xeb, 0xbe, 0x01, 0x79, 0x9b, 0x8f, 0x71, 0xeb, 0x94, 0xad, 0x52, 0x4f, + 0xce, 0x43, 0x6b, 0x0f, 0x44, 0x31, 0x45, 0x5e, 0x73, 0x0d, 0x8a, 0x02, 0xe4, 0x47, 0x85, 0x12, + 0xa2, 0x40, 0x36, 0xd6, 0x75, 0x10, 0x90, 0x46, 0x5b, 0x2b, 0x89, 0x3c, 0x92, 0xad, 0xd3, 0x7e, + 0x99, 0x15, 0x67, 0xfe, 0x71, 0x95, 0xa0, 0x7b, 0xb0, 0xe4, 0x09, 0x0c, 0xb1, 0xdf, 0x2d, 0x0a, + 0x21, 0xb1, 0xe3, 0x49, 0x11, 0x9d, 0x1a, 0x21, 0xa2, 0x81, 0xe8, 0xe5, 0x52, 0x46, 0x4f, 0x76, + 0xc2, 0xd8, 0xd1, 0xbb, 0x4b, 0x4b, 0x1e, 0x63, 0x87, 0xee, 0x29, 0x7d, 0x33, 0x27, 0x68, 0xd7, + 0xff, 0xb3, 0xec, 0x9b, 0x5e, 0x3c, 0xf2, 0xb6, 0xf4, 0x87, 0xf2, 0x96, 0xfe, 0x7e, 0xd4, 0xb6, + 0x29, 0x8b, 0x0e, 0x6e, 0xea, 0xbf, 0x9b, 0xfc, 0xa6, 0xbe, 0x25, 0x6d, 0xea, 0x9f, 0x0c, 0x6b, + 0xdd, 0xe8, 0x7d, 0x05, 0x41, 0x10, 0xd3, 0x3e, 0x41, 0x8c, 0xb3, 0xd5, 0x6f, 0xc2, 0x49, 0xc9, + 0x4e, 0x1e, 0xd4, 0x9b, 0x50, 0x10, 0x61, 0x12, 0xdb, 0x7d, 0x7c, 0x54, 0x7d, 0x78, 0xfd, 0xf3, + 0x0b, 0x30, 0x7b, 0x87, 0x5d, 0x31, 0x43, 0x06, 0xcc, 0xf2, 0x1b, 0x52, 0x48, 0x53, 0xc9, 0x87, + 0x6f, 0x5d, 0x95, 0xcf, 0xc7, 0x62, 0x38, 0x51, 0x9c, 0xfc, 0xfb, 0x9f, 0xff, 0xf7, 0xdb, 0xec, + 0x22, 0xcc, 0x53, 0xd0, 0x37, 0x78, 0x01, 0x05, 0x59, 0x50, 0xf0, 0x2e, 0xb1, 0xa0, 0x0b, 0x69, + 0x6e, 0xe5, 0x94, 0x2f, 0x26, 0xa0, 0xe2, 0x15, 0xda, 0x00, 0xfe, 0x1d, 0x12, 0x74, 0x31, 0xba, + 0x63, 0x18, 0x5c, 0xe1, 0xa5, 0x24, 0x58, 0xa2, 0x4e, 0xff, 0x8e, 0x88, 0x5a, 0xe7, 0xc0, 0x9d, + 0x14, 0xb5, 0x4e, 0xc5, 0x55, 0x93, 0x08, 0x9d, 0x2c, 0x86, 0x5b, 0x4d, 0xa7, 0x13, 0x19, 0xc3, + 0xc0, 0x1d, 0x91, 0xc8, 0x18, 0x86, 0x6e, 0x83, 0xc4, 0xc7, 0x90, 0x75, 0x60, 0x2e, 0xa4, 0xb9, + 0x71, 0x11, 0x1d, 0xc3, 0xd0, 0x55, 0x81, 0x44, 0x7f, 0xd2, 0xe5, 0xc5, 0xf8, 0x33, 0xb8, 0xc2, + 0x4b, 0x49, 0xb0, 0x44, 0x9d, 0x7e, 0xf3, 0x5d, 0xad, 0x73, 0xe0, 0x22, 0x80, 0x5a, 0xe7, 0x60, + 0x0f, 0x3f, 0x4a, 0xe7, 0x6b, 0x98, 0x0b, 0x36, 0x0e, 0xd1, 0xe5, 0x94, 0x9d, 0xd0, 0x72, 0x25, + 0x19, 0x18, 0xaf, 0xf9, 0xc7, 
0x30, 0x1f, 0xba, 0x26, 0x81, 0x94, 0x33, 0xaa, 0xae, 0x65, 0x94, + 0xaf, 0xa4, 0x40, 0x26, 0x2a, 0x0f, 0x75, 0xd9, 0xd5, 0xca, 0x55, 0x7d, 0x7d, 0xb5, 0x72, 0x65, + 0xcb, 0x3e, 0x46, 0x79, 0xa8, 0x99, 0xae, 0x56, 0xae, 0xea, 0xda, 0xab, 0x95, 0xab, 0x3b, 0xf3, + 0x11, 0xca, 0x7f, 0x9d, 0x09, 0x35, 0xec, 0x45, 0x6f, 0x15, 0x55, 0x53, 0x37, 0x61, 0x99, 0x25, + 0xb5, 0x21, 0x9b, 0xb6, 0xf1, 0x49, 0xcf, 0x3b, 0x3a, 0x91, 0x49, 0x1f, 0xee, 0x02, 0x46, 0x26, + 0xbd, 0xdc, 0xd2, 0x8b, 0x4f, 0x7a, 0xd1, 0x7e, 0x8a, 0x4e, 0x7a, 0xa9, 0x67, 0x16, 0x9d, 0xf4, + 0x72, 0x27, 0x2b, 0x31, 0xe9, 0xc5, 0x82, 0x63, 0x92, 0x5e, 0x5a, 0xf3, 0x95, 0x14, 0xc8, 0x94, + 0x79, 0x17, 0xab, 0x5c, 0xd5, 0x76, 0x8d, 0xcb, 0xbb, 0x94, 0xca, 0x59, 0x9c, 0x79, 0xfd, 0x3d, + 0x32, 0xce, 0xe1, 0xce, 0x46, 0x64, 0x9c, 0xa5, 0xe2, 0x7f, 0x42, 0x9c, 0x45, 0x6b, 0x28, 0x3a, + 0xce, 0x52, 0x3f, 0x2b, 0x3a, 0xce, 0x72, 0x97, 0x29, 0x91, 0x5f, 0xc4, 0x82, 0x63, 0xf8, 0x45, + 0x5a, 0xf3, 0x95, 0x14, 0xc8, 0xc4, 0xcd, 0xd2, 0x6b, 0x4a, 0xa8, 0x37, 0x4b, 0xb9, 0xe5, 0x51, + 0xbe, 0x98, 0x80, 0x4a, 0xf4, 0x73, 0xb0, 0x03, 0xa0, 0xf6, 0xb3, 0xa2, 0xbb, 0x51, 0xae, 0x24, + 0x03, 0xe3, 0x35, 0xf7, 0xa1, 0x18, 0xa8, 0x63, 0xa3, 0x4b, 0xe9, 0x4a, 0xef, 0xe5, 0xcb, 0x89, + 0xb8, 0xc4, 0x05, 0x07, 0xcb, 0xd4, 0xea, 0x05, 0x2b, 0x6a, 0xe2, 0xe5, 0x4a, 0x32, 0x30, 0x51, + 0x73, 0xb0, 0x24, 0xad, 0xd6, 0xac, 0x28, 0x7b, 0x97, 0x2b, 0xc9, 0xc0, 0x34, 0x59, 0xc5, 0x8a, + 0x5a, 0x91, 0x59, 0x15, 0xaa, 0x9a, 0x45, 0x66, 0x95, 0x54, 0x19, 0x4b, 0xca, 0x2a, 0xae, 0x33, + 0x26, 0xab, 0xc2, 0x6a, 0x2b, 0xc9, 0xc0, 0x54, 0x59, 0xc5, 0x0b, 0x9d, 0xd1, 0x59, 0x15, 0xae, + 0xcd, 0x46, 0x67, 0x95, 0x54, 0x31, 0x4d, 0xcc, 0xaa, 0xb8, 0x05, 0x2b, 0x8a, 0xa6, 0x71, 0x59, + 0x95, 0xda, 0xd5, 0xc1, 0x9a, 0x65, 0x5c, 0x56, 0xa5, 0xd0, 0xac, 0x2c, 0x7f, 0x46, 0x6b, 0x0e, + 0x96, 0xe2, 0xd4, 0x9a, 0x15, 0x75, 0x3f, 0xb5, 0x66, 0x55, 0x55, 0x2f, 0x4a, 0xf3, 0xcf, 0x33, + 0xb0, 0x28, 0x55, 0x44, 0xd1, 0xd5, 0x68, 0x47, 0x0e, 0x18, 0xf0, 0xf5, 0x54, 0xd8, 0x64, 0x1b, + 0xa4, 0x7a, 0xa7, 0xda, 0x06, 0x75, 0x89, 0x55, 0x6d, 0x43, 0x54, 0x01, 0x35, 0x3a, 0xd9, 0x03, + 0xd5, 0x1b, 0x14, 0xb5, 0xe5, 0x4a, 0x55, 0xa2, 0xf2, 0xe5, 0x44, 0x5c, 0xbc, 0xda, 0x9f, 0xc2, + 0x42, 0xb8, 0x9e, 0x85, 0x62, 0x36, 0x3e, 0x59, 0xf9, 0xd5, 0x34, 0xd0, 0xc4, 0x1d, 0x3a, 0x54, + 0xe1, 0x40, 0x95, 0xb4, 0xc5, 0x1a, 0xf5, 0x0e, 0xad, 0x2c, 0x97, 0xc4, 0x2c, 0x3e, 0x5c, 0x8a, + 0x45, 0x31, 0xa7, 0xbb, 0x54, 0x8b, 0x57, 0x57, 0x76, 0x63, 0xf4, 0x87, 0x8b, 0xad, 0x28, 0xe6, + 0x80, 0x97, 0x4a, 0x7f, 0x44, 0xed, 0x56, 0xad, 0x7f, 0xad, 0xf4, 0xf6, 0x8b, 0x95, 0x63, 0xff, + 0xfc, 0x62, 0xe5, 0xd8, 0xcf, 0x8e, 0x56, 0x32, 0x6f, 0x8f, 0x56, 0x32, 0xff, 0x38, 0x5a, 0xc9, + 0xfc, 0xfb, 0x68, 0x25, 0xb3, 0x33, 0x43, 0xab, 0x9f, 0xd7, 0xbf, 0x0c, 0x00, 0x00, 0xff, 0xff, + 0x83, 0x6a, 0xc0, 0xe5, 0xab, 0x38, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/control.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/control.proto new file mode 100644 index 00000000..bda699cc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/control.proto @@ -0,0 +1,782 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import 
"github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Control defines the RPC methods for controlling a cluster. +service Control { + rpc GetNode(GetNodeRequest) returns (GetNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListNodes(ListNodesRequest) returns (ListNodesResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateNode(UpdateNodeRequest) returns (UpdateNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveNode(RemoveNodeRequest) returns (RemoveNodeResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetTask(GetTaskRequest) returns (GetTaskResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListTasks(ListTasksRequest) returns (ListTasksResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveTask(RemoveTaskRequest) returns (RemoveTaskResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + rpc GetService(GetServiceRequest) returns (GetServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc ListServices(ListServicesRequest) returns (ListServicesResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc CreateService(CreateServiceRequest) returns (CreateServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc UpdateService(UpdateServiceRequest) returns (UpdateServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + rpc RemoveService(RemoveServiceRequest) returns (RemoveServiceResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; + + // ListServiceStatuses returns a `ListServiceStatusesResponse` with the + // status of the requested services, formed by computing the number of + // running vs desired tasks. It is provided as a shortcut or helper method, + // which allows a client to avoid having to calculate this value by listing + // all Tasks. If any service requested does not exist, it will be returned + // but with empty status values. 
+	rpc ListServiceStatuses(ListServiceStatusesRequest) returns (ListServiceStatusesResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+
+	rpc GetNetwork(GetNetworkRequest) returns (GetNetworkResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+	rpc ListNetworks(ListNetworksRequest) returns (ListNetworksResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+	rpc CreateNetwork(CreateNetworkRequest) returns (CreateNetworkResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+	rpc RemoveNetwork(RemoveNetworkRequest) returns (RemoveNetworkResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+
+	rpc GetCluster(GetClusterRequest) returns (GetClusterResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+	rpc ListClusters(ListClustersRequest) returns (ListClustersResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+	rpc UpdateCluster(UpdateClusterRequest) returns (UpdateClusterResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+
+	// --- secret APIs ---
+
+	// GetSecret returns a `GetSecretResponse` with a `Secret` with the same
+	// id as `GetSecretRequest.SecretID`
+	// - Returns `NotFound` if the Secret with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetSecretRequest.SecretID` is empty.
+	// - Returns an error if getting fails.
+	rpc GetSecret(GetSecretRequest) returns (GetSecretResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// UpdateSecret returns an `UpdateSecretResponse` with a `Secret` with the same
+	// id as `UpdateSecretRequest.SecretID`
+	// - Returns `NotFound` if the Secret with the given id is not found.
+	// - Returns `InvalidArgument` if the `UpdateSecretRequest.SecretID` is empty.
+	// - Returns an error if updating fails.
+	rpc UpdateSecret(UpdateSecretRequest) returns (UpdateSecretResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+
+	// ListSecrets returns a `ListSecretResponse` with a list of all non-internal `Secret`s being
+	// managed, or all secrets matching any name in `ListSecretsRequest.Names`, any
+	// name prefix in `ListSecretsRequest.NamePrefixes`, any id in
+	// `ListSecretsRequest.SecretIDs`, or any id prefix in `ListSecretsRequest.IDPrefixes`.
+	// - Returns an error if listing fails.
+	rpc ListSecrets(ListSecretsRequest) returns (ListSecretsResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+	// CreateSecret creates and returns a `CreateSecretResponse` with a `Secret` based
+	// on the provided `CreateSecretRequest.SecretSpec`.
+	// - Returns `InvalidArgument` if the `CreateSecretRequest.SecretSpec` is malformed,
+	//   or if the secret data is too long or contains invalid characters.
+	// - Returns an error if the creation fails.
+	rpc CreateSecret(CreateSecretRequest) returns (CreateSecretResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// RemoveSecret removes the secret referenced by `RemoveSecretRequest.ID`.
+	// - Returns `InvalidArgument` if `RemoveSecretRequest.ID` is empty.
+	// - Returns `NotFound` if a secret named `RemoveSecretRequest.ID` is not found.
+	// - Returns an error if the deletion fails.
+	rpc RemoveSecret(RemoveSecretRequest) returns (RemoveSecretResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// --- config APIs ---
+
+	// GetConfig returns a `GetConfigResponse` with a `Config` with the same
+	// id as `GetConfigRequest.ConfigID`
+	// - Returns `NotFound` if the Config with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetConfigRequest.ConfigID` is empty.
+	// - Returns an error if getting fails.
+	rpc GetConfig(GetConfigRequest) returns (GetConfigResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// UpdateConfig returns an `UpdateConfigResponse` with a `Config` with the same
+	// id as `UpdateConfigRequest.ConfigID`
+	// - Returns `NotFound` if the Config with the given id is not found.
+	// - Returns `InvalidArgument` if the `UpdateConfigRequest.ConfigID` is empty.
+	// - Returns an error if updating fails.
+	rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	};
+
+	// ListConfigs returns a `ListConfigResponse` with a list of `Config`s being
+	// managed, or all configs matching any name in `ListConfigsRequest.Names`, any
+	// name prefix in `ListConfigsRequest.NamePrefixes`, any id in
+	// `ListConfigsRequest.ConfigIDs`, or any id prefix in `ListConfigsRequest.IDPrefixes`.
+	// - Returns an error if listing fails.
+	rpc ListConfigs(ListConfigsRequest) returns (ListConfigsResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+	// CreateConfig creates and returns a `CreateConfigResponse` with a `Config` based
+	// on the provided `CreateConfigRequest.ConfigSpec`.
+	// - Returns `InvalidArgument` if the `CreateConfigRequest.ConfigSpec` is malformed,
+	//   or if the config data is too long or contains invalid characters.
+	// - Returns an error if the creation fails.
+	rpc CreateConfig(CreateConfigRequest) returns (CreateConfigResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// RemoveConfig removes the config referenced by `RemoveConfigRequest.ID`.
+	// - Returns `InvalidArgument` if `RemoveConfigRequest.ID` is empty.
+	// - Returns `NotFound` if a config named `RemoveConfigRequest.ID` is not found.
+	// - Returns an error if the deletion fails.
+	rpc RemoveConfig(RemoveConfigRequest) returns (RemoveConfigResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// --- extension APIs ---
+
+	// GetExtension returns a `GetExtensionResponse` with an `Extension` with the same
+	// id as `GetExtensionRequest.ExtensionId`
+	// - Returns `NotFound` if the Extension with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetExtensionRequest.ExtensionId` is empty.
+	// - Returns an error if the get fails.
+	rpc GetExtension(GetExtensionRequest) returns (GetExtensionResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// CreateExtension creates an `Extension` based on the provided `CreateExtensionRequest.Extension`
+	// and returns a `CreateExtensionResponse`.
+	// - Returns `InvalidArgument` if the `CreateExtensionRequest.Extension` is malformed,
+	//   or fails validation.
+	// - Returns an error if the creation fails.
+	rpc CreateExtension(CreateExtensionRequest) returns (CreateExtensionResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// RemoveExtension removes the extension referenced by `RemoveExtensionRequest.ID`.
+	// - Returns `InvalidArgument` if `RemoveExtensionRequest.ExtensionId` is empty.
+	// - Returns `NotFound` if an extension named `RemoveExtensionRequest.ExtensionId` is not found.
+	// - Returns an error if the deletion fails.
+	rpc RemoveExtension(RemoveExtensionRequest) returns (RemoveExtensionResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// --- resource APIs ---
+
+	// GetResource returns a `GetResourceResponse` with a `Resource` with the same
+	// id as `GetResourceRequest.Resource`
+	// - Returns `NotFound` if the Resource with the given id is not found.
+	// - Returns `InvalidArgument` if the `GetResourceRequest.Resource` is empty.
+	// - Returns an error if getting fails.
+	rpc GetResource(GetResourceRequest) returns (GetResourceResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// UpdateResource updates the resource with the given `UpdateResourceRequest.Resource.Id` using the given `UpdateResourceRequest.Resource` and returns an `UpdateResourceResponse`.
+	// - Returns `NotFound` if the Resource with the given `UpdateResourceRequest.Resource.Id` is not found.
+	// - Returns `InvalidArgument` if the `UpdateResourceRequest.Resource.Id` is empty.
+	// - Returns an error if updating fails.
+	rpc UpdateResource(UpdateResourceRequest) returns (UpdateResourceResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// ListResources returns a `ListResourcesResponse` with a list of `Resource`s stored in the raft store,
+	// or all resources matching any name in `ListResourcesRequest.Names`, any
+	// name prefix in `ListResourcesRequest.NamePrefixes`, any id in
+	// `ListResourcesRequest.ResourceIDs`, or any id prefix in `ListResourcesRequest.IDPrefixes`,
+	// or any extension name equal to `ListResourcesRequest.Extension`.
+	// - Returns an error if listing fails.
+	rpc ListResources(ListResourcesRequest) returns (ListResourcesResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// CreateResource returns a `CreateResourceResponse` after creating a `Resource` based
+	// on the provided `CreateResourceRequest.Resource`.
+	// - Returns `InvalidArgument` if the `CreateResourceRequest.Resource` is malformed,
+	//   or if the resource data is too long or contains invalid characters.
+	// - Returns an error if the creation fails.
+	rpc CreateResource(CreateResourceRequest) returns (CreateResourceResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+
+	// RemoveResource removes the `Resource` referenced by `RemoveResourceRequest.ResourceID`.
+	// - Returns `InvalidArgument` if `RemoveResourceRequest.ResourceID` is empty.
+	// - Returns `NotFound` if a resource named `RemoveResourceRequest.ResourceID` is not found.
+	// - Returns an error if the deletion fails.
+	rpc RemoveResource(RemoveResourceRequest) returns (RemoveResourceResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+	}
+}
+
+message GetNodeRequest {
+	string node_id = 1;
+}
+
+message GetNodeResponse {
+	Node node = 1;
+}
+
+message ListNodesRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		// Labels refers to engine labels, which are labels set by the user on the
+		// node and reported back to the managers
+		map<string, string> labels = 3;
+		// NodeLabels are labels set on the node object on the managers.
+		map<string, string> node_labels = 7;
+		repeated NodeSpec.Membership memberships = 4 [packed=false];
+		repeated NodeRole roles = 5 [packed=false];
+		// NamePrefixes matches all objects with the given prefixes
+		repeated string name_prefixes = 6;
+	}
+
+	Filters filters = 1;
+}
+
+message ListNodesResponse {
+	repeated Node nodes = 1;
+}
+
+// UpdateNodeRequest requests an update to the specified node. This may be used
+// to request a new availability for a node, such as PAUSE. Invalid updates
+// will be denied and cause an error.
+message UpdateNodeRequest {
+	string node_id = 1;
+	Version node_version = 2;
+	NodeSpec spec = 3;
+}
+
+message UpdateNodeResponse {
+	Node node = 1;
+}
+
+// RemoveNodeRequest requests to delete the specified node from the store.
+message RemoveNodeRequest {
+	string node_id = 1;
+	bool force = 2;
+}
+
+message RemoveNodeResponse {
+}
+
+message GetTaskRequest {
+	string task_id = 1;
+}
+
+message GetTaskResponse {
+	Task task = 1;
+}
+
+message RemoveTaskRequest {
+	string task_id = 1;
+}
+
+message RemoveTaskResponse {
+}
+
+message ListTasksRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		repeated string service_ids = 4;
+		repeated string node_ids = 5;
+		repeated docker.swarmkit.v1.TaskState desired_states = 6 [packed=false];
+		// NamePrefixes matches all objects with the given prefixes
+		repeated string name_prefixes = 7;
+		repeated string runtimes = 9;
+
+		// UpToDate matches tasks that are consistent with the current
+		// service definition.
+		// Note: this is intended for internal status reporting rather
+		// than being exposed to users. It may be removed in the future.
+		bool up_to_date = 8;
+	}
+
+	Filters filters = 1;
+}
+
+message ListTasksResponse {
+	repeated Task tasks = 1;
+}
+
+message CreateServiceRequest {
+	ServiceSpec spec = 1;
+}
+
+message CreateServiceResponse {
+	Service service = 1;
+}
+
+message GetServiceRequest {
+	string service_id = 1;
+	bool insert_defaults = 2;
+}
+
+message GetServiceResponse {
+	Service service = 1;
+}
+
+message UpdateServiceRequest {
+	string service_id = 1;
+	Version service_version = 2;
+	ServiceSpec spec = 3;
+
+	enum Rollback {
+		// This is not a rollback. The spec field of the request will
+		// be honored.
+		NONE = 0;
+
+		// Roll back the service - get spec from the service's
+		// previous_spec.
+		PREVIOUS = 1;
+	}
+
+	// Rollback may be set to PREVIOUS to request a rollback (the service's
+	// spec will be set to the value of its previous_spec field). In this
+	// case, the spec field of this request is ignored.
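+	//
+	// An illustrative Go sketch of requesting a rollback (the generated
+	// client and field names follow the usual gogo conventions and are
+	// assumptions, not part of this file):
+	//
+	//   _, err := client.UpdateService(ctx, &UpdateServiceRequest{
+	//       ServiceID:      serviceID,
+	//       ServiceVersion: &version,
+	//       Rollback:       UpdateServiceRequest_PREVIOUS,
+	//   })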
+	Rollback rollback = 4;
+}
+
+message UpdateServiceResponse {
+	Service service = 1;
+}
+
+message RemoveServiceRequest {
+	string service_id = 1;
+}
+
+message RemoveServiceResponse {
+}
+
+message ListServicesRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		// NamePrefixes matches all objects with the given prefixes
+		repeated string name_prefixes = 4;
+		repeated string runtimes = 5;
+	}
+
+	Filters filters = 1;
+}
+
+message ListServicesResponse {
+	repeated Service services = 1;
+}
+
+// ListServiceStatusesRequest is a request to get the aggregate status of a
+// service by computing the number of running vs desired tasks. It includes
+// only a service ID.
+message ListServiceStatusesRequest {
+	// Services is a list of service IDs to get statuses for.
+	repeated string services = 1;
+}
+
+// ListServiceStatusesResponse is a response containing the aggregate status of
+// a service, formed by computing the number of running vs desired tasks. The
+// values returned are only valid for the point in time at which the request is
+// made.
+message ListServiceStatusesResponse {
+	message ServiceStatus {
+		// ServiceID is the ID of the service this status describes
+		string service_id = 1;
+
+		// DesiredTasks is the number of tasks desired to be running according to the
+		// service definition at request time. It is a uint64 because that is what
+		// the replicas field on the service spec is
+		uint64 desired_tasks = 2;
+
+		// RunningTasks is the number of tasks currently in the Running state at
+		// request time. This may be larger than desired tasks if, for example, a
+		// service has been scaled down.
+		uint64 running_tasks = 3;
+	}
+
+	repeated ServiceStatus statuses = 1;
+}
+
+message CreateNetworkRequest {
+	NetworkSpec spec = 1;
+}
+
+message CreateNetworkResponse {
+	Network network = 1;
+}
+
+message GetNetworkRequest {
+	string name = 1;
+	string network_id = 2;
+}
+
+message GetNetworkResponse {
+	Network network = 1;
+}
+
+message RemoveNetworkRequest {
+	string name = 1;
+	string network_id = 2;
+}
+
+message RemoveNetworkResponse {}
+
+message ListNetworksRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		// NamePrefixes matches all objects with the given prefixes
+		repeated string name_prefixes = 4;
+	}
+
+	Filters filters = 1;
+}
+
+message ListNetworksResponse {
+	repeated Network networks = 1;
+}
+
+message GetClusterRequest {
+	string cluster_id = 1;
+}
+
+message GetClusterResponse {
+	Cluster cluster = 1;
+}
+
+message ListClustersRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		// NamePrefixes matches all objects with the given prefixes
+		repeated string name_prefixes = 4;
+	}
+
+	Filters filters = 1;
+}
+
+message ListClustersResponse {
+	repeated Cluster clusters = 1;
+}
+
+// KeyRotation tells UpdateCluster what items to rotate
+message KeyRotation {
+	// WorkerJoinToken tells UpdateCluster to rotate the worker secret token.
+	bool worker_join_token = 1;
+
+	// ManagerJoinToken tells UpdateCluster to rotate the manager secret token.
+	bool manager_join_token = 2;
+
+	// ManagerUnlockKey tells UpdateCluster to rotate the manager unlock key
+	bool manager_unlock_key = 3;
+
+}
+
+message UpdateClusterRequest {
+	// ClusterID is the cluster ID to update.
+	string cluster_id = 1;
+
+	// ClusterVersion is the version of the cluster being updated.
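+	// Like the other Update* requests here, supplying the last-seen
+	// version lets the manager reject the update if the cluster changed
+	// concurrently (a reading of the shared Version convention in this
+	// file, not behavior stated on this field).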
+	Version cluster_version = 2;
+
+	// Spec is the new spec to apply to the cluster.
+	ClusterSpec spec = 3;
+
+	// Rotation contains flags for join token and unlock key rotation
+	KeyRotation rotation = 4 [(gogoproto.nullable) = false];
+}
+
+message UpdateClusterResponse {
+	Cluster cluster = 1;
+}
+
+// GetSecretRequest is the request to get a `Secret` object given a secret id.
+message GetSecretRequest {
+	string secret_id = 1;
+}
+
+// GetSecretResponse contains the Secret corresponding to the id in
+// `GetSecretRequest`, but the `Secret.Spec.Data` field in each `Secret`
+// object should be nil instead of actually containing the secret bytes.
+message GetSecretResponse {
+	Secret secret = 1;
+}
+
+message UpdateSecretRequest {
+	// SecretID is the secret ID to update.
+	string secret_id = 1;
+
+	// SecretVersion is the version of the secret being updated.
+	Version secret_version = 2;
+
+	// Spec is the new spec to apply to the Secret
+	// Only some fields are allowed to be updated.
+	SecretSpec spec = 3;
+}
+
+message UpdateSecretResponse {
+	Secret secret = 1;
+}
+
+// ListSecretRequest is the request to list all non-internal secrets in the secret store,
+// or all secrets filtered by (name or name prefix or id prefix) and labels.
+message ListSecretsRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		repeated string name_prefixes = 4;
+	}
+
+	Filters filters = 1;
+}
+
+// ListSecretResponse contains a list of all the secrets that match the name or
+// name prefix filters provided in `ListSecretRequest`. The `Secret.Spec.Data`
+// field in each `Secret` object should be nil instead of actually containing
+// the secret bytes.
+message ListSecretsResponse {
+	repeated Secret secrets = 1;
+}
+
+// CreateSecretRequest specifies a new secret (it will not update an existing
+// secret) to create.
+message CreateSecretRequest {
+	SecretSpec spec = 1;
+}
+
+// CreateSecretResponse contains the newly created `Secret` corresponding to the
+// name in `CreateSecretRequest`. The `Secret.Spec.Data` field should be nil instead
+// of actually containing the secret bytes.
+message CreateSecretResponse {
+	Secret secret = 1;
+}
+
+// RemoveSecretRequest contains the ID of the secret that should be removed. This
+// removes all versions of the secret.
+message RemoveSecretRequest {
+	string secret_id = 1;
+}
+
+// RemoveSecretResponse is an empty object indicating the successful removal of
+// a secret.
+message RemoveSecretResponse {}
+
+// GetConfigRequest is the request to get a `Config` object given a config id.
+message GetConfigRequest {
+	string config_id = 1;
+}
+
+// GetConfigResponse contains the Config corresponding to the id in
+// `GetConfigRequest`.
+message GetConfigResponse {
+	Config config = 1;
+}
+
+message UpdateConfigRequest {
+	// ConfigID is the config ID to update.
+	string config_id = 1;
+
+	// ConfigVersion is the version of the config being updated.
+	Version config_version = 2;
+
+	// Spec is the new spec to apply to the Config
+	// Only some fields are allowed to be updated.
+	ConfigSpec spec = 3;
+}
+
+message UpdateConfigResponse {
+	Config config = 1;
+}
+
+// ListConfigRequest is the request to list all configs in the config store,
+// or all configs filtered by (name or name prefix or id prefix) and labels.
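+// For example, filtering by a name prefix might look like this in Go (an
+// illustrative sketch; the generated client and the nested Filters type
+// name follow common gogo conventions and are assumptions):
+//
+//   resp, err := client.ListConfigs(ctx, &ListConfigsRequest{
+//       Filters: &ListConfigsRequest_Filters{NamePrefixes: []string{"web-"}},
+//   })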
+message ListConfigsRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		repeated string name_prefixes = 4;
+	}
+
+	Filters filters = 1;
+}
+
+// ListConfigResponse contains a list of all the configs that match the name or
+// name prefix filters provided in `ListConfigRequest`.
+message ListConfigsResponse {
+	repeated Config configs = 1;
+}
+
+// CreateConfigRequest specifies a new config (it will not update an existing
+// config) to create.
+message CreateConfigRequest {
+	ConfigSpec spec = 1;
+}
+
+// CreateConfigResponse contains the newly created `Config` corresponding to the
+// name in `CreateConfigRequest`.
+message CreateConfigResponse {
+	Config config = 1;
+}
+
+// RemoveConfigRequest contains the ID of the config that should be removed. This
+// removes all versions of the config.
+message RemoveConfigRequest {
+	string config_id = 1;
+}
+
+// RemoveConfigResponse is an empty object indicating the successful removal of
+// a config.
+message RemoveConfigResponse {}
+
+// CreateExtensionRequest creates a new extension as specified by the provided
+// parameters
+message CreateExtensionRequest {
+	Annotations annotations = 1;
+	string description = 2;
+}
+
+// CreateExtensionResponse contains the newly created `Extension` corresponding
+// to the parameters in the CreateExtensionRequest.
+message CreateExtensionResponse {
+	Extension extension = 1;
+}
+
+// RemoveExtensionRequest contains the ID of the extension that should be removed. This
+// removes all versions of the extension.
+message RemoveExtensionRequest {
+	string extension_id = 1;
+}
+
+// RemoveExtensionResponse is an empty object indicating the successful removal
+// of an extension.
+message RemoveExtensionResponse {
+}
+
+// GetExtensionRequest is the request to get an Extension object given an extension id.
+message GetExtensionRequest {
+	string extension_id = 1;
+}
+
+// GetExtensionResponse contains the Extension corresponding to the id in
+// `GetExtensionRequest`.
+message GetExtensionResponse {
+	Extension extension = 1;
+}
+
+// CreateResourceRequest creates a new resource specified by the included
+// resource object. An existing resource will not be updated.
+message CreateResourceRequest {
+	Annotations annotations = 1;
+	string kind = 2;
+	google.protobuf.Any payload = 3;
+}
+
+// CreateResourceResponse contains the newly created `Resource` corresponding
+// to the resource in the CreateResourceRequest.
+message CreateResourceResponse {
+	Resource resource = 1;
+}
+
+// RemoveResourceRequest contains the ID of the resource that should be removed. This
+// removes all versions of the resource.
+message RemoveResourceRequest {
+	string resource_id = 1;
+}
+
+// RemoveResourceResponse is an empty object indicating the successful removal
+// of a resource.
+message RemoveResourceResponse {
+}
+
+// UpdateResourceRequest updates the resource specified by the given resource object.
+message UpdateResourceRequest {
+	string resource_id = 1;
+	Version resource_version = 2;
+	// Annotations describes the annotations to update. If the Annotations should
+	// be unchanged, then this field should be left empty. Note that the name of
+	// a Resource cannot be changed, only its labels.
+	Annotations annotations = 3;
+	// Payload describes the new payload of the resource. If the Payload should
+	// be unchanged, then this field should be left empty.
+	google.protobuf.Any payload = 4;
+}
+
+message UpdateResourceResponse {
+	Resource resource = 1;
+}
+
+// GetResourceRequest is the request to get a Resource object given a resource id.
+message GetResourceRequest {
+	string resource_id = 1;
+}
+
+// GetResourceResponse contains the Resource corresponding to the id in
+// `GetResourceRequest`.
+message GetResourceResponse {
+	Resource resource = 1;
+}
+
+// ListResourcesRequest is the request to list all resources in the raft store,
+// or all resources filtered by (name or name prefix or id prefix), labels and extension.
+message ListResourcesRequest {
+	message Filters {
+		repeated string names = 1;
+		repeated string id_prefixes = 2;
+		map<string, string> labels = 3;
+		repeated string name_prefixes = 4;
+		string kind = 5;
+	}
+
+	Filters filters = 1;
+}
+
+// ListResourcesResponse contains a list of all the resources that match the name or
+// name prefix filters provided in `ListResourcesRequest`.
+message ListResourcesResponse {
+	repeated Resource resources = 1;
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go
new file mode 100644
index 00000000..fd44621f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/deepcopy/copy.go
@@ -0,0 +1,59 @@
+package deepcopy
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/gogo/protobuf/types"
+)
+
+// CopierFrom can be implemented if an object knows how to copy another into itself.
+type CopierFrom interface {
+	// Copy takes the fields from src and copies them into the target object.
+	//
+	// Calling this method with a nil receiver or a nil src may panic.
+	CopyFrom(src interface{})
+}
+
+// Copy copies src into dst. dst and src must have the same type.
+//
+// If the type has a copy function defined, it will be used.
+//
+// Default implementations for builtin types and well known protobuf types may
+// be provided.
+//
+// If the copy cannot be performed, this function will panic. Make sure to test
+// types that use this function.
+func Copy(dst, src interface{}) {
+	switch dst := dst.(type) {
+	case *types.Any:
+		src := src.(*types.Any)
+		dst.TypeUrl = src.TypeUrl
+		if src.Value != nil {
+			dst.Value = make([]byte, len(src.Value))
+			copy(dst.Value, src.Value)
+		} else {
+			dst.Value = nil
+		}
+	case *types.Duration:
+		src := src.(*types.Duration)
+		*dst = *src
+	case *time.Duration:
+		src := src.(*time.Duration)
+		*dst = *src
+	case *types.Timestamp:
+		src := src.(*types.Timestamp)
+		*dst = *src
+	case *types.BoolValue:
+		src := src.(*types.BoolValue)
+		*dst = *src
+	case *types.Int64Value:
+		src := src.(*types.Int64Value)
+		*dst = *src
+	case CopierFrom:
+		dst.CopyFrom(src)
+	default:
+		panic(fmt.Sprintf("Copy for %T not implemented", dst))
+	}
+
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
new file mode 100644
index 00000000..f72d3d99
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/dispatcher.pb.go
@@ -0,0 +1,3830 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/dispatcher.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" +import _ "github.com/gogo/protobuf/types" + +import time "time" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import types "github.com/gogo/protobuf/types" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +type AssignmentChange_AssignmentAction int32 + +const ( + AssignmentChange_AssignmentActionUpdate AssignmentChange_AssignmentAction = 0 + AssignmentChange_AssignmentActionRemove AssignmentChange_AssignmentAction = 1 +) + +var AssignmentChange_AssignmentAction_name = map[int32]string{ + 0: "UPDATE", + 1: "REMOVE", +} +var AssignmentChange_AssignmentAction_value = map[string]int32{ + "UPDATE": 0, + "REMOVE": 1, +} + +func (x AssignmentChange_AssignmentAction) String() string { + return proto.EnumName(AssignmentChange_AssignmentAction_name, int32(x)) +} +func (AssignmentChange_AssignmentAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{10, 0} +} + +// AssignmentType specifies whether this assignment message carries +// the full state, or is an update to an existing state. +type AssignmentsMessage_Type int32 + +const ( + AssignmentsMessage_COMPLETE AssignmentsMessage_Type = 0 + AssignmentsMessage_INCREMENTAL AssignmentsMessage_Type = 1 +) + +var AssignmentsMessage_Type_name = map[int32]string{ + 0: "COMPLETE", + 1: "INCREMENTAL", +} +var AssignmentsMessage_Type_value = map[string]int32{ + "COMPLETE": 0, + "INCREMENTAL": 1, +} + +func (x AssignmentsMessage_Type) String() string { + return proto.EnumName(AssignmentsMessage_Type_name, int32(x)) +} +func (AssignmentsMessage_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{11, 0} +} + +// SessionRequest starts a session. +type SessionRequest struct { + Description *NodeDescription `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // SessionID can be provided to attempt resuming an existing session. If the + // SessionID is empty or invalid, a new SessionID will be assigned. + // + // See SessionMessage.SessionID for details. + SessionID string `protobuf:"bytes,2,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *SessionRequest) Reset() { *m = SessionRequest{} } +func (*SessionRequest) ProtoMessage() {} +func (*SessionRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{0} } + +// SessionMessage instructs an agent on various actions as part of the current +// session. An agent should act immediately on the contents. +type SessionMessage struct { + // SessionID is allocated after a successful registration. It should be + // used on all RPC calls after registration. A dispatcher may choose to + // change the SessionID, at which time an agent must re-register and obtain + // a new one. 
+	//
+	// All Dispatcher calls after register should include the SessionID. If the
+	// Dispatcher so chooses, it may reject the call with an InvalidArgument
+	// error code, at which time the agent should call Register to start a new
+	// session.
+	//
+	// As a rule, once an agent has a SessionID, it should never save it to
+	// disk or try to otherwise reuse it. If the agent loses its SessionID, it
+	// must start a new session through a call to Register. A Dispatcher may
+	// choose to reuse the SessionID, if it sees fit, but it is not advised.
+	//
+	// The actual implementation of the SessionID is Dispatcher specific and
+	// should be treated as opaque by agents.
+	//
+	// From a Dispatcher perspective, there are many ways to use the SessionID
+	// to ensure uniqueness of a set of client RPC calls. One method is to keep
+	// the SessionID unique to every call to Register in a single Dispatcher
+	// instance. This ensures that the SessionID represents the unique
+	// session from a single Agent to Manager. If the Agent restarts, we
+	// allocate a new session, since the restarted Agent is not aware of the
+	// new SessionID.
+	//
+	// The most compelling use case is to support duplicate node detection. If
+	// one clones a virtual machine, including certificate material, two nodes
+	// may end up with the same identity. This can also happen if two identical
+	// agent processes are coming from the same node. If the SessionID is
+	// replicated through the cluster, we can immediately detect the condition
+	// and address it.
+	//
+	// Extending from the case above, we can actually detect a compromised
+	// identity. Coupled with provisions to rebuild node identity, we can ban
+	// the compromised node identity and have the nodes re-authenticate and
+	// build a new identity. At this time, an administrator can then
+	// re-authorize the compromised nodes, if it was a mistake, or ensure that a
+	// misbehaved node can no longer connect to the cluster.
+	//
+	// We considered placing this field in a GRPC header. Because this is a
+	// critical feature of the protocol, we thought it should be represented
+	// directly in the RPC message set.
+	SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
+	// Node identifies the registering node.
+	Node *Node `protobuf:"bytes,2,opt,name=node" json:"node,omitempty"`
+	// Managers provides a weighted list of alternative dispatchers
+	Managers []*WeightedPeer `protobuf:"bytes,3,rep,name=managers" json:"managers,omitempty"`
+	// Symmetric encryption key distributed by the lead manager. Used by agents
+	// for securing network bootstrapping and communication.
+	NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,4,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"`
+	// Which root certificates to trust
+	RootCA []byte `protobuf:"bytes,5,opt,name=RootCA,proto3" json:"RootCA,omitempty"`
+}
+
+func (m *SessionMessage) Reset()                    { *m = SessionMessage{} }
+func (*SessionMessage) ProtoMessage()               {}
+func (*SessionMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{1} }
+
+// HeartbeatRequest provides identifying properties for a single heartbeat.
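+//
+// An agent typically drives its heartbeat loop off the Period returned in
+// HeartbeatResponse (an illustrative sketch; client, ctx and sessionID are
+// placeholders, not part of this file):
+//
+//	period := time.Second
+//	for {
+//		resp, err := client.Heartbeat(ctx, &HeartbeatRequest{SessionID: sessionID})
+//		if err == nil {
+//			period = resp.Period
+//		}
+//		time.Sleep(period)
+//	}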
+type HeartbeatRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *HeartbeatRequest) Reset() { *m = HeartbeatRequest{} } +func (*HeartbeatRequest) ProtoMessage() {} +func (*HeartbeatRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{2} } + +type HeartbeatResponse struct { + // Period is the duration to wait before sending the next heartbeat. + // Well-behaved agents should update this on every heartbeat round trip. + Period time.Duration `protobuf:"bytes,1,opt,name=period,stdduration" json:"period"` +} + +func (m *HeartbeatResponse) Reset() { *m = HeartbeatResponse{} } +func (*HeartbeatResponse) ProtoMessage() {} +func (*HeartbeatResponse) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{3} } + +type UpdateTaskStatusRequest struct { + // Tasks should contain all statuses for running tasks. Only the status + // field must be set. The spec is not required. + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` + Updates []*UpdateTaskStatusRequest_TaskStatusUpdate `protobuf:"bytes,3,rep,name=updates" json:"updates,omitempty"` +} + +func (m *UpdateTaskStatusRequest) Reset() { *m = UpdateTaskStatusRequest{} } +func (*UpdateTaskStatusRequest) ProtoMessage() {} +func (*UpdateTaskStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4} +} + +type UpdateTaskStatusRequest_TaskStatusUpdate struct { + TaskID string `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + Status *TaskStatus `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Reset() { + *m = UpdateTaskStatusRequest_TaskStatusUpdate{} +} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) ProtoMessage() {} +func (*UpdateTaskStatusRequest_TaskStatusUpdate) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{4, 0} +} + +type UpdateTaskStatusResponse struct { +} + +func (m *UpdateTaskStatusResponse) Reset() { *m = UpdateTaskStatusResponse{} } +func (*UpdateTaskStatusResponse) ProtoMessage() {} +func (*UpdateTaskStatusResponse) Descriptor() ([]byte, []int) { + return fileDescriptorDispatcher, []int{5} +} + +type TasksRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *TasksRequest) Reset() { *m = TasksRequest{} } +func (*TasksRequest) ProtoMessage() {} +func (*TasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{6} } + +type TasksMessage struct { + // Tasks is the set of tasks that should be running on the node. + // Tasks outside of this set running on the node should be terminated. 
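+	// Each message is therefore a complete desired state: an agent can
+	// reconcile by starting anything listed here that is not running and
+	// stopping any local task that is absent (a restatement of the
+	// contract above, not additional protocol behavior).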
+ Tasks []*Task `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"` +} + +func (m *TasksMessage) Reset() { *m = TasksMessage{} } +func (*TasksMessage) ProtoMessage() {} +func (*TasksMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{7} } + +type AssignmentsRequest struct { + SessionID string `protobuf:"bytes,1,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"` +} + +func (m *AssignmentsRequest) Reset() { *m = AssignmentsRequest{} } +func (*AssignmentsRequest) ProtoMessage() {} +func (*AssignmentsRequest) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{8} } + +type Assignment struct { + // Types that are valid to be assigned to Item: + // *Assignment_Task + // *Assignment_Secret + // *Assignment_Config + Item isAssignment_Item `protobuf_oneof:"item"` +} + +func (m *Assignment) Reset() { *m = Assignment{} } +func (*Assignment) ProtoMessage() {} +func (*Assignment) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{9} } + +type isAssignment_Item interface { + isAssignment_Item() + MarshalTo([]byte) (int, error) + Size() int +} + +type Assignment_Task struct { + Task *Task `protobuf:"bytes,1,opt,name=task,oneof"` +} +type Assignment_Secret struct { + Secret *Secret `protobuf:"bytes,2,opt,name=secret,oneof"` +} +type Assignment_Config struct { + Config *Config `protobuf:"bytes,3,opt,name=config,oneof"` +} + +func (*Assignment_Task) isAssignment_Item() {} +func (*Assignment_Secret) isAssignment_Item() {} +func (*Assignment_Config) isAssignment_Item() {} + +func (m *Assignment) GetItem() isAssignment_Item { + if m != nil { + return m.Item + } + return nil +} + +func (m *Assignment) GetTask() *Task { + if x, ok := m.GetItem().(*Assignment_Task); ok { + return x.Task + } + return nil +} + +func (m *Assignment) GetSecret() *Secret { + if x, ok := m.GetItem().(*Assignment_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Assignment) GetConfig() *Config { + if x, ok := m.GetItem().(*Assignment_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
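+//
+// Consumers of Assignment generally type-switch on the oneof wrapper
+// types defined above instead of probing each getter (an illustrative
+// sketch; handleTask, handleSecret and handleConfig are placeholders):
+//
+//	switch v := m.Item.(type) {
+//	case *Assignment_Task:
+//		handleTask(v.Task)
+//	case *Assignment_Secret:
+//		handleSecret(v.Secret)
+//	case *Assignment_Config:
+//		handleConfig(v.Config)
+//	}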
+func (*Assignment) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Assignment_OneofMarshaler, _Assignment_OneofUnmarshaler, _Assignment_OneofSizer, []interface{}{ + (*Assignment_Task)(nil), + (*Assignment_Secret)(nil), + (*Assignment_Config)(nil), + } +} + +func _Assignment_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Assignment_Secret: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Assignment_Config: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Assignment.Item has unexpected type %T", x) + } + return nil +} + +func _Assignment_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Assignment) + switch tag { + case 1: // item.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Task{msg} + return true, err + case 2: // item.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Secret{msg} + return true, err + case 3: // item.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Item = &Assignment_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Assignment_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Assignment) + // item + switch x := m.Item.(type) { + case *Assignment_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Assignment_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type AssignmentChange struct { + Assignment *Assignment `protobuf:"bytes,1,opt,name=assignment" json:"assignment,omitempty"` + Action AssignmentChange_AssignmentAction `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.AssignmentChange_AssignmentAction" json:"action,omitempty"` +} + +func (m *AssignmentChange) Reset() { *m = AssignmentChange{} } +func (*AssignmentChange) ProtoMessage() {} +func (*AssignmentChange) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{10} } + +type AssignmentsMessage struct { + Type AssignmentsMessage_Type `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.AssignmentsMessage_Type" json:"type,omitempty"` + // AppliesTo references the previous ResultsIn value, to chain + // incremental updates together. For the first update in a stream, + // AppliesTo is empty. If AppliesTo does not match the previously + // received ResultsIn, the consumer of the stream should start a new + // Assignments stream to re-sync. 
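+	//
+	// A consumer might track the chain like this (illustrative sketch;
+	// lastResultsIn is a placeholder variable, not part of this API):
+	//
+	//	if msg.AppliesTo != lastResultsIn {
+	//		// a message was missed; discard state and re-open the stream
+	//	}
+	//	lastResultsIn = msg.ResultsIn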
+ AppliesTo string `protobuf:"bytes,2,opt,name=applies_to,json=appliesTo,proto3" json:"applies_to,omitempty"` + // ResultsIn identifies the result of this assignments message, to + // match against the next message's AppliesTo value and protect + // against missed messages. + ResultsIn string `protobuf:"bytes,3,opt,name=results_in,json=resultsIn,proto3" json:"results_in,omitempty"` + // AssignmentChange is a set of changes to apply on this node. + Changes []*AssignmentChange `protobuf:"bytes,4,rep,name=changes" json:"changes,omitempty"` +} + +func (m *AssignmentsMessage) Reset() { *m = AssignmentsMessage{} } +func (*AssignmentsMessage) ProtoMessage() {} +func (*AssignmentsMessage) Descriptor() ([]byte, []int) { return fileDescriptorDispatcher, []int{11} } + +func init() { + proto.RegisterType((*SessionRequest)(nil), "docker.swarmkit.v1.SessionRequest") + proto.RegisterType((*SessionMessage)(nil), "docker.swarmkit.v1.SessionMessage") + proto.RegisterType((*HeartbeatRequest)(nil), "docker.swarmkit.v1.HeartbeatRequest") + proto.RegisterType((*HeartbeatResponse)(nil), "docker.swarmkit.v1.HeartbeatResponse") + proto.RegisterType((*UpdateTaskStatusRequest)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest") + proto.RegisterType((*UpdateTaskStatusRequest_TaskStatusUpdate)(nil), "docker.swarmkit.v1.UpdateTaskStatusRequest.TaskStatusUpdate") + proto.RegisterType((*UpdateTaskStatusResponse)(nil), "docker.swarmkit.v1.UpdateTaskStatusResponse") + proto.RegisterType((*TasksRequest)(nil), "docker.swarmkit.v1.TasksRequest") + proto.RegisterType((*TasksMessage)(nil), "docker.swarmkit.v1.TasksMessage") + proto.RegisterType((*AssignmentsRequest)(nil), "docker.swarmkit.v1.AssignmentsRequest") + proto.RegisterType((*Assignment)(nil), "docker.swarmkit.v1.Assignment") + proto.RegisterType((*AssignmentChange)(nil), "docker.swarmkit.v1.AssignmentChange") + proto.RegisterType((*AssignmentsMessage)(nil), "docker.swarmkit.v1.AssignmentsMessage") + proto.RegisterEnum("docker.swarmkit.v1.AssignmentChange_AssignmentAction", AssignmentChange_AssignmentAction_name, AssignmentChange_AssignmentAction_value) + proto.RegisterEnum("docker.swarmkit.v1.AssignmentsMessage_Type", AssignmentsMessage_Type_name, AssignmentsMessage_Type_value) +} + +type authenticatedWrapperDispatcherServer struct { + local DispatcherServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperDispatcherServer(local DispatcherServer, authorize func(context.Context, []string) error) DispatcherServer { + return &authenticatedWrapperDispatcherServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Session(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) +} + +func (p *authenticatedWrapperDispatcherServer) Tasks(r *TasksRequest, stream 
Dispatcher_TasksServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Tasks(r, stream) +} + +func (p *authenticatedWrapperDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.Assignments(r, stream) +} + +func (m *SessionRequest) Copy() *SessionRequest { + if m == nil { + return nil + } + o := &SessionRequest{} + o.CopyFrom(m) + return o +} + +func (m *SessionRequest) CopyFrom(src interface{}) { + + o := src.(*SessionRequest) + *m = *o + if o.Description != nil { + m.Description = &NodeDescription{} + deepcopy.Copy(m.Description, o.Description) + } +} + +func (m *SessionMessage) Copy() *SessionMessage { + if m == nil { + return nil + } + o := &SessionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SessionMessage) CopyFrom(src interface{}) { + + o := src.(*SessionMessage) + *m = *o + if o.Node != nil { + m.Node = &Node{} + deepcopy.Copy(m.Node, o.Node) + } + if o.Managers != nil { + m.Managers = make([]*WeightedPeer, len(o.Managers)) + for i := range m.Managers { + m.Managers[i] = &WeightedPeer{} + deepcopy.Copy(m.Managers[i], o.Managers[i]) + } + } + + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.RootCA != nil { + m.RootCA = make([]byte, len(o.RootCA)) + copy(m.RootCA, o.RootCA) + } +} + +func (m *HeartbeatRequest) Copy() *HeartbeatRequest { + if m == nil { + return nil + } + o := &HeartbeatRequest{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatRequest) CopyFrom(src interface{}) { + + o := src.(*HeartbeatRequest) + *m = *o +} + +func (m *HeartbeatResponse) Copy() *HeartbeatResponse { + if m == nil { + return nil + } + o := &HeartbeatResponse{} + o.CopyFrom(m) + return o +} + +func (m *HeartbeatResponse) CopyFrom(src interface{}) { + + o := src.(*HeartbeatResponse) + *m = *o + deepcopy.Copy(&m.Period, &o.Period) +} + +func (m *UpdateTaskStatusRequest) Copy() *UpdateTaskStatusRequest { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest) + *m = *o + if o.Updates != nil { + m.Updates = make([]*UpdateTaskStatusRequest_TaskStatusUpdate, len(o.Updates)) + for i := range m.Updates { + m.Updates[i] = &UpdateTaskStatusRequest_TaskStatusUpdate{} + deepcopy.Copy(m.Updates[i], o.Updates[i]) + } + } + +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Copy() *UpdateTaskStatusRequest_TaskStatusUpdate { + if m == nil { + return nil + } + o := &UpdateTaskStatusRequest_TaskStatusUpdate{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) CopyFrom(src interface{}) { + + o := src.(*UpdateTaskStatusRequest_TaskStatusUpdate) + *m = *o + if o.Status != nil { + m.Status = &TaskStatus{} + deepcopy.Copy(m.Status, o.Status) + } +} + +func (m *UpdateTaskStatusResponse) Copy() *UpdateTaskStatusResponse { + if m == nil { + return nil + } + o := &UpdateTaskStatusResponse{} + o.CopyFrom(m) + return o +} + +func (m *UpdateTaskStatusResponse) CopyFrom(src interface{}) {} +func (m *TasksRequest) Copy() *TasksRequest { 
+	if m == nil {
+		return nil
+	}
+	o := &TasksRequest{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *TasksRequest) CopyFrom(src interface{}) {
+
+	o := src.(*TasksRequest)
+	*m = *o
+}
+
+func (m *TasksMessage) Copy() *TasksMessage {
+	if m == nil {
+		return nil
+	}
+	o := &TasksMessage{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *TasksMessage) CopyFrom(src interface{}) {
+
+	o := src.(*TasksMessage)
+	*m = *o
+	if o.Tasks != nil {
+		m.Tasks = make([]*Task, len(o.Tasks))
+		for i := range m.Tasks {
+			m.Tasks[i] = &Task{}
+			deepcopy.Copy(m.Tasks[i], o.Tasks[i])
+		}
+	}
+
+}
+
+func (m *AssignmentsRequest) Copy() *AssignmentsRequest {
+	if m == nil {
+		return nil
+	}
+	o := &AssignmentsRequest{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *AssignmentsRequest) CopyFrom(src interface{}) {
+
+	o := src.(*AssignmentsRequest)
+	*m = *o
+}
+
+func (m *Assignment) Copy() *Assignment {
+	if m == nil {
+		return nil
+	}
+	o := &Assignment{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *Assignment) CopyFrom(src interface{}) {
+
+	o := src.(*Assignment)
+	*m = *o
+	if o.Item != nil {
+		switch o.Item.(type) {
+		case *Assignment_Task:
+			v := Assignment_Task{
+				Task: &Task{},
+			}
+			deepcopy.Copy(v.Task, o.GetTask())
+			m.Item = &v
+		case *Assignment_Secret:
+			v := Assignment_Secret{
+				Secret: &Secret{},
+			}
+			deepcopy.Copy(v.Secret, o.GetSecret())
+			m.Item = &v
+		case *Assignment_Config:
+			v := Assignment_Config{
+				Config: &Config{},
+			}
+			deepcopy.Copy(v.Config, o.GetConfig())
+			m.Item = &v
+		}
+	}
+
+}
+
+func (m *AssignmentChange) Copy() *AssignmentChange {
+	if m == nil {
+		return nil
+	}
+	o := &AssignmentChange{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *AssignmentChange) CopyFrom(src interface{}) {
+
+	o := src.(*AssignmentChange)
+	*m = *o
+	if o.Assignment != nil {
+		m.Assignment = &Assignment{}
+		deepcopy.Copy(m.Assignment, o.Assignment)
+	}
+}
+
+func (m *AssignmentsMessage) Copy() *AssignmentsMessage {
+	if m == nil {
+		return nil
+	}
+	o := &AssignmentsMessage{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *AssignmentsMessage) CopyFrom(src interface{}) {
+
+	o := src.(*AssignmentsMessage)
+	*m = *o
+	if o.Changes != nil {
+		m.Changes = make([]*AssignmentChange, len(o.Changes))
+		for i := range m.Changes {
+			m.Changes[i] = &AssignmentChange{}
+			deepcopy.Copy(m.Changes[i], o.Changes[i])
+		}
+	}
+
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Dispatcher service
+
+type DispatcherClient interface {
+	// Session starts an agent session with the dispatcher. The session is
+	// started after the first SessionMessage is received.
+	//
+	// Once started, the agent is controlled with a stream of SessionMessage.
+	// Agents should listen on the stream at all times for instructions.
+	Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error)
+	// Heartbeat is the heartbeat method for nodes. It returns the new TTL in the response.
+	// A node should send a new heartbeat earlier than now + TTL; otherwise it will
+	// be deregistered from the dispatcher and its status will be updated to NodeStatus_DOWN
+	Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error)
+	// UpdateTaskStatus updates the status of tasks. A node should send such updates
Node should send such updates + // on every status change of its tasks. + // + // Whether receiving batch updates or single status updates, this method + // should be accepting. Errors should only be returned if the entire update + // should be retried, due to data loss or other problems. + // + // If a task is unknown the dispatcher, the status update should be + // accepted regardless. + UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) + // Tasks is a stream of tasks state for node. Each message contains full list + // of tasks which should be run on node, if task is not present in that list, + // it should be terminated. + Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) + // Assignments is a stream of assignments such as tasks and secrets for node. + // The first message in the stream contains all of the tasks and secrets + // that are relevant to the node. Future messages in the stream are updates to + // the set of assignments. + Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) +} + +type dispatcherClient struct { + cc *grpc.ClientConn +} + +func NewDispatcherClient(cc *grpc.ClientConn) DispatcherClient { + return &dispatcherClient{cc} +} + +func (c *dispatcherClient) Session(ctx context.Context, in *SessionRequest, opts ...grpc.CallOption) (Dispatcher_SessionClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Dispatcher/Session", opts...) + if err != nil { + return nil, err + } + x := &dispatcherSessionClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Dispatcher_SessionClient interface { + Recv() (*SessionMessage, error) + grpc.ClientStream +} + +type dispatcherSessionClient struct { + grpc.ClientStream +} + +func (x *dispatcherSessionClient) Recv() (*SessionMessage, error) { + m := new(SessionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *dispatcherClient) Heartbeat(ctx context.Context, in *HeartbeatRequest, opts ...grpc.CallOption) (*HeartbeatResponse, error) { + out := new(HeartbeatResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/Heartbeat", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) UpdateTaskStatus(ctx context.Context, in *UpdateTaskStatusRequest, opts ...grpc.CallOption) (*UpdateTaskStatusResponse, error) { + out := new(UpdateTaskStatusResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dispatcherClient) Tasks(ctx context.Context, in *TasksRequest, opts ...grpc.CallOption) (Dispatcher_TasksClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.Dispatcher/Tasks", opts...) 
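+	// Editor's note (not upstream text): the remainder of this helper follows
+	// grpc-go's standard generated pattern for a server-streaming RPC, the same
+	// one used by Session above: send the single request message, close the
+	// send direction, and return a wrapper whose Recv the caller drives until
+	// io.EOF.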
+	if err != nil {
+		return nil, err
+	}
+	x := &dispatcherTasksClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Dispatcher_TasksClient interface {
+	Recv() (*TasksMessage, error)
+	grpc.ClientStream
+}
+
+type dispatcherTasksClient struct {
+	grpc.ClientStream
+}
+
+func (x *dispatcherTasksClient) Recv() (*TasksMessage, error) {
+	m := new(TasksMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *dispatcherClient) Assignments(ctx context.Context, in *AssignmentsRequest, opts ...grpc.CallOption) (Dispatcher_AssignmentsClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Dispatcher_serviceDesc.Streams[2], c.cc, "/docker.swarmkit.v1.Dispatcher/Assignments", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &dispatcherAssignmentsClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Dispatcher_AssignmentsClient interface {
+	Recv() (*AssignmentsMessage, error)
+	grpc.ClientStream
+}
+
+type dispatcherAssignmentsClient struct {
+	grpc.ClientStream
+}
+
+func (x *dispatcherAssignmentsClient) Recv() (*AssignmentsMessage, error) {
+	m := new(AssignmentsMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for Dispatcher service
+
+type DispatcherServer interface {
+	// Session starts an agent session with the dispatcher. The session is
+	// started after the first SessionMessage is received.
+	//
+	// Once started, the agent is controlled with a stream of SessionMessage.
+	// Agents should listen on the stream at all times for instructions.
+	Session(*SessionRequest, Dispatcher_SessionServer) error
+	// Heartbeat is the heartbeat method for nodes. It returns the new TTL in response.
+	// A node should send its next heartbeat earlier than now + TTL, otherwise it will
+	// be deregistered from the dispatcher and its status will be updated to NodeStatus_DOWN.
+	Heartbeat(context.Context, *HeartbeatRequest) (*HeartbeatResponse, error)
+	// UpdateTaskStatus updates the status of a task. A node should send such updates
+	// on every status change of its tasks.
+	//
+	// Whether receiving batch updates or single status updates, this method
+	// should be accepting. Errors should only be returned if the entire update
+	// should be retried, due to data loss or other problems.
+	//
+	// If a task is unknown to the dispatcher, the status update should be
+	// accepted regardless.
+	UpdateTaskStatus(context.Context, *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error)
+	// Tasks is a stream of task states for the node. Each message contains the full
+	// list of tasks that should be run on the node; if a task is not present in that
+	// list, it should be terminated.
+	Tasks(*TasksRequest, Dispatcher_TasksServer) error
+	// Assignments is a stream of assignments such as tasks and secrets for the node.
+	// The first message in the stream contains all of the tasks and secrets
+	// that are relevant to the node. Future messages in the stream are updates to
+	// the set of assignments.
+ Assignments(*AssignmentsRequest, Dispatcher_AssignmentsServer) error +} + +func RegisterDispatcherServer(s *grpc.Server, srv DispatcherServer) { + s.RegisterService(&_Dispatcher_serviceDesc, srv) +} + +func _Dispatcher_Session_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SessionRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Session(m, &dispatcherSessionServer{stream}) +} + +type Dispatcher_SessionServer interface { + Send(*SessionMessage) error + grpc.ServerStream +} + +type dispatcherSessionServer struct { + grpc.ServerStream +} + +func (x *dispatcherSessionServer) Send(m *SessionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Heartbeat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HeartbeatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).Heartbeat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/Heartbeat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).Heartbeat(ctx, req.(*HeartbeatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_UpdateTaskStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTaskStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Dispatcher/UpdateTaskStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DispatcherServer).UpdateTaskStatus(ctx, req.(*UpdateTaskStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Dispatcher_Tasks_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(TasksRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Tasks(m, &dispatcherTasksServer{stream}) +} + +type Dispatcher_TasksServer interface { + Send(*TasksMessage) error + grpc.ServerStream +} + +type dispatcherTasksServer struct { + grpc.ServerStream +} + +func (x *dispatcherTasksServer) Send(m *TasksMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _Dispatcher_Assignments_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(AssignmentsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(DispatcherServer).Assignments(m, &dispatcherAssignmentsServer{stream}) +} + +type Dispatcher_AssignmentsServer interface { + Send(*AssignmentsMessage) error + grpc.ServerStream +} + +type dispatcherAssignmentsServer struct { + grpc.ServerStream +} + +func (x *dispatcherAssignmentsServer) Send(m *AssignmentsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Dispatcher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Dispatcher", + HandlerType: (*DispatcherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Heartbeat", + Handler: _Dispatcher_Heartbeat_Handler, + }, + { + MethodName: "UpdateTaskStatus", + Handler: _Dispatcher_UpdateTaskStatus_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "Session", + Handler: 
_Dispatcher_Session_Handler, + ServerStreams: true, + }, + { + StreamName: "Tasks", + Handler: _Dispatcher_Tasks_Handler, + ServerStreams: true, + }, + { + StreamName: "Assignments", + Handler: _Dispatcher_Assignments_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/dispatcher.proto", +} + +func (m *SessionRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Description != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Description.Size())) + n1, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.SessionID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *SessionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if len(m.Managers) > 0 { + for _, msg := range m.Managers { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RootCA) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.RootCA))) + i += copy(dAtA[i:], m.RootCA) + } + return i, nil +} + +func (m *HeartbeatRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *HeartbeatResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeartbeatResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(types.SizeOfStdDuration(m.Period))) + n3, err := types.StdDurationMarshalTo(m.Period, dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *UpdateTaskStatusRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + if len(m.Updates) > 0 { + for _, msg := range m.Updates { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TaskID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + if m.Status != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Status.Size())) + n4, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *UpdateTaskStatusResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateTaskStatusResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TasksRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *TasksMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TasksMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AssignmentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SessionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.SessionID))) + i += copy(dAtA[i:], m.SessionID) + } + return i, nil +} + +func (m *Assignment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Assignment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Item != nil { + nn5, err := 
m.Item.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *Assignment_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Task.Size())) + n6, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Assignment_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Assignment_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Config.Size())) + n8, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *AssignmentChange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentChange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Assignment != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Assignment.Size())) + n9, err := m.Assignment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Action)) + } + return i, nil +} + +func (m *AssignmentsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AssignmentsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(m.Type)) + } + if len(m.AppliesTo) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.AppliesTo))) + i += copy(dAtA[i:], m.AppliesTo) + } + if len(m.ResultsIn) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(len(m.ResultsIn))) + i += copy(dAtA[i:], m.ResultsIn) + } + if len(m.Changes) > 0 { + for _, msg := range m.Changes { + dAtA[i] = 0x22 + i++ + i = encodeVarintDispatcher(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintDispatcher(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyDispatcherServer struct { + local DispatcherServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyDispatcherServer(local DispatcherServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) DispatcherServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, 
status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyDispatcherServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyDispatcherServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyDispatcherServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Dispatcher_SessionServerWrapper struct { + Dispatcher_SessionServer + ctx context.Context +} + +func (s Dispatcher_SessionServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Session(r *SessionRequest, stream Dispatcher_SessionServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_SessionServerWrapper{ + Dispatcher_SessionServer: stream, + ctx: ctx, + } + return p.local.Session(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Session(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (p *raftProxyDispatcherServer) Heartbeat(ctx context.Context, r *HeartbeatRequest) (*HeartbeatResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).Heartbeat(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Heartbeat(ctx, r) + } + return nil, err + } + return 
NewDispatcherClient(conn).Heartbeat(modCtx, r) + } + return resp, err +} + +func (p *raftProxyDispatcherServer) UpdateTaskStatus(ctx context.Context, r *UpdateTaskStatusRequest) (*UpdateTaskStatusResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.UpdateTaskStatus(ctx, r) + } + return nil, err + } + return NewDispatcherClient(conn).UpdateTaskStatus(modCtx, r) + } + return resp, err +} + +type Dispatcher_TasksServerWrapper struct { + Dispatcher_TasksServer + ctx context.Context +} + +func (s Dispatcher_TasksServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Tasks(r *TasksRequest, stream Dispatcher_TasksServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_TasksServerWrapper{ + Dispatcher_TasksServer: stream, + ctx: ctx, + } + return p.local.Tasks(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Tasks(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type Dispatcher_AssignmentsServerWrapper struct { + Dispatcher_AssignmentsServer + ctx context.Context +} + +func (s Dispatcher_AssignmentsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyDispatcherServer) Assignments(r *AssignmentsRequest, stream Dispatcher_AssignmentsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Dispatcher_AssignmentsServerWrapper{ + Dispatcher_AssignmentsServer: stream, + ctx: ctx, + } + return p.local.Assignments(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewDispatcherClient(conn).Assignments(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *SessionRequest) Size() (n int) { + var l int + _ = l + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *SessionMessage) Size() (n int) { + var l int + _ = 
l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Managers) > 0 { + for _, e := range m.Managers { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + l = len(m.RootCA) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *HeartbeatResponse) Size() (n int) { + var l int + _ = l + l = types.SizeOfStdDuration(m.Period) + n += 1 + l + sovDispatcher(uint64(l)) + return n +} + +func (m *UpdateTaskStatusRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Updates) > 0 { + for _, e := range m.Updates { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Size() (n int) { + var l int + _ = l + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Status != nil { + l = m.Status.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *UpdateTaskStatusResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TasksRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *TasksMessage) Size() (n int) { + var l int + _ = l + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func (m *AssignmentsRequest) Size() (n int) { + var l int + _ = l + l = len(m.SessionID) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} + +func (m *Assignment) Size() (n int) { + var l int + _ = l + if m.Item != nil { + n += m.Item.Size() + } + return n +} + +func (m *Assignment_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *Assignment_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + return n +} +func (m *AssignmentChange) Size() (n int) { + var l int + _ = l + if m.Assignment != nil { + l = m.Assignment.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovDispatcher(uint64(m.Action)) + } + return n +} + +func (m *AssignmentsMessage) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovDispatcher(uint64(m.Type)) + } + l = len(m.AppliesTo) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + l = len(m.ResultsIn) + if l > 0 { + n += 1 + l + sovDispatcher(uint64(l)) + } + if len(m.Changes) > 0 { + for _, e := range m.Changes { + l = e.Size() + n += 1 + l + sovDispatcher(uint64(l)) + } + } + return n +} + +func sovDispatcher(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDispatcher(x uint64) (n int) { + return sovDispatcher(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this 
*SessionRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionRequest{`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *SessionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionMessage{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `Managers:` + strings.Replace(fmt.Sprintf("%v", this.Managers), "WeightedPeer", "WeightedPeer", 1) + `,`, + `NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`, + `RootCA:` + fmt.Sprintf("%v", this.RootCA) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *HeartbeatResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HeartbeatResponse{`, + `Period:` + strings.Replace(strings.Replace(this.Period.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `Updates:` + strings.Replace(fmt.Sprintf("%v", this.Updates), "UpdateTaskStatusRequest_TaskStatusUpdate", "UpdateTaskStatusRequest_TaskStatusUpdate", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusRequest_TaskStatusUpdate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusRequest_TaskStatusUpdate{`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "TaskStatus", "TaskStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateTaskStatusResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateTaskStatusResponse{`, + `}`, + }, "") + return s +} +func (this *TasksRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *TasksMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TasksMessage{`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsRequest{`, + `SessionID:` + fmt.Sprintf("%v", this.SessionID) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment{`, + `Item:` + fmt.Sprintf("%v", this.Item) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return 
s +} +func (this *Assignment_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Assignment_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Assignment_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentChange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentChange{`, + `Assignment:` + strings.Replace(fmt.Sprintf("%v", this.Assignment), "Assignment", "Assignment", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `}`, + }, "") + return s +} +func (this *AssignmentsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AssignmentsMessage{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `AppliesTo:` + fmt.Sprintf("%v", this.AppliesTo) + `,`, + `ResultsIn:` + fmt.Sprintf("%v", this.ResultsIn) + `,`, + `Changes:` + strings.Replace(fmt.Sprintf("%v", this.Changes), "AssignmentChange", "AssignmentChange", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringDispatcher(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SessionRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &Node{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Managers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Managers = append(m.Managers, &WeightedPeer{}) + if err := m.Managers[len(m.Managers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = 
append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootCA = append(m.RootCA[:0], dAtA[iNdEx:postIndex]...) + if m.RootCA == nil { + m.RootCA = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HeartbeatResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeartbeatResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeartbeatResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Period", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdDurationUnmarshal(&m.Period, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Updates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Updates = append(m.Updates, &UpdateTaskStatusRequest_TaskStatusUpdate{}) + if err := m.Updates[len(m.Updates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusRequest_TaskStatusUpdate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatusUpdate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatusUpdate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Status == nil { + m.Status = &TaskStatus{} + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateTaskStatusResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateTaskStatusResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TasksMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TasksMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TasksMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsRequest: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: AssignmentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Assignment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Assignment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Assignment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Task{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Secret{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Item = &Assignment_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentChange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentChange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentChange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Assignment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Assignment == nil { + m.Assignment = &Assignment{} + } + if err := m.Assignment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (AssignmentChange_AssignmentAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AssignmentsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AssignmentsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AssignmentsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (AssignmentsMessage_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliesTo", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliesTo = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResultsIn", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResultsIn = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Changes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDispatcher + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDispatcher + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Changes = append(m.Changes, &AssignmentChange{}) + if err := m.Changes[len(m.Changes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDispatcher(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDispatcher + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDispatcher(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, 
ErrInvalidLengthDispatcher + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDispatcher + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDispatcher(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDispatcher = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDispatcher = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/dispatcher.proto", fileDescriptorDispatcher) +} + +var fileDescriptorDispatcher = []byte{ + // 1007 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x55, 0x4f, 0x6f, 0xe3, 0x44, + 0x1c, 0xcd, 0xa4, 0xa9, 0xdb, 0xfc, 0xd2, 0x2d, 0x61, 0xb4, 0x2a, 0xc6, 0xd2, 0xa6, 0xc1, 0x65, + 0xab, 0x8a, 0x2d, 0xce, 0x12, 0xfe, 0x1d, 0xa8, 0x0a, 0x4d, 0x13, 0xa9, 0xd1, 0x6e, 0xbb, 0xd5, + 0xb4, 0xbb, 0x7b, 0xac, 0x1c, 0x7b, 0xd6, 0x35, 0x69, 0x3c, 0xc6, 0x33, 0xd9, 0x25, 0x07, 0x24, + 0x0e, 0xac, 0x84, 0x38, 0x21, 0x4e, 0x95, 0x10, 0x5f, 0x01, 0xf1, 0x31, 0x2a, 0x4e, 0x1c, 0x39, + 0x15, 0x36, 0x1f, 0x80, 0x0f, 0xc0, 0x09, 0x79, 0x3c, 0x4e, 0x42, 0x37, 0x69, 0xd3, 0x9e, 0x12, + 0xcf, 0xbc, 0xf7, 0xe6, 0xf9, 0xfd, 0x7e, 0xfe, 0x0d, 0x54, 0x3c, 0x5f, 0x1c, 0x77, 0x5b, 0x96, + 0xc3, 0x3a, 0x15, 0x97, 0x39, 0x6d, 0x1a, 0x55, 0xf8, 0x0b, 0x3b, 0xea, 0xb4, 0x7d, 0x51, 0xb1, + 0x43, 0xbf, 0xe2, 0xfa, 0x3c, 0xb4, 0x85, 0x73, 0x4c, 0x23, 0x2b, 0x8c, 0x98, 0x60, 0x18, 0x27, + 0x28, 0x2b, 0x45, 0x59, 0xcf, 0x3f, 0x30, 0xde, 0xbb, 0x42, 0x44, 0xf4, 0x42, 0xca, 0x13, 0xbe, + 0xb1, 0x7e, 0x05, 0x96, 0xb5, 0xbe, 0xa4, 0x8e, 0x48, 0xd1, 0xb7, 0x3d, 0xe6, 0x31, 0xf9, 0xb7, + 0x12, 0xff, 0x53, 0xab, 0x9f, 0x5e, 0xa2, 0x21, 0x11, 0xad, 0xee, 0xb3, 0x4a, 0x78, 0xd2, 0xf5, + 0xfc, 0x40, 0xfd, 0x28, 0x62, 0xc9, 0x63, 0xcc, 0x3b, 0xa1, 0x43, 0x90, 0xdb, 0x8d, 0x6c, 0xe1, + 0x33, 0xb5, 0x6f, 0xbe, 0x44, 0xb0, 0x78, 0x40, 0x39, 0xf7, 0x59, 0x40, 0xe8, 0x57, 0x5d, 0xca, + 0x05, 0x6e, 0x40, 0xc1, 0xa5, 0xdc, 0x89, 0xfc, 0x30, 0xc6, 0xe9, 0xa8, 0x8c, 0xd6, 0x0a, 0xd5, + 0x15, 0xeb, 0xf5, 0x14, 0xac, 0x3d, 0xe6, 0xd2, 0xfa, 0x10, 0x4a, 0x46, 0x79, 0x78, 0x1d, 0x80, + 0x27, 0xc2, 0x47, 0xbe, 0xab, 0x67, 0xcb, 0x68, 0x2d, 0x5f, 0xbb, 0xd5, 0x3f, 0x5f, 0xce, 0xab, + 0xe3, 0x9a, 0x75, 0x92, 0x57, 0x80, 0xa6, 0x6b, 0xfe, 0x9c, 0x1d, 0xf8, 0xd8, 0xa5, 0x9c, 0xdb, + 0x1e, 0xbd, 0x20, 0x80, 0x2e, 0x17, 0xc0, 0xeb, 0x90, 0x0b, 0x98, 0x4b, 0xe5, 0x41, 0x85, 0xaa, + 0x3e, 0xc9, 0x2e, 0x91, 0x28, 0xbc, 0x01, 0xf3, 0x1d, 0x3b, 0xb0, 0x3d, 0x1a, 0x71, 0x7d, 0xa6, + 0x3c, 0xb3, 0x56, 0xa8, 0x96, 0xc7, 0x31, 0x9e, 0x52, 0xdf, 0x3b, 0x16, 0xd4, 0xdd, 0xa7, 0x34, + 0x22, 0x03, 0x06, 0x7e, 0x0a, 0x4b, 0x01, 0x15, 0x2f, 0x58, 0xd4, 0x3e, 0x6a, 0x31, 0x26, 0xb8, + 0x88, 0xec, 0xf0, 0xa8, 0x4d, 0x7b, 0x5c, 0xcf, 0x49, 0xad, 0x77, 0xc6, 0x69, 0x35, 0x02, 0x27, + 0xea, 0xc9, 0x68, 0x1e, 0xd0, 0x1e, 0xb9, 0xad, 0x04, 0x6a, 0x29, 0xff, 0x01, 0xed, 0x71, 0xbc, + 0x04, 0x1a, 0x61, 0x4c, 0x6c, 0x6f, 0xe9, 0xb3, 0x65, 
0xb4, 0xb6, 0x40, 0xd4, 0x93, 0xf9, 0x05, + 0x14, 0x77, 0xa8, 0x1d, 0x89, 0x16, 0xb5, 0x45, 0x5a, 0xa6, 0x6b, 0xc5, 0x63, 0xee, 0xc3, 0x9b, + 0x23, 0x0a, 0x3c, 0x64, 0x01, 0xa7, 0xf8, 0x33, 0xd0, 0x42, 0x1a, 0xf9, 0xcc, 0x55, 0x45, 0x7e, + 0xdb, 0x4a, 0xba, 0xc5, 0x4a, 0xbb, 0xc5, 0xaa, 0xab, 0x6e, 0xa9, 0xcd, 0x9f, 0x9d, 0x2f, 0x67, + 0x4e, 0xff, 0x5a, 0x46, 0x44, 0x51, 0xcc, 0x1f, 0xb3, 0xf0, 0xd6, 0xe3, 0xd0, 0xb5, 0x05, 0x3d, + 0xb4, 0x79, 0xfb, 0x40, 0xd8, 0xa2, 0xcb, 0x6f, 0xe4, 0x0d, 0x3f, 0x81, 0xb9, 0xae, 0x14, 0x4a, + 0x6b, 0xb1, 0x31, 0x2e, 0xbf, 0x09, 0x67, 0x59, 0xc3, 0x95, 0x04, 0x41, 0x52, 0x31, 0x83, 0x41, + 0xf1, 0xe2, 0x26, 0x5e, 0x81, 0x39, 0x61, 0xf3, 0xf6, 0xd0, 0x16, 0xf4, 0xcf, 0x97, 0xb5, 0x18, + 0xd6, 0xac, 0x13, 0x2d, 0xde, 0x6a, 0xba, 0xf8, 0x13, 0xd0, 0xb8, 0x24, 0xa9, 0x6e, 0x2a, 0x8d, + 0xf3, 0x33, 0xe2, 0x44, 0xa1, 0x4d, 0x03, 0xf4, 0xd7, 0x5d, 0x26, 0x59, 0x9b, 0x1b, 0xb0, 0x10, + 0xaf, 0xde, 0x2c, 0x22, 0x73, 0x53, 0xb1, 0xd3, 0x6f, 0xc3, 0x82, 0xd9, 0xd8, 0x2b, 0xd7, 0x91, + 0x0c, 0x4c, 0x9f, 0x64, 0x90, 0x24, 0x30, 0xb3, 0x06, 0x78, 0x8b, 0x73, 0xdf, 0x0b, 0x3a, 0x34, + 0x10, 0x37, 0xf4, 0xf0, 0x1b, 0x02, 0x18, 0x8a, 0x60, 0x0b, 0x72, 0xb1, 0xb6, 0x6a, 0x9d, 0x89, + 0x0e, 0x76, 0x32, 0x44, 0xe2, 0xf0, 0x47, 0xa0, 0x71, 0xea, 0x44, 0x54, 0xa8, 0x50, 0x8d, 0x71, + 0x8c, 0x03, 0x89, 0xd8, 0xc9, 0x10, 0x85, 0x8d, 0x59, 0x0e, 0x0b, 0x9e, 0xf9, 0x9e, 0x3e, 0x33, + 0x99, 0xb5, 0x2d, 0x11, 0x31, 0x2b, 0xc1, 0xd6, 0x34, 0xc8, 0xf9, 0x82, 0x76, 0xcc, 0x97, 0x59, + 0x28, 0x0e, 0x2d, 0x6f, 0x1f, 0xdb, 0x81, 0x47, 0xf1, 0x26, 0x80, 0x3d, 0x58, 0x53, 0xf6, 0xc7, + 0x56, 0x78, 0xc8, 0x24, 0x23, 0x0c, 0xbc, 0x0b, 0x9a, 0xed, 0xc8, 0xd1, 0x18, 0xbf, 0xc8, 0x62, + 0xf5, 0xe3, 0xcb, 0xb9, 0xc9, 0xa9, 0x23, 0x0b, 0x5b, 0x92, 0x4c, 0x94, 0x88, 0xd9, 0x1a, 0xb5, + 0x98, 0xec, 0xe1, 0x55, 0xd0, 0x1e, 0xef, 0xd7, 0xb7, 0x0e, 0x1b, 0xc5, 0x8c, 0x61, 0xfc, 0xf0, + 0x4b, 0x79, 0xe9, 0x22, 0x42, 0x75, 0xf3, 0x2a, 0x68, 0xa4, 0xb1, 0xfb, 0xe8, 0x49, 0xa3, 0x88, + 0xc6, 0xe3, 0x08, 0xed, 0xb0, 0xe7, 0xd4, 0xfc, 0x17, 0xfd, 0xaf, 0xfe, 0x69, 0x17, 0x7d, 0x0e, + 0xb9, 0xf8, 0xa2, 0x92, 0x19, 0x2c, 0x56, 0xef, 0x5d, 0xfe, 0x1e, 0x29, 0xcb, 0x3a, 0xec, 0x85, + 0x94, 0x48, 0x22, 0xbe, 0x03, 0x60, 0x87, 0xe1, 0x89, 0x4f, 0xf9, 0x91, 0x60, 0xc9, 0x8c, 0x27, + 0x79, 0xb5, 0x72, 0xc8, 0xe2, 0xed, 0x88, 0xf2, 0xee, 0x89, 0xe0, 0x47, 0x7e, 0x20, 0x0b, 0x98, + 0x27, 0x79, 0xb5, 0xd2, 0x0c, 0xf0, 0x26, 0xcc, 0x39, 0x32, 0x9c, 0x74, 0x6e, 0xbe, 0x3b, 0x4d, + 0x92, 0x24, 0x25, 0x99, 0x77, 0x21, 0x17, 0x7b, 0xc1, 0x0b, 0x30, 0xbf, 0xfd, 0x68, 0x77, 0xff, + 0x61, 0x23, 0xce, 0x0b, 0xbf, 0x01, 0x85, 0xe6, 0xde, 0x36, 0x69, 0xec, 0x36, 0xf6, 0x0e, 0xb7, + 0x1e, 0x16, 0x51, 0xf5, 0x74, 0x16, 0xa0, 0x3e, 0xb8, 0xd4, 0xf1, 0xd7, 0x30, 0xa7, 0xda, 0x1b, + 0x9b, 0xe3, 0x5b, 0x70, 0xf4, 0x36, 0x34, 0x2e, 0xc3, 0xa8, 0x44, 0xcc, 0x95, 0xdf, 0x7f, 0xfd, + 0xe7, 0x34, 0x7b, 0x07, 0x16, 0x24, 0xe6, 0xfd, 0x78, 0xae, 0xd3, 0x08, 0x6e, 0x25, 0x4f, 0xea, + 0xd6, 0xb8, 0x8f, 0xf0, 0x37, 0x90, 0x1f, 0xcc, 0x60, 0x3c, 0xf6, 0x5d, 0x2f, 0x0e, 0x79, 0xe3, + 0xee, 0x15, 0x28, 0x35, 0x5c, 0xa6, 0x31, 0x80, 0x7f, 0x42, 0x50, 0xbc, 0x38, 0x9e, 0xf0, 0xbd, + 0x6b, 0x8c, 0x5a, 0x63, 0x7d, 0x3a, 0xf0, 0x75, 0x4c, 0x75, 0x61, 0x56, 0x0e, 0x36, 0x5c, 0x9e, + 0x34, 0x40, 0x06, 0xa7, 0x4f, 0x46, 0xa4, 0x75, 0x58, 0x9d, 0xe2, 0xc4, 0xef, 0xb3, 0xe8, 0x3e, + 0xc2, 0xdf, 0x21, 0x28, 0x8c, 0xb4, 0x36, 0x5e, 0xbd, 0xa2, 0xf7, 0x53, 0x0f, 0xab, 0xd3, 0x7d, + 0x23, 0x53, 0x76, 0x44, 0x4d, 0x3f, 0x7b, 0x55, 0xca, 0xfc, 0xf9, 0xaa, 0x94, 
0xf9, 0xb6, 0x5f,
+	0x42, 0x67, 0xfd, 0x12, 0xfa, 0xa3, 0x5f, 0x42, 0x7f, 0xf7, 0x4b, 0xa8, 0xa5, 0xc9, 0x2b, 0xf8,
+	0xc3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xf0, 0x6a, 0xcb, 0xae, 0x0a, 0x00, 0x00,
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/dispatcher.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/dispatcher.proto
new file mode 100644
index 00000000..232580ec
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/dispatcher.proto
@@ -0,0 +1,218 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "github.com/docker/swarmkit/api/types.proto";
+import "github.com/docker/swarmkit/api/objects.proto";
+import "gogoproto/gogo.proto";
+import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
+import "google/protobuf/duration.proto";
+
+// Dispatcher is the API provided by a manager group for agents to connect to. Agents
+// connect to this service to receive task assignments and report status.
+//
+// API methods on this service are used only by agent nodes.
+service Dispatcher {
+	// Session starts an agent session with the dispatcher. The session is
+	// started after the first SessionMessage is received.
+	//
+	// Once started, the agent is controlled with a stream of SessionMessage.
+	// Agents should listen on the stream at all times for instructions.
+	rpc Session(SessionRequest) returns (stream SessionMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
+	// Heartbeat is the heartbeat method for nodes. It returns a new TTL in the
+	// response. A node should send its next heartbeat earlier than now + TTL;
+	// otherwise it is deregistered from the dispatcher and its status is
+	// updated to NodeStatus_DOWN.
+	rpc Heartbeat(HeartbeatRequest) returns (HeartbeatResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
+	// UpdateTaskStatus updates the status of a task. Nodes should send such
+	// updates on every status change of their tasks.
+	//
+	// This method should accept both batch updates and single status updates.
+	// Errors should only be returned if the entire update should be retried,
+	// due to data loss or other problems.
+	//
+	// If a task is unknown to the dispatcher, the status update should be
+	// accepted regardless.
+	rpc UpdateTaskStatus(UpdateTaskStatusRequest) returns (UpdateTaskStatusResponse) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+
+	// Tasks is a stream of task state for a node. Each message contains the
+	// full list of tasks that should be run on the node; if a task is not
+	// present in that list, it should be terminated.
+	rpc Tasks(TasksRequest) returns (stream TasksMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+		option deprecated = true;
+	};
+
+	// Assignments is a stream of assignments such as tasks and secrets for a
+	// node. The first message in the stream contains all of the tasks and
+	// secrets that are relevant to the node. Future messages in the stream are
+	// updates to the set of assignments.
+	rpc Assignments(AssignmentsRequest) returns (stream AssignmentsMessage) {
+		option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" };
+	};
+}
+
+// SessionRequest starts a session.
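+// As a rough illustration of the flow above, an agent-side sketch in Go,
+// assuming the generated api.DispatcherClient from this package and a
+// populated NodeDescription (illustrative only, not part of the contract):
+//
+//	stream, err := client.Session(ctx, &api.SessionRequest{Description: desc})
+//	if err != nil {
+//		return err
+//	}
+//	msg, err := stream.Recv() // the first message carries the assigned SessionID
+//	if err != nil {
+//		return err
+//	}
+//	for {
+//		resp, err := client.Heartbeat(ctx, &api.HeartbeatRequest{SessionID: msg.SessionID})
+//		if err != nil {
+//			return err // session lost; register again
+//		}
+//		time.Sleep(resp.Period) // send the next beat before now + TTL
+//	}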
+message SessionRequest {
+	NodeDescription description = 1;
+	// SessionID can be provided to attempt resuming an existing session. If the
+	// SessionID is empty or invalid, a new SessionID will be assigned.
+	//
+	// See SessionMessage.SessionID for details.
+	string session_id = 2;
+}
+
+// SessionMessage instructs an agent on various actions as part of the current
+// session. An agent should act immediately on the contents.
+message SessionMessage {
+	// SessionID is allocated after a successful registration. It should be
+	// used on all RPC calls after registration. A dispatcher may choose to
+	// change the SessionID, at which time an agent must re-register and obtain
+	// a new one.
+	//
+	// All Dispatcher calls after registration should include the SessionID. If
+	// the Dispatcher so chooses, it may reject the call with an InvalidArgument
+	// error code, at which time the agent should call Register to start a new
+	// session.
+	//
+	// As a rule, once an agent has a SessionID, it should never save it to
+	// disk or otherwise try to reuse it. If the agent loses its SessionID, it
+	// must start a new session through a call to Register. A Dispatcher may
+	// choose to reuse the SessionID, if it sees fit, but it is not advised.
+	//
+	// The actual implementation of the SessionID is Dispatcher-specific and
+	// should be treated as opaque by agents.
+	//
+	// From a Dispatcher perspective, there are many ways to use the SessionID
+	// to ensure uniqueness of a set of client RPC calls. One method is to keep
+	// the SessionID unique to every call to Register in a single Dispatcher
+	// instance. This ensures that the SessionID represents the unique
+	// session from a single Agent to a Manager. If the Agent restarts, we
+	// allocate a new session, since the restarted Agent is not aware of the
+	// new SessionID.
+	//
+	// The most compelling use case is to support duplicate node detection. If
+	// one clones a virtual machine, including certificate material, two nodes
+	// may end up with the same identity. This can also happen if two identical
+	// agent processes are coming from the same node. If the SessionID is
+	// replicated through the cluster, we can immediately detect the condition
+	// and address it.
+	//
+	// Extending from the case above, we can actually detect a compromised
+	// identity. Coupled with provisions to rebuild node identity, we can ban
+	// the compromised node identity and have the nodes re-authenticate and
+	// build a new identity. At this time, an administrator can then
+	// re-authorize the compromised nodes, if it was a mistake, or ensure that
+	// a misbehaved node can no longer connect to the cluster.
+	//
+	// We considered placing this field in a gRPC header. Because this is a
+	// critical feature of the protocol, we thought it should be represented
+	// directly in the RPC message set.
+	string session_id = 1;
+
+	// Node identifies the registering node.
+	Node node = 2;
+
+	// Managers provides a weighted list of alternative dispatchers.
+	repeated WeightedPeer managers = 3;
+
+	// Symmetric encryption key distributed by the lead manager. Used by agents
+	// for securing network bootstrapping and communication.
+	repeated EncryptionKey network_bootstrap_keys = 4;
+
+	// Which root certificates to trust.
+	bytes RootCA = 5;
+}
+
+// HeartbeatRequest provides identifying properties for a single heartbeat.
+message HeartbeatRequest { + string session_id = 1; +} + +message HeartbeatResponse { + // Period is the duration to wait before sending the next heartbeat. + // Well-behaved agents should update this on every heartbeat round trip. + google.protobuf.Duration period = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false]; +} + +message UpdateTaskStatusRequest { + // Tasks should contain all statuses for running tasks. Only the status + // field must be set. The spec is not required. + string session_id = 1; + + message TaskStatusUpdate { + string task_id = 1; + TaskStatus status = 2; + } + + repeated TaskStatusUpdate updates = 3; +} + +message UpdateTaskStatusResponse{ + // void +} + +message TasksRequest { + string session_id = 1; +} + +message TasksMessage { + // Tasks is the set of tasks that should be running on the node. + // Tasks outside of this set running on the node should be terminated. + repeated Task tasks = 1; +} + +message AssignmentsRequest { + string session_id = 1; +} + +message Assignment { + oneof item { + Task task = 1; + Secret secret = 2; + Config config = 3; + } +} + +message AssignmentChange { + enum AssignmentAction { + UPDATE = 0 [(gogoproto.enumvalue_customname) = "AssignmentActionUpdate"]; + REMOVE = 1 [(gogoproto.enumvalue_customname) = "AssignmentActionRemove"]; + } + + Assignment assignment = 1; + AssignmentAction action = 2; +} + +message AssignmentsMessage { + // AssignmentType specifies whether this assignment message carries + // the full state, or is an update to an existing state. + enum Type { + COMPLETE = 0; + INCREMENTAL = 1; + } + + Type type = 1; + + // AppliesTo references the previous ResultsIn value, to chain + // incremental updates together. For the first update in a stream, + // AppliesTo is empty. If AppliesTo does not match the previously + // received ResultsIn, the consumer of the stream should start a new + // Assignments stream to re-sync. + string applies_to = 2; + + // ResultsIn identifies the result of this assignments message, to + // match against the next message's AppliesTo value and protect + // against missed messages. + string results_in = 3; + + // AssignmentChange is a set of changes to apply on this node. + repeated AssignmentChange changes = 4; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/equality/equality.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/equality/equality.go new file mode 100644 index 00000000..522c7198 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/equality/equality.go @@ -0,0 +1,67 @@ +package equality + +import ( + "crypto/subtle" + "reflect" + + "github.com/docker/swarmkit/api" +) + +// TasksEqualStable returns true if the tasks are functionally equal, ignoring status, +// version and other superfluous fields. +// +// This used to decide whether or not to propagate a task update to a controller. +func TasksEqualStable(a, b *api.Task) bool { + // shallow copy + copyA, copyB := *a, *b + + copyA.Status, copyB.Status = api.TaskStatus{}, api.TaskStatus{} + copyA.Meta, copyB.Meta = api.Meta{}, api.Meta{} + + return reflect.DeepEqual(©A, ©B) +} + +// TaskStatusesEqualStable compares the task status excluding timestamp fields. 
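+// (A consumer of the Assignments stream declared in dispatcher.proto above
+// chains messages by matching AppliesTo against the last ResultsIn. A minimal
+// Go sketch, assuming hypothetical reset, apply, and resync helpers:
+//
+//	var last string
+//	for {
+//		msg, err := stream.Recv()
+//		if err != nil {
+//			return err
+//		}
+//		if msg.Type == api.AssignmentsMessage_COMPLETE {
+//			reset() // hypothetical: drop all local assignment state
+//		} else if msg.AppliesTo != last {
+//			return resync() // missed an update; restart the stream
+//		}
+//		apply(msg.Changes) // hypothetical: apply each AssignmentChange
+//		last = msg.ResultsIn
+//	}
+//
+// The function below applies the same copy-zero-compare technique as
+// TasksEqualStable.)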
+func TaskStatusesEqualStable(a, b *api.TaskStatus) bool { + copyA, copyB := *a, *b + + copyA.Timestamp, copyB.Timestamp = nil, nil + return reflect.DeepEqual(©A, ©B) +} + +// RootCAEqualStable compares RootCAs, excluding join tokens, which are randomly generated +func RootCAEqualStable(a, b *api.RootCA) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + + var aRotationKey, bRotationKey []byte + if a.RootRotation != nil { + aRotationKey = a.RootRotation.CAKey + } + if b.RootRotation != nil { + bRotationKey = b.RootRotation.CAKey + } + if subtle.ConstantTimeCompare(a.CAKey, b.CAKey) != 1 || subtle.ConstantTimeCompare(aRotationKey, bRotationKey) != 1 { + return false + } + + copyA, copyB := *a, *b + copyA.JoinTokens, copyB.JoinTokens = api.JoinTokens{}, api.JoinTokens{} + return reflect.DeepEqual(copyA, copyB) +} + +// ExternalCAsEqualStable compares lists of external CAs and determines whether they are equal. +func ExternalCAsEqualStable(a, b []*api.ExternalCA) bool { + // because DeepEqual will treat an empty list and a nil list differently, we want to manually check this first + if len(a) == 0 && len(b) == 0 { + return true + } + // The assumption is that each individual api.ExternalCA within both lists are created from deserializing from a + // protobuf, so no special affordances are made to treat a nil map and empty map in the Options field of an + // api.ExternalCA as equivalent. + return reflect.DeepEqual(a, b) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/helpers.go new file mode 100644 index 00000000..350ab730 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/helpers.go @@ -0,0 +1,111 @@ +package genericresource + +import ( + "github.com/docker/swarmkit/api" +) + +// NewSet creates a set object +func NewSet(key string, vals ...string) []*api.GenericResource { + rs := make([]*api.GenericResource, 0, len(vals)) + + for _, v := range vals { + rs = append(rs, NewString(key, v)) + } + + return rs +} + +// NewString creates a String resource +func NewString(key, val string) *api.GenericResource { + return &api.GenericResource{ + Resource: &api.GenericResource_NamedResourceSpec{ + NamedResourceSpec: &api.NamedGenericResource{ + Kind: key, + Value: val, + }, + }, + } +} + +// NewDiscrete creates a Discrete resource +func NewDiscrete(key string, val int64) *api.GenericResource { + return &api.GenericResource{ + Resource: &api.GenericResource_DiscreteResourceSpec{ + DiscreteResourceSpec: &api.DiscreteGenericResource{ + Kind: key, + Value: val, + }, + }, + } +} + +// GetResource returns resources from the "resources" parameter matching the kind key +func GetResource(kind string, resources []*api.GenericResource) []*api.GenericResource { + var res []*api.GenericResource + + for _, r := range resources { + if Kind(r) != kind { + continue + } + + res = append(res, r) + } + + return res +} + +// ConsumeNodeResources removes "res" from nodeAvailableResources +func ConsumeNodeResources(nodeAvailableResources *[]*api.GenericResource, res []*api.GenericResource) { + if nodeAvailableResources == nil { + return + } + + w := 0 + +loop: + for _, na := range *nodeAvailableResources { + for _, r := range res { + if Kind(na) != Kind(r) { + continue + } + + if remove(na, r) { + continue loop + } + // If this wasn't the right element then + // we need 
to continue + } + + (*nodeAvailableResources)[w] = na + w++ + } + + *nodeAvailableResources = (*nodeAvailableResources)[:w] +} + +// Returns true if the element is to be removed from the list +func remove(na, r *api.GenericResource) bool { + switch tr := r.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if na.GetDiscreteResourceSpec() == nil { + return false // Type change, ignore + } + + na.GetDiscreteResourceSpec().Value -= tr.DiscreteResourceSpec.Value + if na.GetDiscreteResourceSpec().Value <= 0 { + return true + } + case *api.GenericResource_NamedResourceSpec: + if na.GetNamedResourceSpec() == nil { + return false // Type change, ignore + } + + if tr.NamedResourceSpec.Value != na.GetNamedResourceSpec().Value { + return false // not the right item, ignore + } + + return true + } + + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/parse.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/parse.go new file mode 100644 index 00000000..f39a7077 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/parse.go @@ -0,0 +1,111 @@ +package genericresource + +import ( + "encoding/csv" + "fmt" + "strconv" + "strings" + + "github.com/docker/swarmkit/api" +) + +func newParseError(format string, args ...interface{}) error { + return fmt.Errorf("could not parse GenericResource: "+format, args...) +} + +// discreteResourceVal returns an int64 if the string is a discreteResource +// and an error if it isn't +func discreteResourceVal(res string) (int64, error) { + return strconv.ParseInt(res, 10, 64) +} + +// allNamedResources returns true if the array of resources are all namedResources +// e.g: res = [red, orange, green] +func allNamedResources(res []string) bool { + for _, v := range res { + if _, err := discreteResourceVal(v); err == nil { + return false + } + } + + return true +} + +// ParseCmd parses the Generic Resource command line argument +// and returns a list of *api.GenericResource +func ParseCmd(cmd string) ([]*api.GenericResource, error) { + if strings.Contains(cmd, "\n") { + return nil, newParseError("unexpected '\\n' character") + } + + r := csv.NewReader(strings.NewReader(cmd)) + records, err := r.ReadAll() + + if err != nil { + return nil, newParseError("%v", err) + } + + if len(records) != 1 { + return nil, newParseError("found multiple records while parsing cmd %v", records) + } + + return Parse(records[0]) +} + +// Parse parses a table of GenericResource resources +func Parse(cmds []string) ([]*api.GenericResource, error) { + tokens := make(map[string][]string) + + for _, term := range cmds { + kva := strings.Split(term, "=") + if len(kva) != 2 { + return nil, newParseError("incorrect term %s, missing"+ + " '=' or malformed expression", term) + } + + key := strings.TrimSpace(kva[0]) + val := strings.TrimSpace(kva[1]) + + tokens[key] = append(tokens[key], val) + } + + var rs []*api.GenericResource + for k, v := range tokens { + if u, ok := isDiscreteResource(v); ok { + if u < 0 { + return nil, newParseError("cannot ask for"+ + " negative resource %s", k) + } + + rs = append(rs, NewDiscrete(k, u)) + continue + } + + if allNamedResources(v) { + rs = append(rs, NewSet(k, v...)...) + continue + } + + return nil, newParseError("mixed discrete and named resources"+ + " in expression '%s=%s'", k, v) + } + + return rs, nil +} + +// isDiscreteResource returns true if the array of resources is a +// Discrete Resource. 
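+// (A usage sketch for ParseCmd above; the kinds and values are made up:
+//
+//	res, err := genericresource.ParseCmd("gpu=2,fpga=f1,fpga=f2")
+//	// err == nil; res holds one discrete resource {Kind: "gpu", Value: 2}
+//	// and two named resources {Kind: "fpga", Value: "f1"} and
+//	// {Kind: "fpga", Value: "f2"}, in unspecified order.
+//
+// For isDiscreteResource itself:)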
+// e.g: res = [1] +func isDiscreteResource(values []string) (int64, bool) { + if len(values) != 1 { + return int64(0), false + } + + u, err := discreteResourceVal(values[0]) + if err != nil { + return int64(0), false + } + + return u, true + +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/resource_management.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/resource_management.go new file mode 100644 index 00000000..506257ab --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/resource_management.go @@ -0,0 +1,203 @@ +package genericresource + +import ( + "fmt" + + "github.com/docker/swarmkit/api" +) + +// Claim assigns GenericResources to a task by taking them from the +// node's GenericResource list and storing them in the task's available list +func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource, + taskReservations []*api.GenericResource) error { + var resSelected []*api.GenericResource + + for _, res := range taskReservations { + tr := res.GetDiscreteResourceSpec() + if tr == nil { + return fmt.Errorf("task should only hold Discrete type") + } + + // Select the resources + nrs, err := selectNodeResources(*nodeAvailableResources, tr) + if err != nil { + return err + } + + resSelected = append(resSelected, nrs...) + } + + ClaimResources(nodeAvailableResources, taskAssigned, resSelected) + return nil +} + +// ClaimResources adds the specified resources to the task's list +// and removes them from the node's generic resource list +func ClaimResources(nodeAvailableResources, taskAssigned *[]*api.GenericResource, + resSelected []*api.GenericResource) { + *taskAssigned = append(*taskAssigned, resSelected...) 
+ ConsumeNodeResources(nodeAvailableResources, resSelected) +} + +func selectNodeResources(nodeRes []*api.GenericResource, + tr *api.DiscreteGenericResource) ([]*api.GenericResource, error) { + var nrs []*api.GenericResource + + for _, res := range nodeRes { + if Kind(res) != tr.Kind { + continue + } + + switch nr := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if nr.DiscreteResourceSpec.Value >= tr.Value && tr.Value != 0 { + nrs = append(nrs, NewDiscrete(tr.Kind, tr.Value)) + } + + return nrs, nil + case *api.GenericResource_NamedResourceSpec: + nrs = append(nrs, res.Copy()) + + if int64(len(nrs)) == tr.Value { + return nrs, nil + } + } + } + + if len(nrs) == 0 { + return nil, fmt.Errorf("not enough resources available for task reservations: %+v", tr) + } + + return nrs, nil +} + +// Reclaim adds the resources taken by the task to the node's store +func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeRes []*api.GenericResource) error { + err := reclaimResources(nodeAvailableResources, taskAssigned) + if err != nil { + return err + } + + sanitize(nodeRes, nodeAvailableResources) + + return nil +} + +func reclaimResources(nodeAvailableResources *[]*api.GenericResource, taskAssigned []*api.GenericResource) error { + // The node could have been updated + if nodeAvailableResources == nil { + return fmt.Errorf("node no longer has any resources") + } + + for _, res := range taskAssigned { + switch tr := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + nrs := GetResource(tr.DiscreteResourceSpec.Kind, *nodeAvailableResources) + + // If the resource went down to 0 it's no longer in the + // available list + if len(nrs) == 0 { + *nodeAvailableResources = append(*nodeAvailableResources, res.Copy()) + } + + if len(nrs) != 1 { + continue // Type change + } + + nr := nrs[0].GetDiscreteResourceSpec() + if nr == nil { + continue // Type change + } + + nr.Value += tr.DiscreteResourceSpec.Value + case *api.GenericResource_NamedResourceSpec: + *nodeAvailableResources = append(*nodeAvailableResources, res.Copy()) + } + } + + return nil +} + +// sanitize checks that nodeAvailableResources does not add resources unknown +// to the nodeSpec (nodeRes) or goes over the integer bound specified +// by the spec. +// Note this is because the user is able to update a node's resources +func sanitize(nodeRes []*api.GenericResource, nodeAvailableResources *[]*api.GenericResource) { + // - We add the sanitized resources at the end, after + // having removed the elements from the list + + // - When a set changes to a Discrete we also need + // to make sure that we don't add the Discrete multiple + // time hence, the need of a map to remember that + var sanitized []*api.GenericResource + kindSanitized := make(map[string]struct{}) + w := 0 + + for _, na := range *nodeAvailableResources { + ok, nrs := sanitizeResource(nodeRes, na) + if !ok { + if _, ok = kindSanitized[Kind(na)]; ok { + continue + } + + kindSanitized[Kind(na)] = struct{}{} + sanitized = append(sanitized, nrs...) + + continue + } + + (*nodeAvailableResources)[w] = na + w++ + } + + *nodeAvailableResources = (*nodeAvailableResources)[:w] + *nodeAvailableResources = append(*nodeAvailableResources, sanitized...) 
+} + +// Returns true if the element is in nodeRes and "sane" +// Returns false if the element isn't in nodeRes and "sane" and the element(s) that should be replacing it +func sanitizeResource(nodeRes []*api.GenericResource, res *api.GenericResource) (ok bool, nrs []*api.GenericResource) { + switch na := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + nrs := GetResource(na.DiscreteResourceSpec.Kind, nodeRes) + + // Type change or removed: reset + if len(nrs) != 1 { + return false, nrs + } + + // Type change: reset + nr := nrs[0].GetDiscreteResourceSpec() + if nr == nil { + return false, nrs + } + + // Amount change: reset + if na.DiscreteResourceSpec.Value > nr.Value { + return false, nrs + } + case *api.GenericResource_NamedResourceSpec: + nrs := GetResource(na.NamedResourceSpec.Kind, nodeRes) + + // Type change + if len(nrs) == 0 { + return false, nrs + } + + for _, nr := range nrs { + // Type change: reset + if nr.GetDiscreteResourceSpec() != nil { + return false, nrs + } + + if na.NamedResourceSpec.Value == nr.GetNamedResourceSpec().Value { + return true, nil + } + } + + // Removed + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/string.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/string.go new file mode 100644 index 00000000..5e388beb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/string.go @@ -0,0 +1,54 @@ +package genericresource + +import ( + "strconv" + "strings" + + "github.com/docker/swarmkit/api" +) + +func discreteToString(d *api.GenericResource_DiscreteResourceSpec) string { + return strconv.FormatInt(d.DiscreteResourceSpec.Value, 10) +} + +// Kind returns the kind key as a string +func Kind(res *api.GenericResource) string { + switch r := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + return r.DiscreteResourceSpec.Kind + case *api.GenericResource_NamedResourceSpec: + return r.NamedResourceSpec.Kind + } + + return "" +} + +// Value returns the value key as a string +func Value(res *api.GenericResource) string { + switch res := res.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + return discreteToString(res) + case *api.GenericResource_NamedResourceSpec: + return res.NamedResourceSpec.Value + } + + return "" +} + +// EnvFormat returns the environment string version of the resource +func EnvFormat(res []*api.GenericResource, prefix string) []string { + envs := make(map[string][]string) + for _, v := range res { + key := Kind(v) + val := Value(v) + envs[key] = append(envs[key], val) + } + + env := make([]string, 0, len(res)) + for k, v := range envs { + k = strings.ToUpper(prefix + "_" + k) + env = append(env, k+"="+strings.Join(v, ",")) + } + + return env +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/validate.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/validate.go new file mode 100644 index 00000000..0ad49ff7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/genericresource/validate.go @@ -0,0 +1,85 @@ +package genericresource + +import ( + "fmt" + + "github.com/docker/swarmkit/api" +) + +// ValidateTask validates that the task only uses integers +// for generic resources +func ValidateTask(resources *api.Resources) error { + for _, v := range resources.Generic { + 
if v.GetDiscreteResourceSpec() != nil { + continue + } + + return fmt.Errorf("invalid argument for resource %s", Kind(v)) + } + + return nil +} + +// HasEnough returns true if node can satisfy the task's GenericResource request +func HasEnough(nodeRes []*api.GenericResource, taskRes *api.GenericResource) (bool, error) { + t := taskRes.GetDiscreteResourceSpec() + if t == nil { + return false, fmt.Errorf("task should only hold Discrete type") + } + + if nodeRes == nil { + return false, nil + } + + nrs := GetResource(t.Kind, nodeRes) + if len(nrs) == 0 { + return false, nil + } + + switch nr := nrs[0].Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if t.Value > nr.DiscreteResourceSpec.Value { + return false, nil + } + case *api.GenericResource_NamedResourceSpec: + if t.Value > int64(len(nrs)) { + return false, nil + } + } + + return true, nil +} + +// HasResource checks if there is enough "res" in the "resources" argument +func HasResource(res *api.GenericResource, resources []*api.GenericResource) bool { + for _, r := range resources { + if Kind(res) != Kind(r) { + continue + } + + switch rtype := r.Resource.(type) { + case *api.GenericResource_DiscreteResourceSpec: + if res.GetDiscreteResourceSpec() == nil { + return false + } + + if res.GetDiscreteResourceSpec().Value < rtype.DiscreteResourceSpec.Value { + return false + } + + return true + case *api.GenericResource_NamedResourceSpec: + if res.GetNamedResourceSpec() == nil { + return false + } + + if res.GetNamedResourceSpec().Value != rtype.NamedResourceSpec.Value { + continue + } + + return true + } + } + + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/health.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/health.pb.go new file mode 100644 index 00000000..453e01fc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/health.pb.go @@ -0,0 +1,703 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/health.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptorHealth, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{0} } + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,proto3,enum=docker.swarmkit.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { return fileDescriptorHealth, []int{1} } + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "docker.swarmkit.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "docker.swarmkit.v1.HealthCheckResponse") + proto.RegisterEnum("docker.swarmkit.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +type authenticatedWrapperHealthServer struct { + local HealthServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperHealthServer(local HealthServer, authorize func(context.Context, []string) error) HealthServer { + return &authenticatedWrapperHealthServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Check(ctx, r) +} + +func (m *HealthCheckRequest) Copy() *HealthCheckRequest { + if m == nil { + return nil + } + o := &HealthCheckRequest{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckRequest) CopyFrom(src interface{}) { + + o := src.(*HealthCheckRequest) + *m = *o +} + +func (m *HealthCheckResponse) Copy() *HealthCheckResponse { + if m == nil { + return nil + } + o := &HealthCheckResponse{} + o.CopyFrom(m) + return o +} + +func (m *HealthCheckResponse) CopyFrom(src interface{}) { + + o := src.(*HealthCheckResponse) + *m = *o +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/health.proto", +} + +func (m *HealthCheckRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Service) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintHealth(dAtA, i, uint64(len(m.Service))) + i += copy(dAtA[i:], m.Service) + } + return i, nil +} + +func (m *HealthCheckResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthCheckResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Status != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintHealth(dAtA, i, uint64(m.Status)) + } + return i, nil +} + +func encodeVarintHealth(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyHealthServer struct { + local HealthServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyHealthServer(local HealthServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) HealthServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote 
addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyHealthServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyHealthServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyHealthServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyHealthServer) Check(ctx context.Context, r *HealthCheckRequest) (*HealthCheckResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Check(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewHealthClient(conn).Check(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Check(ctx, r) + } + return nil, err + } + return NewHealthClient(conn).Check(modCtx, r) + } + return resp, err +} + +func (m *HealthCheckRequest) Size() (n int) { + var l int + _ = l + l = len(m.Service) + if l > 0 { + n += 1 + l + sovHealth(uint64(l)) + } + return n +} + +func (m *HealthCheckResponse) Size() (n int) { + var l int + _ = l + if m.Status != 0 { + n += 1 + sovHealth(uint64(m.Status)) + } + return n +} + +func sovHealth(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozHealth(x uint64) (n int) { + return sovHealth(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HealthCheckRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthCheckRequest{`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `}`, + }, "") + return s +} +func (this *HealthCheckResponse) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&HealthCheckResponse{`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `}`, + }, "") + return s +} +func valueToStringHealth(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HealthCheckRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHealth + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HealthCheckResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HealthCheckResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HealthCheckResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + m.Status = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHealth + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Status |= (HealthCheckResponse_ServingStatus(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipHealth(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthHealth + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHealth(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthHealth + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHealth + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipHealth(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthHealth = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHealth = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/health.proto", fileDescriptorHealth) } + +var fileDescriptorHealth = []byte{ + // 315 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4e, 0xcf, 0x2c, 0xc9, + 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xc9, 0x4f, 0xce, 0x4e, 0x2d, 0xd2, 0x2f, 0x2e, + 0x4f, 0x2c, 0xca, 0xcd, 0xce, 0x2c, 0xd1, 0x4f, 0x2c, 0xc8, 0xd4, 0xcf, 0x48, 0x4d, 0xcc, 0x29, + 0xc9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x82, 0xa8, 0xd0, 0x83, 0xa9, 0xd0, 0x2b, + 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x52, 0xe6, + 0x78, 0x8c, 0x05, 0xab, 0x48, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x83, 0x52, + 0x10, 0x8d, 0x4a, 0x7a, 0x5c, 0x42, 0x1e, 0x60, 0x2b, 0x9d, 0x33, 0x52, 0x93, 0xb3, 0x83, 0x52, + 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x8b, 0xca, 0x32, 0x93, 0x53, + 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x60, 0x5c, 0xa5, 0x05, 0x8c, 0x5c, 0xc2, 0x28, 0x1a, + 0x8a, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x7c, 0xb9, 0xd8, 0x8a, 0x4b, 0x12, 0x4b, 0x4a, 0x8b, + 0xc1, 0x1a, 0xf8, 0x8c, 0x4c, 0xf5, 0x30, 0xdd, 0xae, 0x87, 0x45, 0xa3, 0x5e, 0x30, 0xc8, 0xe0, + 0xbc, 0xf4, 0x60, 0xb0, 0xe6, 0x20, 0xa8, 0x21, 0x4a, 0x56, 0x5c, 0xbc, 0x28, 0x12, 0x42, 0xdc, + 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, 0x0c, 0x20, 0x4e, 0xb0, 0x6b, 0x50, + 0x98, 0xa7, 0x9f, 0xbb, 0x00, 0xa3, 0x10, 0x3f, 0x17, 0xb7, 0x9f, 0x7f, 0x48, 0x3c, 0x4c, 0x80, + 0xc9, 0xa8, 0x92, 0x8b, 0x0d, 0x62, 0x91, 0x50, 0x3e, 0x17, 0x2b, 0xd8, 0x32, 0x21, 0x35, 0x82, + 0xae, 0x01, 0xfb, 0x5b, 0x4a, 0x9d, 0x48, 0x57, 0x2b, 0x89, 0x9e, 0x5a, 0xf7, 0x6e, 0x06, 0x13, + 0x3f, 0x17, 0x2f, 0x58, 0xa1, 0x6e, 
0x6e, 0x62, 0x5e, 0x62, 0x7a, 0x6a, 0x91, 0x93, 0xc4, 0x89, + 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, + 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x93, 0xd8, 0xc0, 0xc1, 0x6d, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x7b, 0xf2, 0xdd, 0x23, 0x00, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/health.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/health.proto new file mode 100644 index 00000000..8e066c0f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/health.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +// See: https://github.com/grpc/grpc-go/blob/master/health/grpc_health_v1/health.proto +// +// We use the same health check service proto description defined in the gRPC documentation, +// including the authorization check. This requires our own implementation of the health +// package located in `manager/health`. +// +// For more info, refer to: +// https://github.com/grpc/grpc/blob/master/doc/health-checking.md + +package docker.swarmkit.v1; + +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +service Health { + rpc Check(HealthCheckRequest) returns (HealthCheckResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/logbroker.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/logbroker.pb.go new file mode 100644 index 00000000..5456c858 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/logbroker.pb.go @@ -0,0 +1,3400 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/logbroker.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// LogStream defines the stream from which the log message came.
+type LogStream int32 + +const ( + LogStreamUnknown LogStream = 0 + LogStreamStdout LogStream = 1 + LogStreamStderr LogStream = 2 +) + +var LogStream_name = map[int32]string{ + 0: "LOG_STREAM_UNKNOWN", + 1: "LOG_STREAM_STDOUT", + 2: "LOG_STREAM_STDERR", +} +var LogStream_value = map[string]int32{ + "LOG_STREAM_UNKNOWN": 0, + "LOG_STREAM_STDOUT": 1, + "LOG_STREAM_STDERR": 2, +} + +func (x LogStream) String() string { + return proto.EnumName(LogStream_name, int32(x)) +} +func (LogStream) EnumDescriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +type LogSubscriptionOptions struct { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. + Streams []LogStream `protobuf:"varint,1,rep,name=streams,enum=docker.swarmkit.v1.LogStream" json:"streams,omitempty"` + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + Follow bool `protobuf:"varint,2,opt,name=follow,proto3" json:"follow,omitempty"` + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. We can say that the last (-n-1) lines are returned when n + // < 0. As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + Tail int64 `protobuf:"varint,3,opt,name=tail,proto3" json:"tail,omitempty"` + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + Since *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=since" json:"since,omitempty"` +} + +func (m *LogSubscriptionOptions) Reset() { *m = LogSubscriptionOptions{} } +func (*LogSubscriptionOptions) ProtoMessage() {} +func (*LogSubscriptionOptions) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{0} } + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +type LogSelector struct { + ServiceIDs []string `protobuf:"bytes,1,rep,name=service_ids,json=serviceIds" json:"service_ids,omitempty"` + NodeIDs []string `protobuf:"bytes,2,rep,name=node_ids,json=nodeIds" json:"node_ids,omitempty"` + TaskIDs []string `protobuf:"bytes,3,rep,name=task_ids,json=taskIds" json:"task_ids,omitempty"` +} + +func (m *LogSelector) Reset() { *m = LogSelector{} } +func (*LogSelector) ProtoMessage() {} +func (*LogSelector) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{1} } + +// LogContext marks the context from which a log message was generated. 
+type LogContext struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + TaskID string `protobuf:"bytes,3,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` +} + +func (m *LogContext) Reset() { *m = LogContext{} } +func (*LogContext) ProtoMessage() {} +func (*LogContext) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{2} } + +// LogAttr is an extra key/value pair that may have been set by users +type LogAttr struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *LogAttr) Reset() { *m = LogAttr{} } +func (*LogAttr) ProtoMessage() {} +func (*LogAttr) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{3} } + +// LogMessage +type LogMessage struct { + // Context identifies the source of the log message. + Context LogContext `protobuf:"bytes,1,opt,name=context" json:"context"` + // Timestamp is the time at which the message was generated. + // Note: can't use stdtime because this field is nullable. + Timestamp *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"` + // Stream identifies the stream of the log message, stdout or stderr. + Stream LogStream `protobuf:"varint,3,opt,name=stream,proto3,enum=docker.swarmkit.v1.LogStream" json:"stream,omitempty"` + // Data is the raw log message, as generated by the application. + Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` + // Attrs is a list of key value pairs representing additional log details + // that may have been returned from the logger + Attrs []LogAttr `protobuf:"bytes,5,rep,name=attrs" json:"attrs"` +} + +func (m *LogMessage) Reset() { *m = LogMessage{} } +func (*LogMessage) ProtoMessage() {} +func (*LogMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{4} } + +type SubscribeLogsRequest struct { + // LogSelector describes the logs to which the subscriber is subscribed. + Selector *LogSelector `protobuf:"bytes,1,opt,name=selector" json:"selector,omitempty"` + Options *LogSubscriptionOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` +} + +func (m *SubscribeLogsRequest) Reset() { *m = SubscribeLogsRequest{} } +func (*SubscribeLogsRequest) ProtoMessage() {} +func (*SubscribeLogsRequest) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{5} } + +type SubscribeLogsMessage struct { + Messages []LogMessage `protobuf:"bytes,1,rep,name=messages" json:"messages"` +} + +func (m *SubscribeLogsMessage) Reset() { *m = SubscribeLogsMessage{} } +func (*SubscribeLogsMessage) ProtoMessage() {} +func (*SubscribeLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{6} } + +// ListenSubscriptionsRequest is a placeholder to begin listening for +// subscriptions. +type ListenSubscriptionsRequest struct { +} + +func (m *ListenSubscriptionsRequest) Reset() { *m = ListenSubscriptionsRequest{} } +func (*ListenSubscriptionsRequest) ProtoMessage() {} +func (*ListenSubscriptionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptorLogbroker, []int{7} +} + +// SubscriptionMessage instructs the listener to start publishing messages for +// the stream or end a subscription. +// +// If Options.Follow == false, the worker should end the subscription on its own.
+type SubscriptionMessage struct { + // ID identifies the subscription. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Selector defines which sources should be sent for the subscription. + Selector *LogSelector `protobuf:"bytes,2,opt,name=selector" json:"selector,omitempty"` + // Options specify how the subscription should be satisfied. + Options *LogSubscriptionOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Close will be true if the node should shutdown the subscription with the + // provided identifier. + Close bool `protobuf:"varint,4,opt,name=close,proto3" json:"close,omitempty"` +} + +func (m *SubscriptionMessage) Reset() { *m = SubscriptionMessage{} } +func (*SubscriptionMessage) ProtoMessage() {} +func (*SubscriptionMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{8} } + +type PublishLogsMessage struct { + // SubscriptionID identifies which subscription the set of messages should + // be sent to. We can think of this as a "mail box" for the subscription. + SubscriptionID string `protobuf:"bytes,1,opt,name=subscription_id,json=subscriptionId,proto3" json:"subscription_id,omitempty"` + // Messages is the log message for publishing. + Messages []LogMessage `protobuf:"bytes,2,rep,name=messages" json:"messages"` + // Close is a boolean for whether or not the client has completed its log + // stream. When close is called, the manager can hang up the subscription. + // Any further logs from this subscription are an error condition. Any + // messages included when close is set can be discarded + Close bool `protobuf:"varint,3,opt,name=close,proto3" json:"close,omitempty"` +} + +func (m *PublishLogsMessage) Reset() { *m = PublishLogsMessage{} } +func (*PublishLogsMessage) ProtoMessage() {} +func (*PublishLogsMessage) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{9} } + +type PublishLogsResponse struct { +} + +func (m *PublishLogsResponse) Reset() { *m = PublishLogsResponse{} } +func (*PublishLogsResponse) ProtoMessage() {} +func (*PublishLogsResponse) Descriptor() ([]byte, []int) { return fileDescriptorLogbroker, []int{10} } + +func init() { + proto.RegisterType((*LogSubscriptionOptions)(nil), "docker.swarmkit.v1.LogSubscriptionOptions") + proto.RegisterType((*LogSelector)(nil), "docker.swarmkit.v1.LogSelector") + proto.RegisterType((*LogContext)(nil), "docker.swarmkit.v1.LogContext") + proto.RegisterType((*LogAttr)(nil), "docker.swarmkit.v1.LogAttr") + proto.RegisterType((*LogMessage)(nil), "docker.swarmkit.v1.LogMessage") + proto.RegisterType((*SubscribeLogsRequest)(nil), "docker.swarmkit.v1.SubscribeLogsRequest") + proto.RegisterType((*SubscribeLogsMessage)(nil), "docker.swarmkit.v1.SubscribeLogsMessage") + proto.RegisterType((*ListenSubscriptionsRequest)(nil), "docker.swarmkit.v1.ListenSubscriptionsRequest") + proto.RegisterType((*SubscriptionMessage)(nil), "docker.swarmkit.v1.SubscriptionMessage") + proto.RegisterType((*PublishLogsMessage)(nil), "docker.swarmkit.v1.PublishLogsMessage") + proto.RegisterType((*PublishLogsResponse)(nil), "docker.swarmkit.v1.PublishLogsResponse") + proto.RegisterEnum("docker.swarmkit.v1.LogStream", LogStream_name, LogStream_value) +} + +type authenticatedWrapperLogsServer struct { + local LogsServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogsServer(local LogsServer, authorize func(context.Context, []string) error) LogsServer { + return &authenticatedWrapperLogsServer{ + local: local, + authorize: 
authorize, + } +} + +func (p *authenticatedWrapperLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.SubscribeLogs(r, stream) +} + +type authenticatedWrapperLogBrokerServer struct { + local LogBrokerServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperLogBrokerServer(local LogBrokerServer, authorize func(context.Context, []string) error) LogBrokerServer { + return &authenticatedWrapperLogBrokerServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.ListenSubscriptions(r, stream) +} + +func (p *authenticatedWrapperLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-worker", "swarm-manager"}); err != nil { + return err + } + return p.local.PublishLogs(stream) +} + +func (m *LogSubscriptionOptions) Copy() *LogSubscriptionOptions { + if m == nil { + return nil + } + o := &LogSubscriptionOptions{} + o.CopyFrom(m) + return o +} + +func (m *LogSubscriptionOptions) CopyFrom(src interface{}) { + + o := src.(*LogSubscriptionOptions) + *m = *o + if o.Streams != nil { + m.Streams = make([]LogStream, len(o.Streams)) + copy(m.Streams, o.Streams) + } + + if o.Since != nil { + m.Since = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Since, o.Since) + } +} + +func (m *LogSelector) Copy() *LogSelector { + if m == nil { + return nil + } + o := &LogSelector{} + o.CopyFrom(m) + return o +} + +func (m *LogSelector) CopyFrom(src interface{}) { + + o := src.(*LogSelector) + *m = *o + if o.ServiceIDs != nil { + m.ServiceIDs = make([]string, len(o.ServiceIDs)) + copy(m.ServiceIDs, o.ServiceIDs) + } + + if o.NodeIDs != nil { + m.NodeIDs = make([]string, len(o.NodeIDs)) + copy(m.NodeIDs, o.NodeIDs) + } + + if o.TaskIDs != nil { + m.TaskIDs = make([]string, len(o.TaskIDs)) + copy(m.TaskIDs, o.TaskIDs) + } + +} + +func (m *LogContext) Copy() *LogContext { + if m == nil { + return nil + } + o := &LogContext{} + o.CopyFrom(m) + return o +} + +func (m *LogContext) CopyFrom(src interface{}) { + + o := src.(*LogContext) + *m = *o +} + +func (m *LogAttr) Copy() *LogAttr { + if m == nil { + return nil + } + o := &LogAttr{} + o.CopyFrom(m) + return o +} + +func (m *LogAttr) CopyFrom(src interface{}) { + + o := src.(*LogAttr) + *m = *o +} + +func (m *LogMessage) Copy() *LogMessage { + if m == nil { + return nil + } + o := &LogMessage{} + o.CopyFrom(m) + return o +} + +func (m *LogMessage) CopyFrom(src interface{}) { + + o := src.(*LogMessage) + *m = *o + deepcopy.Copy(&m.Context, &o.Context) + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Attrs != nil { + m.Attrs = make([]LogAttr, len(o.Attrs)) + for i := range m.Attrs { + deepcopy.Copy(&m.Attrs[i], &o.Attrs[i]) + } + } + +} + +func (m *SubscribeLogsRequest) Copy() *SubscribeLogsRequest { + if m == nil { + return nil + } + o := &SubscribeLogsRequest{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsRequest) CopyFrom(src interface{}) { + + o := 
src.(*SubscribeLogsRequest) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *SubscribeLogsMessage) Copy() *SubscribeLogsMessage { + if m == nil { + return nil + } + o := &SubscribeLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscribeLogsMessage) CopyFrom(src interface{}) { + + o := src.(*SubscribeLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *ListenSubscriptionsRequest) Copy() *ListenSubscriptionsRequest { + if m == nil { + return nil + } + o := &ListenSubscriptionsRequest{} + o.CopyFrom(m) + return o +} + +func (m *ListenSubscriptionsRequest) CopyFrom(src interface{}) {} +func (m *SubscriptionMessage) Copy() *SubscriptionMessage { + if m == nil { + return nil + } + o := &SubscriptionMessage{} + o.CopyFrom(m) + return o +} + +func (m *SubscriptionMessage) CopyFrom(src interface{}) { + + o := src.(*SubscriptionMessage) + *m = *o + if o.Selector != nil { + m.Selector = &LogSelector{} + deepcopy.Copy(m.Selector, o.Selector) + } + if o.Options != nil { + m.Options = &LogSubscriptionOptions{} + deepcopy.Copy(m.Options, o.Options) + } +} + +func (m *PublishLogsMessage) Copy() *PublishLogsMessage { + if m == nil { + return nil + } + o := &PublishLogsMessage{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsMessage) CopyFrom(src interface{}) { + + o := src.(*PublishLogsMessage) + *m = *o + if o.Messages != nil { + m.Messages = make([]LogMessage, len(o.Messages)) + for i := range m.Messages { + deepcopy.Copy(&m.Messages[i], &o.Messages[i]) + } + } + +} + +func (m *PublishLogsResponse) Copy() *PublishLogsResponse { + if m == nil { + return nil + } + o := &PublishLogsResponse{} + o.CopyFrom(m) + return o +} + +func (m *PublishLogsResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Logs service + +type LogsClient interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) +} + +type logsClient struct { + cc *grpc.ClientConn +} + +func NewLogsClient(cc *grpc.ClientConn) LogsClient { + return &logsClient{cc} +} + +func (c *logsClient) SubscribeLogs(ctx context.Context, in *SubscribeLogsRequest, opts ...grpc.CallOption) (Logs_SubscribeLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Logs_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Logs/SubscribeLogs", opts...) 
+ if err != nil { + return nil, err + } + x := &logsSubscribeLogsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Logs_SubscribeLogsClient interface { + Recv() (*SubscribeLogsMessage, error) + grpc.ClientStream +} + +type logsSubscribeLogsClient struct { + grpc.ClientStream +} + +func (x *logsSubscribeLogsClient) Recv() (*SubscribeLogsMessage, error) { + m := new(SubscribeLogsMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Logs service + +type LogsServer interface { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + SubscribeLogs(*SubscribeLogsRequest, Logs_SubscribeLogsServer) error +} + +func RegisterLogsServer(s *grpc.Server, srv LogsServer) { + s.RegisterService(&_Logs_serviceDesc, srv) +} + +func _Logs_SubscribeLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(SubscribeLogsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogsServer).SubscribeLogs(m, &logsSubscribeLogsServer{stream}) +} + +type Logs_SubscribeLogsServer interface { + Send(*SubscribeLogsMessage) error + grpc.ServerStream +} + +type logsSubscribeLogsServer struct { + grpc.ServerStream +} + +func (x *logsSubscribeLogsServer) Send(m *SubscribeLogsMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Logs_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Logs", + HandlerType: (*LogsServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "SubscribeLogs", + Handler: _Logs_SubscribeLogs_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +// Client API for LogBroker service + +type LogBrokerClient interface { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) +} + +type logBrokerClient struct { + cc *grpc.ClientConn +} + +func NewLogBrokerClient(cc *grpc.ClientConn) LogBrokerClient { + return &logBrokerClient{cc} +} + +func (c *logBrokerClient) ListenSubscriptions(ctx context.Context, in *ListenSubscriptionsRequest, opts ...grpc.CallOption) (LogBroker_ListenSubscriptionsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.LogBroker/ListenSubscriptions", opts...) 
+ if err != nil { + return nil, err + } + x := &logBrokerListenSubscriptionsClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type LogBroker_ListenSubscriptionsClient interface { + Recv() (*SubscriptionMessage, error) + grpc.ClientStream +} + +type logBrokerListenSubscriptionsClient struct { + grpc.ClientStream +} + +func (x *logBrokerListenSubscriptionsClient) Recv() (*SubscriptionMessage, error) { + m := new(SubscriptionMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *logBrokerClient) PublishLogs(ctx context.Context, opts ...grpc.CallOption) (LogBroker_PublishLogsClient, error) { + stream, err := grpc.NewClientStream(ctx, &_LogBroker_serviceDesc.Streams[1], c.cc, "/docker.swarmkit.v1.LogBroker/PublishLogs", opts...) + if err != nil { + return nil, err + } + x := &logBrokerPublishLogsClient{stream} + return x, nil +} + +type LogBroker_PublishLogsClient interface { + Send(*PublishLogsMessage) error + CloseAndRecv() (*PublishLogsResponse, error) + grpc.ClientStream +} + +type logBrokerPublishLogsClient struct { + grpc.ClientStream +} + +func (x *logBrokerPublishLogsClient) Send(m *PublishLogsMessage) error { + return x.ClientStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsClient) CloseAndRecv() (*PublishLogsResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(PublishLogsResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for LogBroker service + +type LogBrokerServer interface { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs. + ListenSubscriptions(*ListenSubscriptionsRequest, LogBroker_ListenSubscriptionsServer) error + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. 
+ PublishLogs(LogBroker_PublishLogsServer) error +} + +func RegisterLogBrokerServer(s *grpc.Server, srv LogBrokerServer) { + s.RegisterService(&_LogBroker_serviceDesc, srv) +} + +func _LogBroker_ListenSubscriptions_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListenSubscriptionsRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(LogBrokerServer).ListenSubscriptions(m, &logBrokerListenSubscriptionsServer{stream}) +} + +type LogBroker_ListenSubscriptionsServer interface { + Send(*SubscriptionMessage) error + grpc.ServerStream +} + +type logBrokerListenSubscriptionsServer struct { + grpc.ServerStream +} + +func (x *logBrokerListenSubscriptionsServer) Send(m *SubscriptionMessage) error { + return x.ServerStream.SendMsg(m) +} + +func _LogBroker_PublishLogs_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(LogBrokerServer).PublishLogs(&logBrokerPublishLogsServer{stream}) +} + +type LogBroker_PublishLogsServer interface { + SendAndClose(*PublishLogsResponse) error + Recv() (*PublishLogsMessage, error) + grpc.ServerStream +} + +type logBrokerPublishLogsServer struct { + grpc.ServerStream +} + +func (x *logBrokerPublishLogsServer) SendAndClose(m *PublishLogsResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *logBrokerPublishLogsServer) Recv() (*PublishLogsMessage, error) { + m := new(PublishLogsMessage) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _LogBroker_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.LogBroker", + HandlerType: (*LogBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListenSubscriptions", + Handler: _LogBroker_ListenSubscriptions_Handler, + ServerStreams: true, + }, + { + StreamName: "PublishLogs", + Handler: _LogBroker_PublishLogs_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/logbroker.proto", +} + +func (m *LogSubscriptionOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSubscriptionOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Streams) > 0 { + for _, num := range m.Streams { + dAtA[i] = 0x8 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(num)) + } + } + if m.Follow { + dAtA[i] = 0x10 + i++ + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Tail != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Tail)) + } + if m.Since != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Since.Size())) + n1, err := m.Since.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + return i, nil +} + +func (m *LogSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogSelector) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + 
dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *LogContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.TaskID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.TaskID))) + i += copy(dAtA[i:], m.TaskID) + } + return i, nil +} + +func (m *LogAttr) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogAttr) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *LogMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LogMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Context.Size())) + n2, err := m.Context.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Timestamp != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Timestamp.Size())) + n3, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if m.Stream != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Stream)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Attrs) > 0 { + for _, msg := range m.Attrs { + dAtA[i] = 0x2a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SubscribeLogsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Options != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + 
n5, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} + +func (m *SubscribeLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscribeLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ListenSubscriptionsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListenSubscriptionsRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SubscriptionMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscriptionMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Selector.Size())) + n6, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.Options != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(m.Options.Size())) + n7, err := m.Options.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.Close { + dAtA[i] = 0x20 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SubscriptionID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(len(m.SubscriptionID))) + i += copy(dAtA[i:], m.SubscriptionID) + } + if len(m.Messages) > 0 { + for _, msg := range m.Messages { + dAtA[i] = 0x12 + i++ + i = encodeVarintLogbroker(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Close { + dAtA[i] = 0x18 + i++ + if m.Close { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *PublishLogsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PublishLogsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintLogbroker(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyLogsServer struct { + local LogsServer + connSelector raftselector.ConnProvider + localCtxMods, 
remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogsServer(local LogsServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) LogsServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogsServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogsServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogsServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Logs_SubscribeLogsServerWrapper struct { + Logs_SubscribeLogsServer + ctx context.Context +} + +func (s Logs_SubscribeLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogsServer) SubscribeLogs(r *SubscribeLogsRequest, stream Logs_SubscribeLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Logs_SubscribeLogsServerWrapper{ + Logs_SubscribeLogsServer: stream, + ctx: ctx, + } + return p.local.SubscribeLogs(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogsClient(conn).SubscribeLogs(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type raftProxyLogBrokerServer struct { + local LogBrokerServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyLogBrokerServer(local LogBrokerServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, 
error)) LogBrokerServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyLogBrokerServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyLogBrokerServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyLogBrokerServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type LogBroker_ListenSubscriptionsServerWrapper struct { + LogBroker_ListenSubscriptionsServer + ctx context.Context +} + +func (s LogBroker_ListenSubscriptionsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) ListenSubscriptions(r *ListenSubscriptionsRequest, stream LogBroker_ListenSubscriptionsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_ListenSubscriptionsServerWrapper{ + LogBroker_ListenSubscriptionsServer: stream, + ctx: ctx, + } + return p.local.ListenSubscriptions(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).ListenSubscriptions(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +type LogBroker_PublishLogsServerWrapper struct { + LogBroker_PublishLogsServer + ctx context.Context +} + +func (s LogBroker_PublishLogsServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyLogBrokerServer) PublishLogs(stream LogBroker_PublishLogsServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, 
p.localCtxMods) + if err != nil { + return err + } + streamWrapper := LogBroker_PublishLogsServerWrapper{ + LogBroker_PublishLogsServer: stream, + ctx: ctx, + } + return p.local.PublishLogs(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewLogBrokerClient(conn).PublishLogs(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (m *LogSubscriptionOptions) Size() (n int) { + var l int + _ = l + if len(m.Streams) > 0 { + for _, e := range m.Streams { + n += 1 + sovLogbroker(uint64(e)) + } + } + if m.Follow { + n += 2 + } + if m.Tail != 0 { + n += 1 + sovLogbroker(uint64(m.Tail)) + } + if m.Since != nil { + l = m.Since.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogSelector) Size() (n int) { + var l int + _ = l + if len(m.ServiceIDs) > 0 { + for _, s := range m.ServiceIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.NodeIDs) > 0 { + for _, s := range m.NodeIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if len(m.TaskIDs) > 0 { + for _, s := range m.TaskIDs { + l = len(s) + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *LogContext) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.TaskID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogAttr) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *LogMessage) Size() (n int) { + var l int + _ = l + l = m.Context.Size() + n += 1 + l + sovLogbroker(uint64(l)) + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Stream != 0 { + n += 1 + sovLogbroker(uint64(m.Stream)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Attrs) > 0 { + for _, e := range m.Attrs { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *SubscribeLogsRequest) Size() (n int) { + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + return n +} + +func (m *SubscribeLogsMessage) Size() (n int) { + var l int + _ = l + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + return n +} + +func (m *ListenSubscriptionsRequest) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SubscriptionMessage) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Options != nil { + l = m.Options.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsMessage) Size() (n int) { + var l int + _ = l + l = len(m.SubscriptionID) + 
if l > 0 { + n += 1 + l + sovLogbroker(uint64(l)) + } + if len(m.Messages) > 0 { + for _, e := range m.Messages { + l = e.Size() + n += 1 + l + sovLogbroker(uint64(l)) + } + } + if m.Close { + n += 2 + } + return n +} + +func (m *PublishLogsResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovLogbroker(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozLogbroker(x uint64) (n int) { + return sovLogbroker(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LogSubscriptionOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSubscriptionOptions{`, + `Streams:` + fmt.Sprintf("%v", this.Streams) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Tail:` + fmt.Sprintf("%v", this.Tail) + `,`, + `Since:` + strings.Replace(fmt.Sprintf("%v", this.Since), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LogSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogSelector{`, + `ServiceIDs:` + fmt.Sprintf("%v", this.ServiceIDs) + `,`, + `NodeIDs:` + fmt.Sprintf("%v", this.NodeIDs) + `,`, + `TaskIDs:` + fmt.Sprintf("%v", this.TaskIDs) + `,`, + `}`, + }, "") + return s +} +func (this *LogContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogContext{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TaskID:` + fmt.Sprintf("%v", this.TaskID) + `,`, + `}`, + }, "") + return s +} +func (this *LogAttr) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogAttr{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *LogMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LogMessage{`, + `Context:` + strings.Replace(strings.Replace(this.Context.String(), "LogContext", "LogContext", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Stream:` + fmt.Sprintf("%v", this.Stream) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Attrs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Attrs), "LogAttr", "LogAttr", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsRequest{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubscribeLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscribeLogsMessage{`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListenSubscriptionsRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListenSubscriptionsRequest{`, + `}`, + }, "") + return s +} +func (this *SubscriptionMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubscriptionMessage{`, + `ID:` + fmt.Sprintf("%v", 
this.ID) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LogSelector", "LogSelector", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "LogSubscriptionOptions", "LogSubscriptionOptions", 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsMessage{`, + `SubscriptionID:` + fmt.Sprintf("%v", this.SubscriptionID) + `,`, + `Messages:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Messages), "LogMessage", "LogMessage", 1), `&`, ``, 1) + `,`, + `Close:` + fmt.Sprintf("%v", this.Close) + `,`, + `}`, + }, "") + return s +} +func (this *PublishLogsResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PublishLogsResponse{`, + `}`, + }, "") + return s +} +func valueToStringLogbroker(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *LogSubscriptionOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSubscriptionOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSubscriptionOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v LogStream + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Streams = append(m.Streams, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Streams", wireType) + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Tail", wireType) + } + m.Tail = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Tail |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Since == nil { + m.Since = &google_protobuf.Timestamp{} + } + if err := m.Since.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceIDs = append(m.ServiceIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeIDs = append(m.NodeIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskIDs = append(m.TaskIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TaskID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func (m *LogAttr) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogAttr: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogAttr: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LogMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LogMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LogMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Context.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType) + } + m.Stream = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Stream |= (LogStream(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attrs = append(m.Attrs, LogAttr{}) + if err := m.Attrs[len(m.Attrs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscribeLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscribeLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscribeLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListenSubscriptionsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListenSubscriptionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubscriptionMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriptionMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriptionMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &LogSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = &LogSubscriptionOptions{} + } + if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubscriptionID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubscriptionID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Messages", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLogbroker + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Messages = append(m.Messages, LogMessage{}) + if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Close", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Close = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PublishLogsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLogbroker + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PublishLogsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PublishLogsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLogbroker(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthLogbroker + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLogbroker(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthLogbroker + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLogbroker + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipLogbroker(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthLogbroker = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLogbroker = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/logbroker.proto", fileDescriptorLogbroker) +} + +var fileDescriptorLogbroker = []byte{ + // 966 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x95, 0x41, 0x6f, 0x1b, 0x45, + 0x14, 0xc7, 0x3d, 0xeb, 0xc4, 0x8e, 0x9f, 0x9b, 0xc4, 0x9d, 0xa4, 0x91, 0x65, 0xa8, 0x6d, 0x6d, + 0xa5, 0x62, 0x45, 0x65, 0xdd, 0x1a, 0xa1, 0x22, 0x45, 0x42, 0xd4, 0xb8, 0x42, 0x16, 0x6e, 0x82, + 0xc6, 0x8e, 0xe0, 0x16, 0xad, 0xbd, 0xd3, 0xed, 0xca, 0xeb, 0x1d, 0xb3, 0x33, 0x4e, 0x40, 0xe2, + 0xc0, 0xa1, 0x48, 0x28, 0x07, 0x6e, 0x48, 0x70, 0xe8, 0x89, 0x5e, 0x10, 0x12, 0x17, 0x6e, 0x7c, + 0x00, 0x14, 0x71, 0xe2, 0xc8, 
0xc9, 0xa2, 0xfb, 0x01, 0xf8, 0x0c, 0x68, 0x67, 0xd6, 0xeb, 0x0d, + 0xb6, 0x53, 0x54, 0x2e, 0xf6, 0x8c, 0xe7, 0xf7, 0xf6, 0xfd, 0xdf, 0x7f, 0xde, 0x5b, 0x83, 0x61, + 0x3b, 0xe2, 0xc9, 0xa4, 0x6f, 0x0c, 0xd8, 0xa8, 0x6e, 0xb1, 0xc1, 0x90, 0xfa, 0x75, 0x7e, 0x66, + 0xfa, 0xa3, 0xa1, 0x23, 0xea, 0xe6, 0xd8, 0xa9, 0xbb, 0xcc, 0xee, 0xfb, 0x6c, 0x48, 0x7d, 0x63, + 0xec, 0x33, 0xc1, 0x30, 0x56, 0x90, 0x31, 0x83, 0x8c, 0xd3, 0x7b, 0xa5, 0x5d, 0x9b, 0xd9, 0x4c, + 0x1e, 0xd7, 0xc3, 0x95, 0x22, 0x4b, 0x15, 0x9b, 0x31, 0xdb, 0xa5, 0x75, 0xb9, 0xeb, 0x4f, 0x1e, + 0xd7, 0x85, 0x33, 0xa2, 0x5c, 0x98, 0xa3, 0x71, 0x04, 0xdc, 0xbf, 0x22, 0x75, 0x1c, 0x34, 0x76, + 0x27, 0xb6, 0xe3, 0x45, 0x5f, 0x2a, 0x50, 0xff, 0x05, 0xc1, 0x5e, 0x87, 0xd9, 0xdd, 0x49, 0x9f, + 0x0f, 0x7c, 0x67, 0x2c, 0x1c, 0xe6, 0x1d, 0xc9, 0x4f, 0x8e, 0x0f, 0x20, 0xcb, 0x85, 0x4f, 0xcd, + 0x11, 0x2f, 0xa2, 0x6a, 0xba, 0xb6, 0xd5, 0xb8, 0x69, 0x2c, 0x0a, 0x36, 0xc2, 0x60, 0x49, 0x35, + 0xb5, 0x42, 0x8a, 0xcc, 0x22, 0xf0, 0x1e, 0x64, 0x1e, 0x33, 0xd7, 0x65, 0x67, 0x45, 0xad, 0x8a, + 0x6a, 0x1b, 0x24, 0xda, 0x61, 0x0c, 0x6b, 0xc2, 0x74, 0xdc, 0x62, 0xba, 0x8a, 0x6a, 0x69, 0x22, + 0xd7, 0xf8, 0x2e, 0xac, 0x73, 0xc7, 0x1b, 0xd0, 0xe2, 0x5a, 0x15, 0xd5, 0xf2, 0x8d, 0x92, 0xa1, + 0xaa, 0x35, 0x66, 0xc2, 0x8d, 0xde, 0xac, 0x5a, 0xa2, 0x40, 0xfd, 0x1b, 0x04, 0xf9, 0x30, 0x31, + 0x75, 0xe9, 0x40, 0x30, 0x1f, 0xd7, 0x21, 0xcf, 0xa9, 0x7f, 0xea, 0x0c, 0xe8, 0x89, 0x63, 0x29, + 0xb9, 0xb9, 0xe6, 0x56, 0x30, 0xad, 0x40, 0x57, 0xfd, 0xdc, 0x6e, 0x71, 0x02, 0x11, 0xd2, 0xb6, + 0x38, 0xbe, 0x0d, 0x1b, 0x1e, 0xb3, 0x14, 0xad, 0x49, 0x3a, 0x1f, 0x4c, 0x2b, 0xd9, 0x43, 0x66, + 0x49, 0x34, 0x1b, 0x1e, 0x46, 0x9c, 0x30, 0xf9, 0x50, 0x72, 0xe9, 0x39, 0xd7, 0x33, 0xf9, 0x50, + 0x72, 0xe1, 0x61, 0xdb, 0xe2, 0xfa, 0x53, 0x04, 0xd0, 0x61, 0xf6, 0xfb, 0xcc, 0x13, 0xf4, 0x33, + 0x81, 0xef, 0x00, 0xcc, 0xf5, 0x14, 0x51, 0x15, 0xd5, 0x72, 0xcd, 0xcd, 0x60, 0x5a, 0xc9, 0xc5, + 0x72, 0x48, 0x2e, 0x56, 0x83, 0x6f, 0x41, 0x36, 0x12, 0x23, 0xcd, 0xca, 0x35, 0x21, 0x98, 0x56, + 0x32, 0x4a, 0x0b, 0xc9, 0x28, 0x29, 0x21, 0x14, 0x29, 0x91, 0xde, 0x45, 0x90, 0x12, 0x42, 0x32, + 0x4a, 0x87, 0x7e, 0x0f, 0xb2, 0x1d, 0x66, 0x3f, 0x10, 0xc2, 0xc7, 0x05, 0x48, 0x0f, 0xe9, 0xe7, + 0x2a, 0x37, 0x09, 0x97, 0x78, 0x17, 0xd6, 0x4f, 0x4d, 0x77, 0x42, 0x55, 0x12, 0xa2, 0x36, 0xfa, + 0xb9, 0x26, 0x95, 0x3f, 0xa2, 0x9c, 0x9b, 0x36, 0xc5, 0xef, 0x42, 0x76, 0xa0, 0x8a, 0x90, 0xa1, + 0xf9, 0x46, 0x79, 0xc5, 0xa5, 0x47, 0xa5, 0x36, 0xd7, 0x2e, 0xa6, 0x95, 0x14, 0x99, 0x05, 0xe1, + 0x77, 0x20, 0x17, 0xf7, 0xa6, 0x4c, 0x74, 0xf5, 0x7d, 0xce, 0x61, 0xfc, 0x36, 0x64, 0x54, 0xf3, + 0xc8, 0xfa, 0x5e, 0xd6, 0x6d, 0x24, 0x82, 0xc3, 0x86, 0xb2, 0x4c, 0x61, 0xca, 0xde, 0xb9, 0x46, + 0xe4, 0x1a, 0xdf, 0x87, 0x75, 0x53, 0x08, 0x9f, 0x17, 0xd7, 0xab, 0xe9, 0x5a, 0xbe, 0xf1, 0xda, + 0x8a, 0x27, 0x85, 0x3e, 0x45, 0xfa, 0x15, 0xaf, 0x7f, 0x8f, 0x60, 0x37, 0x1a, 0x85, 0x3e, 0xed, + 0x30, 0x9b, 0x13, 0xfa, 0xe9, 0x84, 0x72, 0x81, 0x0f, 0x60, 0x83, 0x47, 0xcd, 0x16, 0xf9, 0x52, + 0x59, 0x25, 0x2f, 0xc2, 0x48, 0x1c, 0x80, 0x5b, 0x90, 0x65, 0x6a, 0xa6, 0x22, 0x47, 0xf6, 0x57, + 0xc5, 0x2e, 0x4e, 0x21, 0x99, 0x85, 0xea, 0x9f, 0xfc, 0x4b, 0xda, 0xec, 0xc6, 0xde, 0x83, 0x8d, + 0x91, 0x5a, 0xaa, 0xc6, 0x5f, 0x7d, 0x65, 0x51, 0x44, 0x54, 0x72, 0x1c, 0xa5, 0xbf, 0x0e, 0xa5, + 0x8e, 0xc3, 0x05, 0xf5, 0x92, 0xf9, 0x67, 0xa5, 0xeb, 0xbf, 0x21, 0xd8, 0x49, 0x1e, 0xcc, 0xf2, + 0xee, 0x81, 0x16, 0xf7, 0x76, 0x26, 0x98, 0x56, 0xb4, 0x76, 0x8b, 0x68, 0x8e, 0x75, 0xc9, 0x2a, + 0xed, 0x7f, 0x58, 0x95, 0x7e, 0x65, 0xab, 0xc2, 0x4e, 
0x1f, 0xb8, 0x8c, 0xab, 0x17, 0xca, 0x06, + 0x51, 0x1b, 0xfd, 0x47, 0x04, 0xf8, 0xa3, 0x49, 0xdf, 0x75, 0xf8, 0x93, 0xa4, 0x7f, 0x07, 0xb0, + 0xcd, 0x13, 0x0f, 0x9b, 0x0f, 0x2c, 0x0e, 0xa6, 0x95, 0xad, 0x64, 0x9e, 0x76, 0x8b, 0x6c, 0x25, + 0xd1, 0xb6, 0x75, 0xc9, 0x7c, 0xed, 0x55, 0xcc, 0x9f, 0x6b, 0x4d, 0x27, 0xb5, 0xde, 0x80, 0x9d, + 0x84, 0x54, 0x42, 0xf9, 0x98, 0x79, 0x9c, 0xee, 0x3f, 0x47, 0x90, 0x8b, 0x47, 0x00, 0xdf, 0x01, + 0xdc, 0x39, 0xfa, 0xe0, 0xa4, 0xdb, 0x23, 0x0f, 0x1f, 0x3c, 0x3a, 0x39, 0x3e, 0xfc, 0xf0, 0xf0, + 0xe8, 0xe3, 0xc3, 0x42, 0xaa, 0xb4, 0x7b, 0xfe, 0xac, 0x5a, 0x88, 0xb1, 0x63, 0x6f, 0xe8, 0xb1, + 0x33, 0x0f, 0xef, 0xc3, 0xf5, 0x04, 0xdd, 0xed, 0xb5, 0x8e, 0x8e, 0x7b, 0x05, 0x54, 0xda, 0x39, + 0x7f, 0x56, 0xdd, 0x8e, 0xe1, 0xae, 0xb0, 0xd8, 0x44, 0x2c, 0xb2, 0x0f, 0x09, 0x29, 0x68, 0x8b, + 0x2c, 0xf5, 0xfd, 0xd2, 0xf5, 0xaf, 0x7f, 0x28, 0xa7, 0x7e, 0x7d, 0x5e, 0x9e, 0x0b, 0x6b, 0x3c, + 0x45, 0xb0, 0x16, 0xea, 0xc6, 0x5f, 0xc0, 0xe6, 0xa5, 0x9e, 0xc5, 0xb5, 0x65, 0xee, 0x2c, 0x9b, + 0xb8, 0xd2, 0xcb, 0xc9, 0xc8, 0x51, 0xfd, 0xc6, 0xef, 0x3f, 0xff, 0xfd, 0x9d, 0xb6, 0x0d, 0x9b, + 0x92, 0x7c, 0x73, 0x64, 0x7a, 0xa6, 0x4d, 0xfd, 0xbb, 0xa8, 0xf1, 0x93, 0x26, 0xdd, 0x6a, 0xca, + 0xff, 0x5c, 0xfc, 0x2d, 0x82, 0x9d, 0x25, 0x6d, 0x8e, 0x8d, 0xa5, 0x17, 0xb6, 0x72, 0x1e, 0x4a, + 0x6f, 0x5c, 0x21, 0x2c, 0x39, 0x20, 0xfa, 0x2d, 0xa9, 0xeb, 0x26, 0x5c, 0x53, 0xba, 0xce, 0x98, + 0x3f, 0xa4, 0xfe, 0x82, 0x4a, 0xfc, 0x15, 0x82, 0x7c, 0xe2, 0xae, 0xf1, 0xed, 0x65, 0xcf, 0x5f, + 0xec, 0xdb, 0xe5, 0x3a, 0x96, 0x34, 0xcd, 0x7f, 0xd2, 0x51, 0x43, 0xcd, 0xe2, 0xc5, 0x8b, 0x72, + 0xea, 0xcf, 0x17, 0xe5, 0xd4, 0x97, 0x41, 0x19, 0x5d, 0x04, 0x65, 0xf4, 0x47, 0x50, 0x46, 0x7f, + 0x05, 0x65, 0xd4, 0xcf, 0xc8, 0x17, 0xf7, 0x5b, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x95, 0x7b, + 0x3c, 0x04, 0xe0, 0x08, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/logbroker.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/logbroker.proto new file mode 100644 index 00000000..1549640d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/logbroker.proto @@ -0,0 +1,188 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// LogStream defines the stream from which the log message came. +enum LogStream { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "LogStream"; + + LOG_STREAM_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "LogStreamUnknown"]; + LOG_STREAM_STDOUT = 1 [(gogoproto.enumvalue_customname) = "LogStreamStdout"]; + LOG_STREAM_STDERR = 2 [(gogoproto.enumvalue_customname) = "LogStreamStderr"]; +} + +message LogSubscriptionOptions { + // Streams defines which log streams should be sent from the task source. + // Empty means send all the messages. + repeated LogStream streams = 1 [packed=false]; + + // Follow instructs the publisher to continue sending log messages as they + // are produced, after satisfying the initial query. + bool follow = 2; + + // Tail defines how many messages relative to the log stream to send when + // starting the stream. + // + // Positive values will skip that number of messages from the start of the + // stream before publishing. + // + // Negative values will specify messages relative to the end of the stream, + // offset by one. 
We can say that the last (-n-1) lines are returned when n + // < 0. As reference, -1 would mean send no log lines (typically used with + // follow), -2 would return the last log line, -11 would return the last 10 + // and so on. + // + // The default value of zero will return all logs. + // + // Note that this is very different from the Docker API. + int64 tail = 3; + + // Since indicates that only log messages produced after this timestamp + // should be sent. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp since = 4; +} + +// LogSelector will match logs from ANY of the defined parameters. +// +// For the best effect, the client should use the least specific parameter +// possible. For example, if they want to listen to all the tasks of a service, +// they should use the service id, rather than specifying the individual tasks. +message LogSelector { + repeated string service_ids = 1; + repeated string node_ids = 2; + repeated string task_ids = 3; +} + +// LogContext marks the context from which a log message was generated. +message LogContext { + string service_id = 1; + string node_id = 2; + string task_id = 3; +} + +// LogAttr is an extra key/value pair that may have been set by users. +message LogAttr { + string key = 1; + string value = 2; +} + +// LogMessage is a single log message and the context it was produced in. +message LogMessage { + // Context identifies the source of the log message. + LogContext context = 1 [(gogoproto.nullable) = false]; + + // Timestamp is the time at which the message was generated. + // Note: can't use stdtime because this field is nullable. + google.protobuf.Timestamp timestamp = 2; + + // Stream identifies the stream of the log message, stdout or stderr. + LogStream stream = 3; + + // Data is the raw log message, as generated by the application. + bytes data = 4; + + // Attrs is a list of key value pairs representing additional log details + // that may have been returned from the logger. + repeated LogAttr attrs = 5 [(gogoproto.nullable) = false]; +} + +// Logs defines the methods for retrieving task log messages from a cluster. +service Logs { + // SubscribeLogs starts a subscription with the specified selector and options. + // + // The subscription will be distributed to relevant nodes and messages will + // be collected and sent via the returned stream. + // + // The subscription will end with an EOF. + rpc SubscribeLogs(SubscribeLogsRequest) returns (stream SubscribeLogsMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + } +} + +message SubscribeLogsRequest { + // LogSelector describes the logs to which the subscriber is subscribed. + LogSelector selector = 1; + + LogSubscriptionOptions options = 2; +} + +message SubscribeLogsMessage { + repeated LogMessage messages = 1 [(gogoproto.nullable) = false]; +} + +// LogBroker defines the API used by the worker to send task logs back to a +// manager. A client listens for subscriptions then optimistically retrieves +// logs satisfying said subscriptions, calling PublishLogs for results that are +// relevant. +// +// The structure of ListenSubscriptions is similar to the Dispatcher API but +// decoupled to allow log distribution to work outside of the regular task +// flow. +service LogBroker { + // ListenSubscriptions starts a subscription stream for the node. For each + // message received, the node should attempt to satisfy the subscription. + // + // Log messages that match the provided subscription should be sent via + // PublishLogs.
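+ //
+ // As a rough sketch of the intended worker loop (illustrative only; the
+ // client value and the two helpers below are assumptions, not part of
+ // this API definition):
+ //
+ //	stream, err := client.ListenSubscriptions(ctx, &ListenSubscriptionsRequest{})
+ //	for err == nil {
+ //		var sub *SubscriptionMessage
+ //		if sub, err = stream.Recv(); err != nil {
+ //			break
+ //		}
+ //		if sub.Close {
+ //			stopPublishing(sub.ID) // hypothetical helper
+ //			continue
+ //		}
+ //		go publishMatchingLogs(sub) // hypothetical helper that calls PublishLogs
+ //	}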
+ rpc ListenSubscriptions(ListenSubscriptionsRequest) returns (stream SubscriptionMessage) { + option (docker.protobuf.plugin.tls_authorization) = { + roles: "swarm-worker" + roles: "swarm-manager" + }; + } + + // PublishLogs receives sets of log messages destined for a single + // subscription identifier. + rpc PublishLogs(stream PublishLogsMessage) returns (PublishLogsResponse) { + option (docker.protobuf.plugin.tls_authorization) = { + roles: "swarm-worker" + roles: "swarm-manager" + }; + } +} + +// ListenSubscriptionsRequest is a placeholder to begin listening for +// subscriptions. +message ListenSubscriptionsRequest { } + +// SubscriptionMessage instructs the listener to start publishing messages for +// the stream or end a subscription. +// +// If Options.Follow == false, the worker should end the subscription on its own. +message SubscriptionMessage { + // ID identifies the subscription. + string id = 1; + + // Selector defines which sources should be sent for the subscription. + LogSelector selector = 2; + + // Options specify how the subscription should be satisfied. + LogSubscriptionOptions options = 3; + + // Close will be true if the node should shut down the subscription with the + // provided identifier. + bool close = 4; +} + +message PublishLogsMessage { + // SubscriptionID identifies which subscription the set of messages should + // be sent to. We can think of this as a "mailbox" for the subscription. + string subscription_id = 1; + + // Messages is the set of log messages for publishing. + repeated LogMessage messages = 2 [(gogoproto.nullable) = false]; + + // Close is a boolean for whether or not the client has completed its log + // stream. When close is set, the manager can hang up the subscription. + // Any further logs from this subscription are an error condition. Any + // messages included when close is set can be discarded. + bool close = 3; +} + +message PublishLogsResponse { } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/naming/naming.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/naming/naming.go new file mode 100644 index 00000000..7e7d4581 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/naming/naming.go @@ -0,0 +1,49 @@ +// Package naming centralizes the naming of SwarmKit objects. +package naming + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/swarmkit/api" +) + +var ( + errUnknownRuntime = errors.New("unrecognized runtime") +) + +// Task returns the task name from Annotations.Name, +// and, in case Annotations.Name is missing, falls back +// to constructing the name from other information. +func Task(t *api.Task) string { + if t.Annotations.Name != "" { + // if set, use the container Annotations.Name field, set in the orchestrator. + return t.Annotations.Name + } + + slot := fmt.Sprint(t.Slot) + if slot == "" || t.Slot == 0 { + // when no slot id is assigned, we assume that this is a node-bound task. + slot = t.NodeID + } + + // fall back to service.instance.id. + return fmt.Sprintf("%s.%s.%s", t.ServiceAnnotations.Name, slot, t.ID) +} + +// TODO(stevvooe): Consolidate "Hostname" style validation here. + +// Runtime returns the runtime name from a given spec.
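+// A quick illustrative use (the task variable is an assumed *api.Task, not
+// part of this package):
+//
+//	kind, err := Runtime(task.Spec) // "container" for a container-backed task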
+func Runtime(t api.TaskSpec) (string, error) { + switch r := t.GetRuntime().(type) { + case *api.TaskSpec_Attachment: + return "attachment", nil + case *api.TaskSpec_Container: + return "container", nil + case *api.TaskSpec_Generic: + return strings.ToLower(r.Generic.Kind), nil + default: + return "", errUnknownRuntime + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/objects.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/objects.pb.go new file mode 100644 index 00000000..495dfb9d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/objects.pb.go @@ -0,0 +1,8386 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/objects.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/types" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf4 "github.com/gogo/protobuf/types" +import _ "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import go_events "github.com/docker/go-events" +import strings "strings" + +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Meta contains metadata about objects. Every object contains a meta field. +type Meta struct { + // Version tracks the current version of the object. + Version Version `protobuf:"bytes,1,opt,name=version" json:"version"` + // Object timestamps. + // Note: can't use stdtime because these fields are nullable. + CreatedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=created_at,json=createdAt" json:"created_at,omitempty"` + UpdatedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt" json:"updated_at,omitempty"` +} + +func (m *Meta) Reset() { *m = Meta{} } +func (*Meta) ProtoMessage() {} +func (*Meta) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{0} } + +// Node provides the internal node state as seen by the cluster. +type Node struct { + // ID specifies the identity of the node. + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description *NodeDescription `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `protobuf:"bytes,5,opt,name=status" json:"status"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"` + // DEPRECATED: Use Attachments to find the ingress network. + // The node attachment to the ingress network. + Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"` + // Certificate is the TLS certificate issued for the node, if any.
+ Certificate Certificate `protobuf:"bytes,8,opt,name=certificate" json:"certificate"` + // Role is the *observed* role for this node. It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. + Role NodeRole `protobuf:"varint,9,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including the ingress network, will have a NetworkAttachment. + Attachments []*NetworkAttachment `protobuf:"bytes,10,rep,name=attachments" json:"attachments,omitempty"` + // VXLANUDPPort specifies the UDP port for VXLAN traffic. + // This information is passed from the cluster object to individual nodes. + VXLANUDPPort uint32 `protobuf:"varint,11,opt,name=VXLANUDPPort,proto3" json:"VXLANUDPPort,omitempty"` +} + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{1} } + +type Service struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ServiceSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + SpecVersion *Version `protobuf:"bytes,10,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // PreviousSpec is the previous service spec that was in place before + // "Spec". + PreviousSpec *ServiceSpec `protobuf:"bytes,6,opt,name=previous_spec,json=previousSpec" json:"previous_spec,omitempty"` + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + PreviousSpecVersion *Version `protobuf:"bytes,11,opt,name=previous_spec_version,json=previousSpecVersion" json:"previous_spec_version,omitempty"` + // Runtime state of the service endpoint. This may differ from the spec + // version because the user may not have entered optional fields like + // node_port or virtual_ip, which could be auto-allocated by the system. + Endpoint *Endpoint `protobuf:"bytes,4,opt,name=endpoint" json:"endpoint,omitempty"` + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus *UpdateStatus `protobuf:"bytes,5,opt,name=update_status,json=updateStatus" json:"update_status,omitempty"` + // PendingDelete indicates that this service's deletion has been requested. + // Services, as well as all service-level resources, can only be deleted + // after all of the service's containers have properly shut down.
+ // When a user requests a deletion, we just flip this flag; the + // deallocator will take it from there - it will start monitoring + // this service's tasks, and proceed to delete the service itself (and + // potentially its associated resources also marked for deletion) when + // all of its tasks are gone. + PendingDelete bool `protobuf:"varint,7,opt,name=pending_delete,json=pendingDelete,proto3" json:"pending_delete,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{2} } + +// Endpoint specifies all the network parameters required to +// correctly discover and load balance a service. +type Endpoint struct { + Spec *EndpointSpec `protobuf:"bytes,1,opt,name=spec" json:"spec,omitempty"` + // Runtime state of the exposed ports, which may carry + // auto-allocated swarm ports in addition to the user + // configured information. + Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"` + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. + VirtualIPs []*Endpoint_VirtualIP `protobuf:"bytes,3,rep,name=virtual_ips,json=virtualIps" json:"virtual_ips,omitempty"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3} } + +// VirtualIP specifies a set of networks this endpoint will be attached to +// and the IP addresses the target service will be made available under. +type Endpoint_VirtualIP struct { + // NetworkID for which this endpoint attachment was created. + NetworkID string `protobuf:"bytes,1,opt,name=network_id,json=networkId,proto3" json:"network_id,omitempty"` + // A virtual IP is used to address this service at the IP + // layer; the client can use it to send requests to + // this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP and there may not be any + // interfaces assigned this IP address or any route + // created for this address. More than one may be present to + // accommodate both IPv4 and IPv6. + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *Endpoint_VirtualIP) Reset() { *m = Endpoint_VirtualIP{} } +func (*Endpoint_VirtualIP) ProtoMessage() {} +func (*Endpoint_VirtualIP) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{3, 0} } + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +type Task struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + Spec TaskSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + SpecVersion *Version `protobuf:"bytes,14,opt,name=spec_version,json=specVersion" json:"spec_version,omitempty"` + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set.
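+ // When a task carries no explicit annotation name, its display name is
+ // derived from ServiceAnnotations, Slot, and ID below as
+ // <service-name>.<slot>.<task-id>, with node-bound tasks substituting the
+ // node ID for the slot; see api/naming.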
+ ServiceID string `protobuf:"bytes,4,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + Slot uint64 `protobuf:"varint,5,opt,name=slot,proto3" json:"slot,omitempty"` + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + NodeID string `protobuf:"bytes,6,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As a backup, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): This preserves the ability for us to make naming + // decisions for tasks in the orchestrator, albeit this is left empty for now. + Annotations Annotations `protobuf:"bytes,7,opt,name=annotations" json:"annotations"` + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. + // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + ServiceAnnotations Annotations `protobuf:"bytes,8,opt,name=service_annotations,json=serviceAnnotations" json:"service_annotations"` + Status TaskStatus `protobuf:"bytes,9,opt,name=status" json:"status"` + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager. + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState" json:"desired_state,omitempty"` + // List of network attachments by the task. + Networks []*NetworkAttachment `protobuf:"bytes,11,rep,name=networks" json:"networks,omitempty"` + // A copy of runtime state of service endpoint from Service + // object to be distributed to agents as part of the task. + Endpoint *Endpoint `protobuf:"bytes,12,opt,name=endpoint" json:"endpoint,omitempty"` + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. + // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such as a cluster default or policy-based value. + // + // If not present, the daemon's default will be used. + LogDriver *Driver `protobuf:"bytes,13,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + AssignedGenericResources []*GenericResource `protobuf:"bytes,15,rep,name=assigned_generic_resources,json=assignedGenericResources" json:"assigned_generic_resources,omitempty"` +} + +func (m *Task) Reset() { *m = Task{} } +func (*Task) ProtoMessage() {} +func (*Task) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{4} } + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as task or node. +type NetworkAttachment struct { + // Network state as a whole becomes part of the object so that + // it is always available for use in agents and agents + // don't have any other dependency during execution.
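+ // In other words, an agent can bring up the attachment entirely from
+ // this embedded copy, with no extra round trip to the managers.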
+ Network *Network `protobuf:"bytes,1,opt,name=network" json:"network,omitempty"` + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network. + Addresses []string `protobuf:"bytes,2,rep,name=addresses" json:"addresses,omitempty"` + // List of aliases by which a task is resolved in a network. + Aliases []string `protobuf:"bytes,3,rep,name=aliases" json:"aliases,omitempty"` + // Map of all the driver attachment options for this network. + DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *NetworkAttachment) Reset() { *m = NetworkAttachment{} } +func (*NetworkAttachment) ProtoMessage() {} +func (*NetworkAttachment) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{5} } + +type Network struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec NetworkSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Driver specific operational state provided by the network driver. + DriverState *Driver `protobuf:"bytes,4,opt,name=driver_state,json=driverState" json:"driver_state,omitempty"` + // Runtime state of IPAM options. This may not reflect the + // ipam options from NetworkSpec. + IPAM *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"` + // PendingDelete indicates that this network's deletion has been requested. + // Services, as well as all service-level resources, can only be deleted + // after all of the service's containers have properly shut down. + // When a user requests a deletion of this network, we just flip this flag; + // the deallocator will take it from there - it will start monitoring + // the services that still use this network, and proceed to delete + // this network when all of these services are gone. + PendingDelete bool `protobuf:"varint,6,opt,name=pending_delete,json=pendingDelete,proto3" json:"pending_delete,omitempty"` +} + +func (m *Network) Reset() { *m = Network{} } +func (*Network) ProtoMessage() {} +func (*Network) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{6} } + +// Cluster provides global cluster settings. +type Cluster struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Spec ClusterSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // RootCA contains key material for the root CA. + RootCA RootCA `protobuf:"bytes,4,opt,name=root_ca,json=rootCa" json:"root_ca"` + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + NetworkBootstrapKeys []*EncryptionKey `protobuf:"bytes,5,rep,name=network_bootstrap_keys,json=networkBootstrapKeys" json:"network_bootstrap_keys,omitempty"` + // Logical clock used to timestamp every key. It allows other managers + // and agents to unambiguously identify the older key to be deleted when + // a new key is allocated on key rotation.
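+ // For example (values illustrative): after a rotation that mints a key
+ // with clock 7 while a key with clock 6 is still held, every manager and
+ // agent can agree that the clock-6 key is the older one to delete.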
+ EncryptionKeyLamportClock uint64 `protobuf:"varint,6,opt,name=encryption_key_lamport_clock,json=encryptionKeyLamportClock,proto3" json:"encryption_key_lamport_clock,omitempty"` + // BlacklistedCertificates tracks certificates that should no longer + // be honored. It's a mapping from CN -> BlacklistedCertificate; + // certificates for these CNs should effectively be blacklisted. + BlacklistedCertificates map[string]*BlacklistedCertificate `protobuf:"bytes,8,rep,name=blacklisted_certificates,json=blacklistedCertificates" json:"blacklisted_certificates,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` + // UnlockKeys defines the keys that lock node data at rest. For example, + // this would contain the key encrypting key (KEK) that will encrypt the + // manager TLS keys at rest and the raft encryption keys at rest. + // If the key is empty, the node will be unlocked (will not require a key + // to start up from a shut-down state). + UnlockKeys []*EncryptionKey `protobuf:"bytes,9,rep,name=unlock_keys,json=unlockKeys" json:"unlock_keys,omitempty"` + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. + FIPS bool `protobuf:"varint,10,opt,name=fips,proto3" json:"fips,omitempty"` + // This field specifies default subnet pools for global scope networks. If + // unspecified, Docker will use the predefined subnets, as it did on older releases. + // Format example: {"20.20.0.0/16", "20.20.0.0/16"} + DefaultAddressPool []string `protobuf:"bytes,11,rep,name=defaultAddressPool" json:"defaultAddressPool,omitempty"` + // This flag specifies the default subnet size of global scope networks by giving + // the length of the subnet masks for every such network. + SubnetSize uint32 `protobuf:"varint,12,opt,name=subnetSize,proto3" json:"subnetSize,omitempty"` + // VXLANUDPPort specifies the UDP port for VXLAN traffic. + VXLANUDPPort uint32 `protobuf:"varint,13,opt,name=VXLANUDPPort,proto3" json:"VXLANUDPPort,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{7} } + +// Secret represents a secret that should be passed to a container or a node, +// and is immutable. +type Secret struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual secret data, as well as any context around the + // secret data that the user provides. + Spec SecretSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` + // Whether the secret is an internal secret (not set by a user) or not. + Internal bool `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"` +} + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{8} } + +// Config represents a set of configuration files that should be passed to a +// container. +type Config struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + // Spec contains the actual config data, as well as any context around the + // config data that the user provides. 
+ Spec ConfigSpec `protobuf:"bytes,3,opt,name=spec" json:"spec"` +} + +func (m *Config) Reset() { *m = Config{} } +func (*Config) ProtoMessage() {} +func (*Config) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{9} } + +// Resource is a top-level object with externally defined content and indexing. +// SwarmKit can serve as a store for these objects without understanding their +// meanings. +type Resource struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + // Kind identifies this class of object. It is essentially a namespace + // to keep IDs or indices from colliding between unrelated Resource + // objects. This must correspond to the name of an Extension. + Kind string `protobuf:"bytes,4,opt,name=kind,proto3" json:"kind,omitempty"` + // Payload bytes. This data is not interpreted in any way by SwarmKit. + // By convention, it should be a marshalled protocol buffers message. + Payload *google_protobuf4.Any `protobuf:"bytes,5,opt,name=payload" json:"payload,omitempty"` +} + +func (m *Resource) Reset() { *m = Resource{} } +func (*Resource) ProtoMessage() {} +func (*Resource) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{10} } + +// Extension declares a type of "resource" object. This message provides some +// metadata about the objects. +type Extension struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Meta Meta `protobuf:"bytes,2,opt,name=meta" json:"meta"` + Annotations Annotations `protobuf:"bytes,3,opt,name=annotations" json:"annotations"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` +} + +func (m *Extension) Reset() { *m = Extension{} } +func (*Extension) ProtoMessage() {} +func (*Extension) Descriptor() ([]byte, []int) { return fileDescriptorObjects, []int{11} } + +func init() { + proto.RegisterType((*Meta)(nil), "docker.swarmkit.v1.Meta") + proto.RegisterType((*Node)(nil), "docker.swarmkit.v1.Node") + proto.RegisterType((*Service)(nil), "docker.swarmkit.v1.Service") + proto.RegisterType((*Endpoint)(nil), "docker.swarmkit.v1.Endpoint") + proto.RegisterType((*Endpoint_VirtualIP)(nil), "docker.swarmkit.v1.Endpoint.VirtualIP") + proto.RegisterType((*Task)(nil), "docker.swarmkit.v1.Task") + proto.RegisterType((*NetworkAttachment)(nil), "docker.swarmkit.v1.NetworkAttachment") + proto.RegisterType((*Network)(nil), "docker.swarmkit.v1.Network") + proto.RegisterType((*Cluster)(nil), "docker.swarmkit.v1.Cluster") + proto.RegisterType((*Secret)(nil), "docker.swarmkit.v1.Secret") + proto.RegisterType((*Config)(nil), "docker.swarmkit.v1.Config") + proto.RegisterType((*Resource)(nil), "docker.swarmkit.v1.Resource") + proto.RegisterType((*Extension)(nil), "docker.swarmkit.v1.Extension") +} + +func (m *Meta) Copy() *Meta { + if m == nil { + return nil + } + o := &Meta{} + o.CopyFrom(m) + return o +} + +func (m *Meta) CopyFrom(src interface{}) { + + o := src.(*Meta) + *m = *o + deepcopy.Copy(&m.Version, &o.Version) + if o.CreatedAt != nil { + m.CreatedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.CreatedAt, o.CreatedAt) + } + if o.UpdatedAt != nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.UpdatedAt, o.UpdatedAt) + } +} + +func (m *Node) Copy() *Node { + if m == nil { + return nil + } + o := &Node{} + o.CopyFrom(m) + return o +} + +func (m *Node) CopyFrom(src interface{}) { + 
+ o := src.(*Node) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.Description != nil { + m.Description = &NodeDescription{} + deepcopy.Copy(m.Description, o.Description) + } + deepcopy.Copy(&m.Status, &o.Status) + if o.ManagerStatus != nil { + m.ManagerStatus = &ManagerStatus{} + deepcopy.Copy(m.ManagerStatus, o.ManagerStatus) + } + if o.Attachment != nil { + m.Attachment = &NetworkAttachment{} + deepcopy.Copy(m.Attachment, o.Attachment) + } + deepcopy.Copy(&m.Certificate, &o.Certificate) + if o.Attachments != nil { + m.Attachments = make([]*NetworkAttachment, len(o.Attachments)) + for i := range m.Attachments { + m.Attachments[i] = &NetworkAttachment{} + deepcopy.Copy(m.Attachments[i], o.Attachments[i]) + } + } + +} + +func (m *Service) Copy() *Service { + if m == nil { + return nil + } + o := &Service{} + o.CopyFrom(m) + return o +} + +func (m *Service) CopyFrom(src interface{}) { + + o := src.(*Service) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + if o.PreviousSpec != nil { + m.PreviousSpec = &ServiceSpec{} + deepcopy.Copy(m.PreviousSpec, o.PreviousSpec) + } + if o.PreviousSpecVersion != nil { + m.PreviousSpecVersion = &Version{} + deepcopy.Copy(m.PreviousSpecVersion, o.PreviousSpecVersion) + } + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.UpdateStatus != nil { + m.UpdateStatus = &UpdateStatus{} + deepcopy.Copy(m.UpdateStatus, o.UpdateStatus) + } +} + +func (m *Endpoint) Copy() *Endpoint { + if m == nil { + return nil + } + o := &Endpoint{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint) CopyFrom(src interface{}) { + + o := src.(*Endpoint) + *m = *o + if o.Spec != nil { + m.Spec = &EndpointSpec{} + deepcopy.Copy(m.Spec, o.Spec) + } + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + + if o.VirtualIPs != nil { + m.VirtualIPs = make([]*Endpoint_VirtualIP, len(o.VirtualIPs)) + for i := range m.VirtualIPs { + m.VirtualIPs[i] = &Endpoint_VirtualIP{} + deepcopy.Copy(m.VirtualIPs[i], o.VirtualIPs[i]) + } + } + +} + +func (m *Endpoint_VirtualIP) Copy() *Endpoint_VirtualIP { + if m == nil { + return nil + } + o := &Endpoint_VirtualIP{} + o.CopyFrom(m) + return o +} + +func (m *Endpoint_VirtualIP) CopyFrom(src interface{}) { + + o := src.(*Endpoint_VirtualIP) + *m = *o +} + +func (m *Task) Copy() *Task { + if m == nil { + return nil + } + o := &Task{} + o.CopyFrom(m) + return o +} + +func (m *Task) CopyFrom(src interface{}) { + + o := src.(*Task) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.SpecVersion != nil { + m.SpecVersion = &Version{} + deepcopy.Copy(m.SpecVersion, o.SpecVersion) + } + deepcopy.Copy(&m.Annotations, &o.Annotations) + deepcopy.Copy(&m.ServiceAnnotations, &o.ServiceAnnotations) + deepcopy.Copy(&m.Status, &o.Status) + if o.Networks != nil { + m.Networks = make([]*NetworkAttachment, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &NetworkAttachment{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Endpoint != nil { + m.Endpoint = &Endpoint{} + deepcopy.Copy(m.Endpoint, o.Endpoint) + } + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } + if o.AssignedGenericResources != nil { + m.AssignedGenericResources = 
make([]*GenericResource, len(o.AssignedGenericResources)) + for i := range m.AssignedGenericResources { + m.AssignedGenericResources[i] = &GenericResource{} + deepcopy.Copy(m.AssignedGenericResources[i], o.AssignedGenericResources[i]) + } + } + +} + +func (m *NetworkAttachment) Copy() *NetworkAttachment { + if m == nil { + return nil + } + o := &NetworkAttachment{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachment) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachment) + *m = *o + if o.Network != nil { + m.Network = &Network{} + deepcopy.Copy(m.Network, o.Network) + } + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + copy(m.Addresses, o.Addresses) + } + + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *Network) Copy() *Network { + if m == nil { + return nil + } + o := &Network{} + o.CopyFrom(m) + return o +} + +func (m *Network) CopyFrom(src interface{}) { + + o := src.(*Network) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + if o.DriverState != nil { + m.DriverState = &Driver{} + deepcopy.Copy(m.DriverState, o.DriverState) + } + if o.IPAM != nil { + m.IPAM = &IPAMOptions{} + deepcopy.Copy(m.IPAM, o.IPAM) + } +} + +func (m *Cluster) Copy() *Cluster { + if m == nil { + return nil + } + o := &Cluster{} + o.CopyFrom(m) + return o +} + +func (m *Cluster) CopyFrom(src interface{}) { + + o := src.(*Cluster) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) + deepcopy.Copy(&m.RootCA, &o.RootCA) + if o.NetworkBootstrapKeys != nil { + m.NetworkBootstrapKeys = make([]*EncryptionKey, len(o.NetworkBootstrapKeys)) + for i := range m.NetworkBootstrapKeys { + m.NetworkBootstrapKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.NetworkBootstrapKeys[i], o.NetworkBootstrapKeys[i]) + } + } + + if o.BlacklistedCertificates != nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate, len(o.BlacklistedCertificates)) + for k, v := range o.BlacklistedCertificates { + m.BlacklistedCertificates[k] = &BlacklistedCertificate{} + deepcopy.Copy(m.BlacklistedCertificates[k], v) + } + } + + if o.UnlockKeys != nil { + m.UnlockKeys = make([]*EncryptionKey, len(o.UnlockKeys)) + for i := range m.UnlockKeys { + m.UnlockKeys[i] = &EncryptionKey{} + deepcopy.Copy(m.UnlockKeys[i], o.UnlockKeys[i]) + } + } + + if o.DefaultAddressPool != nil { + m.DefaultAddressPool = make([]string, len(o.DefaultAddressPool)) + copy(m.DefaultAddressPool, o.DefaultAddressPool) + } + +} + +func (m *Secret) Copy() *Secret { + if m == nil { + return nil + } + o := &Secret{} + o.CopyFrom(m) + return o +} + +func (m *Secret) CopyFrom(src interface{}) { + + o := src.(*Secret) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Config) Copy() *Config { + if m == nil { + return nil + } + o := &Config{} + o.CopyFrom(m) + return o +} + +func (m *Config) CopyFrom(src interface{}) { + + o := src.(*Config) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Spec, &o.Spec) +} + +func (m *Resource) Copy() *Resource { + if m == nil { + return nil + } + o := &Resource{} + o.CopyFrom(m) + return o +} + +func (m *Resource) CopyFrom(src interface{}) { + + o := src.(*Resource) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + 
deepcopy.Copy(&m.Annotations, &o.Annotations) + if o.Payload != nil { + m.Payload = &google_protobuf4.Any{} + deepcopy.Copy(m.Payload, o.Payload) + } +} + +func (m *Extension) Copy() *Extension { + if m == nil { + return nil + } + o := &Extension{} + o.CopyFrom(m) + return o +} + +func (m *Extension) CopyFrom(src interface{}) { + + o := src.(*Extension) + *m = *o + deepcopy.Copy(&m.Meta, &o.Meta) + deepcopy.Copy(&m.Annotations, &o.Annotations) +} + +func (m *Meta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Meta) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Version.Size())) + n1, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.CreatedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.CreatedAt.Size())) + n2, err := m.CreatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.UpdatedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdatedAt.Size())) + n3, err := m.UpdatedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n4, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.Description != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Description.Size())) + n6, err := m.Description.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + if m.ManagerStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ManagerStatus.Size())) + n8, err := m.ManagerStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Attachment != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Attachment.Size())) + n9, err := m.Attachment.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Certificate.Size())) + n10, err := m.Certificate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + if m.Role != 0 { + dAtA[i] = 0x48 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, msg := range m.Attachments { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.VXLANUDPPort != 0 { + dAtA[i] = 0x58 + i++ + i = encodeVarintObjects(dAtA, i, 
uint64(m.VXLANUDPPort)) + } + return i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n11, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + if m.Endpoint != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n13, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.UpdateStatus != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.UpdateStatus.Size())) + n14, err := m.UpdateStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.PreviousSpec != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpec.Size())) + n15, err := m.PreviousSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.PendingDelete { + dAtA[i] = 0x38 + i++ + if m.PendingDelete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.SpecVersion != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n16, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.PreviousSpecVersion != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.PreviousSpecVersion.Size())) + n17, err := m.PreviousSpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Spec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n18, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VirtualIPs) > 0 { + for _, msg := range m.VirtualIPs { + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Endpoint_VirtualIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint_VirtualIP) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NetworkID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NetworkID))) + i += copy(dAtA[i:], m.NetworkID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + 
i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Task) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Task) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n19, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n20, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + if len(m.ServiceID) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Slot)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + dAtA[i] = 0x3a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n21, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x42 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.ServiceAnnotations.Size())) + n22, err := m.ServiceAnnotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + if m.DesiredState != 0 { + dAtA[i] = 0x50 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x5a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Endpoint != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Endpoint.Size())) + n24, err := m.Endpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + if m.LogDriver != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.LogDriver.Size())) + n25, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.SpecVersion != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SpecVersion.Size())) + n26, err := m.SpecVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + if len(m.AssignedGenericResources) > 0 { + for _, msg := range m.AssignedGenericResources { + dAtA[i] = 0x7a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NetworkAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Network != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Network.Size())) + n27, err := 
m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *Network) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Network) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n28, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n29, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + if m.DriverState != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.DriverState.Size())) + n30, err := m.DriverState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if m.IPAM != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.IPAM.Size())) + n31, err := m.IPAM.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.PendingDelete { + dAtA[i] = 0x30 + i++ + if m.PendingDelete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Cluster) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Cluster) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n32, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n33, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.RootCA.Size())) + n34, err := m.RootCA.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + if len(m.NetworkBootstrapKeys) > 0 { + for _, msg := range m.NetworkBootstrapKeys { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n + } + } + if m.EncryptionKeyLamportClock != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, _ := range m.BlacklistedCertificates { + dAtA[i] = 0x42 + i++ + v := m.BlacklistedCertificates[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovObjects(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovObjects(uint64(len(k))) + msgSize + i = encodeVarintObjects(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(v.Size())) + n35, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + } + } + if len(m.UnlockKeys) > 0 { + for _, msg := range m.UnlockKeys { + dAtA[i] = 0x4a + i++ + i = encodeVarintObjects(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FIPS { + dAtA[i] = 0x50 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.DefaultAddressPool) > 0 { + for _, s := range m.DefaultAddressPool { + dAtA[i] = 0x5a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.SubnetSize != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.SubnetSize)) + } + if m.VXLANUDPPort != 0 { + dAtA[i] = 0x68 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.VXLANUDPPort)) + } + return i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n36, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if m.Internal { + dAtA[i] = 0x20 + i++ + if m.Internal { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Config) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Config) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n38, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Spec.Size())) + n39, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + return i, nil +} + +func (m *Resource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
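The generated MarshalTo methods above all follow the same hand-rolled gogo/protobuf pattern: write a precomputed key byte ((fieldNumber<<3)|wireType), then a varint length via encodeVarintObjects, then the payload bytes. Constants such as 0xa and 0x12 are simply the keys for fields 1 and 2 with wire type 2 (length-delimited). A minimal standalone sketch of that wire format, assuming a hypothetical package and identifiers that are not part of swarmkit:

package main

import "fmt"

// putVarint appends v in base-128 varint form, mirroring the logic of
// encodeVarintObjects in the generated file.
func putVarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

func main() {
	// Encode a string field 1 (e.g. ID) by hand: key, length, bytes.
	id := "node-1"
	var buf []byte
	buf = append(buf, 0xa) // key: (1<<3)|2 = field 1, wire type 2
	buf = putVarint(buf, uint64(len(id)))
	buf = append(buf, id...)
	fmt.Printf("% x\n", buf) // 0a 06 6e 6f 64 65 2d 31
}

The matching Size() methods later in the file compute 1 + l + sovObjects(uint64(l)) per field for exactly this layout: one key byte, the payload length l, and the varint that encodes l.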
+func (m *Resource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n40, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n41, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + if len(m.Kind) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Payload != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Payload.Size())) + n42, err := m.Payload.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} + +func (m *Extension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Extension) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Meta.Size())) + n43, err := m.Meta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + dAtA[i] = 0x1a + i++ + i = encodeVarintObjects(dAtA, i, uint64(m.Annotations.Size())) + n44, err := m.Annotations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + if len(m.Description) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintObjects(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + return i, nil +} + +func encodeVarintObjects(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *Meta) Size() (n int) { + var l int + _ = l + l = m.Version.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.CreatedAt != nil { + l = m.CreatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdatedAt != nil { + l = m.UpdatedAt.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Node) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Description != nil { + l = m.Description.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.ManagerStatus != nil { + l = m.ManagerStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.Attachment != nil { + l = m.Attachment.Size() + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Certificate.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Role != 0 { + n += 1 + sovObjects(uint64(m.Role)) + } + if len(m.Attachments) > 0 { + for _, e := range m.Attachments { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.VXLANUDPPort != 0 { + n += 1 + sovObjects(uint64(m.VXLANUDPPort)) + } + return n +} + +func (m *Service) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() 
+ n += 1 + l + sovObjects(uint64(l)) + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.UpdateStatus != nil { + l = m.UpdateStatus.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpec != nil { + l = m.PreviousSpec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PendingDelete { + n += 2 + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.PreviousSpecVersion != nil { + l = m.PreviousSpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Endpoint) Size() (n int) { + var l int + _ = l + if m.Spec != nil { + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.VirtualIPs) > 0 { + for _, e := range m.VirtualIPs { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *Endpoint_VirtualIP) Size() (n int) { + var l int + _ = l + l = len(m.NetworkID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Task) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovObjects(uint64(m.Slot)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.ServiceAnnotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DesiredState != 0 { + n += 1 + sovObjects(uint64(m.DesiredState)) + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.Endpoint != nil { + l = m.Endpoint.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.SpecVersion != nil { + l = m.SpecVersion.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.AssignedGenericResources) > 0 { + for _, e := range m.AssignedGenericResources { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + return n +} + +func (m *NetworkAttachment) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + 1 + len(v) + sovObjects(uint64(len(v))) + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Network) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.DriverState != nil { + l = m.DriverState.Size() + n += 1 + l + sovObjects(uint64(l)) + } + if m.IPAM != nil { + l = m.IPAM.Size() + n += 1 + 
l + sovObjects(uint64(l)) + } + if m.PendingDelete { + n += 2 + } + return n +} + +func (m *Cluster) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.RootCA.Size() + n += 1 + l + sovObjects(uint64(l)) + if len(m.NetworkBootstrapKeys) > 0 { + for _, e := range m.NetworkBootstrapKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.EncryptionKeyLamportClock != 0 { + n += 1 + sovObjects(uint64(m.EncryptionKeyLamportClock)) + } + if len(m.BlacklistedCertificates) > 0 { + for k, v := range m.BlacklistedCertificates { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovObjects(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovObjects(uint64(len(k))) + l + n += mapEntrySize + 1 + sovObjects(uint64(mapEntrySize)) + } + } + if len(m.UnlockKeys) > 0 { + for _, e := range m.UnlockKeys { + l = e.Size() + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.FIPS { + n += 2 + } + if len(m.DefaultAddressPool) > 0 { + for _, s := range m.DefaultAddressPool { + l = len(s) + n += 1 + l + sovObjects(uint64(l)) + } + } + if m.SubnetSize != 0 { + n += 1 + sovObjects(uint64(m.SubnetSize)) + } + if m.VXLANUDPPort != 0 { + n += 1 + sovObjects(uint64(m.VXLANUDPPort)) + } + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + if m.Internal { + n += 2 + } + return n +} + +func (m *Config) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovObjects(uint64(l)) + return n +} + +func (m *Resource) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + if m.Payload != nil { + l = m.Payload.Size() + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func (m *Extension) Size() (n int) { + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + l = m.Meta.Size() + n += 1 + l + sovObjects(uint64(l)) + l = m.Annotations.Size() + n += 1 + l + sovObjects(uint64(l)) + l = len(m.Description) + if l > 0 { + n += 1 + l + sovObjects(uint64(l)) + } + return n +} + +func sovObjects(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozObjects(x uint64) (n int) { + return sovObjects(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +type NodeCheckFunc func(t1, t2 *Node) bool + +type EventNode interface { + IsEventNode() bool +} + +type EventCreateNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventCreateNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventCreateNode) IsEventCreate() bool { + return true +} + +func (e EventCreateNode) IsEventNode() bool { + return true +} + +type EventUpdateNode struct { + Node *Node + OldNode *Node + Checks 
[]NodeCheckFunc +} + +func (e EventUpdateNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventUpdateNode) IsEventUpdate() bool { + return true +} + +func (e EventUpdateNode) IsEventNode() bool { + return true +} + +type EventDeleteNode struct { + Node *Node + Checks []NodeCheckFunc +} + +func (e EventDeleteNode) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNode) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Node, typedEvent.Node) { + return false + } + } + return true +} + +func (e EventDeleteNode) IsEventDelete() bool { + return true +} + +func (e EventDeleteNode) IsEventNode() bool { + return true +} + +func (m *Node) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Node) GetMeta() Meta { + return m.Meta +} + +func (m *Node) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Node) GetID() string { + return m.ID +} + +func (m *Node) EventCreate() Event { + return EventCreateNode{Node: m} +} + +func (m *Node) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNode{Node: m, OldNode: oldObject.(*Node)} + } else { + return EventUpdateNode{Node: m} + } +} + +func (m *Node) EventDelete() Event { + return EventDeleteNode{Node: m} +} + +func NodeCheckID(v1, v2 *Node) bool { + return v1.ID == v2.ID +} + +func NodeCheckIDPrefix(v1, v2 *Node) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NodeCheckName(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return v1.Description.Hostname == v2.Description.Hostname +} + +func NodeCheckNamePrefix(v1, v2 *Node) bool { + if v1.Description == nil || v2.Description == nil { + return false + } + return strings.HasPrefix(v2.Description.Hostname, v1.Description.Hostname) +} + +func NodeCheckCustom(v1, v2 *Node) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NodeCheckCustomPrefix(v1, v2 *Node) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NodeCheckRole(v1, v2 *Node) bool { + return v1.Role == v2.Role +} + +func NodeCheckMembership(v1, v2 *Node) bool { + return v1.Spec.Membership == v2.Spec.Membership +} + +func ConvertNodeWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Node + checkFuncs []NodeCheckFunc + hasRole bool + hasMembership bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NodeCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NodeCheckIDPrefix) + case *SelectBy_Name: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.Name} + checkFuncs = append(checkFuncs, NodeCheckName) + case *SelectBy_NamePrefix: + if m.Description != nil { + return nil, errConflictingFilters + } + m.Description = &NodeDescription{Hostname: v.NamePrefix} + checkFuncs = append(checkFuncs, NodeCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: 
v.Custom.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NodeCheckCustomPrefix) + case *SelectBy_Role: + if hasRole { + return nil, errConflictingFilters + } + hasRole = true + m.Role = v.Role + checkFuncs = append(checkFuncs, NodeCheckRole) + case *SelectBy_Membership: + if hasMembership { + return nil, errConflictingFilters + } + hasMembership = true + m.Spec.Membership = v.Membership + checkFuncs = append(checkFuncs, NodeCheckMembership) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNode{Node: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNode{Node: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NodeIndexerByID struct{} + +func (indexer NodeIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + return true, []byte(m.ID + "\x00"), nil +} + +type NodeIndexerByName struct{} + +func (indexer NodeIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NodeIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Node) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NodeCustomIndexer struct{} + +func (indexer NodeCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NodeCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer NodeCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Node) + return customIndexer("", &m.Spec.Annotations) +} + +type ServiceCheckFunc func(t1, t2 *Service) bool + +type EventService interface { + IsEventService() bool +} + +type EventCreateService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventCreateService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventCreateService) IsEventCreate() bool { + return true +} + +func (e EventCreateService) IsEventService() bool { + return true +} + +type EventUpdateService struct { + Service *Service + OldService *Service + Checks []ServiceCheckFunc +} + +func (e EventUpdateService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventUpdateService) IsEventUpdate() bool { + return true +} + +func (e EventUpdateService) IsEventService() bool { + return true +} + +type EventDeleteService struct { + Service *Service + Checks []ServiceCheckFunc +} + +func (e EventDeleteService) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteService) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Service, typedEvent.Service) { + return false + } + } + return true +} + +func (e EventDeleteService) IsEventDelete() bool { + return true +} + +func (e EventDeleteService) IsEventService() bool { + return true +} + +func (m *Service) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Service) GetMeta() Meta { + return m.Meta +} + +func (m *Service) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Service) GetID() string { + return m.ID +} + +func (m *Service) EventCreate() Event { + return EventCreateService{Service: m} +} + +func (m *Service) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateService{Service: m, OldService: oldObject.(*Service)} + } else { + return EventUpdateService{Service: m} + } +} + +func (m *Service) EventDelete() Event { + return EventDeleteService{Service: m} +} + +func ServiceCheckID(v1, v2 *Service) bool { + return v1.ID == v2.ID +} + +func ServiceCheckIDPrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ServiceCheckName(v1, v2 *Service) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ServiceCheckNamePrefix(v1, v2 *Service) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ServiceCheckCustom(v1, v2 *Service) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ServiceCheckCustomPrefix(v1, v2 *Service) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertServiceWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Service + checkFuncs []ServiceCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ServiceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + 
} + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ServiceCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ServiceCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ServiceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ServiceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateService{Service: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteService{Service: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ServiceIndexerByID struct{} + +func (indexer ServiceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + return true, []byte(m.ID + "\x00"), nil +} + +type ServiceIndexerByName struct{} + +func (indexer ServiceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ServiceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Service) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ServiceCustomIndexer struct{} + +func (indexer ServiceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ServiceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ServiceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Service) + return customIndexer("", &m.Spec.Annotations) +} + +type TaskCheckFunc func(t1, t2 *Task) bool + +type EventTask interface { + IsEventTask() bool +} + +type EventCreateTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventCreateTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventCreateTask) IsEventCreate() bool { + return true +} + +func (e EventCreateTask) IsEventTask() bool { + return true +} + +type EventUpdateTask struct { + Task *Task + OldTask *Task + Checks []TaskCheckFunc +} + +func (e EventUpdateTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventUpdateTask) IsEventUpdate() bool { + return true +} + +func (e EventUpdateTask) IsEventTask() bool { + return true +} + +type EventDeleteTask struct { + Task *Task + Checks []TaskCheckFunc +} + +func (e EventDeleteTask) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteTask) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Task, typedEvent.Task) { + return false + } + } + return true +} + +func (e EventDeleteTask) IsEventDelete() bool { + return true +} + +func (e EventDeleteTask) IsEventTask() bool { + return true +} + +func (m *Task) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Task) GetMeta() Meta { + return m.Meta +} + +func (m *Task) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Task) GetID() string { + return m.ID +} + +func (m *Task) EventCreate() Event { + return EventCreateTask{Task: m} +} + +func (m *Task) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateTask{Task: m, OldTask: oldObject.(*Task)} + } else { + return EventUpdateTask{Task: m} + } +} + +func (m *Task) EventDelete() Event { + return EventDeleteTask{Task: m} +} + +func TaskCheckID(v1, v2 *Task) bool { + return v1.ID == v2.ID +} + +func TaskCheckIDPrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func TaskCheckName(v1, v2 *Task) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func TaskCheckNamePrefix(v1, v2 *Task) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func TaskCheckCustom(v1, v2 *Task) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func TaskCheckCustomPrefix(v1, v2 *Task) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func TaskCheckNodeID(v1, v2 *Task) bool { + return v1.NodeID == v2.NodeID +} + +func TaskCheckServiceID(v1, v2 *Task) bool { + return v1.ServiceID == v2.ServiceID +} + +func TaskCheckSlot(v1, v2 *Task) bool { + return v1.Slot == v2.Slot +} + +func TaskCheckDesiredState(v1, v2 *Task) bool { + return v1.DesiredState == v2.DesiredState +} + +func ConvertTaskWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Task + checkFuncs []TaskCheckFunc + hasDesiredState bool + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = 
append(checkFuncs, TaskCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, TaskCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, TaskCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, TaskCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, TaskCheckCustomPrefix) + case *SelectBy_ServiceID: + if m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.ServiceID + checkFuncs = append(checkFuncs, TaskCheckServiceID) + case *SelectBy_NodeID: + if m.NodeID != "" { + return nil, errConflictingFilters + } + m.NodeID = v.NodeID + checkFuncs = append(checkFuncs, TaskCheckNodeID) + case *SelectBy_Slot: + if m.Slot != 0 || m.ServiceID != "" { + return nil, errConflictingFilters + } + m.ServiceID = v.Slot.ServiceID + m.Slot = v.Slot.Slot + checkFuncs = append(checkFuncs, TaskCheckNodeID, TaskCheckSlot) + case *SelectBy_DesiredState: + if hasDesiredState { + return nil, errConflictingFilters + } + hasDesiredState = true + m.DesiredState = v.DesiredState + checkFuncs = append(checkFuncs, TaskCheckDesiredState) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateTask{Task: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteTask{Task: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type TaskIndexerByID struct{} + +func (indexer TaskIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + return true, []byte(m.ID + "\x00"), nil +} + +type TaskIndexerByName struct{} + +func (indexer TaskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer TaskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Task) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type TaskCustomIndexer struct{} + +func (indexer TaskCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer TaskCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
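+ // Note: every object type in this file gets the same trio of indexers
+ // (IndexerByID, IndexerByName, CustomIndexer), built on the shared
+ // fromArgs/prefixFromArgs helpers. FromObject emits NUL-terminated keys
+ // (m.ID + "\x00"), which appears to match what the store's radix-tree
+ // index expects; the Custom indexers return [][]byte because a single
+ // object may carry several custom index entries.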
+} +func (indexer TaskCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Task) + return customIndexer("", &m.Annotations) +} + +type NetworkCheckFunc func(t1, t2 *Network) bool + +type EventNetwork interface { + IsEventNetwork() bool +} + +type EventCreateNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventCreateNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventCreateNetwork) IsEventCreate() bool { + return true +} + +func (e EventCreateNetwork) IsEventNetwork() bool { + return true +} + +type EventUpdateNetwork struct { + Network *Network + OldNetwork *Network + Checks []NetworkCheckFunc +} + +func (e EventUpdateNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventUpdateNetwork) IsEventUpdate() bool { + return true +} + +func (e EventUpdateNetwork) IsEventNetwork() bool { + return true +} + +type EventDeleteNetwork struct { + Network *Network + Checks []NetworkCheckFunc +} + +func (e EventDeleteNetwork) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteNetwork) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Network, typedEvent.Network) { + return false + } + } + return true +} + +func (e EventDeleteNetwork) IsEventDelete() bool { + return true +} + +func (e EventDeleteNetwork) IsEventNetwork() bool { + return true +} + +func (m *Network) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Network) GetMeta() Meta { + return m.Meta +} + +func (m *Network) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Network) GetID() string { + return m.ID +} + +func (m *Network) EventCreate() Event { + return EventCreateNetwork{Network: m} +} + +func (m *Network) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateNetwork{Network: m, OldNetwork: oldObject.(*Network)} + } else { + return EventUpdateNetwork{Network: m} + } +} + +func (m *Network) EventDelete() Event { + return EventDeleteNetwork{Network: m} +} + +func NetworkCheckID(v1, v2 *Network) bool { + return v1.ID == v2.ID +} + +func NetworkCheckIDPrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func NetworkCheckName(v1, v2 *Network) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func NetworkCheckNamePrefix(v1, v2 *Network) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func NetworkCheckCustom(v1, v2 *Network) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func NetworkCheckCustomPrefix(v1, v2 *Network) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertNetworkWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Network + checkFuncs []NetworkCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, NetworkCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + 
m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, NetworkCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, NetworkCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, NetworkCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, NetworkCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateNetwork{Network: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteNetwork{Network: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type NetworkIndexerByID struct{} + +func (indexer NetworkIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + return true, []byte(m.ID + "\x00"), nil +} + +type NetworkIndexerByName struct{} + +func (indexer NetworkIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer NetworkIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Network) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type NetworkCustomIndexer struct{} + +func (indexer NetworkCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer NetworkCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
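+ // Note: the Event*.Matches methods above implement a template-matching
+ // scheme: Convert*Watch builds a partially populated object from the
+ // request filters, and each accumulated check function compares one
+ // field of that template against the same field of the incoming
+ // event's object.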
+} +func (indexer NetworkCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Network) + return customIndexer("", &m.Spec.Annotations) +} + +type ClusterCheckFunc func(t1, t2 *Cluster) bool + +type EventCluster interface { + IsEventCluster() bool +} + +type EventCreateCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventCreateCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventCreateCluster) IsEventCreate() bool { + return true +} + +func (e EventCreateCluster) IsEventCluster() bool { + return true +} + +type EventUpdateCluster struct { + Cluster *Cluster + OldCluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventUpdateCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventUpdateCluster) IsEventUpdate() bool { + return true +} + +func (e EventUpdateCluster) IsEventCluster() bool { + return true +} + +type EventDeleteCluster struct { + Cluster *Cluster + Checks []ClusterCheckFunc +} + +func (e EventDeleteCluster) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteCluster) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Cluster, typedEvent.Cluster) { + return false + } + } + return true +} + +func (e EventDeleteCluster) IsEventDelete() bool { + return true +} + +func (e EventDeleteCluster) IsEventCluster() bool { + return true +} + +func (m *Cluster) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Cluster) GetMeta() Meta { + return m.Meta +} + +func (m *Cluster) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Cluster) GetID() string { + return m.ID +} + +func (m *Cluster) EventCreate() Event { + return EventCreateCluster{Cluster: m} +} + +func (m *Cluster) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateCluster{Cluster: m, OldCluster: oldObject.(*Cluster)} + } else { + return EventUpdateCluster{Cluster: m} + } +} + +func (m *Cluster) EventDelete() Event { + return EventDeleteCluster{Cluster: m} +} + +func ClusterCheckID(v1, v2 *Cluster) bool { + return v1.ID == v2.ID +} + +func ClusterCheckIDPrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ClusterCheckName(v1, v2 *Cluster) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ClusterCheckNamePrefix(v1, v2 *Cluster) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ClusterCheckCustom(v1, v2 *Cluster) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ClusterCheckCustomPrefix(v1, v2 *Cluster) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertClusterWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Cluster + checkFuncs []ClusterCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ClusterCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, 
errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ClusterCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ClusterCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ClusterCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ClusterCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateCluster{Cluster: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteCluster{Cluster: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ClusterIndexerByID struct{} + +func (indexer ClusterIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + return true, []byte(m.ID + "\x00"), nil +} + +type ClusterIndexerByName struct{} + +func (indexer ClusterIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ClusterIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Cluster) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ClusterCustomIndexer struct{} + +func (indexer ClusterCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ClusterCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ClusterCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Cluster) + return customIndexer("", &m.Spec.Annotations) +} + +type SecretCheckFunc func(t1, t2 *Secret) bool + +type EventSecret interface { + IsEventSecret() bool +} + +type EventCreateSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventCreateSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventCreateSecret) IsEventCreate() bool { + return true +} + +func (e EventCreateSecret) IsEventSecret() bool { + return true +} + +type EventUpdateSecret struct { + Secret *Secret + OldSecret *Secret + Checks []SecretCheckFunc +} + +func (e EventUpdateSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventUpdateSecret) IsEventUpdate() bool { + return true +} + +func (e EventUpdateSecret) IsEventSecret() bool { + return true +} + +type EventDeleteSecret struct { + Secret *Secret + Checks []SecretCheckFunc +} + +func (e EventDeleteSecret) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteSecret) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Secret, typedEvent.Secret) { + return false + } + } + return true +} + +func (e EventDeleteSecret) IsEventDelete() bool { + return true +} + +func (e EventDeleteSecret) IsEventSecret() bool { + return true +} + +func (m *Secret) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Secret) GetMeta() Meta { + return m.Meta +} + +func (m *Secret) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Secret) GetID() string { + return m.ID +} + +func (m *Secret) EventCreate() Event { + return EventCreateSecret{Secret: m} +} + +func (m *Secret) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateSecret{Secret: m, OldSecret: oldObject.(*Secret)} + } else { + return EventUpdateSecret{Secret: m} + } +} + +func (m *Secret) EventDelete() Event { + return EventDeleteSecret{Secret: m} +} + +func SecretCheckID(v1, v2 *Secret) bool { + return v1.ID == v2.ID +} + +func SecretCheckIDPrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func SecretCheckName(v1, v2 *Secret) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func SecretCheckNamePrefix(v1, v2 *Secret) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func SecretCheckCustom(v1, v2 *Secret) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func SecretCheckCustomPrefix(v1, v2 *Secret) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertSecretWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Secret + checkFuncs []SecretCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, SecretCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, 
SecretCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, SecretCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, SecretCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, SecretCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateSecret{Secret: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteSecret{Secret: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type SecretIndexerByID struct{} + +func (indexer SecretIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + return true, []byte(m.ID + "\x00"), nil +} + +type SecretIndexerByName struct{} + +func (indexer SecretIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer SecretIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Secret) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type SecretCustomIndexer struct{} + +func (indexer SecretCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer SecretCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer SecretCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Secret) + return customIndexer("", &m.Spec.Annotations) +} + +type ConfigCheckFunc func(t1, t2 *Config) bool + +type EventConfig interface { + IsEventConfig() bool +} + +type EventCreateConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventCreateConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventCreateConfig) IsEventCreate() bool { + return true +} + +func (e EventCreateConfig) IsEventConfig() bool { + return true +} + +type EventUpdateConfig struct { + Config *Config + OldConfig *Config + Checks []ConfigCheckFunc +} + +func (e EventUpdateConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventUpdateConfig) IsEventUpdate() bool { + return true +} + +func (e EventUpdateConfig) IsEventConfig() bool { + return true +} + +type EventDeleteConfig struct { + Config *Config + Checks []ConfigCheckFunc +} + +func (e EventDeleteConfig) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteConfig) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Config, typedEvent.Config) { + return false + } + } + return true +} + +func (e EventDeleteConfig) IsEventDelete() bool { + return true +} + +func (e EventDeleteConfig) IsEventConfig() bool { + return true +} + +func (m *Config) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Config) GetMeta() Meta { + return m.Meta +} + +func (m *Config) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Config) GetID() string { + return m.ID +} + +func (m *Config) EventCreate() Event { + return EventCreateConfig{Config: m} +} + +func (m *Config) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateConfig{Config: m, OldConfig: oldObject.(*Config)} + } else { + return EventUpdateConfig{Config: m} + } +} + +func (m *Config) EventDelete() Event { + return EventDeleteConfig{Config: m} +} + +func ConfigCheckID(v1, v2 *Config) bool { + return v1.ID == v2.ID +} + +func ConfigCheckIDPrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ConfigCheckName(v1, v2 *Config) bool { + return v1.Spec.Annotations.Name == v2.Spec.Annotations.Name +} + +func ConfigCheckNamePrefix(v1, v2 *Config) bool { + return strings.HasPrefix(v2.Spec.Annotations.Name, v1.Spec.Annotations.Name) +} + +func ConfigCheckCustom(v1, v2 *Config) bool { + return checkCustom(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConfigCheckCustomPrefix(v1, v2 *Config) bool { + return checkCustomPrefix(v1.Spec.Annotations, v2.Spec.Annotations) +} + +func ConvertConfigWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Config + checkFuncs []ConfigCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ConfigCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, 
ConfigCheckIDPrefix) + case *SelectBy_Name: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ConfigCheckName) + case *SelectBy_NamePrefix: + if m.Spec.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Spec.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ConfigCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Spec.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Spec.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ConfigCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateConfig{Config: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteConfig{Config: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ConfigIndexerByID struct{} + +func (indexer ConfigIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + return true, []byte(m.ID + "\x00"), nil +} + +type ConfigIndexerByName struct{} + +func (indexer ConfigIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ConfigIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Config) + val := m.Spec.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ConfigCustomIndexer struct{} + +func (indexer ConfigCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ConfigCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ConfigCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Config) + return customIndexer("", &m.Spec.Annotations) +} + +type ResourceCheckFunc func(t1, t2 *Resource) bool + +type EventResource interface { + IsEventResource() bool +} + +type EventCreateResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventCreateResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventCreateResource) IsEventCreate() bool { + return true +} + +func (e EventCreateResource) IsEventResource() bool { + return true +} + +type EventUpdateResource struct { + Resource *Resource + OldResource *Resource + Checks []ResourceCheckFunc +} + +func (e EventUpdateResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventUpdateResource) IsEventUpdate() bool { + return true +} + +func (e EventUpdateResource) IsEventResource() bool { + return true +} + +type EventDeleteResource struct { + Resource *Resource + Checks []ResourceCheckFunc +} + +func (e EventDeleteResource) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteResource) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Resource, typedEvent.Resource) { + return false + } + } + return true +} + +func (e EventDeleteResource) IsEventDelete() bool { + return true +} + +func (e EventDeleteResource) IsEventResource() bool { + return true +} + +func (m *Resource) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Resource) GetMeta() Meta { + return m.Meta +} + +func (m *Resource) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Resource) GetID() string { + return m.ID +} + +func (m *Resource) EventCreate() Event { + return EventCreateResource{Resource: m} +} + +func (m *Resource) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateResource{Resource: m, OldResource: oldObject.(*Resource)} + } else { + return EventUpdateResource{Resource: m} + } +} + +func (m *Resource) EventDelete() Event { + return EventDeleteResource{Resource: m} +} + +func ResourceCheckID(v1, v2 *Resource) bool { + return v1.ID == v2.ID +} + +func ResourceCheckIDPrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ResourceCheckName(v1, v2 *Resource) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ResourceCheckNamePrefix(v1, v2 *Resource) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ResourceCheckCustom(v1, v2 *Resource) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ResourceCheckCustomPrefix(v1, v2 *Resource) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ResourceCheckKind(v1, v2 *Resource) bool { + return v1.Kind == v2.Kind +} + +func ConvertResourceWatch(action WatchActionKind, filters []*SelectBy, kind string) ([]Event, error) { + var ( + m Resource + checkFuncs []ResourceCheckFunc + ) + m.Kind = kind + checkFuncs = append(checkFuncs, ResourceCheckKind) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if 
m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = append(checkFuncs, ResourceCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ResourceCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ResourceCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ResourceCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ResourceCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateResource{Resource: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteResource{Resource: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ResourceIndexerByID struct{} + +func (indexer ResourceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + return true, []byte(m.ID + "\x00"), nil +} + +type ResourceIndexerByName struct{} + +func (indexer ResourceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ResourceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Resource) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ResourceCustomIndexer struct{} + +func (indexer ResourceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ResourceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
+} +func (indexer ResourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Resource) + return customIndexer("", &m.Annotations) +} + +type ExtensionCheckFunc func(t1, t2 *Extension) bool + +type EventExtension interface { + IsEventExtension() bool +} + +type EventCreateExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventCreateExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventCreateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventCreateExtension) IsEventCreate() bool { + return true +} + +func (e EventCreateExtension) IsEventExtension() bool { + return true +} + +type EventUpdateExtension struct { + Extension *Extension + OldExtension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventUpdateExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventUpdateExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventUpdateExtension) IsEventUpdate() bool { + return true +} + +func (e EventUpdateExtension) IsEventExtension() bool { + return true +} + +type EventDeleteExtension struct { + Extension *Extension + Checks []ExtensionCheckFunc +} + +func (e EventDeleteExtension) Matches(apiEvent go_events.Event) bool { + typedEvent, ok := apiEvent.(EventDeleteExtension) + if !ok { + return false + } + + for _, check := range e.Checks { + if !check(e.Extension, typedEvent.Extension) { + return false + } + } + return true +} + +func (e EventDeleteExtension) IsEventDelete() bool { + return true +} + +func (e EventDeleteExtension) IsEventExtension() bool { + return true +} + +func (m *Extension) CopyStoreObject() StoreObject { + return m.Copy() +} + +func (m *Extension) GetMeta() Meta { + return m.Meta +} + +func (m *Extension) SetMeta(meta Meta) { + m.Meta = meta +} + +func (m *Extension) GetID() string { + return m.ID +} + +func (m *Extension) EventCreate() Event { + return EventCreateExtension{Extension: m} +} + +func (m *Extension) EventUpdate(oldObject StoreObject) Event { + if oldObject != nil { + return EventUpdateExtension{Extension: m, OldExtension: oldObject.(*Extension)} + } else { + return EventUpdateExtension{Extension: m} + } +} + +func (m *Extension) EventDelete() Event { + return EventDeleteExtension{Extension: m} +} + +func ExtensionCheckID(v1, v2 *Extension) bool { + return v1.ID == v2.ID +} + +func ExtensionCheckIDPrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.ID, v1.ID) +} + +func ExtensionCheckName(v1, v2 *Extension) bool { + return v1.Annotations.Name == v2.Annotations.Name +} + +func ExtensionCheckNamePrefix(v1, v2 *Extension) bool { + return strings.HasPrefix(v2.Annotations.Name, v1.Annotations.Name) +} + +func ExtensionCheckCustom(v1, v2 *Extension) bool { + return checkCustom(v1.Annotations, v2.Annotations) +} + +func ExtensionCheckCustomPrefix(v1, v2 *Extension) bool { + return checkCustomPrefix(v1.Annotations, v2.Annotations) +} + +func ConvertExtensionWatch(action WatchActionKind, filters []*SelectBy) ([]Event, error) { + var ( + m Extension + checkFuncs []ExtensionCheckFunc + ) + + for _, filter := range filters { + switch v := filter.By.(type) { + case *SelectBy_ID: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.ID + checkFuncs = 
append(checkFuncs, ExtensionCheckID) + case *SelectBy_IDPrefix: + if m.ID != "" { + return nil, errConflictingFilters + } + m.ID = v.IDPrefix + checkFuncs = append(checkFuncs, ExtensionCheckIDPrefix) + case *SelectBy_Name: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.Name + checkFuncs = append(checkFuncs, ExtensionCheckName) + case *SelectBy_NamePrefix: + if m.Annotations.Name != "" { + return nil, errConflictingFilters + } + m.Annotations.Name = v.NamePrefix + checkFuncs = append(checkFuncs, ExtensionCheckNamePrefix) + case *SelectBy_Custom: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.Custom.Index, Val: v.Custom.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustom) + case *SelectBy_CustomPrefix: + if len(m.Annotations.Indices) != 0 { + return nil, errConflictingFilters + } + m.Annotations.Indices = []IndexEntry{{Key: v.CustomPrefix.Index, Val: v.CustomPrefix.Value}} + checkFuncs = append(checkFuncs, ExtensionCheckCustomPrefix) + } + } + var events []Event + if (action & WatchActionKindCreate) != 0 { + events = append(events, EventCreateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindUpdate) != 0 { + events = append(events, EventUpdateExtension{Extension: &m, Checks: checkFuncs}) + } + if (action & WatchActionKindRemove) != 0 { + events = append(events, EventDeleteExtension{Extension: &m, Checks: checkFuncs}) + } + if len(events) == 0 { + return nil, errUnrecognizedAction + } + return events, nil +} + +type ExtensionIndexerByID struct{} + +func (indexer ExtensionIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + return true, []byte(m.ID + "\x00"), nil +} + +type ExtensionIndexerByName struct{} + +func (indexer ExtensionIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} +func (indexer ExtensionIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + m := obj.(*Extension) + val := m.Annotations.Name + return true, []byte(strings.ToLower(val) + "\x00"), nil +} + +type ExtensionCustomIndexer struct{} + +func (indexer ExtensionCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} +func (indexer ExtensionCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) 
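+ // Note: NewStoreAction, EventFromStoreAction, and WatchMessageEvent
+ // below bridge store events and their protobuf forms: NewStoreAction
+ // converts an Event into a StoreAction message, EventFromStoreAction
+ // reverses that conversion (reattaching the old object to update events
+ // when one is available), and WatchMessageEvent shapes an Event into
+ // the WatchMessage_Event payload returned to watch API clients.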
+} +func (indexer ExtensionCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + m := obj.(*Extension) + return customIndexer("", &m.Annotations) +} +func NewStoreAction(c Event) (StoreAction, error) { + var sa StoreAction + switch v := c.(type) { + case EventCreateNode: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventUpdateNode: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Node{Node: v.Node} + case EventDeleteNode: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Node{Node: v.Node} + case EventCreateService: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventUpdateService: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Service{Service: v.Service} + case EventDeleteService: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Service{Service: v.Service} + case EventCreateTask: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventUpdateTask: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Task{Task: v.Task} + case EventDeleteTask: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Task{Task: v.Task} + case EventCreateNetwork: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventUpdateNetwork: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Network{Network: v.Network} + case EventDeleteNetwork: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Network{Network: v.Network} + case EventCreateCluster: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventUpdateCluster: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventDeleteCluster: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Cluster{Cluster: v.Cluster} + case EventCreateSecret: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventUpdateSecret: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventDeleteSecret: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Secret{Secret: v.Secret} + case EventCreateConfig: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventUpdateConfig: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Config{Config: v.Config} + case EventDeleteConfig: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Config{Config: v.Config} + case EventCreateResource: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventUpdateResource: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventDeleteResource: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Resource{Resource: v.Resource} + case EventCreateExtension: + sa.Action = StoreActionKindCreate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventUpdateExtension: + sa.Action = StoreActionKindUpdate + sa.Target = &StoreAction_Extension{Extension: v.Extension} + case EventDeleteExtension: + sa.Action = StoreActionKindRemove + sa.Target = &StoreAction_Extension{Extension: v.Extension} + default: + return StoreAction{}, errUnknownStoreAction + } + return sa, nil +} + +func EventFromStoreAction(sa StoreAction, oldObject StoreObject) (Event, error) 
{ + switch v := sa.Target.(type) { + case *StoreAction_Node: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNode{Node: v.Node}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNode{Node: v.Node, OldNode: oldObject.(*Node)}, nil + } else { + return EventUpdateNode{Node: v.Node}, nil + } + case StoreActionKindRemove: + return EventDeleteNode{Node: v.Node}, nil + } + case *StoreAction_Service: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateService{Service: v.Service}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateService{Service: v.Service, OldService: oldObject.(*Service)}, nil + } else { + return EventUpdateService{Service: v.Service}, nil + } + case StoreActionKindRemove: + return EventDeleteService{Service: v.Service}, nil + } + case *StoreAction_Task: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateTask{Task: v.Task}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateTask{Task: v.Task, OldTask: oldObject.(*Task)}, nil + } else { + return EventUpdateTask{Task: v.Task}, nil + } + case StoreActionKindRemove: + return EventDeleteTask{Task: v.Task}, nil + } + case *StoreAction_Network: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateNetwork{Network: v.Network}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateNetwork{Network: v.Network, OldNetwork: oldObject.(*Network)}, nil + } else { + return EventUpdateNetwork{Network: v.Network}, nil + } + case StoreActionKindRemove: + return EventDeleteNetwork{Network: v.Network}, nil + } + case *StoreAction_Cluster: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateCluster{Cluster: v.Cluster}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateCluster{Cluster: v.Cluster, OldCluster: oldObject.(*Cluster)}, nil + } else { + return EventUpdateCluster{Cluster: v.Cluster}, nil + } + case StoreActionKindRemove: + return EventDeleteCluster{Cluster: v.Cluster}, nil + } + case *StoreAction_Secret: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateSecret{Secret: v.Secret}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateSecret{Secret: v.Secret, OldSecret: oldObject.(*Secret)}, nil + } else { + return EventUpdateSecret{Secret: v.Secret}, nil + } + case StoreActionKindRemove: + return EventDeleteSecret{Secret: v.Secret}, nil + } + case *StoreAction_Config: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateConfig{Config: v.Config}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateConfig{Config: v.Config, OldConfig: oldObject.(*Config)}, nil + } else { + return EventUpdateConfig{Config: v.Config}, nil + } + case StoreActionKindRemove: + return EventDeleteConfig{Config: v.Config}, nil + } + case *StoreAction_Resource: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateResource{Resource: v.Resource}, nil + case StoreActionKindUpdate: + if oldObject != nil { + return EventUpdateResource{Resource: v.Resource, OldResource: oldObject.(*Resource)}, nil + } else { + return EventUpdateResource{Resource: v.Resource}, nil + } + case StoreActionKindRemove: + return EventDeleteResource{Resource: v.Resource}, nil + } + case *StoreAction_Extension: + switch sa.Action { + case StoreActionKindCreate: + return EventCreateExtension{Extension: v.Extension}, nil + case StoreActionKindUpdate: + if oldObject != nil 
{ + return EventUpdateExtension{Extension: v.Extension, OldExtension: oldObject.(*Extension)}, nil + } else { + return EventUpdateExtension{Extension: v.Extension}, nil + } + case StoreActionKindRemove: + return EventDeleteExtension{Extension: v.Extension}, nil + } + } + return nil, errUnknownStoreAction +} + +func WatchMessageEvent(c Event) *WatchMessage_Event { + switch v := c.(type) { + case EventCreateNode: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventUpdateNode: + if v.OldNode != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}, OldObject: &Object{Object: &Object_Node{Node: v.OldNode}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Node{Node: v.Node}}} + } + case EventDeleteNode: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Node{Node: v.Node}}} + case EventCreateService: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventUpdateService: + if v.OldService != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}, OldObject: &Object{Object: &Object_Service{Service: v.OldService}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Service{Service: v.Service}}} + } + case EventDeleteService: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Service{Service: v.Service}}} + case EventCreateTask: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventUpdateTask: + if v.OldTask != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}, OldObject: &Object{Object: &Object_Task{Task: v.OldTask}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Task{Task: v.Task}}} + } + case EventDeleteTask: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Task{Task: v.Task}}} + case EventCreateNetwork: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventUpdateNetwork: + if v.OldNetwork != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}, OldObject: &Object{Object: &Object_Network{Network: v.OldNetwork}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Network{Network: v.Network}}} + } + case EventDeleteNetwork: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Network{Network: v.Network}}} + case EventCreateCluster: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventUpdateCluster: + if v.OldCluster != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}, OldObject: &Object{Object: &Object_Cluster{Cluster: v.OldCluster}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + } + case EventDeleteCluster: + 
return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Cluster{Cluster: v.Cluster}}} + case EventCreateSecret: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventUpdateSecret: + if v.OldSecret != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}, OldObject: &Object{Object: &Object_Secret{Secret: v.OldSecret}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + } + case EventDeleteSecret: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Secret{Secret: v.Secret}}} + case EventCreateConfig: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventUpdateConfig: + if v.OldConfig != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}, OldObject: &Object{Object: &Object_Config{Config: v.OldConfig}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Config{Config: v.Config}}} + } + case EventDeleteConfig: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Config{Config: v.Config}}} + case EventCreateResource: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventUpdateResource: + if v.OldResource != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}, OldObject: &Object{Object: &Object_Resource{Resource: v.OldResource}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + } + case EventDeleteResource: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Resource{Resource: v.Resource}}} + case EventCreateExtension: + return &WatchMessage_Event{Action: WatchActionKindCreate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + case EventUpdateExtension: + if v.OldExtension != nil { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}, OldObject: &Object{Object: &Object_Extension{Extension: v.OldExtension}}} + } else { + return &WatchMessage_Event{Action: WatchActionKindUpdate, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + case EventDeleteExtension: + return &WatchMessage_Event{Action: WatchActionKindRemove, Object: &Object{Object: &Object_Extension{Extension: v.Extension}}} + } + return nil +} + +func ConvertWatchArgs(entries []*WatchRequest_WatchEntry) ([]Event, error) { + var events []Event + for _, entry := range entries { + var newEvents []Event + var err error + switch entry.Kind { + case "": + return nil, errNoKindSpecified + case "node": + newEvents, err = ConvertNodeWatch(entry.Action, entry.Filters) + case "service": + newEvents, err = ConvertServiceWatch(entry.Action, entry.Filters) + case "task": + newEvents, err = ConvertTaskWatch(entry.Action, entry.Filters) + case "network": + newEvents, err = ConvertNetworkWatch(entry.Action, entry.Filters) + case "cluster": + newEvents, err = ConvertClusterWatch(entry.Action, entry.Filters) + case "secret": + 
newEvents, err = ConvertSecretWatch(entry.Action, entry.Filters) + case "config": + newEvents, err = ConvertConfigWatch(entry.Action, entry.Filters) + case "extension": + newEvents, err = ConvertExtensionWatch(entry.Action, entry.Filters) + default: + newEvents, err = ConvertResourceWatch(entry.Action, entry.Filters, entry.Kind) + } + if err != nil { + return nil, err + } + events = append(events, newEvents...) + } + return events, nil +} + +func (this *Meta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Meta{`, + `Version:` + strings.Replace(strings.Replace(this.Version.String(), "Version", "Version", 1), `&`, ``, 1) + `,`, + `CreatedAt:` + strings.Replace(fmt.Sprintf("%v", this.CreatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `UpdatedAt:` + strings.Replace(fmt.Sprintf("%v", this.UpdatedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Description:` + strings.Replace(fmt.Sprintf("%v", this.Description), "NodeDescription", "NodeDescription", 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `ManagerStatus:` + strings.Replace(fmt.Sprintf("%v", this.ManagerStatus), "ManagerStatus", "ManagerStatus", 1) + `,`, + `Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Attachments:` + strings.Replace(fmt.Sprintf("%v", this.Attachments), "NetworkAttachment", "NetworkAttachment", 1) + `,`, + `VXLANUDPPort:` + fmt.Sprintf("%v", this.VXLANUDPPort) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`, + `UpdateStatus:` + strings.Replace(fmt.Sprintf("%v", this.UpdateStatus), "UpdateStatus", "UpdateStatus", 1) + `,`, + `PreviousSpec:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpec), "ServiceSpec", "ServiceSpec", 1) + `,`, + `PendingDelete:` + fmt.Sprintf("%v", this.PendingDelete) + `,`, + `SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`, + `PreviousSpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.PreviousSpecVersion), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Endpoint{`, + `Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "EndpointSpec", "EndpointSpec", 1) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + 
		`VirtualIPs:` + strings.Replace(fmt.Sprintf("%v", this.VirtualIPs), "Endpoint_VirtualIP", "Endpoint_VirtualIP", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Endpoint_VirtualIP) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Endpoint_VirtualIP{`,
+		`NetworkID:` + fmt.Sprintf("%v", this.NetworkID) + `,`,
+		`Addr:` + fmt.Sprintf("%v", this.Addr) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Task) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Task{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`,
+		`ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`,
+		`Slot:` + fmt.Sprintf("%v", this.Slot) + `,`,
+		`NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
+		`Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+		`ServiceAnnotations:` + strings.Replace(strings.Replace(this.ServiceAnnotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+		`Status:` + strings.Replace(strings.Replace(this.Status.String(), "TaskStatus", "TaskStatus", 1), `&`, ``, 1) + `,`,
+		`DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`,
+		`Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachment", "NetworkAttachment", 1) + `,`,
+		`Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "Endpoint", "Endpoint", 1) + `,`,
+		`LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`,
+		`SpecVersion:` + strings.Replace(fmt.Sprintf("%v", this.SpecVersion), "Version", "Version", 1) + `,`,
+		`AssignedGenericResources:` + strings.Replace(fmt.Sprintf("%v", this.AssignedGenericResources), "GenericResource", "GenericResource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NetworkAttachment) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts))
+	for k, _ := range this.DriverAttachmentOpts {
+		keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k)
+	}
+	sortkeys.Strings(keysForDriverAttachmentOpts)
+	mapStringForDriverAttachmentOpts := "map[string]string{"
+	for _, k := range keysForDriverAttachmentOpts {
+		mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k])
+	}
+	mapStringForDriverAttachmentOpts += "}"
+	s := strings.Join([]string{`&NetworkAttachment{`,
+		`Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`,
+		`Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`,
+		`Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`,
+		`DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Network) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Network{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkSpec", "NetworkSpec", 1), `&`, ``, 1) + `,`,
+		`DriverState:` + strings.Replace(fmt.Sprintf("%v", this.DriverState), "Driver", "Driver", 1) + `,`,
+		`IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`,
+		`PendingDelete:` + fmt.Sprintf("%v", this.PendingDelete) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Cluster) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForBlacklistedCertificates := make([]string, 0, len(this.BlacklistedCertificates))
+	for k, _ := range this.BlacklistedCertificates {
+		keysForBlacklistedCertificates = append(keysForBlacklistedCertificates, k)
+	}
+	sortkeys.Strings(keysForBlacklistedCertificates)
+	mapStringForBlacklistedCertificates := "map[string]*BlacklistedCertificate{"
+	for _, k := range keysForBlacklistedCertificates {
+		mapStringForBlacklistedCertificates += fmt.Sprintf("%v: %v,", k, this.BlacklistedCertificates[k])
+	}
+	mapStringForBlacklistedCertificates += "}"
+	s := strings.Join([]string{`&Cluster{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterSpec", "ClusterSpec", 1), `&`, ``, 1) + `,`,
+		`RootCA:` + strings.Replace(strings.Replace(this.RootCA.String(), "RootCA", "RootCA", 1), `&`, ``, 1) + `,`,
+		`NetworkBootstrapKeys:` + strings.Replace(fmt.Sprintf("%v", this.NetworkBootstrapKeys), "EncryptionKey", "EncryptionKey", 1) + `,`,
+		`EncryptionKeyLamportClock:` + fmt.Sprintf("%v", this.EncryptionKeyLamportClock) + `,`,
+		`BlacklistedCertificates:` + mapStringForBlacklistedCertificates + `,`,
+		`UnlockKeys:` + strings.Replace(fmt.Sprintf("%v", this.UnlockKeys), "EncryptionKey", "EncryptionKey", 1) + `,`,
+		`FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`,
+		`DefaultAddressPool:` + fmt.Sprintf("%v", this.DefaultAddressPool) + `,`,
+		`SubnetSize:` + fmt.Sprintf("%v", this.SubnetSize) + `,`,
+		`VXLANUDPPort:` + fmt.Sprintf("%v", this.VXLANUDPPort) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Secret) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Secret{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SecretSpec", "SecretSpec", 1), `&`, ``, 1) + `,`,
+		`Internal:` + fmt.Sprintf("%v", this.Internal) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Config) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Config{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+		`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ConfigSpec", "ConfigSpec", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Resource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Resource{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1) + `,`,
+		`Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf4.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Extension) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Extension{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Meta:` + strings.Replace(strings.Replace(this.Meta.String(), "Meta", "Meta", 1), `&`, ``, 1)
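These `String()` methods are all instances of one generated template: join backtick-labelled fields with `strings.Join`, and use `strings.Replace` to strip the leading `&` from embedded (non-pointer) messages so they print inline. A toy illustration of the same pattern, with made-up `meta`/`node` types rather than the generated ones:

```go
package main

import (
	"fmt"
	"strings"
)

type meta struct{ CreatedAt string }

func (m meta) String() string { return "&meta{CreatedAt:" + m.CreatedAt + "}" }

type node struct {
	ID   string
	Meta meta
}

// String follows the generated pattern: join labelled fields and strip the
// leading `&` from the embedded struct so it prints inline.
func (n node) String() string {
	return strings.Join([]string{`&node{`,
		`ID:` + fmt.Sprintf("%v", n.ID) + `,`,
		`Meta:` + strings.Replace(n.Meta.String(), `&`, ``, 1) + `,`,
		`}`,
	}, "")
}

func main() {
	fmt.Println(node{ID: "n1", Meta: meta{CreatedAt: "2019"}})
	// &node{ID:n1,Meta:meta{CreatedAt:2019},}
}
```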
+ `,`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `}`, + }, "") + return s +} +func valueToStringObjects(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Meta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Meta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CreatedAt == nil { + m.CreatedAt = &google_protobuf.Timestamp{} + } + if err := m.CreatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAt == nil { + m.UpdatedAt = &google_protobuf.Timestamp{} + } + if err := m.UpdatedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
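From here on, every `Unmarshal` method repeats the same inlined loop to decode protocol-buffer varints: seven payload bits per byte, least-significant group first, with the high bit as a continuation flag. A standalone sketch of that loop (a hypothetical helper, not part of the generated file):

```go
package main

import (
	"errors"
	"fmt"
)

// decodeUvarint mirrors the inlined loops above: accumulate 7 payload bits
// per byte, little-endian, until a byte with the high bit clear terminates
// the varint. Returns the value and the number of bytes consumed.
func decodeUvarint(data []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(data); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64") // ErrIntOverflowObjects above
		}
		b := data[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected EOF in varint") // io.ErrUnexpectedEOF above
}

func main() {
	// 300 encodes as 0xAC 0x02: 0x2C (44) plus 0x02 shifted left by 7 (256).
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```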
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Description == nil { + m.Description = &NodeDescription{} + } + if err := m.Description.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF 
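Each field begins with a key that is itself a varint: the low three bits are the wire type and the remaining bits the field number, exactly as in `fieldNum := int32(wire >> 3)` and `wireType := int(wire & 0x7)` above. Wire type 2 fields such as `ID` are then read as a varint length followed by that many payload bytes, with the `postIndex` bounds check guarding the slice. A small sketch using the stdlib's `encoding/binary.Uvarint`, which behaves like the hand-rolled loops:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// Decode one string field the way the generated code does: read the key
// varint, split it into field number and wire type, then (for wire type 2)
// read a length varint and slice out that many bytes.
func main() {
	// Field 1, wire type 2 (length-delimited): key = 1<<3|2 = 0x0A,
	// then length 2 and the bytes "id".
	buf := []byte{0x0A, 0x02, 'i', 'd'}

	key, n := binary.Uvarint(buf) // stdlib equivalent of the inlined loops
	fieldNum := int32(key >> 3)
	wireType := int(key & 0x7)

	length, m := binary.Uvarint(buf[n:])
	payload := buf[n+m : n+m+int(length)] // bounds-checked via postIndex above

	fmt.Println(fieldNum, wireType, string(payload)) // 1 2 id
}
```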
+ } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagerStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ManagerStatus == nil { + m.ManagerStatus = &ManagerStatus{} + } + if err := m.ManagerStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Attachment == nil { + m.Attachment = &NetworkAttachment{} + } + if err := m.Attachment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attachments = append(m.Attachments, &NetworkAttachment{}) + if err := m.Attachments[len(m.Attachments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VXLANUDPPort", wireType) + } + m.VXLANUDPPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VXLANUDPPort |= (uint32(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdateStatus == nil { + m.UpdateStatus = &UpdateStatus{} + } + if err := m.UpdateStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpec == nil { + m.PreviousSpec = &ServiceSpec{} + } + if err := m.PreviousSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingDelete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PendingDelete = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreviousSpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreviousSpecVersion == nil { + m.PreviousSpecVersion = &Version{} + } + if err := m.PreviousSpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Spec == nil { + m.Spec = &EndpointSpec{} + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VirtualIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VirtualIPs = append(m.VirtualIPs, &Endpoint_VirtualIP{}) + if err := m.VirtualIPs[len(m.VirtualIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoint_VirtualIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VirtualIP: wiretype end group for non-group") + } + if fieldNum <= 0 
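Repeated message fields like `Ports` and `VirtualIPs` above carry no element count on the wire; each occurrence of the field contributes one element, which the generated code decodes by appending a zero value and unmarshaling into the last slot. A toy version of that append-then-fill pattern, with a hypothetical `portConfig` type:

```go
package main

import "fmt"

type portConfig struct{ Name string }

func main() {
	var ports []*portConfig
	// One loop turn stands in for one occurrence of the field on the wire.
	for _, name := range []string{"http", "dns"} {
		ports = append(ports, &portConfig{})
		ports[len(ports)-1].Name = name // stands in for Unmarshal into the new element
	}
	fmt.Println(len(ports), ports[0].Name, ports[1].Name) // 2 http dns
}
```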
{ + return fmt.Errorf("proto: VirtualIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Task) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Task: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Task: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServiceAnnotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + m.DesiredState = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredState |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &NetworkAttachment{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Endpoint == nil { + m.Endpoint = &Endpoint{} + } + if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LogDriver == nil { + m.LogDriver = &Driver{} + } + if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpecVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.SpecVersion == nil { + m.SpecVersion = &Version{} + } + if err := m.SpecVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AssignedGenericResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AssignedGenericResources = append(m.AssignedGenericResources, &GenericResource{}) + if err := m.AssignedGenericResources[len(m.AssignedGenericResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Network == nil { + m.Network = &Network{} + } + if err := m.Network.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DriverAttachmentOpts[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Network) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + 
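`DriverAttachmentOpts` shows how proto3 maps are encoded: each map entry is a nested message whose field 1 is the key and field 2 the value, decoded entry by entry into the Go map, as in the `mapkey`/`mapvalue` loop above. A minimal sketch of decoding one such entry, assuming well-formed input and string key/value only:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// decodeMapEntry decodes one map<string,string> entry the way the generated
// loop does: field 1 is the key, field 2 the value, both length-delimited.
// Hypothetical and minimal: no error handling for malformed input.
func decodeMapEntry(entry []byte) (key, value string) {
	i := 0
	for i < len(entry) {
		tag, n := binary.Uvarint(entry[i:]) // field key varint
		i += n
		length, m := binary.Uvarint(entry[i:]) // string length varint
		i += m
		s := string(entry[i : i+int(length)])
		i += int(length)
		if tag>>3 == 1 {
			key = s
		} else {
			value = s
		}
	}
	return key, value
}

func main() {
	// field 1 = "foo" (key 0x0A), field 2 = "bar" (key 0x12).
	entry := []byte{0x0A, 0x03, 'f', 'o', 'o', 0x12, 0x03, 'b', 'a', 'r'}
	k, v := decodeMapEntry(entry)
	fmt.Println(k, v) // foo bar
}
```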
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Network: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Network: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverState == nil { + m.DriverState = &Driver{} + } + if err := m.DriverState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := 
m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingDelete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.PendingDelete = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Cluster) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Cluster: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Cluster: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootCA", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx 
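The `default:` branches delegate to `skipObjects` so that fields with unknown numbers are stepped over rather than rejected, which is what keeps older decoders compatible with newer schemas. A hedged sketch of such a skip for the two wire types this file actually uses (0, varint, and 2, length-delimited); the real `skipObjects` handles more cases:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// skipField returns how many bytes one field's body occupies (its key varint
// already consumed), so unknown fields can be stepped over as in the
// default: branches above.
func skipField(wireType int, data []byte) (int, error) {
	switch wireType {
	case 0: // varint: scan to the byte with the high bit clear
		for i, b := range data {
			if b < 0x80 {
				return i + 1, nil
			}
		}
		return 0, errors.New("unexpected EOF")
	case 2: // length-delimited: length varint plus payload
		length, n := binary.Uvarint(data)
		if n <= 0 || n+int(length) > len(data) {
			return 0, errors.New("invalid length")
		}
		return n + int(length), nil
	default:
		return 0, fmt.Errorf("unsupported wire type %d", wireType)
	}
}

func main() {
	skip, err := skipField(2, []byte{0x03, 'a', 'b', 'c', 0xFF})
	fmt.Println(skip, err) // 4 <nil>
}
```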
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RootCA.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NetworkBootstrapKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NetworkBootstrapKeys = append(m.NetworkBootstrapKeys, &EncryptionKey{}) + if err := m.NetworkBootstrapKeys[len(m.NetworkBootstrapKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionKeyLamportClock", wireType) + } + m.EncryptionKeyLamportClock = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EncryptionKeyLamportClock |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BlacklistedCertificates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BlacklistedCertificates == nil { + m.BlacklistedCertificates = make(map[string]*BlacklistedCertificate) + } + var mapkey string + var mapvalue *BlacklistedCertificate + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthObjects + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return 
ErrInvalidLengthObjects + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthObjects + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BlacklistedCertificate{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.BlacklistedCertificates[mapkey] = mapvalue + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnlockKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnlockKeys = append(m.UnlockKeys, &EncryptionKey{}) + if err := m.UnlockKeys[len(m.UnlockKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddressPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddressPool = append(m.DefaultAddressPool, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SubnetSize", wireType) + } + m.SubnetSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SubnetSize |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field VXLANUDPPort", wireType) + } + m.VXLANUDPPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.VXLANUDPPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Config) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Config: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Config: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + 
if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Payload == nil { + m.Payload = &google_protobuf4.Any{} + } + if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Extension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: Extension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Extension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowObjects + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthObjects + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipObjects(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthObjects + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipObjects(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= 
l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthObjects + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowObjects + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipObjects(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthObjects = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowObjects = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) } + +var fileDescriptorObjects = []byte{ + // 1643 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcd, 0x73, 0x23, 0x47, + 0x15, 0xdf, 0x91, 0x67, 0xf5, 0xf1, 0xf4, 0xc1, 0xd2, 0x31, 0x66, 0x56, 0x18, 0xc9, 0x28, 0x15, + 0x6a, 0x2b, 0xb5, 0x25, 0x87, 0x25, 0x80, 0xd7, 0x10, 0x12, 0xc9, 0x32, 0x1b, 0x55, 0xb2, 0x59, + 0x57, 0x3b, 0x71, 0x72, 0x1b, 0x5a, 0x33, 0x6d, 0xed, 0xa0, 0xd1, 0xf4, 0xd4, 0x74, 0x4b, 0x41, + 0x9c, 0x38, 0x9b, 0x3f, 0xc0, 0x37, 0x0e, 0xc0, 0x3f, 0xc1, 0x85, 0x03, 0x07, 0x6a, 0xb9, 0x71, + 0xa2, 0x38, 0xb9, 0x58, 0xfd, 0x17, 0xdc, 0xa8, 0xee, 0xe9, 0x91, 0xc6, 0xd6, 0xf8, 0x8b, 0xda, + 0x72, 0x71, 0x72, 0x7f, 0xfc, 0x7e, 0xaf, 0xdf, 0x7b, 0xf3, 0xbe, 0x2c, 0x78, 0x3c, 0xf4, 0xc4, + 0xcb, 0xc9, 0xa0, 0xed, 0xb0, 0xf1, 0xb6, 0xcb, 0x9c, 0x11, 0x8d, 0xb6, 0xf9, 0xd7, 0x24, 0x1a, + 0x8f, 0x3c, 0xb1, 0x4d, 0x42, 0x6f, 0x9b, 0x0d, 0x7e, 0x45, 0x1d, 0xc1, 0xdb, 0x61, 0xc4, 0x04, + 0x43, 0x28, 0x86, 0xb4, 0x13, 0x48, 0x7b, 0xfa, 0x83, 0xfa, 0xbb, 0xd7, 0x48, 0x10, 0xb3, 0x90, + 0x6a, 0xfe, 0xb5, 0x58, 0x1e, 0x52, 0x27, 0xc1, 0x36, 0x87, 0x8c, 0x0d, 0x7d, 0xba, 0xad, 0x76, + 0x83, 0xc9, 0xf1, 0xb6, 0xf0, 0xc6, 0x94, 0x0b, 0x32, 0x0e, 0x35, 0x60, 0x7d, 0xc8, 0x86, 0x4c, + 0x2d, 0xb7, 0xe5, 0x4a, 0x9f, 0x3e, 0xbc, 0x48, 0x23, 0xc1, 0x4c, 0x5f, 0xfd, 0xe4, 0x8a, 0xd7, + 0x17, 0xf0, 0xd0, 0x9f, 0x0c, 0xbd, 0x40, 0xff, 0x89, 0x89, 0xad, 0x3f, 0x1b, 0x60, 0x3e, 0xa7, + 0x82, 0xa0, 0x9f, 0x42, 0x61, 0x4a, 0x23, 0xee, 0xb1, 0xc0, 0x32, 0xb6, 0x8c, 0x47, 0xe5, 0x27, + 0xdf, 0x69, 0xaf, 0x7a, 0xa4, 0x7d, 0x14, 0x43, 0xba, 0xe6, 0xab, 0xb3, 0xe6, 0x3d, 0x9c, 0x30, + 0xd0, 0x53, 0x00, 0x27, 0xa2, 0x44, 0x50, 0xd7, 0x26, 0xc2, 0xca, 0x29, 0x7e, 0xbd, 0x1d, 0xab, + 0xdb, 0x4e, 0xde, 0x6f, 0x7f, 0x9e, 0x58, 0x89, 0x4b, 0x1a, 0xdd, 0x11, 0x92, 0x3a, 0x09, 0xdd, + 0x84, 0xba, 0x76, 0x3d, 0x55, 0xa3, 0x3b, 0xa2, 0xf5, 0xa7, 0xfb, 0x60, 0x7e, 0xc6, 0x5c, 0x8a, + 0x36, 0x20, 0xe7, 0xb9, 0x4a, 0xed, 0x52, 0x37, 0x3f, 0x3f, 0x6b, 0xe6, 0xfa, 0x3d, 0x9c, 
0xf3, + 0x5c, 0xf4, 0x04, 0xcc, 0x31, 0x15, 0x44, 0x2b, 0x64, 0x65, 0x19, 0x24, 0x6d, 0xd7, 0xd6, 0x28, + 0x2c, 0xfa, 0x31, 0x98, 0xf2, 0x53, 0x69, 0x4d, 0x36, 0xb3, 0x38, 0xf2, 0xcd, 0xc3, 0x90, 0x3a, + 0x09, 0x4f, 0xe2, 0xd1, 0x3e, 0x94, 0x5d, 0xca, 0x9d, 0xc8, 0x0b, 0x85, 0xf4, 0xa1, 0xa9, 0xe8, + 0x6f, 0x5f, 0x46, 0xef, 0x2d, 0xa1, 0x38, 0xcd, 0x43, 0x3f, 0x83, 0x3c, 0x17, 0x44, 0x4c, 0xb8, + 0x75, 0x5f, 0x49, 0x68, 0x5c, 0xaa, 0x80, 0x42, 0x69, 0x15, 0x34, 0x07, 0x7d, 0x0c, 0xb5, 0x31, + 0x09, 0xc8, 0x90, 0x46, 0xb6, 0x96, 0x92, 0x57, 0x52, 0xbe, 0x97, 0x69, 0x7a, 0x8c, 0x8c, 0x05, + 0xe1, 0xea, 0x38, 0xbd, 0x45, 0x7d, 0x00, 0x22, 0x04, 0x71, 0x5e, 0x8e, 0x69, 0x20, 0xac, 0x82, + 0x92, 0xf2, 0x4e, 0xa6, 0x2e, 0x54, 0x7c, 0xcd, 0xa2, 0x51, 0x67, 0x01, 0xee, 0xe6, 0x2c, 0x03, + 0xa7, 0xc8, 0xe8, 0x19, 0x94, 0x1d, 0x1a, 0x09, 0xef, 0xd8, 0x73, 0x88, 0xa0, 0x56, 0x51, 0xc9, + 0x6a, 0x66, 0xc9, 0xda, 0x5b, 0xc2, 0xb4, 0x61, 0x69, 0x26, 0x7a, 0x0f, 0xcc, 0x88, 0xf9, 0xd4, + 0x2a, 0x6d, 0x19, 0x8f, 0x6a, 0x97, 0x7f, 0x1a, 0xcc, 0x7c, 0x8a, 0x15, 0x52, 0x3e, 0xbd, 0x54, + 0x84, 0x5b, 0xb0, 0xb5, 0x76, 0x63, 0x33, 0x70, 0x9a, 0x89, 0x5a, 0x50, 0x39, 0xfa, 0xea, 0xd3, + 0xce, 0x67, 0x5f, 0xf4, 0x0e, 0x0e, 0x58, 0x24, 0xac, 0xf2, 0x96, 0xf1, 0xa8, 0x8a, 0xcf, 0x9d, + 0xed, 0x6e, 0x9c, 0x9c, 0xb6, 0x10, 0x3c, 0x28, 0x1a, 0x0f, 0x0c, 0x15, 0x8b, 0xc6, 0x7b, 0xc6, + 0x57, 0xc6, 0x2f, 0x8d, 0xd6, 0x1f, 0x4d, 0x28, 0x1c, 0xd2, 0x68, 0xea, 0x39, 0x6f, 0x36, 0x52, + 0x9f, 0x9e, 0x8b, 0xd4, 0x4c, 0x87, 0xea, 0x67, 0x57, 0x82, 0x75, 0x07, 0x8a, 0x34, 0x70, 0x43, + 0xe6, 0x05, 0x42, 0x47, 0x6a, 0xa6, 0x37, 0xf7, 0x35, 0x06, 0x2f, 0xd0, 0x68, 0x1f, 0xaa, 0x71, + 0x02, 0xda, 0xe7, 0xc2, 0x74, 0x2b, 0x8b, 0xfe, 0x85, 0x02, 0xea, 0xf8, 0xaa, 0x4c, 0x52, 0x3b, + 0xd4, 0x83, 0x6a, 0x18, 0xd1, 0xa9, 0xc7, 0x26, 0xdc, 0x56, 0x46, 0xe4, 0x6f, 0x64, 0x04, 0xae, + 0x24, 0x2c, 0xb9, 0x43, 0xef, 0x40, 0x2d, 0xa4, 0x81, 0xeb, 0x05, 0x43, 0xdb, 0xa5, 0x3e, 0x15, + 0x54, 0x05, 0x6a, 0x11, 0x57, 0xf5, 0x69, 0x4f, 0x1d, 0xa2, 0x9f, 0x43, 0x45, 0xbe, 0x61, 0x27, + 0xf5, 0x0d, 0xae, 0xad, 0x6f, 0xb8, 0x2c, 0x09, 0x7a, 0x83, 0x5e, 0xc0, 0xb7, 0xce, 0x29, 0xbb, + 0x10, 0x54, 0xbe, 0x5e, 0xd0, 0x5b, 0x69, 0x85, 0xf5, 0xe1, 0x2e, 0x3a, 0x39, 0x6d, 0xd5, 0xa0, + 0x92, 0x8e, 0x94, 0xd6, 0xef, 0x73, 0x50, 0x4c, 0xfc, 0x8d, 0xde, 0xd7, 0x9f, 0xd6, 0xb8, 0xdc, + 0xb9, 0x09, 0x56, 0xb9, 0x25, 0xfe, 0xaa, 0xef, 0xc3, 0xfd, 0x90, 0x45, 0x82, 0x5b, 0x39, 0x15, + 0xe7, 0x99, 0xa5, 0x43, 0x46, 0xea, 0x1e, 0x0b, 0x8e, 0xbd, 0x21, 0x8e, 0xc1, 0xe8, 0x4b, 0x28, + 0x4f, 0xbd, 0x48, 0x4c, 0x88, 0x6f, 0x7b, 0x21, 0xb7, 0xd6, 0x14, 0xf7, 0xfb, 0x57, 0x3d, 0xd9, + 0x3e, 0x8a, 0xf1, 0xfd, 0x83, 0x6e, 0x6d, 0x7e, 0xd6, 0x84, 0xc5, 0x96, 0x63, 0xd0, 0xa2, 0xfa, + 0x21, 0xaf, 0x3f, 0x87, 0xd2, 0xe2, 0x06, 0x3d, 0x06, 0x08, 0xe2, 0x14, 0xb3, 0x17, 0x09, 0x50, + 0x9d, 0x9f, 0x35, 0x4b, 0x3a, 0xf1, 0xfa, 0x3d, 0x5c, 0xd2, 0x80, 0xbe, 0x8b, 0x10, 0x98, 0xc4, + 0x75, 0x23, 0x95, 0x0e, 0x25, 0xac, 0xd6, 0xad, 0xdf, 0x15, 0xc0, 0xfc, 0x9c, 0xf0, 0xd1, 0x5d, + 0x57, 0x7b, 0xf9, 0xe6, 0x4a, 0x02, 0x3d, 0x06, 0xe0, 0x71, 0x58, 0x4a, 0x73, 0xcc, 0xa5, 0x39, + 0x3a, 0x58, 0xa5, 0x39, 0x1a, 0x10, 0x9b, 0xc3, 0x7d, 0x26, 0x54, 0xae, 0x98, 0x58, 0xad, 0xd1, + 0xdb, 0x50, 0x08, 0x98, 0xab, 0xe8, 0x79, 0x45, 0x87, 0xf9, 0x59, 0x33, 0x2f, 0xeb, 0x57, 0xbf, + 0x87, 0xf3, 0xf2, 0xaa, 0xef, 0xaa, 0xfa, 0x15, 0x04, 0x4c, 0x10, 0xd9, 0x1b, 0xb8, 0x2e, 0xc3, + 0x99, 0x49, 0xd2, 0x59, 0xc2, 0x92, 0xd2, 0x99, 0x62, 0xa2, 0x23, 0x78, 0x2b, 0xd1, 0x37, 0x2d, + 0xb0, 0x78, 0x1b, 
0x81, 0x48, 0x4b, 0x48, 0xdd, 0xa4, 0xda, 0x55, 0xe9, 0xf2, 0x76, 0xa5, 0x3c, + 0x98, 0xd5, 0xae, 0xba, 0x50, 0x75, 0x29, 0xf7, 0x22, 0xea, 0xaa, 0x6a, 0x42, 0x55, 0x66, 0xd6, + 0x9e, 0x7c, 0xf7, 0x2a, 0x21, 0x14, 0x57, 0x34, 0x47, 0xed, 0x50, 0x07, 0x8a, 0x3a, 0x6e, 0xb8, + 0x55, 0xbe, 0x4d, 0x7d, 0x5f, 0xd0, 0xce, 0x55, 0xc3, 0xca, 0xad, 0xaa, 0xe1, 0x53, 0x00, 0x9f, + 0x0d, 0x6d, 0x37, 0xf2, 0xa6, 0x34, 0xb2, 0xaa, 0x7a, 0x78, 0xc9, 0xe0, 0xf6, 0x14, 0x02, 0x97, + 0x7c, 0x36, 0x8c, 0x97, 0x2b, 0x45, 0xa9, 0x76, 0xcb, 0xa2, 0x44, 0xa0, 0x4e, 0x38, 0xf7, 0x86, + 0x01, 0x75, 0xed, 0x21, 0x0d, 0x68, 0xe4, 0x39, 0x76, 0x44, 0x39, 0x9b, 0x44, 0x0e, 0xe5, 0xd6, + 0x37, 0x94, 0x27, 0x32, 0xc7, 0x8f, 0x67, 0x31, 0x18, 0x6b, 0x2c, 0xb6, 0x12, 0x31, 0x17, 0x2e, + 0xf8, 0x6e, 0xfd, 0xe4, 0xb4, 0xb5, 0x01, 0xeb, 0xe9, 0x32, 0xb5, 0x63, 0x7c, 0x64, 0x7c, 0x6c, + 0x1c, 0x18, 0xad, 0xbf, 0xe6, 0xe0, 0x9b, 0x2b, 0x3e, 0x45, 0x3f, 0x82, 0x82, 0xf6, 0xea, 0x55, + 0x43, 0xa4, 0xe6, 0xe1, 0x04, 0x8b, 0x36, 0xa1, 0x24, 0x53, 0x9c, 0x72, 0x4e, 0xe3, 0xe2, 0x55, + 0xc2, 0xcb, 0x03, 0x64, 0x41, 0x81, 0xf8, 0x1e, 0x91, 0x77, 0x6b, 0xea, 0x2e, 0xd9, 0xa2, 0x09, + 0x6c, 0xc4, 0xae, 0xb7, 0x97, 0xbd, 0xda, 0x66, 0xa1, 0xe0, 0x96, 0xa9, 0xec, 0xff, 0xf0, 0x46, + 0x91, 0xa0, 0x3f, 0xce, 0xf2, 0xe0, 0x45, 0x28, 0xf8, 0x7e, 0x20, 0xa2, 0x19, 0x5e, 0x77, 0x33, + 0xae, 0xea, 0xcf, 0xe0, 0xe1, 0xa5, 0x14, 0xf4, 0x00, 0xd6, 0x46, 0x74, 0x16, 0x97, 0x27, 0x2c, + 0x97, 0x68, 0x1d, 0xee, 0x4f, 0x89, 0x3f, 0xa1, 0xba, 0x9a, 0xc5, 0x9b, 0xdd, 0xdc, 0x8e, 0xd1, + 0xfa, 0x7b, 0x0e, 0x0a, 0x5a, 0x9d, 0xbb, 0x9e, 0x0c, 0xf4, 0xb3, 0x2b, 0x85, 0xed, 0x03, 0xa8, + 0x68, 0x97, 0xc6, 0x19, 0x69, 0x5e, 0x1b, 0xd3, 0xe5, 0x18, 0x1f, 0x67, 0xe3, 0x07, 0x60, 0x7a, + 0x21, 0x19, 0xeb, 0xa9, 0x20, 0xf3, 0xe5, 0xfe, 0x41, 0xe7, 0xf9, 0x8b, 0x30, 0x2e, 0x2c, 0xc5, + 0xf9, 0x59, 0xd3, 0x94, 0x07, 0x58, 0xd1, 0x32, 0x1a, 0x7a, 0x3e, 0xa3, 0xa1, 0x67, 0xf6, 0xcf, + 0xbf, 0xe5, 0xa1, 0xb0, 0xe7, 0x4f, 0xb8, 0xa0, 0xd1, 0x5d, 0xfb, 0x52, 0x3f, 0xbb, 0xe2, 0xcb, + 0x3d, 0x28, 0x44, 0x8c, 0x09, 0xdb, 0x21, 0x57, 0xb9, 0x11, 0x33, 0x26, 0xf6, 0x3a, 0xdd, 0x9a, + 0x24, 0xca, 0x16, 0x10, 0xef, 0x71, 0x5e, 0x52, 0xf7, 0x08, 0xfa, 0x12, 0x36, 0x92, 0xc6, 0x39, + 0x60, 0x4c, 0x70, 0x11, 0x91, 0xd0, 0x1e, 0xd1, 0x99, 0x9c, 0xbc, 0xd6, 0x2e, 0x1b, 0xed, 0xf7, + 0x03, 0x27, 0x9a, 0x29, 0x1f, 0x7f, 0x42, 0x67, 0x78, 0x5d, 0x0b, 0xe8, 0x26, 0xfc, 0x4f, 0xe8, + 0x8c, 0xa3, 0x0f, 0x61, 0x93, 0x2e, 0x60, 0x52, 0xa2, 0xed, 0x93, 0xb1, 0x1c, 0x09, 0x6c, 0xc7, + 0x67, 0xce, 0x48, 0x79, 0xde, 0xc4, 0x0f, 0x69, 0x5a, 0xd4, 0xa7, 0x31, 0x62, 0x4f, 0x02, 0x10, + 0x07, 0x6b, 0xe0, 0x13, 0x67, 0xe4, 0x7b, 0x5c, 0xfe, 0xf7, 0x96, 0x9a, 0xd4, 0x65, 0x63, 0x91, + 0xba, 0xed, 0x5c, 0xe1, 0xad, 0x76, 0x77, 0xc9, 0x4d, 0xcd, 0xfd, 0x3a, 0xf1, 0xbe, 0x3d, 0xc8, + 0xbe, 0x45, 0x5d, 0x28, 0x4f, 0x02, 0xf9, 0x7c, 0xec, 0x83, 0xd2, 0x4d, 0x7d, 0x00, 0x31, 0x4b, + 0x59, 0xbe, 0x09, 0xe6, 0xb1, 0x1c, 0x75, 0x64, 0xb7, 0x29, 0xc6, 0x31, 0xf8, 0x8b, 0xfe, 0xc1, + 0x21, 0x56, 0xa7, 0xa8, 0x0d, 0xc8, 0xa5, 0xc7, 0x64, 0xe2, 0x8b, 0x4e, 0x5c, 0x82, 0x0e, 0x18, + 0xf3, 0x55, 0x6b, 0x29, 0xe1, 0x8c, 0x1b, 0xd4, 0x00, 0xe0, 0x93, 0x41, 0x40, 0xc5, 0xa1, 0xf7, + 0x1b, 0xaa, 0xfa, 0x47, 0x15, 0xa7, 0x4e, 0x56, 0xfe, 0x75, 0xa8, 0xae, 0xfe, 0xeb, 0x50, 0x9f, + 0xc2, 0xe6, 0x55, 0xee, 0xc8, 0x28, 0x2a, 0x1f, 0xa5, 0x8b, 0x4a, 0xf9, 0xc9, 0xbb, 0x59, 0x1e, + 0xc8, 0x16, 0x99, 0x2a, 0x40, 0x99, 0x89, 0xf4, 0x17, 0x03, 0xf2, 0x87, 0xd4, 0x89, 0xa8, 0x78, + 0xa3, 0x79, 0xb4, 0x73, 0x2e, 0x8f, 0x1a, 
0xd9, 0x83, 0xbe, 0x7c, 0x75, 0x25, 0x8d, 0xea, 0x50, + 0xf4, 0x02, 0x41, 0xa3, 0x80, 0xf8, 0x2a, 0x8f, 0x8a, 0x78, 0xb1, 0xcf, 0x34, 0xe0, 0x0f, 0x06, + 0xe4, 0xe3, 0x11, 0xf7, 0xae, 0x0d, 0x88, 0x5f, 0xbd, 0x68, 0x40, 0xa6, 0x92, 0xff, 0x31, 0xa0, + 0x98, 0x74, 0xda, 0x37, 0xaa, 0xe6, 0x85, 0x91, 0x71, 0xed, 0x7f, 0x1e, 0x19, 0x11, 0x98, 0x23, + 0x2f, 0xd0, 0xc3, 0x2d, 0x56, 0x6b, 0xd4, 0x86, 0x42, 0x48, 0x66, 0x3e, 0x23, 0xae, 0xae, 0xf0, + 0xeb, 0x2b, 0xbf, 0xd4, 0x74, 0x82, 0x19, 0x4e, 0x40, 0xbb, 0xeb, 0x27, 0xa7, 0xad, 0x07, 0x50, + 0x4b, 0x5b, 0xfe, 0xd2, 0x68, 0xfd, 0xd3, 0x80, 0xd2, 0xfe, 0xaf, 0x05, 0x0d, 0xd4, 0x20, 0xf3, + 0x7f, 0x69, 0xfc, 0xd6, 0xea, 0xaf, 0x39, 0xa5, 0x73, 0x3f, 0xd4, 0x64, 0x7d, 0xd4, 0xae, 0xf5, + 0xea, 0x75, 0xe3, 0xde, 0xbf, 0x5e, 0x37, 0xee, 0xfd, 0x76, 0xde, 0x30, 0x5e, 0xcd, 0x1b, 0xc6, + 0x3f, 0xe6, 0x0d, 0xe3, 0xdf, 0xf3, 0x86, 0x31, 0xc8, 0x2b, 0xff, 0xfc, 0xf0, 0xbf, 0x01, 0x00, + 0x00, 0xff, 0xff, 0xc3, 0x37, 0x09, 0x2f, 0x94, 0x14, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/objects.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/objects.proto new file mode 100644 index 00000000..b91c27ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/objects.proto @@ -0,0 +1,504 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "github.com/docker/swarmkit/api/specs.proto"; +import "google/protobuf/timestamp.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// This file contains definitions for all first-class objects in the cluster +// API. Such types typically have a corresponding specification, with the +// naming XXXSpec, though not all do. + +// Meta contains metadata about objects. Every object contains a meta field. +message Meta { + // Version tracks the current version of the object. + Version version = 1 [(gogoproto.nullable) = false]; + + // Object timestamps. + // Note: can't use stdtime because these fields are nullable. + google.protobuf.Timestamp created_at = 2; + google.protobuf.Timestamp updated_at = 3; +} + +// Node provides the internal node state as seen by the cluster. +message Node { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + role: true + membership: true + } + }; + + // ID specifies the identity of the node. + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + NodeSpec spec = 3 [(gogoproto.nullable) = false]; + + // Description encapsulates the properties of the Node as reported by the + // agent. + NodeDescription description = 4; + + // Status provides the current status of the node, as seen by the manager. + NodeStatus status = 5 [(gogoproto.nullable) = false]; + + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus manager_status = 6; + + // DEPRECATED: Use Attachments to find the ingress network. + // The node attachment to the ingress network. + NetworkAttachment attachment = 7 [deprecated=true]; + + // Certificate is the TLS certificate issued for the node, if any. 
+ Certificate certificate = 8 [(gogoproto.nullable) = false]; + + // Role is the *observed* role for this node. It differs from the + // desired role set in Node.Spec.Role because the role here is only + // updated after the Raft member list has been reconciled with the + // desired role from the spec. + // + // This field represents the current reconciled state. If an action is + // to be performed, first verify the role in the cert. This field only + // shows the privilege level that the CA would currently grant when + // issuing or renewing the node's certificate. + NodeRole role = 9; + + // Attachments enumerates the network attachments for the node to set up an + // endpoint on the node to be used for load balancing. Each overlay + // network, including the ingress network, will have a NetworkAttachment. + repeated NetworkAttachment attachments = 10; + + // VXLANUDPPort specifies the UDP port for VXLAN traffic. + // This information is passed from the cluster object to individual nodes. + uint32 VXLANUDPPort = 11; +} + +message Service { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + ServiceSpec spec = 3 [(gogoproto.nullable) = false]; + + // SpecVersion versions Spec, to identify changes in the spec. Note that + // this is not directly comparable to the service's Version. + Version spec_version = 10; + + // PreviousSpec is the previous service spec that was in place before + // "Spec". + ServiceSpec previous_spec = 6; + + // PreviousSpecVersion versions PreviousSpec. Note that this is not + // directly comparable to the service's Version. + Version previous_spec_version = 11; + + // Runtime state of the service endpoint. This may be different + // from the spec version because the user may not have entered + // the optional fields like node_port or virtual_ip and it + // could be auto-allocated by the system. + Endpoint endpoint = 4; + + // UpdateStatus contains the status of an update, if one is in + // progress. + UpdateStatus update_status = 5; + + // PendingDelete indicates that this service's deletion has been requested. + // Services, as well as all service-level resources, can only be deleted + // after all of the service's containers have properly shut down. + // When a user requests a deletion, we just flip this flag; the + // deallocator will take it from there - it will start monitoring + // this service's tasks, and proceed to delete the service itself (and + // potentially its associated resources also marked for deletion) when + // all of its tasks are gone. + bool pending_delete = 7; +} + +// Endpoint specifies all the network parameters required to +// correctly discover and load balance a service. +message Endpoint { + EndpointSpec spec = 1; + + // Runtime state of the exposed ports, which may carry + // auto-allocated swarm ports in addition to the + // user-configured information. + repeated PortConfig ports = 2; + + // An endpoint attachment specifies the data that the process + // of attaching an endpoint to a network creates. + + // VirtualIP specifies a set of networks this endpoint will be attached to + // and the IP addresses the target service will be made available under. + message VirtualIP { + // NetworkID for which this endpoint attachment was created. 
+ string network_id = 1; + + // A virtual IP is used to address this service at the IP + // layer; the client can use it to send requests to + // this service. A DNS A/AAAA query on the service + // name might return this IP to the client. This is + // strictly a logical IP and there may not be any + // interfaces assigned this IP address or any route + // created for this address. More than one address may + // be present to accommodate both IPv4 and IPv6. + string addr = 2; + } + + // VirtualIPs specifies the IP addresses under which this endpoint will be + // made available. + repeated VirtualIP virtual_ips = 3 [(gogoproto.customname) = "VirtualIPs"]; +} + +// Task specifies the parameters for implementing a Spec. A task is effectively +// immutable and idempotent. Once it is dispatched to a node, it will not be +// dispatched to another node. +message Task { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + service_id: true + node_id: true + slot: true + desired_state: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec defines the desired state of the task as specified by the user. + // The system will honor this and will *never* modify it. + TaskSpec spec = 3 [(gogoproto.nullable) = false]; + + // SpecVersion is copied from Service, to identify which version of the + // spec this task has. Note that this is not directly comparable to the + // service's Version. + Version spec_version = 14; + + // ServiceID indicates the service under which this task is orchestrated. This + // should almost always be set. + string service_id = 4; + + // Slot is the service slot number for a task. + // For example, if a replicated service has replicas = 2, there will be a + // task with slot = 1, and another with slot = 2. + uint64 slot = 5; + + // NodeID indicates the node to which the task is assigned. If this field + // is empty or not set, the task is unassigned. + string node_id = 6; + + // Annotations defines the names and labels for the runtime, as set by + // the cluster manager. + // + // As a fallback, if this field has an empty name, the runtime will + // allocate a unique name for the actual container. + // + // NOTE(stevvooe): This preserves the ability for us to make naming + // decisions for tasks in the orchestrator, though this is left empty for now. + Annotations annotations = 7 [(gogoproto.nullable) = false]; + + // ServiceAnnotations is a direct copy of the service name and labels when + // this task is created. + // + // Labels set here will *not* be propagated to the runtime target, such as a + // container. Use labels on the runtime target for that purpose. + Annotations service_annotations = 8 [(gogoproto.nullable) = false]; + + TaskStatus status = 9 [(gogoproto.nullable) = false]; + + // DesiredState is the target state for the task. It is set to + // TaskStateRunning when a task is first created, and changed to + // TaskStateShutdown if the manager wants to terminate the task. This field + // is only written by the manager. + TaskState desired_state = 10; + + // List of network attachments by the task. + repeated NetworkAttachment networks = 11; + + // A copy of the runtime state of the service endpoint from the Service + // object, to be distributed to agents as part of the task. + Endpoint endpoint = 12; + + // LogDriver specifies the selected log driver to use for the task. Agent + // processes should always favor the value in this field. 
+ // + // If present in the TaskSpec, this will be a copy of that value. The + // orchestrator may choose to insert a value here, which should be honored, + // such as a cluster default or policy-based value. + // + // If not present, the daemon's default will be used. + Driver log_driver = 13; + + repeated GenericResource assigned_generic_resources = 15; +} + +// NetworkAttachment specifies the network parameters of attachment to +// a single network by an object such as a task or node. +message NetworkAttachment { + // Network state as a whole becomes part of the object so that + // it is always available for use in agents, and agents + // don't have any other dependency during execution. + Network network = 1; + + // List of IPv4/IPv6 addresses that are assigned to the object + // as part of getting attached to this network. + repeated string addresses = 2; + + // List of aliases by which a task is resolved in a network. + repeated string aliases = 3; + + // Map of all the driver attachment options for this network. + map<string,string> driver_attachment_opts = 4; +} + +message Network { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + NetworkSpec spec = 3 [(gogoproto.nullable) = false]; + + // Driver-specific operational state provided by the network driver. + Driver driver_state = 4; + + // Runtime state of IPAM options. This may not reflect the + // ipam options from NetworkSpec. + IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"]; + + // PendingDelete indicates that this network's deletion has been requested. + // Services, as well as all service-level resources, can only be deleted + // after all of the service's containers have properly shut down. + // When a user requests a deletion of this network, we just flip this flag; + // the deallocator will take it from there - it will start monitoring + // the services that still use this network, and proceed to delete + // this network when all of these services are gone. + bool pending_delete = 6; +} + +// Cluster provides global cluster settings. +message Cluster { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + ClusterSpec spec = 3 [(gogoproto.nullable) = false]; + + // RootCA contains key material for the root CA. + RootCA root_ca = 4 [(gogoproto.nullable)=false, (gogoproto.customname) = "RootCA"]; + + // Symmetric encryption key distributed by the lead manager. Used by agents + // for securing network bootstrapping and communication. + repeated EncryptionKey network_bootstrap_keys = 5; + + // Logical clock used to timestamp every key. It allows other managers + // and agents to unambiguously identify the older key to be deleted when + // a new key is allocated on key rotation. + uint64 encryption_key_lamport_clock = 6; + + // BlacklistedCertificates tracks certificates that should no longer + // be honored. 
It's a mapping from CN -> BlacklistedCertificate. + // Certificates in this map should effectively be blacklisted. + map<string, BlacklistedCertificate> blacklisted_certificates = 8; + + // UnlockKeys defines the keys that lock node data at rest. For example, + // this would contain the key-encrypting key (KEK) that will encrypt the + // manager TLS keys at rest and the raft encryption keys at rest. + // If the key is empty, the node will be unlocked (will not require a key + // to start up from a shut-down state). + repeated EncryptionKey unlock_keys = 9; + + // FIPS specifies whether this cluster should be in FIPS mode. This changes + // the format of the join tokens, and nodes that are not FIPS-enabled should + // reject joining the cluster. Nodes that report themselves to be non-FIPS + // should be rejected from the cluster. + bool fips = 10 [(gogoproto.customname) = "FIPS"]; + + // This field specifies default subnet pools for global scope networks. If + // unspecified, Docker will use the predefined subnets as it does on older releases. + // Format example: {"20.20.0.0/16", "20.20.0.0/16"} + repeated string defaultAddressPool = 11; + + // This flag specifies the default subnet size of global scope networks by giving + // the length of the subnet masks for every such network. + uint32 subnetSize = 12; + + // VXLANUDPPort specifies the UDP port for VXLAN traffic. + uint32 VXLANUDPPort = 13; +} + +// Secret represents a secret that should be passed to a container or a node, +// and is immutable. +message Secret { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec contains the actual secret data, as well as any context around the + // secret data that the user provides. + SecretSpec spec = 3 [(gogoproto.nullable) = false]; + + // Whether the secret is an internal secret (not set by a user) or not. + bool internal = 4; +} + +// Config represents a set of configuration files that should be passed to a +// container. +message Config { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + // Spec contains the actual config data, as well as any context around the + // config data that the user provides. + ConfigSpec spec = 3 [(gogoproto.nullable) = false]; +} + +// Resource is a top-level object with externally defined content and indexing. +// SwarmKit can serve as a store for these objects without understanding their +// meanings. +message Resource { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + kind: true + } + }; + + string id = 1 [(gogoproto.customname) = "ID"]; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + Annotations annotations = 3 [(gogoproto.nullable) = false]; + + // Kind identifies this class of object. It is essentially a namespace + // to keep IDs or indices from colliding between unrelated Resource + // objects. This must correspond to the name of an Extension. + string kind = 4; + + // Payload bytes. This data is not interpreted in any way by SwarmKit. + // By convention, it should be a marshalled protocol buffers message. 
+ google.protobuf.Any payload = 5; +} + +// Extension declares a type of "resource" object. This message provides some +// metadata about such objects. +message Extension { + option (docker.protobuf.plugin.store_object) = { + watch_selectors: { + id: true + id_prefix: true + name: true + name_prefix: true + custom: true + custom_prefix: true + } + }; + + string id = 1 [(gogoproto.customname) = "ID"]; + + Meta meta = 2 [(gogoproto.nullable) = false]; + + Annotations annotations = 3 [(gogoproto.nullable) = false]; + + string description = 4; + + // TODO(aaronl): Add optional indexing capabilities. It would be + // extremely useful to be able to automatically introspect protobuf, json, + // etc. objects and automatically index them based on a schema and field + // paths defined here. + // + //oneof Schema { + // google.protobuf.Descriptor protobuf = 1; + // bytes json = 2; + //} + // + //Schema schema = 5; + // + // // Indices, with values expressed as Go templates. + //repeated IndexEntry index_templates = 6; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/raft.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/raft.pb.go new file mode 100644 index 00000000..a32a6001 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/raft.pb.go @@ -0,0 +1,4008 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/raft.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import raftpb "github.com/coreos/etcd/raft/raftpb" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" +// skipping weak import docker_protobuf_plugin "github.com/docker/swarmkit/protobuf/plugin" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import context "golang.org/x/net/context" +import grpc "google.golang.org/grpc" + +import raftselector "github.com/docker/swarmkit/manager/raftselector" +import codes "google.golang.org/grpc/codes" +import status "google.golang.org/grpc/status" +import metadata "google.golang.org/grpc/metadata" +import peer "google.golang.org/grpc/peer" +import rafttime "time" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// StoreActionKind defines the operation to take on the store for the target of +// a storage action. 
+type StoreActionKind int32 + +const ( + StoreActionKindUnknown StoreActionKind = 0 + StoreActionKindCreate StoreActionKind = 1 + StoreActionKindUpdate StoreActionKind = 2 + StoreActionKindRemove StoreActionKind = 3 +) + +var StoreActionKind_name = map[int32]string{ + 0: "UNKNOWN", + 1: "STORE_ACTION_CREATE", + 2: "STORE_ACTION_UPDATE", + 3: "STORE_ACTION_REMOVE", +} +var StoreActionKind_value = map[string]int32{ + "UNKNOWN": 0, + "STORE_ACTION_CREATE": 1, + "STORE_ACTION_UPDATE": 2, + "STORE_ACTION_REMOVE": 3, +} + +func (x StoreActionKind) String() string { + return proto.EnumName(StoreActionKind_name, int32(x)) +} +func (StoreActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type RaftMember struct { + // RaftID specifies the internal ID used by the manager in a raft context; it can never be + // modified and is used only for informational purposes. + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // NodeID is the node's ID. + NodeID string `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // Addr specifies the address of the member. + Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"` + // Status provides the current status of the manager from the perspective of another manager. + Status RaftMemberStatus `protobuf:"bytes,4,opt,name=status" json:"status"` +} + +func (m *RaftMember) Reset() { *m = RaftMember{} } +func (*RaftMember) ProtoMessage() {} +func (*RaftMember) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{0} } + +type JoinRequest struct { + // Addr specifies the address of the member. + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *JoinRequest) Reset() { *m = JoinRequest{} } +func (*JoinRequest) ProtoMessage() {} +func (*JoinRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{1} } + +type JoinResponse struct { + // RaftID is the ID assigned to the new member. + RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` + // Members is the membership set of the cluster. + Members []*RaftMember `protobuf:"bytes,2,rep,name=members" json:"members,omitempty"` + // RemovedMembers is a list of members that have been removed from + // the cluster, so the new node can avoid communicating with them. 
+ RemovedMembers []uint64 `protobuf:"varint,3,rep,name=removed_members,json=removedMembers" json:"removed_members,omitempty"` +} + +func (m *JoinResponse) Reset() { *m = JoinResponse{} } +func (*JoinResponse) ProtoMessage() {} +func (*JoinResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{2} } + +type LeaveRequest struct { + Node *RaftMember `protobuf:"bytes,1,opt,name=node" json:"node,omitempty"` +} + +func (m *LeaveRequest) Reset() { *m = LeaveRequest{} } +func (*LeaveRequest) ProtoMessage() {} +func (*LeaveRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{3} } + +type LeaveResponse struct { +} + +func (m *LeaveResponse) Reset() { *m = LeaveResponse{} } +func (*LeaveResponse) ProtoMessage() {} +func (*LeaveResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{4} } + +type ProcessRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *ProcessRaftMessageRequest) Reset() { *m = ProcessRaftMessageRequest{} } +func (*ProcessRaftMessageRequest) ProtoMessage() {} +func (*ProcessRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{5} } + +type ProcessRaftMessageResponse struct { +} + +func (m *ProcessRaftMessageResponse) Reset() { *m = ProcessRaftMessageResponse{} } +func (*ProcessRaftMessageResponse) ProtoMessage() {} +func (*ProcessRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{6} } + +// Raft message streaming request. +type StreamRaftMessageRequest struct { + Message *raftpb.Message `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"` +} + +func (m *StreamRaftMessageRequest) Reset() { *m = StreamRaftMessageRequest{} } +func (*StreamRaftMessageRequest) ProtoMessage() {} +func (*StreamRaftMessageRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{7} } + +// Raft message streaming response. +type StreamRaftMessageResponse struct { +} + +func (m *StreamRaftMessageResponse) Reset() { *m = StreamRaftMessageResponse{} } +func (*StreamRaftMessageResponse) ProtoMessage() {} +func (*StreamRaftMessageResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{8} } + +type ResolveAddressRequest struct { + // raft_id is the ID to resolve to an address. 
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"` +} + +func (m *ResolveAddressRequest) Reset() { *m = ResolveAddressRequest{} } +func (*ResolveAddressRequest) ProtoMessage() {} +func (*ResolveAddressRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{9} } + +type ResolveAddressResponse struct { + // Addr specifies the address of the member + Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *ResolveAddressResponse) Reset() { *m = ResolveAddressResponse{} } +func (*ResolveAddressResponse) ProtoMessage() {} +func (*ResolveAddressResponse) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{10} } + +// Contains one of many protobuf encoded objects to replicate +// over the raft backend with a request ID to track when the +// action is effectively applied +type InternalRaftRequest struct { + ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Action []StoreAction `protobuf:"bytes,2,rep,name=action" json:"action"` +} + +func (m *InternalRaftRequest) Reset() { *m = InternalRaftRequest{} } +func (*InternalRaftRequest) ProtoMessage() {} +func (*InternalRaftRequest) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{11} } + +// StoreAction defines a target and operation to apply on the storage system. +type StoreAction struct { + Action StoreActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.StoreActionKind" json:"action,omitempty"` + // Types that are valid to be assigned to Target: + // *StoreAction_Node + // *StoreAction_Service + // *StoreAction_Task + // *StoreAction_Network + // *StoreAction_Cluster + // *StoreAction_Secret + // *StoreAction_Resource + // *StoreAction_Extension + // *StoreAction_Config + Target isStoreAction_Target `protobuf_oneof:"target"` +} + +func (m *StoreAction) Reset() { *m = StoreAction{} } +func (*StoreAction) ProtoMessage() {} +func (*StoreAction) Descriptor() ([]byte, []int) { return fileDescriptorRaft, []int{12} } + +type isStoreAction_Target interface { + isStoreAction_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type StoreAction_Node struct { + Node *Node `protobuf:"bytes,2,opt,name=node,oneof"` +} +type StoreAction_Service struct { + Service *Service `protobuf:"bytes,3,opt,name=service,oneof"` +} +type StoreAction_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type StoreAction_Network struct { + Network *Network `protobuf:"bytes,5,opt,name=network,oneof"` +} +type StoreAction_Cluster struct { + Cluster *Cluster `protobuf:"bytes,6,opt,name=cluster,oneof"` +} +type StoreAction_Secret struct { + Secret *Secret `protobuf:"bytes,7,opt,name=secret,oneof"` +} +type StoreAction_Resource struct { + Resource *Resource `protobuf:"bytes,8,opt,name=resource,oneof"` +} +type StoreAction_Extension struct { + Extension *Extension `protobuf:"bytes,9,opt,name=extension,oneof"` +} +type StoreAction_Config struct { + Config *Config `protobuf:"bytes,10,opt,name=config,oneof"` +} + +func (*StoreAction_Node) isStoreAction_Target() {} +func (*StoreAction_Service) isStoreAction_Target() {} +func (*StoreAction_Task) isStoreAction_Target() {} +func (*StoreAction_Network) isStoreAction_Target() {} +func (*StoreAction_Cluster) isStoreAction_Target() {} +func (*StoreAction_Secret) isStoreAction_Target() {} +func (*StoreAction_Resource) isStoreAction_Target() {} +func (*StoreAction_Extension) isStoreAction_Target() {} +func (*StoreAction_Config) isStoreAction_Target() {} 
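The wrapper structs and marker methods above are how the generated code models the Target oneof: exactly one wrapper is assigned to Target at a time, and each getter that follows returns nil unless its case is active. As a minimal consumer-side sketch (illustrative only, not part of the vendored file; it assumes nothing beyond the generated types shown here), a type switch over Target is the idiomatic way to dispatch:

// targetKind reports which kind of object a StoreAction carries.
// Exactly one wrapper type is stored in Target; all other cases are nil.
func targetKind(a *StoreAction) string {
	switch a.Target.(type) {
	case *StoreAction_Node:
		return "node"
	case *StoreAction_Service:
		return "service"
	case *StoreAction_Task:
		return "task"
	case *StoreAction_Network:
		return "network"
	case *StoreAction_Cluster:
		return "cluster"
	case *StoreAction_Secret:
		return "secret"
	case *StoreAction_Resource:
		return "resource"
	case *StoreAction_Extension:
		return "extension"
	case *StoreAction_Config:
		return "config"
	default:
		return "unset"
	}
}

Combined with the Action field (a StoreActionKind), this is enough to route a replicated write, e.g. a StoreActionKindCreate carrying a *StoreAction_Node, to the right store table. The generated getters (GetNode, GetService, and so on) cover the same ground when only one or two cases matter; the type switch scales better when every target kind needs handling.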
+ +func (m *StoreAction) GetTarget() isStoreAction_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StoreAction) GetNode() *Node { + if x, ok := m.GetTarget().(*StoreAction_Node); ok { + return x.Node + } + return nil +} + +func (m *StoreAction) GetService() *Service { + if x, ok := m.GetTarget().(*StoreAction_Service); ok { + return x.Service + } + return nil +} + +func (m *StoreAction) GetTask() *Task { + if x, ok := m.GetTarget().(*StoreAction_Task); ok { + return x.Task + } + return nil +} + +func (m *StoreAction) GetNetwork() *Network { + if x, ok := m.GetTarget().(*StoreAction_Network); ok { + return x.Network + } + return nil +} + +func (m *StoreAction) GetCluster() *Cluster { + if x, ok := m.GetTarget().(*StoreAction_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *StoreAction) GetSecret() *Secret { + if x, ok := m.GetTarget().(*StoreAction_Secret); ok { + return x.Secret + } + return nil +} + +func (m *StoreAction) GetResource() *Resource { + if x, ok := m.GetTarget().(*StoreAction_Resource); ok { + return x.Resource + } + return nil +} + +func (m *StoreAction) GetExtension() *Extension { + if x, ok := m.GetTarget().(*StoreAction_Extension); ok { + return x.Extension + } + return nil +} + +func (m *StoreAction) GetConfig() *Config { + if x, ok := m.GetTarget().(*StoreAction_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*StoreAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _StoreAction_OneofMarshaler, _StoreAction_OneofUnmarshaler, _StoreAction_OneofSizer, []interface{}{ + (*StoreAction_Node)(nil), + (*StoreAction_Service)(nil), + (*StoreAction_Task)(nil), + (*StoreAction_Network)(nil), + (*StoreAction_Cluster)(nil), + (*StoreAction_Secret)(nil), + (*StoreAction_Resource)(nil), + (*StoreAction_Extension)(nil), + (*StoreAction_Config)(nil), + } +} + +func _StoreAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *StoreAction_Service: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *StoreAction_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *StoreAction_Network: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *StoreAction_Cluster: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *StoreAction_Secret: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *StoreAction_Resource: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *StoreAction_Extension: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *StoreAction_Config: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return 
fmt.Errorf("StoreAction.Target has unexpected type %T", x) + } + return nil +} + +func _StoreAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*StoreAction) + switch tag { + case 2: // target.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Node{msg} + return true, err + case 3: // target.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Service{msg} + return true, err + case 4: // target.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Task{msg} + return true, err + case 5: // target.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Network{msg} + return true, err + case 6: // target.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Cluster{msg} + return true, err + case 7: // target.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Secret{msg} + return true, err + case 8: // target.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Resource{msg} + return true, err + case 9: // target.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Extension{msg} + return true, err + case 10: // target.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Target = &StoreAction_Config{msg} + return true, err + default: + return false, nil + } +} + +func _StoreAction_OneofSizer(msg proto.Message) (n int) { + m := msg.(*StoreAction) + // target + switch x := m.Target.(type) { + case *StoreAction_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *StoreAction_Config: + 
s := proto.Size(x.Config) + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*RaftMember)(nil), "docker.swarmkit.v1.RaftMember") + proto.RegisterType((*JoinRequest)(nil), "docker.swarmkit.v1.JoinRequest") + proto.RegisterType((*JoinResponse)(nil), "docker.swarmkit.v1.JoinResponse") + proto.RegisterType((*LeaveRequest)(nil), "docker.swarmkit.v1.LeaveRequest") + proto.RegisterType((*LeaveResponse)(nil), "docker.swarmkit.v1.LeaveResponse") + proto.RegisterType((*ProcessRaftMessageRequest)(nil), "docker.swarmkit.v1.ProcessRaftMessageRequest") + proto.RegisterType((*ProcessRaftMessageResponse)(nil), "docker.swarmkit.v1.ProcessRaftMessageResponse") + proto.RegisterType((*StreamRaftMessageRequest)(nil), "docker.swarmkit.v1.StreamRaftMessageRequest") + proto.RegisterType((*StreamRaftMessageResponse)(nil), "docker.swarmkit.v1.StreamRaftMessageResponse") + proto.RegisterType((*ResolveAddressRequest)(nil), "docker.swarmkit.v1.ResolveAddressRequest") + proto.RegisterType((*ResolveAddressResponse)(nil), "docker.swarmkit.v1.ResolveAddressResponse") + proto.RegisterType((*InternalRaftRequest)(nil), "docker.swarmkit.v1.InternalRaftRequest") + proto.RegisterType((*StoreAction)(nil), "docker.swarmkit.v1.StoreAction") + proto.RegisterEnum("docker.swarmkit.v1.StoreActionKind", StoreActionKind_name, StoreActionKind_value) +} + +type authenticatedWrapperRaftServer struct { + local RaftServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftServer(local RaftServer, authorize func(context.Context, []string) error) RaftServer { + return &authenticatedWrapperRaftServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) +} + +func (p *authenticatedWrapperRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.StreamRaftMessage(stream) +} + +func (p *authenticatedWrapperRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) +} + +type authenticatedWrapperRaftMembershipServer struct { + local RaftMembershipServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperRaftMembershipServer(local RaftMembershipServer, authorize func(context.Context, []string) error) RaftMembershipServer { + return &authenticatedWrapperRaftMembershipServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Join(ctx, r) +} + +func (p *authenticatedWrapperRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-manager"}); err != nil { + return nil, err + } + return p.local.Leave(ctx, 
r) +} + +func (m *RaftMember) Copy() *RaftMember { + if m == nil { + return nil + } + o := &RaftMember{} + o.CopyFrom(m) + return o +} + +func (m *RaftMember) CopyFrom(src interface{}) { + + o := src.(*RaftMember) + *m = *o + deepcopy.Copy(&m.Status, &o.Status) +} + +func (m *JoinRequest) Copy() *JoinRequest { + if m == nil { + return nil + } + o := &JoinRequest{} + o.CopyFrom(m) + return o +} + +func (m *JoinRequest) CopyFrom(src interface{}) { + + o := src.(*JoinRequest) + *m = *o +} + +func (m *JoinResponse) Copy() *JoinResponse { + if m == nil { + return nil + } + o := &JoinResponse{} + o.CopyFrom(m) + return o +} + +func (m *JoinResponse) CopyFrom(src interface{}) { + + o := src.(*JoinResponse) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.RemovedMembers != nil { + m.RemovedMembers = make([]uint64, len(o.RemovedMembers)) + copy(m.RemovedMembers, o.RemovedMembers) + } + +} + +func (m *LeaveRequest) Copy() *LeaveRequest { + if m == nil { + return nil + } + o := &LeaveRequest{} + o.CopyFrom(m) + return o +} + +func (m *LeaveRequest) CopyFrom(src interface{}) { + + o := src.(*LeaveRequest) + *m = *o + if o.Node != nil { + m.Node = &RaftMember{} + deepcopy.Copy(m.Node, o.Node) + } +} + +func (m *LeaveResponse) Copy() *LeaveResponse { + if m == nil { + return nil + } + o := &LeaveResponse{} + o.CopyFrom(m) + return o +} + +func (m *LeaveResponse) CopyFrom(src interface{}) {} +func (m *ProcessRaftMessageResponse) Copy() *ProcessRaftMessageResponse { + if m == nil { + return nil + } + o := &ProcessRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *ProcessRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *StreamRaftMessageResponse) Copy() *StreamRaftMessageResponse { + if m == nil { + return nil + } + o := &StreamRaftMessageResponse{} + o.CopyFrom(m) + return o +} + +func (m *StreamRaftMessageResponse) CopyFrom(src interface{}) {} +func (m *ResolveAddressRequest) Copy() *ResolveAddressRequest { + if m == nil { + return nil + } + o := &ResolveAddressRequest{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressRequest) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressRequest) + *m = *o +} + +func (m *ResolveAddressResponse) Copy() *ResolveAddressResponse { + if m == nil { + return nil + } + o := &ResolveAddressResponse{} + o.CopyFrom(m) + return o +} + +func (m *ResolveAddressResponse) CopyFrom(src interface{}) { + + o := src.(*ResolveAddressResponse) + *m = *o +} + +func (m *InternalRaftRequest) Copy() *InternalRaftRequest { + if m == nil { + return nil + } + o := &InternalRaftRequest{} + o.CopyFrom(m) + return o +} + +func (m *InternalRaftRequest) CopyFrom(src interface{}) { + + o := src.(*InternalRaftRequest) + *m = *o + if o.Action != nil { + m.Action = make([]StoreAction, len(o.Action)) + for i := range m.Action { + deepcopy.Copy(&m.Action[i], &o.Action[i]) + } + } + +} + +func (m *StoreAction) Copy() *StoreAction { + if m == nil { + return nil + } + o := &StoreAction{} + o.CopyFrom(m) + return o +} + +func (m *StoreAction) CopyFrom(src interface{}) { + + o := src.(*StoreAction) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *StoreAction_Node: + v := StoreAction_Node{ + Node: &Node{}, + } + deepcopy.Copy(v.Node, o.GetNode()) + m.Target = &v + case *StoreAction_Service: + v := StoreAction_Service{ + Service: &Service{}, + } + deepcopy.Copy(v.Service, o.GetService()) + m.Target = &v 
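// Editor's aside (not part of the generated diff): a minimal sketch of the
// deep-copy contract this oneof switch implements. Copy allocates a fresh
// wrapper and a fresh inner message for whichever Target variant is set, so
// the clone never shares pointers with the source. The zero-valued Node is a
// placeholder; only the pointer identity matters here.
func storeActionCopySketch() bool {
	orig := &StoreAction{Target: &StoreAction_Node{Node: &Node{}}}
	clone := orig.Copy()
	// Expected: true — the clone's Node is a distinct allocation.
	return clone.GetNode() != orig.GetNode()
}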
+ case *StoreAction_Task: + v := StoreAction_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Target = &v + case *StoreAction_Network: + v := StoreAction_Network{ + Network: &Network{}, + } + deepcopy.Copy(v.Network, o.GetNetwork()) + m.Target = &v + case *StoreAction_Cluster: + v := StoreAction_Cluster{ + Cluster: &Cluster{}, + } + deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Target = &v + case *StoreAction_Secret: + v := StoreAction_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Target = &v + case *StoreAction_Resource: + v := StoreAction_Resource{ + Resource: &Resource{}, + } + deepcopy.Copy(v.Resource, o.GetResource()) + m.Target = &v + case *StoreAction_Extension: + v := StoreAction_Extension{ + Extension: &Extension{}, + } + deepcopy.Copy(v.Extension, o.GetExtension()) + m.Target = &v + case *StoreAction_Config: + v := StoreAction_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Target = &v + } + } + +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Raft service + +type RaftClient interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) + // ResolveAddress returns the address where the node with the given ID can be reached. + ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) +} + +type raftClient struct { + cc *grpc.ClientConn +} + +func NewRaftClient(cc *grpc.ClientConn) RaftClient { + return &raftClient{cc} +} + +func (c *raftClient) ProcessRaftMessage(ctx context.Context, in *ProcessRaftMessageRequest, opts ...grpc.CallOption) (*ProcessRaftMessageResponse, error) { + out := new(ProcessRaftMessageResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ProcessRaftMessage", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftClient) StreamRaftMessage(ctx context.Context, opts ...grpc.CallOption) (Raft_StreamRaftMessageClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Raft_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Raft/StreamRaftMessage", opts...) 
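// Editor's aside (not part of the generated code): a hedged sketch of how a
// caller drives this client-streaming RPC. The raft leader sends one or more
// StreamRaftMessageRequest chunks with Send, then calls CloseAndRecv to
// half-close the stream and wait for the single response. The conn and msgs
// arguments are assumed to be set up elsewhere.
func streamRaftMessageSketch(ctx context.Context, conn *grpc.ClientConn, msgs []*StreamRaftMessageRequest) error {
	stream, err := NewRaftClient(conn).StreamRaftMessage(ctx)
	if err != nil {
		return err
	}
	for _, m := range msgs {
		if err := stream.Send(m); err != nil {
			return err
		}
	}
	// CloseAndRecv closes the send direction and blocks until the server
	// returns the StreamRaftMessageResponse for the reassembled message.
	_, err = stream.CloseAndRecv()
	return err
}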
+ if err != nil { + return nil, err + } + x := &raftStreamRaftMessageClient{stream} + return x, nil +} + +type Raft_StreamRaftMessageClient interface { + Send(*StreamRaftMessageRequest) error + CloseAndRecv() (*StreamRaftMessageResponse, error) + grpc.ClientStream +} + +type raftStreamRaftMessageClient struct { + grpc.ClientStream +} + +func (x *raftStreamRaftMessageClient) Send(m *StreamRaftMessageRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageClient) CloseAndRecv() (*StreamRaftMessageResponse, error) { + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + m := new(StreamRaftMessageResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *raftClient) ResolveAddress(ctx context.Context, in *ResolveAddressRequest, opts ...grpc.CallOption) (*ResolveAddressResponse, error) { + out := new(ResolveAddressResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.Raft/ResolveAddress", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Raft service + +type RaftServer interface { + // ProcessRaftMessage sends a raft message to be processed on a raft member, it is + // called from the RaftMember willing to send a message to its destination ('To' field) + ProcessRaftMessage(context.Context, *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) + // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest + // to be processed on a raft member, returning a StreamRaftMessageResponse + // when processing of the streamed messages is complete. A single stream corresponds + // to a single raft message, which may be disassembled and streamed as individual messages. + // It is called from the Raft leader, which uses it to stream messages to a raft member. + StreamRaftMessage(Raft_StreamRaftMessageServer) error + // ResolveAddress returns the address where the node with the given ID can be reached. 
+ ResolveAddress(context.Context, *ResolveAddressRequest) (*ResolveAddressResponse, error) +} + +func RegisterRaftServer(s *grpc.Server, srv RaftServer) { + s.RegisterService(&_Raft_serviceDesc, srv) +} + +func _Raft_ProcessRaftMessage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ProcessRaftMessageRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ProcessRaftMessage(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ProcessRaftMessage", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ProcessRaftMessage(ctx, req.(*ProcessRaftMessageRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Raft_StreamRaftMessage_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(RaftServer).StreamRaftMessage(&raftStreamRaftMessageServer{stream}) +} + +type Raft_StreamRaftMessageServer interface { + SendAndClose(*StreamRaftMessageResponse) error + Recv() (*StreamRaftMessageRequest, error) + grpc.ServerStream +} + +type raftStreamRaftMessageServer struct { + grpc.ServerStream +} + +func (x *raftStreamRaftMessageServer) SendAndClose(m *StreamRaftMessageResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *raftStreamRaftMessageServer) Recv() (*StreamRaftMessageRequest, error) { + m := new(StreamRaftMessageRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func _Raft_ResolveAddress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ResolveAddressRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftServer).ResolveAddress(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.Raft/ResolveAddress", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftServer).ResolveAddress(ctx, req.(*ResolveAddressRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Raft_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Raft", + HandlerType: (*RaftServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ProcessRaftMessage", + Handler: _Raft_ProcessRaftMessage_Handler, + }, + { + MethodName: "ResolveAddress", + Handler: _Raft_ResolveAddress_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamRaftMessage", + Handler: _Raft_StreamRaftMessage_Handler, + ClientStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +// Client API for RaftMembership service + +type RaftMembershipClient interface { + // Join adds a RaftMember to the raft cluster. + Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. 
+ Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) +} + +type raftMembershipClient struct { + cc *grpc.ClientConn +} + +func NewRaftMembershipClient(cc *grpc.ClientConn) RaftMembershipClient { + return &raftMembershipClient{cc} +} + +func (c *raftMembershipClient) Join(ctx context.Context, in *JoinRequest, opts ...grpc.CallOption) (*JoinResponse, error) { + out := new(JoinResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Join", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *raftMembershipClient) Leave(ctx context.Context, in *LeaveRequest, opts ...grpc.CallOption) (*LeaveResponse, error) { + out := new(LeaveResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.RaftMembership/Leave", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for RaftMembership service + +type RaftMembershipServer interface { + // Join adds a RaftMember to the raft cluster. + Join(context.Context, *JoinRequest) (*JoinResponse, error) + // Leave removes a RaftMember from the raft cluster. + Leave(context.Context, *LeaveRequest) (*LeaveResponse, error) +} + +func RegisterRaftMembershipServer(s *grpc.Server, srv RaftMembershipServer) { + s.RegisterService(&_RaftMembership_serviceDesc, srv) +} + +func _RaftMembership_Join_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(JoinRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Join(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Join", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Join(ctx, req.(*JoinRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _RaftMembership_Leave_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LeaveRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(RaftMembershipServer).Leave(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.RaftMembership/Leave", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(RaftMembershipServer).Leave(ctx, req.(*LeaveRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _RaftMembership_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.RaftMembership", + HandlerType: (*RaftMembershipServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Join", + Handler: _RaftMembership_Join_Handler, + }, + { + MethodName: "Leave", + Handler: _RaftMembership_Leave_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/raft.proto", +} + +func (m *RaftMember) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMember) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.NodeID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, 
uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Status.Size())) + n1, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *JoinRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *JoinResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.RemovedMembers) > 0 { + for _, num := range m.RemovedMembers { + dAtA[i] = 0x18 + i++ + i = encodeVarintRaft(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *LeaveRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func (m *LeaveResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaveResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ProcessRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n3, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func (m *ProcessRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProcessRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *StreamRaftMessageRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) 
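// Editor's aside (not part of the generated file): the hard-coded bytes written
// by these MarshalTo methods (0x8, 0x12, 0x1a, 0x22, ...) are protobuf field
// tags, pre-computed by the generator as fieldNumber<<3 | wireType.
func protoTag(fieldNumber, wireType uint64) byte {
	return byte(fieldNumber<<3 | wireType)
}

// Matching the RaftMember fields above:
//   protoTag(1, 0) == 0x08 // RaftID, varint
//   protoTag(2, 2) == 0x12 // NodeID, length-delimited
//   protoTag(3, 2) == 0x1a // Addr, length-delimited
//   protoTag(4, 2) == 0x22 // Status, embedded message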
+ if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Message != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Message.Size())) + n4, err := m.Message.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} + +func (m *StreamRaftMessageResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamRaftMessageResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *ResolveAddressRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.RaftID)) + } + return i, nil +} + +func (m *ResolveAddressResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResolveAddressResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addr) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintRaft(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, msg := range m.Action { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StoreAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Action)) + } + if m.Target != nil { + nn5, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn5 + } + return i, nil +} + +func (m *StoreAction_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Node.Size())) + n6, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *StoreAction_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Service.Size())) + n7, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *StoreAction_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != 
nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Task.Size())) + n8, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *StoreAction_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Network.Size())) + n9, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *StoreAction_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Cluster.Size())) + n10, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *StoreAction_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Secret.Size())) + n11, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + return i, nil +} +func (m *StoreAction_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Resource.Size())) + n12, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *StoreAction_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Extension.Size())) + n13, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *StoreAction_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintRaft(dAtA, i, uint64(m.Config.Size())) + n14, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func encodeVarintRaft(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyRaftServer struct { + local RaftServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftServer(local RaftServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + 
remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftServer) ProcessRaftMessage(ctx context.Context, r *ProcessRaftMessageRequest) (*ProcessRaftMessageResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ProcessRaftMessage(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ProcessRaftMessage(modCtx, r) + } + return resp, err +} + +type Raft_StreamRaftMessageServerWrapper struct { + Raft_StreamRaftMessageServer + ctx context.Context +} + +func (s Raft_StreamRaftMessageServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyRaftServer) StreamRaftMessage(stream Raft_StreamRaftMessageServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Raft_StreamRaftMessageServerWrapper{ + Raft_StreamRaftMessageServer: stream, + ctx: ctx, + } + return p.local.StreamRaftMessage(streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewRaftClient(conn).StreamRaftMessage(ctx) + + if err != nil { + return err + } + + for { + msg, err := stream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := clientStream.Send(msg); err != nil { + return err + } + } + + reply, err := clientStream.CloseAndRecv() + if err != nil { + return err + } + + return stream.SendAndClose(reply) +} + +func (p *raftProxyRaftServer) ResolveAddress(ctx context.Context, r *ResolveAddressRequest) (*ResolveAddressResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + 
return nil, err + } + + resp, err := NewRaftClient(conn).ResolveAddress(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.ResolveAddress(ctx, r) + } + return nil, err + } + return NewRaftClient(conn).ResolveAddress(modCtx, r) + } + return resp, err +} + +type raftProxyRaftMembershipServer struct { + local RaftMembershipServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyRaftMembershipServer(local RaftMembershipServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) RaftMembershipServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyRaftMembershipServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyRaftMembershipServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyRaftMembershipServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyRaftMembershipServer) Join(ctx context.Context, r *JoinRequest) (*JoinResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Join(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Join(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), 
"connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Join(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Join(modCtx, r) + } + return resp, err +} + +func (p *raftProxyRaftMembershipServer) Leave(ctx context.Context, r *LeaveRequest) (*LeaveResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.Leave(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewRaftMembershipClient(conn).Leave(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.Leave(ctx, r) + } + return nil, err + } + return NewRaftMembershipClient(conn).Leave(modCtx, r) + } + return resp, err +} + +func (m *RaftMember) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovRaft(uint64(l)) + return n +} + +func (m *JoinRequest) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *JoinResponse) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + if len(m.RemovedMembers) > 0 { + for _, e := range m.RemovedMembers { + n += 1 + sovRaft(uint64(e)) + } + } + return n +} + +func (m *LeaveRequest) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *LeaveResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ProcessRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *ProcessRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *StreamRaftMessageRequest) Size() (n int) { + var l int + _ = l + if m.Message != nil { + l = m.Message.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *StreamRaftMessageResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *ResolveAddressRequest) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n += 1 + sovRaft(uint64(m.RaftID)) + } + return n +} + +func (m *ResolveAddressResponse) Size() (n int) { + var l int + _ = l + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func (m *InternalRaftRequest) Size() (n int) { + var l int + _ = l + if m.ID != 0 { + n += 1 + sovRaft(uint64(m.ID)) + } + if len(m.Action) > 0 { + for _, e := range m.Action { + l = e.Size() + n += 1 + l + sovRaft(uint64(l)) + } + } + return n +} + +func (m *StoreAction) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovRaft(uint64(m.Action)) + } + if m.Target != nil { 
+ n += m.Target.Size() + } + return n +} + +func (m *StoreAction_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} +func (m *StoreAction_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovRaft(uint64(l)) + } + return n +} + +func sovRaft(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozRaft(x uint64) (n int) { + return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RaftMember) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMember{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "RaftMemberStatus", "RaftMemberStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JoinRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinRequest{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *JoinResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinResponse{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `RemovedMembers:` + fmt.Sprintf("%v", this.RemovedMembers) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveRequest{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "RaftMember", "RaftMember", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaveResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaveResponse{`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ProcessRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ProcessRaftMessageResponse) String() string { + if this == nil { + return 
"nil" + } + s := strings.Join([]string{`&ProcessRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageRequest{`, + `Message:` + strings.Replace(fmt.Sprintf("%v", this.Message), "Message", "raftpb.Message", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StreamRaftMessageResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamRaftMessageResponse{`, + `}`, + }, "") + return s +} +func (this *ResolveAddressRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressRequest{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `}`, + }, "") + return s +} +func (this *ResolveAddressResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResolveAddressResponse{`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *InternalRaftRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InternalRaftRequest{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `Action:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Action), "StoreAction", "StoreAction", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Extension) String() string { + 
if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StoreAction_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreAction_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringRaft(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RaftMember) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMember: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMember: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
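// Editor's aside (not part of the generated file): every field decode above
// begins with the same varint loop — each byte contributes its low 7 bits,
// and a cleared high bit (b < 0x80) marks the final byte. A standalone sketch:
func decodeVarintSketch(dAtA []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, ErrIntOverflowRaft
		}
		if n >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n, nil
		}
	}
}

// Worked example: decodeVarintSketch([]byte{0xAC, 0x02}) returns 300 —
// 0xAC&0x7F = 44, plus 0x02<<7 = 256.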
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JoinResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JoinResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JoinResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := 
m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RemovedMembers = append(m.RemovedMembers, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RemovedMembers", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Node == nil { + m.Node = &RaftMember{} + } + if err := m.Node.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaveResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaveResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaveResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProcessRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProcessRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Message == nil { + m.Message = &raftpb.Message{} + } + if err := m.Message.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamRaftMessageResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamRaftMessageResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamRaftMessageResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType) + } + m.RaftID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RaftID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResolveAddressResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResolveAddressResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResolveAddressResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = append(m.Action, StoreAction{}) + if err := m.Action[len(m.Action)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (StoreActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Node{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Service{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
m.Target = &StoreAction_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Network{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Cluster{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Secret{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Resource{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Extension{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowRaft + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRaft + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Target = &StoreAction_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRaft(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthRaft + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRaft(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthRaft + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRaft + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipRaft(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/raft.proto", fileDescriptorRaft) } + +var fileDescriptorRaft = []byte{ + // 1015 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x96, 0xc1, 0x6e, 0x1b, 0x45, + 0x18, 0xc7, 0x77, 0xed, 0xad, 0xd3, 0x7c, 0x69, 0x93, 0x30, 0x25, 0x61, 0xb3, 0x2d, 0x8e, 0xbb, + 0x45, 0xc2, 0x09, 0xc9, 0x5a, 0x18, 0xa4, 0xa2, 0x42, 0x0f, 0x71, 0x62, 0x29, 0x26, 0xad, 0x53, + 0x6d, 0x12, 0xe8, 0x2d, 0xac, 0x77, 0x27, 0xee, 0x62, 0x7b, 0xc7, 0xcc, 0x8c, 0x1d, 0xb8, 0xa0, + 0x1e, 0x21, 0x2f, 0x00, 0x42, 0xaa, 0x38, 0xc0, 0xb9, 0x0f, 0xc0, 0x03, 0xa0, 0x88, 0x13, 0x37, + 0x38, 0x45, 0xd4, 0x0f, 0x00, 0xaf, 0x80, 0x66, 0x76, 0xd7, 0x31, 0xf6, 0xda, 0xf1, 0x81, 0x4b, + 0x32, 0xda, 0xf9, 0xfd, 0xbf, 0xff, 0x37, 0x33, 0xdf, 0x7c, 0x63, 0x58, 0xab, 0xfb, 0xfc, 0x59, 
+ 0xa7, 0x66, 0xb9, 0xa4, 0x55, 0xf0, 0x88, 0xdb, 0xc0, 0xb4, 0xc0, 0x4e, 0x1d, 0xda, 0x6a, 0xf8, + 0xbc, 0xe0, 0xb4, 0xfd, 0x02, 0x75, 0x4e, 0xb8, 0xd5, 0xa6, 0x84, 0x13, 0x84, 0xc2, 0x79, 0x2b, + 0x9e, 0xb7, 0xba, 0xef, 0x1a, 0x1b, 0x57, 0xc8, 0x49, 0xed, 0x73, 0xec, 0x72, 0x16, 0x46, 0x30, + 0xd6, 0xaf, 0xa0, 0xf9, 0x57, 0x6d, 0x1c, 0xb3, 0x9b, 0x03, 0xac, 0x4b, 0x28, 0x26, 0xac, 0x80, + 0xb9, 0xeb, 0xc9, 0x84, 0xe4, 0x9f, 0x76, 0x6d, 0x20, 0x39, 0xe3, 0xf5, 0x3a, 0xa9, 0x13, 0x39, + 0x2c, 0x88, 0x51, 0xf4, 0xf5, 0xfe, 0x04, 0x43, 0x49, 0xd4, 0x3a, 0x27, 0x85, 0x76, 0xb3, 0x53, + 0xf7, 0x83, 0xe8, 0x5f, 0x28, 0x34, 0x5f, 0xaa, 0x00, 0xb6, 0x73, 0xc2, 0x1f, 0xe3, 0x56, 0x0d, + 0x53, 0x74, 0x0f, 0x66, 0x84, 0xd7, 0xb1, 0xef, 0xe9, 0x6a, 0x4e, 0xcd, 0x6b, 0x25, 0xe8, 0x5d, + 0xac, 0x66, 0x04, 0x50, 0xd9, 0xb1, 0x33, 0x62, 0xaa, 0xe2, 0x09, 0x28, 0x20, 0x1e, 0x16, 0x50, + 0x2a, 0xa7, 0xe6, 0x67, 0x43, 0xa8, 0x4a, 0x3c, 0x2c, 0x20, 0x31, 0x55, 0xf1, 0x10, 0x02, 0xcd, + 0xf1, 0x3c, 0xaa, 0xa7, 0x05, 0x61, 0xcb, 0x31, 0x2a, 0x41, 0x86, 0x71, 0x87, 0x77, 0x98, 0xae, + 0xe5, 0xd4, 0xfc, 0x5c, 0xf1, 0x2d, 0x6b, 0x74, 0xa7, 0xad, 0xcb, 0x6c, 0x0e, 0x24, 0x5b, 0xd2, + 0xce, 0x2f, 0x56, 0x15, 0x3b, 0x52, 0x9a, 0x77, 0x61, 0xee, 0x63, 0xe2, 0x07, 0x36, 0xfe, 0xa2, + 0x83, 0x19, 0xef, 0xdb, 0xa8, 0x97, 0x36, 0xe6, 0x0f, 0x2a, 0xdc, 0x08, 0x19, 0xd6, 0x26, 0x01, + 0xc3, 0xd3, 0xad, 0xea, 0x03, 0x98, 0x69, 0x49, 0x5b, 0xa6, 0xa7, 0x72, 0xe9, 0xfc, 0x5c, 0x31, + 0x3b, 0x39, 0x3b, 0x3b, 0xc6, 0xd1, 0x3b, 0xb0, 0x40, 0x71, 0x8b, 0x74, 0xb1, 0x77, 0x1c, 0x47, + 0x48, 0xe7, 0xd2, 0x79, 0xad, 0x94, 0x5a, 0x54, 0xec, 0xf9, 0x68, 0x2a, 0x14, 0x31, 0xb3, 0x04, + 0x37, 0x1e, 0x61, 0xa7, 0x8b, 0xe3, 0x05, 0x14, 0x41, 0x13, 0x3b, 0x26, 0x13, 0xbb, 0xda, 0x53, + 0xb2, 0xe6, 0x02, 0xdc, 0x8c, 0x62, 0x84, 0x0b, 0x34, 0x1f, 0xc1, 0xca, 0x13, 0x4a, 0x5c, 0xcc, + 0x58, 0xc8, 0x32, 0xe6, 0xd4, 0xfb, 0x0e, 0x6b, 0x62, 0x61, 0xf2, 0x4b, 0x64, 0xb2, 0x60, 0x85, + 0x65, 0x65, 0xc5, 0x60, 0x3c, 0xff, 0x40, 0x7b, 0xfe, 0x9d, 0xa9, 0x98, 0x77, 0xc0, 0x48, 0x8a, + 0x16, 0x79, 0xed, 0x81, 0x7e, 0xc0, 0x29, 0x76, 0x5a, 0xff, 0x87, 0xd5, 0x6d, 0x58, 0x49, 0x08, + 0x16, 0x39, 0x7d, 0x04, 0x4b, 0x36, 0x66, 0xa4, 0xd9, 0xc5, 0x5b, 0x9e, 0x47, 0x45, 0x3a, 0x91, + 0xcd, 0x34, 0xe7, 0x69, 0x6e, 0xc0, 0xf2, 0xb0, 0x3a, 0x2a, 0x87, 0xa4, 0x9a, 0x69, 0xc2, 0xad, + 0x4a, 0xc0, 0x31, 0x0d, 0x9c, 0xa6, 0x88, 0x13, 0x3b, 0x2d, 0x43, 0xaa, 0x6f, 0x92, 0xe9, 0x5d, + 0xac, 0xa6, 0x2a, 0x3b, 0x76, 0xca, 0xf7, 0xd0, 0x43, 0xc8, 0x38, 0x2e, 0xf7, 0x49, 0x10, 0xd5, + 0xca, 0x6a, 0xd2, 0xb9, 0x1d, 0x70, 0x42, 0xf1, 0x96, 0xc4, 0xe2, 0x22, 0x0e, 0x45, 0xe6, 0xaf, + 0x1a, 0xcc, 0x0d, 0xcc, 0xa2, 0x0f, 0xfb, 0xe1, 0x84, 0xd5, 0x7c, 0xf1, 0xde, 0x15, 0xe1, 0xf6, + 0xfc, 0xc0, 0x8b, 0x83, 0x21, 0x2b, 0xaa, 0xa0, 0x94, 0xdc, 0x71, 0x3d, 0x49, 0x2a, 0xee, 0xe6, + 0xae, 0x12, 0x56, 0x0f, 0xba, 0x0f, 0x33, 0x0c, 0xd3, 0xae, 0xef, 0x62, 0x79, 0x39, 0xe7, 0x8a, + 0xb7, 0x13, 0xdd, 0x42, 0x64, 0x57, 0xb1, 0x63, 0x5a, 0x18, 0x71, 0x87, 0x35, 0xa2, 0xcb, 0x9b, + 0x68, 0x74, 0xe8, 0xb0, 0x86, 0x30, 0x12, 0x9c, 0x30, 0x0a, 0x30, 0x3f, 0x25, 0xb4, 0xa1, 0x5f, + 0x1b, 0x6f, 0x54, 0x0d, 0x11, 0x61, 0x14, 0xd1, 0x42, 0xe8, 0x36, 0x3b, 0x8c, 0x63, 0xaa, 0x67, + 0xc6, 0x0b, 0xb7, 0x43, 0x44, 0x08, 0x23, 0x1a, 0xbd, 0x0f, 0x19, 0x86, 0x5d, 0x8a, 0xb9, 0x3e, + 0x23, 0x75, 0x46, 0xf2, 0xca, 0x04, 0xb1, 0x2b, 0x5a, 0x8a, 0x1c, 0xa1, 0x07, 0x70, 0x9d, 0x62, + 0x46, 0x3a, 0xd4, 0xc5, 0xfa, 0x75, 0xa9, 0xbb, 0x93, 0x78, 0x0d, 0x23, 0x66, 0x57, 0xb1, 0xfb, + 0x3c, 0x7a, 0x08, 0xb3, 
0xf8, 0x4b, 0x8e, 0x03, 0x26, 0x0e, 0x6f, 0x56, 0x8a, 0xdf, 0x4c, 0x12,
+	0x97, 0x63, 0x68, 0x57, 0xb1, 0x2f, 0x15, 0x22, 0x61, 0x97, 0x04, 0x27, 0x7e, 0x5d, 0x87, 0xf1,
+	0x09, 0x6f, 0x4b, 0x42, 0x24, 0x1c, 0xb2, 0xa5, 0xeb, 0x90, 0xe1, 0x0e, 0xad, 0x63, 0xbe, 0xfe,
+	0x8f, 0x0a, 0x0b, 0x43, 0x75, 0x81, 0xde, 0x86, 0x99, 0xa3, 0xea, 0x5e, 0x75, 0xff, 0xd3, 0xea,
+	0xa2, 0x62, 0x18, 0x67, 0x2f, 0x72, 0xcb, 0x43, 0xc4, 0x51, 0xd0, 0x08, 0xc8, 0x69, 0x80, 0x8a,
+	0x70, 0xeb, 0xe0, 0x70, 0xdf, 0x2e, 0x1f, 0x6f, 0x6d, 0x1f, 0x56, 0xf6, 0xab, 0xc7, 0xdb, 0x76,
+	0x79, 0xeb, 0xb0, 0xbc, 0xa8, 0x1a, 0x2b, 0x67, 0x2f, 0x72, 0x4b, 0x43, 0xa2, 0x6d, 0x8a, 0x1d,
+	0x8e, 0x47, 0x34, 0x47, 0x4f, 0x76, 0x84, 0x26, 0x95, 0xa8, 0x39, 0x6a, 0x7b, 0x49, 0x1a, 0xbb,
+	0xfc, 0x78, 0xff, 0x93, 0xf2, 0x62, 0x3a, 0x51, 0x63, 0xcb, 0x76, 0x69, 0xbc, 0xf1, 0xcd, 0x4f,
+	0x59, 0xe5, 0x97, 0x9f, 0xb3, 0xc3, 0xab, 0x2b, 0xfe, 0x98, 0x06, 0x4d, 0xdc, 0x50, 0x74, 0xa6,
+	0x02, 0x1a, 0x6d, 0x53, 0x68, 0x33, 0x69, 0x07, 0xc7, 0x36, 0x47, 0xc3, 0x9a, 0x16, 0x8f, 0x7a,
+	0xd2, 0xd2, 0x6f, 0x2f, 0xff, 0xfe, 0x3e, 0xb5, 0x00, 0x37, 0x25, 0xbf, 0xd9, 0x72, 0x02, 0xa7,
+	0x8e, 0x29, 0xfa, 0x56, 0x85, 0xd7, 0x46, 0x1a, 0x19, 0xda, 0x48, 0xbe, 0xc6, 0xc9, 0xcd, 0xd3,
+	0xd8, 0x9c, 0x92, 0x9e, 0x98, 0x49, 0x5e, 0x45, 0x5f, 0xc3, 0xfc, 0x7f, 0x1b, 0x1f, 0x5a, 0x1b,
+	0x57, 0xce, 0x23, 0xad, 0xd5, 0x58, 0x9f, 0x06, 0x9d, 0x98, 0x41, 0xf1, 0x0f, 0x15, 0xe6, 0x2f,
+	0x9f, 0x2c, 0xf6, 0xcc, 0x6f, 0xa3, 0xcf, 0x40, 0x13, 0x0f, 0x32, 0x4a, 0x6c, 0x93, 0x03, 0xcf,
+	0xb9, 0x91, 0x1b, 0x0f, 0x4c, 0x3e, 0x00, 0x17, 0xae, 0xc9, 0x27, 0x11, 0x25, 0x46, 0x18, 0x7c,
+	0x71, 0x8d, 0xbb, 0x13, 0x88, 0x89, 0x26, 0x25, 0xfd, 0xfc, 0x55, 0x56, 0xf9, 0xf3, 0x55, 0x56,
+	0x79, 0xde, 0xcb, 0xaa, 0xe7, 0xbd, 0xac, 0xfa, 0x7b, 0x2f, 0xab, 0xfe, 0xd5, 0xcb, 0xaa, 0x4f,
+	0xd3, 0x4f, 0xb5, 0x5a, 0x46, 0xfe, 0xa2, 0x7a, 0xef, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3e,
+	0x7a, 0x8b, 0xe7, 0x6a, 0x0a, 0x00, 0x00,
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/raft.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/raft.proto
new file mode 100644
index 00000000..b351c15b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/raft.proto
@@ -0,0 +1,150 @@
+syntax = "proto3";
+
+package docker.swarmkit.v1;
+
+import "github.com/docker/swarmkit/api/objects.proto";
+import "github.com/docker/swarmkit/api/types.proto";
+import "github.com/coreos/etcd/raft/raftpb/raft.proto";
+import weak "gogoproto/gogo.proto";
+import weak "github.com/docker/swarmkit/protobuf/plugin/plugin.proto";
+
+// Raft defines the RPC communication between raft nodes.
+service Raft {
+  // ProcessRaftMessage sends a raft message to be processed on a raft member; it is
+  // called from the RaftMember that wants to send a message to its destination ('To' field).
+  rpc ProcessRaftMessage(ProcessRaftMessageRequest) returns (ProcessRaftMessageResponse) {
+    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+  };
+
+  // StreamRaftMessage accepts a stream of raft messages of type StreamRaftMessageRequest
+  // to be processed on a raft member, returning a StreamRaftMessageResponse
+  // when processing of the streamed messages is complete. A single stream corresponds
+  // to a single raft message, which may be disassembled and streamed as individual messages.
+  // It is called from the Raft leader, which uses it to stream messages to a raft member.
+  rpc StreamRaftMessage(stream StreamRaftMessageRequest) returns (StreamRaftMessageResponse) {
+    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+  };
+
+  // ResolveAddress returns the address where the node with the given ID can be reached.
+  rpc ResolveAddress(ResolveAddressRequest) returns (ResolveAddressResponse) {
+    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+  };
+}
+
+// RaftMembership defines RPCs for adding and removing members from the
+// cluster. These RPCs must always run on the leader, so they are in a separate
+// service to support the raft proxy.
+service RaftMembership {
+  // Join adds a RaftMember to the raft cluster.
+  rpc Join(JoinRequest) returns (JoinResponse) {
+    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+  };
+
+  // Leave removes a RaftMember from the raft cluster.
+  rpc Leave(LeaveRequest) returns (LeaveResponse) {
+    option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" };
+  };
+}
+
+message RaftMember {
+  // RaftID specifies the internal ID used by the manager in a raft context; it can
+  // never be modified and is used only for informational purposes.
+  uint64 raft_id = 1;
+
+  // NodeID is the node's ID.
+  string node_id = 2;
+
+  // Addr specifies the address of the member.
+  string addr = 3;
+
+  // Status provides the current status of the manager from the perspective of another manager.
+  RaftMemberStatus status = 4 [(gogoproto.nullable) = false];
+}
+
+message JoinRequest {
+  // Addr specifies the address of the member.
+  string addr = 1;
+}
+
+message JoinResponse {
+  // RaftID is the ID assigned to the new member.
+  uint64 raft_id = 1;
+
+  // Members is the membership set of the cluster.
+  repeated RaftMember members = 2;
+
+  // RemovedMembers is a list of members that have been removed from
+  // the cluster, so the new node can avoid communicating with them.
+  repeated uint64 removed_members = 3 [packed=false];
+}
+
+message LeaveRequest {
+  RaftMember node = 1;
+}
+
+message LeaveResponse {}
+
+message ProcessRaftMessageRequest {
+  option (docker.protobuf.plugin.deepcopy) = false;
+  raftpb.Message message = 1;
+}
+
+message ProcessRaftMessageResponse {}
+
+// Raft message streaming request.
+message StreamRaftMessageRequest {
+  option (docker.protobuf.plugin.deepcopy) = false;
+  raftpb.Message message = 1;
+}
+
+// Raft message streaming response.
+message StreamRaftMessageResponse {}
+
+message ResolveAddressRequest {
+  // raft_id is the ID to resolve to an address.
+  uint64 raft_id = 1;
+}
+
+message ResolveAddressResponse {
+  // Addr specifies the address of the member.
+  string addr = 1;
+}
+
+// Contains one of many protobuf-encoded objects to replicate
+// over the raft backend, with a request ID to track when the
+// action is effectively applied.
+message InternalRaftRequest {
+  uint64 id = 1;
+
+  repeated StoreAction action = 2 [(gogoproto.nullable) = false];
+}
+
+// TODO(stevvooe): Storage actions may belong in another protobuf file. They
+// aren't necessarily first-class "types" in the cluster schema.
+
+// StoreActionKind defines the operation to take on the store for the target of
+// a storage action.
+enum StoreActionKind {
+  option (gogoproto.goproto_enum_prefix) = false;
+  option (gogoproto.enum_customname) = "StoreActionKind";
+  UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StoreActionKindUnknown"]; // default value, invalid
+  STORE_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "StoreActionKindCreate"];
+  STORE_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "StoreActionKindUpdate"];
+  STORE_ACTION_REMOVE = 3 [(gogoproto.enumvalue_customname) = "StoreActionKindRemove"];
+}
+
+// StoreAction defines a target and operation to apply on the storage system.
+message StoreAction {
+  StoreActionKind action = 1;
+  oneof target {
+    Node node = 2;
+    Service service = 3;
+    Task task = 4;
+    Network network = 5;
+    Cluster cluster = 6;
+    Secret secret = 7;
+    Resource resource = 8;
+    Extension extension = 9;
+    Config config = 10;
+  }
+}
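A minimal sketch of how the pieces above fit together (not part of the vendored diff): assuming the generated github.com/docker/swarmkit/api package is importable, an InternalRaftRequest wraps one or more StoreActions and can be round-tripped through the generated Marshal/Unmarshal methods shown in raft.pb.go. The request ID and the empty Node target below are arbitrary example values.

package main

import (
	"fmt"

	api "github.com/docker/swarmkit/api"
)

func main() {
	// Wrap a single create action in the InternalRaftRequest envelope that is
	// replicated over the raft backend; the ID tracks when the action is
	// effectively applied. (Illustrative values only.)
	req := &api.InternalRaftRequest{
		ID: 42,
		Action: []api.StoreAction{{
			Action: api.StoreActionKindCreate,
			Target: &api.StoreAction_Node{Node: &api.Node{}},
		}},
	}

	// Marshal and Unmarshal are the generated methods from raft.pb.go.
	data, err := req.Marshal()
	if err != nil {
		panic(err)
	}

	var decoded api.InternalRaftRequest
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Println(decoded.ID, len(decoded.Action)) // 42 1
}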
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/resource.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/resource.pb.go
new file mode 100644
index 00000000..2d474199
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/resource.pb.go
@@ -0,0 +1,1075 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/resource.proto
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
+import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
+import metadata "google.golang.org/grpc/metadata"
+import peer "google.golang.org/grpc/peer"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type AttachNetworkRequest struct { + Config *NetworkAttachmentConfig `protobuf:"bytes,1,opt,name=config" json:"config,omitempty"` + ContainerID string `protobuf:"bytes,2,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"` +} + +func (m *AttachNetworkRequest) Reset() { *m = AttachNetworkRequest{} } +func (*AttachNetworkRequest) ProtoMessage() {} +func (*AttachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{0} } + +type AttachNetworkResponse struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *AttachNetworkResponse) Reset() { *m = AttachNetworkResponse{} } +func (*AttachNetworkResponse) ProtoMessage() {} +func (*AttachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{1} } + +type DetachNetworkRequest struct { + AttachmentID string `protobuf:"bytes,1,opt,name=attachment_id,json=attachmentId,proto3" json:"attachment_id,omitempty"` +} + +func (m *DetachNetworkRequest) Reset() { *m = DetachNetworkRequest{} } +func (*DetachNetworkRequest) ProtoMessage() {} +func (*DetachNetworkRequest) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{2} } + +type DetachNetworkResponse struct { +} + +func (m *DetachNetworkResponse) Reset() { *m = DetachNetworkResponse{} } +func (*DetachNetworkResponse) ProtoMessage() {} +func (*DetachNetworkResponse) Descriptor() ([]byte, []int) { return fileDescriptorResource, []int{3} } + +func init() { + proto.RegisterType((*AttachNetworkRequest)(nil), "docker.swarmkit.v1.AttachNetworkRequest") + proto.RegisterType((*AttachNetworkResponse)(nil), "docker.swarmkit.v1.AttachNetworkResponse") + proto.RegisterType((*DetachNetworkRequest)(nil), "docker.swarmkit.v1.DetachNetworkRequest") + proto.RegisterType((*DetachNetworkResponse)(nil), "docker.swarmkit.v1.DetachNetworkResponse") +} + +type authenticatedWrapperResourceAllocatorServer struct { + local ResourceAllocatorServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperResourceAllocatorServer(local ResourceAllocatorServer, authorize func(context.Context, []string) error) ResourceAllocatorServer { + return &authenticatedWrapperResourceAllocatorServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) +} + +func (p *authenticatedWrapperResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + if err := p.authorize(ctx, []string{"swarm-worker", "swarm-manager"}); err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) +} + +func (m *AttachNetworkRequest) Copy() *AttachNetworkRequest { + if m == nil { + return nil + } + o := &AttachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *AttachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkRequest) + *m = *o + if o.Config != nil { + m.Config = &NetworkAttachmentConfig{} + deepcopy.Copy(m.Config, o.Config) + } +} + +func (m *AttachNetworkResponse) Copy() *AttachNetworkResponse { + if m == nil { + return nil + } + o := &AttachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m 
*AttachNetworkResponse) CopyFrom(src interface{}) { + + o := src.(*AttachNetworkResponse) + *m = *o +} + +func (m *DetachNetworkRequest) Copy() *DetachNetworkRequest { + if m == nil { + return nil + } + o := &DetachNetworkRequest{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkRequest) CopyFrom(src interface{}) { + + o := src.(*DetachNetworkRequest) + *m = *o +} + +func (m *DetachNetworkResponse) Copy() *DetachNetworkResponse { + if m == nil { + return nil + } + o := &DetachNetworkResponse{} + o.CopyFrom(m) + return o +} + +func (m *DetachNetworkResponse) CopyFrom(src interface{}) {} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ResourceAllocator service + +type ResourceAllocatorClient interface { + AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) + DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) +} + +type resourceAllocatorClient struct { + cc *grpc.ClientConn +} + +func NewResourceAllocatorClient(cc *grpc.ClientConn) ResourceAllocatorClient { + return &resourceAllocatorClient{cc} +} + +func (c *resourceAllocatorClient) AttachNetwork(ctx context.Context, in *AttachNetworkRequest, opts ...grpc.CallOption) (*AttachNetworkResponse, error) { + out := new(AttachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *resourceAllocatorClient) DetachNetwork(ctx context.Context, in *DetachNetworkRequest, opts ...grpc.CallOption) (*DetachNetworkResponse, error) { + out := new(DetachNetworkResponse) + err := grpc.Invoke(ctx, "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ResourceAllocator service + +type ResourceAllocatorServer interface { + AttachNetwork(context.Context, *AttachNetworkRequest) (*AttachNetworkResponse, error) + DetachNetwork(context.Context, *DetachNetworkRequest) (*DetachNetworkResponse, error) +} + +func RegisterResourceAllocatorServer(s *grpc.Server, srv ResourceAllocatorServer) { + s.RegisterService(&_ResourceAllocator_serviceDesc, srv) +} + +func _ResourceAllocator_AttachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AttachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/AttachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).AttachNetwork(ctx, req.(*AttachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ResourceAllocator_DetachNetwork_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachNetworkRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/docker.swarmkit.v1.ResourceAllocator/DetachNetwork", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ResourceAllocatorServer).DetachNetwork(ctx, req.(*DetachNetworkRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ResourceAllocator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.ResourceAllocator", + HandlerType: (*ResourceAllocatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AttachNetwork", + Handler: _ResourceAllocator_AttachNetwork_Handler, + }, + { + MethodName: "DetachNetwork", + Handler: _ResourceAllocator_DetachNetwork_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "github.com/docker/swarmkit/api/resource.proto", +} + +func (m *AttachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Config != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(m.Config.Size())) + n1, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.ContainerID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + return i, nil +} + +func (m *AttachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + 
return i, nil +} + +func (m *DetachNetworkRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.AttachmentID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintResource(dAtA, i, uint64(len(m.AttachmentID))) + i += copy(dAtA[i:], m.AttachmentID) + } + return i, nil +} + +func (m *DetachNetworkResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DetachNetworkResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintResource(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyResourceAllocatorServer struct { + local ResourceAllocatorServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyResourceAllocatorServer(local ResourceAllocatorServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) ResourceAllocatorServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyResourceAllocatorServer{ + local: local, + connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyResourceAllocatorServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyResourceAllocatorServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +func (p *raftProxyResourceAllocatorServer) AttachNetwork(ctx context.Context, r *AttachNetworkRequest) (*AttachNetworkResponse, 
error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.AttachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).AttachNetwork(modCtx, r) + } + return resp, err +} + +func (p *raftProxyResourceAllocatorServer) DetachNetwork(ctx context.Context, r *DetachNetworkRequest) (*DetachNetworkResponse, error) { + + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return nil, err + } + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + modCtx, err := p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return nil, err + } + + resp, err := NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + if err != nil { + if !strings.Contains(err.Error(), "is closing") && !strings.Contains(err.Error(), "the connection is unavailable") && !strings.Contains(err.Error(), "connection error") { + return resp, err + } + conn, err := p.pollNewLeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + return p.local.DetachNetwork(ctx, r) + } + return nil, err + } + return NewResourceAllocatorClient(conn).DetachNetwork(modCtx, r) + } + return resp, err +} + +func (m *AttachNetworkRequest) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovResource(uint64(l)) + } + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *AttachNetworkResponse) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkRequest) Size() (n int) { + var l int + _ = l + l = len(m.AttachmentID) + if l > 0 { + n += 1 + l + sovResource(uint64(l)) + } + return n +} + +func (m *DetachNetworkResponse) Size() (n int) { + var l int + _ = l + return n +} + +func sovResource(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozResource(x uint64) (n int) { + return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AttachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkRequest{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *AttachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachNetworkResponse{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkRequest) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&DetachNetworkRequest{`, + `AttachmentID:` + fmt.Sprintf("%v", this.AttachmentID) + `,`, + `}`, + }, "") + return s +} +func (this *DetachNetworkResponse) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DetachNetworkResponse{`, + `}`, + }, "") + return s +} +func valueToStringResource(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AttachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &NetworkAttachmentConfig{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthResource + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AttachmentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DetachNetworkResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResource + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DetachNetworkResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DetachNetworkResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipResource(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthResource + } + if (iNdEx + skippy) > 
l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResource(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthResource + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResource + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipResource(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/resource.proto", fileDescriptorResource) +} + +var fileDescriptorResource = []byte{ + // 397 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0x19, 0x16, 0x24, 0xdf, 0x50, 0xf2, 0x69, 0x03, 0x91, 0x90, 0x58, 0x48, 0xdd, 0xa0, + 0x86, 0x36, 0x62, 0x8c, 0x6b, 0xfe, 0x6c, 0xba, 0x90, 0x45, 0x5f, 0xc0, 0x0c, 0xed, 0x50, 0x1a, + 0x68, 0xa7, 0x4e, 0xa7, 0x12, 0x77, 0x6e, 0x5d, 0xb9, 0xf5, 0x1d, 0x4c, 0x7c, 0x0e, 0xe2, 0xca, + 0xa5, 0x2b, 0x22, 0x7d, 0x00, 0x9f, 0xc1, 0xd0, 0x29, 0x10, 0x70, 0xa2, 0xc4, 0x55, 0xa7, 0xd3, + 0x73, 0xce, 0xfd, 0xdd, 0x7b, 0x0b, 0x1b, 0x8e, 0xcb, 0x86, 0x51, 0x5f, 0xb3, 0x88, 0xa7, 0xdb, + 0xc4, 0x1a, 0x61, 0xaa, 0x87, 0x13, 0x44, 0xbd, 0x91, 0xcb, 0x74, 0x14, 0xb8, 0x3a, 0xc5, 0x21, + 0x89, 0xa8, 0x85, 0xb5, 0x80, 0x12, 0x46, 0x64, 0x99, 0x6b, 0xb4, 0xa5, 0x46, 0xbb, 0x3d, 0xab, + 0x9c, 0xfc, 0x12, 0xc1, 0xee, 0x02, 0x1c, 0x72, 0x7f, 0xa5, 0xe8, 0x10, 0x87, 0x24, 0x47, 0x7d, + 0x71, 0x4a, 0x6f, 0x2f, 0x7f, 0x48, 0x48, 0x14, 0xfd, 0x68, 0xa0, 0x07, 0xe3, 0xc8, 0x71, 0xfd, + 0xf4, 0xc1, 0x8d, 0xea, 0x23, 0x80, 0xc5, 0x16, 0x63, 0xc8, 0x1a, 0xf6, 0x30, 0x9b, 0x10, 0x3a, + 0x32, 0xf1, 0x4d, 0x84, 0x43, 0x26, 0x77, 0x60, 0xce, 0x22, 0xfe, 0xc0, 0x75, 0xca, 0xa0, 0x06, + 0xea, 0xf9, 0xe6, 0xa9, 0xf6, 0x1d, 0x5c, 0x4b, 0x3d, 0x3c, 0xc0, 0xc3, 0x3e, 0xeb, 0x24, 0x16, + 
0x33, 0xb5, 0xca, 0x4d, 0x28, 0x59, 0xc4, 0x67, 0xc8, 0xf5, 0x31, 0xbd, 0x76, 0xed, 0x72, 0xb6, + 0x06, 0xea, 0xff, 0xda, 0xff, 0xe3, 0x59, 0x35, 0xdf, 0x59, 0xde, 0x1b, 0x5d, 0x33, 0xbf, 0x12, + 0x19, 0xb6, 0xda, 0x83, 0xa5, 0x2d, 0xa0, 0x30, 0x20, 0x7e, 0x88, 0xe5, 0x0b, 0x58, 0x40, 0xab, + 0x42, 0x8b, 0x34, 0x90, 0xa4, 0xed, 0xc5, 0xb3, 0xaa, 0xb4, 0x26, 0x30, 0xba, 0xa6, 0xb4, 0x96, + 0x19, 0xb6, 0x7a, 0x05, 0x8b, 0x5d, 0x2c, 0x68, 0xf0, 0x8f, 0x71, 0x07, 0xb0, 0xb4, 0x15, 0xc7, + 0xf1, 0x9a, 0xcf, 0x59, 0xb8, 0x6f, 0xa6, 0xbb, 0x6e, 0x8d, 0xc7, 0xc4, 0x42, 0x8c, 0x50, 0xf9, + 0x01, 0xc0, 0xc2, 0x46, 0x3b, 0x72, 0x5d, 0x34, 0x48, 0xd1, 0x0a, 0x2a, 0xc7, 0x3b, 0x28, 0x79, + 0x71, 0xf5, 0xe8, 0xf5, 0xe5, 0xf3, 0x29, 0x7b, 0x08, 0xa5, 0x44, 0xda, 0x58, 0x7c, 0xc3, 0x14, + 0x16, 0xf8, 0x9b, 0x87, 0x7c, 0xe4, 0x60, 0xce, 0xb2, 0xc1, 0x2e, 0x66, 0x11, 0x4d, 0x4b, 0xcc, + 0x22, 0x1c, 0xc4, 0x4e, 0x2c, 0xed, 0xf2, 0x74, 0xae, 0x64, 0xde, 0xe7, 0x4a, 0xe6, 0x3e, 0x56, + 0xc0, 0x34, 0x56, 0xc0, 0x5b, 0xac, 0x80, 0x8f, 0x58, 0x01, 0xfd, 0x5c, 0xf2, 0x63, 0x9e, 0x7f, + 0x05, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x7a, 0x29, 0xfc, 0x58, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/resource.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/resource.proto new file mode 100644 index 00000000..ecaa749e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/resource.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +// Allocator is the API provided by a manager group for agents to control the allocation of certain entities. +// +// API methods on this service are used only by agent nodes. +service ResourceAllocator { + rpc AttachNetwork(AttachNetworkRequest) returns (AttachNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; + rpc DetachNetwork(DetachNetworkRequest) returns (DetachNetworkResponse) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-worker" roles: "swarm-manager" }; + }; +} + +message AttachNetworkRequest { + NetworkAttachmentConfig config = 1; + string container_id = 2; +} + +message AttachNetworkResponse { + string attachment_id = 1; +} + +message DetachNetworkRequest { + string attachment_id = 1; +} + +message DetachNetworkResponse {} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/snapshot.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/snapshot.pb.go new file mode 100644 index 00000000..4d6893a9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/snapshot.pb.go @@ -0,0 +1,1326 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/snapshot.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +// skipping weak import gogoproto "github.com/gogo/protobuf/gogoproto" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Snapshot_Version int32 + +const ( + // V0 is the initial version of the StoreSnapshot message. + Snapshot_V0 Snapshot_Version = 0 +) + +var Snapshot_Version_name = map[int32]string{ + 0: "V0", +} +var Snapshot_Version_value = map[string]int32{ + "V0": 0, +} + +func (x Snapshot_Version) String() string { + return proto.EnumName(Snapshot_Version_name, int32(x)) +} +func (Snapshot_Version) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2, 0} } + +// StoreSnapshot is used to store snapshots of the store. +type StoreSnapshot struct { + Nodes []*Node `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` + Services []*Service `protobuf:"bytes,2,rep,name=services" json:"services,omitempty"` + Networks []*Network `protobuf:"bytes,3,rep,name=networks" json:"networks,omitempty"` + Tasks []*Task `protobuf:"bytes,4,rep,name=tasks" json:"tasks,omitempty"` + Clusters []*Cluster `protobuf:"bytes,5,rep,name=clusters" json:"clusters,omitempty"` + Secrets []*Secret `protobuf:"bytes,6,rep,name=secrets" json:"secrets,omitempty"` + Resources []*Resource `protobuf:"bytes,7,rep,name=resources" json:"resources,omitempty"` + Extensions []*Extension `protobuf:"bytes,8,rep,name=extensions" json:"extensions,omitempty"` + Configs []*Config `protobuf:"bytes,9,rep,name=configs" json:"configs,omitempty"` +} + +func (m *StoreSnapshot) Reset() { *m = StoreSnapshot{} } +func (*StoreSnapshot) ProtoMessage() {} +func (*StoreSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} } + +// ClusterSnapshot stores cluster membership information in snapshots. +type ClusterSnapshot struct { + Members []*RaftMember `protobuf:"bytes,1,rep,name=members" json:"members,omitempty"` + Removed []uint64 `protobuf:"varint,2,rep,name=removed" json:"removed,omitempty"` +} + +func (m *ClusterSnapshot) Reset() { *m = ClusterSnapshot{} } +func (*ClusterSnapshot) ProtoMessage() {} +func (*ClusterSnapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} } + +type Snapshot struct { + Version Snapshot_Version `protobuf:"varint,1,opt,name=version,proto3,enum=docker.swarmkit.v1.Snapshot_Version" json:"version,omitempty"` + Membership ClusterSnapshot `protobuf:"bytes,2,opt,name=membership" json:"membership"` + Store StoreSnapshot `protobuf:"bytes,3,opt,name=store" json:"store"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} } + +func init() { + proto.RegisterType((*StoreSnapshot)(nil), "docker.swarmkit.v1.StoreSnapshot") + proto.RegisterType((*ClusterSnapshot)(nil), "docker.swarmkit.v1.ClusterSnapshot") + proto.RegisterType((*Snapshot)(nil), "docker.swarmkit.v1.Snapshot") + proto.RegisterEnum("docker.swarmkit.v1.Snapshot_Version", Snapshot_Version_name, Snapshot_Version_value) +} + +func (m *StoreSnapshot) Copy() *StoreSnapshot { + if m == nil { + return nil + } + o := &StoreSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *StoreSnapshot) CopyFrom(src interface{}) { + + o := src.(*StoreSnapshot) + *m = *o + if o.Nodes != nil { + m.Nodes = make([]*Node, len(o.Nodes)) + for i := range m.Nodes { + m.Nodes[i] = &Node{} + deepcopy.Copy(m.Nodes[i], o.Nodes[i]) + } + } + + if o.Services != nil { + m.Services = make([]*Service, len(o.Services)) + for i := range m.Services { + m.Services[i] = &Service{} + deepcopy.Copy(m.Services[i], o.Services[i]) + } + } + + if o.Networks != nil { + 
m.Networks = make([]*Network, len(o.Networks)) + for i := range m.Networks { + m.Networks[i] = &Network{} + deepcopy.Copy(m.Networks[i], o.Networks[i]) + } + } + + if o.Tasks != nil { + m.Tasks = make([]*Task, len(o.Tasks)) + for i := range m.Tasks { + m.Tasks[i] = &Task{} + deepcopy.Copy(m.Tasks[i], o.Tasks[i]) + } + } + + if o.Clusters != nil { + m.Clusters = make([]*Cluster, len(o.Clusters)) + for i := range m.Clusters { + m.Clusters[i] = &Cluster{} + deepcopy.Copy(m.Clusters[i], o.Clusters[i]) + } + } + + if o.Secrets != nil { + m.Secrets = make([]*Secret, len(o.Secrets)) + for i := range m.Secrets { + m.Secrets[i] = &Secret{} + deepcopy.Copy(m.Secrets[i], o.Secrets[i]) + } + } + + if o.Resources != nil { + m.Resources = make([]*Resource, len(o.Resources)) + for i := range m.Resources { + m.Resources[i] = &Resource{} + deepcopy.Copy(m.Resources[i], o.Resources[i]) + } + } + + if o.Extensions != nil { + m.Extensions = make([]*Extension, len(o.Extensions)) + for i := range m.Extensions { + m.Extensions[i] = &Extension{} + deepcopy.Copy(m.Extensions[i], o.Extensions[i]) + } + } + + if o.Configs != nil { + m.Configs = make([]*Config, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &Config{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *ClusterSnapshot) Copy() *ClusterSnapshot { + if m == nil { + return nil + } + o := &ClusterSnapshot{} + o.CopyFrom(m) + return o +} + +func (m *ClusterSnapshot) CopyFrom(src interface{}) { + + o := src.(*ClusterSnapshot) + *m = *o + if o.Members != nil { + m.Members = make([]*RaftMember, len(o.Members)) + for i := range m.Members { + m.Members[i] = &RaftMember{} + deepcopy.Copy(m.Members[i], o.Members[i]) + } + } + + if o.Removed != nil { + m.Removed = make([]uint64, len(o.Removed)) + copy(m.Removed, o.Removed) + } + +} + +func (m *Snapshot) Copy() *Snapshot { + if m == nil { + return nil + } + o := &Snapshot{} + o.CopyFrom(m) + return o +} + +func (m *Snapshot) CopyFrom(src interface{}) { + + o := src.(*Snapshot) + *m = *o + deepcopy.Copy(&m.Membership, &o.Membership) + deepcopy.Copy(&m.Store, &o.Store) +} + +func (m *StoreSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for _, msg := range m.Nodes { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Services) > 0 { + for _, msg := range m.Services { + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Networks) > 0 { + for _, msg := range m.Networks { + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Tasks) > 0 { + for _, msg := range m.Tasks { + dAtA[i] = 0x22 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Clusters) > 0 { + for _, msg := range m.Clusters { + dAtA[i] = 0x2a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } 
+ i += n + } + } + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x32 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Resources) > 0 { + for _, msg := range m.Resources { + dAtA[i] = 0x3a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Extensions) > 0 { + for _, msg := range m.Extensions { + dAtA[i] = 0x42 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x4a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ClusterSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterSnapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Members) > 0 { + for _, msg := range m.Members { + dAtA[i] = 0xa + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Removed) > 0 { + for _, num := range m.Removed { + dAtA[i] = 0x10 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(num)) + } + } + return i, nil +} + +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Version != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Version)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Membership.Size())) + n1, err := m.Membership.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x1a + i++ + i = encodeVarintSnapshot(dAtA, i, uint64(m.Store.Size())) + n2, err := m.Store.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *StoreSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Networks) > 0 { + for _, e := range m.Networks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Tasks) > 0 { + for _, e := range m.Tasks { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Clusters) > 0 { + for _, e := range m.Clusters { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if 
len(m.Extensions) > 0 { + for _, e := range m.Extensions { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + return n +} + +func (m *ClusterSnapshot) Size() (n int) { + var l int + _ = l + if len(m.Members) > 0 { + for _, e := range m.Members { + l = e.Size() + n += 1 + l + sovSnapshot(uint64(l)) + } + } + if len(m.Removed) > 0 { + for _, e := range m.Removed { + n += 1 + sovSnapshot(uint64(e)) + } + } + return n +} + +func (m *Snapshot) Size() (n int) { + var l int + _ = l + if m.Version != 0 { + n += 1 + sovSnapshot(uint64(m.Version)) + } + l = m.Membership.Size() + n += 1 + l + sovSnapshot(uint64(l)) + l = m.Store.Size() + n += 1 + l + sovSnapshot(uint64(l)) + return n +} + +func sovSnapshot(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSnapshot(x uint64) (n int) { + return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StoreSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreSnapshot{`, + `Nodes:` + strings.Replace(fmt.Sprintf("%v", this.Nodes), "Node", "Node", 1) + `,`, + `Services:` + strings.Replace(fmt.Sprintf("%v", this.Services), "Service", "Service", 1) + `,`, + `Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "Network", "Network", 1) + `,`, + `Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Task", "Task", 1) + `,`, + `Clusters:` + strings.Replace(fmt.Sprintf("%v", this.Clusters), "Cluster", "Cluster", 1) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "Secret", "Secret", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resource", "Resource", 1) + `,`, + `Extensions:` + strings.Replace(fmt.Sprintf("%v", this.Extensions), "Extension", "Extension", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSnapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSnapshot{`, + `Members:` + strings.Replace(fmt.Sprintf("%v", this.Members), "RaftMember", "RaftMember", 1) + `,`, + `Removed:` + fmt.Sprintf("%v", this.Removed) + `,`, + `}`, + }, "") + return s +} +func (this *Snapshot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Snapshot{`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Membership:` + strings.Replace(strings.Replace(this.Membership.String(), "ClusterSnapshot", "ClusterSnapshot", 1), `&`, ``, 1) + `,`, + `Store:` + strings.Replace(strings.Replace(this.Store.String(), "StoreSnapshot", "StoreSnapshot", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSnapshot(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StoreSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreSnapshot: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &Node{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, &Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Networks = append(m.Networks, &Network{}) + if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tasks = append(m.Tasks, &Task{}) + if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Clusters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Clusters = append(m.Clusters, &Cluster{}) + if err := m.Clusters[len(m.Clusters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, &Secret{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, &Resource{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Extensions = append(m.Extensions, &Extension{}) + if err := m.Extensions[len(m.Extensions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &Config{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: ClusterSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Members = append(m.Members, &RaftMember{}) + if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Removed = append(m.Removed, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Removed", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= (Snapshot_Version(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Membership", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Membership.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Store", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSnapshot + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSnapshot + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Store.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSnapshot(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSnapshot + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSnapshot(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSnapshot + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSnapshot + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSnapshot(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSnapshot = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/api/snapshot.proto", fileDescriptorSnapshot) +} + +var 
fileDescriptorSnapshot = []byte{ + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xbf, 0x8f, 0xd3, 0x30, + 0x14, 0xc7, 0x93, 0xf4, 0x47, 0x7a, 0x3e, 0x01, 0x87, 0xc5, 0x60, 0x95, 0x23, 0x94, 0xc2, 0x50, + 0x24, 0x48, 0xa0, 0x20, 0x81, 0x90, 0x8e, 0xa1, 0x27, 0x06, 0x06, 0x6e, 0x70, 0xd1, 0x89, 0x35, + 0x4d, 0xdd, 0x36, 0x84, 0xc4, 0x95, 0x9f, 0xdb, 0x63, 0x84, 0xff, 0xae, 0x23, 0x23, 0x13, 0xe2, + 0xba, 0xf0, 0x6f, 0x20, 0xdb, 0x71, 0xa8, 0x44, 0x7a, 0xb7, 0x45, 0xd6, 0xe7, 0xf3, 0xde, 0xd7, + 0xce, 0x7b, 0xe8, 0xe9, 0x3c, 0x95, 0x8b, 0xd5, 0x24, 0x4c, 0x78, 0x1e, 0x4d, 0x79, 0x92, 0x31, + 0x11, 0xc1, 0x45, 0x2c, 0xf2, 0x2c, 0x95, 0x51, 0xbc, 0x4c, 0x23, 0x28, 0xe2, 0x25, 0x2c, 0xb8, + 0x0c, 0x97, 0x82, 0x4b, 0x8e, 0xb1, 0x61, 0x42, 0xcb, 0x84, 0xeb, 0xe7, 0xdd, 0x27, 0xd7, 0x94, + 0xe0, 0x93, 0xcf, 0x2c, 0x91, 0x60, 0x2a, 0x74, 0x1f, 0x5f, 0x43, 0x8b, 0x78, 0x56, 0x36, 0xeb, + 0xde, 0x99, 0xf3, 0x39, 0xd7, 0x9f, 0x91, 0xfa, 0x32, 0xa7, 0xfd, 0xef, 0x4d, 0x74, 0x63, 0x2c, + 0xb9, 0x60, 0xe3, 0x32, 0x1a, 0x0e, 0x51, 0xab, 0xe0, 0x53, 0x06, 0xc4, 0xed, 0x35, 0x06, 0x87, + 0x43, 0x12, 0xfe, 0x1f, 0x32, 0x3c, 0xe3, 0x53, 0x46, 0x0d, 0x86, 0x5f, 0xa1, 0x0e, 0x30, 0xb1, + 0x4e, 0x13, 0x06, 0xc4, 0xd3, 0xca, 0xdd, 0x3a, 0x65, 0x6c, 0x18, 0x5a, 0xc1, 0x4a, 0x2c, 0x98, + 0xbc, 0xe0, 0x22, 0x03, 0xd2, 0xd8, 0x2f, 0x9e, 0x19, 0x86, 0x56, 0xb0, 0x4a, 0x28, 0x63, 0xc8, + 0x80, 0x34, 0xf7, 0x27, 0xfc, 0x18, 0x43, 0x46, 0x0d, 0xa6, 0x1a, 0x25, 0x5f, 0x56, 0x20, 0x99, + 0x00, 0xd2, 0xda, 0xdf, 0xe8, 0xd4, 0x30, 0xb4, 0x82, 0xf1, 0x4b, 0xe4, 0x03, 0x4b, 0x04, 0x93, + 0x40, 0xda, 0xda, 0xeb, 0xd6, 0xdf, 0x4c, 0x21, 0xd4, 0xa2, 0xf8, 0x0d, 0x3a, 0x10, 0x0c, 0xf8, + 0x4a, 0xa8, 0x17, 0xf1, 0xb5, 0x77, 0x5c, 0xe7, 0xd1, 0x12, 0xa2, 0xff, 0x70, 0x7c, 0x82, 0x10, + 0xfb, 0x2a, 0x59, 0x01, 0x29, 0x2f, 0x80, 0x74, 0xb4, 0x7c, 0xaf, 0x4e, 0x7e, 0x67, 0x29, 0xba, + 0x23, 0xa8, 0xc0, 0x09, 0x2f, 0x66, 0xe9, 0x1c, 0xc8, 0xc1, 0xfe, 0xc0, 0xa7, 0x1a, 0xa1, 0x16, + 0xed, 0xa7, 0xe8, 0x56, 0x79, 0xf7, 0x6a, 0x08, 0x5e, 0x23, 0x3f, 0x67, 0xf9, 0x44, 0xbd, 0x98, + 0x19, 0x83, 0xa0, 0xf6, 0x06, 0xf1, 0x4c, 0x7e, 0xd0, 0x18, 0xb5, 0x38, 0x3e, 0x46, 0xbe, 0x60, + 0x39, 0x5f, 0xb3, 0xa9, 0x9e, 0x86, 0xe6, 0xc8, 0x3b, 0x72, 0xa8, 0x3d, 0xea, 0xff, 0x71, 0x51, + 0xa7, 0x6a, 0xf2, 0x16, 0xf9, 0x6b, 0x26, 0x54, 0x72, 0xe2, 0xf6, 0xdc, 0xc1, 0xcd, 0xe1, 0xa3, + 0xda, 0xe7, 0xb5, 0x3b, 0x73, 0x6e, 0x58, 0x6a, 0x25, 0xfc, 0x1e, 0xa1, 0xb2, 0xeb, 0x22, 0x5d, + 0x12, 0xaf, 0xe7, 0x0e, 0x0e, 0x87, 0x0f, 0xaf, 0xf8, 0xb3, 0xb6, 0xd2, 0xa8, 0xb9, 0xf9, 0x75, + 0xdf, 0xa1, 0x3b, 0x32, 0x3e, 0x41, 0x2d, 0x50, 0x5b, 0x40, 0x1a, 0xba, 0xca, 0x83, 0xda, 0x20, + 0xbb, 0x6b, 0x52, 0xd6, 0x30, 0x56, 0xff, 0x36, 0xf2, 0xcb, 0x74, 0xb8, 0x8d, 0xbc, 0xf3, 0x67, + 0x47, 0xce, 0x88, 0x6c, 0x2e, 0x03, 0xe7, 0xe7, 0x65, 0xe0, 0x7c, 0xdb, 0x06, 0xee, 0x66, 0x1b, + 0xb8, 0x3f, 0xb6, 0x81, 0xfb, 0x7b, 0x1b, 0xb8, 0x9f, 0xbc, 0x49, 0x5b, 0xef, 0xde, 0x8b, 0xbf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xbe, 0x47, 0xec, 0x2f, 0x04, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/snapshot.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/snapshot.proto new file mode 100644 index 00000000..91e9592d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/snapshot.proto @@ -0,0 +1,44 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import 
"github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/raft.proto"; +import weak "gogoproto/gogo.proto"; + +// StoreSnapshot is used to store snapshots of the store. +message StoreSnapshot { + // TODO(aaronl): The current method of assembling a StoreSnapshot + // structure and marshalling it is not optimal. It may be better to + // write out nodes, networks, tasks, etc. one at a time to an io.Writer + // using gogo-protobuf's io.DelimitedWriter. A new value of the version + // field could support this approach. + + repeated Node nodes = 1; + repeated Service services = 2; + repeated Network networks = 3; + repeated Task tasks = 4; + repeated Cluster clusters = 5; + repeated Secret secrets = 6; + repeated Resource resources = 7; + repeated Extension extensions = 8; + repeated Config configs = 9; +} + +// ClusterSnapshot stores cluster membership information in snapshots. +message ClusterSnapshot { + repeated RaftMember members = 1; + repeated uint64 removed = 2 [packed=false]; +} + +message Snapshot { + enum Version { + // V0 is the initial version of the StoreSnapshot message. + V0 = 0; + } + + Version version = 1; + + ClusterSnapshot membership = 2 [(gogoproto.nullable) = false]; + StoreSnapshot store = 3 [(gogoproto.nullable) = false]; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/specs.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/specs.pb.go new file mode 100644 index 00000000..660e59ca --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/specs.pb.go @@ -0,0 +1,6966 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/api/specs.proto + +package api + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import google_protobuf4 "github.com/gogo/protobuf/types" +import google_protobuf2 "github.com/gogo/protobuf/types" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type NodeSpec_Membership int32 + +const ( + NodeMembershipPending NodeSpec_Membership = 0 + NodeMembershipAccepted NodeSpec_Membership = 1 +) + +var NodeSpec_Membership_name = map[int32]string{ + 0: "PENDING", + 1: "ACCEPTED", +} +var NodeSpec_Membership_value = map[string]int32{ + "PENDING": 0, + "ACCEPTED": 1, +} + +func (x NodeSpec_Membership) String() string { + return proto.EnumName(NodeSpec_Membership_name, int32(x)) +} +func (NodeSpec_Membership) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 0} } + +type NodeSpec_Availability int32 + +const ( + // Active nodes. + NodeAvailabilityActive NodeSpec_Availability = 0 + // Paused nodes won't be considered by the scheduler, preventing any + // further task to run on them. + NodeAvailabilityPause NodeSpec_Availability = 1 + // Drained nodes are paused and any task already running on them will + // be evicted. 
+	NodeAvailabilityDrain NodeSpec_Availability = 2
+)
+
+var NodeSpec_Availability_name = map[int32]string{
+	0: "ACTIVE",
+	1: "PAUSE",
+	2: "DRAIN",
+}
+var NodeSpec_Availability_value = map[string]int32{
+	"ACTIVE": 0,
+	"PAUSE":  1,
+	"DRAIN":  2,
+}
+
+func (x NodeSpec_Availability) String() string {
+	return proto.EnumName(NodeSpec_Availability_name, int32(x))
+}
+func (NodeSpec_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 1} }
+
+type ContainerSpec_Isolation int32
+
+const (
+	// ISOLATION_DEFAULT uses whatever default value from the container runtime
+	ContainerIsolationDefault ContainerSpec_Isolation = 0
+	// ISOLATION_PROCESS forces windows container isolation
+	ContainerIsolationProcess ContainerSpec_Isolation = 1
+	// ISOLATION_HYPERV forces Hyper-V isolation
+	ContainerIsolationHyperV ContainerSpec_Isolation = 2
+)
+
+var ContainerSpec_Isolation_name = map[int32]string{
+	0: "ISOLATION_DEFAULT",
+	1: "ISOLATION_PROCESS",
+	2: "ISOLATION_HYPERV",
+}
+var ContainerSpec_Isolation_value = map[string]int32{
+	"ISOLATION_DEFAULT": 0,
+	"ISOLATION_PROCESS": 1,
+	"ISOLATION_HYPERV":  2,
+}
+
+func (x ContainerSpec_Isolation) String() string {
+	return proto.EnumName(ContainerSpec_Isolation_name, int32(x))
+}
+func (ContainerSpec_Isolation) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptorSpecs, []int{8, 0}
+}
+
+// ResolutionMode specifies the mode of resolution to use for
+// internal loadbalancing between tasks which are all within
+// the cluster. This is sometimes called the east-west data path.
+type EndpointSpec_ResolutionMode int32
+
+const (
+	// VIP resolution mode specifies that the
+	// service resolves to a logical IP and the requests
+	// are sent to that logical IP. Packets hitting that
+	// logical IP are load balanced to a chosen backend.
+	ResolutionModeVirtualIP EndpointSpec_ResolutionMode = 0
+	// DNSRR resolution mode specifies that the
+	// service directly gets resolved to one of the
+	// backend IP and the client directly initiates a
+	// request towards the actual backend. This requires
+	// that the client does not cache the DNS responses
+	// when the DNS response TTL is 0.
+	ResolutionModeDNSRoundRobin EndpointSpec_ResolutionMode = 1
+)
+
+var EndpointSpec_ResolutionMode_name = map[int32]string{
+	0: "VIP",
+	1: "DNSRR",
+}
+var EndpointSpec_ResolutionMode_value = map[string]int32{
+	"VIP":   0,
+	"DNSRR": 1,
+}
+
+func (x EndpointSpec_ResolutionMode) String() string {
+	return proto.EnumName(EndpointSpec_ResolutionMode_name, int32(x))
+}
+func (EndpointSpec_ResolutionMode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptorSpecs, []int{9, 0}
+}
+
+type NodeSpec struct {
+	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+	// DesiredRole defines the role the node should have.
+	DesiredRole NodeRole `protobuf:"varint,2,opt,name=desired_role,json=desiredRole,proto3,enum=docker.swarmkit.v1.NodeRole" json:"desired_role,omitempty"`
+	// Membership controls the admission of the node into the cluster.
+	Membership NodeSpec_Membership `protobuf:"varint,3,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership" json:"membership,omitempty"`
+	// Availability allows a user to control the current scheduling status of a
+	// node.
+ Availability NodeSpec_Availability `protobuf:"varint,4,opt,name=availability,proto3,enum=docker.swarmkit.v1.NodeSpec_Availability" json:"availability,omitempty"` +} + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0} } + +// ServiceSpec defines the properties of a service. +// +// A service instructs the cluster in orchestrating repeated instances of a +// template, implemented as tasks. Based on the number of instances, scheduling +// strategy and restart policy, a number of application-level behaviors can be +// defined. +type ServiceSpec struct { + Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"` + // Task defines the task template this service will spawn. + Task TaskSpec `protobuf:"bytes,2,opt,name=task" json:"task"` + // Types that are valid to be assigned to Mode: + // *ServiceSpec_Replicated + // *ServiceSpec_Global + Mode isServiceSpec_Mode `protobuf_oneof:"mode"` + // Update contains settings which affect updates. + Update *UpdateConfig `protobuf:"bytes,6,opt,name=update" json:"update,omitempty"` + // Rollback contains settings which affect rollbacks of updates. + Rollback *UpdateConfig `protobuf:"bytes,9,opt,name=rollback" json:"rollback,omitempty"` + // ServiceSpec.Networks has been deprecated and is replaced by + // Networks field in Task (TaskSpec.Networks). + // This field (ServiceSpec.Networks) is kept for compatibility. + // In case TaskSpec.Networks does not exist, ServiceSpec.Networks + // is still honored if it exists. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // Service endpoint specifies the user provided configuration + // to properly discover and load balance a service. + Endpoint *EndpointSpec `protobuf:"bytes,8,opt,name=endpoint" json:"endpoint,omitempty"` +} + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{1} } + +type isServiceSpec_Mode interface { + isServiceSpec_Mode() + MarshalTo([]byte) (int, error) + Size() int +} + +type ServiceSpec_Replicated struct { + Replicated *ReplicatedService `protobuf:"bytes,3,opt,name=replicated,oneof"` +} +type ServiceSpec_Global struct { + Global *GlobalService `protobuf:"bytes,4,opt,name=global,oneof"` +} + +func (*ServiceSpec_Replicated) isServiceSpec_Mode() {} +func (*ServiceSpec_Global) isServiceSpec_Mode() {} + +func (m *ServiceSpec) GetMode() isServiceSpec_Mode { + if m != nil { + return m.Mode + } + return nil +} + +func (m *ServiceSpec) GetReplicated() *ReplicatedService { + if x, ok := m.GetMode().(*ServiceSpec_Replicated); ok { + return x.Replicated + } + return nil +} + +func (m *ServiceSpec) GetGlobal() *GlobalService { + if x, ok := m.GetMode().(*ServiceSpec_Global); ok { + return x.Global + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ServiceSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ServiceSpec_OneofMarshaler, _ServiceSpec_OneofUnmarshaler, _ServiceSpec_OneofSizer, []interface{}{ + (*ServiceSpec_Replicated)(nil), + (*ServiceSpec_Global)(nil), + } +} + +func _ServiceSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Replicated); err != nil { + return err + } + case *ServiceSpec_Global: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Global); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ServiceSpec.Mode has unexpected type %T", x) + } + return nil +} + +func _ServiceSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ServiceSpec) + switch tag { + case 3: // mode.replicated + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReplicatedService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Replicated{msg} + return true, err + case 4: // mode.global + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(GlobalService) + err := b.DecodeMessage(msg) + m.Mode = &ServiceSpec_Global{msg} + return true, err + default: + return false, nil + } +} + +func _ServiceSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ServiceSpec) + // mode + switch x := m.Mode.(type) { + case *ServiceSpec_Replicated: + s := proto.Size(x.Replicated) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ServiceSpec_Global: + s := proto.Size(x.Global) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ReplicatedService sets the reconciliation target to certain number of replicas. +type ReplicatedService struct { + Replicas uint64 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` +} + +func (m *ReplicatedService) Reset() { *m = ReplicatedService{} } +func (*ReplicatedService) ProtoMessage() {} +func (*ReplicatedService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{2} } + +// GlobalService represents global service. +type GlobalService struct { +} + +func (m *GlobalService) Reset() { *m = GlobalService{} } +func (*GlobalService) ProtoMessage() {} +func (*GlobalService) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{3} } + +type TaskSpec struct { + // Types that are valid to be assigned to Runtime: + // *TaskSpec_Attachment + // *TaskSpec_Container + // *TaskSpec_Generic + Runtime isTaskSpec_Runtime `protobuf_oneof:"runtime"` + // Resource requirements for the container. + Resources *ResourceRequirements `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"` + // RestartPolicy specifies what to do when a task fails or finishes. + Restart *RestartPolicy `protobuf:"bytes,4,opt,name=restart" json:"restart,omitempty"` + // Placement specifies node selection constraints + Placement *Placement `protobuf:"bytes,5,opt,name=placement" json:"placement,omitempty"` + // LogDriver specifies the log driver to use for the task. 
Any runtime will + // direct logs into the specified driver for the duration of the task. + LogDriver *Driver `protobuf:"bytes,6,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"` + // Networks specifies the list of network attachment + // configurations (which specify the network and per-network + // aliases) that this task spec is bound to. + Networks []*NetworkAttachmentConfig `protobuf:"bytes,7,rep,name=networks" json:"networks,omitempty"` + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. We do this to allow forced restarts + // using the same reconciliation-based mechanism that performs rolling + // updates. + ForceUpdate uint64 `protobuf:"varint,9,opt,name=force_update,json=forceUpdate,proto3" json:"force_update,omitempty"` + // ResourceReferences provides a generic way to specify resources that + // are used by this task, and should be sent down to agents along with + // the task. Inside the runtime field there may be more specific + // information about how to use the resource, but ResourceReferences + // establishes the relationship at the store level, and instructs the + // dispatcher to send the related objects. + // + // ResourceReferences is a list of ResourceReferences used by the task. + ResourceReferences []ResourceReference `protobuf:"bytes,11,rep,name=resource_references,json=resourceReferences" json:"resource_references"` +} + +func (m *TaskSpec) Reset() { *m = TaskSpec{} } +func (*TaskSpec) ProtoMessage() {} +func (*TaskSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{4} } + +type isTaskSpec_Runtime interface { + isTaskSpec_Runtime() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskSpec_Attachment struct { + Attachment *NetworkAttachmentSpec `protobuf:"bytes,8,opt,name=attachment,oneof"` +} +type TaskSpec_Container struct { + Container *ContainerSpec `protobuf:"bytes,1,opt,name=container,oneof"` +} +type TaskSpec_Generic struct { + Generic *GenericRuntimeSpec `protobuf:"bytes,10,opt,name=generic,oneof"` +} + +func (*TaskSpec_Attachment) isTaskSpec_Runtime() {} +func (*TaskSpec_Container) isTaskSpec_Runtime() {} +func (*TaskSpec_Generic) isTaskSpec_Runtime() {} + +func (m *TaskSpec) GetRuntime() isTaskSpec_Runtime { + if m != nil { + return m.Runtime + } + return nil +} + +func (m *TaskSpec) GetAttachment() *NetworkAttachmentSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Attachment); ok { + return x.Attachment + } + return nil +} + +func (m *TaskSpec) GetContainer() *ContainerSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Container); ok { + return x.Container + } + return nil +} + +func (m *TaskSpec) GetGeneric() *GenericRuntimeSpec { + if x, ok := m.GetRuntime().(*TaskSpec_Generic); ok { + return x.Generic + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*TaskSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _TaskSpec_OneofMarshaler, _TaskSpec_OneofUnmarshaler, _TaskSpec_OneofSizer, []interface{}{
+		(*TaskSpec_Attachment)(nil),
+		(*TaskSpec_Container)(nil),
+		(*TaskSpec_Generic)(nil),
+	}
+}
+
+func _TaskSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*TaskSpec)
+	// runtime
+	switch x := m.Runtime.(type) {
+	case *TaskSpec_Attachment:
+		_ = b.EncodeVarint(8<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Attachment); err != nil {
+			return err
+		}
+	case *TaskSpec_Container:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Container); err != nil {
+			return err
+		}
+	case *TaskSpec_Generic:
+		_ = b.EncodeVarint(10<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.Generic); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("TaskSpec.Runtime has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _TaskSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*TaskSpec)
+	switch tag {
+	case 8: // runtime.attachment
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(NetworkAttachmentSpec)
+		err := b.DecodeMessage(msg)
+		m.Runtime = &TaskSpec_Attachment{msg}
+		return true, err
+	case 1: // runtime.container
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(ContainerSpec)
+		err := b.DecodeMessage(msg)
+		m.Runtime = &TaskSpec_Container{msg}
+		return true, err
+	case 10: // runtime.generic
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(GenericRuntimeSpec)
+		err := b.DecodeMessage(msg)
+		m.Runtime = &TaskSpec_Generic{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _TaskSpec_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*TaskSpec)
+	// runtime
+	switch x := m.Runtime.(type) {
+	case *TaskSpec_Attachment:
+		s := proto.Size(x.Attachment)
+		n += proto.SizeVarint(8<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *TaskSpec_Container:
+		s := proto.Size(x.Container)
+		n += proto.SizeVarint(1<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *TaskSpec_Generic:
+		s := proto.Size(x.Generic)
+		n += proto.SizeVarint(10<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type ResourceReference struct {
+	ResourceID   string       `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+	ResourceType ResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,proto3,enum=docker.swarmkit.v1.ResourceType" json:"resource_type,omitempty"`
+}
+
+func (m *ResourceReference) Reset()                    { *m = ResourceReference{} }
+func (*ResourceReference) ProtoMessage()               {}
+func (*ResourceReference) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{5} }
+
+type GenericRuntimeSpec struct {
+	Kind    string                `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+	Payload *google_protobuf4.Any `protobuf:"bytes,2,opt,name=payload" json:"payload,omitempty"`
+}
+
+func (m *GenericRuntimeSpec) Reset()                    { *m = GenericRuntimeSpec{} }
+func (*GenericRuntimeSpec) ProtoMessage()               {}
+func (*GenericRuntimeSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{6} }
+
+// NetworkAttachmentSpec specifies runtime parameters required to attach
+// a container to a network.
+type NetworkAttachmentSpec struct {
+	// ContainerID specifies a unique ID of the container
+	// to which this attachment belongs.
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *NetworkAttachmentSpec) Reset()                    { *m = NetworkAttachmentSpec{} }
+func (*NetworkAttachmentSpec) ProtoMessage()               {}
+func (*NetworkAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{7} }
+
+// ContainerSpec specifies runtime parameters for a container.
+type ContainerSpec struct {
+	// image defines the image reference, as specified in the
+	// distribution/reference package. This may include a registry host, name,
+	// tag or digest.
+	//
+	// The field will be directly passed to the engine pulling. Well-behaved
+	// service definitions will use immutable references, either through tags
+	// that don't change or verifiable digests.
+	Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"`
+	// Labels defines labels to be added to the container at creation time. If
+	// collisions with system labels occur, these labels will be overridden.
+	//
+	// This field *must* remain compatible with the Labels field of
+	// Annotations.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Command to run in the container. The first element is a path to the
+	// executable and the following elements are treated as arguments.
+	//
+	// If command is empty, execution will fall back to the image's entrypoint.
+	//
+	// Command should only be used when overriding the entrypoint.
+	Command []string `protobuf:"bytes,3,rep,name=command" json:"command,omitempty"`
+	// Args specifies arguments provided to the image's entrypoint.
+	//
+	// If Command and Args are provided, Args will be appended to Command.
+	Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
+	// Hostname specifies the hostname that will be set on containers created by docker swarm.
+	// All containers for a given service will have the same hostname.
+	Hostname string `protobuf:"bytes,14,opt,name=hostname,proto3" json:"hostname,omitempty"`
+	// Env specifies the environment variables for the container in NAME=VALUE
+	// format. These must be compliant with [IEEE Std
+	// 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
+	Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
+	// Dir defines the working directory to set for the container process.
+	Dir string `protobuf:"bytes,6,opt,name=dir,proto3" json:"dir,omitempty"`
+	// User specifies the user that should be employed to run the container.
+	//
+	// Note that the primary group may be specified by appending the group name
+	// or id to the user name, separated by a `:`. This syntax is
+	// `<user>:<group>`.
+	User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"`
+	// Groups specifies supplementary groups available to the user.
+	Groups []string `protobuf:"bytes,11,rep,name=groups" json:"groups,omitempty"`
+	// Privileges specifies security configuration/permissions.
+	Privileges *Privileges `protobuf:"bytes,22,opt,name=privileges" json:"privileges,omitempty"`
+	// Init declares that a custom init will be running inside the container;
+	// if null, the daemon's configured settings are used.
+	Init *google_protobuf2.BoolValue `protobuf:"bytes,23,opt,name=init" json:"init,omitempty"`
+	// TTY declares that a TTY should be attached to the standard streams,
+	// including stdin if it is still open.
+	TTY bool `protobuf:"varint,13,opt,name=tty,proto3" json:"tty,omitempty"`
+	// OpenStdin declares that the standard input (stdin) should be open.
+	OpenStdin bool `protobuf:"varint,18,opt,name=open_stdin,json=openStdin,proto3" json:"open_stdin,omitempty"`
+	// ReadOnly declares that the container root filesystem is read-only.
+	// This only impacts the root filesystem, not additional mounts (including
+	// tmpfs). For additional mounts that are not part of the initial rootfs,
+	// access is decided by the modes passed in the mount definition.
+	ReadOnly bool `protobuf:"varint,19,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"`
+	// StopSignal defines the signal to stop the container.
+	StopSignal string  `protobuf:"bytes,20,opt,name=stop_signal,json=stopSignal,proto3" json:"stop_signal,omitempty"`
+	Mounts     []Mount `protobuf:"bytes,8,rep,name=mounts" json:"mounts"`
+	// StopGracePeriod is the grace period for stopping the container before
+	// forcefully killing it.
+	// Note: Can't use stdduration here because this needs to be nullable.
+	StopGracePeriod *google_protobuf1.Duration `protobuf:"bytes,9,opt,name=stop_grace_period,json=stopGracePeriod" json:"stop_grace_period,omitempty"`
+	// PullOptions parameterize the behavior of image pulls.
+	PullOptions *ContainerSpec_PullOptions `protobuf:"bytes,10,opt,name=pull_options,json=pullOptions" json:"pull_options,omitempty"`
+	// Secrets contains references to zero or more secrets that
+	// will be exposed to the container.
+	Secrets []*SecretReference `protobuf:"bytes,12,rep,name=secrets" json:"secrets,omitempty"`
+	// Configs contains references to zero or more configs that
+	// will be exposed to the container.
+	Configs []*ConfigReference `protobuf:"bytes,21,rep,name=configs" json:"configs,omitempty"`
+	// Hosts allows additional entries to be specified in /etc/hosts
+	// that associate IP addresses with hostnames.
+	// Detailed documentation is available in:
+	// http://man7.org/linux/man-pages/man5/hosts.5.html
+	// IP_address canonical_hostname [aliases...]
+	//
+	// The format of Hosts in swarmkit follows the same rules as
+	// above.
+	// This is different from `docker run --add-host <host>:<IP>`,
+	// where the format is `<host>:<IP>`.
+	Hosts []string `protobuf:"bytes,17,rep,name=hosts" json:"hosts,omitempty"`
+	// DNSConfig allows one to specify DNS-related configuration in resolv.conf.
+	DNSConfig *ContainerSpec_DNSConfig `protobuf:"bytes,15,opt,name=dns_config,json=dnsConfig" json:"dns_config,omitempty"`
+	// Healthcheck describes how to check whether the container is healthy. If the
+	// container is considered unhealthy, it will be destroyed, its creating
+	// task will exit, and a new task will be rescheduled elsewhere. A container
+	// is considered unhealthy after `Retries` consecutive failures.
+	Healthcheck *HealthConfig `protobuf:"bytes,16,opt,name=healthcheck" json:"healthcheck,omitempty"`
+	// Isolation defines the isolation level for windows containers (default, process, hyperv).
+	// Runtimes that don't support it ignore this field.
+	Isolation ContainerSpec_Isolation `protobuf:"varint,24,opt,name=isolation,proto3,enum=docker.swarmkit.v1.ContainerSpec_Isolation" json:"isolation,omitempty"`
+	// PidsLimit caps the number of processes inside the container, protecting
+	// the OS from resource exhaustion by applications such as fork bombs.
+	PidsLimit int64 `protobuf:"varint,25,opt,name=pidsLimit,proto3" json:"pidsLimit,omitempty"`
+	// Sysctls sets namespaced kernel parameters (sysctls) in the container. This
+	// option is equivalent to passing --sysctl to docker run.
+	//
+	// Note that while options are subject to the same restrictions as arguments
+	// passed to the --sysctl flag on docker run, those options are not further
+	// validated to ensure that they are safe or sensible in a clustered
+	// environment.
+	//
+	// Additionally, sysctls are not validated for support in the underlying
+	// daemon. For information about supported options, refer to the
+	// documentation at:
+	//
+	// https://docs.docker.com/engine/reference/commandline/run/#configure-namespaced-kernel-parameters-sysctls-at-runtime
+	Sysctls map[string]string `protobuf:"bytes,26,rep,name=sysctls" json:"sysctls,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Capabilities is the list of Linux capabilities to be made available to the
+	// container (this overrides the default set of capabilities).
+	Capabilities []string `protobuf:"bytes,27,rep,name=capabilities" json:"capabilities,omitempty"`
+}
+
+func (m *ContainerSpec) Reset()                    { *m = ContainerSpec{} }
+func (*ContainerSpec) ProtoMessage()               {}
+func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} }
+
+// PullOptions allows one to parameterize an image pull.
+type ContainerSpec_PullOptions struct {
+	// RegistryAuth is the registry auth token obtained from the client, required
+	// to pull private images. This is the unmodified JSON used as part of
+	// the `X-Registry-Auth` header.
+	// TODO(nishanttotla): This field will later be deprecated
+	RegistryAuth string `protobuf:"bytes,64,opt,name=registry_auth,json=registryAuth,proto3" json:"registry_auth,omitempty"`
+}
+
+func (m *ContainerSpec_PullOptions) Reset()      { *m = ContainerSpec_PullOptions{} }
+func (*ContainerSpec_PullOptions) ProtoMessage() {}
+func (*ContainerSpec_PullOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptorSpecs, []int{8, 1}
+}
+
+// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf).
+// Detailed documentation is available in:
+// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+// TODO: domain is not supported yet
+type ContainerSpec_DNSConfig struct {
+	// Nameservers specifies the IP addresses of the name servers.
+	Nameservers []string `protobuf:"bytes,1,rep,name=nameservers" json:"nameservers,omitempty"`
+	// Search specifies the search list for host-name lookup.
+	Search []string `protobuf:"bytes,2,rep,name=search" json:"search,omitempty"`
+	// Options allows certain internal resolver variables to be modified.
+	Options []string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"`
+}
+
+func (m *ContainerSpec_DNSConfig) Reset()                    { *m = ContainerSpec_DNSConfig{} }
+func (*ContainerSpec_DNSConfig) ProtoMessage()               {}
+func (*ContainerSpec_DNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8, 2} }
+
+// EndpointSpec defines the properties that can be configured to
+// access and load balance the service.
+type EndpointSpec struct {
+	Mode EndpointSpec_ResolutionMode `protobuf:"varint,1,opt,name=mode,proto3,enum=docker.swarmkit.v1.EndpointSpec_ResolutionMode" json:"mode,omitempty"`
+	// List of exposed ports through which this service is accessible
+	// from outside the cluster.
+	Ports []*PortConfig `protobuf:"bytes,2,rep,name=ports" json:"ports,omitempty"`
+}
+
+func (m *EndpointSpec) Reset()                    { *m = EndpointSpec{} }
+func (*EndpointSpec) ProtoMessage()               {}
+func (*EndpointSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{9} }
+
+// NetworkSpec specifies user defined network parameters.
+type NetworkSpec struct {
+	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+	// DriverConfig is driver-specific configuration consumed by the network driver.
+	DriverConfig *Driver `protobuf:"bytes,2,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"`
+	// Ipv6Enabled enables support for IPv6 on the network.
+	Ipv6Enabled bool `protobuf:"varint,3,opt,name=ipv6_enabled,json=ipv6Enabled,proto3" json:"ipv6_enabled,omitempty"`
+	// Internal restricts external access to the network. This may be
+	// accomplished by disabling the default gateway or through other means.
+	Internal bool         `protobuf:"varint,4,opt,name=internal,proto3" json:"internal,omitempty"`
+	IPAM     *IPAMOptions `protobuf:"bytes,5,opt,name=ipam" json:"ipam,omitempty"`
+	// Attachable allows external (to swarm) entities to manually
+	// attach to this network. With this flag enabled, external
+	// entities such as containers running in a worker node in
+	// the cluster can manually attach to this network and access
+	// the services attached to this network. If this flag is not
+	// enabled (the default case), no manual attachment to this network
+	// can happen.
+	Attachable bool `protobuf:"varint,6,opt,name=attachable,proto3" json:"attachable,omitempty"`
+	// Ingress indicates this network will provide the routing-mesh.
+	// In older versions, the network providing the routing mesh was
+	// created only by swarm internally and was identified by the name
+	// "ingress" and the label "com.docker.swarm.internal": "true".
+	Ingress bool `protobuf:"varint,7,opt,name=ingress,proto3" json:"ingress,omitempty"`
+	// ConfigFrom is the source of the configuration for this network.
+	//
+	// Types that are valid to be assigned to ConfigFrom:
+	//	*NetworkSpec_Network
+	ConfigFrom isNetworkSpec_ConfigFrom `protobuf_oneof:"config_from"`
+}
+
+func (m *NetworkSpec) Reset()                    { *m = NetworkSpec{} }
+func (*NetworkSpec) ProtoMessage()               {}
+func (*NetworkSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{10} }
+
+type isNetworkSpec_ConfigFrom interface {
+	isNetworkSpec_ConfigFrom()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type NetworkSpec_Network struct {
+	Network string `protobuf:"bytes,8,opt,name=network,proto3,oneof"`
+}
+
+func (*NetworkSpec_Network) isNetworkSpec_ConfigFrom() {}
+
+func (m *NetworkSpec) GetConfigFrom() isNetworkSpec_ConfigFrom {
+	if m != nil {
+		return m.ConfigFrom
+	}
+	return nil
+}
+
+func (m *NetworkSpec) GetNetwork() string {
+	if x, ok := m.GetConfigFrom().(*NetworkSpec_Network); ok {
+		return x.Network
+	}
+	return ""
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*NetworkSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _NetworkSpec_OneofMarshaler, _NetworkSpec_OneofUnmarshaler, _NetworkSpec_OneofSizer, []interface{}{
+		(*NetworkSpec_Network)(nil),
+	}
+}
+
+func _NetworkSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*NetworkSpec)
+	// config_from
+	switch x := m.ConfigFrom.(type) {
+	case *NetworkSpec_Network:
+		_ = b.EncodeVarint(8<<3 | proto.WireBytes)
+		_ = b.EncodeStringBytes(x.Network)
+	case nil:
+	default:
+		return fmt.Errorf("NetworkSpec.ConfigFrom has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _NetworkSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*NetworkSpec)
+	switch tag {
+	case 8: // config_from.network
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeStringBytes()
+		m.ConfigFrom = &NetworkSpec_Network{x}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _NetworkSpec_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*NetworkSpec)
+	// config_from
+	switch x := m.ConfigFrom.(type) {
+	case *NetworkSpec_Network:
+		n += proto.SizeVarint(8<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Network)))
+		n += len(x.Network)
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// ClusterSpec specifies global cluster settings.
+type ClusterSpec struct {
+	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+	// DEPRECATED: AcceptancePolicy defines the certificate issuance policy.
+	// Acceptance policy is no longer customizable, and secrets have been
+	// replaced with join tokens.
+	AcceptancePolicy AcceptancePolicy `protobuf:"bytes,2,opt,name=acceptance_policy,json=acceptancePolicy" json:"acceptance_policy"`
+	// Orchestration defines cluster-level orchestration settings.
+	Orchestration OrchestrationConfig `protobuf:"bytes,3,opt,name=orchestration" json:"orchestration"`
+	// Raft defines the cluster's raft settings.
+	Raft RaftConfig `protobuf:"bytes,4,opt,name=raft" json:"raft"`
+	// Dispatcher defines cluster-level dispatcher settings.
+	Dispatcher DispatcherConfig `protobuf:"bytes,5,opt,name=dispatcher" json:"dispatcher"`
+	// CAConfig defines cluster-level certificate authority settings.
+	CAConfig CAConfig `protobuf:"bytes,6,opt,name=ca_config,json=caConfig" json:"ca_config"`
+	// TaskDefaults specifies the default values to use for task creation.
+	TaskDefaults TaskDefaults `protobuf:"bytes,7,opt,name=task_defaults,json=taskDefaults" json:"task_defaults"`
+	// EncryptionConfig defines the cluster's encryption settings.
+	EncryptionConfig EncryptionConfig `protobuf:"bytes,8,opt,name=encryption_config,json=encryptionConfig" json:"encryption_config"`
+}
+
+func (m *ClusterSpec) Reset()                    { *m = ClusterSpec{} }
+func (*ClusterSpec) ProtoMessage()               {}
+func (*ClusterSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{11} }
+
+// SecretSpec specifies a user-provided secret.
+type SecretSpec struct {
+	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+	// Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes)
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	// Templating controls whether and how to evaluate the secret payload as
+	// a template. If it is not set, no templating is used.
+	//
+	// The currently recognized values are:
+	// - golang: Go templating
+	Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"`
+	// Driver is the secret driver that is used to store the specified secret
+	Driver *Driver `protobuf:"bytes,4,opt,name=driver" json:"driver,omitempty"`
+}
+
+func (m *SecretSpec) Reset()                    { *m = SecretSpec{} }
+func (*SecretSpec) ProtoMessage()               {}
+func (*SecretSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{12} }
+
+// ConfigSpec specifies user-provided configuration files.
+type ConfigSpec struct {
+	Annotations Annotations `protobuf:"bytes,1,opt,name=annotations" json:"annotations"`
+	// Data is the config payload - the maximum size is 500KB (that is, 500*1024 bytes)
+	// TODO(aaronl): Do we want to revise this to include multiple payloads in a single
+	// ConfigSpec? Define this to be a tar? etc...
+	Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	// Templating controls whether and how to evaluate the config payload as
+	// a template. If it is not set, no templating is used.
+	//
+	// The currently recognized values are:
+	// - golang: Go templating
+	Templating *Driver `protobuf:"bytes,3,opt,name=templating" json:"templating,omitempty"`
+}
+
+func (m *ConfigSpec) Reset()                    { *m = ConfigSpec{} }
+func (*ConfigSpec) ProtoMessage()               {}
+func (*ConfigSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{13} }
+
+func init() {
+	proto.RegisterType((*NodeSpec)(nil), "docker.swarmkit.v1.NodeSpec")
+	proto.RegisterType((*ServiceSpec)(nil), "docker.swarmkit.v1.ServiceSpec")
+	proto.RegisterType((*ReplicatedService)(nil), "docker.swarmkit.v1.ReplicatedService")
+	proto.RegisterType((*GlobalService)(nil), "docker.swarmkit.v1.GlobalService")
+	proto.RegisterType((*TaskSpec)(nil), "docker.swarmkit.v1.TaskSpec")
+	proto.RegisterType((*ResourceReference)(nil), "docker.swarmkit.v1.ResourceReference")
+	proto.RegisterType((*GenericRuntimeSpec)(nil), "docker.swarmkit.v1.GenericRuntimeSpec")
+	proto.RegisterType((*NetworkAttachmentSpec)(nil), "docker.swarmkit.v1.NetworkAttachmentSpec")
+	proto.RegisterType((*ContainerSpec)(nil), "docker.swarmkit.v1.ContainerSpec")
+	proto.RegisterType((*ContainerSpec_PullOptions)(nil), "docker.swarmkit.v1.ContainerSpec.PullOptions")
+	proto.RegisterType((*ContainerSpec_DNSConfig)(nil), "docker.swarmkit.v1.ContainerSpec.DNSConfig")
+	proto.RegisterType((*EndpointSpec)(nil), "docker.swarmkit.v1.EndpointSpec")
+	proto.RegisterType((*NetworkSpec)(nil), "docker.swarmkit.v1.NetworkSpec")
+	proto.RegisterType((*ClusterSpec)(nil), "docker.swarmkit.v1.ClusterSpec")
+	proto.RegisterType((*SecretSpec)(nil), "docker.swarmkit.v1.SecretSpec")
+	proto.RegisterType((*ConfigSpec)(nil), "docker.swarmkit.v1.ConfigSpec")
+	proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value)
+	proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value)
+	proto.RegisterEnum("docker.swarmkit.v1.ContainerSpec_Isolation", ContainerSpec_Isolation_name, ContainerSpec_Isolation_value)
+	proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value)
+}
+
+func (m *NodeSpec) Copy() *NodeSpec {
+	if m == nil {
+		return nil
+	}
+	o := &NodeSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *NodeSpec) CopyFrom(src interface{}) {
+
+	o := src.(*NodeSpec)
+	*m = *o
+	deepcopy.Copy(&m.Annotations, &o.Annotations)
+}
+
+func (m *ServiceSpec) Copy() *ServiceSpec {
+	if m == nil {
+		return nil
+	}
+	o := &ServiceSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ServiceSpec) CopyFrom(src interface{}) {
+
+	o := src.(*ServiceSpec)
+	*m = *o
+	deepcopy.Copy(&m.Annotations, &o.Annotations)
+	deepcopy.Copy(&m.Task, &o.Task)
+	if o.Update != nil {
+		m.Update = &UpdateConfig{}
+		deepcopy.Copy(m.Update, o.Update)
+	}
+	if o.Rollback != nil {
+		m.Rollback = &UpdateConfig{}
+		deepcopy.Copy(m.Rollback, o.Rollback)
+	}
+	if o.Networks != nil {
+		m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks))
+		for i := range m.Networks {
+			m.Networks[i] = &NetworkAttachmentConfig{}
+			deepcopy.Copy(m.Networks[i], o.Networks[i])
+		}
+	}
+
+	if o.Endpoint != nil {
+		m.Endpoint = &EndpointSpec{}
+		deepcopy.Copy(m.Endpoint, o.Endpoint)
+	}
+	if o.Mode != nil {
+		switch o.Mode.(type) {
+		case *ServiceSpec_Replicated:
+			v := ServiceSpec_Replicated{
+				Replicated: &ReplicatedService{},
+			}
+			deepcopy.Copy(v.Replicated, o.GetReplicated())
+			m.Mode = &v
+		case *ServiceSpec_Global:
+			v := ServiceSpec_Global{
+				Global: &GlobalService{},
+			}
+			deepcopy.Copy(v.Global, o.GetGlobal())
+			m.Mode = &v
+		}
+	}
+
+}
+
+func (m *ReplicatedService) Copy() *ReplicatedService {
+	if m == nil {
+		return nil
+	}
+	o := &ReplicatedService{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ReplicatedService) CopyFrom(src interface{}) {
+
+	o := src.(*ReplicatedService)
+	*m = *o
+}
+
+func (m *GlobalService) Copy() *GlobalService {
+	if m == nil {
+		return nil
+	}
+	o := &GlobalService{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *GlobalService) CopyFrom(src interface{}) {}
+func (m *TaskSpec) Copy() *TaskSpec {
+	if m == nil {
+		return nil
+	}
+	o := &TaskSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *TaskSpec) CopyFrom(src interface{}) {
+
+	o := src.(*TaskSpec)
+	*m = *o
+	if o.Resources != nil {
+		m.Resources = &ResourceRequirements{}
+		deepcopy.Copy(m.Resources, o.Resources)
+	}
+	if o.Restart != nil {
+		m.Restart = &RestartPolicy{}
+		deepcopy.Copy(m.Restart, o.Restart)
+	}
+	if o.Placement != nil {
+		m.Placement = &Placement{}
+		deepcopy.Copy(m.Placement, o.Placement)
+	}
+	if o.LogDriver != nil {
+		m.LogDriver = &Driver{}
+		deepcopy.Copy(m.LogDriver, o.LogDriver)
+	}
+	if o.Networks != nil {
+		m.Networks = make([]*NetworkAttachmentConfig, len(o.Networks))
+		for i := range m.Networks {
+			m.Networks[i] = &NetworkAttachmentConfig{}
+			deepcopy.Copy(m.Networks[i], o.Networks[i])
+		}
+	}
+
+	if o.ResourceReferences != nil {
+		m.ResourceReferences = make([]ResourceReference, len(o.ResourceReferences))
+		for i := range m.ResourceReferences {
+			deepcopy.Copy(&m.ResourceReferences[i], &o.ResourceReferences[i])
+		}
+	}
+
+	if o.Runtime != nil {
+		switch o.Runtime.(type) {
+		case *TaskSpec_Attachment:
+			v := TaskSpec_Attachment{
+				Attachment: &NetworkAttachmentSpec{},
+			}
+			deepcopy.Copy(v.Attachment, o.GetAttachment())
+			m.Runtime = &v
+		case *TaskSpec_Container:
+			v := TaskSpec_Container{
+				Container: &ContainerSpec{},
+			}
+			deepcopy.Copy(v.Container, o.GetContainer())
+			m.Runtime = &v
+		case *TaskSpec_Generic:
+			v := TaskSpec_Generic{
+				Generic: &GenericRuntimeSpec{},
+			}
+			deepcopy.Copy(v.Generic, o.GetGeneric())
+			m.Runtime = &v
+		}
+	}
+
+}
+
+func (m *ResourceReference) Copy() *ResourceReference {
+	if m == nil {
+		return nil
+	}
+	o := &ResourceReference{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ResourceReference) CopyFrom(src interface{}) {
+
+	o := src.(*ResourceReference)
+	*m = *o
+}
+
+func (m *GenericRuntimeSpec) Copy() *GenericRuntimeSpec {
+	if m == nil {
+		return nil
+	}
+	o := &GenericRuntimeSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *GenericRuntimeSpec) CopyFrom(src interface{}) {
+
+	o := src.(*GenericRuntimeSpec)
+	*m = *o
+	if o.Payload != nil {
+		m.Payload = &google_protobuf4.Any{}
+		deepcopy.Copy(m.Payload, o.Payload)
+	}
+}
+
+func (m *NetworkAttachmentSpec) Copy() *NetworkAttachmentSpec {
+	if m == nil {
+		return nil
+	}
+	o := &NetworkAttachmentSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *NetworkAttachmentSpec) CopyFrom(src interface{}) {
+
+	o := src.(*NetworkAttachmentSpec)
+	*m = *o
+}
+
+func (m *ContainerSpec) Copy() *ContainerSpec {
+	if m == nil {
+		return nil
+	}
+	o := &ContainerSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ContainerSpec) CopyFrom(src interface{}) {
+
+	o := src.(*ContainerSpec)
+	*m = *o
+	if o.Labels != nil {
+		m.Labels = make(map[string]string, len(o.Labels))
+		for k, v := range o.Labels {
+			m.Labels[k] = v
+		}
+	}
+
+	if o.Command != nil {
+		m.Command = make([]string, len(o.Command))
+		copy(m.Command, o.Command)
+	}
+
+	if o.Args != nil {
+		m.Args = make([]string, len(o.Args))
+		copy(m.Args, o.Args)
+	}
+
+	if o.Env != nil {
+		m.Env = make([]string, len(o.Env))
+		copy(m.Env, o.Env)
+	}
+
+	if o.Groups != nil {
+		m.Groups = make([]string, len(o.Groups))
+		copy(m.Groups, o.Groups)
+	}
+
+	if o.Privileges != nil {
+		m.Privileges = &Privileges{}
+		deepcopy.Copy(m.Privileges, o.Privileges)
+	}
+	if o.Init != nil {
+		m.Init = &google_protobuf2.BoolValue{}
+		deepcopy.Copy(m.Init, o.Init)
+	}
+	if o.Mounts != nil {
+		m.Mounts = make([]Mount, len(o.Mounts))
+		for i := range m.Mounts {
+			deepcopy.Copy(&m.Mounts[i], &o.Mounts[i])
+		}
+	}
+
+	if o.StopGracePeriod != nil {
+		m.StopGracePeriod = &google_protobuf1.Duration{}
+		deepcopy.Copy(m.StopGracePeriod, o.StopGracePeriod)
+	}
+	if o.PullOptions != nil {
+		m.PullOptions = &ContainerSpec_PullOptions{}
+		deepcopy.Copy(m.PullOptions, o.PullOptions)
+	}
+	if o.Secrets != nil {
+		m.Secrets = make([]*SecretReference, len(o.Secrets))
+		for i := range m.Secrets {
+			m.Secrets[i] = &SecretReference{}
+			deepcopy.Copy(m.Secrets[i], o.Secrets[i])
+		}
+	}
+
+	if o.Configs != nil {
+		m.Configs = make([]*ConfigReference, len(o.Configs))
+		for i := range m.Configs {
+			m.Configs[i] = &ConfigReference{}
+			deepcopy.Copy(m.Configs[i], o.Configs[i])
+		}
+	}
+
+	if o.Hosts != nil {
+		m.Hosts = make([]string, len(o.Hosts))
+		copy(m.Hosts, o.Hosts)
+	}
+
+	if o.DNSConfig != nil {
+		m.DNSConfig = &ContainerSpec_DNSConfig{}
+		deepcopy.Copy(m.DNSConfig, o.DNSConfig)
+	}
+	if o.Healthcheck != nil {
+		m.Healthcheck = &HealthConfig{}
+		deepcopy.Copy(m.Healthcheck, o.Healthcheck)
+	}
+	if o.Sysctls != nil {
+		m.Sysctls = make(map[string]string, len(o.Sysctls))
+		for k, v := range o.Sysctls {
+			m.Sysctls[k] = v
+		}
+	}
+
+	if o.Capabilities != nil {
+		m.Capabilities = make([]string, len(o.Capabilities))
+		copy(m.Capabilities, o.Capabilities)
+	}
+
+}
+
+func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions {
+	if m == nil {
+		return nil
+	}
+	o := &ContainerSpec_PullOptions{}
+	o.CopyFrom(m)
+	return o
+}
+
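The generated CopyFrom methods above all follow the same pattern: a shallow `*m = *o` struct assignment, then an explicit make/copy (or deepcopy.Copy) for every slice, map, and pointer field. The reason is that a Go struct copy duplicates slice headers but not backing arrays, so without the reallocation step the clone would alias the source. A minimal, self-contained sketch of that aliasing hazard (illustrative only, not part of the vendored file):

package main

import "fmt"

func main() {
	src := []string{"a", "b"}

	shallow := src // copies the slice header only; shares the backing array

	deep := make([]string, len(src)) // what CopyFrom does for each slice field
	copy(deep, src)

	src[0] = "mutated"
	fmt.Println(shallow[0]) // "mutated" — the shallow copy aliases src
	fmt.Println(deep[0])    // "a" — the deep copy is independent
}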
+func (m *ContainerSpec_PullOptions) CopyFrom(src interface{}) {
+
+	o := src.(*ContainerSpec_PullOptions)
+	*m = *o
+}
+
+func (m *ContainerSpec_DNSConfig) Copy() *ContainerSpec_DNSConfig {
+	if m == nil {
+		return nil
+	}
+	o := &ContainerSpec_DNSConfig{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ContainerSpec_DNSConfig) CopyFrom(src interface{}) {
+
+	o := src.(*ContainerSpec_DNSConfig)
+	*m = *o
+	if o.Nameservers != nil {
+		m.Nameservers = make([]string, len(o.Nameservers))
+		copy(m.Nameservers, o.Nameservers)
+	}
+
+	if o.Search != nil {
+		m.Search = make([]string, len(o.Search))
+		copy(m.Search, o.Search)
+	}
+
+	if o.Options != nil {
+		m.Options = make([]string, len(o.Options))
+		copy(m.Options, o.Options)
+	}
+
+}
+
+func (m *EndpointSpec) Copy() *EndpointSpec {
+	if m == nil {
+		return nil
+	}
+	o := &EndpointSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *EndpointSpec) CopyFrom(src interface{}) {
+
+	o := src.(*EndpointSpec)
+	*m = *o
+	if o.Ports != nil {
+		m.Ports = make([]*PortConfig, len(o.Ports))
+		for i := range m.Ports {
+			m.Ports[i] = &PortConfig{}
+			deepcopy.Copy(m.Ports[i], o.Ports[i])
+		}
+	}
+
+}
+
+func (m *NetworkSpec) Copy() *NetworkSpec {
+	if m == nil {
+		return nil
+	}
+	o := &NetworkSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *NetworkSpec) CopyFrom(src interface{}) {
+
+	o := src.(*NetworkSpec)
+	*m = *o
+	deepcopy.Copy(&m.Annotations, &o.Annotations)
+	if o.DriverConfig != nil {
+		m.DriverConfig = &Driver{}
+		deepcopy.Copy(m.DriverConfig, o.DriverConfig)
+	}
+	if o.IPAM != nil {
+		m.IPAM = &IPAMOptions{}
+		deepcopy.Copy(m.IPAM, o.IPAM)
+	}
+	if o.ConfigFrom != nil {
+		switch o.ConfigFrom.(type) {
+		case *NetworkSpec_Network:
+			v := NetworkSpec_Network{
+				Network: o.GetNetwork(),
+			}
+			m.ConfigFrom = &v
+		}
+	}
+
+}
+
+func (m *ClusterSpec) Copy() *ClusterSpec {
+	if m == nil {
+		return nil
+	}
+	o := &ClusterSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ClusterSpec) CopyFrom(src interface{}) {
+
+	o := src.(*ClusterSpec)
+	*m = *o
+	deepcopy.Copy(&m.Annotations, &o.Annotations)
+	deepcopy.Copy(&m.AcceptancePolicy, &o.AcceptancePolicy)
+	deepcopy.Copy(&m.Orchestration, &o.Orchestration)
+	deepcopy.Copy(&m.Raft, &o.Raft)
+	deepcopy.Copy(&m.Dispatcher, &o.Dispatcher)
+	deepcopy.Copy(&m.CAConfig, &o.CAConfig)
+	deepcopy.Copy(&m.TaskDefaults, &o.TaskDefaults)
+	deepcopy.Copy(&m.EncryptionConfig, &o.EncryptionConfig)
+}
+
+func (m *SecretSpec) Copy() *SecretSpec {
+	if m == nil {
+		return nil
+	}
+	o := &SecretSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *SecretSpec) CopyFrom(src interface{}) {
+
+	o := src.(*SecretSpec)
+	*m = *o
+	deepcopy.Copy(&m.Annotations, &o.Annotations)
+	if o.Data != nil {
+		m.Data = make([]byte, len(o.Data))
+		copy(m.Data, o.Data)
+	}
+	if o.Templating != nil {
+		m.Templating = &Driver{}
+		deepcopy.Copy(m.Templating, o.Templating)
+	}
+	if o.Driver != nil {
+		m.Driver = &Driver{}
+		deepcopy.Copy(m.Driver, o.Driver)
+	}
+}
+
+func (m *ConfigSpec) Copy() *ConfigSpec {
+	if m == nil {
+		return nil
+	}
+	o := &ConfigSpec{}
+	o.CopyFrom(m)
+	return o
+}
+
+func (m *ConfigSpec) CopyFrom(src interface{}) {
+
+	o := src.(*ConfigSpec)
+	*m = *o
+	deepcopy.Copy(&m.Annotations, &o.Annotations)
+	if o.Data != nil {
+		m.Data = make([]byte, len(o.Data))
+		copy(m.Data, o.Data)
+	}
+	if o.Templating != nil {
+		m.Templating = &Driver{}
+		deepcopy.Copy(m.Templating, o.Templating)
+	}
+}
+
+func (m *NodeSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
+	n1, err := m.Annotations.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	if m.DesiredRole != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.DesiredRole))
+	}
+	if m.Membership != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Membership))
+	}
+	if m.Availability != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Availability))
+	}
+	return i, nil
+}
+
+func (m *ServiceSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
+	n2, err := m.Annotations.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Task.Size()))
+	n3, err := m.Task.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	if m.Mode != nil {
+		nn4, err := m.Mode.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += nn4
+	}
+	if m.Update != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Update.Size()))
+		n5, err := m.Update.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	if len(m.Networks) > 0 {
+		for _, msg := range m.Networks {
+			dAtA[i] = 0x3a
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.Endpoint != nil {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Endpoint.Size()))
+		n6, err := m.Endpoint.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	if m.Rollback != nil {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Rollback.Size()))
+		n7, err := m.Rollback.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n7
+	}
+	return i, nil
+}
+
+func (m *ServiceSpec_Replicated) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Replicated != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Replicated.Size()))
+		n8, err := m.Replicated.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n8
+	}
+	return i, nil
+}
+func (m *ServiceSpec_Global) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Global != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Global.Size()))
+		n9, err := m.Global.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n9
+	}
+	return i, nil
+}
+func (m *ReplicatedService) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReplicatedService) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Replicas != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Replicas))
+	}
+	return i, nil
+}
+
+func (m *GlobalService) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
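These hand-rolled marshalers hard-code each field's key byte. A protobuf key is `(field_number << 3) | wire_type`, which is why the `TaskSpec_Attachment` marshaler further below (field 8, length-delimited wire type 2) writes `0x42` while `TaskSpec_Generic` (field 10) writes `0x52`. A minimal sketch of that arithmetic (illustrative only; the `key` helper is hypothetical and not part of the vendored file):

package main

import "fmt"

// key computes a protobuf field key: (field_number << 3) | wire_type.
func key(fieldNumber, wireType uint64) uint64 {
	return fieldNumber<<3 | wireType
}

func main() {
	const wireBytes = 2 // length-delimited, i.e. proto.WireBytes
	fmt.Printf("%#x\n", key(1, wireBytes))  // 0xa  — e.g. ContainerSpec.Image
	fmt.Printf("%#x\n", key(8, wireBytes))  // 0x42 — e.g. TaskSpec_Attachment
	fmt.Printf("%#x\n", key(10, wireBytes)) // 0x52 — e.g. TaskSpec_Generic
}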
+func (m *GlobalService) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	return i, nil
+}
+
+func (m *TaskSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Runtime != nil {
+		nn10, err := m.Runtime.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += nn10
+	}
+	if m.Resources != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Resources.Size()))
+		n11, err := m.Resources.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n11
+	}
+	if m.Restart != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Restart.Size()))
+		n12, err := m.Restart.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n12
+	}
+	if m.Placement != nil {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Placement.Size()))
+		n13, err := m.Placement.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n13
+	}
+	if m.LogDriver != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.LogDriver.Size()))
+		n14, err := m.LogDriver.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n14
+	}
+	if len(m.Networks) > 0 {
+		for _, msg := range m.Networks {
+			dAtA[i] = 0x3a
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.ForceUpdate != 0 {
+		dAtA[i] = 0x48
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.ForceUpdate))
+	}
+	if len(m.ResourceReferences) > 0 {
+		for _, msg := range m.ResourceReferences {
+			dAtA[i] = 0x5a
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *TaskSpec_Container) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Container != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Container.Size()))
+		n15, err := m.Container.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n15
+	}
+	return i, nil
+}
+func (m *TaskSpec_Attachment) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Attachment != nil {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Attachment.Size()))
+		n16, err := m.Attachment.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n16
+	}
+	return i, nil
+}
+func (m *TaskSpec_Generic) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	if m.Generic != nil {
+		dAtA[i] = 0x52
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Generic.Size()))
+		n17, err := m.Generic.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n17
+	}
+	return i, nil
+}
+func (m *ResourceReference) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceReference) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ResourceID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.ResourceID)))
+		i += copy(dAtA[i:], m.ResourceID)
+	}
+	if m.ResourceType != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.ResourceType))
+	}
+	return i, nil
+}
+
+func (m *GenericRuntimeSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GenericRuntimeSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Kind) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Kind)))
+		i += copy(dAtA[i:], m.Kind)
+	}
+	if m.Payload != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Payload.Size()))
+		n18, err := m.Payload.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n18
+	}
+	return i, nil
+}
+
+func (m *NetworkAttachmentSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NetworkAttachmentSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *ContainerSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Image) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Image)))
+		i += copy(dAtA[i:], m.Image)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v)))
+			i = encodeVarintSpecs(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			dAtA[i] = 0x1a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Args) > 0 {
+		for _, s := range m.Args {
+			dAtA[i] = 0x22
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Env) > 0 {
+		for _, s := range m.Env {
+			dAtA[i] = 0x2a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Dir) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Dir)))
+		i += copy(dAtA[i:], m.Dir)
+	}
+	if len(m.User) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.User)))
+		i += copy(dAtA[i:], m.User)
+	}
+	if len(m.Mounts) > 0 {
+		for _, msg := range m.Mounts {
+			dAtA[i] = 0x42
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.StopGracePeriod != nil {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.StopGracePeriod.Size()))
+		n19, err := m.StopGracePeriod.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n19
+	}
+	if m.PullOptions != nil {
+		dAtA[i] = 0x52
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.PullOptions.Size()))
+		n20, err := m.PullOptions.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n20
+	}
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			dAtA[i] = 0x5a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Secrets) > 0 {
+		for _, msg := range m.Secrets {
+			dAtA[i] = 0x62
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.TTY {
+		dAtA[i] = 0x68
+		i++
+		if m.TTY {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.Hostname) > 0 {
+		dAtA[i] = 0x72
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Hostname)))
+		i += copy(dAtA[i:], m.Hostname)
+	}
+	if m.DNSConfig != nil {
+		dAtA[i] = 0x7a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.DNSConfig.Size()))
+		n21, err := m.DNSConfig.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n21
+	}
+	if m.Healthcheck != nil {
+		dAtA[i] = 0x82
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Healthcheck.Size()))
+		n22, err := m.Healthcheck.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n22
+	}
+	if len(m.Hosts) > 0 {
+		for _, s := range m.Hosts {
+			dAtA[i] = 0x8a
+			i++
+			dAtA[i] = 0x1
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if m.OpenStdin {
+		dAtA[i] = 0x90
+		i++
+		dAtA[i] = 0x1
+		i++
+		if m.OpenStdin {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ReadOnly {
+		dAtA[i] = 0x98
+		i++
+		dAtA[i] = 0x1
+		i++
+		if m.ReadOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.StopSignal) > 0 {
+		dAtA[i] = 0xa2
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.StopSignal)))
+		i += copy(dAtA[i:], m.StopSignal)
+	}
+	if len(m.Configs) > 0 {
+		for _, msg := range m.Configs {
+			dAtA[i] = 0xaa
+			i++
+			dAtA[i] = 0x1
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.Privileges != nil {
+		dAtA[i] = 0xb2
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Privileges.Size()))
+		n23, err := m.Privileges.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n23
+	}
+	if m.Init != nil {
+		dAtA[i] = 0xba
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Init.Size()))
+		n24, err := m.Init.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n24
+	}
+	if m.Isolation != 0 {
+		dAtA[i] = 0xc0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Isolation))
+	}
+	if m.PidsLimit != 0 {
+		dAtA[i] = 0xc8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.PidsLimit))
+	}
+	if len(m.Sysctls) > 0 {
+		for k, _ := range m.Sysctls {
+			dAtA[i] = 0xd2
+			i++
+			dAtA[i] = 0x1
+			i++
+			v := m.Sysctls[k]
+			mapSize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v)))
+			i = encodeVarintSpecs(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if len(m.Capabilities) > 0 {
+		for _, s := range m.Capabilities {
+			dAtA[i] = 0xda
+			i++
+			dAtA[i] = 0x1
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *ContainerSpec_PullOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerSpec_PullOptions) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.RegistryAuth) > 0 {
+		dAtA[i] = 0x82
+		i++
+		dAtA[i] = 0x4
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.RegistryAuth)))
+		i += copy(dAtA[i:], m.RegistryAuth)
+	}
+	return i, nil
+}
+
+func (m *ContainerSpec_DNSConfig) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerSpec_DNSConfig) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Nameservers) > 0 {
+		for _, s := range m.Nameservers {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Search) > 0 {
+		for _, s := range m.Search {
+			dAtA[i] = 0x12
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Options) > 0 {
+		for _, s := range m.Options {
+			dAtA[i] = 0x1a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *EndpointSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EndpointSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Mode != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Mode))
+	}
+	if len(m.Ports) > 0 {
+		for _, msg := range m.Ports {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSpecs(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *NetworkSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NetworkSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
+	n25, err := m.Annotations.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n25
+	if m.DriverConfig != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.DriverConfig.Size()))
+		n26, err := m.DriverConfig.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n26
+	}
+	if m.Ipv6Enabled {
+		dAtA[i] = 0x18
+		i++
+		if m.Ipv6Enabled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Internal {
+		dAtA[i] = 0x20
+		i++
+		if m.Internal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.IPAM != nil {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.IPAM.Size()))
+		n27, err := m.IPAM.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n27
+	}
+	if m.Attachable {
+		dAtA[i] = 0x30
+		i++
+		if m.Attachable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Ingress {
+		dAtA[i] = 0x38
+		i++
+		if m.Ingress {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ConfigFrom != nil {
+		nn28, err := m.ConfigFrom.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += nn28
+	}
+	return i, nil
+}
+
+func (m *NetworkSpec_Network) MarshalTo(dAtA []byte) (int, error) {
+	i := 0
+	dAtA[i] = 0x42
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(len(m.Network)))
+	i += copy(dAtA[i:], m.Network)
+	return i, nil
+}
+func (m *ClusterSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ClusterSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
+	n29, err := m.Annotations.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n29
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.AcceptancePolicy.Size()))
+	n30, err := m.AcceptancePolicy.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n30
+	dAtA[i] = 0x1a
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Orchestration.Size()))
+	n31, err := m.Orchestration.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n31
+	dAtA[i] = 0x22
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Raft.Size()))
+	n32, err := m.Raft.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n32
+	dAtA[i] = 0x2a
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Dispatcher.Size()))
+	n33, err := m.Dispatcher.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n33
+	dAtA[i] = 0x32
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.CAConfig.Size()))
+	n34, err := m.CAConfig.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n34
+	dAtA[i] = 0x3a
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.TaskDefaults.Size()))
+	n35, err := m.TaskDefaults.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n35
+	dAtA[i] = 0x42
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.EncryptionConfig.Size()))
+	n36, err := m.EncryptionConfig.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n36
+	return i, nil
+}
+
+func (m *SecretSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SecretSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
+	n37, err := m.Annotations.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n37
+	if len(m.Data) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	if m.Templating != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size()))
+		n38, err := m.Templating.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n38
+	}
+	if m.Driver != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Driver.Size()))
+		n39, err := m.Driver.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n39
+	}
+	return i, nil
+}
+
+func (m *ConfigSpec) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfigSpec) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSpecs(dAtA, i, uint64(m.Annotations.Size()))
+	n40, err := m.Annotations.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n40
+	if len(m.Data) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	if m.Templating != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Templating.Size()))
+		n41, err := m.Templating.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n41
+	}
+	return i, nil
+}
+
+func encodeVarintSpecs(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+
+func (m *NodeSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	if m.DesiredRole != 0 {
+		n += 1 + sovSpecs(uint64(m.DesiredRole))
+	}
+	if m.Membership != 0 {
+		n += 1 + sovSpecs(uint64(m.Membership))
+	}
+	if m.Availability != 0 {
+		n += 1 + sovSpecs(uint64(m.Availability))
+	}
+	return n
+}
+
+func (m *ServiceSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.Task.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	if m.Mode != nil {
+		n += m.Mode.Size()
+	}
+	if m.Update != nil {
+		l = m.Update.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Networks) > 0 {
+		for _, e := range m.Networks {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.Endpoint != nil {
+		l = m.Endpoint.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Rollback != nil {
+		l = m.Rollback.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func (m *ServiceSpec_Replicated) Size() (n int) {
+	var l int
+	_ = l
+	if m.Replicated != nil {
+		l = m.Replicated.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+func (m *ServiceSpec_Global) Size() (n int) {
+	var l int
+	_ = l
+	if m.Global != nil {
+		l = m.Global.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+func (m *ReplicatedService) Size() (n int) {
+	var l int
+	_ = l
+	if m.Replicas != 0 {
+		n += 1 + sovSpecs(uint64(m.Replicas))
+	}
+	return n
+}
+
+func (m *GlobalService) Size() (n int) {
+	var l int
+	_ = l
+	return n
+}
+
+func (m *TaskSpec) Size() (n int) {
+	var l int
+	_ = l
+	if m.Runtime != nil {
+		n += m.Runtime.Size()
+	}
+	if m.Resources != nil {
+		l = m.Resources.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Restart != nil {
+		l = m.Restart.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Placement != nil {
+		l = m.Placement.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.LogDriver != nil {
+		l = m.LogDriver.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Networks) > 0 {
+		for _, e := range m.Networks {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.ForceUpdate != 0 {
+		n += 1 + sovSpecs(uint64(m.ForceUpdate))
+	}
+	if len(m.ResourceReferences) > 0 {
+		for _, e := range m.ResourceReferences {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *TaskSpec_Container) Size() (n int) {
+	var l int
+	_ = l
+	if m.Container != nil {
+		l = m.Container.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+func (m *TaskSpec_Attachment) Size() (n int) {
+	var l int
+	_ = l
+	if m.Attachment != nil {
+		l = m.Attachment.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+func (m *TaskSpec_Generic) Size() (n int) {
+	var l int
+	_ = l
+	if m.Generic != nil {
+		l = m.Generic.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+func (m *ResourceReference) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ResourceID)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.ResourceType != 0 {
+		n += 1 + sovSpecs(uint64(m.ResourceType))
+	}
+	return n
+}
+
+func (m *GenericRuntimeSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Kind)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Payload != nil {
+		l = m.Payload.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func (m *NetworkAttachmentSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Image)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v)))
+			n += mapEntrySize + 1 + sovSpecs(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Command) > 0 {
+		for _, s := range m.Command {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if len(m.Args) > 0 {
+		for _, s := range m.Args {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if len(m.Env) > 0 {
+		for _, s := range m.Env {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	l = len(m.Dir)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	l = len(m.User)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Mounts) > 0 {
+		for _, e := range m.Mounts {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.StopGracePeriod != nil {
+		l = m.StopGracePeriod.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.PullOptions != nil {
+		l = m.PullOptions.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Groups) > 0 {
+		for _, s := range m.Groups {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if len(m.Secrets) > 0 {
+		for _, e := range m.Secrets {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.TTY {
+		n += 2
+	}
+	l = len(m.Hostname)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.DNSConfig != nil {
+		l = m.DNSConfig.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Healthcheck != nil {
+		l = m.Healthcheck.Size()
+		n += 2 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Hosts) > 0 {
+		for _, s := range m.Hosts {
+			l = len(s)
+			n += 2 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.OpenStdin {
+		n += 3
+	}
+	if m.ReadOnly {
+		n += 3
+	}
+	l = len(m.StopSignal)
+	if l > 0 {
+		n += 2 + l + sovSpecs(uint64(l))
+	}
+	if len(m.Configs) > 0 {
+		for _, e := range m.Configs {
+			l = e.Size()
+			n += 2 + l + sovSpecs(uint64(l))
+		}
+	}
+	if m.Privileges != nil {
+		l = m.Privileges.Size()
+		n += 2 + l + sovSpecs(uint64(l))
+	}
+	if m.Init != nil {
+		l = m.Init.Size()
+		n += 2 + l + sovSpecs(uint64(l))
+	}
+	if m.Isolation != 0 {
+		n += 2 + sovSpecs(uint64(m.Isolation))
+	}
+	if m.PidsLimit != 0 {
+		n += 2 + sovSpecs(uint64(m.PidsLimit))
+	}
+	if len(m.Sysctls) > 0 {
+		for k, v := range m.Sysctls {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovSpecs(uint64(len(k))) + 1 + len(v) + sovSpecs(uint64(len(v)))
+			n += mapEntrySize + 2 + sovSpecs(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Capabilities) > 0 {
+		for _, s := range m.Capabilities {
+			l = len(s)
+			n += 2 + l + sovSpecs(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ContainerSpec_PullOptions) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.RegistryAuth)
+	if l > 0 {
+		n += 2 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerSpec_DNSConfig) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Nameservers) > 0 {
+		for _, s := range m.Nameservers {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if len(m.Search) > 0 {
+		for _, s := range m.Search {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	if len(m.Options) > 0 {
+		for _, s := range m.Options {
+			l = len(s)
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *EndpointSpec) Size() (n int) {
+	var l int
+	_ = l
+	if m.Mode != 0 {
+		n += 1 + sovSpecs(uint64(m.Mode))
+	}
+	if len(m.Ports) > 0 {
+		for _, e := range m.Ports {
+			l = e.Size()
+			n += 1 + l + sovSpecs(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *NetworkSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	if m.DriverConfig != nil {
+		l = m.DriverConfig.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Ipv6Enabled {
+		n += 2
+	}
+	if m.Internal {
+		n += 2
+	}
+	if m.IPAM != nil {
+		l = m.IPAM.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Attachable {
+		n += 2
+	}
+	if m.Ingress {
+		n += 2
+	}
+	if m.ConfigFrom != nil {
+		n += m.ConfigFrom.Size()
+	}
+	return n
+}
+
+func (m *NetworkSpec_Network) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Network)
+	n += 1 + l + sovSpecs(uint64(l))
+	return n
+}
+func (m *ClusterSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.AcceptancePolicy.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.Orchestration.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.Raft.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.Dispatcher.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.CAConfig.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.TaskDefaults.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = m.EncryptionConfig.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	return n
+}
+
+func (m *SecretSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Templating != nil {
+		l = m.Templating.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Driver != nil {
+		l = m.Driver.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func (m *ConfigSpec) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Annotations.Size()
+	n += 1 + l + sovSpecs(uint64(l))
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	if m.Templating != nil {
+		l = m.Templating.Size()
+		n += 1 + l + sovSpecs(uint64(l))
+	}
+	return n
+}
+
+func sovSpecs(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozSpecs(x uint64) (n int) {
+	return sovSpecs(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *NodeSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NodeSpec{`,
+		`Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+		`DesiredRole:` + fmt.Sprintf("%v", this.DesiredRole) + `,`,
+		`Membership:` + fmt.Sprintf("%v", this.Membership) + `,`,
+		`Availability:` + fmt.Sprintf("%v", this.Availability) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServiceSpec{`,
+		`Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`,
+		`Task:` + strings.Replace(strings.Replace(this.Task.String(), "TaskSpec", "TaskSpec", 1), `&`, ``, 1) + `,`,
+		`Mode:` + fmt.Sprintf("%v", this.Mode) + `,`,
+		`Update:` + strings.Replace(fmt.Sprintf("%v", this.Update), "UpdateConfig", "UpdateConfig", 1) + `,`,
+		`Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`,
+		`Endpoint:` + strings.Replace(fmt.Sprintf("%v", this.Endpoint), "EndpointSpec", "EndpointSpec", 1) + `,`,
+		`Rollback:` + strings.Replace(fmt.Sprintf("%v", this.Rollback), "UpdateConfig", "UpdateConfig", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceSpec_Replicated) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServiceSpec_Replicated{`,
+		`Replicated:` + strings.Replace(fmt.Sprintf("%v", this.Replicated), "ReplicatedService", "ReplicatedService", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ServiceSpec_Global) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ServiceSpec_Global{`,
+		`Global:` + strings.Replace(fmt.Sprintf("%v", this.Global), "GlobalService", "GlobalService", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReplicatedService) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ReplicatedService{`,
+		`Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GlobalService) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GlobalService{`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskSpec{`,
+		`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
+		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceRequirements", "ResourceRequirements", 1) + `,`,
+		`Restart:` + strings.Replace(fmt.Sprintf("%v", this.Restart), "RestartPolicy", "RestartPolicy", 1) + `,`,
+		`Placement:` + strings.Replace(fmt.Sprintf("%v", this.Placement), "Placement", "Placement", 1) + `,`,
+		`LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`,
+		`Networks:` + strings.Replace(fmt.Sprintf("%v", this.Networks), "NetworkAttachmentConfig", "NetworkAttachmentConfig", 1) + `,`,
+		`ForceUpdate:` + fmt.Sprintf("%v", this.ForceUpdate) + `,`,
+		`ResourceReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceReferences), "ResourceReference", "ResourceReference", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskSpec_Container) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskSpec_Container{`,
+		`Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerSpec", "ContainerSpec", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskSpec_Attachment) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskSpec_Attachment{`,
+		`Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachmentSpec", "NetworkAttachmentSpec", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskSpec_Generic) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskSpec_Generic{`,
+		`Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericRuntimeSpec", "GenericRuntimeSpec", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this 
*ResourceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceReference{`, + `ResourceID:` + fmt.Sprintf("%v", this.ResourceID) + `,`, + `ResourceType:` + fmt.Sprintf("%v", this.ResourceType) + `,`, + `}`, + }, "") + return s +} +func (this *GenericRuntimeSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GenericRuntimeSpec{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Payload:` + strings.Replace(fmt.Sprintf("%v", this.Payload), "Any", "google_protobuf4.Any", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkAttachmentSpec{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForSysctls := make([]string, 0, len(this.Sysctls)) + for k, _ := range this.Sysctls { + keysForSysctls = append(keysForSysctls, k) + } + sortkeys.Strings(keysForSysctls) + mapStringForSysctls := "map[string]string{" + for _, k := range keysForSysctls { + mapStringForSysctls += fmt.Sprintf("%v: %v,", k, this.Sysctls[k]) + } + mapStringForSysctls += "}" + s := strings.Join([]string{`&ContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + fmt.Sprintf("%v", this.Env) + `,`, + `Dir:` + fmt.Sprintf("%v", this.Dir) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Mounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "Mount", 1), `&`, ``, 1) + `,`, + `StopGracePeriod:` + strings.Replace(fmt.Sprintf("%v", this.StopGracePeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `PullOptions:` + strings.Replace(fmt.Sprintf("%v", this.PullOptions), "ContainerSpec_PullOptions", "ContainerSpec_PullOptions", 1) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Secrets:` + strings.Replace(fmt.Sprintf("%v", this.Secrets), "SecretReference", "SecretReference", 1) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "ContainerSpec_DNSConfig", "ContainerSpec_DNSConfig", 1) + `,`, + `Healthcheck:` + strings.Replace(fmt.Sprintf("%v", this.Healthcheck), "HealthConfig", "HealthConfig", 1) + `,`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `OpenStdin:` + fmt.Sprintf("%v", this.OpenStdin) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `StopSignal:` + fmt.Sprintf("%v", this.StopSignal) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "ConfigReference", "ConfigReference", 1) + `,`, + `Privileges:` + strings.Replace(fmt.Sprintf("%v", this.Privileges), "Privileges", "Privileges", 1) + `,`, + `Init:` + strings.Replace(fmt.Sprintf("%v", this.Init), "BoolValue", "google_protobuf2.BoolValue", 1) + `,`, + `Isolation:` + fmt.Sprintf("%v", this.Isolation) + `,`, + `PidsLimit:` + 
fmt.Sprintf("%v", this.PidsLimit) + `,`, + `Sysctls:` + mapStringForSysctls + `,`, + `Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec_PullOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_PullOptions{`, + `RegistryAuth:` + fmt.Sprintf("%v", this.RegistryAuth) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerSpec_DNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerSpec_DNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Search:` + fmt.Sprintf("%v", this.Search) + `,`, + `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointSpec{`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`, + `Ipv6Enabled:` + fmt.Sprintf("%v", this.Ipv6Enabled) + `,`, + `Internal:` + fmt.Sprintf("%v", this.Internal) + `,`, + `IPAM:` + strings.Replace(fmt.Sprintf("%v", this.IPAM), "IPAMOptions", "IPAMOptions", 1) + `,`, + `Attachable:` + fmt.Sprintf("%v", this.Attachable) + `,`, + `Ingress:` + fmt.Sprintf("%v", this.Ingress) + `,`, + `ConfigFrom:` + fmt.Sprintf("%v", this.ConfigFrom) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkSpec_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkSpec_Network{`, + `Network:` + fmt.Sprintf("%v", this.Network) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `AcceptancePolicy:` + strings.Replace(strings.Replace(this.AcceptancePolicy.String(), "AcceptancePolicy", "AcceptancePolicy", 1), `&`, ``, 1) + `,`, + `Orchestration:` + strings.Replace(strings.Replace(this.Orchestration.String(), "OrchestrationConfig", "OrchestrationConfig", 1), `&`, ``, 1) + `,`, + `Raft:` + strings.Replace(strings.Replace(this.Raft.String(), "RaftConfig", "RaftConfig", 1), `&`, ``, 1) + `,`, + `Dispatcher:` + strings.Replace(strings.Replace(this.Dispatcher.String(), "DispatcherConfig", "DispatcherConfig", 1), `&`, ``, 1) + `,`, + `CAConfig:` + strings.Replace(strings.Replace(this.CAConfig.String(), "CAConfig", "CAConfig", 1), `&`, ``, 1) + `,`, + `TaskDefaults:` + strings.Replace(strings.Replace(this.TaskDefaults.String(), "TaskDefaults", "TaskDefaults", 1), `&`, ``, 1) + `,`, + `EncryptionConfig:` + strings.Replace(strings.Replace(this.EncryptionConfig.String(), "EncryptionConfig", "EncryptionConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SecretSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), 
"Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigSpec{`, + `Annotations:` + strings.Replace(strings.Replace(this.Annotations.String(), "Annotations", "Annotations", 1), `&`, ``, 1) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Templating:` + strings.Replace(fmt.Sprintf("%v", this.Templating), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringSpecs(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *NodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredRole", wireType) + } + m.DesiredRole = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredRole |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + m.Membership = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Membership |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Availability", wireType) + } + m.Availability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Availability |= (NodeSpec_Availability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ServiceSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Task.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Replicated", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &ReplicatedService{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Mode = &ServiceSpec_Replicated{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &GlobalService{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Mode = &ServiceSpec_Global{v}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Update", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Update == nil {
+				m.Update = &UpdateConfig{}
+			}
+			if err := m.Update.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Networks = append(m.Networks, &NetworkAttachmentConfig{})
+			if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Endpoint", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Endpoint == nil {
+				m.Endpoint = &EndpointSpec{}
+			}
+			if err := m.Endpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Rollback", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Rollback == nil {
+				m.Rollback = &UpdateConfig{}
+			}
+			if err := m.Rollback.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReplicatedService) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReplicatedService: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReplicatedService: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType)
+			}
+			m.Replicas = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Replicas |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GlobalService) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GlobalService: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GlobalService: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &ContainerSpec{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Runtime = &TaskSpec_Container{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resources == nil {
+				m.Resources = &ResourceRequirements{}
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Restart", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Restart == nil {
+				m.Restart = &RestartPolicy{}
+			}
+			if err := m.Restart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Placement", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Placement == nil {
+				m.Placement = &Placement{}
+			}
+			if err := m.Placement.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LogDriver == nil {
+				m.LogDriver = &Driver{}
+			}
+			if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Networks", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Networks = append(m.Networks, &NetworkAttachmentConfig{})
+			if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attachment", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &NetworkAttachmentSpec{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Runtime = &TaskSpec_Attachment{v}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ForceUpdate", wireType)
+			}
+			m.ForceUpdate = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ForceUpdate |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &GenericRuntimeSpec{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Runtime = &TaskSpec_Generic{v}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceReferences", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceReferences = append(m.ResourceReferences, ResourceReference{})
+			if err := m.ResourceReferences[len(m.ResourceReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceType", wireType)
+			}
+			m.ResourceType = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ResourceType |= (ResourceType(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GenericRuntimeSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GenericRuntimeSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GenericRuntimeSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kind = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Payload", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Payload == nil {
+				m.Payload = &google_protobuf4.Any{}
+			}
+			if err := m.Payload.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NetworkAttachmentSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NetworkAttachmentSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NetworkAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerSpec) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerSpec: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSpecs
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSpecs
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSpecs
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSpecs
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthSpecs
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSpecs(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthSpecs
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Labels[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Command = append(m.Command, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Args = append(m.Args, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Env = append(m.Env, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Dir = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Mounts = append(m.Mounts, Mount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StopGracePeriod", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StopGracePeriod == nil {
+				m.StopGracePeriod = &google_protobuf1.Duration{}
+			}
+			if err := m.StopGracePeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PullOptions", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PullOptions == nil {
+				m.PullOptions = &ContainerSpec_PullOptions{}
+			}
+			if err := m.PullOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Secrets = append(m.Secrets, &SecretReference{})
+			if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TTY = bool(v != 0)
+		case 14:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hostname = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 15:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DNSConfig == nil {
+				m.DNSConfig = &ContainerSpec_DNSConfig{}
+			}
+			if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 16:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Healthcheck", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Healthcheck == nil {
+				m.Healthcheck = &HealthConfig{}
+			}
+			if err := m.Healthcheck.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 17:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 18:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OpenStdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.OpenStdin = bool(v != 0)
+		case 19:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ReadOnly = bool(v != 0)
+		case 20:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StopSignal", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StopSignal = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 21:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Configs = append(m.Configs, &ConfigReference{})
+			if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 22:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Privileges == nil {
+				m.Privileges = &Privileges{}
+			}
+			if err := m.Privileges.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 23:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Init == nil {
+				m.Init = &google_protobuf2.BoolValue{}
+			}
+			if err := m.Init.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 24:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Isolation", wireType)
+			}
+			m.Isolation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Isolation |= (ContainerSpec_Isolation(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 25:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PidsLimit", wireType)
+			}
+			m.PidsLimit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PidsLimit |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 26:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Sysctls == nil {
+				m.Sysctls = make(map[string]string)
+			}
+			var mapkey string
+			var mapvalue string
+			for iNdEx < postIndex {
+				entryPreIndex := iNdEx
+				var wire uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSpecs
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					wire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				fieldNum := int32(wire >> 3)
+				if fieldNum == 1 {
+					var stringLenmapkey uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSpecs
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapkey |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapkey := int(stringLenmapkey)
+					if intStringLenmapkey < 0 {
+						return ErrInvalidLengthSpecs
+					}
+					postStringIndexmapkey := iNdEx + intStringLenmapkey
+					if postStringIndexmapkey > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
+					iNdEx = postStringIndexmapkey
+				} else if fieldNum == 2 {
+					var stringLenmapvalue uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowSpecs
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					intStringLenmapvalue := int(stringLenmapvalue)
+					if intStringLenmapvalue < 0 {
+						return ErrInvalidLengthSpecs
+					}
+					postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+					if postStringIndexmapvalue > l {
+						return io.ErrUnexpectedEOF
+					}
+					mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
+					iNdEx = postStringIndexmapvalue
+				} else {
+					iNdEx = entryPreIndex
+					skippy, err := skipSpecs(dAtA[iNdEx:])
+					if err != nil {
+						return err
+					}
+					if skippy < 0 {
+						return ErrInvalidLengthSpecs
+					}
+					if (iNdEx + skippy) > postIndex {
+						return io.ErrUnexpectedEOF
+					}
+					iNdEx += skippy
+				}
+			}
+			m.Sysctls[mapkey] = mapvalue
+			iNdEx = postIndex
+		case 27:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Capabilities = append(m.Capabilities, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerSpec_PullOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PullOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PullOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 64:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RegistryAuth", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RegistryAuth = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSpecs(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerSpec_DNSConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSpecs
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DNSConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DNSConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b :=
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Search", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Search = append(m.Search, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (EndpointSpec_ResolutionMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ipv6Enabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ipv6Enabled = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Internal", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Internal = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPAM", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPAM == nil { + m.IPAM = &IPAMOptions{} + } + if err := m.IPAM.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attachable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attachable = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Ingress = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConfigFrom = &NetworkSpec_Network{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcceptancePolicy", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AcceptancePolicy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orchestration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Orchestration.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raft", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Raft.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Dispatcher", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Dispatcher.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CAConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CAConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TaskDefaults", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TaskDefaults.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx 
= postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EncryptionConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EncryptionConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Annotations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], 
dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Templating", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSpecs + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthSpecs + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Templating == nil { + m.Templating = &Driver{} + } + if err := m.Templating.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSpecs(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSpecs + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSpecs(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSpecs + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSpecs + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSpecs(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSpecs = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSpecs = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/specs.proto", fileDescriptorSpecs) } + +var fileDescriptorSpecs = []byte{ + // 2178 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x6f, 0x1b, 0xb9, + 0x15, 0xb7, 0x6c, 0x59, 0x7f, 0x9e, 0xe4, 0x44, 0xe6, 0x26, 0xd9, 0xb1, 0x92, 0xd8, 0x8a, 0x36, + 0x9b, 0x7a, 0x77, 0x51, 0x19, 0x75, 0x17, 0xdb, 0x6c, 0xd2, 0x6d, 0x2b, 0x59, 0x5a, 0x5b, 0x4d, + 0x62, 0x0b, 0x94, 0xe3, 0x36, 0x40, 0x01, 0x81, 0x9e, 0xa1, 0x25, 
0xc2, 0xa3, 0xe1, 0x94, 0xa4, + 0x1c, 0xe8, 0xd6, 0xe3, 0xc2, 0xfd, 0x0c, 0x46, 0x0f, 0x45, 0xef, 0xed, 0xb7, 0xc8, 0xb1, 0xc7, + 0xf6, 0x62, 0x74, 0xfd, 0x15, 0x7a, 0xeb, 0xa5, 0x05, 0x39, 0x1c, 0xfd, 0x71, 0xe4, 0x38, 0x45, + 0x73, 0xe8, 0x8d, 0x7c, 0xf3, 0xfb, 0x3d, 0xfe, 0xfb, 0xbd, 0xc7, 0xc7, 0x81, 0xcf, 0xbb, 0x4c, + 0xf5, 0x06, 0x87, 0x15, 0x97, 0xf7, 0x37, 0x3c, 0xee, 0x1e, 0x53, 0xb1, 0x21, 0x5f, 0x13, 0xd1, + 0x3f, 0x66, 0x6a, 0x83, 0x84, 0x6c, 0x43, 0x86, 0xd4, 0x95, 0x95, 0x50, 0x70, 0xc5, 0x11, 0x8a, + 0x00, 0x95, 0x18, 0x50, 0x39, 0xf9, 0x51, 0xf1, 0x3a, 0xbe, 0x1a, 0x86, 0xd4, 0xf2, 0x8b, 0xb7, + 0xba, 0xbc, 0xcb, 0x4d, 0x73, 0x43, 0xb7, 0xac, 0x75, 0xb5, 0xcb, 0x79, 0xd7, 0xa7, 0x1b, 0xa6, + 0x77, 0x38, 0x38, 0xda, 0xf0, 0x06, 0x82, 0x28, 0xc6, 0x03, 0xfb, 0x7d, 0xe5, 0xf2, 0x77, 0x12, + 0x0c, 0xaf, 0xa2, 0xbe, 0x16, 0x24, 0x0c, 0xa9, 0xb0, 0x03, 0x96, 0xcf, 0x92, 0x90, 0xd9, 0xe5, + 0x1e, 0x6d, 0x87, 0xd4, 0x45, 0xdb, 0x90, 0x23, 0x41, 0xc0, 0x95, 0xf1, 0x2d, 0x9d, 0x44, 0x29, + 0xb1, 0x9e, 0xdb, 0x5c, 0xab, 0xbc, 0xbd, 0xa6, 0x4a, 0x75, 0x0c, 0xab, 0x25, 0xdf, 0x9c, 0xaf, + 0xcd, 0xe1, 0x49, 0x26, 0xfa, 0x39, 0xe4, 0x3d, 0x2a, 0x99, 0xa0, 0x5e, 0x47, 0x70, 0x9f, 0x3a, + 0xf3, 0xa5, 0xc4, 0xfa, 0x8d, 0xcd, 0x7b, 0xb3, 0x3c, 0xe9, 0xc1, 0x31, 0xf7, 0x29, 0xce, 0x59, + 0x86, 0xee, 0xa0, 0x6d, 0x80, 0x3e, 0xed, 0x1f, 0x52, 0x21, 0x7b, 0x2c, 0x74, 0x16, 0x0c, 0xfd, + 0x07, 0x57, 0xd1, 0xf5, 0xdc, 0x2b, 0x2f, 0x46, 0x70, 0x3c, 0x41, 0x45, 0x2f, 0x20, 0x4f, 0x4e, + 0x08, 0xf3, 0xc9, 0x21, 0xf3, 0x99, 0x1a, 0x3a, 0x49, 0xe3, 0xea, 0xb3, 0x77, 0xba, 0xaa, 0x4e, + 0x10, 0xf0, 0x14, 0xbd, 0xec, 0x01, 0x8c, 0x07, 0x42, 0x8f, 0x20, 0xdd, 0x6a, 0xec, 0xd6, 0x9b, + 0xbb, 0xdb, 0x85, 0xb9, 0xe2, 0xca, 0xe9, 0x59, 0xe9, 0xb6, 0xf6, 0x31, 0x06, 0xb4, 0x68, 0xe0, + 0xb1, 0xa0, 0x8b, 0xd6, 0x21, 0x53, 0xdd, 0xda, 0x6a, 0xb4, 0xf6, 0x1b, 0xf5, 0x42, 0xa2, 0x58, + 0x3c, 0x3d, 0x2b, 0xdd, 0x99, 0x06, 0x56, 0x5d, 0x97, 0x86, 0x8a, 0x7a, 0xc5, 0xe4, 0x77, 0x7f, + 0x5c, 0x9d, 0x2b, 0x7f, 0x97, 0x80, 0xfc, 0xe4, 0x24, 0xd0, 0x23, 0x48, 0x55, 0xb7, 0xf6, 0x9b, + 0x07, 0x8d, 0xc2, 0xdc, 0x98, 0x3e, 0x89, 0xa8, 0xba, 0x8a, 0x9d, 0x50, 0xf4, 0x10, 0x16, 0x5b, + 0xd5, 0x97, 0xed, 0x46, 0x21, 0x31, 0x9e, 0xce, 0x24, 0xac, 0x45, 0x06, 0xd2, 0xa0, 0xea, 0xb8, + 0xda, 0xdc, 0x2d, 0xcc, 0xcf, 0x46, 0xd5, 0x05, 0x61, 0x81, 0x9d, 0xca, 0x1f, 0x92, 0x90, 0x6b, + 0x53, 0x71, 0xc2, 0xdc, 0x0f, 0x2c, 0x91, 0xaf, 0x20, 0xa9, 0x88, 0x3c, 0x36, 0xd2, 0xc8, 0xcd, + 0x96, 0xc6, 0x3e, 0x91, 0xc7, 0x7a, 0x50, 0x4b, 0x37, 0x78, 0xad, 0x0c, 0x41, 0x43, 0x9f, 0xb9, + 0x44, 0x51, 0xcf, 0x28, 0x23, 0xb7, 0xf9, 0xe9, 0x2c, 0x36, 0x1e, 0xa1, 0xec, 0xfc, 0x77, 0xe6, + 0xf0, 0x04, 0x15, 0x3d, 0x85, 0x54, 0xd7, 0xe7, 0x87, 0xc4, 0x37, 0x9a, 0xc8, 0x6d, 0x3e, 0x98, + 0xe5, 0x64, 0xdb, 0x20, 0xc6, 0x0e, 0x2c, 0x05, 0x3d, 0x86, 0xd4, 0x20, 0xf4, 0x88, 0xa2, 0x4e, + 0xca, 0x90, 0x4b, 0xb3, 0xc8, 0x2f, 0x0d, 0x62, 0x8b, 0x07, 0x47, 0xac, 0x8b, 0x2d, 0x1e, 0x3d, + 0x83, 0x4c, 0x40, 0xd5, 0x6b, 0x2e, 0x8e, 0xa5, 0x93, 0x2e, 0x2d, 0xac, 0xe7, 0x36, 0xbf, 0x98, + 0x29, 0xc6, 0x08, 0x53, 0x55, 0x8a, 0xb8, 0xbd, 0x3e, 0x0d, 0x54, 0xe4, 0xa6, 0x36, 0xef, 0x24, + 0xf0, 0xc8, 0x01, 0xfa, 0x29, 0x64, 0x68, 0xe0, 0x85, 0x9c, 0x05, 0xca, 0xc9, 0x5c, 0x3d, 0x91, + 0x86, 0xc5, 0xe8, 0xcd, 0xc4, 0x23, 0x86, 0x66, 0x0b, 0xee, 0xfb, 0x87, 0xc4, 0x3d, 0x76, 0xb2, + 0xef, 0xb9, 0x8c, 0x11, 0xa3, 0x96, 0x82, 0x64, 0x9f, 0x7b, 0xb4, 0xbc, 0x01, 0xcb, 0x6f, 0x6d, + 0x35, 0x2a, 0x42, 0xc6, 0x6e, 0x75, 0xa4, 0x91, 0x24, 0x1e, 0xf5, 0xcb, 0x37, 0x61, 0x69, 
0x6a, + 0x5b, 0xcb, 0x7f, 0x5e, 0x84, 0x4c, 0x7c, 0xd6, 0xa8, 0x0a, 0x59, 0x97, 0x07, 0x8a, 0xb0, 0x80, + 0x0a, 0x2b, 0xaf, 0x99, 0x27, 0xb3, 0x15, 0x83, 0x34, 0x6b, 0x67, 0x0e, 0x8f, 0x59, 0xe8, 0x5b, + 0xc8, 0x0a, 0x2a, 0xf9, 0x40, 0xb8, 0x54, 0x5a, 0x7d, 0xad, 0xcf, 0x56, 0x48, 0x04, 0xc2, 0xf4, + 0xb7, 0x03, 0x26, 0xa8, 0xde, 0x65, 0x89, 0xc7, 0x54, 0xf4, 0x14, 0xd2, 0x82, 0x4a, 0x45, 0x84, + 0x7a, 0x97, 0x44, 0x70, 0x04, 0x69, 0x71, 0x9f, 0xb9, 0x43, 0x1c, 0x33, 0xd0, 0x53, 0xc8, 0x86, + 0x3e, 0x71, 0x8d, 0x57, 0x67, 0xd1, 0xd0, 0xef, 0xcf, 0xa2, 0xb7, 0x62, 0x10, 0x1e, 0xe3, 0xd1, + 0xd7, 0x00, 0x3e, 0xef, 0x76, 0x3c, 0xc1, 0x4e, 0xa8, 0xb0, 0x12, 0x2b, 0xce, 0x62, 0xd7, 0x0d, + 0x02, 0x67, 0x7d, 0xde, 0x8d, 0x9a, 0x68, 0xfb, 0x7f, 0xd2, 0xd7, 0x84, 0xb6, 0x9e, 0x01, 0x90, + 0xd1, 0x57, 0xab, 0xae, 0xcf, 0xde, 0xcb, 0x95, 0x3d, 0x91, 0x09, 0x3a, 0x7a, 0x00, 0xf9, 0x23, + 0x2e, 0x5c, 0xda, 0xb1, 0x51, 0x93, 0x35, 0x9a, 0xc8, 0x19, 0x5b, 0xa4, 0x2f, 0x54, 0x83, 0x74, + 0x97, 0x06, 0x54, 0x30, 0xd7, 0x01, 0x33, 0xd8, 0xa3, 0x99, 0x01, 0x19, 0x41, 0xf0, 0x20, 0x50, + 0xac, 0x4f, 0xed, 0x48, 0x31, 0x11, 0xfd, 0x06, 0x3e, 0x8a, 0x8f, 0xaf, 0x23, 0xe8, 0x11, 0x15, + 0x34, 0xd0, 0x1a, 0xc8, 0x99, 0x7d, 0xf8, 0xf4, 0xdd, 0x1a, 0xb0, 0x68, 0x9b, 0x6c, 0x90, 0xb8, + 0xfc, 0x41, 0xd6, 0xb2, 0x90, 0x16, 0xd1, 0xb8, 0xe5, 0xdf, 0x27, 0xb4, 0xea, 0x2f, 0x21, 0xd0, + 0x06, 0xe4, 0x46, 0xc3, 0x33, 0xcf, 0xa8, 0x37, 0x5b, 0xbb, 0x71, 0x71, 0xbe, 0x06, 0x31, 0xb6, + 0x59, 0xd7, 0x39, 0xc8, 0xb6, 0x3d, 0xd4, 0x80, 0xa5, 0x11, 0x41, 0x97, 0x01, 0xf6, 0xa2, 0x2c, + 0xbd, 0x6b, 0xa6, 0xfb, 0xc3, 0x90, 0xe2, 0xbc, 0x98, 0xe8, 0x95, 0x7f, 0x0d, 0xe8, 0xed, 0x7d, + 0x41, 0x08, 0x92, 0xc7, 0x2c, 0xb0, 0xd3, 0xc0, 0xa6, 0x8d, 0x2a, 0x90, 0x0e, 0xc9, 0xd0, 0xe7, + 0xc4, 0xb3, 0x81, 0x71, 0xab, 0x12, 0x15, 0x08, 0x95, 0xb8, 0x40, 0xa8, 0x54, 0x83, 0x21, 0x8e, + 0x41, 0xe5, 0x67, 0x70, 0x7b, 0xe6, 0xf1, 0xa2, 0x4d, 0xc8, 0x8f, 0x02, 0x6e, 0xbc, 0xd6, 0x9b, + 0x17, 0xe7, 0x6b, 0xb9, 0x51, 0x64, 0x36, 0xeb, 0x38, 0x37, 0x02, 0x35, 0xbd, 0xf2, 0xe9, 0x12, + 0x2c, 0x4d, 0x85, 0x2d, 0xba, 0x05, 0x8b, 0xac, 0x4f, 0xba, 0xd4, 0xce, 0x31, 0xea, 0xa0, 0x06, + 0xa4, 0x7c, 0x72, 0x48, 0x7d, 0x1d, 0xbc, 0xfa, 0xe0, 0x7e, 0x78, 0x6d, 0xfc, 0x57, 0x9e, 0x1b, + 0x7c, 0x23, 0x50, 0x62, 0x88, 0x2d, 0x19, 0x39, 0x90, 0x76, 0x79, 0xbf, 0x4f, 0x02, 0x7d, 0x4d, + 0x2c, 0xac, 0x67, 0x71, 0xdc, 0xd5, 0x3b, 0x43, 0x44, 0x57, 0x3a, 0x49, 0x63, 0x36, 0x6d, 0x54, + 0x80, 0x05, 0x1a, 0x9c, 0x38, 0x8b, 0xc6, 0xa4, 0x9b, 0xda, 0xe2, 0xb1, 0x28, 0xfa, 0xb2, 0x58, + 0x37, 0x35, 0x6f, 0x20, 0xa9, 0x70, 0xd2, 0xd1, 0x8e, 0xea, 0x36, 0xfa, 0x09, 0xa4, 0xfa, 0x7c, + 0x10, 0x28, 0xe9, 0x64, 0xcc, 0x64, 0x57, 0x66, 0x4d, 0xf6, 0x85, 0x46, 0x58, 0x65, 0x59, 0x38, + 0x6a, 0xc0, 0xb2, 0x54, 0x3c, 0xec, 0x74, 0x05, 0x71, 0x69, 0x27, 0xa4, 0x82, 0x71, 0xcf, 0xa6, + 0xe1, 0x95, 0xb7, 0x0e, 0xa5, 0x6e, 0x0b, 0x3e, 0x7c, 0x53, 0x73, 0xb6, 0x35, 0xa5, 0x65, 0x18, + 0xa8, 0x05, 0xf9, 0x70, 0xe0, 0xfb, 0x1d, 0x1e, 0x46, 0x37, 0x72, 0x14, 0x3b, 0xef, 0xb1, 0x65, + 0xad, 0x81, 0xef, 0xef, 0x45, 0x24, 0x9c, 0x0b, 0xc7, 0x1d, 0x74, 0x07, 0x52, 0x5d, 0xc1, 0x07, + 0x61, 0x14, 0x37, 0x59, 0x6c, 0x7b, 0xe8, 0x1b, 0x48, 0x4b, 0xea, 0x0a, 0xaa, 0xa4, 0x93, 0x37, + 0x4b, 0xfd, 0x64, 0xd6, 0x20, 0x6d, 0x03, 0x19, 0xc5, 0x04, 0x8e, 0x39, 0x68, 0x05, 0x16, 0x94, + 0x1a, 0x3a, 0x4b, 0xa5, 0xc4, 0x7a, 0xa6, 0x96, 0xbe, 0x38, 0x5f, 0x5b, 0xd8, 0xdf, 0x7f, 0x85, + 0xb5, 0x4d, 0xdf, 0x16, 0x3d, 0x2e, 0x55, 0x40, 0xfa, 0xd4, 0xb9, 0x61, 0xf6, 0x76, 0xd4, 0x47, + 0xaf, 0x00, 0xbc, 
0x40, 0x76, 0x5c, 0x93, 0x9e, 0x9c, 0x9b, 0x66, 0x75, 0x5f, 0x5c, 0xbf, 0xba, + 0xfa, 0x6e, 0xdb, 0xde, 0x98, 0x4b, 0x17, 0xe7, 0x6b, 0xd9, 0x51, 0x17, 0x67, 0xbd, 0x40, 0x46, + 0x4d, 0x54, 0x83, 0x5c, 0x8f, 0x12, 0x5f, 0xf5, 0xdc, 0x1e, 0x75, 0x8f, 0x9d, 0xc2, 0xd5, 0x57, + 0xe0, 0x8e, 0x81, 0x59, 0x0f, 0x93, 0x24, 0xad, 0x60, 0x3d, 0x55, 0xe9, 0x2c, 0x9b, 0xbd, 0x8a, + 0x3a, 0xe8, 0x3e, 0x00, 0x0f, 0x69, 0xd0, 0x91, 0xca, 0x63, 0x81, 0x83, 0xf4, 0x92, 0x71, 0x56, + 0x5b, 0xda, 0xda, 0x80, 0xee, 0xea, 0x0b, 0x8a, 0x78, 0x1d, 0x1e, 0xf8, 0x43, 0xe7, 0x23, 0xf3, + 0x35, 0xa3, 0x0d, 0x7b, 0x81, 0x3f, 0x44, 0x6b, 0x90, 0x33, 0xba, 0x90, 0xac, 0x1b, 0x10, 0xdf, + 0xb9, 0x65, 0xf6, 0x03, 0xb4, 0xa9, 0x6d, 0x2c, 0xfa, 0x1c, 0xa2, 0xdd, 0x90, 0xce, 0xed, 0xab, + 0xcf, 0xc1, 0x4e, 0x76, 0x7c, 0x0e, 0x96, 0x83, 0x7e, 0x06, 0x10, 0x0a, 0x76, 0xc2, 0x7c, 0xda, + 0xa5, 0xd2, 0xb9, 0x63, 0x16, 0xbd, 0x3a, 0xf3, 0x66, 0x1a, 0xa1, 0xf0, 0x04, 0x03, 0x55, 0x20, + 0xc9, 0x02, 0xa6, 0x9c, 0x8f, 0xed, 0xad, 0x74, 0x59, 0xaa, 0x35, 0xce, 0xfd, 0x03, 0xe2, 0x0f, + 0x28, 0x36, 0x38, 0xd4, 0x84, 0x2c, 0x93, 0xdc, 0x37, 0xf2, 0x75, 0x1c, 0x93, 0xdf, 0xde, 0xe3, + 0xfc, 0x9a, 0x31, 0x05, 0x8f, 0xd9, 0xe8, 0x1e, 0x64, 0x43, 0xe6, 0xc9, 0xe7, 0xac, 0xcf, 0x94, + 0xb3, 0x52, 0x4a, 0xac, 0x2f, 0xe0, 0xb1, 0x01, 0xed, 0x40, 0x5a, 0x0e, 0xa5, 0xab, 0x7c, 0xe9, + 0x14, 0xcd, 0xbe, 0x54, 0xae, 0x1f, 0xa6, 0x1d, 0x11, 0xa2, 0xc4, 0x11, 0xd3, 0x51, 0x19, 0xf2, + 0x2e, 0x09, 0xa3, 0x6a, 0x98, 0x51, 0xe9, 0xdc, 0x35, 0x67, 0x3b, 0x65, 0x2b, 0x7e, 0x0d, 0xb9, + 0x89, 0xa4, 0xa3, 0x93, 0xc5, 0x31, 0x1d, 0xda, 0x3c, 0xa6, 0x9b, 0x5a, 0x19, 0x27, 0x7a, 0x1b, + 0x4c, 0xa2, 0xcd, 0xe2, 0xa8, 0xf3, 0x64, 0xfe, 0x71, 0xa2, 0xb8, 0x09, 0xb9, 0x89, 0xe0, 0x43, + 0x9f, 0xe8, 0x4b, 0xa0, 0xcb, 0xa4, 0x12, 0xc3, 0x0e, 0x19, 0xa8, 0x9e, 0xf3, 0x0b, 0x43, 0xc8, + 0xc7, 0xc6, 0xea, 0x40, 0xf5, 0x8a, 0x1d, 0x18, 0x6b, 0x18, 0x95, 0x20, 0xa7, 0x63, 0x43, 0x52, + 0x71, 0x42, 0x85, 0x2e, 0xb0, 0xf4, 0xf4, 0x26, 0x4d, 0x3a, 0x86, 0x25, 0x25, 0xc2, 0xed, 0x99, + 0x14, 0x9a, 0xc5, 0xb6, 0xa7, 0x73, 0x62, 0x9c, 0x28, 0x6c, 0x4e, 0xb4, 0xdd, 0xe2, 0x13, 0xc8, + 0x4f, 0x6e, 0xc6, 0x7f, 0xb3, 0xa0, 0xf2, 0x5f, 0x12, 0x90, 0x1d, 0x1d, 0x18, 0xfa, 0x12, 0x96, + 0x9b, 0xed, 0xbd, 0xe7, 0xd5, 0xfd, 0xe6, 0xde, 0x6e, 0xa7, 0xde, 0xf8, 0xb6, 0xfa, 0xf2, 0xf9, + 0x7e, 0x61, 0xae, 0x78, 0xff, 0xf4, 0xac, 0xb4, 0x32, 0xbe, 0x1b, 0x62, 0x78, 0x9d, 0x1e, 0x91, + 0x81, 0xaf, 0xa6, 0x59, 0x2d, 0xbc, 0xb7, 0xd5, 0x68, 0xb7, 0x0b, 0x89, 0xab, 0x58, 0x2d, 0xc1, + 0x5d, 0x2a, 0x25, 0xda, 0x84, 0xc2, 0x98, 0xb5, 0xf3, 0xaa, 0xd5, 0xc0, 0x07, 0x85, 0xf9, 0xe2, + 0xbd, 0xd3, 0xb3, 0x92, 0xf3, 0x36, 0x69, 0x67, 0x18, 0x52, 0x71, 0x60, 0x1f, 0x36, 0xff, 0x4c, + 0x40, 0x7e, 0xb2, 0x2e, 0x46, 0x5b, 0x51, 0x3d, 0x6b, 0x56, 0x7c, 0x63, 0x73, 0xe3, 0xba, 0x3a, + 0xda, 0xdc, 0xc7, 0xfe, 0x40, 0xfb, 0x7d, 0xa1, 0x9f, 0xb0, 0x86, 0x8c, 0xbe, 0x84, 0xc5, 0x90, + 0x0b, 0x15, 0xdf, 0x5c, 0xb3, 0xe3, 0x8a, 0x8b, 0xb8, 0xda, 0x8a, 0xc0, 0xe5, 0x1e, 0xdc, 0x98, + 0xf6, 0x86, 0x1e, 0xc2, 0xc2, 0x41, 0xb3, 0x55, 0x98, 0x2b, 0xde, 0x3d, 0x3d, 0x2b, 0x7d, 0x3c, + 0xfd, 0xf1, 0x80, 0x09, 0x35, 0x20, 0x7e, 0xb3, 0x85, 0x3e, 0x87, 0xc5, 0xfa, 0x6e, 0x1b, 0xe3, + 0x42, 0xa2, 0xb8, 0x76, 0x7a, 0x56, 0xba, 0x3b, 0x8d, 0xd3, 0x9f, 0xf8, 0x20, 0xf0, 0x30, 0x3f, + 0x1c, 0x3d, 0xe7, 0xfe, 0x35, 0x0f, 0x39, 0x7b, 0xa1, 0x7f, 0xe8, 0x17, 0xff, 0x52, 0x54, 0xad, + 0xc6, 0x99, 0x7a, 0xfe, 0xda, 0xa2, 0x35, 0x1f, 0x11, 0xac, 0xa6, 0x1f, 0x40, 0x9e, 0x85, 0x27, + 0x5f, 0x75, 0x68, 0x40, 0x0e, 0x7d, 0xfb, 
0xb2, 0xcb, 0xe0, 0x9c, 0xb6, 0x35, 0x22, 0x93, 0xbe, + 0x26, 0x58, 0xa0, 0xa8, 0x08, 0xec, 0x9b, 0x2d, 0x83, 0x47, 0x7d, 0xf4, 0x0d, 0x24, 0x59, 0x48, + 0xfa, 0xb6, 0xd2, 0x9e, 0xb9, 0x82, 0x66, 0xab, 0xfa, 0xc2, 0xc6, 0x5c, 0x2d, 0x73, 0x71, 0xbe, + 0x96, 0xd4, 0x06, 0x6c, 0x68, 0x68, 0x35, 0x2e, 0x76, 0xf5, 0x48, 0xe6, 0xca, 0xcf, 0xe0, 0x09, + 0x8b, 0x8e, 0x1b, 0x16, 0x74, 0x05, 0x95, 0xd2, 0x5c, 0xfe, 0x19, 0x1c, 0x77, 0x51, 0x11, 0xd2, + 0xb6, 0x64, 0x36, 0x35, 0x72, 0x56, 0x97, 0xa3, 0xd6, 0x50, 0x5b, 0x82, 0x5c, 0xb4, 0x1b, 0x9d, + 0x23, 0xc1, 0xfb, 0xe5, 0x7f, 0x27, 0x21, 0xb7, 0xe5, 0x0f, 0xa4, 0xb2, 0xd5, 0xcf, 0x07, 0xdb, + 0xfc, 0x57, 0xb0, 0x4c, 0xcc, 0x1f, 0x04, 0x12, 0xe8, 0x52, 0xc2, 0xbc, 0x44, 0xec, 0x01, 0x3c, + 0x9c, 0xe9, 0x6e, 0x04, 0x8e, 0x5e, 0x2d, 0xb5, 0x94, 0xf6, 0xe9, 0x24, 0x70, 0x81, 0x5c, 0xfa, + 0x82, 0xda, 0xb0, 0xc4, 0x85, 0xdb, 0xa3, 0x52, 0x45, 0x05, 0x88, 0x7d, 0x71, 0xcf, 0xfc, 0x17, + 0xb3, 0x37, 0x09, 0xb4, 0xb7, 0x6f, 0x34, 0xdb, 0x69, 0x1f, 0xe8, 0x31, 0x24, 0x05, 0x39, 0x8a, + 0x5f, 0x55, 0x33, 0x83, 0x04, 0x93, 0x23, 0x35, 0xe5, 0xc2, 0x30, 0xd0, 0x2f, 0x01, 0x3c, 0x26, + 0x43, 0xa2, 0xdc, 0x1e, 0x15, 0xf6, 0xb0, 0x67, 0x2e, 0xb1, 0x3e, 0x42, 0x4d, 0x79, 0x99, 0x60, + 0xa3, 0x67, 0x90, 0x75, 0x49, 0x2c, 0xd7, 0xd4, 0xd5, 0xbf, 0x21, 0xb6, 0xaa, 0xd6, 0x45, 0x41, + 0xbb, 0xb8, 0x38, 0x5f, 0xcb, 0xc4, 0x16, 0x9c, 0x71, 0x89, 0x95, 0xef, 0x33, 0x58, 0x52, 0x44, + 0x1e, 0x77, 0xbc, 0x28, 0x9d, 0x45, 0x32, 0xb9, 0xa2, 0x9a, 0xd0, 0x6f, 0x5d, 0x9b, 0xf6, 0xe2, + 0xe3, 0xcc, 0xab, 0x09, 0x1b, 0xfa, 0x15, 0x2c, 0xd3, 0xc0, 0x15, 0x43, 0x23, 0xd6, 0x78, 0x86, + 0x99, 0xab, 0x17, 0xdb, 0x18, 0x81, 0xa7, 0x16, 0x5b, 0xa0, 0x97, 0xec, 0xe5, 0xbf, 0x27, 0x00, + 0xa2, 0x02, 0xed, 0xc3, 0x0a, 0x10, 0x41, 0xd2, 0x23, 0x8a, 0x18, 0xcd, 0xe5, 0xb1, 0x69, 0xa3, + 0x27, 0x00, 0x8a, 0xf6, 0x43, 0x9d, 0x7a, 0x83, 0xae, 0x95, 0xcd, 0xbb, 0xd2, 0xc1, 0x04, 0x1a, + 0x6d, 0x42, 0xca, 0xbe, 0x7d, 0x93, 0xd7, 0xf2, 0x2c, 0xb2, 0xfc, 0xa7, 0x04, 0x40, 0xb4, 0xcc, + 0xff, 0xeb, 0xb5, 0xd5, 0x9c, 0x37, 0xdf, 0xaf, 0xce, 0xfd, 0xed, 0xfb, 0xd5, 0xb9, 0xdf, 0x5d, + 0xac, 0x26, 0xde, 0x5c, 0xac, 0x26, 0xfe, 0x7a, 0xb1, 0x9a, 0xf8, 0xc7, 0xc5, 0x6a, 0xe2, 0x30, + 0x65, 0x6a, 0xa8, 0x1f, 0xff, 0x27, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x13, 0x42, 0x73, 0x71, 0x16, + 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/specs.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/specs.proto new file mode 100644 index 00000000..6858a30b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/specs.proto @@ -0,0 +1,474 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/any.proto"; +import "google/protobuf/wrappers.proto"; + +// Specs are container objects for user provided input. All creations and +// updates are done through spec types. As a convention, user input from a spec +// is never touched in created objects. This allows one to verify that the +// users intent has not been modified. +// +// Put differently, spec types can be said to represent the desired state of +// the system. In situations where modifications need to be made to a +// particular component, API objects will either contain a copy of the spec +// component or a different representation to reflect allocation or resolution. 
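As a quick orientation for readers of the generated Go earlier in this diff: each proto `oneof` below becomes a small wrapper struct in the `api` package, so building a spec in code composes those wrappers. A minimal, hypothetical sketch of a replicated service spec — the service name, image and replica count are invented for illustration:

    package main

    import (
    	"fmt"

    	api "github.com/docker/swarmkit/api"
    )

    func main() {
    	svc := api.ServiceSpec{
    		Annotations: api.Annotations{Name: "web"},
    		Task: api.TaskSpec{
    			// The runtime oneof is expressed with the TaskSpec_Container wrapper.
    			Runtime: &api.TaskSpec_Container{
    				Container: &api.ContainerSpec{Image: "nginx:1.17"},
    			},
    		},
    		// The mode oneof: a replicated service with three instances.
    		Mode: &api.ServiceSpec_Replicated{
    			Replicated: &api.ReplicatedService{Replicas: 3},
    		},
    	}
    	fmt.Println(svc.Annotations.Name)
    }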
+
+message NodeSpec {
+	Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+	enum Membership {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		PENDING = 0 [(gogoproto.enumvalue_customname) = "NodeMembershipPending"];
+		ACCEPTED = 1 [(gogoproto.enumvalue_customname) = "NodeMembershipAccepted"];
+	}
+
+	enum Availability {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// Active nodes.
+		ACTIVE = 0 [(gogoproto.enumvalue_customname) = "NodeAvailabilityActive"];
+
+		// Paused nodes won't be considered by the scheduler, preventing any
+		// further tasks from running on them.
+		PAUSE = 1 [(gogoproto.enumvalue_customname) = "NodeAvailabilityPause"];
+
+		// Drained nodes are paused and any task already running on them will
+		// be evicted.
+		DRAIN = 2 [(gogoproto.enumvalue_customname) = "NodeAvailabilityDrain"];
+	}
+
+	// DesiredRole defines the role the node should have.
+	NodeRole desired_role = 2;
+
+	// Membership controls the admission of the node into the cluster.
+	Membership membership = 3;
+
+	// Availability allows a user to control the current scheduling status of a
+	// node.
+	Availability availability = 4;
+}
+
+// ServiceSpec defines the properties of a service.
+//
+// A service instructs the cluster in orchestrating repeated instances of a
+// template, implemented as tasks. Based on the number of instances, scheduling
+// strategy and restart policy, a number of application-level behaviors can be
+// defined.
+message ServiceSpec {
+	Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+	// Task defines the task template this service will spawn.
+	TaskSpec task = 2 [(gogoproto.nullable) = false];
+
+	oneof mode {
+		ReplicatedService replicated = 3;
+		GlobalService global = 4;
+	}
+
+	// Update contains settings which affect updates.
+	UpdateConfig update = 6;
+
+	// Rollback contains settings which affect rollbacks of updates.
+	UpdateConfig rollback = 9;
+
+	// ServiceSpec.Networks has been deprecated and is replaced by
+	// the Networks field in Task (TaskSpec.Networks).
+	// This field (ServiceSpec.Networks) is kept for compatibility.
+	// In case TaskSpec.Networks does not exist, ServiceSpec.Networks
+	// is still honored if it exists.
+	repeated NetworkAttachmentConfig networks = 7 [deprecated=true];
+
+	// Service endpoint specifies the user-provided configuration
+	// to properly discover and load balance a service.
+	EndpointSpec endpoint = 8;
+}
+
+// ReplicatedService sets the reconciliation target to a certain number of replicas.
+message ReplicatedService {
+	uint64 replicas = 1;
+}
+
+// GlobalService represents a global service.
+message GlobalService {
+	// Empty message for now.
+}
+
+message TaskSpec {
+	oneof runtime {
+		NetworkAttachmentSpec attachment = 8;
+		ContainerSpec container = 1;
+		GenericRuntimeSpec generic = 10;
+	}
+
+	// Resource requirements for the container.
+	ResourceRequirements resources = 2;
+
+	// RestartPolicy specifies what to do when a task fails or finishes.
+	RestartPolicy restart = 4;
+
+	// Placement specifies node selection constraints.
+	Placement placement = 5;
+
+	// LogDriver specifies the log driver to use for the task. Any runtime will
+	// direct logs into the specified driver for the duration of the task.
+	Driver log_driver = 6;
+
+	// Networks specifies the list of network attachment
+	// configurations (which specify the network and per-network
+	// aliases) that this task spec is bound to.
+	repeated NetworkAttachmentConfig networks = 7;
+
+	// ForceUpdate is a counter that triggers an update even if no relevant
+	// parameters have been changed. We do this to allow forced restarts
+	// using the same reconciliation-based mechanism that performs rolling
+	// updates.
+	uint64 force_update = 9;
+
+	// ResourceReferences provides a generic way to specify resources that
+	// are used by this task, and should be sent down to agents along with
+	// the task. Inside the runtime field there may be more specific
+	// information about how to use the resource, but ResourceReferences
+	// establishes the relationship at the store level, and instructs the
+	// dispatcher to send the related objects.
+	//
+	// ResourceReferences is a list of ResourceReferences used by the task.
+	repeated ResourceReference resource_references = 11 [(gogoproto.nullable) = false];
+}
+
+message ResourceReference {
+	string resource_id = 1;
+	ResourceType resource_type = 2;
+}
+
+message GenericRuntimeSpec {
+	string kind = 1;
+	google.protobuf.Any payload = 2;
+}
+
+// NetworkAttachmentSpec specifies runtime parameters required to attach
+// a container to a network.
+message NetworkAttachmentSpec {
+	// ContainerID specifies a unique ID of the container that
+	// this attachment is for.
+	string container_id = 1;
+}
+
+
+// Container specifies runtime parameters for a container.
+message ContainerSpec {
+	// image defines the image reference, as specified in the
+	// distribution/reference package. This may include a registry host, name,
+	// tag or digest.
+	//
+	// The field will be directly passed to the engine pulling. Well-behaved
+	// service definitions will use immutable references, either through tags
+	// that don't change or verifiable digests.
+	string image = 1;
+
+	// Labels defines labels to be added to the container at creation time. If
+	// collisions with system labels occur, these labels will be overridden.
+	//
+	// This field *must* remain compatible with the Labels field of
+	// Annotations.
+	map<string, string> labels = 2;
+
+	// Command to run in the container. The first element is a path to the
+	// executable and the following elements are treated as arguments.
+	//
+	// If command is empty, execution will fall back to the image's entrypoint.
+	//
+	// Command should only be used when overriding the entrypoint.
+	repeated string command = 3;
+
+	// Args specifies arguments provided to the image's entrypoint.
+	//
+	// If Command and Args are provided, Args will be appended to Command.
+	repeated string args = 4;
+
+	// Hostname specifies the hostname that will be set on containers created by docker swarm.
+	// All containers for a given service will have the same hostname.
+	string hostname = 14;
+
+	// Env specifies the environment variables for the container in NAME=VALUE
+	// format. These must be compliant with [IEEE Std
+	// 1003.1-2001](http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html).
+	repeated string env = 5;
+
+	// Dir defines the working directory to set for the container process.
+	string dir = 6;
+
+	// User specifies the user that should be employed to run the container.
+	//
+	// Note that the primary group may be specified by appending the group name
+	// or id to the user name, separated by a `:`. This syntax is
+	// `<user>:<group>`.
+	string user = 7;
+
+	// Groups specifies supplementary groups available to the user.
+	repeated string groups = 11;
+
+	// Privileges specifies security configuration/permissions.
+	Privileges privileges = 22;
+
+	// Init declares that a custom init will be running inside the container;
+	// if null, the daemon's configured settings are used.
+	google.protobuf.BoolValue init = 23;
+
+	// TTY declares that a TTY should be attached to the standard streams,
+	// including stdin if it is still open.
+	bool tty = 13 [(gogoproto.customname) = "TTY"];
+
+	// OpenStdin declares that the standard input (stdin) should be open.
+	bool open_stdin = 18;
+
+	// ReadOnly declares that the container root filesystem is read-only.
+	// This only impacts the root filesystem, not additional mounts (including
+	// tmpfs). For additional mounts that are not part of the initial rootfs,
+	// they will be decided by the modes passed in the mount definition.
+	bool read_only = 19;
+
+	// StopSignal defines the signal to stop the container.
+	string stop_signal = 20;
+
+	repeated Mount mounts = 8 [(gogoproto.nullable) = false];
+
+	// StopGracePeriod is the grace period for stopping the container before
+	// forcefully killing the container.
+	// Note: Can't use stdduration here because this needs to be nullable.
+	google.protobuf.Duration stop_grace_period = 9;
+
+	// PullOptions allows one to parameterize an image pull.
+	message PullOptions {
+		// RegistryAuth is the registry auth token obtained from the client, required
+		// to pull private images. This is the unmodified JSON used as part of
+		// the `X-Registry-Auth` header.
+		// TODO(nishanttotla): This field will later be deprecated
+		string registry_auth = 64;
+	}
+
+	// PullOptions parameterize the behavior of image pulls.
+	PullOptions pull_options = 10;
+
+	// SecretReference contains references to zero or more secrets that
+	// will be exposed to the container.
+	repeated SecretReference secrets = 12;
+
+	// ConfigReference contains references to zero or more configs that
+	// will be exposed to the container.
+	repeated ConfigReference configs = 21;
+
+	// Hosts allows additional entries to be specified in /etc/hosts
+	// that associate IP addresses with hostnames.
+	// Detailed documentation is available in:
+	// http://man7.org/linux/man-pages/man5/hosts.5.html
+	//   IP_address canonical_hostname [aliases...]
+	//
+	// The format of the Hosts in swarmkit follows the same as
+	// above.
+	// This is different from `docker run --add-host <hostname>:<ip>`
+	// where the format is `<hostname>:<ip>`.
+	repeated string hosts = 17;
+
+	// DNSConfig specifies DNS related configurations in the resolver configuration file (resolv.conf).
+	// Detailed documentation is available in:
+	// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
+	// TODO: domain is not supported yet
+	message DNSConfig {
+		// Nameservers specifies the IP addresses of the name servers
+		repeated string nameservers = 1;
+
+		// Search specifies the search list for host-name lookup
+		repeated string search = 2;
+
+		// Options allows certain internal resolver variables to be modified
+		repeated string options = 3;
+	}
+
+	// DNSConfig allows one to specify DNS related configuration in resolv.conf
+	DNSConfig dns_config = 15 [(gogoproto.customname) = "DNSConfig"];
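The nested DNSConfig message above surfaces in the generated Go code as `ContainerSpec_DNSConfig` (its Unmarshal method appears earlier in this diff), and the `gogoproto.customname` option keeps the Go field named `DNSConfig`. A minimal, hypothetical sketch of populating it — the image name and resolver values are illustrative only:

    package main

    import (
    	"fmt"

    	api "github.com/docker/swarmkit/api"
    )

    func main() {
    	spec := api.ContainerSpec{
    		Image: "busybox:1.31", // illustrative image reference
    		DNSConfig: &api.ContainerSpec_DNSConfig{
    			Nameservers: []string{"10.0.0.2"},         // resolver addresses
    			Search:      []string{"example.internal"}, // search domains
    			Options:     []string{"ndots:2"},          // resolv.conf options
    		},
    	}
    	fmt.Println(spec.DNSConfig.Nameservers)
    }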
+	// Healthcheck describes how to check whether the container is healthy. If the
+	// container is considered unhealthy, it will be destroyed, its creating
+	// task will exit and a new task will be rescheduled elsewhere. A container
+	// is considered unhealthy after `Retries` number of consecutive failures.
+	HealthConfig healthcheck = 16;
+
+	enum Isolation {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// ISOLATION_DEFAULT uses whatever default value the container runtime provides
+		ISOLATION_DEFAULT = 0 [(gogoproto.enumvalue_customname) = "ContainerIsolationDefault"];
+
+		// ISOLATION_PROCESS forces Windows container isolation
+		ISOLATION_PROCESS = 1 [(gogoproto.enumvalue_customname) = "ContainerIsolationProcess"];
+
+		// ISOLATION_HYPERV forces Hyper-V isolation
+		ISOLATION_HYPERV = 2 [(gogoproto.enumvalue_customname) = "ContainerIsolationHyperV"];
+	}
+
+	// Isolation defines the isolation level for Windows containers (default, process, hyperv).
+	// Runtimes that don't support it ignore this field.
+	Isolation isolation = 24;
+
+	// PidsLimit guards against OS resource damage caused by applications inside
+	// the container, such as a fork bomb attack.
+	int64 pidsLimit = 25;
+
+	// Sysctls sets namespaced kernel parameters (sysctls) in the container. This
+	// option is equivalent to passing --sysctl to docker run.
+	//
+	// Note that while options are subject to the same restrictions as arguments
+	// passed to the --sysctl flag on docker run, those options are not further
+	// validated to ensure that they are safe or sensible in a clustered
+	// environment.
+	//
+	// Additionally, sysctls are not validated for support in the underlying
+	// daemon. For information about supported options, refer to the
+	// documentation at:
+	//
+	// https://docs.docker.com/engine/reference/commandline/run/#configure-namespaced-kernel-parameters-sysctls-at-runtime
+	map<string, string> sysctls = 26;
+
+	// Capabilities is the list of Linux capabilities to be available to the
+	// container (this overrides the default set of capabilities).
+	repeated string capabilities = 27;
+}
+
+// EndpointSpec defines the properties that can be configured to
+// access and load balance the service.
+message EndpointSpec {
+	// ResolutionMode specifies the mode of resolution to use for
+	// internal load balancing between tasks which are all within
+	// the cluster. This is sometimes called the east-west data path.
+	enum ResolutionMode {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// VIP resolution mode specifies that the
+		// service resolves to a logical IP and the requests
+		// are sent to that logical IP. Packets hitting that
+		// logical IP are load balanced to a chosen backend.
+		VIP = 0 [(gogoproto.enumvalue_customname) = "ResolutionModeVirtualIP"];
+
+		// DNSRR resolution mode specifies that the
+		// service directly gets resolved to one of the
+		// backend IPs and the client directly initiates a
+		// request towards the actual backend. This requires
+		// that the client does not cache the DNS responses
+		// when the DNS response TTL is 0.
+		DNSRR = 1 [(gogoproto.enumvalue_customname) = "ResolutionModeDNSRoundRobin"];
+	}
+
+	ResolutionMode mode = 1;
+
+	// List of exposed ports through which this service is accessible
+	// from outside the cluster.
+	repeated PortConfig ports = 2;
+}
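The two resolution modes translate to the `ResolutionModeVirtualIP` and `ResolutionModeDNSRoundRobin` constants in the generated Go package (the enum disables the usual Go name prefix via `goproto_enum_prefix`). A hedged sketch of an endpoint spec publishing one port — the port numbers are made up for illustration:

    package main

    import (
    	"fmt"

    	api "github.com/docker/swarmkit/api"
    )

    func main() {
    	ep := api.EndpointSpec{
    		// VIP: clients hit one logical IP and the cluster load balances.
    		// Use api.ResolutionModeDNSRoundRobin for DNS-based resolution.
    		Mode: api.ResolutionModeVirtualIP,
    		Ports: []*api.PortConfig{{
    			TargetPort:    80,   // port the task listens on
    			PublishedPort: 8080, // port exposed outside the cluster
    		}},
    	}
    	fmt.Println(ep.Mode, ep.Ports[0].PublishedPort)
    }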
+
+// NetworkSpec specifies user-defined network parameters.
+message NetworkSpec {
+	Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+	// DriverConfig specific configuration consumed by the network driver.
+	Driver driver_config = 2;
+
+	// IPv6Enabled enables support for IPv6 on the network.
+	bool ipv6_enabled = 3;
+
+	// internal restricts external access to the network. This may be
+	// accomplished by disabling the default gateway or through other means.
+	bool internal = 4;
+
+	IPAMOptions ipam = 5 [(gogoproto.customname) = "IPAM"];
+
+	// Attachable allows external (to swarm) entities to manually
+	// attach to this network. With this flag enabled, external
+	// entities such as containers running on a worker node in
+	// the cluster can manually attach to this network and access
+	// the services attached to this network. If this flag is not
+	// enabled (the default case), no manual attachment to this network
+	// can happen.
+	bool attachable = 6;
+
+	// Ingress indicates this network will provide the routing-mesh.
+	// In older versions, the network providing the routing mesh was
+	// only created internally by swarm, and it was identified by the name
+	// "ingress" and the label "com.docker.swarm.internal": "true".
+	bool ingress = 7;
+
+	// ConfigFrom is the source of the configuration for this network.
+	oneof config_from {
+		// Network is the name of a network that provides the network
+		// specific configuration for this network, locally on the node
+		// where this network is being plumbed.
+		string network = 8;
+	}
+
+}
+
+// ClusterSpec specifies global cluster settings.
+message ClusterSpec {
+	Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+	// DEPRECATED: AcceptancePolicy defines the certificate issuance policy.
+	// Acceptance policy is no longer customizable, and secrets have been
+	// replaced with join tokens.
+	AcceptancePolicy acceptance_policy = 2 [deprecated=true, (gogoproto.nullable) = false];
+
+	// Orchestration defines cluster-level orchestration settings.
+	OrchestrationConfig orchestration = 3 [(gogoproto.nullable) = false];
+
+	// Raft defines the cluster's raft settings.
+	RaftConfig raft = 4 [(gogoproto.nullable) = false];
+
+	// Dispatcher defines cluster-level dispatcher settings.
+	DispatcherConfig dispatcher = 5 [(gogoproto.nullable) = false];
+
+	// CAConfig defines cluster-level certificate authority settings.
+	CAConfig ca_config = 6 [(gogoproto.nullable) = false, (gogoproto.customname) = "CAConfig"];
+
+	// TaskDefaults specifies the default values to use for task creation.
+	TaskDefaults task_defaults = 7 [(gogoproto.nullable) = false];
+
+	// EncryptionConfig defines the cluster's encryption settings.
+	EncryptionConfig encryption_config = 8 [(gogoproto.nullable) = false];
+}
+
+// SecretSpec specifies a user-provided secret.
+message SecretSpec {
+	Annotations annotations = 1 [(gogoproto.nullable) = false];
+
+	// Data is the secret payload - the maximum size is 500KB (that is, 500*1024 bytes)
+	bytes data = 2;
+
+	// Templating controls whether and how to evaluate the secret payload as
+	// a template. If it is not set, no templating is used.
+	//
+	// The currently recognized values are:
+	// - golang: Go templating
+	Driver templating = 3;
+
+	// Driver is the secret driver that is used to store the specified secret
+	Driver driver = 4;
+}
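Since the templating field is just a Driver message, opting a secret into Go templating means setting the driver name to the recognized "golang" value. A minimal, hypothetical sketch with made-up secret contents:

    package main

    import (
    	"fmt"

    	api "github.com/docker/swarmkit/api"
    )

    func main() {
    	secret := api.SecretSpec{
    		Annotations: api.Annotations{Name: "db-password"},
    		Data:        []byte("s3cr3t"),            // payload, capped at 500KB
    		Templating:  &api.Driver{Name: "golang"}, // evaluate payload as a Go template
    	}
    	fmt.Println(secret.Annotations.Name, len(secret.Data))
    }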
+ //
+ // The currently recognized values are:
+ // - golang: Go templating
+ Driver templating = 3;
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/storeobject.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/storeobject.go
new file mode 100644
index 00000000..d140fa3e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/storeobject.go
@@ -0,0 +1,123 @@
+package api
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/docker/go-events"
+)
+
+var (
+ errUnknownStoreAction = errors.New("unrecognized action type")
+ errConflictingFilters = errors.New("conflicting filters specified")
+ errNoKindSpecified = errors.New("no kind of object specified")
+ errUnrecognizedAction = errors.New("unrecognized action")
+)
+
+// StoreObject is an abstract object that can be handled by the store.
+type StoreObject interface {
+ GetID() string // Get ID
+ GetMeta() Meta // Retrieve metadata
+ SetMeta(Meta) // Set metadata
+ CopyStoreObject() StoreObject // Return a copy of this object
+ EventCreate() Event // Return a creation event
+ EventUpdate(oldObject StoreObject) Event // Return an update event
+ EventDelete() Event // Return a deletion event
+}
+
+// Event is the type used for events passed over watcher channels, and also
+// the type used to specify filtering in calls to Watch.
+type Event interface {
+ // TODO(stevvooe): Consider whether it makes sense to squish both the
+ // matcher type and the primary type into the same type. It might be better
+ // to build a matcher from an event prototype.
+
+ // Matches checks if this item in a watch queue matches the event
+ // description.
+ Matches(events.Event) bool
+}
+
+// EventCreate is an interface implemented by every creation event type
+type EventCreate interface {
+ IsEventCreate() bool
+}
+
+// EventUpdate is an interface implemented by every update event type
+type EventUpdate interface {
+ IsEventUpdate() bool
+}
+
+// EventDelete is an interface implemented by every delete event type
+type EventDelete interface {
+ IsEventDelete() bool
+}
+
+func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error) {
+ var converted [][]byte
+
+ for _, entry := range annotations.Indices {
+ index := make([]byte, 0, len(kind)+1+len(entry.Key)+1+len(entry.Val)+1)
+ if kind != "" {
+ index = append(index, []byte(kind)...)
+ index = append(index, '|')
+ }
+ index = append(index, []byte(entry.Key)...)
+ index = append(index, '|')
+ index = append(index, []byte(entry.Val)...)
+ // Add the null character as a terminator
+ index = append(index, '\x00')
+ converted = append(converted, index)
+ }
+
+ return len(converted) != 0, converted, nil
+}
+
+func fromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+ arg, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ // Add the null character as a terminator
+ arg += "\x00"
+ return []byte(arg), nil
+}
+
+func prefixFromArgs(args ...interface{}) ([]byte, error) {
+ val, err := fromArgs(args...)
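+ // Illustrative example: fromArgs("name") returns []byte("name\x00"); the
+ // code below strips that trailing terminator so the remaining bytes can be
+ // used for prefix matching rather than as an exact-match key.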
+ if err != nil {
+ return nil, err
+ }
+
+ // Strip the null terminator; the rest is a prefix
+ n := len(val)
+ if n > 0 {
+ return val[:n-1], nil
+ }
+ return val, nil
+}
+
+func checkCustom(a1, a2 Annotations) bool {
+ if len(a1.Indices) == 1 {
+ for _, ind := range a2.Indices {
+ if ind.Key == a1.Indices[0].Key && ind.Val == a1.Indices[0].Val {
+ return true
+ }
+ }
+ }
+ return false
+}
+
+func checkCustomPrefix(a1, a2 Annotations) bool {
+ if len(a1.Indices) == 1 {
+ for _, ind := range a2.Indices {
+ if ind.Key == a1.Indices[0].Key && strings.HasPrefix(ind.Val, a1.Indices[0].Val) {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/types.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/types.pb.go
new file mode 100644
index 00000000..822a69cb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/types.pb.go
@@ -0,0 +1,17932 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/types.proto
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/types"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import google_protobuf2 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import os "os"
+import time "time"
+
+import deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
+import binary "encoding/binary"
+import types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+import sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+type ResourceType int32
+
+const (
+ ResourceType_TASK ResourceType = 0
+ ResourceType_SECRET ResourceType = 1
+ ResourceType_CONFIG ResourceType = 2
+)
+
+var ResourceType_name = map[int32]string{
+ 0: "TASK",
+ 1: "SECRET",
+ 2: "CONFIG",
+}
+var ResourceType_value = map[string]int32{
+ "TASK": 0,
+ "SECRET": 1,
+ "CONFIG": 2,
+}
+
+func (x ResourceType) String() string {
+ return proto.EnumName(ResourceType_name, int32(x))
+}
+func (ResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+
+// Only the manager creates a NEW task, and moves the task to PENDING and ASSIGNED.
+// Afterward, the manager must rely on the agent to update the task status
+// (pre-run: preparing, ready, starting;
+// running;
+// end-state: complete, shutdown, failed, rejected)
+type TaskState int32
+
+const (
+ TaskStateNew TaskState = 0
+ TaskStatePending TaskState = 64
+ TaskStateAssigned TaskState = 192
+ TaskStateAccepted TaskState = 256
+ TaskStatePreparing TaskState = 320
+ TaskStateReady TaskState = 384
+ TaskStateStarting TaskState = 448
+ TaskStateRunning TaskState = 512
+ TaskStateCompleted TaskState = 576
+ TaskStateShutdown TaskState = 640
+ TaskStateFailed TaskState = 704
+ // TaskStateRejected means a task never ran, for instance if something about
+ // the environment failed (e.g. setting up a port on that node failed).
+ TaskStateRejected TaskState = 768
+ // TaskStateRemove is used to correctly handle service deletions and scale
+ // downs. This allows us to keep track of tasks that have been marked for
+ // deletion, but can't yet be removed because the agent is in the process of
+ // shutting them down.
Once the agent has shut down tasks with desired state
+ // REMOVE, the task reaper is responsible for removing them.
+ TaskStateRemove TaskState = 800
+ // TaskStateOrphaned is used to free up resources associated with service
+ // tasks on unresponsive nodes without having to delete those tasks. This
+ // state is directly assigned to the task by the orchestrator.
+ TaskStateOrphaned TaskState = 832
+)
+
+var TaskState_name = map[int32]string{
+ 0: "NEW",
+ 64: "PENDING",
+ 192: "ASSIGNED",
+ 256: "ACCEPTED",
+ 320: "PREPARING",
+ 384: "READY",
+ 448: "STARTING",
+ 512: "RUNNING",
+ 576: "COMPLETE",
+ 640: "SHUTDOWN",
+ 704: "FAILED",
+ 768: "REJECTED",
+ 800: "REMOVE",
+ 832: "ORPHANED",
+}
+var TaskState_value = map[string]int32{
+ "NEW": 0,
+ "PENDING": 64,
+ "ASSIGNED": 192,
+ "ACCEPTED": 256,
+ "PREPARING": 320,
+ "READY": 384,
+ "STARTING": 448,
+ "RUNNING": 512,
+ "COMPLETE": 576,
+ "SHUTDOWN": 640,
+ "FAILED": 704,
+ "REJECTED": 768,
+ "REMOVE": 800,
+ "ORPHANED": 832,
+}
+
+func (x TaskState) String() string {
+ return proto.EnumName(TaskState_name, int32(x))
+}
+func (TaskState) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+
+type NodeRole int32
+
+const (
+ NodeRoleWorker NodeRole = 0
+ NodeRoleManager NodeRole = 1
+)
+
+var NodeRole_name = map[int32]string{
+ 0: "WORKER",
+ 1: "MANAGER",
+}
+var NodeRole_value = map[string]int32{
+ "WORKER": 0,
+ "MANAGER": 1,
+}
+
+func (x NodeRole) String() string {
+ return proto.EnumName(NodeRole_name, int32(x))
+}
+func (NodeRole) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} }
+
+type RaftMemberStatus_Reachability int32
+
+const (
+ // Unknown indicates that the manager state cannot be resolved
+ RaftMemberStatus_UNKNOWN RaftMemberStatus_Reachability = 0
+ // Unreachable indicates that the node cannot be contacted by other
+ // raft cluster members.
+ RaftMemberStatus_UNREACHABLE RaftMemberStatus_Reachability = 1
+ // Reachable indicates that the node is healthy and reachable
+ // by other members.
+ RaftMemberStatus_REACHABLE RaftMemberStatus_Reachability = 2
+)
+
+var RaftMemberStatus_Reachability_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "UNREACHABLE",
+ 2: "REACHABLE",
+}
+var RaftMemberStatus_Reachability_value = map[string]int32{
+ "UNKNOWN": 0,
+ "UNREACHABLE": 1,
+ "REACHABLE": 2,
+}
+
+func (x RaftMemberStatus_Reachability) String() string {
+ return proto.EnumName(RaftMemberStatus_Reachability_name, int32(x))
+}
+func (RaftMemberStatus_Reachability) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{13, 0}
+}
+
+// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`.
+type NodeStatus_State int32
+
+const (
+ // Unknown indicates the node state cannot be resolved.
+ NodeStatus_UNKNOWN NodeStatus_State = 0
+ // Down indicates the node is down.
+ NodeStatus_DOWN NodeStatus_State = 1
+ // Ready indicates the node is ready to accept tasks.
+ NodeStatus_READY NodeStatus_State = 2
+ // Disconnected indicates the node is currently trying to find a new manager.
+ NodeStatus_DISCONNECTED NodeStatus_State = 3 +) + +var NodeStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DOWN", + 2: "READY", + 3: "DISCONNECTED", +} +var NodeStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "DOWN": 1, + "READY": 2, + "DISCONNECTED": 3, +} + +func (x NodeStatus_State) String() string { + return proto.EnumName(NodeStatus_State_name, int32(x)) +} +func (NodeStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14, 0} } + +type Mount_MountType int32 + +const ( + MountTypeBind Mount_MountType = 0 + MountTypeVolume Mount_MountType = 1 + MountTypeTmpfs Mount_MountType = 2 + MountTypeNamedPipe Mount_MountType = 3 +) + +var Mount_MountType_name = map[int32]string{ + 0: "BIND", + 1: "VOLUME", + 2: "TMPFS", + 3: "NPIPE", +} +var Mount_MountType_value = map[string]int32{ + "BIND": 0, + "VOLUME": 1, + "TMPFS": 2, + "NPIPE": 3, +} + +func (x Mount_MountType) String() string { + return proto.EnumName(Mount_MountType_name, int32(x)) +} +func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// Consistency indicates the tolerable level of file system consistency +type Mount_MountConsistency int32 + +const ( + MountConsistencyDefault Mount_MountConsistency = 0 + MountConsistencyFull Mount_MountConsistency = 1 + MountConsistencyCached Mount_MountConsistency = 2 + MountConsistencyDelegated Mount_MountConsistency = 3 +) + +var Mount_MountConsistency_name = map[int32]string{ + 0: "DEFAULT", + 1: "CONSISTENT", + 2: "CACHED", + 3: "DELEGATED", +} +var Mount_MountConsistency_value = map[string]int32{ + "DEFAULT": 0, + "CONSISTENT": 1, + "CACHED": 2, + "DELEGATED": 3, +} + +func (x Mount_MountConsistency) String() string { + return proto.EnumName(Mount_MountConsistency_name, int32(x)) +} +func (Mount_MountConsistency) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 1} +} + +type Mount_BindOptions_MountPropagation int32 + +const ( + MountPropagationRPrivate Mount_BindOptions_MountPropagation = 0 + MountPropagationPrivate Mount_BindOptions_MountPropagation = 1 + MountPropagationRShared Mount_BindOptions_MountPropagation = 2 + MountPropagationShared Mount_BindOptions_MountPropagation = 3 + MountPropagationRSlave Mount_BindOptions_MountPropagation = 4 + MountPropagationSlave Mount_BindOptions_MountPropagation = 5 +) + +var Mount_BindOptions_MountPropagation_name = map[int32]string{ + 0: "RPRIVATE", + 1: "PRIVATE", + 2: "RSHARED", + 3: "SHARED", + 4: "RSLAVE", + 5: "SLAVE", +} +var Mount_BindOptions_MountPropagation_value = map[string]int32{ + "RPRIVATE": 0, + "PRIVATE": 1, + "RSHARED": 2, + "SHARED": 3, + "RSLAVE": 4, + "SLAVE": 5, +} + +func (x Mount_BindOptions_MountPropagation) String() string { + return proto.EnumName(Mount_BindOptions_MountPropagation_name, int32(x)) +} +func (Mount_BindOptions_MountPropagation) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{16, 0, 0} +} + +type RestartPolicy_RestartCondition int32 + +const ( + RestartOnNone RestartPolicy_RestartCondition = 0 + RestartOnFailure RestartPolicy_RestartCondition = 1 + RestartOnAny RestartPolicy_RestartCondition = 2 +) + +var RestartPolicy_RestartCondition_name = map[int32]string{ + 0: "NONE", + 1: "ON_FAILURE", + 2: "ANY", +} +var RestartPolicy_RestartCondition_value = map[string]int32{ + "NONE": 0, + "ON_FAILURE": 1, + "ANY": 2, +} + +func (x RestartPolicy_RestartCondition) String() string { + return proto.EnumName(RestartPolicy_RestartCondition_name, int32(x)) +} +func 
(RestartPolicy_RestartCondition) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{17, 0} +} + +type UpdateConfig_FailureAction int32 + +const ( + UpdateConfig_PAUSE UpdateConfig_FailureAction = 0 + UpdateConfig_CONTINUE UpdateConfig_FailureAction = 1 + UpdateConfig_ROLLBACK UpdateConfig_FailureAction = 2 +) + +var UpdateConfig_FailureAction_name = map[int32]string{ + 0: "PAUSE", + 1: "CONTINUE", + 2: "ROLLBACK", +} +var UpdateConfig_FailureAction_value = map[string]int32{ + "PAUSE": 0, + "CONTINUE": 1, + "ROLLBACK": 2, +} + +func (x UpdateConfig_FailureAction) String() string { + return proto.EnumName(UpdateConfig_FailureAction_name, int32(x)) +} +func (UpdateConfig_FailureAction) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 0} +} + +// UpdateOrder controls the order of operations when rolling out an +// updated task. Either the old task is shut down before the new task +// is started, or the new task is started before the old task is shut +// down. +type UpdateConfig_UpdateOrder int32 + +const ( + UpdateConfig_STOP_FIRST UpdateConfig_UpdateOrder = 0 + UpdateConfig_START_FIRST UpdateConfig_UpdateOrder = 1 +) + +var UpdateConfig_UpdateOrder_name = map[int32]string{ + 0: "STOP_FIRST", + 1: "START_FIRST", +} +var UpdateConfig_UpdateOrder_value = map[string]int32{ + "STOP_FIRST": 0, + "START_FIRST": 1, +} + +func (x UpdateConfig_UpdateOrder) String() string { + return proto.EnumName(UpdateConfig_UpdateOrder_name, int32(x)) +} +func (UpdateConfig_UpdateOrder) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{18, 1} +} + +type UpdateStatus_UpdateState int32 + +const ( + UpdateStatus_UNKNOWN UpdateStatus_UpdateState = 0 + UpdateStatus_UPDATING UpdateStatus_UpdateState = 1 + UpdateStatus_PAUSED UpdateStatus_UpdateState = 2 + UpdateStatus_COMPLETED UpdateStatus_UpdateState = 3 + UpdateStatus_ROLLBACK_STARTED UpdateStatus_UpdateState = 4 + UpdateStatus_ROLLBACK_PAUSED UpdateStatus_UpdateState = 5 + UpdateStatus_ROLLBACK_COMPLETED UpdateStatus_UpdateState = 6 +) + +var UpdateStatus_UpdateState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "UPDATING", + 2: "PAUSED", + 3: "COMPLETED", + 4: "ROLLBACK_STARTED", + 5: "ROLLBACK_PAUSED", + 6: "ROLLBACK_COMPLETED", +} +var UpdateStatus_UpdateState_value = map[string]int32{ + "UNKNOWN": 0, + "UPDATING": 1, + "PAUSED": 2, + "COMPLETED": 3, + "ROLLBACK_STARTED": 4, + "ROLLBACK_PAUSED": 5, + "ROLLBACK_COMPLETED": 6, +} + +func (x UpdateStatus_UpdateState) String() string { + return proto.EnumName(UpdateStatus_UpdateState_name, int32(x)) +} +func (UpdateStatus_UpdateState) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{19, 0} +} + +// AddressFamily specifies the network address family that +// this IPAMConfig belongs to. 
+type IPAMConfig_AddressFamily int32 + +const ( + IPAMConfig_UNKNOWN IPAMConfig_AddressFamily = 0 + IPAMConfig_IPV4 IPAMConfig_AddressFamily = 4 + IPAMConfig_IPV6 IPAMConfig_AddressFamily = 6 +) + +var IPAMConfig_AddressFamily_name = map[int32]string{ + 0: "UNKNOWN", + 4: "IPV4", + 6: "IPV6", +} +var IPAMConfig_AddressFamily_value = map[string]int32{ + "UNKNOWN": 0, + "IPV4": 4, + "IPV6": 6, +} + +func (x IPAMConfig_AddressFamily) String() string { + return proto.EnumName(IPAMConfig_AddressFamily_name, int32(x)) +} +func (IPAMConfig_AddressFamily) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{24, 0} +} + +type PortConfig_Protocol int32 + +const ( + ProtocolTCP PortConfig_Protocol = 0 + ProtocolUDP PortConfig_Protocol = 1 + ProtocolSCTP PortConfig_Protocol = 2 +) + +var PortConfig_Protocol_name = map[int32]string{ + 0: "TCP", + 1: "UDP", + 2: "SCTP", +} +var PortConfig_Protocol_value = map[string]int32{ + "TCP": 0, + "UDP": 1, + "SCTP": 2, +} + +func (x PortConfig_Protocol) String() string { + return proto.EnumName(PortConfig_Protocol_name, int32(x)) +} +func (PortConfig_Protocol) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25, 0} } + +// PublishMode controls how ports are published on the swarm. +type PortConfig_PublishMode int32 + +const ( + // PublishModeIngress exposes the port across the cluster on all nodes. + PublishModeIngress PortConfig_PublishMode = 0 + // PublishModeHost exposes the port on just the target host. If the + // published port is undefined, an ephemeral port will be allocated. If + // the published port is defined, the node will attempt to allocate it, + // erroring the task if it fails. + PublishModeHost PortConfig_PublishMode = 1 +) + +var PortConfig_PublishMode_name = map[int32]string{ + 0: "INGRESS", + 1: "HOST", +} +var PortConfig_PublishMode_value = map[string]int32{ + "INGRESS": 0, + "HOST": 1, +} + +func (x PortConfig_PublishMode) String() string { + return proto.EnumName(PortConfig_PublishMode_name, int32(x)) +} +func (PortConfig_PublishMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{25, 1} +} + +type IssuanceStatus_State int32 + +const ( + IssuanceStateUnknown IssuanceStatus_State = 0 + // A new certificate should be issued + IssuanceStateRenew IssuanceStatus_State = 1 + // Certificate is pending acceptance + IssuanceStatePending IssuanceStatus_State = 2 + // successful completion certificate issuance + IssuanceStateIssued IssuanceStatus_State = 3 + // Certificate issuance failed + IssuanceStateFailed IssuanceStatus_State = 4 + // Signals workers to renew their certificate. From the CA's perspective + // this is equivalent to IssuanceStateIssued: a noop. 
+ IssuanceStateRotate IssuanceStatus_State = 5
+)
+
+var IssuanceStatus_State_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "RENEW",
+ 2: "PENDING",
+ 3: "ISSUED",
+ 4: "FAILED",
+ 5: "ROTATE",
+}
+var IssuanceStatus_State_value = map[string]int32{
+ "UNKNOWN": 0,
+ "RENEW": 1,
+ "PENDING": 2,
+ "ISSUED": 3,
+ "FAILED": 4,
+ "ROTATE": 5,
+}
+
+func (x IssuanceStatus_State) String() string {
+ return proto.EnumName(IssuanceStatus_State_name, int32(x))
+}
+func (IssuanceStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30, 0} }
+
+type ExternalCA_CAProtocol int32
+
+const (
+ ExternalCA_CAProtocolCFSSL ExternalCA_CAProtocol = 0
+)
+
+var ExternalCA_CAProtocol_name = map[int32]string{
+ 0: "CFSSL",
+}
+var ExternalCA_CAProtocol_value = map[string]int32{
+ "CFSSL": 0,
+}
+
+func (x ExternalCA_CAProtocol) String() string {
+ return proto.EnumName(ExternalCA_CAProtocol_name, int32(x))
+}
+func (ExternalCA_CAProtocol) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{32, 0}
+}
+
+// Encryption algorithm that can be implemented using this key
+type EncryptionKey_Algorithm int32
+
+const (
+ AES_128_GCM EncryptionKey_Algorithm = 0
+)
+
+var EncryptionKey_Algorithm_name = map[int32]string{
+ 0: "AES_128_GCM",
+}
+var EncryptionKey_Algorithm_value = map[string]int32{
+ "AES_128_GCM": 0,
+}
+
+func (x EncryptionKey_Algorithm) String() string {
+ return proto.EnumName(EncryptionKey_Algorithm_name, int32(x))
+}
+func (EncryptionKey_Algorithm) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{45, 0}
+}
+
+type MaybeEncryptedRecord_Algorithm int32
+
+const (
+ MaybeEncryptedRecord_NotEncrypted MaybeEncryptedRecord_Algorithm = 0
+ MaybeEncryptedRecord_NACLSecretboxSalsa20Poly1305 MaybeEncryptedRecord_Algorithm = 1
+ MaybeEncryptedRecord_FernetAES128CBC MaybeEncryptedRecord_Algorithm = 2
+)
+
+var MaybeEncryptedRecord_Algorithm_name = map[int32]string{
+ 0: "NONE",
+ 1: "SECRETBOX_SALSA20_POLY1305",
+ 2: "FERNET_AES_128_CBC",
+}
+var MaybeEncryptedRecord_Algorithm_value = map[string]int32{
+ "NONE": 0,
+ "SECRETBOX_SALSA20_POLY1305": 1,
+ "FERNET_AES_128_CBC": 2,
+}
+
+func (x MaybeEncryptedRecord_Algorithm) String() string {
+ return proto.EnumName(MaybeEncryptedRecord_Algorithm_name, int32(x))
+}
+func (MaybeEncryptedRecord_Algorithm) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptorTypes, []int{53, 0}
+}
+
+// Version tracks the last time an object in the store was updated.
+type Version struct {
+ Index uint64 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"`
+}
+
+func (m *Version) Reset() { *m = Version{} }
+func (*Version) ProtoMessage() {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{0} }
+
+type IndexEntry struct {
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ Val string `protobuf:"bytes,2,opt,name=val,proto3" json:"val,omitempty"`
+}
+
+func (m *IndexEntry) Reset() { *m = IndexEntry{} }
+func (*IndexEntry) ProtoMessage() {}
+func (*IndexEntry) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{1} }
+
+// Annotations provide useful information to identify API objects. They are
+// common to all API specs.
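+// For example (hypothetical values), an Annotations value might carry
+// Name: "web", Labels: {"env": "prod"}, and Indices: [{Key: "com.example.team",
+// Val: "infra"}], which identify the object and feed the custom indexers above.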
+type Annotations struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Indices provides keys and values for indexing this object. + // A single key may have multiple values. + Indices []IndexEntry `protobuf:"bytes,4,rep,name=indices" json:"indices"` +} + +func (m *Annotations) Reset() { *m = Annotations{} } +func (*Annotations) ProtoMessage() {} +func (*Annotations) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{2} } + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *NamedGenericResource) Reset() { *m = NamedGenericResource{} } +func (*NamedGenericResource) ProtoMessage() {} +func (*NamedGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{3} } + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DiscreteGenericResource) Reset() { *m = DiscreteGenericResource{} } +func (*DiscreteGenericResource) ProtoMessage() {} +func (*DiscreteGenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{4} } + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + // Types that are valid to be assigned to Resource: + // *GenericResource_NamedResourceSpec + // *GenericResource_DiscreteResourceSpec + Resource isGenericResource_Resource `protobuf_oneof:"resource"` +} + +func (m *GenericResource) Reset() { *m = GenericResource{} } +func (*GenericResource) ProtoMessage() {} +func (*GenericResource) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{5} } + +type isGenericResource_Resource interface { + isGenericResource_Resource() + MarshalTo([]byte) (int, error) + Size() int +} + +type GenericResource_NamedResourceSpec struct { + NamedResourceSpec *NamedGenericResource `protobuf:"bytes,1,opt,name=named_resource_spec,json=namedResourceSpec,oneof"` +} +type GenericResource_DiscreteResourceSpec struct { + DiscreteResourceSpec *DiscreteGenericResource `protobuf:"bytes,2,opt,name=discrete_resource_spec,json=discreteResourceSpec,oneof"` +} + +func (*GenericResource_NamedResourceSpec) isGenericResource_Resource() {} +func (*GenericResource_DiscreteResourceSpec) isGenericResource_Resource() {} + +func (m *GenericResource) GetResource() isGenericResource_Resource { + if m != nil { + return m.Resource + } + return nil +} + +func (m *GenericResource) GetNamedResourceSpec() *NamedGenericResource { + if x, ok := m.GetResource().(*GenericResource_NamedResourceSpec); ok { + return x.NamedResourceSpec + } + 
return nil +} + +func (m *GenericResource) GetDiscreteResourceSpec() *DiscreteGenericResource { + if x, ok := m.GetResource().(*GenericResource_DiscreteResourceSpec); ok { + return x.DiscreteResourceSpec + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*GenericResource) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _GenericResource_OneofMarshaler, _GenericResource_OneofUnmarshaler, _GenericResource_OneofSizer, []interface{}{ + (*GenericResource_NamedResourceSpec)(nil), + (*GenericResource_DiscreteResourceSpec)(nil), + } +} + +func _GenericResource_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NamedResourceSpec); err != nil { + return err + } + case *GenericResource_DiscreteResourceSpec: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.DiscreteResourceSpec); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("GenericResource.Resource has unexpected type %T", x) + } + return nil +} + +func _GenericResource_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*GenericResource) + switch tag { + case 1: // resource.named_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NamedGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_NamedResourceSpec{msg} + return true, err + case 2: // resource.discrete_resource_spec + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(DiscreteGenericResource) + err := b.DecodeMessage(msg) + m.Resource = &GenericResource_DiscreteResourceSpec{msg} + return true, err + default: + return false, nil + } +} + +func _GenericResource_OneofSizer(msg proto.Message) (n int) { + m := msg.(*GenericResource) + // resource + switch x := m.Resource.(type) { + case *GenericResource_NamedResourceSpec: + s := proto.Size(x.NamedResourceSpec) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *GenericResource_DiscreteResourceSpec: + s := proto.Size(x.DiscreteResourceSpec) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Resources struct { + // Amount of CPUs (e.g. 2000000000 = 2 CPU cores) + NanoCPUs int64 `protobuf:"varint,1,opt,name=nano_cpus,json=nanoCpus,proto3" json:"nano_cpus,omitempty"` + // Amount of memory in bytes. 
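+ // (e.g. 2147483648 = 2 GiB)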
+ MemoryBytes int64 `protobuf:"varint,2,opt,name=memory_bytes,json=memoryBytes,proto3" json:"memory_bytes,omitempty"` + // User specified resource (e.g: bananas=2;apple={red,yellow,green}) + Generic []*GenericResource `protobuf:"bytes,3,rep,name=generic" json:"generic,omitempty"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{6} } + +type ResourceRequirements struct { + Limits *Resources `protobuf:"bytes,1,opt,name=limits" json:"limits,omitempty"` + Reservations *Resources `protobuf:"bytes,2,opt,name=reservations" json:"reservations,omitempty"` + // Amount of swap in bytes - can only be used together with a memory limit + // -1 means unlimited + // a null pointer indicates that the default behaviour of granting twice + // the memory is maintained + SwapBytes *google_protobuf2.Int64Value `protobuf:"bytes,3,opt,name=swap_bytes,json=swapBytes" json:"swap_bytes,omitempty"` + // Tune container memory swappiness (0 to 100) - if not specified, defaults + // to the container OS's default - generally 60, or the value predefined in + // the image; set to -1 to unset a previously set value + MemorySwappiness *google_protobuf2.Int64Value `protobuf:"bytes,4,opt,name=memory_swappiness,json=memorySwappiness" json:"memory_swappiness,omitempty"` +} + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{7} } + +type Platform struct { + // Architecture (e.g. x86_64) + Architecture string `protobuf:"bytes,1,opt,name=architecture,proto3" json:"architecture,omitempty"` + // Operating System (e.g. linux) + OS string `protobuf:"bytes,2,opt,name=os,proto3" json:"os,omitempty"` +} + +func (m *Platform) Reset() { *m = Platform{} } +func (*Platform) ProtoMessage() {} +func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{8} } + +// PluginDescription describes an engine plugin. +type PluginDescription struct { + // Type of plugin. Canonical values for existing types are + // Volume, Network, and Authorization. More types could be + // supported in the future. + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + // Name of the plugin + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` +} + +func (m *PluginDescription) Reset() { *m = PluginDescription{} } +func (*PluginDescription) ProtoMessage() {} +func (*PluginDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{9} } + +type EngineDescription struct { + // Docker daemon version running on the node. + EngineVersion string `protobuf:"bytes,1,opt,name=engine_version,json=engineVersion,proto3" json:"engine_version,omitempty"` + // Labels attached to the engine. + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Volume, Network, and Auth plugins + Plugins []PluginDescription `protobuf:"bytes,3,rep,name=plugins" json:"plugins"` +} + +func (m *EngineDescription) Reset() { *m = EngineDescription{} } +func (*EngineDescription) ProtoMessage() {} +func (*EngineDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{10} } + +type NodeDescription struct { + // Hostname of the node as reported by the agent. 
+ // This is different from spec.meta.name, which is user-defined.
+ Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
+ // Platform of the node.
+ Platform *Platform `protobuf:"bytes,2,opt,name=platform" json:"platform,omitempty"`
+ // Total resources on the node.
+ Resources *Resources `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"`
+ // Information about the Docker Engine on the node.
+ Engine *EngineDescription `protobuf:"bytes,4,opt,name=engine" json:"engine,omitempty"`
+ // Information on the node's TLS setup
+ TLSInfo *NodeTLSInfo `protobuf:"bytes,5,opt,name=tls_info,json=tlsInfo" json:"tls_info,omitempty"`
+ // FIPS indicates whether the node is FIPS-enabled
+ FIPS bool `protobuf:"varint,6,opt,name=fips,proto3" json:"fips,omitempty"`
+}
+
+func (m *NodeDescription) Reset() { *m = NodeDescription{} }
+func (*NodeDescription) ProtoMessage() {}
+func (*NodeDescription) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{11} }
+
+type NodeTLSInfo struct {
+ // Information about which root certs the node trusts
+ TrustRoot []byte `protobuf:"bytes,1,opt,name=trust_root,json=trustRoot,proto3" json:"trust_root,omitempty"`
+ // Information about the node's current TLS certificate
+ CertIssuerSubject []byte `protobuf:"bytes,2,opt,name=cert_issuer_subject,json=certIssuerSubject,proto3" json:"cert_issuer_subject,omitempty"`
+ CertIssuerPublicKey []byte `protobuf:"bytes,3,opt,name=cert_issuer_public_key,json=certIssuerPublicKey,proto3" json:"cert_issuer_public_key,omitempty"`
+}
+
+func (m *NodeTLSInfo) Reset() { *m = NodeTLSInfo{} }
+func (*NodeTLSInfo) ProtoMessage() {}
+func (*NodeTLSInfo) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{12} }
+
+type RaftMemberStatus struct {
+ Leader bool `protobuf:"varint,1,opt,name=leader,proto3" json:"leader,omitempty"`
+ Reachability RaftMemberStatus_Reachability `protobuf:"varint,2,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (m *RaftMemberStatus) Reset() { *m = RaftMemberStatus{} }
+func (*RaftMemberStatus) ProtoMessage() {}
+func (*RaftMemberStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{13} }
+
+type NodeStatus struct {
+ State NodeStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.NodeStatus_State" json:"state,omitempty"`
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ // Addr is the node's IP address as observed by the manager
+ Addr string `protobuf:"bytes,3,opt,name=addr,proto3" json:"addr,omitempty"`
+}
+
+func (m *NodeStatus) Reset() { *m = NodeStatus{} }
+func (*NodeStatus) ProtoMessage() {}
+func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{14} }
+
+type Image struct {
+ // reference is a docker image reference. This can include a repository, tag
+ // or be fully qualified with a digest. The format is specified in the
+ // distribution/reference package.
+ Reference string `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
+}
+
+func (m *Image) Reset() { *m = Image{} }
+func (*Image) ProtoMessage() {}
+func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{15} }
+
+// Mount describes volume mounts for a container.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, target.
Top-level flags, such as writable, are common to all kinds +// of mounts, where we also provide options that are specific to a type of +// mount. This corresponds to flags and data, respectively, in the syscall. +type Mount struct { + // Type defines the nature of the mount. + Type Mount_MountType `protobuf:"varint,1,opt,name=type,proto3,enum=docker.swarmkit.v1.Mount_MountType" json:"type,omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"` + // Target path in container + Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"` + // ReadOnly should be set to true if the mount should not be writable. + ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + Consistency Mount_MountConsistency `protobuf:"varint,8,opt,name=consistency,proto3,enum=docker.swarmkit.v1.Mount_MountConsistency" json:"consistency,omitempty"` + // BindOptions configures properties of a bind mount type. + // + // For mounts of type bind, the source must be an absolute host path. + BindOptions *Mount_BindOptions `protobuf:"bytes,5,opt,name=bind_options,json=bindOptions" json:"bind_options,omitempty"` + // VolumeOptions configures the properties specific to a volume mount type. + // + // For mounts of type volume, the source will be used as the volume name. + VolumeOptions *Mount_VolumeOptions `protobuf:"bytes,6,opt,name=volume_options,json=volumeOptions" json:"volume_options,omitempty"` + // TmpfsOptions allows one to set options for mounting a temporary + // filesystem. + // + // The source field will be ignored when using mounts of type tmpfs. + TmpfsOptions *Mount_TmpfsOptions `protobuf:"bytes,7,opt,name=tmpfs_options,json=tmpfsOptions" json:"tmpfs_options,omitempty"` +} + +func (m *Mount) Reset() { *m = Mount{} } +func (*Mount) ProtoMessage() {} +func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16} } + +// BindOptions specifies options that are specific to a bind mount. +type Mount_BindOptions struct { + // Propagation mode of mount. + Propagation Mount_BindOptions_MountPropagation `protobuf:"varint,1,opt,name=propagation,proto3,enum=docker.swarmkit.v1.Mount_BindOptions_MountPropagation" json:"propagation,omitempty"` + // allows non-recursive bind-mount, i.e. mount(2) with "bind" rather than "rbind". + NonRecursive bool `protobuf:"varint,2,opt,name=nonrecursive,proto3" json:"nonrecursive,omitempty"` +} + +func (m *Mount_BindOptions) Reset() { *m = Mount_BindOptions{} } +func (*Mount_BindOptions) ProtoMessage() {} +func (*Mount_BindOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} } + +// VolumeOptions contains parameters for mounting the volume. +type Mount_VolumeOptions struct { + // nocopy prevents automatic copying of data to the volume with data from target + NoCopy bool `protobuf:"varint,1,opt,name=nocopy,proto3" json:"nocopy,omitempty"` + // labels to apply to the volume if creating + Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // DriverConfig specifies the options that may be passed to the driver + // if the volume is created. + // + // If this is empty, no volume will be created if the volume is missing. 
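+ // For example (hypothetical), a Driver with Name "local" and no options
+ // asks for the volume to be created by the default local driver.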
+ DriverConfig *Driver `protobuf:"bytes,3,opt,name=driver_config,json=driverConfig" json:"driver_config,omitempty"`
+}
+
+func (m *Mount_VolumeOptions) Reset() { *m = Mount_VolumeOptions{} }
+func (*Mount_VolumeOptions) ProtoMessage() {}
+func (*Mount_VolumeOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 1} }
+
+type Mount_TmpfsOptions struct {
+ // Size sets the size of the tmpfs, in bytes.
+ //
+ // This will be converted to an operating system specific value
+ // depending on the host. For example, on linux, it will be converted to
+ // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
+ // docker, uses a straight byte value.
+ //
+ // Percentages are not supported.
+ SizeBytes int64 `protobuf:"varint,1,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
+ // Mode of the tmpfs upon creation
+ Mode os.FileMode `protobuf:"varint,2,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"`
+ // Options passed to tmpfs mount
+ Options string `protobuf:"bytes,3,opt,name=options,proto3" json:"options,omitempty"`
+}
+
+func (m *Mount_TmpfsOptions) Reset() { *m = Mount_TmpfsOptions{} }
+func (*Mount_TmpfsOptions) ProtoMessage() {}
+func (*Mount_TmpfsOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 2} }
+
+type RestartPolicy struct {
+ Condition RestartPolicy_RestartCondition `protobuf:"varint,1,opt,name=condition,proto3,enum=docker.swarmkit.v1.RestartPolicy_RestartCondition" json:"condition,omitempty"`
+ // Delay between restart attempts
+ // Note: can't use stdduration because this field needs to be nullable.
+ Delay *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=delay" json:"delay,omitempty"`
+ // MaxAttempts is the maximum number of restarts to attempt on an
+ // instance before giving up. Ignored if 0.
+ MaxAttempts uint64 `protobuf:"varint,3,opt,name=max_attempts,json=maxAttempts,proto3" json:"max_attempts,omitempty"`
+ // Window is the time window used to evaluate the restart policy.
+ // The time window is unbounded if this is 0.
+ // Note: can't use stdduration because this field needs to be nullable.
+ Window *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=window" json:"window,omitempty"`
+}
+
+func (m *RestartPolicy) Reset() { *m = RestartPolicy{} }
+func (*RestartPolicy) ProtoMessage() {}
+func (*RestartPolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{17} }
+
+// UpdateConfig specifies the rate and policy of updates.
+// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy.
+type UpdateConfig struct {
+ // Maximum number of tasks to be updated in one iteration.
+ // 0 means unlimited parallelism.
+ Parallelism uint64 `protobuf:"varint,1,opt,name=parallelism,proto3" json:"parallelism,omitempty"`
+ // Amount of time between updates.
+ Delay time.Duration `protobuf:"bytes,2,opt,name=delay,stdduration" json:"delay"`
+ // FailureAction is the action to take when an update fails.
+ FailureAction UpdateConfig_FailureAction `protobuf:"varint,3,opt,name=failure_action,json=failureAction,proto3,enum=docker.swarmkit.v1.UpdateConfig_FailureAction" json:"failure_action,omitempty"`
+ // Monitor indicates how long to monitor a task for failure after it is
+ // created. If the task fails by ending up in one of the states
+ // REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+ // this counts as a failure. If it fails after Monitor, it does not
+ // count as a failure.
If Monitor is unspecified, a default value will + // be used. + // Note: can't use stdduration because this field needs to be nullable. + Monitor *google_protobuf1.Duration `protobuf:"bytes,4,opt,name=monitor" json:"monitor,omitempty"` + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + // If the failure action is ROLLBACK, the orchestrator will attempt to + // roll back to the previous service spec. If the MaxFailureRatio + // threshold is hit during the rollback, the rollback will pause. + MaxFailureRatio float32 `protobuf:"fixed32,5,opt,name=max_failure_ratio,json=maxFailureRatio,proto3" json:"max_failure_ratio,omitempty"` + Order UpdateConfig_UpdateOrder `protobuf:"varint,6,opt,name=order,proto3,enum=docker.swarmkit.v1.UpdateConfig_UpdateOrder" json:"order,omitempty"` +} + +func (m *UpdateConfig) Reset() { *m = UpdateConfig{} } +func (*UpdateConfig) ProtoMessage() {} +func (*UpdateConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{18} } + +// UpdateStatus is the status of an update in progress. +type UpdateStatus struct { + // State is the state of this update. It indicates whether the + // update is in progress, completed, paused, rolling back, or + // finished rolling back. + State UpdateStatus_UpdateState `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.UpdateStatus_UpdateState" json:"state,omitempty"` + // StartedAt is the time at which the update was started. + // Note: can't use stdtime because this field is nullable. + StartedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + // CompletedAt is the time at which the update completed successfully, + // paused, or finished rolling back. + // Note: can't use stdtime because this field is nullable. + CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + // Message explains how the update got into its current state. For + // example, if the update is paused, it will explain what is preventing + // the update from proceeding (typically the failure of a task to start up + // when OnFailure is PAUSE). + Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *UpdateStatus) Reset() { *m = UpdateStatus{} } +func (*UpdateStatus) ProtoMessage() {} +func (*UpdateStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{19} } + +// Container specific status. 
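+// A healthy running task might report, e.g., a ContainerID, PID 1234, and
+// ExitCode 0 (illustrative values only).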
+type ContainerStatus struct {
+ ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+ PID int32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+ ExitCode int32 `protobuf:"varint,3,opt,name=exit_code,json=exitCode,proto3" json:"exit_code,omitempty"`
+}
+
+func (m *ContainerStatus) Reset() { *m = ContainerStatus{} }
+func (*ContainerStatus) ProtoMessage() {}
+func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{20} }
+
+// PortStatus specifies the actual allocated runtime state of a list
+// of port configs.
+type PortStatus struct {
+ Ports []*PortConfig `protobuf:"bytes,1,rep,name=ports" json:"ports,omitempty"`
+}
+
+func (m *PortStatus) Reset() { *m = PortStatus{} }
+func (*PortStatus) ProtoMessage() {}
+func (*PortStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{21} }
+
+type TaskStatus struct {
+ // Note: can't use stdtime because this field is nullable.
+ Timestamp *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"`
+ // State expresses the current state of the task.
+ State TaskState `protobuf:"varint,2,opt,name=state,proto3,enum=docker.swarmkit.v1.TaskState" json:"state,omitempty"`
+ // Message reports a message for the task status. This should provide a
+ // human readable message that can point to how the task actually arrived
+ // at a current state.
+ //
+ // As a convention, we place a small message here that led to the
+ // current state. For example, if the task is in ready because it was
+ // prepared, we'd place "prepared" in this field. If we skipped preparation
+ // because the task is prepared, we would put "already prepared" in this
+ // field.
+ Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+ // Err is set if the task is in an error state, or is unable to
+ // progress from an earlier state because a precondition is
+ // unsatisfied.
+ //
+ // The following states should report a companion error:
+ //
+ // FAILED, REJECTED
+ //
+ // In general, messages that should be surfaced to users belong in the
+ // Err field, and notes on routine state transitions belong in Message.
+ //
+ // TODO(stevvooe) Integrate this field with the error interface.
+ Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"`
+ // Container status contains container specific status information.
+ //
+ // Types that are valid to be assigned to RuntimeStatus:
+ // *TaskStatus_Container
+ RuntimeStatus isTaskStatus_RuntimeStatus `protobuf_oneof:"runtime_status"`
+ // HostPorts provides a list of ports allocated at the host
+ // level.
+ PortStatus *PortStatus `protobuf:"bytes,6,opt,name=port_status,json=portStatus" json:"port_status,omitempty"`
+ // AppliedBy gives the node ID of the manager that applied this task
+ // status update to the Task object.
+ AppliedBy string `protobuf:"bytes,7,opt,name=applied_by,json=appliedBy,proto3" json:"applied_by,omitempty"`
+ // AppliedAt gives a timestamp of when this status update was applied to
+ // the Task object.
+ // Note: can't use stdtime because this field is nullable.
+ AppliedAt *google_protobuf.Timestamp `protobuf:"bytes,8,opt,name=applied_at,json=appliedAt" json:"applied_at,omitempty"` +} + +func (m *TaskStatus) Reset() { *m = TaskStatus{} } +func (*TaskStatus) ProtoMessage() {} +func (*TaskStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{22} } + +type isTaskStatus_RuntimeStatus interface { + isTaskStatus_RuntimeStatus() + MarshalTo([]byte) (int, error) + Size() int +} + +type TaskStatus_Container struct { + Container *ContainerStatus `protobuf:"bytes,5,opt,name=container,oneof"` +} + +func (*TaskStatus_Container) isTaskStatus_RuntimeStatus() {} + +func (m *TaskStatus) GetRuntimeStatus() isTaskStatus_RuntimeStatus { + if m != nil { + return m.RuntimeStatus + } + return nil +} + +func (m *TaskStatus) GetContainer() *ContainerStatus { + if x, ok := m.GetRuntimeStatus().(*TaskStatus_Container); ok { + return x.Container + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*TaskStatus) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _TaskStatus_OneofMarshaler, _TaskStatus_OneofUnmarshaler, _TaskStatus_OneofSizer, []interface{}{ + (*TaskStatus_Container)(nil), + } +} + +func _TaskStatus_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*TaskStatus) + // runtime_status + switch x := m.RuntimeStatus.(type) { + case *TaskStatus_Container: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Container); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("TaskStatus.RuntimeStatus has unexpected type %T", x) + } + return nil +} + +func _TaskStatus_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*TaskStatus) + switch tag { + case 5: // runtime_status.container + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ContainerStatus) + err := b.DecodeMessage(msg) + m.RuntimeStatus = &TaskStatus_Container{msg} + return true, err + default: + return false, nil + } +} + +func _TaskStatus_OneofSizer(msg proto.Message) (n int) { + m := msg.(*TaskStatus) + // runtime_status + switch x := m.RuntimeStatus.(type) { + case *TaskStatus_Container: + s := proto.Size(x.Container) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// NetworkAttachmentConfig specifies how a service should be attached to a particular network. +// +// For now, this is a simple struct, but this can include future information +// instructing Swarm on how this service should work on the particular +// network. +type NetworkAttachmentConfig struct { + // Target specifies the target network for attachment. This value must be a + // network ID. + Target string `protobuf:"bytes,1,opt,name=target,proto3" json:"target,omitempty"` + // Aliases specifies a list of discoverable alternate names for the service on this Target. + Aliases []string `protobuf:"bytes,2,rep,name=aliases" json:"aliases,omitempty"` + // Addresses specifies a list of ipv4 and ipv6 addresses + // preferred. If these addresses are not available then the + // attachment might fail. 
+ Addresses []string `protobuf:"bytes,3,rep,name=addresses" json:"addresses,omitempty"`
+ // DriverAttachmentOpts is a map of driver attachment options for the network target
+ DriverAttachmentOpts map[string]string `protobuf:"bytes,4,rep,name=driver_attachment_opts,json=driverAttachmentOpts" json:"driver_attachment_opts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *NetworkAttachmentConfig) Reset() { *m = NetworkAttachmentConfig{} }
+func (*NetworkAttachmentConfig) ProtoMessage() {}
+func (*NetworkAttachmentConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{23} }
+
+// IPAMConfig specifies parameters for IP Address Management.
+type IPAMConfig struct {
+ Family IPAMConfig_AddressFamily `protobuf:"varint,1,opt,name=family,proto3,enum=docker.swarmkit.v1.IPAMConfig_AddressFamily" json:"family,omitempty"`
+ // Subnet defines a network as a CIDR address (i.e. network and mask,
+ // 192.168.0.1/24).
+ Subnet string `protobuf:"bytes,2,opt,name=subnet,proto3" json:"subnet,omitempty"`
+ // Range defines the portion of the subnet to allocate to tasks. This is
+ // defined as a subnet within the primary subnet.
+ Range string `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+ // Gateway address within the subnet.
+ Gateway string `protobuf:"bytes,4,opt,name=gateway,proto3" json:"gateway,omitempty"`
+ // Reserved is a list of addresses from the master pool that should *not* be
+ // allocated. These addresses may have already been allocated or may be
+ // reserved for another allocation manager.
+ Reserved map[string]string `protobuf:"bytes,5,rep,name=reserved" json:"reserved,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *IPAMConfig) Reset() { *m = IPAMConfig{} }
+func (*IPAMConfig) ProtoMessage() {}
+func (*IPAMConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{24} }
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. This can be later queried
+// using a service discovery api or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service external to the cluster by sending a connection
+// request to this port to any node on the cluster.
+type PortConfig struct {
+ // Name for the port. If provided the port information can
+ // be queried using the name as in a DNS SRV query.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // Protocol for the port which is exposed.
+ Protocol PortConfig_Protocol `protobuf:"varint,2,opt,name=protocol,proto3,enum=docker.swarmkit.v1.PortConfig_Protocol" json:"protocol,omitempty"`
+ // The port which the application is exposing and is bound to.
+ TargetPort uint32 `protobuf:"varint,3,opt,name=target_port,json=targetPort,proto3" json:"target_port,omitempty"`
+ // PublishedPort specifies the port on which the service is exposed. If
+ // specified, the port must be within the available range. If not specified
+ // (value is zero), an available port is automatically assigned.
+ PublishedPort uint32 `protobuf:"varint,4,opt,name=published_port,json=publishedPort,proto3" json:"published_port,omitempty"`
+ // PublishMode controls how the port is published.
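+ // For example, PublishModeIngress together with PublishedPort 8080 makes the
+ // service reachable on port 8080 of every node via the routing mesh
+ // (illustrative port value).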
+ PublishMode PortConfig_PublishMode `protobuf:"varint,5,opt,name=publish_mode,json=publishMode,proto3,enum=docker.swarmkit.v1.PortConfig_PublishMode" json:"publish_mode,omitempty"` +} + +func (m *PortConfig) Reset() { *m = PortConfig{} } +func (*PortConfig) ProtoMessage() {} +func (*PortConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{25} } + +// Driver is a generic driver type to be used throughout the API. For now, a +// driver is simply a name and set of options. The field contents depend on the +// target use case and driver application. For example, a network driver may +// have different rules than a volume driver. +type Driver struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Options map[string]string `protobuf:"bytes,2,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *Driver) Reset() { *m = Driver{} } +func (*Driver) ProtoMessage() {} +func (*Driver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{26} } + +type IPAMOptions struct { + Driver *Driver `protobuf:"bytes,1,opt,name=driver" json:"driver,omitempty"` + Configs []*IPAMConfig `protobuf:"bytes,3,rep,name=configs" json:"configs,omitempty"` +} + +func (m *IPAMOptions) Reset() { *m = IPAMOptions{} } +func (*IPAMOptions) ProtoMessage() {} +func (*IPAMOptions) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{27} } + +// Peer should be used anywhere where we are describing a remote peer. +type Peer struct { + NodeID string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` +} + +func (m *Peer) Reset() { *m = Peer{} } +func (*Peer) ProtoMessage() {} +func (*Peer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{28} } + +// WeightedPeer should be used anywhere where we are describing a remote peer +// with a weight. +type WeightedPeer struct { + Peer *Peer `protobuf:"bytes,1,opt,name=peer" json:"peer,omitempty"` + Weight int64 `protobuf:"varint,2,opt,name=weight,proto3" json:"weight,omitempty"` +} + +func (m *WeightedPeer) Reset() { *m = WeightedPeer{} } +func (*WeightedPeer) ProtoMessage() {} +func (*WeightedPeer) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{29} } + +type IssuanceStatus struct { + State IssuanceStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=docker.swarmkit.v1.IssuanceStatus_State" json:"state,omitempty"` + // Err is set if the Certificate Issuance is in an error state. 
+ // The following states should report a companion error: + // FAILED + Err string `protobuf:"bytes,2,opt,name=err,proto3" json:"err,omitempty"` +} + +func (m *IssuanceStatus) Reset() { *m = IssuanceStatus{} } +func (*IssuanceStatus) ProtoMessage() {} +func (*IssuanceStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{30} } + +type AcceptancePolicy struct { + Policies []*AcceptancePolicy_RoleAdmissionPolicy `protobuf:"bytes,1,rep,name=policies" json:"policies,omitempty"` +} + +func (m *AcceptancePolicy) Reset() { *m = AcceptancePolicy{} } +func (*AcceptancePolicy) ProtoMessage() {} +func (*AcceptancePolicy) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{31} } + +type AcceptancePolicy_RoleAdmissionPolicy struct { + Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"` + // Autoaccept controls which roles' certificates are automatically + // issued without administrator intervention. + Autoaccept bool `protobuf:"varint,2,opt,name=autoaccept,proto3" json:"autoaccept,omitempty"` + // Secret represents a user-provided string that is necessary for new + // nodes to join the cluster + Secret *AcceptancePolicy_RoleAdmissionPolicy_Secret `protobuf:"bytes,3,opt,name=secret" json:"secret,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Reset() { *m = AcceptancePolicy_RoleAdmissionPolicy{} } +func (*AcceptancePolicy_RoleAdmissionPolicy) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0} +} + +type AcceptancePolicy_RoleAdmissionPolicy_Secret struct { + // The actual content (possibly hashed) + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // The type of hash we are using, or "plaintext" + Alg string `protobuf:"bytes,2,opt,name=alg,proto3" json:"alg,omitempty"` +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Reset() { + *m = AcceptancePolicy_RoleAdmissionPolicy_Secret{} +} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) ProtoMessage() {} +func (*AcceptancePolicy_RoleAdmissionPolicy_Secret) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{31, 0, 0} +} + +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCA_CAProtocol `protobuf:"varint,1,opt,name=protocol,proto3,enum=docker.swarmkit.v1.ExternalCA_CAProtocol" json:"protocol,omitempty"` + // URL is the URL where the external CA can be reached. + URL string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `protobuf:"bytes,3,rep,name=options" json:"options,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // CACert specifies which root CA is used by this external CA + CACert []byte `protobuf:"bytes,4,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` +} + +func (m *ExternalCA) Reset() { *m = ExternalCA{} } +func (*ExternalCA) ProtoMessage() {} +func (*ExternalCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{32} } + +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + // Note: can't use stdduration because this field needs to be nullable. 
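+ //
+ // For illustration only (an editor's sketch): this nullable duration is
+ // the gogo/protobuf Duration message, so a 90-day expiry could be set via
+ // the gogo types helper, assuming that package is imported as gogotypes:
+ //
+ //	cfg := &CAConfig{
+ //		NodeCertExpiry: gogotypes.DurationProto(90 * 24 * time.Hour),
+ //	}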
+ NodeCertExpiry *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=node_cert_expiry,json=nodeCertExpiry" json:"node_cert_expiry,omitempty"`
+ // ExternalCAs is a list of CAs to which a manager node will make
+ // certificate signing requests for node certificates.
+ ExternalCAs []*ExternalCA `protobuf:"bytes,2,rep,name=external_cas,json=externalCas" json:"external_cas,omitempty"`
+ // SigningCACert is the desired CA certificate to be used as the root and
+ // signing CA for the swarm. If not provided, this indicates that we are either happy
+ // with the current configuration, or (together with a bump in the ForceRotate value)
+ // that we want a certificate and key generated for us.
+ SigningCACert []byte `protobuf:"bytes,3,opt,name=signing_ca_cert,json=signingCaCert,proto3" json:"signing_ca_cert,omitempty"`
+ // SigningCAKey is the desired private key, matching the signing CA cert, to be used
+ // to sign certificates for the swarm.
+ SigningCAKey []byte `protobuf:"bytes,4,opt,name=signing_ca_key,json=signingCaKey,proto3" json:"signing_ca_key,omitempty"`
+ // ForceRotate is a counter that triggers a root CA rotation even if no relevant
+ // parameters have been changed in the spec. This will force the manager to generate a new
+ // certificate and key, if none have been provided.
+ ForceRotate uint64 `protobuf:"varint,5,opt,name=force_rotate,json=forceRotate,proto3" json:"force_rotate,omitempty"`
+}
+
+func (m *CAConfig) Reset() { *m = CAConfig{} }
+func (*CAConfig) ProtoMessage() {}
+func (*CAConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{33} }
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+type OrchestrationConfig struct {
+ // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+ // node. If negative, never remove completed or failed tasks.
+ TaskHistoryRetentionLimit int64 `protobuf:"varint,1,opt,name=task_history_retention_limit,json=taskHistoryRetentionLimit,proto3" json:"task_history_retention_limit,omitempty"`
+}
+
+func (m *OrchestrationConfig) Reset() { *m = OrchestrationConfig{} }
+func (*OrchestrationConfig) ProtoMessage() {}
+func (*OrchestrationConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{34} }
+
+// TaskDefaults specifies default values for task creation.
+type TaskDefaults struct {
+ // LogDriver specifies the log driver to use for the cluster if not
+ // specified for each task.
+ //
+ // If this is changed, only new tasks will pick up the new log driver.
+ // Existing tasks will continue to use the previous default until rescheduled.
+ LogDriver *Driver `protobuf:"bytes,1,opt,name=log_driver,json=logDriver" json:"log_driver,omitempty"`
+}
+
+func (m *TaskDefaults) Reset() { *m = TaskDefaults{} }
+func (*TaskDefaults) ProtoMessage() {}
+func (*TaskDefaults) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{35} }
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+type DispatcherConfig struct {
+ // HeartbeatPeriod defines how often an agent should send heartbeats to
+ // the dispatcher.
+ // Note: can't use stdduration because this field needs to be nullable.
+ HeartbeatPeriod *google_protobuf1.Duration `protobuf:"bytes,1,opt,name=heartbeat_period,json=heartbeatPeriod" json:"heartbeat_period,omitempty"`
+}
+
+func (m *DispatcherConfig) Reset() { *m = DispatcherConfig{} }
+func (*DispatcherConfig) ProtoMessage() {}
+func (*DispatcherConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{36} }
+
+// RaftConfig defines raft settings for the cluster.
+type RaftConfig struct {
+ // SnapshotInterval is the number of log entries between snapshots.
+ SnapshotInterval uint64 `protobuf:"varint,1,opt,name=snapshot_interval,json=snapshotInterval,proto3" json:"snapshot_interval,omitempty"`
+ // KeepOldSnapshots is the number of snapshots to keep beyond the
+ // current snapshot.
+ KeepOldSnapshots uint64 `protobuf:"varint,2,opt,name=keep_old_snapshots,json=keepOldSnapshots,proto3" json:"keep_old_snapshots,omitempty"`
+ // LogEntriesForSlowFollowers is the number of log entries to keep
+ // around to sync up slow followers after a snapshot is created.
+ LogEntriesForSlowFollowers uint64 `protobuf:"varint,3,opt,name=log_entries_for_slow_followers,json=logEntriesForSlowFollowers,proto3" json:"log_entries_for_slow_followers,omitempty"`
+ // HeartbeatTick defines the number of ticks (in seconds) between
+ // heartbeat messages sent to other members for health checks.
+ HeartbeatTick uint32 `protobuf:"varint,4,opt,name=heartbeat_tick,json=heartbeatTick,proto3" json:"heartbeat_tick,omitempty"`
+ // ElectionTick defines the number of ticks (in seconds) needed
+ // without a leader to trigger a new election.
+ ElectionTick uint32 `protobuf:"varint,5,opt,name=election_tick,json=electionTick,proto3" json:"election_tick,omitempty"`
+}
+
+func (m *RaftConfig) Reset() { *m = RaftConfig{} }
+func (*RaftConfig) ProtoMessage() {}
+func (*RaftConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{37} }
+
+type EncryptionConfig struct {
+ // AutoLockManagers specifies whether or not managers' TLS keys and raft data
+ // should be encrypted at rest in such a way that they must be unlocked
+ // before the manager node starts up again.
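+ //
+ // For illustration: this is the setting behind the Docker CLI's
+ // `docker swarm init --autolock` and `docker swarm update --autolock=true`;
+ // once enabled, a restarted manager stays sealed until an operator runs
+ // `docker swarm unlock` and supplies the unlock key.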
+ AutoLockManagers bool `protobuf:"varint,1,opt,name=auto_lock_managers,json=autoLockManagers,proto3" json:"auto_lock_managers,omitempty"` +} + +func (m *EncryptionConfig) Reset() { *m = EncryptionConfig{} } +func (*EncryptionConfig) ProtoMessage() {} +func (*EncryptionConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{38} } + +type SpreadOver struct { + SpreadDescriptor string `protobuf:"bytes,1,opt,name=spread_descriptor,json=spreadDescriptor,proto3" json:"spread_descriptor,omitempty"` +} + +func (m *SpreadOver) Reset() { *m = SpreadOver{} } +func (*SpreadOver) ProtoMessage() {} +func (*SpreadOver) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{39} } + +type PlacementPreference struct { + // Types that are valid to be assigned to Preference: + // *PlacementPreference_Spread + Preference isPlacementPreference_Preference `protobuf_oneof:"Preference"` +} + +func (m *PlacementPreference) Reset() { *m = PlacementPreference{} } +func (*PlacementPreference) ProtoMessage() {} +func (*PlacementPreference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{40} } + +type isPlacementPreference_Preference interface { + isPlacementPreference_Preference() + MarshalTo([]byte) (int, error) + Size() int +} + +type PlacementPreference_Spread struct { + Spread *SpreadOver `protobuf:"bytes,1,opt,name=spread,oneof"` +} + +func (*PlacementPreference_Spread) isPlacementPreference_Preference() {} + +func (m *PlacementPreference) GetPreference() isPlacementPreference_Preference { + if m != nil { + return m.Preference + } + return nil +} + +func (m *PlacementPreference) GetSpread() *SpreadOver { + if x, ok := m.GetPreference().(*PlacementPreference_Spread); ok { + return x.Spread + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PlacementPreference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _PlacementPreference_OneofMarshaler, _PlacementPreference_OneofUnmarshaler, _PlacementPreference_OneofSizer, []interface{}{
+ (*PlacementPreference_Spread)(nil),
+ }
+}
+
+func _PlacementPreference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*PlacementPreference)
+ // Preference
+ switch x := m.Preference.(type) {
+ case *PlacementPreference_Spread:
+ _ = b.EncodeVarint(1<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Spread); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("PlacementPreference.Preference has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _PlacementPreference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*PlacementPreference)
+ switch tag {
+ case 1: // Preference.spread
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(SpreadOver)
+ err := b.DecodeMessage(msg)
+ m.Preference = &PlacementPreference_Spread{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _PlacementPreference_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*PlacementPreference)
+ // Preference
+ switch x := m.Preference.(type) {
+ case *PlacementPreference_Spread:
+ s := proto.Size(x.Spread)
+ n += proto.SizeVarint(1<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// Placement specifies task distribution constraints.
+type Placement struct {
+ // Constraints specifies a set of requirements a node should meet for a task.
+ Constraints []string `protobuf:"bytes,1,rep,name=constraints" json:"constraints,omitempty"`
+ // Preferences provide a way to make the scheduler aware of factors
+ // such as topology. They are provided in order from highest to lowest
+ // precedence.
+ Preferences []*PlacementPreference `protobuf:"bytes,2,rep,name=preferences" json:"preferences,omitempty"`
+ // Platforms stores all the platforms that the image can run on.
+ // This field is used in the platform filter for scheduling. If empty,
+ // then the platform filter is off, meaning there are no scheduling restrictions.
+ Platforms []*Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms,omitempty"`
+ // MaxReplicas specifies the limit for the maximum number of replicas running on one node.
+ MaxReplicas uint64 `protobuf:"varint,4,opt,name=max_replicas,json=maxReplicas,proto3" json:"max_replicas,omitempty"`
+}
+
+func (m *Placement) Reset() { *m = Placement{} }
+func (*Placement) ProtoMessage() {}
+func (*Placement) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{41} }
+
+// JoinTokens contains the join tokens for workers and managers.
+type JoinTokens struct {
+ // Worker is the join token workers may use to join the swarm.
+ Worker string `protobuf:"bytes,1,opt,name=worker,proto3" json:"worker,omitempty"`
+ // Manager is the join token managers may use to join the swarm.
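+ //
+ // For illustration: swarm join tokens are strings of the form
+ // "SWMTKN-1-<hash of the CA certificate>-<random secret>", consumed by,
+ // e.g., `docker swarm join --token <token> <manager-addr>:2377`.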
+ Manager string `protobuf:"bytes,2,opt,name=manager,proto3" json:"manager,omitempty"`
+}
+
+func (m *JoinTokens) Reset() { *m = JoinTokens{} }
+func (*JoinTokens) ProtoMessage() {}
+func (*JoinTokens) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{42} }
+
+type RootCA struct {
+ // CAKey is the root CA private key.
+ CAKey []byte `protobuf:"bytes,1,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"`
+ // CACert is the root CA certificate.
+ CACert []byte `protobuf:"bytes,2,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"`
+ // CACertHash is the digest of the CA certificate.
+ CACertHash string `protobuf:"bytes,3,opt,name=ca_cert_hash,json=caCertHash,proto3" json:"ca_cert_hash,omitempty"`
+ // JoinTokens contains the join tokens for workers and managers.
+ JoinTokens JoinTokens `protobuf:"bytes,4,opt,name=join_tokens,json=joinTokens" json:"join_tokens"`
+ // RootRotation contains the new root cert and key we want to rotate to. If this is nil, we are not in the
+ // middle of a root rotation.
+ RootRotation *RootRotation `protobuf:"bytes,5,opt,name=root_rotation,json=rootRotation" json:"root_rotation,omitempty"`
+ // LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotate counter.
+ // It indicates when the current CA cert and key were generated (or updated).
+ LastForcedRotation uint64 `protobuf:"varint,6,opt,name=last_forced_rotation,json=lastForcedRotation,proto3" json:"last_forced_rotation,omitempty"`
+}
+
+func (m *RootCA) Reset() { *m = RootCA{} }
+func (*RootCA) ProtoMessage() {}
+func (*RootCA) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{43} }
+
+type Certificate struct {
+ Role NodeRole `protobuf:"varint,1,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+ CSR []byte `protobuf:"bytes,2,opt,name=csr,proto3" json:"csr,omitempty"`
+ Status IssuanceStatus `protobuf:"bytes,3,opt,name=status" json:"status"`
+ Certificate []byte `protobuf:"bytes,4,opt,name=certificate,proto3" json:"certificate,omitempty"`
+ // CN represents the node ID.
+ CN string `protobuf:"bytes,5,opt,name=cn,proto3" json:"cn,omitempty"`
+}
+
+func (m *Certificate) Reset() { *m = Certificate{} }
+func (*Certificate) ProtoMessage() {}
+func (*Certificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{44} }
+
+// Symmetric keys to encrypt inter-agent communication.
+type EncryptionKey struct {
+ // Agent subsystem the key is intended for. Example:
+ // networking:gossip
+ Subsystem string `protobuf:"bytes,1,opt,name=subsystem,proto3" json:"subsystem,omitempty"`
+ Algorithm EncryptionKey_Algorithm `protobuf:"varint,2,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.EncryptionKey_Algorithm" json:"algorithm,omitempty"`
+ Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+ // Timestamp from the Lamport clock of the key allocator to
+ // identify the relative age of the key.
+ LamportTime uint64 `protobuf:"varint,4,opt,name=lamport_time,json=lamportTime,proto3" json:"lamport_time,omitempty"`
+}
+
+func (m *EncryptionKey) Reset() { *m = EncryptionKey{} }
+func (*EncryptionKey) ProtoMessage() {}
+func (*EncryptionKey) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{45} }
+
+// ManagerStatus provides information about the state of a manager in the cluster.
+type ManagerStatus struct {
+ // RaftID specifies the internal ID used by the manager in a raft context; it can never be modified
+ // and is used only for informational purposes.
+ RaftID uint64 `protobuf:"varint,1,opt,name=raft_id,json=raftId,proto3" json:"raft_id,omitempty"`
+ // Addr is the address advertised to raft.
+ Addr string `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"`
+ // Leader is set to true if this node is the raft leader.
+ Leader bool `protobuf:"varint,3,opt,name=leader,proto3" json:"leader,omitempty"`
+ // Reachability specifies whether this node is reachable.
+ Reachability RaftMemberStatus_Reachability `protobuf:"varint,4,opt,name=reachability,proto3,enum=docker.swarmkit.v1.RaftMemberStatus_Reachability" json:"reachability,omitempty"`
+}
+
+func (m *ManagerStatus) Reset() { *m = ManagerStatus{} }
+func (*ManagerStatus) ProtoMessage() {}
+func (*ManagerStatus) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{46} }
+
+// FileTarget represents a specific target that is backed by a file.
+type FileTarget struct {
+ // Name represents the final filename in the filesystem.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // UID represents the file UID.
+ UID string `protobuf:"bytes,2,opt,name=uid,proto3" json:"uid,omitempty"`
+ // GID represents the file GID.
+ GID string `protobuf:"bytes,3,opt,name=gid,proto3" json:"gid,omitempty"`
+ // Mode represents the FileMode of the file.
+ Mode os.FileMode `protobuf:"varint,4,opt,name=mode,proto3,customtype=os.FileMode" json:"mode"`
+}
+
+func (m *FileTarget) Reset() { *m = FileTarget{} }
+func (*FileTarget) ProtoMessage() {}
+func (*FileTarget) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{47} }
+
+// RuntimeTarget indicates that this secret is _not_ mounted into the
+// container, but is used for some other purpose by the container runtime.
+//
+// Currently, RuntimeTarget has no fields; it's just a placeholder.
+type RuntimeTarget struct {
+}
+
+func (m *RuntimeTarget) Reset() { *m = RuntimeTarget{} }
+func (*RuntimeTarget) ProtoMessage() {}
+func (*RuntimeTarget) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{48} }
+
+// SecretReference is the linkage between a service and a secret that it uses.
+type SecretReference struct {
+ // SecretID represents the ID of the specific Secret that we're
+ // referencing. This identifier exists so that SecretReferences don't leak
+ // any information about the secret contents.
+ SecretID string `protobuf:"bytes,1,opt,name=secret_id,json=secretId,proto3" json:"secret_id,omitempty"`
+ // SecretName is the name of the secret that this references, but this is just provided for
+ // lookup/display purposes. The secret in the reference will be identified by its ID.
+ SecretName string `protobuf:"bytes,2,opt,name=secret_name,json=secretName,proto3" json:"secret_name,omitempty"`
+ // Target specifies how this secret should be exposed to the task.
+ // + // Types that are valid to be assigned to Target: + // *SecretReference_File + Target isSecretReference_Target `protobuf_oneof:"target"` +} + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{49} } + +type isSecretReference_Target interface { + isSecretReference_Target() + MarshalTo([]byte) (int, error) + Size() int +} + +type SecretReference_File struct { + File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"` +} + +func (*SecretReference_File) isSecretReference_Target() {} + +func (m *SecretReference) GetTarget() isSecretReference_Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SecretReference) GetFile() *FileTarget { + if x, ok := m.GetTarget().(*SecretReference_File); ok { + return x.File + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecretReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecretReference_OneofMarshaler, _SecretReference_OneofUnmarshaler, _SecretReference_OneofSizer, []interface{}{ + (*SecretReference_File)(nil), + } +} + +func _SecretReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecretReference) + // target + switch x := m.Target.(type) { + case *SecretReference_File: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.File); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecretReference.Target has unexpected type %T", x) + } + return nil +} + +func _SecretReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecretReference) + switch tag { + case 3: // target.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileTarget) + err := b.DecodeMessage(msg) + m.Target = &SecretReference_File{msg} + return true, err + default: + return false, nil + } +} + +func _SecretReference_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecretReference) + // target + switch x := m.Target.(type) { + case *SecretReference_File: + s := proto.Size(x.File) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// ConfigReference is the linkage between a service and a config that it uses. +type ConfigReference struct { + // ConfigID represents the ID of the specific Config that we're + // referencing. + ConfigID string `protobuf:"bytes,1,opt,name=config_id,json=configId,proto3" json:"config_id,omitempty"` + // ConfigName is the name of the config that this references, but this is just provided for + // lookup/display purposes. The config in the reference will be identified by its ID. + ConfigName string `protobuf:"bytes,2,opt,name=config_name,json=configName,proto3" json:"config_name,omitempty"` + // Target specifies how this config should be exposed to the task. 
+ //
+ // Types that are valid to be assigned to Target:
+ // *ConfigReference_File
+ // *ConfigReference_Runtime
+ Target isConfigReference_Target `protobuf_oneof:"target"`
+}
+
+func (m *ConfigReference) Reset() { *m = ConfigReference{} }
+func (*ConfigReference) ProtoMessage() {}
+func (*ConfigReference) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{50} }
+
+type isConfigReference_Target interface {
+ isConfigReference_Target()
+ MarshalTo([]byte) (int, error)
+ Size() int
+}
+
+type ConfigReference_File struct {
+ File *FileTarget `protobuf:"bytes,3,opt,name=file,oneof"`
+}
+type ConfigReference_Runtime struct {
+ Runtime *RuntimeTarget `protobuf:"bytes,4,opt,name=runtime,oneof"`
+}
+
+func (*ConfigReference_File) isConfigReference_Target() {}
+func (*ConfigReference_Runtime) isConfigReference_Target() {}
+
+func (m *ConfigReference) GetTarget() isConfigReference_Target {
+ if m != nil {
+ return m.Target
+ }
+ return nil
+}
+
+func (m *ConfigReference) GetFile() *FileTarget {
+ if x, ok := m.GetTarget().(*ConfigReference_File); ok {
+ return x.File
+ }
+ return nil
+}
+
+func (m *ConfigReference) GetRuntime() *RuntimeTarget {
+ if x, ok := m.GetTarget().(*ConfigReference_Runtime); ok {
+ return x.Runtime
+ }
+ return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ConfigReference) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+ return _ConfigReference_OneofMarshaler, _ConfigReference_OneofUnmarshaler, _ConfigReference_OneofSizer, []interface{}{
+ (*ConfigReference_File)(nil),
+ (*ConfigReference_Runtime)(nil),
+ }
+}
+
+func _ConfigReference_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+ m := msg.(*ConfigReference)
+ // target
+ switch x := m.Target.(type) {
+ case *ConfigReference_File:
+ _ = b.EncodeVarint(3<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.File); err != nil {
+ return err
+ }
+ case *ConfigReference_Runtime:
+ _ = b.EncodeVarint(4<<3 | proto.WireBytes)
+ if err := b.EncodeMessage(x.Runtime); err != nil {
+ return err
+ }
+ case nil:
+ default:
+ return fmt.Errorf("ConfigReference.Target has unexpected type %T", x)
+ }
+ return nil
+}
+
+func _ConfigReference_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+ m := msg.(*ConfigReference)
+ switch tag {
+ case 3: // target.file
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(FileTarget)
+ err := b.DecodeMessage(msg)
+ m.Target = &ConfigReference_File{msg}
+ return true, err
+ case 4: // target.runtime
+ if wire != proto.WireBytes {
+ return true, proto.ErrInternalBadWireType
+ }
+ msg := new(RuntimeTarget)
+ err := b.DecodeMessage(msg)
+ m.Target = &ConfigReference_Runtime{msg}
+ return true, err
+ default:
+ return false, nil
+ }
+}
+
+func _ConfigReference_OneofSizer(msg proto.Message) (n int) {
+ m := msg.(*ConfigReference)
+ // target
+ switch x := m.Target.(type) {
+ case *ConfigReference_File:
+ s := proto.Size(x.File)
+ n += proto.SizeVarint(3<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case *ConfigReference_Runtime:
+ s := proto.Size(x.Runtime)
+ n += proto.SizeVarint(4<<3 | proto.WireBytes)
+ n += proto.SizeVarint(uint64(s))
+ n += s
+ case nil:
+ default:
+ panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+ }
+ return n
+}
+
+// BlacklistedCertificate is a record for a blacklisted certificate. It does not
+// contain the certificate's CN, because these records are indexed by CN.
+type BlacklistedCertificate struct {
+ // Expiry is the latest known expiration time of a certificate that
+ // was issued for the given CN.
+ // Note: can't use stdtime because this field is nullable.
+ Expiry *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=expiry" json:"expiry,omitempty"`
+}
+
+func (m *BlacklistedCertificate) Reset() { *m = BlacklistedCertificate{} }
+func (*BlacklistedCertificate) ProtoMessage() {}
+func (*BlacklistedCertificate) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{51} }
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+type HealthConfig struct {
+ // Test is the test to perform to check that the container is healthy.
+ // An empty slice means to inherit the default.
+ // The options are:
+ // {} : inherit healthcheck
+ // {"NONE"} : disable healthcheck
+ // {"CMD", args...} : exec arguments directly
+ // {"CMD-SHELL", command} : run command with system's default shell
+ Test []string `protobuf:"bytes,1,rep,name=test" json:"test,omitempty"`
+ // Interval is the time to wait between checks. Zero means inherit.
+ // Note: can't use stdduration because this field needs to be nullable.
+ Interval *google_protobuf1.Duration `protobuf:"bytes,2,opt,name=interval" json:"interval,omitempty"`
+ // Timeout is the time to wait before considering the check to have hung.
+ // Zero means inherit.
+ // Note: can't use stdduration because this field needs to be nullable.
+ Timeout *google_protobuf1.Duration `protobuf:"bytes,3,opt,name=timeout" json:"timeout,omitempty"`
+ // Retries is the number of consecutive failures needed to consider a
+ // container as unhealthy. Zero means inherit.
+ Retries int32 `protobuf:"varint,4,opt,name=retries,proto3" json:"retries,omitempty"`
+ // StartPeriod is the period for container initialization during
+ // which health check failures will not count towards the maximum
+ // number of retries.
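+ //
+ // For illustration only (an editor's sketch): a shell-based HTTP check,
+ // probing every 30s with a 5s timeout and three retries, could be encoded
+ // as (again assuming the gogo types helper is imported as gogotypes):
+ //
+ //	hc := &HealthConfig{
+ //		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
+ //		Interval: gogotypes.DurationProto(30 * time.Second),
+ //		Timeout:  gogotypes.DurationProto(5 * time.Second),
+ //		Retries:  3,
+ //	}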
+ StartPeriod *google_protobuf1.Duration `protobuf:"bytes,5,opt,name=start_period,json=startPeriod" json:"start_period,omitempty"` +} + +func (m *HealthConfig) Reset() { *m = HealthConfig{} } +func (*HealthConfig) ProtoMessage() {} +func (*HealthConfig) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{52} } + +type MaybeEncryptedRecord struct { + Algorithm MaybeEncryptedRecord_Algorithm `protobuf:"varint,1,opt,name=algorithm,proto3,enum=docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm" json:"algorithm,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + Nonce []byte `protobuf:"bytes,3,opt,name=nonce,proto3" json:"nonce,omitempty"` +} + +func (m *MaybeEncryptedRecord) Reset() { *m = MaybeEncryptedRecord{} } +func (*MaybeEncryptedRecord) ProtoMessage() {} +func (*MaybeEncryptedRecord) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{53} } + +type RootRotation struct { + CACert []byte `protobuf:"bytes,1,opt,name=ca_cert,json=caCert,proto3" json:"ca_cert,omitempty"` + CAKey []byte `protobuf:"bytes,2,opt,name=ca_key,json=caKey,proto3" json:"ca_key,omitempty"` + // cross-signed CA cert is the CACert that has been cross-signed by the previous root + CrossSignedCACert []byte `protobuf:"bytes,3,opt,name=cross_signed_ca_cert,json=crossSignedCaCert,proto3" json:"cross_signed_ca_cert,omitempty"` +} + +func (m *RootRotation) Reset() { *m = RootRotation{} } +func (*RootRotation) ProtoMessage() {} +func (*RootRotation) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{54} } + +// Privileges specifies security configuration/permissions. +type Privileges struct { + CredentialSpec *Privileges_CredentialSpec `protobuf:"bytes,1,opt,name=credential_spec,json=credentialSpec" json:"credential_spec,omitempty"` + SELinuxContext *Privileges_SELinuxContext `protobuf:"bytes,2,opt,name=selinux_context,json=selinuxContext" json:"selinux_context,omitempty"` +} + +func (m *Privileges) Reset() { *m = Privileges{} } +func (*Privileges) ProtoMessage() {} +func (*Privileges) Descriptor() ([]byte, []int) { return fileDescriptorTypes, []int{55} } + +// CredentialSpec for managed service account (Windows only). 
+type Privileges_CredentialSpec struct { + // Types that are valid to be assigned to Source: + // *Privileges_CredentialSpec_File + // *Privileges_CredentialSpec_Registry + // *Privileges_CredentialSpec_Config + Source isPrivileges_CredentialSpec_Source `protobuf_oneof:"source"` +} + +func (m *Privileges_CredentialSpec) Reset() { *m = Privileges_CredentialSpec{} } +func (*Privileges_CredentialSpec) ProtoMessage() {} +func (*Privileges_CredentialSpec) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{55, 0} +} + +type isPrivileges_CredentialSpec_Source interface { + isPrivileges_CredentialSpec_Source() + MarshalTo([]byte) (int, error) + Size() int +} + +type Privileges_CredentialSpec_File struct { + File string `protobuf:"bytes,1,opt,name=file,proto3,oneof"` +} +type Privileges_CredentialSpec_Registry struct { + Registry string `protobuf:"bytes,2,opt,name=registry,proto3,oneof"` +} +type Privileges_CredentialSpec_Config struct { + Config string `protobuf:"bytes,3,opt,name=config,proto3,oneof"` +} + +func (*Privileges_CredentialSpec_File) isPrivileges_CredentialSpec_Source() {} +func (*Privileges_CredentialSpec_Registry) isPrivileges_CredentialSpec_Source() {} +func (*Privileges_CredentialSpec_Config) isPrivileges_CredentialSpec_Source() {} + +func (m *Privileges_CredentialSpec) GetSource() isPrivileges_CredentialSpec_Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Privileges_CredentialSpec) GetFile() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_File); ok { + return x.File + } + return "" +} + +func (m *Privileges_CredentialSpec) GetRegistry() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_Registry); ok { + return x.Registry + } + return "" +} + +func (m *Privileges_CredentialSpec) GetConfig() string { + if x, ok := m.GetSource().(*Privileges_CredentialSpec_Config); ok { + return x.Config + } + return "" +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Privileges_CredentialSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Privileges_CredentialSpec_OneofMarshaler, _Privileges_CredentialSpec_OneofUnmarshaler, _Privileges_CredentialSpec_OneofSizer, []interface{}{ + (*Privileges_CredentialSpec_File)(nil), + (*Privileges_CredentialSpec_Registry)(nil), + (*Privileges_CredentialSpec_Config)(nil), + } +} + +func _Privileges_CredentialSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.File) + case *Privileges_CredentialSpec_Registry: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Registry) + case *Privileges_CredentialSpec_Config: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Config) + case nil: + default: + return fmt.Errorf("Privileges_CredentialSpec.Source has unexpected type %T", x) + } + return nil +} + +func _Privileges_CredentialSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Privileges_CredentialSpec) + switch tag { + case 1: // source.file + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_File{x} + return true, err + case 2: // source.registry + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_Registry{x} + return true, err + case 3: // source.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Source = &Privileges_CredentialSpec_Config{x} + return true, err + default: + return false, nil + } +} + +func _Privileges_CredentialSpec_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Privileges_CredentialSpec) + // source + switch x := m.Source.(type) { + case *Privileges_CredentialSpec_File: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.File))) + n += len(x.File) + case *Privileges_CredentialSpec_Registry: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Registry))) + n += len(x.Registry) + case *Privileges_CredentialSpec_Config: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Config))) + n += len(x.Config) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// SELinuxContext contains the SELinux labels for the container. 
+type Privileges_SELinuxContext struct { + Disable bool `protobuf:"varint,1,opt,name=disable,proto3" json:"disable,omitempty"` + User string `protobuf:"bytes,2,opt,name=user,proto3" json:"user,omitempty"` + Role string `protobuf:"bytes,3,opt,name=role,proto3" json:"role,omitempty"` + Type string `protobuf:"bytes,4,opt,name=type,proto3" json:"type,omitempty"` + Level string `protobuf:"bytes,5,opt,name=level,proto3" json:"level,omitempty"` +} + +func (m *Privileges_SELinuxContext) Reset() { *m = Privileges_SELinuxContext{} } +func (*Privileges_SELinuxContext) ProtoMessage() {} +func (*Privileges_SELinuxContext) Descriptor() ([]byte, []int) { + return fileDescriptorTypes, []int{55, 1} +} + +func init() { + proto.RegisterType((*Version)(nil), "docker.swarmkit.v1.Version") + proto.RegisterType((*IndexEntry)(nil), "docker.swarmkit.v1.IndexEntry") + proto.RegisterType((*Annotations)(nil), "docker.swarmkit.v1.Annotations") + proto.RegisterType((*NamedGenericResource)(nil), "docker.swarmkit.v1.NamedGenericResource") + proto.RegisterType((*DiscreteGenericResource)(nil), "docker.swarmkit.v1.DiscreteGenericResource") + proto.RegisterType((*GenericResource)(nil), "docker.swarmkit.v1.GenericResource") + proto.RegisterType((*Resources)(nil), "docker.swarmkit.v1.Resources") + proto.RegisterType((*ResourceRequirements)(nil), "docker.swarmkit.v1.ResourceRequirements") + proto.RegisterType((*Platform)(nil), "docker.swarmkit.v1.Platform") + proto.RegisterType((*PluginDescription)(nil), "docker.swarmkit.v1.PluginDescription") + proto.RegisterType((*EngineDescription)(nil), "docker.swarmkit.v1.EngineDescription") + proto.RegisterType((*NodeDescription)(nil), "docker.swarmkit.v1.NodeDescription") + proto.RegisterType((*NodeTLSInfo)(nil), "docker.swarmkit.v1.NodeTLSInfo") + proto.RegisterType((*RaftMemberStatus)(nil), "docker.swarmkit.v1.RaftMemberStatus") + proto.RegisterType((*NodeStatus)(nil), "docker.swarmkit.v1.NodeStatus") + proto.RegisterType((*Image)(nil), "docker.swarmkit.v1.Image") + proto.RegisterType((*Mount)(nil), "docker.swarmkit.v1.Mount") + proto.RegisterType((*Mount_BindOptions)(nil), "docker.swarmkit.v1.Mount.BindOptions") + proto.RegisterType((*Mount_VolumeOptions)(nil), "docker.swarmkit.v1.Mount.VolumeOptions") + proto.RegisterType((*Mount_TmpfsOptions)(nil), "docker.swarmkit.v1.Mount.TmpfsOptions") + proto.RegisterType((*RestartPolicy)(nil), "docker.swarmkit.v1.RestartPolicy") + proto.RegisterType((*UpdateConfig)(nil), "docker.swarmkit.v1.UpdateConfig") + proto.RegisterType((*UpdateStatus)(nil), "docker.swarmkit.v1.UpdateStatus") + proto.RegisterType((*ContainerStatus)(nil), "docker.swarmkit.v1.ContainerStatus") + proto.RegisterType((*PortStatus)(nil), "docker.swarmkit.v1.PortStatus") + proto.RegisterType((*TaskStatus)(nil), "docker.swarmkit.v1.TaskStatus") + proto.RegisterType((*NetworkAttachmentConfig)(nil), "docker.swarmkit.v1.NetworkAttachmentConfig") + proto.RegisterType((*IPAMConfig)(nil), "docker.swarmkit.v1.IPAMConfig") + proto.RegisterType((*PortConfig)(nil), "docker.swarmkit.v1.PortConfig") + proto.RegisterType((*Driver)(nil), "docker.swarmkit.v1.Driver") + proto.RegisterType((*IPAMOptions)(nil), "docker.swarmkit.v1.IPAMOptions") + proto.RegisterType((*Peer)(nil), "docker.swarmkit.v1.Peer") + proto.RegisterType((*WeightedPeer)(nil), "docker.swarmkit.v1.WeightedPeer") + proto.RegisterType((*IssuanceStatus)(nil), "docker.swarmkit.v1.IssuanceStatus") + proto.RegisterType((*AcceptancePolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy") + 
proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy") + proto.RegisterType((*AcceptancePolicy_RoleAdmissionPolicy_Secret)(nil), "docker.swarmkit.v1.AcceptancePolicy.RoleAdmissionPolicy.Secret") + proto.RegisterType((*ExternalCA)(nil), "docker.swarmkit.v1.ExternalCA") + proto.RegisterType((*CAConfig)(nil), "docker.swarmkit.v1.CAConfig") + proto.RegisterType((*OrchestrationConfig)(nil), "docker.swarmkit.v1.OrchestrationConfig") + proto.RegisterType((*TaskDefaults)(nil), "docker.swarmkit.v1.TaskDefaults") + proto.RegisterType((*DispatcherConfig)(nil), "docker.swarmkit.v1.DispatcherConfig") + proto.RegisterType((*RaftConfig)(nil), "docker.swarmkit.v1.RaftConfig") + proto.RegisterType((*EncryptionConfig)(nil), "docker.swarmkit.v1.EncryptionConfig") + proto.RegisterType((*SpreadOver)(nil), "docker.swarmkit.v1.SpreadOver") + proto.RegisterType((*PlacementPreference)(nil), "docker.swarmkit.v1.PlacementPreference") + proto.RegisterType((*Placement)(nil), "docker.swarmkit.v1.Placement") + proto.RegisterType((*JoinTokens)(nil), "docker.swarmkit.v1.JoinTokens") + proto.RegisterType((*RootCA)(nil), "docker.swarmkit.v1.RootCA") + proto.RegisterType((*Certificate)(nil), "docker.swarmkit.v1.Certificate") + proto.RegisterType((*EncryptionKey)(nil), "docker.swarmkit.v1.EncryptionKey") + proto.RegisterType((*ManagerStatus)(nil), "docker.swarmkit.v1.ManagerStatus") + proto.RegisterType((*FileTarget)(nil), "docker.swarmkit.v1.FileTarget") + proto.RegisterType((*RuntimeTarget)(nil), "docker.swarmkit.v1.RuntimeTarget") + proto.RegisterType((*SecretReference)(nil), "docker.swarmkit.v1.SecretReference") + proto.RegisterType((*ConfigReference)(nil), "docker.swarmkit.v1.ConfigReference") + proto.RegisterType((*BlacklistedCertificate)(nil), "docker.swarmkit.v1.BlacklistedCertificate") + proto.RegisterType((*HealthConfig)(nil), "docker.swarmkit.v1.HealthConfig") + proto.RegisterType((*MaybeEncryptedRecord)(nil), "docker.swarmkit.v1.MaybeEncryptedRecord") + proto.RegisterType((*RootRotation)(nil), "docker.swarmkit.v1.RootRotation") + proto.RegisterType((*Privileges)(nil), "docker.swarmkit.v1.Privileges") + proto.RegisterType((*Privileges_CredentialSpec)(nil), "docker.swarmkit.v1.Privileges.CredentialSpec") + proto.RegisterType((*Privileges_SELinuxContext)(nil), "docker.swarmkit.v1.Privileges.SELinuxContext") + proto.RegisterEnum("docker.swarmkit.v1.ResourceType", ResourceType_name, ResourceType_value) + proto.RegisterEnum("docker.swarmkit.v1.TaskState", TaskState_name, TaskState_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeRole", NodeRole_name, NodeRole_value) + proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value) + proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_MountConsistency", Mount_MountConsistency_name, Mount_MountConsistency_value) + proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value) + proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, 
UpdateConfig_FailureAction_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_UpdateOrder", UpdateConfig_UpdateOrder_name, UpdateConfig_UpdateOrder_value) + proto.RegisterEnum("docker.swarmkit.v1.UpdateStatus_UpdateState", UpdateStatus_UpdateState_name, UpdateStatus_UpdateState_value) + proto.RegisterEnum("docker.swarmkit.v1.IPAMConfig_AddressFamily", IPAMConfig_AddressFamily_name, IPAMConfig_AddressFamily_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_Protocol", PortConfig_Protocol_name, PortConfig_Protocol_value) + proto.RegisterEnum("docker.swarmkit.v1.PortConfig_PublishMode", PortConfig_PublishMode_name, PortConfig_PublishMode_value) + proto.RegisterEnum("docker.swarmkit.v1.IssuanceStatus_State", IssuanceStatus_State_name, IssuanceStatus_State_value) + proto.RegisterEnum("docker.swarmkit.v1.ExternalCA_CAProtocol", ExternalCA_CAProtocol_name, ExternalCA_CAProtocol_value) + proto.RegisterEnum("docker.swarmkit.v1.EncryptionKey_Algorithm", EncryptionKey_Algorithm_name, EncryptionKey_Algorithm_value) + proto.RegisterEnum("docker.swarmkit.v1.MaybeEncryptedRecord_Algorithm", MaybeEncryptedRecord_Algorithm_name, MaybeEncryptedRecord_Algorithm_value) +} + +func (m *Version) Copy() *Version { + if m == nil { + return nil + } + o := &Version{} + o.CopyFrom(m) + return o +} + +func (m *Version) CopyFrom(src interface{}) { + + o := src.(*Version) + *m = *o +} + +func (m *IndexEntry) Copy() *IndexEntry { + if m == nil { + return nil + } + o := &IndexEntry{} + o.CopyFrom(m) + return o +} + +func (m *IndexEntry) CopyFrom(src interface{}) { + + o := src.(*IndexEntry) + *m = *o +} + +func (m *Annotations) Copy() *Annotations { + if m == nil { + return nil + } + o := &Annotations{} + o.CopyFrom(m) + return o +} + +func (m *Annotations) CopyFrom(src interface{}) { + + o := src.(*Annotations) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Indices != nil { + m.Indices = make([]IndexEntry, len(o.Indices)) + for i := range m.Indices { + deepcopy.Copy(&m.Indices[i], &o.Indices[i]) + } + } + +} + +func (m *NamedGenericResource) Copy() *NamedGenericResource { + if m == nil { + return nil + } + o := &NamedGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *NamedGenericResource) CopyFrom(src interface{}) { + + o := src.(*NamedGenericResource) + *m = *o +} + +func (m *DiscreteGenericResource) Copy() *DiscreteGenericResource { + if m == nil { + return nil + } + o := &DiscreteGenericResource{} + o.CopyFrom(m) + return o +} + +func (m *DiscreteGenericResource) CopyFrom(src interface{}) { + + o := src.(*DiscreteGenericResource) + *m = *o +} + +func (m *GenericResource) Copy() *GenericResource { + if m == nil { + return nil + } + o := &GenericResource{} + o.CopyFrom(m) + return o +} + +func (m *GenericResource) CopyFrom(src interface{}) { + + o := src.(*GenericResource) + *m = *o + if o.Resource != nil { + switch o.Resource.(type) { + case *GenericResource_NamedResourceSpec: + v := GenericResource_NamedResourceSpec{ + NamedResourceSpec: &NamedGenericResource{}, + } + deepcopy.Copy(v.NamedResourceSpec, o.GetNamedResourceSpec()) + m.Resource = &v + case *GenericResource_DiscreteResourceSpec: + v := GenericResource_DiscreteResourceSpec{ + DiscreteResourceSpec: &DiscreteGenericResource{}, + } + deepcopy.Copy(v.DiscreteResourceSpec, o.GetDiscreteResourceSpec()) + m.Resource = &v + } + } + +} + +func (m *Resources) Copy() *Resources { + if m == nil { + return nil + } + o := &Resources{} + 
o.CopyFrom(m) + return o +} + +func (m *Resources) CopyFrom(src interface{}) { + + o := src.(*Resources) + *m = *o + if o.Generic != nil { + m.Generic = make([]*GenericResource, len(o.Generic)) + for i := range m.Generic { + m.Generic[i] = &GenericResource{} + deepcopy.Copy(m.Generic[i], o.Generic[i]) + } + } + +} + +func (m *ResourceRequirements) Copy() *ResourceRequirements { + if m == nil { + return nil + } + o := &ResourceRequirements{} + o.CopyFrom(m) + return o +} + +func (m *ResourceRequirements) CopyFrom(src interface{}) { + + o := src.(*ResourceRequirements) + *m = *o + if o.Limits != nil { + m.Limits = &Resources{} + deepcopy.Copy(m.Limits, o.Limits) + } + if o.Reservations != nil { + m.Reservations = &Resources{} + deepcopy.Copy(m.Reservations, o.Reservations) + } + if o.SwapBytes != nil { + m.SwapBytes = &google_protobuf2.Int64Value{} + deepcopy.Copy(m.SwapBytes, o.SwapBytes) + } + if o.MemorySwappiness != nil { + m.MemorySwappiness = &google_protobuf2.Int64Value{} + deepcopy.Copy(m.MemorySwappiness, o.MemorySwappiness) + } +} + +func (m *Platform) Copy() *Platform { + if m == nil { + return nil + } + o := &Platform{} + o.CopyFrom(m) + return o +} + +func (m *Platform) CopyFrom(src interface{}) { + + o := src.(*Platform) + *m = *o +} + +func (m *PluginDescription) Copy() *PluginDescription { + if m == nil { + return nil + } + o := &PluginDescription{} + o.CopyFrom(m) + return o +} + +func (m *PluginDescription) CopyFrom(src interface{}) { + + o := src.(*PluginDescription) + *m = *o +} + +func (m *EngineDescription) Copy() *EngineDescription { + if m == nil { + return nil + } + o := &EngineDescription{} + o.CopyFrom(m) + return o +} + +func (m *EngineDescription) CopyFrom(src interface{}) { + + o := src.(*EngineDescription) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.Plugins != nil { + m.Plugins = make([]PluginDescription, len(o.Plugins)) + for i := range m.Plugins { + deepcopy.Copy(&m.Plugins[i], &o.Plugins[i]) + } + } + +} + +func (m *NodeDescription) Copy() *NodeDescription { + if m == nil { + return nil + } + o := &NodeDescription{} + o.CopyFrom(m) + return o +} + +func (m *NodeDescription) CopyFrom(src interface{}) { + + o := src.(*NodeDescription) + *m = *o + if o.Platform != nil { + m.Platform = &Platform{} + deepcopy.Copy(m.Platform, o.Platform) + } + if o.Resources != nil { + m.Resources = &Resources{} + deepcopy.Copy(m.Resources, o.Resources) + } + if o.Engine != nil { + m.Engine = &EngineDescription{} + deepcopy.Copy(m.Engine, o.Engine) + } + if o.TLSInfo != nil { + m.TLSInfo = &NodeTLSInfo{} + deepcopy.Copy(m.TLSInfo, o.TLSInfo) + } +} + +func (m *NodeTLSInfo) Copy() *NodeTLSInfo { + if m == nil { + return nil + } + o := &NodeTLSInfo{} + o.CopyFrom(m) + return o +} + +func (m *NodeTLSInfo) CopyFrom(src interface{}) { + + o := src.(*NodeTLSInfo) + *m = *o + if o.TrustRoot != nil { + m.TrustRoot = make([]byte, len(o.TrustRoot)) + copy(m.TrustRoot, o.TrustRoot) + } + if o.CertIssuerSubject != nil { + m.CertIssuerSubject = make([]byte, len(o.CertIssuerSubject)) + copy(m.CertIssuerSubject, o.CertIssuerSubject) + } + if o.CertIssuerPublicKey != nil { + m.CertIssuerPublicKey = make([]byte, len(o.CertIssuerPublicKey)) + copy(m.CertIssuerPublicKey, o.CertIssuerPublicKey) + } +} + +func (m *RaftMemberStatus) Copy() *RaftMemberStatus { + if m == nil { + return nil + } + o := &RaftMemberStatus{} + o.CopyFrom(m) + return o +} + +func (m *RaftMemberStatus) CopyFrom(src 
interface{}) { + + o := src.(*RaftMemberStatus) + *m = *o +} + +func (m *NodeStatus) Copy() *NodeStatus { + if m == nil { + return nil + } + o := &NodeStatus{} + o.CopyFrom(m) + return o +} + +func (m *NodeStatus) CopyFrom(src interface{}) { + + o := src.(*NodeStatus) + *m = *o +} + +func (m *Image) Copy() *Image { + if m == nil { + return nil + } + o := &Image{} + o.CopyFrom(m) + return o +} + +func (m *Image) CopyFrom(src interface{}) { + + o := src.(*Image) + *m = *o +} + +func (m *Mount) Copy() *Mount { + if m == nil { + return nil + } + o := &Mount{} + o.CopyFrom(m) + return o +} + +func (m *Mount) CopyFrom(src interface{}) { + + o := src.(*Mount) + *m = *o + if o.BindOptions != nil { + m.BindOptions = &Mount_BindOptions{} + deepcopy.Copy(m.BindOptions, o.BindOptions) + } + if o.VolumeOptions != nil { + m.VolumeOptions = &Mount_VolumeOptions{} + deepcopy.Copy(m.VolumeOptions, o.VolumeOptions) + } + if o.TmpfsOptions != nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + deepcopy.Copy(m.TmpfsOptions, o.TmpfsOptions) + } +} + +func (m *Mount_BindOptions) Copy() *Mount_BindOptions { + if m == nil { + return nil + } + o := &Mount_BindOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_BindOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_BindOptions) + *m = *o +} + +func (m *Mount_VolumeOptions) Copy() *Mount_VolumeOptions { + if m == nil { + return nil + } + o := &Mount_VolumeOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_VolumeOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_VolumeOptions) + *m = *o + if o.Labels != nil { + m.Labels = make(map[string]string, len(o.Labels)) + for k, v := range o.Labels { + m.Labels[k] = v + } + } + + if o.DriverConfig != nil { + m.DriverConfig = &Driver{} + deepcopy.Copy(m.DriverConfig, o.DriverConfig) + } +} + +func (m *Mount_TmpfsOptions) Copy() *Mount_TmpfsOptions { + if m == nil { + return nil + } + o := &Mount_TmpfsOptions{} + o.CopyFrom(m) + return o +} + +func (m *Mount_TmpfsOptions) CopyFrom(src interface{}) { + + o := src.(*Mount_TmpfsOptions) + *m = *o +} + +func (m *RestartPolicy) Copy() *RestartPolicy { + if m == nil { + return nil + } + o := &RestartPolicy{} + o.CopyFrom(m) + return o +} + +func (m *RestartPolicy) CopyFrom(src interface{}) { + + o := src.(*RestartPolicy) + *m = *o + if o.Delay != nil { + m.Delay = &google_protobuf1.Duration{} + deepcopy.Copy(m.Delay, o.Delay) + } + if o.Window != nil { + m.Window = &google_protobuf1.Duration{} + deepcopy.Copy(m.Window, o.Window) + } +} + +func (m *UpdateConfig) Copy() *UpdateConfig { + if m == nil { + return nil + } + o := &UpdateConfig{} + o.CopyFrom(m) + return o +} + +func (m *UpdateConfig) CopyFrom(src interface{}) { + + o := src.(*UpdateConfig) + *m = *o + deepcopy.Copy(&m.Delay, &o.Delay) + if o.Monitor != nil { + m.Monitor = &google_protobuf1.Duration{} + deepcopy.Copy(m.Monitor, o.Monitor) + } +} + +func (m *UpdateStatus) Copy() *UpdateStatus { + if m == nil { + return nil + } + o := &UpdateStatus{} + o.CopyFrom(m) + return o +} + +func (m *UpdateStatus) CopyFrom(src interface{}) { + + o := src.(*UpdateStatus) + *m = *o + if o.StartedAt != nil { + m.StartedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.StartedAt, o.StartedAt) + } + if o.CompletedAt != nil { + m.CompletedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.CompletedAt, o.CompletedAt) + } +} + +func (m *ContainerStatus) Copy() *ContainerStatus { + if m == nil { + return nil + } + o := &ContainerStatus{} + o.CopyFrom(m) + return o +} + +func (m *ContainerStatus) CopyFrom(src 
interface{}) { + + o := src.(*ContainerStatus) + *m = *o +} + +func (m *PortStatus) Copy() *PortStatus { + if m == nil { + return nil + } + o := &PortStatus{} + o.CopyFrom(m) + return o +} + +func (m *PortStatus) CopyFrom(src interface{}) { + + o := src.(*PortStatus) + *m = *o + if o.Ports != nil { + m.Ports = make([]*PortConfig, len(o.Ports)) + for i := range m.Ports { + m.Ports[i] = &PortConfig{} + deepcopy.Copy(m.Ports[i], o.Ports[i]) + } + } + +} + +func (m *TaskStatus) Copy() *TaskStatus { + if m == nil { + return nil + } + o := &TaskStatus{} + o.CopyFrom(m) + return o +} + +func (m *TaskStatus) CopyFrom(src interface{}) { + + o := src.(*TaskStatus) + *m = *o + if o.Timestamp != nil { + m.Timestamp = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Timestamp, o.Timestamp) + } + if o.PortStatus != nil { + m.PortStatus = &PortStatus{} + deepcopy.Copy(m.PortStatus, o.PortStatus) + } + if o.AppliedAt != nil { + m.AppliedAt = &google_protobuf.Timestamp{} + deepcopy.Copy(m.AppliedAt, o.AppliedAt) + } + if o.RuntimeStatus != nil { + switch o.RuntimeStatus.(type) { + case *TaskStatus_Container: + v := TaskStatus_Container{ + Container: &ContainerStatus{}, + } + deepcopy.Copy(v.Container, o.GetContainer()) + m.RuntimeStatus = &v + } + } + +} + +func (m *NetworkAttachmentConfig) Copy() *NetworkAttachmentConfig { + if m == nil { + return nil + } + o := &NetworkAttachmentConfig{} + o.CopyFrom(m) + return o +} + +func (m *NetworkAttachmentConfig) CopyFrom(src interface{}) { + + o := src.(*NetworkAttachmentConfig) + *m = *o + if o.Aliases != nil { + m.Aliases = make([]string, len(o.Aliases)) + copy(m.Aliases, o.Aliases) + } + + if o.Addresses != nil { + m.Addresses = make([]string, len(o.Addresses)) + copy(m.Addresses, o.Addresses) + } + + if o.DriverAttachmentOpts != nil { + m.DriverAttachmentOpts = make(map[string]string, len(o.DriverAttachmentOpts)) + for k, v := range o.DriverAttachmentOpts { + m.DriverAttachmentOpts[k] = v + } + } + +} + +func (m *IPAMConfig) Copy() *IPAMConfig { + if m == nil { + return nil + } + o := &IPAMConfig{} + o.CopyFrom(m) + return o +} + +func (m *IPAMConfig) CopyFrom(src interface{}) { + + o := src.(*IPAMConfig) + *m = *o + if o.Reserved != nil { + m.Reserved = make(map[string]string, len(o.Reserved)) + for k, v := range o.Reserved { + m.Reserved[k] = v + } + } + +} + +func (m *PortConfig) Copy() *PortConfig { + if m == nil { + return nil + } + o := &PortConfig{} + o.CopyFrom(m) + return o +} + +func (m *PortConfig) CopyFrom(src interface{}) { + + o := src.(*PortConfig) + *m = *o +} + +func (m *Driver) Copy() *Driver { + if m == nil { + return nil + } + o := &Driver{} + o.CopyFrom(m) + return o +} + +func (m *Driver) CopyFrom(src interface{}) { + + o := src.(*Driver) + *m = *o + if o.Options != nil { + m.Options = make(map[string]string, len(o.Options)) + for k, v := range o.Options { + m.Options[k] = v + } + } + +} + +func (m *IPAMOptions) Copy() *IPAMOptions { + if m == nil { + return nil + } + o := &IPAMOptions{} + o.CopyFrom(m) + return o +} + +func (m *IPAMOptions) CopyFrom(src interface{}) { + + o := src.(*IPAMOptions) + *m = *o + if o.Driver != nil { + m.Driver = &Driver{} + deepcopy.Copy(m.Driver, o.Driver) + } + if o.Configs != nil { + m.Configs = make([]*IPAMConfig, len(o.Configs)) + for i := range m.Configs { + m.Configs[i] = &IPAMConfig{} + deepcopy.Copy(m.Configs[i], o.Configs[i]) + } + } + +} + +func (m *Peer) Copy() *Peer { + if m == nil { + return nil + } + o := &Peer{} + o.CopyFrom(m) + return o +} + +func (m *Peer) CopyFrom(src interface{}) { + + o 
:= src.(*Peer) + *m = *o +} + +func (m *WeightedPeer) Copy() *WeightedPeer { + if m == nil { + return nil + } + o := &WeightedPeer{} + o.CopyFrom(m) + return o +} + +func (m *WeightedPeer) CopyFrom(src interface{}) { + + o := src.(*WeightedPeer) + *m = *o + if o.Peer != nil { + m.Peer = &Peer{} + deepcopy.Copy(m.Peer, o.Peer) + } +} + +func (m *IssuanceStatus) Copy() *IssuanceStatus { + if m == nil { + return nil + } + o := &IssuanceStatus{} + o.CopyFrom(m) + return o +} + +func (m *IssuanceStatus) CopyFrom(src interface{}) { + + o := src.(*IssuanceStatus) + *m = *o +} + +func (m *AcceptancePolicy) Copy() *AcceptancePolicy { + if m == nil { + return nil + } + o := &AcceptancePolicy{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy) + *m = *o + if o.Policies != nil { + m.Policies = make([]*AcceptancePolicy_RoleAdmissionPolicy, len(o.Policies)) + for i := range m.Policies { + m.Policies[i] = &AcceptancePolicy_RoleAdmissionPolicy{} + deepcopy.Copy(m.Policies[i], o.Policies[i]) + } + } + +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Copy() *AcceptancePolicy_RoleAdmissionPolicy { + if m == nil { + return nil + } + o := &AcceptancePolicy_RoleAdmissionPolicy{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy_RoleAdmissionPolicy) + *m = *o + if o.Secret != nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + deepcopy.Copy(m.Secret, o.Secret) + } +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Copy() *AcceptancePolicy_RoleAdmissionPolicy_Secret { + if m == nil { + return nil + } + o := &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + o.CopyFrom(m) + return o +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) CopyFrom(src interface{}) { + + o := src.(*AcceptancePolicy_RoleAdmissionPolicy_Secret) + *m = *o + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } +} + +func (m *ExternalCA) Copy() *ExternalCA { + if m == nil { + return nil + } + o := &ExternalCA{} + o.CopyFrom(m) + return o +} + +func (m *ExternalCA) CopyFrom(src interface{}) { + + o := src.(*ExternalCA) + *m = *o + if o.Options != nil { + m.Options = make(map[string]string, len(o.Options)) + for k, v := range o.Options { + m.Options[k] = v + } + } + + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } +} + +func (m *CAConfig) Copy() *CAConfig { + if m == nil { + return nil + } + o := &CAConfig{} + o.CopyFrom(m) + return o +} + +func (m *CAConfig) CopyFrom(src interface{}) { + + o := src.(*CAConfig) + *m = *o + if o.NodeCertExpiry != nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + deepcopy.Copy(m.NodeCertExpiry, o.NodeCertExpiry) + } + if o.ExternalCAs != nil { + m.ExternalCAs = make([]*ExternalCA, len(o.ExternalCAs)) + for i := range m.ExternalCAs { + m.ExternalCAs[i] = &ExternalCA{} + deepcopy.Copy(m.ExternalCAs[i], o.ExternalCAs[i]) + } + } + + if o.SigningCACert != nil { + m.SigningCACert = make([]byte, len(o.SigningCACert)) + copy(m.SigningCACert, o.SigningCACert) + } + if o.SigningCAKey != nil { + m.SigningCAKey = make([]byte, len(o.SigningCAKey)) + copy(m.SigningCAKey, o.SigningCAKey) + } +} + +func (m *OrchestrationConfig) Copy() *OrchestrationConfig { + if m == nil { + return nil + } + o := &OrchestrationConfig{} + o.CopyFrom(m) + return o +} + +func (m *OrchestrationConfig) CopyFrom(src interface{}) { + + o := src.(*OrchestrationConfig) + *m = 
*o +} + +func (m *TaskDefaults) Copy() *TaskDefaults { + if m == nil { + return nil + } + o := &TaskDefaults{} + o.CopyFrom(m) + return o +} + +func (m *TaskDefaults) CopyFrom(src interface{}) { + + o := src.(*TaskDefaults) + *m = *o + if o.LogDriver != nil { + m.LogDriver = &Driver{} + deepcopy.Copy(m.LogDriver, o.LogDriver) + } +} + +func (m *DispatcherConfig) Copy() *DispatcherConfig { + if m == nil { + return nil + } + o := &DispatcherConfig{} + o.CopyFrom(m) + return o +} + +func (m *DispatcherConfig) CopyFrom(src interface{}) { + + o := src.(*DispatcherConfig) + *m = *o + if o.HeartbeatPeriod != nil { + m.HeartbeatPeriod = &google_protobuf1.Duration{} + deepcopy.Copy(m.HeartbeatPeriod, o.HeartbeatPeriod) + } +} + +func (m *RaftConfig) Copy() *RaftConfig { + if m == nil { + return nil + } + o := &RaftConfig{} + o.CopyFrom(m) + return o +} + +func (m *RaftConfig) CopyFrom(src interface{}) { + + o := src.(*RaftConfig) + *m = *o +} + +func (m *EncryptionConfig) Copy() *EncryptionConfig { + if m == nil { + return nil + } + o := &EncryptionConfig{} + o.CopyFrom(m) + return o +} + +func (m *EncryptionConfig) CopyFrom(src interface{}) { + + o := src.(*EncryptionConfig) + *m = *o +} + +func (m *SpreadOver) Copy() *SpreadOver { + if m == nil { + return nil + } + o := &SpreadOver{} + o.CopyFrom(m) + return o +} + +func (m *SpreadOver) CopyFrom(src interface{}) { + + o := src.(*SpreadOver) + *m = *o +} + +func (m *PlacementPreference) Copy() *PlacementPreference { + if m == nil { + return nil + } + o := &PlacementPreference{} + o.CopyFrom(m) + return o +} + +func (m *PlacementPreference) CopyFrom(src interface{}) { + + o := src.(*PlacementPreference) + *m = *o + if o.Preference != nil { + switch o.Preference.(type) { + case *PlacementPreference_Spread: + v := PlacementPreference_Spread{ + Spread: &SpreadOver{}, + } + deepcopy.Copy(v.Spread, o.GetSpread()) + m.Preference = &v + } + } + +} + +func (m *Placement) Copy() *Placement { + if m == nil { + return nil + } + o := &Placement{} + o.CopyFrom(m) + return o +} + +func (m *Placement) CopyFrom(src interface{}) { + + o := src.(*Placement) + *m = *o + if o.Constraints != nil { + m.Constraints = make([]string, len(o.Constraints)) + copy(m.Constraints, o.Constraints) + } + + if o.Preferences != nil { + m.Preferences = make([]*PlacementPreference, len(o.Preferences)) + for i := range m.Preferences { + m.Preferences[i] = &PlacementPreference{} + deepcopy.Copy(m.Preferences[i], o.Preferences[i]) + } + } + + if o.Platforms != nil { + m.Platforms = make([]*Platform, len(o.Platforms)) + for i := range m.Platforms { + m.Platforms[i] = &Platform{} + deepcopy.Copy(m.Platforms[i], o.Platforms[i]) + } + } + +} + +func (m *JoinTokens) Copy() *JoinTokens { + if m == nil { + return nil + } + o := &JoinTokens{} + o.CopyFrom(m) + return o +} + +func (m *JoinTokens) CopyFrom(src interface{}) { + + o := src.(*JoinTokens) + *m = *o +} + +func (m *RootCA) Copy() *RootCA { + if m == nil { + return nil + } + o := &RootCA{} + o.CopyFrom(m) + return o +} + +func (m *RootCA) CopyFrom(src interface{}) { + + o := src.(*RootCA) + *m = *o + if o.CAKey != nil { + m.CAKey = make([]byte, len(o.CAKey)) + copy(m.CAKey, o.CAKey) + } + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } + deepcopy.Copy(&m.JoinTokens, &o.JoinTokens) + if o.RootRotation != nil { + m.RootRotation = &RootRotation{} + deepcopy.Copy(m.RootRotation, o.RootRotation) + } +} + +func (m *Certificate) Copy() *Certificate { + if m == nil { + return nil + } + o := 
&Certificate{} + o.CopyFrom(m) + return o +} + +func (m *Certificate) CopyFrom(src interface{}) { + + o := src.(*Certificate) + *m = *o + if o.CSR != nil { + m.CSR = make([]byte, len(o.CSR)) + copy(m.CSR, o.CSR) + } + deepcopy.Copy(&m.Status, &o.Status) + if o.Certificate != nil { + m.Certificate = make([]byte, len(o.Certificate)) + copy(m.Certificate, o.Certificate) + } +} + +func (m *EncryptionKey) Copy() *EncryptionKey { + if m == nil { + return nil + } + o := &EncryptionKey{} + o.CopyFrom(m) + return o +} + +func (m *EncryptionKey) CopyFrom(src interface{}) { + + o := src.(*EncryptionKey) + *m = *o + if o.Key != nil { + m.Key = make([]byte, len(o.Key)) + copy(m.Key, o.Key) + } +} + +func (m *ManagerStatus) Copy() *ManagerStatus { + if m == nil { + return nil + } + o := &ManagerStatus{} + o.CopyFrom(m) + return o +} + +func (m *ManagerStatus) CopyFrom(src interface{}) { + + o := src.(*ManagerStatus) + *m = *o +} + +func (m *FileTarget) Copy() *FileTarget { + if m == nil { + return nil + } + o := &FileTarget{} + o.CopyFrom(m) + return o +} + +func (m *FileTarget) CopyFrom(src interface{}) { + + o := src.(*FileTarget) + *m = *o +} + +func (m *RuntimeTarget) Copy() *RuntimeTarget { + if m == nil { + return nil + } + o := &RuntimeTarget{} + o.CopyFrom(m) + return o +} + +func (m *RuntimeTarget) CopyFrom(src interface{}) {} +func (m *SecretReference) Copy() *SecretReference { + if m == nil { + return nil + } + o := &SecretReference{} + o.CopyFrom(m) + return o +} + +func (m *SecretReference) CopyFrom(src interface{}) { + + o := src.(*SecretReference) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *SecretReference_File: + v := SecretReference_File{ + File: &FileTarget{}, + } + deepcopy.Copy(v.File, o.GetFile()) + m.Target = &v + } + } + +} + +func (m *ConfigReference) Copy() *ConfigReference { + if m == nil { + return nil + } + o := &ConfigReference{} + o.CopyFrom(m) + return o +} + +func (m *ConfigReference) CopyFrom(src interface{}) { + + o := src.(*ConfigReference) + *m = *o + if o.Target != nil { + switch o.Target.(type) { + case *ConfigReference_File: + v := ConfigReference_File{ + File: &FileTarget{}, + } + deepcopy.Copy(v.File, o.GetFile()) + m.Target = &v + case *ConfigReference_Runtime: + v := ConfigReference_Runtime{ + Runtime: &RuntimeTarget{}, + } + deepcopy.Copy(v.Runtime, o.GetRuntime()) + m.Target = &v + } + } + +} + +func (m *BlacklistedCertificate) Copy() *BlacklistedCertificate { + if m == nil { + return nil + } + o := &BlacklistedCertificate{} + o.CopyFrom(m) + return o +} + +func (m *BlacklistedCertificate) CopyFrom(src interface{}) { + + o := src.(*BlacklistedCertificate) + *m = *o + if o.Expiry != nil { + m.Expiry = &google_protobuf.Timestamp{} + deepcopy.Copy(m.Expiry, o.Expiry) + } +} + +func (m *HealthConfig) Copy() *HealthConfig { + if m == nil { + return nil + } + o := &HealthConfig{} + o.CopyFrom(m) + return o +} + +func (m *HealthConfig) CopyFrom(src interface{}) { + + o := src.(*HealthConfig) + *m = *o + if o.Test != nil { + m.Test = make([]string, len(o.Test)) + copy(m.Test, o.Test) + } + + if o.Interval != nil { + m.Interval = &google_protobuf1.Duration{} + deepcopy.Copy(m.Interval, o.Interval) + } + if o.Timeout != nil { + m.Timeout = &google_protobuf1.Duration{} + deepcopy.Copy(m.Timeout, o.Timeout) + } + if o.StartPeriod != nil { + m.StartPeriod = &google_protobuf1.Duration{} + deepcopy.Copy(m.StartPeriod, o.StartPeriod) + } +} + +func (m *MaybeEncryptedRecord) Copy() *MaybeEncryptedRecord { + if m == nil { + return nil + } + o := 
&MaybeEncryptedRecord{} + o.CopyFrom(m) + return o +} + +func (m *MaybeEncryptedRecord) CopyFrom(src interface{}) { + + o := src.(*MaybeEncryptedRecord) + *m = *o + if o.Data != nil { + m.Data = make([]byte, len(o.Data)) + copy(m.Data, o.Data) + } + if o.Nonce != nil { + m.Nonce = make([]byte, len(o.Nonce)) + copy(m.Nonce, o.Nonce) + } +} + +func (m *RootRotation) Copy() *RootRotation { + if m == nil { + return nil + } + o := &RootRotation{} + o.CopyFrom(m) + return o +} + +func (m *RootRotation) CopyFrom(src interface{}) { + + o := src.(*RootRotation) + *m = *o + if o.CACert != nil { + m.CACert = make([]byte, len(o.CACert)) + copy(m.CACert, o.CACert) + } + if o.CAKey != nil { + m.CAKey = make([]byte, len(o.CAKey)) + copy(m.CAKey, o.CAKey) + } + if o.CrossSignedCACert != nil { + m.CrossSignedCACert = make([]byte, len(o.CrossSignedCACert)) + copy(m.CrossSignedCACert, o.CrossSignedCACert) + } +} + +func (m *Privileges) Copy() *Privileges { + if m == nil { + return nil + } + o := &Privileges{} + o.CopyFrom(m) + return o +} + +func (m *Privileges) CopyFrom(src interface{}) { + + o := src.(*Privileges) + *m = *o + if o.CredentialSpec != nil { + m.CredentialSpec = &Privileges_CredentialSpec{} + deepcopy.Copy(m.CredentialSpec, o.CredentialSpec) + } + if o.SELinuxContext != nil { + m.SELinuxContext = &Privileges_SELinuxContext{} + deepcopy.Copy(m.SELinuxContext, o.SELinuxContext) + } +} + +func (m *Privileges_CredentialSpec) Copy() *Privileges_CredentialSpec { + if m == nil { + return nil + } + o := &Privileges_CredentialSpec{} + o.CopyFrom(m) + return o +} + +func (m *Privileges_CredentialSpec) CopyFrom(src interface{}) { + + o := src.(*Privileges_CredentialSpec) + *m = *o + if o.Source != nil { + switch o.Source.(type) { + case *Privileges_CredentialSpec_File: + v := Privileges_CredentialSpec_File{ + File: o.GetFile(), + } + m.Source = &v + case *Privileges_CredentialSpec_Registry: + v := Privileges_CredentialSpec_Registry{ + Registry: o.GetRegistry(), + } + m.Source = &v + case *Privileges_CredentialSpec_Config: + v := Privileges_CredentialSpec_Config{ + Config: o.GetConfig(), + } + m.Source = &v + } + } + +} + +func (m *Privileges_SELinuxContext) Copy() *Privileges_SELinuxContext { + if m == nil { + return nil + } + o := &Privileges_SELinuxContext{} + o.CopyFrom(m) + return o +} + +func (m *Privileges_SELinuxContext) CopyFrom(src interface{}) { + + o := src.(*Privileges_SELinuxContext) + *m = *o +} + +func (m *Version) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Version) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Index != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + } + return i, nil +} + +func (m *IndexEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IndexEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Val) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Val))) + i += copy(dAtA[i:], m.Val) + } + return i, nil +} + +func (m *Annotations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Annotations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Indices) > 0 { + for _, msg := range m.Indices { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NamedGenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedGenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *DiscreteGenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiscreteGenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Value != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *GenericResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenericResource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Resource != nil { + nn1, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *GenericResource_NamedResourceSpec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.NamedResourceSpec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NamedResourceSpec.Size())) + n2, err := m.NamedResourceSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *GenericResource_DiscreteResourceSpec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.DiscreteResourceSpec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DiscreteResourceSpec.Size())) + n3, err := m.DiscreteResourceSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Resources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *Resources) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NanoCPUs != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, msg := range m.Generic { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Limits != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Limits.Size())) + n4, err := m.Limits.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + if m.Reservations != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reservations.Size())) + n5, err := m.Reservations.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + if m.SwapBytes != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SwapBytes.Size())) + n6, err := m.SwapBytes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.MemorySwappiness != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MemorySwappiness.Size())) + n7, err := m.MemorySwappiness.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *Platform) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Architecture) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + } + if len(m.OS) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.OS))) + i += copy(dAtA[i:], m.OS) + } + return i, nil +} + +func (m *PluginDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Name) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + return i, nil +} + +func (m *EngineDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EngineDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.EngineVersion) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.EngineVersion))) + i += copy(dAtA[i:], m.EngineVersion) + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := 
m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Plugins) > 0 { + for _, msg := range m.Plugins { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *NodeDescription) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeDescription) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Hostname) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + } + if m.Platform != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Platform.Size())) + n8, err := m.Platform.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.Resources != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Resources.Size())) + n9, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + if m.Engine != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Engine.Size())) + n10, err := m.Engine.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.TLSInfo != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TLSInfo.Size())) + n11, err := m.TLSInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.FIPS { + dAtA[i] = 0x30 + i++ + if m.FIPS { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *NodeTLSInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeTLSInfo) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TrustRoot) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.TrustRoot))) + i += copy(dAtA[i:], m.TrustRoot) + } + if len(m.CertIssuerSubject) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CertIssuerSubject))) + i += copy(dAtA[i:], m.CertIssuerSubject) + } + if len(m.CertIssuerPublicKey) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CertIssuerPublicKey))) + i += copy(dAtA[i:], m.CertIssuerPublicKey) + } + return i, nil +} + +func (m *RaftMemberStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftMemberStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Leader { + dAtA[i] = 0x8 + i++ + if m.Leader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Reachability != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reachability)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *NodeStatus) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *Image) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Image) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Reference) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Reference))) + i += copy(dAtA[i:], m.Reference) + } + return i, nil +} + +func (m *Mount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Type)) + } + if len(m.Source) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Source))) + i += copy(dAtA[i:], m.Source) + } + if len(m.Target) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if m.ReadOnly { + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.BindOptions != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.BindOptions.Size())) + n12, err := m.BindOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + if m.VolumeOptions != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.VolumeOptions.Size())) + n13, err := m.VolumeOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + if m.TmpfsOptions != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TmpfsOptions.Size())) + n14, err := m.TmpfsOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + if m.Consistency != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Consistency)) + } + return i, nil +} + +func (m *Mount_BindOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_BindOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Propagation != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Propagation)) + } + if m.NonRecursive { + dAtA[i] = 0x10 + i++ + if m.NonRecursive { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *Mount_VolumeOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_VolumeOptions) MarshalTo(dAtA []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + if m.NoCopy { + dAtA[i] = 0x8 + i++ + if m.NoCopy { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Labels) > 0 { + for k, _ := range m.Labels { + dAtA[i] = 0x12 + i++ + v := m.Labels[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.DriverConfig != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.DriverConfig.Size())) + n15, err := m.DriverConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + return i, nil +} + +func (m *Mount_TmpfsOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mount_TmpfsOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SizeBytes != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SizeBytes)) + } + if m.Mode != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Mode)) + } + if len(m.Options) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Options))) + i += copy(dAtA[i:], m.Options) + } + return i, nil +} + +func (m *RestartPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RestartPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Condition != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Condition)) + } + if m.Delay != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Delay.Size())) + n16, err := m.Delay.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + if m.MaxAttempts != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxAttempts)) + } + if m.Window != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Window.Size())) + n17, err := m.Window.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + return i, nil +} + +func (m *UpdateConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Parallelism != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Parallelism)) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(types.SizeOfStdDuration(m.Delay))) + n18, err := types.StdDurationMarshalTo(m.Delay, dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if m.FailureAction != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.FailureAction)) + } + if m.Monitor != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Monitor.Size())) + n19, err := m.Monitor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + if m.MaxFailureRatio != 0 { + dAtA[i] = 0x2d + i++ + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.MaxFailureRatio)))) + i += 4 + } + if 
m.Order != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Order)) + } + return i, nil +} + +func (m *UpdateStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if m.StartedAt != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.StartedAt.Size())) + n20, err := m.StartedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + } + if m.CompletedAt != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CompletedAt.Size())) + n21, err := m.CompletedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + } + if len(m.Message) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ContainerID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + } + if m.PID != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PID)) + } + if m.ExitCode != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ExitCode)) + } + return i, nil +} + +func (m *PortStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TaskStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Timestamp != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Timestamp.Size())) + n22, err := m.Timestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + if m.State != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + if len(m.Err) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Err))) + i += copy(dAtA[i:], m.Err) + } + if m.RuntimeStatus != nil { + nn23, err := m.RuntimeStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn23 + } + if m.PortStatus != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PortStatus.Size())) + n24, err := m.PortStatus.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + 
i += n24 + } + if len(m.AppliedBy) > 0 { + dAtA[i] = 0x3a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppliedBy))) + i += copy(dAtA[i:], m.AppliedBy) + } + if m.AppliedAt != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.AppliedAt.Size())) + n25, err := m.AppliedAt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + return i, nil +} + +func (m *TaskStatus_Container) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Container != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Container.Size())) + n26, err := m.Container.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + return i, nil +} +func (m *NetworkAttachmentConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkAttachmentConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Target) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Target))) + i += copy(dAtA[i:], m.Target) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, _ := range m.DriverAttachmentOpts { + dAtA[i] = 0x22 + i++ + v := m.DriverAttachmentOpts[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *IPAMConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAMConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Family != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Family)) + } + if len(m.Subnet) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Subnet))) + i += copy(dAtA[i:], m.Subnet) + } + if len(m.Range) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Range))) + i += copy(dAtA[i:], m.Range) + } + if len(m.Gateway) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + } + if len(m.Reserved) > 0 { + for k, _ := range m.Reserved { + dAtA[i] = 0x2a + i++ + v := m.Reserved[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *PortConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Protocol != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) + } + if m.TargetPort != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.PublishMode)) + } + return i, nil +} + +func (m *Driver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Driver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Options) > 0 { + for k, _ := range m.Options { + dAtA[i] = 0x12 + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *IPAMOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPAMOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Driver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Driver.Size())) + n27, err := m.Driver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + } + if len(m.Configs) > 0 { + for _, msg := range m.Configs { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Peer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Peer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.NodeID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + return i, nil +} + +func (m *WeightedPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedPeer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Peer != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Peer.Size())) + n28, err := m.Peer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + if m.Weight != 0 { + dAtA[i] = 0x10 + i++ + i = 
encodeVarintTypes(dAtA, i, uint64(m.Weight)) + } + return i, nil +} + +func (m *IssuanceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IssuanceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.State != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.State)) + } + if len(m.Err) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Err))) + i += copy(dAtA[i:], m.Err) + } + return i, nil +} + +func (m *AcceptancePolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Policies) > 0 { + for _, msg := range m.Policies { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Role)) + } + if m.Autoaccept { + dAtA[i] = 0x10 + i++ + if m.Autoaccept { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Secret != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Secret.Size())) + n29, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + } + return i, nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Data) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Alg) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Alg))) + i += copy(dAtA[i:], m.Alg) + } + return i, nil +} + +func (m *ExternalCA) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalCA) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Protocol != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Protocol)) + } + if len(m.URL) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.URL))) + i += copy(dAtA[i:], m.URL) + } + if len(m.Options) > 0 { + for k, _ := range m.Options { + dAtA[i] = 0x1a + i++ + v := m.Options[k] + mapSize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + i = encodeVarintTypes(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = 
encodeVarintTypes(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.CACert) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + return i, nil +} + +func (m *CAConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CAConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeCertExpiry != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.NodeCertExpiry.Size())) + n30, err := m.NodeCertExpiry.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + } + if len(m.ExternalCAs) > 0 { + for _, msg := range m.ExternalCAs { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.SigningCACert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SigningCACert))) + i += copy(dAtA[i:], m.SigningCACert) + } + if len(m.SigningCAKey) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SigningCAKey))) + i += copy(dAtA[i:], m.SigningCAKey) + } + if m.ForceRotate != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ForceRotate)) + } + return i, nil +} + +func (m *OrchestrationConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrchestrationConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.TaskHistoryRetentionLimit)) + } + return i, nil +} + +func (m *TaskDefaults) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TaskDefaults) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.LogDriver != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LogDriver.Size())) + n31, err := m.LogDriver.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + return i, nil +} + +func (m *DispatcherConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DispatcherConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.HeartbeatPeriod != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.HeartbeatPeriod.Size())) + n32, err := m.HeartbeatPeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *RaftConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RaftConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.SnapshotInterval != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, 
uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.ElectionTick)) + } + return i, nil +} + +func (m *EncryptionConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EncryptionConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.AutoLockManagers { + dAtA[i] = 0x8 + i++ + if m.AutoLockManagers { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SpreadOver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SpreadOver) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SpreadDescriptor) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SpreadDescriptor))) + i += copy(dAtA[i:], m.SpreadDescriptor) + } + return i, nil +} + +func (m *PlacementPreference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlacementPreference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Preference != nil { + nn33, err := m.Preference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn33 + } + return i, nil +} + +func (m *PlacementPreference_Spread) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Spread != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Spread.Size())) + n34, err := m.Spread.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil +} +func (m *Placement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Placement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Preferences) > 0 { + for _, msg := range m.Preferences { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Platforms) > 0 { + for _, msg := range m.Platforms { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.MaxReplicas != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.MaxReplicas)) + } + return i, nil +} + +func (m *JoinTokens) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JoinTokens) MarshalTo(dAtA []byte) (int, error) { + 
var i int + _ = i + var l int + _ = l + if len(m.Worker) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Worker))) + i += copy(dAtA[i:], m.Worker) + } + if len(m.Manager) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Manager))) + i += copy(dAtA[i:], m.Manager) + } + return i, nil +} + +func (m *RootCA) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootCA) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CAKey) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CAKey))) + i += copy(dAtA[i:], m.CAKey) + } + if len(m.CACert) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + if len(m.CACertHash) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACertHash))) + i += copy(dAtA[i:], m.CACertHash) + } + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.JoinTokens.Size())) + n35, err := m.JoinTokens.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + if m.RootRotation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.RootRotation.Size())) + n36, err := m.RootRotation.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + } + if m.LastForcedRotation != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LastForcedRotation)) + } + return i, nil +} + +func (m *Certificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Role != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Role)) + } + if len(m.CSR) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CSR))) + i += copy(dAtA[i:], m.CSR) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Status.Size())) + n37, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Certificate) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + if len(m.CN) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CN))) + i += copy(dAtA[i:], m.CN) + } + return i, nil +} + +func (m *EncryptionKey) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EncryptionKey) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Subsystem) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Subsystem))) + i += copy(dAtA[i:], m.Subsystem) + } + if m.Algorithm != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Algorithm)) + } + if len(m.Key) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if m.LamportTime != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.LamportTime)) + } + return i, nil +} + +func (m *ManagerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, 
err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.RaftID != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.RaftID)) + } + if len(m.Addr) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Addr))) + i += copy(dAtA[i:], m.Addr) + } + if m.Leader { + dAtA[i] = 0x18 + i++ + if m.Leader { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Reachability != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Reachability)) + } + return i, nil +} + +func (m *FileTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileTarget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.UID) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + } + if len(m.GID) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.GID))) + i += copy(dAtA[i:], m.GID) + } + if m.Mode != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Mode)) + } + return i, nil +} + +func (m *RuntimeTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeTarget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.SecretID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SecretID))) + i += copy(dAtA[i:], m.SecretID) + } + if len(m.SecretName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + } + if m.Target != nil { + nn38, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn38 + } + return i, nil +} + +func (m *SecretReference_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.File.Size())) + n39, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } + return i, nil +} +func (m *ConfigReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ConfigID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConfigID))) + i += copy(dAtA[i:], m.ConfigID) + } + if len(m.ConfigName) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.ConfigName))) + i += copy(dAtA[i:], m.ConfigName) + } + if m.Target != nil { + nn40, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { 
+ return 0, err + } + i += nn40 + } + return i, nil +} + +func (m *ConfigReference_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.File.Size())) + n41, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil +} +func (m *ConfigReference_Runtime) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Runtime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Runtime.Size())) + n42, err := m.Runtime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 + } + return i, nil +} +func (m *BlacklistedCertificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BlacklistedCertificate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Expiry != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Expiry.Size())) + n43, err := m.Expiry.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + } + return i, nil +} + +func (m *HealthConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HealthConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Interval != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Interval.Size())) + n44, err := m.Interval.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.Timeout != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Timeout.Size())) + n45, err := m.Timeout.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.Retries != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Retries)) + } + if m.StartPeriod != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.StartPeriod.Size())) + n46, err := m.StartPeriod.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + return i, nil +} + +func (m *MaybeEncryptedRecord) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaybeEncryptedRecord) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Algorithm != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.Algorithm)) + } + if len(m.Data) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if len(m.Nonce) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Nonce))) + i += copy(dAtA[i:], m.Nonce) + } + return i, nil +} + +func (m *RootRotation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootRotation) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.CACert) > 0 { + dAtA[i] = 0xa + 
i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CACert))) + i += copy(dAtA[i:], m.CACert) + } + if len(m.CAKey) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CAKey))) + i += copy(dAtA[i:], m.CAKey) + } + if len(m.CrossSignedCACert) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.CrossSignedCACert))) + i += copy(dAtA[i:], m.CrossSignedCACert) + } + return i, nil +} + +func (m *Privileges) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.CredentialSpec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.CredentialSpec.Size())) + n47, err := m.CredentialSpec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + } + if m.SELinuxContext != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(m.SELinuxContext.Size())) + n48, err := m.SELinuxContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + } + return i, nil +} + +func (m *Privileges_CredentialSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_CredentialSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Source != nil { + nn49, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn49 + } + return i, nil +} + +func (m *Privileges_CredentialSpec_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0xa + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.File))) + i += copy(dAtA[i:], m.File) + return i, nil +} +func (m *Privileges_CredentialSpec_Registry) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + return i, nil +} +func (m *Privileges_CredentialSpec_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Config))) + i += copy(dAtA[i:], m.Config) + return i, nil +} +func (m *Privileges_SELinuxContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Privileges_SELinuxContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Disable { + dAtA[i] = 0x8 + i++ + if m.Disable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.User) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if len(m.Role) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + } + if len(m.Type) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Level) > 0 { + dAtA[i] = 0x2a + i++ + i = encodeVarintTypes(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + } + return i, nil +} + +func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m 
*Version) Size() (n int) { + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + return n +} + +func (m *IndexEntry) Size() (n int) { + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Val) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Annotations) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Indices) > 0 { + for _, e := range m.Indices { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *NamedGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DiscreteGenericResource) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Value != 0 { + n += 1 + sovTypes(uint64(m.Value)) + } + return n +} + +func (m *GenericResource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + n += m.Resource.Size() + } + return n +} + +func (m *GenericResource_NamedResourceSpec) Size() (n int) { + var l int + _ = l + if m.NamedResourceSpec != nil { + l = m.NamedResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *GenericResource_DiscreteResourceSpec) Size() (n int) { + var l int + _ = l + if m.DiscreteResourceSpec != nil { + l = m.DiscreteResourceSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Resources) Size() (n int) { + var l int + _ = l + if m.NanoCPUs != 0 { + n += 1 + sovTypes(uint64(m.NanoCPUs)) + } + if m.MemoryBytes != 0 { + n += 1 + sovTypes(uint64(m.MemoryBytes)) + } + if len(m.Generic) > 0 { + for _, e := range m.Generic { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if m.Limits != nil { + l = m.Limits.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Reservations != nil { + l = m.Reservations.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SwapBytes != nil { + l = m.SwapBytes.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MemorySwappiness != nil { + l = m.MemorySwappiness.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Platform) Size() (n int) { + var l int + _ = l + l = len(m.Architecture) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.OS) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PluginDescription) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EngineDescription) Size() (n int) { + var l int + _ = l + l = len(m.EngineVersion) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if len(m.Plugins) > 0 { + for _, e := range m.Plugins { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + 
return n +} + +func (m *NodeDescription) Size() (n int) { + var l int + _ = l + l = len(m.Hostname) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Resources != nil { + l = m.Resources.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Engine != nil { + l = m.Engine.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TLSInfo != nil { + l = m.TLSInfo.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.FIPS { + n += 2 + } + return n +} + +func (m *NodeTLSInfo) Size() (n int) { + var l int + _ = l + l = len(m.TrustRoot) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerSubject) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CertIssuerPublicKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftMemberStatus) Size() (n int) { + var l int + _ = l + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *NodeStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Image) Size() (n int) { + var l int + _ = l + l = len(m.Reference) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovTypes(uint64(m.Type)) + } + l = len(m.Source) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ReadOnly { + n += 2 + } + if m.BindOptions != nil { + l = m.BindOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.VolumeOptions != nil { + l = m.VolumeOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.TmpfsOptions != nil { + l = m.TmpfsOptions.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Consistency != 0 { + n += 1 + sovTypes(uint64(m.Consistency)) + } + return n +} + +func (m *Mount_BindOptions) Size() (n int) { + var l int + _ = l + if m.Propagation != 0 { + n += 1 + sovTypes(uint64(m.Propagation)) + } + if m.NonRecursive { + n += 2 + } + return n +} + +func (m *Mount_VolumeOptions) Size() (n int) { + var l int + _ = l + if m.NoCopy { + n += 2 + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + if m.DriverConfig != nil { + l = m.DriverConfig.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Mount_TmpfsOptions) Size() (n int) { + var l int + _ = l + if m.SizeBytes != 0 { + n += 1 + sovTypes(uint64(m.SizeBytes)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + l = len(m.Options) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RestartPolicy) Size() (n int) { + var l int + _ = l + if m.Condition != 0 { + n += 1 + sovTypes(uint64(m.Condition)) + } + if m.Delay != nil { + l = m.Delay.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxAttempts != 0 { + n += 1 + sovTypes(uint64(m.MaxAttempts)) + } + if m.Window != nil { + l = m.Window.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *UpdateConfig) Size() (n int) { + 
var l int + _ = l + if m.Parallelism != 0 { + n += 1 + sovTypes(uint64(m.Parallelism)) + } + l = types.SizeOfStdDuration(m.Delay) + n += 1 + l + sovTypes(uint64(l)) + if m.FailureAction != 0 { + n += 1 + sovTypes(uint64(m.FailureAction)) + } + if m.Monitor != nil { + l = m.Monitor.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.MaxFailureRatio != 0 { + n += 5 + } + if m.Order != 0 { + n += 1 + sovTypes(uint64(m.Order)) + } + return n +} + +func (m *UpdateStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + if m.StartedAt != nil { + l = m.StartedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.CompletedAt != nil { + l = m.CompletedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ContainerStatus) Size() (n int) { + var l int + _ = l + l = len(m.ContainerID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.PID != 0 { + n += 1 + sovTypes(uint64(m.PID)) + } + if m.ExitCode != 0 { + n += 1 + sovTypes(uint64(m.ExitCode)) + } + return n +} + +func (m *PortStatus) Size() (n int) { + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *TaskStatus) Size() (n int) { + var l int + _ = l + if m.Timestamp != nil { + l = m.Timestamp.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.RuntimeStatus != nil { + n += m.RuntimeStatus.Size() + } + if m.PortStatus != nil { + l = m.PortStatus.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppliedBy) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.AppliedAt != nil { + l = m.AppliedAt.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *TaskStatus_Container) Size() (n int) { + var l int + _ = l + if m.Container != nil { + l = m.Container.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *NetworkAttachmentConfig) Size() (n int) { + var l int + _ = l + l = len(m.Target) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Aliases) > 0 { + for _, s := range m.Aliases { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.DriverAttachmentOpts) > 0 { + for k, v := range m.DriverAttachmentOpts { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMConfig) Size() (n int) { + var l int + _ = l + if m.Family != 0 { + n += 1 + sovTypes(uint64(m.Family)) + } + l = len(m.Subnet) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Range) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Gateway) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Reserved) > 0 { + for k, v := range m.Reserved { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *PortConfig) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if 
m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + if m.TargetPort != 0 { + n += 1 + sovTypes(uint64(m.TargetPort)) + } + if m.PublishedPort != 0 { + n += 1 + sovTypes(uint64(m.PublishedPort)) + } + if m.PublishMode != 0 { + n += 1 + sovTypes(uint64(m.PublishMode)) + } + return n +} + +func (m *Driver) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + return n +} + +func (m *IPAMOptions) Size() (n int) { + var l int + _ = l + if m.Driver != nil { + l = m.Driver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Configs) > 0 { + for _, e := range m.Configs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *Peer) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *WeightedPeer) Size() (n int) { + var l int + _ = l + if m.Peer != nil { + l = m.Peer.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Weight != 0 { + n += 1 + sovTypes(uint64(m.Weight)) + } + return n +} + +func (m *IssuanceStatus) Size() (n int) { + var l int + _ = l + if m.State != 0 { + n += 1 + sovTypes(uint64(m.State)) + } + l = len(m.Err) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy) Size() (n int) { + var l int + _ = l + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + if m.Autoaccept { + n += 2 + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Size() (n int) { + var l int + _ = l + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Alg) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *ExternalCA) Size() (n int) { + var l int + _ = l + if m.Protocol != 0 { + n += 1 + sovTypes(uint64(m.Protocol)) + } + l = len(m.URL) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovTypes(uint64(len(k))) + 1 + len(v) + sovTypes(uint64(len(v))) + n += mapEntrySize + 1 + sovTypes(uint64(mapEntrySize)) + } + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *CAConfig) Size() (n int) { + var l int + _ = l + if m.NodeCertExpiry != nil { + l = m.NodeCertExpiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if len(m.ExternalCAs) > 0 { + for _, e := range m.ExternalCAs { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + l = len(m.SigningCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SigningCAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.ForceRotate != 0 { + n += 1 + sovTypes(uint64(m.ForceRotate)) + } + return n +} + +func (m *OrchestrationConfig) Size() (n int) { + var l int + _ = l + if m.TaskHistoryRetentionLimit != 0 { + n += 1 + sovTypes(uint64(m.TaskHistoryRetentionLimit)) + } + return n +} + +func (m 
*TaskDefaults) Size() (n int) { + var l int + _ = l + if m.LogDriver != nil { + l = m.LogDriver.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *DispatcherConfig) Size() (n int) { + var l int + _ = l + if m.HeartbeatPeriod != nil { + l = m.HeartbeatPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RaftConfig) Size() (n int) { + var l int + _ = l + if m.SnapshotInterval != 0 { + n += 1 + sovTypes(uint64(m.SnapshotInterval)) + } + if m.KeepOldSnapshots != 0 { + n += 1 + sovTypes(uint64(m.KeepOldSnapshots)) + } + if m.LogEntriesForSlowFollowers != 0 { + n += 1 + sovTypes(uint64(m.LogEntriesForSlowFollowers)) + } + if m.HeartbeatTick != 0 { + n += 1 + sovTypes(uint64(m.HeartbeatTick)) + } + if m.ElectionTick != 0 { + n += 1 + sovTypes(uint64(m.ElectionTick)) + } + return n +} + +func (m *EncryptionConfig) Size() (n int) { + var l int + _ = l + if m.AutoLockManagers { + n += 2 + } + return n +} + +func (m *SpreadOver) Size() (n int) { + var l int + _ = l + l = len(m.SpreadDescriptor) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *PlacementPreference) Size() (n int) { + var l int + _ = l + if m.Preference != nil { + n += m.Preference.Size() + } + return n +} + +func (m *PlacementPreference_Spread) Size() (n int) { + var l int + _ = l + if m.Spread != nil { + l = m.Spread.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Placement) Size() (n int) { + var l int + _ = l + if len(m.Constraints) > 0 { + for _, s := range m.Constraints { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Preferences) > 0 { + for _, e := range m.Preferences { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if len(m.Platforms) > 0 { + for _, e := range m.Platforms { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.MaxReplicas != 0 { + n += 1 + sovTypes(uint64(m.MaxReplicas)) + } + return n +} + +func (m *JoinTokens) Size() (n int) { + var l int + _ = l + l = len(m.Worker) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Manager) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootCA) Size() (n int) { + var l int + _ = l + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CACertHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.JoinTokens.Size() + n += 1 + l + sovTypes(uint64(l)) + if m.RootRotation != nil { + l = m.RootRotation.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.LastForcedRotation != 0 { + n += 1 + sovTypes(uint64(m.LastForcedRotation)) + } + return n +} + +func (m *Certificate) Size() (n int) { + var l int + _ = l + if m.Role != 0 { + n += 1 + sovTypes(uint64(m.Role)) + } + l = len(m.CSR) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = m.Status.Size() + n += 1 + l + sovTypes(uint64(l)) + l = len(m.Certificate) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CN) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *EncryptionKey) Size() (n int) { + var l int + _ = l + l = len(m.Subsystem) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.LamportTime != 0 { + n += 1 + sovTypes(uint64(m.LamportTime)) + } + return n +} + +func (m *ManagerStatus) Size() (n int) { + var l int + _ = l + if m.RaftID != 0 { + n 
+= 1 + sovTypes(uint64(m.RaftID)) + } + l = len(m.Addr) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Leader { + n += 2 + } + if m.Reachability != 0 { + n += 1 + sovTypes(uint64(m.Reachability)) + } + return n +} + +func (m *FileTarget) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.UID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.GID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovTypes(uint64(m.Mode)) + } + return n +} + +func (m *RuntimeTarget) Size() (n int) { + var l int + _ = l + return n +} + +func (m *SecretReference) Size() (n int) { + var l int + _ = l + l = len(m.SecretID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.SecretName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *SecretReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ConfigReference) Size() (n int) { + var l int + _ = l + l = len(m.ConfigID) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.ConfigName) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.Target != nil { + n += m.Target.Size() + } + return n +} + +func (m *ConfigReference_File) Size() (n int) { + var l int + _ = l + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *ConfigReference_Runtime) Size() (n int) { + var l int + _ = l + if m.Runtime != nil { + l = m.Runtime.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *BlacklistedCertificate) Size() (n int) { + var l int + _ = l + if m.Expiry != nil { + l = m.Expiry.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *HealthConfig) Size() (n int) { + var l int + _ = l + if len(m.Test) > 0 { + for _, s := range m.Test { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.Interval != nil { + l = m.Interval.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Timeout != nil { + l = m.Timeout.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.Retries != 0 { + n += 1 + sovTypes(uint64(m.Retries)) + } + if m.StartPeriod != nil { + l = m.StartPeriod.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *MaybeEncryptedRecord) Size() (n int) { + var l int + _ = l + if m.Algorithm != 0 { + n += 1 + sovTypes(uint64(m.Algorithm)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Nonce) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *RootRotation) Size() (n int) { + var l int + _ = l + l = len(m.CACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CAKey) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.CrossSignedCACert) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges) Size() (n int) { + var l int + _ = l + if m.CredentialSpec != nil { + l = m.CredentialSpec.Size() + n += 1 + l + sovTypes(uint64(l)) + } + if m.SELinuxContext != nil { + l = m.SELinuxContext.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} + +func (m *Privileges_CredentialSpec) Size() (n int) { + var l int + _ = l + if m.Source != nil { + n += m.Source.Size() + } + return n +} + +func (m *Privileges_CredentialSpec_File) Size() (n int) { + var l int + _ = l + l = len(m.File) + n += 1 + l + sovTypes(uint64(l)) + return n +} 
+func (m *Privileges_CredentialSpec_Registry) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Registry)
+	n += 1 + l + sovTypes(uint64(l))
+	return n
+}
+func (m *Privileges_CredentialSpec_Config) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Config)
+	n += 1 + l + sovTypes(uint64(l))
+	return n
+}
+func (m *Privileges_SELinuxContext) Size() (n int) {
+	var l int
+	_ = l
+	if m.Disable {
+		n += 2
+	}
+	l = len(m.User)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	l = len(m.Type)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	l = len(m.Level)
+	if l > 0 {
+		n += 1 + l + sovTypes(uint64(l))
+	}
+	return n
+}
+
+func sovTypes(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozTypes(x uint64) (n int) {
+	return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Version) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Version{`,
+		`Index:` + fmt.Sprintf("%v", this.Index) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *IndexEntry) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&IndexEntry{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Val:` + fmt.Sprintf("%v", this.Val) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Annotations) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&Annotations{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`Indices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Indices), "IndexEntry", "IndexEntry", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamedGenericResource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamedGenericResource{`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DiscreteGenericResource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DiscreteGenericResource{`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GenericResource) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GenericResource{`,
+		`Resource:` + fmt.Sprintf("%v", this.Resource) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GenericResource_NamedResourceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GenericResource_NamedResourceSpec{`,
+		`NamedResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.NamedResourceSpec), "NamedGenericResource", "NamedGenericResource", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GenericResource_DiscreteResourceSpec) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GenericResource_DiscreteResourceSpec{`,
+		`DiscreteResourceSpec:` + strings.Replace(fmt.Sprintf("%v", this.DiscreteResourceSpec), "DiscreteGenericResource", "DiscreteGenericResource", 1) + `,`,
+		`}`,
+	}, "") +
return s +} +func (this *Resources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Resources{`, + `NanoCPUs:` + fmt.Sprintf("%v", this.NanoCPUs) + `,`, + `MemoryBytes:` + fmt.Sprintf("%v", this.MemoryBytes) + `,`, + `Generic:` + strings.Replace(fmt.Sprintf("%v", this.Generic), "GenericResource", "GenericResource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + strings.Replace(fmt.Sprintf("%v", this.Limits), "Resources", "Resources", 1) + `,`, + `Reservations:` + strings.Replace(fmt.Sprintf("%v", this.Reservations), "Resources", "Resources", 1) + `,`, + `SwapBytes:` + strings.Replace(fmt.Sprintf("%v", this.SwapBytes), "Int64Value", "google_protobuf2.Int64Value", 1) + `,`, + `MemorySwappiness:` + strings.Replace(fmt.Sprintf("%v", this.MemorySwappiness), "Int64Value", "google_protobuf2.Int64Value", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Platform) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Platform{`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `OS:` + fmt.Sprintf("%v", this.OS) + `,`, + `}`, + }, "") + return s +} +func (this *PluginDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PluginDescription{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *EngineDescription) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&EngineDescription{`, + `EngineVersion:` + fmt.Sprintf("%v", this.EngineVersion) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "PluginDescription", "PluginDescription", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDescription) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDescription{`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Platform:` + strings.Replace(fmt.Sprintf("%v", this.Platform), "Platform", "Platform", 1) + `,`, + `Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "Resources", 1) + `,`, + `Engine:` + strings.Replace(fmt.Sprintf("%v", this.Engine), "EngineDescription", "EngineDescription", 1) + `,`, + `TLSInfo:` + strings.Replace(fmt.Sprintf("%v", this.TLSInfo), "NodeTLSInfo", "NodeTLSInfo", 1) + `,`, + `FIPS:` + fmt.Sprintf("%v", this.FIPS) + `,`, + `}`, + }, "") + return s +} +func (this *NodeTLSInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeTLSInfo{`, + `TrustRoot:` + fmt.Sprintf("%v", this.TrustRoot) + `,`, + `CertIssuerSubject:` + fmt.Sprintf("%v", this.CertIssuerSubject) + `,`, + `CertIssuerPublicKey:` + fmt.Sprintf("%v", this.CertIssuerPublicKey) + `,`, + `}`, + }, "") + return s +} +func (this *RaftMemberStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftMemberStatus{`, + `Leader:` + 
fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *Image) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Image{`, + `Reference:` + fmt.Sprintf("%v", this.Reference) + `,`, + `}`, + }, "") + return s +} +func (this *Mount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`, + `VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`, + `TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`, + `Consistency:` + fmt.Sprintf("%v", this.Consistency) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_BindOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_BindOptions{`, + `Propagation:` + fmt.Sprintf("%v", this.Propagation) + `,`, + `NonRecursive:` + fmt.Sprintf("%v", this.NonRecursive) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_VolumeOptions) String() string { + if this == nil { + return "nil" + } + keysForLabels := make([]string, 0, len(this.Labels)) + for k, _ := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + s := strings.Join([]string{`&Mount_VolumeOptions{`, + `NoCopy:` + fmt.Sprintf("%v", this.NoCopy) + `,`, + `Labels:` + mapStringForLabels + `,`, + `DriverConfig:` + strings.Replace(fmt.Sprintf("%v", this.DriverConfig), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Mount_TmpfsOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mount_TmpfsOptions{`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) + `,`, + `Options:` + fmt.Sprintf("%v", this.Options) + `,`, + `}`, + }, "") + return s +} +func (this *RestartPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RestartPolicy{`, + `Condition:` + fmt.Sprintf("%v", this.Condition) + `,`, + `Delay:` + strings.Replace(fmt.Sprintf("%v", this.Delay), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxAttempts:` + fmt.Sprintf("%v", this.MaxAttempts) + `,`, + `Window:` + strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateConfig{`, + `Parallelism:` + fmt.Sprintf("%v", this.Parallelism) + `,`, + `Delay:` 
+ strings.Replace(strings.Replace(this.Delay.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`, + `FailureAction:` + fmt.Sprintf("%v", this.FailureAction) + `,`, + `Monitor:` + strings.Replace(fmt.Sprintf("%v", this.Monitor), "Duration", "google_protobuf1.Duration", 1) + `,`, + `MaxFailureRatio:` + fmt.Sprintf("%v", this.MaxFailureRatio) + `,`, + `Order:` + fmt.Sprintf("%v", this.Order) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `StartedAt:` + strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `CompletedAt:` + strings.Replace(fmt.Sprintf("%v", this.CompletedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `PID:` + fmt.Sprintf("%v", this.PID) + `,`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `}`, + }, "") + return s +} +func (this *PortStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortStatus{`, + `Ports:` + strings.Replace(fmt.Sprintf("%v", this.Ports), "PortConfig", "PortConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus{`, + `Timestamp:` + strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `RuntimeStatus:` + fmt.Sprintf("%v", this.RuntimeStatus) + `,`, + `PortStatus:` + strings.Replace(fmt.Sprintf("%v", this.PortStatus), "PortStatus", "PortStatus", 1) + `,`, + `AppliedBy:` + fmt.Sprintf("%v", this.AppliedBy) + `,`, + `AppliedAt:` + strings.Replace(fmt.Sprintf("%v", this.AppliedAt), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TaskStatus_Container) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskStatus_Container{`, + `Container:` + strings.Replace(fmt.Sprintf("%v", this.Container), "ContainerStatus", "ContainerStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkAttachmentConfig) String() string { + if this == nil { + return "nil" + } + keysForDriverAttachmentOpts := make([]string, 0, len(this.DriverAttachmentOpts)) + for k, _ := range this.DriverAttachmentOpts { + keysForDriverAttachmentOpts = append(keysForDriverAttachmentOpts, k) + } + sortkeys.Strings(keysForDriverAttachmentOpts) + mapStringForDriverAttachmentOpts := "map[string]string{" + for _, k := range keysForDriverAttachmentOpts { + mapStringForDriverAttachmentOpts += fmt.Sprintf("%v: %v,", k, this.DriverAttachmentOpts[k]) + } + mapStringForDriverAttachmentOpts += "}" + s := strings.Join([]string{`&NetworkAttachmentConfig{`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `Aliases:` + fmt.Sprintf("%v", this.Aliases) + `,`, + `Addresses:` + fmt.Sprintf("%v", this.Addresses) + `,`, + `DriverAttachmentOpts:` + mapStringForDriverAttachmentOpts + `,`, + `}`, + }, "") + return s +} +func (this *IPAMConfig) 
String() string { + if this == nil { + return "nil" + } + keysForReserved := make([]string, 0, len(this.Reserved)) + for k, _ := range this.Reserved { + keysForReserved = append(keysForReserved, k) + } + sortkeys.Strings(keysForReserved) + mapStringForReserved := "map[string]string{" + for _, k := range keysForReserved { + mapStringForReserved += fmt.Sprintf("%v: %v,", k, this.Reserved[k]) + } + mapStringForReserved += "}" + s := strings.Join([]string{`&IPAMConfig{`, + `Family:` + fmt.Sprintf("%v", this.Family) + `,`, + `Subnet:` + fmt.Sprintf("%v", this.Subnet) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `Reserved:` + mapStringForReserved + `,`, + `}`, + }, "") + return s +} +func (this *PortConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortConfig{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `TargetPort:` + fmt.Sprintf("%v", this.TargetPort) + `,`, + `PublishedPort:` + fmt.Sprintf("%v", this.PublishedPort) + `,`, + `PublishMode:` + fmt.Sprintf("%v", this.PublishMode) + `,`, + `}`, + }, "") + return s +} +func (this *Driver) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&Driver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *IPAMOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPAMOptions{`, + `Driver:` + strings.Replace(fmt.Sprintf("%v", this.Driver), "Driver", "Driver", 1) + `,`, + `Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "IPAMConfig", "IPAMConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Peer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Peer{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPeer{`, + `Peer:` + strings.Replace(fmt.Sprintf("%v", this.Peer), "Peer", "Peer", 1) + `,`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `}`, + }, "") + return s +} +func (this *IssuanceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IssuanceStatus{`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `Err:` + fmt.Sprintf("%v", this.Err) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy{`, + `Policies:` + strings.Replace(fmt.Sprintf("%v", this.Policies), "AcceptancePolicy_RoleAdmissionPolicy", "AcceptancePolicy_RoleAdmissionPolicy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy_RoleAdmissionPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Autoaccept:` + fmt.Sprintf("%v", this.Autoaccept) 
+ `,`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "AcceptancePolicy_RoleAdmissionPolicy_Secret", "AcceptancePolicy_RoleAdmissionPolicy_Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AcceptancePolicy_RoleAdmissionPolicy_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AcceptancePolicy_RoleAdmissionPolicy_Secret{`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Alg:` + fmt.Sprintf("%v", this.Alg) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalCA) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k, _ := range this.Options { + keysForOptions = append(keysForOptions, k) + } + sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&ExternalCA{`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `URL:` + fmt.Sprintf("%v", this.URL) + `,`, + `Options:` + mapStringForOptions + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `}`, + }, "") + return s +} +func (this *CAConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CAConfig{`, + `NodeCertExpiry:` + strings.Replace(fmt.Sprintf("%v", this.NodeCertExpiry), "Duration", "google_protobuf1.Duration", 1) + `,`, + `ExternalCAs:` + strings.Replace(fmt.Sprintf("%v", this.ExternalCAs), "ExternalCA", "ExternalCA", 1) + `,`, + `SigningCACert:` + fmt.Sprintf("%v", this.SigningCACert) + `,`, + `SigningCAKey:` + fmt.Sprintf("%v", this.SigningCAKey) + `,`, + `ForceRotate:` + fmt.Sprintf("%v", this.ForceRotate) + `,`, + `}`, + }, "") + return s +} +func (this *OrchestrationConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OrchestrationConfig{`, + `TaskHistoryRetentionLimit:` + fmt.Sprintf("%v", this.TaskHistoryRetentionLimit) + `,`, + `}`, + }, "") + return s +} +func (this *TaskDefaults) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TaskDefaults{`, + `LogDriver:` + strings.Replace(fmt.Sprintf("%v", this.LogDriver), "Driver", "Driver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *DispatcherConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DispatcherConfig{`, + `HeartbeatPeriod:` + strings.Replace(fmt.Sprintf("%v", this.HeartbeatPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RaftConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RaftConfig{`, + `SnapshotInterval:` + fmt.Sprintf("%v", this.SnapshotInterval) + `,`, + `KeepOldSnapshots:` + fmt.Sprintf("%v", this.KeepOldSnapshots) + `,`, + `LogEntriesForSlowFollowers:` + fmt.Sprintf("%v", this.LogEntriesForSlowFollowers) + `,`, + `HeartbeatTick:` + fmt.Sprintf("%v", this.HeartbeatTick) + `,`, + `ElectionTick:` + fmt.Sprintf("%v", this.ElectionTick) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EncryptionConfig{`, + `AutoLockManagers:` + fmt.Sprintf("%v", this.AutoLockManagers) + `,`, + `}`, + }, "") + return s +} +func (this *SpreadOver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SpreadOver{`, + `SpreadDescriptor:` + 
fmt.Sprintf("%v", this.SpreadDescriptor) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference{`, + `Preference:` + fmt.Sprintf("%v", this.Preference) + `,`, + `}`, + }, "") + return s +} +func (this *PlacementPreference_Spread) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PlacementPreference_Spread{`, + `Spread:` + strings.Replace(fmt.Sprintf("%v", this.Spread), "SpreadOver", "SpreadOver", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Placement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Placement{`, + `Constraints:` + fmt.Sprintf("%v", this.Constraints) + `,`, + `Preferences:` + strings.Replace(fmt.Sprintf("%v", this.Preferences), "PlacementPreference", "PlacementPreference", 1) + `,`, + `Platforms:` + strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "Platform", 1) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `}`, + }, "") + return s +} +func (this *JoinTokens) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JoinTokens{`, + `Worker:` + fmt.Sprintf("%v", this.Worker) + `,`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `}`, + }, "") + return s +} +func (this *RootCA) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootCA{`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CACertHash:` + fmt.Sprintf("%v", this.CACertHash) + `,`, + `JoinTokens:` + strings.Replace(strings.Replace(this.JoinTokens.String(), "JoinTokens", "JoinTokens", 1), `&`, ``, 1) + `,`, + `RootRotation:` + strings.Replace(fmt.Sprintf("%v", this.RootRotation), "RootRotation", "RootRotation", 1) + `,`, + `LastForcedRotation:` + fmt.Sprintf("%v", this.LastForcedRotation) + `,`, + `}`, + }, "") + return s +} +func (this *Certificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Certificate{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `CSR:` + fmt.Sprintf("%v", this.CSR) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IssuanceStatus", "IssuanceStatus", 1), `&`, ``, 1) + `,`, + `Certificate:` + fmt.Sprintf("%v", this.Certificate) + `,`, + `CN:` + fmt.Sprintf("%v", this.CN) + `,`, + `}`, + }, "") + return s +} +func (this *EncryptionKey) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EncryptionKey{`, + `Subsystem:` + fmt.Sprintf("%v", this.Subsystem) + `,`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `LamportTime:` + fmt.Sprintf("%v", this.LamportTime) + `,`, + `}`, + }, "") + return s +} +func (this *ManagerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagerStatus{`, + `RaftID:` + fmt.Sprintf("%v", this.RaftID) + `,`, + `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, + `Leader:` + fmt.Sprintf("%v", this.Leader) + `,`, + `Reachability:` + fmt.Sprintf("%v", this.Reachability) + `,`, + `}`, + }, "") + return s +} +func (this *FileTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FileTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `GID:` + fmt.Sprintf("%v", this.GID) + `,`, + `Mode:` + fmt.Sprintf("%v", this.Mode) 
+ `,`, + `}`, + }, "") + return s +} +func (this *RuntimeTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeTarget{`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `SecretID:` + fmt.Sprintf("%v", this.SecretID) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference{`, + `ConfigID:` + fmt.Sprintf("%v", this.ConfigID) + `,`, + `ConfigName:` + fmt.Sprintf("%v", this.ConfigName) + `,`, + `Target:` + fmt.Sprintf("%v", this.Target) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference_File{`, + `File:` + strings.Replace(fmt.Sprintf("%v", this.File), "FileTarget", "FileTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigReference_Runtime) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigReference_Runtime{`, + `Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "RuntimeTarget", "RuntimeTarget", 1) + `,`, + `}`, + }, "") + return s +} +func (this *BlacklistedCertificate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BlacklistedCertificate{`, + `Expiry:` + strings.Replace(fmt.Sprintf("%v", this.Expiry), "Timestamp", "google_protobuf.Timestamp", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HealthConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HealthConfig{`, + `Test:` + fmt.Sprintf("%v", this.Test) + `,`, + `Interval:` + strings.Replace(fmt.Sprintf("%v", this.Interval), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Timeout:` + strings.Replace(fmt.Sprintf("%v", this.Timeout), "Duration", "google_protobuf1.Duration", 1) + `,`, + `Retries:` + fmt.Sprintf("%v", this.Retries) + `,`, + `StartPeriod:` + strings.Replace(fmt.Sprintf("%v", this.StartPeriod), "Duration", "google_protobuf1.Duration", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MaybeEncryptedRecord) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MaybeEncryptedRecord{`, + `Algorithm:` + fmt.Sprintf("%v", this.Algorithm) + `,`, + `Data:` + fmt.Sprintf("%v", this.Data) + `,`, + `Nonce:` + fmt.Sprintf("%v", this.Nonce) + `,`, + `}`, + }, "") + return s +} +func (this *RootRotation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootRotation{`, + `CACert:` + fmt.Sprintf("%v", this.CACert) + `,`, + `CAKey:` + fmt.Sprintf("%v", this.CAKey) + `,`, + `CrossSignedCACert:` + fmt.Sprintf("%v", this.CrossSignedCACert) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges{`, + `CredentialSpec:` + strings.Replace(fmt.Sprintf("%v", this.CredentialSpec), "Privileges_CredentialSpec", "Privileges_CredentialSpec", 1) + `,`, + 
`SELinuxContext:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxContext), "Privileges_SELinuxContext", "Privileges_SELinuxContext", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec{`, + `Source:` + fmt.Sprintf("%v", this.Source) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_File) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_File{`, + `File:` + fmt.Sprintf("%v", this.File) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_Registry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_Registry{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_CredentialSpec_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_CredentialSpec_Config{`, + `Config:` + fmt.Sprintf("%v", this.Config) + `,`, + `}`, + }, "") + return s +} +func (this *Privileges_SELinuxContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Privileges_SELinuxContext{`, + `Disable:` + fmt.Sprintf("%v", this.Disable) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func valueToStringTypes(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Version) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Version: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Version: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IndexEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IndexEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IndexEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Val = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Annotations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Annotations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Annotations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := 
iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Indices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Indices = append(m.Indices, IndexEntry{}) + if err := m.Indices[len(m.Indices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: NamedGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NamedGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiscreteGenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiscreteGenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiscreteGenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenericResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenericResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenericResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamedResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &NamedGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_NamedResourceSpec{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscreteResourceSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DiscreteGenericResource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Resource = &GenericResource_DiscreteResourceSpec{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Resources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Resources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NanoCPUs", wireType) + } + m.NanoCPUs = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NanoCPUs |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MemoryBytes", wireType) + } + m.MemoryBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MemoryBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Generic", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Generic = append(m.Generic, &GenericResource{}) + if err := m.Generic[len(m.Generic)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = &Resources{} + } + if err := m.Limits.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reservations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reservations == nil { + m.Reservations = &Resources{} + } + if err := 
m.Reservations.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SwapBytes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SwapBytes == nil { + m.SwapBytes = &google_protobuf2.Int64Value{} + } + if err := m.SwapBytes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MemorySwappiness", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MemorySwappiness == nil { + m.MemorySwappiness = &google_protobuf2.Int64Value{} + } + if err := m.MemorySwappiness.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EngineDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EngineDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EngineDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
EngineVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EngineVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Plugins = append(m.Plugins, PluginDescription{}) + if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeDescription) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeDescription: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeDescription: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resources == nil { + m.Resources = &Resources{} + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Engine", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Engine == nil { + m.Engine = &EngineDescription{} + } + if err := m.Engine.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLSInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TLSInfo == nil { + m.TLSInfo = &NodeTLSInfo{} + } + if err := m.TLSInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FIPS", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.FIPS = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeTLSInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeTLSInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeTLSInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TrustRoot", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TrustRoot = append(m.TrustRoot[:0], dAtA[iNdEx:postIndex]...) 
+ if m.TrustRoot == nil { + m.TrustRoot = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerSubject", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerSubject = append(m.CertIssuerSubject[:0], dAtA[iNdEx:postIndex]...) + if m.CertIssuerSubject == nil { + m.CertIssuerSubject = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CertIssuerPublicKey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CertIssuerPublicKey = append(m.CertIssuerPublicKey[:0], dAtA[iNdEx:postIndex]...) + if m.CertIssuerPublicKey == nil { + m.CertIssuerPublicKey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RaftMemberStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RaftMemberStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RaftMemberStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Leader = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType) + } + m.Reachability = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (NodeStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Image) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Image: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reference = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Mount_MountType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BindOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BindOptions == nil { + m.BindOptions = &Mount_BindOptions{} + } + if err := m.BindOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeOptions == nil { + m.VolumeOptions = &Mount_VolumeOptions{} + } + if err := m.VolumeOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TmpfsOptions == nil { + m.TmpfsOptions = &Mount_TmpfsOptions{} + } + if err := m.TmpfsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Consistency", wireType) + } + m.Consistency = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Consistency |= (Mount_MountConsistency(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_BindOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BindOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BindOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Propagation", wireType) + } + m.Propagation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Propagation |= (Mount_BindOptions_MountPropagation(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NonRecursive", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NonRecursive = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_VolumeOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NoCopy", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.NoCopy = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum 
== 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverConfig == nil { + m.DriverConfig = &Driver{} + } + if err := m.DriverConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mount_TmpfsOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TmpfsOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TmpfsOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.SizeBytes |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (os.FileMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RestartPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RestartPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RestartPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Condition", wireType) + } + m.Condition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Condition |= (RestartPolicy_RestartCondition(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Delay == nil { + m.Delay = &google_protobuf1.Duration{} + } + if err := m.Delay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxAttempts", wireType) + } + m.MaxAttempts = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxAttempts |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Window == nil { + m.Window = &google_protobuf1.Duration{} + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + m.Parallelism = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Parallelism |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Delay", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := types.StdDurationUnmarshal(&m.Delay, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureAction", wireType) + } + m.FailureAction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailureAction |= (UpdateConfig_FailureAction(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if m.Monitor == nil { + m.Monitor = &google_protobuf1.Duration{} + } + if err := m.Monitor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxFailureRatio", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.MaxFailureRatio = float32(math.Float32frombits(v)) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + m.Order = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Order |= (UpdateConfig_UpdateOrder(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (UpdateStatus_UpdateState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartedAt == nil { + m.StartedAt = &google_protobuf.Timestamp{} + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletedAt == nil { + m.CompletedAt = 
&google_protobuf.Timestamp{} + } + if err := m.CompletedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PID", wireType) + } + m.PID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PID |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for 
iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, &PortConfig{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TaskStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TaskStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TaskStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Timestamp == nil { + m.Timestamp = &google_protobuf.Timestamp{} + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + 
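+ // The input ended mid-varint while reading the string's length
+ // prefix, so the buffer is truncated rather than merely malformed.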
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ContainerStatus{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.RuntimeStatus = &TaskStatus_Container{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortStatus", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortStatus == nil { + m.PortStatus = &PortStatus{} + } + if err := m.PortStatus.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedBy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppliedBy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppliedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AppliedAt == nil { + m.AppliedAt = 
&google_protobuf.Timestamp{} + } + if err := m.AppliedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkAttachmentConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkAttachmentConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkAttachmentConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Target = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aliases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Aliases = append(m.Aliases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DriverAttachmentOpts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + 
msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DriverAttachmentOpts == nil { + m.DriverAttachmentOpts = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DriverAttachmentOpts[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Family", wireType) + } + m.Family = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Family |= (IPAMConfig_AddressFamily(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Subnet", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subnet = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reserved", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Reserved == nil { + m.Reserved = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Reserved[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (PortConfig_Protocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + m.TargetPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TargetPort |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishedPort", wireType) + } + m.PublishedPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishedPort |= 
(uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishMode", wireType) + } + m.PublishMode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PublishMode |= (PortConfig_PublishMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Driver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Driver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Driver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + 
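+ // Protobuf encodes map entries as nested messages with the key in
+ // field 1 and the value in field 2; after validating the key's length
+ // prefix, the key bytes are copied straight out of the input slice.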
mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPAMOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPAMOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPAMOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Driver == nil { + m.Driver = &Driver{} + } + if err := m.Driver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Configs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Configs = append(m.Configs, &IPAMConfig{}) + if err := m.Configs[len(m.Configs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Peer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Peer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Peer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Peer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Peer == nil { + 
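+ // Lazily allocate the nested Peer message, then hand its
+ // length-delimited payload to the message's own Unmarshal.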
m.Peer = &Peer{} + } + if err := m.Peer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IssuanceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IssuanceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IssuanceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= (IssuanceStatus_State(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Err = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AcceptancePolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AcceptancePolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, &AcceptancePolicy_RoleAdmissionPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleAdmissionPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleAdmissionPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + m.Role = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Role |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Autoaccept", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Autoaccept = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &AcceptancePolicy_RoleAdmissionPolicy_Secret{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AcceptancePolicy_RoleAdmissionPolicy_Secret) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Alg", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Alg = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalCA) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalCA: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalCA: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + m.Protocol = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Protocol |= (ExternalCA_CAProtocol(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
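+ // For wire type 2 (length-delimited) fields this varint is not the
+ // value itself but the byte length of the payload that follows, in
+ // this case the URL string.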
+ stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.URL = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthTypes + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...) 
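+ // append into m.CACert[:0] copies the certificate bytes out of dAtA
+ // while reusing any existing backing array; the nil check below then
+ // normalizes a zero-length field to a non-nil empty slice.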
+ if m.CACert == nil { + m.CACert = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CAConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CAConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CAConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeCertExpiry", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeCertExpiry == nil { + m.NodeCertExpiry = &google_protobuf1.Duration{} + } + if err := m.NodeCertExpiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalCAs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalCAs = append(m.ExternalCAs, &ExternalCA{}) + if err := m.ExternalCAs[len(m.ExternalCAs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SigningCACert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SigningCACert = append(m.SigningCACert[:0], dAtA[iNdEx:postIndex]...) 
+			if m.SigningCACert == nil {
+				m.SigningCACert = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SigningCAKey", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SigningCAKey = append(m.SigningCAKey[:0], dAtA[iNdEx:postIndex]...)
+			if m.SigningCAKey == nil {
+				m.SigningCAKey = []byte{}
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ForceRotate", wireType)
+			}
+			m.ForceRotate = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ForceRotate |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *OrchestrationConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: OrchestrationConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: OrchestrationConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TaskHistoryRetentionLimit", wireType)
+			}
+			m.TaskHistoryRetentionLimit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TaskHistoryRetentionLimit |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskDefaults) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskDefaults: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskDefaults: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogDriver", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LogDriver == nil {
+				m.LogDriver = &Driver{}
+			}
+			if err := m.LogDriver.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DispatcherConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DispatcherConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DispatcherConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatPeriod", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.HeartbeatPeriod == nil {
+				m.HeartbeatPeriod = &google_protobuf1.Duration{}
+			}
+			if err := m.HeartbeatPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RaftConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RaftConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RaftConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SnapshotInterval", wireType)
+			}
+			m.SnapshotInterval = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SnapshotInterval |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeepOldSnapshots", wireType)
+			}
+			m.KeepOldSnapshots = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.KeepOldSnapshots |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogEntriesForSlowFollowers", wireType)
+			}
+			m.LogEntriesForSlowFollowers = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LogEntriesForSlowFollowers |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HeartbeatTick", wireType)
+			}
+			m.HeartbeatTick = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HeartbeatTick |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ElectionTick", wireType)
+			}
+			m.ElectionTick = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ElectionTick |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EncryptionConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EncryptionConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EncryptionConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AutoLockManagers", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.AutoLockManagers = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SpreadOver) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SpreadOver: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SpreadOver: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SpreadDescriptor", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SpreadDescriptor = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PlacementPreference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PlacementPreference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PlacementPreference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spread", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &SpreadOver{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Preference = &PlacementPreference_Spread{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Placement) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Placement: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Placement: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Constraints = append(m.Constraints, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Preferences", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Preferences = append(m.Preferences, &PlacementPreference{})
+			if err := m.Preferences[len(m.Preferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Platforms = append(m.Platforms, &Platform{})
+			if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType)
+			}
+			m.MaxReplicas = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxReplicas |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *JoinTokens) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: JoinTokens: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: JoinTokens: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Worker", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Worker = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Manager = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RootCA) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RootCA: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RootCA: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CAKey = append(m.CAKey[:0], dAtA[iNdEx:postIndex]...)
+			if m.CAKey == nil {
+				m.CAKey = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...)
+			if m.CACert == nil {
+				m.CACert = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CACertHash", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CACertHash = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field JoinTokens", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.JoinTokens.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RootRotation", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RootRotation == nil {
+				m.RootRotation = &RootRotation{}
+			}
+			if err := m.RootRotation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LastForcedRotation", wireType)
+			}
+			m.LastForcedRotation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LastForcedRotation |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Certificate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Certificate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			m.Role = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Role |= (NodeRole(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CSR", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CSR = append(m.CSR[:0], dAtA[iNdEx:postIndex]...)
+			if m.CSR == nil {
+				m.CSR = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...)
+			if m.Certificate == nil {
+				m.Certificate = []byte{}
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CN", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CN = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EncryptionKey) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EncryptionKey: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EncryptionKey: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subsystem", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Subsystem = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType)
+			}
+			m.Algorithm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Algorithm |= (EncryptionKey_Algorithm(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LamportTime", wireType)
+			}
+			m.LamportTime = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LamportTime |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ManagerStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ManagerStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ManagerStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftID", wireType)
+			}
+			m.RaftID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftID |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Addr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Leader = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reachability", wireType)
+			}
+			m.Reachability = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Reachability |= (RaftMemberStatus_Reachability(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FileTarget) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FileTarget: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FileTarget: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.UID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.GID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
+			}
+			m.Mode = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Mode |= (os.FileMode(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RuntimeTarget) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RuntimeTarget: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RuntimeTarget: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SecretReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SecretReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SecretName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field File", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &FileTarget{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Target = &SecretReference_File{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfigReference) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfigReference: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfigReference: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ConfigID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfigName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ConfigName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field File", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &FileTarget{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Target = &ConfigReference_File{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RuntimeTarget{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Target = &ConfigReference_Runtime{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BlacklistedCertificate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BlacklistedCertificate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BlacklistedCertificate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expiry", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Expiry == nil {
+				m.Expiry = &google_protobuf.Timestamp{}
+			}
+			if err := m.Expiry.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HealthConfig) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HealthConfig: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HealthConfig: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Test", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Test = append(m.Test, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Interval", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Interval == nil {
+				m.Interval = &google_protobuf1.Duration{}
+			}
+			if err := m.Interval.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timeout", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Timeout == nil {
+				m.Timeout = &google_protobuf1.Duration{}
+			}
+			if err := m.Timeout.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Retries", wireType)
+			}
+			m.Retries = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Retries |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartPeriod", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.StartPeriod == nil {
+				m.StartPeriod = &google_protobuf1.Duration{}
+			}
+			if err := m.StartPeriod.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MaybeEncryptedRecord) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MaybeEncryptedRecord: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MaybeEncryptedRecord: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Algorithm", wireType)
+			}
+			m.Algorithm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Algorithm |= (MaybeEncryptedRecord_Algorithm(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nonce", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Nonce = append(m.Nonce[:0], dAtA[iNdEx:postIndex]...)
+			if m.Nonce == nil {
+				m.Nonce = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RootRotation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RootRotation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RootRotation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CACert", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CACert = append(m.CACert[:0], dAtA[iNdEx:postIndex]...)
+			if m.CACert == nil {
+				m.CACert = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CAKey", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CAKey = append(m.CAKey[:0], dAtA[iNdEx:postIndex]...)
+			if m.CAKey == nil {
+				m.CAKey = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CrossSignedCACert", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CrossSignedCACert = append(m.CrossSignedCACert[:0], dAtA[iNdEx:postIndex]...)
+			if m.CrossSignedCACert == nil {
+				m.CrossSignedCACert = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Privileges) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTypes
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Privileges: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Privileges: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CredentialSpec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CredentialSpec == nil {
+				m.CredentialSpec = &Privileges_CredentialSpec{}
+			}
+			if err := m.CredentialSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SELinuxContext", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTypes
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.SELinuxContext == nil {
+				m.SELinuxContext = &Privileges_SELinuxContext{}
+			}
+			if err := m.SELinuxContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTypes(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTypes
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Privileges_CredentialSpec) Unmarshal(dAtA []byte) error {
+ l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CredentialSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CredentialSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_File{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_Registry{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Source = &Privileges_CredentialSpec_Config{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Privileges_SELinuxContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxContext: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disable = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTypes(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTypes + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTypes + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTypes(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) } + +var fileDescriptorTypes = []byte{ + // 5274 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x70, 0x23, 0x49, + 0x56, 0xb6, 0x64, 0x49, 0x96, 0x9e, 0x64, 0xbb, 0x9c, 0xed, 0xed, 0x71, 0x6b, 0x7a, 0x6c, 0x4d, + 0xcd, 0xcc, 0xce, 0x6c, 0xef, 0xa0, 0xfe, 0x9b, 0x9d, 0xe8, 0x99, 0x61, 0x76, 0x46, 0x7f, 0x6e, + 0x6b, 0xdb, 0x96, 0x14, 0x29, 0xb9, 0x7b, 0x97, 0x08, 0x28, 0xca, 0x55, 0x69, 0xb9, 0xc6, 0xa5, + 0xca, 0xa2, 0xaa, 0x64, 0xb7, 0x58, 0x08, 0xe6, 0x04, 0x84, 0x6f, 0x5c, 0x96, 0xdd, 0x20, 0x1c, + 0x41, 0x04, 0xdc, 0x38, 0x70, 0xe0, 0xc0, 0xc2, 0x69, 0x88, 0x20, 0x88, 0x0d, 0x2e, 0xb0, 0x10, + 0x01, 0x1b, 0x40, 0x18, 0xc6, 0x07, 0x6e, 0x04, 0x5c, 0x08, 0x2e, 0x1c, 0x88, 0xfc, 0xa9, 0x52, + 0x59, 0x5d, 0xb6, 0x67, 0x76, 0xb9, 0xd8, 0xca, 0xf7, 0xbe, 0xf7, 0x32, 0xf3, 0x65, 0xe6, 0xcb, + 0xf7, 0x5e, 0x16, 0xdc, 0x19, 0x5a, 0xc1, 0xc1, 0x78, 0xaf, 0x6a, 0xd0, 0xd1, 0x5d, 0x93, 0x1a, + 0x87, 0xc4, 0xbb, 0xeb, 0x1f, 0xeb, 0xde, 0xe8, 0xd0, 0x0a, 0xee, 0xea, 0xae, 0x75, 0x37, 0x98, + 0xb8, 0xc4, 0xaf, 0xba, 0x1e, 0x0d, 0x28, 0x42, 0x02, 0x50, 0x0d, 0x01, 0xd5, 0xa3, 0xfb, 0xe5, + 0x8d, 0x21, 0xa5, 0x43, 0x9b, 0xdc, 0xe5, 0x88, 0xbd, 0xf1, 0xfe, 0xdd, 0xc0, 0x1a, 0x11, 0x3f, + 0xd0, 0x47, 0xae, 0x10, 0x2a, 0xaf, 0xcf, 0x02, 0xcc, 0xb1, 0xa7, 0x07, 0x16, 0x75, 0x2e, 0xe3, + 0x1f, 0x7b, 0xba, 0xeb, 0x12, 0x4f, 0x76, 0x5a, 0x5e, 0x1d, 0xd2, 0x21, 0xe5, 0x3f, 0xef, 0xb2, + 0x5f, 0x82, 0xaa, 0x6e, 0xc0, 0xc2, 0x53, 0xe2, 0xf9, 0x16, 0x75, 0xd0, 0x2a, 0x64, 0x2d, 0xc7, + 0x24, 0xcf, 0xd7, 0x52, 0x95, 0xd4, 0x5b, 0x19, 0x2c, 0x1a, 0xea, 0x3d, 0x80, 0x36, 0xfb, 0xd1, + 0x72, 0x02, 0x6f, 0x82, 0x14, 0x98, 0x3f, 0x24, 0x13, 0x8e, 0x28, 0x60, 0xf6, 0x93, 0x51, 0x8e, + 0x74, 0x7b, 0x2d, 0x2d, 0x28, 0x47, 0xba, 0xad, 0x7e, 0x9e, 
0x82, 0x62, 0xcd, 0x71, 0x68, 0xc0, + 0x47, 0xe7, 0x23, 0x04, 0x19, 0x47, 0x1f, 0x11, 0x29, 0xc4, 0x7f, 0xa3, 0x06, 0xe4, 0x6c, 0x7d, + 0x8f, 0xd8, 0xfe, 0x5a, 0xba, 0x32, 0xff, 0x56, 0xf1, 0xc1, 0xd7, 0xab, 0x2f, 0x9a, 0xa4, 0x1a, + 0x53, 0x52, 0xdd, 0xe6, 0x68, 0x3e, 0x08, 0x2c, 0x45, 0xd1, 0x37, 0x61, 0xc1, 0x72, 0x4c, 0xcb, + 0x20, 0xfe, 0x5a, 0x86, 0x6b, 0x59, 0x4f, 0xd2, 0x32, 0x1d, 0x7d, 0x3d, 0xf3, 0xa3, 0xb3, 0x8d, + 0x39, 0x1c, 0x0a, 0x95, 0xdf, 0x83, 0x62, 0x4c, 0x6d, 0xc2, 0xdc, 0x56, 0x21, 0x7b, 0xa4, 0xdb, + 0x63, 0x22, 0x67, 0x27, 0x1a, 0xef, 0xa7, 0x1f, 0xa5, 0xd4, 0x8f, 0x61, 0xb5, 0xa3, 0x8f, 0x88, + 0xf9, 0x98, 0x38, 0xc4, 0xb3, 0x0c, 0x4c, 0x7c, 0x3a, 0xf6, 0x0c, 0xc2, 0xe6, 0x7a, 0x68, 0x39, + 0x66, 0x38, 0x57, 0xf6, 0x3b, 0x59, 0x8b, 0xda, 0x80, 0x97, 0x9a, 0x96, 0x6f, 0x78, 0x24, 0x20, + 0x5f, 0x5a, 0xc9, 0x7c, 0xa8, 0xe4, 0x2c, 0x05, 0xcb, 0xb3, 0xd2, 0xbf, 0x00, 0x37, 0x98, 0x89, + 0x4d, 0xcd, 0x93, 0x14, 0xcd, 0x77, 0x89, 0xc1, 0x95, 0x15, 0x1f, 0xbc, 0x95, 0x64, 0xa1, 0xa4, + 0x99, 0x6c, 0xcd, 0xe1, 0x15, 0xae, 0x26, 0x24, 0xf4, 0x5d, 0x62, 0x20, 0x03, 0x6e, 0x9a, 0x72, + 0xd0, 0x33, 0xea, 0xd3, 0x5c, 0x7d, 0xe2, 0x32, 0x5e, 0x32, 0xcd, 0xad, 0x39, 0xbc, 0x1a, 0x2a, + 0x8b, 0x77, 0x52, 0x07, 0xc8, 0x87, 0xba, 0xd5, 0xef, 0xa7, 0xa0, 0x10, 0x32, 0x7d, 0xf4, 0x35, + 0x28, 0x38, 0xba, 0x43, 0x35, 0xc3, 0x1d, 0xfb, 0x7c, 0x42, 0xf3, 0xf5, 0xd2, 0xf9, 0xd9, 0x46, + 0xbe, 0xa3, 0x3b, 0xb4, 0xd1, 0xdb, 0xf5, 0x71, 0x9e, 0xb1, 0x1b, 0xee, 0xd8, 0x47, 0xaf, 0x42, + 0x69, 0x44, 0x46, 0xd4, 0x9b, 0x68, 0x7b, 0x93, 0x80, 0xf8, 0xd2, 0x6c, 0x45, 0x41, 0xab, 0x33, + 0x12, 0xfa, 0x10, 0x16, 0x86, 0x62, 0x48, 0x6b, 0xf3, 0x7c, 0xfb, 0xbc, 0x96, 0x34, 0xfa, 0x99, + 0x51, 0xe3, 0x50, 0x46, 0xfd, 0x5e, 0x1a, 0x56, 0x23, 0x2a, 0xf9, 0x95, 0xb1, 0xe5, 0x91, 0x11, + 0x71, 0x02, 0x1f, 0x7d, 0x03, 0x72, 0xb6, 0x35, 0xb2, 0x02, 0x5f, 0xda, 0xfc, 0x95, 0x24, 0xb5, + 0xd1, 0xa4, 0xb0, 0x04, 0xa3, 0x1a, 0x94, 0x3c, 0xe2, 0x13, 0xef, 0x48, 0xec, 0x78, 0x69, 0xd1, + 0x6b, 0x84, 0x2f, 0x88, 0xa0, 0xf7, 0x01, 0xfc, 0x63, 0xdd, 0x95, 0x53, 0x9e, 0xe7, 0x0a, 0x5e, + 0xae, 0x0a, 0xbf, 0x50, 0x0d, 0xfd, 0x42, 0xb5, 0xed, 0x04, 0xef, 0xbe, 0xf3, 0x94, 0xed, 0x1f, + 0x5c, 0x60, 0x70, 0x61, 0x8d, 0x2d, 0x58, 0x91, 0x06, 0x63, 0x34, 0xd7, 0x72, 0x88, 0xcf, 0x8e, + 0xd5, 0xb5, 0x2a, 0x14, 0x21, 0xd5, 0x8f, 0x84, 0xd4, 0x4d, 0xc8, 0xf7, 0x6c, 0x3d, 0xd8, 0xa7, + 0xde, 0x08, 0xa9, 0x50, 0xd2, 0x3d, 0xe3, 0xc0, 0x0a, 0x88, 0x11, 0x8c, 0xbd, 0xd0, 0x07, 0x5c, + 0xa0, 0xa1, 0x9b, 0x90, 0xa6, 0x62, 0xba, 0x85, 0x7a, 0xee, 0xfc, 0x6c, 0x23, 0xdd, 0xed, 0xe3, + 0x34, 0xf5, 0xd5, 0x0f, 0x60, 0xa5, 0x67, 0x8f, 0x87, 0x96, 0xd3, 0x24, 0xbe, 0xe1, 0x59, 0x2e, + 0x9b, 0x23, 0x3b, 0x1b, 0xcc, 0x93, 0x86, 0x67, 0x83, 0xfd, 0x8e, 0x1c, 0x4c, 0x7a, 0xea, 0x60, + 0xd4, 0xdf, 0x4a, 0xc3, 0x4a, 0xcb, 0x19, 0x5a, 0x0e, 0x89, 0x4b, 0xbf, 0x01, 0x4b, 0x84, 0x13, + 0xb5, 0x23, 0xe1, 0xf4, 0xa4, 0x9e, 0x45, 0x41, 0x0d, 0x3d, 0x61, 0x7b, 0xc6, 0x3b, 0xdd, 0x4f, + 0x5a, 0x84, 0x17, 0xb4, 0x27, 0xfa, 0xa8, 0x16, 0x2c, 0xb8, 0x7c, 0x12, 0xbe, 0xdc, 0x64, 0x6f, + 0x24, 0xe9, 0x7a, 0x61, 0x9e, 0xa1, 0xab, 0x92, 0xb2, 0x3f, 0x8b, 0xab, 0xfa, 0xeb, 0x34, 0x2c, + 0x77, 0xa8, 0x79, 0xc1, 0x0e, 0x65, 0xc8, 0x1f, 0x50, 0x3f, 0x88, 0xb9, 0xe5, 0xa8, 0x8d, 0x1e, + 0x41, 0xde, 0x95, 0xcb, 0x27, 0xf7, 0xe0, 0xed, 0xe4, 0x21, 0x0b, 0x0c, 0x8e, 0xd0, 0xe8, 0x03, + 0x28, 0x84, 0x07, 0x37, 0xdc, 0x7d, 0xd7, 0x6c, 0xdf, 0x29, 0x1e, 0x7d, 0x08, 0x39, 0xb1, 0x08, + 0x72, 0xd3, 0xbd, 0xf1, 0x85, 0x6c, 0x8e, 0xa5, 0x10, 0x7a, 0x0c, 0xf9, 0xc0, 0xf6, 
0x35, 0xcb, + 0xd9, 0xa7, 0x6b, 0x59, 0xae, 0x60, 0x23, 0xd1, 0xd5, 0x51, 0x93, 0x0c, 0xb6, 0xfb, 0x6d, 0x67, + 0x9f, 0xd6, 0x8b, 0xe7, 0x67, 0x1b, 0x0b, 0xb2, 0x81, 0x17, 0x02, 0xdb, 0x67, 0x3f, 0xd0, 0x6d, + 0xc8, 0xec, 0x5b, 0xae, 0xbf, 0x96, 0xab, 0xa4, 0xde, 0xca, 0xd7, 0xf3, 0xe7, 0x67, 0x1b, 0x99, + 0xcd, 0x76, 0xaf, 0x8f, 0x39, 0x55, 0xfd, 0x9d, 0x14, 0x14, 0x63, 0x3a, 0xd0, 0x2b, 0x00, 0x81, + 0x37, 0xf6, 0x03, 0xcd, 0xa3, 0x34, 0xe0, 0xa6, 0x2c, 0xe1, 0x02, 0xa7, 0x60, 0x4a, 0x03, 0x54, + 0x85, 0x1b, 0x06, 0xf1, 0x02, 0xcd, 0xf2, 0xfd, 0x31, 0xf1, 0x34, 0x7f, 0xbc, 0xf7, 0x09, 0x31, + 0x02, 0x6e, 0xd6, 0x12, 0x5e, 0x61, 0xac, 0x36, 0xe7, 0xf4, 0x05, 0x03, 0x3d, 0x84, 0x9b, 0x71, + 0xbc, 0x3b, 0xde, 0xb3, 0x2d, 0x43, 0x63, 0x4b, 0x3d, 0xcf, 0x45, 0x6e, 0x4c, 0x45, 0x7a, 0x9c, + 0xf7, 0x84, 0x4c, 0xd4, 0x9f, 0xa4, 0x40, 0xc1, 0xfa, 0x7e, 0xb0, 0x43, 0x46, 0x7b, 0xc4, 0xeb, + 0x07, 0x7a, 0x30, 0xf6, 0xd1, 0x4d, 0xc8, 0xd9, 0x44, 0x37, 0x89, 0xc7, 0x07, 0x95, 0xc7, 0xb2, + 0x85, 0x76, 0x99, 0x97, 0xd1, 0x8d, 0x03, 0x7d, 0xcf, 0xb2, 0xad, 0x60, 0xc2, 0x87, 0xb2, 0x94, + 0xbc, 0xc1, 0x67, 0x75, 0x56, 0x71, 0x4c, 0x10, 0x5f, 0x50, 0x83, 0xd6, 0x60, 0x61, 0x44, 0x7c, + 0x5f, 0x1f, 0x12, 0x3e, 0xd2, 0x02, 0x0e, 0x9b, 0xea, 0x07, 0x50, 0x8a, 0xcb, 0xa1, 0x22, 0x2c, + 0xec, 0x76, 0x9e, 0x74, 0xba, 0xcf, 0x3a, 0xca, 0x1c, 0x5a, 0x86, 0xe2, 0x6e, 0x07, 0xb7, 0x6a, + 0x8d, 0xad, 0x5a, 0x7d, 0xbb, 0xa5, 0xa4, 0xd0, 0x22, 0x14, 0xa6, 0xcd, 0xb4, 0xfa, 0x27, 0x29, + 0x00, 0x66, 0x6e, 0x39, 0xa9, 0xf7, 0x21, 0xeb, 0x07, 0x7a, 0x20, 0xf6, 0xec, 0xd2, 0x83, 0xd7, + 0x2f, 0x5b, 0x61, 0x39, 0x5e, 0xf6, 0x8f, 0x60, 0x21, 0x12, 0x1f, 0x61, 0xfa, 0xc2, 0x08, 0x99, + 0xfb, 0xd0, 0x4d, 0xd3, 0x93, 0x03, 0xe7, 0xbf, 0xd5, 0x0f, 0x20, 0xcb, 0xa5, 0x2f, 0x0e, 0x37, + 0x0f, 0x99, 0x26, 0xfb, 0x95, 0x42, 0x05, 0xc8, 0xe2, 0x56, 0xad, 0xf9, 0x1d, 0x25, 0x8d, 0x14, + 0x28, 0x35, 0xdb, 0xfd, 0x46, 0xb7, 0xd3, 0x69, 0x35, 0x06, 0xad, 0xa6, 0x32, 0xaf, 0xbe, 0x01, + 0xd9, 0xf6, 0x88, 0x69, 0xbe, 0xcd, 0x0e, 0xc4, 0x3e, 0xf1, 0x88, 0x63, 0x84, 0xe7, 0x6c, 0x4a, + 0x50, 0x3f, 0x2f, 0x41, 0x76, 0x87, 0x8e, 0x9d, 0x00, 0x3d, 0x88, 0x39, 0xb5, 0xa5, 0xe4, 0x28, + 0x86, 0x03, 0xab, 0x83, 0x89, 0x4b, 0xa4, 0xd3, 0xbb, 0x09, 0x39, 0x71, 0x74, 0xe4, 0x74, 0x64, + 0x8b, 0xd1, 0x03, 0xdd, 0x1b, 0x92, 0x40, 0xce, 0x47, 0xb6, 0xd0, 0x5b, 0xec, 0x56, 0xd5, 0x4d, + 0xea, 0xd8, 0x13, 0x7e, 0xc2, 0xf2, 0xe2, 0xea, 0xc4, 0x44, 0x37, 0xbb, 0x8e, 0x3d, 0xc1, 0x11, + 0x17, 0x6d, 0x41, 0x69, 0xcf, 0x72, 0x4c, 0x8d, 0xba, 0xe2, 0x22, 0xca, 0x5e, 0x7e, 0x1e, 0xc5, + 0xa8, 0xea, 0x96, 0x63, 0x76, 0x05, 0x18, 0x17, 0xf7, 0xa6, 0x0d, 0xd4, 0x81, 0xa5, 0x23, 0x6a, + 0x8f, 0x47, 0x24, 0xd2, 0x95, 0xe3, 0xba, 0xde, 0xbc, 0x5c, 0xd7, 0x53, 0x8e, 0x0f, 0xb5, 0x2d, + 0x1e, 0xc5, 0x9b, 0xe8, 0x09, 0x2c, 0x06, 0x23, 0x77, 0xdf, 0x8f, 0xd4, 0x2d, 0x70, 0x75, 0x5f, + 0xbd, 0xc2, 0x60, 0x0c, 0x1e, 0x6a, 0x2b, 0x05, 0xb1, 0x16, 0x7a, 0x0c, 0x45, 0x83, 0x3a, 0xbe, + 0xe5, 0x07, 0xc4, 0x31, 0x26, 0x6b, 0x79, 0x6e, 0xfb, 0x2b, 0x66, 0xd9, 0x98, 0x82, 0x71, 0x5c, + 0xb2, 0xfc, 0xc3, 0x79, 0x28, 0xc6, 0x4c, 0x80, 0xfa, 0x50, 0x74, 0x3d, 0xea, 0xea, 0x43, 0x7e, + 0x2b, 0xcb, 0x45, 0xbd, 0xff, 0x85, 0xcc, 0x57, 0xed, 0x4d, 0x05, 0x71, 0x5c, 0x0b, 0x7a, 0x07, + 0x4a, 0x0e, 0x75, 0x3c, 0x62, 0x8c, 0x3d, 0xdf, 0x3a, 0x12, 0x8b, 0x9e, 0xaf, 0x2b, 0xe7, 0x67, + 0x1b, 0xa5, 0x0e, 0x75, 0x70, 0x48, 0xc7, 0x17, 0x50, 0xea, 0x69, 0x1a, 0x8a, 0x31, 0x95, 0xe8, + 0x0e, 0xe4, 0x71, 0x0f, 0xb7, 0x9f, 0xd6, 0x06, 0x2d, 0x65, 0xae, 0x7c, 0xfb, 0xe4, 0xb4, 0xb2, + 0xc6, 0xc7, 
0x10, 0xef, 0xb6, 0xe7, 0x59, 0x47, 0x6c, 0xe7, 0xbf, 0x05, 0x0b, 0x21, 0x34, 0x55, + 0x7e, 0xf9, 0xe4, 0xb4, 0xf2, 0xd2, 0x2c, 0x34, 0x86, 0xc4, 0xfd, 0xad, 0x1a, 0x6e, 0x35, 0x95, + 0x74, 0x32, 0x12, 0xf7, 0x0f, 0x74, 0x8f, 0x98, 0xe8, 0xab, 0x90, 0x93, 0xc0, 0xf9, 0x72, 0xf9, + 0xe4, 0xb4, 0x72, 0x73, 0x16, 0x38, 0xc5, 0xe1, 0xfe, 0x76, 0xed, 0x69, 0x4b, 0xc9, 0x24, 0xe3, + 0x70, 0xdf, 0xd6, 0x8f, 0x08, 0x7a, 0x1d, 0xb2, 0x02, 0x96, 0x2d, 0xdf, 0x3a, 0x39, 0xad, 0x7c, + 0xe5, 0x05, 0x75, 0x0c, 0x55, 0x5e, 0xfb, 0xed, 0x3f, 0x58, 0x9f, 0xfb, 0xf3, 0x3f, 0x5c, 0x57, + 0x66, 0xd9, 0xe5, 0xff, 0x4d, 0xc1, 0xe2, 0x85, 0x1d, 0x87, 0x54, 0xc8, 0x39, 0xd4, 0xa0, 0xae, + 0xb8, 0x5c, 0xf3, 0x75, 0x38, 0x3f, 0xdb, 0xc8, 0x75, 0x68, 0x83, 0xba, 0x13, 0x2c, 0x39, 0xe8, + 0xc9, 0x4c, 0x78, 0xf0, 0xf0, 0x0b, 0x6e, 0xe7, 0xc4, 0x00, 0xe1, 0x23, 0x58, 0x34, 0x3d, 0xeb, + 0x88, 0x78, 0x9a, 0x41, 0x9d, 0x7d, 0x6b, 0x28, 0x2f, 0xce, 0x72, 0x62, 0x24, 0xcd, 0x81, 0xb8, + 0x24, 0x04, 0x1a, 0x1c, 0xff, 0x33, 0x84, 0x06, 0x65, 0x17, 0x4a, 0xf1, 0x03, 0xc2, 0x6e, 0x33, + 0xdf, 0xfa, 0x55, 0x22, 0xe3, 0x47, 0x1e, 0x60, 0xe3, 0x02, 0xa3, 0x88, 0x10, 0xf1, 0x4d, 0xc8, + 0x8c, 0xa8, 0x29, 0xf4, 0x2c, 0xd6, 0x6f, 0xb0, 0x08, 0xe5, 0x9f, 0xce, 0x36, 0x8a, 0xd4, 0xaf, + 0x6e, 0x5a, 0x36, 0xd9, 0xa1, 0x26, 0xc1, 0x1c, 0xc0, 0x7c, 0x6d, 0x78, 0x42, 0xe5, 0x6d, 0x20, + 0x9b, 0xea, 0x0f, 0x52, 0x90, 0x61, 0x4e, 0x0c, 0xbd, 0x0c, 0x99, 0x7a, 0xbb, 0xd3, 0x54, 0xe6, + 0xca, 0x2b, 0x27, 0xa7, 0x95, 0x45, 0x6e, 0x2d, 0xc6, 0x60, 0x87, 0x01, 0x6d, 0x40, 0xee, 0x69, + 0x77, 0x7b, 0x77, 0x87, 0xed, 0xbc, 0x1b, 0x27, 0xa7, 0x95, 0xe5, 0x88, 0x2d, 0xec, 0x89, 0x5e, + 0x81, 0xec, 0x60, 0xa7, 0xb7, 0xd9, 0x57, 0xd2, 0x65, 0x74, 0x72, 0x5a, 0x59, 0x8a, 0xf8, 0x7c, + 0x3a, 0xe8, 0x55, 0xc8, 0x76, 0x7a, 0xed, 0x5e, 0x4b, 0x99, 0x2f, 0xdf, 0x3c, 0x39, 0xad, 0xa0, + 0x88, 0xcd, 0x33, 0x9d, 0x9e, 0xe5, 0x92, 0xf2, 0x8a, 0xdc, 0x13, 0x85, 0x88, 0xa7, 0xfe, 0x38, + 0x05, 0xc5, 0xd8, 0x21, 0x67, 0xdb, 0xba, 0xd9, 0xda, 0xac, 0xed, 0x6e, 0x0f, 0x94, 0xb9, 0xd8, + 0xb6, 0x8e, 0x41, 0x9a, 0x64, 0x5f, 0x1f, 0xdb, 0xcc, 0xb7, 0x42, 0xa3, 0xdb, 0xe9, 0xb7, 0xfb, + 0x83, 0x56, 0x67, 0xa0, 0xa4, 0xca, 0x6b, 0x27, 0xa7, 0x95, 0xd5, 0x59, 0xf0, 0xe6, 0xd8, 0xb6, + 0xd9, 0xc6, 0x6e, 0xd4, 0x1a, 0x5b, 0xfc, 0xa4, 0x4c, 0x37, 0x76, 0x0c, 0xd5, 0xd0, 0x8d, 0x03, + 0x62, 0xa2, 0xb7, 0xa1, 0xd0, 0x6c, 0x6d, 0xb7, 0x1e, 0xd7, 0xf8, 0x8d, 0x52, 0x7e, 0xe5, 0xe4, + 0xb4, 0x72, 0xeb, 0xc5, 0xde, 0x6d, 0x32, 0xd4, 0x03, 0x62, 0xce, 0x6c, 0xf0, 0x18, 0x44, 0xfd, + 0xef, 0x34, 0x2c, 0x62, 0xe2, 0x07, 0xba, 0x17, 0xf4, 0xa8, 0x6d, 0x19, 0x13, 0xd4, 0x83, 0x82, + 0x41, 0x1d, 0xd3, 0x8a, 0xf9, 0xa6, 0x07, 0x97, 0x04, 0x69, 0x53, 0xa9, 0xb0, 0xd5, 0x08, 0x25, + 0xf1, 0x54, 0x09, 0xba, 0x0b, 0x59, 0x93, 0xd8, 0xfa, 0x44, 0x46, 0x8b, 0xb7, 0x5e, 0xc8, 0x16, + 0x9a, 0xb2, 0x50, 0x81, 0x05, 0x8e, 0xe7, 0x66, 0xfa, 0x73, 0x4d, 0x0f, 0x02, 0x32, 0x72, 0x03, + 0xb1, 0x47, 0x32, 0xb8, 0x38, 0xd2, 0x9f, 0xd7, 0x24, 0x09, 0xdd, 0x87, 0xdc, 0xb1, 0xe5, 0x98, + 0xf4, 0x58, 0x46, 0x83, 0x57, 0x28, 0x95, 0x40, 0xf5, 0x84, 0x85, 0x41, 0x33, 0xc3, 0x64, 0xdb, + 0xac, 0xd3, 0xed, 0xb4, 0xc2, 0x6d, 0x26, 0xf9, 0x5d, 0xa7, 0x43, 0x1d, 0xe6, 0x3d, 0xa0, 0xdb, + 0xd1, 0x36, 0x6b, 0xed, 0xed, 0x5d, 0xcc, 0xb6, 0xda, 0xea, 0xc9, 0x69, 0x45, 0x89, 0x20, 0x9b, + 0xba, 0x65, 0xb3, 0xf4, 0xe4, 0x16, 0xcc, 0xd7, 0x3a, 0xdf, 0x51, 0xd2, 0x65, 0xe5, 0xe4, 0xb4, + 0x52, 0x8a, 0xd8, 0x35, 0x67, 0x32, 0xb5, 0xfb, 0x6c, 0xbf, 0xea, 0xdf, 0xcc, 0x43, 0x69, 0xd7, + 0x35, 0xf5, 0x80, 0x88, 0x53, 0x8a, 
0x2a, 0x50, 0x74, 0x75, 0x4f, 0xb7, 0x6d, 0x62, 0x5b, 0xfe, + 0x48, 0x96, 0x58, 0xe2, 0x24, 0xf4, 0xde, 0x17, 0x35, 0x63, 0x3d, 0xcf, 0x4e, 0xde, 0xf7, 0xff, + 0x75, 0x23, 0x15, 0x1a, 0x74, 0x17, 0x96, 0xf6, 0xc5, 0x68, 0x35, 0xdd, 0xe0, 0x0b, 0x3b, 0xcf, + 0x17, 0xb6, 0x9a, 0xb4, 0xb0, 0xf1, 0x61, 0x55, 0xe5, 0x24, 0x6b, 0x5c, 0x0a, 0x2f, 0xee, 0xc7, + 0x9b, 0xe8, 0x21, 0x2c, 0x8c, 0xa8, 0x63, 0x05, 0xd4, 0xbb, 0x7e, 0x15, 0x42, 0x24, 0xba, 0x03, + 0x2b, 0x6c, 0x71, 0xc3, 0xf1, 0x70, 0x36, 0x0f, 0x21, 0xd2, 0x78, 0x79, 0xa4, 0x3f, 0x97, 0x1d, + 0x62, 0x46, 0x46, 0x75, 0xc8, 0x52, 0x8f, 0xc5, 0xa8, 0x39, 0x3e, 0xdc, 0xb7, 0xaf, 0x1d, 0xae, + 0x68, 0x74, 0x99, 0x0c, 0x16, 0xa2, 0xea, 0xbb, 0xb0, 0x78, 0x61, 0x12, 0x2c, 0x34, 0xeb, 0xd5, + 0x76, 0xfb, 0x2d, 0x65, 0x0e, 0x95, 0x20, 0xdf, 0xe8, 0x76, 0x06, 0xed, 0xce, 0x2e, 0x8b, 0x2d, + 0x4b, 0x90, 0xc7, 0xdd, 0xed, 0xed, 0x7a, 0xad, 0xf1, 0x44, 0x49, 0xab, 0x55, 0x28, 0xc6, 0xb4, + 0xa1, 0x25, 0x80, 0xfe, 0xa0, 0xdb, 0xd3, 0x36, 0xdb, 0xb8, 0x3f, 0x10, 0x91, 0x69, 0x7f, 0x50, + 0xc3, 0x03, 0x49, 0x48, 0xa9, 0xff, 0x99, 0x0e, 0x57, 0x54, 0x06, 0xa3, 0xf5, 0x8b, 0xc1, 0xe8, + 0x15, 0x83, 0x97, 0xe1, 0xe8, 0xb4, 0x11, 0x05, 0xa5, 0xef, 0x01, 0xf0, 0x8d, 0x43, 0x4c, 0x4d, + 0x0f, 0xe4, 0xc2, 0x97, 0x5f, 0x30, 0xf2, 0x20, 0xac, 0x04, 0xe2, 0x82, 0x44, 0xd7, 0x02, 0xf4, + 0x21, 0x94, 0x0c, 0x3a, 0x72, 0x6d, 0x22, 0x85, 0xe7, 0xaf, 0x15, 0x2e, 0x46, 0xf8, 0x5a, 0x10, + 0x0f, 0x87, 0x33, 0x17, 0x03, 0xf6, 0xdf, 0x4c, 0x85, 0x96, 0x49, 0x88, 0x80, 0x4b, 0x90, 0xdf, + 0xed, 0x35, 0x6b, 0x83, 0x76, 0xe7, 0xb1, 0x92, 0x42, 0x00, 0x39, 0x6e, 0xea, 0xa6, 0x92, 0x66, + 0x91, 0x7b, 0xa3, 0xbb, 0xd3, 0xdb, 0x6e, 0x71, 0x8f, 0x85, 0x56, 0x41, 0x09, 0x8d, 0xad, 0x71, + 0x43, 0xb6, 0x9a, 0x4a, 0x06, 0xdd, 0x80, 0xe5, 0x88, 0x2a, 0x25, 0xb3, 0xe8, 0x26, 0xa0, 0x88, + 0x38, 0x55, 0x91, 0x53, 0x7f, 0x1d, 0x96, 0x1b, 0xd4, 0x09, 0x74, 0xcb, 0x89, 0xb2, 0x9a, 0x07, + 0x6c, 0xd2, 0x92, 0xa4, 0x59, 0xb2, 0x42, 0x56, 0x5f, 0x3e, 0x3f, 0xdb, 0x28, 0x46, 0xd0, 0x76, + 0x93, 0x87, 0x67, 0xb2, 0x61, 0xb2, 0xf3, 0xeb, 0x5a, 0x26, 0x37, 0x6e, 0xb6, 0xbe, 0x70, 0x7e, + 0xb6, 0x31, 0xdf, 0x6b, 0x37, 0x31, 0xa3, 0xa1, 0x97, 0xa1, 0x40, 0x9e, 0x5b, 0x81, 0x66, 0xb0, + 0x5b, 0x8d, 0x19, 0x30, 0x8b, 0xf3, 0x8c, 0xd0, 0xa0, 0x26, 0x51, 0xeb, 0x00, 0x3d, 0xea, 0x05, + 0xb2, 0xe7, 0x77, 0x20, 0xeb, 0x52, 0x8f, 0xd7, 0x74, 0x2e, 0xad, 0x34, 0x32, 0xb8, 0xd8, 0xa8, + 0x58, 0x80, 0xd5, 0x1f, 0xcc, 0x03, 0x0c, 0x74, 0xff, 0x50, 0x2a, 0x79, 0x04, 0x85, 0xa8, 0xaa, + 0x2b, 0x8b, 0x43, 0x57, 0xae, 0x76, 0x04, 0x46, 0x0f, 0xc3, 0xcd, 0x26, 0xf2, 0xb5, 0xc4, 0xb4, + 0x3a, 0xec, 0x28, 0x29, 0xe5, 0xb9, 0x98, 0x94, 0xb1, 0x20, 0x81, 0x78, 0x9e, 0x5c, 0x79, 0xf6, + 0x13, 0x35, 0xf8, 0xb5, 0x20, 0x8c, 0x26, 0x23, 0xfe, 0xc4, 0x72, 0xd8, 0xcc, 0x8a, 0x6c, 0xcd, + 0xe1, 0xa9, 0x1c, 0xfa, 0x08, 0x8a, 0x6c, 0xde, 0x9a, 0xcf, 0x79, 0x32, 0xd8, 0xbf, 0xd4, 0x54, + 0x42, 0x03, 0x06, 0x77, 0x6a, 0xe5, 0x57, 0x00, 0x74, 0xd7, 0xb5, 0x2d, 0x62, 0x6a, 0x7b, 0x13, + 0x1e, 0xdd, 0x17, 0x70, 0x41, 0x52, 0xea, 0x13, 0x76, 0x5c, 0x42, 0xb6, 0x1e, 0xf0, 0x88, 0xfd, + 0x1a, 0x03, 0x4a, 0x74, 0x2d, 0xa8, 0x2b, 0xb0, 0xe4, 0x8d, 0x1d, 0x66, 0x50, 0x39, 0x3a, 0xf5, + 0x8f, 0xd3, 0xf0, 0x52, 0x87, 0x04, 0xc7, 0xd4, 0x3b, 0xac, 0x05, 0x81, 0x6e, 0x1c, 0x8c, 0x88, + 0x23, 0x97, 0x2f, 0x96, 0x44, 0xa5, 0x2e, 0x24, 0x51, 0x6b, 0xb0, 0xa0, 0xdb, 0x96, 0xee, 0x13, + 0x11, 0xfa, 0x15, 0x70, 0xd8, 0x64, 0xa9, 0x1e, 0x4b, 0x1c, 0x89, 0xef, 0x13, 0x51, 0xe9, 0x61, + 0x03, 0x0f, 0x09, 0xe8, 0xbb, 0x70, 0x53, 0x06, 0x79, 0x7a, 
0xd4, 0x15, 0x4b, 0x62, 0xc2, 0xc2, + 0x75, 0x2b, 0x31, 0x93, 0x4d, 0x1e, 0x9c, 0x8c, 0x02, 0xa7, 0xe4, 0xae, 0x1b, 0xc8, 0x98, 0x72, + 0xd5, 0x4c, 0x60, 0x95, 0x1f, 0xc3, 0xad, 0x4b, 0x45, 0xbe, 0x54, 0x25, 0xe9, 0xef, 0xd3, 0x00, + 0xed, 0x5e, 0x6d, 0x47, 0x1a, 0xa9, 0x09, 0xb9, 0x7d, 0x7d, 0x64, 0xd9, 0x93, 0xab, 0x3c, 0xe0, + 0x14, 0x5f, 0xad, 0x09, 0x73, 0x6c, 0x72, 0x19, 0x2c, 0x65, 0x79, 0x1e, 0x3b, 0xde, 0x73, 0x48, + 0x10, 0xe5, 0xb1, 0xbc, 0xc5, 0x86, 0xe1, 0xe9, 0x4e, 0xb4, 0x75, 0x45, 0x83, 0x2d, 0x00, 0x0b, + 0x79, 0x8e, 0xf5, 0x49, 0xe8, 0xb6, 0x64, 0x13, 0x6d, 0xf1, 0xaa, 0x31, 0xf1, 0x8e, 0x88, 0xb9, + 0x96, 0xe5, 0x46, 0xbd, 0x6e, 0x3c, 0x58, 0xc2, 0x85, 0xed, 0x22, 0xe9, 0xf2, 0x07, 0x3c, 0x64, + 0x9a, 0xb2, 0xbe, 0x94, 0x8d, 0xee, 0xc1, 0xe2, 0x85, 0x79, 0xbe, 0x50, 0x40, 0x68, 0xf7, 0x9e, + 0xbe, 0xa3, 0x64, 0xe4, 0xaf, 0x77, 0x95, 0x9c, 0xfa, 0x57, 0xf3, 0xc2, 0xd1, 0x48, 0xab, 0x26, + 0xbf, 0x96, 0xe4, 0xf9, 0xee, 0x36, 0xa8, 0x2d, 0x1d, 0xc0, 0x9b, 0x57, 0xfb, 0x1f, 0x96, 0x47, + 0x72, 0x38, 0x8e, 0x04, 0xd1, 0x06, 0x14, 0xc5, 0x2e, 0xd6, 0xd8, 0x81, 0xe3, 0x66, 0x5d, 0xc4, + 0x20, 0x48, 0x4c, 0x12, 0xbd, 0x01, 0x4b, 0xbc, 0xe0, 0xe4, 0x1f, 0x10, 0x53, 0x60, 0x32, 0x1c, + 0xb3, 0x18, 0x51, 0x39, 0x6c, 0x07, 0x4a, 0x92, 0xa0, 0xf1, 0x6c, 0x20, 0xcb, 0x07, 0x74, 0xe7, + 0xba, 0x01, 0x09, 0x11, 0x9e, 0x24, 0x14, 0xdd, 0x69, 0x43, 0xfd, 0x65, 0xc8, 0x87, 0x83, 0x45, + 0x6b, 0x30, 0x3f, 0x68, 0xf4, 0x94, 0xb9, 0xf2, 0xf2, 0xc9, 0x69, 0xa5, 0x18, 0x92, 0x07, 0x8d, + 0x1e, 0xe3, 0xec, 0x36, 0x7b, 0x4a, 0xea, 0x22, 0x67, 0xb7, 0xd9, 0x43, 0x65, 0xc8, 0xf4, 0x1b, + 0x83, 0x5e, 0x18, 0x9f, 0x85, 0x2c, 0x46, 0x2b, 0x67, 0x58, 0x7c, 0xa6, 0xee, 0x43, 0x31, 0xd6, + 0x3b, 0x7a, 0x0d, 0x16, 0xda, 0x9d, 0xc7, 0xb8, 0xd5, 0xef, 0x2b, 0x73, 0x22, 0x3d, 0x88, 0x71, + 0xdb, 0xce, 0x90, 0xad, 0x1d, 0x7a, 0x05, 0x32, 0x5b, 0x5d, 0x76, 0xef, 0x8b, 0xfc, 0x23, 0x86, + 0xd8, 0xa2, 0x7e, 0x50, 0xbe, 0x21, 0x03, 0xbf, 0xb8, 0x62, 0xf5, 0xf7, 0x52, 0x90, 0x13, 0x07, + 0x2d, 0x71, 0x11, 0x6b, 0xd3, 0xa4, 0x48, 0xa4, 0x8d, 0x6f, 0x5e, 0x9e, 0xe2, 0x55, 0x65, 0x46, + 0x26, 0xb6, 0x66, 0x28, 0x57, 0x7e, 0x1f, 0x4a, 0x71, 0xc6, 0x97, 0xda, 0x98, 0xdf, 0x85, 0x22, + 0xdb, 0xfb, 0x61, 0xaa, 0xf7, 0x00, 0x72, 0xc2, 0x59, 0x44, 0xf7, 0xd0, 0xe5, 0xf9, 0xa6, 0x44, + 0xa2, 0x47, 0xb0, 0x20, 0x72, 0xd4, 0xb0, 0x96, 0xbd, 0x7e, 0xf5, 0x09, 0xc3, 0x21, 0x5c, 0xfd, + 0x08, 0x32, 0x3d, 0x42, 0x3c, 0x66, 0x7b, 0x87, 0x9a, 0x64, 0x7a, 0x75, 0xcb, 0xf4, 0xda, 0x24, + 0xed, 0x26, 0x4b, 0xaf, 0x4d, 0xd2, 0x36, 0xa3, 0x7a, 0x5c, 0x3a, 0x56, 0x8f, 0x1b, 0x40, 0xe9, + 0x19, 0xb1, 0x86, 0x07, 0x01, 0x31, 0xb9, 0xa2, 0xb7, 0x21, 0xe3, 0x92, 0x68, 0xf0, 0x6b, 0x89, + 0x9b, 0x8f, 0x10, 0x0f, 0x73, 0x14, 0xf3, 0x31, 0xc7, 0x5c, 0x5a, 0x3e, 0x03, 0xc9, 0x96, 0xfa, + 0x77, 0x69, 0x58, 0x6a, 0xfb, 0xfe, 0x58, 0x77, 0x8c, 0x30, 0xaa, 0xfb, 0xe6, 0xc5, 0xa8, 0x2e, + 0xf1, 0xbd, 0xec, 0xa2, 0xc8, 0xc5, 0x32, 0xa3, 0xbc, 0x59, 0xd3, 0xd1, 0xcd, 0xaa, 0xfe, 0x47, + 0x2a, 0xac, 0x25, 0xbe, 0x11, 0x73, 0x05, 0x22, 0x47, 0x8c, 0x6b, 0x22, 0xbb, 0xce, 0xa1, 0x43, + 0x8f, 0x1d, 0x96, 0xbd, 0xe2, 0x56, 0xa7, 0xf5, 0x4c, 0x49, 0x89, 0xed, 0x79, 0x01, 0x84, 0x89, + 0x43, 0x8e, 0x99, 0xa6, 0x5e, 0xab, 0xd3, 0x64, 0x51, 0x58, 0x3a, 0x41, 0x53, 0x8f, 0x38, 0xa6, + 0xe5, 0x0c, 0xd1, 0x6b, 0x90, 0x6b, 0xf7, 0xfb, 0xbb, 0x3c, 0x85, 0x7c, 0xe9, 0xe4, 0xb4, 0x72, + 0xe3, 0x02, 0x8a, 0xd7, 0x91, 0x4d, 0x06, 0x62, 0x29, 0x10, 0x8b, 0xcf, 0x12, 0x40, 0x2c, 0xb6, + 0x16, 0x20, 0xdc, 0x1d, 0xd4, 0x06, 0x2d, 0x25, 0x9b, 0x00, 0xc2, 0x94, 0xfd, 0x95, 
0xc7, 0xed, + 0x9f, 0xd3, 0xa0, 0xd4, 0x0c, 0x83, 0xb8, 0x01, 0xe3, 0xcb, 0xac, 0x73, 0x00, 0x79, 0x97, 0xfd, + 0xb2, 0x48, 0x18, 0x41, 0x3d, 0x4a, 0x7c, 0xf1, 0x9d, 0x91, 0xab, 0x62, 0x6a, 0x93, 0x9a, 0x39, + 0xb2, 0x7c, 0xdf, 0xa2, 0x8e, 0xa0, 0xe1, 0x48, 0x53, 0xf9, 0xbf, 0x52, 0x70, 0x23, 0x01, 0x81, + 0xee, 0x41, 0xc6, 0xa3, 0x76, 0xb8, 0x86, 0xb7, 0x2f, 0x2b, 0x13, 0x33, 0x51, 0xcc, 0x91, 0x68, + 0x1d, 0x40, 0x1f, 0x07, 0x54, 0xe7, 0xfd, 0x8b, 0xe2, 0x1a, 0x8e, 0x51, 0xd0, 0x33, 0xc8, 0xf9, + 0xc4, 0xf0, 0x48, 0x18, 0x67, 0x7f, 0xf4, 0xd3, 0x8e, 0xbe, 0xda, 0xe7, 0x6a, 0xb0, 0x54, 0x57, + 0xae, 0x42, 0x4e, 0x50, 0xd8, 0xb6, 0x37, 0xf5, 0x40, 0x97, 0x8f, 0x08, 0xfc, 0x37, 0xdb, 0x4d, + 0xba, 0x3d, 0x0c, 0x77, 0x93, 0x6e, 0x0f, 0xd5, 0xbf, 0x4c, 0x03, 0xb4, 0x9e, 0x07, 0xc4, 0x73, + 0x74, 0xbb, 0x51, 0x43, 0xad, 0xd8, 0xcd, 0x20, 0x66, 0xfb, 0xb5, 0xc4, 0x77, 0x93, 0x48, 0xa2, + 0xda, 0xa8, 0x25, 0xdc, 0x0d, 0xb7, 0x60, 0x7e, 0xec, 0xc9, 0x47, 0x7c, 0x11, 0x23, 0xef, 0xe2, + 0x6d, 0xcc, 0x68, 0xa8, 0x15, 0xaf, 0xe5, 0x5c, 0xfa, 0x54, 0x1f, 0xeb, 0x20, 0xd1, 0x75, 0xb1, + 0x93, 0x6f, 0xe8, 0x9a, 0x41, 0xe4, 0xad, 0x52, 0x12, 0x27, 0xbf, 0x51, 0x6b, 0x10, 0x2f, 0xc0, + 0x39, 0x43, 0x67, 0xff, 0x7f, 0x26, 0xff, 0xf6, 0x36, 0xc0, 0x74, 0x6a, 0x68, 0x1d, 0xb2, 0x8d, + 0xcd, 0x7e, 0x7f, 0x5b, 0x99, 0x13, 0x0e, 0x7c, 0xca, 0xe2, 0x64, 0xf5, 0xcf, 0xd2, 0x90, 0x6f, + 0xd4, 0xe4, 0x95, 0xdb, 0x00, 0x85, 0x7b, 0x25, 0xfe, 0xf4, 0x42, 0x9e, 0xbb, 0x96, 0x37, 0x91, + 0x8e, 0xe5, 0x8a, 0x84, 0x77, 0x89, 0x89, 0xb0, 0x51, 0xb7, 0xb8, 0x00, 0xc2, 0x50, 0x22, 0xd2, + 0x08, 0x9a, 0xa1, 0x87, 0x3e, 0x7e, 0xfd, 0x6a, 0x63, 0x89, 0xd4, 0x65, 0xda, 0xf6, 0x71, 0x31, + 0x54, 0xd2, 0xd0, 0x7d, 0xf4, 0x1e, 0x2c, 0xfb, 0xd6, 0xd0, 0xb1, 0x9c, 0xa1, 0x16, 0x1a, 0x8f, + 0xbf, 0x03, 0xd5, 0x57, 0xce, 0xcf, 0x36, 0x16, 0xfb, 0x82, 0x25, 0x6d, 0xb8, 0x28, 0x91, 0x0d, + 0x6e, 0x4a, 0xf4, 0x2e, 0x2c, 0xc5, 0x44, 0x99, 0x15, 0x85, 0xd9, 0x79, 0xc5, 0x38, 0x92, 0x7c, + 0x42, 0x26, 0xb8, 0x14, 0x09, 0x3e, 0x21, 0xbc, 0x36, 0xb3, 0x4f, 0x3d, 0x83, 0x68, 0x1e, 0x3f, + 0xd3, 0xfc, 0x76, 0xcf, 0xe0, 0x22, 0xa7, 0x89, 0x63, 0xae, 0x3e, 0x85, 0x1b, 0x5d, 0xcf, 0x38, + 0x20, 0x7e, 0x20, 0x4c, 0x21, 0xad, 0xf8, 0x11, 0xdc, 0x0e, 0x74, 0xff, 0x50, 0x3b, 0xb0, 0xfc, + 0x80, 0x7a, 0x13, 0xcd, 0x23, 0x01, 0x71, 0x18, 0x5f, 0xe3, 0x0f, 0xdc, 0xb2, 0x9c, 0x78, 0x8b, + 0x61, 0xb6, 0x04, 0x04, 0x87, 0x88, 0x6d, 0x06, 0x50, 0xdb, 0x50, 0x62, 0x29, 0x8c, 0x2c, 0xaa, + 0xb1, 0xd9, 0x83, 0x4d, 0x87, 0xda, 0x17, 0xbe, 0xa6, 0x0a, 0x36, 0x1d, 0x8a, 0x9f, 0xea, 0xb7, + 0x41, 0x69, 0x5a, 0xbe, 0xab, 0x07, 0xc6, 0x41, 0x58, 0x27, 0x45, 0x4d, 0x50, 0x0e, 0x88, 0xee, + 0x05, 0x7b, 0x44, 0x0f, 0x34, 0x97, 0x78, 0x16, 0x35, 0xaf, 0x5f, 0xe5, 0xe5, 0x48, 0xa4, 0xc7, + 0x25, 0xd4, 0xff, 0x49, 0x01, 0x60, 0x7d, 0x3f, 0x8c, 0xd6, 0xbe, 0x0e, 0x2b, 0xbe, 0xa3, 0xbb, + 0xfe, 0x01, 0x0d, 0x34, 0xcb, 0x09, 0x88, 0x77, 0xa4, 0xdb, 0xb2, 0xb8, 0xa3, 0x84, 0x8c, 0xb6, + 0xa4, 0xa3, 0xb7, 0x01, 0x1d, 0x12, 0xe2, 0x6a, 0xd4, 0x36, 0xb5, 0x90, 0x29, 0x1e, 0xbe, 0x33, + 0x58, 0x61, 0x9c, 0xae, 0x6d, 0xf6, 0x43, 0x3a, 0xaa, 0xc3, 0x3a, 0x9b, 0x3e, 0x71, 0x02, 0xcf, + 0x22, 0xbe, 0xb6, 0x4f, 0x3d, 0xcd, 0xb7, 0xe9, 0xb1, 0xb6, 0x4f, 0x6d, 0x9b, 0x1e, 0x13, 0x2f, + 0xac, 0x9b, 0x95, 0x6d, 0x3a, 0x6c, 0x09, 0xd0, 0x26, 0xf5, 0xfa, 0x36, 0x3d, 0xde, 0x0c, 0x11, + 0x2c, 0xa4, 0x9b, 0xce, 0x39, 0xb0, 0x8c, 0xc3, 0x30, 0xa4, 0x8b, 0xa8, 0x03, 0xcb, 0x38, 0x44, + 0xaf, 0xc1, 0x22, 0xb1, 0x09, 0x2f, 0x9f, 0x08, 0x54, 0x96, 0xa3, 0x4a, 0x21, 0x91, 0x81, 0xd4, + 0x8f, 0x41, 
0x69, 0x39, 0x86, 0x37, 0x71, 0x63, 0x6b, 0xfe, 0x36, 0x20, 0xe6, 0x24, 0x35, 0x9b, + 0x1a, 0x87, 0xda, 0x48, 0x77, 0xf4, 0x21, 0x1b, 0x97, 0x78, 0x71, 0x54, 0x18, 0x67, 0x9b, 0x1a, + 0x87, 0x3b, 0x92, 0xae, 0xbe, 0x07, 0xd0, 0x77, 0x3d, 0xa2, 0x9b, 0x5d, 0x16, 0x4d, 0x30, 0xd3, + 0xf1, 0x96, 0x66, 0xca, 0xf7, 0x5c, 0xea, 0xc9, 0xa3, 0xae, 0x08, 0x46, 0x33, 0xa2, 0xab, 0xbf, + 0x08, 0x37, 0x7a, 0xb6, 0x6e, 0xf0, 0x2f, 0x2c, 0x7a, 0xd1, 0x13, 0x1a, 0x7a, 0x04, 0x39, 0x01, + 0x95, 0x2b, 0x99, 0x78, 0xdc, 0xa6, 0x7d, 0x6e, 0xcd, 0x61, 0x89, 0xaf, 0x97, 0x00, 0xa6, 0x7a, + 0xd4, 0x7f, 0x4c, 0x41, 0x21, 0xd2, 0x8f, 0x2a, 0xe2, 0x65, 0x28, 0xf0, 0x74, 0xcb, 0x91, 0x19, + 0x7f, 0x01, 0xc7, 0x49, 0xa8, 0x0d, 0x45, 0x37, 0x92, 0xbe, 0x32, 0x9e, 0x4b, 0x18, 0x35, 0x8e, + 0xcb, 0xa2, 0xf7, 0xa1, 0x10, 0x3e, 0xa0, 0x87, 0x1e, 0xf6, 0xea, 0xf7, 0xf6, 0x29, 0x3c, 0x2c, + 0xa4, 0x7a, 0xc4, 0xb5, 0x2d, 0xe6, 0x73, 0x32, 0x51, 0x21, 0x15, 0x4b, 0x92, 0xfa, 0x4d, 0x80, + 0x6f, 0x51, 0xcb, 0x19, 0xd0, 0x43, 0xe2, 0xf0, 0x57, 0x61, 0x96, 0x52, 0x92, 0xd0, 0xd0, 0xb2, + 0xc5, 0x2b, 0x05, 0x62, 0x95, 0xa2, 0xc7, 0x51, 0xd1, 0x54, 0xff, 0x22, 0x0d, 0x39, 0x4c, 0x69, + 0xd0, 0xa8, 0xa1, 0x0a, 0xe4, 0xa4, 0x2b, 0xe1, 0x57, 0x54, 0xbd, 0x70, 0x7e, 0xb6, 0x91, 0x15, + 0x3e, 0x24, 0x6b, 0x70, 0xe7, 0x11, 0x73, 0xf2, 0xe9, 0xcb, 0x9c, 0x3c, 0xba, 0x07, 0x25, 0x09, + 0xd2, 0x0e, 0x74, 0xff, 0x40, 0xe4, 0x77, 0xf5, 0xa5, 0xf3, 0xb3, 0x0d, 0x10, 0xc8, 0x2d, 0xdd, + 0x3f, 0xc0, 0x20, 0xd0, 0xec, 0x37, 0x6a, 0x41, 0xf1, 0x13, 0x6a, 0x39, 0x5a, 0xc0, 0x27, 0x21, + 0x6b, 0x91, 0x89, 0x4b, 0x3d, 0x9d, 0xaa, 0xfc, 0x80, 0x02, 0x3e, 0x99, 0x4e, 0xbe, 0x05, 0x8b, + 0x1e, 0xa5, 0x81, 0xf0, 0x6c, 0x16, 0x75, 0x64, 0x99, 0xa3, 0x92, 0x58, 0xfd, 0xa6, 0x34, 0xc0, + 0x12, 0x87, 0x4b, 0x5e, 0xac, 0x85, 0xee, 0xc1, 0xaa, 0xad, 0xfb, 0x81, 0xc6, 0x5d, 0xa2, 0x39, + 0xd5, 0x96, 0xe3, 0xc6, 0x47, 0x8c, 0xb7, 0xc9, 0x59, 0xa1, 0x84, 0xfa, 0x0f, 0x29, 0x28, 0xb2, + 0xc9, 0x58, 0xfb, 0x96, 0xc1, 0xe2, 0xc0, 0x2f, 0x1f, 0x9e, 0xdc, 0x82, 0x79, 0xc3, 0xf7, 0xa4, + 0x51, 0xf9, 0xfd, 0xdc, 0xe8, 0x63, 0xcc, 0x68, 0xe8, 0x63, 0xc8, 0xc9, 0x72, 0x8b, 0x88, 0x4c, + 0xd4, 0xeb, 0x23, 0x56, 0x69, 0x1b, 0x29, 0xc7, 0xb7, 0xfb, 0x74, 0x74, 0xe2, 0x9e, 0xc0, 0x71, + 0x12, 0xba, 0x09, 0x69, 0x43, 0x98, 0x4b, 0x7e, 0xa1, 0xd3, 0xe8, 0xe0, 0xb4, 0xe1, 0xa8, 0x3f, + 0x4e, 0xc1, 0xe2, 0xd4, 0x27, 0xb0, 0x1d, 0x70, 0x1b, 0x0a, 0xfe, 0x78, 0xcf, 0x9f, 0xf8, 0x01, + 0x19, 0x85, 0x2f, 0xde, 0x11, 0x01, 0xb5, 0xa1, 0xa0, 0xdb, 0x43, 0xea, 0x59, 0xc1, 0xc1, 0x48, + 0x26, 0xb2, 0xc9, 0xd1, 0x44, 0x5c, 0x67, 0xb5, 0x16, 0x8a, 0xe0, 0xa9, 0x74, 0x18, 0x1a, 0x88, + 0xcf, 0x22, 0x78, 0x68, 0xf0, 0x2a, 0x94, 0x6c, 0x7d, 0xc4, 0xeb, 0x4f, 0x81, 0x35, 0x22, 0xe1, + 0x61, 0x90, 0xb4, 0x81, 0x35, 0x22, 0xaa, 0x0a, 0x85, 0x48, 0x19, 0x5a, 0x86, 0x62, 0xad, 0xd5, + 0xd7, 0xee, 0x3f, 0x78, 0xa4, 0x3d, 0x6e, 0xec, 0x28, 0x73, 0x32, 0x7c, 0xfd, 0xd3, 0x14, 0x2c, + 0x4a, 0x8f, 0x25, 0x53, 0x82, 0xd7, 0x60, 0xc1, 0xd3, 0xf7, 0x83, 0x30, 0x69, 0xc9, 0x88, 0x5d, + 0xcd, 0x2e, 0x01, 0x96, 0xb4, 0x30, 0x56, 0x72, 0xd2, 0x12, 0xfb, 0x06, 0x63, 0xfe, 0xca, 0x6f, + 0x30, 0x32, 0xff, 0x2f, 0xdf, 0x60, 0xa8, 0xbf, 0x01, 0xb0, 0x69, 0xd9, 0x64, 0x20, 0x4a, 0x55, + 0x49, 0x29, 0x28, 0x0b, 0xf3, 0x64, 0x29, 0x34, 0x0c, 0xf3, 0xda, 0x4d, 0xcc, 0x68, 0x8c, 0x35, + 0xb4, 0x4c, 0x79, 0x18, 0x39, 0xeb, 0x31, 0x63, 0x0d, 0x2d, 0x33, 0x7a, 0xf6, 0xcb, 0x5c, 0xf3, + 0xec, 0xa7, 0x2e, 0xc3, 0x22, 0x16, 0x35, 0x36, 0x31, 0x06, 0xf5, 0x34, 0x05, 0xcb, 0x32, 0xde, + 0x8d, 0x5c, 0xf6, 0xd7, 0xa0, 0x20, 
0x42, 0xdf, 0x69, 0x12, 0xc8, 0x3f, 0x44, 0x10, 0xb8, 0x76, + 0x13, 0xe7, 0x05, 0xbb, 0x6d, 0xa2, 0x0d, 0x28, 0x4a, 0x68, 0xec, 0xf3, 0x2e, 0x10, 0xa4, 0x0e, + 0x9b, 0xcf, 0x3b, 0x90, 0xd9, 0xb7, 0x6c, 0x22, 0x77, 0x7e, 0xa2, 0x47, 0x98, 0x5a, 0x64, 0x6b, + 0x0e, 0x73, 0x74, 0x3d, 0x1f, 0x16, 0xf7, 0xd4, 0x7f, 0x49, 0xf1, 0x12, 0x33, 0x4b, 0x55, 0xe3, + 0xe3, 0x13, 0x59, 0xeb, 0xcc, 0xf8, 0x04, 0x8e, 0x8d, 0x4f, 0xb0, 0xc5, 0xf8, 0x24, 0x34, 0x3e, + 0x3e, 0x41, 0xfa, 0xe9, 0xc7, 0x87, 0x3e, 0x84, 0x05, 0x59, 0xaa, 0x94, 0xae, 0xee, 0xd5, 0xc4, + 0x9d, 0x11, 0xb7, 0xf4, 0xd6, 0x1c, 0x0e, 0x65, 0x62, 0xd3, 0xdb, 0x86, 0x9b, 0x75, 0x5b, 0x37, + 0x0e, 0x6d, 0xcb, 0x0f, 0x88, 0x19, 0xf7, 0x40, 0x0f, 0x20, 0x77, 0x21, 0xce, 0xbd, 0xaa, 0x88, + 0x2a, 0x91, 0xea, 0xbf, 0xa7, 0xa0, 0xb4, 0x45, 0x74, 0x3b, 0x38, 0x98, 0x56, 0xaa, 0x02, 0xe2, + 0x07, 0xf2, 0x7e, 0xe4, 0xbf, 0xd1, 0x37, 0x20, 0x1f, 0x85, 0x41, 0xd7, 0x3e, 0x07, 0x46, 0x50, + 0xf4, 0x10, 0x16, 0xd8, 0xd8, 0xe9, 0x38, 0xcc, 0xaf, 0xae, 0x7a, 0x69, 0x92, 0x48, 0x76, 0x69, + 0x79, 0x84, 0xc7, 0x3d, 0xdc, 0x4e, 0x59, 0x1c, 0x36, 0xd1, 0xcf, 0x43, 0x89, 0x3f, 0x94, 0x84, + 0x61, 0x5e, 0xf6, 0x3a, 0x9d, 0x45, 0xf1, 0xd6, 0x29, 0x42, 0xbc, 0x3f, 0x4a, 0xc3, 0xea, 0x8e, + 0x3e, 0xd9, 0x23, 0xd2, 0x0d, 0x11, 0x13, 0x13, 0x83, 0x7a, 0x26, 0xea, 0xc5, 0xdd, 0xd7, 0x15, + 0x4f, 0xa7, 0x49, 0xc2, 0xc9, 0x5e, 0x2c, 0xcc, 0xf9, 0xd2, 0xb1, 0x9c, 0x6f, 0x15, 0xb2, 0x0e, + 0x75, 0x0c, 0x22, 0x7d, 0x9b, 0x68, 0xa8, 0xdf, 0x4b, 0xc5, 0x7d, 0x57, 0x39, 0x7a, 0xd6, 0xe4, + 0x45, 0xaf, 0x0e, 0x0d, 0xa2, 0xee, 0xd0, 0xc7, 0x50, 0xee, 0xb7, 0x1a, 0xb8, 0x35, 0xa8, 0x77, + 0xbf, 0xad, 0xf5, 0x6b, 0xdb, 0xfd, 0xda, 0x83, 0x7b, 0x5a, 0xaf, 0xbb, 0xfd, 0x9d, 0xfb, 0x0f, + 0xef, 0x7d, 0x43, 0x49, 0x95, 0x2b, 0x27, 0xa7, 0x95, 0xdb, 0x9d, 0x5a, 0x63, 0x5b, 0x9c, 0xb8, + 0x3d, 0xfa, 0xbc, 0xaf, 0xdb, 0xbe, 0xfe, 0xe0, 0x5e, 0x8f, 0xda, 0x13, 0x86, 0x41, 0x5f, 0x07, + 0xb4, 0xd9, 0xc2, 0x9d, 0xd6, 0x40, 0x0b, 0x1d, 0x64, 0xa3, 0xde, 0x50, 0xd2, 0x22, 0x93, 0xda, + 0x24, 0x9e, 0x43, 0x82, 0x5a, 0xab, 0x7f, 0xff, 0xc1, 0xa3, 0x46, 0xbd, 0xc1, 0xce, 0x78, 0x29, + 0x7e, 0x5b, 0xc6, 0x83, 0x80, 0xd4, 0xa5, 0x41, 0xc0, 0x34, 0x96, 0x48, 0x5f, 0x12, 0x4b, 0x6c, + 0xc2, 0xaa, 0xe1, 0x51, 0xdf, 0xd7, 0x58, 0x7a, 0x42, 0xcc, 0x99, 0x04, 0xe8, 0x2b, 0xe7, 0x67, + 0x1b, 0x2b, 0x0d, 0xc6, 0xef, 0x73, 0xb6, 0x54, 0xbf, 0x62, 0xc4, 0x48, 0xbc, 0x27, 0xf5, 0x87, + 0xf3, 0x2c, 0xd2, 0xb3, 0x8e, 0x2c, 0x9b, 0x0c, 0x89, 0x8f, 0x9e, 0xc2, 0xb2, 0xe1, 0x11, 0x93, + 0xe5, 0x1d, 0xba, 0x1d, 0xff, 0x32, 0xfa, 0xe7, 0x12, 0x83, 0xae, 0x48, 0xb0, 0xda, 0x88, 0xa4, + 0xfa, 0x2e, 0x31, 0xf0, 0x92, 0x71, 0xa1, 0x8d, 0x3e, 0x81, 0x65, 0x9f, 0xd8, 0x96, 0x33, 0x7e, + 0xae, 0x19, 0xd4, 0x09, 0xc8, 0xf3, 0xf0, 0x39, 0xef, 0x3a, 0xbd, 0xfd, 0xd6, 0x36, 0x93, 0x6a, + 0x08, 0xa1, 0x3a, 0x3a, 0x3f, 0xdb, 0x58, 0xba, 0x48, 0xc3, 0x4b, 0x52, 0xb3, 0x6c, 0x97, 0x0f, + 0x60, 0xe9, 0xe2, 0x68, 0xd0, 0xaa, 0x74, 0x34, 0xdc, 0x5f, 0x45, 0x8e, 0xe4, 0x36, 0xe4, 0x3d, + 0x32, 0xb4, 0xfc, 0xc0, 0x13, 0x66, 0x66, 0x9c, 0x88, 0x82, 0xd6, 0x20, 0x17, 0xfb, 0xe2, 0x84, + 0xf1, 0x64, 0x9b, 0x79, 0x10, 0xf1, 0x31, 0x59, 0xf9, 0xd7, 0x60, 0x66, 0x2c, 0xec, 0xd0, 0x99, + 0x96, 0xaf, 0xef, 0xc9, 0xce, 0xf2, 0x38, 0x6c, 0xb2, 0xbd, 0x3c, 0xf6, 0xa3, 0x00, 0x92, 0xff, + 0x66, 0x34, 0x1e, 0xe9, 0xc8, 0x4f, 0xeb, 0x78, 0x2c, 0x13, 0x7e, 0xc1, 0x9b, 0x89, 0x7d, 0xc1, + 0xbb, 0x0a, 0x59, 0x9b, 0x1c, 0x11, 0x5b, 0xc4, 0x18, 0x58, 0x34, 0xee, 0xdc, 0x83, 0x52, 0xf8, + 0xa9, 0x28, 0xff, 0x66, 0x24, 0x0f, 0x99, 0x41, 0xad, 0xff, 
0x44, 0x99, 0x43, 0x00, 0x39, 0xb1, + 0xc7, 0xc5, 0x23, 0x64, 0xa3, 0xdb, 0xd9, 0x6c, 0x3f, 0x56, 0xd2, 0x77, 0x7e, 0x37, 0x03, 0x85, + 0xe8, 0x19, 0x8c, 0xdd, 0x69, 0x9d, 0xd6, 0xb3, 0xf0, 0x90, 0x44, 0xf4, 0x0e, 0x39, 0x46, 0xaf, + 0x4e, 0x0b, 0x68, 0x1f, 0x8b, 0x77, 0xff, 0x88, 0x1d, 0x16, 0xcf, 0x5e, 0x87, 0x7c, 0xad, 0xdf, + 0x6f, 0x3f, 0xee, 0xb4, 0x9a, 0xca, 0x67, 0xa9, 0xf2, 0x57, 0x4e, 0x4e, 0x2b, 0x2b, 0x11, 0xa8, + 0xe6, 0x8b, 0x6d, 0xc9, 0x51, 0x8d, 0x46, 0xab, 0x37, 0x68, 0x35, 0x95, 0x4f, 0xd3, 0xb3, 0x28, + 0x5e, 0x10, 0xe2, 0xdf, 0x33, 0x15, 0x7a, 0xb8, 0xd5, 0xab, 0x61, 0xd6, 0xe1, 0x67, 0x69, 0x51, + 0xd7, 0x9b, 0xf6, 0xe8, 0x11, 0x57, 0xf7, 0x58, 0x9f, 0xeb, 0xe1, 0x67, 0x85, 0x9f, 0xce, 0x8b, + 0x0f, 0x5b, 0xa6, 0x6f, 0x7a, 0x44, 0x37, 0x27, 0xac, 0x37, 0xfe, 0x98, 0xca, 0xd5, 0xcc, 0xcf, + 0xf4, 0xd6, 0x67, 0x3e, 0x8c, 0x69, 0x51, 0x61, 0x01, 0xef, 0x76, 0x3a, 0x0c, 0xf4, 0x69, 0x66, + 0x66, 0x76, 0x78, 0xec, 0xb0, 0x64, 0x1f, 0xbd, 0x01, 0xf9, 0xf0, 0xad, 0x55, 0xf9, 0x2c, 0x33, + 0x33, 0xa0, 0x46, 0xf8, 0x50, 0xcc, 0x3b, 0xdc, 0xda, 0x1d, 0xf0, 0xaf, 0x1e, 0x3f, 0xcd, 0xce, + 0x76, 0x78, 0x30, 0x0e, 0x4c, 0x7a, 0xec, 0xb0, 0xd3, 0x2c, 0x4b, 0x88, 0x9f, 0x65, 0x85, 0x97, + 0x88, 0x30, 0xb2, 0x7e, 0xf8, 0x3a, 0xe4, 0x71, 0xeb, 0x5b, 0xe2, 0x03, 0xc9, 0x4f, 0x73, 0x33, + 0x7a, 0x30, 0xf9, 0x84, 0x18, 0xac, 0xb7, 0x0a, 0xe4, 0x70, 0x6b, 0xa7, 0xfb, 0xb4, 0xa5, 0xfc, + 0x7e, 0x6e, 0x46, 0x0f, 0x26, 0x23, 0xca, 0x3f, 0xf8, 0xca, 0x77, 0x71, 0x6f, 0xab, 0xc6, 0x17, + 0x65, 0x56, 0x4f, 0xd7, 0x73, 0x0f, 0x74, 0x87, 0x98, 0xd3, 0x8f, 0x7b, 0x22, 0xd6, 0x9d, 0x5f, + 0x82, 0x7c, 0x18, 0x53, 0xa3, 0x75, 0xc8, 0x3d, 0xeb, 0xe2, 0x27, 0x2d, 0xac, 0xcc, 0x09, 0x2b, + 0x87, 0x9c, 0x67, 0x22, 0x1b, 0xaa, 0xc0, 0xc2, 0x4e, 0xad, 0x53, 0x7b, 0xdc, 0xc2, 0x61, 0xfd, + 0x3f, 0x04, 0xc8, 0xc0, 0xb0, 0xac, 0xc8, 0x0e, 0x22, 0x9d, 0xf5, 0xb5, 0x1f, 0x7d, 0xbe, 0x3e, + 0xf7, 0x93, 0xcf, 0xd7, 0xe7, 0x3e, 0x3d, 0x5f, 0x4f, 0xfd, 0xe8, 0x7c, 0x3d, 0xf5, 0xb7, 0xe7, + 0xeb, 0xa9, 0x7f, 0x3b, 0x5f, 0x4f, 0xed, 0xe5, 0xf8, 0x75, 0xf3, 0xf0, 0xff, 0x02, 0x00, 0x00, + 0xff, 0xff, 0x6e, 0xf6, 0x47, 0x4c, 0x21, 0x34, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/types.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/types.proto new file mode 100644 index 00000000..0eb97c4b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/types.proto @@ -0,0 +1,1117 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "google/protobuf/timestamp.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/wrappers.proto"; +import "gogoproto/gogo.proto"; + +// This file contains types that are common to objects and spec or that are not +// considered first-class within the cluster object-model. + +// Version tracks the last time an object in the store was updated. +message Version { + uint64 index = 1; +} + +message IndexEntry { + string key = 1; + string val = 2; +} + +// Annotations provide useful information to identify API objects. They are +// common to all API specs. +message Annotations { + string name = 1; + map labels = 2; + + // Indices provides keys and values for indexing this object. + // A single key may have multiple values. + repeated IndexEntry indices = 4 [(gogoproto.nullable) = false]; +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. 
+// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +message NamedGenericResource { + string kind = 1; + string value = 2; +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +message DiscreteGenericResource { + string kind = 1; + int64 value = 2; +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +message GenericResource { + oneof resource { + NamedGenericResource named_resource_spec = 1; + DiscreteGenericResource discrete_resource_spec = 2; + } +} + +enum ResourceType { + TASK = 0; + SECRET = 1; + CONFIG = 2; +} + +message Resources { + // Amount of CPUs (e.g. 2000000000 = 2 CPU cores) + int64 nano_cpus = 1 [(gogoproto.customname) = "NanoCPUs"]; + + // Amount of memory in bytes. + int64 memory_bytes = 2; + + // User specified resource (e.g: bananas=2;apple={red,yellow,green}) + repeated GenericResource generic = 3; +} + +message ResourceRequirements { + Resources limits = 1; + Resources reservations = 2; + + // Amount of swap in bytes - can only be used together with a memory limit + // -1 means unlimited + // a null pointer indicates that the default behaviour of granting twice + // the memory is maintained + google.protobuf.Int64Value swap_bytes = 3; + + // Tune container memory swappiness (0 to 100) - if not specified, defaults + // to the container OS's default - generally 60, or the value predefined in + // the image; set to -1 to unset a previously set value + google.protobuf.Int64Value memory_swappiness = 4; +} + +message Platform { + // Architecture (e.g. x86_64) + string architecture = 1; + + // Operating System (e.g. linux) + string os = 2 [(gogoproto.customname) = "OS"]; +} + +// PluginDescription describes an engine plugin. +message PluginDescription { + // Type of plugin. Canonical values for existing types are + // Volume, Network, and Authorization. More types could be + // supported in the future. + string type = 1; + + // Name of the plugin + string name = 2; +} + +message EngineDescription { + // Docker daemon version running on the node. + string engine_version = 1; + + // Labels attached to the engine. + map labels = 2; + + // Volume, Network, and Auth plugins + repeated PluginDescription plugins = 3 [(gogoproto.nullable) = false]; +} + +message NodeDescription { + // Hostname of the node as reported by the agent. + // This is different from spec.meta.name which is user-defined. + string hostname = 1; + + // Platform of the node. + Platform platform = 2; + + // Total resources on the node. + Resources resources = 3; + + // Information about the Docker Engine on the node. 
+
+message NodeTLSInfo {
+	// Information about which root certs the node trusts
+	bytes trust_root = 1;
+
+	// Information about the node's current TLS certificate
+	bytes cert_issuer_subject = 2;
+	bytes cert_issuer_public_key = 3;
+}
+
+message RaftMemberStatus {
+	bool leader = 1;
+
+	enum Reachability {
+		// Unknown indicates that the manager state cannot be resolved
+		UNKNOWN = 0;
+
+		// Unreachable indicates that the node cannot be contacted by other
+		// raft cluster members.
+		UNREACHABLE = 1;
+
+		// Reachable indicates that the node is healthy and reachable
+		// by other members.
+		REACHABLE = 2;
+	}
+
+	Reachability reachability = 2;
+	string message = 3;
+}
+
+message NodeStatus {
+	// TODO(aluzzardi) These should be using `gogoproto.enumvalue_customname`.
+	enum State {
+		// Unknown indicates the node state cannot be resolved.
+		UNKNOWN = 0;
+
+		// Down indicates the node is down.
+		DOWN = 1;
+
+		// Ready indicates the node is ready to accept tasks.
+		READY = 2;
+
+		// Disconnected indicates the node is currently trying to find a new manager.
+		DISCONNECTED = 3;
+	}
+
+	State state = 1;
+	string message = 2;
+	// Addr is the node's IP address as observed by the manager
+	string addr = 3;
+}
+
+message Image {
+	// reference is a docker image reference. This can include a repository, tag
+	// or be fully qualified with a digest. The format is specified in the
+	// distribution/reference package.
+	string reference = 1;
+}
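The comment above points at the distribution/reference package for the reference grammar. A hedged sketch of validating an Image.reference with that package's `ParseNormalizedNamed`, assuming the package is vendored alongside; `normalize` is an illustrative helper, not part of this API:

package example

import "github.com/docker/distribution/reference"

// normalize validates an image reference and returns its canonical
// repository name, e.g. "ubuntu:18.04" -> "docker.io/library/ubuntu".
func normalize(ref string) (string, error) {
	named, err := reference.ParseNormalizedNamed(ref)
	if err != nil {
		return "", err
	}
	return named.Name(), nil
}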
+
+// Mount describes volume mounts for a container.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, target. Top-level flags, such as writable, are common to all kinds
+// of mounts, where we also provide options that are specific to a type of
+// mount. This corresponds to flags and data, respectively, in the syscall.
+message Mount {
+	enum Type {
+		option (gogoproto.goproto_enum_prefix) = false;
+		option (gogoproto.enum_customname) = "MountType";
+
+		BIND = 0 [(gogoproto.enumvalue_customname) = "MountTypeBind"]; // Bind mount host dir
+		VOLUME = 1 [(gogoproto.enumvalue_customname) = "MountTypeVolume"]; // Remote storage volumes
+		TMPFS = 2 [(gogoproto.enumvalue_customname) = "MountTypeTmpfs"]; // Mount a tmpfs
+		NPIPE = 3 [(gogoproto.enumvalue_customname) = "MountTypeNamedPipe"]; // Windows named pipes
+	}
+
+	// Type defines the nature of the mount.
+	Type type = 1;
+
+	// Source specifies the name of the mount. Depending on mount type, this
+	// may be a volume name or a host path, or even ignored.
+	string source = 2;
+
+	// Target path in container
+	string target = 3;
+
+	// ReadOnly should be set to true if the mount should not be writable.
+	bool readonly = 4 [(gogoproto.customname) = "ReadOnly"];
+
+	// Consistency indicates the tolerable level of file system consistency
+	enum Consistency {
+		option (gogoproto.goproto_enum_prefix) = false;
+		option (gogoproto.enum_customname) = "MountConsistency";
+
+		DEFAULT = 0 [(gogoproto.enumvalue_customname) = "MountConsistencyDefault"];
+		CONSISTENT = 1 [(gogoproto.enumvalue_customname) = "MountConsistencyFull"];
+		CACHED = 2 [(gogoproto.enumvalue_customname) = "MountConsistencyCached"];
+		DELEGATED = 3 [(gogoproto.enumvalue_customname) = "MountConsistencyDelegated"];
+	}
+	Consistency consistency = 8;
+
+	// BindOptions specifies options that are specific to a bind mount.
+	message BindOptions {
+		enum Propagation {
+			option (gogoproto.goproto_enum_prefix) = false;
+			option (gogoproto.enum_customname) = "MountPropagation";
+
+			RPRIVATE = 0 [(gogoproto.enumvalue_customname) = "MountPropagationRPrivate"];
+			PRIVATE = 1 [(gogoproto.enumvalue_customname) = "MountPropagationPrivate"];
+			RSHARED = 2 [(gogoproto.enumvalue_customname) = "MountPropagationRShared"];
+			SHARED = 3 [(gogoproto.enumvalue_customname) = "MountPropagationShared"];
+			RSLAVE = 4 [(gogoproto.enumvalue_customname) = "MountPropagationRSlave"];
+			SLAVE = 5 [(gogoproto.enumvalue_customname) = "MountPropagationSlave"];
+		}
+
+		// Propagation mode of mount.
+		Propagation propagation = 1;
+		// allows non-recursive bind-mount, i.e. mount(2) with "bind" rather than "rbind".
+		bool nonrecursive = 2 [(gogoproto.customname) = "NonRecursive"];
+	}
+
+	// VolumeOptions contains parameters for mounting the volume.
+	message VolumeOptions {
+		// nocopy prevents automatic copying of data to the volume with data from target
+		bool nocopy = 1 [(gogoproto.customname) = "NoCopy"];
+
+		// labels to apply to the volume if creating
+		map<string, string> labels = 2;
+
+		// DriverConfig specifies the options that may be passed to the driver
+		// if the volume is created.
+		//
+		// If this is empty, no volume will be created if the volume is missing.
+		Driver driver_config = 3;
+	}
+
+	message TmpfsOptions {
+		// Size sets the size of the tmpfs, in bytes.
+		//
+		// This will be converted to an operating system specific value
+		// depending on the host. For example, on linux, it will be converted to
+		// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
+		// docker, uses a straight byte value.
+		//
+		// Percentages are not supported.
+		int64 size_bytes = 1;
+
+		// Mode of the tmpfs upon creation
+		uint32 mode = 2 [(gogoproto.customtype) = "os.FileMode", (gogoproto.nullable) = false];
+
+		// Options passed to tmpfs mount
+		string options = 3;
+		// TODO(stevvooe): There are several more tmpfs flags, specified in the
+		// daemon, that are accepted. Only the most basic are added for now.
+		//
+		// From docker/docker/pkg/mount/flags.go:
+		//
+		// var validFlags = map[string]bool{
+		//	"":          true,
+		//	"size":      true, X
+		//	"mode":      true, X
+		//	"uid":       true,
+		//	"gid":       true,
+		//	"nr_inodes": true,
+		//	"nr_blocks": true,
+		//	"mpol":      true,
+		// }
+		//
+		// Some of these may be straightforward to add, but others, such as
+		// uid/gid have implications in a clustered system.
+	}
+
+	// Depending on type, one of bind_options or volume_options will be set.
+
+	// BindOptions configures properties of a bind mount type.
+	//
+	// For mounts of type bind, the source must be an absolute host path.
+	BindOptions bind_options = 5;
+
+	// VolumeOptions configures the properties specific to a volume mount type.
+	//
+	// For mounts of type volume, the source will be used as the volume name.
+	VolumeOptions volume_options = 6;
+
+	// TmpfsOptions allows one to set options for mounting a temporary
+	// filesystem.
+	//
+	// The source field will be ignored when using mounts of type tmpfs.
+	TmpfsOptions tmpfs_options = 7;
+
+	// TODO(stevvooe): It would be better to use a oneof field above, although the
+	// type is enough to make the decision, while being primary to the
+	// data structure.
+}
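The gogoproto annotations above rename the generated Go identifiers. A sketch of how a bind mount is spelled through the generated api package, assuming the usual gogo naming conventions (enum values take their `enumvalue_customname`, nested messages become `Mount_BindOptions`):

package example

import "github.com/docker/swarmkit/api"

// A read-only bind mount, using the Go names produced by the annotations
// above (MountTypeBind, ReadOnly, Mount_BindOptions).
var logMount = api.Mount{
	Type:     api.MountTypeBind,
	Source:   "/var/log", // host path; must be absolute for bind mounts
	Target:   "/logs",    // path inside the container
	ReadOnly: true,       // proto field "readonly", customname ReadOnly
	BindOptions: &api.Mount_BindOptions{
		Propagation: api.MountPropagationRPrivate,
	},
}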
+
+message RestartPolicy {
+	enum RestartCondition {
+		option (gogoproto.goproto_enum_prefix) = false;
+		option (gogoproto.enum_customname) = "RestartCondition";
+		NONE = 0 [(gogoproto.enumvalue_customname) = "RestartOnNone"];
+		ON_FAILURE = 1 [(gogoproto.enumvalue_customname) = "RestartOnFailure"];
+		ANY = 2 [(gogoproto.enumvalue_customname) = "RestartOnAny"];
+	}
+
+	RestartCondition condition = 1;
+
+	// Delay between restart attempts
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration delay = 2;
+
+	// MaxAttempts is the maximum number of restarts to attempt on an
+	// instance before giving up. Ignored if 0.
+	uint64 max_attempts = 3;
+
+	// Window is the time window used to evaluate the restart policy.
+	// The time window is unbounded if this is 0.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration window = 4;
+}
+
+// UpdateConfig specifies the rate and policy of updates.
+// TODO(aluzzardi): Consider making this a oneof with RollingStrategy and LockstepStrategy.
+message UpdateConfig {
+	// Maximum number of tasks to be updated in one iteration.
+	// 0 means unlimited parallelism.
+	uint64 parallelism = 1;
+
+	// Amount of time between updates.
+	google.protobuf.Duration delay = 2 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
+
+	enum FailureAction {
+		PAUSE = 0;
+		CONTINUE = 1;
+		ROLLBACK = 2;
+	}
+
+	// FailureAction is the action to take when an update fails.
+	FailureAction failure_action = 3;
+
+	// Monitor indicates how long to monitor a task for failure after it is
+	// created. If the task fails by ending up in one of the states
+	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
+	// this counts as a failure. If it fails after Monitor, it does not
+	// count as a failure. If Monitor is unspecified, a default value will
+	// be used.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration monitor = 4;
+
+	// MaxFailureRatio is the fraction of tasks that may fail during
+	// an update before the failure action is invoked. Any task created by
+	// the current update which ends up in one of the states REJECTED,
+	// COMPLETED or FAILED within Monitor from its creation counts as a
+	// failure. The number of failures is divided by the number of tasks
+	// being updated, and if this fraction is greater than
+	// MaxFailureRatio, the failure action is invoked.
+	//
+	// If the failure action is CONTINUE, there is no effect.
+	// If the failure action is PAUSE, no more tasks will be updated until
+	// another update is started.
+	// If the failure action is ROLLBACK, the orchestrator will attempt to
+	// roll back to the previous service spec. If the MaxFailureRatio
+	// threshold is hit during the rollback, the rollback will pause.
+	float max_failure_ratio = 5;
+
+	// UpdateOrder controls the order of operations when rolling out an
+	// updated task. Either the old task is shut down before the new task
+	// is started, or the new task is started before the old task is shut
+	// down.
+	enum UpdateOrder {
+		STOP_FIRST = 0;
+		START_FIRST = 1;
+	}
+
+	UpdateOrder order = 6;
+}
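The MaxFailureRatio paragraph above describes a simple threshold rule. A standalone Go sketch of that rule as described, for illustration only; `failureActionTriggered` is a hypothetical name, not the orchestrator's code:

package example

// failureActionTriggered mirrors the documented MaxFailureRatio rule:
// the configured FailureAction fires when failed/updating exceeds the ratio.
func failureActionTriggered(failed, updating int, maxFailureRatio float32) bool {
	if updating == 0 {
		return false // nothing being updated, nothing to trigger
	}
	return float32(failed)/float32(updating) > maxFailureRatio
}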
+
+// UpdateStatus is the status of an update in progress.
+message UpdateStatus {
+	enum UpdateState {
+		UNKNOWN = 0;
+		UPDATING = 1;
+		PAUSED = 2;
+		COMPLETED = 3;
+		ROLLBACK_STARTED = 4;
+		ROLLBACK_PAUSED = 5; // if a rollback fails
+		ROLLBACK_COMPLETED = 6;
+	}
+
+	// State is the state of this update. It indicates whether the
+	// update is in progress, completed, paused, rolling back, or
+	// finished rolling back.
+	UpdateState state = 1;
+
+	// StartedAt is the time at which the update was started.
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp started_at = 2;
+
+	// CompletedAt is the time at which the update completed successfully,
+	// paused, or finished rolling back.
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp completed_at = 3;
+
+	// TODO(aaronl): Consider adding a timestamp showing when the most
+	// recent task update took place. Currently, this is nontrivial
+	// because each service update kicks off a replacement update, so
+	// updating the service object with a timestamp at every step along
+	// the rolling update would cause the rolling update to be constantly
+	// restarted.
+
+	// Message explains how the update got into its current state. For
+	// example, if the update is paused, it will explain what is preventing
+	// the update from proceeding (typically the failure of a task to start up
+	// when OnFailure is PAUSE).
+	string message = 4;
+}
+
+// TaskState enumerates the states that a task progresses through within an
+// agent. States are designed to be monotonically increasing, such that if two
+// states are seen by a task, the greater of the two represents the true state.
+
+// Only the manager creates a NEW task, and moves the task to PENDING and ASSIGNED.
+// Afterward, the manager must rely on the agent to update the task status
+// (pre-run: preparing, ready, starting;
+//  running;
+//  end-state: complete, shutdown, failed, rejected)
+enum TaskState {
+	// TODO(aluzzardi): Move it back into `TaskStatus` because of the naming
+	// collisions of enums.
+
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "TaskState";
+	NEW = 0 [(gogoproto.enumvalue_customname)="TaskStateNew"];
+	PENDING = 64 [(gogoproto.enumvalue_customname)="TaskStatePending"]; // waiting for scheduling decision
+	ASSIGNED = 192 [(gogoproto.enumvalue_customname)="TaskStateAssigned"];
+	ACCEPTED = 256 [(gogoproto.enumvalue_customname)="TaskStateAccepted"]; // task has been accepted by an agent.
+	PREPARING = 320 [(gogoproto.enumvalue_customname)="TaskStatePreparing"];
+	READY = 384 [(gogoproto.enumvalue_customname)="TaskStateReady"];
+	STARTING = 448 [(gogoproto.enumvalue_customname)="TaskStateStarting"];
+	RUNNING = 512 [(gogoproto.enumvalue_customname)="TaskStateRunning"];
+	COMPLETE = 576 [(gogoproto.enumvalue_customname)="TaskStateCompleted"]; // successful completion of task (not error code, just ran)
+	SHUTDOWN = 640 [(gogoproto.enumvalue_customname)="TaskStateShutdown"]; // orchestrator requested shutdown
+	FAILED = 704 [(gogoproto.enumvalue_customname)="TaskStateFailed"]; // task execution failed with error
+	// TaskStateRejected means a task never ran, for instance if something about
+	// the environment failed (e.g. setting up a port on that node failed).
+	REJECTED = 768 [(gogoproto.enumvalue_customname)="TaskStateRejected"]; // task could not be executed here.
+	// TaskStateRemove is used to correctly handle service deletions and scale
+	// downs. This allows us to keep track of tasks that have been marked for
+	// deletion, but can't yet be removed because the agent is in the process of
+	// shutting them down. Once the agent has shut down tasks with desired state
+	// REMOVE, the task reaper is responsible for removing them.
+	REMOVE = 800 [(gogoproto.enumvalue_customname)="TaskStateRemove"];
+	// TaskStateOrphaned is used to free up resources associated with service
+	// tasks on unresponsive nodes without having to delete those tasks. This
+	// state is directly assigned to the task by the orchestrator.
+	ORPHANED = 832 [(gogoproto.enumvalue_customname)="TaskStateOrphaned"];
+
+	// NOTE(stevvooe): The state of a task is actually a lamport clock, in that
+	// given two observations, the greater of the two can be considered
+	// correct. To enforce this, we only allow tasks to proceed to a greater
+	// state.
+	//
+	// A byproduct of this design decision is that we must also maintain this
+	// invariant in the protobuf enum values, such that when comparing two
+	// values, the one with the greater value is also the greater state.
+	//
+	// Because we may want to add intervening states at a later date, we've left
+	// 64 spaces between each one. This should allow us to make 5 or 6
+	// insertions between each state if we find that we made a mistake and need
+	// another state.
+	//
+	// Remove this message when the states are deemed perfect.
+}
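The NOTE above is the key invariant: task states form a lamport clock, so a status may only advance to a numerically greater TaskState. A sketch of that comparison against the generated enum; `advance` is an illustrative helper, not the agent's actual reconciler:

package example

import "github.com/docker/swarmkit/api"

// advance applies the monotonicity rule: accept a reported state only if it
// is strictly greater than the one already recorded.
func advance(current, reported api.TaskState) api.TaskState {
	if reported > current {
		return reported
	}
	return current // stale or regressive observation; keep the greater state
}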
+
+// TaskState enumerates the states that a task progresses through within an
+// agent. States are designed to be monotonically increasing, such that if two
+// states are seen by a task, the greater of the two represents the true state.
+
+// Only the manager creates a NEW task, and moves the task to PENDING and ASSIGNED.
+// Afterward, the manager must rely on the agent to update the task status
+// (pre-run: preparing, ready, starting;
+// running;
+// end-state: complete, shutdown, failed, rejected)
+enum TaskState {
+	// TODO(aluzzardi): Move it back into `TaskStatus` because of the naming
+	// collisions of enums.
+
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "TaskState";
+	NEW = 0 [(gogoproto.enumvalue_customname)="TaskStateNew"];
+	PENDING = 64 [(gogoproto.enumvalue_customname)="TaskStatePending"]; // waiting for scheduling decision
+	ASSIGNED = 192 [(gogoproto.enumvalue_customname)="TaskStateAssigned"];
+	ACCEPTED = 256 [(gogoproto.enumvalue_customname)="TaskStateAccepted"]; // task has been accepted by an agent.
+	PREPARING = 320 [(gogoproto.enumvalue_customname)="TaskStatePreparing"];
+	READY = 384 [(gogoproto.enumvalue_customname)="TaskStateReady"];
+	STARTING = 448 [(gogoproto.enumvalue_customname)="TaskStateStarting"];
+	RUNNING = 512 [(gogoproto.enumvalue_customname)="TaskStateRunning"];
+	COMPLETE = 576 [(gogoproto.enumvalue_customname)="TaskStateCompleted"]; // successful completion of task (not error code, just ran)
+	SHUTDOWN = 640 [(gogoproto.enumvalue_customname)="TaskStateShutdown"]; // orchestrator requested shutdown
+	FAILED = 704 [(gogoproto.enumvalue_customname)="TaskStateFailed"]; // task execution failed with error
+	// TaskStateRejected means a task never ran, for instance if something about
+	// the environment failed (e.g. setting up a port on that node failed).
+	REJECTED = 768 [(gogoproto.enumvalue_customname)="TaskStateRejected"]; // task could not be executed here.
+	// TaskStateRemove is used to correctly handle service deletions and scale
+	// downs. This allows us to keep track of tasks that have been marked for
+	// deletion, but can't yet be removed because the agent is in the process of
+	// shutting them down. Once the agent has shut down tasks with desired state
+	// REMOVE, the task reaper is responsible for removing them.
+	REMOVE = 800 [(gogoproto.enumvalue_customname)="TaskStateRemove"];
+	// TaskStateOrphaned is used to free up resources associated with service
+	// tasks on unresponsive nodes without having to delete those tasks. This
+	// state is directly assigned to the task by the orchestrator.
+	ORPHANED = 832 [(gogoproto.enumvalue_customname)="TaskStateOrphaned"];
+
+	// NOTE(stevvooe): The state of a task is actually a lamport clock, in that
+	// given two observations, the greater of the two can be considered
+	// correct. To enforce this, we only allow tasks to proceed to a greater
+	// state.
+	//
+	// A byproduct of this design decision is that we must also maintain this
+	// invariant in the protobuf enum values, such that when comparing two
+	// values, the one with the greater value is also the greater state.
+	//
+	// Because we may want to add intervening states at a later date, we've left
+	// 64 spaces between each one. This should allow us to make 5 or 6
+	// insertions between each state if we find that we made a mistake and need
+	// another state.
+	//
+	// Remove this message when the states are deemed perfect.
+}
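+
+// Because the enum values preserve the state ordering, reconciling two
+// observations of the same task is just a max, e.g. (illustrative Go, not
+// part of this file):
+//
+//	// reconcile returns the authoritative state given two observations.
+//	func reconcile(a, b api.TaskState) api.TaskState {
+//		if a > b {
+//			return a
+//		}
+//		return b
+//	}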
+
+// Container specific status.
+message ContainerStatus {
+	string container_id = 1;
+
+	int32 pid = 2 [(gogoproto.customname) = "PID"];
+	int32 exit_code = 3;
+}
+
+// PortStatus specifies the actual allocated runtime state of a list
+// of port configs.
+message PortStatus {
+	repeated PortConfig ports = 1;
+}
+
+message TaskStatus {
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp timestamp = 1;
+
+	// State expresses the current state of the task.
+	TaskState state = 2;
+
+	// Message reports a message for the task status. This should provide a
+	// human readable message that can point to how the task actually arrived
+	// at a current state.
+	//
+	// As a convention, we place a small message here that led to the
+	// current state. For example, if the task is in ready, because it was
+	// prepared, we'd place "prepared" in this field. If we skipped preparation
+	// because the task is prepared, we would put "already prepared" in this
+	// field.
+	string message = 3;
+
+	// Err is set if the task is in an error state, or is unable to
+	// progress from an earlier state because a precondition is
+	// unsatisfied.
+	//
+	// The following states should report a companion error:
+	//
+	// FAILED, REJECTED
+	//
+	// In general, messages that should be surfaced to users belong in the
+	// Err field, and notes on routine state transitions belong in Message.
+	//
+	// TODO(stevvooe) Integrate this field with the error interface.
+	string err = 4;
+
+	// Container status contains container specific status information.
+	oneof runtime_status {
+		ContainerStatus container = 5;
+	}
+
+	// HostPorts provides a list of ports allocated at the host
+	// level.
+	PortStatus port_status = 6;
+
+	// AppliedBy gives the node ID of the manager that applied this task
+	// status update to the Task object.
+	string applied_by = 7;
+
+	// AppliedAt gives a timestamp of when this status update was applied to
+	// the Task object.
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp applied_at = 8;
+}
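+
+// A sketch of how a consumer might follow the Err/Message convention above
+// when rendering a task's status (hypothetical Go helper; fmt is assumed to
+// be imported):
+//
+//	// renderStatus prefers the user-facing Err over the routine Message.
+//	func renderStatus(s *api.TaskStatus) string {
+//		if s.Err != "" {
+//			return fmt.Sprintf("%s: %s", s.State, s.Err)
+//		}
+//		return fmt.Sprintf("%s: %s", s.State, s.Message)
+//	}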
+
+// NetworkAttachmentConfig specifies how a service should be attached to a particular network.
+//
+// For now, this is a simple struct, but this can include future information
+// instructing Swarm on how this service should work on the particular
+// network.
+message NetworkAttachmentConfig {
+	// Target specifies the target network for attachment. This value must be a
+	// network ID.
+	string target = 1;
+	// Aliases specifies a list of discoverable alternate names for the service on this Target.
+	repeated string aliases = 2;
+	// Addresses specifies a list of IPv4 and IPv6 addresses
+	// preferred. If these addresses are not available then the
+	// attachment might fail.
+	repeated string addresses = 3;
+	// DriverAttachmentOpts is a map of driver attachment options for the network target.
+	map<string, string> driver_attachment_opts = 4;
+}
+
+// IPAMConfig specifies parameters for IP Address Management.
+message IPAMConfig {
+	// TODO(stevvooe): It may make more sense to manage IPAM and network
+	// definitions separately. This will allow multiple networks to share IPAM
+	// instances. For now, we will follow the conventions of libnetwork and
+	// specify this as part of the network specification.
+
+	// AddressFamily specifies the network address family that
+	// this IPAMConfig belongs to.
+	enum AddressFamily {
+		UNKNOWN = 0; // satisfy proto3
+		IPV4 = 4;
+		IPV6 = 6;
+	}
+
+	AddressFamily family = 1;
+
+	// Subnet defines a network as a CIDR address (i.e. network and mask,
+	// 192.168.0.1/24).
+	string subnet = 2;
+
+	// Range defines the portion of the subnet to allocate to tasks. This is
+	// defined as a subnet within the primary subnet.
+	string range = 3;
+
+	// Gateway address within the subnet.
+	string gateway = 4;
+
+	// Reserved is a list of addresses from the master pool that should *not* be
+	// allocated. These addresses may have already been allocated or may be
+	// reserved for another allocation manager.
+	map<string, string> reserved = 5;
+}
+
+// PortConfig specifies an exposed port which can be
+// addressed using the given name. This can be later queried
+// using a service discovery api or a DNS SRV query. The node
+// port specifies a port that can be used to address this
+// service external to the cluster by sending a connection
+// request to this port to any node on the cluster.
+message PortConfig {
+	enum Protocol {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		TCP = 0 [(gogoproto.enumvalue_customname) = "ProtocolTCP"];
+		UDP = 1 [(gogoproto.enumvalue_customname) = "ProtocolUDP"];
+		SCTP = 2 [(gogoproto.enumvalue_customname) = "ProtocolSCTP"];
+	}
+
+	// PublishMode controls how ports are published on the swarm.
+	enum PublishMode {
+		option (gogoproto.enum_customname) = "PublishMode";
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// PublishModeIngress exposes the port across the cluster on all nodes.
+		INGRESS = 0 [(gogoproto.enumvalue_customname) = "PublishModeIngress"];
+
+		// PublishModeHost exposes the port on just the target host. If the
+		// published port is undefined, an ephemeral port will be allocated. If
+		// the published port is defined, the node will attempt to allocate it,
+		// erroring the task if it fails.
+		HOST = 1 [(gogoproto.enumvalue_customname) = "PublishModeHost"];
+	}
+
+	// Name for the port. If provided, the port information can
+	// be queried using the name as in a DNS SRV query.
+	string name = 1;
+
+	// Protocol for the port which is exposed.
+	Protocol protocol = 2;
+
+	// The port which the application is exposing and is bound to.
+	uint32 target_port = 3;
+
+	// PublishedPort specifies the port on which the service is exposed. If
+	// specified, the port must be within the available range. If not specified
+	// (value is zero), an available port is automatically assigned.
+	uint32 published_port = 4;
+
+	// PublishMode controls how the port is published.
+	PublishMode publish_mode = 5;
+}
+
+// Driver is a generic driver type to be used throughout the API. For now, a
+// driver is simply a name and set of options. The field contents depend on the
+// target use case and driver application. For example, a network driver may
+// have different rules than a volume driver.
+message Driver {
+	string name = 1;
+	map<string, string> options = 2;
+}
+
+message IPAMOptions {
+	Driver driver = 1;
+	repeated IPAMConfig configs = 3;
+}
+
+// Peer should be used anywhere where we are describing a remote peer.
+message Peer {
+	string node_id = 1;
+	string addr = 2;
+}
+
+// WeightedPeer should be used anywhere where we are describing a remote peer
+// with a weight.
+message WeightedPeer {
+	Peer peer = 1;
+	int64 weight = 2;
+}
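+
+// Weight is a plain int64, so one possible use is weighted random selection
+// of a peer (illustrative Go sketch; assumes every weight is positive):
+//
+//	// pickPeer returns a peer with probability proportional to its weight.
+//	func pickPeer(peers []*api.WeightedPeer) *api.Peer {
+//		var total int64
+//		for _, wp := range peers {
+//			total += wp.Weight
+//		}
+//		n := rand.Int63n(total) // math/rand; panics if total <= 0
+//		for _, wp := range peers {
+//			if n < wp.Weight {
+//				return wp.Peer
+//			}
+//			n -= wp.Weight
+//		}
+//		return nil // unreachable when total > 0
+//	}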
+
+
+message IssuanceStatus {
+	enum State {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "IssuanceStateUnknown"];
+		// A new certificate should be issued
+		RENEW = 1 [(gogoproto.enumvalue_customname)="IssuanceStateRenew"];
+		// Certificate is pending acceptance
+		PENDING = 2 [(gogoproto.enumvalue_customname)="IssuanceStatePending"];
+		// Successful completion of certificate issuance
+		ISSUED = 3 [(gogoproto.enumvalue_customname)="IssuanceStateIssued"];
+		// Certificate issuance failed
+		FAILED = 4 [(gogoproto.enumvalue_customname)="IssuanceStateFailed"];
+		// Signals workers to renew their certificate. From the CA's perspective
+		// this is equivalent to IssuanceStateIssued: a noop.
+		ROTATE = 5 [(gogoproto.enumvalue_customname)="IssuanceStateRotate"];
+	}
+	State state = 1;
+
+	// Err is set if the Certificate Issuance is in an error state.
+	// The following states should report a companion error:
+	// FAILED
+	string err = 2;
+}
+
+message AcceptancePolicy {
+	message RoleAdmissionPolicy {
+		message Secret {
+			// The actual content (possibly hashed)
+			bytes data = 1;
+			// The type of hash we are using, or "plaintext"
+			string alg = 2;
+		}
+
+		NodeRole role = 1;
+		// Autoaccept controls which roles' certificates are automatically
+		// issued without administrator intervention.
+		bool autoaccept = 2;
+		// Secret represents a user-provided string that is necessary for new
+		// nodes to join the cluster.
+		Secret secret = 3;
+	}
+
+	repeated RoleAdmissionPolicy policies = 1;
+}
+
+message ExternalCA {
+	enum CAProtocol {
+		CFSSL = 0 [(gogoproto.enumvalue_customname) = "CAProtocolCFSSL"];
+	}
+
+	// Protocol is the protocol used by this external CA.
+	CAProtocol protocol = 1;
+
+	// URL is the URL where the external CA can be reached.
+	string url = 2 [(gogoproto.customname) = "URL"];
+
+	// Options is a set of additional key/value pairs whose interpretation
+	// depends on the specified CA type.
+	map<string, string> options = 3;
+
+	// CACert specifies which root CA is used by this external CA.
+	bytes ca_cert = 4 [(gogoproto.customname) = "CACert"];
+}
+
+message CAConfig {
+	// NodeCertExpiry is the duration certificates should be issued for.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration node_cert_expiry = 1;
+
+	// ExternalCAs is a list of CAs to which a manager node will make
+	// certificate signing requests for node certificates.
+	repeated ExternalCA external_cas = 2 [(gogoproto.customname) = "ExternalCAs"];
+
+	// SigningCACert is the desired CA certificate to be used as the root and
+	// signing CA for the swarm. If not provided, indicates that we are either happy
+	// with the current configuration, or (together with a bump in the ForceRotate value)
+	// that we want a certificate and key generated for us.
+	bytes signing_ca_cert = 3 [(gogoproto.customname) = "SigningCACert"];
+
+	// SigningCAKey is the desired private key, matching the signing CA cert, to be used
+	// to sign certificates for the swarm.
+	bytes signing_ca_key = 4 [(gogoproto.customname) = "SigningCAKey"];
+
+	// ForceRotate is a counter that triggers a root CA rotation even if no relevant
+	// parameters have been changed in the spec. This will force the manager to generate a new
+	// certificate and key, if none have been provided.
+	uint64 force_rotate = 5;
+}
+
+// OrchestrationConfig defines cluster-level orchestration settings.
+message OrchestrationConfig {
+	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
+	// node. If negative, never remove completed or failed tasks.
+	int64 task_history_retention_limit = 1;
+}
+
+// TaskDefaults specifies default values for task creation.
+message TaskDefaults {
+	// LogDriver specifies the log driver to use for the cluster if not
+	// specified for each task.
+	//
+	// If this is changed, only new tasks will pick up the new log driver.
+	// Existing tasks will continue to use the previous default until rescheduled.
+	Driver log_driver = 1;
+}
+
+// DispatcherConfig defines cluster-level dispatcher settings.
+message DispatcherConfig {
+	// HeartbeatPeriod defines how often an agent should send heartbeats to
+	// the dispatcher.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration heartbeat_period = 1;
+}
+
+// RaftConfig defines raft settings for the cluster.
+message RaftConfig {
+	// SnapshotInterval is the number of log entries between snapshots.
+	uint64 snapshot_interval = 1;
+	// KeepOldSnapshots is the number of snapshots to keep beyond the
+	// current snapshot.
+	uint64 keep_old_snapshots = 2;
+	// LogEntriesForSlowFollowers is the number of log entries to keep
+	// around to sync up slow followers after a snapshot is created.
+	uint64 log_entries_for_slow_followers = 3;
+	// HeartbeatTick defines the number of ticks (in seconds) between
+	// each heartbeat message sent to other members for health-check.
+	uint32 heartbeat_tick = 4;
+	// ElectionTick defines the number of ticks (in seconds) needed
+	// without a leader to trigger a new election.
+	uint32 election_tick = 5;
+}
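+
+// As in etcd/raft, from which these tick settings take their meaning, the
+// election timeout must exceed the heartbeat interval; a config loader might
+// assert that invariant like this (illustrative Go, hypothetical helper):
+//
+//	func validateRaft(c *api.RaftConfig) error {
+//		if c.ElectionTick <= c.HeartbeatTick {
+//			return fmt.Errorf("election tick (%d) must be greater than heartbeat tick (%d)",
+//				c.ElectionTick, c.HeartbeatTick)
+//		}
+//		return nil
+//	}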
+
+message EncryptionConfig {
+	// AutoLockManagers specifies whether or not managers' TLS keys and raft data
+	// should be encrypted at rest in such a way that they must be unlocked
+	// before the manager node starts up again.
+	bool auto_lock_managers = 1;
+}
+
+message SpreadOver {
+	string spread_descriptor = 1; // label descriptor, such as engine.labels.az
+	// TODO: support node information beyond engine and node labels
+
+	// TODO: in the future, add a map that provides weights for weighted
+	// spreading.
+}
+
+message PlacementPreference {
+	oneof Preference {
+		SpreadOver spread = 1;
+	}
+}
+
+// Placement specifies task distribution constraints.
+message Placement {
+	// Constraints specifies a set of requirements a node should meet for a task.
+	repeated string constraints = 1;
+
+	// Preferences provide a way to make the scheduler aware of factors
+	// such as topology. They are provided in order from highest to lowest
+	// precedence.
+	repeated PlacementPreference preferences = 2;
+
+	// Platforms stores all the platforms that the image can run on.
+	// This field is used in the platform filter for scheduling. If empty,
+	// then the platform filter is off, meaning there are no scheduling restrictions.
+	repeated Platform platforms = 3;
+
+	// MaxReplicas specifies the maximum number of replicas that may run on
+	// one node.
+	uint64 max_replicas = 4;
+}
+
+// JoinTokens contains the join tokens for workers and managers.
+message JoinTokens {
+	// Worker is the join token workers may use to join the swarm.
+	string worker = 1;
+
+	// Manager is the join token managers may use to join the swarm.
+	string manager = 2;
+}
+
+message RootCA {
+	// CAKey is the root CA private key.
+	bytes ca_key = 1 [(gogoproto.customname) = "CAKey"];
+
+	// CACert is the root CA certificate.
+	bytes ca_cert = 2 [(gogoproto.customname) = "CACert"];
+
+	// CACertHash is the digest of the CA Certificate.
+	string ca_cert_hash = 3 [(gogoproto.customname) = "CACertHash"];
+
+	// JoinTokens contains the join tokens for workers and managers.
+	JoinTokens join_tokens = 4 [(gogoproto.nullable) = false];
+
+	// RootRotation contains the new root cert and key we want to rotate to - if this is nil, we are not in the
+	// middle of a root rotation.
+	RootRotation root_rotation = 5;
+
+	// LastForcedRotation matches the Cluster Spec's CAConfig's ForceRotate counter.
+	// It indicates when the current CA cert and key were generated (or updated).
+	uint64 last_forced_rotation = 6;
+}
+
+enum NodeRole {
+	option (gogoproto.enum_customname) = "NodeRole";
+	option (gogoproto.goproto_enum_prefix) = false;
+
+	WORKER = 0 [(gogoproto.enumvalue_customname) = "NodeRoleWorker"];
+	MANAGER = 1 [(gogoproto.enumvalue_customname) = "NodeRoleManager"];
+}
+
+message Certificate {
+	NodeRole role = 1;
+
+	bytes csr = 2 [(gogoproto.customname) = "CSR"];
+
+	IssuanceStatus status = 3 [(gogoproto.nullable) = false];
+
+	bytes certificate = 4;
+
+	// CN represents the node ID.
+	string cn = 5 [(gogoproto.customname) = "CN"];
+}
+
+// Symmetric keys to encrypt inter-agent communication.
+message EncryptionKey {
+	// Agent subsystem the key is intended for. Example:
+	// networking:gossip
+	string subsystem = 1;
+
+	// Encryption algorithm that can be implemented using this key.
+	enum Algorithm {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		AES_128_GCM = 0;
+	}
+
+	Algorithm algorithm = 2;
+
+	bytes key = 3;
+
+	// Timestamp from the lamport clock of the key allocator to
+	// identify the relative age of the key.
+	uint64 lamport_time = 4;
+}
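+
+// Since LamportTime totally orders keys from the allocator, the freshest key
+// can be chosen without comparing wall-clock timestamps (Go sketch,
+// hypothetical helper):
+//
+//	func newestKey(keys []*api.EncryptionKey) *api.EncryptionKey {
+//		var newest *api.EncryptionKey
+//		for _, k := range keys {
+//			if newest == nil || k.LamportTime > newest.LamportTime {
+//				newest = k
+//			}
+//		}
+//		return newest
+//	}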
+
+// ManagerStatus provides information about the state of a manager in the cluster.
+message ManagerStatus {
+	// RaftID specifies the internal ID used by the manager in a raft context; it
+	// can never be modified and is used only for informational purposes.
+	uint64 raft_id = 1;
+
+	// Addr is the address advertised to raft.
+	string addr = 2;
+
+	// Leader is set to true if this node is the raft leader.
+	bool leader = 3;
+
+	// Reachability specifies whether this node is reachable.
+	RaftMemberStatus.Reachability reachability = 4;
+}
+
+// FileTarget represents a specific target that is backed by a file.
+message FileTarget {
+	// Name represents the final filename in the filesystem.
+	string name = 1;
+
+	// UID represents the file UID.
+	string uid = 2 [(gogoproto.customname) = "UID"];
+
+	// GID represents the file GID.
+	string gid = 3 [(gogoproto.customname) = "GID"];
+
+	// Mode represents the FileMode of the file.
+	uint32 mode = 4 [(gogoproto.customtype) = "os.FileMode", (gogoproto.nullable) = false];
+}
+
+// RuntimeTarget represents that this secret is _not_ mounted into the
+// container, but is used for some other purpose by the container runtime.
+//
+// Currently, RuntimeTarget has no fields; it's just a placeholder.
+message RuntimeTarget {}
+
+// SecretReference is the linkage between a service and a secret that it uses.
+message SecretReference {
+	// SecretID represents the ID of the specific Secret that we're
+	// referencing. This identifier exists so that SecretReferences don't leak
+	// any information about the secret contents.
+	string secret_id = 1;
+
+	// SecretName is the name of the secret that this references, but this is just provided for
+	// lookup/display purposes. The secret in the reference will be identified by its ID.
+	string secret_name = 2;
+
+	// Target specifies how this secret should be exposed to the task.
+	oneof target {
+		FileTarget file = 3;
+	}
+}
+
+// ConfigReference is the linkage between a service and a config that it uses.
+message ConfigReference {
+	// ConfigID represents the ID of the specific Config that we're
+	// referencing.
+	string config_id = 1;
+
+	// ConfigName is the name of the config that this references, but this is just provided for
+	// lookup/display purposes. The config in the reference will be identified by its ID.
+	string config_name = 2;
+
+	// Target specifies how this config should be exposed to the task.
+	oneof target {
+		FileTarget file = 3;
+		RuntimeTarget runtime = 4;
+	}
+}
+
+// BlacklistedCertificate is a record for a blacklisted certificate. It does not
+// contain the certificate's CN, because these records are indexed by CN.
+message BlacklistedCertificate {
+	// Expiry is the latest known expiration time of a certificate that
+	// was issued for the given CN.
+	// Note: can't use stdtime because this field is nullable.
+	google.protobuf.Timestamp expiry = 1;
+}
+
+// HealthConfig holds configuration settings for the HEALTHCHECK feature.
+message HealthConfig {
+	// Test is the test to perform to check that the container is healthy.
+	// An empty slice means to inherit the default.
+	// The options are:
+	// {} : inherit healthcheck
+	// {"NONE"} : disable healthcheck
+	// {"CMD", args...} : exec arguments directly
+	// {"CMD-SHELL", command} : run command with system's default shell
+	repeated string test = 1;
+
+	// Interval is the time to wait between checks. Zero means inherit.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration interval = 2;
+
+	// Timeout is the time to wait before considering the check to have hung.
+	// Zero means inherit.
+	// Note: can't use stdduration because this field needs to be nullable.
+	google.protobuf.Duration timeout = 3;
+
+	// Retries is the number of consecutive failures needed to consider a
+	// container as unhealthy. Zero means inherit.
+	int32 retries = 4;
+
+	// StartPeriod is the period for container initialization during
+	// which health check failures will not count towards the maximum
+	// number of retries.
+	google.protobuf.Duration start_period = 5;
+}
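+
+// The Test forms above can be dispatched with a simple switch; a Go sketch of
+// how a runtime might interpret them (hypothetical helper; the use of
+// "/bin/sh -c" as the system's default shell is an assumption for Linux):
+//
+//	// healthCmd returns the argv to exec and whether a check should run.
+//	func healthCmd(test []string) ([]string, bool) {
+//		if len(test) == 0 {
+//			return nil, false // inherit the image's healthcheck
+//		}
+//		switch test[0] {
+//		case "NONE":
+//			return nil, false // healthcheck disabled
+//		case "CMD":
+//			return test[1:], true // exec arguments directly
+//		case "CMD-SHELL":
+//			return []string{"/bin/sh", "-c", test[1]}, true
+//		}
+//		return nil, false
+//	}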
+
+message MaybeEncryptedRecord {
+	enum Algorithm {
+		NONE = 0 [(gogoproto.enumvalue_customname) = "NotEncrypted"];
+		SECRETBOX_SALSA20_POLY1305 = 1 [(gogoproto.enumvalue_customname) = "NACLSecretboxSalsa20Poly1305"];
+		FERNET_AES_128_CBC = 2 [(gogoproto.enumvalue_customname) = "FernetAES128CBC"];
+	}
+
+	Algorithm algorithm = 1;
+	bytes data = 2;
+	bytes nonce = 3;
+}
+
+message RootRotation {
+	bytes ca_cert = 1 [(gogoproto.customname) = "CACert"];
+	bytes ca_key = 2 [(gogoproto.customname) = "CAKey"];
+	// CrossSignedCACert is the CACert that has been cross-signed by the previous root.
+	bytes cross_signed_ca_cert = 3 [(gogoproto.customname) = "CrossSignedCACert"];
+}
+
+// Privileges specifies security configuration/permissions.
+message Privileges {
+	// CredentialSpec for managed service account (Windows only).
+	message CredentialSpec {
+		oneof source {
+			string file = 1;
+			string registry = 2;
+
+			// Config represents a Config ID from which to get the CredentialSpec.
+			// The Config MUST be included in the SecretReferences with a RuntimeTarget.
+			string config = 3;
+		}
+	}
+	CredentialSpec credential_spec = 1;
+
+	// SELinuxContext contains the SELinux labels for the container.
+	message SELinuxContext {
+		bool disable = 1;
+
+		string user = 2;
+		string role = 3;
+		string type = 4;
+		string level = 5;
+	}
+	SELinuxContext selinux_context = 2 [(gogoproto.customname) = "SELinuxContext"];
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/watch.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/watch.pb.go
new file mode 100644
index 00000000..9d152514
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/watch.pb.go
@@ -0,0 +1,4581 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: github.com/docker/swarmkit/api/watch.proto
+
+package api
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/docker/swarmkit/protobuf/plugin"
+
+import deepcopy "github.com/docker/swarmkit/api/deepcopy"
+
+import context "golang.org/x/net/context"
+import grpc "google.golang.org/grpc"
+
+import raftselector "github.com/docker/swarmkit/manager/raftselector"
+import codes "google.golang.org/grpc/codes"
+import status "google.golang.org/grpc/status"
+import metadata "google.golang.org/grpc/metadata"
+import peer "google.golang.org/grpc/peer"
+import rafttime "time"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// WatchActionKind distinguishes between creations, updates, and removals. It
+// is structured as a bitmap so multiple kinds of events can be requested with
+// a mask.
+type WatchActionKind int32 + +const ( + WatchActionKindUnknown WatchActionKind = 0 + WatchActionKindCreate WatchActionKind = 1 + WatchActionKindUpdate WatchActionKind = 2 + WatchActionKindRemove WatchActionKind = 4 +) + +var WatchActionKind_name = map[int32]string{ + 0: "WATCH_ACTION_UNKNOWN", + 1: "WATCH_ACTION_CREATE", + 2: "WATCH_ACTION_UPDATE", + 4: "WATCH_ACTION_REMOVE", +} +var WatchActionKind_value = map[string]int32{ + "WATCH_ACTION_UNKNOWN": 0, + "WATCH_ACTION_CREATE": 1, + "WATCH_ACTION_UPDATE": 2, + "WATCH_ACTION_REMOVE": 4, +} + +func (x WatchActionKind) String() string { + return proto.EnumName(WatchActionKind_name, int32(x)) +} +func (WatchActionKind) EnumDescriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type Object struct { + // Types that are valid to be assigned to Object: + // *Object_Node + // *Object_Service + // *Object_Network + // *Object_Task + // *Object_Cluster + // *Object_Secret + // *Object_Resource + // *Object_Extension + // *Object_Config + Object isObject_Object `protobuf_oneof:"Object"` +} + +func (m *Object) Reset() { *m = Object{} } +func (*Object) ProtoMessage() {} +func (*Object) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{0} } + +type isObject_Object interface { + isObject_Object() + MarshalTo([]byte) (int, error) + Size() int +} + +type Object_Node struct { + Node *Node `protobuf:"bytes,1,opt,name=node,oneof"` +} +type Object_Service struct { + Service *Service `protobuf:"bytes,2,opt,name=service,oneof"` +} +type Object_Network struct { + Network *Network `protobuf:"bytes,3,opt,name=network,oneof"` +} +type Object_Task struct { + Task *Task `protobuf:"bytes,4,opt,name=task,oneof"` +} +type Object_Cluster struct { + Cluster *Cluster `protobuf:"bytes,5,opt,name=cluster,oneof"` +} +type Object_Secret struct { + Secret *Secret `protobuf:"bytes,6,opt,name=secret,oneof"` +} +type Object_Resource struct { + Resource *Resource `protobuf:"bytes,7,opt,name=resource,oneof"` +} +type Object_Extension struct { + Extension *Extension `protobuf:"bytes,8,opt,name=extension,oneof"` +} +type Object_Config struct { + Config *Config `protobuf:"bytes,9,opt,name=config,oneof"` +} + +func (*Object_Node) isObject_Object() {} +func (*Object_Service) isObject_Object() {} +func (*Object_Network) isObject_Object() {} +func (*Object_Task) isObject_Object() {} +func (*Object_Cluster) isObject_Object() {} +func (*Object_Secret) isObject_Object() {} +func (*Object_Resource) isObject_Object() {} +func (*Object_Extension) isObject_Object() {} +func (*Object_Config) isObject_Object() {} + +func (m *Object) GetObject() isObject_Object { + if m != nil { + return m.Object + } + return nil +} + +func (m *Object) GetNode() *Node { + if x, ok := m.GetObject().(*Object_Node); ok { + return x.Node + } + return nil +} + +func (m *Object) GetService() *Service { + if x, ok := m.GetObject().(*Object_Service); ok { + return x.Service + } + return nil +} + +func (m *Object) GetNetwork() *Network { + if x, ok := m.GetObject().(*Object_Network); ok { + return x.Network + } + return nil +} + +func (m *Object) GetTask() *Task { + if x, ok := m.GetObject().(*Object_Task); ok { + return x.Task + } + return nil +} + +func (m *Object) GetCluster() *Cluster { + if x, ok := m.GetObject().(*Object_Cluster); ok { + return x.Cluster + } + return nil +} + +func (m *Object) GetSecret() *Secret { + if x, ok := m.GetObject().(*Object_Secret); ok { + return x.Secret + } + return nil +} + +func (m *Object) GetResource() *Resource { + if x, ok := 
m.GetObject().(*Object_Resource); ok { + return x.Resource + } + return nil +} + +func (m *Object) GetExtension() *Extension { + if x, ok := m.GetObject().(*Object_Extension); ok { + return x.Extension + } + return nil +} + +func (m *Object) GetConfig() *Config { + if x, ok := m.GetObject().(*Object_Config); ok { + return x.Config + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Object) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Object_OneofMarshaler, _Object_OneofUnmarshaler, _Object_OneofSizer, []interface{}{ + (*Object_Node)(nil), + (*Object_Service)(nil), + (*Object_Network)(nil), + (*Object_Task)(nil), + (*Object_Cluster)(nil), + (*Object_Secret)(nil), + (*Object_Resource)(nil), + (*Object_Extension)(nil), + (*Object_Config)(nil), + } +} + +func _Object_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Node); err != nil { + return err + } + case *Object_Service: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Service); err != nil { + return err + } + case *Object_Network: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Network); err != nil { + return err + } + case *Object_Task: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Task); err != nil { + return err + } + case *Object_Cluster: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Cluster); err != nil { + return err + } + case *Object_Secret: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Secret); err != nil { + return err + } + case *Object_Resource: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Resource); err != nil { + return err + } + case *Object_Extension: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Extension); err != nil { + return err + } + case *Object_Config: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Object.Object has unexpected type %T", x) + } + return nil +} + +func _Object_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Object) + switch tag { + case 1: // Object.node + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Node) + err := b.DecodeMessage(msg) + m.Object = &Object_Node{msg} + return true, err + case 2: // Object.service + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Service) + err := b.DecodeMessage(msg) + m.Object = &Object_Service{msg} + return true, err + case 3: // Object.network + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Network) + err := b.DecodeMessage(msg) + m.Object = &Object_Network{msg} + return true, err + case 4: // Object.task + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Task) + err := b.DecodeMessage(msg) + m.Object = &Object_Task{msg} + return true, err + case 5: // Object.cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Cluster) + err := 
b.DecodeMessage(msg) + m.Object = &Object_Cluster{msg} + return true, err + case 6: // Object.secret + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Secret) + err := b.DecodeMessage(msg) + m.Object = &Object_Secret{msg} + return true, err + case 7: // Object.resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Resource) + err := b.DecodeMessage(msg) + m.Object = &Object_Resource{msg} + return true, err + case 8: // Object.extension + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Extension) + err := b.DecodeMessage(msg) + m.Object = &Object_Extension{msg} + return true, err + case 9: // Object.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Config) + err := b.DecodeMessage(msg) + m.Object = &Object_Config{msg} + return true, err + default: + return false, nil + } +} + +func _Object_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Object) + // Object + switch x := m.Object.(type) { + case *Object_Node: + s := proto.Size(x.Node) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Service: + s := proto.Size(x.Service) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Network: + s := proto.Size(x.Network) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Task: + s := proto.Size(x.Task) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Cluster: + s := proto.Size(x.Cluster) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Secret: + s := proto.Size(x.Secret) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Resource: + s := proto.Size(x.Resource) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Extension: + s := proto.Size(x.Extension) + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Object_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(9<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. 
+type SelectBySlot struct { + ServiceID string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Slot uint64 `protobuf:"varint,2,opt,name=slot,proto3" json:"slot,omitempty"` +} + +func (m *SelectBySlot) Reset() { *m = SelectBySlot{} } +func (*SelectBySlot) ProtoMessage() {} +func (*SelectBySlot) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{1} } + +type SelectByCustom struct { + Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"` + Index string `protobuf:"bytes,2,opt,name=index,proto3" json:"index,omitempty"` + Value string `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *SelectByCustom) Reset() { *m = SelectByCustom{} } +func (*SelectByCustom) ProtoMessage() {} +func (*SelectByCustom) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{2} } + +type SelectBy struct { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. + // + // Types that are valid to be assigned to By: + // *SelectBy_ID + // *SelectBy_IDPrefix + // *SelectBy_Name + // *SelectBy_NamePrefix + // *SelectBy_Custom + // *SelectBy_CustomPrefix + // *SelectBy_ServiceID + // *SelectBy_NodeID + // *SelectBy_Slot + // *SelectBy_DesiredState + // *SelectBy_Role + // *SelectBy_Membership + // *SelectBy_ReferencedNetworkID + // *SelectBy_ReferencedSecretID + // *SelectBy_ReferencedConfigID + // *SelectBy_Kind + By isSelectBy_By `protobuf_oneof:"By"` +} + +func (m *SelectBy) Reset() { *m = SelectBy{} } +func (*SelectBy) ProtoMessage() {} +func (*SelectBy) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{3} } + +type isSelectBy_By interface { + isSelectBy_By() + MarshalTo([]byte) (int, error) + Size() int +} + +type SelectBy_ID struct { + ID string `protobuf:"bytes,1,opt,name=id,proto3,oneof"` +} +type SelectBy_IDPrefix struct { + IDPrefix string `protobuf:"bytes,2,opt,name=id_prefix,json=idPrefix,proto3,oneof"` +} +type SelectBy_Name struct { + Name string `protobuf:"bytes,3,opt,name=name,proto3,oneof"` +} +type SelectBy_NamePrefix struct { + NamePrefix string `protobuf:"bytes,4,opt,name=name_prefix,json=namePrefix,proto3,oneof"` +} +type SelectBy_Custom struct { + Custom *SelectByCustom `protobuf:"bytes,5,opt,name=custom,oneof"` +} +type SelectBy_CustomPrefix struct { + CustomPrefix *SelectByCustom `protobuf:"bytes,6,opt,name=custom_prefix,json=customPrefix,oneof"` +} +type SelectBy_ServiceID struct { + ServiceID string `protobuf:"bytes,7,opt,name=service_id,json=serviceId,proto3,oneof"` +} +type SelectBy_NodeID struct { + NodeID string `protobuf:"bytes,8,opt,name=node_id,json=nodeId,proto3,oneof"` +} +type SelectBy_Slot struct { + Slot *SelectBySlot `protobuf:"bytes,9,opt,name=slot,oneof"` +} +type SelectBy_DesiredState struct { + DesiredState TaskState `protobuf:"varint,10,opt,name=desired_state,json=desiredState,proto3,enum=docker.swarmkit.v1.TaskState,oneof"` +} +type SelectBy_Role struct { + Role NodeRole `protobuf:"varint,11,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole,oneof"` +} +type SelectBy_Membership struct { + Membership NodeSpec_Membership `protobuf:"varint,12,opt,name=membership,proto3,enum=docker.swarmkit.v1.NodeSpec_Membership,oneof"` +} +type SelectBy_ReferencedNetworkID struct { + ReferencedNetworkID string `protobuf:"bytes,13,opt,name=referenced_network_id,json=referencedNetworkId,proto3,oneof"` +} +type SelectBy_ReferencedSecretID struct { + 
ReferencedSecretID string `protobuf:"bytes,14,opt,name=referenced_secret_id,json=referencedSecretId,proto3,oneof"` +} +type SelectBy_ReferencedConfigID struct { + ReferencedConfigID string `protobuf:"bytes,16,opt,name=referenced_config_id,json=referencedConfigId,proto3,oneof"` +} +type SelectBy_Kind struct { + Kind string `protobuf:"bytes,15,opt,name=kind,proto3,oneof"` +} + +func (*SelectBy_ID) isSelectBy_By() {} +func (*SelectBy_IDPrefix) isSelectBy_By() {} +func (*SelectBy_Name) isSelectBy_By() {} +func (*SelectBy_NamePrefix) isSelectBy_By() {} +func (*SelectBy_Custom) isSelectBy_By() {} +func (*SelectBy_CustomPrefix) isSelectBy_By() {} +func (*SelectBy_ServiceID) isSelectBy_By() {} +func (*SelectBy_NodeID) isSelectBy_By() {} +func (*SelectBy_Slot) isSelectBy_By() {} +func (*SelectBy_DesiredState) isSelectBy_By() {} +func (*SelectBy_Role) isSelectBy_By() {} +func (*SelectBy_Membership) isSelectBy_By() {} +func (*SelectBy_ReferencedNetworkID) isSelectBy_By() {} +func (*SelectBy_ReferencedSecretID) isSelectBy_By() {} +func (*SelectBy_ReferencedConfigID) isSelectBy_By() {} +func (*SelectBy_Kind) isSelectBy_By() {} + +func (m *SelectBy) GetBy() isSelectBy_By { + if m != nil { + return m.By + } + return nil +} + +func (m *SelectBy) GetID() string { + if x, ok := m.GetBy().(*SelectBy_ID); ok { + return x.ID + } + return "" +} + +func (m *SelectBy) GetIDPrefix() string { + if x, ok := m.GetBy().(*SelectBy_IDPrefix); ok { + return x.IDPrefix + } + return "" +} + +func (m *SelectBy) GetName() string { + if x, ok := m.GetBy().(*SelectBy_Name); ok { + return x.Name + } + return "" +} + +func (m *SelectBy) GetNamePrefix() string { + if x, ok := m.GetBy().(*SelectBy_NamePrefix); ok { + return x.NamePrefix + } + return "" +} + +func (m *SelectBy) GetCustom() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_Custom); ok { + return x.Custom + } + return nil +} + +func (m *SelectBy) GetCustomPrefix() *SelectByCustom { + if x, ok := m.GetBy().(*SelectBy_CustomPrefix); ok { + return x.CustomPrefix + } + return nil +} + +func (m *SelectBy) GetServiceID() string { + if x, ok := m.GetBy().(*SelectBy_ServiceID); ok { + return x.ServiceID + } + return "" +} + +func (m *SelectBy) GetNodeID() string { + if x, ok := m.GetBy().(*SelectBy_NodeID); ok { + return x.NodeID + } + return "" +} + +func (m *SelectBy) GetSlot() *SelectBySlot { + if x, ok := m.GetBy().(*SelectBy_Slot); ok { + return x.Slot + } + return nil +} + +func (m *SelectBy) GetDesiredState() TaskState { + if x, ok := m.GetBy().(*SelectBy_DesiredState); ok { + return x.DesiredState + } + return TaskStateNew +} + +func (m *SelectBy) GetRole() NodeRole { + if x, ok := m.GetBy().(*SelectBy_Role); ok { + return x.Role + } + return NodeRoleWorker +} + +func (m *SelectBy) GetMembership() NodeSpec_Membership { + if x, ok := m.GetBy().(*SelectBy_Membership); ok { + return x.Membership + } + return NodeMembershipPending +} + +func (m *SelectBy) GetReferencedNetworkID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedNetworkID); ok { + return x.ReferencedNetworkID + } + return "" +} + +func (m *SelectBy) GetReferencedSecretID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedSecretID); ok { + return x.ReferencedSecretID + } + return "" +} + +func (m *SelectBy) GetReferencedConfigID() string { + if x, ok := m.GetBy().(*SelectBy_ReferencedConfigID); ok { + return x.ReferencedConfigID + } + return "" +} + +func (m *SelectBy) GetKind() string { + if x, ok := m.GetBy().(*SelectBy_Kind); ok { + return x.Kind + } + return "" +} + +// XXX_OneofFuncs is 
for the internal use of the proto package. +func (*SelectBy) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SelectBy_OneofMarshaler, _SelectBy_OneofUnmarshaler, _SelectBy_OneofSizer, []interface{}{ + (*SelectBy_ID)(nil), + (*SelectBy_IDPrefix)(nil), + (*SelectBy_Name)(nil), + (*SelectBy_NamePrefix)(nil), + (*SelectBy_Custom)(nil), + (*SelectBy_CustomPrefix)(nil), + (*SelectBy_ServiceID)(nil), + (*SelectBy_NodeID)(nil), + (*SelectBy_Slot)(nil), + (*SelectBy_DesiredState)(nil), + (*SelectBy_Role)(nil), + (*SelectBy_Membership)(nil), + (*SelectBy_ReferencedNetworkID)(nil), + (*SelectBy_ReferencedSecretID)(nil), + (*SelectBy_ReferencedConfigID)(nil), + (*SelectBy_Kind)(nil), + } +} + +func _SelectBy_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ID) + case *SelectBy_IDPrefix: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.IDPrefix) + case *SelectBy_Name: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Name) + case *SelectBy_NamePrefix: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NamePrefix) + case *SelectBy_Custom: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Custom); err != nil { + return err + } + case *SelectBy_CustomPrefix: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.CustomPrefix); err != nil { + return err + } + case *SelectBy_ServiceID: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ServiceID) + case *SelectBy_NodeID: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.NodeID) + case *SelectBy_Slot: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Slot); err != nil { + return err + } + case *SelectBy_DesiredState: + _ = b.EncodeVarint(10<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.DesiredState)) + case *SelectBy_Role: + _ = b.EncodeVarint(11<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Role)) + case *SelectBy_Membership: + _ = b.EncodeVarint(12<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Membership)) + case *SelectBy_ReferencedNetworkID: + _ = b.EncodeVarint(13<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedNetworkID) + case *SelectBy_ReferencedSecretID: + _ = b.EncodeVarint(14<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedSecretID) + case *SelectBy_ReferencedConfigID: + _ = b.EncodeVarint(16<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.ReferencedConfigID) + case *SelectBy_Kind: + _ = b.EncodeVarint(15<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.Kind) + case nil: + default: + return fmt.Errorf("SelectBy.By has unexpected type %T", x) + } + return nil +} + +func _SelectBy_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SelectBy) + switch tag { + case 1: // By.id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ID{x} + return true, err + case 2: // By.id_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_IDPrefix{x} + return true, err + case 3: // By.name + if wire != proto.WireBytes { + 
return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Name{x} + return true, err + case 4: // By.name_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NamePrefix{x} + return true, err + case 5: // By.custom + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Custom{msg} + return true, err + case 6: // By.custom_prefix + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectByCustom) + err := b.DecodeMessage(msg) + m.By = &SelectBy_CustomPrefix{msg} + return true, err + case 7: // By.service_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ServiceID{x} + return true, err + case 8: // By.node_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_NodeID{x} + return true, err + case 9: // By.slot + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SelectBySlot) + err := b.DecodeMessage(msg) + m.By = &SelectBy_Slot{msg} + return true, err + case 10: // By.desired_state + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_DesiredState{TaskState(x)} + return true, err + case 11: // By.role + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Role{NodeRole(x)} + return true, err + case 12: // By.membership + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.By = &SelectBy_Membership{NodeSpec_Membership(x)} + return true, err + case 13: // By.referenced_network_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedNetworkID{x} + return true, err + case 14: // By.referenced_secret_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedSecretID{x} + return true, err + case 16: // By.referenced_config_id + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_ReferencedConfigID{x} + return true, err + case 15: // By.kind + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.By = &SelectBy_Kind{x} + return true, err + default: + return false, nil + } +} + +func _SelectBy_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SelectBy) + // By + switch x := m.By.(type) { + case *SelectBy_ID: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.ID))) + n += len(x.ID) + case *SelectBy_IDPrefix: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.IDPrefix))) + n += len(x.IDPrefix) + case *SelectBy_Name: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.Name))) + n += len(x.Name) + case *SelectBy_NamePrefix: + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.NamePrefix))) + n += len(x.NamePrefix) + case *SelectBy_Custom: + s := proto.Size(x.Custom) + n += proto.SizeVarint(5<<3 
| proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SelectBy_CustomPrefix:
+		s := proto.Size(x.CustomPrefix)
+		n += proto.SizeVarint(6<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SelectBy_ServiceID:
+		n += proto.SizeVarint(7<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.ServiceID)))
+		n += len(x.ServiceID)
+	case *SelectBy_NodeID:
+		n += proto.SizeVarint(8<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.NodeID)))
+		n += len(x.NodeID)
+	case *SelectBy_Slot:
+		s := proto.Size(x.Slot)
+		n += proto.SizeVarint(9<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *SelectBy_DesiredState:
+		n += proto.SizeVarint(10<<3 | proto.WireVarint)
+		n += proto.SizeVarint(uint64(x.DesiredState))
+	case *SelectBy_Role:
+		n += proto.SizeVarint(11<<3 | proto.WireVarint)
+		n += proto.SizeVarint(uint64(x.Role))
+	case *SelectBy_Membership:
+		n += proto.SizeVarint(12<<3 | proto.WireVarint)
+		n += proto.SizeVarint(uint64(x.Membership))
+	case *SelectBy_ReferencedNetworkID:
+		n += proto.SizeVarint(13<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.ReferencedNetworkID)))
+		n += len(x.ReferencedNetworkID)
+	case *SelectBy_ReferencedSecretID:
+		n += proto.SizeVarint(14<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.ReferencedSecretID)))
+		n += len(x.ReferencedSecretID)
+	case *SelectBy_ReferencedConfigID:
+		n += proto.SizeVarint(16<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.ReferencedConfigID)))
+		n += len(x.ReferencedConfigID)
+	case *SelectBy_Kind:
+		n += proto.SizeVarint(15<<3 | proto.WireBytes)
+		n += proto.SizeVarint(uint64(len(x.Kind)))
+		n += len(x.Kind)
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type WatchRequest struct {
+	// Multiple entries are combined using OR logic - i.e. if an event
+	// matches all of the selectors specified in any single watch entry,
+	// the event will be sent to the client.
+	Entries []*WatchRequest_WatchEntry `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
+	// ResumeFrom provides a version to resume the watch from, if non-nil.
+	// The watch will return changes since this version, and continue to
+	// return new changes afterwards. Watch will return an error if the
+	// server has compacted its log and no longer has complete history to
+	// this point.
+	ResumeFrom *Version `protobuf:"bytes,2,opt,name=resume_from,json=resumeFrom" json:"resume_from,omitempty"`
+	// IncludeOldObject causes WatchMessages to include a copy of the
+	// previous version of the object on updates. Note that only live
+	// changes will include the old object (not historical changes
+	// retrieved using ResumeFrom).
+	IncludeOldObject bool `protobuf:"varint,3,opt,name=include_old_object,json=includeOldObject,proto3" json:"include_old_object,omitempty"`
+}
+
+func (m *WatchRequest) Reset()                    { *m = WatchRequest{} }
+func (*WatchRequest) ProtoMessage()               {}
+func (*WatchRequest) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4} }
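+
+// An illustrative (not generated) sketch of how a client could watch node
+// events, combining the OR semantics of Entries with the AND semantics of
+// each entry's Filters; conn is assumed to be an authenticated
+// *grpc.ClientConn to a manager:
+//
+//	func watchNodes(ctx context.Context, conn *grpc.ClientConn) error {
+//		client := NewWatchClient(conn)
+//		stream, err := client.Watch(ctx, &WatchRequest{
+//			Entries: []*WatchRequest_WatchEntry{{
+//				Kind:   "node",
+//				Action: WatchActionKindCreate | WatchActionKindUpdate,
+//			}},
+//		})
+//		if err != nil {
+//			return err
+//		}
+//		// The first message only signals that the stream is established;
+//		// subsequent messages carry the matching events.
+//		for {
+//			msg, err := stream.Recv()
+//			if err != nil {
+//				return err
+//			}
+//			for _, ev := range msg.Events {
+//				if node := ev.Object.GetNode(); node != nil {
+//					_ = node // react to the change here
+//				}
+//			}
+//		}
+//	}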
+
+type WatchRequest_WatchEntry struct {
+	// Kind can contain a builtin type such as "node", "secret", etc. or
+	// the kind specified by a custom-defined object.
+	Kind string `protobuf:"bytes,1,opt,name=kind,proto3" json:"kind,omitempty"`
+	// Action (create/update/delete)
+	// This is a bitmask, so multiple actions may be OR'd together.
+	Action WatchActionKind `protobuf:"varint,2,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"`
+	// Filters are combined using AND logic - an event must match
+	// all of them to pass the filter.
+	Filters []*SelectBy `protobuf:"bytes,3,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *WatchRequest_WatchEntry) Reset()                    { *m = WatchRequest_WatchEntry{} }
+func (*WatchRequest_WatchEntry) ProtoMessage()               {}
+func (*WatchRequest_WatchEntry) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{4, 0} }
+
+// WatchMessage is the type of the stream that's returned to the client by
+// Watch. Note that the first item of this stream will always be a WatchMessage
+// with a nil Object, to signal that the stream has started.
+type WatchMessage struct {
+	Events []*WatchMessage_Event `protobuf:"bytes,1,rep,name=events" json:"events,omitempty"`
+	// Index versions this change to the data store. It can be used to
+	// resume the watch from this point.
+	Version *Version `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+}
+
+func (m *WatchMessage) Reset()                    { *m = WatchMessage{} }
+func (*WatchMessage) ProtoMessage()               {}
+func (*WatchMessage) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5} }
+
+type WatchMessage_Event struct {
+	// Action (create/update/delete)
+	// Note that WatchMessage does not expose "commit" events that
+	// mark transaction boundaries.
+	Action WatchActionKind `protobuf:"varint,1,opt,name=action,proto3,enum=docker.swarmkit.v1.WatchActionKind" json:"action,omitempty"`
+	// Matched object
+	Object *Object `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"`
+	// For updates, OldObject will optionally be included in the
+	// watch message, containing the previous version of the
+	// object, if IncludeOldObject was set in WatchRequest.
+ OldObject *Object `protobuf:"bytes,3,opt,name=old_object,json=oldObject" json:"old_object,omitempty"` +} + +func (m *WatchMessage_Event) Reset() { *m = WatchMessage_Event{} } +func (*WatchMessage_Event) ProtoMessage() {} +func (*WatchMessage_Event) Descriptor() ([]byte, []int) { return fileDescriptorWatch, []int{5, 0} } + +func init() { + proto.RegisterType((*Object)(nil), "docker.swarmkit.v1.Object") + proto.RegisterType((*SelectBySlot)(nil), "docker.swarmkit.v1.SelectBySlot") + proto.RegisterType((*SelectByCustom)(nil), "docker.swarmkit.v1.SelectByCustom") + proto.RegisterType((*SelectBy)(nil), "docker.swarmkit.v1.SelectBy") + proto.RegisterType((*WatchRequest)(nil), "docker.swarmkit.v1.WatchRequest") + proto.RegisterType((*WatchRequest_WatchEntry)(nil), "docker.swarmkit.v1.WatchRequest.WatchEntry") + proto.RegisterType((*WatchMessage)(nil), "docker.swarmkit.v1.WatchMessage") + proto.RegisterType((*WatchMessage_Event)(nil), "docker.swarmkit.v1.WatchMessage.Event") + proto.RegisterEnum("docker.swarmkit.v1.WatchActionKind", WatchActionKind_name, WatchActionKind_value) +} + +type authenticatedWrapperWatchServer struct { + local WatchServer + authorize func(context.Context, []string) error +} + +func NewAuthenticatedWrapperWatchServer(local WatchServer, authorize func(context.Context, []string) error) WatchServer { + return &authenticatedWrapperWatchServer{ + local: local, + authorize: authorize, + } +} + +func (p *authenticatedWrapperWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + + if err := p.authorize(stream.Context(), []string{"swarm-manager"}); err != nil { + return err + } + return p.local.Watch(r, stream) +} + +func (m *Object) Copy() *Object { + if m == nil { + return nil + } + o := &Object{} + o.CopyFrom(m) + return o +} + +func (m *Object) CopyFrom(src interface{}) { + + o := src.(*Object) + *m = *o + if o.Object != nil { + switch o.Object.(type) { + case *Object_Node: + v := Object_Node{ + Node: &Node{}, + } + deepcopy.Copy(v.Node, o.GetNode()) + m.Object = &v + case *Object_Service: + v := Object_Service{ + Service: &Service{}, + } + deepcopy.Copy(v.Service, o.GetService()) + m.Object = &v + case *Object_Network: + v := Object_Network{ + Network: &Network{}, + } + deepcopy.Copy(v.Network, o.GetNetwork()) + m.Object = &v + case *Object_Task: + v := Object_Task{ + Task: &Task{}, + } + deepcopy.Copy(v.Task, o.GetTask()) + m.Object = &v + case *Object_Cluster: + v := Object_Cluster{ + Cluster: &Cluster{}, + } + deepcopy.Copy(v.Cluster, o.GetCluster()) + m.Object = &v + case *Object_Secret: + v := Object_Secret{ + Secret: &Secret{}, + } + deepcopy.Copy(v.Secret, o.GetSecret()) + m.Object = &v + case *Object_Resource: + v := Object_Resource{ + Resource: &Resource{}, + } + deepcopy.Copy(v.Resource, o.GetResource()) + m.Object = &v + case *Object_Extension: + v := Object_Extension{ + Extension: &Extension{}, + } + deepcopy.Copy(v.Extension, o.GetExtension()) + m.Object = &v + case *Object_Config: + v := Object_Config{ + Config: &Config{}, + } + deepcopy.Copy(v.Config, o.GetConfig()) + m.Object = &v + } + } + +} + +func (m *SelectBySlot) Copy() *SelectBySlot { + if m == nil { + return nil + } + o := &SelectBySlot{} + o.CopyFrom(m) + return o +} + +func (m *SelectBySlot) CopyFrom(src interface{}) { + + o := src.(*SelectBySlot) + *m = *o +} + +func (m *SelectByCustom) Copy() *SelectByCustom { + if m == nil { + return nil + } + o := &SelectByCustom{} + o.CopyFrom(m) + return o +} + +func (m *SelectByCustom) CopyFrom(src interface{}) { + + o := src.(*SelectByCustom) 
+ *m = *o +} + +func (m *SelectBy) Copy() *SelectBy { + if m == nil { + return nil + } + o := &SelectBy{} + o.CopyFrom(m) + return o +} + +func (m *SelectBy) CopyFrom(src interface{}) { + + o := src.(*SelectBy) + *m = *o + if o.By != nil { + switch o.By.(type) { + case *SelectBy_ID: + v := SelectBy_ID{ + ID: o.GetID(), + } + m.By = &v + case *SelectBy_IDPrefix: + v := SelectBy_IDPrefix{ + IDPrefix: o.GetIDPrefix(), + } + m.By = &v + case *SelectBy_Name: + v := SelectBy_Name{ + Name: o.GetName(), + } + m.By = &v + case *SelectBy_NamePrefix: + v := SelectBy_NamePrefix{ + NamePrefix: o.GetNamePrefix(), + } + m.By = &v + case *SelectBy_Custom: + v := SelectBy_Custom{ + Custom: &SelectByCustom{}, + } + deepcopy.Copy(v.Custom, o.GetCustom()) + m.By = &v + case *SelectBy_CustomPrefix: + v := SelectBy_CustomPrefix{ + CustomPrefix: &SelectByCustom{}, + } + deepcopy.Copy(v.CustomPrefix, o.GetCustomPrefix()) + m.By = &v + case *SelectBy_ServiceID: + v := SelectBy_ServiceID{ + ServiceID: o.GetServiceID(), + } + m.By = &v + case *SelectBy_NodeID: + v := SelectBy_NodeID{ + NodeID: o.GetNodeID(), + } + m.By = &v + case *SelectBy_Slot: + v := SelectBy_Slot{ + Slot: &SelectBySlot{}, + } + deepcopy.Copy(v.Slot, o.GetSlot()) + m.By = &v + case *SelectBy_DesiredState: + v := SelectBy_DesiredState{ + DesiredState: o.GetDesiredState(), + } + m.By = &v + case *SelectBy_Role: + v := SelectBy_Role{ + Role: o.GetRole(), + } + m.By = &v + case *SelectBy_Membership: + v := SelectBy_Membership{ + Membership: o.GetMembership(), + } + m.By = &v + case *SelectBy_ReferencedNetworkID: + v := SelectBy_ReferencedNetworkID{ + ReferencedNetworkID: o.GetReferencedNetworkID(), + } + m.By = &v + case *SelectBy_ReferencedSecretID: + v := SelectBy_ReferencedSecretID{ + ReferencedSecretID: o.GetReferencedSecretID(), + } + m.By = &v + case *SelectBy_ReferencedConfigID: + v := SelectBy_ReferencedConfigID{ + ReferencedConfigID: o.GetReferencedConfigID(), + } + m.By = &v + case *SelectBy_Kind: + v := SelectBy_Kind{ + Kind: o.GetKind(), + } + m.By = &v + } + } + +} + +func (m *WatchRequest) Copy() *WatchRequest { + if m == nil { + return nil + } + o := &WatchRequest{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest) CopyFrom(src interface{}) { + + o := src.(*WatchRequest) + *m = *o + if o.Entries != nil { + m.Entries = make([]*WatchRequest_WatchEntry, len(o.Entries)) + for i := range m.Entries { + m.Entries[i] = &WatchRequest_WatchEntry{} + deepcopy.Copy(m.Entries[i], o.Entries[i]) + } + } + + if o.ResumeFrom != nil { + m.ResumeFrom = &Version{} + deepcopy.Copy(m.ResumeFrom, o.ResumeFrom) + } +} + +func (m *WatchRequest_WatchEntry) Copy() *WatchRequest_WatchEntry { + if m == nil { + return nil + } + o := &WatchRequest_WatchEntry{} + o.CopyFrom(m) + return o +} + +func (m *WatchRequest_WatchEntry) CopyFrom(src interface{}) { + + o := src.(*WatchRequest_WatchEntry) + *m = *o + if o.Filters != nil { + m.Filters = make([]*SelectBy, len(o.Filters)) + for i := range m.Filters { + m.Filters[i] = &SelectBy{} + deepcopy.Copy(m.Filters[i], o.Filters[i]) + } + } + +} + +func (m *WatchMessage) Copy() *WatchMessage { + if m == nil { + return nil + } + o := &WatchMessage{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage) CopyFrom(src interface{}) { + + o := src.(*WatchMessage) + *m = *o + if o.Events != nil { + m.Events = make([]*WatchMessage_Event, len(o.Events)) + for i := range m.Events { + m.Events[i] = &WatchMessage_Event{} + deepcopy.Copy(m.Events[i], o.Events[i]) + } + } + + if o.Version != nil { + m.Version = &Version{} + 
deepcopy.Copy(m.Version, o.Version) + } +} + +func (m *WatchMessage_Event) Copy() *WatchMessage_Event { + if m == nil { + return nil + } + o := &WatchMessage_Event{} + o.CopyFrom(m) + return o +} + +func (m *WatchMessage_Event) CopyFrom(src interface{}) { + + o := src.(*WatchMessage_Event) + *m = *o + if o.Object != nil { + m.Object = &Object{} + deepcopy.Copy(m.Object, o.Object) + } + if o.OldObject != nil { + m.OldObject = &Object{} + deepcopy.Copy(m.OldObject, o.OldObject) + } +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Watch service + +type WatchClient interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) +} + +type watchClient struct { + cc *grpc.ClientConn +} + +func NewWatchClient(cc *grpc.ClientConn) WatchClient { + return &watchClient{cc} +} + +func (c *watchClient) Watch(ctx context.Context, in *WatchRequest, opts ...grpc.CallOption) (Watch_WatchClient, error) { + stream, err := grpc.NewClientStream(ctx, &_Watch_serviceDesc.Streams[0], c.cc, "/docker.swarmkit.v1.Watch/Watch", opts...) + if err != nil { + return nil, err + } + x := &watchWatchClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type Watch_WatchClient interface { + Recv() (*WatchMessage, error) + grpc.ClientStream +} + +type watchWatchClient struct { + grpc.ClientStream +} + +func (x *watchWatchClient) Recv() (*WatchMessage, error) { + m := new(WatchMessage) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for Watch service + +type WatchServer interface { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. 
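The doc comment above is the one behavioral contract a caller must honor: the first WatchMessage on a new stream is an empty synchronization marker, and nothing should be inferred from the stream until it has been received. A hedged client sketch follows; the manager address, the "task" kind string, and the service ID are assumptions made for illustration, and a real deployment would dial with TLS credentials rather than an insecure connection:

package main

import (
	"context"
	"log"

	"github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

func main() {
	// Assumed manager address; swarm managers normally require mutual TLS.
	conn, err := grpc.Dial("127.0.0.1:2377", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := api.NewWatchClient(conn)
	stream, err := client.Watch(context.Background(), &api.WatchRequest{
		Entries: []*api.WatchRequest_WatchEntry{{
			Kind: "task", // assumed object kind string
			Filters: []*api.SelectBy{
				{By: &api.SelectBy_ServiceID{ServiceID: "some-service-id"}},
			},
		}},
		IncludeOldObject: true, // ask for the previous state on updates
	})
	if err != nil {
		log.Fatal(err)
	}

	// Per the doc comment, the first message is empty and only signals that
	// the watch is established; wait for it before acting on anything.
	if _, err := stream.Recv(); err != nil {
		log.Fatal(err)
	}
	for {
		msg, err := stream.Recv()
		if err != nil {
			log.Fatal(err)
		}
		for _, ev := range msg.Events {
			log.Printf("action=%v object=%v", ev.Action, ev.Object)
		}
	}
}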
+ Watch(*WatchRequest, Watch_WatchServer) error +} + +func RegisterWatchServer(s *grpc.Server, srv WatchServer) { + s.RegisterService(&_Watch_serviceDesc, srv) +} + +func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(WatchRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(WatchServer).Watch(m, &watchWatchServer{stream}) +} + +type Watch_WatchServer interface { + Send(*WatchMessage) error + grpc.ServerStream +} + +type watchWatchServer struct { + grpc.ServerStream +} + +func (x *watchWatchServer) Send(m *WatchMessage) error { + return x.ServerStream.SendMsg(m) +} + +var _Watch_serviceDesc = grpc.ServiceDesc{ + ServiceName: "docker.swarmkit.v1.Watch", + HandlerType: (*WatchServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Watch", + Handler: _Watch_Watch_Handler, + ServerStreams: true, + }, + }, + Metadata: "github.com/docker/swarmkit/api/watch.proto", +} + +func (m *Object) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Object) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Object != nil { + nn1, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Object_Node) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Node != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Node.Size())) + n2, err := m.Node.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} +func (m *Object_Service) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Service != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Service.Size())) + n3, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Object_Network) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Network != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Network.Size())) + n4, err := m.Network.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Object_Task) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Task != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Task.Size())) + n5, err := m.Task.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Object_Cluster) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Cluster != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Cluster.Size())) + n6, err := m.Cluster.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Object_Secret) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Secret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Secret.Size())) + n7, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Object_Resource) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Resource != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Resource.Size())) + n8, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *Object_Extension) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Extension != nil { + dAtA[i] 
= 0x42 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Extension.Size())) + n9, err := m.Extension.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *Object_Config) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Config != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Config.Size())) + n10, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *SelectBySlot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectBySlot) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ServiceID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + } + if m.Slot != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Slot)) + } + return i, nil +} + +func (m *SelectByCustom) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectByCustom) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if len(m.Index) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Index))) + i += copy(dAtA[i:], m.Index) + } + if len(m.Value) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *SelectBy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelectBy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.By != nil { + nn11, err := m.By.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn11 + } + return i, nil +} + +func (m *SelectBy_ID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + return i, nil +} +func (m *SelectBy_IDPrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.IDPrefix))) + i += copy(dAtA[i:], m.IDPrefix) + return i, nil +} +func (m *SelectBy_Name) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil +} +func (m *SelectBy_NamePrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x22 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.NamePrefix))) + i += copy(dAtA[i:], m.NamePrefix) + return i, nil +} +func (m *SelectBy_Custom) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Custom != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Custom.Size())) + n12, err := m.Custom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} +func (m *SelectBy_CustomPrefix) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.CustomPrefix != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.CustomPrefix.Size())) + n13, err := 
m.CustomPrefix.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } + return i, nil +} +func (m *SelectBy_ServiceID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x3a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ServiceID))) + i += copy(dAtA[i:], m.ServiceID) + return i, nil +} +func (m *SelectBy_NodeID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x42 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.NodeID))) + i += copy(dAtA[i:], m.NodeID) + return i, nil +} +func (m *SelectBy_Slot) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Slot != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Slot.Size())) + n14, err := m.Slot.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + } + return i, nil +} +func (m *SelectBy_DesiredState) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x50 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.DesiredState)) + return i, nil +} +func (m *SelectBy_Role) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x58 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Role)) + return i, nil +} +func (m *SelectBy_Membership) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x60 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Membership)) + return i, nil +} +func (m *SelectBy_ReferencedNetworkID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x6a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedNetworkID))) + i += copy(dAtA[i:], m.ReferencedNetworkID) + return i, nil +} +func (m *SelectBy_ReferencedSecretID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x72 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedSecretID))) + i += copy(dAtA[i:], m.ReferencedSecretID) + return i, nil +} +func (m *SelectBy_Kind) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x7a + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil +} +func (m *SelectBy_ReferencedConfigID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintWatch(dAtA, i, uint64(len(m.ReferencedConfigID))) + i += copy(dAtA[i:], m.ReferencedConfigID) + return i, nil +} +func (m *WatchRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Entries) > 0 { + for _, msg := range m.Entries { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.ResumeFrom != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.ResumeFrom.Size())) + n15, err := m.ResumeFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + } + if m.IncludeOldObject { + dAtA[i] = 0x18 + i++ + if m.IncludeOldObject { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *WatchRequest_WatchEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchRequest_WatchEntry) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Kind) > 0 { + dAtA[i] = 0xa + i++ + i = 
encodeVarintWatch(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + } + if m.Action != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Action)) + } + if len(m.Filters) > 0 { + for _, msg := range m.Filters { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *WatchMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchMessage) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Events) > 0 { + for _, msg := range m.Events { + dAtA[i] = 0xa + i++ + i = encodeVarintWatch(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Version != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Version.Size())) + n16, err := m.Version.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } + return i, nil +} + +func (m *WatchMessage_Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchMessage_Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Action != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Action)) + } + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.Object.Size())) + n17, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + } + if m.OldObject != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintWatch(dAtA, i, uint64(m.OldObject.Size())) + n18, err := m.OldObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + } + return i, nil +} + +func encodeVarintWatch(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +type raftProxyWatchServer struct { + local WatchServer + connSelector raftselector.ConnProvider + localCtxMods, remoteCtxMods []func(context.Context) (context.Context, error) +} + +func NewRaftProxyWatchServer(local WatchServer, connSelector raftselector.ConnProvider, localCtxMod, remoteCtxMod func(context.Context) (context.Context, error)) WatchServer { + redirectChecker := func(ctx context.Context) (context.Context, error) { + p, ok := peer.FromContext(ctx) + if !ok { + return ctx, status.Errorf(codes.InvalidArgument, "remote addr is not found in context") + } + addr := p.Addr.String() + md, ok := metadata.FromIncomingContext(ctx) + if ok && len(md["redirect"]) != 0 { + return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"]) + } + if !ok { + md = metadata.New(map[string]string{}) + } + md["redirect"] = append(md["redirect"], addr) + return metadata.NewOutgoingContext(ctx, md), nil + } + remoteMods := []func(context.Context) (context.Context, error){redirectChecker} + remoteMods = append(remoteMods, remoteCtxMod) + + var localMods []func(context.Context) (context.Context, error) + if localCtxMod != nil { + localMods = []func(context.Context) (context.Context, error){localCtxMod} + } + + return &raftProxyWatchServer{ + local: local, + 
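encodeVarintWatch above (together with sovWatch further down, which computes the encoded width) implements protobuf's base-128 varint: seven payload bits per byte, with the high bit set on every byte except the last. A standalone round trip under hypothetical helper names, showing that 300 encodes as 0xAC 0x02; the decode loop is exactly the (b & 0x7F) << shift accumulation that recurs throughout the Unmarshal methods later in this file:

package main

import "fmt"

// putUvarint mirrors encodeVarintWatch: emit seven bits per byte,
// setting the continuation (high) bit on every byte except the last.
func putUvarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// uvarint mirrors the generated decode loops: accumulate seven bits
// per byte until a byte arrives with the high bit clear.
func uvarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	buf := make([]byte, 10)
	end := putUvarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:end]) // ac 02
	v, n := uvarint(buf[:end])
	fmt.Println(v, n) // 300 2
}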
connSelector: connSelector, + localCtxMods: localMods, + remoteCtxMods: remoteMods, + } +} +func (p *raftProxyWatchServer) runCtxMods(ctx context.Context, ctxMods []func(context.Context) (context.Context, error)) (context.Context, error) { + var err error + for _, mod := range ctxMods { + ctx, err = mod(ctx) + if err != nil { + return ctx, err + } + } + return ctx, nil +} +func (p *raftProxyWatchServer) pollNewLeaderConn(ctx context.Context) (*grpc.ClientConn, error) { + ticker := rafttime.NewTicker(500 * rafttime.Millisecond) + defer ticker.Stop() + for { + select { + case <-ticker.C: + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + return nil, err + } + + client := NewHealthClient(conn) + + resp, err := client.Check(ctx, &HealthCheckRequest{Service: "Raft"}) + if err != nil || resp.Status != HealthCheckResponse_SERVING { + continue + } + return conn, nil + case <-ctx.Done(): + return nil, ctx.Err() + } + } +} + +type Watch_WatchServerWrapper struct { + Watch_WatchServer + ctx context.Context +} + +func (s Watch_WatchServerWrapper) Context() context.Context { + return s.ctx +} + +func (p *raftProxyWatchServer) Watch(r *WatchRequest, stream Watch_WatchServer) error { + ctx := stream.Context() + conn, err := p.connSelector.LeaderConn(ctx) + if err != nil { + if err == raftselector.ErrIsLeader { + ctx, err = p.runCtxMods(ctx, p.localCtxMods) + if err != nil { + return err + } + streamWrapper := Watch_WatchServerWrapper{ + Watch_WatchServer: stream, + ctx: ctx, + } + return p.local.Watch(r, streamWrapper) + } + return err + } + ctx, err = p.runCtxMods(ctx, p.remoteCtxMods) + if err != nil { + return err + } + clientStream, err := NewWatchClient(conn).Watch(ctx, r) + + if err != nil { + return err + } + + for { + msg, err := clientStream.Recv() + if err == io.EOF { + break + } + if err != nil { + return err + } + if err := stream.Send(msg); err != nil { + return err + } + } + return nil +} + +func (m *Object) Size() (n int) { + var l int + _ = l + if m.Object != nil { + n += m.Object.Size() + } + return n +} + +func (m *Object_Node) Size() (n int) { + var l int + _ = l + if m.Node != nil { + l = m.Node.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Service) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Network) Size() (n int) { + var l int + _ = l + if m.Network != nil { + l = m.Network.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Task) Size() (n int) { + var l int + _ = l + if m.Task != nil { + l = m.Task.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Cluster) Size() (n int) { + var l int + _ = l + if m.Cluster != nil { + l = m.Cluster.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Secret) Size() (n int) { + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Resource) Size() (n int) { + var l int + _ = l + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Extension) Size() (n int) { + var l int + _ = l + if m.Extension != nil { + l = m.Extension.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *Object_Config) Size() (n int) { + var l int + _ = l + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBySlot) Size() (n int) { + 
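NewRaftProxyWatchServer above stamps a "redirect" metadata key onto each forwarded call so that a request bounced to a node that is itself not the leader fails fast with ResourceExhausted instead of looping between managers. The guard is small enough to restate in isolation; this is an illustrative extraction, with checkRedirect and the addresses being names made up for the sketch:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// checkRedirect restates the loop guard from the raft proxy: each hop
// records its peer address under "redirect", and a second hop aborts.
func checkRedirect(ctx context.Context, addr string) (context.Context, error) {
	md, ok := metadata.FromIncomingContext(ctx)
	if ok && len(md["redirect"]) != 0 {
		return ctx, status.Errorf(codes.ResourceExhausted, "more than one redirect to leader from: %s", md["redirect"])
	}
	if !ok {
		md = metadata.New(map[string]string{})
	}
	md["redirect"] = append(md["redirect"], addr)
	return metadata.NewOutgoingContext(ctx, md), nil
}

func main() {
	// First hop: the metadata is stamped and the call may proceed.
	ctx, err := checkRedirect(context.Background(), "10.0.0.1:2377")
	fmt.Println(err) // <nil>

	// Simulate the second hop seeing the stamped metadata as incoming.
	md, _ := metadata.FromOutgoingContext(ctx)
	ctx = metadata.NewIncomingContext(context.Background(), md)
	_, err = checkRedirect(ctx, "10.0.0.2:2377")
	fmt.Println(err) // rpc error: ResourceExhausted: more than one redirect ...
}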
var l int + _ = l + l = len(m.ServiceID) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + if m.Slot != 0 { + n += 1 + sovWatch(uint64(m.Slot)) + } + return n +} + +func (m *SelectByCustom) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + l = len(m.Index) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func (m *SelectBy) Size() (n int) { + var l int + _ = l + if m.By != nil { + n += m.By.Size() + } + return n +} + +func (m *SelectBy_ID) Size() (n int) { + var l int + _ = l + l = len(m.ID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_IDPrefix) Size() (n int) { + var l int + _ = l + l = len(m.IDPrefix) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Name) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_NamePrefix) Size() (n int) { + var l int + _ = l + l = len(m.NamePrefix) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Custom) Size() (n int) { + var l int + _ = l + if m.Custom != nil { + l = m.Custom.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_CustomPrefix) Size() (n int) { + var l int + _ = l + if m.CustomPrefix != nil { + l = m.CustomPrefix.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_ServiceID) Size() (n int) { + var l int + _ = l + l = len(m.ServiceID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_NodeID) Size() (n int) { + var l int + _ = l + l = len(m.NodeID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Slot) Size() (n int) { + var l int + _ = l + if m.Slot != nil { + l = m.Slot.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} +func (m *SelectBy_DesiredState) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.DesiredState)) + return n +} +func (m *SelectBy_Role) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.Role)) + return n +} +func (m *SelectBy_Membership) Size() (n int) { + var l int + _ = l + n += 1 + sovWatch(uint64(m.Membership)) + return n +} +func (m *SelectBy_ReferencedNetworkID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedNetworkID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_ReferencedSecretID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedSecretID) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_Kind) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovWatch(uint64(l)) + return n +} +func (m *SelectBy_ReferencedConfigID) Size() (n int) { + var l int + _ = l + l = len(m.ReferencedConfigID) + n += 2 + l + sovWatch(uint64(l)) + return n +} +func (m *WatchRequest) Size() (n int) { + var l int + _ = l + if len(m.Entries) > 0 { + for _, e := range m.Entries { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + if m.ResumeFrom != nil { + l = m.ResumeFrom.Size() + n += 1 + l + sovWatch(uint64(l)) + } + if m.IncludeOldObject { + n += 2 + } + return n +} + +func (m *WatchRequest_WatchEntry) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + if l > 0 { + n += 1 + l + sovWatch(uint64(l)) + } + if m.Action != 0 { + n += 1 + sovWatch(uint64(m.Action)) + } + if len(m.Filters) > 0 { + for _, e := range m.Filters { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + return n +} + +func (m *WatchMessage) Size() (n int) { + var l 
int + _ = l + if len(m.Events) > 0 { + for _, e := range m.Events { + l = e.Size() + n += 1 + l + sovWatch(uint64(l)) + } + } + if m.Version != nil { + l = m.Version.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func (m *WatchMessage_Event) Size() (n int) { + var l int + _ = l + if m.Action != 0 { + n += 1 + sovWatch(uint64(m.Action)) + } + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovWatch(uint64(l)) + } + if m.OldObject != nil { + l = m.OldObject.Size() + n += 1 + l + sovWatch(uint64(l)) + } + return n +} + +func sovWatch(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWatch(x uint64) (n int) { + return sovWatch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Object) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object{`, + `Object:` + fmt.Sprintf("%v", this.Object) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Node{`, + `Node:` + strings.Replace(fmt.Sprintf("%v", this.Node), "Node", "Node", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Service{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "Service", "Service", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Network) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Network{`, + `Network:` + strings.Replace(fmt.Sprintf("%v", this.Network), "Network", "Network", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Task) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Task{`, + `Task:` + strings.Replace(fmt.Sprintf("%v", this.Task), "Task", "Task", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Cluster) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Cluster{`, + `Cluster:` + strings.Replace(fmt.Sprintf("%v", this.Cluster), "Cluster", "Cluster", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Secret) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Secret{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "Secret", "Secret", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Resource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Resource{`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "Resource", "Resource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Extension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Extension{`, + `Extension:` + strings.Replace(fmt.Sprintf("%v", this.Extension), "Extension", "Extension", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Object_Config) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Object_Config{`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "Config", "Config", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBySlot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBySlot{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `Slot:` + fmt.Sprintf("%v", this.Slot) + `,`, + `}`, + }, "") + return s +} +func (this 
*SelectByCustom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectByCustom{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Index:` + fmt.Sprintf("%v", this.Index) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy{`, + `By:` + fmt.Sprintf("%v", this.By) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ID{`, + `ID:` + fmt.Sprintf("%v", this.ID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_IDPrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_IDPrefix{`, + `IDPrefix:` + fmt.Sprintf("%v", this.IDPrefix) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Name) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Name{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_NamePrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_NamePrefix{`, + `NamePrefix:` + fmt.Sprintf("%v", this.NamePrefix) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Custom) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Custom{`, + `Custom:` + strings.Replace(fmt.Sprintf("%v", this.Custom), "SelectByCustom", "SelectByCustom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_CustomPrefix) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_CustomPrefix{`, + `CustomPrefix:` + strings.Replace(fmt.Sprintf("%v", this.CustomPrefix), "SelectByCustom", "SelectByCustom", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ServiceID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ServiceID{`, + `ServiceID:` + fmt.Sprintf("%v", this.ServiceID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_NodeID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_NodeID{`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Slot) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Slot{`, + `Slot:` + strings.Replace(fmt.Sprintf("%v", this.Slot), "SelectBySlot", "SelectBySlot", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_DesiredState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_DesiredState{`, + `DesiredState:` + fmt.Sprintf("%v", this.DesiredState) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Role) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Role{`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Membership) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Membership{`, + `Membership:` + fmt.Sprintf("%v", this.Membership) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedNetworkID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedNetworkID{`, + `ReferencedNetworkID:` + fmt.Sprintf("%v", 
this.ReferencedNetworkID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedSecretID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedSecretID{`, + `ReferencedSecretID:` + fmt.Sprintf("%v", this.ReferencedSecretID) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_Kind) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_Kind{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *SelectBy_ReferencedConfigID) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelectBy_ReferencedConfigID{`, + `ReferencedConfigID:` + fmt.Sprintf("%v", this.ReferencedConfigID) + `,`, + `}`, + }, "") + return s +} +func (this *WatchRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchRequest{`, + `Entries:` + strings.Replace(fmt.Sprintf("%v", this.Entries), "WatchRequest_WatchEntry", "WatchRequest_WatchEntry", 1) + `,`, + `ResumeFrom:` + strings.Replace(fmt.Sprintf("%v", this.ResumeFrom), "Version", "Version", 1) + `,`, + `IncludeOldObject:` + fmt.Sprintf("%v", this.IncludeOldObject) + `,`, + `}`, + }, "") + return s +} +func (this *WatchRequest_WatchEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchRequest_WatchEntry{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Filters:` + strings.Replace(fmt.Sprintf("%v", this.Filters), "SelectBy", "SelectBy", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WatchMessage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchMessage{`, + `Events:` + strings.Replace(fmt.Sprintf("%v", this.Events), "WatchMessage_Event", "WatchMessage_Event", 1) + `,`, + `Version:` + strings.Replace(fmt.Sprintf("%v", this.Version), "Version", "Version", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WatchMessage_Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchMessage_Event{`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "Object", "Object", 1) + `,`, + `OldObject:` + strings.Replace(fmt.Sprintf("%v", this.OldObject), "Object", "Object", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringWatch(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Object) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Object: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Object: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Node", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Node{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Node{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Service{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Service{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Network{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Network{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Task", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Task{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Task{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cluster", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Cluster{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Cluster{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Secret{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Secret{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Resource{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Resource{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extension", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Extension{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Extension{v} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Config{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Object = &Object_Config{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectBySlot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectBySlot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectBySlot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch 
+ } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + m.Slot = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Slot |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectByCustom) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectByCustom: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectByCustom: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Index = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } 
+ if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelectBy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelectBy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelectBy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IDPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_IDPrefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_Name{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_NamePrefix{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectByCustom{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_Custom{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectByCustom{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_CustomPrefix{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ServiceID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_NodeID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SelectBySlot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.By = &SelectBy_Slot{v} + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + var v TaskState + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (TaskState(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_DesiredState{v} + case 11: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v NodeRole + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeRole(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_Role{v} + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v NodeSpec_Membership + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NodeSpec_Membership(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.By = &SelectBy_Membership{v} + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedNetworkID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedNetworkID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedSecretID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedSecretID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_Kind{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReferencedConfigID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.By = &SelectBy_ReferencedConfigID{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Entries = append(m.Entries, &WatchRequest_WatchEntry{}) + if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResumeFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResumeFrom == nil { + m.ResumeFrom = &Version{} + } + if err := m.ResumeFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeOldObject", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeOldObject = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchRequest_WatchEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
WatchEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WatchActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Filters = append(m.Filters, &SelectBy{}) + if err := m.Filters[len(m.Filters)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Events = append(m.Events, &WatchMessage_Event{}) + if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + 
var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Version == nil { + m.Version = &Version{} + } + if err := m.Version.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchMessage_Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + m.Action = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Action |= (WatchActionKind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &Object{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OldObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWatch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWatch + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.OldObject == nil { + m.OldObject = &Object{} + } + if err := m.OldObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWatch(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWatch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func skipWatch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWatch + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWatch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWatch(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWatch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWatch = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("github.com/docker/swarmkit/api/watch.proto", fileDescriptorWatch) } + +var fileDescriptorWatch = []byte{ + // 1186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x96, 0xbd, 0x73, 0x1b, 0xc5, + 0x1b, 0xc7, 0x75, 0x8a, 0x7c, 0x92, 0x1e, 0xdb, 0x89, 0x67, 0xe3, 0x24, 0xf7, 0xd3, 0x2f, 0xc8, + 0x42, 0x0c, 0x90, 0x49, 0x82, 0x0c, 0x26, 0x24, 0x03, 0x04, 0x66, 0x2c, 0x59, 0x8c, 0x44, 0xc6, + 0x2f, 0xb3, 0xb6, 0x93, 0x52, 0x73, 0xbe, 0x7b, 0xac, 0x1c, 0xba, 0xbb, 0x15, 0x7b, 0x27, 0x39, + 0xee, 0x28, 0x28, 0x98, 0xf4, 0xcc, 0xd0, 0xa4, 0x82, 0x9a, 0x86, 0x0e, 0xfe, 0x81, 0x0c, 0x15, + 0x25, 0x34, 0x1a, 0xa2, 0x92, 0x82, 0xbf, 0x80, 0x82, 0xd9, 0x97, 0xf3, 0x8b, 0x72, 0xb2, 0x49, + 0xa5, 0xbd, 0xbd, 0xcf, 0xf7, 0xd9, 0x67, 0x9f, 0xb7, 0x13, 0xdc, 0xec, 0x7a, 0xf1, 0xe3, 0xc1, + 0x5e, 0xcd, 0x61, 0xc1, 0xb2, 0xcb, 0x9c, 0x1e, 0xf2, 0xe5, 0xe8, 0xc0, 0xe6, 0x41, 0xcf, 0x8b, + 0x97, 0xed, 0xbe, 0xb7, 0x7c, 0x60, 0xc7, 0xce, 0xe3, 0x5a, 0x9f, 0xb3, 0x98, 0x11, 0xa2, 0x80, + 0x5a, 0x02, 0xd4, 0x86, 0xef, 0x95, 0xce, 0xd3, 0x47, 0x7d, 0x74, 0x22, 0xa5, 0x2f, 0xdd, 0x3e, + 0x87, 0x65, 0x7b, 0x5f, 0xa0, 0x13, 0x27, 0xf4, 0x79, 0x96, 0xe3, 0xc3, 0x3e, 0x26, 0xec, 0x62, + 0x97, 0x75, 0x99, 0x5c, 0x2e, 0x8b, 0x95, 0xde, 0xbd, 0x77, 0x86, 0x05, 0x49, 0xec, 0x0d, 0xf6, + 0x97, 0xfb, 0xfe, 0xa0, 0xeb, 0x85, 0xfa, 0x47, 0x09, 0xab, 0x5f, 0xe7, 0xc0, 0xdc, 0x94, 0xce, + 0x90, 0x1a, 0xe4, 0x42, 0xe6, 0xa2, 0x65, 0x54, 0x8c, 0x1b, 0xb3, 0x2b, 0x56, 0xed, 0xe5, 0x10, + 0xd4, 0x36, 0x98, 0x8b, 0xad, 0x0c, 0x95, 0x1c, 0xb9, 
0x07, 0xf9, 0x08, 0xf9, 0xd0, 0x73, 0xd0, + 0xca, 0x4a, 0xc9, 0xff, 0xd3, 0x24, 0xdb, 0x0a, 0x69, 0x65, 0x68, 0x42, 0x0b, 0x61, 0x88, 0xf1, + 0x01, 0xe3, 0x3d, 0xeb, 0xc2, 0x74, 0xe1, 0x86, 0x42, 0x84, 0x50, 0xd3, 0xc2, 0xc3, 0xd8, 0x8e, + 0x7a, 0x56, 0x6e, 0xba, 0x87, 0x3b, 0x76, 0x24, 0x24, 0x92, 0x13, 0x07, 0x39, 0xfe, 0x20, 0x8a, + 0x91, 0x5b, 0x33, 0xd3, 0x0f, 0x6a, 0x28, 0x44, 0x1c, 0xa4, 0x69, 0x72, 0x07, 0xcc, 0x08, 0x1d, + 0x8e, 0xb1, 0x65, 0x4a, 0x5d, 0x29, 0xfd, 0x66, 0x82, 0x68, 0x65, 0xa8, 0x66, 0xc9, 0x47, 0x50, + 0xe0, 0x18, 0xb1, 0x01, 0x77, 0xd0, 0xca, 0x4b, 0xdd, 0xf5, 0x34, 0x1d, 0xd5, 0x4c, 0x2b, 0x43, + 0x8f, 0x78, 0xf2, 0x09, 0x14, 0xf1, 0x49, 0x8c, 0x61, 0xe4, 0xb1, 0xd0, 0x2a, 0x48, 0xf1, 0x6b, + 0x69, 0xe2, 0x66, 0x02, 0xb5, 0x32, 0xf4, 0x58, 0x21, 0x1c, 0x76, 0x58, 0xb8, 0xef, 0x75, 0xad, + 0xe2, 0x74, 0x87, 0x1b, 0x92, 0x10, 0x0e, 0x2b, 0xb6, 0x5e, 0x48, 0x72, 0x5f, 0xdd, 0x82, 0xb9, + 0x6d, 0xf4, 0xd1, 0x89, 0xeb, 0x87, 0xdb, 0x3e, 0x8b, 0xc9, 0x6d, 0x00, 0x9d, 0xad, 0x8e, 0xe7, + 0xca, 0x8a, 0x28, 0xd6, 0xe7, 0xc7, 0xa3, 0xa5, 0xa2, 0x4e, 0x67, 0x7b, 0x8d, 0x16, 0x35, 0xd0, + 0x76, 0x09, 0x81, 0x5c, 0xe4, 0xb3, 0x58, 0x96, 0x41, 0x8e, 0xca, 0x75, 0x75, 0x0b, 0x2e, 0x26, + 0x16, 0x1b, 0x83, 0x28, 0x66, 0x81, 0xa0, 0x7a, 0x5e, 0xa8, 0xad, 0x51, 0xb9, 0x26, 0x8b, 0x30, + 0xe3, 0x85, 0x2e, 0x3e, 0x91, 0xd2, 0x22, 0x55, 0x0f, 0x62, 0x77, 0x68, 0xfb, 0x03, 0x94, 0xe5, + 0x51, 0xa4, 0xea, 0xa1, 0xfa, 0x97, 0x09, 0x85, 0xc4, 0x24, 0xb1, 0x20, 0x7b, 0xe4, 0x98, 0x39, + 0x1e, 0x2d, 0x65, 0xdb, 0x6b, 0xad, 0x0c, 0xcd, 0x7a, 0x2e, 0xb9, 0x05, 0x45, 0xcf, 0xed, 0xf4, + 0x39, 0xee, 0x7b, 0xda, 0x6c, 0x7d, 0x6e, 0x3c, 0x5a, 0x2a, 0xb4, 0xd7, 0xb6, 0xe4, 0x9e, 0x08, + 0xbb, 0xe7, 0xaa, 0x35, 0x59, 0x84, 0x5c, 0x68, 0x07, 0xfa, 0x20, 0x59, 0xd9, 0x76, 0x80, 0xe4, + 0x75, 0x98, 0x15, 0xbf, 0x89, 0x91, 0x9c, 0x7e, 0x09, 0x62, 0x53, 0x0b, 0xef, 0x83, 0xe9, 0xc8, + 0x6b, 0xe9, 0xca, 0xaa, 0xa6, 0x57, 0xc8, 0xc9, 0x00, 0xc8, 0xc0, 0xab, 0x50, 0xb4, 0x61, 0x5e, + 0xad, 0x92, 0x23, 0xcc, 0x57, 0x30, 0x32, 0xa7, 0xa4, 0xda, 0x91, 0xda, 0xa9, 0x4c, 0xe5, 0x53, + 0x32, 0x25, 0x2a, 0xe5, 0x38, 0x57, 0x6f, 0x42, 0x5e, 0x74, 0xaf, 0x80, 0x0b, 0x12, 0x86, 0xf1, + 0x68, 0xc9, 0x14, 0x8d, 0x2d, 0x49, 0x53, 0xbc, 0x6c, 0xbb, 0xe4, 0xae, 0x4e, 0xa9, 0x2a, 0xa7, + 0xca, 0x59, 0x8e, 0x89, 0x82, 0x11, 0xa1, 0x13, 0x3c, 0x59, 0x83, 0x79, 0x17, 0x23, 0x8f, 0xa3, + 0xdb, 0x89, 0x62, 0x3b, 0x46, 0x0b, 0x2a, 0xc6, 0x8d, 0x8b, 0xe9, 0xb5, 0x2c, 0x7a, 0x75, 0x5b, + 0x40, 0xe2, 0x52, 0x5a, 0x25, 0x9f, 0xc9, 0x0a, 0xe4, 0x38, 0xf3, 0xd1, 0x9a, 0x95, 0xe2, 0xeb, + 0xd3, 0x46, 0x11, 0x65, 0xbe, 0x1c, 0x47, 0x82, 0x25, 0x6d, 0x80, 0x00, 0x83, 0x3d, 0xe4, 0xd1, + 0x63, 0xaf, 0x6f, 0xcd, 0x49, 0xe5, 0xdb, 0xd3, 0x94, 0xdb, 0x7d, 0x74, 0x6a, 0xeb, 0x47, 0xb8, + 0x48, 0xee, 0xb1, 0x98, 0xac, 0xc3, 0x15, 0x8e, 0xfb, 0xc8, 0x31, 0x74, 0xd0, 0xed, 0xe8, 0xe9, + 0x23, 0x22, 0x36, 0x2f, 0x23, 0x76, 0x6d, 0x3c, 0x5a, 0xba, 0x4c, 0x8f, 0x00, 0x3d, 0xa8, 0x64, + 0xf8, 0x2e, 0xf3, 0x97, 0xb6, 0x5d, 0xf2, 0x39, 0x2c, 0x9e, 0x30, 0xa7, 0x86, 0x85, 0xb0, 0x76, + 0x51, 0x5a, 0xbb, 0x3a, 0x1e, 0x2d, 0x91, 0x63, 0x6b, 0x6a, 0xaa, 0x48, 0x63, 0x84, 0x4f, 0xee, + 0x8a, 0x86, 0x51, 0x4d, 0x74, 0x29, 0x29, 0x58, 0xd9, 0x46, 0xa7, 0x4f, 0x50, 0xdd, 0x2d, 0x4e, + 0x58, 0x48, 0x3b, 0x41, 0x8d, 0x81, 0xc9, 0x13, 0xf4, 0xae, 0x5b, 0xcf, 0x41, 0xb6, 0x7e, 0x58, + 0xfd, 0x23, 0x0b, 0x73, 0x8f, 0xc4, 0x07, 0x91, 0xe2, 0x97, 0x03, 0x8c, 0x62, 0xd2, 0x84, 0x3c, + 0x86, 0x31, 0xf7, 0x30, 0xb2, 0x8c, 0xca, 0x85, 0x1b, 0xb3, 0x2b, 0xb7, 0xd2, 
0x62, 0x7b, 0x52, + 0xa2, 0x1e, 0x9a, 0x61, 0xcc, 0x0f, 0x69, 0xa2, 0x25, 0xf7, 0x61, 0x96, 0x63, 0x34, 0x08, 0xb0, + 0xb3, 0xcf, 0x59, 0x70, 0xd6, 0x87, 0xe3, 0x21, 0x72, 0x31, 0xda, 0x28, 0x28, 0xfe, 0x33, 0xce, + 0x02, 0x72, 0x1b, 0x88, 0x17, 0x3a, 0xfe, 0xc0, 0xc5, 0x0e, 0xf3, 0xdd, 0x8e, 0xfa, 0x8a, 0xca, + 0xe6, 0x2d, 0xd0, 0x05, 0xfd, 0x66, 0xd3, 0x77, 0xd5, 0x50, 0x2b, 0x7d, 0x6b, 0x00, 0x1c, 0xfb, + 0x90, 0x3a, 0x7f, 0x3e, 0x06, 0xd3, 0x76, 0x62, 0x31, 0x73, 0xb3, 0xb2, 0x60, 0xde, 0x98, 0x7a, + 0xa9, 0x55, 0x89, 0x3d, 0xf0, 0x42, 0x97, 0x6a, 0x09, 0xb9, 0x0b, 0xf9, 0x7d, 0xcf, 0x8f, 0x91, + 0x47, 0xd6, 0x05, 0x19, 0x92, 0xeb, 0x67, 0xb5, 0x09, 0x4d, 0xe0, 0xea, 0x2f, 0x49, 0x6c, 0xd7, + 0x31, 0x8a, 0xec, 0x2e, 0x92, 0x4f, 0xc1, 0xc4, 0x21, 0x86, 0x71, 0x12, 0xda, 0xb7, 0xa6, 0x7a, + 0xa1, 0x15, 0xb5, 0xa6, 0xc0, 0xa9, 0x56, 0x91, 0x0f, 0x20, 0x3f, 0x54, 0xd1, 0xfa, 0x2f, 0x01, + 0x4d, 0xd8, 0xd2, 0x4f, 0x06, 0xcc, 0x48, 0x43, 0x27, 0xc2, 0x60, 0xbc, 0x7a, 0x18, 0x56, 0xc0, + 0xd4, 0x89, 0xc8, 0x4e, 0xff, 0xf6, 0xa8, 0x94, 0x50, 0x4d, 0x92, 0x0f, 0x01, 0x26, 0x12, 0x78, + 0xb6, 0xae, 0xc8, 0x92, 0xac, 0xde, 0xfc, 0xc7, 0x80, 0x4b, 0x13, 0xae, 0x90, 0x3b, 0xb0, 0xf8, + 0x68, 0x75, 0xa7, 0xd1, 0xea, 0xac, 0x36, 0x76, 0xda, 0x9b, 0x1b, 0x9d, 0xdd, 0x8d, 0x07, 0x1b, + 0x9b, 0x8f, 0x36, 0x16, 0x32, 0xa5, 0xd2, 0xd3, 0x67, 0x95, 0xab, 0x13, 0xf8, 0x6e, 0xd8, 0x0b, + 0xd9, 0x81, 0x70, 0xfc, 0xf2, 0x29, 0x55, 0x83, 0x36, 0x57, 0x77, 0x9a, 0x0b, 0x46, 0xe9, 0x7f, + 0x4f, 0x9f, 0x55, 0xae, 0x4c, 0x88, 0x1a, 0x1c, 0xd5, 0x64, 0x3a, 0xad, 0xd9, 0xdd, 0x5a, 0x13, + 0x9a, 0x6c, 0xaa, 0x66, 0xb7, 0xef, 0xa6, 0x69, 0x68, 0x73, 0x7d, 0xf3, 0x61, 0x73, 0x21, 0x97, + 0xaa, 0xa1, 0x18, 0xb0, 0x21, 0x96, 0xae, 0x7d, 0xf3, 0x7d, 0x39, 0xf3, 0xf3, 0x0f, 0xe5, 0xc9, + 0xab, 0xae, 0x04, 0x30, 0x23, 0xb7, 0x88, 0x9b, 0x2c, 0x2a, 0xe7, 0x35, 0x62, 0xa9, 0x72, 0x5e, + 0x3d, 0x55, 0xaf, 0xfc, 0xfa, 0xe3, 0xdf, 0xdf, 0x65, 0x2f, 0xc1, 0xbc, 0x24, 0xde, 0x09, 0xec, + 0xd0, 0xee, 0x22, 0x7f, 0xd7, 0xa8, 0x5b, 0xcf, 0x5f, 0x94, 0x33, 0xbf, 0xbf, 0x28, 0x67, 0xbe, + 0x1a, 0x97, 0x8d, 0xe7, 0xe3, 0xb2, 0xf1, 0xdb, 0xb8, 0x6c, 0xfc, 0x39, 0x2e, 0x1b, 0x7b, 0xa6, + 0xfc, 0x03, 0xf9, 0xfe, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x76, 0x89, 0xef, 0x57, 0x0b, + 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/watch.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/watch.proto new file mode 100644 index 00000000..d017730b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/api/watch.proto @@ -0,0 +1,154 @@ +syntax = "proto3"; + +package docker.swarmkit.v1; + +import "github.com/docker/swarmkit/api/specs.proto"; +import "github.com/docker/swarmkit/api/objects.proto"; +import "github.com/docker/swarmkit/api/types.proto"; +import "gogoproto/gogo.proto"; +import "github.com/docker/swarmkit/protobuf/plugin/plugin.proto"; + +message Object { + oneof Object { + Node node = 1; + Service service = 2; + Network network = 3; + Task task = 4; + Cluster cluster = 5; + Secret secret = 6; + Resource resource = 7; + Extension extension = 8; + Config config = 9; + } +} + +// FIXME(aaronl): These messages should ideally be embedded in SelectBy, but +// protoc generates bad code for that. 
+message SelectBySlot { + string service_id = 1 [(gogoproto.customname) = "ServiceID"]; + uint64 slot = 2; +} + +message SelectByCustom { + string kind = 1; + string index = 2; + string value = 3; +} + +message SelectBy { + // TODO(aaronl): Are all of these things we want to expose in + // the API? Exposing them may commit us to maintaining those + // internal indices going forward. + oneof By { + // supported by all object types + string id = 1 [(gogoproto.customname) = "ID"]; // not applicable for FindObjects - use GetObject instead + string id_prefix = 2 [(gogoproto.customname) = "IDPrefix"]; + string name = 3; + string name_prefix = 4; + SelectByCustom custom = 5; + SelectByCustom custom_prefix = 6; + + // supported by tasks only + string service_id = 7 [(gogoproto.customname) = "ServiceID"]; + string node_id = 8 [(gogoproto.customname) = "NodeID"]; + SelectBySlot slot = 9; + TaskState desired_state = 10; + + // supported by nodes only + NodeRole role = 11; + NodeSpec.Membership membership = 12; + + // supported by: service, task + string referenced_network_id = 13 [(gogoproto.customname) = "ReferencedNetworkID"]; + string referenced_secret_id = 14 [(gogoproto.customname) = "ReferencedSecretID"]; + string referenced_config_id = 16 [(gogoproto.customname) = "ReferencedConfigID"]; + + // supported by: resource + string kind = 15; + } +} + + +// Watch defines the RPC methods for monitoring data store changes. +service Watch { + // Watch starts a stream that returns any changes to objects that match + // the specified selectors. When the stream begins, it immediately sends + // an empty message back to the client. It is important to wait for + // this message before taking any actions that depend on an established + // stream of changes for consistency. + rpc Watch(WatchRequest) returns (stream WatchMessage) { + option (docker.protobuf.plugin.tls_authorization) = { roles: "swarm-manager" }; + }; +} + +message WatchRequest { + message WatchEntry { + // Kind can contain a builtin type such as "node", "secret", etc. or + // the kind specified by a custom-defined object. + string kind = 1; + + // Action (create/update/delete) + // This is a bitmask, so multiple actions may be OR'd together + WatchActionKind action = 2; + + // Filters are combined using AND logic - an event must match + // all of them to pass the filter. + repeated SelectBy filters = 3; + } + + // Multiple entries are combined using OR logic - i.e. if an event + // matches all of the selectors specified in any single watch entry, + // the event will be sent to the client. + repeated WatchEntry entries = 1; + + // ResumeFrom provides a version to resume the watch from, if non-nil. + // The watch will return changes since this version, and continue to + // return new changes afterwards. Watch will return an error if the + // server has compacted its log and no longer has complete history to + // this point. + Version resume_from = 2; + + // IncludeOldObject causes WatchMessages to include a copy of the + // previous version of the object on updates. Note that only live + // changes will include the old object (not historical changes + // retrieved using ResumeFrom). + bool include_old_object = 3; +}
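To make the selector semantics above concrete (entries are ORed together, filters within a single entry are ANDed, and the action field is a bitmask), here is a minimal Go sketch of a client for this Watch service. It is an illustrative aside, not part of the vendored code: it assumes an already-authenticated *grpc.ClientConn to a manager (the RPC is restricted to the swarm-manager TLS role), and the kind, action, and role values are examples only.

```go
package watchexample

import (
	"context"
	"fmt"

	"github.com/docker/swarmkit/api"
	"google.golang.org/grpc"
)

// watchManagerNodes subscribes to create/update events for manager nodes.
func watchManagerNodes(ctx context.Context, conn *grpc.ClientConn) error {
	client := api.NewWatchClient(conn)
	stream, err := client.Watch(ctx, &api.WatchRequest{
		// Entries are ORed together; Filters inside one entry are ANDed.
		Entries: []*api.WatchRequest_WatchEntry{{
			Kind:   "node",
			Action: api.WatchActionKindCreate | api.WatchActionKindUpdate, // bitmask
			Filters: []*api.SelectBy{
				{By: &api.SelectBy_Role{Role: api.NodeRoleManager}},
			},
		}},
		IncludeOldObject: true,
	})
	if err != nil {
		return err
	}
	// Per the service comment above, the first message only signals that
	// the stream is established; wait for it before acting on anything.
	if _, err := stream.Recv(); err != nil {
		return err
	}
	for {
		msg, err := stream.Recv()
		if err != nil {
			return err
		}
		for _, ev := range msg.Events {
			if n := ev.Object.GetNode(); n != nil {
				fmt.Printf("action=%v node=%s\n", ev.Action, n.ID)
			}
		}
	}
}
```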
+// WatchMessage is the type of the stream that's returned to the client by +// Watch. Note that the first item of this stream will always be a WatchMessage +// with a nil Object, to signal that the stream has started. +message WatchMessage { + message Event { + // Action (create/update/delete) + // Note that WatchMessage does not expose "commit" events that + // mark transaction boundaries. + WatchActionKind action = 1; + + // Matched object + Object object = 2; + + // For updates, OldObject will optionally be included in the + // watch message, containing the previous version of the + // object, if IncludeOldObject was set in WatchRequest. + Object old_object = 3; + } + + repeated Event events = 1; + + // Version indexes this change to the data store. It can be used to + // resume the watch from this point. + Version version = 2; +} + +// WatchActionKind distinguishes between creations, updates, and removals. It +// is structured as a bitmap so multiple kinds of events can be requested with +// a mask. +enum WatchActionKind { + option (gogoproto.goproto_enum_prefix) = false; + option (gogoproto.enum_customname) = "WatchActionKind"; + WATCH_ACTION_UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "WatchActionKindUnknown"]; // default value, invalid + WATCH_ACTION_CREATE = 1 [(gogoproto.enumvalue_customname) = "WatchActionKindCreate"]; + WATCH_ACTION_UPDATE = 2 [(gogoproto.enumvalue_customname) = "WatchActionKindUpdate"]; + WATCH_ACTION_REMOVE = 4 [(gogoproto.enumvalue_customname) = "WatchActionKindRemove"]; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/auth.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/auth.go new file mode 100644 index 00000000..e0ff898c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/auth.go @@ -0,0 +1,247 @@ +package ca + +import ( + "context" + "crypto/tls" + "crypto/x509/pkix" + "strings" + + "github.com/sirupsen/logrus" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/log" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" +) + +type localRequestKeyType struct{} + +// LocalRequestKey is a context key to mark a request that originated on the +// local node. The associated value is a RemoteNodeInfo structure describing the +// local node.
+var LocalRequestKey = localRequestKeyType{} + +// LogTLSState logs information about the TLS connection and remote peers +func LogTLSState(ctx context.Context, tlsState *tls.ConnectionState) { + if tlsState == nil { + log.G(ctx).Debugf("no TLS Chains found") + return + } + + peerCerts := []string{} + verifiedChain := []string{} + for _, cert := range tlsState.PeerCertificates { + peerCerts = append(peerCerts, cert.Subject.CommonName) + } + for _, chain := range tlsState.VerifiedChains { + subjects := []string{} + for _, cert := range chain { + subjects = append(subjects, cert.Subject.CommonName) + } + verifiedChain = append(verifiedChain, strings.Join(subjects, ",")) + } + + log.G(ctx).WithFields(logrus.Fields{ + "peer.peerCert": peerCerts, + // "peer.verifiedChain": verifiedChain}, + }).Debugf("") +} + +// getCertificateSubject extracts the subject from a verified client certificate +func getCertificateSubject(tlsState *tls.ConnectionState) (pkix.Name, error) { + if tlsState == nil { + return pkix.Name{}, status.Errorf(codes.PermissionDenied, "request is not using TLS") + } + if len(tlsState.PeerCertificates) == 0 { + return pkix.Name{}, status.Errorf(codes.PermissionDenied, "no client certificates in request") + } + if len(tlsState.VerifiedChains) == 0 { + return pkix.Name{}, status.Errorf(codes.PermissionDenied, "no verified chains for remote certificate") + } + + return tlsState.VerifiedChains[0][0].Subject, nil +} + +func tlsConnStateFromContext(ctx context.Context) (*tls.ConnectionState, error) { + peer, ok := peer.FromContext(ctx) + if !ok { + return nil, status.Errorf(codes.PermissionDenied, "Permission denied: no peer info") + } + tlsInfo, ok := peer.AuthInfo.(credentials.TLSInfo) + if !ok { + return nil, status.Errorf(codes.PermissionDenied, "Permission denied: peer did not present a valid peer certificate") + } + return &tlsInfo.State, nil +} + +// certSubjectFromContext extracts pkix.Name from context. +func certSubjectFromContext(ctx context.Context) (pkix.Name, error) { + connState, err := tlsConnStateFromContext(ctx) + if err != nil { + return pkix.Name{}, err + } + return getCertificateSubject(connState) +} + +// AuthorizeOrgAndRole takes in a context, an organization, a certificate +// blacklist, and a list of roles (OUs), and returns the node ID of the caller +// if it is authorized. +func AuthorizeOrgAndRole(ctx context.Context, org string, blacklistedCerts map[string]*api.BlacklistedCertificate, ou ...string) (string, error) { + certSubj, err := certSubjectFromContext(ctx) + if err != nil { + return "", err + } + // Check if the current certificate has an OU that authorizes + // access to this method + if intersectArrays(certSubj.OrganizationalUnit, ou) { + return authorizeOrg(certSubj, org, blacklistedCerts) + } + + return "", status.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of OUs: %v", ou) +}
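The OU/organization check above is the basis for role-gating swarmkit RPCs. As a hedged illustration (not vendored code), a server-side handler could guard a manager-only RPC as follows; the organization string is a placeholder, a nil map means no blacklisted certificates, and ca.ManagerRole is the manager OU constant defined elsewhere in this package.

```go
package authexample

import (
	"context"
	"fmt"

	"github.com/docker/swarmkit/ca"
)

// requireManager succeeds only when the caller's verified certificate
// carries the manager OU and the expected organization; it returns the
// caller's node ID (the certificate CN).
func requireManager(ctx context.Context) (string, error) {
	// "my-swarm-org" is illustrative; nil means no blacklisted certs.
	nodeID, err := ca.AuthorizeOrgAndRole(ctx, "my-swarm-org", nil, ca.ManagerRole)
	if err != nil {
		return "", fmt.Errorf("caller is not an authorized manager: %v", err)
	}
	return nodeID, nil
}
```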
+ +// authorizeOrg takes in a certificate subject and an organization, and returns +// the node ID of the node if it belongs to that organization and has not been +// blacklisted. +func authorizeOrg(certSubj pkix.Name, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) { + if _, ok := blacklistedCerts[certSubj.CommonName]; ok { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: node %s was removed from swarm", certSubj.CommonName) + } + + if len(certSubj.Organization) > 0 && certSubj.Organization[0] == org { + return certSubj.CommonName, nil + } + + return "", status.Errorf(codes.PermissionDenied, "Permission denied: remote certificate not part of organization: %s", org) +} + +// AuthorizeForwardedRoleAndOrg checks for proper roles and organization of the caller. The RPC may have +// been proxied by a manager, in which case the manager is authenticated and +// so is the certificate information that it forwarded. It returns the node ID +// of the original client. +func AuthorizeForwardedRoleAndOrg(ctx context.Context, authorizedRoles, forwarderRoles []string, org string, blacklistedCerts map[string]*api.BlacklistedCertificate) (string, error) { + if isForwardedRequest(ctx) { + _, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, forwarderRoles...) + if err != nil { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarder role: %v", err) + } + + // This was a forwarded request. Authorize the forwarder, and + // check if the forwarded role matches one of the authorized + // roles. + _, forwardedID, forwardedOrg, forwardedOUs := forwardedTLSInfoFromContext(ctx) + + if len(forwardedOUs) == 0 || forwardedID == "" || forwardedOrg == "" { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request") + } + + if !intersectArrays(forwardedOUs, authorizedRoles) { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized forwarded role, expecting: %v", authorizedRoles) + } + + if forwardedOrg != org { + return "", status.Errorf(codes.PermissionDenied, "Permission denied: organization mismatch, expecting: %s", org) + } + + return forwardedID, nil + } + + // There wasn't any node being forwarded; check if this is a direct call by the expected role + nodeID, err := AuthorizeOrgAndRole(ctx, org, blacklistedCerts, authorizedRoles...) + if err == nil { + return nodeID, nil + } + + return "", status.Errorf(codes.PermissionDenied, "Permission denied: unauthorized peer role: %v", err) +} + +// intersectArrays returns true when there is at least one element in common +// between the two arrays +func intersectArrays(orig, tgt []string) bool { + for _, i := range orig { + for _, x := range tgt { + if i == x { + return true + } + } + } + return false +} + +// RemoteNodeInfo describes a node sending an RPC request. +type RemoteNodeInfo struct { + // Roles is a list of roles contained in the node's certificate + // (or forwarded by a trusted node). + Roles []string + + // Organization is the organization contained in the node's certificate + // (or forwarded by a trusted node). + Organization string + + // NodeID is the node's ID, from the CN field in its certificate + // (or forwarded by a trusted node). + NodeID string + + // ForwardedBy contains information for the node that forwarded this + // request. It is set to nil if the request was received directly. + ForwardedBy *RemoteNodeInfo + + // RemoteAddr is the address that this node is connecting to the cluster + // from. + RemoteAddr string +} + +// RemoteNode returns the node ID and role from the client's TLS certificate.
+// If the RPC was forwarded, the original client's ID and role are returned, as +// well as the forwarder's ID. This function does not do authorization checks - +// it only looks up the node ID. +func RemoteNode(ctx context.Context) (RemoteNodeInfo, error) { + // If we have a value on the context that marks this as a local + // request, we return the node info from the context. + localNodeInfo := ctx.Value(LocalRequestKey) + + if localNodeInfo != nil { + nodeInfo, ok := localNodeInfo.(RemoteNodeInfo) + if ok { + return nodeInfo, nil + } + } + + certSubj, err := certSubjectFromContext(ctx) + if err != nil { + return RemoteNodeInfo{}, err + } + + org := "" + if len(certSubj.Organization) > 0 { + org = certSubj.Organization[0] + } + + peer, ok := peer.FromContext(ctx) + if !ok { + return RemoteNodeInfo{}, status.Errorf(codes.PermissionDenied, "Permission denied: no peer info") + } + + directInfo := RemoteNodeInfo{ + Roles: certSubj.OrganizationalUnit, + NodeID: certSubj.CommonName, + Organization: org, + RemoteAddr: peer.Addr.String(), + } + + if isForwardedRequest(ctx) { + remoteAddr, cn, org, ous := forwardedTLSInfoFromContext(ctx) + if len(ous) == 0 || cn == "" || org == "" { + return RemoteNodeInfo{}, status.Errorf(codes.PermissionDenied, "Permission denied: missing information in forwarded request") + } + return RemoteNodeInfo{ + Roles: ous, + NodeID: cn, + Organization: org, + ForwardedBy: &directInfo, + RemoteAddr: remoteAddr, + }, nil + } + + return directInfo, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/certificates.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/certificates.go new file mode 100644 index 00000000..dd0297ab --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/certificates.go @@ -0,0 +1,954 @@ +package ca + +import ( + "bytes" + "context" + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/tls" + "crypto/x509" + "encoding/asn1" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + cfcsr "github.com/cloudflare/cfssl/csr" + "github.com/cloudflare/cfssl/helpers" + "github.com/cloudflare/cfssl/initca" + cflog "github.com/cloudflare/cfssl/log" + cfsigner "github.com/cloudflare/cfssl/signer" + "github.com/cloudflare/cfssl/signer/local" + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/ca/pkcs8" + "github.com/docker/swarmkit/connectionbroker" + "github.com/docker/swarmkit/ioutils" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" +) + +const ( + // Security Strength Equivalence + //----------------------------------- + //| ECC | DH/DSA/RSA | + //| 256 | 3072 | + //| 384 | 7680 | + //----------------------------------- + + // RootKeySize is the default size of the root CA key + // It would be ideal for the root key to use P-384, but P-384 is not optimized in go yet :( + RootKeySize = 256 + // RootKeyAlgo defines the default algorithm for the root CA Key + RootKeyAlgo = "ecdsa" + // RootCAExpiration represents the default expiration for the root CA in seconds (20 years) + RootCAExpiration = "630720000s" + // DefaultNodeCertExpiration represents the default expiration for node certificates (3 months) + DefaultNodeCertExpiration = 2160 * time.Hour + // CertBackdate represents the amount of time each
certificate is backdated to try to avoid + // clock drift issues. + CertBackdate = 1 * time.Hour + // CertLowerRotationRange represents the minimum fraction of time that we will wait when randomly + // choosing our next certificate rotation + CertLowerRotationRange = 0.5 + // CertUpperRotationRange represents the maximum fraction of time that we will wait when randomly + // choosing our next certificate rotation + CertUpperRotationRange = 0.8 + // MinNodeCertExpiration represents the minimum expiration for node certificates + MinNodeCertExpiration = 1 * time.Hour +) + +// BasicConstraintsOID is the ASN1 Object ID indicating a basic constraints extension +var BasicConstraintsOID = asn1.ObjectIdentifier{2, 5, 29, 19} + +// A recoverableErr is a non-fatal error encountered signing a certificate, +// which means that the certificate issuance may be retried at a later time. +type recoverableErr struct { + err error +} + +func (r recoverableErr) Error() string { + return r.err.Error() +} + +// ErrNoLocalRootCA is an error type used to indicate that the local root CA +// certificate file does not exist. +var ErrNoLocalRootCA = errors.New("local root CA certificate does not exist") + +// ErrNoValidSigner is an error type used to indicate that our RootCA doesn't have the ability to +// sign certificates. +var ErrNoValidSigner = recoverableErr{err: errors.New("no valid signer found")} + +func init() { + cflog.Level = 5 +} + +// CertPaths is a helper struct that keeps track of the paths of a +// Cert and corresponding Key +type CertPaths struct { + Cert, Key string +} + +// IssuerInfo contains the subject and public key of the issuer of a certificate +type IssuerInfo struct { + Subject []byte + PublicKey []byte +} + +// LocalSigner is a signer that can sign CSRs +type LocalSigner struct { + cfsigner.Signer + + // Key will only be used by the original manager to put the private + // key-material in raft, no signing operations depend on it. + Key []byte + + // Cert is one PEM encoded Certificate used as the signing CA. It must correspond to the key. + Cert []byte + + // just cached parsed values for validation, etc. 
+ parsedCert *x509.Certificate + cryptoSigner crypto.Signer +} + +type x509UnknownAuthError struct { + error + failedLeafCert *x509.Certificate +} + +// RootCA is the representation of everything we need to sign certificates and/or to verify certificates +// +// RootCA.Cert: [CA cert1][CA cert2] +// RootCA.Intermediates: [intermediate CA1][intermediate CA2][intermediate CA3] +// RootCA.signer.Cert: [signing CA cert] +// RootCA.signer.Key: [signing CA key] +// +// Requirements: +// +// - [signing CA key] must be the private key for [signing CA cert], and either both or none must be provided +// +// - [intermediate CA1] must have the same public key and subject as [signing CA cert], because otherwise when +// appended to a leaf certificate, the intermediates will not form a chain (because [intermediate CA1] won't be +// the signer of the leaf certificate) +// - [intermediate CA1] must be signed by [intermediate CA2], which must be signed by [intermediate CA3] +// +// - When we issue a certificate, the intermediates will be appended so that the certificate looks like: +// [leaf signed by signing CA cert][intermediate CA1][intermediate CA2][intermediate CA3] +// - [leaf signed by signing CA cert][intermediate CA1][intermediate CA2][intermediate CA3] is guaranteed to form a +// valid chain from [leaf signed by signing CA cert] to one of the root certs ([signing CA cert], [CA cert1], [CA cert2]) +// using zero or more of the intermediate certs ([intermediate CA1][intermediate CA2][intermediate CA3]) as intermediates +// +// Example 1: Simple root rotation +// - Initial state: +// - RootCA.Cert: [Root CA1 self-signed] +// - RootCA.Intermediates: [] +// - RootCA.signer.Cert: [Root CA1 self-signed] +// - Issued TLS cert: [leaf signed by Root CA1] +// +// - Intermediate state (during root rotation): +// - RootCA.Cert: [Root CA1 self-signed] +// - RootCA.Intermediates: [Root CA2 signed by Root CA1] +// - RootCA.signer.Cert: [Root CA2 signed by Root CA1] +// - Issued TLS cert: [leaf signed by Root CA2][Root CA2 signed by Root CA1] +// +// - Final state: +// - RootCA.Cert: [Root CA2 self-signed] +// - RootCA.Intermediates: [] +// - RootCA.signer.Cert: [Root CA2 self-signed] +// - Issued TLS cert: [leaf signed by Root CA2] +// +type RootCA struct { + // Certs contains a bundle of self-signed, PEM encoded certificates for the Root CA to be used + // as the root of trust. + Certs []byte + + // Intermediates contains a bundle of PEM encoded intermediate CA certificates to append to any + // issued TLS (leaf) certificates. The first one must have the same public key and subject as the + // signing root certificate, and the rest must form a chain, each one certifying the one above it, + // as per RFC5246 section 7.4.2. + Intermediates []byte + + // Pool is the root pool used to validate TLS certificates + Pool *x509.CertPool + + // Digest of the serialized bytes of the certificate(s) + Digest digest.Digest + + // This signer will be nil if the node doesn't have the appropriate key material + signer *LocalSigner +} + +// Signer is an accessor for the local signer that returns an error if this root cannot sign. +func (rca *RootCA) Signer() (*LocalSigner, error) { + if rca.Pool == nil || rca.signer == nil || len(rca.signer.Cert) == 0 || rca.signer.Signer == nil { + return nil, ErrNoValidSigner + } + + return rca.signer, nil +}
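Given a RootCA whose Signer() succeeds, local issuance is a single call. The following is a speculative usage sketch rather than vendored code: it assumes ca.NewKeyReadWriter is the package's stock KeyWriter implementation, and the paths, node ID, role, and organization are placeholders.

```go
package issueexample

import (
	"github.com/docker/swarmkit/ca"
)

// issueNodeCert issues and persists a leaf certificate; rootCA must hold
// signing material (i.e. rootCA.Signer() would not return an error).
func issueNodeCert(rootCA *ca.RootCA) error {
	// Assumed helper: NewKeyReadWriter persists the key/cert at the given
	// paths (placeholders here) and satisfies the KeyWriter interface.
	kw := ca.NewKeyReadWriter(ca.CertPaths{
		Cert: "/tmp/example-node.crt",
		Key:  "/tmp/example-node.key",
	}, nil, nil)
	// CN is the node ID, OU selects the role, O is the cluster/org ID.
	_, issuerInfo, err := rootCA.IssueAndSaveNewCertificates(kw, "example-node-id", ca.WorkerRole, "example-org-id")
	if err != nil {
		return err
	}
	_ = issuerInfo // carries the issuer's raw subject and public key
	return nil
}
```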
+ +// IssueAndSaveNewCertificates generates a new key-pair, signs it with the local root-ca, and returns a +// TLS certificate and the issuer information for the certificate. +func (rca *RootCA) IssueAndSaveNewCertificates(kw KeyWriter, cn, ou, org string) (*tls.Certificate, *IssuerInfo, error) { + csr, key, err := GenerateNewCSR() + if err != nil { + return nil, nil, errors.Wrap(err, "error when generating new node certs") + } + + // Obtain a signed Certificate + certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to sign node certificate") + } + signer, err := rca.Signer() + if err != nil { // should never happen, since if ParseValidateAndSignCSR did not fail this root CA must have a signer + return nil, nil, err + } + + // Create a valid TLSKeyPair out of the PEM encoded private key and certificate + tlsKeyPair, err := tls.X509KeyPair(certChain, key) + if err != nil { + return nil, nil, err + } + + if err := kw.Write(NormalizePEMs(certChain), key, nil); err != nil { + return nil, nil, err + } + + return &tlsKeyPair, &IssuerInfo{ + PublicKey: signer.parsedCert.RawSubjectPublicKeyInfo, + Subject: signer.parsedCert.RawSubject, + }, nil +} + +// RequestAndSaveNewCertificates gets new certificates issued, either by signing them locally if a signer is +// available, or by requesting them from the remote server at remoteAddr. This function returns the TLS +// certificate and the issuer information for the certificate. +func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, kw KeyWriter, config CertificateRequestConfig) (*tls.Certificate, *IssuerInfo, error) { + // Create a new key/pair and CSR + csr, key, err := GenerateNewCSR() + if err != nil { + return nil, nil, errors.Wrap(err, "error when generating new node certs") + } + + // Get the remote manager to issue a CA signed certificate for this node + // Retry up to 5 times in case the manager we first try to contact isn't + // responding properly (for example, it may have just been demoted). + var signedCert []byte + for i := 0; i != 5; i++ { + signedCert, err = GetRemoteSignedCertificate(ctx, csr, rca.Pool, config) + if err == nil { + break + } + + // If the first attempt fails, we should try a remote + // connection. The local node may be a manager that was + // demoted, so the local connection (which is preferred) may + // not work. If we are successful in renewing the certificate, + // the local connection will not be returned by the connection + // broker anymore. + config.ForceRemote = true + + // Wait a moment, in case a leader election was taking place. + select { + case <-time.After(config.RetryInterval): + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + } + if err != nil { + return nil, nil, err + } + + // Trust, but verify. + // Before we overwrite our local key + certificate, let's make sure the server gave us one that is valid + // Create an X509Cert so we can .Verify() + // Check to see if this certificate was signed by our CA, and isn't expired + parsedCerts, chains, err := ValidateCertChain(rca.Pool, signedCert, false) + // TODO(cyli): - right now we need the invalid certificate in order to determine whether or not we should + // download a new root, because we only want to do that in the case of workers. When we have a single + // codepath for updating the root CAs for both managers and workers, this snippet can go.
+ if _, ok := err.(x509.UnknownAuthorityError); ok { + if parsedCerts, parseErr := helpers.ParseCertificatesPEM(signedCert); parseErr == nil && len(parsedCerts) > 0 { + return nil, nil, x509UnknownAuthError{ + error: err, + failedLeafCert: parsedCerts[0], + } + } + } + if err != nil { + return nil, nil, err + } + + // ValidateCertChain, if successful, will always return at least 1 parsed cert and at least 1 chain containing + // at least 2 certificates: the leaf and the root. + leafCert := parsedCerts[0] + issuer := chains[0][1] + + // Create a valid TLSKeyPair out of the PEM encoded private key and certificate + tlsKeyPair, err := tls.X509KeyPair(signedCert, key) + if err != nil { + return nil, nil, err + } + + var kekUpdate *KEKData + for i := 0; i < 5; i++ { + // ValidateCertChain will always return at least 1 cert, so indexing at 0 is safe + kekUpdate, err = rca.getKEKUpdate(ctx, leafCert, tlsKeyPair, config) + if err == nil { + break + } + + config.ForceRemote = true + + // Wait a moment, in case a leader election was taking place. + select { + case <-time.After(config.RetryInterval): + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + } + if err != nil { + return nil, nil, err + } + + if err := kw.Write(NormalizePEMs(signedCert), key, kekUpdate); err != nil { + return nil, nil, err + } + + return &tlsKeyPair, &IssuerInfo{ + PublicKey: issuer.RawSubjectPublicKeyInfo, + Subject: issuer.RawSubject, + }, nil +} + +func (rca *RootCA) getKEKUpdate(ctx context.Context, leafCert *x509.Certificate, keypair tls.Certificate, config CertificateRequestConfig) (*KEKData, error) { + var managerRole bool + for _, ou := range leafCert.Subject.OrganizationalUnit { + if ou == ManagerRole { + managerRole = true + break + } + } + + if managerRole { + mtlsCreds := credentials.NewTLS(&tls.Config{ServerName: CARole, RootCAs: rca.Pool, Certificates: []tls.Certificate{keypair}}) + conn, err := getGRPCConnection(mtlsCreds, config.ConnBroker, config.ForceRemote) + if err != nil { + return nil, err + } + + client := api.NewCAClient(conn.ClientConn) + ctx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + response, err := client.GetUnlockKey(ctx, &api.GetUnlockKeyRequest{}) + if err != nil { + s, _ := status.FromError(err) + if s.Code() == codes.Unimplemented { // if the server does not support keks, return as if no encryption key was specified + conn.Close(true) + return &KEKData{}, nil + } + + conn.Close(false) + return nil, err + } + conn.Close(true) + return &KEKData{KEK: response.UnlockKey, Version: response.Version.Index}, nil + } + + // If this is a worker, set to never encrypt. We always want to set the lock key to nil, + // in case this was a manager that was demoted to a worker. + return &KEKData{}, nil +} + +// PrepareCSR creates a CFSSL Sign Request based on the given raw CSR and +// overrides the Subject and Hosts with the given extra args. +func PrepareCSR(csrBytes []byte, cn, ou, org string) cfsigner.SignRequest { + // All managers get the subject-alt-name of CA added, so they can be + // used for cert issuance. + hosts := []string{ou, cn} + if ou == ManagerRole { + hosts = append(hosts, CARole) + } + + return cfsigner.SignRequest{ + Request: string(csrBytes), + // OU is used for Authentication of the node type. The CN has the random + // node ID.
+ Subject: &cfsigner.Subject{CN: cn, Names: []cfcsr.Name{{OU: ou, O: org}}}, + // Adding ou as DNS alt name, so clients can connect to ManagerRole and CARole + Hosts: hosts, + } +} + +// ParseValidateAndSignCSR returns a signed certificate from a particular rootCA and a CSR. +func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string) ([]byte, error) { + signRequest := PrepareCSR(csrBytes, cn, ou, org) + signer, err := rca.Signer() + if err != nil { + return nil, err + } + cert, err := signer.Sign(signRequest) + if err != nil { + return nil, errors.Wrap(err, "failed to sign node certificate") + } + + return append(cert, rca.Intermediates...), nil +} + +// CrossSignCACertificate takes a CA root certificate and generates an intermediate CA from it signed with the current root signer +func (rca *RootCA) CrossSignCACertificate(otherCAPEM []byte) ([]byte, error) { + signer, err := rca.Signer() + if err != nil { + return nil, err + } + + // create a new cert with exactly the same parameters, including the public key and exact NotBefore and NotAfter + template, err := helpers.ParseCertificatePEM(otherCAPEM) + if err != nil { + return nil, errors.New("could not parse new CA certificate") + } + + if !template.IsCA { + return nil, errors.New("certificate not a CA") + } + + template.SignatureAlgorithm = signer.parsedCert.SignatureAlgorithm // make sure we can sign with the signer key + derBytes, err := x509.CreateCertificate(cryptorand.Reader, template, signer.parsedCert, template.PublicKey, signer.cryptoSigner) + if err != nil { + return nil, errors.Wrap(err, "could not cross-sign new CA certificate using old CA material") + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: derBytes, + }), nil +} + +func validateSignatureAlgorithm(cert *x509.Certificate) error { + switch cert.SignatureAlgorithm { + case x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512: + return nil + default: + return fmt.Errorf("unsupported signature algorithm: %s", cert.SignatureAlgorithm.String()) + } +} + +// NewRootCA creates a new RootCA object from unparsed PEM cert bundle and key byte +// slices. key may be nil, and in this case NewRootCA will return a RootCA +// without a signer. 
+func NewRootCA(rootCertBytes, signCertBytes, signKeyBytes []byte, certExpiry time.Duration, intermediates []byte) (RootCA, error) { + // Parse all the certificates in the cert bundle + parsedCerts, err := helpers.ParseCertificatesPEM(rootCertBytes) + if err != nil { + return RootCA{}, errors.Wrap(err, "invalid root certificates") + } + // Check to see if we have at least one valid cert + if len(parsedCerts) < 1 { + return RootCA{}, errors.New("no valid root CA certificates found") + } + + // Create a Pool with all of the certificates found + pool := x509.NewCertPool() + for _, cert := range parsedCerts { + if err := validateSignatureAlgorithm(cert); err != nil { + return RootCA{}, err + } + // Check to see if all of the certificates are valid, self-signed root CA certs + selfpool := x509.NewCertPool() + selfpool.AddCert(cert) + if _, err := cert.Verify(x509.VerifyOptions{Roots: selfpool}); err != nil { + return RootCA{}, errors.Wrap(err, "error while validating Root CA Certificate") + } + pool.AddCert(cert) + } + + // Calculate the digest for our Root CA bundle + digest := digest.FromBytes(rootCertBytes) + + // The intermediates supplied must be able to chain up to the root certificates, so that when they are appended to + // a leaf certificate, the leaf certificate can be validated through the intermediates to the root certificates. + var intermediatePool *x509.CertPool + var parsedIntermediates []*x509.Certificate + if len(intermediates) > 0 { + parsedIntermediates, _, err = ValidateCertChain(pool, intermediates, false) + if err != nil { + return RootCA{}, errors.Wrap(err, "invalid intermediate chain") + } + intermediatePool = x509.NewCertPool() + for _, cert := range parsedIntermediates { + intermediatePool.AddCert(cert) + } + } + + var localSigner *LocalSigner + if len(signKeyBytes) != 0 || len(signCertBytes) != 0 { + localSigner, err = newLocalSigner(signKeyBytes, signCertBytes, certExpiry, pool, intermediatePool) + if err != nil { + return RootCA{}, err + } + + // If a signer is provided and there are intermediates, then either the first intermediate would be the signer CA + // certificate (in which case it'd have the same subject and public key), or it would be a cross-signed + // intermediate with the same subject and public key as our signing CA certificate (which could be either an + // intermediate cert or a self-signed root cert). + if len(parsedIntermediates) > 0 && (!bytes.Equal(parsedIntermediates[0].RawSubject, localSigner.parsedCert.RawSubject) || + !bytes.Equal(parsedIntermediates[0].RawSubjectPublicKeyInfo, localSigner.parsedCert.RawSubjectPublicKeyInfo)) { + return RootCA{}, errors.New( + "invalid intermediate chain - the first intermediate must have the same subject and public key as the signing cert") + } + } + + return RootCA{signer: localSigner, Intermediates: intermediates, Digest: digest, Certs: rootCertBytes, Pool: pool}, nil +}
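Tying the pieces together, a rough usage sketch (not vendored code): load the on-disk root CA with GetLocalRootCA (defined later in this file) and check that a PEM bundle chains up to it; every path here is a placeholder.

```go
package validateexample

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/swarmkit/ca"
)

// validateBundle loads the local root CA (the key may be absent, which
// yields a RootCA without a signer) and validates a PEM certificate
// bundle against it.
func validateBundle(certPath, keyPath, bundlePath string) error {
	rootCA, err := ca.GetLocalRootCA(ca.CertPaths{Cert: certPath, Key: keyPath})
	if err != nil {
		return err
	}
	bundle, err := ioutil.ReadFile(bundlePath)
	if err != nil {
		return err
	}
	// allowExpired=false: every certificate must be currently valid.
	parsed, chains, err := ca.ValidateCertChain(rootCA.Pool, bundle, false)
	if err != nil {
		return err
	}
	fmt.Printf("%d certs parsed, %d valid chains (root digest %s)\n", len(parsed), len(chains), rootCA.Digest)
	return nil
}
```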
+ +// ValidateCertChain checks that the certificates provided chain up to the root pool provided. In addition +// it also enforces that the certificates in the bundle form a chain, each one certifying the one above, +// as per RFC5246 section 7.4.2, and that every certificate (whether or not it is necessary to form a chain to the root +// pool) is currently valid and not yet expired (unless allowExpired is set to true). +// This is additional validation not required by go's Certificate.Verify (which allows invalid certs in the +// intermediate pool), because this function is intended to be used when reading certs from untrusted locations such as +// from disk or over a network when a CSR is signed, so it is extra pedantic. +// This function always returns all the parsed certificates in the bundle in order (so there will always be +// at least 1 certificate if there is no error), along with the valid chains found by Certificate.Verify. +func ValidateCertChain(rootPool *x509.CertPool, certs []byte, allowExpired bool) ([]*x509.Certificate, [][]*x509.Certificate, error) { + // Parse all the certificates in the cert bundle + parsedCerts, err := helpers.ParseCertificatesPEM(certs) + if err != nil { + return nil, nil, err + } + if len(parsedCerts) == 0 { + return nil, nil, errors.New("no certificates to validate") + } + now := time.Now() + // ensure that they form a chain, each one being signed by the one after it + var intermediatePool *x509.CertPool + for i, cert := range parsedCerts { + // Manual expiry validation because we want more information on which certificate in the chain is expired, and + // because this is an easier way to allow expired certs. + if now.Before(cert.NotBefore) { + return nil, nil, errors.Wrapf( + x509.CertificateInvalidError{ + Cert: cert, + Reason: x509.Expired, + }, + "certificate (%d - %s) not valid before %s, and it is currently %s", + i+1, cert.Subject.CommonName, cert.NotBefore.UTC().Format(time.RFC1123), now.Format(time.RFC1123)) + } + if !allowExpired && now.After(cert.NotAfter) { + return nil, nil, errors.Wrapf( + x509.CertificateInvalidError{ + Cert: cert, + Reason: x509.Expired, + }, + "certificate (%d - %s) not valid after %s, and it is currently %s", + i+1, cert.Subject.CommonName, cert.NotAfter.UTC().Format(time.RFC1123), now.Format(time.RFC1123)) + } + + if i > 0 { + // check that the previous cert was signed by this cert + prevCert := parsedCerts[i-1] + if err := prevCert.CheckSignatureFrom(cert); err != nil { + return nil, nil, errors.Wrapf(err, "certificates do not form a chain: (%d - %s) is not signed by (%d - %s)", + i, prevCert.Subject.CommonName, i+1, cert.Subject.CommonName) + } + + if intermediatePool == nil { + intermediatePool = x509.NewCertPool() + } + intermediatePool.AddCert(cert) + + } + } + + verifyOpts := x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + CurrentTime: now, + } + + var chains [][]*x509.Certificate + + // If we accept expired certs, try to build a valid cert chain using some subset of the certs. We start off using the + // first certificate's NotAfter as the current time, thus ensuring that the first cert is not expired. If the chain + // still fails to validate due to expiry issues, continue iterating over the rest of the certs. + // If any of the other certs has an earlier NotAfter time, use that time as the current time instead. This ensures that + // particular cert, and any that came before it, are not expired. Note that the root that the certs chain up to + // should also not be expired at that "current" time.
+ if allowExpired { + verifyOpts.CurrentTime = parsedCerts[0].NotAfter.Add(time.Hour) + for _, cert := range parsedCerts { + if !cert.NotAfter.Before(verifyOpts.CurrentTime) { + continue + } + verifyOpts.CurrentTime = cert.NotAfter + + chains, err = parsedCerts[0].Verify(verifyOpts) + if err == nil { + return parsedCerts, chains, nil + } + } + if invalid, ok := err.(x509.CertificateInvalidError); ok && invalid.Reason == x509.Expired { + return nil, nil, errors.New("there is no time span for which all of the certificates, including a root, are valid") + } + return nil, nil, err + } + + chains, err = parsedCerts[0].Verify(verifyOpts) + if err != nil { + return nil, nil, err + } + return parsedCerts, chains, nil +} + +// newLocalSigner validates the signing cert and signing key to create a local signer, which accepts a crypto signer and a cert +func newLocalSigner(keyBytes, certBytes []byte, certExpiry time.Duration, rootPool, intermediatePool *x509.CertPool) (*LocalSigner, error) { + if len(keyBytes) == 0 || len(certBytes) == 0 { + return nil, errors.New("must provide both a signing key and a signing cert, or neither") + } + + parsedCerts, err := helpers.ParseCertificatesPEM(certBytes) + if err != nil { + return nil, errors.Wrap(err, "invalid signing CA cert") + } + if len(parsedCerts) == 0 { + return nil, errors.New("no valid signing CA certificates found") + } + if err := validateSignatureAlgorithm(parsedCerts[0]); err != nil { + return nil, err + } + opts := x509.VerifyOptions{ + Roots: rootPool, + Intermediates: intermediatePool, + } + if _, err := parsedCerts[0].Verify(opts); err != nil { + return nil, errors.Wrap(err, "error while validating signing CA certificate against roots and intermediates") + } + + // The key should not be encrypted, but it could be in PKCS8 format rather than PKCS1 + priv, err := helpers.ParsePrivateKeyPEM(keyBytes) + if err != nil { + return nil, errors.Wrap(err, "malformed private key") + } + + // We will always use the first certificate inside of the root bundle as the active one + if err := ensureCertKeyMatch(parsedCerts[0], priv.Public()); err != nil { + return nil, err + } + + signer, err := local.NewSigner(priv, parsedCerts[0], cfsigner.DefaultSigAlgo(priv), SigningPolicy(certExpiry)) + if err != nil { + return nil, err + } + + return &LocalSigner{Cert: certBytes, Key: keyBytes, Signer: signer, parsedCert: parsedCerts[0], cryptoSigner: priv}, nil +} + +func ensureCertKeyMatch(cert *x509.Certificate, key crypto.PublicKey) error { + switch certPub := cert.PublicKey.(type) { + case *rsa.PublicKey: + if certPub.N.BitLen() < 2048 || certPub.E == 1 { + return errors.New("unsupported RSA key parameters") + } + rsaKey, ok := key.(*rsa.PublicKey) + if ok && certPub.E == rsaKey.E && certPub.N.Cmp(rsaKey.N) == 0 { + return nil + } + case *ecdsa.PublicKey: + switch certPub.Curve { + case elliptic.P256(), elliptic.P384(), elliptic.P521(): + break + default: + return errors.New("unsupported ECDSA key parameters") + } + + ecKey, ok := key.(*ecdsa.PublicKey) + if ok && certPub.X.Cmp(ecKey.X) == 0 && certPub.Y.Cmp(ecKey.Y) == 0 { + return nil + } + default: + return errors.New("unknown or unsupported certificate public key algorithm") + } + + return errors.New("certificate key mismatch") +} + +// GetLocalRootCA validates if the contents of the file are a valid self-signed +// CA certificate, and returns the PEM-encoded Certificate if so +func GetLocalRootCA(paths CertPaths) (RootCA, error) { + // Check if we have a Certificate file + cert, err := 
ioutil.ReadFile(paths.Cert)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = ErrNoLocalRootCA
+		}
+
+		return RootCA{}, err
+	}
+	signingCert := cert
+
+	key, err := ioutil.ReadFile(paths.Key)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return RootCA{}, err
+		}
+		// There may not be a local key. It's okay to pass in a nil
+		// key. We'll get a root CA without a signer.
+		key = nil
+		signingCert = nil
+	}
+
+	return NewRootCA(cert, signingCert, key, DefaultNodeCertExpiration, nil)
+}
+
+func getGRPCConnection(creds credentials.TransportCredentials, connBroker *connectionbroker.Broker, forceRemote bool) (*connectionbroker.Conn, error) {
+	dialOpts := []grpc.DialOption{
+		grpc.WithTransportCredentials(creds),
+		grpc.WithTimeout(5 * time.Second),
+		grpc.WithBackoffMaxDelay(5 * time.Second),
+	}
+	if forceRemote {
+		return connBroker.SelectRemote(dialOpts...)
+	}
+	return connBroker.Select(dialOpts...)
+}
+
+// GetRemoteCA returns the remote endpoint's CA certificate bundle
+func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbroker.Broker) (RootCA, error) {
+	// This TLS Config is intentionally using InsecureSkipVerify. We use the
+	// digest instead to check the integrity of the CA certificate.
+	insecureCreds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: true})
+	conn, err := getGRPCConnection(insecureCreds, connBroker, false)
+	if err != nil {
+		return RootCA{}, err
+	}
+
+	client := api.NewCAClient(conn.ClientConn)
+	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
+	defer cancel()
+	defer func() {
+		conn.Close(err == nil)
+	}()
+	response, err := client.GetRootCACertificate(ctx, &api.GetRootCACertificateRequest{})
+	if err != nil {
+		return RootCA{}, err
+	}
+
+	// If a bundle of certificates is provided, the digest covers the entire bundle and not just
+	// one of the certificates in the bundle. Otherwise, a node could be MITMed while joining if
+	// the MITM CA provided a single certificate which matched the digest, along with arbitrary
+	// other non-verified root certs that the manager certificate actually chains up to.
+	if d != "" {
+		verifier := d.Verifier()
+		if err != nil {
+			return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier")
+		}
+
+		io.Copy(verifier, bytes.NewReader(response.Certificate))
+
+		if !verifier.Verified() {
+			return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Hex())
+		}
+	}
+
+	// NewRootCA will validate that the certificates are otherwise valid and create a RootCA object.
+	// Since there is no key, the certificate expiry does not matter and will not be used.
+	return NewRootCA(response.Certificate, nil, nil, DefaultNodeCertExpiration, nil)
+}
+
+// CreateRootCA creates a Certificate authority for a new Swarm Cluster, potentially
+// overwriting any existing CAs.
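+// A hypothetical bootstrap call would be `rootCA, err := CreateRootCA(DefaultRootCN)`, yielding a
+// RootCA with a local signer that can issue node certificates.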
+func CreateRootCA(rootCN string) (RootCA, error) {
+	// Create a simple CSR for the CA using the default CA validator and policy
+	req := cfcsr.CertificateRequest{
+		CN:         rootCN,
+		KeyRequest: &cfcsr.BasicKeyRequest{A: RootKeyAlgo, S: RootKeySize},
+		CA:         &cfcsr.CAConfig{Expiry: RootCAExpiration},
+	}
+
+	// Generate the CA and get the certificate and private key
+	cert, _, key, err := initca.New(&req)
+	if err != nil {
+		return RootCA{}, err
+	}
+
+	rootCA, err := NewRootCA(cert, cert, key, DefaultNodeCertExpiration, nil)
+	if err != nil {
+		return RootCA{}, err
+	}
+
+	return rootCA, nil
+}
+
+// GetRemoteSignedCertificate submits a CSR to a remote CA server address
+// that is part of a CA identified by a specific certificate pool.
+func GetRemoteSignedCertificate(ctx context.Context, csr []byte, rootCAPool *x509.CertPool, config CertificateRequestConfig) ([]byte, error) {
+	if rootCAPool == nil {
+		return nil, errors.New("valid root CA pool required")
+	}
+	creds := config.Credentials
+
+	if creds == nil {
+		// This is our only non-MTLS request, and it happens when we are bootstrapping our TLS certs
+		// We're using CARole as server name, so an external CA doesn't also have to have ManagerRole in the cert SANs
+		creds = credentials.NewTLS(&tls.Config{ServerName: CARole, RootCAs: rootCAPool})
+	}
+
+	conn, err := getGRPCConnection(creds, config.ConnBroker, config.ForceRemote)
+	if err != nil {
+		return nil, err
+	}
+
+	// Create a CAClient to retrieve a new Certificate
+	caClient := api.NewNodeCAClient(conn.ClientConn)
+
+	issueCtx, issueCancel := context.WithTimeout(ctx, 5*time.Second)
+	defer issueCancel()
+
+	// Send the Request and retrieve the request token
+	issueRequest := &api.IssueNodeCertificateRequest{CSR: csr, Token: config.Token, Availability: config.Availability}
+	issueResponse, err := caClient.IssueNodeCertificate(issueCtx, issueRequest)
+	if err != nil {
+		conn.Close(false)
+		return nil, err
+	}
+
+	statusRequest := &api.NodeCertificateStatusRequest{NodeID: issueResponse.NodeID}
+	expBackoff := events.NewExponentialBackoff(events.ExponentialBackoffConfig{
+		Base:   time.Second,
+		Factor: time.Second,
+		Max:    30 * time.Second,
+	})
+
+	// Exponential backoff with Max of 30 seconds to wait for a new retry
+	for {
+		timeout := 5 * time.Second
+		if config.NodeCertificateStatusRequestTimeout > 0 {
+			timeout = config.NodeCertificateStatusRequestTimeout
+		}
+		// Send the Request and retrieve the certificate
+		stateCtx, cancel := context.WithTimeout(ctx, timeout)
+		defer cancel()
+		statusResponse, err := caClient.NodeCertificateStatus(stateCtx, statusRequest)
+		s, _ := status.FromError(err)
+		switch {
+		case err != nil && s.Code() != codes.DeadlineExceeded:
+			conn.Close(false)
+			// Because IssueNodeCertificate succeeded, if this call failed it is likely due to an issue with this
+			// particular connection, so we need to get another. We should try a remote connection - the local node
+			// may be a manager that was demoted, so the local connection (which is preferred) may not work.
+ config.ForceRemote = true + conn, err = getGRPCConnection(creds, config.ConnBroker, config.ForceRemote) + if err != nil { + return nil, err + } + caClient = api.NewNodeCAClient(conn.ClientConn) + + // If there was no deadline exceeded error, and the certificate was issued, return + case err == nil && (statusResponse.Status.State == api.IssuanceStateIssued || statusResponse.Status.State == api.IssuanceStateRotate): + if statusResponse.Certificate == nil { + conn.Close(false) + return nil, errors.New("no certificate in CertificateStatus response") + } + + // The certificate in the response must match the CSR + // we submitted. If we are getting a response for a + // certificate that was previously issued, we need to + // retry until the certificate gets updated per our + // current request. + if bytes.Equal(statusResponse.Certificate.CSR, csr) { + conn.Close(true) + return statusResponse.Certificate.Certificate, nil + } + } + + // If NodeCertificateStatus timed out, we're still pending, the issuance failed, or + // the state is unknown let's continue trying after an exponential backoff + expBackoff.Failure(nil, nil) + select { + case <-ctx.Done(): + conn.Close(true) + return nil, err + case <-time.After(expBackoff.Proceed(nil)): + } + } +} + +// readCertValidity returns the certificate issue and expiration time +func readCertValidity(kr KeyReader) (time.Time, time.Time, error) { + var zeroTime time.Time + // Read the Cert + cert, _, err := kr.Read() + if err != nil { + return zeroTime, zeroTime, err + } + + // Create an x509 certificate out of the contents on disk + certBlock, _ := pem.Decode(cert) + if certBlock == nil { + return zeroTime, zeroTime, errors.New("failed to decode certificate block") + } + X509Cert, err := x509.ParseCertificate(certBlock.Bytes) + if err != nil { + return zeroTime, zeroTime, err + } + + return X509Cert.NotBefore, X509Cert.NotAfter, nil + +} + +// SaveRootCA saves a RootCA object to disk +func SaveRootCA(rootCA RootCA, paths CertPaths) error { + // Make sure the necessary dirs exist and they are writable + err := os.MkdirAll(filepath.Dir(paths.Cert), 0755) + if err != nil { + return err + } + + // If the root certificate got returned successfully, save the rootCA to disk. + return ioutils.AtomicWriteFile(paths.Cert, rootCA.Certs, 0644) +} + +// GenerateNewCSR returns a newly generated key and CSR signed with said key +func GenerateNewCSR() ([]byte, []byte, error) { + req := &cfcsr.CertificateRequest{ + KeyRequest: cfcsr.NewBasicKeyRequest(), + } + + csr, key, err := cfcsr.ParseRequest(req) + if err != nil { + return nil, nil, err + } + + key, err = pkcs8.ConvertECPrivateKeyPEM(key) + return csr, key, err +} + +// NormalizePEMs takes a bundle of PEM-encoded certificates in a certificate bundle, +// decodes them, removes headers, and re-encodes them to make sure that they have +// consistent whitespace. Note that this is intended to normalize x509 certificates +// in PEM format, hence the stripping out of headers. +func NormalizePEMs(certs []byte) []byte { + var ( + results []byte + pemBlock *pem.Block + ) + for { + pemBlock, certs = pem.Decode(bytes.TrimSpace(certs)) + if pemBlock == nil { + return results + } + pemBlock.Headers = nil + results = append(results, pem.EncodeToMemory(pemBlock)...) 
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/config.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/config.go
new file mode 100644
index 00000000..4befee5b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/config.go
@@ -0,0 +1,719 @@
+package ca
+
+import (
+	"context"
+	cryptorand "crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"fmt"
+	"math/big"
+	"math/rand"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	cfconfig "github.com/cloudflare/cfssl/config"
+	events "github.com/docker/go-events"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/connectionbroker"
+	"github.com/docker/swarmkit/identity"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/watch"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"google.golang.org/grpc/credentials"
+)
+
+const (
+	rootCACertFilename  = "swarm-root-ca.crt"
+	rootCAKeyFilename   = "swarm-root-ca.key"
+	nodeTLSCertFilename = "swarm-node.crt"
+	nodeTLSKeyFilename  = "swarm-node.key"
+
+	// DefaultRootCN represents the root CN that we should create root CAs with by default
+	DefaultRootCN = "swarm-ca"
+	// ManagerRole represents the Manager node type, and is used for authorization to endpoints
+	ManagerRole = "swarm-manager"
+	// WorkerRole represents the Worker node type, and is used for authorization to endpoints
+	WorkerRole = "swarm-worker"
+	// CARole represents the CA node type, and is used for clients attempting to get new certificates issued
+	CARole = "swarm-ca"
+
+	generatedSecretEntropyBytes = 16
+	joinTokenBase               = 36
+	// ceil(log(2^128-1, 36))
+	maxGeneratedSecretLength = 25
+	// ceil(log(2^256-1, 36))
+	base36DigestLen = 50
+)
+
+var (
+	// GetCertRetryInterval is how long to wait before retrying a node
+	// certificate or root certificate request.
+	GetCertRetryInterval = 2 * time.Second
+
+	// errInvalidJoinToken is returned when attempting to parse an invalid join
+	// token (e.g. when attempting to get the version, fipsness, or the root ca
+	// digest)
+	errInvalidJoinToken = errors.New("invalid join token")
+)
+
+// SecurityConfig is used to represent a node's security configuration. It includes information about
+// the RootCA and ServerTLSCreds/ClientTLSCreds transport authenticators to be used for MTLS
+type SecurityConfig struct {
+	// mu protects against concurrent access to fields inside the structure.
+	mu sync.Mutex
+
+	// renewalMu makes sure only one certificate renewal attempt happens at
+	// a time. It should never be locked after mu is already locked.
+	renewalMu sync.Mutex
+
+	rootCA        *RootCA
+	keyReadWriter *KeyReadWriter
+
+	certificate *tls.Certificate
+	issuerInfo  *IssuerInfo
+
+	ServerTLSCreds *MutableTLSCreds
+	ClientTLSCreds *MutableTLSCreds
+
+	// An optional queue for anyone interested in subscribing to SecurityConfig updates
+	queue *watch.Queue
+}
+
+// CertificateUpdate represents a change in the underlying TLS configuration being returned by
+// a certificate renewal event.
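+// (A hypothetical consumer of a renewal event would check Err first, and on success read the
+// node's new Role from the update.)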
+type CertificateUpdate struct {
+	Role string
+	Err  error
+}
+
+// ParsedJoinToken is the data from a join token, once parsed
+type ParsedJoinToken struct {
+	// Version is the version of the join token that is being parsed
+	Version int
+
+	// RootDigest is the digest of the root CA certificate of the cluster, which
+	// is always part of the join token so that the root CA can be verified
+	// upon initial node join
+	RootDigest digest.Digest
+
+	// Secret is the randomly-generated secret part of the join token - when
+	// rotating a join token, this is the value that is changed unless some other
+	// property of the cluster (like the root CA) is changed.
+	Secret string
+
+	// FIPS indicates whether the join token specifies that the cluster mandates
+	// that all nodes must have FIPS mode enabled.
+	FIPS bool
+}
+
+// ParseJoinToken parses a join token. Current format is v2, but this is currently used only if the cluster requires
+// mandatory FIPS, in order to facilitate mixed version clusters.
+// v1: SWMTKN-1-<root CA digest in base 36, 0-left-padded to 50 chars>-<16-byte secret in base 36, 0-left-padded to 25 chars>
+// v2: SWMTKN-2-<0/1 whether it's FIPS or not>-<root CA digest>-<secret>
+func ParseJoinToken(token string) (*ParsedJoinToken, error) {
+	split := strings.Split(token, "-")
+	numParts := len(split)
+
+	// v1 has 4, v2 has 5
+	if numParts < 4 || split[0] != "SWMTKN" {
+		return nil, errInvalidJoinToken
+	}
+
+	var (
+		version int
+		fips    bool
+	)
+
+	switch split[1] {
+	case "1":
+		if numParts != 4 {
+			return nil, errInvalidJoinToken
+		}
+		version = 1
+	case "2":
+		if numParts != 5 || (split[2] != "0" && split[2] != "1") {
+			return nil, errInvalidJoinToken
+		}
+		version = 2
+		fips = split[2] == "1"
+	default:
+		return nil, errInvalidJoinToken
+	}
+
+	secret := split[numParts-1]
+	rootDigest := split[numParts-2]
+	if len(rootDigest) != base36DigestLen || len(secret) != maxGeneratedSecretLength {
+		return nil, errInvalidJoinToken
+	}
+
+	var digestInt big.Int
+	digestInt.SetString(rootDigest, joinTokenBase)
+
+	d, err := digest.Parse(fmt.Sprintf("sha256:%0[1]*s", 64, digestInt.Text(16)))
+	if err != nil {
+		return nil, err
+	}
+	return &ParsedJoinToken{
+		Version:    version,
+		RootDigest: d,
+		Secret:     secret,
+		FIPS:       fips,
+	}, nil
+}
+
+func validateRootCAAndTLSCert(rootCA *RootCA, tlsKeyPair *tls.Certificate) error {
+	var (
+		leafCert         *x509.Certificate
+		intermediatePool *x509.CertPool
+	)
+	for i, derBytes := range tlsKeyPair.Certificate {
+		parsed, err := x509.ParseCertificate(derBytes)
+		if err != nil {
+			return errors.Wrap(err, "could not validate new root certificates due to parse error")
+		}
+		if i == 0 {
+			leafCert = parsed
+		} else {
+			if intermediatePool == nil {
+				intermediatePool = x509.NewCertPool()
+			}
+			intermediatePool.AddCert(parsed)
+		}
+	}
+	opts := x509.VerifyOptions{
+		Roots:         rootCA.Pool,
+		Intermediates: intermediatePool,
+	}
+	if _, err := leafCert.Verify(opts); err != nil {
+		return errors.Wrap(err, "new root CA does not match existing TLS credentials")
+	}
+	return nil
+}
+
+// NewSecurityConfig initializes and returns a new SecurityConfig.
+func NewSecurityConfig(rootCA *RootCA, krw *KeyReadWriter, tlsKeyPair *tls.Certificate, issuerInfo *IssuerInfo) (*SecurityConfig, func() error, error) {
+	// Create the Server TLS Credentials for this node. These will not be used by workers.
+	serverTLSCreds, err := rootCA.NewServerTLSCredentials(tlsKeyPair)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Create a TLSConfig to be used when this node connects as a client to another remote node.
+ // We're using ManagerRole as remote serverName for TLS host verification because both workers + // and managers always connect to remote managers. + clientTLSCreds, err := rootCA.NewClientTLSCredentials(tlsKeyPair, ManagerRole) + if err != nil { + return nil, nil, err + } + + q := watch.NewQueue() + return &SecurityConfig{ + rootCA: rootCA, + keyReadWriter: krw, + + certificate: tlsKeyPair, + issuerInfo: issuerInfo, + queue: q, + + ClientTLSCreds: clientTLSCreds, + ServerTLSCreds: serverTLSCreds, + }, q.Close, nil +} + +// RootCA returns the root CA. +func (s *SecurityConfig) RootCA() *RootCA { + s.mu.Lock() + defer s.mu.Unlock() + + return s.rootCA +} + +// KeyWriter returns the object that can write keys to disk +func (s *SecurityConfig) KeyWriter() KeyWriter { + return s.keyReadWriter +} + +// KeyReader returns the object that can read keys from disk +func (s *SecurityConfig) KeyReader() KeyReader { + return s.keyReadWriter +} + +// UpdateRootCA replaces the root CA with a new root CA +func (s *SecurityConfig) UpdateRootCA(rootCA *RootCA) error { + s.mu.Lock() + defer s.mu.Unlock() + + // refuse to update the root CA if the current TLS credentials do not validate against it + if err := validateRootCAAndTLSCert(rootCA, s.certificate); err != nil { + return err + } + + s.rootCA = rootCA + return s.updateTLSCredentials(s.certificate, s.issuerInfo) +} + +// Watch allows you to set a watch on the security config, in order to be notified of any changes +func (s *SecurityConfig) Watch() (chan events.Event, func()) { + return s.queue.Watch() +} + +// IssuerInfo returns the issuer subject and issuer public key +func (s *SecurityConfig) IssuerInfo() *IssuerInfo { + s.mu.Lock() + defer s.mu.Unlock() + return s.issuerInfo +} + +// This function expects something else to have taken out a lock on the SecurityConfig. +func (s *SecurityConfig) updateTLSCredentials(certificate *tls.Certificate, issuerInfo *IssuerInfo) error { + certs := []tls.Certificate{*certificate} + clientConfig, err := NewClientTLSConfig(certs, s.rootCA.Pool, ManagerRole) + if err != nil { + return errors.Wrap(err, "failed to create a new client config using the new root CA") + } + + serverConfig, err := NewServerTLSConfig(certs, s.rootCA.Pool) + if err != nil { + return errors.Wrap(err, "failed to create a new server config using the new root CA") + } + + if err := s.ClientTLSCreds.loadNewTLSConfig(clientConfig); err != nil { + return errors.Wrap(err, "failed to update the client credentials") + } + + if err := s.ServerTLSCreds.loadNewTLSConfig(serverConfig); err != nil { + return errors.Wrap(err, "failed to update the server TLS credentials") + } + + s.certificate = certificate + s.issuerInfo = issuerInfo + if s.queue != nil { + s.queue.Publish(&api.NodeTLSInfo{ + TrustRoot: s.rootCA.Certs, + CertIssuerPublicKey: s.issuerInfo.PublicKey, + CertIssuerSubject: s.issuerInfo.Subject, + }) + } + return nil +} + +// UpdateTLSCredentials updates the security config with an updated TLS certificate and issuer info +func (s *SecurityConfig) UpdateTLSCredentials(certificate *tls.Certificate, issuerInfo *IssuerInfo) error { + s.mu.Lock() + defer s.mu.Unlock() + return s.updateTLSCredentials(certificate, issuerInfo) +} + +// SigningPolicy creates a policy used by the signer to ensure that the only fields +// from the remote CSRs we trust are: PublicKey, PublicKeyAlgorithm and SignatureAlgorithm. 
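+// (Everything else in a submitted CSR, such as subject and SANs, is ignored; those values come
+// from API call parameters instead, as the CSRWhitelist below enforces.)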
+// It receives the duration a certificate will be valid for
+func SigningPolicy(certExpiry time.Duration) *cfconfig.Signing {
+	// Force any certificate expiration below the allowed minimum up to the default expiration
+	if certExpiry < MinNodeCertExpiration {
+		certExpiry = DefaultNodeCertExpiration
+	}
+
+	// Add the backdate
+	certExpiry = certExpiry + CertBackdate
+
+	return &cfconfig.Signing{
+		Default: &cfconfig.SigningProfile{
+			Usage:    []string{"signing", "key encipherment", "server auth", "client auth"},
+			Expiry:   certExpiry,
+			Backdate: CertBackdate,
+			// Only trust the key components from the CSR. Everything else should
+			// come directly from API call params.
+			CSRWhitelist: &cfconfig.CSRWhitelist{
+				PublicKey:          true,
+				PublicKeyAlgorithm: true,
+				SignatureAlgorithm: true,
+			},
+		},
+	}
+}
+
+// SecurityConfigPaths is used as a helper to hold all the paths of security relevant files
+type SecurityConfigPaths struct {
+	Node, RootCA CertPaths
+}
+
+// NewConfigPaths returns the absolute paths to all of the different types of files
+func NewConfigPaths(baseCertDir string) *SecurityConfigPaths {
+	return &SecurityConfigPaths{
+		Node: CertPaths{
+			Cert: filepath.Join(baseCertDir, nodeTLSCertFilename),
+			Key:  filepath.Join(baseCertDir, nodeTLSKeyFilename)},
+		RootCA: CertPaths{
+			Cert: filepath.Join(baseCertDir, rootCACertFilename),
+			Key:  filepath.Join(baseCertDir, rootCAKeyFilename)},
+	}
+}
+
+// GenerateJoinToken creates a new join token. Current format is v2, but this is
+// currently used only if the cluster requires mandatory FIPS, in order to
+// facilitate mixed version clusters (the `fips` parameter is set to true).
+// Otherwise, v1 is used so as to maintain compatibility in mixed version
+// non-FIPS clusters.
+// v1: SWMTKN-1-<root CA digest in base 36, 0-left-padded to 50 chars>-<16-byte secret in base 36, 0-left-padded to 25 chars>
+// v2: SWMTKN-2-<0/1 whether it's FIPS or not>-<root CA digest>-<secret>
+func GenerateJoinToken(rootCA *RootCA, fips bool) string {
+	var secretBytes [generatedSecretEntropyBytes]byte
+
+	if _, err := cryptorand.Read(secretBytes[:]); err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+
+	var nn, digest big.Int
+	nn.SetBytes(secretBytes[:])
+	digest.SetString(rootCA.Digest.Hex(), 16)
+
+	fmtString := "SWMTKN-1-%0[1]*s-%0[3]*s"
+	if fips {
+		fmtString = "SWMTKN-2-1-%0[1]*s-%0[3]*s"
+	}
+	return fmt.Sprintf(fmtString, base36DigestLen,
+		digest.Text(joinTokenBase), maxGeneratedSecretLength, nn.Text(joinTokenBase))
+}
+
+// DownloadRootCA tries to retrieve a remote root CA and matches the digest against the provided token.
+func DownloadRootCA(ctx context.Context, paths CertPaths, token string, connBroker *connectionbroker.Broker) (RootCA, error) {
+	var rootCA RootCA
+	// Get a digest for the optional CA hash string that we've been provided.
+	// If we were provided a non-empty string, and it is an invalid hash, return;
+	// otherwise, allow the empty digest through.
+	var (
+		d   digest.Digest
+		err error
+	)
+	if token != "" {
+		parsed, err := ParseJoinToken(token)
+		if err != nil {
+			return RootCA{}, err
+		}
+		d = parsed.RootDigest
+	}
+	// Get the remote CA certificate, verify integrity with the
+	// hash provided. Retry up to 5 times, in case the manager we
+	// first try to contact is not responding properly (it may have
+	// just been demoted, for example).
+
+	for i := 0; i != 5; i++ {
+		rootCA, err = GetRemoteCA(ctx, d, connBroker)
+		if err == nil {
+			break
+		}
+		log.G(ctx).WithError(err).Errorf("failed to retrieve remote root CA certificate")
+
+		select {
+		case <-time.After(GetCertRetryInterval):
+		case <-ctx.Done():
+			return RootCA{}, ctx.Err()
+		}
+	}
+	if err != nil {
+		return RootCA{}, err
+	}
+
+	// Save root CA certificate to disk
+	if err = SaveRootCA(rootCA, paths); err != nil {
+		return RootCA{}, err
+	}
+
+	log.G(ctx).Debugf("retrieved remote CA certificate: %s", paths.Cert)
+	return rootCA, nil
+}
+
+// LoadSecurityConfig loads TLS credentials from disk, or returns an error if
+// these credentials do not exist or are unusable.
+func LoadSecurityConfig(ctx context.Context, rootCA RootCA, krw *KeyReadWriter, allowExpired bool) (*SecurityConfig, func() error, error) {
+	ctx = log.WithModule(ctx, "tls")
+
+	// At this point we've successfully loaded the CA details from disk, or
+	// successfully downloaded them remotely. The next step is to try to
+	// load our certificates.
+
+	// Read both the Cert and Key from disk
+	cert, key, err := krw.Read()
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Check to see if this certificate was signed by our CA, and isn't expired
+	_, chains, err := ValidateCertChain(rootCA.Pool, cert, allowExpired)
+	if err != nil {
+		return nil, nil, err
+	}
+	// ValidateCertChain, if successful, will always return at least 1 chain containing
+	// at least 2 certificates: the leaf and the root.
+	issuer := chains[0][1]
+
+	// Now that we know this certificate is valid, create a TLS Certificate for our
+	// credentials
+	keyPair, err := tls.X509KeyPair(cert, key)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	secConfig, cleanup, err := NewSecurityConfig(&rootCA, krw, &keyPair, &IssuerInfo{
+		Subject:   issuer.RawSubject,
+		PublicKey: issuer.RawSubjectPublicKeyInfo,
+	})
+	if err == nil {
+		log.G(ctx).WithFields(logrus.Fields{
+			"node.id":   secConfig.ClientTLSCreds.NodeID(),
+			"node.role": secConfig.ClientTLSCreds.Role(),
+		}).Debug("loaded node credentials")
+	}
+	return secConfig, cleanup, err
+}
+
+// CertificateRequestConfig contains the information needed to request a
+// certificate from a remote CA.
+type CertificateRequestConfig struct {
+	// Token is the join token that authenticates us with the CA.
+	Token string
+	// Availability allows a user to control the current scheduling status of a node
+	Availability api.NodeSpec_Availability
+	// ConnBroker provides connections to CAs.
+	ConnBroker *connectionbroker.Broker
+	// Credentials provides transport credentials for communicating with the
+	// remote server.
+	Credentials credentials.TransportCredentials
+	// ForceRemote specifies that only a remote (TCP) connection should
+	// be used to request the certificate. This may be necessary in cases
+	// where the local node is running a manager, but is in the process of
+	// being demoted.
+	ForceRemote bool
+	// NodeCertificateStatusRequestTimeout determines how long to wait for a node
+	// status RPC result. If not provided (zero value), will default to 5 seconds.
+	NodeCertificateStatusRequestTimeout time.Duration
+	// RetryInterval specifies how long to delay between retries, if non-zero.
+	RetryInterval time.Duration
+	// Organization is the organization to use for a TLS certificate when creating
+	// a security config from scratch. If not provided, a random ID is generated.
+	// For swarm certificates, the organization is the cluster ID.
+ Organization string +} + +// CreateSecurityConfig creates a new key and cert for this node, either locally +// or via a remote CA. +func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWriter, config CertificateRequestConfig) (*SecurityConfig, func() error, error) { + ctx = log.WithModule(ctx, "tls") + + // Create a new random ID for this certificate + cn := identity.NewID() + org := config.Organization + if config.Organization == "" { + org = identity.NewID() + } + + proposedRole := ManagerRole + tlsKeyPair, issuerInfo, err := rootCA.IssueAndSaveNewCertificates(krw, cn, proposedRole, org) + switch errors.Cause(err) { + case ErrNoValidSigner: + config.RetryInterval = GetCertRetryInterval + // Request certificate issuance from a remote CA. + // Last argument is nil because at this point we don't have any valid TLS creds + tlsKeyPair, issuerInfo, err = rootCA.RequestAndSaveNewCertificates(ctx, krw, config) + if err != nil { + log.G(ctx).WithError(err).Error("failed to request and save new certificate") + return nil, nil, err + } + case nil: + log.G(ctx).WithFields(logrus.Fields{ + "node.id": cn, + "node.role": proposedRole, + }).Debug("issued new TLS certificate") + default: + log.G(ctx).WithFields(logrus.Fields{ + "node.id": cn, + "node.role": proposedRole, + }).WithError(err).Errorf("failed to issue and save new certificate") + return nil, nil, err + } + + secConfig, cleanup, err := NewSecurityConfig(&rootCA, krw, tlsKeyPair, issuerInfo) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": secConfig.ClientTLSCreds.NodeID(), + "node.role": secConfig.ClientTLSCreds.Role(), + }).Debugf("new node credentials generated: %s", krw.Target()) + } + return secConfig, cleanup, err +} + +// TODO(cyli): currently we have to only update if it's a worker role - if we have a single root CA update path for +// both managers and workers, we won't need to check any more. +func updateRootThenUpdateCert(ctx context.Context, s *SecurityConfig, connBroker *connectionbroker.Broker, rootPaths CertPaths, failedCert *x509.Certificate) (*tls.Certificate, *IssuerInfo, error) { + if len(failedCert.Subject.OrganizationalUnit) == 0 || failedCert.Subject.OrganizationalUnit[0] != WorkerRole { + return nil, nil, errors.New("cannot update root CA since this is not a worker") + } + // try downloading a new root CA if it's an unknown authority issue, in case there was a root rotation completion + // and we just didn't get the new root + rootCA, err := GetRemoteCA(ctx, "", connBroker) + if err != nil { + return nil, nil, err + } + // validate against the existing security config creds + if err := s.UpdateRootCA(&rootCA); err != nil { + return nil, nil, err + } + if err := SaveRootCA(rootCA, rootPaths); err != nil { + return nil, nil, err + } + return rootCA.RequestAndSaveNewCertificates(ctx, s.KeyWriter(), + CertificateRequestConfig{ + ConnBroker: connBroker, + Credentials: s.ClientTLSCreds, + }) +} + +// RenewTLSConfigNow gets a new TLS cert and key, and updates the security config if provided. 
This is similar to
+// RenewTLSConfig, except that while RenewTLSConfig monitors for expiry and periodically renews, this renews
+// once and blocks until done.
+func RenewTLSConfigNow(ctx context.Context, s *SecurityConfig, connBroker *connectionbroker.Broker, rootPaths CertPaths) error {
+	s.renewalMu.Lock()
+	defer s.renewalMu.Unlock()
+
+	ctx = log.WithModule(ctx, "tls")
+	log := log.G(ctx).WithFields(logrus.Fields{
+		"node.id":   s.ClientTLSCreds.NodeID(),
+		"node.role": s.ClientTLSCreds.Role(),
+	})
+
+	// Let's request new certs. Renewals don't require a token.
+	rootCA := s.RootCA()
+	tlsKeyPair, issuerInfo, err := rootCA.RequestAndSaveNewCertificates(ctx,
+		s.KeyWriter(),
+		CertificateRequestConfig{
+			ConnBroker:  connBroker,
+			Credentials: s.ClientTLSCreds,
+		})
+	if wrappedError, ok := err.(x509UnknownAuthError); ok {
+		var newErr error
+		tlsKeyPair, issuerInfo, newErr = updateRootThenUpdateCert(ctx, s, connBroker, rootPaths, wrappedError.failedLeafCert)
+		if newErr != nil {
+			err = wrappedError.error
+		} else {
+			err = nil
+		}
+	}
+	if err != nil {
+		log.WithError(err).Errorf("failed to renew the certificate")
+		return err
+	}
+
+	return s.UpdateTLSCredentials(tlsKeyPair, issuerInfo)
+}
+
+// calculateRandomExpiry returns a random duration between 50% and 80% of the
+// original validity period
+func calculateRandomExpiry(validFrom, validUntil time.Time) time.Duration {
+	duration := validUntil.Sub(validFrom)
+
+	var randomExpiry int
+	// Our lower bound of renewal will be half of the total expiration time
+	minValidity := int(duration.Minutes() * CertLowerRotationRange)
+	// Our upper bound of renewal will be 80% of the total expiration time
+	maxValidity := int(duration.Minutes() * CertUpperRotationRange)
+	// Let's select a random number of minutes between min and max, and set our retry for that
+	// Using randomly selected rotation allows us to avoid certificate thundering herds.
+	if maxValidity-minValidity < 1 {
+		randomExpiry = minValidity
+	} else {
+		randomExpiry = rand.Intn(maxValidity-minValidity) + minValidity
+	}
+
+	expiry := time.Until(validFrom.Add(time.Duration(randomExpiry) * time.Minute))
+	if expiry < 0 {
+		return 0
+	}
+	return expiry
+}
+
+// NewServerTLSConfig returns a tls.Config configured for a TLS Server, given a tls.Certificate
+// and the PEM-encoded root CA Certificate
+func NewServerTLSConfig(certs []tls.Certificate, rootCAPool *x509.CertPool) (*tls.Config, error) {
+	if rootCAPool == nil {
+		return nil, errors.New("valid root CA pool required")
+	}
+
+	return &tls.Config{
+		Certificates: certs,
+		// Since we're using the same CA server to issue Certificates to new nodes, we can't
+		// use tls.RequireAndVerifyClientCert
+		ClientAuth:               tls.VerifyClientCertIfGiven,
+		RootCAs:                  rootCAPool,
+		ClientCAs:                rootCAPool,
+		PreferServerCipherSuites: true,
+		MinVersion:               tls.VersionTLS12,
+	}, nil
+}
+
+// NewClientTLSConfig returns a tls.Config configured for a TLS Client, given a tls.Certificate,
+// the PEM-encoded root CA Certificate, and the name of the remote server the client wants to connect to.
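+// A hypothetical worker-to-manager call: NewClientTLSConfig([]tls.Certificate{leaf}, rootCA.Pool, ManagerRole).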
+func NewClientTLSConfig(certs []tls.Certificate, rootCAPool *x509.CertPool, serverName string) (*tls.Config, error) {
+	if rootCAPool == nil {
+		return nil, errors.New("valid root CA pool required")
+	}
+
+	return &tls.Config{
+		ServerName:   serverName,
+		Certificates: certs,
+		RootCAs:      rootCAPool,
+		MinVersion:   tls.VersionTLS12,
+	}, nil
+}
+
+// NewClientTLSCredentials returns GRPC credentials for a TLS GRPC client, given a tls.Certificate,
+// a PEM-encoded root CA Certificate, and the name of the remote server the client wants to connect to.
+func (rootCA *RootCA) NewClientTLSCredentials(cert *tls.Certificate, serverName string) (*MutableTLSCreds, error) {
+	tlsConfig, err := NewClientTLSConfig([]tls.Certificate{*cert}, rootCA.Pool, serverName)
+	if err != nil {
+		return nil, err
+	}
+
+	mtls, err := NewMutableTLS(tlsConfig)
+
+	return mtls, err
+}
+
+// NewServerTLSCredentials returns GRPC credentials for a TLS GRPC server, given a tls.Certificate
+// and a PEM-encoded root CA Certificate.
+func (rootCA *RootCA) NewServerTLSCredentials(cert *tls.Certificate) (*MutableTLSCreds, error) {
+	tlsConfig, err := NewServerTLSConfig([]tls.Certificate{*cert}, rootCA.Pool)
+	if err != nil {
+		return nil, err
+	}
+
+	mtls, err := NewMutableTLS(tlsConfig)
+
+	return mtls, err
+}
+
+// ParseRole parses an apiRole into an internal role string
+func ParseRole(apiRole api.NodeRole) (string, error) {
+	switch apiRole {
+	case api.NodeRoleManager:
+		return ManagerRole, nil
+	case api.NodeRoleWorker:
+		return WorkerRole, nil
+	default:
+		return "", errors.Errorf("failed to parse api role: %v", apiRole)
+	}
+}
+
+// FormatRole parses an internal role string into an apiRole
+func FormatRole(role string) (api.NodeRole, error) {
+	switch strings.ToLower(role) {
+	case strings.ToLower(ManagerRole):
+		return api.NodeRoleManager, nil
+	case strings.ToLower(WorkerRole):
+		return api.NodeRoleWorker, nil
+	default:
+		return 0, errors.Errorf("failed to parse role: %s", role)
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/external.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/external.go
new file mode 100644
index 00000000..6b812045
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/external.go
@@ -0,0 +1,230 @@
+package ca
+
+import (
+	"bytes"
+	"context"
+	cryptorand "crypto/rand"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/hex"
+	"encoding/json"
+	"encoding/pem"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/cloudflare/cfssl/api"
+	"github.com/cloudflare/cfssl/config"
+	"github.com/cloudflare/cfssl/csr"
+	"github.com/cloudflare/cfssl/signer"
+	"github.com/docker/swarmkit/log"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+const (
+	// ExternalCrossSignProfile is the profile that we will be sending cross-signing CSR sign requests with
+	ExternalCrossSignProfile = "CA"
+
+	// CertificateMaxSize is the maximum expected size of a certificate.
+	// While there is no specced upper limit to the size of an x509 certificate in PEM format,
+	// one with a ridiculous RSA key size (16384) and 26 256-character DNS SAN fields is about 14k.
+	// While there is no upper limit on the length of certificate chains, long chains are impractical.
+	// To be conservative, and to also account for external CA certificate responses in JSON format
+	// from CFSSL, we'll set the max to be 256KiB.
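+	// (256 << 10 is 256 * 1024 = 262144 bytes.)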
+	CertificateMaxSize int64 = 256 << 10
+)
+
+// ErrNoExternalCAURLs is an error used to indicate that an ExternalCA is
+// configured with no URLs to which it can proxy certificate signing requests.
+var ErrNoExternalCAURLs = errors.New("no external CA URLs")
+
+// ExternalCA is able to make certificate signing requests to one of a list
+// of remote CFSSL API endpoints.
+type ExternalCA struct {
+	ExternalRequestTimeout time.Duration
+
+	mu            sync.Mutex
+	intermediates []byte
+	urls          []string
+	client        *http.Client
+}
+
+// NewExternalCATLSConfig takes a TLS certificate and root pool and returns a TLS config that can be updated
+// without killing existing connections
+func NewExternalCATLSConfig(certs []tls.Certificate, rootPool *x509.CertPool) *tls.Config {
+	return &tls.Config{
+		Certificates: certs,
+		RootCAs:      rootPool,
+		MinVersion:   tls.VersionTLS12,
+	}
+}
+
+// NewExternalCA creates a new ExternalCA which uses the given tlsConfig to
+// authenticate to any of the given URLs of CFSSL API endpoints.
+func NewExternalCA(intermediates []byte, tlsConfig *tls.Config, urls ...string) *ExternalCA {
+	return &ExternalCA{
+		ExternalRequestTimeout: 5 * time.Second,
+		intermediates:          intermediates,
+		urls:                   urls,
+		client: &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsConfig,
+			},
+		},
+	}
+}
+
+// UpdateTLSConfig updates the HTTP Client for this ExternalCA by creating
+// a new client which uses the given tlsConfig.
+func (eca *ExternalCA) UpdateTLSConfig(tlsConfig *tls.Config) {
+	eca.mu.Lock()
+	defer eca.mu.Unlock()
+
+	eca.client = &http.Client{
+		Transport: &http.Transport{
+			TLSClientConfig: tlsConfig,
+		},
+	}
+}
+
+// UpdateURLs updates the list of CSR API endpoints by setting it to the given urls.
+func (eca *ExternalCA) UpdateURLs(urls ...string) {
+	eca.mu.Lock()
+	defer eca.mu.Unlock()
+
+	eca.urls = urls
+}
+
+// Sign signs a new certificate by proxying the given certificate signing
+// request to an external CFSSL API server.
+func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert []byte, err error) {
+	// Get the current HTTP client and list of URLs in a small critical
+	// section. We will use these to make certificate signing requests.
+	eca.mu.Lock()
+	urls := eca.urls
+	client := eca.client
+	intermediates := eca.intermediates
+	eca.mu.Unlock()
+
+	if len(urls) == 0 {
+		return nil, ErrNoExternalCAURLs
+	}
+
+	csrJSON, err := json.Marshal(req)
+	if err != nil {
+		return nil, errors.Wrap(err, "unable to JSON-encode CFSSL signing request")
+	}
+
+	// Try each configured proxy URL. Return after the first success. If
+	// all fail then the last error will be returned.
+	for _, url := range urls {
+		requestCtx, cancel := context.WithTimeout(ctx, eca.ExternalRequestTimeout)
+		cert, err = makeExternalSignRequest(requestCtx, client, url, csrJSON)
+		cancel()
+		if err == nil {
+			return append(cert, intermediates...), err
+		}
+		log.G(ctx).Debugf("unable to proxy certificate signing request to %s: %s", url, err)
+	}
+
+	return nil, err
+}
+
+// CrossSignRootCA takes a RootCA object, generates a CA CSR, sends a signing request with the CA CSR to the external
+// CFSSL API server in order to obtain a cross-signed root
+func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte, error) {
+	// ExtractCertificateRequest generates a new key request, and we want to continue to use the old
+	// key.
However, ExtractCertificateRequest will also convert the pkix.Name to csr.Name, which we + // need in order to generate a signing request + rcaSigner, err := rca.Signer() + if err != nil { + return nil, err + } + rootCert := rcaSigner.parsedCert + cfCSRObj := csr.ExtractCertificateRequest(rootCert) + + der, err := x509.CreateCertificateRequest(cryptorand.Reader, &x509.CertificateRequest{ + RawSubjectPublicKeyInfo: rootCert.RawSubjectPublicKeyInfo, + RawSubject: rootCert.RawSubject, + PublicKeyAlgorithm: rootCert.PublicKeyAlgorithm, + Subject: rootCert.Subject, + Extensions: rootCert.Extensions, + DNSNames: rootCert.DNSNames, + EmailAddresses: rootCert.EmailAddresses, + IPAddresses: rootCert.IPAddresses, + }, rcaSigner.cryptoSigner) + if err != nil { + return nil, err + } + req := signer.SignRequest{ + Request: string(pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE REQUEST", + Bytes: der, + })), + Subject: &signer.Subject{ + CN: rootCert.Subject.CommonName, + Names: cfCSRObj.Names, + }, + Profile: ExternalCrossSignProfile, + } + // cfssl actually ignores non subject alt name extensions in the CSR, so we have to add the CA extension in the signing + // request as well + for _, ext := range rootCert.Extensions { + if ext.Id.Equal(BasicConstraintsOID) { + req.Extensions = append(req.Extensions, signer.Extension{ + ID: config.OID(ext.Id), + Critical: ext.Critical, + Value: hex.EncodeToString(ext.Value), + }) + } + } + return eca.Sign(ctx, req) +} + +func makeExternalSignRequest(ctx context.Context, client *http.Client, url string, csrJSON []byte) (cert []byte, err error) { + resp, err := ctxhttp.Post(ctx, client, url, "application/json", bytes.NewReader(csrJSON)) + if err != nil { + return nil, recoverableErr{err: errors.Wrap(err, "unable to perform certificate signing request")} + } + defer resp.Body.Close() + + b := io.LimitReader(resp.Body, CertificateMaxSize) + body, err := ioutil.ReadAll(b) + if err != nil { + return nil, recoverableErr{err: errors.Wrap(err, "unable to read CSR response body")} + } + + if resp.StatusCode != http.StatusOK { + return nil, recoverableErr{err: errors.Errorf("unexpected status code in CSR response: %d - %s", resp.StatusCode, string(body))} + } + + var apiResponse api.Response + if err := json.Unmarshal(body, &apiResponse); err != nil { + logrus.Debugf("unable to JSON-parse CFSSL API response body: %s", string(body)) + return nil, recoverableErr{err: errors.Wrap(err, "unable to parse JSON response")} + } + + if !apiResponse.Success || apiResponse.Result == nil { + if len(apiResponse.Errors) > 0 { + return nil, errors.Errorf("response errors: %v", apiResponse.Errors) + } + + return nil, errors.New("certificate signing request failed") + } + + result, ok := apiResponse.Result.(map[string]interface{}) + if !ok { + return nil, errors.Errorf("invalid result type: %T", apiResponse.Result) + } + + certPEM, ok := result["certificate"].(string) + if !ok { + return nil, errors.Errorf("invalid result certificate field type: %T", result["certificate"]) + } + + return []byte(certPEM), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/forward.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/forward.go new file mode 100644 index 00000000..7ad7c7dd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/forward.go @@ -0,0 +1,78 @@ +package ca + +import ( + "context" + + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" +) + +const ( + certForwardedKey = 
"forwarded_cert" + certCNKey = "forwarded_cert_cn" + certOUKey = "forwarded_cert_ou" + certOrgKey = "forwarded_cert_org" + remoteAddrKey = "remote_addr" +) + +// forwardedTLSInfoFromContext obtains forwarded TLS CN/OU from the grpc.MD +// object in ctx. +func forwardedTLSInfoFromContext(ctx context.Context) (remoteAddr string, cn string, org string, ous []string) { + md, _ := metadata.FromIncomingContext(ctx) + if len(md[remoteAddrKey]) != 0 { + remoteAddr = md[remoteAddrKey][0] + } + if len(md[certCNKey]) != 0 { + cn = md[certCNKey][0] + } + if len(md[certOrgKey]) != 0 { + org = md[certOrgKey][0] + } + ous = md[certOUKey] + return +} + +func isForwardedRequest(ctx context.Context) bool { + md, _ := metadata.FromIncomingContext(ctx) + if len(md[certForwardedKey]) != 1 { + return false + } + return md[certForwardedKey][0] == "true" +} + +// WithMetadataForwardTLSInfo reads certificate from context and returns context where +// ForwardCert is set based on original certificate. +func WithMetadataForwardTLSInfo(ctx context.Context) (context.Context, error) { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + md = metadata.MD{} + } + + ous := []string{} + org := "" + cn := "" + + certSubj, err := certSubjectFromContext(ctx) + if err == nil { + cn = certSubj.CommonName + ous = certSubj.OrganizationalUnit + if len(certSubj.Organization) > 0 { + org = certSubj.Organization[0] + } + } + + // If there's no TLS cert, forward with blank TLS metadata. + // Note that the presence of this blank metadata is extremely + // important. Without it, it would look like manager is making + // the request directly. + md[certForwardedKey] = []string{"true"} + md[certCNKey] = []string{cn} + md[certOrgKey] = []string{org} + md[certOUKey] = ous + peer, ok := peer.FromContext(ctx) + if ok { + md[remoteAddrKey] = []string{peer.Addr.String()} + } + + return metadata.NewOutgoingContext(ctx, md), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go new file mode 100644 index 00000000..09114409 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go @@ -0,0 +1,493 @@ +package ca + +import ( + "crypto/x509" + "encoding/pem" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + "crypto/tls" + + "github.com/docker/swarmkit/ca/keyutils" + "github.com/docker/swarmkit/ca/pkcs8" + "github.com/docker/swarmkit/ioutils" + "github.com/pkg/errors" +) + +const ( + // keyPerms are the permissions used to write the TLS keys + keyPerms = 0600 + // certPerms are the permissions used to write TLS certificates + certPerms = 0644 + // versionHeader is the TLS PEM key header that contains the KEK version + versionHeader = "kek-version" +) + +// PEMKeyHeaders is an interface for something that needs to know about PEM headers +// when reading or writing TLS keys in order to keep them updated with the latest +// KEK. +type PEMKeyHeaders interface { + + // UnmarshalHeaders loads the headers map given the current KEK + UnmarshalHeaders(map[string]string, KEKData) (PEMKeyHeaders, error) + + // MarshalHeaders returns a header map given the current KEK + MarshalHeaders(KEKData) (map[string]string, error) + + // UpdateKEK gets called whenever KeyReadWriter gets a KEK update. This allows the + // PEMKeyHeaders to optionally update any internal state. It should return an + // updated (if needed) versino of itself. 
+	UpdateKEK(KEKData, KEKData) PEMKeyHeaders
+}
+
+// KeyReader reads a TLS cert and key from disk
+type KeyReader interface {
+
+	// Read reads and returns the certificate and the key PEM bytes that are on disk
+	Read() ([]byte, []byte, error)
+
+	// Target returns a string representation of where the cert data is being read from
+	Target() string
+}
+
+// KeyWriter writes a TLS key and cert to disk
+type KeyWriter interface {
+
+	// Write accepts a certificate and key in PEM format, as well as an optional KEKData object.
+	// If there is a current KEK, the key is encrypted using the current KEK. If the KEKData object
+	// is provided (not nil), the key will be encrypted using the new KEK instead, and the current
+	// KEK in memory will be replaced by the provided KEKData. The reason to allow changing key
+	// material and KEK in a single step, as opposed to two steps, is to prevent the key material
+	// from being written unencrypted or with an old KEK in the first place (when a node gets a
+	// certificate from the CA, it will also request the current KEK so it won't have to immediately
+	// do a KEK rotation after getting the key).
+	Write([]byte, []byte, *KEKData) error
+
+	// ViewAndUpdateHeaders is a function that reads and updates the headers of the key in a single
+	// transaction (e.g. within a lock). It accepts a callback function which will be passed the
+	// current header management object, and which must return a new, updated, or same header
+	// management object. KeyReadWriter then performs the following actions:
+	//   - uses the old header management object and the current KEK to deserialize/decrypt
+	//     the existing PEM headers
+	//   - uses the new header management object and the current KEK to reserialize/encrypt
+	//     the PEM headers
+	//   - writes the new PEM headers, as well as the key material, unchanged, to disk
+	ViewAndUpdateHeaders(func(PEMKeyHeaders) (PEMKeyHeaders, error)) error
+
+	// ViewAndRotateKEK is a function that just re-encrypts the TLS key and headers in a single
+	// transaction (e.g. within a lock). It accepts a callback function which will be passed the
+	// current KEK and the current headers management object, and which should return a new
+	// KEK and header management object. KeyReadWriter then performs the following actions:
+	//   - uses the old KEK and header management object to deserialize/decrypt the
+	//     TLS key and PEM headers
+	//   - uses the new KEK and header management object to serialize/encrypt the TLS key
+	//     and PEM headers
+	//   - writes the new PEM headers and newly encrypted TLS key to disk
+	ViewAndRotateKEK(func(KEKData, PEMKeyHeaders) (KEKData, PEMKeyHeaders, error)) error
+
+	// GetCurrentState returns the current header management object and the current KEK.
+	GetCurrentState() (PEMKeyHeaders, KEKData)
+
+	// Target returns a string representation of where the cert data is being read from
+	Target() string
+}
+
+// KEKData provides an optional update to the kek when writing. The structure
+// is needed so that we can tell the difference between "do not encrypt anymore"
+// and "there is no update".
+type KEKData struct {
+	KEK     []byte
+	Version uint64
+}
+
+// ErrInvalidKEK means that we cannot decrypt the TLS key for some reason
+type ErrInvalidKEK struct {
+	Wrapped error
+}
+
+func (e ErrInvalidKEK) Error() string {
+	return e.Wrapped.Error()
+}
+
+// KeyReadWriter is an object that knows how to read and write TLS keys and certs to disk,
+// optionally encrypted and optionally updating PEM headers.
It should be the only object which +// can write the TLS key, to ensure that writes are serialized and that the TLS key, the +// KEK (key encrypting key), and any headers which need to be written are never out of sync. +// It accepts a PEMKeyHeaders object, which is used to serialize/encrypt and deserialize/decrypt +// the PEM headers when given the current headers and the current KEK. +type KeyReadWriter struct { + + // This lock is held whenever a key is read from or written to disk, or whenever the internal + // state of the KeyReadWriter (such as the KEK, the key formatter, or the PEM header management + // object changes.) + mu sync.Mutex + + kekData KEKData + paths CertPaths + headersObj PEMKeyHeaders + keyFormatter keyutils.Formatter +} + +// NewKeyReadWriter creates a new KeyReadWriter +func NewKeyReadWriter(paths CertPaths, kek []byte, headersObj PEMKeyHeaders) *KeyReadWriter { + return &KeyReadWriter{ + kekData: KEKData{KEK: kek}, + paths: paths, + headersObj: headersObj, + keyFormatter: keyutils.Default, + } +} + +// SetKeyFormatter sets the keyformatter with which to encrypt and decrypt keys +func (k *KeyReadWriter) SetKeyFormatter(kf keyutils.Formatter) { + k.mu.Lock() + defer k.mu.Unlock() + k.keyFormatter = kf +} + +// Migrate checks to see if a temporary key file exists. Older versions of +// swarmkit wrote temporary keys instead of temporary certificates, so +// migrate that temporary key if it exists. We want to write temporary certificates, +// instead of temporary keys, because we may need to periodically re-encrypt the +// keys and modify the headers, and it's easier to have a single canonical key +// location than two possible key locations. +func (k *KeyReadWriter) Migrate() error { + tmpPaths := k.genTempPaths() + keyBytes, err := ioutil.ReadFile(tmpPaths.Key) + if err != nil { + return nil // no key? no migration + } + + // it does exist - no need to decrypt, because previous versions of swarmkit + // which supported this temporary key did not support encrypting TLS keys + cert, err := ioutil.ReadFile(k.paths.Cert) + if err != nil { + return os.RemoveAll(tmpPaths.Key) // no cert? no migration + } + + // nope, this does not match the cert + if _, err = tls.X509KeyPair(cert, keyBytes); err != nil { + return os.RemoveAll(tmpPaths.Key) + } + + return os.Rename(tmpPaths.Key, k.paths.Key) +} + +// Read will read a TLS cert and key from the given paths +func (k *KeyReadWriter) Read() ([]byte, []byte, error) { + k.mu.Lock() + defer k.mu.Unlock() + keyBlock, err := k.readKey() + if err != nil { + return nil, nil, err + } + + if version, ok := keyBlock.Headers[versionHeader]; ok { + if versionInt, err := strconv.ParseUint(version, 10, 64); err == nil { + k.kekData.Version = versionInt + } + } + delete(keyBlock.Headers, versionHeader) + + if k.headersObj != nil { + newHeaders, err := k.headersObj.UnmarshalHeaders(keyBlock.Headers, k.kekData) + if err != nil { + return nil, nil, errors.Wrap(err, "unable to read TLS key headers") + } + k.headersObj = newHeaders + } + + keyBytes := pem.EncodeToMemory(keyBlock) + cert, err := ioutil.ReadFile(k.paths.Cert) + // The cert is written to a temporary file first, then the key, and then + // the cert gets renamed - so, if interrupted, it's possible to end up with + // a cert that only exists in the temporary location. 
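+	// (Recovery sketch: prefer the canonical cert; if it is missing or does not match the key,
+	// fall back to the temp cert written by an interrupted Write and move it into place.)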
+	switch {
+	case err == nil:
+		_, err = tls.X509KeyPair(cert, keyBytes)
+	case os.IsNotExist(err): // continue to try temp location
+		break
+	default:
+		return nil, nil, err
+	}
+
+	// either the cert doesn't exist, or it doesn't match the key - try the temp file, if it exists
+	if err != nil {
+		var tempErr error
+		tmpPaths := k.genTempPaths()
+		cert, tempErr = ioutil.ReadFile(tmpPaths.Cert)
+		if tempErr != nil {
+			return nil, nil, err // return the original error
+		}
+		if _, tempErr := tls.X509KeyPair(cert, keyBytes); tempErr != nil {
+			os.RemoveAll(tmpPaths.Cert) // nope, it doesn't match either - remove and return the original error
+			return nil, nil, err
+		}
+		os.Rename(tmpPaths.Cert, k.paths.Cert) // try to move the temp cert back to the regular location
+
+	}
+
+	return cert, keyBytes, nil
+}
+
+// ViewAndRotateKEK re-encrypts the key with a new KEK
+func (k *KeyReadWriter) ViewAndRotateKEK(cb func(KEKData, PEMKeyHeaders) (KEKData, PEMKeyHeaders, error)) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	updatedKEK, updatedHeaderObj, err := cb(k.kekData, k.headersObj)
+	if err != nil {
+		return err
+	}
+
+	keyBlock, err := k.readKey()
+	if err != nil {
+		return err
+	}
+
+	return k.writeKey(keyBlock, updatedKEK, updatedHeaderObj)
+}
+
+// ViewAndUpdateHeaders updates the header manager, and updates any headers on the existing key
+func (k *KeyReadWriter) ViewAndUpdateHeaders(cb func(PEMKeyHeaders) (PEMKeyHeaders, error)) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	pkh, err := cb(k.headersObj)
+	if err != nil {
+		return err
+	}
+
+	keyBlock, err := k.readKeyblock()
+	if err != nil {
+		return err
+	}
+
+	headers := make(map[string]string)
+	if pkh != nil {
+		var err error
+		headers, err = pkh.MarshalHeaders(k.kekData)
+		if err != nil {
+			return err
+		}
+	}
+	// we WANT to keep the original encryption headers
+	for key, value := range keyBlock.Headers {
+		normalizedKey := strings.TrimSpace(strings.ToLower(key))
+		if normalizedKey == "proc-type" || normalizedKey == "dek-info" {
+			headers[key] = value
+		}
+	}
+	headers[versionHeader] = strconv.FormatUint(k.kekData.Version, 10)
+	keyBlock.Headers = headers
+
+	if err = ioutils.AtomicWriteFile(k.paths.Key, pem.EncodeToMemory(keyBlock), keyPerms); err != nil {
+		return err
+	}
+	k.headersObj = pkh
+	return nil
+}
+
+// GetCurrentState returns the current KEK data, including version
+func (k *KeyReadWriter) GetCurrentState() (PEMKeyHeaders, KEKData) {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+	return k.headersObj, k.kekData
+}
+
+// Write attempts to write a cert and key to disk. This can also optionally update
+// the KEK while writing, if an updated KEK is provided. If the pointer to the
+// updated KEK is nil, then we don't update. If the updated KEK itself is nil,
+// then we update the KEK to be nil (data should be unencrypted).
+func (k *KeyReadWriter) Write(certBytes, plaintextKeyBytes []byte, kekData *KEKData) error {
+	k.mu.Lock()
+	defer k.mu.Unlock()
+
+	// current assumption is that the cert and key will be in the same directory
+	if err := os.MkdirAll(filepath.Dir(k.paths.Key), 0755); err != nil {
+		return err
+	}
+
+	// Ensure that we will have a keypair on disk at all times by writing the cert to a
+	// temp path first. This is because we want to have only a single copy of the key
+	// for rotation and header modification.
+ tmpPaths := k.genTempPaths() + if err := ioutils.AtomicWriteFile(tmpPaths.Cert, certBytes, certPerms); err != nil { + return err + } + + keyBlock, _ := pem.Decode(plaintextKeyBytes) + if keyBlock == nil { + return errors.New("invalid PEM-encoded private key") + } + + if kekData == nil { + kekData = &k.kekData + } + pkh := k.headersObj + if k.headersObj != nil { + pkh = k.headersObj.UpdateKEK(k.kekData, *kekData) + } + + if err := k.writeKey(keyBlock, *kekData, pkh); err != nil { + return err + } + return os.Rename(tmpPaths.Cert, k.paths.Cert) +} + +func (k *KeyReadWriter) genTempPaths() CertPaths { + return CertPaths{ + Key: filepath.Join(filepath.Dir(k.paths.Key), "."+filepath.Base(k.paths.Key)), + Cert: filepath.Join(filepath.Dir(k.paths.Cert), "."+filepath.Base(k.paths.Cert)), + } +} + +// Target returns a string representation of this KeyReadWriter, namely where +// it is writing to +func (k *KeyReadWriter) Target() string { + return k.paths.Cert +} + +func (k *KeyReadWriter) readKeyblock() (*pem.Block, error) { + key, err := ioutil.ReadFile(k.paths.Key) + if err != nil { + return nil, err + } + + // Decode the PEM private key + keyBlock, _ := pem.Decode(key) + if keyBlock == nil { + return nil, errors.New("invalid PEM-encoded private key") + } + + return keyBlock, nil +} + +// readKey returns the decrypted key pem bytes, and enforces the KEK if applicable +// (writes it back with the correct encryption if it is not correctly encrypted) +func (k *KeyReadWriter) readKey() (*pem.Block, error) { + keyBlock, err := k.readKeyblock() + if err != nil { + return nil, err + } + + if !keyutils.IsEncryptedPEMBlock(keyBlock) { + return keyBlock, nil + } + + // If it's encrypted, we can't read without a passphrase (we're assuming + // empty passphrases are invalid) + if k.kekData.KEK == nil { + return nil, ErrInvalidKEK{Wrapped: x509.IncorrectPasswordError} + } + + derBytes, err := k.keyFormatter.DecryptPEMBlock(keyBlock, k.kekData.KEK) + if err == keyutils.ErrFIPSUnsupportedKeyFormat { + return nil, err + } else if err != nil { + return nil, ErrInvalidKEK{Wrapped: err} + } + + // change header only if its pkcs8 + if keyBlock.Type == "ENCRYPTED PRIVATE KEY" { + keyBlock.Type = "PRIVATE KEY" + } + + // remove encryption PEM headers + headers := make(map[string]string) + mergePEMHeaders(headers, keyBlock.Headers) + + return &pem.Block{ + Type: keyBlock.Type, // the key type doesn't change + Bytes: derBytes, + Headers: headers, + }, nil +} + +// writeKey takes an unencrypted keyblock and, if the kek is not nil, encrypts it before +// writing it to disk. If the kek is nil, writes it to disk unencrypted. 
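+// For illustration, an encrypted PKCS#1 key written this way ends up with PEM
+// headers roughly like the following (values are examples only, not real output):
+//
+//	Proc-Type: 4,ENCRYPTED
+//	DEK-Info: AES-256-CBC,8A1DF66FD881...
+//	kek-version: 2
+//	<plus whatever headers the PEMKeyHeaders object marshals>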
+func (k *KeyReadWriter) writeKey(keyBlock *pem.Block, kekData KEKData, pkh PEMKeyHeaders) error {
+	if kekData.KEK != nil {
+		encryptedPEMBlock, err := k.keyFormatter.EncryptPEMBlock(keyBlock.Bytes, kekData.KEK)
+		if err != nil {
+			return err
+		}
+		if !keyutils.IsEncryptedPEMBlock(encryptedPEMBlock) {
+			return errors.New("unable to encrypt key - invalid PEM file produced")
+		}
+		keyBlock = encryptedPEMBlock
+	}
+
+	if pkh != nil {
+		headers, err := pkh.MarshalHeaders(kekData)
+		if err != nil {
+			return err
+		}
+		mergePEMHeaders(keyBlock.Headers, headers)
+	}
+	keyBlock.Headers[versionHeader] = strconv.FormatUint(kekData.Version, 10)
+
+	if err := ioutils.AtomicWriteFile(k.paths.Key, pem.EncodeToMemory(keyBlock), keyPerms); err != nil {
+		return err
+	}
+	k.kekData = kekData
+	k.headersObj = pkh
+	return nil
+}
+
+// DowngradeKey converts the PKCS#8 key to PKCS#1 format and saves it
+func (k *KeyReadWriter) DowngradeKey() error {
+	_, key, err := k.Read()
+	if err != nil {
+		return err
+	}
+
+	oldBlock, _ := pem.Decode(key)
+	if oldBlock == nil {
+		return errors.New("invalid PEM-encoded private key")
+	}
+
+	// stop if the key is already downgraded to pkcs1
+	if !keyutils.IsPKCS8(oldBlock.Bytes) {
+		return errors.New("key is already downgraded to PKCS#1")
+	}
+
+	eckey, err := pkcs8.ConvertToECPrivateKeyPEM(key)
+	if err != nil {
+		return err
+	}
+
+	newBlock, _ := pem.Decode(eckey)
+	if newBlock == nil {
+		return errors.New("invalid PEM-encoded private key")
+	}
+
+	if k.kekData.KEK != nil {
+		newBlock, err = k.keyFormatter.EncryptPEMBlock(newBlock.Bytes, k.kekData.KEK)
+		if err != nil {
+			return err
+		}
+	}
+
+	// add kek-version header back to the new key
+	newBlock.Headers[versionHeader] = strconv.FormatUint(k.kekData.Version, 10)
+	mergePEMHeaders(newBlock.Headers, oldBlock.Headers)
+
+	// do not use krw.Write as it will convert the key to pkcs8
+	return ioutils.AtomicWriteFile(k.paths.Key, pem.EncodeToMemory(newBlock), keyPerms)
+}
+
+// merges one set of PEM headers onto another, except for the key encryption values
+// "proc-type" and "dek-info"
+func mergePEMHeaders(original, newSet map[string]string) {
+	for key, value := range newSet {
+		normalizedKey := strings.TrimSpace(strings.ToLower(key))
+		if normalizedKey != "proc-type" && normalizedKey != "dek-info" {
+			original[key] = value
+		}
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/keyutils/keyutils.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/keyutils/keyutils.go
new file mode 100644
index 00000000..ea45aab7
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/keyutils/keyutils.go
@@ -0,0 +1,101 @@
+// Package keyutils serves as a utility to parse, encrypt and decrypt
+// PKCS#1 and PKCS#8 private keys based on current FIPS mode status,
+// supporting only EC type keys. It always allows PKCS#8 private keys
+// and disallows PKCS#1 private keys in FIPS mode.
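+//
+// A minimal usage sketch (illustrative only; error handling elided):
+//
+//	block, _ := pem.Decode(pemBytes)
+//	if keyutils.IsEncryptedPEMBlock(block) {
+//		der, _ := keyutils.Default.DecryptPEMBlock(block, passphrase)
+//		_ = der // plaintext DER bytes
+//	}
+//	signer, _ := keyutils.FIPS.ParsePrivateKeyPEMWithPassword(pemBytes, passphrase)
+//	_ = signer // fails with ErrFIPSUnsupportedKeyFormat for PKCS#1 input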
+package keyutils
+
+import (
+	"crypto"
+	cryptorand "crypto/rand"
+	"crypto/x509"
+	"encoding/pem"
+	"errors"
+
+	"github.com/cloudflare/cfssl/helpers"
+	"github.com/docker/swarmkit/ca/pkcs8"
+)
+
+// Formatter provides an interface for converting keys to the right format, and encrypting and decrypting keys
+type Formatter interface {
+	ParsePrivateKeyPEMWithPassword(pemBytes, password []byte) (crypto.Signer, error)
+	DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error)
+	EncryptPEMBlock(data, password []byte) (*pem.Block, error)
+}
+
+// ErrFIPSUnsupportedKeyFormat is returned when encryption/decryption operations are attempted on a PKCS1 key
+// when FIPS mode is enabled.
+var ErrFIPSUnsupportedKeyFormat = errors.New("unsupported key format due to FIPS compliance")
+
+// Default is the default key util, where FIPS is not required
+var Default Formatter = &utils{fips: false}
+
+// FIPS is the key utility which enforces FIPS compliance
+var FIPS Formatter = &utils{fips: true}
+
+type utils struct {
+	fips bool
+}
+
+// IsPKCS8 returns true if the provided DER bytes are an encrypted or unencrypted PKCS#8 key
+func IsPKCS8(derBytes []byte) bool {
+	if _, err := x509.ParsePKCS8PrivateKey(derBytes); err == nil {
+		return true
+	}
+
+	return pkcs8.IsEncryptedPEMBlock(&pem.Block{
+		Type:    "PRIVATE KEY",
+		Headers: nil,
+		Bytes:   derBytes,
+	})
+}
+
+// IsEncryptedPEMBlock checks if a PKCS#1 or PKCS#8 PEM-block is encrypted or not
+func IsEncryptedPEMBlock(block *pem.Block) bool {
+	return pkcs8.IsEncryptedPEMBlock(block) || x509.IsEncryptedPEMBlock(block)
+}
+
+// ParsePrivateKeyPEMWithPassword parses an encrypted or an unencrypted PKCS#1 or PKCS#8 PEM to crypto.Signer.
+// It returns an error in FIPS mode if PKCS#1 PEM bytes are passed.
+func (u *utils) ParsePrivateKeyPEMWithPassword(pemBytes, password []byte) (crypto.Signer, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("Could not parse PEM")
+	}
+
+	if IsPKCS8(block.Bytes) {
+		return pkcs8.ParsePrivateKeyPEMWithPassword(pemBytes, password)
+	} else if u.fips {
+		return nil, ErrFIPSUnsupportedKeyFormat
+	}
+
+	return helpers.ParsePrivateKeyPEMWithPassword(pemBytes, password)
+}
+
+// DecryptPEMBlock requires a PKCS#1 or PKCS#8 PEM block and a password to decrypt and return the unencrypted DER bytes.
+// It returns an error in FIPS mode when a PKCS#1 PEM block is passed.
+func (u *utils) DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) {
+	if IsPKCS8(block.Bytes) {
+		return pkcs8.DecryptPEMBlock(block, password)
+	} else if u.fips {
+		return nil, ErrFIPSUnsupportedKeyFormat
+	}
+
+	return x509.DecryptPEMBlock(block, password)
+}
+
+// EncryptPEMBlock takes DER-format bytes and a password and returns an encrypted PKCS#1 or PKCS#8 PEM block.
+// It returns an error in FIPS mode when PKCS#1 PEM bytes are passed.
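+// A round-trip sketch (illustrative only): EncryptPEMBlock(der, pw) yields a
+// *pem.Block that DecryptPEMBlock(block, pw) turns back into the original der.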
+func (u *utils) EncryptPEMBlock(data, password []byte) (*pem.Block, error) {
+	if IsPKCS8(data) {
+		return pkcs8.EncryptPEMBlock(data, password)
+	} else if u.fips {
+		return nil, ErrFIPSUnsupportedKeyFormat
+	}
+
+	cipherType := x509.PEMCipherAES256
+	return x509.EncryptPEMBlock(cryptorand.Reader,
+		"EC PRIVATE KEY",
+		data,
+		password,
+		cipherType)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/pkcs8/pkcs8.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/pkcs8/pkcs8.go
new file mode 100644
index 00000000..223fc99d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/pkcs8/pkcs8.go
@@ -0,0 +1,311 @@
+// Package pkcs8 implements functions to encrypt, decrypt, parse and convert
+// EC private keys to PKCS#8 format. This package is hard-forked from
+// https://github.com/youmark/pkcs8, with function signatures modified to match
+// the signatures of crypto/x509 and cloudflare/cfssl/helpers to simplify
+// package swapping. The license for the original package is as follows:
+//
+// The MIT License (MIT)
+//
+// Copyright (c) 2014 youmark
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
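+//
+// For reference, the encrypted keys handled here use the PKCS#5 v2.0 (PBES2)
+// envelope: PBKDF2 key derivation (HMAC-SHA1) feeding AES-256-CBC, mirroring
+// the ASN.1 structures declared below.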
+package pkcs8
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/asn1"
+	"encoding/pem"
+	"errors"
+
+	"github.com/cloudflare/cfssl/helpers/derhelpers"
+	"golang.org/x/crypto/pbkdf2"
+)
+
+// Copy from crypto/x509
+var (
+	oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
+)
+
+// OIDs for encrypted PKCS#8 (PKCS#5 v2.0 / PBES2)
+var (
+	oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12}
+	oidPBES2       = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13}
+	oidAES256CBC   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
+)
+
+type ecPrivateKey struct {
+	Version       int
+	PrivateKey    []byte
+	NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+	PublicKey     asn1.BitString        `asn1:"optional,explicit,tag:1"`
+}
+
+type privateKeyInfo struct {
+	Version             int
+	PrivateKeyAlgorithm []asn1.ObjectIdentifier
+	PrivateKey          []byte
+}
+
+// Encrypted PKCS8
+type pbkdf2Params struct {
+	Salt           []byte
+	IterationCount int
+}
+
+type pbkdf2Algorithms struct {
+	IDPBKDF2     asn1.ObjectIdentifier
+	PBKDF2Params pbkdf2Params
+}
+
+type pbkdf2Encs struct {
+	EncryAlgo asn1.ObjectIdentifier
+	IV        []byte
+}
+
+type pbes2Params struct {
+	KeyDerivationFunc pbkdf2Algorithms
+	EncryptionScheme  pbkdf2Encs
+}
+
+type pbes2Algorithms struct {
+	IDPBES2     asn1.ObjectIdentifier
+	PBES2Params pbes2Params
+}
+
+type encryptedPrivateKeyInfo struct {
+	EncryptionAlgorithm pbes2Algorithms
+	EncryptedData       []byte
+}
+
+// ParsePrivateKeyPEMWithPassword parses an encrypted or an unencrypted PKCS#8 PEM to a crypto.Signer
+func ParsePrivateKeyPEMWithPassword(pemBytes, password []byte) (crypto.Signer, error) {
+	block, _ := pem.Decode(pemBytes)
+	if block == nil {
+		return nil, errors.New("invalid pem file")
+	}
+
+	var (
+		der []byte
+		err error
+	)
+	der = block.Bytes
+
+	if ok := IsEncryptedPEMBlock(block); ok {
+		der, err = DecryptPEMBlock(block, password)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return derhelpers.ParsePrivateKeyDER(der)
+}
+
+// IsEncryptedPEMBlock checks if a PKCS#8 PEM-block is encrypted or not
+func IsEncryptedPEMBlock(block *pem.Block) bool {
+	der := block.Bytes
+
+	var privKey encryptedPrivateKeyInfo
+	if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+		return false
+	}
+
+	return true
+}
+
+// DecryptPEMBlock requires a PKCS#8 PEM block and a password to decrypt and return the unencrypted DER bytes
+func DecryptPEMBlock(block *pem.Block, password []byte) ([]byte, error) {
+	der := block.Bytes
+
+	var privKey encryptedPrivateKeyInfo
+	if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+		return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported")
+	}
+
+	if !privKey.EncryptionAlgorithm.IDPBES2.Equal(oidPBES2) {
+		return nil, errors.New("pkcs8: only PBES2 supported")
+	}
+
+	if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IDPBKDF2.Equal(oidPKCS5PBKDF2) {
+		return nil, errors.New("pkcs8: only PBKDF2 supported")
+	}
+
+	encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme
+	kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params
+
+	switch {
+	case encParam.EncryAlgo.Equal(oidAES256CBC):
+		iv := encParam.IV
+		salt := kdfParam.Salt
+		iter := kdfParam.IterationCount
+
+		encryptedKey := privKey.EncryptedData
+		symkey := pbkdf2.Key(password, salt, iter, 32, sha1.New)
+		block, err := aes.NewCipher(symkey)
+		if err != nil {
+			return nil, err
+		}
+		mode := cipher.NewCBCDecrypter(block, iv)
+		mode.CryptBlocks(encryptedKey, encryptedKey)
+
+		if _, err := 
derhelpers.ParsePrivateKeyDER(encryptedKey); err != nil { + return nil, errors.New("pkcs8: incorrect password") + } + + // Remove padding from key as it might be used to encode to memory as pem + keyLen := len(encryptedKey) + padLen := int(encryptedKey[keyLen-1]) + if padLen > keyLen || padLen > aes.BlockSize { + return nil, errors.New("pkcs8: invalid padding size") + } + encryptedKey = encryptedKey[:keyLen-padLen] + + return encryptedKey, nil + default: + return nil, errors.New("pkcs8: only AES-256-CBC supported") + } +} + +func encryptPrivateKey(pkey, password []byte) ([]byte, error) { + // Calculate key from password based on PKCS5 algorithm + // Use 8 byte salt, 16 byte IV, and 2048 iteration + iter := 2048 + salt := make([]byte, 8) + iv := make([]byte, 16) + + if _, err := rand.Reader.Read(salt); err != nil { + return nil, err + } + + if _, err := rand.Reader.Read(iv); err != nil { + return nil, err + } + + key := pbkdf2.Key(password, salt, iter, 32, sha1.New) + + // Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme + n := len(pkey) + padLen := aes.BlockSize - n%aes.BlockSize + if padLen > 0 { + padValue := []byte{byte(padLen)} + padding := bytes.Repeat(padValue, padLen) + pkey = append(pkey, padding...) + } + + encryptedKey := make([]byte, len(pkey)) + block, err := aes.NewCipher(key) + if err != nil { + return nil, err + } + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(encryptedKey, pkey) + + pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter}} + pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv} + pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}} + + encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey} + return asn1.Marshal(encryptedPkey) +} + +// EncryptPEMBlock takes DER-format bytes and password to return an encrypted PKCS#8 PEM-block +func EncryptPEMBlock(data, password []byte) (*pem.Block, error) { + encryptedBytes, err := encryptPrivateKey(data, password) + if err != nil { + return nil, err + } + + return &pem.Block{ + Type: "ENCRYPTED PRIVATE KEY", + Headers: map[string]string{}, + Bytes: encryptedBytes, + }, nil +} + +// ConvertECPrivateKeyPEM takes an EC Private Key as input and returns PKCS#8 version of it +func ConvertECPrivateKeyPEM(inPEM []byte) ([]byte, error) { + block, _ := pem.Decode(inPEM) + if block == nil { + return nil, errors.New("invalid pem bytes") + } + + var ecPrivKey ecPrivateKey + if _, err := asn1.Unmarshal(block.Bytes, &ecPrivKey); err != nil { + return nil, errors.New("invalid ec private key") + } + + var pkey privateKeyInfo + pkey.Version = 0 + pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2) + pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA + pkey.PrivateKeyAlgorithm[1] = ecPrivKey.NamedCurveOID + + // remove curve oid from private bytes as it is already mentioned in algorithm + ecPrivKey.NamedCurveOID = nil + + privatekey, err := asn1.Marshal(ecPrivKey) + if err != nil { + return nil, err + } + pkey.PrivateKey = privatekey + + der, err := asn1.Marshal(pkey) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "PRIVATE KEY", + Bytes: der, + }), nil +} + +// ConvertToECPrivateKeyPEM takes an unencrypted PKCS#8 PEM and converts it to +// EC Private Key +func ConvertToECPrivateKeyPEM(inPEM []byte) ([]byte, error) { + block, _ := pem.Decode(inPEM) + if block == nil { + return nil, errors.New("invalid pem bytes") + } + + var pkey privateKeyInfo + if _, err := asn1.Unmarshal(block.Bytes, &pkey); err != nil { + return nil, 
errors.New("invalid pkcs8 key") + } + + var ecPrivKey ecPrivateKey + if _, err := asn1.Unmarshal(pkey.PrivateKey, &ecPrivKey); err != nil { + return nil, errors.New("invalid private key") + } + + ecPrivKey.NamedCurveOID = pkey.PrivateKeyAlgorithm[1] + key, err := asn1.Marshal(ecPrivKey) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: key, + }), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/reconciler.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/reconciler.go new file mode 100644 index 00000000..d906475d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/reconciler.go @@ -0,0 +1,259 @@ +package ca + +import ( + "bytes" + "context" + "fmt" + "reflect" + "sync" + "time" + + "github.com/cloudflare/cfssl/helpers" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/equality" + "github.com/docker/swarmkit/log" + "github.com/docker/swarmkit/manager/state/store" + "github.com/pkg/errors" +) + +// IssuanceStateRotateMaxBatchSize is the maximum number of nodes we'll tell to rotate their certificates in any given update +const IssuanceStateRotateMaxBatchSize = 30 + +func hasIssuer(n *api.Node, info *IssuerInfo) bool { + if n.Description == nil || n.Description.TLSInfo == nil { + return false + } + return bytes.Equal(info.Subject, n.Description.TLSInfo.CertIssuerSubject) && bytes.Equal(info.PublicKey, n.Description.TLSInfo.CertIssuerPublicKey) +} + +var errRootRotationChanged = errors.New("target root rotation has changed") + +// rootRotationReconciler keeps track of all the nodes in the store so that we can determine which ones need reconciliation when nodes are updated +// or the root CA is updated. This is meant to be used with watches on nodes and the cluster, and provides functions to be called when the +// cluster's RootCA has changed and when a node is added, updated, or removed. 
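+// Roughly: UpdateRootCA snapshots which nodes still use the old issuer, a background
+// loop batches IssuanceStateRotate updates to at most IssuanceStateRotateMaxBatchSize
+// nodes per interval, and once no unconverged nodes remain it finishes the rotation
+// via finishRootRotation.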
+type rootRotationReconciler struct {
+	mu                  sync.Mutex
+	clusterID           string
+	batchUpdateInterval time.Duration
+	ctx                 context.Context
+	store               *store.MemoryStore
+
+	currentRootCA    *api.RootCA
+	currentIssuer    IssuerInfo
+	unconvergedNodes map[string]*api.Node
+
+	wg     sync.WaitGroup
+	cancel func()
+}
+
+// IssuerFromAPIRootCA returns the desired issuer given an API root CA object
+func IssuerFromAPIRootCA(rootCA *api.RootCA) (*IssuerInfo, error) {
+	wantedIssuer := rootCA.CACert
+	if rootCA.RootRotation != nil {
+		wantedIssuer = rootCA.RootRotation.CACert
+	}
+	issuerCerts, err := helpers.ParseCertificatesPEM(wantedIssuer)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid certificate in cluster root CA object")
+	}
+	if len(issuerCerts) == 0 {
+		return nil, errors.New("invalid certificate in cluster root CA object")
+	}
+	return &IssuerInfo{
+		Subject:   issuerCerts[0].RawSubject,
+		PublicKey: issuerCerts[0].RawSubjectPublicKeyInfo,
+	}, nil
+}
+
+// assumption: UpdateRootCA will never be called with a `nil` root CA because the caller will be acting in response to
+// a store update event
+func (r *rootRotationReconciler) UpdateRootCA(newRootCA *api.RootCA) {
+	issuerInfo, err := IssuerFromAPIRootCA(newRootCA)
+	if err != nil {
+		log.G(r.ctx).WithError(err).Error("unable to process the updated root CA")
+		return
+	}
+
+	var (
+		shouldStartNewLoop, waitForPrevLoop bool
+		loopCtx                             context.Context
+	)
+	r.mu.Lock()
+	defer func() {
+		r.mu.Unlock()
+		if shouldStartNewLoop {
+			if waitForPrevLoop {
+				r.wg.Wait()
+			}
+			r.wg.Add(1)
+			go r.runReconcilerLoop(loopCtx, newRootCA)
+		}
+	}()
+
+	// check if the issuer has changed, first
+	if reflect.DeepEqual(&r.currentIssuer, issuerInfo) {
+		r.currentRootCA = newRootCA
+		return
+	}
+	// If the issuer has changed, iterate through all the nodes to figure out which ones need rotation
+	if newRootCA.RootRotation != nil {
+		var nodes []*api.Node
+		r.store.View(func(tx store.ReadTx) {
+			nodes, err = store.FindNodes(tx, store.All)
+		})
+		if err != nil {
+			log.G(r.ctx).WithError(err).Error("unable to list nodes, so unable to process the current root CA")
+			return
+		}
+
+		// from here on out, there will be no more errors that cause us to have to abandon updating the Root CA,
+		// so we can start making changes to r's fields
+		r.unconvergedNodes = make(map[string]*api.Node)
+		for _, n := range nodes {
+			if !hasIssuer(n, issuerInfo) {
+				r.unconvergedNodes[n.ID] = n
+			}
+		}
+		shouldStartNewLoop = true
+		if r.cancel != nil { // there's already a loop going, so cancel it
+			r.cancel()
+			waitForPrevLoop = true
+		}
+		loopCtx, r.cancel = context.WithCancel(r.ctx)
+	} else {
+		r.unconvergedNodes = nil
+	}
+	r.currentRootCA = newRootCA
+	r.currentIssuer = *issuerInfo
+}
+
+// assumption: UpdateNode will never be called with a `nil` node because the caller will be acting in response to
+// a store update event
+func (r *rootRotationReconciler) UpdateNode(node *api.Node) {
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	// if we're not in the middle of a root rotation ignore the update
+	if r.currentRootCA == nil || r.currentRootCA.RootRotation == nil {
+		return
+	}
+	if hasIssuer(node, &r.currentIssuer) {
+		delete(r.unconvergedNodes, node.ID)
+	} else {
+		r.unconvergedNodes[node.ID] = node
+	}
+}
+
+// assumption: DeleteNode will never be called with a `nil` node because the caller will be acting in response to
+// a store update event
+func (r *rootRotationReconciler) DeleteNode(node *api.Node) {
+	r.mu.Lock()
+	delete(r.unconvergedNodes, node.ID)
+	r.mu.Unlock()
+}
+
+func 
(r *rootRotationReconciler) runReconcilerLoop(ctx context.Context, loopRootCA *api.RootCA) { + defer r.wg.Done() + for { + r.mu.Lock() + if len(r.unconvergedNodes) == 0 { + r.mu.Unlock() + + err := r.store.Update(func(tx store.Tx) error { + return r.finishRootRotation(tx, loopRootCA) + }) + if err == nil { + log.G(r.ctx).Info("completed root rotation") + return + } + log.G(r.ctx).WithError(err).Error("could not complete root rotation") + if err == errRootRotationChanged { + // if the root rotation has changed, this loop will be cancelled anyway, so may as well abort early + return + } + } else { + var toUpdate []*api.Node + for _, n := range r.unconvergedNodes { + iState := n.Certificate.Status.State + if iState != api.IssuanceStateRenew && iState != api.IssuanceStatePending && iState != api.IssuanceStateRotate { + n = n.Copy() + n.Certificate.Status.State = api.IssuanceStateRotate + toUpdate = append(toUpdate, n) + if len(toUpdate) >= IssuanceStateRotateMaxBatchSize { + break + } + } + } + r.mu.Unlock() + + if err := r.batchUpdateNodes(toUpdate); err != nil { + log.G(r.ctx).WithError(err).Errorf("store error when trying to batch update %d nodes to request certificate rotation", len(toUpdate)) + } + } + + select { + case <-ctx.Done(): + return + case <-time.After(r.batchUpdateInterval): + } + } +} + +// This function assumes that the expected root CA has root rotation. This is intended to be used by +// `reconcileNodeRootsAndCerts`, which uses the root CA from the `lastSeenClusterRootCA`, and checks +// that it has a root rotation before calling this function. +func (r *rootRotationReconciler) finishRootRotation(tx store.Tx, expectedRootCA *api.RootCA) error { + cluster := store.GetCluster(tx, r.clusterID) + if cluster == nil { + return fmt.Errorf("unable to get cluster %s", r.clusterID) + } + + // If the RootCA object has changed (because another root rotation was started or because some other node + // had finished the root rotation), we cannot finish the root rotation that we were working on. + if !equality.RootCAEqualStable(expectedRootCA, &cluster.RootCA) { + return errRootRotationChanged + } + + var signerCert []byte + if len(cluster.RootCA.RootRotation.CAKey) > 0 { + signerCert = cluster.RootCA.RootRotation.CACert + } + // we don't actually have to parse out the default node expiration from the cluster - we are just using + // the ca.RootCA object to generate new tokens and the digest + updatedRootCA, err := NewRootCA(cluster.RootCA.RootRotation.CACert, signerCert, cluster.RootCA.RootRotation.CAKey, + DefaultNodeCertExpiration, nil) + if err != nil { + return errors.Wrap(err, "invalid cluster root rotation object") + } + cluster.RootCA = api.RootCA{ + CACert: cluster.RootCA.RootRotation.CACert, + CAKey: cluster.RootCA.RootRotation.CAKey, + CACertHash: updatedRootCA.Digest.String(), + JoinTokens: api.JoinTokens{ + Worker: GenerateJoinToken(&updatedRootCA, cluster.FIPS), + Manager: GenerateJoinToken(&updatedRootCA, cluster.FIPS), + }, + LastForcedRotation: cluster.RootCA.LastForcedRotation, + } + return store.UpdateCluster(tx, cluster) +} + +func (r *rootRotationReconciler) batchUpdateNodes(toUpdate []*api.Node) error { + if len(toUpdate) == 0 { + return nil + } + err := r.store.Batch(func(batch *store.Batch) error { + // Directly update the nodes rather than get + update, and ignore version errors. Since + // `rootRotationReconciler` should be hooked up to all node update/delete/create events, we should have + // close to the latest versions of all the nodes. 
If not, the node will be updated later and the
+		// next batch of updates should catch it.
+		for _, n := range toUpdate {
+			if err := batch.Update(func(tx store.Tx) error {
+				return store.UpdateNode(tx, n)
+			}); err != nil && err != store.ErrSequenceConflict {
+				log.G(r.ctx).WithError(err).Errorf("unable to update node %s to request a certificate rotation", n.ID)
+			}
+		}
+		return nil
+	})
+	return err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/renewer.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/renewer.go
new file mode 100644
index 00000000..e5d165f6
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/renewer.go
@@ -0,0 +1,168 @@
+package ca
+
+import (
+	"context"
+	"sync"
+	"time"
+
+	"github.com/docker/go-events"
+	"github.com/docker/swarmkit/connectionbroker"
+	"github.com/docker/swarmkit/log"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// RenewTLSExponentialBackoff sets the exponential backoff when trying to renew TLS certificates that have expired
+var RenewTLSExponentialBackoff = events.ExponentialBackoffConfig{
+	Base:   time.Second * 5,
+	Factor: time.Second * 5,
+	Max:    1 * time.Hour,
+}
+
+// TLSRenewer handles renewing TLS certificates, either automatically or upon
+// request.
+type TLSRenewer struct {
+	mu           sync.Mutex
+	s            *SecurityConfig
+	connBroker   *connectionbroker.Broker
+	renew        chan struct{}
+	expectedRole string
+	rootPaths    CertPaths
+}
+
+// NewTLSRenewer creates a new TLS renewer. It must be started with Start.
+func NewTLSRenewer(s *SecurityConfig, connBroker *connectionbroker.Broker, rootPaths CertPaths) *TLSRenewer {
+	return &TLSRenewer{
+		s:          s,
+		connBroker: connBroker,
+		renew:      make(chan struct{}, 1),
+		rootPaths:  rootPaths,
+	}
+}
+
+// SetExpectedRole sets the expected role. If a renewal is forced, and the role
+// doesn't match this expectation, renewal will be retried with exponential
+// backoff until it does match.
+func (t *TLSRenewer) SetExpectedRole(role string) {
+	t.mu.Lock()
+	t.expectedRole = role
+	t.mu.Unlock()
+}
+
+// Renew causes the TLSRenewer to renew the certificate (nearly) right away,
+// instead of waiting for the next automatic renewal.
+func (t *TLSRenewer) Renew() {
+	select {
+	case t.renew <- struct{}{}:
+	default:
+	}
+}
+
+// Start will continuously monitor for the necessity of renewing the local certificates, either by
+// issuing them locally if key-material is available, or requesting them from a remote CA.
+func (t *TLSRenewer) Start(ctx context.Context) <-chan CertificateUpdate {
+	updates := make(chan CertificateUpdate)
+
+	go func() {
+		var (
+			retry      time.Duration
+			forceRetry bool
+		)
+		expBackoff := events.NewExponentialBackoff(RenewTLSExponentialBackoff)
+		defer close(updates)
+		for {
+			ctx = log.WithModule(ctx, "tls")
+			log := log.G(ctx).WithFields(logrus.Fields{
+				"node.id":   t.s.ClientTLSCreds.NodeID(),
+				"node.role": t.s.ClientTLSCreds.Role(),
+			})
+			// Our starting default will be 5 minutes
+			retry = 5 * time.Minute
+
+			// Since the expiration of the certificate is managed remotely we should update our
+			// retry timer on every iteration of this loop.
+			// Retrieve the current certificate expiration information.
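+			// The delay computed below is either an exponential backoff (when the cert
+			// is expired, or a forced renewal has not yet succeeded) or a random point
+			// at 50-80% of the certificate's validity window.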
+			validFrom, validUntil, err := readCertValidity(t.s.KeyReader())
+			if err != nil {
+				// We failed to read the expiration, let's stick with the starting default
+				log.Errorf("failed to read the expiration of the TLS certificate in: %s", t.s.KeyReader().Target())
+
+				select {
+				case updates <- CertificateUpdate{Err: errors.New("failed to read certificate expiration")}:
+				case <-ctx.Done():
+					log.Info("shutting down certificate renewal routine")
+					return
+				}
+			} else {
+				// If we have an expired certificate, try to renew immediately, in the hope
+				// that this is temporary clock skew or that we can issue our own TLS certs.
+				if validUntil.Before(time.Now()) {
+					log.Warn("the current TLS certificate is expired, so an attempt to renew it will be made immediately")
+					// retry immediately(ish) with exponential backoff
+					retry = expBackoff.Proceed(nil)
+				} else if forceRetry {
+					// A forced renewal was requested, but did not succeed yet.
+					// retry immediately(ish) with exponential backoff
+					retry = expBackoff.Proceed(nil)
+				} else {
+					// Random retry time between 50% and 80% of the total time to expiration
+					retry = calculateRandomExpiry(validFrom, validUntil)
+				}
+			}
+
+			log.WithFields(logrus.Fields{
+				"time": time.Now().Add(retry),
+			}).Debugf("next certificate renewal scheduled for %v from now", retry)
+
+			select {
+			case <-time.After(retry):
+				log.Info("renewing certificate")
+			case <-t.renew:
+				forceRetry = true
+				log.Info("forced certificate renewal")
+
+				// Pause briefly before attempting the renewal,
+				// to give the CA a chance to reconcile the
+				// desired role.
+				select {
+				case <-time.After(500 * time.Millisecond):
+				case <-ctx.Done():
+					log.Info("shutting down certificate renewal routine")
+					return
+				}
+			case <-ctx.Done():
+				log.Info("shutting down certificate renewal routine")
+				return
+			}
+
+			// ignore errors - it will just try again later
+			var certUpdate CertificateUpdate
+			if err := RenewTLSConfigNow(ctx, t.s, t.connBroker, t.rootPaths); err != nil {
+				certUpdate.Err = err
+				expBackoff.Failure(nil, nil)
+			} else {
+				newRole := t.s.ClientTLSCreds.Role()
+				t.mu.Lock()
+				expectedRole := t.expectedRole
+				t.mu.Unlock()
+				if expectedRole != "" && expectedRole != newRole {
+					expBackoff.Failure(nil, nil)
+					continue
+				}
+
+				certUpdate.Role = newRole
+				expBackoff.Success(nil)
+				forceRetry = false
+			}
+
+			select {
+			case updates <- certUpdate:
+			case <-ctx.Done():
+				log.Info("shutting down certificate renewal routine")
+				return
+			}
+		}
+	}()
+
+	return updates
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/server.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/server.go
new file mode 100644
index 00000000..01959972
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/server.go
@@ -0,0 +1,917 @@
+package ca
+
+import (
+	"bytes"
+	"context"
+	"crypto/subtle"
+	"crypto/x509"
+	"sync"
+	"time"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/api/equality"
+	"github.com/docker/swarmkit/identity"
+	"github.com/docker/swarmkit/log"
+	"github.com/docker/swarmkit/manager/state/store"
+	gogotypes "github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+const (
+	defaultReconciliationRetryInterval = 10 * time.Second
+	defaultRootReconciliationInterval  = 3 * time.Second
+)
+
+// Server is the CA and NodeCA API gRPC server.
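+// It watches node and cluster events in the store, signs pending CSRs with either
+// the local root CA or a configured external CFSSL CA, and drives root CA rotation.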
+// TODO(aaronl): At some point we may want to have separate implementations of
+// CA, NodeCA, and other hypothetical future CA services. At the moment,
+// breaking it apart doesn't seem worth it.
+type Server struct {
+	mu                          sync.Mutex
+	wg                          sync.WaitGroup
+	ctx                         context.Context
+	cancel                      func()
+	store                       *store.MemoryStore
+	securityConfig              *SecurityConfig
+	clusterID                   string
+	localRootCA                 *RootCA
+	externalCA                  *ExternalCA
+	externalCAPool              *x509.CertPool
+	joinTokens                  *api.JoinTokens
+	reconciliationRetryInterval time.Duration
+
+	// pending is a map of nodes with pending certificate issuance or
+	// renewal. They are indexed by node ID.
+	pending map[string]*api.Node
+
+	// started is a channel which gets closed once the server is running
+	// and able to service RPCs.
+	started chan struct{}
+
+	// these are cached values to ensure we only update the security config when
+	// the cluster root CA and external CAs have changed - the cluster object
+	// can change for other reasons, and it would not be necessary to update
+	// the security config as a result
+	lastSeenClusterRootCA *api.RootCA
+	lastSeenExternalCAs   []*api.ExternalCA
+
+	// This mutex protects the components of the CA server used to issue new certificates
+	// (and any attributes used to update those components): `lastSeenClusterRootCA` and
+	// `lastSeenExternalCAs`, which are used to update `externalCA` and the `rootCA` object
+	// of the SecurityConfig
+	signingMu sync.Mutex
+
+	// lets us monitor and finish root rotations
+	rootReconciler                  *rootRotationReconciler
+	rootReconciliationRetryInterval time.Duration
+}
+
+// DefaultCAConfig returns the default CA Config, with a default expiration.
+func DefaultCAConfig() api.CAConfig {
+	return api.CAConfig{
+		NodeCertExpiry: gogotypes.DurationProto(DefaultNodeCertExpiration),
+	}
+}
+
+// NewServer creates a CA API server.
+func NewServer(store *store.MemoryStore, securityConfig *SecurityConfig) *Server {
+	return &Server{
+		store:                           store,
+		securityConfig:                  securityConfig,
+		localRootCA:                     securityConfig.RootCA(),
+		externalCA:                      NewExternalCA(nil, nil),
+		pending:                         make(map[string]*api.Node),
+		started:                         make(chan struct{}),
+		reconciliationRetryInterval:     defaultReconciliationRetryInterval,
+		rootReconciliationRetryInterval: defaultRootReconciliationInterval,
+		clusterID:                       securityConfig.ClientTLSCreds.Organization(),
+	}
+}
+
+// ExternalCA returns the current external CA - this is exposed to support unit testing only, and the external CA
+// should really be a private field
+func (s *Server) ExternalCA() *ExternalCA {
+	s.signingMu.Lock()
+	defer s.signingMu.Unlock()
+	return s.externalCA
+}
+
+// RootCA returns the current local root CA - this is exposed to support unit testing only, and the root CA
+// should really be a private field
+func (s *Server) RootCA() *RootCA {
+	s.signingMu.Lock()
+	defer s.signingMu.Unlock()
+	return s.localRootCA
+}
+
+// SetReconciliationRetryInterval changes the time interval between
+// reconciliation attempts. This function must be called before Run.
+func (s *Server) SetReconciliationRetryInterval(reconciliationRetryInterval time.Duration) {
+	s.reconciliationRetryInterval = reconciliationRetryInterval
+}
+
+// SetRootReconciliationInterval changes the time interval between root rotation
+// reconciliation attempts. This function must be called before Run.
+func (s *Server) SetRootReconciliationInterval(interval time.Duration) {
+	s.rootReconciliationRetryInterval = interval
+}
+
+// GetUnlockKey is responsible for returning the current unlock key used for encrypting TLS private keys and
+// other at rest data. Access to this RPC call should only be allowed via mutual TLS from managers.
+func (s *Server) GetUnlockKey(ctx context.Context, request *api.GetUnlockKeyRequest) (*api.GetUnlockKeyResponse, error) {
+	// This directly queries the store, rather than storing the unlock key and version on
+	// the `Server` object and updating it when `updateCluster` is called, because we need this
+	// API to return the latest version of the key. Otherwise, there might be a slight delay
+	// between when the cluster gets updated, and when this function returns the latest key.
+	// This delay is currently unacceptable because this RPC call is the only way, after a
+	// cluster update, to get the actual value of the unlock key, and we don't want to return
+	// a cached value.
+	resp := api.GetUnlockKeyResponse{}
+	s.store.View(func(tx store.ReadTx) {
+		cluster := store.GetCluster(tx, s.clusterID)
+		resp.Version = cluster.Meta.Version
+		if cluster.Spec.EncryptionConfig.AutoLockManagers {
+			for _, encryptionKey := range cluster.UnlockKeys {
+				if encryptionKey.Subsystem == ManagerRole {
+					resp.UnlockKey = encryptionKey.Key
+					return
+				}
+			}
+		}
+	})
+
+	return &resp, nil
+}
+
+// NodeCertificateStatus returns the current issuance status of an issuance request identified by the nodeID
+func (s *Server) NodeCertificateStatus(ctx context.Context, request *api.NodeCertificateStatusRequest) (*api.NodeCertificateStatusResponse, error) {
+	if request.NodeID == "" {
+		return nil, status.Errorf(codes.InvalidArgument, codes.InvalidArgument.String())
+	}
+
+	serverCtx, err := s.isRunningLocked()
+	if err != nil {
+		return nil, err
+	}
+
+	var node *api.Node
+
+	event := api.EventUpdateNode{
+		Node:   &api.Node{ID: request.NodeID},
+		Checks: []api.NodeCheckFunc{api.NodeCheckID},
+	}
+
+	// Retrieve the current value of the certificate with this token, and create a watcher
+	updates, cancel, err := store.ViewAndWatch(
+		s.store,
+		func(tx store.ReadTx) error {
+			node = store.GetNode(tx, request.NodeID)
+			return nil
+		},
+		event,
+	)
+	if err != nil {
+		return nil, err
+	}
+	defer cancel()
+
+	// This node ID doesn't exist
+	if node == nil {
+		return nil, status.Errorf(codes.NotFound, codes.NotFound.String())
+	}
+
+	log.G(ctx).WithFields(logrus.Fields{
+		"node.id": node.ID,
+		"status":  node.Certificate.Status,
+		"method":  "NodeCertificateStatus",
+	})
+
+	// If this certificate has a final state, return it immediately (both pending and renew are transition states)
+	if isFinalState(node.Certificate.Status) {
+		return &api.NodeCertificateStatusResponse{
+			Status:      &node.Certificate.Status,
+			Certificate: &node.Certificate,
+		}, nil
+	}
+
+	log.G(ctx).WithFields(logrus.Fields{
+		"node.id": node.ID,
+		"status":  node.Certificate.Status,
+		"method":  "NodeCertificateStatus",
+	}).Debugf("started watching for certificate updates")
+
+	// Certificate is Pending or in an Unknown state, let's wait for changes.
+	for {
+		select {
+		case event := <-updates:
+			switch v := event.(type) {
+			case api.EventUpdateNode:
+				// We got an update on the certificate record. If the status is a final state,
+				// return the certificate.
+ if isFinalState(v.Node.Certificate.Status) { + cert := v.Node.Certificate.Copy() + return &api.NodeCertificateStatusResponse{ + Status: &cert.Status, + Certificate: cert, + }, nil + } + } + case <-ctx.Done(): + return nil, ctx.Err() + case <-serverCtx.Done(): + return nil, s.ctx.Err() + } + } +} + +// IssueNodeCertificate is responsible for gatekeeping both certificate requests from new nodes in the swarm, +// and authorizing certificate renewals. +// If a node presented a valid certificate, the corresponding certificate is set in a RENEW state. +// If a node failed to present a valid certificate, we check for a valid join token and set the +// role accordingly. A new random node ID is generated, and the corresponding node entry is created. +// IssueNodeCertificate is the only place where new node entries to raft should be created. +func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNodeCertificateRequest) (*api.IssueNodeCertificateResponse, error) { + // First, let's see if the remote node is presenting a non-empty CSR + if len(request.CSR) == 0 { + return nil, status.Errorf(codes.InvalidArgument, codes.InvalidArgument.String()) + } + + if err := s.isReadyLocked(); err != nil { + return nil, err + } + + var ( + blacklistedCerts map[string]*api.BlacklistedCertificate + clusters []*api.Cluster + err error + ) + + s.store.View(func(readTx store.ReadTx) { + clusters, err = store.FindClusters(readTx, store.ByName(store.DefaultClusterName)) + }) + + // Not having a cluster object yet means we can't check + // the blacklist. + if err == nil && len(clusters) == 1 { + blacklistedCerts = clusters[0].BlacklistedCertificates + } + + // Renewing the cert with a local (unix socket) is always valid. + localNodeInfo := ctx.Value(LocalRequestKey) + if localNodeInfo != nil { + nodeInfo, ok := localNodeInfo.(RemoteNodeInfo) + if ok && nodeInfo.NodeID != "" { + return s.issueRenewCertificate(ctx, nodeInfo.NodeID, request.CSR) + } + } + + // If the remote node is a worker (either forwarded by a manager, or calling directly), + // issue a renew worker certificate entry with the correct ID + nodeID, err := AuthorizeForwardedRoleAndOrg(ctx, []string{WorkerRole}, []string{ManagerRole}, s.clusterID, blacklistedCerts) + if err == nil { + return s.issueRenewCertificate(ctx, nodeID, request.CSR) + } + + // If the remote node is a manager (either forwarded by another manager, or calling directly), + // issue a renew certificate entry with the correct ID + nodeID, err = AuthorizeForwardedRoleAndOrg(ctx, []string{ManagerRole}, []string{ManagerRole}, s.clusterID, blacklistedCerts) + if err == nil { + return s.issueRenewCertificate(ctx, nodeID, request.CSR) + } + + // The remote node didn't successfully present a valid MTLS certificate, let's issue a + // certificate with a new random ID + role := api.NodeRole(-1) + + s.mu.Lock() + if subtle.ConstantTimeCompare([]byte(s.joinTokens.Manager), []byte(request.Token)) == 1 { + role = api.NodeRoleManager + } else if subtle.ConstantTimeCompare([]byte(s.joinTokens.Worker), []byte(request.Token)) == 1 { + role = api.NodeRoleWorker + } + s.mu.Unlock() + + if role < 0 { + return nil, status.Errorf(codes.InvalidArgument, "A valid join token is necessary to join this cluster") + } + + // Max number of collisions of ID or CN to tolerate before giving up + maxRetries := 3 + // Generate a random ID for this new node + for i := 0; ; i++ { + nodeID = identity.NewID() + + // Create a new node + err := s.store.Update(func(tx store.Tx) error { + node := &api.Node{ 
+				Role: role,
+				ID:   nodeID,
+				Certificate: api.Certificate{
+					CSR:  request.CSR,
+					CN:   nodeID,
+					Role: role,
+					Status: api.IssuanceStatus{
+						State: api.IssuanceStatePending,
+					},
+				},
+				Spec: api.NodeSpec{
+					DesiredRole:  role,
+					Membership:   api.NodeMembershipAccepted,
+					Availability: request.Availability,
+				},
+			}
+			node.VXLANUDPPort = clusters[0].VXLANUDPPort
+			return store.CreateNode(tx, node)
+		})
+		if err == nil {
+			log.G(ctx).WithFields(logrus.Fields{
+				"node.id":   nodeID,
+				"node.role": role,
+				"method":    "IssueNodeCertificate",
+			}).Debugf("new certificate entry added")
+			break
+		}
+		if err != store.ErrExist {
+			return nil, err
+		}
+		if i == maxRetries {
+			return nil, err
+		}
+		log.G(ctx).WithFields(logrus.Fields{
+			"node.id":   nodeID,
+			"node.role": role,
+			"method":    "IssueNodeCertificate",
+		}).Errorf("randomly generated node ID collided with an existing one - retrying")
+	}
+
+	return &api.IssueNodeCertificateResponse{
+		NodeID:         nodeID,
+		NodeMembership: api.NodeMembershipAccepted,
+	}, nil
+}
+
+// issueRenewCertificate receives a nodeID and a CSR and modifies the node's certificate entry with the new CSR
+// and changes the state to RENEW, so it can be picked up and signed by the signing reconciliation loop
+func (s *Server) issueRenewCertificate(ctx context.Context, nodeID string, csr []byte) (*api.IssueNodeCertificateResponse, error) {
+	var (
+		cert api.Certificate
+		node *api.Node
+	)
+	err := s.store.Update(func(tx store.Tx) error {
+		// Attempt to retrieve the node with nodeID
+		node = store.GetNode(tx, nodeID)
+		if node == nil {
+			log.G(ctx).WithFields(logrus.Fields{
+				"node.id": nodeID,
+				"method":  "issueRenewCertificate",
+			}).Warnf("node does not exist")
+			// If this node doesn't exist, we shouldn't be renewing a certificate for it
+			return status.Errorf(codes.NotFound, "node %s not found when attempting to renew certificate", nodeID)
+		}
+
+		// Create a new Certificate entry for this node with the new CSR and a RENEW state
+		cert = api.Certificate{
+			CSR:  csr,
+			CN:   node.ID,
+			Role: node.Role,
+			Status: api.IssuanceStatus{
+				State: api.IssuanceStateRenew,
+			},
+		}
+
+		node.Certificate = cert
+		return store.UpdateNode(tx, node)
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	log.G(ctx).WithFields(logrus.Fields{
+		"cert.cn":   cert.CN,
+		"cert.role": cert.Role,
+		"method":    "issueRenewCertificate",
+	}).Debugf("node certificate updated")
+
+	return &api.IssueNodeCertificateResponse{
+		NodeID:         nodeID,
+		NodeMembership: node.Spec.Membership,
+	}, nil
+}
+
+// GetRootCACertificate returns the certificate of the Root CA. It is used as a convenience for distributing
+// the root of trust for the swarm. Clients should use the CA hash to verify that they weren't the target of
+// a MITM attack. If they fail to do so, node bootstrap works with TOFU semantics.
+func (s *Server) GetRootCACertificate(ctx context.Context, request *api.GetRootCACertificateRequest) (*api.GetRootCACertificateResponse, error) {
+	log.G(ctx).WithFields(logrus.Fields{
+		"method": "GetRootCACertificate",
+	})
+
+	s.signingMu.Lock()
+	defer s.signingMu.Unlock()
+
+	return &api.GetRootCACertificateResponse{
+		Certificate: s.localRootCA.Certs,
+	}, nil
+}
+
+// Run runs the CA signer main loop.
+// The CA signer can be stopped by cancelling ctx or by calling Stop().
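+// Run blocks until the signer stops, so callers typically start it in its own
+// goroutine and wait on Ready() before issuing RPCs, e.g. (illustrative only):
+//
+//	go server.Run(ctx)
+//	<-server.Ready()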
+func (s *Server) Run(ctx context.Context) error { + s.mu.Lock() + if s.isRunning() { + s.mu.Unlock() + return errors.New("CA signer is already running") + } + s.wg.Add(1) + s.ctx, s.cancel = context.WithCancel(log.WithModule(ctx, "ca")) + ctx = s.ctx + s.mu.Unlock() + defer s.wg.Done() + defer func() { + s.mu.Lock() + s.mu.Unlock() + }() + + // Retrieve the channels to keep track of changes in the cluster + // Retrieve all the currently registered nodes + var ( + nodes []*api.Node + cluster *api.Cluster + err error + ) + updates, cancel, err := store.ViewAndWatch( + s.store, + func(readTx store.ReadTx) error { + cluster = store.GetCluster(readTx, s.clusterID) + if cluster == nil { + return errors.New("could not find cluster object") + } + nodes, err = store.FindNodes(readTx, store.All) + return err + }, + api.EventCreateNode{}, + api.EventUpdateNode{}, + api.EventDeleteNode{}, + api.EventUpdateCluster{ + Cluster: &api.Cluster{ID: s.clusterID}, + Checks: []api.ClusterCheckFunc{api.ClusterCheckID}, + }, + ) + + // call once to ensure that the join tokens and local/external CA signer are always set + rootReconciler := &rootRotationReconciler{ + ctx: log.WithField(ctx, "method", "(*Server).rootRotationReconciler"), + clusterID: s.clusterID, + store: s.store, + batchUpdateInterval: s.rootReconciliationRetryInterval, + } + + s.UpdateRootCA(ctx, cluster, rootReconciler) + + // Do this after updateCluster has been called, so Ready() and isRunning never returns true without + // the join tokens and external CA/security config's root CA being set correctly + s.mu.Lock() + close(s.started) + s.mu.Unlock() + + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "method": "(*Server).Run", + }).WithError(err).Errorf("snapshot store view failed") + return err + } + defer cancel() + + // We might have missed some updates if there was a leader election, + // so let's pick up the slack. + if err := s.reconcileNodeCertificates(ctx, nodes); err != nil { + // We don't return here because that means the Run loop would + // never run. Log an error instead. + log.G(ctx).WithFields(logrus.Fields{ + "method": "(*Server).Run", + }).WithError(err).Errorf("error attempting to reconcile certificates") + } + + ticker := time.NewTicker(s.reconciliationRetryInterval) + defer ticker.Stop() + + externalTLSCredsChange, externalTLSWatchCancel := s.securityConfig.Watch() + defer externalTLSWatchCancel() + + // Watch for new nodes being created, new nodes being updated, and changes + // to the cluster + for { + select { + case <-ctx.Done(): + return nil + default: + } + + select { + case event := <-updates: + switch v := event.(type) { + case api.EventCreateNode: + s.evaluateAndSignNodeCert(ctx, v.Node) + rootReconciler.UpdateNode(v.Node) + case api.EventUpdateNode: + // If this certificate is already at a final state + // no need to evaluate and sign it. + if !isFinalState(v.Node.Certificate.Status) { + s.evaluateAndSignNodeCert(ctx, v.Node) + } + rootReconciler.UpdateNode(v.Node) + case api.EventDeleteNode: + rootReconciler.DeleteNode(v.Node) + case api.EventUpdateCluster: + if v.Cluster.ID == s.clusterID { + s.UpdateRootCA(ctx, v.Cluster, rootReconciler) + } + } + case <-externalTLSCredsChange: + // The TLS certificates can rotate independently of the root CA (and hence which roots the + // external CA trusts) and external CA URLs. It's possible that the root CA update is received + // before the external TLS cred change notification. 
During that period, it is possible that + // the TLS creds will expire or otherwise fail to authorize against external CAs. However, in + // that case signing will just fail with a recoverable connectivity error - the state of the + // certificate issuance is left as pending, and on the next tick, the server will try to sign + // all nodes with pending certs again (by which time the TLS cred change will have been + // received). + + // Note that if the external CA changes, the new external CA *MUST* trust the current server's + // certificate issuer, and this server's certificates should not be extremely close to expiry, + // otherwise this server would not be able to get new TLS certificates and will no longer be + // able to function. + s.signingMu.Lock() + s.externalCA.UpdateTLSConfig(NewExternalCATLSConfig( + s.securityConfig.ClientTLSCreds.Config().Certificates, s.externalCAPool)) + s.signingMu.Unlock() + case <-ticker.C: + for _, node := range s.pending { + if err := s.evaluateAndSignNodeCert(ctx, node); err != nil { + // If this sign operation did not succeed, the rest are + // unlikely to. Yield so that we don't hammer an external CA. + // Since the map iteration order is randomized, there is no + // risk of getting stuck on a problematic CSR. + break + } + } + case <-ctx.Done(): + return nil + } + } +} + +// Stop stops the CA and closes all grpc streams. +func (s *Server) Stop() error { + s.mu.Lock() + + if !s.isRunning() { + s.mu.Unlock() + return errors.New("CA signer is already stopped") + } + s.cancel() + s.started = make(chan struct{}) + s.joinTokens = nil + s.mu.Unlock() + + // Wait for Run to complete + s.wg.Wait() + + return nil +} + +// Ready waits on the ready channel and returns when the server is ready to serve. +func (s *Server) Ready() <-chan struct{} { + s.mu.Lock() + defer s.mu.Unlock() + return s.started +} + +func (s *Server) isRunningLocked() (context.Context, error) { + s.mu.Lock() + if !s.isRunning() { + s.mu.Unlock() + return nil, status.Errorf(codes.Aborted, "CA signer is stopped") + } + ctx := s.ctx + s.mu.Unlock() + return ctx, nil +} + +func (s *Server) isReadyLocked() error { + s.mu.Lock() + defer s.mu.Unlock() + if !s.isRunning() { + return status.Errorf(codes.Aborted, "CA signer is stopped") + } + if s.joinTokens == nil { + return status.Errorf(codes.Aborted, "CA signer is still starting") + } + return nil +} + +func (s *Server) isRunning() bool { + if s.ctx == nil { + return false + } + select { + case <-s.ctx.Done(): + return false + default: + } + return true +} + +// filterExternalCAURLS returns a list of external CA urls filtered by the desired cert. +func filterExternalCAURLS(ctx context.Context, desiredCert, defaultCert []byte, apiExternalCAs []*api.ExternalCA) (urls []string) { + desiredCert = NormalizePEMs(desiredCert) + + // TODO(aaronl): In the future, this will be abstracted with an ExternalCA interface that has different + // implementations for different CA types. At the moment, only CFSSL is supported. + for i, extCA := range apiExternalCAs { + // We want to support old external CA specifications which did not have a CA cert. 
If there is no cert specified, + // we assume it's the old cert + certForExtCA := extCA.CACert + if len(certForExtCA) == 0 { + certForExtCA = defaultCert + } + certForExtCA = NormalizePEMs(certForExtCA) + if extCA.Protocol != api.ExternalCA_CAProtocolCFSSL { + log.G(ctx).Debugf("skipping external CA %d (url: %s) due to unknown protocol type", i, extCA.URL) + continue + } + if !bytes.Equal(certForExtCA, desiredCert) { + log.G(ctx).Debugf("skipping external CA %d (url: %s) because it has the wrong CA cert", i, extCA.URL) + continue + } + urls = append(urls, extCA.URL) + } + return +} + +// UpdateRootCA is called when there are cluster changes, and it ensures that the local RootCA is +// always aware of changes in clusterExpiry and the Root CA key material - this can be called by +// anything to update the root CA material +func (s *Server) UpdateRootCA(ctx context.Context, cluster *api.Cluster, reconciler *rootRotationReconciler) error { + s.mu.Lock() + s.joinTokens = cluster.RootCA.JoinTokens.Copy() + s.mu.Unlock() + rCA := cluster.RootCA.Copy() + if reconciler != nil { + reconciler.UpdateRootCA(rCA) + } + + s.signingMu.Lock() + defer s.signingMu.Unlock() + firstSeenCluster := s.lastSeenClusterRootCA == nil && s.lastSeenExternalCAs == nil + rootCAChanged := len(rCA.CACert) != 0 && !equality.RootCAEqualStable(s.lastSeenClusterRootCA, rCA) + externalCAChanged := !equality.ExternalCAsEqualStable(s.lastSeenExternalCAs, cluster.Spec.CAConfig.ExternalCAs) + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "cluster.id": cluster.ID, + "method": "(*Server).UpdateRootCA", + })) + + if rootCAChanged { + setOrUpdate := "set" + if !firstSeenCluster { + log.G(ctx).Debug("Updating signing root CA and external CA due to change in cluster Root CA") + setOrUpdate = "updated" + } + expiry := DefaultNodeCertExpiration + if cluster.Spec.CAConfig.NodeCertExpiry != nil { + // NodeCertExpiry exists, let's try to parse the duration out of it + clusterExpiry, err := gogotypes.DurationFromProto(cluster.Spec.CAConfig.NodeCertExpiry) + if err != nil { + log.G(ctx).WithError(err).Warn("failed to parse certificate expiration, using default") + } else { + // We were able to successfully parse the expiration out of the cluster. + expiry = clusterExpiry + } + } else { + // NodeCertExpiry seems to be nil + log.G(ctx).Warn("no certificate expiration specified, using default") + } + // Attempt to update our local RootCA with the new parameters + updatedRootCA, err := RootCAFromAPI(ctx, rCA, expiry) + if err != nil { + return errors.Wrap(err, "invalid Root CA object in cluster") + } + + s.localRootCA = &updatedRootCA + s.externalCAPool = updatedRootCA.Pool + externalCACert := rCA.CACert + if rCA.RootRotation != nil { + externalCACert = rCA.RootRotation.CACert + // the external CA has to trust the new CA cert + s.externalCAPool = x509.NewCertPool() + s.externalCAPool.AppendCertsFromPEM(rCA.CACert) + s.externalCAPool.AppendCertsFromPEM(rCA.RootRotation.CACert) + } + s.lastSeenExternalCAs = cluster.Spec.CAConfig.Copy().ExternalCAs + urls := filterExternalCAURLS(ctx, externalCACert, rCA.CACert, s.lastSeenExternalCAs) + // Replace the external CA with the relevant intermediates, URLS, and TLS config + s.externalCA = NewExternalCA(updatedRootCA.Intermediates, + NewExternalCATLSConfig(s.securityConfig.ClientTLSCreds.Config().Certificates, s.externalCAPool), urls...) 
+ + // only update the server cache if we've successfully updated the root CA + log.G(ctx).Debugf("Root CA %s successfully", setOrUpdate) + s.lastSeenClusterRootCA = rCA + } else if externalCAChanged { + // we want to update only if the external CA URLS have changed, since if the root CA has changed we already + // run similar logic + if !firstSeenCluster { + log.G(ctx).Debug("Updating security config external CA URLs due to change in cluster spec's list of external CAs") + } + wantedExternalCACert := rCA.CACert // we want to only add external CA URLs that use this cert + if rCA.RootRotation != nil { + // we're rotating to a new root, so we only want external CAs with the new root cert + wantedExternalCACert = rCA.RootRotation.CACert + } + // Update our external CA with the list of External CA URLs from the new cluster state + s.lastSeenExternalCAs = cluster.Spec.CAConfig.Copy().ExternalCAs + urls := filterExternalCAURLS(ctx, wantedExternalCACert, rCA.CACert, s.lastSeenExternalCAs) + s.externalCA.UpdateURLs(urls...) + } + return nil +} + +// evaluateAndSignNodeCert implements the logic of which certificates to sign +func (s *Server) evaluateAndSignNodeCert(ctx context.Context, node *api.Node) error { + // If the desired membership and actual state are in sync, there's + // nothing to do. + certState := node.Certificate.Status.State + if node.Spec.Membership == api.NodeMembershipAccepted && + (certState == api.IssuanceStateIssued || certState == api.IssuanceStateRotate) { + return nil + } + + // If the certificate state is renew, then it is a server-sided accepted cert (cert renewals) + if certState == api.IssuanceStateRenew { + return s.signNodeCert(ctx, node) + } + + // Sign this certificate if a user explicitly changed it to Accepted, and + // the certificate is in pending state + if node.Spec.Membership == api.NodeMembershipAccepted && certState == api.IssuanceStatePending { + return s.signNodeCert(ctx, node) + } + + return nil +} + +// signNodeCert does the bulk of the work for signing a certificate +func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { + s.signingMu.Lock() + rootCA := s.localRootCA + externalCA := s.externalCA + s.signingMu.Unlock() + + node = node.Copy() + nodeID := node.ID + // Convert the role from proto format + role, err := ParseRole(node.Certificate.Role) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("failed to parse role") + return errors.New("failed to parse role") + } + + s.pending[node.ID] = node + + // Attempt to sign the CSR + var ( + rawCSR = node.Certificate.CSR + cn = node.Certificate.CN + ou = role + org = s.clusterID + ) + + // Try using the external CA first. + cert, err := externalCA.Sign(ctx, PrepareCSR(rawCSR, cn, ou, org)) + if err == ErrNoExternalCAURLs { + // No external CA servers configured. Try using the local CA. + cert, err = rootCA.ParseValidateAndSignCSR(rawCSR, cn, ou, org) + } + + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("failed to sign CSR") + + // If the current state is already Failed, no need to change it + if node.Certificate.Status.State == api.IssuanceStateFailed { + delete(s.pending, node.ID) + return errors.New("failed to sign CSR") + } + + if _, ok := err.(recoverableErr); ok { + // Return without changing the state of the certificate. We may + // retry signing it in the future. 
+ return errors.New("failed to sign CSR") + } + + // We failed to sign this CSR, change the state to FAILED + err = s.store.Update(func(tx store.Tx) error { + node := store.GetNode(tx, nodeID) + if node == nil { + return errors.Errorf("node %s not found", nodeID) + } + + node.Certificate.Status = api.IssuanceStatus{ + State: api.IssuanceStateFailed, + Err: err.Error(), + } + + return store.UpdateNode(tx, node) + }) + if err != nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("transaction failed when setting state to FAILED") + } + + delete(s.pending, node.ID) + return errors.New("failed to sign CSR") + } + + // We were able to successfully sign the new CSR. Let's try to update the nodeStore + for { + err = s.store.Update(func(tx store.Tx) error { + node.Certificate.Certificate = cert + node.Certificate.Status = api.IssuanceStatus{ + State: api.IssuanceStateIssued, + } + + err := store.UpdateNode(tx, node) + if err != nil { + node = store.GetNode(tx, nodeID) + if node == nil { + err = errors.Errorf("node %s does not exist", nodeID) + } + } + return err + }) + if err == nil { + log.G(ctx).WithFields(logrus.Fields{ + "node.id": node.ID, + "node.role": node.Certificate.Role, + "method": "(*Server).signNodeCert", + }).Debugf("certificate issued") + delete(s.pending, node.ID) + break + } + if err == store.ErrSequenceConflict { + continue + } + + log.G(ctx).WithFields(logrus.Fields{ + "node.id": nodeID, + "method": "(*Server).signNodeCert", + }).WithError(err).Errorf("transaction failed") + return errors.New("transaction failed") + } + return nil +} + +// reconcileNodeCertificates is a helper method that calls evaluateAndSignNodeCert on all the +// nodes. +func (s *Server) reconcileNodeCertificates(ctx context.Context, nodes []*api.Node) error { + for _, node := range nodes { + s.evaluateAndSignNodeCert(ctx, node) + } + + return nil +} + +// A successfully issued certificate and a failed certificate are our current final states +func isFinalState(status api.IssuanceStatus) bool { + if status.State == api.IssuanceStateIssued || status.State == api.IssuanceStateFailed || + status.State == api.IssuanceStateRotate { + return true + } + + return false +} + +// RootCAFromAPI creates a RootCA object from an api.RootCA object +func RootCAFromAPI(ctx context.Context, apiRootCA *api.RootCA, expiry time.Duration) (RootCA, error) { + var intermediates []byte + signingCert := apiRootCA.CACert + signingKey := apiRootCA.CAKey + if apiRootCA.RootRotation != nil { + signingCert = apiRootCA.RootRotation.CrossSignedCACert + signingKey = apiRootCA.RootRotation.CAKey + intermediates = apiRootCA.RootRotation.CrossSignedCACert + } + if signingKey == nil { + signingCert = nil + } + return NewRootCA(apiRootCA.CACert, signingCert, signingKey, expiry, intermediates) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/transport.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/transport.go new file mode 100644 index 00000000..69c4379b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ca/transport.go @@ -0,0 +1,207 @@ +package ca + +import ( + "context" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "net" + "strings" + "sync" + + "github.com/pkg/errors" + "google.golang.org/grpc/credentials" +) + +var ( + // alpnProtoStr is the specified application level protocols for gRPC. 
+ alpnProtoStr = []string{"h2"} +) + +// MutableTLSCreds is the credentials required for authenticating a connection using TLS. +type MutableTLSCreds struct { + // Mutex for the tls config + sync.Mutex + // TLS configuration + config *tls.Config + // TLS Credentials + tlsCreds credentials.TransportCredentials + // store the subject for easy access + subject pkix.Name +} + +// Info implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) Info() credentials.ProtocolInfo { + return credentials.ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + } +} + +// Clone returns new MutableTLSCreds created from underlying *tls.Config. +// It panics if validation of underlying config fails. +func (c *MutableTLSCreds) Clone() credentials.TransportCredentials { + c.Lock() + newCfg, err := NewMutableTLS(c.config.Clone()) + if err != nil { + panic("validation error on Clone") + } + c.Unlock() + return newCfg +} + +// OverrideServerName overrides *tls.Config.ServerName. +func (c *MutableTLSCreds) OverrideServerName(name string) error { + c.Lock() + c.config.ServerName = name + c.Unlock() + return nil +} + +// GetRequestMetadata implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { + return nil, nil +} + +// RequireTransportSecurity implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) RequireTransportSecurity() bool { + return true +} + +// ClientHandshake implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + // borrow all the code from the original TLS credentials + c.Lock() + if c.config.ServerName == "" { + colonPos := strings.LastIndex(addr, ":") + if colonPos == -1 { + colonPos = len(addr) + } + c.config.ServerName = addr[:colonPos] + } + + conn := tls.Client(rawConn, c.config) + // Need to allow conn.Handshake to have access to config, + // would create a deadlock otherwise + c.Unlock() + var err error + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err = <-errChannel: + case <-ctx.Done(): + err = ctx.Err() + } + if err != nil { + rawConn.Close() + return nil, nil, err + } + return conn, nil, nil +} + +// ServerHandshake implements the credentials.TransportCredentials interface +func (c *MutableTLSCreds) ServerHandshake(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + c.Lock() + conn := tls.Server(rawConn, c.config) + c.Unlock() + if err := conn.Handshake(); err != nil { + rawConn.Close() + return nil, nil, err + } + + return conn, credentials.TLSInfo{State: conn.ConnectionState()}, nil +} + +// loadNewTLSConfig replaces the currently loaded TLS config with a new one +func (c *MutableTLSCreds) loadNewTLSConfig(newConfig *tls.Config) error { + newSubject, err := GetAndValidateCertificateSubject(newConfig.Certificates) + if err != nil { + return err + } + + c.Lock() + defer c.Unlock() + c.subject = newSubject + c.config = newConfig + + return nil +} + +// Config returns the current underlying TLS config. 
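+//
+// Since MutableTLSCreds implements credentials.TransportCredentials, the
+// credentials can be handed to gRPC directly. A minimal sketch of a
+// hypothetical caller (addr and certs are assumed to be defined elsewhere):
+//
+//	creds, err := ca.NewMutableTLS(&tls.Config{Certificates: certs})
+//	if err != nil {
+//		return err
+//	}
+//	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(creds))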
+func (c *MutableTLSCreds) Config() *tls.Config { + c.Lock() + defer c.Unlock() + + return c.config +} + +// Role returns the OU for the certificate encapsulated in this TransportCredentials +func (c *MutableTLSCreds) Role() string { + c.Lock() + defer c.Unlock() + + return c.subject.OrganizationalUnit[0] +} + +// Organization returns the O for the certificate encapsulated in this TransportCredentials +func (c *MutableTLSCreds) Organization() string { + c.Lock() + defer c.Unlock() + + return c.subject.Organization[0] +} + +// NodeID returns the CN for the certificate encapsulated in this TransportCredentials +func (c *MutableTLSCreds) NodeID() string { + c.Lock() + defer c.Unlock() + + return c.subject.CommonName +} + +// NewMutableTLS uses c to construct a mutable TransportCredentials based on TLS. +func NewMutableTLS(c *tls.Config) (*MutableTLSCreds, error) { + originalTC := credentials.NewTLS(c) + + if len(c.Certificates) < 1 { + return nil, errors.New("invalid configuration: needs at least one certificate") + } + + subject, err := GetAndValidateCertificateSubject(c.Certificates) + if err != nil { + return nil, err + } + + tc := &MutableTLSCreds{config: c, tlsCreds: originalTC, subject: subject} + tc.config.NextProtos = alpnProtoStr + + return tc, nil +} + +// GetAndValidateCertificateSubject is a helper method to retrieve and validate the subject +// from the x509 certificate underlying a tls.Certificate +func GetAndValidateCertificateSubject(certs []tls.Certificate) (pkix.Name, error) { + for i := range certs { + cert := &certs[i] + x509Cert, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + continue + } + if len(x509Cert.Subject.OrganizationalUnit) < 1 { + return pkix.Name{}, errors.New("no OU found in certificate subject") + } + + if len(x509Cert.Subject.Organization) < 1 { + return pkix.Name{}, errors.New("no organization found in certificate subject") + } + if x509Cert.Subject.CommonName == "" { + return pkix.Name{}, errors.New("no valid subject names found for TLS configuration") + } + + return x509Cert.Subject, nil + } + + return pkix.Name{}, errors.New("no valid certificates found for TLS configuration") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/connectionbroker/broker.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/connectionbroker/broker.go new file mode 100644 index 00000000..a5510a9f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/connectionbroker/broker.go @@ -0,0 +1,123 @@ +// Package connectionbroker is a layer on top of remotes that returns +// a gRPC connection to a manager. The connection may be a local connection +// using a local socket such as a UNIX socket. +package connectionbroker + +import ( + "net" + "sync" + "time" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/remotes" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "google.golang.org/grpc" +) + +// Broker is a simple connection broker. It can either return a fresh +// connection to a remote manager selected with weighted randomization, or a +// local gRPC connection to the local manager. +type Broker struct { + mu sync.Mutex + remotes remotes.Remotes + localConn *grpc.ClientConn +} + +// New creates a new connection broker. +func New(remotes remotes.Remotes) *Broker { + return &Broker{ + remotes: remotes, + } +} + +// SetLocalConn changes the local gRPC connection used by the connection broker. 
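+// When a non-nil local connection is set, Select returns it instead of
+// dialing a remote manager; passing nil makes Select fall back to
+// SelectRemote again.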
+func (b *Broker) SetLocalConn(localConn *grpc.ClientConn) {
+	b.mu.Lock()
+	defer b.mu.Unlock()
+
+	b.localConn = localConn
+}
+
+// Select a manager from the set of available managers, and return a connection.
+func (b *Broker) Select(dialOpts ...grpc.DialOption) (*Conn, error) {
+	b.mu.Lock()
+	localConn := b.localConn
+	b.mu.Unlock()
+
+	if localConn != nil {
+		return &Conn{
+			ClientConn: localConn,
+			isLocal:    true,
+		}, nil
+	}
+
+	return b.SelectRemote(dialOpts...)
+}
+
+// SelectRemote chooses a manager from the remotes, and returns a TCP
+// connection.
+func (b *Broker) SelectRemote(dialOpts ...grpc.DialOption) (*Conn, error) {
+	peer, err := b.remotes.Select()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// gRPC dialer connects to proxy first. Provide a custom dialer here to avoid that.
+	// TODO(anshul) Add an option to configure this.
+	dialOpts = append(dialOpts,
+		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return net.DialTimeout("tcp", addr, timeout)
+		}))
+
+	cc, err := grpc.Dial(peer.Addr, dialOpts...)
+	if err != nil {
+		b.remotes.ObserveIfExists(peer, -remotes.DefaultObservationWeight)
+		return nil, err
+	}
+
+	return &Conn{
+		ClientConn: cc,
+		remotes:    b.remotes,
+		peer:       peer,
+	}, nil
+}
+
+// Remotes returns the remotes interface used by the broker, so the caller
+// can make observations or see weights directly.
+func (b *Broker) Remotes() remotes.Remotes {
+	return b.remotes
+}
+
+// Conn is a wrapper around a gRPC client connection.
+type Conn struct {
+	*grpc.ClientConn
+	isLocal bool
+	remotes remotes.Remotes
+	peer    api.Peer
+}
+
+// Peer returns the peer for this Conn.
+func (c *Conn) Peer() api.Peer {
+	return c.peer
+}
+
+// Close closes the client connection if it is a remote connection. It also
+// records a positive experience with the remote peer if success is true,
+// otherwise it records a negative experience. If a local connection is in use,
+// Close is a noop.
+func (c *Conn) Close(success bool) error {
+	if c.isLocal {
+		return nil
+	}
+
+	if success {
+		c.remotes.ObserveIfExists(c.peer, remotes.DefaultObservationWeight)
+	} else {
+		c.remotes.ObserveIfExists(c.peer, -remotes.DefaultObservationWeight)
+	}
+
+	return c.ClientConn.Close()
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/combined_id.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/combined_id.go
new file mode 100644
index 00000000..2c4a2927
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/combined_id.go
@@ -0,0 +1,8 @@
+package identity
+
+import "fmt"
+
+// CombineTwoIDs combines the given IDs into a new ID, e.g. a secret and a task ID.
+func CombineTwoIDs(id1, id2 string) string {
+	return fmt.Sprintf("%s.%s", id1, id2)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/doc.go
new file mode 100644
index 00000000..b91aca7e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/doc.go
@@ -0,0 +1,16 @@
+// Package identity provides functionality for generating and managing
+// identifiers within a swarm. This includes entity identification, such as for
+// Services, Tasks and Networks, but also cryptographically-secure Node identities.
+//
+// Random Identifiers
+//
+// Identifiers provided by this package are cryptographically-strong, random
+// 128 bit numbers encoded in Base36. This method is preferred over UUID4 since
+// it requires less storage and leverages the full 128 bits of entropy.
+//
+// Generating an identifier is simple. Just call the `NewID` function:
+//
+//	id := NewID()
+//
+// If an error occurs while generating the ID, it will panic.
+package identity
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/randomid.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/randomid.go
new file mode 100644
index 00000000..0eb13527
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/identity/randomid.go
@@ -0,0 +1,53 @@
+package identity
+
+import (
+	cryptorand "crypto/rand"
+	"fmt"
+	"io"
+	"math/big"
+)
+
+var (
+	// idReader is used for random id generation. This declaration allows us to
+	// replace it for testing.
+	idReader = cryptorand.Reader
+)
+
+// parameters for random identifier generation. We can tweak this when there is
+// time for further analysis.
+const (
+	randomIDEntropyBytes = 17
+	randomIDBase         = 36
+
+	// To ensure that all identifiers are fixed length, we make sure they
+	// get padded out or truncated to 25 characters.
+	//
+	// For academics, f5lxx1zz5pnorynqglhzmsp33 == 2^128 - 1. This value
+	// was calculated from floor(log(2^128-1, 36)) + 1.
+	//
+	// While 128 bits is the largest whole-byte size that fits into 25
+	// base-36 characters, we generate an extra byte of entropy to fill
+	// in the high bits, which would otherwise be 0. This gives us a more
+	// even distribution of the first character.
+	//
+	// See http://mathworld.wolfram.com/NumberLength.html for more information.
+	maxRandomIDLength = 25
+)
+
+// NewID generates a new identifier for use where random identifiers with low
+// collision probability are required.
+//
+// With the parameters in this package, the generated identifier will provide
+// ~129 bits of entropy encoded with base36. Leading padding is added if the
+// string is less than 25 bytes. We do not intend to maintain this interface, so
+// identifiers should be treated opaquely.
+func NewID() string {
+	var p [randomIDEntropyBytes]byte
+
+	if _, err := io.ReadFull(idReader, p[:]); err != nil {
+		panic(fmt.Errorf("failed to read random bytes: %v", err))
+	}
+
+	p[0] |= 0x80 // set high bit to avoid the need for padding
+	return (&big.Int{}).SetBytes(p[:]).Text(randomIDBase)[1 : maxRandomIDLength+1]
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ioutils/ioutils.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ioutils/ioutils.go
new file mode 100644
index 00000000..25e2a780
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/ioutils/ioutils.go
@@ -0,0 +1,40 @@
+package ioutils
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+)
+
+// todo: split docker/pkg/ioutils into a separate repo
+
+// AtomicWriteFile atomically writes data to a file specified by filename.
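+// It does so by writing to a temporary file in the same directory, chmodding
+// it, syncing it, and renaming it into place, so readers never observe a
+// partially written file. A minimal usage sketch (path and payload are
+// hypothetical):
+//
+//	if err := ioutils.AtomicWriteFile("state.json", payload, 0600); err != nil {
+//		return err
+//	}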
+func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
+	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
+	if err != nil {
+		return err
+	}
+	err = os.Chmod(f.Name(), perm)
+	if err != nil {
+		f.Close()
+		return err
+	}
+	n, err := f.Write(data)
+	if err == nil && n < len(data) {
+		f.Close()
+		return io.ErrShortWrite
+	}
+	if err != nil {
+		f.Close()
+		return err
+	}
+	if err := f.Sync(); err != nil {
+		f.Close()
+		return err
+	}
+	if err := f.Close(); err != nil {
+		return err
+	}
+	return os.Rename(f.Name(), filename)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/log/context.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/log/context.go
new file mode 100644
index 00000000..cc1d590f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/log/context.go
@@ -0,0 +1,96 @@
+package log
+
+import (
+	"context"
+	"path"
+
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// G is an alias for GetLogger.
+	//
+	// We may want to define this locally to a package to get package tagged log
+	// messages.
+	G = GetLogger
+
+	// L is an alias for the standard logger.
+	L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type (
+	loggerKey struct{}
+	moduleKey struct{}
+)
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// WithFields returns a new context with added fields to logger.
+func WithFields(ctx context.Context, fields logrus.Fields) context.Context {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		logger = L
+	}
+	return WithLogger(ctx, logger.(*logrus.Entry).WithFields(fields))
+}
+
+// WithField is a convenience wrapper around WithFields.
+func WithField(ctx context.Context, key, value string) context.Context {
+	return WithFields(ctx, logrus.Fields{key: value})
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *logrus.Entry {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		return L
+	}
+
+	return logger.(*logrus.Entry)
+}
+
+// WithModule adds the module to the context, appending it with a slash if a
+// module already exists. A module is just a roughly correlated grouping
+// defined by the call tree for a given context.
+//
+// As an example, we might have a "node" module already part of a context. If
+// this function is called with "tls", the new value of module will be
+// "node/tls".
+//
+// Modules represent the call path. If the new module and the last module are
+// the same, a new module entry will not be created. If the new module and an
+// older module are the same but separated by other modules, the cycle will be
+// represented by the module path.
+func WithModule(ctx context.Context, module string) context.Context {
+	parent := GetModulePath(ctx)
+
+	if parent != "" {
+		// don't re-append module when module is the same.
+		if path.Base(parent) == module {
+			return ctx
+		}
+
+		module = path.Join(parent, module)
+	}
+
+	ctx = WithLogger(ctx, GetLogger(ctx).WithField("module", module))
+	return context.WithValue(ctx, moduleKey{}, module)
+}
+
+// GetModulePath returns the module path for the provided context. If no module
+// is set, an empty string is returned.
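+//
+// For example, combined with WithModule (a sketch; the values follow from the
+// behavior documented above):
+//
+//	ctx := log.WithModule(context.Background(), "node")
+//	ctx = log.WithModule(ctx, "tls")
+//	log.GetModulePath(ctx) // "node/tls"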
+func GetModulePath(ctx context.Context) string {
+	module := ctx.Value(moduleKey{})
+	if module == nil {
+		return ""
+	}
+
+	return module.(string)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/log/grpc.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/log/grpc.go
new file mode 100644
index 00000000..bced5cfa
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/log/grpc.go
@@ -0,0 +1,31 @@
+package log
+
+import (
+	"context"
+
+	"github.com/sirupsen/logrus"
+	"google.golang.org/grpc/grpclog"
+)
+
+type logrusWrapper struct {
+	*logrus.Entry
+}
+
+// V reports whether a particular log level is at least l - this is needed to
+// meet the LoggerV2 interface. GRPC's logging levels
+// are: https://github.com/grpc/grpc-go/blob/master/grpclog/loggerv2.go#L71
+// 0=info, 1=warning, 2=error, 3=fatal
+// logrus's are: https://github.com/sirupsen/logrus/blob/master/logrus.go
+// 0=panic, 1=fatal, 2=error, 3=warn, 4=info, 5=debug
+func (lw logrusWrapper) V(l int) bool {
+	// translate to logrus level
+	logrusLevel := 4 - l
+	return int(lw.Logger.Level) <= logrusLevel
+}
+
+func init() {
+	ctx := WithModule(context.Background(), "grpc")
+
+	// completely replace the grpc logger with the logrus logger.
+	grpclog.SetLoggerV2(logrusWrapper{Entry: G(ctx)})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go
new file mode 100644
index 00000000..47adcf0f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/raftselector/raftselector.go
@@ -0,0 +1,19 @@
+package raftselector
+
+import (
+	"context"
+	"errors"
+
+	"google.golang.org/grpc"
+)
+
+// ConnProvider is a basic interface for connecting the API package (the raft
+// proxy in particular) to the manager/state/raft package without import
+// cycles. It provides one method, for obtaining a connection to the leader.
+type ConnProvider interface {
+	LeaderConn(ctx context.Context) (*grpc.ClientConn, error)
+}
+
+// ErrIsLeader is returned from the LeaderConn method when the current machine
+// is the leader. It's just a shim between packages to avoid import cycles.
+var ErrIsLeader = errors.New("current node is leader")
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/proposer.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/proposer.go
new file mode 100644
index 00000000..8d53f577
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/proposer.go
@@ -0,0 +1,31 @@
+package state
+
+import (
+	"context"
+
+	"github.com/docker/swarmkit/api"
+)
+
+// A Change includes a version number and a set of store actions from a
+// particular log entry.
+type Change struct {
+	StoreActions []api.StoreAction
+	Version      api.Version
+}
+
+// A Proposer can propose actions to a cluster.
+type Proposer interface {
+	// ProposeValue adds storeAction to the distributed log. If this
+	// completes successfully, ProposeValue calls cb to commit the
+	// proposed changes. The callback is necessary for the Proposer to make
+	// sure that the changes are committed before it interacts further
+	// with the store.
+	ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error
+	// GetVersion returns the monotonic index of the most recent item in
+	// the distributed log.
+	GetVersion() *api.Version
+	// ChangesBetween returns the changes starting after "from", up to and
+	// including "to". If these changes are not available because the log
+	// has been compacted, an error will be returned.
+	ChangesBetween(from, to api.Version) ([]Change, error)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/apply.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/apply.go
new file mode 100644
index 00000000..e5f5c494
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/apply.go
@@ -0,0 +1,49 @@
+package store
+
+import (
+	"errors"
+
+	"github.com/docker/go-events"
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/manager/state"
+)
+
+// Apply takes an item from the event stream of one Store and applies it to
+// a second Store.
+func Apply(store *MemoryStore, item events.Event) (err error) {
+	return store.Update(func(tx Tx) error {
+		switch v := item.(type) {
+		case api.EventCreateTask:
+			return CreateTask(tx, v.Task)
+		case api.EventUpdateTask:
+			return UpdateTask(tx, v.Task)
+		case api.EventDeleteTask:
+			return DeleteTask(tx, v.Task.ID)
+
+		case api.EventCreateService:
+			return CreateService(tx, v.Service)
+		case api.EventUpdateService:
+			return UpdateService(tx, v.Service)
+		case api.EventDeleteService:
+			return DeleteService(tx, v.Service.ID)
+
+		case api.EventCreateNetwork:
+			return CreateNetwork(tx, v.Network)
+		case api.EventUpdateNetwork:
+			return UpdateNetwork(tx, v.Network)
+		case api.EventDeleteNetwork:
+			return DeleteNetwork(tx, v.Network.ID)
+
+		case api.EventCreateNode:
+			return CreateNode(tx, v.Node)
+		case api.EventUpdateNode:
+			return UpdateNode(tx, v.Node)
+		case api.EventDeleteNode:
+			return DeleteNode(tx, v.Node.ID)
+
+		case state.EventCommit:
+			return nil
+		}
+		return errors.New("unrecognized event type")
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/by.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/by.go
new file mode 100644
index 00000000..f785d795
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/by.go
@@ -0,0 +1,214 @@
+package store
+
+import "github.com/docker/swarmkit/api"
+
+// By is an interface type passed to Find methods. Implementations must be
+// defined in this package.
+type By interface {
+	// isBy allows this interface to only be satisfied by certain internal
+	// types.
+	isBy()
+}
+
+type byAll struct{}
+
+func (a byAll) isBy() {
+}
+
+// All is an argument that can be passed to Find to list all items in the
+// set.
+var All byAll
+
+type byNamePrefix string
+
+func (b byNamePrefix) isBy() {
+}
+
+// ByNamePrefix creates an object to pass to Find to select by name prefix.
+func ByNamePrefix(namePrefix string) By {
+	return byNamePrefix(namePrefix)
+}
+
+type byIDPrefix string
+
+func (b byIDPrefix) isBy() {
+}
+
+// ByIDPrefix creates an object to pass to Find to select by ID prefix.
+func ByIDPrefix(idPrefix string) By {
+	return byIDPrefix(idPrefix)
+}
+
+type byName string
+
+func (b byName) isBy() {
+}
+
+// ByName creates an object to pass to Find to select by name.
+func ByName(name string) By { + return byName(name) +} + +type byService string + +func (b byService) isBy() { +} + +type byRuntime string + +func (b byRuntime) isBy() { +} + +// ByRuntime creates an object to pass to Find to select by runtime. +func ByRuntime(runtime string) By { + return byRuntime(runtime) +} + +// ByServiceID creates an object to pass to Find to select by service. +func ByServiceID(serviceID string) By { + return byService(serviceID) +} + +type byNode string + +func (b byNode) isBy() { +} + +// ByNodeID creates an object to pass to Find to select by node. +func ByNodeID(nodeID string) By { + return byNode(nodeID) +} + +type bySlot struct { + serviceID string + slot uint64 +} + +func (b bySlot) isBy() { +} + +// BySlot creates an object to pass to Find to select by slot. +func BySlot(serviceID string, slot uint64) By { + return bySlot{serviceID: serviceID, slot: slot} +} + +type byDesiredState api.TaskState + +func (b byDesiredState) isBy() { +} + +// ByDesiredState creates an object to pass to Find to select by desired state. +func ByDesiredState(state api.TaskState) By { + return byDesiredState(state) +} + +type byTaskState api.TaskState + +func (b byTaskState) isBy() { +} + +// ByTaskState creates an object to pass to Find to select by task state. +func ByTaskState(state api.TaskState) By { + return byTaskState(state) +} + +type byRole api.NodeRole + +func (b byRole) isBy() { +} + +// ByRole creates an object to pass to Find to select by role. +func ByRole(role api.NodeRole) By { + return byRole(role) +} + +type byMembership api.NodeSpec_Membership + +func (b byMembership) isBy() { +} + +// ByMembership creates an object to pass to Find to select by Membership. +func ByMembership(membership api.NodeSpec_Membership) By { + return byMembership(membership) +} + +type byReferencedNetworkID string + +func (b byReferencedNetworkID) isBy() { +} + +// ByReferencedNetworkID creates an object to pass to Find to search for a +// service or task that references a network with the given ID. +func ByReferencedNetworkID(networkID string) By { + return byReferencedNetworkID(networkID) +} + +type byReferencedSecretID string + +func (b byReferencedSecretID) isBy() { +} + +// ByReferencedSecretID creates an object to pass to Find to search for a +// service or task that references a secret with the given ID. +func ByReferencedSecretID(secretID string) By { + return byReferencedSecretID(secretID) +} + +type byReferencedConfigID string + +func (b byReferencedConfigID) isBy() { +} + +// ByReferencedConfigID creates an object to pass to Find to search for a +// service or task that references a config with the given ID. +func ByReferencedConfigID(configID string) By { + return byReferencedConfigID(configID) +} + +type byKind string + +func (b byKind) isBy() { +} + +// ByKind creates an object to pass to Find to search for a Resource of a +// particular kind. +func ByKind(kind string) By { + return byKind(kind) +} + +type byCustom struct { + objType string + index string + value string +} + +func (b byCustom) isBy() { +} + +// ByCustom creates an object to pass to Find to search a custom index. +func ByCustom(objType, index, value string) By { + return byCustom{ + objType: objType, + index: index, + value: value, + } +} + +type byCustomPrefix struct { + objType string + index string + value string +} + +func (b byCustomPrefix) isBy() { +} + +// ByCustomPrefix creates an object to pass to Find to search a custom index by +// a value prefix. 
+func ByCustomPrefix(objType, index, value string) By { + return byCustomPrefix{ + objType: objType, + index: index, + value: value, + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/clusters.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/clusters.go new file mode 100644 index 00000000..495fc040 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/clusters.go @@ -0,0 +1,128 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const ( + tableCluster = "cluster" + + // DefaultClusterName is the default name to use for the cluster + // object. + DefaultClusterName = "default" +) + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableCluster, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.ClusterIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.ClusterIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.ClusterCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Clusters, err = FindClusters(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Clusters)) + for i, x := range snapshot.Clusters { + toStoreObj[i] = x + } + return RestoreTable(tx, tableCluster, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Cluster: + obj := v.Cluster + switch sa.Action { + case api.StoreActionKindCreate: + return CreateCluster(tx, obj) + case api.StoreActionKindUpdate: + return UpdateCluster(tx, obj) + case api.StoreActionKindRemove: + return DeleteCluster(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateCluster adds a new cluster to the store. +// Returns ErrExist if the ID is already taken. +func CreateCluster(tx Tx, c *api.Cluster) error { + // Ensure the name is not already in use. + if tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableCluster, c) +} + +// UpdateCluster updates an existing cluster in the store. +// Returns ErrNotExist if the cluster doesn't exist. +func UpdateCluster(tx Tx, c *api.Cluster) error { + // Ensure the name is either not in use or already used by this same Cluster. + if existing := tx.lookup(tableCluster, indexName, strings.ToLower(c.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != c.ID { + return ErrNameConflict + } + } + + return tx.update(tableCluster, c) +} + +// DeleteCluster removes a cluster from the store. +// Returns ErrNotExist if the cluster doesn't exist. +func DeleteCluster(tx Tx, id string) error { + return tx.delete(tableCluster, id) +} + +// GetCluster looks up a cluster by ID. +// Returns nil if the cluster doesn't exist. +func GetCluster(tx ReadTx, id string) *api.Cluster { + n := tx.get(tableCluster, id) + if n == nil { + return nil + } + return n.(*api.Cluster) +} + +// FindClusters selects a set of clusters and returns them. 
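+// A minimal usage sketch from outside this package (memStore is a
+// hypothetical *store.MemoryStore):
+//
+//	memStore.View(func(tx store.ReadTx) {
+//		clusters, err := store.FindClusters(tx, store.ByName(store.DefaultClusterName))
+//		...
+//	})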
+func FindClusters(tx ReadTx, by By) ([]*api.Cluster, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + clusterList := []*api.Cluster{} + appendResult := func(o api.StoreObject) { + clusterList = append(clusterList, o.(*api.Cluster)) + } + + err := tx.find(tableCluster, by, checkType, appendResult) + return clusterList, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/combinators.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/combinators.go new file mode 100644 index 00000000..7cea6b43 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/combinators.go @@ -0,0 +1,14 @@ +package store + +type orCombinator struct { + bys []By +} + +func (b orCombinator) isBy() { +} + +// Or returns a combinator that applies OR logic on all the supplied By +// arguments. +func Or(bys ...By) By { + return orCombinator{bys: bys} +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/configs.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/configs.go new file mode 100644 index 00000000..d02e04ba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/configs.go @@ -0,0 +1,122 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableConfig = "config" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableConfig, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.ConfigIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.ConfigIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.ConfigCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Configs, err = FindConfigs(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Configs)) + for i, x := range snapshot.Configs { + toStoreObj[i] = x + } + return RestoreTable(tx, tableConfig, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Config: + obj := v.Config + switch sa.Action { + case api.StoreActionKindCreate: + return CreateConfig(tx, obj) + case api.StoreActionKindUpdate: + return UpdateConfig(tx, obj) + case api.StoreActionKindRemove: + return DeleteConfig(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateConfig adds a new config to the store. +// Returns ErrExist if the ID is already taken. +func CreateConfig(tx Tx, c *api.Config) error { + // Ensure the name is not already in use. + if tx.lookup(tableConfig, indexName, strings.ToLower(c.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableConfig, c) +} + +// UpdateConfig updates an existing config in the store. +// Returns ErrNotExist if the config doesn't exist. +func UpdateConfig(tx Tx, c *api.Config) error { + // Ensure the name is either not in use or already used by this same Config. 
+	if existing := tx.lookup(tableConfig, indexName, strings.ToLower(c.Spec.Annotations.Name)); existing != nil {
+		if existing.GetID() != c.ID {
+			return ErrNameConflict
+		}
+	}
+
+	return tx.update(tableConfig, c)
+}
+
+// DeleteConfig removes a config from the store.
+// Returns ErrNotExist if the config doesn't exist.
+func DeleteConfig(tx Tx, id string) error {
+	return tx.delete(tableConfig, id)
+}
+
+// GetConfig looks up a config by ID.
+// Returns nil if the config doesn't exist.
+func GetConfig(tx ReadTx, id string) *api.Config {
+	c := tx.get(tableConfig, id)
+	if c == nil {
+		return nil
+	}
+	return c.(*api.Config)
+}
+
+// FindConfigs selects a set of configs and returns them.
+func FindConfigs(tx ReadTx, by By) ([]*api.Config, error) {
+	checkType := func(by By) error {
+		switch by.(type) {
+		case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix:
+			return nil
+		default:
+			return ErrInvalidFindBy
+		}
+	}
+
+	configList := []*api.Config{}
+	appendResult := func(o api.StoreObject) {
+		configList = append(configList, o.(*api.Config))
+	}
+
+	err := tx.find(tableConfig, by, checkType, appendResult)
+	return configList, err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/doc.go
new file mode 100644
index 00000000..660c7c69
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/doc.go
@@ -0,0 +1,32 @@
+// Package store provides interfaces to work with swarm cluster state.
+//
+// The primary interface is MemoryStore, which abstracts storage of this cluster
+// state. MemoryStore exposes a transactional interface for both reads and writes.
+// To perform a read transaction, View accepts a callback function that it
+// will invoke with a ReadTx object that gives it a consistent view of the
+// state. Similarly, Update accepts a callback function that it will invoke with
+// a Tx object that allows reads and writes to happen without interference from
+// other transactions.
+//
+// This is an example of making an update to a MemoryStore:
+//
+//	err := store.Update(func(tx store.Tx) error {
+//		if err := store.UpdateNode(tx, newNode); err != nil {
+//			return err
+//		}
+//		return nil
+//	})
+//	if err != nil {
+//		return fmt.Errorf("transaction failed: %v", err)
+//	}
+//
+// MemoryStore exposes watch functionality.
+// It exposes a publish/subscribe queue where code can subscribe to
+// changes of interest. This can be combined with the ViewAndWatch function to
+// "fork" a store, by making a snapshot and then applying future changes
+// to keep the copy in sync. This approach lets consumers of the data
+// use their own data structures and implement their own concurrency
+// strategies. It can lead to more efficient code because data consumers
+// don't necessarily have to lock the main data store if they are
+// maintaining their own copies of the state.
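+//
+// Reads follow the same pattern through View, whose callback receives a
+// ReadTx (nodeID is hypothetical):
+//
+//	var node *api.Node
+//	store.View(func(tx store.ReadTx) {
+//		node = store.GetNode(tx, nodeID)
+//	})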
+package store diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/extensions.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/extensions.go new file mode 100644 index 00000000..8dac4baa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/extensions.go @@ -0,0 +1,188 @@ +package store + +import ( + "errors" + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableExtension = "extension" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableExtension, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: extensionIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: extensionIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: extensionCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Extensions, err = FindExtensions(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Extensions)) + for i, x := range snapshot.Extensions { + toStoreObj[i] = extensionEntry{x} + } + return RestoreTable(tx, tableExtension, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Extension: + obj := v.Extension + switch sa.Action { + case api.StoreActionKindCreate: + return CreateExtension(tx, obj) + case api.StoreActionKindUpdate: + return UpdateExtension(tx, obj) + case api.StoreActionKindRemove: + return DeleteExtension(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +type extensionEntry struct { + *api.Extension +} + +func (e extensionEntry) CopyStoreObject() api.StoreObject { + return extensionEntry{Extension: e.Extension.Copy()} +} + +// ensure that when update events are emitted, we unwrap extensionEntry +func (e extensionEntry) EventUpdate(oldObject api.StoreObject) api.Event { + if oldObject != nil { + return api.EventUpdateExtension{Extension: e.Extension, OldExtension: oldObject.(extensionEntry).Extension} + } + return api.EventUpdateExtension{Extension: e.Extension} +} + +// CreateExtension adds a new extension to the store. +// Returns ErrExist if the ID is already taken. +func CreateExtension(tx Tx, e *api.Extension) error { + // Ensure the name is not already in use. + if tx.lookup(tableExtension, indexName, strings.ToLower(e.Annotations.Name)) != nil { + return ErrNameConflict + } + + // It can't conflict with built-in kinds either. + if _, ok := schema.Tables[e.Annotations.Name]; ok { + return ErrNameConflict + } + + return tx.create(tableExtension, extensionEntry{e}) +} + +// UpdateExtension updates an existing extension in the store. +// Returns ErrNotExist if the object doesn't exist. +func UpdateExtension(tx Tx, e *api.Extension) error { + // TODO(aaronl): For the moment, extensions are immutable + return errors.New("extensions are immutable") +} + +// DeleteExtension removes an extension from the store. +// Returns ErrNotExist if the object doesn't exist. 
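+// Deletion is also refused, with an error, while Resource objects of this
+// extension's kind still exist in the data store.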
+func DeleteExtension(tx Tx, id string) error { + e := tx.get(tableExtension, id) + if e == nil { + return ErrNotExist + } + + resources, err := FindResources(tx, ByKind(e.(extensionEntry).Annotations.Name)) + if err != nil { + return err + } + + if len(resources) != 0 { + return errors.New("cannot delete extension because objects of this type exist in the data store") + } + + return tx.delete(tableExtension, id) +} + +// GetExtension looks up an extension by ID. +// Returns nil if the object doesn't exist. +func GetExtension(tx ReadTx, id string) *api.Extension { + e := tx.get(tableExtension, id) + if e == nil { + return nil + } + return e.(extensionEntry).Extension +} + +// FindExtensions selects a set of extensions and returns them. +func FindExtensions(tx ReadTx, by By) ([]*api.Extension, error) { + checkType := func(by By) error { + switch by.(type) { + case byIDPrefix, byName, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + extensionList := []*api.Extension{} + appendResult := func(o api.StoreObject) { + extensionList = append(extensionList, o.(extensionEntry).Extension) + } + + err := tx.find(tableExtension, by, checkType, appendResult) + return extensionList, err +} + +type extensionIndexerByID struct{} + +func (indexer extensionIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByID{}.FromArgs(args...) +} +func (indexer extensionIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByID{}.PrefixFromArgs(args...) +} +func (indexer extensionIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + return api.ExtensionIndexerByID{}.FromObject(obj.(extensionEntry).Extension) +} + +type extensionIndexerByName struct{} + +func (indexer extensionIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByName{}.FromArgs(args...) +} +func (indexer extensionIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionIndexerByName{}.PrefixFromArgs(args...) +} +func (indexer extensionIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + return api.ExtensionIndexerByName{}.FromObject(obj.(extensionEntry).Extension) +} + +type extensionCustomIndexer struct{} + +func (indexer extensionCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionCustomIndexer{}.FromArgs(args...) +} +func (indexer extensionCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ExtensionCustomIndexer{}.PrefixFromArgs(args...) 
+} +func (indexer extensionCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + return api.ExtensionCustomIndexer{}.FromObject(obj.(extensionEntry).Extension) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/memory.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/memory.go new file mode 100644 index 00000000..d0319c7f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/memory.go @@ -0,0 +1,979 @@ +package store + +import ( + "context" + "errors" + "fmt" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/docker/go-events" + "github.com/docker/go-metrics" + "github.com/docker/swarmkit/api" + pb "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/manager/state" + "github.com/docker/swarmkit/watch" + gogotypes "github.com/gogo/protobuf/types" + memdb "github.com/hashicorp/go-memdb" +) + +const ( + indexID = "id" + indexName = "name" + indexRuntime = "runtime" + indexServiceID = "serviceid" + indexNodeID = "nodeid" + indexSlot = "slot" + indexDesiredState = "desiredstate" + indexTaskState = "taskstate" + indexRole = "role" + indexMembership = "membership" + indexNetwork = "network" + indexSecret = "secret" + indexConfig = "config" + indexKind = "kind" + indexCustom = "custom" + + prefix = "_prefix" + + // MaxChangesPerTransaction is the number of changes after which a new + // transaction should be started within Batch. + MaxChangesPerTransaction = 200 + + // MaxTransactionBytes is the maximum serialized transaction size. + MaxTransactionBytes = 1.5 * 1024 * 1024 +) + +var ( + // ErrExist is returned by create operations if the provided ID is already + // taken. + ErrExist = errors.New("object already exists") + + // ErrNotExist is returned by altering operations (update, delete) if the + // provided ID is not found. + ErrNotExist = errors.New("object does not exist") + + // ErrNameConflict is returned by create/update if the object name is + // already in use by another object. + ErrNameConflict = errors.New("name conflicts with an existing object") + + // ErrInvalidFindBy is returned if an unrecognized type is passed to Find. + ErrInvalidFindBy = errors.New("invalid find argument type") + + // ErrSequenceConflict is returned when trying to update an object + // whose sequence information does not match the object in the store's. + ErrSequenceConflict = errors.New("update out of sequence") + + objectStorers []ObjectStoreConfig + schema = &memdb.DBSchema{ + Tables: map[string]*memdb.TableSchema{}, + } + errUnknownStoreAction = errors.New("unknown store action") + + // WedgeTimeout is the maximum amount of time the store lock may be + // held before declaring a suspected deadlock. + WedgeTimeout = 30 * time.Second + + // update()/write tx latency timer. + updateLatencyTimer metrics.Timer + + // view()/read tx latency timer. + viewLatencyTimer metrics.Timer + + // lookup() latency timer. + lookupLatencyTimer metrics.Timer + + // Batch() latency timer. + batchLatencyTimer metrics.Timer + + // timer to capture the duration for which the memory store mutex is locked. 
+ storeLockDurationTimer metrics.Timer +) + +func init() { + ns := metrics.NewNamespace("swarm", "store", nil) + updateLatencyTimer = ns.NewTimer("write_tx_latency", + "Raft store write tx latency.") + viewLatencyTimer = ns.NewTimer("read_tx_latency", + "Raft store read tx latency.") + lookupLatencyTimer = ns.NewTimer("lookup_latency", + "Raft store read latency.") + batchLatencyTimer = ns.NewTimer("batch_latency", + "Raft store batch latency.") + storeLockDurationTimer = ns.NewTimer("memory_store_lock_duration", + "Duration for which the raft memory store lock was held.") + metrics.Register(ns) +} + +func register(os ObjectStoreConfig) { + objectStorers = append(objectStorers, os) + schema.Tables[os.Table.Name] = os.Table +} + +// timedMutex wraps a sync.Mutex, and keeps track of when it was locked. +type timedMutex struct { + sync.Mutex + lockedAt atomic.Value +} + +func (m *timedMutex) Lock() { + m.Mutex.Lock() + m.lockedAt.Store(time.Now()) +} + +// Unlocks the timedMutex and captures the duration +// for which it was locked in a metric. +func (m *timedMutex) Unlock() { + unlockedTimestamp := m.lockedAt.Load() + m.lockedAt.Store(time.Time{}) + m.Mutex.Unlock() + lockedFor := time.Since(unlockedTimestamp.(time.Time)) + storeLockDurationTimer.Update(lockedFor) +} + +func (m *timedMutex) LockedAt() time.Time { + lockedTimestamp := m.lockedAt.Load() + if lockedTimestamp == nil { + return time.Time{} + } + return lockedTimestamp.(time.Time) +} + +// MemoryStore is a concurrency-safe, in-memory implementation of the Store +// interface. +type MemoryStore struct { + // updateLock must be held during an update transaction. + updateLock timedMutex + + memDB *memdb.MemDB + queue *watch.Queue + + proposer state.Proposer +} + +// NewMemoryStore returns an in-memory store. The argument is an optional +// Proposer which will be used to propagate changes to other members in a +// cluster. +func NewMemoryStore(proposer state.Proposer) *MemoryStore { + memDB, err := memdb.NewMemDB(schema) + if err != nil { + // This shouldn't fail + panic(err) + } + + return &MemoryStore{ + memDB: memDB, + queue: watch.NewQueue(), + proposer: proposer, + } +} + +// Close closes the memory store and frees its associated resources. +func (s *MemoryStore) Close() error { + return s.queue.Close() +} + +func fromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func prefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := fromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// ReadTx is a read transaction. Note that transaction does not imply +// any internal batching. It only means that the transaction presents a +// consistent view of the data that cannot be affected by other +// transactions. +type ReadTx interface { + lookup(table, index, id string) api.StoreObject + get(table, id string) api.StoreObject + find(table string, by By, checkType func(By) error, appendResult func(api.StoreObject)) error +} + +type readTx struct { + memDBTx *memdb.Txn +} + +// View executes a read transaction. 
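+// The callback is given a consistent snapshot of the store; the underlying
+// read-only memdb transaction is released when the callback returns.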
+func (s *MemoryStore) View(cb func(ReadTx)) { + defer metrics.StartTimer(viewLatencyTimer)() + memDBTx := s.memDB.Txn(false) + + readTx := readTx{ + memDBTx: memDBTx, + } + cb(readTx) + memDBTx.Commit() +} + +// Tx is a read/write transaction. Note that transaction does not imply +// any internal batching. The purpose of this transaction is to give the +// user a guarantee that its changes won't be visible to other transactions +// until the transaction is over. +type Tx interface { + ReadTx + create(table string, o api.StoreObject) error + update(table string, o api.StoreObject) error + delete(table, id string) error +} + +type tx struct { + readTx + curVersion *api.Version + changelist []api.Event +} + +// changelistBetweenVersions returns the changes after "from" up to and +// including "to". +func (s *MemoryStore) changelistBetweenVersions(from, to api.Version) ([]api.Event, error) { + if s.proposer == nil { + return nil, errors.New("store does not support versioning") + } + changes, err := s.proposer.ChangesBetween(from, to) + if err != nil { + return nil, err + } + + var changelist []api.Event + + for _, change := range changes { + for _, sa := range change.StoreActions { + event, err := api.EventFromStoreAction(sa, nil) + if err != nil { + return nil, err + } + changelist = append(changelist, event) + } + changelist = append(changelist, state.EventCommit{Version: change.Version.Copy()}) + } + + return changelist, nil +} + +// ApplyStoreActions updates a store based on StoreAction messages. +func (s *MemoryStore) ApplyStoreActions(actions []api.StoreAction) error { + s.updateLock.Lock() + memDBTx := s.memDB.Txn(true) + + tx := tx{ + readTx: readTx{ + memDBTx: memDBTx, + }, + } + + for _, sa := range actions { + if err := applyStoreAction(&tx, sa); err != nil { + memDBTx.Abort() + s.updateLock.Unlock() + return err + } + } + + memDBTx.Commit() + + for _, c := range tx.changelist { + s.queue.Publish(c) + } + if len(tx.changelist) != 0 { + s.queue.Publish(state.EventCommit{}) + } + s.updateLock.Unlock() + return nil +} + +func applyStoreAction(tx Tx, sa api.StoreAction) error { + for _, os := range objectStorers { + err := os.ApplyStoreAction(tx, sa) + if err != errUnknownStoreAction { + return err + } + } + + return errors.New("unrecognized action type") +} + +func (s *MemoryStore) update(proposer state.Proposer, cb func(Tx) error) error { + defer metrics.StartTimer(updateLatencyTimer)() + s.updateLock.Lock() + memDBTx := s.memDB.Txn(true) + + var curVersion *api.Version + + if proposer != nil { + curVersion = proposer.GetVersion() + } + + var tx tx + tx.init(memDBTx, curVersion) + + err := cb(&tx) + + if err == nil { + if proposer == nil { + memDBTx.Commit() + } else { + var sa []api.StoreAction + sa, err = tx.changelistStoreActions() + + if err == nil { + if len(sa) != 0 { + err = proposer.ProposeValue(context.Background(), sa, func() { + memDBTx.Commit() + }) + } else { + memDBTx.Commit() + } + } + } + } + + if err == nil { + for _, c := range tx.changelist { + s.queue.Publish(c) + } + if len(tx.changelist) != 0 { + if proposer != nil { + curVersion = proposer.GetVersion() + } + + s.queue.Publish(state.EventCommit{Version: curVersion}) + } + } else { + memDBTx.Abort() + } + s.updateLock.Unlock() + return err +} + +func (s *MemoryStore) updateLocal(cb func(Tx) error) error { + return s.update(nil, cb) +} + +// Update executes a read/write transaction. 
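+// If the store was created with a Proposer, the resulting store actions are
+// proposed to the rest of the cluster before the in-memory transaction
+// commits; if the callback or the proposal fails, the transaction is aborted
+// and no events are published.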
+func (s *MemoryStore) Update(cb func(Tx) error) error {
+	return s.update(s.proposer, cb)
+}
+
+// Batch provides a mechanism to batch updates to a store.
+type Batch struct {
+	tx    tx
+	store *MemoryStore
+	// applied counts the times Update has run successfully
+	applied int
+	// transactionSizeEstimate is the running count of the size of the
+	// current transaction.
+	transactionSizeEstimate int
+	// changelistLen is the last known length of the transaction's
+	// changelist.
+	changelistLen int
+	err           error
+}
+
+// Update adds a single change to a batch. Each call to Update is atomic, but
+// different calls to Update may be spread across multiple transactions to
+// circumvent transaction size limits.
+func (batch *Batch) Update(cb func(Tx) error) error {
+	if batch.err != nil {
+		return batch.err
+	}
+
+	if err := cb(&batch.tx); err != nil {
+		return err
+	}
+
+	batch.applied++
+
+	for batch.changelistLen < len(batch.tx.changelist) {
+		sa, err := api.NewStoreAction(batch.tx.changelist[batch.changelistLen])
+		if err != nil {
+			return err
+		}
+		batch.transactionSizeEstimate += sa.Size()
+		batch.changelistLen++
+	}
+
+	if batch.changelistLen >= MaxChangesPerTransaction || batch.transactionSizeEstimate >= (MaxTransactionBytes*3)/4 {
+		if err := batch.commit(); err != nil {
+			return err
+		}
+
+		// Yield the update lock
+		batch.store.updateLock.Unlock()
+		runtime.Gosched()
+		batch.store.updateLock.Lock()
+
+		batch.newTx()
+	}
+
+	return nil
+}
+
+func (batch *Batch) newTx() {
+	var curVersion *api.Version
+
+	if batch.store.proposer != nil {
+		curVersion = batch.store.proposer.GetVersion()
+	}
+
+	batch.tx.init(batch.store.memDB.Txn(true), curVersion)
+	batch.transactionSizeEstimate = 0
+	batch.changelistLen = 0
+}
+
+func (batch *Batch) commit() error {
+	if batch.store.proposer != nil {
+		var sa []api.StoreAction
+		sa, batch.err = batch.tx.changelistStoreActions()
+
+		if batch.err == nil {
+			if len(sa) != 0 {
+				batch.err = batch.store.proposer.ProposeValue(context.Background(), sa, func() {
+					batch.tx.memDBTx.Commit()
+				})
+			} else {
+				batch.tx.memDBTx.Commit()
+			}
+		}
+	} else {
+		batch.tx.memDBTx.Commit()
+	}
+
+	if batch.err != nil {
+		batch.tx.memDBTx.Abort()
+		return batch.err
+	}
+
+	for _, c := range batch.tx.changelist {
+		batch.store.queue.Publish(c)
+	}
+	if len(batch.tx.changelist) != 0 {
+		batch.store.queue.Publish(state.EventCommit{})
+	}
+
+	return nil
+}
+
+// Batch performs one or more transactions that allow reads and writes.
+// It invokes a callback that is passed a Batch object. The callback may
+// call batch.Update for each change it wants to make as part of the
+// batch. The changes in the batch may be split over multiple
+// transactions if necessary to keep transactions below the size limit.
+// Batch holds a lock over the state, but will yield this lock every time
+// it creates a new transaction to allow other writers to proceed.
+// Thus, unrelated changes to the state may occur between calls to
+// batch.Update.
+//
+// This method allows the caller to iterate over a data set and apply
+// changes in sequence without holding the store write lock for an
+// excessive time, or producing a transaction that exceeds the maximum
+// size.
+//
+// If Batch returns an error, no guarantees are made about how many updates
+// were committed successfully.
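+//
+// A hedged example (editor's addition; taskIDs is assumed to come from an
+// earlier View): delete many tasks without holding the write lock for the
+// whole run, letting Batch split the work across transactions:
+//
+//	err := s.Batch(func(batch *Batch) error {
+//		for _, id := range taskIDs {
+//			id := id
+//			if err := batch.Update(func(tx Tx) error {
+//				return DeleteTask(tx, id)
+//			}); err != nil {
+//				return err
+//			}
+//		}
+//		return nil
+//	})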
+func (s *MemoryStore) Batch(cb func(*Batch) error) error { + defer metrics.StartTimer(batchLatencyTimer)() + s.updateLock.Lock() + + batch := Batch{ + store: s, + } + batch.newTx() + + if err := cb(&batch); err != nil { + batch.tx.memDBTx.Abort() + s.updateLock.Unlock() + return err + } + + err := batch.commit() + s.updateLock.Unlock() + return err +} + +func (tx *tx) init(memDBTx *memdb.Txn, curVersion *api.Version) { + tx.memDBTx = memDBTx + tx.curVersion = curVersion + tx.changelist = nil +} + +func (tx tx) changelistStoreActions() ([]api.StoreAction, error) { + var actions []api.StoreAction + + for _, c := range tx.changelist { + sa, err := api.NewStoreAction(c) + if err != nil { + return nil, err + } + actions = append(actions, sa) + } + + return actions, nil +} + +// lookup is an internal typed wrapper around memdb. +func (tx readTx) lookup(table, index, id string) api.StoreObject { + defer metrics.StartTimer(lookupLatencyTimer)() + j, err := tx.memDBTx.First(table, index, id) + if err != nil { + return nil + } + if j != nil { + return j.(api.StoreObject) + } + return nil +} + +// create adds a new object to the store. +// Returns ErrExist if the ID is already taken. +func (tx *tx) create(table string, o api.StoreObject) error { + if tx.lookup(table, indexID, o.GetID()) != nil { + return ErrExist + } + + copy := o.CopyStoreObject() + meta := copy.GetMeta() + if err := touchMeta(&meta, tx.curVersion); err != nil { + return err + } + copy.SetMeta(meta) + + err := tx.memDBTx.Insert(table, copy) + if err == nil { + tx.changelist = append(tx.changelist, copy.EventCreate()) + o.SetMeta(meta) + } + return err +} + +// Update updates an existing object in the store. +// Returns ErrNotExist if the object doesn't exist. +func (tx *tx) update(table string, o api.StoreObject) error { + oldN := tx.lookup(table, indexID, o.GetID()) + if oldN == nil { + return ErrNotExist + } + + meta := o.GetMeta() + + if tx.curVersion != nil { + if oldN.GetMeta().Version != meta.Version { + return ErrSequenceConflict + } + } + + copy := o.CopyStoreObject() + if err := touchMeta(&meta, tx.curVersion); err != nil { + return err + } + copy.SetMeta(meta) + + err := tx.memDBTx.Insert(table, copy) + if err == nil { + tx.changelist = append(tx.changelist, copy.EventUpdate(oldN)) + o.SetMeta(meta) + } + return err +} + +// Delete removes an object from the store. +// Returns ErrNotExist if the object doesn't exist. +func (tx *tx) delete(table, id string) error { + n := tx.lookup(table, indexID, id) + if n == nil { + return ErrNotExist + } + + err := tx.memDBTx.Delete(table, n) + if err == nil { + tx.changelist = append(tx.changelist, n.EventDelete()) + } + return err +} + +// Get looks up an object by ID. +// Returns nil if the object doesn't exist. +func (tx readTx) get(table, id string) api.StoreObject { + o := tx.lookup(table, indexID, id) + if o == nil { + return nil + } + return o.CopyStoreObject() +} + +// findIterators returns a slice of iterators. The union of items from these +// iterators provides the result of the query. 
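+//
+// For instance (editor's note, assuming the usual By combinators such as
+// Or, ByNodeID, and ByServiceID from by.go): Or(ByNodeID("n1"),
+// ByServiceID("s1")) produces an orCombinator that yields one iterator per
+// sub-selector, and find deduplicates the union by object ID.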
+func (tx readTx) findIterators(table string, by By, checkType func(By) error) ([]memdb.ResultIterator, error) { + switch by.(type) { + case byAll, orCombinator: // generic types + default: // all other types + if err := checkType(by); err != nil { + return nil, err + } + } + + switch v := by.(type) { + case byAll: + it, err := tx.memDBTx.Get(table, indexID) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case orCombinator: + var iters []memdb.ResultIterator + for _, subBy := range v.bys { + it, err := tx.findIterators(table, subBy, checkType) + if err != nil { + return nil, err + } + iters = append(iters, it...) + } + return iters, nil + case byName: + it, err := tx.memDBTx.Get(table, indexName, strings.ToLower(string(v))) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byIDPrefix: + it, err := tx.memDBTx.Get(table, indexID+prefix, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byNamePrefix: + it, err := tx.memDBTx.Get(table, indexName+prefix, strings.ToLower(string(v))) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byRuntime: + it, err := tx.memDBTx.Get(table, indexRuntime, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byNode: + it, err := tx.memDBTx.Get(table, indexNodeID, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byService: + it, err := tx.memDBTx.Get(table, indexServiceID, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case bySlot: + it, err := tx.memDBTx.Get(table, indexSlot, v.serviceID+"\x00"+strconv.FormatUint(v.slot, 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byDesiredState: + it, err := tx.memDBTx.Get(table, indexDesiredState, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byTaskState: + it, err := tx.memDBTx.Get(table, indexTaskState, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byRole: + it, err := tx.memDBTx.Get(table, indexRole, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byMembership: + it, err := tx.memDBTx.Get(table, indexMembership, strconv.FormatInt(int64(v), 10)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byReferencedNetworkID: + it, err := tx.memDBTx.Get(table, indexNetwork, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byReferencedSecretID: + it, err := tx.memDBTx.Get(table, indexSecret, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byReferencedConfigID: + it, err := tx.memDBTx.Get(table, indexConfig, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byKind: + it, err := tx.memDBTx.Get(table, indexKind, string(v)) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + case byCustom: + var key string + if v.objType != "" { + key = v.objType + "|" + v.index + "|" + v.value + } else { + key = v.index + "|" + v.value + } + it, err := tx.memDBTx.Get(table, indexCustom, key) + if err != nil { + return nil, err + } + return []memdb.ResultIterator{it}, nil + 
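+	// byCustomPrefix (below) builds the same "objType|index|value" key as
+	// byCustom above, but queries the prefix variant of the custom index,
+	// so any stored key that begins with the given value matches.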
case byCustomPrefix:
+		var key string
+		if v.objType != "" {
+			key = v.objType + "|" + v.index + "|" + v.value
+		} else {
+			key = v.index + "|" + v.value
+		}
+		it, err := tx.memDBTx.Get(table, indexCustom+prefix, key)
+		if err != nil {
+			return nil, err
+		}
+		return []memdb.ResultIterator{it}, nil
+	default:
+		return nil, ErrInvalidFindBy
+	}
+}
+
+// find selects a set of objects and calls a callback for each matching object.
+func (tx readTx) find(table string, by By, checkType func(By) error, appendResult func(api.StoreObject)) error {
+	fromResultIterators := func(its ...memdb.ResultIterator) {
+		ids := make(map[string]struct{})
+		for _, it := range its {
+			for {
+				obj := it.Next()
+				if obj == nil {
+					break
+				}
+				o := obj.(api.StoreObject)
+				id := o.GetID()
+				if _, exists := ids[id]; !exists {
+					appendResult(o.CopyStoreObject())
+					ids[id] = struct{}{}
+				}
+			}
+		}
+	}
+
+	iters, err := tx.findIterators(table, by, checkType)
+	if err != nil {
+		return err
+	}
+
+	fromResultIterators(iters...)
+
+	return nil
+}
+
+// Save serializes the data in the store.
+func (s *MemoryStore) Save(tx ReadTx) (*pb.StoreSnapshot, error) {
+	var snapshot pb.StoreSnapshot
+	for _, os := range objectStorers {
+		if err := os.Save(tx, &snapshot); err != nil {
+			return nil, err
+		}
+	}
+
+	return &snapshot, nil
+}
+
+// Restore sets the contents of the store to the serialized data in the
+// argument.
+func (s *MemoryStore) Restore(snapshot *pb.StoreSnapshot) error {
+	return s.updateLocal(func(tx Tx) error {
+		for _, os := range objectStorers {
+			if err := os.Restore(tx, snapshot); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+}
+
+// WatchQueue returns the publish/subscribe queue.
+func (s *MemoryStore) WatchQueue() *watch.Queue {
+	return s.queue
+}
+
+// ViewAndWatch calls a callback which can observe the state of this
+// MemoryStore. It also returns a channel that will return further events from
+// this point so the snapshot can be kept up to date. The watch channel must be
+// released with watch.StopWatch when it is no longer needed. The channel is
+// guaranteed to get all events after the moment of the snapshot, and only
+// those events.
+func ViewAndWatch(store *MemoryStore, cb func(ReadTx) error, specifiers ...api.Event) (watch chan events.Event, cancel func(), err error) {
+	// Using Update to lock the store and guarantee consistency between
+	// the watcher and the state seen by the callback. snapshotReadTx
+	// exposes this Tx as a ReadTx so the callback can't modify it.
+	err = store.Update(func(tx Tx) error {
+		if err := cb(tx); err != nil {
+			return err
+		}
+		watch, cancel = state.Watch(store.WatchQueue(), specifiers...)
+		return nil
+	})
+	if watch != nil && err != nil {
+		cancel()
+		cancel = nil
+		watch = nil
+	}
+	return
+}
+
+// WatchFrom returns a channel that will return past events starting
+// from "version", and new events until the channel is closed. If "version"
+// is nil, this function is equivalent to
+//
+//	state.Watch(store.WatchQueue(), specifiers...).
+//
+// If the log has been compacted and it's not possible to produce the exact
+// set of events leading from "version" to the current state, this function
+// will return an error, and the caller should re-sync.
+//
+// The watch channel must be released with watch.StopWatch when it is no
+// longer needed.
+func WatchFrom(store *MemoryStore, version *api.Version, specifiers ...api.Event) (chan events.Event, func(), error) {
+	if version == nil {
+		ch, cancel := state.Watch(store.WatchQueue(), specifiers...)
+		return ch, cancel, nil
+	}
+
+	if store.proposer == nil {
+		return nil, nil, errors.New("store does not support versioning")
+	}
+
+	var (
+		curVersion  *api.Version
+		watch       chan events.Event
+		cancelWatch func()
+	)
+	// Using Update to lock the store
+	err := store.Update(func(tx Tx) error {
+		// Get current version
+		curVersion = store.proposer.GetVersion()
+		// Start the watch with the store locked so events cannot be
+		// missed
+		watch, cancelWatch = state.Watch(store.WatchQueue(), specifiers...)
+		return nil
+	})
+	if watch != nil && err != nil {
+		cancelWatch()
+		return nil, nil, err
+	}
+
+	if curVersion == nil {
+		cancelWatch()
+		return nil, nil, errors.New("could not get current version from store")
+	}
+
+	changelist, err := store.changelistBetweenVersions(*version, *curVersion)
+	if err != nil {
+		cancelWatch()
+		return nil, nil, err
+	}
+
+	ch := make(chan events.Event)
+	stop := make(chan struct{})
+	cancel := func() {
+		close(stop)
+	}
+
+	go func() {
+		defer cancelWatch()
+
+		matcher := state.Matcher(specifiers...)
+		for _, change := range changelist {
+			if matcher(change) {
+				select {
+				case ch <- change:
+				case <-stop:
+					return
+				}
+			}
+		}
+
+		for {
+			select {
+			case <-stop:
+				return
+			case e := <-watch:
+				ch <- e
+			}
+		}
+	}()
+
+	return ch, cancel, nil
+}
+
+// touchMeta updates an object's timestamps when necessary and bumps the version
+// if provided.
+func touchMeta(meta *api.Meta, version *api.Version) error {
+	// Skip meta update if version is not defined as it means we're applying
+	// from raft or restoring from a snapshot.
+	if version == nil {
+		return nil
+	}
+
+	now, err := gogotypes.TimestampProto(time.Now())
+	if err != nil {
+		return err
+	}
+
+	meta.Version = *version
+
+	// Update CreatedAt if not defined
+	if meta.CreatedAt == nil {
+		meta.CreatedAt = now
+	}
+
+	meta.UpdatedAt = now
+
+	return nil
+}
+
+// Wedged returns true if the store lock has been held for a long time,
+// possibly indicating a deadlock.
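+//
+// A hedged monitoring sketch (editor's addition; the polling interval and
+// the use of the standard library log package are arbitrary choices):
+//
+//	go func() {
+//		for range time.Tick(WedgeTimeout / 2) {
+//			if s.Wedged() {
+//				log.Println("store: update lock held longer than WedgeTimeout")
+//			}
+//		}
+//	}()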
+func (s *MemoryStore) Wedged() bool { + lockedAt := s.updateLock.LockedAt() + if lockedAt.IsZero() { + return false + } + + return time.Since(lockedAt) > WedgeTimeout +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/networks.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/networks.go new file mode 100644 index 00000000..fa887b3b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/networks.go @@ -0,0 +1,122 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableNetwork = "network" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableNetwork, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.NetworkIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.NetworkIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.NetworkCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Networks, err = FindNetworks(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Networks)) + for i, x := range snapshot.Networks { + toStoreObj[i] = x + } + return RestoreTable(tx, tableNetwork, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Network: + obj := v.Network + switch sa.Action { + case api.StoreActionKindCreate: + return CreateNetwork(tx, obj) + case api.StoreActionKindUpdate: + return UpdateNetwork(tx, obj) + case api.StoreActionKindRemove: + return DeleteNetwork(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateNetwork adds a new network to the store. +// Returns ErrExist if the ID is already taken. +func CreateNetwork(tx Tx, n *api.Network) error { + // Ensure the name is not already in use. + if tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableNetwork, n) +} + +// UpdateNetwork updates an existing network in the store. +// Returns ErrNotExist if the network doesn't exist. +func UpdateNetwork(tx Tx, n *api.Network) error { + // Ensure the name is either not in use or already used by this same Network. + if existing := tx.lookup(tableNetwork, indexName, strings.ToLower(n.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != n.ID { + return ErrNameConflict + } + } + + return tx.update(tableNetwork, n) +} + +// DeleteNetwork removes a network from the store. +// Returns ErrNotExist if the network doesn't exist. +func DeleteNetwork(tx Tx, id string) error { + return tx.delete(tableNetwork, id) +} + +// GetNetwork looks up a network by ID. +// Returns nil if the network doesn't exist. +func GetNetwork(tx ReadTx, id string) *api.Network { + n := tx.get(tableNetwork, id) + if n == nil { + return nil + } + return n.(*api.Network) +} + +// FindNetworks selects a set of networks and returns them. 
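+//
+// For example (editor's sketch; the prefix value and the ByNamePrefix
+// selector from by.go are assumptions):
+//
+//	var nets []*api.Network
+//	s.View(func(tx ReadTx) {
+//		nets, _ = FindNetworks(tx, ByNamePrefix("overlay"))
+//	})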
+func FindNetworks(tx ReadTx, by By) ([]*api.Network, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix, byAll: + return nil + default: + return ErrInvalidFindBy + } + } + + networkList := []*api.Network{} + appendResult := func(o api.StoreObject) { + networkList = append(networkList, o.(*api.Network)) + } + + err := tx.find(tableNetwork, by, checkType, appendResult) + return networkList, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/nodes.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/nodes.go new file mode 100644 index 00000000..fa6ae85b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/nodes.go @@ -0,0 +1,166 @@ +package store + +import ( + "strconv" + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableNode = "node" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableNode, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.NodeIndexerByID{}, + }, + // TODO(aluzzardi): Use `indexHostname` instead. + indexName: { + Name: indexName, + AllowMissing: true, + Indexer: nodeIndexerByHostname{}, + }, + indexRole: { + Name: indexRole, + Indexer: nodeIndexerByRole{}, + }, + indexMembership: { + Name: indexMembership, + Indexer: nodeIndexerByMembership{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.NodeCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Nodes, err = FindNodes(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Nodes)) + for i, x := range snapshot.Nodes { + toStoreObj[i] = x + } + return RestoreTable(tx, tableNode, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Node: + obj := v.Node + switch sa.Action { + case api.StoreActionKindCreate: + return CreateNode(tx, obj) + case api.StoreActionKindUpdate: + return UpdateNode(tx, obj) + case api.StoreActionKindRemove: + return DeleteNode(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateNode adds a new node to the store. +// Returns ErrExist if the ID is already taken. +func CreateNode(tx Tx, n *api.Node) error { + return tx.create(tableNode, n) +} + +// UpdateNode updates an existing node in the store. +// Returns ErrNotExist if the node doesn't exist. +func UpdateNode(tx Tx, n *api.Node) error { + return tx.update(tableNode, n) +} + +// DeleteNode removes a node from the store. +// Returns ErrNotExist if the node doesn't exist. +func DeleteNode(tx Tx, id string) error { + return tx.delete(tableNode, id) +} + +// GetNode looks up a node by ID. +// Returns nil if the node doesn't exist. +func GetNode(tx ReadTx, id string) *api.Node { + n := tx.get(tableNode, id) + if n == nil { + return nil + } + return n.(*api.Node) +} + +// FindNodes selects a set of nodes and returns them. 
+func FindNodes(tx ReadTx, by By) ([]*api.Node, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byRole, byMembership, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + nodeList := []*api.Node{} + appendResult := func(o api.StoreObject) { + nodeList = append(nodeList, o.(*api.Node)) + } + + err := tx.find(tableNode, by, checkType, appendResult) + return nodeList, err +} + +type nodeIndexerByHostname struct{} + +func (ni nodeIndexerByHostname) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ni nodeIndexerByHostname) FromObject(obj interface{}) (bool, []byte, error) { + n := obj.(*api.Node) + + if n.Description == nil { + return false, nil, nil + } + // Add the null character as a terminator + return true, []byte(strings.ToLower(n.Description.Hostname) + "\x00"), nil +} + +func (ni nodeIndexerByHostname) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type nodeIndexerByRole struct{} + +func (ni nodeIndexerByRole) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ni nodeIndexerByRole) FromObject(obj interface{}) (bool, []byte, error) { + n := obj.(*api.Node) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(n.Role), 10) + "\x00"), nil +} + +type nodeIndexerByMembership struct{} + +func (ni nodeIndexerByMembership) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ni nodeIndexerByMembership) FromObject(obj interface{}) (bool, []byte, error) { + n := obj.(*api.Node) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(n.Spec.Membership), 10) + "\x00"), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/object.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/object.go new file mode 100644 index 00000000..89029afb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/object.go @@ -0,0 +1,58 @@ +package store + +import ( + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +// ObjectStoreConfig provides the necessary methods to store a particular object +// type inside MemoryStore. +type ObjectStoreConfig struct { + Table *memdb.TableSchema + Save func(ReadTx, *api.StoreSnapshot) error + Restore func(Tx, *api.StoreSnapshot) error + ApplyStoreAction func(Tx, api.StoreAction) error +} + +// RestoreTable takes a list of new objects of a particular type (e.g. clusters, +// nodes, etc., which conform to the StoreObject interface) and replaces the +// existing objects in the store of that type with the new objects. 
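+// It does this incrementally (editor's note): objects present in both sets
+// are updated in place, new objects are created, and objects absent from
+// the new set are deleted, so watchers see a stream of create/update/delete
+// events rather than a wholesale replacement.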
+func RestoreTable(tx Tx, table string, newObjects []api.StoreObject) error {
+	checkType := func(by By) error {
+		return nil
+	}
+	var oldObjects []api.StoreObject
+	appendResult := func(o api.StoreObject) {
+		oldObjects = append(oldObjects, o)
+	}
+
+	err := tx.find(table, All, checkType, appendResult)
+	if err != nil {
+		return err
+	}
+
+	updated := make(map[string]struct{})
+
+	for _, o := range newObjects {
+		objectID := o.GetID()
+		if existing := tx.lookup(table, indexID, objectID); existing != nil {
+			if err := tx.update(table, o); err != nil {
+				return err
+			}
+			updated[objectID] = struct{}{}
+		} else {
+			if err := tx.create(table, o); err != nil {
+				return err
+			}
+		}
+	}
+	for _, o := range oldObjects {
+		objectID := o.GetID()
+		if _, ok := updated[objectID]; !ok {
+			if err := tx.delete(table, objectID); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/resources.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/resources.go
new file mode 100644
index 00000000..9852f64f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/resources.go
@@ -0,0 +1,214 @@
+package store
+
+import (
+	"strings"
+
+	"github.com/docker/swarmkit/api"
+	memdb "github.com/hashicorp/go-memdb"
+	"github.com/pkg/errors"
+)
+
+const tableResource = "resource"
+
+var (
+	// ErrNoKind is returned by resource create operations if the provided Kind
+	// of the resource does not exist
+	ErrNoKind = errors.New("object kind is unregistered")
+)
+
+func init() {
+	register(ObjectStoreConfig{
+		Table: &memdb.TableSchema{
+			Name: tableResource,
+			Indexes: map[string]*memdb.IndexSchema{
+				indexID: {
+					Name:    indexID,
+					Unique:  true,
+					Indexer: resourceIndexerByID{},
+				},
+				indexName: {
+					Name:    indexName,
+					Unique:  true,
+					Indexer: resourceIndexerByName{},
+				},
+				indexKind: {
+					Name:    indexKind,
+					Indexer: resourceIndexerByKind{},
+				},
+				indexCustom: {
+					Name:         indexCustom,
+					Indexer:      resourceCustomIndexer{},
+					AllowMissing: true,
+				},
+			},
+		},
+		Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+			var err error
+			snapshot.Resources, err = FindResources(tx, All)
+			return err
+		},
+		Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+			toStoreObj := make([]api.StoreObject, len(snapshot.Resources))
+			for i, x := range snapshot.Resources {
+				toStoreObj[i] = resourceEntry{x}
+			}
+			return RestoreTable(tx, tableResource, toStoreObj)
+		},
+		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
+			switch v := sa.Target.(type) {
+			case *api.StoreAction_Resource:
+				obj := v.Resource
+				switch sa.Action {
+				case api.StoreActionKindCreate:
+					return CreateResource(tx, obj)
+				case api.StoreActionKindUpdate:
+					return UpdateResource(tx, obj)
+				case api.StoreActionKindRemove:
+					return DeleteResource(tx, obj.ID)
+				}
+			}
+			return errUnknownStoreAction
+		},
+	})
+}
+
+type resourceEntry struct {
+	*api.Resource
+}
+
+func (r resourceEntry) CopyStoreObject() api.StoreObject {
+	return resourceEntry{Resource: r.Resource.Copy()}
+}
+
+// ensure that when update events are emitted, we unwrap resourceEntry
+func (r resourceEntry) EventUpdate(oldObject api.StoreObject) api.Event {
+	if oldObject != nil {
+		return api.EventUpdateResource{Resource: r.Resource, OldResource: oldObject.(resourceEntry).Resource}
+	}
+	return api.EventUpdateResource{Resource: r.Resource}
+}
+
+func confirmExtension(tx Tx, r *api.Resource) error {
+	// There must
be an extension corresponding to the Kind field. + extensions, err := FindExtensions(tx, ByName(r.Kind)) + if err != nil { + return errors.Wrap(err, "failed to query extensions") + } + if len(extensions) == 0 { + return ErrNoKind + } + return nil +} + +// CreateResource adds a new resource object to the store. +// Returns ErrExist if the ID is already taken. +// Returns ErrNameConflict if a Resource with this Name already exists +// Returns ErrNoKind if the specified Kind does not exist +func CreateResource(tx Tx, r *api.Resource) error { + if err := confirmExtension(tx, r); err != nil { + return err + } + // TODO(dperny): currently the "name" index is unique, which means only one + // Resource of _any_ Kind can exist with that name. This isn't a problem + // right now, but the ideal case would be for names to be namespaced to the + // kind. + if tx.lookup(tableResource, indexName, strings.ToLower(r.Annotations.Name)) != nil { + return ErrNameConflict + } + return tx.create(tableResource, resourceEntry{r}) +} + +// UpdateResource updates an existing resource object in the store. +// Returns ErrNotExist if the object doesn't exist. +func UpdateResource(tx Tx, r *api.Resource) error { + if err := confirmExtension(tx, r); err != nil { + return err + } + return tx.update(tableResource, resourceEntry{r}) +} + +// DeleteResource removes a resource object from the store. +// Returns ErrNotExist if the object doesn't exist. +func DeleteResource(tx Tx, id string) error { + return tx.delete(tableResource, id) +} + +// GetResource looks up a resource object by ID. +// Returns nil if the object doesn't exist. +func GetResource(tx ReadTx, id string) *api.Resource { + r := tx.get(tableResource, id) + if r == nil { + return nil + } + return r.(resourceEntry).Resource +} + +// FindResources selects a set of resource objects and returns them. +func FindResources(tx ReadTx, by By) ([]*api.Resource, error) { + checkType := func(by By) error { + switch by.(type) { + case byIDPrefix, byName, byNamePrefix, byKind, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + resourceList := []*api.Resource{} + appendResult := func(o api.StoreObject) { + resourceList = append(resourceList, o.(resourceEntry).Resource) + } + + err := tx.find(tableResource, by, checkType, appendResult) + return resourceList, err +} + +type resourceIndexerByKind struct{} + +func (ri resourceIndexerByKind) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ri resourceIndexerByKind) FromObject(obj interface{}) (bool, []byte, error) { + r := obj.(resourceEntry) + + // Add the null character as a terminator + val := r.Resource.Kind + "\x00" + return true, []byte(val), nil +} + +type resourceIndexerByID struct{} + +func (indexer resourceIndexerByID) FromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByID{}.FromArgs(args...) +} +func (indexer resourceIndexerByID) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByID{}.PrefixFromArgs(args...) +} +func (indexer resourceIndexerByID) FromObject(obj interface{}) (bool, []byte, error) { + return api.ResourceIndexerByID{}.FromObject(obj.(resourceEntry).Resource) +} + +type resourceIndexerByName struct{} + +func (indexer resourceIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByName{}.FromArgs(args...) 
+} +func (indexer resourceIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceIndexerByName{}.PrefixFromArgs(args...) +} +func (indexer resourceIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + return api.ResourceIndexerByName{}.FromObject(obj.(resourceEntry).Resource) +} + +type resourceCustomIndexer struct{} + +func (indexer resourceCustomIndexer) FromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceCustomIndexer{}.FromArgs(args...) +} +func (indexer resourceCustomIndexer) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return api.ResourceCustomIndexer{}.PrefixFromArgs(args...) +} +func (indexer resourceCustomIndexer) FromObject(obj interface{}) (bool, [][]byte, error) { + return api.ResourceCustomIndexer{}.FromObject(obj.(resourceEntry).Resource) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/secrets.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/secrets.go new file mode 100644 index 00000000..bf5653fd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/secrets.go @@ -0,0 +1,122 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + memdb "github.com/hashicorp/go-memdb" +) + +const tableSecret = "secret" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableSecret, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.SecretIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.SecretIndexerByName{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.SecretCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Secrets, err = FindSecrets(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Secrets)) + for i, x := range snapshot.Secrets { + toStoreObj[i] = x + } + return RestoreTable(tx, tableSecret, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Secret: + obj := v.Secret + switch sa.Action { + case api.StoreActionKindCreate: + return CreateSecret(tx, obj) + case api.StoreActionKindUpdate: + return UpdateSecret(tx, obj) + case api.StoreActionKindRemove: + return DeleteSecret(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateSecret adds a new secret to the store. +// Returns ErrExist if the ID is already taken. +func CreateSecret(tx Tx, s *api.Secret) error { + // Ensure the name is not already in use. + if tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableSecret, s) +} + +// UpdateSecret updates an existing secret in the store. +// Returns ErrNotExist if the secret doesn't exist. +func UpdateSecret(tx Tx, s *api.Secret) error { + // Ensure the name is either not in use or already used by this same Secret. + if existing := tx.lookup(tableSecret, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != s.ID { + return ErrNameConflict + } + } + + return tx.update(tableSecret, s) +} + +// DeleteSecret removes a secret from the store. +// Returns ErrNotExist if the secret doesn't exist. 
+func DeleteSecret(tx Tx, id string) error { + return tx.delete(tableSecret, id) +} + +// GetSecret looks up a secret by ID. +// Returns nil if the secret doesn't exist. +func GetSecret(tx ReadTx, id string) *api.Secret { + n := tx.get(tableSecret, id) + if n == nil { + return nil + } + return n.(*api.Secret) +} + +// FindSecrets selects a set of secrets and returns them. +func FindSecrets(tx ReadTx, by By) ([]*api.Secret, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + secretList := []*api.Secret{} + appendResult := func(o api.StoreObject) { + secretList = append(secretList, o.(*api.Secret)) + } + + err := tx.find(tableSecret, by, checkType, appendResult) + return secretList, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/services.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/services.go new file mode 100644 index 00000000..a5817373 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/services.go @@ -0,0 +1,238 @@ +package store + +import ( + "strings" + + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/api/naming" + memdb "github.com/hashicorp/go-memdb" +) + +const tableService = "service" + +func init() { + register(ObjectStoreConfig{ + Table: &memdb.TableSchema{ + Name: tableService, + Indexes: map[string]*memdb.IndexSchema{ + indexID: { + Name: indexID, + Unique: true, + Indexer: api.ServiceIndexerByID{}, + }, + indexName: { + Name: indexName, + Unique: true, + Indexer: api.ServiceIndexerByName{}, + }, + indexRuntime: { + Name: indexRuntime, + AllowMissing: true, + Indexer: serviceIndexerByRuntime{}, + }, + indexNetwork: { + Name: indexNetwork, + AllowMissing: true, + Indexer: serviceIndexerByNetwork{}, + }, + indexSecret: { + Name: indexSecret, + AllowMissing: true, + Indexer: serviceIndexerBySecret{}, + }, + indexConfig: { + Name: indexConfig, + AllowMissing: true, + Indexer: serviceIndexerByConfig{}, + }, + indexCustom: { + Name: indexCustom, + Indexer: api.ServiceCustomIndexer{}, + AllowMissing: true, + }, + }, + }, + Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error { + var err error + snapshot.Services, err = FindServices(tx, All) + return err + }, + Restore: func(tx Tx, snapshot *api.StoreSnapshot) error { + toStoreObj := make([]api.StoreObject, len(snapshot.Services)) + for i, x := range snapshot.Services { + toStoreObj[i] = x + } + return RestoreTable(tx, tableService, toStoreObj) + }, + ApplyStoreAction: func(tx Tx, sa api.StoreAction) error { + switch v := sa.Target.(type) { + case *api.StoreAction_Service: + obj := v.Service + switch sa.Action { + case api.StoreActionKindCreate: + return CreateService(tx, obj) + case api.StoreActionKindUpdate: + return UpdateService(tx, obj) + case api.StoreActionKindRemove: + return DeleteService(tx, obj.ID) + } + } + return errUnknownStoreAction + }, + }) +} + +// CreateService adds a new service to the store. +// Returns ErrExist if the ID is already taken. +func CreateService(tx Tx, s *api.Service) error { + // Ensure the name is not already in use. + if tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)) != nil { + return ErrNameConflict + } + + return tx.create(tableService, s) +} + +// UpdateService updates an existing service in the store. 
+// Returns ErrNotExist if the service doesn't exist. +func UpdateService(tx Tx, s *api.Service) error { + // Ensure the name is either not in use or already used by this same Service. + if existing := tx.lookup(tableService, indexName, strings.ToLower(s.Spec.Annotations.Name)); existing != nil { + if existing.GetID() != s.ID { + return ErrNameConflict + } + } + + return tx.update(tableService, s) +} + +// DeleteService removes a service from the store. +// Returns ErrNotExist if the service doesn't exist. +func DeleteService(tx Tx, id string) error { + return tx.delete(tableService, id) +} + +// GetService looks up a service by ID. +// Returns nil if the service doesn't exist. +func GetService(tx ReadTx, id string) *api.Service { + s := tx.get(tableService, id) + if s == nil { + return nil + } + return s.(*api.Service) +} + +// FindServices selects a set of services and returns them. +func FindServices(tx ReadTx, by By) ([]*api.Service, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byRuntime, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byCustom, byCustomPrefix, byAll: + return nil + default: + return ErrInvalidFindBy + } + } + + serviceList := []*api.Service{} + appendResult := func(o api.StoreObject) { + serviceList = append(serviceList, o.(*api.Service)) + } + + err := tx.find(tableService, by, checkType, appendResult) + return serviceList, err +} + +type serviceIndexerByRuntime struct{} + +func (si serviceIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (si serviceIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) { + s := obj.(*api.Service) + r, err := naming.Runtime(s.Spec.Task) + if err != nil { + return false, nil, nil + } + return true, []byte(r + "\x00"), nil +} + +func (si serviceIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type serviceIndexerByNetwork struct{} + +func (si serviceIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (si serviceIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) { + s := obj.(*api.Service) + + var networkIDs [][]byte + + specNetworks := s.Spec.Task.Networks + + if len(specNetworks) == 0 { + specNetworks = s.Spec.Networks + } + + for _, na := range specNetworks { + // Add the null character as a terminator + networkIDs = append(networkIDs, []byte(na.Target+"\x00")) + } + + return len(networkIDs) != 0, networkIDs, nil +} + +type serviceIndexerBySecret struct{} + +func (si serviceIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (si serviceIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) { + s := obj.(*api.Service) + + container := s.Spec.Task.GetContainer() + if container == nil { + return false, nil, nil + } + + var secretIDs [][]byte + + for _, secretRef := range container.Secrets { + // Add the null character as a terminator + secretIDs = append(secretIDs, []byte(secretRef.SecretID+"\x00")) + } + + return len(secretIDs) != 0, secretIDs, nil +} + +type serviceIndexerByConfig struct{} + +func (si serviceIndexerByConfig) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) 
+}
+
+func (si serviceIndexerByConfig) FromObject(obj interface{}) (bool, [][]byte, error) {
+	s, ok := obj.(*api.Service)
+	if !ok {
+		panic("unexpected type passed to FromObject")
+	}
+
+	container := s.Spec.Task.GetContainer()
+	if container == nil {
+		return false, nil, nil
+	}
+
+	var configIDs [][]byte
+
+	for _, configRef := range container.Configs {
+		// Add the null character as a terminator
+		configIDs = append(configIDs, []byte(configRef.ConfigID+"\x00"))
+	}
+
+	return len(configIDs) != 0, configIDs, nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/tasks.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/tasks.go
new file mode 100644
index 00000000..bf31d764
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/store/tasks.go
@@ -0,0 +1,331 @@
+package store
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/docker/swarmkit/api"
+	"github.com/docker/swarmkit/api/naming"
+	memdb "github.com/hashicorp/go-memdb"
+)
+
+const tableTask = "task"
+
+func init() {
+	register(ObjectStoreConfig{
+		Table: &memdb.TableSchema{
+			Name: tableTask,
+			Indexes: map[string]*memdb.IndexSchema{
+				indexID: {
+					Name:    indexID,
+					Unique:  true,
+					Indexer: api.TaskIndexerByID{},
+				},
+				indexName: {
+					Name:         indexName,
+					AllowMissing: true,
+					Indexer:      taskIndexerByName{},
+				},
+				indexRuntime: {
+					Name:         indexRuntime,
+					AllowMissing: true,
+					Indexer:      taskIndexerByRuntime{},
+				},
+				indexServiceID: {
+					Name:         indexServiceID,
+					AllowMissing: true,
+					Indexer:      taskIndexerByServiceID{},
+				},
+				indexNodeID: {
+					Name:         indexNodeID,
+					AllowMissing: true,
+					Indexer:      taskIndexerByNodeID{},
+				},
+				indexSlot: {
+					Name:         indexSlot,
+					AllowMissing: true,
+					Indexer:      taskIndexerBySlot{},
+				},
+				indexDesiredState: {
+					Name:    indexDesiredState,
+					Indexer: taskIndexerByDesiredState{},
+				},
+				indexTaskState: {
+					Name:    indexTaskState,
+					Indexer: taskIndexerByTaskState{},
+				},
+				indexNetwork: {
+					Name:         indexNetwork,
+					AllowMissing: true,
+					Indexer:      taskIndexerByNetwork{},
+				},
+				indexSecret: {
+					Name:         indexSecret,
+					AllowMissing: true,
+					Indexer:      taskIndexerBySecret{},
+				},
+				indexConfig: {
+					Name:         indexConfig,
+					AllowMissing: true,
+					Indexer:      taskIndexerByConfig{},
+				},
+				indexCustom: {
+					Name:         indexCustom,
+					Indexer:      api.TaskCustomIndexer{},
+					AllowMissing: true,
+				},
+			},
+		},
+		Save: func(tx ReadTx, snapshot *api.StoreSnapshot) error {
+			var err error
+			snapshot.Tasks, err = FindTasks(tx, All)
+			return err
+		},
+		Restore: func(tx Tx, snapshot *api.StoreSnapshot) error {
+			toStoreObj := make([]api.StoreObject, len(snapshot.Tasks))
+			for i, x := range snapshot.Tasks {
+				toStoreObj[i] = x
+			}
+			return RestoreTable(tx, tableTask, toStoreObj)
+		},
+		ApplyStoreAction: func(tx Tx, sa api.StoreAction) error {
+			switch v := sa.Target.(type) {
+			case *api.StoreAction_Task:
+				obj := v.Task
+				switch sa.Action {
+				case api.StoreActionKindCreate:
+					return CreateTask(tx, obj)
+				case api.StoreActionKindUpdate:
+					return UpdateTask(tx, obj)
+				case api.StoreActionKindRemove:
+					return DeleteTask(tx, obj.ID)
+				}
+			}
+			return errUnknownStoreAction
+		},
+	})
+}
+
+// CreateTask adds a new task to the store.
+// Returns ErrExist if the ID is already taken.
+func CreateTask(tx Tx, t *api.Task) error {
+	return tx.create(tableTask, t)
+}
+
+// UpdateTask updates an existing task in the store.
+// Returns ErrNotExist if the task doesn't exist.
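+//
+// A hedged sketch (editor's addition; taskID and the chosen desired state
+// are assumptions): because tx.update enforces the stored Version, a stale
+// read surfaces as ErrSequenceConflict and the read-modify-write should be
+// retried:
+//
+//	err := s.Update(func(tx Tx) error {
+//		t := GetTask(tx, taskID)
+//		if t == nil {
+//			return ErrNotExist
+//		}
+//		t.DesiredState = api.TaskStateShutdown
+//		return UpdateTask(tx, t)
+//	})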
+func UpdateTask(tx Tx, t *api.Task) error { + return tx.update(tableTask, t) +} + +// DeleteTask removes a task from the store. +// Returns ErrNotExist if the task doesn't exist. +func DeleteTask(tx Tx, id string) error { + return tx.delete(tableTask, id) +} + +// GetTask looks up a task by ID. +// Returns nil if the task doesn't exist. +func GetTask(tx ReadTx, id string) *api.Task { + t := tx.get(tableTask, id) + if t == nil { + return nil + } + return t.(*api.Task) +} + +// FindTasks selects a set of tasks and returns them. +func FindTasks(tx ReadTx, by By) ([]*api.Task, error) { + checkType := func(by By) error { + switch by.(type) { + case byName, byNamePrefix, byIDPrefix, byRuntime, byDesiredState, byTaskState, byNode, byService, bySlot, byReferencedNetworkID, byReferencedSecretID, byReferencedConfigID, byCustom, byCustomPrefix: + return nil + default: + return ErrInvalidFindBy + } + } + + taskList := []*api.Task{} + appendResult := func(o api.StoreObject) { + taskList = append(taskList, o.(*api.Task)) + } + + err := tx.find(tableTask, by, checkType, appendResult) + return taskList, err +} + +type taskIndexerByName struct{} + +func (ti taskIndexerByName) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByName) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + name := naming.Task(t) + + // Add the null character as a terminator + return true, []byte(strings.ToLower(name) + "\x00"), nil +} + +func (ti taskIndexerByName) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type taskIndexerByRuntime struct{} + +func (ti taskIndexerByRuntime) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByRuntime) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + r, err := naming.Runtime(t.Spec) + if err != nil { + return false, nil, nil + } + return true, []byte(r + "\x00"), nil +} + +func (ti taskIndexerByRuntime) PrefixFromArgs(args ...interface{}) ([]byte, error) { + return prefixFromArgs(args...) +} + +type taskIndexerByServiceID struct{} + +func (ti taskIndexerByServiceID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByServiceID) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + val := t.ServiceID + "\x00" + return true, []byte(val), nil +} + +type taskIndexerByNodeID struct{} + +func (ti taskIndexerByNodeID) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByNodeID) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + val := t.NodeID + "\x00" + return true, []byte(val), nil +} + +type taskIndexerBySlot struct{} + +func (ti taskIndexerBySlot) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerBySlot) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + val := t.ServiceID + "\x00" + strconv.FormatUint(t.Slot, 10) + "\x00" + return true, []byte(val), nil +} + +type taskIndexerByDesiredState struct{} + +func (ti taskIndexerByDesiredState) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) 
+} + +func (ti taskIndexerByDesiredState) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(t.DesiredState), 10) + "\x00"), nil +} + +type taskIndexerByNetwork struct{} + +func (ti taskIndexerByNetwork) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByNetwork) FromObject(obj interface{}) (bool, [][]byte, error) { + t := obj.(*api.Task) + + var networkIDs [][]byte + + for _, na := range t.Spec.Networks { + // Add the null character as a terminator + networkIDs = append(networkIDs, []byte(na.Target+"\x00")) + } + + return len(networkIDs) != 0, networkIDs, nil +} + +type taskIndexerBySecret struct{} + +func (ti taskIndexerBySecret) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerBySecret) FromObject(obj interface{}) (bool, [][]byte, error) { + t := obj.(*api.Task) + + container := t.Spec.GetContainer() + if container == nil { + return false, nil, nil + } + + var secretIDs [][]byte + + for _, secretRef := range container.Secrets { + // Add the null character as a terminator + secretIDs = append(secretIDs, []byte(secretRef.SecretID+"\x00")) + } + + return len(secretIDs) != 0, secretIDs, nil +} + +type taskIndexerByConfig struct{} + +func (ti taskIndexerByConfig) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ti taskIndexerByConfig) FromObject(obj interface{}) (bool, [][]byte, error) { + t, ok := obj.(*api.Task) + if !ok { + panic("unexpected type passed to FromObject") + } + + container := t.Spec.GetContainer() + if container == nil { + return false, nil, nil + } + + var configIDs [][]byte + + for _, configRef := range container.Configs { + // Add the null character as a terminator + configIDs = append(configIDs, []byte(configRef.ConfigID+"\x00")) + } + + return len(configIDs) != 0, configIDs, nil +} + +type taskIndexerByTaskState struct{} + +func (ts taskIndexerByTaskState) FromArgs(args ...interface{}) ([]byte, error) { + return fromArgs(args...) +} + +func (ts taskIndexerByTaskState) FromObject(obj interface{}) (bool, []byte, error) { + t := obj.(*api.Task) + + // Add the null character as a terminator + return true, []byte(strconv.FormatInt(int64(t.Status.State), 10) + "\x00"), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/watch.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/watch.go new file mode 100644 index 00000000..ad0ebd75 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/manager/state/watch.go @@ -0,0 +1,74 @@ +package state + +import ( + "github.com/docker/go-events" + "github.com/docker/swarmkit/api" + "github.com/docker/swarmkit/watch" +) + +// EventCommit delineates a transaction boundary. +type EventCommit struct { + Version *api.Version +} + +// Matches returns true if this event is a commit event. +func (e EventCommit) Matches(watchEvent events.Event) bool { + _, ok := watchEvent.(EventCommit) + return ok +} + +// TaskCheckStateGreaterThan is a TaskCheckFunc for checking task state. +func TaskCheckStateGreaterThan(t1, t2 *api.Task) bool { + return t2.Status.State > t1.Status.State +} + +// NodeCheckState is a NodeCheckFunc for matching node state. 
+func NodeCheckState(n1, n2 *api.Node) bool { + return n1.Status.State == n2.Status.State +} + +// Watch takes a variable number of events to match against. The subscriber +// will receive events that match any of the arguments passed to Watch. +// +// Examples: +// +// // subscribe to all events +// Watch(q) +// +// // subscribe to all UpdateTask events +// Watch(q, EventUpdateTask{}) +// +// // subscribe to all task-related events +// Watch(q, EventUpdateTask{}, EventCreateTask{}, EventDeleteTask{}) +// +// // subscribe to UpdateTask for node 123 +// Watch(q, EventUpdateTask{Task: &api.Task{NodeID: 123}, +// Checks: []TaskCheckFunc{TaskCheckNodeID}}) +// +// // subscribe to UpdateTask for node 123, as well as CreateTask +// // for node 123 that also has ServiceID set to "abc" +// Watch(q, EventUpdateTask{Task: &api.Task{NodeID: 123}, +// Checks: []TaskCheckFunc{TaskCheckNodeID}}, +// EventCreateTask{Task: &api.Task{NodeID: 123, ServiceID: "abc"}, +// Checks: []TaskCheckFunc{TaskCheckNodeID, +// func(t1, t2 *api.Task) bool { +// return t1.ServiceID == t2.ServiceID +// }}}) +func Watch(queue *watch.Queue, specifiers ...api.Event) (eventq chan events.Event, cancel func()) { + if len(specifiers) == 0 { + return queue.Watch() + } + return queue.CallbackWatch(Matcher(specifiers...)) +} + +// Matcher returns an events.Matcher that Matches the specifiers with OR logic. +func Matcher(specifiers ...api.Event) events.MatcherFunc { + return events.MatcherFunc(func(event events.Event) bool { + for _, s := range specifiers { + if s.Matches(event) { + return true + } + } + return false + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go new file mode 100644 index 00000000..daea795b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/helpers.go @@ -0,0 +1,11 @@ +package plugin + +import ( + "github.com/gogo/protobuf/proto" + google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +// DeepcopyEnabled returns true if deepcopy is enabled for the descriptor. +func DeepcopyEnabled(options *google_protobuf.MessageOptions) bool { + return proto.GetBoolExtension(options, E_Deepcopy, true) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go new file mode 100644 index 00000000..0d08eb6e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go @@ -0,0 +1,1225 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: github.com/docker/swarmkit/protobuf/plugin/plugin.proto + +/* + Package plugin is a generated protocol buffer package. + + It is generated from these files: + github.com/docker/swarmkit/protobuf/plugin/plugin.proto + + It has these top-level messages: + WatchSelectors + StoreObject + TLSAuthorization +*/ +package plugin + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +import deepcopy "github.com/docker/swarmkit/api/deepcopy" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type WatchSelectors struct { + // supported by all object types + ID *bool `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + IDPrefix *bool `protobuf:"varint,2,opt,name=id_prefix,json=idPrefix" json:"id_prefix,omitempty"` + Name *bool `protobuf:"varint,3,opt,name=name" json:"name,omitempty"` + NamePrefix *bool `protobuf:"varint,4,opt,name=name_prefix,json=namePrefix" json:"name_prefix,omitempty"` + Custom *bool `protobuf:"varint,5,opt,name=custom" json:"custom,omitempty"` + CustomPrefix *bool `protobuf:"varint,6,opt,name=custom_prefix,json=customPrefix" json:"custom_prefix,omitempty"` + // supported by tasks only + ServiceID *bool `protobuf:"varint,7,opt,name=service_id,json=serviceId" json:"service_id,omitempty"` + NodeID *bool `protobuf:"varint,8,opt,name=node_id,json=nodeId" json:"node_id,omitempty"` + Slot *bool `protobuf:"varint,9,opt,name=slot" json:"slot,omitempty"` + DesiredState *bool `protobuf:"varint,10,opt,name=desired_state,json=desiredState" json:"desired_state,omitempty"` + // supported by nodes only + Role *bool `protobuf:"varint,11,opt,name=role" json:"role,omitempty"` + Membership *bool `protobuf:"varint,12,opt,name=membership" json:"membership,omitempty"` + // supported by: resource + Kind *bool `protobuf:"varint,13,opt,name=kind" json:"kind,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *WatchSelectors) Reset() { *m = WatchSelectors{} } +func (*WatchSelectors) ProtoMessage() {} +func (*WatchSelectors) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +type StoreObject struct { + WatchSelectors *WatchSelectors `protobuf:"bytes,1,req,name=watch_selectors,json=watchSelectors" json:"watch_selectors,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *StoreObject) Reset() { *m = StoreObject{} } +func (*StoreObject) ProtoMessage() {} +func (*StoreObject) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +type TLSAuthorization struct { + // Roles contains the acceptable TLS OU roles for the handler. + Roles []string `protobuf:"bytes,1,rep,name=roles" json:"roles,omitempty"` + // Insecure is set to true if this method does not require + // authorization. NOTE: Specifying both "insecure" and a nonempty + // list of roles is invalid. This would fail at codegen time. 
+ Insecure *bool `protobuf:"varint,2,opt,name=insecure" json:"insecure,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *TLSAuthorization) Reset() { *m = TLSAuthorization{} } +func (*TLSAuthorization) ProtoMessage() {} +func (*TLSAuthorization) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{2} } + +var E_Deepcopy = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 70000, + Name: "docker.protobuf.plugin.deepcopy", + Tag: "varint,70000,opt,name=deepcopy,def=1", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_StoreObject = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*StoreObject)(nil), + Field: 70001, + Name: "docker.protobuf.plugin.store_object", + Tag: "bytes,70001,opt,name=store_object,json=storeObject", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +var E_TlsAuthorization = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*TLSAuthorization)(nil), + Field: 73626345, + Name: "docker.protobuf.plugin.tls_authorization", + Tag: "bytes,73626345,opt,name=tls_authorization,json=tlsAuthorization", + Filename: "github.com/docker/swarmkit/protobuf/plugin/plugin.proto", +} + +func init() { + proto.RegisterType((*WatchSelectors)(nil), "docker.protobuf.plugin.WatchSelectors") + proto.RegisterType((*StoreObject)(nil), "docker.protobuf.plugin.StoreObject") + proto.RegisterType((*TLSAuthorization)(nil), "docker.protobuf.plugin.TLSAuthorization") + proto.RegisterExtension(E_Deepcopy) + proto.RegisterExtension(E_StoreObject) + proto.RegisterExtension(E_TlsAuthorization) +} + +func (m *WatchSelectors) Copy() *WatchSelectors { + if m == nil { + return nil + } + o := &WatchSelectors{} + o.CopyFrom(m) + return o +} + +func (m *WatchSelectors) CopyFrom(src interface{}) { + + o := src.(*WatchSelectors) + *m = *o +} + +func (m *StoreObject) Copy() *StoreObject { + if m == nil { + return nil + } + o := &StoreObject{} + o.CopyFrom(m) + return o +} + +func (m *StoreObject) CopyFrom(src interface{}) { + + o := src.(*StoreObject) + *m = *o + if o.WatchSelectors != nil { + m.WatchSelectors = &WatchSelectors{} + deepcopy.Copy(m.WatchSelectors, o.WatchSelectors) + } +} + +func (m *TLSAuthorization) Copy() *TLSAuthorization { + if m == nil { + return nil + } + o := &TLSAuthorization{} + o.CopyFrom(m) + return o +} + +func (m *TLSAuthorization) CopyFrom(src interface{}) { + + o := src.(*TLSAuthorization) + *m = *o + if o.Roles != nil { + m.Roles = make([]string, len(o.Roles)) + copy(m.Roles, o.Roles) + } + +} + +func (m *WatchSelectors) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchSelectors) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.ID != nil { + dAtA[i] = 0x8 + i++ + if *m.ID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.IDPrefix != nil { + dAtA[i] = 0x10 + i++ + if *m.IDPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Name != nil { + dAtA[i] = 0x18 + i++ + if *m.Name { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NamePrefix != nil { + dAtA[i] = 0x20 + i++ + if *m.NamePrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Custom != nil { + dAtA[i] = 0x28 + i++ + if *m.Custom { + dAtA[i] = 1 + } else { + 
dAtA[i] = 0 + } + i++ + } + if m.CustomPrefix != nil { + dAtA[i] = 0x30 + i++ + if *m.CustomPrefix { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.ServiceID != nil { + dAtA[i] = 0x38 + i++ + if *m.ServiceID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.NodeID != nil { + dAtA[i] = 0x40 + i++ + if *m.NodeID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Slot != nil { + dAtA[i] = 0x48 + i++ + if *m.Slot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.DesiredState != nil { + dAtA[i] = 0x50 + i++ + if *m.DesiredState { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Role != nil { + dAtA[i] = 0x58 + i++ + if *m.Role { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Membership != nil { + dAtA[i] = 0x60 + i++ + if *m.Membership { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Kind != nil { + dAtA[i] = 0x68 + i++ + if *m.Kind { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *StoreObject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StoreObject) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.WatchSelectors == nil { + return 0, proto.NewRequiredNotSetError("watch_selectors") + } else { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(m.WatchSelectors.Size())) + n1, err := m.WatchSelectors.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *TLSAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TLSAuthorization) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Insecure != nil { + dAtA[i] = 0x10 + i++ + if *m.Insecure { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} + +func (m *WatchSelectors) Size() (n int) { + var l int + _ = l + if m.ID != nil { + n += 2 + } + if m.IDPrefix != nil { + n += 2 + } + if m.Name != nil { + n += 2 + } + if m.NamePrefix != nil { + n += 2 + } + if m.Custom != nil { + n += 2 + } + if m.CustomPrefix != nil { + n += 2 + } + if m.ServiceID != nil { + n += 2 + } + if m.NodeID != nil { + n += 2 + } + if m.Slot != nil { + n += 2 + } + if m.DesiredState != nil { + n += 2 + } + if m.Role != nil { + n += 2 + } + if m.Membership != nil { + n += 2 + } + if m.Kind != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *StoreObject) Size() (n int) { + var l int + _ = l + if m.WatchSelectors != nil { + l = m.WatchSelectors.Size() + n += 1 + l + sovPlugin(uint64(l)) + } + if m.XXX_unrecognized 
!= nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *TLSAuthorization) Size() (n int) { + var l int + _ = l + if len(m.Roles) > 0 { + for _, s := range m.Roles { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + if m.Insecure != nil { + n += 2 + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *WatchSelectors) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchSelectors{`, + `ID:` + valueToStringPlugin(this.ID) + `,`, + `IDPrefix:` + valueToStringPlugin(this.IDPrefix) + `,`, + `Name:` + valueToStringPlugin(this.Name) + `,`, + `NamePrefix:` + valueToStringPlugin(this.NamePrefix) + `,`, + `Custom:` + valueToStringPlugin(this.Custom) + `,`, + `CustomPrefix:` + valueToStringPlugin(this.CustomPrefix) + `,`, + `ServiceID:` + valueToStringPlugin(this.ServiceID) + `,`, + `NodeID:` + valueToStringPlugin(this.NodeID) + `,`, + `Slot:` + valueToStringPlugin(this.Slot) + `,`, + `DesiredState:` + valueToStringPlugin(this.DesiredState) + `,`, + `Role:` + valueToStringPlugin(this.Role) + `,`, + `Membership:` + valueToStringPlugin(this.Membership) + `,`, + `Kind:` + valueToStringPlugin(this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *StoreObject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StoreObject{`, + `WatchSelectors:` + strings.Replace(fmt.Sprintf("%v", this.WatchSelectors), "WatchSelectors", "WatchSelectors", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *TLSAuthorization) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TLSAuthorization{`, + `Roles:` + fmt.Sprintf("%v", this.Roles) + `,`, + `Insecure:` + valueToStringPlugin(this.Insecure) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringPlugin(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *WatchSelectors) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchSelectors: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchSelectors: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ID = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: 
wrong wireType = %d for field IDPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.IDPrefix = &b + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Name = &b + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NamePrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NamePrefix = &b + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Custom", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Custom = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CustomPrefix", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.CustomPrefix = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ServiceID = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.NodeID = &b + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Slot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Slot = &b + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredState", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DesiredState = &b + case 11: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Role = &b + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Membership", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Membership = &b + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StoreObject) Unmarshal(dAtA []byte) error { + var hasFields [1]uint64 + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StoreObject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StoreObject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WatchSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WatchSelectors == nil { + m.WatchSelectors = &WatchSelectors{} + } + if err := m.WatchSelectors.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + hasFields[0] |= uint64(0x00000001) + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + if hasFields[0]&uint64(0x00000001) == 0 { + return proto.NewRequiredNotSetError("watch_selectors") + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TLSAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TLSAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TLSAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Insecure", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Insecure = &b + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("github.com/docker/swarmkit/protobuf/plugin/plugin.proto", fileDescriptorPlugin) +} + +var fileDescriptorPlugin = []byte{ + // 575 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0xc1, 0x6e, 0xd3, 0x4c, + 0x10, 0xae, 0xd3, 0x36, 0x4d, 0x26, 0x69, 0xff, 0xfe, 0x2b, 0x54, 0xad, 0x7a, 0x70, 0xaa, 0x46, + 0x42, 0x41, 0x42, 0x8e, 0xd4, 0x0b, 0x52, 0x6e, 0x94, 0x5c, 0x22, 0x01, 0x45, 0x0e, 0x12, 0x37, + 0x22, 0xc7, 0x3b, 0x4d, 0x96, 0x3a, 0x5e, 0x6b, 0x77, 0x4d, 0x0a, 0x27, 0x5e, 0x80, 0x07, 0xe0, + 0xca, 0xd3, 0xf4, 0xc8, 0x91, 0x53, 0x44, 0x2d, 0x71, 0xe0, 0x06, 0x6f, 0x80, 0x76, 0xd7, 0x69, + 0x08, 0x6a, 0xc5, 0xc9, 0x33, 0xdf, 0x7c, 0xdf, 0xcc, 0x7c, 0x3b, 0x86, 0x47, 0x13, 0xae, 0xa7, + 0xf9, 0x38, 0x88, 0xc5, 0xac, 0xcb, 0x44, 0x7c, 0x81, 0xb2, 0xab, 0xe6, 0x91, 0x9c, 0x5d, 0x70, + 0xdd, 0xcd, 0xa4, 0xd0, 0x62, 0x9c, 0x9f, 0x77, 0xb3, 0x24, 0x9f, 0xf0, 0xb4, 0xfc, 0x04, 0x16, + 0x26, 0x07, 0x8e, 0x1d, 0x2c, 0x49, 0x81, 0xab, 0x1e, 0x1e, 0x4d, 0x84, 0x98, 0x24, 0xb8, 0x12, + 0x33, 0x54, 0xb1, 0xe4, 0x99, 0x16, 0x25, 0xf7, 0xf8, 0xd3, 0x26, 0xec, 0xbd, 0x8a, 0x74, 0x3c, + 0x1d, 0x62, 0x82, 0xb1, 0x16, 0x52, 0x91, 0x03, 0xa8, 0x70, 0x46, 0xbd, 0x23, 0xaf, 0x53, 0x3b, + 0xad, 0x16, 0x8b, 0x56, 0x65, 0xd0, 0x0f, 0x2b, 0x9c, 0x91, 0x07, 0x50, 0xe7, 0x6c, 0x94, 0x49, + 0x3c, 0xe7, 0x97, 0xb4, 0x62, 0xcb, 0xcd, 0x62, 0xd1, 0xaa, 0x0d, 0xfa, 0x2f, 0x2c, 0x16, 0xd6, + 0x38, 0x73, 0x11, 0x21, 0xb0, 0x95, 0x46, 0x33, 
0xa4, 0x9b, 0x86, 0x15, 0xda, 0x98, 0xb4, 0xa0, + 0x61, 0xbe, 0xcb, 0x06, 0x5b, 0xb6, 0x04, 0x06, 0x2a, 0x45, 0x07, 0x50, 0x8d, 0x73, 0xa5, 0xc5, + 0x8c, 0x6e, 0xdb, 0x5a, 0x99, 0x91, 0x36, 0xec, 0xba, 0x68, 0x29, 0xad, 0xda, 0x72, 0xd3, 0x81, + 0xa5, 0xf8, 0x21, 0x80, 0x42, 0xf9, 0x96, 0xc7, 0x38, 0xe2, 0x8c, 0xee, 0xd8, 0xed, 0x76, 0x8b, + 0x45, 0xab, 0x3e, 0x74, 0xe8, 0xa0, 0x1f, 0xd6, 0x4b, 0xc2, 0x80, 0x91, 0x36, 0xec, 0xa4, 0x82, + 0x59, 0x6a, 0xcd, 0x52, 0xa1, 0x58, 0xb4, 0xaa, 0xcf, 0x05, 0x33, 0xbc, 0xaa, 0x29, 0x0d, 0x98, + 0x31, 0xa1, 0x12, 0xa1, 0x69, 0xdd, 0x99, 0x30, 0xb1, 0xd9, 0x85, 0xa1, 0xe2, 0x12, 0xd9, 0x48, + 0xe9, 0x48, 0x23, 0x05, 0xb7, 0x4b, 0x09, 0x0e, 0x0d, 0x66, 0x84, 0x52, 0x24, 0x48, 0x1b, 0x4e, + 0x68, 0x62, 0xe2, 0x03, 0xcc, 0x70, 0x36, 0x46, 0xa9, 0xa6, 0x3c, 0xa3, 0x4d, 0x67, 0x7e, 0x85, + 0x18, 0xcd, 0x05, 0x4f, 0x19, 0xdd, 0x75, 0x1a, 0x13, 0x1f, 0xbf, 0x86, 0xc6, 0x50, 0x0b, 0x89, + 0x67, 0xe3, 0x37, 0x18, 0x6b, 0x72, 0x06, 0xff, 0xcd, 0xcd, 0xa5, 0x46, 0x6a, 0x79, 0x2a, 0xea, + 0x1d, 0x55, 0x3a, 0x8d, 0x93, 0xfb, 0xc1, 0xed, 0xe7, 0x0f, 0xd6, 0x0f, 0x1b, 0xee, 0xcd, 0xd7, + 0xf2, 0xe3, 0x3e, 0xec, 0xbf, 0x7c, 0x3a, 0x7c, 0x9c, 0xeb, 0xa9, 0x90, 0xfc, 0x7d, 0xa4, 0xb9, + 0x48, 0xc9, 0x3d, 0xd8, 0x36, 0xfb, 0x9a, 0xd6, 0x9b, 0x9d, 0x7a, 0xe8, 0x12, 0x72, 0x08, 0x35, + 0x9e, 0x2a, 0x8c, 0x73, 0x89, 0xee, 0xf2, 0xe1, 0x4d, 0xde, 0x7b, 0x02, 0x35, 0x86, 0x98, 0xc5, + 0x22, 0x7b, 0x47, 0x5a, 0x81, 0xfb, 0xe1, 0x56, 0x9b, 0x3c, 0x43, 0xa5, 0xa2, 0x09, 0x9e, 0x65, + 0xa6, 0xbb, 0xa2, 0x3f, 0x3f, 0xdb, 0xbb, 0xf7, 0xb6, 0xb4, 0xcc, 0x31, 0xbc, 0x11, 0xf6, 0x38, + 0x34, 0x95, 0xb1, 0x3a, 0x12, 0xce, 0xeb, 0x3f, 0x1b, 0xfd, 0xb2, 0x8d, 0x1a, 0x27, 0xed, 0xbb, + 0xbc, 0xff, 0xf1, 0x72, 0x61, 0x43, 0xad, 0x92, 0xde, 0x25, 0xfc, 0xaf, 0x13, 0x35, 0x8a, 0xd6, + 0x6c, 0xfb, 0xb7, 0xcc, 0xd3, 0x53, 0xc1, 0x96, 0xe3, 0x7e, 0x7c, 0xff, 0xd8, 0xb6, 0xf3, 0x3a, + 0x77, 0xcd, 0xfb, 0xfb, 0x25, 0xc3, 0x7d, 0x9d, 0xa8, 0x35, 0xe4, 0x94, 0x5e, 0x5d, 0xfb, 0x1b, + 0x5f, 0xaf, 0xfd, 0x8d, 0x0f, 0x85, 0xef, 0x5d, 0x15, 0xbe, 0xf7, 0xa5, 0xf0, 0xbd, 0x6f, 0x85, + 0xef, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xb3, 0x99, 0x7d, 0xfb, 0xf9, 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto new file mode 100644 index 00000000..312517d7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/protobuf/plugin/plugin.proto @@ -0,0 +1,53 @@ +syntax = "proto2"; + +package docker.protobuf.plugin; + +import "google/protobuf/descriptor.proto"; + +message WatchSelectors { + // supported by all object types + optional bool id = 1; + optional bool id_prefix = 2; + optional bool name = 3; + optional bool name_prefix = 4; + optional bool custom = 5; + optional bool custom_prefix = 6; + + // supported by tasks only + optional bool service_id = 7; + optional bool node_id = 8; + optional bool slot = 9; + optional bool desired_state = 10; + + // supported by nodes only + optional bool role = 11; + optional bool membership = 12; + + // supported by: resource + optional bool kind = 13; +} + +message StoreObject { + required WatchSelectors watch_selectors = 1; +} + +extend google.protobuf.MessageOptions { + optional bool deepcopy = 70000 [default=true]; + optional StoreObject store_object = 70001; +} + +message TLSAuthorization { + // Roles contains the acceptable TLS OU roles for the handler. 
+	repeated string roles = 1;
+
+	// Insecure is set to true if this method does not require
+	// authorization. NOTE: Specifying both "insecure" and a nonempty
+	// list of roles is invalid. This would fail at codegen time.
+	optional bool insecure = 2;
+}
+
+extend google.protobuf.MethodOptions {
+	// TLSAuthorization contains the authorization parameters for this
+	// method.
+	optional TLSAuthorization tls_authorization = 73626345;
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/remotes/remotes.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/remotes/remotes.go
new file mode 100644
index 00000000..e79ed3f3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/remotes/remotes.go
@@ -0,0 +1,203 @@
+package remotes
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+
+	"github.com/docker/swarmkit/api"
+)
+
+var errRemotesUnavailable = fmt.Errorf("no remote hosts provided")
+
+// DefaultObservationWeight provides a weight to use for positive observations
+// that will balance well under repeated observations.
+const DefaultObservationWeight = 10
+
+// Remotes keeps track of remote addresses by weight, informed by
+// observations.
+type Remotes interface {
+	// Weights returns the remotes with their current weights.
+	Weights() map[api.Peer]int
+
+	// Select returns a remote from the set of available remotes, optionally
+	// excluding particular IDs or addresses.
+	Select(...string) (api.Peer, error)
+
+	// Observe records an experience with a particular remote. A positive weight
+	// indicates a good experience and a negative weight a bad experience.
+	//
+	// The observation will be used to calculate a moving weight, which is
+	// implementation dependent. This method will be called such that repeated
+	// observations of the same master in each session request are favored.
+	Observe(peer api.Peer, weight int)
+
+	// ObserveIfExists records an experience with a particular remote, but only
+	// if the remote already exists.
+	ObserveIfExists(peer api.Peer, weight int)
+
+	// Remove removes the given remotes from the list completely.
+	Remove(addrs ...api.Peer)
+}
+
+// NewRemotes returns a Remotes instance with the provided set of addresses.
+// Entries provided are heavily weighted initially.
+func NewRemotes(peers ...api.Peer) Remotes {
+	mwr := &remotesWeightedRandom{
+		remotes: make(map[api.Peer]int),
+	}
+
+	for _, peer := range peers {
+		mwr.Observe(peer, DefaultObservationWeight)
+	}
+
+	return mwr
+}
+
+type remotesWeightedRandom struct {
+	remotes map[api.Peer]int
+	mu      sync.Mutex
+
+	// workspace to avoid reallocation. these get lazily allocated when
+	// selecting values.
+	cdf   []float64
+	peers []api.Peer
+}
+
+func (mwr *remotesWeightedRandom) Weights() map[api.Peer]int {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	ms := make(map[api.Peer]int, len(mwr.remotes))
+	for addr, weight := range mwr.remotes {
+		ms[addr] = weight
+	}
+
+	return ms
+}
+
+func (mwr *remotesWeightedRandom) Select(excludes ...string) (api.Peer, error) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	// NOTE(stevvooe): We then use a weighted random selection algorithm
+	// (http://stackoverflow.com/questions/4463561/weighted-random-selection-from-array)
+	// to choose the master to connect to.
+	//
+	// It is possible that this is insufficient. The following may inform a
+	// better solution:
+
+	// https://github.com/LK4D4/sample
+	//
+	// The first link applies exponential distribution weight choice reservoir
+	// sampling. This may be relevant if we view the master selection as a
+	// distributed reservoir sampling problem.
+
+	// bias ensures that zero-weighted remotes still have some probability;
+	// otherwise, we would always select the first entry when all are zero.
+	const bias = 0.001
+
+	// clear out workspace
+	mwr.cdf = mwr.cdf[:0]
+	mwr.peers = mwr.peers[:0]
+
+	cum := 0.0
+	// calculate CDF over weights
+Loop:
+	for peer, weight := range mwr.remotes {
+		for _, exclude := range excludes {
+			if peer.NodeID == exclude || peer.Addr == exclude {
+				// if this peer is excluded, skip it by continuing the outer loop
+				continue Loop
+			}
+		}
+		if weight < 0 {
+			// treat these as zero, to keep their selection unlikely.
+			weight = 0
+		}
+
+		cum += float64(weight) + bias
+		mwr.cdf = append(mwr.cdf, cum)
+		mwr.peers = append(mwr.peers, peer)
+	}
+
+	if len(mwr.peers) == 0 {
+		return api.Peer{}, errRemotesUnavailable
+	}
+
+	r := mwr.cdf[len(mwr.cdf)-1] * rand.Float64()
+	i := sort.SearchFloat64s(mwr.cdf, r)
+
+	return mwr.peers[i], nil
+}
+
+func (mwr *remotesWeightedRandom) Observe(peer api.Peer, weight int) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	mwr.observe(peer, float64(weight))
+}
+
+func (mwr *remotesWeightedRandom) ObserveIfExists(peer api.Peer, weight int) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	if _, ok := mwr.remotes[peer]; !ok {
+		return
+	}
+
+	mwr.observe(peer, float64(weight))
+}
+
+func (mwr *remotesWeightedRandom) Remove(addrs ...api.Peer) {
+	mwr.mu.Lock()
+	defer mwr.mu.Unlock()
+
+	for _, addr := range addrs {
+		delete(mwr.remotes, addr)
+	}
+}
+
+const (
+	// remoteWeightSmoothingFactor for exponential smoothing. This adjusts how
+	// much of the observation and the old value we use to calculate the new
+	// value. See
+	// https://en.wikipedia.org/wiki/Exponential_smoothing#Basic_exponential_smoothing
+	// for details.
+	remoteWeightSmoothingFactor = 0.5
+	remoteWeightMax             = 1 << 8
+)
+
+func clip(x float64) float64 {
+	if math.IsNaN(x) {
+		// treat garbage as such; acts like a no-op for us.
+		return 0
+	}
+	return math.Max(math.Min(remoteWeightMax, x), -remoteWeightMax)
+}
+
+func (mwr *remotesWeightedRandom) observe(peer api.Peer, weight float64) {
+
+	// While we have a decent, ad-hoc approach here to weight subsequent
+	// observations, we may want to look into applying forward decay:
+	//
+	// http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf
+	//
+	// We need to get better data from behavior in a cluster.
+
+	// makes the math easier to read below
+	var (
+		w0 = float64(mwr.remotes[peer])
+		w1 = clip(weight)
+	)
+	const α = remoteWeightSmoothingFactor
+
+	// Blend the new value with the current value, applying exponential
+	// smoothing against the old value.
+	wn := clip(α*w1 + (1-α)*w0)
+
+	mwr.remotes[peer] = int(math.Ceil(wn))
+}
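The update in `observe` is basic exponential smoothing, `wn = α*w1 + (1-α)*w0` with `α = 0.5`, clipped to ±256. A standalone sketch that mirrors the formula (not the package internals) shows how repeated observations of the same weight converge geometrically toward it:

package main

import (
	"fmt"
	"math"
)

const (
	alpha     = 0.5    // mirrors remoteWeightSmoothingFactor
	weightMax = 1 << 8 // mirrors remoteWeightMax
)

func clip(x float64) float64 {
	if math.IsNaN(x) {
		return 0
	}
	return math.Max(math.Min(weightMax, x), -weightMax)
}

func main() {
	w := 0.0
	for i := 1; i <= 5; i++ {
		// wn = α·observation + (1-α)·old, as in observe above.
		w = clip(alpha*10 + (1-alpha)*w)
		fmt.Printf("after observation %d: %.4f\n", i, w)
	}
	// Prints 5, 7.5, 8.75, 9.375, 9.6875: each step halves the gap to 10.
}

diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/queue/queue.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/queue/queue.go
new file mode 100644
index 00000000..bb6f92da
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/queue/queue.go
@@ -0,0 +1,158 @@
+package queue
+
+import (
+	"container/list"
+	"fmt"
+	"sync"
+
+	"github.com/docker/go-events"
+	"github.com/sirupsen/logrus"
+)
+
+// ErrQueueFull is returned by a Write operation when that Write causes the
+// queue to reach its size limit.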
+var ErrQueueFull = fmt.Errorf("queue closed due to size limit") + +// LimitQueue accepts all messages into a queue for asynchronous consumption by +// a sink until an upper limit of messages is reached. When that limit is +// reached, the entire Queue is Closed. It is thread safe but the +// sink must be reliable or events will be dropped. +// If a size of 0 is provided, the LimitQueue is considered limitless. +type LimitQueue struct { + dst events.Sink + events *list.List + limit uint64 + cond *sync.Cond + mu sync.Mutex + closed bool + full chan struct{} + fullClosed bool +} + +// NewLimitQueue returns a queue to the provided Sink dst. +func NewLimitQueue(dst events.Sink, limit uint64) *LimitQueue { + eq := LimitQueue{ + dst: dst, + events: list.New(), + limit: limit, + full: make(chan struct{}), + } + + eq.cond = sync.NewCond(&eq.mu) + go eq.run() + return &eq +} + +// Write accepts the events into the queue, only failing if the queue has +// been closed or has reached its size limit. +func (eq *LimitQueue) Write(event events.Event) error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return events.ErrSinkClosed + } + + if eq.limit > 0 && uint64(eq.events.Len()) >= eq.limit { + // If the limit has been reached, don't write the event to the queue, + // and close the Full channel. This notifies listeners that the queue + // is now full, but the sink is still permitted to consume events. It's + // the responsibility of the listener to decide whether they want to + // live with dropped events or whether they want to Close() the + // LimitQueue + if !eq.fullClosed { + eq.fullClosed = true + close(eq.full) + } + return ErrQueueFull + } + + eq.events.PushBack(event) + eq.cond.Signal() // signal waiters + + return nil +} + +// Full returns a channel that is closed when the queue becomes full for the +// first time. +func (eq *LimitQueue) Full() chan struct{} { + return eq.full +} + +// Close shuts down the event queue, flushing all events +func (eq *LimitQueue) Close() error { + eq.mu.Lock() + defer eq.mu.Unlock() + + if eq.closed { + return nil + } + + // set the closed flag + eq.closed = true + eq.cond.Signal() // signal flushes queue + eq.cond.Wait() // wait for signal from last flush + return eq.dst.Close() +} + +// run is the main goroutine to flush events to the target sink. +func (eq *LimitQueue) run() { + for { + event := eq.next() + + if event == nil { + return // nil block means event queue is closed. + } + + if err := eq.dst.Write(event); err != nil { + // TODO(aaronl): Dropping events could be bad depending + // on the application. We should have a way of + // communicating this condition. However, logging + // at a log level above debug may not be appropriate. + // Eventually, go-events should not use logrus at all, + // and should bubble up conditions like this through + // error values. + logrus.WithFields(logrus.Fields{ + "event": event, + "sink": eq.dst, + }).WithError(err).Debug("eventqueue: dropped event") + } + } +} + +// Len returns the number of items that are currently stored in the queue and +// not consumed by its sink. +func (eq *LimitQueue) Len() int { + eq.mu.Lock() + defer eq.mu.Unlock() + return eq.events.Len() +} + +func (eq *LimitQueue) String() string { + eq.mu.Lock() + defer eq.mu.Unlock() + return fmt.Sprintf("%v", eq.events) +} + +// next encompasses the critical section of the run loop. When the queue is +// empty, it will block on the condition. If new data arrives, it will wake +// and return a block. 
When closed, nil will be returned.
+func (eq *LimitQueue) next() events.Event {
+	eq.mu.Lock()
+	defer eq.mu.Unlock()
+
+	for eq.events.Len() < 1 {
+		if eq.closed {
+			eq.cond.Broadcast()
+			return nil
+		}
+
+		eq.cond.Wait()
+	}
+
+	front := eq.events.Front()
+	block := front.Value.(events.Event)
+	eq.events.Remove(front)
+
+	return block
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/sinks.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/sinks.go
new file mode 100644
index 00000000..b22b4842
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/sinks.go
@@ -0,0 +1,95 @@
+package watch
+
+import (
+	"fmt"
+	"time"
+
+	events "github.com/docker/go-events"
+)
+
+// ErrSinkTimeout is returned from the Write method when a sink times out.
+var ErrSinkTimeout = fmt.Errorf("timeout exceeded, tearing down sink")
+
+// timeoutSink is a sink that wraps another sink with a timeout. If the
+// embedded sink fails to complete a Write operation within the specified
+// timeout, the Write operation of the timeoutSink fails.
+type timeoutSink struct {
+	timeout time.Duration
+	sink    events.Sink
+}
+
+func (s timeoutSink) Write(event events.Event) error {
+	errChan := make(chan error)
+	go func(c chan<- error) {
+		c <- s.sink.Write(event)
+	}(errChan)
+
+	timer := time.NewTimer(s.timeout)
+	select {
+	case err := <-errChan:
+		timer.Stop()
+		return err
+	case <-timer.C:
+		s.sink.Close()
+		return ErrSinkTimeout
+	}
+}
+
+func (s timeoutSink) Close() error {
+	return s.sink.Close()
+}
+
+// dropErrClosed is a sink that suppresses ErrSinkClosed from Write, to avoid
+// debug log messages that may be confusing. It is possible that the queue
+// will try to write an event to its destination channel while the queue is
+// being removed from the broadcaster. Since the channel is closed before the
+// queue, there is a narrow window when this is possible. In some event-based
+// systems, dropping events when a sink is removed from a broadcaster is a
+// problem, but for the usage in this watch package that's the expected
+// behavior.
+type dropErrClosed struct {
+	sink events.Sink
+}
+
+func (s dropErrClosed) Write(event events.Event) error {
+	err := s.sink.Write(event)
+	if err == events.ErrSinkClosed {
+		return nil
+	}
+	return err
+}
+
+func (s dropErrClosed) Close() error {
+	return s.sink.Close()
+}
+
+// dropErrClosedChanGen is a ChannelSinkGenerator for dropErrClosed sinks wrapping
+// unbuffered channels.
+type dropErrClosedChanGen struct{}
+
+func (s *dropErrClosedChanGen) NewChannelSink() (events.Sink, *events.Channel) {
+	ch := events.NewChannel(0)
+	return dropErrClosed{sink: ch}, ch
+}
+
+// TimeoutDropErrChanGen is a ChannelSinkGenerator that creates a channel,
+// wrapped by the dropErrClosed sink and a timeout.
+type TimeoutDropErrChanGen struct {
+	timeout time.Duration
+}
+
+// NewChannelSink creates a new sink chain of timeoutSink->dropErrClosed->Channel
+func (s *TimeoutDropErrChanGen) NewChannelSink() (events.Sink, *events.Channel) {
+	ch := events.NewChannel(0)
+	return timeoutSink{
+		timeout: s.timeout,
+		sink: dropErrClosed{
+			sink: ch,
+		},
+	}, ch
+}
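Because the underlying `events.Channel` is unbuffered, a watcher that stops reading would otherwise block publishers indefinitely; the timeout wrapper is what breaks that stall. A hedged sketch of the behavior using only the exported generator (defined next); the stalled reader is deliberately contrived:

package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/watch"
)

func main() {
	gen := watch.NewTimeoutDropErrSinkGen(100 * time.Millisecond)
	sink, ch := gen.NewChannelSink()

	// Nobody reads ch.C, so the unbuffered channel write stalls; after
	// 100ms the timeoutSink closes the chain and reports a timeout.
	err := sink.Write("event")
	fmt.Println(err) // timeout exceeded, tearing down sink
	_ = ch
}

+// NewTimeoutDropErrSinkGen returns a generator of timeoutSinks wrapping dropErrClosed
+// sinks, wrapping unbuffered channel sinks.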
+func NewTimeoutDropErrSinkGen(timeout time.Duration) ChannelSinkGenerator {
	return &TimeoutDropErrChanGen{timeout: timeout}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/watch.go b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/watch.go
new file mode 100644
index 00000000..ed5b8344
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/docker/swarmkit/watch/watch.go
@@ -0,0 +1,197 @@
+package watch
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/docker/go-events"
+	"github.com/docker/swarmkit/watch/queue"
+)
+
+// ChannelSinkGenerator is a constructor of sinks that eventually lead to a
+// channel.
+type ChannelSinkGenerator interface {
+	NewChannelSink() (events.Sink, *events.Channel)
+}
+
+// Queue is the structure used to publish events and watch for them.
+type Queue struct {
+	sinkGen ChannelSinkGenerator
+	// limit is the max number of items to be held in memory for a watcher
+	limit       uint64
+	mu          sync.Mutex
+	broadcast   *events.Broadcaster
+	cancelFuncs map[events.Sink]func()
+
+	// closeOutChan indicates whether the watchers' channels should be closed
+	// when a watcher queue reaches its limit or when the Close method of the
+	// sink is called.
+	closeOutChan bool
+}
+
+// NewQueue creates a new publish/subscribe queue which supports watchers.
+// Its behavior is customized with the functional options below.
+func NewQueue(options ...func(*Queue) error) *Queue {
+	// Create a queue with the default values
+	q := &Queue{
+		sinkGen:      &dropErrClosedChanGen{},
+		broadcast:    events.NewBroadcaster(),
+		cancelFuncs:  make(map[events.Sink]func()),
+		limit:        0,
+		closeOutChan: false,
+	}
+
+	for _, option := range options {
+		err := option(q)
+		if err != nil {
+			panic(fmt.Sprintf("Failed to apply options to queue: %s", err))
+		}
+	}
+
+	return q
+}
+
+// WithTimeout returns a functional option for a queue that sets a write timeout.
+func WithTimeout(timeout time.Duration) func(*Queue) error {
+	return func(q *Queue) error {
+		q.sinkGen = NewTimeoutDropErrSinkGen(timeout)
+		return nil
+	}
+}
+
+// WithCloseOutChan returns a functional option for a queue whose watcher
+// channel is closed when no more events are expected to be sent to the watcher.
+func WithCloseOutChan() func(*Queue) error {
+	return func(q *Queue) error {
+		q.closeOutChan = true
+		return nil
+	}
+}
+
+// WithLimit returns a functional option for a queue with a max size limit.
+func WithLimit(limit uint64) func(*Queue) error {
+	return func(q *Queue) error {
+		q.limit = limit
+		return nil
+	}
+}
+
+// Watch returns a channel which will receive all items published to the
+// queue from this point, until cancel is called.
+func (q *Queue) Watch() (eventq chan events.Event, cancel func()) {
+	return q.CallbackWatch(nil)
+}
+
+// WatchContext returns a channel where all items published to the queue will
+// be received. The channel will be closed when the provided context is
+// cancelled.
+func (q *Queue) WatchContext(ctx context.Context) (eventq chan events.Event) {
+	return q.CallbackWatchContext(ctx, nil)
+}
+
+// CallbackWatch returns a channel which will receive all events published to
+// the queue from this point that pass the check in the provided callback
+// function. The returned cancel function will stop the flow of events and
+// close the channel.
+func (q *Queue) CallbackWatch(matcher events.Matcher) (eventq chan events.Event, cancel func()) { + chanSink, ch := q.sinkGen.NewChannelSink() + lq := queue.NewLimitQueue(chanSink, q.limit) + sink := events.Sink(lq) + + if matcher != nil { + sink = events.NewFilter(sink, matcher) + } + + q.broadcast.Add(sink) + + cancelFunc := func() { + q.broadcast.Remove(sink) + ch.Close() + sink.Close() + } + + externalCancelFunc := func() { + q.mu.Lock() + cancelFunc := q.cancelFuncs[sink] + delete(q.cancelFuncs, sink) + q.mu.Unlock() + + if cancelFunc != nil { + cancelFunc() + } + } + + q.mu.Lock() + q.cancelFuncs[sink] = cancelFunc + q.mu.Unlock() + + // If the output channel shouldn't be closed and the queue is limitless, + // there's no need for an additional goroutine. + if !q.closeOutChan && q.limit == 0 { + return ch.C, externalCancelFunc + } + + outChan := make(chan events.Event) + go func() { + for { + select { + case <-ch.Done(): + // Close the output channel if the ChannelSink is Done for any + // reason. This can happen if the cancelFunc is called + // externally or if it has been closed by a wrapper sink, such + // as the TimeoutSink. + if q.closeOutChan { + close(outChan) + } + externalCancelFunc() + return + case <-lq.Full(): + // Close the output channel and tear down the Queue if the + // LimitQueue becomes full. + if q.closeOutChan { + close(outChan) + } + externalCancelFunc() + return + case event := <-ch.C: + outChan <- event + } + } + }() + + return outChan, externalCancelFunc +} + +// CallbackWatchContext returns a channel where all items published to the queue will +// be received. The channel will be closed when the provided context is +// cancelled. +func (q *Queue) CallbackWatchContext(ctx context.Context, matcher events.Matcher) (eventq chan events.Event) { + c, cancel := q.CallbackWatch(matcher) + go func() { + <-ctx.Done() + cancel() + }() + return c +} + +// Publish adds an item to the queue. +func (q *Queue) Publish(item events.Event) { + q.broadcast.Write(item) +} + +// Close closes the queue and frees the associated resources. +func (q *Queue) Close() error { + // Make sure all watchers have been closed to avoid a deadlock when + // closing the broadcaster. + q.mu.Lock() + for _, cancelFunc := range q.cancelFuncs { + cancelFunc() + } + q.cancelFuncs = make(map[events.Sink]func()) + q.mu.Unlock() + + return q.broadcast.Close() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/ecs/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/ecs/NOTICE.txt deleted file mode 100644 index 1d8b8268..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/ecs/NOTICE.txt +++ /dev/null @@ -1,14 +0,0 @@ -Elastic Common Schema -Copyright 2018 Elasticsearch B.V. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
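Taken together, the new `watch` package gives the vendored swarmkit a small publish/subscribe primitive. A hedged end-to-end sketch using only the exported API added above (the option values are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/docker/swarmkit/watch"
)

func main() {
	// A queue whose watchers get a 1s write timeout, hold at most 64
	// pending events, and have their channels closed on teardown.
	q := watch.NewQueue(
		watch.WithTimeout(time.Second),
		watch.WithLimit(64),
		watch.WithCloseOutChan(),
	)
	defer q.Close()

	eventq, cancel := q.Watch()
	defer cancel()

	q.Publish("hello")
	fmt.Println(<-eventq) // hello
}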
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/NOTICE.txt
deleted file mode 100644
index 9e299aad..00000000
--- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-libaudit/NOTICE.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Elastic go-libaudit
-Copyright 2017-2018 Elasticsearch B.V.
-
-This product includes software developed at
-Elasticsearch, B.V. (https://www.elastic.co/).
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/CONTRIBUTING.md
new file mode 100644
index 00000000..0c8d15b6
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/CONTRIBUTING.md
@@ -0,0 +1,47 @@
+# Contributing
+
+Contributions are very welcome; this includes documentation, tutorials, bug reports, issues, feature requests, feature implementations, pull requests, or simply organizing the repository issues.
+
+*Pull requests that contain changes on the code base **and** related documentation, e.g. for a new feature, shall remain a single, atomic one.*
+
+## Building From Source
+
+### Environment Prerequisites
+
+To install the latest changes directly from source code, you will need to have `go` installed with `$GOPATH` defined. If you need assistance with this, please follow the [golangbootcamp guide](http://www.golangbootcamp.com/book/get_setup#cha-get_setup).
+
+### Actual installation commands
+
+**Make sure you have satisfied the environment prerequisites above.**
+
+```sh
+go get -u github.com/elastic/go-licenser
+```
+
+## Reporting Issues
+
+If you have found an issue or defect in `go-licenser` or the latest documentation, use the GitHub [issue tracker](https://github.com/elastic/go-licenser/issues) to report the problem. Make sure to follow the provided template so that you include all the useful details possible.
+
+
+### Code Contribution Guidelines
+
+For the benefit of all, here are some recommendations with regards to your PR:
+
+* Go ahead and fork the project and make your changes. We encourage pull requests to allow for review and discussion of code changes.
+* As a best practice, open an Issue on the repository before submitting a PR.
+* When you’re ready to create a pull request, be sure to:
+  * Sign your commit messages, see [DCO details](https://probot.github.io/apps/dco/)
+  * Have test cases for the new code. If you have questions about how to do this, please ask in your pull request.
+  * Run `make format` and `make lint`.
+  * Ensure that `make unit` succeeds.
+
+
+### Golden Files
+
+If you're working with a function that relies on testdata or golden files, you might need to update those when your change modifies that logic.
+
+```console
$ make update-golden-files
ok github.com/elastic/go-licenser 0.029s
+```
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/LICENSE
new file mode 100644
index 00000000..0d4ee751
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 Elasticsearch B.V. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
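The appendix above is the operative part of the license text for downstream users: the boilerplate notice, with the bracketed fields filled in, is placed at the top of each source file in that file's comment syntax. As a minimal sketch for a Go file (the package name here is hypothetical, not from this repository), the applied notice looks like this:

```go
// Copyright 2018 Elasticsearch B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// The notice sits above the package clause so that header checkers,
// such as the go-licenser tool vendored below, can find it in the
// file's first lines.
package example
```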
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/Makefile
new file mode 100644
index 00000000..c60e3872
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/Makefile
@@ -0,0 +1,105 @@
+export VERSION := 0.2.0
+OWNER ?= elastic
+REPO ?= go-licenser
+TEST_UNIT_FLAGS ?= -timeout 10s -p 4 -race -cover
+TEST_UNIT_PACKAGE ?= ./...
+GOLINT_PRESENT := $(shell command -v golint 2> /dev/null)
+GOIMPORTS_PRESENT := $(shell command -v goimports 2> /dev/null)
+GORELEASER_PRESENT := $(shell command -v goreleaser 2> /dev/null)
+RELEASED = $(shell git tag -l $(VERSION))
+DEFAULT_LDFLAGS ?= -X main.version=$(VERSION)-dev -X main.commit=$(shell git rev-parse HEAD)
+
+define HELP
+/////////////////////////////////////////
+/\t$(REPO) Makefile \t\t/
+/////////////////////////////////////////
+
+## Build targets
+
+- build: It will build $(REPO) for the current architecture in bin/$(REPO).
+- install: It will install $(REPO) in the current system (by default in $(GOPATH)/bin/$(REPO)).
+
+## Development targets
+
+- deps: It will install the dependencies required to run development targets.
+- unit: Runs the unit tests.
+- lint: Runs the linters.
+- format: Formats the source files according to gofmt, goimports and go-licenser.
+- update-golden-files: Updates the test golden files.
+
+## Release targets
+
+- release: Creates and publishes a new release matching the VERSION variable.
+- snapshot: Creates a snapshot locally in the dist/ folder.
+
+endef
+export HELP
+
+.DEFAULT: help
+.PHONY: help
+help:
+	@ echo "$$HELP"
+
+.PHONY: deps
+deps:
+ifndef GOLINT_PRESENT
+	@ go get -u golang.org/x/lint/golint
+endif
+ifndef GOIMPORTS_PRESENT
+	@ go get -u golang.org/x/tools/cmd/goimports
+endif
+
+.PHONY: release_deps
+release_deps:
+ifndef GORELEASER_PRESENT
+	@ echo "-> goreleaser not found in path, please install it following the instructions:"
+	@ echo "-> https://goreleaser.com/introduction"
+	@ exit 1
+endif
+
+.PHONY: update-golden-files
+update-golden-files:
+	$(eval GOLDEN_FILE_PACKAGES := "github.com/$(OWNER)/$(REPO)")
+	@ go test $(GOLDEN_FILE_PACKAGES) -update
+
+.PHONY: unit
+unit:
+	@ go test $(TEST_UNIT_FLAGS) $(TEST_UNIT_PACKAGE)
+
+.PHONY: build
+build: deps
+	@ go build -o bin/$(REPO) -ldflags="$(DEFAULT_LDFLAGS)"
+
+.PHONY: install
+install: deps
+	@ go install
+
+.PHONY: lint
+lint: build
+	@ golint -set_exit_status $(shell go list ./...)
+	@ gofmt -d -e -s .
+	@ ./bin/go-licenser -d -exclude golden
+
+.PHONY: format
+format: deps build
+	@ gofmt -e -w -s .
+	@ goimports -w .
+	@ ./bin/go-licenser -exclude golden
+
+.PHONY: release
+release: deps release_deps
+	@ echo "-> Releasing $(REPO) $(VERSION)..."
+	@ git fetch upstream
+ifeq ($(strip $(RELEASED)),)
+	@ echo "-> Creating and pushing a new tag $(VERSION)..."
+	@ git tag $(VERSION)
+	@ git push upstream $(VERSION)
+	@ goreleaser release --rm-dist
+else
+	@ echo "-> git tag $(VERSION) already present, skipping release..."
+endif
+
+.PHONY: snapshot
+snapshot: deps release_deps
+	@ echo "-> Snapshotting $(REPO) $(VERSION)..."
+	@ goreleaser release --snapshot --rm-dist
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-seccomp-bpf/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/NOTICE
similarity index 84%
rename from vendor/github.com/elastic/beats/vendor/github.com/elastic/go-seccomp-bpf/NOTICE.txt
rename to vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/NOTICE
index e02df499..4972cec5 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-seccomp-bpf/NOTICE.txt
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/NOTICE
@@ -1,4 +1,4 @@
-Elastic go-seccomp-bpf
+Elastic go-licenser
 Copyright 2018 Elasticsearch B.V.
 
 This product includes software developed at
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/README.md
new file mode 100644
index 00000000..435b4b1c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/README.md
@@ -0,0 +1,46 @@
+# Go Licenser [![Build Status](https://travis-ci.org/elastic/go-licenser.svg?branch=master)](https://travis-ci.org/elastic/go-licenser)
+
+Small zero-dependency license header checker for source files. The aim of this project is to provide a common
+binary that can be used to ensure that source code files contain a license header. It's unlikely that this project
+is useful outside of Elastic **_at the current stage_**, but the `licensing` package can be used as a building block.
+
+## Supported Licenses
+
+* Apache 2.0
+* Elastic
+
+## Supported languages
+
+* Go
+
+## Installing
+
+```
+go get -u github.com/elastic/go-licenser
+```
+
+## Usage
+
+```
+Usage: go-licenser [flags] [path]
+
+  go-licenser walks the specified path recursively and appends a license header if the current
+  header doesn't match the one found in the file.
+
+Options:
+
+  -d	skips rewriting files and returns exitcode 1 if any discrepancies are found.
+  -exclude value
+    	path to exclude (can be specified multiple times).
+  -ext string
+    	sets the file extension to scan for. (default ".go")
+  -license string
+    	sets the license type to check: ASL2, Elastic (default "ASL2")
+  -version
+    	prints out the binary version.
+```
+
+## Contributing
+
+See [CONTRIBUTING.md](./CONTRIBUTING.md).
+
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/error.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/error.go
new file mode 100644
index 00000000..5c40faa3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/error.go
@@ -0,0 +1,39 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+ +package main + +// Error wraps a normal error with an Exitcode. +type Error struct { + err error + code int +} + +func (e Error) Error() string { + if e.err != nil { + return e.err.Error() + } + return "" +} + +// Code returns the exitcode for the error +func Code(e error) int { + if err, ok := e.(*Error); ok { + return err.code + } + return 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/go.mod new file mode 100644 index 00000000..89b964cb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/go.mod @@ -0,0 +1 @@ +module github.com/elastic/go-licenser diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/licensing/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/licensing/doc.go new file mode 100644 index 00000000..3fcc1ac5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/licensing/doc.go @@ -0,0 +1,20 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// Package licensing provides a set of functions that read the top +// lines of a file and can determine if they match a specific header. +package licensing diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/licensing/license.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/licensing/license.go new file mode 100644 index 00000000..fad14c14 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/licensing/license.go @@ -0,0 +1,132 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
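The error.go file above pairs an error with a process exit code, and `Code` recovers that code with a type assertion; main.go (further below) always constructs `&Error{...}` values, so the `*Error` assertion matches. A self-contained sketch of the same pattern, with illustrative names that are not part of go-licenser:

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

// exitError pairs a wrapped error with the exit code the process
// should terminate with, mirroring go-licenser's Error type.
type exitError struct {
	err  error
	code int
}

func (e exitError) Error() string {
	if e.err != nil {
		return e.err.Error()
	}
	return ""
}

// exitCode mirrors Code: a type assertion recovers the stored exit
// code, and any other error value falls back to the default code 0.
func exitCode(e error) int {
	if ee, ok := e.(*exitError); ok {
		return ee.code
	}
	return 0
}

func main() {
	var err error = &exitError{err: errors.New("discrepancies found"), code: 1}
	fmt.Fprintln(os.Stderr, err)
	os.Exit(exitCode(err)) // terminates with status 1
}
```

This is the contract the README documents for `-d`: report the discrepancy on stderr and exit non-zero without rewriting anything.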
+
+package licensing
+
+import (
+	"bufio"
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"os"
+	"reflect"
+	"strings"
+)
+
+var (
+	startPrefixes = []string{"// Copyright", "// copyright", "// Licensed", "// licensed"}
+	endPrefixes   = []string{"package ", "// Package ", "// +build ", "// Code generated by", "// code generated by"}
+
+	errHeaderIsTooShort = errors.New("header is too short")
+)
+
+// ContainsHeader reads the lines of a file and checks whether its first
+// lines exactly match the expected header.
+func ContainsHeader(r io.Reader, headerLines []string) bool {
+	var found []string
+	var scanner = bufio.NewScanner(r)
+
+	for scanner.Scan() {
+		found = append(found, scanner.Text())
+	}
+
+	if len(found) < len(headerLines) {
+		return false
+	}
+
+	if !reflect.DeepEqual(found[:len(headerLines)], headerLines) {
+		return false
+	}
+
+	return true
+}
+
+// RewriteFileWithHeader reads a file from a path and rewrites it with a header
+func RewriteFileWithHeader(path string, header []byte) error {
+	if len(header) < 2 {
+		return errHeaderIsTooShort
+	}
+
+	info, err := os.Stat(path)
+	if err != nil {
+		return err
+	}
+
+	origin, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err
+	}
+
+	data := RewriteWithHeader(origin, header)
+	return ioutil.WriteFile(path, data, info.Mode())
+}
+
+// RewriteWithHeader rewrites the src byte buffer's header with the new header.
+func RewriteWithHeader(src []byte, header []byte) []byte {
+	// Ensures that the header ends with two newline bytes
+	for !reflect.DeepEqual(header[len(header)-2:], []byte("\n\n")) {
+		header = append(header, []byte("\n")...)
+	}
+
+	var oldHeader = headerBytes(bytes.NewReader(src))
+	return bytes.Replace(src, oldHeader, header, 1)
+}
+
+// headerBytes detects the header lines of an io.Reader contents and returns
+// what it considers to be the header as a slice of bytes.
+func headerBytes(r io.Reader) []byte {
+	var scanner = bufio.NewScanner(r)
+	var replaceableHeader []byte
+	var continuedHeader bool
+	for scanner.Scan() {
+		var t = scanner.Text()
+
+		for i := range endPrefixes {
+			if strings.HasPrefix(t, endPrefixes[i]) {
+				return replaceableHeader
+			}
+		}
+
+		for i := range startPrefixes {
+			if strings.HasPrefix(t, startPrefixes[i]) {
+				continuedHeader = true
+			}
+		}
+
+		if continuedHeader {
+			replaceableHeader = append(replaceableHeader, []byte(t+"\n")...)
+		}
+	}
+
+	return replaceableHeader
+}
+
+// containsHeaderLine reads the lines of a file and reports whether any of
+// them matches one of the expected header lines.
+func containsHeaderLine(r io.Reader, headerLines []string) bool {
+	var scanner = bufio.NewScanner(r)
+	for scanner.Scan() {
+		for i := range headerLines {
+			if scanner.Text() == headerLines[i] {
+				return true
+			}
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/main.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/main.go
new file mode 100644
index 00000000..e04a421e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/main.go
@@ -0,0 +1,236 @@
+// Licensed to Elasticsearch B.V. under one or more contributor
+// license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright
+// ownership. Elasticsearch B.V. licenses this file to you under
+// the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package main
+
+import (
+	"flag"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/elastic/go-licenser/licensing"
+)
+
+const (
+	defaultExt     = ".go"
+	defaultPath    = "."
+	defaultLicense = "ASL2"
+	defaultFormat  = "%s: is missing the license header\n"
+)
+
+const (
+	exitDefault = iota
+	exitSourceNeedsToBeRewritten
+	exitFailedToStatTree
+	exitFailedToStatFile
+	exitFailedToWalkPath
+	exitFailedToOpenWalkFile
+	errFailedRewrittingFile
+	errUnknownLicense
+)
+
+var usageText = `
+Usage: go-licenser [flags] [path]
+
+  go-licenser walks the specified path recursively and appends a license header if the current
+  header doesn't match the one found in the file.
+
+Options:
+
+`[1:]
+
+// Headers is the map of supported licenses
+var Headers = map[string][]string{
+	"ASL2": {
+		`// Licensed to Elasticsearch B.V. under one or more contributor`,
+		`// license agreements. See the NOTICE file distributed with`,
+		`// this work for additional information regarding copyright`,
+		`// ownership. Elasticsearch B.V. licenses this file to you under`,
+		`// the Apache License, Version 2.0 (the "License"); you may`,
+		`// not use this file except in compliance with the License.`,
+		`// You may obtain a copy of the License at`,
+		`//`,
+		`// http://www.apache.org/licenses/LICENSE-2.0`,
+		`//`,
+		`// Unless required by applicable law or agreed to in writing,`,
+		`// software distributed under the License is distributed on an`,
+		`// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY`,
+		`// KIND, either express or implied. See the License for the`,
+		`// specific language governing permissions and limitations`,
+		`// under the License.`,
+	},
+	"Elastic": {
+		`// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one`,
+		`// or more contributor license agreements.
Licensed under the Elastic License;`, + `// you may not use this file except in compliance with the Elastic License.`, + }, +} + +var ( + dryRun bool + showVersion bool + extension string + args []string + license string + exclude sliceFlag + defaultExludedDirs = []string{"vendor", ".git"} +) + +type sliceFlag []string + +func (f *sliceFlag) String() string { + var s string + for _, i := range *f { + s += i + " " + } + return s +} + +func (f *sliceFlag) Set(value string) error { + *f = append(*f, value) + return nil +} + +func init() { + flag.Var(&exclude, "exclude", `path to exclude (can be specified multiple times).`) + flag.BoolVar(&dryRun, "d", false, `skips rewriting files and returns exitcode 1 if any discrepancies are found.`) + flag.BoolVar(&showVersion, "version", false, `prints out the binary version.`) + flag.StringVar(&extension, "ext", defaultExt, "sets the file extension to scan for.") + flag.StringVar(&license, "license", defaultLicense, "sets the license type to check: ASL2, Elastic") + flag.Usage = usageFlag + flag.Parse() + args = flag.Args() +} + +func main() { + if showVersion { + fmt.Printf("go-licenser %s (%s)\n", version, commit) + return + } + + err := run(args, license, exclude, extension, dryRun, os.Stdout) + if err != nil && err.Error() != "" { + fmt.Fprint(os.Stderr, err) + } + + os.Exit(Code(err)) +} + +func run(args []string, license string, exclude []string, ext string, dry bool, out io.Writer) error { + header, ok := Headers[license] + if !ok { + return &Error{err: fmt.Errorf("unknown license: %s", license), code: errUnknownLicense} + } + + var headerBytes []byte + for i := range header { + headerBytes = append(headerBytes, []byte(header[i])...) + headerBytes = append(headerBytes, []byte("\n")...) + } + + var path = defaultPath + if len(args) > 0 { + path = args[0] + } + + if _, err := os.Stat(path); err != nil { + return &Error{err: err, code: exitFailedToStatTree} + } + + return walk(path, ext, license, headerBytes, exclude, dry, out) +} + +func reportFile(out io.Writer, f string) { + cwd, _ := filepath.Abs(filepath.Dir(os.Args[0])) + rel, err := filepath.Rel(cwd, f) + if err != nil { + rel = f + } + fmt.Fprintf(out, defaultFormat, rel) +} + +func walk(p, ext, license string, headerBytes []byte, exclude []string, dry bool, out io.Writer) error { + var err error + filepath.Walk(p, func(path string, info os.FileInfo, walkErr error) error { + if walkErr != nil { + err = &Error{err: walkErr, code: exitFailedToWalkPath} + return walkErr + } + + var currentPath = cleanPathPrefixes( + strings.Replace(path, p, "", 1), + []string{string(os.PathSeparator)}, + ) + + var excludedDir = info.IsDir() && stringInSlice(info.Name(), defaultExludedDirs) + if needsExclusion(currentPath, exclude) || excludedDir { + return filepath.SkipDir + } + + if e := addOrCheckLicense(path, ext, license, headerBytes, info, dry, out); e != nil { + err = e + } + + return nil + }) + + return err +} + +func addOrCheckLicense(path, ext, license string, headerBytes []byte, info os.FileInfo, dry bool, out io.Writer) error { + if info.IsDir() || filepath.Ext(path) != ext { + return nil + } + + f, e := os.Open(path) + if e != nil { + return &Error{err: e, code: exitFailedToOpenWalkFile} + } + defer f.Close() + + if licensing.ContainsHeader(f, Headers[license]) { + return nil + } + + if dry { + reportFile(out, path) + return &Error{code: exitSourceNeedsToBeRewritten} + } + + if err := licensing.RewriteFileWithHeader(path, headerBytes); err != nil { + return &Error{err: err, code: 
errFailedRewrittingFile} + } + + return nil +} + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} + +func usageFlag() { + fmt.Fprintf(os.Stderr, usageText) + flag.PrintDefaults() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/path.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/path.go new file mode 100644 index 00000000..fad555b0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/path.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package main + +import ( + "os" + "strings" +) + +func needsExclusion(path string, exclude []string) bool { + for _, excluded := range exclude { + excluded = cleanPathSuffixes(excluded, []string{"*", string(os.PathSeparator)}) + if strings.HasPrefix(path, excluded) { + return true + } + } + + return false +} + +func cleanPathSuffixes(path string, sufixes []string) string { + for _, suffix := range sufixes { + for strings.HasSuffix(path, suffix) && len(path) > 0 { + path = path[:len(path)-len(suffix)] + } + } + + return path +} + +func cleanPathPrefixes(path string, prefixes []string) string { + for _, prefix := range prefixes { + for strings.HasPrefix(path, prefix) && len(path) > 0 { + path = path[len(prefix):] + } + } + + return path +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/version.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/version.go new file mode 100644 index 00000000..426d1234 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-licenser/version.go @@ -0,0 +1,23 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
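path.go above keeps the exclusion logic deliberately simple: each exclusion pattern is normalized by stripping trailing `*` and path-separator characters, and a path is excluded when it begins with the cleaned pattern. A runnable sketch of that matching behavior, rewritten here for illustration rather than copied from the vendored file:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// trimSuffixes repeatedly strips any of the given suffixes from the
// end of path, like cleanPathSuffixes above.
func trimSuffixes(path string, suffixes []string) string {
	for _, s := range suffixes {
		for strings.HasSuffix(path, s) && len(path) > 0 {
			path = path[:len(path)-len(s)]
		}
	}
	return path
}

// isExcluded mirrors needsExclusion: a relative path is excluded
// when it starts with any cleaned exclusion pattern.
func isExcluded(path string, exclude []string) bool {
	for _, pattern := range exclude {
		pattern = trimSuffixes(pattern, []string{"*", string(os.PathSeparator)})
		if strings.HasPrefix(path, pattern) {
			return true
		}
	}
	return false
}

func main() {
	exclude := []string{"golden", "vendor/*"}
	fmt.Println(isExcluded("vendor/github.com/foo/bar.go", exclude)) // true
	fmt.Println(isExcluded("licensing/license.go", exclude))        // false
}
```

Because the match is a plain string prefix, `vendor/*` would also exclude a sibling file such as `vendored.go`; directories named `vendor` and `.git` are additionally skipped by name via `defaultExludedDirs` in the walk function above.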
+ +package main + +var ( + version string + commit string +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/README.md index 4c6486b6..eea0d0dd 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/README.md @@ -7,7 +7,7 @@ This library is here to help you with all your data validation needs. It's ideal ## Quick Links * [GoDoc](https://godoc.org/github.com/elastic/go-lookslike) for this library. -* [Runnable Examples](https://github.com/elastic/go-lookslike/blob/master/lookslike/doc_test.go). +* [Runnable Examples](https://github.com/elastic/go-lookslike/blob/master/doc_test.go). ## Install @@ -25,7 +25,7 @@ If using govendor run: ## Real World Usage Examples -lookslike was created to improve the testing of various structures in [elastic/beats](https://github.com/elastic/beats). Searching the tests for `lookslike` will show real world usage. +lookslike was created to improve the testing of various structures in [elastic/beats](https://github.com/elastic/beats/search?q=lookslike.MustCompile&unscoped_q=lookslike.MustCompile). Searching the tests for `lookslike.MustCompile` will show real world usage. ## Call for More `isdef`s! diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/compiled_schema.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/compiled_schema.go index 5139ed38..919ca283 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/compiled_schema.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/compiled_schema.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/go-lookslike/isdef" "github.com/elastic/go-lookslike/llpath" "github.com/elastic/go-lookslike/llresult" + "reflect" ) type flatValidator struct { @@ -35,11 +36,16 @@ type CompiledSchema []flatValidator func (cs CompiledSchema) Check(actual interface{}) *llresult.Results { res := llresult.NewResults() for _, pv := range cs { - actualV, actualKeyExists := pv.path.GetFrom(actual) + actualVal, actualKeyExists := pv.path.GetFrom(reflect.ValueOf(actual)) + var actualInter interface{} + zero := reflect.Value{} + if actualVal != zero { + actualInter = actualVal.Interface() + } if !pv.isDef.Optional || pv.isDef.Optional && actualKeyExists { var checkRes *llresult.Results - checkRes = pv.isDef.Check(pv.path, actualV, actualKeyExists) + checkRes = pv.isDef.Check(pv.path, actualInter, actualKeyExists) res.Merge(checkRes) } } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/core.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/core.go index bf33df31..3068d8e6 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/core.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/core.go @@ -74,7 +74,7 @@ func Strict(laxValidator validator.Validator) validator.Validator { } sort.Strings(validatedPaths) - walk(actual, false, func(woi walkObserverInfo) error { + walk(reflect.ValueOf(actual), false, func(woi walkObserverInfo) error { _, validatedExactly := res.Fields[woi.path.String()] if validatedExactly { return nil // This key was tested, passes strict test @@ -98,33 +98,36 @@ func Strict(laxValidator validator.Validator) validator.Validator { func compile(in interface{}) 
(validator.Validator, error) { switch in.(type) { - case map[string]interface{}: - return compileMap(in.(map[string]interface{})) - case []interface{}: - return compileSlice(in.([]interface{})) case isdef.IsDef: return compileIsDef(in.(isdef.IsDef)) case nil: // nil can't be handled by the default case of IsEqual return compileIsDef(isdef.IsNil) default: - // By default we just check reflection equality - return compileIsDef(isdef.IsEqual(in)) + inVal := reflect.ValueOf(in) + switch inVal.Kind() { + case reflect.Map: + return compileMap(inVal) + case reflect.Slice, reflect.Array: + return compileSlice(inVal) + default: + return compileIsDef(isdef.IsEqual(in)) + } } } -func compileMap(in map[string]interface{}) (validator validator.Validator, err error) { +func compileMap(inVal reflect.Value) (validator validator.Validator, err error) { wo, compiled := setupWalkObserver() - err = walkMap(in, true, wo) + err = walkMap(inVal, true, wo) return func(actual interface{}) *llresult.Results { return compiled.Check(actual) }, err } -func compileSlice(in []interface{}) (validator validator.Validator, err error) { +func compileSlice(inVal reflect.Value) (validator validator.Validator, err error) { wo, compiled := setupWalkObserver() - err = walkSlice(in, true, wo) + err = walkSlice(inVal, true, wo) // Slices are always strict in validation because // it would be surprising to only validate the first specified values @@ -142,18 +145,16 @@ func compileIsDef(def isdef.IsDef) (validator validator.Validator, err error) { func setupWalkObserver() (walkObserver, *CompiledSchema) { compiled := make(CompiledSchema, 0) return func(current walkObserverInfo) error { - // Determine whether we should test this value - // We want to test all values except collections that contain a value - // If a collection contains a value, we Check those 'leaf' values instead - rv := reflect.ValueOf(current.value) - kind := rv.Kind() + kind := current.value.Kind() isCollection := kind == reflect.Map || kind == reflect.Slice - isNonEmptyCollection := isCollection && rv.Len() > 0 + isEmptyCollection := isCollection && current.value.Len() == 0 - if !isNonEmptyCollection { - isDef, isIsDef := current.value.(isdef.IsDef) + // We do comparisons on all leaf nodes. If the leaf is an empty collection + // we do a comparison to let us test empty structures. 
+		if !isCollection || isEmptyCollection {
+			isDef, isIsDef := current.value.Interface().(isdef.IsDef)
 			if !isIsDef {
-				isDef = isdef.IsEqual(current.value)
+				isDef = isdef.IsEqual(current.value.Interface())
 			}
 
 			compiled = append(compiled, flatValidator{current.path, isDef})
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/internal/llreflect/chase.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/internal/llreflect/chase.go
new file mode 100644
index 00000000..b5f6c12b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/internal/llreflect/chase.go
@@ -0,0 +1,13 @@
+package llreflect
+
+import (
+	"reflect"
+)
+
+// ChaseValue takes a value and returns the underlying value even if it is nested in pointers or wrapped in interface{}
+func ChaseValue(v reflect.Value) reflect.Value {
+	for (v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface) && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/llpath/path.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/llpath/path.go
index 0ac5f910..293cf29c 100644
--- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/llpath/path.go
+++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/llpath/path.go
@@ -117,46 +117,44 @@ func (p Path) Last() *PathComponent {
 }
 
 // GetFrom takes a map and fetches the given Path from it.
-func (p Path) GetFrom(m interface{}) (value interface{}, exists bool) {
+func (p Path) GetFrom(source reflect.Value) (result reflect.Value, exists bool) {
 	// nil values are handled specially. If we're fetching from a nil
 	// there's one case where it exists, when comparing it to another nil.
-	if m == nil {
+	if (source.Kind() == reflect.Map || source.Kind() == reflect.Slice) && source.IsNil() {
 		// since another nil would be scalar, we just check that the
 		// path length is 0.
-		return nil, len(p) == 0
+		return source, len(p) == 0
 	}
 
-	value = m
+	result = source
 	exists = true
 	for _, pc := range p {
-		rt := reflect.TypeOf(value)
-		switch rt.Kind() {
+		switch result.Kind() {
 		case reflect.Map:
-			converted := llreflect.InterfaceToMap(value)
-			value, exists = converted[pc.Key]
-		case reflect.Slice:
-			converted := llreflect.InterfaceToSliceOfInterfaces(value)
-			if pc.Index < len(converted) {
-				exists = true
-				value = converted[pc.Index]
+			result = llreflect.ChaseValue(result.MapIndex(reflect.ValueOf(pc.Key)))
+			exists = result != reflect.Value{}
+		case reflect.Slice, reflect.Array:
+			if pc.Index < result.Len() {
+				result = llreflect.ChaseValue(result.Index(pc.Index))
+				exists = result != reflect.Value{}
 			} else {
+				result = reflect.ValueOf(nil)
 				exists = false
-				value = nil
 			}
 		default:
 			// If this case has been reached this means the expected type, say a map,
 			// is actually something else, like a string or an array. In this case we
			// simply say the result doesn't exist. From a practical perspective this is
			// the right behavior since it will cause validation to fail.
- return nil, false + return reflect.ValueOf(nil), false } if exists == false { - return nil, exists + return reflect.ValueOf(nil), exists } } - return value, exists + return result, exists } var arrMatcher = regexp.MustCompile("\\[(\\d+)\\]") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/walk.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/walk.go index 2c0189a6..3ac05fdf 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/walk.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-lookslike/walk.go @@ -18,17 +18,17 @@ package lookslike import ( + "fmt" "reflect" - "github.com/elastic/go-lookslike/internal/llreflect" "github.com/elastic/go-lookslike/llpath" ) type walkObserverInfo struct { - key llpath.PathComponent - value interface{} - root map[string]interface{} - path llpath.Path + key llpath.PathComponent + value reflect.Value + rootVal reflect.Value + path llpath.Path } // walkObserver functions run once per object in the tree. @@ -36,65 +36,68 @@ type walkObserver func(info walkObserverInfo) error // walk determine if in is a `map[string]interface{}` or a `Slice` and traverse it if so, otherwise will // treat it as a scalar and invoke the walk observer on the input value directly. -func walk(in interface{}, expandPaths bool, wo walkObserver) error { - switch in.(type) { - case map[string]interface{}: - return walkMap(in.(map[string]interface{}), expandPaths, wo) - case []interface{}: - return walkSlice(in.([]interface{}), expandPaths, wo) +func walk(inVal reflect.Value, expandPaths bool, wo walkObserver) error { + switch inVal.Kind() { + case reflect.Map: + return walkMap(inVal, expandPaths, wo) + case reflect.Slice: + return walkSlice(inVal, expandPaths, wo) default: - return walkInterface(in, expandPaths, wo) + return walkInterface(inVal, expandPaths, wo) } } // walkmap[string]interface{} is a shorthand way to walk a tree with a map as the root. -func walkMap(m map[string]interface{}, expandPaths bool, wo walkObserver) error { - return walkFullMap(m, m, llpath.Path{}, expandPaths, wo) +func walkMap(mVal reflect.Value, expandPaths bool, wo walkObserver) error { + return walkFullMap(mVal, mVal, llpath.Path{}, expandPaths, wo) } // walkSlice walks the provided root slice. 
-func walkSlice(s []interface{}, expandPaths bool, wo walkObserver) error { - return walkFullSlice(s, map[string]interface{}{}, llpath.Path{}, expandPaths, wo) +func walkSlice(sVal reflect.Value, expandPaths bool, wo walkObserver) error { + return walkFullSlice(sVal, reflect.ValueOf(map[string]interface{}{}), llpath.Path{}, expandPaths, wo) } -func walkInterface(s interface{}, expandPaths bool, wo walkObserver) error { +func walkInterface(s reflect.Value, expandPaths bool, wo walkObserver) error { return wo(walkObserverInfo{ - value: s, - key: llpath.PathComponent{}, - root: map[string]interface{}{}, - path: llpath.Path{}, + value: s, + key: llpath.PathComponent{}, + rootVal: reflect.ValueOf(map[string]interface{}{}), + path: llpath.Path{}, }) } -func walkFull(o interface{}, root map[string]interface{}, path llpath.Path, expandPaths bool, wo walkObserver) (err error) { +func walkFull(oVal, rootVal reflect.Value, path llpath.Path, expandPaths bool, wo walkObserver) (err error) { + + // Unpack any wrapped interfaces + for oVal.Kind() == reflect.Interface { + oVal = reflect.ValueOf(oVal.Interface()) + } + lastPathComponent := path.Last() if lastPathComponent == nil { // In the case of a slice we can have an empty path - if _, ok := o.([]interface{}); ok { + if oVal.Kind() == reflect.Slice || oVal.Kind() == reflect.Array { lastPathComponent = &llpath.PathComponent{} } else { - panic("Attempted to traverse an empty Path on a map[string]interface{} in lookslike.walkFull, this should never happen.") + panic("Attempted to traverse an empty Path on non array/slice in lookslike.walkFull, this should never happen.") } } - err = wo(walkObserverInfo{*lastPathComponent, o, root, path}) + err = wo(walkObserverInfo{*lastPathComponent, oVal, rootVal, path}) if err != nil { return err } - switch reflect.TypeOf(o).Kind() { + switch oVal.Kind() { case reflect.Map: - converted := llreflect.InterfaceToMap(o) - err := walkFullMap(converted, root, path, expandPaths, wo) + err := walkFullMap(oVal, rootVal, path, expandPaths, wo) if err != nil { return err } case reflect.Slice: - converted := llreflect.InterfaceToSliceOfInterfaces(o) - - for idx, v := range converted { - newPath := path.ExtendSlice(idx) - err := walkFull(v, root, newPath, expandPaths, wo) + for i := 0; i < oVal.Len(); i++ { + newPath := path.ExtendSlice(i) + err := walkFull(oVal.Index(i), rootVal, newPath, expandPaths, wo) if err != nil { return err } @@ -105,8 +108,15 @@ func walkFull(o interface{}, root map[string]interface{}, path llpath.Path, expa } // walkFull walks the given map[string]interface{} tree. 
-func walkFullMap(m map[string]interface{}, root map[string]interface{}, p llpath.Path, expandPaths bool, wo walkObserver) (err error) { - for k, v := range m { +func walkFullMap(mVal, rootVal reflect.Value, p llpath.Path, expandPaths bool, wo walkObserver) (err error) { + if mVal.Kind() != reflect.Map { + return fmt.Errorf("could not walk not map type for %s", mVal) + } + + for _, kVal := range mVal.MapKeys() { + vVal := mVal.MapIndex(kVal) + k := kVal.String() + var newPath llpath.Path if !expandPaths { newPath = p.ExtendMap(k) @@ -118,7 +128,7 @@ func walkFullMap(m map[string]interface{}, root map[string]interface{}, p llpath newPath = p.Concat(additionalPath) } - err = walkFull(v, root, newPath, expandPaths, wo) + err = walkFull(vVal, rootVal, newPath, expandPaths, wo) if err != nil { return err } @@ -127,12 +137,12 @@ func walkFullMap(m map[string]interface{}, root map[string]interface{}, p llpath return nil } -func walkFullSlice(s []interface{}, root map[string]interface{}, p llpath.Path, expandPaths bool, wo walkObserver) (err error) { - for idx, v := range s { +func walkFullSlice(sVal reflect.Value, rootVal reflect.Value, p llpath.Path, expandPaths bool, wo walkObserver) (err error) { + for i := 0; i < sVal.Len(); i++ { var newPath llpath.Path - newPath = p.ExtendSlice(idx) + newPath = p.ExtendSlice(i) - err = walkFull(v, root, newPath, expandPaths, wo) + err = walkFull(sVal.Index(i), rootVal, newPath, expandPaths, wo) if err != nil { return err } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.yml deleted file mode 100644 index ce01bfec..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_map_inline.yml +++ /dev/null @@ -1,81 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - types.yml - -main: | - package gotype - - import ( - "reflect" - - stunsafe "github.com/urso/go-structform/internal/unsafe" - ) - - var _mapInlineMapping = map[reflect.Type]reFoldFn{ - {{ range data.primitiveTypes }} - {{- $t := capitalize . 
-}} - t{{ $t }}: foldMapInline{{ $t }}, - {{ end }} - } - - func getMapInlineByPrimitiveElem(t reflect.Type) reFoldFn { - if t == tInterface { - return foldMapInlineInterface - } - return _mapInlineMapping[t] - } - - func foldMapInlineInterface(C *foldContext, v reflect.Value) (err error) { - ptr := unsafe.Pointer(v.Pointer()) - if ptr == nil { - return nil - } - - m := *((*map[string]interface{})(unsafe.Pointer(&ptr))) - for k, v := range m { - if err = C.OnKey(k); err != nil { - return err - } - if err = foldInterfaceValue(C, v); err != nil { - return err - } - } - return - } - - {{ range data.primitiveTypes }} - {{ $t := capitalize . }} - func foldMapInline{{ $t }}(C *foldContext, v reflect.Value) (err error) { - ptr := unsafe.Pointer(v.Pointer()) - if ptr == nil { - return nil - } - - m := *((*map[string]{{ . }})(unsafe.Pointer(&ptr))) - for k, v := range m { - if err = C.OnKey(k); err != nil { - return err - } - if err = C.On{{ $t }}(v); err != nil { - return err - } - } - return - } - {{ end }} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.yml deleted file mode 100644 index d45f9703..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/fold_refl_sel.yml +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - types.yml - -main: | - package gotype - - var _reflPrimitivesMapping = map[reflect.Type]reFoldFn{ - {{ range data.primitiveTypes }} - {{ $t := capitalize . }} - t{{ $t }}: reFold{{ $t }}, - reflect.SliceOf(t{{ $t }}): reFoldArr{{ $t }}, - reflect.MapOf(tString, t{{ $t }}): reFoldMap{{ $t }}, - {{ end }} - } - - func getReflectFoldPrimitive(t reflect.Type) reFoldFn { - return _reflPrimitivesMapping[t] - } - - func getReflectFoldPrimitiveKind(t reflect.Type) (reFoldFn, error) { - switch t.Kind() { - {{ range data.primitiveTypes }} - {{ $t := capitalize . }} - case reflect.{{ $t }}: - return reFold{{ $t }}, nil - {{ end }} - default: - return nil, errUnsupported - } - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.yml deleted file mode 100644 index 5e472e01..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/stacks.yml +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. 
licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -data.stacks: -- name: unfolderStack - type: unfolder -- name: reflectValueStack - type: reflect.Value -- name: ptrStack - init: 'nil' - type: unsafe.Pointer -- name: keyStack - type: string - init: '""' -- name: idxStack - type: int - init: -1 -- name: structformTypeStack - type: structform.BaseType - init: structform.AnyType - -main: | - package gotype - - {{ range .stacks }} - type {{ .name }} struct { - current {{ .type }} - stack []{{ .type }} - stack0 [{{ if .size0 }}{{ .size0 }}{{ else }}32{{ end }}]{{ .type }} - } - {{ end }} - - {{ range .stacks }} - func (s *{{ .name }}) init({{ if isnil .init }}v {{ .type }}{{end}}) { - s.current = {{ if isnil .init }}v{{ else }}{{ .init }}{{end}} - s.stack = s.stack0[:0] - } - - func (s *{{ .name }}) push(v {{ .type }}) { - s.stack = append(s.stack, s.current) - s.current = v - } - - func (s *{{ .name }}) pop() {{ .type }} { - old := s.current - last := len(s.stack) - 1 - s.current = s.stack[last] - s.stack = s.stack[:last] - return old - } - {{ end }} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/types.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/types.yml deleted file mode 100644 index 011a93fe..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/types.yml +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -data.numTypes: [ - uint, uint8, uint16, uint32, uint64, - int, int8, int16, int32, int64, - float32, float64 - ] - -data.primitiveTypes: [ - bool, - string, - uint, uint8, uint16, uint32, uint64, - int, int8, int16, int32, int64, - float32, float64 - ] - -data.mapTypes: - AnyType: 'interface{}' - ByteType: uint8 - StringType: string - BoolType: bool - ZeroType: 'interface{}' - IntType: int - Int8Type: int8 - Int16Type: int16 - Int32Type: int32 - Int64Type: int64 - UintType: uint - Uint8Type: uint8 - Uint16Type: uint16 - Uint32Type: uint32 - Uint64Type: uint64 - Float32Type: float32 - Float64Type: float64 - -data.mapReflTypes: - AnyType: tInterface - ByteType: tByte - StringType: tString - BoolType: tBool - ZeroType: tInterface - IntType: tInt - Int8Type: tInt8 - Int16Type: tInt16 - Int32Type: tInt32 - Int64Type: tInt64 - UintType: tUint - Uint8Type: tUint8 - Uint16Type: tUint16 - Uint32Type: tUint32 - Uint64Type: tUint64 - Float32Type: tFloat32 - Float64Type: tFloat64 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.yml deleted file mode 100644 index 8f1770cb..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_arr.yml +++ /dev/null @@ -1,169 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -main: | - package gotype - - import "github.com/urso/go-structform" - - {{/* defined 'lifted' pointer slice unfolders into reflection based unfolders */}} - var ( - unfolderReflArrIfc = liftGoUnfolder(newUnfolderArrIfc()) - {{ range data.primitiveTypes }} - {{ $t := capitalize . }} - unfolderReflArr{{ $t }} = liftGoUnfolder(newUnfolderArr{{ $t }}()) - {{ end }} - ) - - {{/* define pointer based unfolder types */}} - {{ invoke "makeTypeWithName" "name" "Ifc" "type" "interface{}" }} - {{ template "makeType" "bool" }} - {{ template "makeType" "string" }} - {{ range .numTypes }} - {{ template "makeType" . }} - {{ end }} - - {{/* create visitor callbacks */}} - {{ invoke "onIfcFns" "name" "unfolderArrIfc" "fn" "append" }} - {{ invoke "onBoolFns" "name" "unfolderArrBool" "fn" "append" }} - {{ invoke "onStringFns" "name" "unfolderArrString" "fn" "append" }} - {{ range .numTypes }} - {{ $type := . 
}} - {{ $name := capitalize $type | printf "unfolderArr%v" }} - {{ invoke "onNumberFns" "name" $name "type" $type "fn" "append" }} - {{ end }} - - {{ template "arrIfc" }} - - -# makeTypeWithName(name, type) -templates.makeTypeWithName: | - {{ $type := .type }} - {{ $name := capitalize .name | printf "unfolderArr%v" }} - {{ $startName := capitalize .name | printf "unfoldArrStart%v" }} - - {{ invoke "makeUnfoldType" "name" $name }} - {{ invoke "makeUnfoldType" "name" $startName "base" "unfolderErrArrayStart" }} - - func (u *{{ $name }} ) initState(ctx *unfoldCtx, ptr unsafe.Pointer) { - ctx.unfolder.push(u) - ctx.unfolder.push(new{{ $startName | capitalize}}()) - ctx.idx.push(0) - ctx.ptr.push(ptr) - } - - func (u * {{ $name }} ) cleanup(ctx *unfoldCtx) { - ctx.unfolder.pop() - ctx.idx.pop() - ctx.ptr.pop() - } - - func (u * {{ $startName }}) cleanup(ctx *unfoldCtx) { - ctx.unfolder.pop() - } - - func (u *{{ $startName }} ) ptr(ctx *unfoldCtx) *[]{{ $type }} { - return (*[]{{ $type }})(ctx.ptr.current) - } - - func (u *{{ $name }} ) ptr(ctx *unfoldCtx) *[]{{ $type }} { - return (*[]{{ $type }})(ctx.ptr.current) - } - - func (u *{{ $startName }}) OnArrayStart(ctx *unfoldCtx, l int, baseType structform.BaseType) error { - to := u.ptr(ctx) - if l < 0 { - l = 0 - } - - if *to == nil && l > 0 { - *to = make([]{{ $type }}, l) - } else if l < len(*to) { - *to = (*to)[:l] - } - - u.cleanup(ctx) - return nil - } - - func (u *{{ $name }} ) OnArrayFinished(ctx *unfoldCtx) error { - u.cleanup(ctx) - return nil - } - - func (u *{{ $name }} ) append(ctx *unfoldCtx, v {{ $type }}) error { - idx := &ctx.idx - to := u.ptr(ctx) - if len(*to) <= idx.current { - *to = append(*to, v) - } else { - (*to)[idx.current] = v - } - - idx.current++ - return nil - } - -templates.arrIfc: | - - func unfoldIfcStartSubArray(ctx *unfoldCtx, l int, baseType structform.BaseType) error { - _, ptr, unfolder := makeArrayPtr(ctx, l, baseType) - ctx.ptr.push(ptr) // store pointer for use in 'Finish' - ctx.baseType.push(baseType) - unfolder.initState(ctx, ptr) - return ctx.unfolder.current.OnArrayStart(ctx, l, baseType) - } - - func unfoldIfcFinishSubArray(ctx *unfoldCtx) (interface{}, error) { - child := ctx.ptr.pop() - bt := ctx.baseType.pop() - switch bt { - {{ range $bt, $gt := data.mapTypes }} - case structform.{{ $bt }}: - value := *(*[]{{ $gt }})(child) - last := len(ctx.valueBuffer.arrays) - 1 - ctx.valueBuffer.arrays = ctx.valueBuffer.arrays[:last] - return value, nil - {{ end }} - default: - return nil, errTODO() - } - } - - func makeArrayPtr(ctx *unfoldCtx, l int, bt structform.BaseType) (interface{}, unsafe.Pointer, ptrUnfolder) { - switch bt { - {{ range $bt, $gt := data.mapTypes }} - case structform.{{ $bt }}: - idx := len(ctx.valueBuffer.arrays) - ctx.valueBuffer.arrays = append(ctx.valueBuffer.arrays, nil) - arrPtr := &ctx.valueBuffer.arrays[idx] - ptr := unsafe.Pointer(arrPtr) - to := (*[]{{ $gt }})(ptr) - {{- if or (eq $bt "AnyType") (eq $bt "ZeroType") }} - unfolder := newUnfolderArrIfc() - {{ else }} - unfolder := newUnfolderArr{{ $gt | capitalize }}() - {{ end }} - return to, ptr, unfolder - {{ end }} - default: - panic("invalid type code") - } - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.yml deleted file mode 100644 index 8b3814c4..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_err.yml +++ /dev/null @@ 
-1,63 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -data.errorUnfolders: - unfolderNoTarget: errNotInitialized - unfolderErrUnknown: errUnsupported - unfolderErrArrayStart: errExpectedArray - unfolderErrObjectStart: errExpectedObject - unfolderErrExpectKey: errExpectedObjectKey - -main: | - package gotype - - {{ range $name, $err := data.errorUnfolders }} - type {{ $name }} struct {} - {{ end }} - - {{ range $name, $err := data.errorUnfolders }} - - func (*{{ $name }}) OnNil(*unfoldCtx) error { return {{ $err }} } - func (*{{ $name }}) OnBool(*unfoldCtx, bool) error { return {{ $err }} } - func (*{{ $name }}) OnString(*unfoldCtx, string) error { return {{ $err }} } - func (*{{ $name }}) OnStringRef(*unfoldCtx, []byte) error { return {{ $err }} } - func (*{{ $name }}) OnInt8(*unfoldCtx, int8) error { return {{ $err }} } - func (*{{ $name }}) OnInt16(*unfoldCtx, int16) error { return {{ $err }} } - func (*{{ $name }}) OnInt32(*unfoldCtx, int32) error { return {{ $err }} } - func (*{{ $name }}) OnInt64(*unfoldCtx, int64) error { return {{ $err }} } - func (*{{ $name }}) OnInt(*unfoldCtx, int) error { return {{ $err }} } - func (*{{ $name }}) OnByte(*unfoldCtx, byte) error { return {{ $err }} } - func (*{{ $name }}) OnUint8(*unfoldCtx, uint8) error { return {{ $err }} } - func (*{{ $name }}) OnUint16(*unfoldCtx, uint16) error { return {{ $err }} } - func (*{{ $name }}) OnUint32(*unfoldCtx, uint32) error { return {{ $err }} } - func (*{{ $name }}) OnUint64(*unfoldCtx, uint64) error { return {{ $err }} } - func (*{{ $name }}) OnUint(*unfoldCtx, uint) error { return {{ $err }} } - func (*{{ $name }}) OnFloat32(*unfoldCtx, float32) error { return {{ $err }} } - func (*{{ $name }}) OnFloat64(*unfoldCtx, float64) error { return {{ $err }} } - func (*{{ $name }}) OnArrayStart(*unfoldCtx, int, structform.BaseType) error { return {{ $err }} } - func (*{{ $name }}) OnArrayFinished(*unfoldCtx) error { return {{ $err }} } - func (*{{ $name }}) OnChildArrayDone(*unfoldCtx) error { return {{ $err }} } - func (*{{ $name }}) OnObjectStart(*unfoldCtx, int, structform.BaseType) error { return {{ $err }} } - func (*{{ $name }}) OnObjectFinished(*unfoldCtx) error { return {{ $err }} } - func (*{{ $name }}) OnKey(*unfoldCtx, string) error { return {{ $err }} } - func (*{{ $name }}) OnKeyRef(*unfoldCtx, []byte) error { return {{ $err }} } - func (*{{ $name }}) OnChildObjectDone(*unfoldCtx) error { return {{ $err }} } - - {{ end }} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.yml deleted file mode 100644 index d2548e08..00000000 --- 
a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_ignore.yml +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -main: | - package gotype - - {{ invoke "makeIgnoreType" "type" "" }} - - func (*unfolderIgnore) onValue(ctx *unfoldCtx) error { - ctx.unfolder.pop() - return nil - } - - {{ invoke "makeIgnoreType" "type" "Arr" }} - - func (*unfolderIgnoreArr) onValue(ctx *unfoldCtx) error { - return nil - } - - func (*unfolderIgnoreArr) OnArrayFinished(ctx *unfoldCtx) error { - ctx.unfolder.pop() - return nil - } - - {{ invoke "makeIgnoreType" "type" "Obj" }} - - func (*unfolderIgnoreObj) onValue(ctx *unfoldCtx) error { - return nil - } - - func (*unfolderIgnoreObj) OnObjectFinished(ctx *unfoldCtx) error { - ctx.unfolder.pop() - return nil - } - -templates.makeIgnoreType: | - {{ $type := .type }} - {{ $tUnfolder := printf "unfolderIgnore%v" $type }} - - type unfoldIgnore{{ $type }}Value struct {} - type unfoldIgnore{{ $type }}Ptr struct {} - type {{ $tUnfolder }} struct { - unfolderErrUnknown - } - - var ( - _singletonUnfoldIgnore{{ $type }}Value = &unfoldIgnore{{ $type }}Value{} - _singletonUnfoldIgnore{{ $type }}Ptr = &unfoldIgnore{{ $type }}Ptr{} - _singleton{{ $tUnfolder }} = &{{ $tUnfolder }}{} - ) - - func (*unfoldIgnore{{ $type }}Value) initState(ctx *unfoldCtx, _ reflect.Value) { - ctx.unfolder.push(_singleton{{ $tUnfolder }}) - } - - func (*unfoldIgnore{{ $type }}Ptr) initState(ctx *unfoldCtx, _ unsafe.Pointer) { - ctx.unfolder.push(_singleton{{ $tUnfolder }}) - } - - func (u *{{ $tUnfolder }}) OnNil(ctx *unfoldCtx) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnBool(ctx *unfoldCtx, _ bool) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnString(ctx *unfoldCtx, _ string) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnInt8(ctx *unfoldCtx, _ int8) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnInt16(ctx *unfoldCtx, _ int16) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnInt32(ctx *unfoldCtx, _ int32) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnInt64(ctx *unfoldCtx, _ int64) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnInt(ctx *unfoldCtx, _ int) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnByte(ctx *unfoldCtx, _ byte) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnUint8(ctx *unfoldCtx, _ uint8) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnUint16(ctx *unfoldCtx, _ uint16) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnUint32(ctx *unfoldCtx, _ uint32) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnUint64(ctx *unfoldCtx, _ uint64) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnUint(ctx 
*unfoldCtx, _ uint) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnFloat32(ctx *unfoldCtx, _ float32) error { return u.onValue(ctx) } - func (u *{{ $tUnfolder }}) OnFloat64(ctx *unfoldCtx, _ float64) error { return u.onValue(ctx) } - - func (u *{{ $tUnfolder }}) OnArrayStart(ctx *unfoldCtx, _ int, _ structform.BaseType) error { - _singletonUnfoldIgnoreArrPtr.initState(ctx, nil) - return nil - } - - func (u *{{ $tUnfolder }}) OnChildArrayDone(ctx *unfoldCtx) error { - return u.onValue(ctx) - } - - func (u *{{ $tUnfolder }}) OnObjectStart(ctx *unfoldCtx, _ int, _ structform.BaseType) error { - _singletonUnfoldIgnoreObjPtr.initState(ctx, nil) - return nil - } - - func (u *{{ $tUnfolder }}) OnChildObjectDone(ctx *unfoldCtx) error { - return u.onValue(ctx) - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.yml deleted file mode 100644 index 8e45e446..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_lookup_go.yml +++ /dev/null @@ -1,208 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -main: | - package gotype - - func lookupUserPrimitiveConstructor(t reflect.Type) func(reflect.Value) ptrUnfolder { - switch t.Kind() { - {{- range data.primitiveTypes }} - case reflect.{{ capitalize . }}: - return {{ capitalize . | printf "newUserUnfolder%v" }} - {{ end }} - default: - return nil - } - } - - func lookupGoTypeUnfolder(to interface{}) (unsafe.Pointer, ptrUnfolder) { - switch ptr := to.(type) { - case *interface{}: - return unsafe.Pointer(ptr), newUnfolderIfc() - case *[]interface{}: - return unsafe.Pointer(ptr), newUnfolderArrIfc() - case *map[string]interface{}: - return unsafe.Pointer(ptr), newUnfolderMapIfc() - - {{ range data.primitiveTypes }} - case *{{ . }}: - return unsafe.Pointer(ptr), newUnfolder{{ . | capitalize }}() - case *[]{{ . }}: - return unsafe.Pointer(ptr), newUnfolderArr{{ . | capitalize }}() - case *map[string]{{ . }}: - return unsafe.Pointer(ptr), newUnfolderMap{{ . | capitalize }}() - {{ end }} - default: - return nil, nil - } - } - - func lookupGoPtrUnfolder(t reflect.Type) (ptrUnfolder) { - switch t.Kind() { - case reflect.Interface: - return newUnfolderIfc() - {{ range data.primitiveTypes }} - case reflect.{{ . | capitalize }}: - return newUnfolder{{ . | capitalize }}() - {{ end }} - - case reflect.Slice: - et := t.Elem() - switch et.Kind() { - case reflect.Interface: - return newUnfolderArrIfc() - {{ range data.primitiveTypes }} - case reflect.{{ . | capitalize }}: - return newUnfolderArr{{ . 
| capitalize }}() - {{ end }} - } - - case reflect.Map: - if t.Key().Kind() != reflect.String { - return nil - } - - et := t.Elem() - switch et.Kind() { - case reflect.Interface: - return newUnfolderMapIfc() - {{ range data.primitiveTypes }} - case reflect.{{ . | capitalize }}: - return newUnfolderMap{{ . | capitalize }}() - {{ end }} - } - - } - - return nil - } - - func lookupReflUnfolder(ctx *unfoldCtx, t reflect.Type, withUser bool) (reflUnfolder, error) { - if withUser { - if f := lookupReflUser(ctx, t); f != nil { - return f, nil - } - } - - if t.Implements(tExpander) { - return newExpanderInit(), nil - } - - if f := ctx.reg.find(t); f != nil { - return f, nil - } - - f, err := buildReflUnfolder(ctx, t) - if err != nil { - return nil, err - } - - ctx.reg.set(t, f) - return f, nil - } - - func lookupReflUser(ctx *unfoldCtx, t reflect.Type) reflUnfolder { - if ctx.userReg != nil { - return ctx.userReg[t] - } - return nil - } - - func buildReflUnfolder(ctx *unfoldCtx, t reflect.Type) (reflUnfolder, error) { - // we always expect a pointer - bt := t.Elem() - - switch bt.Kind() { - case reflect.Interface: - return unfolderReflIfc, nil - {{ range data.primitiveTypes }} - case reflect.{{ . | capitalize }}: - return unfolderRefl{{ . | capitalize }}, nil - {{ end }} - - case reflect.Array: - return nil, errTODO() - - case reflect.Ptr: - unfolderElem, err := lookupReflUnfolder(ctx, bt, true) - if err != nil { - return nil, err - } - return newUnfolderReflPtr(unfolderElem), nil - - case reflect.Slice: - et := bt.Elem() - - if unfolderElem := lookupReflUser(ctx, et); unfolderElem != nil { - return newUnfolderReflSlice(unfolderElem), nil - } - - if reflect.PtrTo(et).Implements(tExpander) { - return newUnfolderReflSlice(newExpanderInit()), nil - } - - switch et.Kind() { - case reflect.Interface: - return unfolderReflArrIfc, nil - {{ range data.primitiveTypes }} - case reflect.{{ . | capitalize }}: - return unfolderReflArr{{ . | capitalize }}, nil - {{ end }} - } - - unfolderElem, err := lookupReflUnfolder(ctx, reflect.PtrTo(et), false) - if err != nil { - return nil, err - } - return newUnfolderReflSlice(unfolderElem), nil - - case reflect.Map: - et := bt.Elem() - - if unfolderElem := lookupReflUser(ctx, et); unfolderElem != nil { - return newUnfolderReflMap(unfolderElem), nil - } - - if reflect.PtrTo(et).Implements(tExpander) { - return newUnfolderReflMap(newExpanderInit()), nil - } - - switch et.Kind() { - case reflect.Interface: - return unfolderReflMapIfc, nil - {{ range data.primitiveTypes }} - case reflect.{{ . | capitalize }}: - return unfolderReflMap{{ . | capitalize }}, nil - {{ end }} - } - - unfolderElem, err := lookupReflUnfolder(ctx, reflect.PtrTo(et), false) - if err != nil { - return nil, err - } - return newUnfolderReflMap(unfolderElem), nil - - case reflect.Struct: - return createUnfolderReflStruct(ctx, t) - - default: - return nil, errTODO() - } - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.yml deleted file mode 100644 index fdb8ed16..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_map.yml +++ /dev/null @@ -1,177 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. 
licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -main: | - package gotype - - import "github.com/urso/go-structform" - - {{/* defined 'lifted' pointer map unfolders into reflection based unfolders */}} - var ( - unfolderReflMapIfc = liftGoUnfolder(newUnfolderMapIfc()) - {{ range data.primitiveTypes }} - {{ $t := capitalize . }} - unfolderReflMap{{ $t }} = liftGoUnfolder(newUnfolderMap{{ $t }}()) - {{ end }} - ) - - {{/* define pointer based unfolder types */}} - {{ invoke "makeTypeWithName" "name" "Ifc" "type" "interface{}" }} - {{ template "makeType" "bool" }} - {{ template "makeType" "string" }} - {{ range .numTypes }} - {{ template "makeType" . }} - {{ end }} - - {{/* create value visitor callbacks */}} - {{ invoke "onIfcFns" "name" "unfolderMapIfc" "fn" "put" }} - {{ invoke "onBoolFns" "name" "unfolderMapBool" "fn" "put" }} - {{ invoke "onStringFns" "name" "unfolderMapString" "fn" "put" }} - {{ range .numTypes }} - {{ $type := . }} - {{ $name := capitalize $type | printf "unfolderMap%v" }} - {{ invoke "onNumberFns" "name" $name "type" $type "fn" "put" }} - {{ end }} - - {{ template "mapIfc" }} - - -# makeTypeWithName(name, type) -templates.makeTypeWithName: | - {{ $type := .type }} - {{ $name := capitalize .name | printf "unfolderMap%v" }} - {{ $startName := capitalize .name | printf "unfoldMapStart%v" }} - {{ $keyName := capitalize .name | printf "unfoldMapKey%v" }} - - {{ invoke "makeUnfoldType" "name" $name }} - {{ invoke "makeUnfoldType" "name" $startName "base" "unfolderErrObjectStart" }} - {{ invoke "makeUnfoldType" "name" $keyName "base" "unfolderErrExpectKey" }} - - func (u *{{ $name }} ) initState(ctx *unfoldCtx, ptr unsafe.Pointer) { - ctx.unfolder.push(new{{ $keyName | capitalize}}()) - ctx.unfolder.push(new{{ $startName | capitalize}}()) - ctx.ptr.push(ptr) - } - - func (u * {{ $keyName }} ) cleanup(ctx *unfoldCtx) { - ctx.unfolder.pop() - ctx.ptr.pop() - } - - func (u * {{ $startName }}) cleanup(ctx *unfoldCtx) { - ctx.unfolder.pop() - } - - - func (u *{{ $name }} ) ptr(ctx *unfoldCtx) *map[string]{{ $type }} { - return (*map[string]{{ $type }})(ctx.ptr.current) - } - - - func (u *{{ $startName }} ) OnObjectStart(ctx *unfoldCtx, l int, baseType structform.BaseType) error { - // TODO: validate baseType - - u.cleanup(ctx) - return nil - } - - func (u *{{ $keyName }} ) OnKeyRef(ctx *unfoldCtx, key []byte) error { - return u.OnKey(ctx, ctx.keyCache.get(key)) - } - - func (u *{{ $keyName }} ) OnKey(ctx *unfoldCtx, key string) error { - ctx.key.push(key) - ctx.unfolder.current = new{{ $name | capitalize }}() - return nil - } - - func (u *{{ $keyName }} ) OnObjectFinished(ctx *unfoldCtx) error { - u.cleanup(ctx) - return nil - } - - func (u *{{ $name }} ) put(ctx *unfoldCtx, v {{ $type }}) error { - to := u.ptr(ctx) - if *to == nil { - *to = map[string]{{ $type }}{} - } - (*to)[ctx.key.pop()] = v - - ctx.unfolder.current = new{{ $keyName | capitalize }}() - return nil - } - -templates.mapIfc: | - func unfoldIfcStartSubMap(ctx 
*unfoldCtx, l int, baseType structform.BaseType) error { - _, ptr, unfolder := makeMapPtr(ctx, l, baseType) - ctx.ptr.push(ptr) - ctx.baseType.push(baseType) - unfolder.initState(ctx, ptr) - return ctx.unfolder.current.OnObjectStart(ctx, l, baseType) - } - - func unfoldIfcFinishSubMap(ctx *unfoldCtx) (interface{}, error) { - child := ctx.ptr.pop() - bt := ctx.baseType.pop() - switch bt { - {{ range $bt, $gt := data.mapTypes }} - case structform.{{ $bt }}: - value := *(*map[string]{{ $gt }})(child) - {{- if or (eq $bt "AnyType") (eq $bt "ZeroType") }} - last := len(ctx.valueBuffer.mapAny)-1 - ctx.valueBuffer.mapAny = ctx.valueBuffer.mapAny[:last] - {{ else }} - last := len(ctx.valueBuffer.mapPrimitive)-1 - ctx.valueBuffer.mapPrimitive = ctx.valueBuffer.mapPrimitive[:last] - {{ end -}} - return value, nil - - {{ end }} - default: - return nil, errTODO() - } - } - - func makeMapPtr(ctx *unfoldCtx, l int, bt structform.BaseType) (interface{}, unsafe.Pointer, ptrUnfolder) { - switch bt { - {{ range $bt, $gt := data.mapTypes }} - case structform.{{ $bt }}: - {{- if or (eq $bt "AnyType") (eq $bt "ZeroType") }} - idx := len(ctx.valueBuffer.mapAny) - ctx.valueBuffer.mapAny = append(ctx.valueBuffer.mapAny, nil) - to := &ctx.valueBuffer.mapAny[idx] - ptr := unsafe.Pointer(to) - unfolder := newUnfolderMapIfc() - return to, ptr, unfolder - {{ else }} - idx := len(ctx.valueBuffer.mapPrimitive) - ctx.valueBuffer.mapPrimitive = append(ctx.valueBuffer.mapPrimitive, nil) - mapPtr := &ctx.valueBuffer.mapPrimitive[idx] - ptr := unsafe.Pointer(mapPtr) - to := (*map[string]{{ $gt }})(ptr) - unfolder := newUnfolderMap{{ $gt | capitalize }}() - return to, ptr, unfolder - {{ end }} - - {{ end }} - default: - panic("invalid type code") - } - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.yml deleted file mode 100644 index affa3702..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_primitive.yml +++ /dev/null @@ -1,89 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -main: | - package gotype - - var ( - {{/* defined 'lifted' pointer primitive unfolders into reflection based unfolders */}} - unfolderReflIfc = liftGoUnfolder(newUnfolderIfc()) - {{ range data.primitiveTypes }} - {{ $t := capitalize . }} - unfolderRefl{{ $t }} = liftGoUnfolder(newUnfolder{{ $t }}()) - {{ end }} - ) - - {{/* define pointer based unfolder types */}} - {{ invoke "makeTypeWithName" "name" "ifc" "type" "interface{}" }} - {{ template "makeType" "bool" }} - {{ template "makeType" "string" }} - {{ range .numTypes }} - {{ template "makeType" . 
}} - {{ end }} - - {{/* create value visitor callbacks */}} - {{ invoke "onIfcFns" "name" "unfolderIfc" "fn" "assign" }} - {{ invoke "onBoolFns" "name" "unfolderBool" "fn" "assign" }} - {{ invoke "onStringFns" "name" "unfolderString" "fn" "assign" }} - {{ range .numTypes }} - {{ $type := . }} - {{ $name := capitalize . | printf "unfolder%v" }} - {{ invoke "onNumberFns" "name" $name "type" $type "fn" "assign" }} - {{ end }} - - /* - func (*unfolderIfc) OnArrayStart(ctx *unfoldCtx, l int, bt structform.BaseType) error { - return unfoldIfcStartSubArray(ctx, l, bt) - } - - func (u *unfolderIfc) OnChildArrayDone(ctx *unfoldCtx) error { - v, err := unfoldIfcFinishSubArray(ctx) - if err == nil { - err = u.assign(ctx, v) - } - return err - } - */ - -# makeTypeWithName(name, type, [base]) -templates.makeTypeWithName: | - {{ $type := .type }} - {{ $name := capitalize .name | printf "unfolder%v" }} - {{ invoke "makeUnfoldType" "type" $type "name" $name "base" .base }} - - func (u *{{ $name }} ) initState(ctx *unfoldCtx, ptr unsafe.Pointer) { - ctx.unfolder.push(u) - ctx.ptr.push(ptr) - } - - func (u *{{ $name }} ) cleanup(ctx *unfoldCtx) { - ctx.unfolder.pop() - ctx.ptr.pop() - } - - func (u *{{ $name }} ) ptr(ctx *unfoldCtx) *{{ $type }} { - return (*{{ $type }})(ctx.ptr.current) - } - - func (u *{{ $name }}) assign(ctx *unfoldCtx, v {{ $type }}) error { - *u.ptr(ctx) = v - u.cleanup(ctx) - return nil - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.yml deleted file mode 100644 index c7e4b447..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_refl.yml +++ /dev/null @@ -1,138 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
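The unfold_*.yml files deleted above are code-generation templates for go-structform's gotype package: each expands into small, stateless singleton unfolders that drive a stack machine over an unfoldCtx (a stack of active unfolders plus a stack of destination pointers). For orientation only, here is a minimal hand-expanded Go sketch of the "assign" pattern that unfold_primitive.yml generates for a single type; the context and stack types below are simplified stand-ins, not the package's real API.

    package main

    import (
        "fmt"
        "unsafe"
    )

    // unfoldCtx reduces the real context to the two stacks the generated
    // unfolders manipulate: the active unfolder and the destination pointer.
    type unfoldCtx struct {
        unfolders []ptrUnfolder
        ptrs      []unsafe.Pointer
    }

    type ptrUnfolder interface {
        initState(ctx *unfoldCtx, ptr unsafe.Pointer)
        OnInt64(ctx *unfoldCtx, v int64) error
    }

    func (c *unfoldCtx) push(u ptrUnfolder, p unsafe.Pointer) {
        c.unfolders = append(c.unfolders, u)
        c.ptrs = append(c.ptrs, p)
    }

    func (c *unfoldCtx) pop() {
        c.unfolders = c.unfolders[:len(c.unfolders)-1]
        c.ptrs = c.ptrs[:len(c.ptrs)-1]
    }

    // unfolderInt64 mirrors what makeTypeWithName expands to for int64: a
    // stateless singleton whose initState pushes itself and the target
    // pointer, and whose value callback assigns through that pointer and
    // then pops both stacks (the generated cleanup).
    type unfolderInt64 struct{}

    var _singletonUnfolderInt64 = &unfolderInt64{}

    func newUnfolderInt64() *unfolderInt64 { return _singletonUnfolderInt64 }

    func (u *unfolderInt64) initState(ctx *unfoldCtx, ptr unsafe.Pointer) {
        ctx.push(u, ptr)
    }

    func (u *unfolderInt64) OnInt64(ctx *unfoldCtx, v int64) error {
        *(*int64)(ctx.ptrs[len(ctx.ptrs)-1]) = v // assign
        ctx.pop()                                // cleanup
        return nil
    }

    func main() {
        var to int64
        ctx := &unfoldCtx{}
        newUnfolderInt64().initState(ctx, unsafe.Pointer(&to))

        // A decoder driving the visitor would emit the value event here.
        top := ctx.unfolders[len(ctx.unfolders)-1]
        if err := top.OnInt64(ctx, 42); err != nil {
            panic(err)
        }
        fmt.Println(to) // prints 42
    }

The real generated code registers one such callback per visitor method (OnBool, OnString, every integer width, and so on), which is exactly the repetition these templates exist to avoid.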
- -import: - - unfold_templates.yml - -main: | - package gotype - - func (u *unfolderReflSlice) OnNil(ctx *unfoldCtx) error { - u.prepare(ctx) - return nil - } - - {{ invoke "makeReflPrimitives" "type" "unfolderReflSlice" }} - {{ invoke "makeReflChildArrays" "type" "unfolderReflSlice" }} - {{ invoke "makeReflChildObjects" "type" "unfolderReflSlice" }} - - func (u *unfolderReflMapOnElem) OnNil(ctx *unfoldCtx) error { - ptr := ctx.value.current - m := ptr.Elem() - v := reflect.Zero(m.Type().Elem()) - m.SetMapIndex(reflect.ValueOf(ctx.key.pop()), v) - - ctx.unfolder.current = u.shared.waitKey - return nil - } - - {{ invoke "makeReflPrimitives" "type" "unfolderReflMapOnElem" "process" "process" }} - {{ invoke "makeReflChildArrays" "type" "unfolderReflMapOnElem" "process" "process" }} - {{ invoke "makeReflChildObjects" "type" "unfolderReflMapOnElem" "process" "process" "error" "errExpectedObjectValue" }} - - func (u *unfolderReflPtr) OnNil(ctx *unfoldCtx) error { - ptr := ctx.value.current - v := ptr.Elem() - v.Set(reflect.Zero(v.Type())) - u.cleanup(ctx) - return nil - } - - {{ invoke "makeReflPrimitives" "type" "unfolderReflPtr" "process" "process" }} - {{ invoke "makeReflChildArrays" "type" "unfolderReflPtr" "process" "process" }} - {{ invoke "makeReflChildObjects" "type" "unfolderReflPtr" "process" "process" }} - -# makeReflPrimitiveCallbacks(type, [process]) -templates.makeReflPrimitives: | - {{ $type := .type}} - {{ $process := .process }} - - func (u *{{ $type }}) OnByte(ctx *unfoldCtx, v byte) error { - elem := u.prepare(ctx) - u.elem.initState(ctx, elem) - err := ctx.unfolder.current.OnByte(ctx, v) - {{ if $process }} - if err == nil { - u.{{ $process }}(ctx) - } - {{ end }} - return err - } - - func (u *{{ $type }}) OnStringRef(ctx *unfoldCtx, v []byte) error { - return u.OnString(ctx, string(v)) - } - - {{ range data.primitiveTypes }} - func (u *{{ $type }}) On{{ . | capitalize}}(ctx *unfoldCtx, v {{ . }}) error { - elem := u.prepare(ctx) - u.elem.initState(ctx, elem) - err := ctx.unfolder.current.On{{ . 
| capitalize }}(ctx, v) - {{ if $process }} - if err == nil { - u.{{ $process }}(ctx) - } - {{ end }} - return err - } - {{ end }} - -# makeReflChildArrays(type, [process]) -templates.makeReflChildArrays: | - {{ $type := .type}} - {{ $process := .process }} - - func (u *{{ $type }}) OnArrayStart(ctx *unfoldCtx, l int, bt structform.BaseType) error { - elem := u.prepare(ctx) - u.elem.initState(ctx, elem) - return ctx.unfolder.current.OnArrayStart(ctx, l, bt) - } - - func (u *{{ $type }}) OnChildArrayDone(ctx *unfoldCtx) error { - {{ if $process }} - u.{{ $process }}(ctx) - {{ end }} - return nil - } - -# makeReflChildObjects(type, [process], [error]) -templates.makeReflChildObjects: | - {{ $type := .type}} - {{ $process := .process }} - {{ $error := default "errUnsupported" .error }} - - func (u *{{ $type }}) OnObjectStart(ctx *unfoldCtx, l int, bt structform.BaseType) error { - elem := u.prepare(ctx) - u.elem.initState(ctx, elem) - return ctx.unfolder.current.OnObjectStart(ctx, l, bt) - } - - func (u *{{ $type }}) OnKey(_ *unfoldCtx, _ string) error { - return {{ $error }} - } - - func (u *{{ $type }}) OnKeyRef(_ *unfoldCtx, _ []byte) error { - return {{ $error }} - } - - func (u *{{ $type }}) OnChildObjectDone(ctx *unfoldCtx) error { - {{ if $process }} - u.{{ $process }}(ctx) - {{ end }} - return nil - } - diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_templates.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_templates.yml deleted file mode 100644 index 2c594a7e..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_templates.yml +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - "types.yml" - -# makeUnfoldType(name, [.base]) -templates.makeUnfoldType: | - {{ $name := .name }} - {{ $base := default "unfolderErrUnknown" .base }} - - type {{ $name }} struct { - {{ $base }} - } - - var _singleton{{ $name | capitalize }} = &{{ $name }}{} - - func new{{ $name | capitalize }}() *{{ $name }} { - return _singleton{{ $name | capitalize }} - } - -# makeType(.) -> invokes makeTypeWithName(name, type) -templates.makeType: | - {{ invoke "makeTypeWithName" "name" (capitalize .) "type" . 
}} - -# makeBoolFns(name, fn) -templates.onBoolFns: | - {{ invoke "onNil" "name" .name "fn" .fn "default" "false" }} - {{ invoke "onBool" "name" .name "fn" .fn }} - -# makeStringFns(name, fn) -templates.onStringFns: | - {{ invoke "onNil" "name" .name "fn" .fn "default" "\"\"" }} - {{ invoke "onString" "name" .name "fn" .fn }} - -# makeNumberFns(name, fn) -templates.onNumberFns: | - {{ invoke "onNil" "name" .name "fn" .fn "default" "0" }} - {{ invoke "onNumber" "name" .name "type" .type "fn" .fn }} - -# onIfcFns(name, fn) -templates.onIfcFns: | - {{ invoke "onNil" "name" .name "fn" .fn "default" "nil" }} - {{ invoke "onBool" "name" .name "fn" .fn }} - {{ invoke "onString" "name" .name "fn" .fn }} - {{ invoke "onNumber" "name" .name "type" "(interface{})" "fn" .fn }} - - func (*{{ .name }} ) OnArrayStart(ctx *unfoldCtx, l int, bt structform.BaseType) error { - return unfoldIfcStartSubArray(ctx, l, bt) - } - - func (u *{{ .name }}) OnChildArrayDone(ctx *unfoldCtx) error { - v, err := unfoldIfcFinishSubArray(ctx) - if err == nil { - err = u.{{ .fn }}(ctx, v) - } - return err - } - - func (*{{ .name }}) OnObjectStart(ctx *unfoldCtx, l int, bt structform.BaseType) error { - return unfoldIfcStartSubMap(ctx, l, bt) - } - - func (u *{{ .name }}) OnChildObjectDone(ctx *unfoldCtx) error { - v, err := unfoldIfcFinishSubMap(ctx) - if err == nil { - err = u.{{ .fn }}(ctx, v) - } - return err - } - - -# onBool(name, fn) -templates.onBool: | - func (u *{{ .name }}) OnBool(ctx *unfoldCtx, v bool) error { return u.{{ .fn }} (ctx, v) } - -# onString(name, fn) -templates.onString: | - func (u *{{ .name }}) OnString(ctx *unfoldCtx, v string) error { return u.{{ .fn }} (ctx, v) } - func (u *{{ .name }}) OnStringRef(ctx *unfoldCtx, v []byte) error { - return u.OnString(ctx, string(v)) - } - - -# onNil(name, fn, default) -templates.onNil: | - func (u *{{ .name }}) OnNil(ctx *unfoldCtx) error { - return u.{{ .fn }}(ctx, {{ .default }}) - } - - -# onNumber(name, fn, type) -templates.onNumber: | - {{ $name := .name }} - {{ $fn := .fn }} - {{ $type := .type }} - - func (u *{{ $name }}) OnByte(ctx *unfoldCtx, v byte) error { - return u.{{ $fn }}(ctx, {{ $type }}(v)) - } - {{ range $t := data.numTypes }} - func (u *{{ $name }}) On{{ $t | capitalize}}(ctx *unfoldCtx, v {{ $t }}) error { - return u.{{ $fn }}(ctx, {{ $type }}(v)) - } - {{ end }} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_user_primitive.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_user_primitive.yml deleted file mode 100644 index c825ad04..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_user_primitive.yml +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. 
See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -main: | - package gotype - - {{/* define pointer based user unfolder types */}} - {{ template "makeType" "bool" }} - {{ template "makeType" "string" }} - {{ range .numTypes }} - {{ template "makeType" . }} - {{ end }} - - {{/* create value visitor callbacks */}} - {{ invoke "onBoolFns" "name" "userUnfolderBool" "fn" "process" }} - {{ invoke "onStringFns" "name" "userUnfolderString" "fn" "process" }} - {{ range .numTypes }} - {{ $type := . }} - {{ $name := capitalize . | printf "userUnfolder%v" }} - {{ invoke "onNumberFns" "name" $name "type" $type "fn" "process" }} - {{ end }} - -# makeTypeWithName(name, type, [base]) -templates.makeTypeWithName: | - {{ $type := .type }} - {{ $name := capitalize .name | printf "userUnfolder%v" }} - {{ invoke "makeUserUnfoldType" "type" $type "name" $name "base" .base }} - - func (u *{{ $name }} ) initState(ctx *unfoldCtx, ptr unsafe.Pointer) { - ctx.unfolder.push(u) - ctx.ptr.push(ptr) - } - - func (u *{{ $name }} ) cleanup(ctx *unfoldCtx) { - ctx.unfolder.pop() - ctx.ptr.pop() - } - - func (u *{{ $name }}) process(ctx *unfoldCtx, v {{ $type }}) error { - err := u.fn(ctx.ptr.current, v) - u.cleanup(ctx) - return err - } - -templates.makeUserUnfoldType: | - {{ $type := .type }} - {{ $name := .name }} - {{ $cbname := .name | printf "%vCB" }} - {{ $base := default "unfolderErrUnknown" .base }} - - type ( - {{ $name }} struct { - {{ $base }} - fn {{ $cbname }} - } - - {{ $cbname }} func(unsafe.Pointer, {{ $type }}) error - ) - - func new{{ $name | capitalize }}(fn reflect.Value) ptrUnfolder { - return &{{ $name }}{ - fn: *((*{{ $cbname }})(stunsafe.UnsafeFnPtr(fn))), - } - } - diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_user_processing.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_user_processing.yml deleted file mode 100644 index f4b7d8be..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-structform/gotype/unfold_user_processing.yml +++ /dev/null @@ -1,175 +0,0 @@ -# Licensed to Elasticsearch B.V. under one or more contributor -# license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright -# ownership. Elasticsearch B.V. licenses this file to you under -# the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -import: - - unfold_templates.yml - -main: | - package gotype - - type unfolderUserProcessingInit struct { - fnInit userProcessingInitFn - } - - type unfolderUserFailing struct { - err error - } - - type unfolderUserProcessing struct { - // XXX: move user processing into unfoldCtx as stacks? 
-> no more allocations - startSz int - fn userProcessingFn - } - - type userProcessingInitFn func(unsafe.Pointer) (interface{}, userProcessingFn) - - type userProcessingFn func(unsafe.Pointer, interface{}) error - - func (u *unfolderUserProcessingInit) initState(ctx *unfoldCtx, ptr unsafe.Pointer) { - cell, cont := u.fnInit(ptr) - - unfolder, err := lookupReflUnfolder(ctx, reflect.TypeOf(cell), false) - if err != nil { - // Use unfolderUserFailing. If there is a chance that no value is - // unfolded into the current target, then we can continue processing - // without reporting said error. - ctx.unfolder.push(&unfolderUserFailing{err}) - return - } - - startSz := len(ctx.unfolder.stack) - - v := reflect.ValueOf(cell) - ctx.ptr.push(ptr) - ctx.value.push(v) - - unfolder.initState(ctx, v) - ctx.unfolder.push(&unfolderUserProcessing{ - startSz: startSz, - fn: cont, - }) - } - - func (u *unfolderUserFailing) OnNil(*unfoldCtx) error { return u.err } - func (u *unfolderUserFailing) OnBool(*unfoldCtx, bool) error { return u.err } - func (u *unfolderUserFailing) OnByte(*unfoldCtx, byte) error { return u.err } - func (u *unfolderUserFailing) OnString(*unfoldCtx, string) error { return u.err } - func (u *unfolderUserFailing) OnStringRef(*unfoldCtx, []byte) error { return u.err } - func (u *unfolderUserFailing) OnInt8(*unfoldCtx, int8) error { return u.err } - func (u *unfolderUserFailing) OnInt16(*unfoldCtx, int16) error { return u.err } - func (u *unfolderUserFailing) OnInt32(*unfoldCtx, int32) error { return u.err } - func (u *unfolderUserFailing) OnInt64(*unfoldCtx, int64) error { return u.err } - func (u *unfolderUserFailing) OnInt(*unfoldCtx, int) error { return u.err } - func (u *unfolderUserFailing) OnUint8(*unfoldCtx, uint8) error { return u.err } - func (u *unfolderUserFailing) OnUint16(*unfoldCtx, uint16) error { return u.err } - func (u *unfolderUserFailing) OnUint32(*unfoldCtx, uint32) error { return u.err } - func (u *unfolderUserFailing) OnUint64(*unfoldCtx, uint64) error { return u.err } - func (u *unfolderUserFailing) OnUint(*unfoldCtx, uint) error { return u.err } - func (u *unfolderUserFailing) OnFloat32(*unfoldCtx, float32) error { return u.err } - func (u *unfolderUserFailing) OnFloat64(*unfoldCtx, float64) error { return u.err } - func (u *unfolderUserFailing) OnArrayStart(*unfoldCtx, int, structform.BaseType) error { return u.err } - func (u *unfolderUserFailing) OnArrayFinished(*unfoldCtx) error { return u.err } - func (u *unfolderUserFailing) OnChildArrayDone(*unfoldCtx) error { return u.err } - func (u *unfolderUserFailing) OnObjectStart(*unfoldCtx, int, structform.BaseType) error { return u.err } - func (u *unfolderUserFailing) OnObjectFinished(*unfoldCtx) error { return u.err } - func (u *unfolderUserFailing) OnKey(*unfoldCtx, string) error { return u.err } - func (u *unfolderUserFailing) OnKeyRef(*unfoldCtx, []byte) error { return u.err } - func (u *unfolderUserFailing) OnChildObjectDone(*unfoldCtx) error { return u.err } - - func (u *unfolderUserProcessing) beforeCall(ctx *unfoldCtx) { - ctx.unfolder.pop() // temporarily remove unfolder from top of stack - } - - func (u *unfolderUserProcessing) afterCall(ctx *unfoldCtx, err error) error { - if err != nil { - return err - } - - if u.startSz >= len(ctx.unfolder.stack) { - return u.finalize(ctx) - } - - ctx.unfolder.push(u) - return nil - } - - func (u *unfolderUserProcessing) finalize(ctx *unfoldCtx) error { - return u.fn(ctx.ptr.pop(), ctx.value.pop().Interface()) - } - - func (u *unfolderUserProcessing) OnNil(ctx
*unfoldCtx) error { - u.beforeCall(ctx) - err := ctx.OnNil() - return u.afterCall(ctx, err) - } - - func (u *unfolderUserProcessing) OnStringRef(ctx *unfoldCtx, v []byte) error { - u.beforeCall(ctx) - err := ctx.OnStringRef(v) - return u.afterCall(ctx, err) - } - - {{ range data.primitiveTypes }} - func (u *unfolderUserProcessing) On{{ capitalize . }}(ctx *unfoldCtx, v {{ . }}) error { - u.beforeCall(ctx) - err := ctx.On{{ capitalize . }}(v) - return u.afterCall(ctx, err) - } - {{ end }} - - func (u *unfolderUserProcessing) OnByte(ctx *unfoldCtx, v byte) error { - u.beforeCall(ctx) - err := ctx.OnByte(v) - return u.afterCall(ctx, err) - } - - {{ invoke "makeCallStructured" "kind" "array" }} - {{ invoke "makeCallStructured" "kind" "object" }} - - func (u *unfolderUserProcessing) OnKey(ctx *unfoldCtx, v string) error { - u.beforeCall(ctx) - err := ctx.OnKey(v) - return u.afterCall(ctx, err) - } - - func (u *unfolderUserProcessing) OnKeyRef(ctx *unfoldCtx, v []byte) error { - u.beforeCall(ctx) - err := ctx.OnKeyRef(v) - return u.afterCall(ctx, err) - } - - -# makeCallStructured(kind) -templates.makeCallStructured: | - {{ $kind := .kind | capitalize }} - func (u *unfolderUserProcessing) On{{ $kind }}Start(ctx *unfoldCtx, v int, bt structform.BaseType) error { - u.beforeCall(ctx) - err := ctx.unfolder.current.On{{ $kind }}Start(ctx, v, bt) - return u.afterCall(ctx, err) - } - - func (u *unfolderUserProcessing) On{{ $kind }}Finished(ctx *unfoldCtx) error { - u.beforeCall(ctx) - err := ctx.unfolder.current.On{{ $kind }}Finished(ctx) - return u.afterCall(ctx, err) - } - - func (u *unfolderUserProcessing) OnChild{{ $kind }}Done(ctx *unfoldCtx) error { - u.beforeCall(ctx) - err := ctx.unfolder.current.OnChild{{ $kind }}Done(ctx) - return u.afterCall(ctx, err) - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md index f1727d20..280c2269 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/CHANGELOG.md @@ -18,6 +18,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Security +## [1.1.0] - 2019-08-22 + +### Added + +- Add `VMStat` interface for Linux. [#59](https://github.com/elastic/go-sysinfo/pull/59) + ## [1.0.2] - 2019-07-09 ### Fixed @@ -51,6 +57,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Changed the host containerized check to reduce false positives. 
[#42](https://github.com/elastic/go-sysinfo/pull/42) [#43](https://github.com/elastic/go-sysinfo/pull/43) -[Unreleased]: https://github.com/elastic/go-sysinfo/compare/v1.0.1...HEAD +[Unreleased]: https://github.com/elastic/go-sysinfo/compare/v1.1.0...HEAD +[1.1.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.1.0 +[1.0.2]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.2 [1.0.1]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.1 [1.0.0]: https://github.com/elastic/go-sysinfo/releases/tag/v1.0.0 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/Makefile new file mode 100644 index 00000000..0f69ebf3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/Makefile @@ -0,0 +1,29 @@ +GOPATH?=~/go + +.PHONY: update +update: fmt lic imports + +.PHONY: lic +lic: $(GOPATH)/bin/go-licenser + go-licenser + +.PHONY: fmt +fmt: $(GOPATH)/bin/gofumpt + gofumpt -w -l ./ + +.PHONY: imports +imports: $(GOPATH)/bin/goimports + goimports -l -local github.com/elastic/go-sysinfo ./ + +$(GOPATH)/bin/go-licenser: + @echo "go-licenser missing, installing" + GO111MODULE=off go get -u github.com/elastic/go-licenser + +$(GOPATH)/bin/gofumpt: + @echo "gofumpt missing, installing" + # Ugly boilerplate for go mod installs; escape $$ so the shell, not make, expands mktemp + cd $$(mktemp -d); go mod init tmp; go get mvdan.cc/gofumpt + +$(GOPATH)/bin/goimports: + @echo "goimports missing, installing" + GO111MODULE=off go get -u golang.org/x/tools/cmd/goimports \ No newline at end of file diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/NOTICE.txt deleted file mode 100644 index c813ad58..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Elastic go-sysinfo -Copyright 2017-2019 Elasticsearch B.V. - -This product includes software developed at -Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md index 3a657a4b..958f11c2 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/README.md @@ -36,6 +36,7 @@ that are implemented. | `Info()` | x | x | x | | `Memory()` | x | x | x | | `CPUTimer` | x | x | x | +| `VMStat` | | x | | | `Process` Features | Darwin | Linux | Windows | |------------------------|--------|-------|---------| diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go index 9b09ddff..19325b02 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/host_linux.go @@ -71,6 +71,16 @@ func (h *host) Memory() (*types.HostMemoryInfo, error) { return parseMemInfo(content) } +// VMStat reports data from /proc/vmstat on Linux.
+func (h *host) VMStat() (*types.VMStatInfo, error) { + content, err := ioutil.ReadFile(h.procFS.path("vmstat")) + if err != nil { + return nil, err + } + + return parseVMStat(content) +} + func (h *host) CPUTime() (types.CPUTimes, error) { stat, err := h.procFS.NewStat() if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go index 30357430..758caaba 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/memory_linux.go @@ -18,9 +18,6 @@ package linux import ( - "bytes" - "strconv" - "github.com/pkg/errors" "github.com/elastic/go-sysinfo/types" @@ -73,28 +70,3 @@ func parseMemInfo(content []byte) (*types.HostMemoryInfo, error) { return memInfo, nil } - -func parseBytesOrNumber(data []byte) (uint64, error) { - parts := bytes.Fields(data) - - if len(parts) == 0 { - return 0, errors.New("empty value") - } - - num, err := strconv.ParseUint(string(parts[0]), 10, 64) - if err != nil { - return 0, errors.Wrap(err, "failed to parse value") - } - - var multiplier uint64 = 1 - if len(parts) >= 2 { - switch string(parts[1]) { - case "kB": - multiplier = 1024 - default: - return 0, errors.Errorf("unhandled unit %v", string(parts[1])) - } - } - - return num * multiplier, nil -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go index 066fef6a..0be3f6b0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/util.go @@ -84,3 +84,28 @@ func decodeBitMap(s string, lookupName func(int) string) ([]string, error) { return names, nil } + +func parseBytesOrNumber(data []byte) (uint64, error) { + parts := bytes.Fields(data) + + if len(parts) == 0 { + return 0, errors.New("empty value") + } + + num, err := strconv.ParseUint(string(parts[0]), 10, 64) + if err != nil { + return 0, errors.Wrap(err, "failed to parse value") + } + + var multiplier uint64 = 1 + if len(parts) >= 2 { + switch string(parts[1]) { + case "kB": + multiplier = 1024 + default: + return 0, errors.Errorf("unhandled unit %v", string(parts[1])) + } + } + + return num * multiplier, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go new file mode 100644 index 00000000..0a228678 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/providers/linux/vmstat.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package linux + +import ( + "reflect" + + "github.com/pkg/errors" + + "github.com/elastic/go-sysinfo/types" +) + +// vmstatTagToFieldIndex contains a mapping of json struct tags to struct field indices. +var vmstatTagToFieldIndex = make(map[string]int) + +func init() { + var vmstat types.VMStatInfo + val := reflect.ValueOf(vmstat) + typ := reflect.TypeOf(vmstat) + + for i := 0; i < val.NumField(); i++ { + field := typ.Field(i) + if tag := field.Tag.Get("json"); tag != "" { + vmstatTagToFieldIndex[tag] = i + } + } +} + +// parseVMStat parses the contents of /proc/vmstat. +func parseVMStat(content []byte) (*types.VMStatInfo, error) { + var vmStat types.VMStatInfo + refValues := reflect.ValueOf(&vmStat).Elem() + + err := parseKeyValue(content, " ", func(key, value []byte) error { + // turn our []byte value into an int + val, err := parseBytesOrNumber(value) + if err != nil { + return errors.Wrapf(err, "failed to parse %v value of %v", string(key), string(value)) + } + + idx, ok := vmstatTagToFieldIndex[string(key)] + if !ok { + return nil + } + + sval := refValues.Field(idx) + + if sval.CanSet() { + sval.SetUint(val) + } + return nil + }) + + return &vmStat, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go index b4af4052..c7ec1693 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/errors.go @@ -19,4 +19,5 @@ package types import "github.com/pkg/errors" +// ErrNotImplemented represents an error for a function that is not implemented on a particular platform. var ErrNotImplemented = errors.New("unimplemented") diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go index c195c9ff..62377441 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/go.go @@ -17,6 +17,7 @@ package types +// GoInfo contains info about the go runtime type GoInfo struct { OS string `json:"os"` Arch string `json:"arch"` diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go index 32554dd9..bf5b80cd 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/host.go @@ -19,12 +19,19 @@ package types import "time" +// Host is the interface that wraps methods for returning Host stats type Host interface { CPUTimer Info() HostInfo Memory() (*HostMemoryInfo, error) } +// VMStat is the interface wrapper for platforms that support /proc/vmstat. +type VMStat interface { + VMStat() (*VMStatInfo, error) +} + +// HostInfo contains basic host information. 
type HostInfo struct { Architecture string `json:"architecture"` // Hardware architecture (e.g. x86_64, arm, ppc, mips). BootTime time.Time `json:"boot_time"` // Host boot time. @@ -39,10 +46,12 @@ type HostInfo struct { UniqueID string `json:"id,omitempty"` // Unique ID of the host (optional). } +// Uptime returns the system uptime func (host HostInfo) Uptime() time.Duration { return time.Since(host.BootTime) } +// OSInfo contains basic OS information type OSInfo struct { Family string `json:"family"` // OS Family (e.g. redhat, debian, freebsd, windows). Platform string `json:"platform"` // OS platform (e.g. centos, ubuntu, windows). @@ -55,10 +64,13 @@ type OSInfo struct { Codename string `json:"codename,omitempty"` // OS codename (e.g. jessie). } +// LoadAverage is the interface that wraps the LoadAverage method. +// LoadAverage returns load info on the host type LoadAverage interface { LoadAverage() LoadAverageInfo } +// LoadAverageInfo contains load statistics type LoadAverageInfo struct { One float64 `json:"one_min"` Five float64 `json:"five_min"` @@ -76,3 +88,181 @@ type HostMemoryInfo struct { VirtualFree uint64 `json:"virtual_free_bytes"` // Virtual memory that is not used. Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. } + +// VMStatInfo contains parsed info from /proc/vmstat. +// This procfs file has expanded much over the years +// with different kernel versions. If we don't have a field in vmstat, +// the field in the struct will just be blank. The comments represent kernel versions. +type VMStatInfo struct { + NrFreePages uint64 `json:"nr_free_pages"` // (since Linux 2.6.31) + NrAllocBatch uint64 `json:"nr_alloc_batch"` // (since Linux 3.12) + NrInactiveAnon uint64 `json:"nr_inactive_anon"` // (since Linux 2.6.28) + NrActiveAnon uint64 `json:"nr_active_anon"` // (since Linux 2.6.28) + NrInactiveFile uint64 `json:"nr_inactive_file"` // (since Linux 2.6.28) + NrActiveFile uint64 `json:"nr_active_file"` // (since Linux 2.6.28) + NrUnevictable uint64 `json:"nr_unevictable"` // (since Linux 2.6.28) + NrMlock uint64 `json:"nr_mlock"` // (since Linux 2.6.28) + NrAnonPages uint64 `json:"nr_anon_pages"` // (since Linux 2.6.18) + NrMapped uint64 `json:"nr_mapped"` // (since Linux 2.6.0) + NrFilePages uint64 `json:"nr_file_pages"` // (since Linux 2.6.18) + NrDirty uint64 `json:"nr_dirty"` // (since Linux 2.6.0) + NrWriteback uint64 `json:"nr_writeback"` // (since Linux 2.6.0) + NrSlabReclaimable uint64 `json:"nr_slab_reclaimable"` // (since Linux 2.6.19) + NrSlabUnreclaimable uint64 `json:"nr_slab_unreclaimable"` // (since Linux 2.6.19) + NrPageTablePages uint64 `json:"nr_page_table_pages"` // (since Linux 2.6.0) + NrKernelStack uint64 `json:"nr_kernel_stack"` // (since Linux 2.6.32) Amount of memory allocated to kernel stacks. + NrUnstable uint64 `json:"nr_unstable"` // (since Linux 2.6.0) + NrBounce uint64 `json:"nr_bounce"` // (since Linux 2.6.12) + NrVmscanWrite uint64 `json:"nr_vmscan_write"` // (since Linux 2.6.19) + NrVmscanImmediateReclaim uint64 `json:"nr_vmscan_immediate_reclaim"` // (since Linux 3.2) + NrWritebackTemp uint64 `json:"nr_writeback_temp"` // (since Linux 2.6.26) + NrIsolatedAnon uint64 `json:"nr_isolated_anon"` // (since Linux 2.6.32) + NrIsolatedFile uint64 `json:"nr_isolated_file"` // (since Linux 2.6.32) + NrShmem uint64 `json:"nr_shmem"` // (since Linux 2.6.32) Pages used by shmem and tmpfs(5). 
+ NrDirtied uint64 `json:"nr_dirtied"` // (since Linux 2.6.37) + NrWritten uint64 `json:"nr_written"` // (since Linux 2.6.37) + NrPagesScanned uint64 `json:"nr_pages_scanned"` // (since Linux 3.17) + NumaHit uint64 `json:"numa_hit"` // (since Linux 2.6.18) + NumaMiss uint64 `json:"numa_miss"` // (since Linux 2.6.18) + NumaForeign uint64 `json:"numa_foreign"` // (since Linux 2.6.18) + NumaInterleave uint64 `json:"numa_interleave"` // (since Linux 2.6.18) + NumaLocal uint64 `json:"numa_local"` // (since Linux 2.6.18) + NumaOther uint64 `json:"numa_other"` // (since Linux 2.6.18) + WorkingsetRefault uint64 `json:"workingset_refault"` // (since Linux 3.15) + WorkingsetActivate uint64 `json:"workingset_activate"` // (since Linux 3.15) + WorkingsetNodereclaim uint64 `json:"workingset_nodereclaim"` // (since Linux 3.15) + NrAnonTransparentHugepages uint64 `json:"nr_anon_transparent_hugepages"` // (since Linux 2.6.38) + NrFreeCma uint64 `json:"nr_free_cma"` // (since Linux 3.7) Number of free CMA (Contiguous Memory Allocator) pages. + NrDirtyThreshold uint64 `json:"nr_dirty_threshold"` // (since Linux 2.6.37) + NrDirtyBackgroundThreshold uint64 `json:"nr_dirty_background_threshold"` // (since Linux 2.6.37) + Pgpgin uint64 `json:"pgpgin"` // (since Linux 2.6.0) + Pgpgout uint64 `json:"pgpgout"` // (since Linux 2.6.0) + Pswpin uint64 `json:"pswpin"` // (since Linux 2.6.0) + Pswpout uint64 `json:"pswpout"` // (since Linux 2.6.0) + PgallocDma uint64 `json:"pgalloc_dma"` // (since Linux 2.6.5) + PgallocDma32 uint64 `json:"pgalloc_dma32"` // (since Linux 2.6.16) + PgallocNormal uint64 `json:"pgalloc_normal"` // (since Linux 2.6.5) + PgallocHigh uint64 `json:"pgalloc_high"` // (since Linux 2.6.5) + PgallocMovable uint64 `json:"pgalloc_movable"` // (since Linux 2.6.23) + Pgfree uint64 `json:"pgfree"` // (since Linux 2.6.0) + Pgactivate uint64 `json:"pgactivate"` // (since Linux 2.6.0) + Pgdeactivate uint64 `json:"pgdeactivate"` // (since Linux 2.6.0) + Pgfault uint64 `json:"pgfault"` // (since Linux 2.6.0) + Pgmajfault uint64 `json:"pgmajfault"` // (since Linux 2.6.0) + PgrefillDma uint64 `json:"pgrefill_dma"` // (since Linux 2.6.5) + PgrefillDma32 uint64 `json:"pgrefill_dma32"` // (since Linux 2.6.16) + PgrefillNormal uint64 `json:"pgrefill_normal"` // (since Linux 2.6.5) + PgrefillHigh uint64 `json:"pgrefill_high"` // (since Linux 2.6.5) + PgrefillMovable uint64 `json:"pgrefill_movable"` // (since Linux 2.6.23) + PgstealKswapdDma uint64 `json:"pgsteal_kswapd_dma"` // (since Linux 3.4) + PgstealKswapdDma32 uint64 `json:"pgsteal_kswapd_dma32"` // (since Linux 3.4) + PgstealKswapdNormal uint64 `json:"pgsteal_kswapd_normal"` // (since Linux 3.4) + PgstealKswapdHigh uint64 `json:"pgsteal_kswapd_high"` // (since Linux 3.4) + PgstealKswapdMovable uint64 `json:"pgsteal_kswapd_movable"` // (since Linux 3.4) + PgstealDirectDma uint64 `json:"pgsteal_direct_dma"` + PgstealDirectDma32 uint64 `json:"pgsteal_direct_dma32"` // (since Linux 3.4) + PgstealDirectNormal uint64 `json:"pgsteal_direct_normal"` // (since Linux 3.4) + PgstealDirectHigh uint64 `json:"pgsteal_direct_high"` // (since Linux 3.4) + PgstealDirectMovable uint64 `json:"pgsteal_direct_movable"` // (since Linux 2.6.23) + PgscanKswapdDma uint64 `json:"pgscan_kswapd_dma"` + PgscanKswapdDma32 uint64 `json:"pgscan_kswapd_dma32"` // (since Linux 2.6.16) + PgscanKswapdNormal uint64 `json:"pgscan_kswapd_normal"` // (since Linux 2.6.5) + PgscanKswapdHigh uint64 `json:"pgscan_kswapd_high"` + PgscanKswapdMovable uint64 `json:"pgscan_kswapd_movable"` // (since 
Linux 2.6.23) + PgscanDirectDma uint64 `json:"pgscan_direct_dma"` // + PgscanDirectDma32 uint64 `json:"pgscan_direct_dma32"` // (since Linux 2.6.16) + PgscanDirectNormal uint64 `json:"pgscan_direct_normal"` + PgscanDirectHigh uint64 `json:"pgscan_direct_high"` + PgscanDirectMovable uint64 `json:"pgscan_direct_movable"` // (since Linux 2.6.23) + PgscanDirectThrottle uint64 `json:"pgscan_direct_throttle"` // (since Linux 3.6) + ZoneReclaimFailed uint64 `json:"zone_reclaim_failed"` // (since linux 2.6.31) + Pginodesteal uint64 `json:"pginodesteal"` // (since linux 2.6.0) + SlabsScanned uint64 `json:"slabs_scanned"` // (since linux 2.6.5) + KswapdInodesteal uint64 `json:"kswapd_inodesteal"` // (since linux 2.6.0) + KswapdLowWmarkHitQuickly uint64 `json:"kswapd_low_wmark_hit_quickly"` // (since 2.6.33) + KswapdHighWmarkHitQuickly uint64 `json:"kswapd_high_wmark_hit_quickly"` // (since 2.6.33) + Pageoutrun uint64 `json:"pageoutrun"` // (since Linux 2.6.0) + Allocstall uint64 `json:"allocstall"` // (since Linux 2.6.0) + Pgrotated uint64 `json:"pgrotated"` // (since Linux 2.6.0) + DropPagecache uint64 `json:"drop_pagecache"` // (since Linux 3.15) + DropSlab uint64 `json:"drop_slab"` // (since Linux 3.15) + NumaPteUpdates uint64 `json:"numa_pte_updates"` // (since Linux 3.8) + NumaHugePteUpdates uint64 `json:"numa_huge_pte_updates"` // (since Linux 3.13) + NumaHintFaults uint64 `json:"numa_hint_faults"` // (since Linux 3.8) + NumaHintFaultsLocal uint64 `json:"numa_hint_faults_local"` // (since Linux 3.8) + NumaPagesMigrated uint64 `json:"numa_pages_migrated"` // (since Linux 3.8) + PgmigrateSuccess uint64 `json:"pgmigrate_success"` // (since Linux 3.8) + PgmigrateFail uint64 `json:"pgmigrate_fail"` // (since Linux 3.8) + CompactMigrateScanned uint64 `json:"compact_migrate_scanned"` // (since Linux 3.8) + CompactFreeScanned uint64 `json:"compact_free_scanned"` // (since Linux 3.8) + CompactIsolated uint64 `json:"compact_isolated"` // (since Linux 3.8) + CompactStall uint64 `json:"compact_stall"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + CompactFail uint64 `json:"compact_fail"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + CompactSuccess uint64 `json:"compact_success"` // (since Linux 2.6.35) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + HtlbBuddyAllocSuccess uint64 `json:"htlb_buddy_alloc_success"` // (since Linux 2.6.26) + HtlbBuddyAllocFail uint64 `json:"htlb_buddy_alloc_fail"` // (since Linux 2.6.26) + UnevictablePgsCulled uint64 `json:"unevictable_pgs_culled"` // (since Linux 2.6.28) + UnevictablePgsScanned uint64 `json:"unevictable_pgs_scanned"` // (since Linux 2.6.28) + UnevictablePgsRescued uint64 `json:"unevictable_pgs_rescued"` // (since Linux 2.6.28) + UnevictablePgsMlocked uint64 `json:"unevictable_pgs_mlocked"` // (since Linux 2.6.28) + UnevictablePgsMunlocked uint64 `json:"unevictable_pgs_munlocked"` // (since Linux 2.6.28) + UnevictablePgsCleared uint64 `json:"unevictable_pgs_cleared"` // (since Linux 2.6.28) + UnevictablePgsStranded uint64 `json:"unevictable_pgs_stranded"` // (since Linux 2.6.28) + ThpFaultAlloc uint64 `json:"thp_fault_alloc"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpFaultFallback uint64 `json:"thp_fault_fallback"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. 
+ ThpCollapseAlloc uint64 `json:"thp_collapse_alloc"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpCollapseAllocFailed uint64 `json:"thp_collapse_alloc_failed"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpSplit uint64 `json:"thp_split"` // (since Linux 2.6.39) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpZeroPageAlloc uint64 `json:"thp_zero_page_alloc"` // (since Linux 3.8) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + ThpZeroPageAllocFailed uint64 `json:"thp_zero_page_alloc_failed"` // (since Linux 3.8) See the kernel source file Documentation/admin-guide/mm/transhuge.rst. + BalloonInflate uint64 `json:"balloon_inflate"` // (since Linux 3.18) + BalloonDeflate uint64 `json:"balloon_deflate"` // (since Linux 3.18) + BalloonMigrate uint64 `json:"balloon_migrate"` // (since Linux 3.18) + NrTlbRemoteFlush uint64 `json:"nr_tlb_remote_flush"` // (since Linux 3.12) + NrTlbRemoteFlushReceived uint64 `json:"nr_tlb_remote_flush_received"` // (since Linux 3.12) + NrTlbLocalFlushAll uint64 `json:"nr_tlb_local_flush_all"` // (since Linux 3.12) + NrTlbLocalFlushOne uint64 `json:"nr_tlb_local_flush_one"` // (since Linux 3.12) + VmacacheFindCalls uint64 `json:"vmacache_find_calls"` // (since Linux 3.16) + VmacacheFindHits uint64 `json:"vmacache_find_hits"` // (since Linux 3.16) + VmacacheFullFlushes uint64 `json:"vmacache_full_flushes"` // (since Linux 3.19) + // the following fields are not documented in `man 5 proc` as of 4.15 + NrZoneInactiveAnon uint64 `json:"nr_zone_inactive_anon"` + NrZoneActiveAnon uint64 `json:"nr_zone_active_anon"` + NrZoneInactiveFile uint64 `json:"nr_zone_inactive_file"` + NrZoneActiveFile uint64 `json:"nr_zone_active_file"` + NrZoneUnevictable uint64 `json:"nr_zone_unevictable"` + NrZoneWritePending uint64 `json:"nr_zone_write_pending"` + NrZspages uint64 `json:"nr_zspages"` + NrShmemHugepages uint64 `json:"nr_shmem_hugepages"` + NrShmemPmdmapped uint64 `json:"nr_shmem_pmdmapped"` + AllocstallDma uint64 `json:"allocstall_dma"` + AllocstallDma32 uint64 `json:"allocstall_dma32"` + AllocstallNormal uint64 `json:"allocstall_normal"` + AllocstallMovable uint64 `json:"allocstall_movable"` + PgskipDma uint64 `json:"pgskip_dma"` + PgskipDma32 uint64 `json:"pgskip_dma32"` + PgskipNormal uint64 `json:"pgskip_normal"` + PgskipMovable uint64 `json:"pgskip_movable"` + Pglazyfree uint64 `json:"pglazyfree"` + Pglazyfreed uint64 `json:"pglazyfreed"` + Pgrefill uint64 `json:"pgrefill"` + PgstealKswapd uint64 `json:"pgsteal_kswapd"` + PgstealDirect uint64 `json:"pgsteal_direct"` + PgscanKswapd uint64 `json:"pgscan_kswapd"` + PgscanDirect uint64 `json:"pgscan_direct"` + OomKill uint64 `json:"oom_kill"` + CompactDaemonWake uint64 `json:"compact_daemon_wake"` + CompactDaemonMigrateScanned uint64 `json:"compact_daemon_migrate_scanned"` + CompactDaemonFreeScanned uint64 `json:"compact_daemon_free_scanned"` + ThpFileAlloc uint64 `json:"thp_file_alloc"` + ThpFileMapped uint64 `json:"thp_file_mapped"` + ThpSplitPage uint64 `json:"thp_split_page"` + ThpSplitPageFailed uint64 `json:"thp_split_page_failed"` + ThpDeferredSplitPage uint64 `json:"thp_deferred_split_page"` + ThpSplitPmd uint64 `json:"thp_split_pmd"` + ThpSplitPud uint64 `json:"thp_split_pud"` + ThpSwpout uint64 `json:"thp_swpout"` + ThpSwpoutFallback uint64 `json:"thp_swpout_fallback"` + SwapRa uint64 `json:"swap_ra"` + SwapRaHit uint64 `json:"swap_ra_hit"` +} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go index 8dd2074c..20787b29 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-sysinfo/types/process.go @@ -19,6 +19,7 @@ package types import "time" +// Process is the main wrapper for gathering information on a process type Process interface { CPUTimer Info() (ProcessInfo, error) @@ -28,6 +29,7 @@ type Process interface { PID() int } +// ProcessInfo contains basic stats about a process type ProcessInfo struct { Name string `json:"name"` PID int `json:"pid"` @@ -70,20 +72,26 @@ type UserInfo struct { SGID string `json:"sgid"` } +// Environment is the interface that wraps the Environment method. +// Environment returns variables for a process type Environment interface { Environment() (map[string]string, error) } -// OpenHandleEnumerator lists the open file handles. +// OpenHandleEnumerator is the interface that wraps the OpenHandles method. +// OpenHandles lists the open file handles. type OpenHandleEnumerator interface { OpenHandles() ([]string, error) } -// OpenHandleCount returns the number the open file handles. +// OpenHandleCounter is the interface that wraps the OpenHandleCount method. +// OpenHandleCount returns the number of open file handles. type OpenHandleCounter interface { OpenHandleCount() (int, error) } +// CPUTimer is the interface that wraps the CPUTime method. +// CPUTime returns CPU time info type CPUTimer interface { // CPUTime returns a CPUTimes structure for // the host or some process. @@ -94,6 +102,7 @@ type CPUTimer interface { CPUTime() (CPUTimes, error) } +// CPUTimes contains CPU timing stats for a process type CPUTimes struct { User time.Duration `json:"user"` System time.Duration `json:"system"` @@ -105,22 +114,26 @@ type CPUTimes struct { Steal time.Duration `json:"steal,omitempty"` } +// Total returns the total CPU time func (cpu CPUTimes) Total() time.Duration { return cpu.User + cpu.System + cpu.Idle + cpu.IOWait + cpu.IRQ + cpu.Nice + cpu.SoftIRQ + cpu.Steal } +// MemoryInfo contains memory stats for a process type MemoryInfo struct { Resident uint64 `json:"resident_bytes"` Virtual uint64 `json:"virtual_bytes"` Metrics map[string]uint64 `json:"raw,omitempty"` // Other memory related metrics. } +// SeccompInfo contains seccomp info for a process type SeccompInfo struct { Mode string `json:"mode"` NoNewPrivs *bool `json:"no_new_privs,omitempty"` // Added in kernel 4.10. } +// CapabilityInfo contains capability set info. type CapabilityInfo struct { Inheritable []string `json:"inheritable"` Permitted []string `json:"permitted"` @@ -129,10 +142,14 @@ type CapabilityInfo struct { Ambient []string `json:"ambient"` } +// Capabilities is the interface that wraps the Capabilities method. +// Capabilities returns capabilities for a process type Capabilities interface { Capabilities() (*CapabilityInfo, error) } +// Seccomp is the interface that wraps the Seccomp method. 
+// Seccomp returns seccomp info on Linux type Seccomp interface { Seccomp() (*SeccompInfo, error) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/NOTICE.txt b/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/NOTICE.txt deleted file mode 100644 index 807d3ab9..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/go-windows/NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Elastic go-windows -Copyright 2017-2019 Elasticsearch B.V. - -This product includes software developed at -Elasticsearch, B.V. (https://www.elastic.co/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/codecov.yml b/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/codecov.yml deleted file mode 100644 index 76ade0fd..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/elastic/gosigar/codecov.yml +++ /dev/null @@ -1,21 +0,0 @@ -# Enable coverage report message for diff on commit -coverage: - status: - project: off - patch: - default: - # basic - target: auto - threshold: null - base: auto - # advanced - branches: null - if_no_uploads: error - if_not_found: success - if_ci_failed: error - only_pulls: false - flags: null - paths: null - -# Disable comments on Pull Requests -comment: false diff --git a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/circle.yml b/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/circle.yml deleted file mode 100644 index f84f557b..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/fsnotify/fsevents/circle.yml +++ /dev/null @@ -1,24 +0,0 @@ -machine: - xcode: - version: "8.0" - environment: - XCODE_WORKSPACE: NotUsed.xcworkspace - XCODE_SCHEME: NotUsed - GOPATH: $HOME/.go_project - HOMEBREW_NO_AUTO_UPDATE: 1 - -dependencies: - pre: - - brew install https://github.com/Homebrew/homebrew-core/raw/master/Formula/go.rb - override: - - sw_vers - - go get -u github.com/golang/lint/golint - -test: - override: - - go test -v -race -coverprofile=coverage.txt -covermode=atomic - - test -z "$(gofmt -s -l -w . | tee /dev/stderr)" - - test -z "$(golint ./... | tee /dev/stderr)" - - go vet ./... - post: - - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE index 36275367..623ec06f 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/LICENSE @@ -1,7 +1,21 @@ -Copyright © 2013-2018 Yasuhiro Matsumoto, http://mattn.kaoriya.net +The MIT License (MIT) -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Copyright © 2013-2017 Yasuhiro Matsumoto, -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the “Software”), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/README.md index 0ea9db33..7b577558 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/README.md +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/README.md @@ -1,4 +1,4 @@ -#Go OLE +# Go OLE [![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28) [![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole) @@ -35,12 +35,12 @@ AppVeyor is used to build on Windows using the (in-development) test COM server. The tests currently do run and do pass and this should be maintained with commits. -##Versioning +## Versioning Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. Which means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new additions and changes. Fixes will always be in patch. This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. Leave a ticket, if there is breakage, so that it could be fixed. 
-##LICENSE +## LICENSE Under the MIT License: http://mattn.mit-license.org/2013 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com.go index f224fa06..a9bef150 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com.go @@ -3,39 +3,38 @@ package ole import ( - "errors" "syscall" - "time" "unicode/utf16" "unsafe" ) var ( - procCoInitialize, _ = modole32.FindProc("CoInitialize") - procCoInitializeEx, _ = modole32.FindProc("CoInitializeEx") - procCoUninitialize, _ = modole32.FindProc("CoUninitialize") - procCoCreateInstance, _ = modole32.FindProc("CoCreateInstance") - procCoTaskMemFree, _ = modole32.FindProc("CoTaskMemFree") - procCLSIDFromProgID, _ = modole32.FindProc("CLSIDFromProgID") - procCLSIDFromString, _ = modole32.FindProc("CLSIDFromString") - procStringFromCLSID, _ = modole32.FindProc("StringFromCLSID") - procStringFromIID, _ = modole32.FindProc("StringFromIID") - procIIDFromString, _ = modole32.FindProc("IIDFromString") - procGetUserDefaultLCID, _ = modkernel32.FindProc("GetUserDefaultLCID") - procCopyMemory, _ = modkernel32.FindProc("RtlMoveMemory") - procVariantInit, _ = modoleaut32.FindProc("VariantInit") - procVariantClear, _ = modoleaut32.FindProc("VariantClear") - procVariantTimeToSystemTime, _ = modoleaut32.FindProc("VariantTimeToSystemTime") - procSysAllocString, _ = modoleaut32.FindProc("SysAllocString") - procSysAllocStringLen, _ = modoleaut32.FindProc("SysAllocStringLen") - procSysFreeString, _ = modoleaut32.FindProc("SysFreeString") - procSysStringLen, _ = modoleaut32.FindProc("SysStringLen") - procCreateDispTypeInfo, _ = modoleaut32.FindProc("CreateDispTypeInfo") - procCreateStdDispatch, _ = modoleaut32.FindProc("CreateStdDispatch") - procGetActiveObject, _ = modoleaut32.FindProc("GetActiveObject") + procCoInitialize = modole32.NewProc("CoInitialize") + procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoUninitialize = modole32.NewProc("CoUninitialize") + procCoCreateInstance = modole32.NewProc("CoCreateInstance") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromCLSID = modole32.NewProc("StringFromCLSID") + procStringFromIID = modole32.NewProc("StringFromIID") + procIIDFromString = modole32.NewProc("IIDFromString") + procCoGetObject = modole32.NewProc("CoGetObject") + procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID") + procCopyMemory = modkernel32.NewProc("RtlMoveMemory") + procVariantInit = modoleaut32.NewProc("VariantInit") + procVariantClear = modoleaut32.NewProc("VariantClear") + procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime") + procSysAllocString = modoleaut32.NewProc("SysAllocString") + procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen") + procSysFreeString = modoleaut32.NewProc("SysFreeString") + procSysStringLen = modoleaut32.NewProc("SysStringLen") + procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo") + procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch") + procGetActiveObject = modoleaut32.NewProc("GetActiveObject") - procGetMessageW, _ = moduser32.FindProc("GetMessageW") - procDispatchMessageW, _ = moduser32.FindProc("DispatchMessageW") + procGetMessageW = moduser32.NewProc("GetMessageW") + procDispatchMessageW = 
moduser32.NewProc("DispatchMessageW") ) // coInitialize initializes COM library on current thread. @@ -209,6 +208,32 @@ func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) { return } +type BindOpts struct { + CbStruct uint32 + GrfFlags uint32 + GrfMode uint32 + TickCountDeadline uint32 +} + +// GetObject retrieves pointer to active object. +func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) { + if bindOpts != nil { + bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{})) + } + if iid == nil { + iid = IID_IUnknown + } + hr, _, _ := procCoGetObject.Call( + uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))), + uintptr(unsafe.Pointer(bindOpts)), + uintptr(unsafe.Pointer(iid)), + uintptr(unsafe.Pointer(&unk))) + if hr != 0 { + err = NewError(hr) + } + return +} + // VariantInit initializes variant. func VariantInit(v *VARIANT) (err error) { hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v))) @@ -317,13 +342,3 @@ func DispatchMessage(msg *Msg) (ret int32) { ret = int32(r0) return } - -// GetVariantDate converts COM Variant Time value to Go time.Time. -func GetVariantDate(value float64) (time.Time, error) { - var st syscall.Systemtime - r, _, _ := procVariantTimeToSystemTime.Call(uintptr(unsafe.Pointer(&value)), uintptr(unsafe.Pointer(&st))) - if r != 0 { - return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), nil), nil - } - return time.Now(), errors.New("Could not convert to time, passing current time.") -} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com_func.go index 425aad32..cef539d9 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com_func.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/com_func.go @@ -169,6 +169,6 @@ func DispatchMessage(msg *Msg) int32 { return int32(0) } -func GetVariantDate(value float64) (time.Time, error) { +func GetVariantDate(value uint64) (time.Time, error) { return time.Now(), NewError(E_NOTIMPL) } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/go.mod new file mode 100644 index 00000000..3a21f750 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/go.mod @@ -0,0 +1,5 @@ +module github.com/go-ole/go-ole + +go 1.12 + +require golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/go.sum new file mode 100644 index 00000000..9814d316 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/go.sum @@ -0,0 +1,2 @@ +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3 h1:7TYNF4UdlohbFwpNH04CoPMp1cHUZgO1Ebq5r2hIjfo= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/idispatch_windows.go index bb736903..6ec180b5 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -3,6 +3,7 @@ package ole import ( 
+ "math/big" "syscall" "time" "unsafe" @@ -63,6 +64,10 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{} dispnames := [1]int32{DISPID_PROPERTYPUT} dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) dispparams.cNamedArgs = 1 + } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 { + dispnames := [1]int32{DISPID_PROPERTYPUT} + dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0])) + dispparams.cNamedArgs = 1 } var vargs []VARIANT if len(params) > 0 { @@ -128,6 +133,8 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{} vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv))) case *float64: vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64))))) + case *big.Int: + vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64()) case string: vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string)))))) case *string: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/ole.go index b92b4ea1..e2ae4f4b 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/ole.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/ole.go @@ -26,6 +26,16 @@ type EXCEPINFO struct { scode uint32 } +// WCode return wCode in EXCEPINFO. +func (e EXCEPINFO) WCode() uint16 { + return e.wCode +} + +// SCODE return scode in EXCEPINFO. +func (e EXCEPINFO) SCODE() uint32 { + return e.scode +} + // String convert EXCEPINFO to string. func (e EXCEPINFO) String() string { var src, desc, hlp string diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go index c3edee2e..f7803c1e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go @@ -88,6 +88,20 @@ func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (r return r } +// PutPropertyRef mutates property reference. +func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) { + return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params) +} + +// MustPutPropertyRef mutates property reference or panics. +func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) { + r, err := PutPropertyRef(disp, name, params...) + if err != nil { + panic(err.Error()) + } + return r +} + func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error { newEnum, err := disp.GetProperty("_NewEnum") if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_func.go index 8ff0baa4..0dee670c 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_func.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_func.go @@ -124,12 +124,12 @@ func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) { } // safeArrayGetElement retrieves element at given index. 
-func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error { +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { return NewError(E_NOTIMPL) } // safeArrayGetElement retrieves element at given index and converts to string. -func safeArrayGetElementString(safearray *SafeArray, index int64) (string, error) { +func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) { return "", NewError(E_NOTIMPL) } @@ -146,8 +146,8 @@ func safeArrayGetIID(safearray *SafeArray) (*GUID, error) { // multidimensional array. // // AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int64, error) { - return int64(0), NewError(E_NOTIMPL) +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) } // safeArrayGetUBound returns upper bounds of SafeArray. @@ -156,8 +156,8 @@ func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int64, error) { // multidimensional array. // // AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int64, error) { - return int64(0), NewError(E_NOTIMPL) +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) { + return int32(0), NewError(E_NOTIMPL) } // safeArrayGetVartype returns data type of SafeArray. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_windows.go index b27936e2..0c1b3a10 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_windows.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearray_windows.go @@ -7,35 +7,35 @@ import ( ) var ( - procSafeArrayAccessData, _ = modoleaut32.FindProc("SafeArrayAccessData") - procSafeArrayAllocData, _ = modoleaut32.FindProc("SafeArrayAllocData") - procSafeArrayAllocDescriptor, _ = modoleaut32.FindProc("SafeArrayAllocDescriptor") - procSafeArrayAllocDescriptorEx, _ = modoleaut32.FindProc("SafeArrayAllocDescriptorEx") - procSafeArrayCopy, _ = modoleaut32.FindProc("SafeArrayCopy") - procSafeArrayCopyData, _ = modoleaut32.FindProc("SafeArrayCopyData") - procSafeArrayCreate, _ = modoleaut32.FindProc("SafeArrayCreate") - procSafeArrayCreateEx, _ = modoleaut32.FindProc("SafeArrayCreateEx") - procSafeArrayCreateVector, _ = modoleaut32.FindProc("SafeArrayCreateVector") - procSafeArrayCreateVectorEx, _ = modoleaut32.FindProc("SafeArrayCreateVectorEx") - procSafeArrayDestroy, _ = modoleaut32.FindProc("SafeArrayDestroy") - procSafeArrayDestroyData, _ = modoleaut32.FindProc("SafeArrayDestroyData") - procSafeArrayDestroyDescriptor, _ = modoleaut32.FindProc("SafeArrayDestroyDescriptor") - procSafeArrayGetDim, _ = modoleaut32.FindProc("SafeArrayGetDim") - procSafeArrayGetElement, _ = modoleaut32.FindProc("SafeArrayGetElement") - procSafeArrayGetElemsize, _ = modoleaut32.FindProc("SafeArrayGetElemsize") - procSafeArrayGetIID, _ = modoleaut32.FindProc("SafeArrayGetIID") - procSafeArrayGetLBound, _ = modoleaut32.FindProc("SafeArrayGetLBound") - procSafeArrayGetUBound, _ = modoleaut32.FindProc("SafeArrayGetUBound") - procSafeArrayGetVartype, _ = modoleaut32.FindProc("SafeArrayGetVartype") - procSafeArrayLock, _ = modoleaut32.FindProc("SafeArrayLock") - procSafeArrayPtrOfIndex, _ = modoleaut32.FindProc("SafeArrayPtrOfIndex") - procSafeArrayUnaccessData, _ = 
modoleaut32.FindProc("SafeArrayUnaccessData") - procSafeArrayUnlock, _ = modoleaut32.FindProc("SafeArrayUnlock") - procSafeArrayPutElement, _ = modoleaut32.FindProc("SafeArrayPutElement") - //procSafeArrayRedim, _ = modoleaut32.FindProc("SafeArrayRedim") // TODO - //procSafeArraySetIID, _ = modoleaut32.FindProc("SafeArraySetIID") // TODO - procSafeArrayGetRecordInfo, _ = modoleaut32.FindProc("SafeArrayGetRecordInfo") - procSafeArraySetRecordInfo, _ = modoleaut32.FindProc("SafeArraySetRecordInfo") + procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData") + procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData") + procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor") + procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx") + procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy") + procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData") + procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate") + procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx") + procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector") + procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx") + procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy") + procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData") + procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor") + procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim") + procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement") + procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize") + procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID") + procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound") + procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound") + procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype") + procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock") + procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex") + procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData") + procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock") + procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement") + //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO + //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO + procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo") + procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo") ) // safeArrayAccessData returns raw array pointer. @@ -205,7 +205,7 @@ func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) { } // safeArrayGetElement retrieves element at given index. -func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) error { +func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error { return convertHresultToError( procSafeArrayGetElement.Call( uintptr(unsafe.Pointer(safearray)), @@ -214,7 +214,7 @@ func safeArrayGetElement(safearray *SafeArray, index int64, pv unsafe.Pointer) e } // safeArrayGetElementString retrieves element at given index and converts to string. 
-func safeArrayGetElementString(safearray *SafeArray, index int64) (str string, err error) { +func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) { var element *int16 err = convertHresultToError( procSafeArrayGetElement.Call( @@ -243,7 +243,7 @@ func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) { // multidimensional array. // // AKA: SafeArrayGetLBound in Windows API. -func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int64, err error) { +func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) { err = convertHresultToError( procSafeArrayGetLBound.Call( uintptr(unsafe.Pointer(safearray)), @@ -258,7 +258,7 @@ func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int6 // multidimensional array. // // AKA: SafeArrayGetUBound in Windows API. -func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int64, err error) { +func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) { err = convertHresultToError( procSafeArrayGetUBound.Call( uintptr(unsafe.Pointer(safearray)), diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearrayconversion.go index ffeb2b97..259f488e 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearrayconversion.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/safearrayconversion.go @@ -14,7 +14,7 @@ func (sac *SafeArrayConversion) ToStringArray() (strings []string) { totalElements, _ := sac.TotalElements(0) strings = make([]string, totalElements) - for i := int64(0); i < totalElements; i++ { + for i := int32(0); i < totalElements; i++ { strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i) } @@ -25,7 +25,7 @@ func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) { totalElements, _ := sac.TotalElements(0) bytes = make([]byte, totalElements) - for i := int64(0); i < totalElements; i++ { + for i := int32(0); i < totalElements; i++ { safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)])) } @@ -37,59 +37,59 @@ func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) { values = make([]interface{}, totalElements) vt, _ := safeArrayGetVartype(sac.Array) - for i := 0; i < int(totalElements); i++ { + for i := int32(0); i < totalElements; i++ { switch VT(vt) { case VT_BOOL: var v bool - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_I1: var v int8 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_I2: var v int16 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_I4: var v int32 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_I8: var v int64 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_UI1: var v uint8 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_UI2: var v uint16 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + 
safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_UI4: var v uint32 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_UI8: var v uint64 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_R4: var v float32 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_R8: var v float64 - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_BSTR: var v string - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v case VT_VARIANT: var v VARIANT - safeArrayGetElement(sac.Array, int64(i), unsafe.Pointer(&v)) + safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v)) values[i] = v.Value() default: // TODO @@ -111,14 +111,14 @@ func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) { return safeArrayGetElementSize(sac.Array) } -func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int64, err error) { +func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) { if index < 1 { index = 1 } // Get array bounds - var LowerBounds int64 - var UpperBounds int64 + var LowerBounds int32 + var UpperBounds int32 LowerBounds, err = safeArrayGetLBound(sac.Array, index) if err != nil { diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variables.go index ebe00f1c..a6add1b0 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variables.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variables.go @@ -3,14 +3,13 @@ package ole import ( - "syscall" + "golang.org/x/sys/windows" ) var ( - modcombase = syscall.NewLazyDLL("combase.dll") - modkernel32, _ = syscall.LoadDLL("kernel32.dll") - modole32, _ = syscall.LoadDLL("ole32.dll") - modoleaut32, _ = syscall.LoadDLL("oleaut32.dll") - modmsvcrt, _ = syscall.LoadDLL("msvcrt.dll") - moduser32, _ = syscall.LoadDLL("user32.dll") + modcombase = windows.NewLazySystemDLL("combase.dll") + modkernel32 = windows.NewLazySystemDLL("kernel32.dll") + modole32 = windows.NewLazySystemDLL("ole32.dll") + modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll") + moduser32 = windows.NewLazySystemDLL("user32.dll") ) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant.go index 36969725..967a23fe 100644 --- a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant.go +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant.go @@ -88,10 +88,10 @@ func (v *VARIANT) Value() interface{} { return v.ToString() case VT_DATE: // VT_DATE type will either return float64 or time.Time. 
- d := float64(v.Val) + d := uint64(v.Val) date, err := GetVariantDate(d) if err != nil { - return d + return float64(v.Val) } return date case VT_UNKNOWN: diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_date_386.go new file mode 100644 index 00000000..1b970f63 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_date_386.go @@ -0,0 +1,22 @@ +// +build windows,386 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + v1 := uint32(value) + v2 := uint32(value >> 32) + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_date_amd64.go new file mode 100644 index 00000000..6952f1f0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_date_amd64.go @@ -0,0 +1,20 @@ +// +build windows,amd64 + +package ole + +import ( + "errors" + "syscall" + "time" + "unsafe" +) + +// GetVariantDate converts COM Variant Time value to Go time.Time. +func GetVariantDate(value uint64) (time.Time, error) { + var st syscall.Systemtime + r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st))) + if r != 0 { + return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil + } + return time.Now(), errors.New("Could not convert to time, passing current time.") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_ppc64le.go new file mode 100644 index 00000000..326427a7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_ppc64le.go @@ -0,0 +1,12 @@ +// +build ppc64le + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_s390x.go new file mode 100644 index 00000000..9874ca66 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/go-ole/go-ole/variant_s390x.go @@ -0,0 +1,12 @@ +// +build s390x + +package ole + +type VARIANT struct { + VT VT // 2 + wReserved1 uint16 // 4 + wReserved2 uint16 // 6 + wReserved3 uint16 // 8 + Val int64 // 16 + _ [8]byte // 24 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/Makefile new file mode 100644 index 00000000..0b4659b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -0,0 +1,37 @@ +# Protocol Buffers 
for Go with Gadgets +# +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto + +restore: + cp gogo.pb.golden gogo.pb.go + +preserve: + cp gogo.pb.go gogo.pb.golden diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/doc.go new file mode 100644 index 00000000..081c86fa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -0,0 +1,169 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +/* +Package gogoproto provides extensions for protocol buffers to achieve: + + - fast marshalling and unmarshalling. + - peace of mind by optionally generating test and benchmark code. + - more canonical Go structures. + - less typing by optionally generating extra helper code. + - goprotobuf compatibility + +More Canonical Go Structures + +A lot of time working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct. +Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions. + + - nullable, if false, a field is generated without a pointer (see warning below). + - embed, if true, the field is generated as an embedded field. + - customtype, It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128 + - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. + - casttype (beta), Changes the generated fieldtype. All generated code assumes that this type is castable to the protocol buffer field type. It does not work for structs or enums. + - castkey (beta), Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + - castvalue (beta), Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. + +Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset. + +Let us look at: + + github.com/gogo/protobuf/test/example/example.proto + +for a quicker overview. + +The following message: + + package test; + + import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + + message A { + optional string Description = 1 [(gogoproto.nullable) = false]; + optional int64 Number = 2 [(gogoproto.nullable) = false]; + optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false]; + } + +Will generate a go struct which looks a lot like this: + + type A struct { + Description string + Number int64 + Id github_com_gogo_protobuf_test_custom.Uuid + } + +You will see there are no pointers, since all fields are non-nullable. +You will also see a custom type which marshals to a string. +Be warned it is your responsibility to test your custom types thoroughly. +You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods. + +Next we will embed the message A in message B. + + message B { + optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true]; + repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false]; + } + +See below that A is embedded in B. 
+ + type B struct { + A + G []github_com_gogo_protobuf_test_custom.Uint128 + } + +Also see the repeated custom type. + + type Uint128 [2]uint64 + +Next we will create a custom name for one of our fields. + + message C { + optional int64 size = 1 [(gogoproto.customname) = "MySize"]; + } + +See below that the field's name is MySize and not Size. + + type C struct { + MySize *int64 + } + +This is useful when having a protocol buffer message with a field name which conflicts with a generated method. +As an example, having a field name size and using the sizer plugin to generate a Size method will cause a go compiler error. +Using customname you can fix this error without changing the field name. +This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available. + +Gogoprotobuf also has some more subtle changes, these could be changed back: + + - the generated package names for imports do not have the extra /filename.pb, + but are actually the imports specified in the .proto file. + +Gogoprotobuf also has lost some features which should be brought back with time: + + - Marshalling and unmarshalling with reflect and without the unsafe package, + this requires work in pointer_reflect.go + +Why does nullable break protocol buffer specifications: + +The protocol buffer specification states, somewhere, that you should be able to tell whether a +field is set or unset. With the option nullable=false this feature is lost, +since your non-nullable fields will always be set. It can be seen as a layer on top of +protocol buffers, where before and after marshalling all non-nullable fields are set +and they cannot be unset. + +Goprotobuf Compatibility: + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers. +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf: + + - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. + - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix + - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method, this is useful for rather using enum_stringer, or allowing you to write your own string method. + - goproto_getters, if false, the message is generated without get methods, this is useful when you would rather want to use face + - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. + - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension + - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). 
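The gogo.pb.go file added below wires each of these options up as a proto2 extension descriptor; for example, E_GoprotoGetters extends descriptor.MessageOptions with a *bool. A minimal sketch of reading such an option back through the gogo/protobuf v1 extension API; the messageHasGetters helper and its default-to-true fallback are illustrative assumptions, not part of this patch:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

// messageHasGetters reads the goproto_getters message option.
// Assumption: when the option is unset, getters are generated, matching
// the "if false, the message is generated without get methods" wording above.
func messageHasGetters(opts *descriptor.MessageOptions) bool {
	if opts == nil || !proto.HasExtension(opts, gogoproto.E_GoprotoGetters) {
		return true
	}
	ext, err := proto.GetExtension(opts, gogoproto.E_GoprotoGetters)
	if err != nil {
		return true
	}
	// ExtensionType is (*bool)(nil), so GetExtension yields a *bool.
	return *ext.(*bool)
}

func main() {
	opts := &descriptor.MessageOptions{}
	// Equivalent of `option (gogoproto.goproto_getters) = false;` in a .proto file.
	if err := proto.SetExtension(opts, gogoproto.E_GoprotoGetters, proto.Bool(false)); err != nil {
		panic(err)
	}
	fmt.Println(messageHasGetters(opts)) // prints: false
}

The protoc-gen-gogo plugins consult these same extension descriptors on the file, message, and field options when deciding what code to emit.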
+ +Less Typing and Peace of Mind are explained in their specific plugin folders godoc: + + - github.com/gogo/protobuf/plugin/ + +If you do not use any of these extensions the code that is generated +will be the same as if goprotobuf had generated it. + +The most complete way to see examples is to look at + + github.com/gogo/protobuf/test/thetest.proto + +Gogoprototest is a separate project, +because we want to keep gogoprotobuf independent of goprotobuf, +but we still want to test it thoroughly. + +*/ +package gogoproto diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go new file mode 100644 index 00000000..e352808b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -0,0 +1,874 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: gogo.proto + +package gogoproto + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62001, + Name: "gogoproto.goproto_enum_prefix", + Tag: "varint,62001,opt,name=goproto_enum_prefix", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62021, + Name: "gogoproto.goproto_enum_stringer", + Tag: "varint,62021,opt,name=goproto_enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62022, + Name: "gogoproto.enum_stringer", + Tag: "varint,62022,opt,name=enum_stringer", + Filename: "gogo.proto", +} + +var E_EnumCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*string)(nil), + Field: 62023, + Name: "gogoproto.enum_customname", + Tag: "bytes,62023,opt,name=enum_customname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", +} + +var E_EnumvalueCustomname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.EnumValueOptions)(nil), + ExtensionType: (*string)(nil), + Field: 66001, + Name: "gogoproto.enumvalue_customname", + Tag: "bytes,66001,opt,name=enumvalue_customname", + Filename: "gogo.proto", +} + +var E_GoprotoGettersAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63001, + Name: "gogoproto.goproto_getters_all", + Tag: "varint,63001,opt,name=goproto_getters_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: 
(*bool)(nil), + Field: 63002, + Name: "gogoproto.goproto_enum_prefix_all", + Tag: "varint,63002,opt,name=goproto_enum_prefix_all", + Filename: "gogo.proto", +} + +var E_GoprotoStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63003, + Name: "gogoproto.goproto_stringer_all", + Tag: "varint,63003,opt,name=goproto_stringer_all", + Filename: "gogo.proto", +} + +var E_VerboseEqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63004, + Name: "gogoproto.verbose_equal_all", + Tag: "varint,63004,opt,name=verbose_equal_all", + Filename: "gogo.proto", +} + +var E_FaceAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63005, + Name: "gogoproto.face_all", + Tag: "varint,63005,opt,name=face_all", + Filename: "gogo.proto", +} + +var E_GostringAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63006, + Name: "gogoproto.gostring_all", + Tag: "varint,63006,opt,name=gostring_all", + Filename: "gogo.proto", +} + +var E_PopulateAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63007, + Name: "gogoproto.populate_all", + Tag: "varint,63007,opt,name=populate_all", + Filename: "gogo.proto", +} + +var E_StringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63008, + Name: "gogoproto.stringer_all", + Tag: "varint,63008,opt,name=stringer_all", + Filename: "gogo.proto", +} + +var E_OnlyoneAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63009, + Name: "gogoproto.onlyone_all", + Tag: "varint,63009,opt,name=onlyone_all", + Filename: "gogo.proto", +} + +var E_EqualAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63013, + Name: "gogoproto.equal_all", + Tag: "varint,63013,opt,name=equal_all", + Filename: "gogo.proto", +} + +var E_DescriptionAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63014, + Name: "gogoproto.description_all", + Tag: "varint,63014,opt,name=description_all", + Filename: "gogo.proto", +} + +var E_TestgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63015, + Name: "gogoproto.testgen_all", + Tag: "varint,63015,opt,name=testgen_all", + Filename: "gogo.proto", +} + +var E_BenchgenAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63016, + Name: "gogoproto.benchgen_all", + Tag: "varint,63016,opt,name=benchgen_all", + Filename: "gogo.proto", +} + +var E_MarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63017, + Name: "gogoproto.marshaler_all", + Tag: "varint,63017,opt,name=marshaler_all", + Filename: "gogo.proto", +} + +var E_UnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63018, + Name: "gogoproto.unmarshaler_all", + Tag: "varint,63018,opt,name=unmarshaler_all", + Filename: "gogo.proto", +} + +var E_StableMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 
63019, + Name: "gogoproto.stable_marshaler_all", + Tag: "varint,63019,opt,name=stable_marshaler_all", + Filename: "gogo.proto", +} + +var E_SizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63020, + Name: "gogoproto.sizer_all", + Tag: "varint,63020,opt,name=sizer_all", + Filename: "gogo.proto", +} + +var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63021, + Name: "gogoproto.goproto_enum_stringer_all", + Tag: "varint,63021,opt,name=goproto_enum_stringer_all", + Filename: "gogo.proto", +} + +var E_EnumStringerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63022, + Name: "gogoproto.enum_stringer_all", + Tag: "varint,63022,opt,name=enum_stringer_all", + Filename: "gogo.proto", +} + +var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63023, + Name: "gogoproto.unsafe_marshaler_all", + Tag: "varint,63023,opt,name=unsafe_marshaler_all", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63024, + Name: "gogoproto.unsafe_unmarshaler_all", + Tag: "varint,63024,opt,name=unsafe_unmarshaler_all", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63025, + Name: "gogoproto.goproto_extensions_map_all", + Tag: "varint,63025,opt,name=goproto_extensions_map_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63026, + Name: "gogoproto.goproto_unrecognized_all", + Tag: "varint,63026,opt,name=goproto_unrecognized_all", + Filename: "gogo.proto", +} + +var E_GogoprotoImport = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63027, + Name: "gogoproto.gogoproto_import", + Tag: "varint,63027,opt,name=gogoproto_import", + Filename: "gogo.proto", +} + +var E_ProtosizerAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63028, + Name: "gogoproto.protosizer_all", + Tag: "varint,63028,opt,name=protosizer_all", + Filename: "gogo.proto", +} + +var E_CompareAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63029, + Name: "gogoproto.compare_all", + Tag: "varint,63029,opt,name=compare_all", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration", + Filename: 
"gogo.proto", +} + +var E_MessagenameAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63033, + Name: "gogoproto.messagename_all", + Tag: "varint,63033,opt,name=messagename_all", + Filename: "gogo.proto", +} + +var E_GoprotoSizecacheAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63034, + Name: "gogoproto.goproto_sizecache_all", + Tag: "varint,63034,opt,name=goproto_sizecache_all", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63035, + Name: "gogoproto.goproto_unkeyed_all", + Tag: "varint,63035,opt,name=goproto_unkeyed_all", + Filename: "gogo.proto", +} + +var E_GoprotoGetters = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64001, + Name: "gogoproto.goproto_getters", + Tag: "varint,64001,opt,name=goproto_getters", + Filename: "gogo.proto", +} + +var E_GoprotoStringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64003, + Name: "gogoproto.goproto_stringer", + Tag: "varint,64003,opt,name=goproto_stringer", + Filename: "gogo.proto", +} + +var E_VerboseEqual = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64004, + Name: "gogoproto.verbose_equal", + Tag: "varint,64004,opt,name=verbose_equal", + Filename: "gogo.proto", +} + +var E_Face = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64005, + Name: "gogoproto.face", + Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", +} + +var E_Gostring = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64006, + Name: "gogoproto.gostring", + Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", +} + +var E_Populate = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64007, + Name: "gogoproto.populate", + Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", +} + +var E_Stringer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 67008, + Name: "gogoproto.stringer", + Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", +} + +var E_Onlyone = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64009, + Name: "gogoproto.onlyone", + Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", +} + +var E_Equal = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64013, + Name: "gogoproto.equal", + Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", +} + +var E_Description = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64014, + Name: "gogoproto.description", + Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", +} + +var E_Testgen = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64015, + Name: "gogoproto.testgen", + Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", +} + +var E_Benchgen = &proto.ExtensionDesc{ + ExtendedType: 
(*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64016, + Name: "gogoproto.benchgen", + Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", +} + +var E_Marshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64017, + Name: "gogoproto.marshaler", + Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", +} + +var E_Unmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64018, + Name: "gogoproto.unmarshaler", + Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", +} + +var E_StableMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64019, + Name: "gogoproto.stable_marshaler", + Tag: "varint,64019,opt,name=stable_marshaler", + Filename: "gogo.proto", +} + +var E_Sizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64020, + Name: "gogoproto.sizer", + Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", +} + +var E_UnsafeMarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64023, + Name: "gogoproto.unsafe_marshaler", + Tag: "varint,64023,opt,name=unsafe_marshaler", + Filename: "gogo.proto", +} + +var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64024, + Name: "gogoproto.unsafe_unmarshaler", + Tag: "varint,64024,opt,name=unsafe_unmarshaler", + Filename: "gogo.proto", +} + +var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64025, + Name: "gogoproto.goproto_extensions_map", + Tag: "varint,64025,opt,name=goproto_extensions_map", + Filename: "gogo.proto", +} + +var E_GoprotoUnrecognized = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64026, + Name: "gogoproto.goproto_unrecognized", + Tag: "varint,64026,opt,name=goproto_unrecognized", + Filename: "gogo.proto", +} + +var E_Protosizer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64028, + Name: "gogoproto.protosizer", + Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", +} + +var E_Compare = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64029, + Name: "gogoproto.compare", + Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", +} + +var E_Messagename = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64033, + Name: "gogoproto.messagename", + Tag: "varint,64033,opt,name=messagename", + Filename: "gogo.proto", +} + +var E_GoprotoSizecache = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64034, + Name: "gogoproto.goproto_sizecache", + Tag: "varint,64034,opt,name=goproto_sizecache", + Filename: "gogo.proto", +} + +var E_GoprotoUnkeyed = &proto.ExtensionDesc{ + 
ExtendedType: (*descriptor.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64035, + Name: "gogoproto.goproto_unkeyed", + Tag: "varint,64035,opt,name=goproto_unkeyed", + Filename: "gogo.proto", +} + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65001, + Name: "gogoproto.nullable", + Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65002, + Name: "gogoproto.embed", + Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65003, + Name: "gogoproto.customtype", + Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", +} + +var E_Customname = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65004, + Name: "gogoproto.customname", + Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", +} + +var E_Jsontag = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65005, + Name: "gogoproto.jsontag", + Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", +} + +var E_Moretags = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65006, + Name: "gogoproto.moretags", + Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", +} + +var E_Casttype = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65007, + Name: "gogoproto.casttype", + Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", +} + +var E_Castkey = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65008, + Name: "gogoproto.castkey", + Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", +} + +var E_Castvalue = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 65009, + Name: "gogoproto.castvalue", + Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", +} + +var E_Wktpointer = &proto.ExtensionDesc{ + ExtendedType: (*descriptor.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65012, + Name: "gogoproto.wktpointer", + Tag: "varint,65012,opt,name=wktpointer", + Filename: "gogo.proto", +} + +func init() { + proto.RegisterExtension(E_GoprotoEnumPrefix) + proto.RegisterExtension(E_GoprotoEnumStringer) + proto.RegisterExtension(E_EnumStringer) + proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) + proto.RegisterExtension(E_EnumvalueCustomname) + proto.RegisterExtension(E_GoprotoGettersAll) + proto.RegisterExtension(E_GoprotoEnumPrefixAll) + proto.RegisterExtension(E_GoprotoStringerAll) + 
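// (Aside — a minimal sketch, assuming gogo's proto extension API: this
// registration is what lets the options declared above be attached to and
// read back from descriptor option messages at runtime, e.g.
//
//	opts := &descriptor.FileOptions{}
//	_ = proto.SetExtension(opts, E_MarshalerAll, proto.Bool(true))
//	on := proto.GetBoolExtension(opts, E_MarshalerAll, false) // on == true
//
// proto.GetBoolExtension falls back to its last argument when the option
// is unset, which is how per-option defaults are implemented downstream.)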
proto.RegisterExtension(E_VerboseEqualAll) + proto.RegisterExtension(E_FaceAll) + proto.RegisterExtension(E_GostringAll) + proto.RegisterExtension(E_PopulateAll) + proto.RegisterExtension(E_StringerAll) + proto.RegisterExtension(E_OnlyoneAll) + proto.RegisterExtension(E_EqualAll) + proto.RegisterExtension(E_DescriptionAll) + proto.RegisterExtension(E_TestgenAll) + proto.RegisterExtension(E_BenchgenAll) + proto.RegisterExtension(E_MarshalerAll) + proto.RegisterExtension(E_UnmarshalerAll) + proto.RegisterExtension(E_StableMarshalerAll) + proto.RegisterExtension(E_SizerAll) + proto.RegisterExtension(E_GoprotoEnumStringerAll) + proto.RegisterExtension(E_EnumStringerAll) + proto.RegisterExtension(E_UnsafeMarshalerAll) + proto.RegisterExtension(E_UnsafeUnmarshalerAll) + proto.RegisterExtension(E_GoprotoExtensionsMapAll) + proto.RegisterExtension(E_GoprotoUnrecognizedAll) + proto.RegisterExtension(E_GogoprotoImport) + proto.RegisterExtension(E_ProtosizerAll) + proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) + proto.RegisterExtension(E_MessagenameAll) + proto.RegisterExtension(E_GoprotoSizecacheAll) + proto.RegisterExtension(E_GoprotoUnkeyedAll) + proto.RegisterExtension(E_GoprotoGetters) + proto.RegisterExtension(E_GoprotoStringer) + proto.RegisterExtension(E_VerboseEqual) + proto.RegisterExtension(E_Face) + proto.RegisterExtension(E_Gostring) + proto.RegisterExtension(E_Populate) + proto.RegisterExtension(E_Stringer) + proto.RegisterExtension(E_Onlyone) + proto.RegisterExtension(E_Equal) + proto.RegisterExtension(E_Description) + proto.RegisterExtension(E_Testgen) + proto.RegisterExtension(E_Benchgen) + proto.RegisterExtension(E_Marshaler) + proto.RegisterExtension(E_Unmarshaler) + proto.RegisterExtension(E_StableMarshaler) + proto.RegisterExtension(E_Sizer) + proto.RegisterExtension(E_UnsafeMarshaler) + proto.RegisterExtension(E_UnsafeUnmarshaler) + proto.RegisterExtension(E_GoprotoExtensionsMap) + proto.RegisterExtension(E_GoprotoUnrecognized) + proto.RegisterExtension(E_Protosizer) + proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) + proto.RegisterExtension(E_Messagename) + proto.RegisterExtension(E_GoprotoSizecache) + proto.RegisterExtension(E_GoprotoUnkeyed) + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) + proto.RegisterExtension(E_Customname) + proto.RegisterExtension(E_Jsontag) + proto.RegisterExtension(E_Moretags) + proto.RegisterExtension(E_Casttype) + proto.RegisterExtension(E_Castkey) + proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) + proto.RegisterExtension(E_Wktpointer) +} + +func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) } + +var fileDescriptor_592445b5231bc2b9 = []byte{ + // 1328 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45, + 0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9, + 0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18, + 0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84, + 0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f, + 0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 
0x8f, 0x54, 0x35, 0xb7, + 0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6, + 0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9, + 0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6, + 0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59, + 0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc, + 0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99, + 0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19, + 0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b, + 0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79, + 0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8, + 0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d, + 0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4, + 0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78, + 0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0, + 0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1, + 0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6, + 0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae, + 0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c, + 0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0, + 0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b, + 0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04, + 0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28, + 0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36, + 0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50, + 0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d, + 0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa, + 0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5, + 0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b, + 0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24, + 0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05, + 0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2, + 0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b, + 0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92, + 0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56, + 0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e, + 0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19, + 
0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70, + 0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0, + 0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c, + 0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a, + 0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0, + 0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4, + 0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95, + 0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9, + 0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9, + 0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f, + 0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9, + 0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5, + 0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8, + 0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb, + 0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae, + 0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31, + 0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d, + 0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30, + 0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94, + 0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f, + 0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36, + 0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e, + 0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b, + 0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e, + 0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb, + 0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5, + 0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17, + 0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45, + 0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32, + 0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4, + 0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8, + 0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f, + 0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49, + 0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f, + 0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb, + 0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c, + 0x20, 0x55, 0xad, 0x56, 
0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90, + 0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e, + 0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd, + 0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb, + 0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden new file mode 100644 index 00000000..f6502e4b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: gogo.proto +// DO NOT EDIT! + +package gogoproto + +import proto "github.com/gogo/protobuf/proto" +import json "encoding/json" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference proto, json, and math imports to suppress error if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +var E_Nullable = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51235, + Name: "gogoproto.nullable", + Tag: "varint,51235,opt,name=nullable", +} + +var E_Embed = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 51236, + Name: "gogoproto.embed", + Tag: "varint,51236,opt,name=embed", +} + +var E_Customtype = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*string)(nil), + Field: 51237, + Name: "gogoproto.customtype", + Tag: "bytes,51237,opt,name=customtype", +} + +func init() { + proto.RegisterExtension(E_Nullable) + proto.RegisterExtension(E_Embed) + proto.RegisterExtension(E_Customtype) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto new file mode 100644 index 00000000..b80c8565 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -0,0 +1,144 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto2"; +package gogoproto; + +import "google/protobuf/descriptor.proto"; + +option java_package = "com.google.protobuf"; +option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; + +extend google.protobuf.EnumOptions { + optional bool goproto_enum_prefix = 62001; + optional bool goproto_enum_stringer = 62021; + optional bool enum_stringer = 62022; + optional string enum_customname = 62023; + optional bool enumdecl = 62024; +} + +extend google.protobuf.EnumValueOptions { + optional string enumvalue_customname = 66001; +} + +extend google.protobuf.FileOptions { + optional bool goproto_getters_all = 63001; + optional bool goproto_enum_prefix_all = 63002; + optional bool goproto_stringer_all = 63003; + optional bool verbose_equal_all = 63004; + optional bool face_all = 63005; + optional bool gostring_all = 63006; + optional bool populate_all = 63007; + optional bool stringer_all = 63008; + optional bool onlyone_all = 63009; + + optional bool equal_all = 63013; + optional bool description_all = 63014; + optional bool testgen_all = 63015; + optional bool benchgen_all = 63016; + optional bool marshaler_all = 63017; + optional bool unmarshaler_all = 63018; + optional bool stable_marshaler_all = 63019; + + optional bool sizer_all = 63020; + + optional bool goproto_enum_stringer_all = 63021; + optional bool enum_stringer_all = 63022; + + optional bool unsafe_marshaler_all = 63023; + optional bool unsafe_unmarshaler_all = 63024; + + optional bool goproto_extensions_map_all = 63025; + optional bool goproto_unrecognized_all = 63026; + optional bool gogoproto_import = 63027; + optional bool protosizer_all = 63028; + optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; + optional bool messagename_all = 63033; + + optional bool goproto_sizecache_all = 63034; + optional bool goproto_unkeyed_all = 63035; +} + +extend google.protobuf.MessageOptions { + optional bool goproto_getters = 64001; + optional bool goproto_stringer = 64003; + optional bool verbose_equal = 64004; + optional bool face = 64005; + optional bool gostring = 64006; + optional bool populate = 64007; + optional bool stringer = 67008; + optional bool onlyone = 64009; + + optional bool equal = 64013; + optional bool description = 64014; + optional bool testgen = 64015; + optional bool benchgen = 64016; + optional bool marshaler = 64017; + optional bool unmarshaler = 64018; + optional bool stable_marshaler = 64019; + + optional bool sizer = 64020; + + optional bool unsafe_marshaler = 64023; + optional bool unsafe_unmarshaler = 64024; + + optional bool goproto_extensions_map = 64025; + optional bool goproto_unrecognized = 64026; + + optional bool protosizer = 64028; + optional bool compare = 64029; + + optional bool typedecl = 64030; + + optional bool messagename = 64033; + + optional bool goproto_sizecache = 64034; + optional bool goproto_unkeyed = 
64035; +} + +extend google.protobuf.FieldOptions { + optional bool nullable = 65001; + optional bool embed = 65002; + optional string customtype = 65003; + optional string customname = 65004; + optional string jsontag = 65005; + optional string moretags = 65006; + optional string casttype = 65007; + optional string castkey = 65008; + optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; + optional bool wktpointer = 65012; + +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/helper.go new file mode 100644 index 00000000..390d4e4b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -0,0 +1,415 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
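// (Aside — a minimal sketch of the lookup pattern shared by the helper
// functions in this file: a message-level option wins, then the file-level
// *_all option, then a hard-coded default. `file` and `message` stand for
// hypothetical *FileDescriptorProto and *DescriptorProto values:
//
//	use := proto.GetBoolExtension(message.Options, E_Marshaler,
//		proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
//
// IsMarshaler below is exactly this expression; string-valued options such
// as customtype go through proto.GetExtension instead, as in GetCustomType.)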
+ +package gogoproto + +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +import proto "github.com/gogo/protobuf/proto" + +func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Embed, false) +} + +func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Nullable, true) +} + +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + +func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue" +} + +func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue" +} + +func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value" +} + +func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value" +} + +func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value" +} + +func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value" +} + +func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue" +} + +func IsStdString(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue" +} + +func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue" +} + +func IsStdType(field *google_protobuf.FieldDescriptorProto) bool { + return (IsStdTime(field) || IsStdDuration(field) || + IsStdDouble(field) || IsStdFloat(field) || + IsStdInt64(field) || IsStdUInt64(field) || + IsStdInt32(field) || IsStdUInt32(field) || + IsStdBool(field) || + IsStdString(field) || IsStdBytes(field)) +} + +func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Wktpointer, false) +} + +func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { + nullable := IsNullable(field) + if field.IsMessage() || IsCustomType(field) { + return nullable + } + if proto3 { + return false + } + return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES +} + +func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCustomType(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastType(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastType(field) + if len(typ) > 0 { + return true + } + return false +} + +func 
IsCastKey(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastKey(field) + if len(typ) > 0 { + return true + } + return false +} + +func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { + typ := GetCastValue(field) + if len(typ) > 0 { + return true + } + return false +} + +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + +func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customtype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Casttype) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castkey) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Castvalue) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool { + name := GetCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool { + name := GetEnumCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool { + name := GetEnumValueCustomName(field) + if len(name) > 0 { + return true + } + return false +} + +func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Customname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) + if err == nil && v.(*string) != nil { + return *(v.(*string)) + } + } + return "" +} + +func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Jsontag) + if err == nil && v.(*string) != nil { + return 
(v.(*string)) + } + } + return nil +} + +func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } + if field.Options != nil { + v, err := proto.GetExtension(field.Options, E_Moretags) + if err == nil && v.(*string) != nil { + return (v.(*string)) + } + } + return nil +} + +type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool + +func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true)) +} + +func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true)) +} + +func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true)) +} + +func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false)) +} + +func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false)) +} + +func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false)) +} + +func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false)) +} + +func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false)) +} + +func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false)) +} + +func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false)) +} + +func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false)) +} + +func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false)) +} + +func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false)) +} + 
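// structTagSketch is an illustrative aside, not part of the upstream file:
// it sketches how a code generator might fold the string-valued options
// above into a Go struct tag. The wire tag is a hypothetical placeholder.
func structTagSketch(field *google_protobuf.FieldDescriptorProto) string {
	tag := `protobuf:"bytes,1,opt,name=example"` // hypothetical wire tag
	if jsonTag := GetJsonTag(field); jsonTag != nil {
		tag += ` json:"` + *jsonTag + `"` // from (gogoproto.jsontag)
	}
	if more := GetMoreTags(field); more != nil {
		tag += " " + *more // from (gogoproto.moretags), appended verbatim
	}
	return tag
}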
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false)) +} + +func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false)) +} + +func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false)) +} + +func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false)) +} + +func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false)) +} + +func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true)) +} + +func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false)) +} + +func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false)) +} + +func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false)) +} + +func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true)) +} + +func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true)) +} + +func IsProto3(file *google_protobuf.FileDescriptorProto) bool { + return file.GetSyntax() == "proto3" +} + +func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true) +} + +func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) +} + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} + +func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return 
proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false)) +} + +func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true)) +} + +func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile new file mode 100644 index 00000000..3496dc99 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -0,0 +1,36 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring + protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
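// (Aside on ForMessage above — a hedged sketch as called from a client
// package; any generated message type works because it carries its own
// compressed descriptor:
//
//	fd, md := descriptor.ForMessage(&descriptor.FileOptions{})
//	// fd.GetName() == "descriptor.proto", md.GetName() == "FileOptions"
//
// ForMessage panics on a malformed descriptor, so it is intended for
// generated types only.)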
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go new file mode 100644 index 00000000..cacfa392 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -0,0 +1,2865 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type FieldDescriptorProto_Type int32 + +const ( + // 0 is reserved for errors. + // Order is weird for historical reasons. + FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1 + FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3 + FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4 + // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if + // negative values are likely. + FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5 + FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6 + FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 + FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 + FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 + FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 + // New in version 2. 
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12 + FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13 + FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14 + FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15 + FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16 + FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17 + FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18 +) + +var FieldDescriptorProto_Type_name = map[int32]string{ + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var FieldDescriptorProto_Type_value = map[string]int32{ + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type { + p := new(FieldDescriptorProto_Type) + *p = x + return p +} + +func (x FieldDescriptorProto_Type) String() string { + return proto.EnumName(FieldDescriptorProto_Type_name, int32(x)) +} + +func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type") + if err != nil { + return err + } + *x = FieldDescriptorProto_Type(value) + return nil +} + +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 0} +} + +type FieldDescriptorProto_Label int32 + +const ( + // 0 is reserved for errors + FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1 + FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2 + FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3 +) + +var FieldDescriptorProto_Label_name = map[int32]string{ + 1: "LABEL_OPTIONAL", + 2: "LABEL_REQUIRED", + 3: "LABEL_REPEATED", +} + +var FieldDescriptorProto_Label_value = map[string]int32{ + "LABEL_OPTIONAL": 1, + "LABEL_REQUIRED": 2, + "LABEL_REPEATED": 3, +} + +func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label { + p := new(FieldDescriptorProto_Label) + *p = x + return p +} + +func (x FieldDescriptorProto_Label) String() string { + return proto.EnumName(FieldDescriptorProto_Label_name, int32(x)) +} + +func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label") + if err != nil { + return err + } + *x = FieldDescriptorProto_Label(value) + return nil +} + +func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4, 1} +} + +// Generated classes can be optimized for speed or code size. +type FileOptions_OptimizeMode int32 + +const ( + FileOptions_SPEED FileOptions_OptimizeMode = 1 + // etc. 
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2 + FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3 +) + +var FileOptions_OptimizeMode_name = map[int32]string{ + 1: "SPEED", + 2: "CODE_SIZE", + 3: "LITE_RUNTIME", +} + +var FileOptions_OptimizeMode_value = map[string]int32{ + "SPEED": 1, + "CODE_SIZE": 2, + "LITE_RUNTIME": 3, +} + +func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode { + p := new(FileOptions_OptimizeMode) + *p = x + return p +} + +func (x FileOptions_OptimizeMode) String() string { + return proto.EnumName(FileOptions_OptimizeMode_name, int32(x)) +} + +func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode") + if err != nil { + return err + } + *x = FileOptions_OptimizeMode(value) + return nil +} + +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10, 0} +} + +type FieldOptions_CType int32 + +const ( + // Default mode. + FieldOptions_STRING FieldOptions_CType = 0 + FieldOptions_CORD FieldOptions_CType = 1 + FieldOptions_STRING_PIECE FieldOptions_CType = 2 +) + +var FieldOptions_CType_name = map[int32]string{ + 0: "STRING", + 1: "CORD", + 2: "STRING_PIECE", +} + +var FieldOptions_CType_value = map[string]int32{ + "STRING": 0, + "CORD": 1, + "STRING_PIECE": 2, +} + +func (x FieldOptions_CType) Enum() *FieldOptions_CType { + p := new(FieldOptions_CType) + *p = x + return p +} + +func (x FieldOptions_CType) String() string { + return proto.EnumName(FieldOptions_CType_name, int32(x)) +} + +func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType") + if err != nil { + return err + } + *x = FieldOptions_CType(value) + return nil +} + +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 0} +} + +type FieldOptions_JSType int32 + +const ( + // Use the default type. + FieldOptions_JS_NORMAL FieldOptions_JSType = 0 + // Use JavaScript strings. + FieldOptions_JS_STRING FieldOptions_JSType = 1 + // Use JavaScript numbers. + FieldOptions_JS_NUMBER FieldOptions_JSType = 2 +) + +var FieldOptions_JSType_name = map[int32]string{ + 0: "JS_NORMAL", + 1: "JS_STRING", + 2: "JS_NUMBER", +} + +var FieldOptions_JSType_value = map[string]int32{ + "JS_NORMAL": 0, + "JS_STRING": 1, + "JS_NUMBER": 2, +} + +func (x FieldOptions_JSType) Enum() *FieldOptions_JSType { + p := new(FieldOptions_JSType) + *p = x + return p +} + +func (x FieldOptions_JSType) String() string { + return proto.EnumName(FieldOptions_JSType_name, int32(x)) +} + +func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType") + if err != nil { + return err + } + *x = FieldOptions_JSType(value) + return nil +} + +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. 
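+//
+// For illustration only (an editor's sketch; the method and message names
+// are hypothetical), a read-only method would be annotated in a .proto
+// file as:
+//
+//	rpc GetFoo(GetFooRequest) returns (Foo) {
+//	  option idempotency_level = NO_SIDE_EFFECTS;
+//	}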
+type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} + +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} + +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} + +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} + +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17, 0} +} + +// The protocol compiler can output a FileDescriptorSet containing the .proto +// files it parses. +type FileDescriptorSet struct { + File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } +func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorSet) ProtoMessage() {} +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{0} +} +func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b) +} +func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic) +} +func (m *FileDescriptorSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorSet.Merge(m, src) +} +func (m *FileDescriptorSet) XXX_Size() int { + return xxx_messageInfo_FileDescriptorSet.Size(m) +} +func (m *FileDescriptorSet) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo + +func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { + if m != nil { + return m.File + } + return nil +} + +// Describes a complete .proto file. +type FileDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"` + // Names of files imported by this file. + Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"` + // Indexes of the public imported files in the dependency list above. + PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"` + // Indexes of the weak imported files in the dependency list. + // For Google-internal migration only. Do not use. + WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"` + // All top-level definitions in this file. 
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"` + Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + // This field contains optional information about the original source code. + // You may safely remove this entire field without harming runtime + // functionality of the descriptors -- the information is needed only by + // development tools. + SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"` + // The syntax of the proto file. + // The supported values are "proto2" and "proto3". + Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } +func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FileDescriptorProto) ProtoMessage() {} +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{1} +} +func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b) +} +func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FileDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileDescriptorProto.Merge(m, src) +} +func (m *FileDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FileDescriptorProto.Size(m) +} +func (m *FileDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo + +func (m *FileDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FileDescriptorProto) GetPackage() string { + if m != nil && m.Package != nil { + return *m.Package + } + return "" +} + +func (m *FileDescriptorProto) GetDependency() []string { + if m != nil { + return m.Dependency + } + return nil +} + +func (m *FileDescriptorProto) GetPublicDependency() []int32 { + if m != nil { + return m.PublicDependency + } + return nil +} + +func (m *FileDescriptorProto) GetWeakDependency() []int32 { + if m != nil { + return m.WeakDependency + } + return nil +} + +func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto { + if m != nil { + return m.MessageType + } + return nil +} + +func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto { + if m != nil { + return m.Service + } + return nil +} + +func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *FileDescriptorProto) GetOptions() *FileOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo { + if m != nil { + 
return m.SourceCodeInfo + } + return nil +} + +func (m *FileDescriptorProto) GetSyntax() string { + if m != nil && m.Syntax != nil { + return *m.Syntax + } + return "" +} + +// Describes a message type. +type DescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"` + Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"` + NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"` + EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"` + ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"` + OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"` + Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` + ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved field names, which may not be used by fields in the same message. + // A given name may only be reserved once. + ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } +func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto) ProtoMessage() {} +func (*DescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2} +} +func (m *DescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto.Unmarshal(m, b) +} +func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic) +} +func (m *DescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto.Merge(m, src) +} +func (m *DescriptorProto) XXX_Size() int { + return xxx_messageInfo_DescriptorProto.Size(m) +} +func (m *DescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo + +func (m *DescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *DescriptorProto) GetField() []*FieldDescriptorProto { + if m != nil { + return m.Field + } + return nil +} + +func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto { + if m != nil { + return m.Extension + } + return nil +} + +func (m *DescriptorProto) GetNestedType() []*DescriptorProto { + if m != nil { + return m.NestedType + } + return nil +} + +func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto { + if m != nil { + return m.EnumType + } + return nil +} + +func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange { + if m != nil { + return m.ExtensionRange + } + return nil +} + +func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto { + if m != nil { + return m.OneofDecl + } + return nil +} + +func (m *DescriptorProto) GetOptions() *MessageOptions { + if m != nil { + return m.Options + } + return nil +} + 
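+// inspectFields is an editor's illustrative sketch, not part of the generated
+// code: it shows how ForMessage (defined in descriptor.go above) combines with
+// the nil-safe getters in this file to enumerate a message's field names. The
+// helper name is hypothetical; any protoc-gen-gogo message satisfies Message.
+func inspectFields(msg Message) []string {
+	_, md := ForMessage(msg)
+	var names []string
+	for _, f := range md.GetField() {
+		// GetName is safe to call even on a nil *FieldDescriptorProto.
+		names = append(names, f.GetName())
+	}
+	return names
+}
+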
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *DescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +type DescriptorProto_ExtensionRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } +func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ExtensionRange) ProtoMessage() {} +func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 0} +} +func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src) +} +func (m *DescriptorProto_ExtensionRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m) +} +func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo + +func (m *DescriptorProto_ExtensionRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + +// Range of reserved tag numbers. Reserved tag numbers may not be used by +// fields or extension ranges in the same message. Reserved ranges may +// not overlap. 
+type DescriptorProto_ReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} } +func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } +func (*DescriptorProto_ReservedRange) ProtoMessage() {} +func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{2, 1} +} +func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b) +} +func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic) +} +func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src) +} +func (m *DescriptorProto_ReservedRange) XXX_Size() int { + return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m) +} +func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo + +func (m *DescriptorProto_ReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *DescriptorProto_ReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{3} +} + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b) +} +func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic) +} +func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtensionRangeOptions.Merge(m, src) +} +func (m *ExtensionRangeOptions) XXX_Size() int { + return xxx_messageInfo_ExtensionRangeOptions.Size(m) +} +func (m *ExtensionRangeOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// Describes a field within a message. +type FieldDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"` + // If type_name is set, this need not be set. If both this and type_name + // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. + Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"` + // For message and enum types, this is the name of the type. If the name + // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping + // rules are used to find the type (i.e. first the nested types within this + // message are searched, then within the parent, on up to the root + // namespace). + TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"` + // For extensions, this is the name of the type being extended. It is + // resolved in the same manner as type_name. + Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"` + // For numeric types, contains the original text representation of the value. + // For booleans, "true" or "false". + // For strings, contains the default text contents (not escaped in any way). + // For bytes, contains the C escaped value. All bytes >= 128 are escaped. + // TODO(kenton): Base-64 encode? + DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"` + // If set, gives the index of a oneof in the containing type's oneof_decl + // list. 
This field is a member of that oneof. + OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"` + // JSON name of this field. The value is set by protocol compiler. If the + // user has set a "json_name" option on this field, that option's value + // will be used. Otherwise, it's deduced from the field's name by converting + // it to camelCase. + JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"` + Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } +func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*FieldDescriptorProto) ProtoMessage() {} +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{4} +} +func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b) +} +func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic) +} +func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldDescriptorProto.Merge(m, src) +} +func (m *FieldDescriptorProto) XXX_Size() int { + return xxx_messageInfo_FieldDescriptorProto.Size(m) +} +func (m *FieldDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo + +func (m *FieldDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *FieldDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label { + if m != nil && m.Label != nil { + return *m.Label + } + return FieldDescriptorProto_LABEL_OPTIONAL +} + +func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type { + if m != nil && m.Type != nil { + return *m.Type + } + return FieldDescriptorProto_TYPE_DOUBLE +} + +func (m *FieldDescriptorProto) GetTypeName() string { + if m != nil && m.TypeName != nil { + return *m.TypeName + } + return "" +} + +func (m *FieldDescriptorProto) GetExtendee() string { + if m != nil && m.Extendee != nil { + return *m.Extendee + } + return "" +} + +func (m *FieldDescriptorProto) GetDefaultValue() string { + if m != nil && m.DefaultValue != nil { + return *m.DefaultValue + } + return "" +} + +func (m *FieldDescriptorProto) GetOneofIndex() int32 { + if m != nil && m.OneofIndex != nil { + return *m.OneofIndex + } + return 0 +} + +func (m *FieldDescriptorProto) GetJsonName() string { + if m != nil && m.JsonName != nil { + return *m.JsonName + } + return "" +} + +func (m *FieldDescriptorProto) GetOptions() *FieldOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a oneof. 
+type OneofDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } +func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*OneofDescriptorProto) ProtoMessage() {} +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{5} +} +func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b) +} +func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic) +} +func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofDescriptorProto.Merge(m, src) +} +func (m *OneofDescriptorProto) XXX_Size() int { + return xxx_messageInfo_OneofDescriptorProto.Size(m) +} +func (m *OneofDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo + +func (m *OneofDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes an enum type. +type EnumDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. 
+ ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } +func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto) ProtoMessage() {} +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6} +} +func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b) +} +func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto.Merge(m, src) +} +func (m *EnumDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto.Size(m) +} +func (m *EnumDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo + +func (m *EnumDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto { + if m != nil { + return m.Value + } + return nil +} + +func (m *EnumDescriptorProto) GetOptions() *EnumOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. 
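+//
+// As an editor's illustrative sketch (the enum is hypothetical), the
+// declaration
+//
+//	enum Foo {
+//	  reserved 2, 15, 40 to max;
+//	}
+//
+// yields one EnumReservedRange per entry, with "40 to max" stored as
+// start=40, end=2147483647; an exclusive end could not represent that
+// final inclusive value.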
+type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{6, 0} +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int { + return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m) +} +func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() { + xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +// Describes a value within an enum. 
+type EnumValueDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } +func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*EnumValueDescriptorProto) ProtoMessage() {} +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{7} +} +func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b) +} +func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic) +} +func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src) +} +func (m *EnumValueDescriptorProto) XXX_Size() int { + return xxx_messageInfo_EnumValueDescriptorProto.Size(m) +} +func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo + +func (m *EnumValueDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *EnumValueDescriptorProto) GetNumber() int32 { + if m != nil && m.Number != nil { + return *m.Number + } + return 0 +} + +func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a service. 
+type ServiceDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"` + Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } +func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*ServiceDescriptorProto) ProtoMessage() {} +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{8} +} +func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b) +} +func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic) +} +func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceDescriptorProto.Merge(m, src) +} +func (m *ServiceDescriptorProto) XXX_Size() int { + return xxx_messageInfo_ServiceDescriptorProto.Size(m) +} +func (m *ServiceDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo + +func (m *ServiceDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto { + if m != nil { + return m.Method + } + return nil +} + +func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions { + if m != nil { + return m.Options + } + return nil +} + +// Describes a method of a service. +type MethodDescriptorProto struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Input and output type names. These are resolved in the same way as + // FieldDescriptorProto.type_name, but must refer to a message type. 
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"` + OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"` + Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` + // Identifies if client streams multiple client messages + ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"` + // Identifies if server streams multiple server messages + ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } +func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } +func (*MethodDescriptorProto) ProtoMessage() {} +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{9} +} +func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b) +} +func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic) +} +func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodDescriptorProto.Merge(m, src) +} +func (m *MethodDescriptorProto) XXX_Size() int { + return xxx_messageInfo_MethodDescriptorProto.Size(m) +} +func (m *MethodDescriptorProto) XXX_DiscardUnknown() { + xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo + +const Default_MethodDescriptorProto_ClientStreaming bool = false +const Default_MethodDescriptorProto_ServerStreaming bool = false + +func (m *MethodDescriptorProto) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MethodDescriptorProto) GetInputType() string { + if m != nil && m.InputType != nil { + return *m.InputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOutputType() string { + if m != nil && m.OutputType != nil { + return *m.OutputType + } + return "" +} + +func (m *MethodDescriptorProto) GetOptions() *MethodOptions { + if m != nil { + return m.Options + } + return nil +} + +func (m *MethodDescriptorProto) GetClientStreaming() bool { + if m != nil && m.ClientStreaming != nil { + return *m.ClientStreaming + } + return Default_MethodDescriptorProto_ClientStreaming +} + +func (m *MethodDescriptorProto) GetServerStreaming() bool { + if m != nil && m.ServerStreaming != nil { + return *m.ServerStreaming + } + return Default_MethodDescriptorProto_ServerStreaming +} + +type FileOptions struct { + // Sets the Java package where classes generated from this .proto will be + // placed. By default, the proto package is used, but this is often + // inappropriate because proto packages do not normally start with backwards + // domain names. + JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"` + // If set, all the classes from the .proto file are wrapped in a single + // outer class with the given name. 
This applies to both Proto1 + // (equivalent to the old "--one_java_file" option) and Proto2 (where + // a .proto always translates to a single class, but you may want to + // explicitly choose the class name). + JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"` + // If set true, then the Java code generator will generate a separate .java + // file for each top-level message, enum, and service defined in the .proto + // file. Thus, these types will *not* be nested inside the outer class + // named by java_outer_classname. However, the outer class will still be + // generated to contain the file's getDescriptor() method as well as any + // top-level extensions defined in the file. + JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use. + // If set true, then the Java2 code generator will generate code that + // throws an exception whenever an attempt is made to assign a non-UTF-8 + // byte sequence to a string field. + // Message reflection will do the same. + // However, an extension field still accepts non-UTF-8 byte sequences. + // This option has no effect when used with the lite runtime. + JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"` + OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"` + // Sets the Go package where structs generated from this .proto will be + // placed. If omitted, the Go package will be derived from the following: + // - The basename of the package import path, if provided. + // - Otherwise, the package statement in the .proto file, if present. + // - Otherwise, the basename of the .proto file, without extension. + GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"` + // Should generic services be generated in each language? "Generic" services + // are not specific to any particular RPC system. They are generated by the + // main code generators in each language (without additional plugins). + // Generic services were the only kind of service generation supported by + // early versions of google.protobuf. + // + // Generic services are now considered deprecated in favor of using plugins + // that generate code specific to your particular RPC system. Therefore, + // these default to false. Old code which depends on generic services should + // explicitly set them to true. + CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` + JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` + PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` + // Is this file deprecated? 
+ // Depending on the target platform, this can emit Deprecated annotations + // for everything in the file, or it will be completely ignored; at the very + // least, this is a formalization for deprecating files. + Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Enables the use of arenas for the proto messages in this file. This applies + // only to generated classes for C++. + CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"` + // Sets the Objective-C class prefix which is prepended to all Objective-C + // generated classes from this .proto. There is no default. + ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` + // Namespace for generated classes; defaults to the package. + CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it, + // replacing '.' with underscore, and use that to prefix the types/symbols + // defined. When this option is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the PHP class prefix which is prepended to all PHP generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of PHP generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // Use this option to change the namespace of PHP generated metadata classes. + // Default is empty. When this option is empty, the proto file name will be used + // for determining the namespace. + PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"` + // Use this option to change the package of Ruby generated classes. Default + // is empty. When this option is not set, the package name will be used for + // determining the Ruby package. + RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FileOptions) Reset() { *m = FileOptions{} } +func (m *FileOptions) String() string { return proto.CompactTextString(m) } +func (*FileOptions) ProtoMessage() {} +func (*FileOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{10} +} + +var extRange_FileOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FileOptions +} + +func (m *FileOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FileOptions.Unmarshal(m, b) +} +func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic) +} +func (m *FileOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOptions.Merge(m, src) +} +func (m *FileOptions) XXX_Size() int { + return xxx_messageInfo_FileOptions.Size(m) +} +func (m *FileOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FileOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FileOptions proto.InternalMessageInfo + +const Default_FileOptions_JavaMultipleFiles bool = false +const Default_FileOptions_JavaStringCheckUtf8 bool = false +const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED +const Default_FileOptions_CcGenericServices bool = false +const Default_FileOptions_JavaGenericServices bool = false +const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false +const Default_FileOptions_Deprecated bool = false +const Default_FileOptions_CcEnableArenas bool = false + +func (m *FileOptions) GetJavaPackage() string { + if m != nil && m.JavaPackage != nil { + return *m.JavaPackage + } + return "" +} + +func (m *FileOptions) GetJavaOuterClassname() string { + if m != nil && m.JavaOuterClassname != nil { + return *m.JavaOuterClassname + } + return "" +} + +func (m *FileOptions) GetJavaMultipleFiles() bool { + if m != nil && m.JavaMultipleFiles != nil { + return *m.JavaMultipleFiles + } + return Default_FileOptions_JavaMultipleFiles +} + +// Deprecated: Do not use. 
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { + if m != nil && m.JavaGenerateEqualsAndHash != nil { + return *m.JavaGenerateEqualsAndHash + } + return false +} + +func (m *FileOptions) GetJavaStringCheckUtf8() bool { + if m != nil && m.JavaStringCheckUtf8 != nil { + return *m.JavaStringCheckUtf8 + } + return Default_FileOptions_JavaStringCheckUtf8 +} + +func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode { + if m != nil && m.OptimizeFor != nil { + return *m.OptimizeFor + } + return Default_FileOptions_OptimizeFor +} + +func (m *FileOptions) GetGoPackage() string { + if m != nil && m.GoPackage != nil { + return *m.GoPackage + } + return "" +} + +func (m *FileOptions) GetCcGenericServices() bool { + if m != nil && m.CcGenericServices != nil { + return *m.CcGenericServices + } + return Default_FileOptions_CcGenericServices +} + +func (m *FileOptions) GetJavaGenericServices() bool { + if m != nil && m.JavaGenericServices != nil { + return *m.JavaGenericServices + } + return Default_FileOptions_JavaGenericServices +} + +func (m *FileOptions) GetPyGenericServices() bool { + if m != nil && m.PyGenericServices != nil { + return *m.PyGenericServices + } + return Default_FileOptions_PyGenericServices +} + +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + +func (m *FileOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FileOptions_Deprecated +} + +func (m *FileOptions) GetCcEnableArenas() bool { + if m != nil && m.CcEnableArenas != nil { + return *m.CcEnableArenas + } + return Default_FileOptions_CcEnableArenas +} + +func (m *FileOptions) GetObjcClassPrefix() string { + if m != nil && m.ObjcClassPrefix != nil { + return *m.ObjcClassPrefix + } + return "" +} + +func (m *FileOptions) GetCsharpNamespace() string { + if m != nil && m.CsharpNamespace != nil { + return *m.CsharpNamespace + } + return "" +} + +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + +func (m *FileOptions) GetPhpMetadataNamespace() string { + if m != nil && m.PhpMetadataNamespace != nil { + return *m.PhpMetadataNamespace + } + return "" +} + +func (m *FileOptions) GetRubyPackage() string { + if m != nil && m.RubyPackage != nil { + return *m.RubyPackage + } + return "" +} + +func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MessageOptions struct { + // Set true to use the old proto1 MessageSet wire format for extensions. + // This is provided for backwards-compatibility with the MessageSet wire + // format. You should not use this for any other reason: It's less + // efficient, has fewer features, and is more complicated. + // + // The message must be defined exactly as follows: + // message Foo { + // option message_set_wire_format = true; + // extensions 4 to max; + // } + // Note that the message cannot have any defined fields; MessageSets only + // have extensions. + // + // All extensions of your type must be singular messages; e.g. 
they cannot + // be int32s, enums, or repeated messages. + // + // Because this is an option, the above two restrictions are not enforced by + // the protocol compiler. + MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"` + // Disables the generation of the standard "descriptor()" accessor, which can + // conflict with a field of the same name. This is meant to make migration + // from proto1 easier; new code should avoid fields named "descriptor". + NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"` + // Is this message deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the message, or it will be completely ignored; at the very least, + // this is a formalization for deprecating messages. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // Whether the message is an automatically generated map entry type for the + // map field. + // + // For map fields: + // map<KeyType, ValueType> map_field = 1; + // The parsed descriptor looks like: + // message MapFieldEntry { + // option map_entry = true; + // optional KeyType key = 1; + // optional ValueType value = 2; + // } + // repeated MapFieldEntry map_field = 1; + // + // Implementations may choose not to generate the map_entry=true message, but + // use a native map in the target language to hold the keys and values. + // The reflection APIs in such implementations still need to work as + // if the field is a repeated message field. + // + // NOTE: Do not set the option in .proto files. Always use the maps syntax + // instead. The option should only be implicitly set by the proto compiler + // parser. + MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MessageOptions) Reset() { *m = MessageOptions{} } +func (m *MessageOptions) String() string { return proto.CompactTextString(m) } +func (*MessageOptions) ProtoMessage() {} +func (*MessageOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{11} +} + +var extRange_MessageOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MessageOptions +} + +func (m *MessageOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MessageOptions.Unmarshal(m, b) +} +func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic) +} +func (m *MessageOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MessageOptions.Merge(m, src) +} +func (m *MessageOptions) XXX_Size() int { + return xxx_messageInfo_MessageOptions.Size(m) +} +func (m *MessageOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MessageOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MessageOptions proto.InternalMessageInfo + +const Default_MessageOptions_MessageSetWireFormat bool = false +const Default_MessageOptions_NoStandardDescriptorAccessor bool = false +const Default_MessageOptions_Deprecated bool = false + +func (m *MessageOptions) GetMessageSetWireFormat() bool { + if m != nil && m.MessageSetWireFormat != nil { + return *m.MessageSetWireFormat + } + return Default_MessageOptions_MessageSetWireFormat +} + +func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool { + if m != nil && m.NoStandardDescriptorAccessor != nil { + return *m.NoStandardDescriptorAccessor + } + return Default_MessageOptions_NoStandardDescriptorAccessor +} + +func (m *MessageOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MessageOptions_Deprecated +} + +func (m *MessageOptions) GetMapEntry() bool { + if m != nil && m.MapEntry != nil { + return *m.MapEntry + } + return false +} + +func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type FieldOptions struct { + // The ctype option instructs the C++ code generator to use a different + // representation of the field than it normally would. See the specific + // options below. This option is not yet implemented in the open source + // release -- sorry, we'll try to include it in a future version! + Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"` + // The packed option can be enabled for repeated primitive fields to enable + // a more efficient representation on the wire. Rather than repeatedly + // writing the tag and type for each element, the entire array is encoded as + // a single length-delimited blob. In proto3, only explicitly setting it to + // false will avoid using packed encoding. + Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` + // The jstype option determines the JavaScript type used for values of the + // field. 
The option is permitted only for 64 bit integral and fixed types + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. + Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` + // Should this field be parsed lazily? Lazy applies only to message-type + // fields. It means that when the outer message is initially parsed, the + // inner message's contents will not be parsed but instead stored in encoded + // form. The inner message will actually be parsed when it is first accessed. + // + // This is only a hint. Implementations are free to choose whether to use + // eager or lazy parsing regardless of the value of this option. However, + // setting this option true suggests that the protocol author believes that + // using lazy parsing on this field is worth the additional bookkeeping + // overhead typically needed to implement it. + // + // This option does not affect the public interface of any generated code; + // all method signatures remain the same. Furthermore, thread-safety of the + // interface is not affected by this option; const methods remain safe to + // call from multiple threads concurrently, while non-const methods continue + // to require exclusive access. + // + // + // Note that implementations may choose not to check required fields within + // a lazy sub-message. That is, calling IsInitialized() on the outer message + // may return true even if the inner message has missing required fields. + // This is necessary because otherwise the inner message would have to be + // parsed in order to perform the check, defeating the purpose of lazy + // parsing. An implementation which chooses not to check required fields + // must be consistent about it. That is, for any particular sub-message, the + // implementation must either *always* check its required fields, or *never* + // check its required fields, regardless of whether or not the message has + // been parsed. + Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"` + // Is this field deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for accessors, or it will be completely ignored; in the very least, this + // is a formalization for deprecating fields. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // For Google-internal migration only. Do not use. + Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldOptions) Reset() { *m = FieldOptions{} } +func (m *FieldOptions) String() string { return proto.CompactTextString(m) } +func (*FieldOptions) ProtoMessage() {} +func (*FieldOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{12} +} + +var extRange_FieldOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_FieldOptions +} + +func (m *FieldOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FieldOptions.Unmarshal(m, b) +} +func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic) +} +func (m *FieldOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldOptions.Merge(m, src) +} +func (m *FieldOptions) XXX_Size() int { + return xxx_messageInfo_FieldOptions.Size(m) +} +func (m *FieldOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FieldOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldOptions proto.InternalMessageInfo + +const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING +const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL +const Default_FieldOptions_Lazy bool = false +const Default_FieldOptions_Deprecated bool = false +const Default_FieldOptions_Weak bool = false + +func (m *FieldOptions) GetCtype() FieldOptions_CType { + if m != nil && m.Ctype != nil { + return *m.Ctype + } + return Default_FieldOptions_Ctype +} + +func (m *FieldOptions) GetPacked() bool { + if m != nil && m.Packed != nil { + return *m.Packed + } + return false +} + +func (m *FieldOptions) GetJstype() FieldOptions_JSType { + if m != nil && m.Jstype != nil { + return *m.Jstype + } + return Default_FieldOptions_Jstype +} + +func (m *FieldOptions) GetLazy() bool { + if m != nil && m.Lazy != nil { + return *m.Lazy + } + return Default_FieldOptions_Lazy +} + +func (m *FieldOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_FieldOptions_Deprecated +} + +func (m *FieldOptions) GetWeak() bool { + if m != nil && m.Weak != nil { + return *m.Weak + } + return Default_FieldOptions_Weak +} + +func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
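// A hedged illustration of the accessor pattern above: proto2 optional
// fields are pointers, so an unset field (nil) and a field explicitly set to
// its default look the same through the generated getters, which fall back
// to the Default_* constants. Callers who care about presence must inspect
// the pointer directly. Hypothetical helper, not generated code:
func lazyWithPresence(o *FieldOptions) (value, present bool) {
	return o.GetLazy(), o != nil && o.Lazy != nil
}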
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{13} +} + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_OneofOptions.Unmarshal(m, b) +} +func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic) +} +func (m *OneofOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_OneofOptions.Merge(m, src) +} +func (m *OneofOptions) XXX_Size() int { + return xxx_messageInfo_OneofOptions.Size(m) +} +func (m *OneofOptions) XXX_DiscardUnknown() { + xxx_messageInfo_OneofOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_OneofOptions proto.InternalMessageInfo + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumOptions struct { + // Set this option to true to allow mapping different tag names to the same + // value. + AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"` + // Is this enum deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum, or it will be completely ignored; in the very least, this + // is a formalization for deprecating enums. + Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumOptions) Reset() { *m = EnumOptions{} } +func (m *EnumOptions) String() string { return proto.CompactTextString(m) } +func (*EnumOptions) ProtoMessage() {} +func (*EnumOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{14} +} + +var extRange_EnumOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumOptions +} + +func (m *EnumOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumOptions.Unmarshal(m, b) +} +func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic) +} +func (m *EnumOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumOptions.Merge(m, src) +} +func (m *EnumOptions) XXX_Size() int { + return xxx_messageInfo_EnumOptions.Size(m) +} +func (m *EnumOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumOptions proto.InternalMessageInfo + +const Default_EnumOptions_Deprecated bool = false + +func (m *EnumOptions) GetAllowAlias() bool { + if m != nil && m.AllowAlias != nil { + return *m.AllowAlias + } + return false +} + +func (m *EnumOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumOptions_Deprecated +} + +func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type EnumValueOptions struct { + // Is this enum value deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the enum value, or it will be completely ignored; in the very least, + // this is a formalization for deprecating enum values. + Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
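// The ExtensionRangeArray above reserves field numbers 1000 through
// 536870911 (2^29-1, the protobuf maximum) for extensions such as custom
// options. A hypothetical check against that range, using the inclusive
// Start/End bounds of proto.ExtensionRange:
func inEnumOptionsExtensionRange(fieldNumber int32) bool {
	for _, r := range extRange_EnumOptions {
		if fieldNumber >= r.Start && fieldNumber <= r.End {
			return true
		}
	}
	return false
}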
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } +func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } +func (*EnumValueOptions) ProtoMessage() {} +func (*EnumValueOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{15} +} + +var extRange_EnumValueOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_EnumValueOptions +} + +func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b) +} +func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic) +} +func (m *EnumValueOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValueOptions.Merge(m, src) +} +func (m *EnumValueOptions) XXX_Size() int { + return xxx_messageInfo_EnumValueOptions.Size(m) +} +func (m *EnumValueOptions) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValueOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo + +const Default_EnumValueOptions_Deprecated bool = false + +func (m *EnumValueOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_EnumValueOptions_Deprecated +} + +func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type ServiceOptions struct { + // Is this service deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the service, or it will be completely ignored; in the very least, + // this is a formalization for deprecating services. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } +func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } +func (*ServiceOptions) ProtoMessage() {} +func (*ServiceOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{16} +} + +var extRange_ServiceOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ServiceOptions +} + +func (m *ServiceOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServiceOptions.Unmarshal(m, b) +} +func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic) +} +func (m *ServiceOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceOptions.Merge(m, src) +} +func (m *ServiceOptions) XXX_Size() int { + return xxx_messageInfo_ServiceOptions.Size(m) +} +func (m *ServiceOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo + +const Default_ServiceOptions_Deprecated bool = false + +func (m *ServiceOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_ServiceOptions_Deprecated +} + +func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +type MethodOptions struct { + // Is this method deprecated? + // Depending on the target platform, this can emit Deprecated annotations + // for the method, or it will be completely ignored; in the very least, + // this is a formalization for deprecating methods. + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` + // The parser stores options it doesn't recognize here. See above. 
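// A sketch (assumed usage, not generated code) of acting on the
// idempotency_level option declared above; the NO_SIDE_EFFECTS and
// IDEMPOTENT levels are the ones that make automatic retries safe.
func retrySafe(mo *MethodOptions) bool {
	switch mo.GetIdempotencyLevel() {
	case MethodOptions_NO_SIDE_EFFECTS, MethodOptions_IDEMPOTENT:
		return true
	default: // MethodOptions_IDEMPOTENCY_UNKNOWN
		return false
	}
}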
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MethodOptions) Reset() { *m = MethodOptions{} } +func (m *MethodOptions) String() string { return proto.CompactTextString(m) } +func (*MethodOptions) ProtoMessage() {} +func (*MethodOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{17} +} + +var extRange_MethodOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_MethodOptions +} + +func (m *MethodOptions) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MethodOptions.Unmarshal(m, b) +} +func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic) +} +func (m *MethodOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_MethodOptions.Merge(m, src) +} +func (m *MethodOptions) XXX_Size() int { + return xxx_messageInfo_MethodOptions.Size(m) +} +func (m *MethodOptions) XXX_DiscardUnknown() { + xxx_messageInfo_MethodOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_MethodOptions proto.InternalMessageInfo + +const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN + +func (m *MethodOptions) GetDeprecated() bool { + if m != nil && m.Deprecated != nil { + return *m.Deprecated + } + return Default_MethodOptions_Deprecated +} + +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + +func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + +// A message representing an option the parser does not recognize. This only +// appears in options protos created by the compiler::Parser class. +// DescriptorPool resolves these when building Descriptor objects. Therefore, +// options protos in descriptor objects (e.g. returned by Descriptor::options(), +// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions +// in them. +type UninterpretedOption struct { + Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"` + // The value of the uninterpreted option, in whatever type the tokenizer + // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"` + PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"` + NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"` + StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"` + AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } +func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption) ProtoMessage() {} +func (*UninterpretedOption) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18} +} +func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b) +} +func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption.Merge(m, src) +} +func (m *UninterpretedOption) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption.Size(m) +} +func (m *UninterpretedOption) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo + +func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { + if m != nil { + return m.Name + } + return nil +} + +func (m *UninterpretedOption) GetIdentifierValue() string { + if m != nil && m.IdentifierValue != nil { + return *m.IdentifierValue + } + return "" +} + +func (m *UninterpretedOption) GetPositiveIntValue() uint64 { + if m != nil && m.PositiveIntValue != nil { + return *m.PositiveIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetNegativeIntValue() int64 { + if m != nil && m.NegativeIntValue != nil { + return *m.NegativeIntValue + } + return 0 +} + +func (m *UninterpretedOption) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *UninterpretedOption) GetStringValue() []byte { + if m != nil { + return m.StringValue + } + return nil +} + +func (m *UninterpretedOption) GetAggregateValue() string { + if m != nil && m.AggregateValue != nil { + return *m.AggregateValue + } + return "" +} + +// The name of the uninterpreted option. Each string represents a segment in +// a dot-separated name. is_extension is true iff a segment represents an +// extension (denoted with parentheses in options specs in .proto files). +// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents +// "foo.(bar.baz).qux". 
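// A hypothetical construction of the dotted option name described above,
// "foo.(bar.baz).qux": IsExtension marks the parenthesized segment.
// proto.String and proto.Bool are the usual pointer helpers from the
// imported proto package.
func exampleOptionName() []*UninterpretedOption_NamePart {
	return []*UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
}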
+type UninterpretedOption_NamePart struct { + NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"` + IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} } +func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } +func (*UninterpretedOption_NamePart) ProtoMessage() {} +func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{18, 0} +} +func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b) +} +func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic) +} +func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) { + xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src) +} +func (m *UninterpretedOption_NamePart) XXX_Size() int { + return xxx_messageInfo_UninterpretedOption_NamePart.Size(m) +} +func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() { + xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m) +} + +var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo + +func (m *UninterpretedOption_NamePart) GetNamePart() string { + if m != nil && m.NamePart != nil { + return *m.NamePart + } + return "" +} + +func (m *UninterpretedOption_NamePart) GetIsExtension() bool { + if m != nil && m.IsExtension != nil { + return *m.IsExtension + } + return false +} + +// Encapsulates information about the original source file from which a +// FileDescriptorProto was generated. +type SourceCodeInfo struct { + // A Location identifies a piece of source code in a .proto file which + // corresponds to a particular definition. This information is intended + // to be useful to IDEs, code indexers, documentation generators, and similar + // tools. + // + // For example, say we have a file like: + // message Foo { + // optional string foo = 1; + // } + // Let's look at just the field definition: + // optional string foo = 1; + // ^ ^^ ^^ ^ ^^^ + // a bc de f ghi + // We have the following locations: + // span path represents + // [a,i) [ 4, 0, 2, 0 ] The whole field definition. + // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). + // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). + // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). + // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). + // + // Notes: + // - A location may refer to a repeated field itself (i.e. not to any + // particular index within it). This is used whenever a set of elements are + // logically enclosed in a single code segment. For example, an entire + // extend block (possibly containing multiple extension definitions) will + // have an outer location whose path refers to the "extensions" repeated + // field without an index. + // - Multiple locations may have the same path. This happens when a single + // logical declaration is spread out across multiple places. The most + // obvious example is the "extend" block again -- there may be multiple + // extend blocks in the same scope, each of which will have the same path. + // - A location's span is not always a subset of its parent's span. 
For + // example, the "extendee" of an extension declaration appears at the + // beginning of the "extend" block and is shared by all extensions within + // the block. + // - Just because a location's span is a subset of some other location's span + // does not mean that it is a descendant. For example, a "group" defines + // both a type and a field in a single declaration. Thus, the locations + // corresponding to the type and field and their components will overlap. + // - Code which tries to interpret locations should probably be designed to + // ignore those that it doesn't understand, as more types of locations could + // be recorded in the future. + Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } +func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo) ProtoMessage() {} +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19} +} +func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b) +} +func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo.Merge(m, src) +} +func (m *SourceCodeInfo) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo.Size(m) +} +func (m *SourceCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo + +func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { + if m != nil { + return m.Location + } + return nil +} + +type SourceCodeInfo_Location struct { + // Identifies which part of the FileDescriptorProto was defined at this + // location. + // + // Each element is a field number or an index. They form a path from + // the root FileDescriptorProto to the place where the definition appears. For + // example, this path: + // [ 4, 3, 2, 7, 1 ] + // refers to: + // file.message_type(3) // 4, 3 + // .field(7) // 2, 7 + // .name() // 1 + // This is because FileDescriptorProto.message_type has field number 4: + // repeated DescriptorProto message_type = 4; + // and DescriptorProto.field has field number 2: + // repeated FieldDescriptorProto field = 2; + // and FieldDescriptorProto.name has field number 1: + // optional string name = 1; + // + // Thus, the above path gives the location of a field name. If we removed + // the last element: + // [ 4, 3, 2, 7 ] + // this path refers to the whole field declaration (from the beginning + // of the label to the terminating semicolon). + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Always has exactly three or four elements: start line, start column, + // end line (optional, otherwise assumed same as start line), end column. + // These are packed into a single field for efficiency. Note that line + // and column numbers are zero-based -- typically you will want to add + // 1 to each before displaying to a user.
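// A hedged helper illustrating the span layout documented above
// (zero-based; the end line is omitted when it equals the start line),
// converting to 1-based positions for display. Hypothetical, not generated:
func spanToOneBased(loc *SourceCodeInfo_Location) (startLine, startCol, endLine, endCol int32, ok bool) {
	switch s := loc.GetSpan(); len(s) {
	case 3: // [startLine, startCol, endCol]
		return s[0] + 1, s[1] + 1, s[0] + 1, s[2] + 1, true
	case 4: // [startLine, startCol, endLine, endCol]
		return s[0] + 1, s[1] + 1, s[2] + 1, s[3] + 1, true
	default:
		return 0, 0, 0, 0, false
	}
}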
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"` + // If this SourceCodeInfo represents a complete declaration, these are any + // comments appearing before and after the declaration which appear to be + // attached to the declaration. + // + // A series of line comments appearing on consecutive lines, with no other + // tokens appearing on those lines, will be treated as a single comment. + // + // leading_detached_comments will keep paragraphs of comments that appear + // before (but not connected to) the current element. Each paragraph, + // separated by empty lines, will be one comment element in the repeated + // field. + // + // Only the comment content is provided; comment markers (e.g. //) are + // stripped out. For block comments, leading whitespace and an asterisk + // will be stripped from the beginning of each line other than the first. + // Newlines are included in the output. + // + // Examples: + // + // optional int32 foo = 1; // Comment attached to foo. + // // Comment attached to bar. + // optional int32 bar = 2; + // + // optional string baz = 3; + // // Comment attached to baz. + // // Another line attached to baz. + // + // // Comment attached to qux. + // // + // // Another line attached to qux. + // optional double qux = 4; + // + // // Detached comment for corge. This is not leading or trailing comments + // // to qux or corge because there are blank lines separating it from + // // both. + // + // // Detached comment for corge paragraph 2. + // + // optional string corge = 5; + // /* Block comment attached + // * to corge. Leading asterisks + // * will be removed. */ + // /* Block comment attached to + // * grault. */ + // optional int32 grault = 6; + // + // // ignored detached comments. + LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"` + TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"` + LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } +func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } +func (*SourceCodeInfo_Location) ProtoMessage() {} +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{19, 0} +} +func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b) +} +func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic) +} +func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src) +} +func (m *SourceCodeInfo_Location) XXX_Size() int { + return xxx_messageInfo_SourceCodeInfo_Location.Size(m) +} +func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() { + xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo + +func (m *SourceCodeInfo_Location) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *SourceCodeInfo_Location) GetSpan() 
[]int32 { + if m != nil { + return m.Span + } + return nil +} + +func (m *SourceCodeInfo_Location) GetLeadingComments() string { + if m != nil && m.LeadingComments != nil { + return *m.LeadingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetTrailingComments() string { + if m != nil && m.TrailingComments != nil { + return *m.TrailingComments + } + return "" +} + +func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { + if m != nil { + return m.LeadingDetachedComments + } + return nil +} + +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20} +} +func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo.Merge(m, src) +} +func (m *GeneratedCodeInfo) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo.Size(m) +} +func (m *GeneratedCodeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
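// A sketch of the offset convention stated above (end is one past the last
// relevant byte), using the getters defined below; genSrc is a hypothetical
// buffer holding the generated file's contents.
func annotatedText(genSrc []byte, a *GeneratedCodeInfo_Annotation) []byte {
	begin, end := int(a.GetBegin()), int(a.GetEnd())
	if begin < 0 || end > len(genSrc) || begin > end {
		return nil // malformed annotation
	}
	return genSrc[begin:end]
}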
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptor_308767df5ffe18af, []int{20, 0} +} +func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) { + xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src) +} +func (m *GeneratedCodeInfo_Annotation) XXX_Size() int { + return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m) +} +func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() { + xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m) +} + +var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func init() { + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) + proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) + proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) + proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) + proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) + proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") + proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") + proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") + proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") + proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") + proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") + proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") + proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), 
"google.protobuf.EnumDescriptorProto.EnumReservedRange") + proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") + proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") + proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") + proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") + proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") + proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") + proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") + proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") + proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") + proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions") + proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption") + proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") + proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") + proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") +} + +func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) } + +var fileDescriptor_308767df5ffe18af = []byte{ + // 2522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66, + 0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80, + 0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66, + 0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f, + 0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63, + 0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e, + 0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec, + 0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2, + 0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e, + 0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2, + 0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39, + 0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd, + 0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41, + 0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22, + 0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa, + 0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 
0x8c, 0x67, 0xbd, 0xe4, + 0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7, + 0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d, + 0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e, + 0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12, + 0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d, + 0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2, + 0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1, + 0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba, + 0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60, + 0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77, + 0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24, + 0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06, + 0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a, + 0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92, + 0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6, + 0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c, + 0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7, + 0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f, + 0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd, + 0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07, + 0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95, + 0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77, + 0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e, + 0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8, + 0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69, + 0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0, + 0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05, + 0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46, + 0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f, + 0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c, + 0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3, + 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5, + 0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95, + 0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a, + 0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07, + 0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2, + 
0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f, + 0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42, + 0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e, + 0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4, + 0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90, + 0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae, + 0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d, + 0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e, + 0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58, + 0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9, + 0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f, + 0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4, + 0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15, + 0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf, + 0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba, + 0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6, + 0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01, + 0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73, + 0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb, + 0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1, + 0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7, + 0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f, + 0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78, + 0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a, + 0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba, + 0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49, + 0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48, + 0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee, + 0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0, + 0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a, + 0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63, + 0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2, + 0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59, + 0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35, + 0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd, + 0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee, + 0xa2, 0xcf, 0xe0, 0xed, 
0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b, + 0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf, + 0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8, + 0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31, + 0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53, + 0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8, + 0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8, + 0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d, + 0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81, + 0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8, + 0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f, + 0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9, + 0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03, + 0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff, + 0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d, + 0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0, + 0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8, + 0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4, + 0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a, + 0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86, + 0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71, + 0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76, + 0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35, + 0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b, + 0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7, + 0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e, + 0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd, + 0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01, + 0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55, + 0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41, + 0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79, + 0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7, + 0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c, + 0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd, + 0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99, + 0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88, + 0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 
0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95, + 0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed, + 0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea, + 0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d, + 0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee, + 0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4, + 0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25, + 0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0, + 0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97, + 0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94, + 0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22, + 0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43, + 0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80, + 0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd, + 0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77, + 0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75, + 0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4, + 0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11, + 0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb, + 0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c, + 0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0, + 0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d, + 0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07, + 0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39, + 0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80, + 0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42, + 0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c, + 0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8, + 0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7, + 0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00, + 0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go new file mode 100644 index 00000000..165b2110 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -0,0 +1,752 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: descriptor.proto + +package descriptor + +import ( + fmt "fmt" + github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" + proto "github.com/gogo/protobuf/proto" + math "math" + reflect "reflect" + sort "sort" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +func (this *FileDescriptorSet) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.FileDescriptorSet{") + if this.File != nil { + s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 16) + s = append(s, "&descriptor.FileDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Package != nil { + s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n") + } + if this.Dependency != nil { + s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n") + } + if this.PublicDependency != nil { + s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n") + } + if this.WeakDependency != nil { + s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n") + } + if this.MessageType != nil { + s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.Service != nil { + s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceCodeInfo != nil { + s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n") + } + if this.Syntax != nil { + s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.DescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Field != nil { + s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n") + } + if this.Extension != nil { + s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n") + } + if this.NestedType != nil { + s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n") + } + if this.EnumType != nil { + s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n") + } + if this.ExtensionRange != nil { + s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n") + } + if this.OneofDecl != nil { + s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != 
nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ExtensionRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *DescriptorProto_ReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.DescriptorProto_ReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&descriptor.FieldDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Label != nil { + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") + } + if this.Type != nil { + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") + } + if this.TypeName != nil { + s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") + } + if this.Extendee != nil { + s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n") + } + if this.DefaultValue != nil { + s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n") + } + if this.OneofIndex != nil { + s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n") + } + if this.JsonName != nil { + s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: 
"+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.OneofDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.EnumDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumValueDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Number != nil { + s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.ServiceDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.Method != nil { + s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *MethodDescriptorProto) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&descriptor.MethodDescriptorProto{") + if this.Name != nil { + s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") + } + if this.InputType != nil { + s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n") + } + if this.OutputType != nil { + s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.ClientStreaming != nil { + s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n") + } + if this.ServerStreaming != nil { + s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FileOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 25) + s = append(s, "&descriptor.FileOptions{") + if this.JavaPackage != nil { + s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") + } + if this.JavaOuterClassname != nil { + s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n") + } + if this.JavaMultipleFiles != nil { + s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n") + } + if this.JavaGenerateEqualsAndHash != nil { + s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n") + } + if this.JavaStringCheckUtf8 != nil { + s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") + } + if this.OptimizeFor != nil { + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") + } + if this.GoPackage != nil { + s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") + } + if this.CcGenericServices != nil { + s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n") + } + if this.JavaGenericServices != nil { + s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n") + } + if this.PyGenericServices != nil { + s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") + } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.CcEnableArenas != nil { + s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n") + } + if this.ObjcClassPrefix != nil { + s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n") + } + if this.CsharpNamespace != nil { + s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") + } + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if 
this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") + } + if this.PhpMetadataNamespace != nil { + s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n") + } + if this.RubyPackage != nil { + s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MessageOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.MessageOptions{") + if this.MessageSetWireFormat != nil { + s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n") + } + if this.NoStandardDescriptorAccessor != nil { + s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.MapEntry != nil { + s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FieldOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.FieldOptions{") + if this.Ctype != nil { + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") + } + if this.Packed != nil { + s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") + } + if this.Jstype != nil { + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") + } + if this.Lazy != nil { + s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.Weak != nil { + s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, 
"UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.EnumOptions{") + if this.AllowAlias != nil { + s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n") + } + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValueOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumValueOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *ServiceOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.ServiceOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *MethodOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&descriptor.MethodOptions{") + if this.Deprecated != nil { + s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") + } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&descriptor.UninterpretedOption{") + if this.Name 
!= nil { + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + } + if this.IdentifierValue != nil { + s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n") + } + if this.PositiveIntValue != nil { + s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n") + } + if this.NegativeIntValue != nil { + s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n") + } + if this.DoubleValue != nil { + s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n") + } + if this.StringValue != nil { + s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n") + } + if this.AggregateValue != nil { + s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UninterpretedOption_NamePart) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.UninterpretedOption_NamePart{") + if this.NamePart != nil { + s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n") + } + if this.IsExtension != nil { + s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.SourceCodeInfo{") + if this.Location != nil { + s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *SourceCodeInfo_Location) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&descriptor.SourceCodeInfo_Location{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.Span != nil { + s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n") + } + if this.LeadingComments != nil { + s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n") + } + if this.TrailingComments != nil { + s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n") + } + if this.LeadingDetachedComments != nil { + s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func 
(this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDescriptor(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { + e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) + if e == nil { + return "nil" + } + s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{" + keys := make([]int, 0, len(e)) + for k := range e { + keys = append(keys, int(k)) + } + sort.Ints(keys) + ss := []string{} + for _, k := range keys { + ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString()) + } + s += strings.Join(ss, ",") + "})" + return s +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go new file mode 100644 index 00000000..e0846a35 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -0,0 +1,390 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
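// A rough illustration (an assumed example, not part of the vendored file) of
// the protobuf tag encoding implemented by the WireType/GetKey helpers defined
// below in helper.go: a field key is the field number shifted left by three
// bits, OR'd with the wire type, then varint-encoded. For field number 2 with
// wire type 2 (length-delimited):
//
//     key := uint64(2<<3 | 2) // 0x12
//     var buf []byte
//     for key > 127 {
//         buf = append(buf, 0x80|uint8(key&0x7F))
//         key >>= 7
//     }
//     buf = append(buf, uint8(key)) // buf == []byte{0x12}, matching GetKey for such a field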
+ +package descriptor + +import ( + "strings" +) + +func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) { + if !msg.GetOptions().GetMapEntry() { + return nil, nil + } + return msg.GetField()[0], msg.GetField()[1] +} + +func dotToUnderscore(r rune) rune { + if r == '.' { + return '_' + } + return r +} + +func (field *FieldDescriptorProto) WireType() (wire int) { + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE: + return 1 + case FieldDescriptorProto_TYPE_FLOAT: + return 5 + case FieldDescriptorProto_TYPE_INT64: + return 0 + case FieldDescriptorProto_TYPE_UINT64: + return 0 + case FieldDescriptorProto_TYPE_INT32: + return 0 + case FieldDescriptorProto_TYPE_UINT32: + return 0 + case FieldDescriptorProto_TYPE_FIXED64: + return 1 + case FieldDescriptorProto_TYPE_FIXED32: + return 5 + case FieldDescriptorProto_TYPE_BOOL: + return 0 + case FieldDescriptorProto_TYPE_STRING: + return 2 + case FieldDescriptorProto_TYPE_GROUP: + return 2 + case FieldDescriptorProto_TYPE_MESSAGE: + return 2 + case FieldDescriptorProto_TYPE_BYTES: + return 2 + case FieldDescriptorProto_TYPE_ENUM: + return 0 + case FieldDescriptorProto_TYPE_SFIXED32: + return 5 + case FieldDescriptorProto_TYPE_SFIXED64: + return 1 + case FieldDescriptorProto_TYPE_SINT32: + return 0 + case FieldDescriptorProto_TYPE_SINT64: + return 0 + } + panic("unreachable") +} + +func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { + packed := field.IsPacked() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + +func (field *FieldDescriptorProto) GetKey() []byte { + x := field.GetKeyUint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + +func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { + msg := desc.GetMessage(packageName, messageName) + if msg == nil { + return nil + } + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto { + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+".")) + if nes != nil { + return nes + } + } + return nil +} + +func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+".")) + if res != nil { + return res + } + } + return nil +} + +func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto { + for _, file := range 
desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return msg + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return nes + } + if msg.GetName()+"."+nes.GetName() == typeName { + return nes + } + } + } + } + return nil +} + +func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, msg := range file.GetMessageType() { + if msg.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + for _, msg := range file.GetMessageType() { + for _, nes := range msg.GetNestedType() { + if nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + if msg.GetName()+"."+nes.GetName() == typeName { + return file.GetSyntax() == "proto3" + } + } + } + } + return false +} + +func (msg *DescriptorProto) IsExtendable() bool { + return len(msg.GetExtensionRange()) > 0 +} + +func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." + typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetName() == fieldName { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", nil + } + if !parent.IsExtendable() { + return "", nil + } + extendee := "." + packageName + "." 
+ typeName + for _, file := range desc.GetFile() { + for _, ext := range file.GetExtension() { + if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) { + if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) { + continue + } + } else { + if ext.GetExtendee() != extendee { + continue + } + } + if ext.GetNumber() == fieldNum { + return file.GetPackage(), ext + } + } + } + return "", nil +} + +func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) { + parent := desc.GetMessage(packageName, typeName) + if parent == nil { + return "", "" + } + field := parent.GetFieldDescriptor(fieldName) + if field == nil { + var extPackageName string + extPackageName, field = desc.FindExtension(packageName, typeName, fieldName) + if field == nil { + return "", "" + } + packageName = extPackageName + } + typeNames := strings.Split(field.GetTypeName(), ".") + if len(typeNames) == 1 { + msg := desc.GetMessage(packageName, typeName) + if msg == nil { + return "", "" + } + return packageName, msg.GetName() + } + if len(typeNames) > 2 { + for i := 1; i < len(typeNames)-1; i++ { + packageName = strings.Join(typeNames[1:len(typeNames)-i], ".") + typeName = strings.Join(typeNames[len(typeNames)-i:], ".") + msg := desc.GetMessage(packageName, typeName) + if msg != nil { + typeNames := strings.Split(msg.GetName(), ".") + if len(typeNames) == 1 { + return packageName, msg.GetName() + } + return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1] + } + } + } + return "", "" +} + +func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto { + for _, field := range msg.GetField() { + if field.GetName() == fieldName { + return field + } + } + return nil +} + +func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto { + for _, file := range desc.GetFile() { + if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) { + continue + } + for _, enum := range file.GetEnumType() { + if enum.GetName() == typeName { + return enum + } + } + } + return nil +} + +func (f *FieldDescriptorProto) IsEnum() bool { + return *f.Type == FieldDescriptorProto_TYPE_ENUM +} + +func (f *FieldDescriptorProto) IsMessage() bool { + return *f.Type == FieldDescriptorProto_TYPE_MESSAGE +} + +func (f *FieldDescriptorProto) IsBytes() bool { + return *f.Type == FieldDescriptorProto_TYPE_BYTES +} + +func (f *FieldDescriptorProto) IsRepeated() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED +} + +func (f *FieldDescriptorProto) IsString() bool { + return *f.Type == FieldDescriptorProto_TYPE_STRING +} + +func (f *FieldDescriptorProto) IsBool() bool { + return *f.Type == FieldDescriptorProto_TYPE_BOOL +} + +func (f *FieldDescriptorProto) IsRequired() bool { + return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED +} + +func (f *FieldDescriptorProto) IsPacked() bool { + return f.Options != nil && f.GetOptions().GetPacked() +} + +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + +func (m *DescriptorProto) HasExtension() bool { + return len(m.ExtensionRange) > 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/any.go 
b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 00000000..df4787de --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,140 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... 
}
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// EmptyAny returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding
+// message type isn't linked in.
+func EmptyAny(any *Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type of
+// the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = EmptyAny(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/any.pb.go
new file mode 100644
index 00000000..4492d820
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/any.pb.go
@@ -0,0 +1,723 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package types
+
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+//     foo := &pb.Foo{...}
+//     any, err := ptypes.MarshalAny(foo)
+//     ...
+//     foo := &pb.Foo{}
+//     if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//       ...
+//     }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. This string must contain at least
+	// one "/" character. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
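// A minimal round-trip sketch for the hand-written helpers in types/any.go
// above (MarshalAny, Is, UnmarshalAny). "pb.Foo" stands in for any linked-in
// gogo message type and is an assumption of this example:
//
//     in := &pb.Foo{}
//     any, err := types.MarshalAny(in) // TypeUrl becomes "type.googleapis.com/" + proto.MessageName(in)
//     if err != nil {
//         // handle err
//     }
//     out := &pb.Foo{}
//     if types.Is(any, out) {
//         err = types.UnmarshalAny(any, out) // checks the type URL, then proto.Unmarshal(any.Value, out)
//     }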
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_b53526c13ae22eb4, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(m, src) +} +func (m *Any) XXX_Size() int { + return m.Size() +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) +} + +var xxx_messageInfo_Any proto.InternalMessageInfo + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (*Any) XXX_MessageName() string { + return "google.protobuf.Any" +} +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) } + +var fileDescriptor_b53526c13ae22eb4 = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e, + 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, + 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, + 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed, + 0x00, 0x00, 0x00, +} + +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that 
interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + offset -= sovAny(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedAny(r, 3) + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + 
switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovAny(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthAny + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) + if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAny + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAny(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthAny + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 00000000..b09e3ffd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,2169 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
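// The sovAny/encodeVarintAny/skipAny plumbing in any.pb.go above is the
// standard protobuf base-128 varint scheme. A small worked example (assumed,
// for illustration): encoding 300 emits 0xAC 0x02, since 300 needs two 7-bit
// groups and every byte but the last carries the 0x80 continuation bit; for
// the same reason sovAny(300) == 2. Mirroring encodeVarintPopulateAny:
//
//     v := uint64(300)
//     var out []byte
//     for v >= 1<<7 {
//         out = append(out, uint8(v&0x7f|0x80))
//         v >>= 7
//     }
//     out = append(out, uint8(v)) // out == []byte{0xAC, 0x02}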
+// source: google/protobuf/api.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods,proto3" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // A version string for this interface. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v<major-version>`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins,proto3" json:"mixins,omitempty"` + // The source syntax of the service. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{0} +} +func (m *Api) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Api) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Api.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Api) XXX_Merge(src proto.Message) { + xxx_messageInfo_Api.Merge(m, src) +} +func (m *Api) XXX_Size() int { + return m.Size() +} +func (m *Api) XXX_DiscardUnknown() { + xxx_messageInfo_Api.DiscardUnknown(m) +} + +var xxx_messageInfo_Api proto.InternalMessageInfo + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Api) XXX_MessageName() string { + return "google.protobuf.Api" +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. + Options []*Option `protobuf:"bytes,6,rep,name=options,proto3" json:"options,omitempty"` + // The source syntax of this method. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{1} +} +func (m *Method) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Method) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Method.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Method) XXX_Merge(src proto.Message) { + xxx_messageInfo_Method.Merge(m, src) +} +func (m *Method) XXX_Size() int { + return m.Size() +} +func (m *Method) XXX_DiscardUnknown() { + xxx_messageInfo_Method.DiscardUnknown(m) +} + +var xxx_messageInfo_Method proto.InternalMessageInfo + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Method) XXX_MessageName() string { + return "google.protobuf.Method" +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. 
A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { + return fileDescriptor_a2ec32096296c143, []int{2} +} +func (m *Mixin) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Mixin) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Mixin.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Mixin) XXX_Merge(src proto.Message) { + xxx_messageInfo_Mixin.Merge(m, src) +} +func (m *Mixin) XXX_Size() int { + return m.Size() +} +func (m *Mixin) XXX_DiscardUnknown() { + xxx_messageInfo_Mixin.DiscardUnknown(m) +} + +var xxx_messageInfo_Mixin proto.InternalMessageInfo + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func (*Mixin) XXX_MessageName() string { + return "google.protobuf.Mixin" +} +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptor_a2ec32096296c143) } + +var fileDescriptor_a2ec32096296c143 = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xeb, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0xad, 0x23, 0x23, 0x23, 0xb9, 0x2e, 0x8c, 0x1d, 0x19, 0x91, 0x7d, 0xe7, 0xa6, 0x5c, 0x83, 0x04, + 0x9b, 0xdf, 0xfb, 0xff, 0xfc, 0xf7, 0x7b, 0x7f, 0xc3, 0x9b, 0x33, 0x21, 0x66, 0x39, 0xed, 0x4b, + 0x25, 0x8c, 0x38, 0x9a, 0xbf, 0xea, 0x4f, 0x24, 0x4b, 0x5d, 0x81, 0x76, 0x2b, 0x29, 
0xf5, 0x52, + 0xef, 0x56, 0x93, 0xd5, 0x62, 0xae, 0xa6, 0x74, 0x3c, 0x15, 0xdc, 0xd0, 0x85, 0xa9, 0xc0, 0x5e, + 0xaf, 0x49, 0x99, 0xa5, 0xac, 0x4d, 0xf6, 0xbe, 0x06, 0x30, 0x3c, 0x90, 0x0c, 0x21, 0xd8, 0xe2, + 0x93, 0x82, 0x62, 0x10, 0x83, 0xe4, 0x52, 0xe6, 0xce, 0xe8, 0x1e, 0xec, 0x14, 0xd4, 0xbc, 0x16, + 0x2f, 0x35, 0x0e, 0xe2, 0x30, 0xd9, 0xd9, 0xbf, 0x91, 0x36, 0x06, 0x48, 0x1f, 0x3b, 0x3d, 0xf3, + 0x9c, 0xbd, 0x22, 0xa4, 0x61, 0x82, 0x6b, 0x1c, 0xfe, 0xe5, 0xca, 0x13, 0xa7, 0x67, 0x9e, 0x43, + 0x18, 0x76, 0xde, 0x52, 0xa5, 0x99, 0xe0, 0xb8, 0xe5, 0x1e, 0xf7, 0x25, 0x7a, 0x08, 0xbb, 0x7f, + 0xee, 0x83, 0xdb, 0x31, 0x48, 0x76, 0xf6, 0xc9, 0x05, 0xcf, 0x91, 0xc3, 0x1e, 0x54, 0x54, 0x76, + 0x59, 0x9f, 0x2f, 0x51, 0x0a, 0xa3, 0x82, 0x2d, 0x18, 0xd7, 0x38, 0x72, 0x23, 0x5d, 0xbf, 0xb8, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x7d, 0x18, 0xe9, 0x25, 0x37, 0x93, 0x05, 0xee, 0xc4, 0x20, 0xe9, + 0x6e, 0x58, 0x61, 0xe4, 0xe4, 0xac, 0xc6, 0xf6, 0xbe, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x18, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x4e, 0xb5, 0x19, 0xdb, 0xe0, 0xc7, 0x73, 0x95, 0xe3, 0xc0, 0xe9, + 0xdd, 0xba, 0xff, 0x74, 0x29, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x52, 0x30, 0x3e, 0xc3, 0x61, 0x0c, 0x92, 0xed, 0xcc, 0x5b, 0x8c, 0x7c, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf6, 0xad, 0x12, 0xdc, 0xf5, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x76, 0x6e, 0x3b, 0xe7, 0x33, 0x97, 0xb5, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf1, 0x17, 0xff, 0x3b, + 0xb4, 0x3e, 0x6c, 0xbb, 0xd8, 0x37, 0x46, 0x86, 0x60, 0x4b, 0x09, 0x61, 0xea, 0x98, 0xdc, 0x79, + 0xf0, 0xfe, 0xfb, 0x8a, 0x6c, 0x9d, 0xae, 0x08, 0xf8, 0xb5, 0x22, 0xe0, 0x43, 0x49, 0xc0, 0xa7, + 0x92, 0x80, 0xe3, 0x92, 0x80, 0x6f, 0x25, 0x01, 0x3f, 0x4a, 0x02, 0x7e, 0x96, 0x64, 0xeb, 0xd4, + 0xf6, 0x4f, 0x08, 0x38, 0x3e, 0x21, 0x00, 0x5e, 0x9b, 0x8a, 0xa2, 0x39, 0xc6, 0x60, 0xfb, 0x40, + 0xb2, 0xa1, 0x2d, 0x86, 0xe0, 0x45, 0xdb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xe1, 0x70, 0xf0, 0x39, + 0x20, 0x87, 0x15, 0x3a, 0xf4, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x22, 0xe7, 0x71, 0xff, 0x77, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x64, 0x40, 0x40, 0xa1, + 0x03, 0x00, 0x00, +} + +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 + } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != 
that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if 
ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return 
strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Api) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Mixins) > 0 { + for iNdEx := len(m.Mixins) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Mixins[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Methods) > 0 { + for iNdEx := len(m.Methods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Methods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Method) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x38 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintApi(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.ResponseStreaming { + i-- + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.ResponseTypeUrl) > 0 { + i -= len(m.ResponseTypeUrl) + copy(dAtA[i:], m.ResponseTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i-- + 
dAtA[i] = 0x22 + } + if m.RequestStreaming { + i-- + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.RequestTypeUrl) > 0 { + i -= len(m.RequestTypeUrl) + copy(dAtA[i:], m.RequestTypeUrl) + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Mixin) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Root) > 0 { + i -= len(m.Root) + copy(dAtA[i:], m.Root) + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + offset -= sovApi(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(5) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 8) + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedApi(r, 3) + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r 
randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Method) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Mixin) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovApi(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + repeatedStringForMethods := "[]*Method{" + for _, f := range this.Methods { + repeatedStringForMethods += strings.Replace(f.String(), "Method", "Method", 1) + "," + } + repeatedStringForMethods += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + repeatedStringForMixins := "[]*Mixin{" + for _, f := range this.Mixins { + repeatedStringForMixins += strings.Replace(f.String(), "Mixin", "Mixin", 1) + "," + } + repeatedStringForMixins += "}" + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + repeatedStringForMethods + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Mixins:` + repeatedStringForMixins + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(fmt.Sprintf("%v", f), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthApi + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthApi + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthApi + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 00000000..ff2810af --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 00000000..979b8e78 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. 
A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func DurationFromProto(p *Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos) * time.Nanosecond
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a Duration.
+func DurationProto(d time.Duration) *Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
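+
+// Illustrative usage (an editor's sketch, not part of the upstream file):
+//
+//	d, _ := DurationFromProto(&Duration{Seconds: 1, Nanos: 500000000})
+//	fmt.Println(d)                        // 1.5s
+//	pb := DurationProto(90 * time.Second) // &Duration{Seconds: 90}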
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration.pb.go
new file mode 100644
index 00000000..a8a11b81
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration.pb.go
@@ -0,0 +1,546 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package types
+
+import (
+	bytes "bytes"
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
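+	//
+	// An illustrative note (not part of the upstream proto docs): under
+	// this convention 1.5s is {Seconds: 1, Nanos: 500000000} and -1.5s is
+	// {Seconds: -1, Nanos: -500000000}.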
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_23597b2ebd7ac6c5, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Duration) XXX_MessageName() string { + return "google.protobuf.Duration" +} +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) } + +var fileDescriptor_23597b2ebd7ac6c5 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c, + 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, + 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2, + 0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00, + 0x00, +} + +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + if c := 
bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + offset -= sovDuration(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovDuration(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDuration + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDuration(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthDuration + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 00000000..90e7670e --- /dev/null +++ 
b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 00000000..560ed03c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,491 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: google/protobuf/empty.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { + return fileDescriptor_900544acb223d5b8, []int{0} +} +func (*Empty) XXX_WellKnownType() string { return "Empty" } +func (m *Empty) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Empty.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Empty) XXX_Merge(src proto.Message) { + xxx_messageInfo_Empty.Merge(m, src) +} +func (m *Empty) XXX_Size() int { + return m.Size() +} +func (m *Empty) XXX_DiscardUnknown() { + xxx_messageInfo_Empty.DiscardUnknown(m) +} + +var xxx_messageInfo_Empty proto.InternalMessageInfo + +func (*Empty) XXX_MessageName() string { + return "google.protobuf.Empty" +} +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) } + +var fileDescriptor_900544acb223d5b8 = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, + 0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, + 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, + 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00, +} + +func 
(this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int { + offset -= sovEmpty(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedEmpty(r, 1) + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) 
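+	// The cases below mirror the protobuf wire formats: 1 = fixed64
+	// (8 random bytes), 2 = length-delimited (a varint length followed by
+	// that many bytes), and the default branch handles 5 = fixed32
+	// (4 random bytes), the only other wire type the caller passes in.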
+ case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovEmpty(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
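+			// The skipped bytes were appended to XXX_unrecognized above, so
+			// unknown fields survive a round trip through Marshal; now move
+			// past them.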
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEmpty(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthEmpty + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 00000000..b2e5f5d8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,767 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. 
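+//
+// As an illustrative Go sketch (an editor's note, not part of the upstream
+// proto documentation), the mask above corresponds to:
+//
+//     mask := &types.FieldMask{Paths: []string{"f.a", "f.b.d"}}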
+//
+// Field masks are used to specify a subset of fields that should be
+// returned by a get operation or modified by an update operation.
+// Field masks also have a custom JSON encoding (see below).
+//
+// # Field Masks in Projections
+//
+// When used in the context of a projection, a response message or
+// sub-message is filtered by the API to only contain those fields as
+// specified in the mask. For example, if the mask in the previous
+// example is applied to a response message as follows:
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//         x : 2
+//       }
+//       y : 13
+//     }
+//     z: 8
+//
+// The result will not contain specific values for fields x, y, and z
+// (their value will be set to the default, and omitted in proto text
+// output):
+//
+//
+//     f {
+//       a : 22
+//       b {
+//         d : 1
+//       }
+//     }
+//
+// A repeated field is not allowed except at the last position of a
+// paths string.
+//
+// If a FieldMask object is not present in a get operation, the
+// operation applies to all fields (as if a FieldMask of all fields
+// had been specified).
+//
+// Note that a field mask does not necessarily apply to the
+// top-level response message. In case of a REST get operation, the
+// field mask applies directly to the response, but in case of a REST
+// list operation, the mask instead applies to each individual message
+// in the returned resource list. In case of a REST custom method,
+// other definitions may be used. Where the mask applies will be
+// clearly documented together with its declaration in the API. In
+// any case, the effect on the returned resource/resources is required
+// behavior for APIs.
+//
+// # Field Masks in Update Operations
+//
+// A field mask in update operations specifies which fields of the
+// targeted resource are going to be updated. The API is required
+// to only change the values of the fields as specified in the mask
+// and leave the others untouched. If a resource is passed in to
+// describe the updated values, the API ignores the values of all
+// fields not covered by the mask.
+//
+// If a repeated field is specified for an update operation, new values will
+// be appended to the existing repeated field in the target resource. Note that
+// a repeated field is only allowed in the last position of a `paths` string.
+//
+// If a sub-message is specified in the last position of the field mask for an
+// update operation, then the new value will be merged into the existing
+// sub-message in the target resource.
+//
+// For example, given the target message:
+//
+//     f {
+//       b {
+//         d: 1
+//         x: 2
+//       }
+//       c: [1]
+//     }
+//
+// And an update message:
+//
+//     f {
+//       b {
+//         d: 10
+//       }
+//       c: [2]
+//     }
+//
+// then if the field mask is:
+//
+//     paths: ["f.b", "f.c"]
+//
+// then the result will be:
+//
+//     f {
+//       b {
+//         d: 10
+//         x: 2
+//       }
+//       c: [1, 2]
+//     }
+//
+// An implementation may provide options to override this default behavior for
+// repeated and message fields.
+//
+// In order to reset a field's value to the default, the field must
+// be in the mask and set to the default value in the provided resource.
+// Hence, in order to reset all fields of a resource, provide a default
+// instance of the resource and set all fields in the mask, or do
+// not provide a mask as described below.
+//
+// If a field mask is not present on update, the operation applies to
+// all fields (as if a field mask of all fields has been specified).
+// Note that in the presence of schema evolution, this may mean that
+// fields the client does not know and has therefore not filled into
+// the request will be reset to their default. If this is unwanted
+// behavior, a specific service may require a client to always specify
+// a field mask, producing an error if not.
+//
+// As with get operations, the location of the resource which
+// describes the updated values in the request message depends on the
+// operation kind. In any case, the effect of the field mask is
+// required to be honored by the API.
+//
+// ## Considerations for HTTP REST
+//
+// The HTTP kind of an update operation which uses a field mask must
+// be set to PATCH instead of PUT in order to satisfy HTTP semantics
+// (PUT must only be used for full updates).
+//
+// # JSON Encoding of Field Masks
+//
+// In JSON, a field mask is encoded as a single string where paths are
+// separated by a comma. Field names in each path are converted
+// to/from lower-camel naming conventions.
+//
+// As an example, consider the following message declarations:
+//
+//     message Profile {
+//       User user = 1;
+//       Photo photo = 2;
+//     }
+//     message User {
+//       string display_name = 1;
+//       string address = 2;
+//     }
+//
+// In proto a field mask for `Profile` may look as such:
+//
+//     mask {
+//       paths: "user.display_name"
+//       paths: "photo"
+//     }
+//
+// In JSON, the same mask is represented as below:
+//
+//     {
+//       mask: "user.displayName,photo"
+//     }
+//
+// # Field Masks and Oneof Fields
+//
+// Field masks treat fields in oneofs just as regular fields. Consider the
+// following message:
+//
+//     message SampleMessage {
+//       oneof test_oneof {
+//         string name = 4;
+//         SubMessage sub_message = 9;
+//       }
+//     }
+//
+// The field mask can be:
+//
+//     mask {
+//       paths: "name"
+//     }
+//
+// Or:
+//
+//     mask {
+//       paths: "sub_message"
+//     }
+//
+// Note that oneof type names ("test_oneof" in this case) cannot be used in
+// paths.
+//
+// ## Field Mask Verification
+//
+// The implementation of any API method which has a FieldMask type field in the
+// request should verify the included field paths, and return an
+// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
+type FieldMask struct {
+	// The set of field mask paths.
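+	// For example (an illustrative note): []string{"user.display_name",
+	// "photo"}, matching the JSON form "user.displayName,photo" shown above.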
+ Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { + return fileDescriptor_5158202634f0da48, []int{0} +} +func (m *FieldMask) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FieldMask) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldMask.Merge(m, src) +} +func (m *FieldMask) XXX_Size() int { + return m.Size() +} +func (m *FieldMask) XXX_DiscardUnknown() { + xxx_messageInfo_FieldMask.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldMask proto.InternalMessageInfo + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func (*FieldMask) XXX_MessageName() string { + return "google.protobuf.FieldMask" +} +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) } + +var fileDescriptor_5158202634f0da48 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c, + 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, + 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, + 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee, + 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72, + 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00, +} + +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this 
*FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + offset -= sovFieldMask(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2) + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := 
uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovFieldMask(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFieldMask + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFieldMask(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthFieldMask + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/protosize.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/protosize.go new file mode 100644 index 00000000..3a2d1b7e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/protosize.go @@ -0,0 +1,34 @@ +package types + +func (m *Any) ProtoSize() (n int) { return m.Size() } +func (m *Api) ProtoSize() (n int) { return m.Size() } +func (m *Method) ProtoSize() (n int) { return m.Size() } +func (m *Mixin) ProtoSize() (n int) { return m.Size() } +func (m *Duration) ProtoSize() (n int) { return m.Size() } +func (m *Empty) ProtoSize() (n int) { return m.Size() } +func (m *FieldMask) ProtoSize() (n int) { return m.Size() } +func (m *SourceContext) ProtoSize() (n int) { return m.Size() } +func (m *Struct) ProtoSize() (n int) { return m.Size() } +func (m *Value) ProtoSize() (n int) { return m.Size() } +func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() } 
+func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() } +func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() } +func (m *ListValue) ProtoSize() (n int) { return m.Size() } +func (m *Timestamp) ProtoSize() (n int) { return m.Size() } +func (m *Type) ProtoSize() (n int) { return m.Size() } +func (m *Field) ProtoSize() (n int) { return m.Size() } +func (m *Enum) ProtoSize() (n int) { return m.Size() } +func (m *EnumValue) ProtoSize() (n int) { return m.Size() } +func (m *Option) ProtoSize() (n int) { return m.Size() } +func (m *DoubleValue) ProtoSize() (n int) { return m.Size() } +func (m *FloatValue) ProtoSize() (n int) { return m.Size() } +func (m *Int64Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt64Value) ProtoSize() (n int) { return m.Size() } +func (m *Int32Value) ProtoSize() (n int) { return m.Size() } +func (m *UInt32Value) ProtoSize() (n int) { return m.Size() } +func (m *BoolValue) ProtoSize() (n int) { return m.Size() } +func (m *StringValue) ProtoSize() (n int) { return m.Size() } +func (m *BytesValue) ProtoSize() (n int) { return m.Size() } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 00000000..cf01b4c0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,553 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source_context.proto"`. 
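+	// (An illustrative note: per the `json=fileName` tag below, the proto3
+	// JSON form of this field uses the key "fileName".)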
+ FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { + return fileDescriptor_b686cdb126d509db, []int{0} +} +func (m *SourceContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SourceContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SourceContext.Merge(m, src) +} +func (m *SourceContext) XXX_Size() int { + return m.Size() +} +func (m *SourceContext) XXX_DiscardUnknown() { + xxx_messageInfo_SourceContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SourceContext proto.InternalMessageInfo + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func (*SourceContext) XXX_MessageName() string { + return "google.protobuf.SourceContext" +} +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +func init() { + proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db) +} + +var fileDescriptor_b686cdb126d509db = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63, + 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, + 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, + 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1, + 0xf9, 0x00, 0x00, 0x00, +} + +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + 
return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.FileName) > 0 { + i -= len(m.FileName) + copy(dAtA[i:], m.FileName) + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + offset -= sovSourceContext(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2) + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = 
encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthSourceContext + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + 
if (iNdEx + skippy) < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSourceContext(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthSourceContext + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 00000000..03fa99c1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,2423 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} +func (*Struct) XXX_WellKnownType() string { return "Struct" } +func (m *Struct) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return m.Size() +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +func (*Struct) XXX_MessageName() string { + return "google.protobuf.Struct" +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} +func (*Value) XXX_WellKnownType() string { return "Value" } +func (m *Value) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return m.Size() +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int + Compare(interface{}) int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + _ = b.EncodeVarint(1<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + _ = b.EncodeVarint(2<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(t) + case *Value_StructValue: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += 1 // tag and wire + n += 8 + case *Value_StringValue: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += 1 // tag and wire + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += 1 // 
tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func (*Value) XXX_MessageName() string { + return "google.protobuf.Value" +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return m.Size() +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func (*ListValue) XXX_MessageName() string { + return "google.protobuf.ListValue" +} +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 443 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xb1, 0x6f, 0xd3, 0x40, + 0x14, 0xc6, 0xfd, 0x9c, 0xc6, 0x22, 0xcf, 0xa8, 0x54, 0x87, 0x04, 0x51, 0x41, 0x47, 0x94, 0x2e, + 0x11, 0x42, 0xae, 0x14, 0x16, 0x44, 0x58, 0x88, 0x54, 0x5a, 0x89, 0xa8, 0x32, 0x86, 0x16, 0x89, + 0x25, 0xc2, 0xae, 0x1b, 0x59, 0xbd, 0xde, 0x55, 0xf6, 0x1d, 0x28, 0x1b, 0x0b, 0xff, 0x03, 0x33, + 0x13, 0x62, 0xe4, 0xaf, 0xe8, 0xc8, 0xc8, 0x48, 0xdc, 0x85, 0xb1, 0x63, 0x47, 0x74, 0x77, 0xb6, + 0x41, 0x8d, 0xb2, 0xf9, 0x7d, 0xf7, 0x7b, 0xdf, 0x7b, 0xdf, 0x33, 0xde, 0x9f, 0x09, 0x31, 0x63, + 0xe9, 0xf6, 0x59, 0x2e, 0xa4, 0x88, 0xd5, 0xf1, 0x76, 0x21, 0x73, 0x95, 0xc8, 0xc0, 0xd4, 0xe4, + 0x96, 0x7d, 0x0d, 0xea, 0xd7, 0xfe, 0x17, 0x40, 0xef, 0xb5, 0x21, 0xc8, 0x08, 0xbd, 0xe3, 0x2c, + 0x65, 0x47, 0x45, 0x17, 0x7a, 0xad, 0x81, 0x3f, 0xdc, 0x0a, 0xae, 0xc1, 0x81, 0x05, 0x83, 0x17, + 0x86, 0xda, 0xe1, 0x32, 0x9f, 0x47, 0x55, 0xcb, 0xe6, 0x2b, 0xf4, 0xff, 0x93, 0xc9, 0x06, 0xb6, + 0x4e, 0xd2, 0x79, 0x17, 0x7a, 0x30, 0xe8, 0x44, 0xfa, 0x93, 0x3c, 0xc2, 0xf6, 0x87, 0xf7, 0x4c, + 0xa5, 0x5d, 0xb7, 0x07, 0x03, 0x7f, 0x78, 0x67, 0xc9, 0xfc, 0x50, 0xbf, 0x46, 0x16, 0x7a, 0xea, + 0x3e, 0x81, 
0xfe, 0x0f, 0x17, 0xdb, 0x46, 0x24, 0x23, 0x44, 0xae, 0x18, 0x9b, 0x5a, 0x03, 0x6d, + 0xba, 0x3e, 0xdc, 0x5c, 0x32, 0xd8, 0x57, 0x8c, 0x19, 0x7e, 0xcf, 0x89, 0x3a, 0xbc, 0x2e, 0xc8, + 0x16, 0xde, 0xe4, 0xea, 0x34, 0x4e, 0xf3, 0xe9, 0xbf, 0xf9, 0xb0, 0xe7, 0x44, 0xbe, 0x55, 0x1b, + 0xa8, 0x90, 0x79, 0xc6, 0x67, 0x15, 0xd4, 0xd2, 0x8b, 0x6b, 0xc8, 0xaa, 0x16, 0x7a, 0x80, 0x18, + 0x0b, 0x51, 0xaf, 0xb1, 0xd6, 0x83, 0xc1, 0x0d, 0x3d, 0x4a, 0x6b, 0x16, 0x78, 0x66, 0x5c, 0x54, + 0x22, 0x2b, 0xa4, 0x6d, 0xa2, 0xde, 0x5d, 0x71, 0xc7, 0xca, 0x5e, 0x25, 0xb2, 0x49, 0xc9, 0xb2, + 0xa2, 0xee, 0xf5, 0x4c, 0xef, 0x72, 0xca, 0x49, 0x56, 0xc8, 0x26, 0x25, 0xab, 0x8b, 0xb1, 0x87, + 0x6b, 0x27, 0x19, 0x3f, 0xea, 0x8f, 0xb0, 0xd3, 0x10, 0x24, 0x40, 0xcf, 0x98, 0xd5, 0x7f, 0x74, + 0xd5, 0xd1, 0x2b, 0xea, 0xe1, 0x3d, 0xec, 0x34, 0x47, 0x24, 0xeb, 0x88, 0xfb, 0x07, 0x93, 0xc9, + 0xf4, 0xf0, 0xf9, 0xe4, 0x60, 0x67, 0xc3, 0x19, 0x7f, 0x86, 0x5f, 0x0b, 0xea, 0x5c, 0x2e, 0x28, + 0x5c, 0x2d, 0x28, 0x7c, 0x2a, 0x29, 0x7c, 0x2b, 0x29, 0x9c, 0x97, 0x14, 0x7e, 0x96, 0x14, 0x7e, + 0x97, 0x14, 0xfe, 0x94, 0xd4, 0xb9, 0xd4, 0xfa, 0x05, 0x85, 0xf3, 0x0b, 0x0a, 0x78, 0x3b, 0x11, + 0xa7, 0xd7, 0x47, 0x8e, 0x7d, 0x9b, 0x3e, 0xd4, 0x75, 0x08, 0xef, 0xda, 0x72, 0x7e, 0x96, 0x16, + 0x57, 0x00, 0x5f, 0xdd, 0xd6, 0x6e, 0x38, 0xfe, 0xee, 0xd2, 0x5d, 0xdb, 0x10, 0xd6, 0x3b, 0xbe, + 0x4d, 0x19, 0x7b, 0xc9, 0xc5, 0x47, 0xfe, 0x46, 0x93, 0xb1, 0x67, 0x9c, 0x1e, 0xff, 0x0d, 0x00, + 0x00, 0xff, 0xff, 0x26, 0x30, 0xdb, 0xbe, 0xe9, 0x02, 0x00, 0x00, +} + +func (this *Struct) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if that1.Kind == nil { + if this.Kind != nil { + return 1 + } + } else if this.Kind == nil { + return -1 + } else { + thisType := -1 + switch this.Kind.(type) { + case *Value_NullValue: + thisType = 0 + case *Value_NumberValue: + thisType = 1 + case *Value_StringValue: + thisType = 2 + case *Value_BoolValue: + thisType = 3 + case *Value_StructValue: + thisType = 4 + case *Value_ListValue: + thisType = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", this.Kind)) + } + that1Type := -1 + switch that1.Kind.(type) { + case *Value_NullValue: + that1Type = 0 + case *Value_NumberValue: + that1Type = 1 + case *Value_StringValue: + that1Type = 2 + case *Value_BoolValue: + that1Type = 3 + case *Value_StructValue: + that1Type = 4 + case *Value_ListValue: + that1Type = 5 + default: + panic(fmt.Sprintf("compare: unexpected type %T in oneof", that1.Kind)) + } + if thisType == that1Type { + if c := this.Kind.Compare(that1.Kind); c != 0 { + return c + 
} + } else if thisType < that1Type { + return -1 + } else if thisType > that1Type { + return 1 + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Value_NullValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NullValue != that1.NullValue { + if this.NullValue < that1.NullValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_NumberValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.NumberValue != that1.NumberValue { + if this.NumberValue < that1.NumberValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.StringValue != that1.StringValue { + if this.StringValue < that1.StringValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.BoolValue != that1.BoolValue { + if !this.BoolValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Value_StructValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.StructValue.Compare(that1.StructValue); c != 0 { + return c + } + return 0 +} +func (this *Value_ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := this.ListValue.Compare(that1.ListValue); c != 0 { + return c + } + return 0 +} +func (this *ListValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else 
if this == nil { + return -1 + } + if len(this.Values) != len(that1.Values) { + if len(this.Values) < len(that1.Values) { + return -1 + } + return 1 + } + for i := range this.Values { + if c := this.Values[i].Compare(that1.Values[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := 
that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + 
fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Struct) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Fields) > 0 { + for k := range m.Fields { + v := m.Fields[k] + baseI := i + if v != nil { + { + size, err := v.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintStruct(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Value) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Kind != nil { + { + size := m.Kind.Size() + i -= size + if _, err := m.Kind.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + return len(dAtA) - i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_NullValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i = encodeVarintStruct(dAtA, i, uint64(m.NullValue)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_NumberValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= 8 + encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i-- + dAtA[i] = 0x11 + return len(dAtA) - i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + return 
m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i -= len(m.StringValue) + copy(dAtA[i:], m.StringValue) + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i-- + dAtA[i] = 0x1a + return len(dAtA) - i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + i-- + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + return len(dAtA) - i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_StructValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StructValue != nil { + { + size, err := m.StructValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *Value_ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListValue != nil { + { + size, err := m.ListValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Values[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStruct(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + offset -= sovStruct(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(5) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + 
this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 7) + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(5) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedStruct(r, 2) + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = 
append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovStruct(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this == nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, + `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForValues := "[]*Value{" + for _, f := range this.Values { + repeatedStringForValues += strings.Replace(f.String(), "Value", "Value", 1) + "," + } + repeatedStringForValues += "}" + s := strings.Join([]string{`&ListValue{`, + `Values:` + repeatedStringForValues + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthStruct + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= NullValue(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStruct + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStruct + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStruct(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthStruct + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 00000000..232ada57 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,130 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
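The struct.pb.go hunk above leans entirely on protobuf's base-128 varint encoding: every tag, length, and integer field is read with the same shift-accumulate loop, and skipStruct walks unknown fields with it too. A minimal standalone sketch of that decode step (editor's illustration, not part of the vendored patch; decodeVarint is a hypothetical helper name):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop used throughout the generated Unmarshal
// methods above: each byte contributes its low 7 bits, and a cleared high
// bit marks the final byte of the varint.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			return value, n, nil
		}
	}
	return 0, 0, errors.New("varint overflows 64 bits")
}

func main() {
	// 300 = 0b1_0010_1100 encodes as 0xAC 0x02 on the wire.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}

The generated code inlines this loop at every use site rather than calling a shared helper, presumably trading size for allocation-free, straight-line decode paths.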
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func TimestampProto(t time.Time) (*Timestamp, error) { + ts := &Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 00000000..928d2511 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,566 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. 
+// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_292007bbfe81227e, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func (*Timestamp) XXX_MessageName() string { + return "google.protobuf.Timestamp" +} +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) } + +var fileDescriptor_292007bbfe81227e = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d, + 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, + 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, + 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd, + 0xfa, 0x00, 0x00, 0x00, +} + +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { 
+ return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Nanos != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + } + if m.Seconds != 0 { + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + offset -= sovTimestamp(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTimestamp(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthTimestamp + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 
00000000..e03fa131 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 00000000..e0adc52b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,3396 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
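Taken together, timestamp.go and timestamp_gogo.go in the hunks above form the bridge between time.Time and the generated Timestamp message. A minimal usage sketch of those helpers (editor's illustration, assuming the vendored package is imported by its canonical path github.com/gogo/protobuf/types):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// time.Time -> *types.Timestamp; errors outside [0001-01-01, 10000-01-01)
	// or with nanos outside [0, 1e9) per validateTimestamp above.
	ts, err := types.TimestampProto(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))
	if err != nil {
		panic(err)
	}
	fmt.Println(types.TimestampString(ts)) // 2017-01-15T01:30:15.01Z

	// *types.Timestamp -> time.Time; the result is always in UTC.
	t, err := types.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(t.Equal(time.Date(2017, 1, 15, 1, 30, 15, 10000000, time.UTC))) // true

	// Wire round-trip via the std-time helpers from timestamp_gogo.go.
	buf, err := types.StdTimeMarshal(t)
	if err != nil {
		panic(err)
	}
	var back time.Time
	if err := types.StdTimeUnmarshal(&back, buf); err != nil {
		panic(err)
	}
	fmt.Println(back.Equal(t)) // true
}

Note that TimestampFromProto always returns UTC, so wall-clock zone information is deliberately dropped in the round trip; only the instant is preserved.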
+// source: google/protobuf/type.proto + +package types + +import ( + bytes "bytes" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strconv "strconv" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} + +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} + +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 0} +} + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. 
+ Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} + +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1, 1} +} + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs,proto3" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{0} +} +func (m *Type) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Type) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Type.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Type) XXX_Merge(src proto.Message) { + xxx_messageInfo_Type.Merge(m, src) +} +func (m *Type) XXX_Size() int { + return m.Size() +} +func (m *Type) XXX_DiscardUnknown() { + xxx_messageInfo_Type.DiscardUnknown(m) +} + +var xxx_messageInfo_Type proto.InternalMessageInfo + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Type) XXX_MessageName() string { + return "google.protobuf.Type" +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. 
+ Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options,proto3" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{1} +} +func (m *Field) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Field) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Field.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Field) XXX_Merge(src proto.Message) { + xxx_messageInfo_Field.Merge(m, src) +} +func (m *Field) XXX_Size() int { + return m.Size() +} +func (m *Field) XXX_DiscardUnknown() { + xxx_messageInfo_Field.DiscardUnknown(m) +} + +var xxx_messageInfo_Field proto.InternalMessageInfo + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return Field_TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return Field_CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +func (*Field) XXX_MessageName() string { + return "google.protobuf.Field" +} + +// Enum type definition. 
+type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue,proto3" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext,proto3" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{2} +} +func (m *Enum) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Enum) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Enum.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Enum) XXX_Merge(src proto.Message) { + xxx_messageInfo_Enum.Merge(m, src) +} +func (m *Enum) XXX_Size() int { + return m.Size() +} +func (m *Enum) XXX_DiscardUnknown() { + xxx_messageInfo_Enum.DiscardUnknown(m) +} + +var xxx_messageInfo_Enum proto.InternalMessageInfo + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return Syntax_SYNTAX_PROTO2 +} + +func (*Enum) XXX_MessageName() string { + return "google.protobuf.Enum" +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. 
+ Options []*Option `protobuf:"bytes,3,rep,name=options,proto3" json:"options,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{3} +} +func (m *EnumValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnumValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EnumValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EnumValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnumValue.Merge(m, src) +} +func (m *EnumValue) XXX_Size() int { + return m.Size() +} +func (m *EnumValue) XXX_DiscardUnknown() { + xxx_messageInfo_EnumValue.DiscardUnknown(m) +} + +var xxx_messageInfo_EnumValue proto.InternalMessageInfo + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (*EnumValue) XXX_MessageName() string { + return "google.protobuf.EnumValue" +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. 
+ Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { + return fileDescriptor_dd271cc1e348c538, []int{4} +} +func (m *Option) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Option) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Option.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Option) XXX_Merge(src proto.Message) { + xxx_messageInfo_Option.Merge(m, src) +} +func (m *Option) XXX_Size() int { + return m.Size() +} +func (m *Option) XXX_DiscardUnknown() { + xxx_messageInfo_Option.DiscardUnknown(m) +} + +var xxx_messageInfo_Option proto.InternalMessageInfo + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func (*Option) XXX_MessageName() string { + return "google.protobuf.Option" +} +func init() { + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") +} + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptor_dd271cc1e348c538) } + +var fileDescriptor_dd271cc1e348c538 = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x73, 0xda, 0x46, + 0x14, 0xf6, 0x0a, 0x21, 0xa3, 0x87, 0xc1, 0x9b, 0x4d, 0x26, 0x51, 0x9c, 0x19, 0x95, 0xa1, 0x3d, + 0x30, 0x39, 0xe0, 0x29, 0x78, 0x3c, 0xbd, 0x82, 0x91, 0x29, 0x63, 0x22, 0xa9, 0x8b, 0x68, 0xe2, + 0x5e, 0x18, 0x0c, 0x72, 0x86, 0x44, 0xac, 0x18, 0x24, 0x5a, 0x73, 0xeb, 0x4c, 0xcf, 0xfd, 0x27, + 0x7a, 0xea, 0xf4, 0xdc, 0x3f, 0xc2, 0xc7, 0x1e, 0x7b, 0xac, 0xc9, 0xa5, 0xc7, 0x1c, 0x73, 0x6b, + 0x67, 0x57, 0x20, 0x8b, 0x1f, 0x9d, 0x49, 0xdb, 0x1b, 0xef, 0xfb, 0xbe, 0xf7, 0x73, 0x9f, 0x1e, + 0x70, 0xf4, 0xda, 0xf7, 0x5f, 0x7b, 0xee, 0xf1, 0x64, 0xea, 0x87, 0xfe, 0xd5, 0xec, 0xfa, 0x38, + 0x9c, 0x4f, 0xdc, 0xb2, 0xb0, 0xc8, 0x61, 0xc4, 0x95, 0x57, 0xdc, 0xd1, 0xd3, 0x4d, 0x71, 0x9f, + 0xcd, 0x23, 0xf6, 0xe8, 0xb3, 0x4d, 0x2a, 0xf0, 0x67, 0xd3, 0x81, 0xdb, 0x1b, 0xf8, 0x2c, 0x74, + 0x6f, 0xc2, 0x48, 0x55, 0xfc, 0x51, 0x02, 0xd9, 0x99, 0x4f, 0x5c, 0x42, 0x40, 0x66, 0xfd, 0xb1, + 0xab, 0xa1, 0x02, 0x2a, 0xa9, 0x54, 0xfc, 0x26, 0x65, 0x50, 0xae, 0x47, 0xae, 0x37, 0x0c, 0x34, + 0xa9, 0x90, 0x2a, 0x65, 0x2b, 0x8f, 0xcb, 0x1b, 0xf9, 0xcb, 0xe7, 0x9c, 0xa6, 0x4b, 0x15, 0x79, + 0x0c, 0x8a, 0xcf, 0x5c, 0xff, 0x3a, 0xd0, 0x52, 0x85, 0x54, 0x49, 0xa5, 0x4b, 0x8b, 0x7c, 0x0e, + 0xfb, 0xfe, 0x24, 0x1c, 0xf9, 0x2c, 0xd0, 0x64, 0x11, 0xe8, 0xc9, 0x56, 0x20, 0x4b, 0xf0, 0x74, + 0xa5, 0x23, 0x06, 0xe4, 0xd7, 0xeb, 0xd5, 0xd2, 0x05, 0x54, 0xca, 0x56, 0xf4, 0x2d, 
0xcf, 0x8e, + 0x90, 0x9d, 0x45, 0x2a, 0x9a, 0x0b, 0x92, 0x26, 0x39, 0x06, 0x25, 0x98, 0xb3, 0xb0, 0x7f, 0xa3, + 0x29, 0x05, 0x54, 0xca, 0xef, 0x48, 0xdc, 0x11, 0x34, 0x5d, 0xca, 0x8a, 0xbf, 0x2a, 0x90, 0x16, + 0x4d, 0x91, 0x63, 0x90, 0xdf, 0x8e, 0xd8, 0x50, 0x0c, 0x24, 0x5f, 0x79, 0xb6, 0xbb, 0xf5, 0xf2, + 0xc5, 0x88, 0x0d, 0xa9, 0x10, 0x92, 0x06, 0x64, 0x07, 0xfd, 0xe9, 0x70, 0xc4, 0xfa, 0xde, 0x28, + 0x9c, 0x6b, 0x92, 0xf0, 0x2b, 0xfe, 0x83, 0xdf, 0xd9, 0xbd, 0x92, 0x26, 0xdd, 0xf8, 0x0c, 0xd9, + 0x6c, 0x7c, 0xe5, 0x4e, 0xb5, 0x54, 0x01, 0x95, 0xd2, 0x74, 0x69, 0xc5, 0xef, 0x23, 0x27, 0xde, + 0xe7, 0x29, 0x64, 0xf8, 0x72, 0xf4, 0x66, 0x53, 0x4f, 0xf4, 0xa7, 0xd2, 0x7d, 0x6e, 0x77, 0xa7, + 0x1e, 0xf9, 0x04, 0xb2, 0x62, 0xf8, 0xbd, 0x11, 0x1b, 0xba, 0x37, 0xda, 0xbe, 0x88, 0x05, 0x02, + 0x6a, 0x71, 0x84, 0xe7, 0x99, 0xf4, 0x07, 0x6f, 0xdd, 0xa1, 0x96, 0x29, 0xa0, 0x52, 0x86, 0x2e, + 0xad, 0xe4, 0x5b, 0xa9, 0x1f, 0xf9, 0x56, 0xcf, 0x40, 0x7d, 0x13, 0xf8, 0xac, 0x27, 0xea, 0x03, + 0x51, 0x47, 0x86, 0x03, 0x26, 0xaf, 0xf1, 0x53, 0xc8, 0x0d, 0xdd, 0xeb, 0xfe, 0xcc, 0x0b, 0x7b, + 0xdf, 0xf6, 0xbd, 0x99, 0xab, 0x65, 0x85, 0xe0, 0x60, 0x09, 0x7e, 0xcd, 0xb1, 0xe2, 0xad, 0x04, + 0x32, 0x9f, 0x24, 0xc1, 0x70, 0xe0, 0x5c, 0xda, 0x46, 0xaf, 0x6b, 0x5e, 0x98, 0xd6, 0x4b, 0x13, + 0xef, 0x91, 0x43, 0xc8, 0x0a, 0xa4, 0x61, 0x75, 0xeb, 0x6d, 0x03, 0x23, 0x92, 0x07, 0x10, 0xc0, + 0x79, 0xdb, 0xaa, 0x39, 0x58, 0x8a, 0xed, 0x96, 0xe9, 0x9c, 0x9e, 0xe0, 0x54, 0xec, 0xd0, 0x8d, + 0x00, 0x39, 0x29, 0xa8, 0x56, 0x70, 0x3a, 0xce, 0x71, 0xde, 0x7a, 0x65, 0x34, 0x4e, 0x4f, 0xb0, + 0xb2, 0x8e, 0x54, 0x2b, 0x78, 0x9f, 0xe4, 0x40, 0x15, 0x48, 0xdd, 0xb2, 0xda, 0x38, 0x13, 0xc7, + 0xec, 0x38, 0xb4, 0x65, 0x36, 0xb1, 0x1a, 0xc7, 0x6c, 0x52, 0xab, 0x6b, 0x63, 0x88, 0x23, 0xbc, + 0x30, 0x3a, 0x9d, 0x5a, 0xd3, 0xc0, 0xd9, 0x58, 0x51, 0xbf, 0x74, 0x8c, 0x0e, 0x3e, 0x58, 0x2b, + 0xab, 0x5a, 0xc1, 0xb9, 0x38, 0x85, 0x61, 0x76, 0x5f, 0xe0, 0x3c, 0x79, 0x00, 0xb9, 0x28, 0xc5, + 0xaa, 0x88, 0xc3, 0x0d, 0xe8, 0xf4, 0x04, 0xe3, 0xfb, 0x42, 0xa2, 0x28, 0x0f, 0xd6, 0x80, 0xd3, + 0x13, 0x4c, 0x8a, 0x21, 0x64, 0x13, 0xbb, 0x45, 0x9e, 0xc0, 0xc3, 0xb3, 0x1a, 0x6d, 0xb4, 0xcc, + 0x5a, 0xbb, 0xe5, 0x5c, 0x26, 0xe6, 0xaa, 0xc1, 0xa3, 0x24, 0x61, 0xd9, 0x4e, 0xcb, 0x32, 0x6b, + 0x6d, 0x8c, 0x36, 0x19, 0x6a, 0x7c, 0xd5, 0x6d, 0x51, 0xa3, 0x81, 0xa5, 0x6d, 0xc6, 0x36, 0x6a, + 0x8e, 0xd1, 0xc0, 0xa9, 0xe2, 0x5f, 0x08, 0x64, 0x83, 0xcd, 0xc6, 0x3b, 0xcf, 0xc8, 0x17, 0xa0, + 0xba, 0x6c, 0x36, 0x8e, 0x9e, 0x3f, 0xba, 0x24, 0x47, 0x5b, 0x4b, 0xc5, 0xbd, 0xc5, 0x32, 0xd0, + 0x7b, 0x71, 0x72, 0x19, 0x53, 0xff, 0xf9, 0x70, 0xc8, 0xff, 0xef, 0x70, 0xa4, 0x3f, 0xee, 0x70, + 0xbc, 0x01, 0x35, 0x6e, 0x61, 0xe7, 0x14, 0xee, 0x3f, 0x6c, 0x69, 0xed, 0xc3, 0xfe, 0xf7, 0x3d, + 0x16, 0xbf, 0x04, 0x25, 0x82, 0x76, 0x26, 0x7a, 0x0e, 0xe9, 0xd5, 0xa8, 0x79, 0xe3, 0x8f, 0xb6, + 0xc2, 0xd5, 0xd8, 0x9c, 0x46, 0x92, 0xe7, 0x65, 0x50, 0xa2, 0x3e, 0xf8, 0xb2, 0x75, 0x2e, 0x4d, + 0xa7, 0xf6, 0xaa, 0x67, 0x53, 0xcb, 0xb1, 0x2a, 0x78, 0x6f, 0x13, 0xaa, 0x62, 0x54, 0xff, 0x01, + 0xfd, 0x7e, 0xa7, 0xef, 0xbd, 0xbf, 0xd3, 0xd1, 0x87, 0x3b, 0x1d, 0x7d, 0xbf, 0xd0, 0xd1, 0xcf, + 0x0b, 0x1d, 0xdd, 0x2e, 0x74, 0xf4, 0xdb, 0x42, 0x47, 0x7f, 0x2c, 0x74, 0xf4, 0xe7, 0x42, 0xdf, + 0x7b, 0xcf, 0xf1, 0x77, 0x3a, 0xba, 0x7d, 0xa7, 0x23, 0x78, 0x38, 0xf0, 0xc7, 0x9b, 0x25, 0xd4, + 0x55, 0xfe, 0x9f, 0x63, 0x73, 0xcb, 0x46, 0xdf, 0xa4, 0xf9, 0xd1, 0x0a, 0x3e, 0x20, 0xf4, 0x93, + 0x94, 0x6a, 0xda, 0xf5, 0x5f, 0x24, 0xbd, 0x19, 0xc9, 0xed, 0x55, 0xc5, 0x2f, 0x5d, 0xcf, 0xbb, + 0x60, 0xfe, 
0x77, 0x8c, 0xbb, 0x05, 0x57, 0x8a, 0x88, 0x53, 0xfd, 0x3b, 0x00, 0x00, 0xff, 0xff, + 0xbc, 0x2a, 0x5e, 0x82, 0x2b, 0x07, 0x00, 0x00, +} + +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + 
return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if 
len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + 
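+	// Unknown fields take part in equality as raw bytes, so two messages that
+	// differ only in the wire encoding of their unrecognized data compare unequal.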
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", 
this.Value)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Type) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x30 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Oneofs) > 0 { + for iNdEx := len(m.Oneofs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Oneofs[iNdEx]) + copy(dAtA[i:], m.Oneofs[iNdEx]) + i = encodeVarintType(dAtA, i, uint64(len(m.Oneofs[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Fields) > 0 { + for iNdEx := len(m.Fields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Fields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Field) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.DefaultValue) > 0 { + i -= len(m.DefaultValue) + copy(dAtA[i:], m.DefaultValue) + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i-- + dAtA[i] = 0x5a + } + if len(m.JsonName) > 0 { + i -= len(m.JsonName) + copy(dAtA[i:], m.JsonName) + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i-- + dAtA[i] = 0x52 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if m.Packed { + i-- + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + 
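+			// (This else branch is unreachable: the enclosing "if m.Packed"
+			// already guarantees the value is true.)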
} + i-- + dAtA[i] = 0x40 + } + if m.OneofIndex != 0 { + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + i-- + dAtA[i] = 0x38 + } + if len(m.TypeUrl) > 0 { + i -= len(m.TypeUrl) + copy(dAtA[i:], m.TypeUrl) + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i-- + dAtA[i] = 0x32 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x22 + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x18 + } + if m.Cardinality != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Enum) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Syntax != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + i-- + dAtA[i] = 0x28 + } + if m.SourceContext != nil { + { + size, err := m.SourceContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Enumvalue) > 0 { + for iNdEx := len(m.Enumvalue) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Enumvalue[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnumValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if m.Number != 0 { + i = encodeVarintType(dAtA, i, uint64(m.Number)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Option) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Option) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintType(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + offset -= sovType(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(5) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 7) + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(5) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 12) + } + return this +} + +func NewPopulatedEnum(r randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(5) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(5) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 6) + } + return this +} + +func 
NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(5) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 4) + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(5) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedType(r, 3) + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Field) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + 
sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Enum) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *EnumValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Option) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovType(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + repeatedStringForFields := "[]*Field{" + for _, f := range this.Fields { + repeatedStringForFields += strings.Replace(f.String(), "Field", "Field", 1) + "," + } + repeatedStringForFields += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + repeatedStringForFields + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" 
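+	// The strings.Replace calls above are identity replacements here;
+	// presumably the generator emits them so that type names can be rewritten
+	// when the element type lives in another package.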
+ s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnumvalue := "[]*EnumValue{" + for _, f := range this.Enumvalue { + repeatedStringForEnumvalue += strings.Replace(f.String(), "EnumValue", "EnumValue", 1) + "," + } + repeatedStringForEnumvalue += "}" + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + repeatedStringForEnumvalue + `,`, + `Options:` + repeatedStringForOptions + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]*Option{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(f.String(), "Option", "Option", 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Field_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= Field_Cardinality(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
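+				// the decoded length overflowed int and wrapped negative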
return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
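+			// Unrecognized fields are kept verbatim so the message re-marshals
+			// byte-for-byte, which is exactly what Equal and Compare check.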
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := 
m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= Syntax(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthType + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthType + } + iNdEx += length + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipType(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + if iNdEx < 0 { + return 0, ErrInvalidLengthType + } + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 00000000..fa1fd7ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2756 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/wrappers.proto + +package types + +import ( + bytes "bytes" + encoding_binary "encoding/binary" + fmt "fmt" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. 
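+	// Being a message rather than a bare float64, a nil *DoubleValue can
+	// represent "unset", which a plain scalar field cannot.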
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return m.Size() +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +func (*DoubleValue) XXX_MessageName() string { + return "google.protobuf.DoubleValue" +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return m.Size() +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +func (*FloatValue) XXX_MessageName() string { + return "google.protobuf.FloatValue" +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
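+	// Rendered as a JSON string because int64 exceeds the range that JSON
+	// numbers (IEEE-754 doubles) can carry exactly. A minimal round-trip
+	// sketch, assuming the Marshal/Unmarshal methods generated later in
+	// this file:
+	//
+	//	v := &Int64Value{Value: 1 << 60}
+	//	b, _ := v.Marshal()   // field 1, varint-encoded
+	//	var out Int64Value
+	//	_ = out.Unmarshal(b)  // out.Value == 1<<60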
+	Value                int64    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int64Value) Reset()      { *m = Int64Value{} }
+func (*Int64Value) ProtoMessage() {}
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{2}
+}
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+func (m *Int64Value) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Int64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int64Value.Merge(m, src)
+}
+func (m *Int64Value) XXX_Size() int {
+	return m.Size()
+}
+func (m *Int64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64Value proto.InternalMessageInfo
+
+func (m *Int64Value) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (*Int64Value) XXX_MessageName() string {
+	return "google.protobuf.Int64Value"
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is JSON string.
+type UInt64Value struct {
+	// The uint64 value.
+	Value                uint64   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt64Value) Reset()      { *m = UInt64Value{} }
+func (*UInt64Value) ProtoMessage() {}
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{3}
+}
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UInt64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt64Value.Merge(m, src)
+}
+func (m *UInt64Value) XXX_Size() int {
+	return m.Size()
+}
+func (m *UInt64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
+
+func (m *UInt64Value) GetValue() uint64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (*UInt64Value) XXX_MessageName() string {
+	return "google.protobuf.UInt64Value"
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is JSON number.
+type Int32Value struct {
+	// The int32 value.
+	Value                int32    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int32Value) Reset()      { *m = Int32Value{} }
+func (*Int32Value) ProtoMessage() {}
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{4}
+}
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+func (m *Int32Value) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Int32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int32Value.Merge(m, src)
+}
+func (m *Int32Value) XXX_Size() int {
+	return m.Size()
+}
+func (m *Int32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32Value proto.InternalMessageInfo
+
+func (m *Int32Value) GetValue() int32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (*Int32Value) XXX_MessageName() string {
+	return "google.protobuf.Int32Value"
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+type UInt32Value struct {
+	// The uint32 value.
+	Value                uint32   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt32Value) Reset()      { *m = UInt32Value{} }
+func (*UInt32Value) ProtoMessage() {}
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{5}
+}
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *UInt32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt32Value.Merge(m, src)
+}
+func (m *UInt32Value) XXX_Size() int {
+	return m.Size()
+}
+func (m *UInt32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
+
+func (m *UInt32Value) GetValue() uint32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+func (*UInt32Value) XXX_MessageName() string {
+	return "google.protobuf.UInt32Value"
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+type BoolValue struct {
+	// The bool value.
+	Value                bool     `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BoolValue) Reset()      { *m = BoolValue{} }
+func (*BoolValue) ProtoMessage() {}
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{6}
+}
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+func (m *BoolValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BoolValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BoolValue.Merge(m, src)
+}
+func (m *BoolValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *BoolValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BoolValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolValue proto.InternalMessageInfo
+
+func (m *BoolValue) GetValue() bool {
+	if m != nil {
+		return m.Value
+	}
+	return false
+}
+
+func (*BoolValue) XXX_MessageName() string {
+	return "google.protobuf.BoolValue"
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+type StringValue struct {
+	// The string value.
+	Value                string   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StringValue) Reset()      { *m = StringValue{} }
+func (*StringValue) ProtoMessage() {}
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{7}
+}
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+func (m *StringValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StringValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StringValue.Merge(m, src)
+}
+func (m *StringValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *StringValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_StringValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringValue proto.InternalMessageInfo
+
+func (m *StringValue) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+func (*StringValue) XXX_MessageName() string {
+	return "google.protobuf.StringValue"
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+type BytesValue struct {
+	// The bytes value.
+	Value                []byte   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BytesValue) Reset()      { *m = BytesValue{} }
+func (*BytesValue) ProtoMessage() {}
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{8}
+}
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+func (m *BytesValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *BytesValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesValue.Merge(m, src)
+}
+func (m *BytesValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *BytesValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BytesValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesValue proto.InternalMessageInfo
+
+func (m *BytesValue) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (*BytesValue) XXX_MessageName() string {
+	return "google.protobuf.BytesValue"
+}
+func init() {
+	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
+
+var fileDescriptor_5377b62bda767935 = []byte{
+	// 285 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b,
+	0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28, 0xc7, 0xd8, 0xf0, 0x48,
+	0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0,
+	0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb,
+	0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 0x6f, 0x38, 0x34, 0xbe, 0x02,
+	0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31,
+	0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f,
+	0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c,
+	0x08, 0x00, 0x00, 0xff, 0xff, 0x31, 0x55, 0x64, 0x90, 0x0a, 0x02, 0x00, 0x00,
+}
+
+func (this *DoubleValue) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*DoubleValue)
+	if !ok {
+		that2, ok := that.(DoubleValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *FloatValue) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*FloatValue)
+	if !ok {
+		that2, ok := that.(FloatValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *Int64Value) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*Int64Value)
+	if !ok {
+		that2, ok := that.(Int64Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *UInt64Value) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*UInt64Value)
+	if !ok {
+		that2, ok := that.(UInt64Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *Int32Value) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*Int32Value)
+	if !ok {
+		that2, ok := that.(Int32Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *UInt32Value) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*UInt32Value)
+	if !ok {
+		that2, ok := that.(UInt32Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *BoolValue) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*BoolValue)
+	if !ok {
+		that2, ok := that.(BoolValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if !this.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *StringValue) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*StringValue)
+	if !ok {
+		that2, ok := that.(StringValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if this.Value != that1.Value {
+		if this.Value < that1.Value {
+			return -1
+		}
+		return 1
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *BytesValue) Compare(that interface{}) int {
+	if that == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	}
+
+	that1, ok := that.(*BytesValue)
+	if !ok {
+		that2, ok := that.(BytesValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return 1
+		}
+	}
+	if that1 == nil {
+		if this == nil {
+			return 0
+		}
+		return 1
+	} else if this == nil {
+		return -1
+	}
+	if c := bytes.Compare(this.Value, that1.Value); c != 0 {
+		return c
+	}
+	if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
+		return c
+	}
+	return 0
+}
+func (this *DoubleValue) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*DoubleValue)
+	if !ok {
+		that2, ok := that.(DoubleValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *FloatValue) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*FloatValue)
+	if !ok {
+		that2, ok := that.(FloatValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *Int64Value) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*Int64Value)
+	if !ok {
+		that2, ok := that.(Int64Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *UInt64Value) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*UInt64Value)
+	if !ok {
+		that2, ok := that.(UInt64Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *Int32Value) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*Int32Value)
+	if !ok {
+		that2, ok := that.(Int32Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *UInt32Value) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*UInt32Value)
+	if !ok {
+		that2, ok := that.(UInt32Value)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *BoolValue) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*BoolValue)
+	if !ok {
+		that2, ok := that.(BoolValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *StringValue) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*StringValue)
+	if !ok {
+		that2, ok := that.(StringValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if this.Value != that1.Value {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *BytesValue) Equal(that interface{}) bool {
+	if that == nil {
+		return this == nil
+	}
+
+	that1, ok := that.(*BytesValue)
+	if !ok {
+		that2, ok := that.(BytesValue)
+		if ok {
+			that1 = &that2
+		} else {
+			return false
+		}
+	}
+	if that1 == nil {
+		return this == nil
+	} else if this == nil {
+		return false
+	}
+	if !bytes.Equal(this.Value, that1.Value) {
+		return false
+	}
+	if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
+		return false
+	}
+	return true
+}
+func (this *DoubleValue) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.DoubleValue{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FloatValue) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.FloatValue{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *Int64Value) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.Int64Value{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *UInt64Value) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.UInt64Value{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *Int32Value) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.Int32Value{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *UInt32Value) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.UInt32Value{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *BoolValue) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.BoolValue{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *StringValue) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.StringValue{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *BytesValue) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&types.BytesValue{")
+	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringWrappers(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func (m *DoubleValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DoubleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value != 0 {
+		i -= 8
+		encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value))))
+		i--
+		dAtA[i] = 0x9
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *FloatValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *FloatValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value != 0 {
+		i -= 4
+		encoding_binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value))))
+		i--
+		dAtA[i] = 0xd
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Int64Value) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Int64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value != 0 {
+		i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *UInt64Value) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UInt64Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value != 0 {
+		i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Int32Value) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Int32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value != 0 {
+		i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *UInt32Value) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *UInt32Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value != 0 {
+		i = encodeVarintWrappers(dAtA, i, uint64(m.Value))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BoolValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BoolValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Value {
+		i--
+		if m.Value {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StringValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StringValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StringValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *BytesValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *BytesValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int {
+	offset -= sovWrappers(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue {
+	this := &DoubleValue{}
+	this.Value = float64(r.Float64())
+	if r.Intn(2) == 0 {
+		this.Value *= -1
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue {
+	this := &FloatValue{}
+	this.Value = float32(r.Float32())
+	if r.Intn(2) == 0 {
+		this.Value *= -1
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value {
+	this := &Int64Value{}
+	this.Value = int64(r.Int63())
+	if r.Intn(2) == 0 {
+		this.Value *= -1
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value {
+	this := &UInt64Value{}
+	this.Value = uint64(uint64(r.Uint32()))
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value {
+	this := &Int32Value{}
+	this.Value = int32(r.Int31())
+	if r.Intn(2) == 0 {
+		this.Value *= -1
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value {
+	this := &UInt32Value{}
+	this.Value = uint32(r.Uint32())
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue {
+	this := &BoolValue{}
+	this.Value = bool(bool(r.Intn(2) == 0))
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue {
+	this := &StringValue{}
+	this.Value = string(randStringWrappers(r))
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue {
+	this := &BytesValue{}
+	v1 := r.Intn(100)
+	this.Value = make([]byte, v1)
+	for i := 0; i < v1; i++ {
+		this.Value[i] = byte(r.Intn(256))
+	}
+	if !easy && r.Intn(10) != 0 {
+		this.XXX_unrecognized = randUnrecognizedWrappers(r, 2)
+	}
+	return this
+}
+
+type randyWrappers interface {
+	Float32() float32
+	Float64() float64
+	Int63() int64
+	Int31() int32
+	Uint32() uint32
+	Intn(n int) int
+}
+
+func randUTF8RuneWrappers(r randyWrappers) rune {
+	ru := r.Intn(62)
+	if ru < 10 {
+		return rune(ru + 48)
+	} else if ru < 36 {
+		return rune(ru + 55)
+	}
+	return rune(ru + 61)
+}
+func randStringWrappers(r randyWrappers) string {
+	v2 := r.Intn(100)
+	tmps := make([]rune, v2)
+	for i := 0; i < v2; i++ {
+		tmps[i] = randUTF8RuneWrappers(r)
+	}
+	return string(tmps)
+}
+func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) {
+	l := r.Intn(5)
+	for i := 0; i < l; i++ {
+		wire := r.Intn(4)
+		if wire == 3 {
+			wire = 5
+		}
+		fieldNumber := maxFieldNumber + r.Intn(100)
+		dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire)
+	}
+	return dAtA
+}
+func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte {
+	key := uint32(fieldNumber)<<3 | uint32(wire)
+	switch wire {
+	case 0:
+		dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key))
+		v3 := r.Int63()
+		if r.Intn(2) == 0 {
+			v3 *= -1
+		}
+		dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3))
+	case 1:
+		dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key))
+		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
+	case 2:
+		dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key))
+		ll := r.Intn(100)
+		dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll))
+		for j := 0; j < ll; j++ {
+			dAtA = append(dAtA, byte(r.Intn(256)))
+		}
+	default:
+		dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key))
+		dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
+	}
+	return dAtA
+}
+func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte {
+	for v >= 1<<7 {
+		dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
+		v >>= 7
+	}
+	dAtA = append(dAtA, uint8(v))
+	return dAtA
+}
+func (m *DoubleValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 9
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *FloatValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 5
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Int64Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 1 + sovWrappers(uint64(m.Value))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *UInt64Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 1 + sovWrappers(uint64(m.Value))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Int32Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 1 + sovWrappers(uint64(m.Value))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *UInt32Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != 0 {
+		n += 1 + sovWrappers(uint64(m.Value))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *BoolValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StringValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovWrappers(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *BytesValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovWrappers(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovWrappers(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozWrappers(x uint64) (n int) {
+	return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *DoubleValue) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DoubleValue{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *FloatValue) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&FloatValue{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Int64Value) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Int64Value{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UInt64Value) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UInt64Value{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Int32Value) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Int32Value{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UInt32Value) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UInt32Value{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BoolValue) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BoolValue{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StringValue) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StringValue{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BytesValue) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BytesValue{`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringWrappers(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *DoubleValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 1 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var v uint64
+			if (iNdEx + 8) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:]))
+			iNdEx += 8
+			m.Value = float64(math.Float64frombits(v))
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *FloatValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FloatValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 5 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var v uint32
+			if (iNdEx + 4) > l {
+				return io.ErrUnexpectedEOF
+			}
+			v = uint32(encoding_binary.LittleEndian.Uint32(dAtA[iNdEx:]))
+			iNdEx += 4
+			m.Value = float32(math.Float32frombits(v))
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Int64Value) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Int64Value: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			m.Value = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Value |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UInt64Value) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			m.Value = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Value |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Int32Value) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Int32Value: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			m.Value = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Value |= int32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UInt32Value) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			m.Value = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Value |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BoolValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BoolValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Value = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StringValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StringValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BytesValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BytesValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipWrappers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthWrappers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipWrappers(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowWrappers
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowWrappers
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthWrappers
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthWrappers
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowWrappers
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipWrappers(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthWrappers
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowWrappers   = fmt.Errorf("proto: integer overflow")
+)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go
new file mode 100644
index 00000000..d905df36
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/gogo/protobuf/types/wrappers_gogo.go
@@ -0,0 +1,300 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 { + v := NewPopulatedDoubleValue(r, easy) + return &v.Value +} + +func SizeOfStdDouble(v float64) int { + pv := &DoubleValue{Value: v} + return pv.Size() +} + +func StdDoubleMarshal(v float64) ([]byte, error) { + size := SizeOfStdDouble(v) + buf := make([]byte, size) + _, err := StdDoubleMarshalTo(v, buf) + return buf, err +} + +func StdDoubleMarshalTo(v float64, data []byte) (int, error) { + pv := &DoubleValue{Value: v} + return pv.MarshalTo(data) +} + +func StdDoubleUnmarshal(v *float64, data []byte) error { + pv := &DoubleValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 { + v := NewPopulatedFloatValue(r, easy) + return &v.Value +} + +func SizeOfStdFloat(v float32) int { + pv := &FloatValue{Value: v} + return pv.Size() +} + +func StdFloatMarshal(v float32) ([]byte, error) { + size := SizeOfStdFloat(v) + buf := make([]byte, size) + _, err := StdFloatMarshalTo(v, buf) + return buf, err +} + +func StdFloatMarshalTo(v float32, data []byte) (int, error) { + pv := &FloatValue{Value: v} + return pv.MarshalTo(data) +} + +func StdFloatUnmarshal(v *float32, data []byte) error { + pv := &FloatValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 { + v := NewPopulatedInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdInt64(v int64) int { + pv := &Int64Value{Value: v} + return pv.Size() +} + +func StdInt64Marshal(v int64) ([]byte, error) { + size := SizeOfStdInt64(v) + buf := make([]byte, size) + _, err := StdInt64MarshalTo(v, buf) + return buf, err +} + +func StdInt64MarshalTo(v int64, data []byte) (int, error) { + pv := &Int64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt64Unmarshal(v *int64, data []byte) error { + pv := &Int64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 { + v := NewPopulatedUInt64Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt64(v uint64) int { + pv := &UInt64Value{Value: v} + return pv.Size() +} + +func StdUInt64Marshal(v uint64) ([]byte, error) { + size := SizeOfStdUInt64(v) + buf := make([]byte, size) + _, err := StdUInt64MarshalTo(v, buf) + return buf, err +} + +func StdUInt64MarshalTo(v uint64, data []byte) (int, 
error) { + pv := &UInt64Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt64Unmarshal(v *uint64, data []byte) error { + pv := &UInt64Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 { + v := NewPopulatedInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdInt32(v int32) int { + pv := &Int32Value{Value: v} + return pv.Size() +} + +func StdInt32Marshal(v int32) ([]byte, error) { + size := SizeOfStdInt32(v) + buf := make([]byte, size) + _, err := StdInt32MarshalTo(v, buf) + return buf, err +} + +func StdInt32MarshalTo(v int32, data []byte) (int, error) { + pv := &Int32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdInt32Unmarshal(v *int32, data []byte) error { + pv := &Int32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 { + v := NewPopulatedUInt32Value(r, easy) + return &v.Value +} + +func SizeOfStdUInt32(v uint32) int { + pv := &UInt32Value{Value: v} + return pv.Size() +} + +func StdUInt32Marshal(v uint32) ([]byte, error) { + size := SizeOfStdUInt32(v) + buf := make([]byte, size) + _, err := StdUInt32MarshalTo(v, buf) + return buf, err +} + +func StdUInt32MarshalTo(v uint32, data []byte) (int, error) { + pv := &UInt32Value{Value: v} + return pv.MarshalTo(data) +} + +func StdUInt32Unmarshal(v *uint32, data []byte) error { + pv := &UInt32Value{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBool(r randyWrappers, easy bool) *bool { + v := NewPopulatedBoolValue(r, easy) + return &v.Value +} + +func SizeOfStdBool(v bool) int { + pv := &BoolValue{Value: v} + return pv.Size() +} + +func StdBoolMarshal(v bool) ([]byte, error) { + size := SizeOfStdBool(v) + buf := make([]byte, size) + _, err := StdBoolMarshalTo(v, buf) + return buf, err +} + +func StdBoolMarshalTo(v bool, data []byte) (int, error) { + pv := &BoolValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBoolUnmarshal(v *bool, data []byte) error { + pv := &BoolValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdString(r randyWrappers, easy bool) *string { + v := NewPopulatedStringValue(r, easy) + return &v.Value +} + +func SizeOfStdString(v string) int { + pv := &StringValue{Value: v} + return pv.Size() +} + +func StdStringMarshal(v string) ([]byte, error) { + size := SizeOfStdString(v) + buf := make([]byte, size) + _, err := StdStringMarshalTo(v, buf) + return buf, err +} + +func StdStringMarshalTo(v string, data []byte) (int, error) { + pv := &StringValue{Value: v} + return pv.MarshalTo(data) +} + +func StdStringUnmarshal(v *string, data []byte) error { + pv := &StringValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} +func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte { + v := NewPopulatedBytesValue(r, easy) + return &v.Value +} + +func SizeOfStdBytes(v []byte) int { + pv := &BytesValue{Value: v} + return pv.Size() +} + +func StdBytesMarshal(v []byte) ([]byte, error) { + size := SizeOfStdBytes(v) + buf := make([]byte, size) + _, err := StdBytesMarshalTo(v, buf) + return buf, err +} + +func StdBytesMarshalTo(v []byte, data []byte) (int, error) { + pv := &BytesValue{Value: v} + return pv.MarshalTo(data) +} + +func StdBytesUnmarshal(v *[]byte, data []byte) error { + pv := 
&BytesValue{} + if err := pv.Unmarshal(data); err != nil { + return err + } + *v = pv.Value + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/LICENSE new file mode 100644 index 00000000..37ec93a1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." 
+ +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. 
+You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. 
We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/README b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/README new file mode 100644 index 00000000..387b4eb6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/glog.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/glog.go new file mode 100644 index 00000000..54bd7afd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
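+
+// Illustrative sketch (not part of upstream glog): the minimal call pattern
+// described by the package documentation below. flag.Parse must run before
+// any logging so the -v, -vmodule and -logtostderr flags take effect, and
+// Flush should run before exit because output is buffered; nItems is an
+// assumed variable.
+//
+// func main() {
+// flag.Parse()
+// defer glog.Flush()
+// glog.Infof("starting up")
+// if glog.V(2) {
+// glog.Infoln("processed", nItems, "elements")
+// }
+// }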
+ +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. 
+func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. 
+ logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. 
+ t.line = 0
+ t.file = ""
+ return nil
+ }
+ fields := strings.Split(value, ":")
+ if len(fields) != 2 {
+ return errTraceSyntax
+ }
+ file, line := fields[0], fields[1]
+ if !strings.Contains(file, ".") {
+ return errTraceSyntax
+ }
+ v, err := strconv.Atoi(line)
+ if err != nil {
+ return errTraceSyntax
+ }
+ if v <= 0 {
+ return errors.New("negative or zero value for level")
+ }
+ logging.mu.Lock()
+ defer logging.mu.Unlock()
+ t.line = v
+ t.file = file
+ return nil
+}
+
+// flushSyncWriter is the interface satisfied by logging destinations.
+type flushSyncWriter interface {
+ Flush() error
+ Sync() error
+ io.Writer
+}
+
+func init() {
+ flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
+ flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
+ flag.Var(&logging.verbosity, "v", "log level for V logs")
+ flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
+ flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
+ flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
+
+ // Default stderrThreshold is ERROR.
+ logging.stderrThreshold = errorLog
+
+ logging.setVState(0, nil, false)
+ go logging.flushDaemon()
+}
+
+// Flush flushes all pending log I/O.
+func Flush() {
+ logging.lockAndFlushAll()
+}
+
+// loggingT collects all the global state of the logging setup.
+type loggingT struct {
+ // Boolean flags. Not handled atomically because the flag.Value interface
+ // does not let us avoid the =true, and that shorthand is necessary for
+ // compatibility. TODO: does this matter enough to fix? Seems unlikely.
+ toStderr bool // The -logtostderr flag.
+ alsoToStderr bool // The -alsologtostderr flag.
+
+ // Level flag. Handled atomically.
+ stderrThreshold severity // The -stderrthreshold flag.
+
+ // freeList is a list of byte buffers, maintained under freeListMu.
+ freeList *buffer
+ // freeListMu maintains the free list. It is separate from the main mutex
+ // so buffers can be grabbed and printed to without holding the main lock,
+ // for better parallelization.
+ freeListMu sync.Mutex
+
+ // mu protects the remaining elements of this structure and is
+ // used to synchronize logging.
+ mu sync.Mutex
+ // file holds writer for each of the log types.
+ file [numSeverity]flushSyncWriter
+ // pcs is used in V to avoid an allocation when computing the caller's PC.
+ pcs [1]uintptr
+ // vmap is a cache of the V Level for each V() call site, identified by PC.
+ // It is wiped whenever the vmodule flag changes state.
+ vmap map[uintptr]Level
+ // filterLength stores the length of the vmodule filter chain. If greater
+ // than zero, it means vmodule is enabled. It may be read safely
+ // using atomic.LoadInt32, but is only modified under mu.
+ filterLength int32
+ // traceLocation is the state of the -log_backtrace_at flag.
+ traceLocation traceLocation
+ // These flags are modified only under lock, although verbosity may be fetched
+ // safely using atomic.LoadInt32.
+ vmodule moduleSpec // The state of the -vmodule flag.
+ verbosity Level // V logging level, the value of the -v flag.
+}
+
+// buffer holds a byte Buffer for reuse. The zero value is ready for use.
+type buffer struct {
+ bytes.Buffer
+ tmp [64]byte // temporary byte array for creating headers.
+ next *buffer
+}
+
+var logging loggingT
+
+// setVState sets a consistent state for V logging.
+// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' 
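+ // tmp[0:15] now holds "Lmmdd hh:mm:ss."; the microsecond field, the
+ // space-padded PID and the "file:line] " suffix are filled in next.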
+ buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. 
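+ // exit flushes what it can and then terminates the process; see exit below.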
+ l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. 
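+//
+// Write (below) checks nbytes against MaxSize before each write and rotates
+// to a fresh file once the next write would cross it, so a single log file
+// stays below MaxSize (roughly 1.8GB by default, see glog_file.go).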
+type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. 
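+//
+// Illustrative usage, assuming a plain import of the standard "log" package:
+//
+// glog.CopyStandardLogTo("INFO")
+// log.Print("this line is captured by glog at INFO severity")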
+func CopyStandardLogTo(name string) {
+ sev, ok := severityByName(name)
+ if !ok {
+ panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
+ }
+ // Set a log format that captures the user's file and line:
+ // d.go:23: message
+ stdLog.SetFlags(stdLog.Lshortfile)
+ stdLog.SetOutput(logBridge(sev))
+}
+
+// logBridge provides the Write method that enables CopyStandardLogTo to connect
+// Go's standard logs to the logs provided by this package.
+type logBridge severity
+
+// Write parses the standard logging line and passes its components to the
+// logger for severity(lb).
+func (lb logBridge) Write(b []byte) (n int, err error) {
+ var (
+ file = "???"
+ line = 1
+ text string
+ )
+ // Split "d.go:23: message" into "d.go", "23", and "message".
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
+ text = fmt.Sprintf("bad log format: %s", b)
+ } else {
+ file = string(parts[0])
+ text = string(parts[2][1:]) // skip leading space
+ line, err = strconv.Atoi(string(parts[1]))
+ if err != nil {
+ text = fmt.Sprintf("bad line number: %s", b)
+ line = 1
+ }
+ }
+ // printWithFileLine with alsoToStderr=true, so standard log messages
+ // always appear on standard error.
+ logging.printWithFileLine(severity(lb), file, line, true, text)
+ return len(b), nil
+}
+
+// setV computes and remembers the V level for a given PC
+// when vmodule is enabled.
+// File pattern matching takes the basename of the file, stripped
+// of its .go suffix, and uses filepath.Match, which is a little more
+// general than the *? matching used in C++.
+// l.mu is held.
+func (l *loggingT) setV(pc uintptr) Level {
+ fn := runtime.FuncForPC(pc)
+ file, _ := fn.FileLine(pc)
+ // The file is something like /a/b/c/d.go. We want just the d.
+ if strings.HasSuffix(file, ".go") {
+ file = file[:len(file)-3]
+ }
+ if slash := strings.LastIndex(file, "/"); slash >= 0 {
+ file = file[slash+1:]
+ }
+ for _, filter := range l.vmodule.filter {
+ if filter.match(file) {
+ l.vmap[pc] = filter.level
+ return filter.level
+ }
+ }
+ l.vmap[pc] = 0
+ return 0
+}
+
+// Verbose is a boolean type that implements Infof (like Printf) etc.
+// See the documentation of V for more information.
+type Verbose bool
+
+// V reports whether verbosity at the call site is at least the requested level.
+// The returned value is a boolean of type Verbose, which implements Info, Infoln
+// and Infof. These methods will write to the Info log if called.
+// Thus, one may write either
+// if glog.V(2) { glog.Info("log this") }
+// or
+// glog.V(2).Info("log this")
+// The second form is shorter but the first is cheaper if logging is off because it does
+// not evaluate its arguments.
+//
+// Whether an individual call to V generates a log record depends on the setting of
+// the -v and -vmodule flags; both are off by default. If the level in the call to
+// V is at least the value of -v, or of -vmodule for the source file containing the
+// call, the V call will log.
+func V(level Level) Verbose {
+ // This function tries hard to be cheap unless there's work to do.
+ // The fast path is two atomic loads and compares.
+
+ // Here is a cheap but safe test to see if V logging is enabled globally.
+ if logging.verbosity.get() >= level {
+ return Verbose(true)
+ }
+
+ // It's off globally but vmodule may still be set.
+ // Here is another cheap but safe test to see if vmodule is enabled.
+ if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. 
+// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/glog_file.go new file mode 100644 index 00000000..65075d28 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. 
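// Editor's note: an illustrative sketch, not part of the vendored diff. It
// shows how the file sink implemented in glog_file.go is reached from user
// code: glog writes one file per severity into the first usable directory,
// --log_dir if set, otherwise os.TempDir(). Only the public API from glog.go
// above is assumed.

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()       // picks up -log_dir, -v, -logtostderr, -alsologtostderr, ...
	defer glog.Flush() // log lines are buffered; flush them before exiting

	glog.Info("written to the INFO log file")
	glog.Warningf("written to WARNING and INFO: attempt %d", 2)
	if glog.V(2) {
		glog.Info("only emitted when running with -v=2 or higher")
	}
}

// Invoked as: ./prog -log_dir=/var/log/prog -v=2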
+func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go new file mode 100644 index 00000000..e9cc2025 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go @@ -0,0 +1,1284 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "reflect" + "sort" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + + stpb "github.com/golang/protobuf/ptypes/struct" +) + +const secondInNanos = int64(time.Second / time.Nanosecond) +const maxSecondsInDuration = 315576000000 + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them. +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. 
+ EnumsAsInts bool
+
+ // Whether to render fields with zero values.
+ EmitDefaults bool
+
+ // A string to indent each level by. The presence of this field will
+ // also cause a space to appear between the field separator and
+ // value, and for newlines to appear between fields and array
+ // elements.
+ Indent string
+
+ // Whether to use the original (.proto) name for fields.
+ OrigName bool
+
+ // A custom URL resolver to use when marshaling Any messages to JSON.
+ // If unset, the default resolution strategy is to extract the
+ // fully-qualified type name from the type URL and pass that to
+ // proto.MessageType(string).
+ AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+ Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+ // Only the part of typeUrl after the last slash is relevant.
+ mname := typeUrl
+ if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+ mname = mname[slash+1:]
+ }
+ mt := proto.MessageType(mname)
+ if mt == nil {
+ return nil, fmt.Errorf("unknown message type %q", mname)
+ }
+ return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+ MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+// https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+ UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+ v := reflect.ValueOf(pb)
+ if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return errors.New("Marshal called with nil")
+ }
+ // Check for unset required fields first.
+ if err := checkRequiredFields(pb); err != nil {
+ return err
+ }
+ writer := &errWriter{writer: out}
+ return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+ var buf bytes.Buffer
+ if err := m.Marshal(&buf, pb); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+ `"NaN"`: math.NaN(),
+ `"Infinity"`: math.Inf(1),
+ `"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extensions ids to ensure stable output.
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+ XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
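// Editor's note: a minimal usage sketch for the public Marshaler API above,
// not part of the vendored diff. Duration is a real well-known type and
// exercises the special-case JSON mapping implemented in marshalObject below.

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	m := jsonpb.Marshaler{EmitDefaults: true, Indent: "  "}
	js, err := m.MarshalToString(&durpb.Duration{Seconds: 1, Nanos: 500000000})
	if err != nil {
		panic(err)
	}
	fmt.Println(js) // prints "1.500s": Duration marshals as a JSON string, not an object
}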
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if m.Indent != "" { + b, err = json.MarshalIndent(js, indent, m.Indent) + } else { + b, err = json.Marshal(js) + } + if err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. + if wkt, ok := v.(wkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + s, ns := s.Field(0).Int(), s.Field(1).Int() + if s < -maxSecondsInDuration || s > maxSecondsInDuration { + return fmt.Errorf("seconds out of range %v", s) + } + if ns <= -secondInNanos || ns >= secondInNanos { + return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos) + } + if (s > 0 && ns < 0) || (s < 0 && ns > 0) { + return errors.New("signs of seconds and nanos do not match") + } + // Generated output always contains 0, 3, 6, or 9 fractional digits, + // depending on required precision, followed by the suffix "s". + f := "%d.%09d" + if ns < 0 { + ns = -ns + if s == 0 { + f = "-%d.%09d" + } + } + x := fmt.Sprintf(f, s, ns) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 0, 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + if ns < 0 || ns >= secondInNanos { + return fmt.Errorf("ns out of range [0, %v)", secondInNanos) + } + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, ".000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. 
+ return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface: + if value.IsNil() { + continue + } + } + + if !m.EmitDefaults { + switch value.Kind() { + case reflect.Bool: + if !value.Bool() { + continue + } + case reflect.Int32, reflect.Int64: + if value.Int() == 0 { + continue + } + case reflect.Uint32, reflect.Uint64: + if value.Uint() == 0 { + continue + } + case reflect.Float32, reflect.Float64: + if value.Float() == 0 { + continue + } + case reflect.String: + if value.Len() == 0 { + continue + } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. + sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField, m.OrigName) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(proto.Message); ok { + extensions := proto.RegisteredExtensions(v) + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensions)) + for id, desc := range extensions { + if !proto.HasExtension(ep, desc) { + continue + } + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.JSONName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
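// Editor's note (illustrative, not part of the vendored diff): concretely,
// the two shapes produced by this function look like
//
//	{"@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s"}
//
// for a packed well-known type, and
//
//	{"@type": "type.googleapis.com/example.Thing", "name": "x"}
//
// for an ordinary message ("example.Thing" is a hypothetical type here).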
+ v := reflect.ValueOf(any).Elem()
+ turl := v.Field(0).String()
+ val := v.Field(1).Bytes()
+
+ var msg proto.Message
+ var err error
+ if m.AnyResolver != nil {
+ msg, err = m.AnyResolver.Resolve(turl)
+ } else {
+ msg, err = defaultResolveAny(turl)
+ }
+ if err != nil {
+ return err
+ }
+
+ if err := proto.Unmarshal(val, msg); err != nil {
+ return err
+ }
+
+ if _, ok := msg.(wkt); ok {
+ out.write("{")
+ if m.Indent != "" {
+ out.write("\n")
+ }
+ if err := m.marshalTypeURL(out, indent, turl); err != nil {
+ return err
+ }
+ m.writeSep(out)
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(`"value": `)
+ } else {
+ out.write(`"value":`)
+ }
+ if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+ return err
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ }
+ out.write("}")
+ return out.err
+ }
+
+ return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"@type":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ b, err := json.Marshal(typeURL)
+ if err != nil {
+ return err
+ }
+ out.write(string(b))
+ return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ if m.Indent != "" {
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write(`"`)
+ out.write(prop.JSONName)
+ out.write(`":`)
+ if m.Indent != "" {
+ out.write(" ")
+ }
+ if err := m.marshalValue(out, prop, v, indent); err != nil {
+ return err
+ }
+ return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+ var err error
+ v = reflect.Indirect(v)
+
+ // Handle nil pointer
+ if v.Kind() == reflect.Invalid {
+ out.write("null")
+ return out.err
+ }
+
+ // Handle repeated elements.
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ out.write("[")
+ comma := ""
+ for i := 0; i < v.Len(); i++ {
+ sliceVal := v.Index(i)
+ out.write(comma)
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ out.write(m.Indent)
+ }
+ if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+ return err
+ }
+ comma = ","
+ }
+ if m.Indent != "" {
+ out.write("\n")
+ out.write(indent)
+ out.write(m.Indent)
+ }
+ out.write("]")
+ return out.err
+ }
+
+ // Handle well-known types.
+ // Most are handled up in marshalObject (because 99% are messages).
+ if wkt, ok := v.Interface().(wkt); ok {
+ switch wkt.XXX_WellKnownType() {
+ case "NullValue":
+ out.write("null")
+ return out.err
+ }
+ }
+
+ // Handle enumerations.
+ if !m.EnumsAsInts && prop.Enum != "" {
+ // Unknown enum values are stringified by the proto library as their
+ // value. Such values should _not_ be quoted or they will be interpreted
+ // as an enum string instead of their value.
+ enumStr := v.Interface().(fmt.Stringer).String()
+ var valStr string
+ if v.Kind() == reflect.Ptr {
+ valStr = strconv.Itoa(int(v.Elem().Int()))
+ } else {
+ valStr = strconv.Itoa(int(v.Int()))
+ }
+ isKnownEnum := enumStr != valStr
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ out.write(enumStr)
+ if isKnownEnum {
+ out.write(`"`)
+ }
+ return out.err
+ }
+
+ // Handle nested messages.
+ if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "") + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. + if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + // TODO handle map key prop properly + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + vprop := prop + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshaler is a configurable object for converting from a JSON +// representation to a protocol buffer object. +type Unmarshaler struct { + // Whether to allow messages to contain unknown fields, as opposed to + // failing to unmarshal. + AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. +func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := dec.Decode(&inputValue); err != nil { + return err + } + if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil { + return err + } + return checkRequiredFields(pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error { + dec := json.NewDecoder(r) + return u.UnmarshalNext(dec, pb) +} + +// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. +// This function is lenient and will decode any options permutations of the +// related Marshaler. 
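// Editor's note: the decoding counterpart, sketched for illustration and not
// part of the vendored diff. It round-trips the Duration JSON form from the
// Marshaler example above; AllowUnknownFields is the lenient knob described
// in the Unmarshaler struct.

package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/jsonpb"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	var d durpb.Duration
	u := jsonpb.Unmarshaler{AllowUnknownFields: true}
	if err := u.Unmarshal(strings.NewReader(`"1.500s"`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Seconds, d.Nanos) // 1 500000000
}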
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error { + return new(Unmarshaler).UnmarshalNext(dec, pb) +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(r, pb) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. +// prop may be nil. +func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler { + return nil + } + target.Set(reflect.New(targetType.Elem())) + + return u.unmarshalValue(target.Elem(), inputValue, prop) + } + + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(wkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(wkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, err := json.Marshal(jsonFields) + if err != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := unquote(string(inputValue)) + if err != nil { + return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{})) + for k, jv := range m { + pv := &stpb.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v})) + } else if v, err := unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + 
target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &stpb.ListValue{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil { + sv := &stpb.Struct{} + target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. + // The case of an enum appearing as a number is handled + // at the bottom of this function. + if inputValue[0] == '"' && prop != nil && prop.Enum != "" { + vmap := proto.EnumValueMap(prop.Enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := inputValue[1 : len(inputValue)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum) + } + if target.Kind() == reflect.Ptr { // proto2 + target.Set(reflect.New(targetType.Elem())) + target = target.Elem() + } + if targetType.Kind() != reflect.Int32 { + return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum) + } + target.SetInt(int64(n)) + return nil + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + consumeField := func(prop *proto.Properties) (json.RawMessage, bool) { + // Be liberal in what names we accept; both orig_name and camelName are okay. + fieldNames := acceptedJSONFieldNames(prop) + + vOrig, okOrig := jsonFields[fieldNames.orig] + vCamel, okCamel := jsonFields[fieldNames.camel] + if !okOrig && !okCamel { + return nil, false + } + // If, for some reason, both are present in the data, favour the camelName. + var raw json.RawMessage + if okOrig { + raw = vOrig + delete(jsonFields, fieldNames.orig) + } + if okCamel { + raw = vCamel + delete(jsonFields, fieldNames.camel) + } + return raw, true + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + + valueForField, ok := consumeField(sprops.Prop[i]) + if !ok { + continue + } + + if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil { + return err + } + } + // Check for any oneof fields. + if len(jsonFields) > 0 { + for _, oop := range sprops.OneofTypes { + raw, ok := consumeField(oop.Prop) + if !ok { + continue + } + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil { + return err + } + } + } + // Handle proto2 extensions. 
+ if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } + if !u.AllowUnknownFields && len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + var kprop *proto.Properties + if prop != nil && prop.MapKeyProp != nil { + kprop = prop.MapKeyProp + } + if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + var vprop *proto.Properties + if prop != nil && prop.MapValProp != nil { + vprop = prop.MapValProp + } + if err := u.unmarshalValue(v, raw, vprop); err != nil { + return err + } + target.SetMapIndex(k, v) + } + } + return nil + } + + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + + // integers & floats can be encoded as strings. In this case we drop + // the quotes and proceed as normal. + isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 || + targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 || + targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +func unquote(s string) (string, error) { + var ret string + err := json.Unmarshal([]byte(s), &ret) + return ret, err +} + +// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute. 
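// Editor's note (illustrative, not part of the vendored diff): together with
// consumeField and acceptedJSONFieldNames below, this is what makes both
// spellings of a field acceptable on input. For a hypothetical field
//
//	string first_name = 1;
//
// both {"first_name": "Ada"} and {"firstName": "Ada"} unmarshal into the same
// field, and if both keys are present the camelCase one wins.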
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+ var prop proto.Properties
+ prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+ if origName || prop.JSONName == "" {
+ prop.JSONName = prop.OrigName
+ }
+ return &prop
+}
+
+type fieldNames struct {
+ orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+ opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+ if prop.JSONName != "" {
+ opts.camel = prop.JSONName
+ }
+ return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+ writer io.Writer
+ err error
+}
+
+func (w *errWriter) write(str string) {
+ if w.err != nil {
+ return
+ }
+ _, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+ if k := s[i].Kind(); k == s[j].Kind() {
+ switch k {
+ case reflect.String:
+ return s[i].String() < s[j].String()
+ case reflect.Int32, reflect.Int64:
+ return s[i].Int() < s[j].Int()
+ case reflect.Uint32, reflect.Uint64:
+ return s[i].Uint() < s[j].Uint()
+ }
+ }
+ return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal. While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+ // Most well-known type messages do not contain required fields. The "Any" type may contain
+ // a message that has required fields.
+ //
+ // When an Any message is being marshaled, the code will invoke proto.Unmarshal on Any.Value
+ // field in order to transform that into JSON, and that should have returned an error if a
+ // required field is not set in the embedded message.
+ //
+ // When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+ // embedded message to store the serialized message in Any.Value field, and that should have
+ // returned an error if a required field is not set.
+ if _, ok := pb.(wkt); ok {
+ return nil
+ }
+
+ v := reflect.ValueOf(pb)
+ // Skip message if it is not a struct pointer.
+ if v.Kind() != reflect.Ptr {
+ return nil
+ }
+ v = v.Elem()
+ if v.Kind() != reflect.Struct {
+ return nil
+ }
+
+ for i := 0; i < v.NumField(); i++ {
+ field := v.Field(i)
+ sfield := v.Type().Field(i)
+
+ if sfield.PkgPath != "" {
+ // blank PkgPath means the field is exported; skip if not exported
+ continue
+ }
+
+ if strings.HasPrefix(sfield.Name, "XXX_") {
+ continue
+ }
+
+ // Oneof field is an interface implemented by wrapper structs containing the actual oneof
+ // field, i.e. an interface containing &T{real_value}.
+ if sfield.Tag.Get("protobuf_oneof") != "" { + if field.Kind() != reflect.Interface { + continue + } + v := field.Elem() + if v.Kind() != reflect.Ptr || v.IsNil() { + continue + } + v = v.Elem() + if v.Kind() != reflect.Struct || v.NumField() < 1 { + continue + } + field = v.Field(0) + sfield = v.Type().Field(0) + } + + protoTag := sfield.Tag.Get("protobuf") + if protoTag == "" { + continue + } + var prop proto.Properties + prop.Init(sfield.Type, sfield.Name, protoTag, &sfield) + + switch field.Kind() { + case reflect.Map: + if field.IsNil() { + continue + } + // Check each map value. + keys := field.MapKeys() + for _, k := range keys { + v := field.MapIndex(k) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Slice: + // Handle non-repeated type, e.g. bytes. + if !prop.Repeated { + if prop.Required && field.IsNil() { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + + // Handle repeated type. + if field.IsNil() { + continue + } + // Check each slice item. + for i := 0; i < field.Len(); i++ { + v := field.Index(i) + if err := checkRequiredFieldsInValue(v); err != nil { + return err + } + } + case reflect.Ptr: + if field.IsNil() { + if prop.Required { + return fmt.Errorf("required field %q is not set", prop.Name) + } + continue + } + if err := checkRequiredFieldsInValue(field); err != nil { + return err + } + } + } + + // Handle proto2 extensions. + for _, ext := range proto.RegisteredExtensions(pb) { + if !proto.HasExtension(pb, ext) { + continue + } + ep, err := proto.GetExtension(pb, ext) + if err != nil { + return err + } + err = checkRequiredFieldsInValue(reflect.ValueOf(ep)) + if err != nil { + return err + } + } + + return nil +} + +func checkRequiredFieldsInValue(v reflect.Value) error { + if pm, ok := v.Interface().(proto.Message); ok { + return checkRequiredFields(pm) + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go new file mode 100644 index 00000000..6f4a902b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go @@ -0,0 +1,2806 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+ The code generator for the plugin for the Google protocol buffer compiler.
+ It generates Go code from the protocol buffer description files read by the
+ main routine.
+*/
+package generator
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "go/ast"
+ "go/build"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "log"
+ "os"
+ "path"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
+
+ "github.com/golang/protobuf/protoc-gen-go/descriptor"
+ plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// proto package is introduced; the generated code references
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 3
+
+// A Plugin provides functionality to add to the output during Go code generation,
+// such as to produce RPC stubs.
+type Plugin interface {
+ // Name identifies the plugin.
+ Name() string
+ // Init is called once after data structures are built but before
+ // code generation begins.
+ Init(g *Generator)
+ // Generate produces the code generated by the plugin for this file,
+ // except for the imports, by calling the generator's methods P, In, and Out.
+ Generate(file *FileDescriptor)
+ // GenerateImports produces the import declarations for this file.
+ // It is called after Generate.
+ GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+ plugins = append(plugins, p)
+}
+
+// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
+type GoImportPath string
+
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
+
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
+type GoPackageName string
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it. These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name methods are common to messages and enums.
+type common struct {
+ file *FileDescriptor // File this object comes from.
+}
+
+// GoImportPath is the import path of the Go package containing the type.
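// Editor's note: a minimal sketch of the Plugin extension point declared
// above, not part of the vendored diff. It assumes only the Plugin interface
// and RegisterPlugin from this file, plus the Generator.P printer.

package myplugin

import "github.com/golang/protobuf/protoc-gen-go/generator"

type hello struct{ gen *generator.Generator }

func init() { generator.RegisterPlugin(&hello{}) }

func (p *hello) Name() string                { return "hello" }
func (p *hello) Init(g *generator.Generator) { p.gen = g }

// Generate emits one comment per message in the file being generated.
func (p *hello) Generate(file *generator.FileDescriptor) {
	for _, msg := range file.MessageType {
		p.gen.P("// plugin hello saw message ", msg.GetName())
	}
}

func (p *hello) GenerateImports(file *generator.FileDescriptor) {}

// Enabled from protoc via the "plugins" parameter, e.g. --go_out=plugins=hello:.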
+func (c *common) GoImportPath() GoImportPath {
+ return c.file.importPath
+}
+
+func (c *common) File() *FileDescriptor { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+ return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+ common
+ *descriptor.DescriptorProto
+ parent *Descriptor // The containing message, if any.
+ nested []*Descriptor // Inner messages, if any.
+ enums []*EnumDescriptor // Inner enums, if any.
+ ext []*ExtensionDescriptor // Extensions, if any.
+ typename []string // Cached typename vector.
+ index int // The index into the container, whether the file or another message.
+ path string // The SourceCodeInfo path as comma-separated integers.
+ group bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+ if d.typename != nil {
+ return d.typename
+ }
+ n := 0
+ for parent := d; parent != nil; parent = parent.parent {
+ n++
+ }
+ s := make([]string, n)
+ for parent := d; parent != nil; parent = parent.parent {
+ n--
+ s[n] = parent.GetName()
+ }
+ d.typename = s
+ return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+ common
+ *descriptor.EnumDescriptorProto
+ parent *Descriptor // The containing message, if any.
+ typename []string // Cached typename vector.
+ index int // The index into the container, whether the file or a message.
+ path string // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+ if e.typename != nil {
+ return e.typename
+ }
+ name := e.GetName()
+ if e.parent == nil {
+ s = make([]string, 1)
+ } else {
+ pname := e.parent.TypeName()
+ s = make([]string, len(pname)+1)
+ copy(s, pname)
+ }
+ s[len(s)-1] = name
+ e.typename = s
+ return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+ if e.parent == nil {
+ // If the enum is not part of a message, the prefix is just the type name.
+ return CamelCase(*e.Name) + "_"
+ }
+ typeName := e.TypeName()
+ return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+ for _, c := range e.Value {
+ if c.GetName() == name {
+ return fmt.Sprint(c.GetNumber())
+ }
+ }
+ log.Fatal("cannot find value for enum constant")
+ return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+ common
+ *descriptor.FieldDescriptorProto
+ parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+ name := e.GetName()
+ if e.parent == nil {
+ // top-level extension
+ s = make([]string, 1)
+ } else {
+ pname := e.parent.TypeName()
+ s = make([]string, len(pname)+1)
+ copy(s, pname)
+ }
+ s[len(s)-1] = name
+ return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+ // The full type name.
+ typeName := e.TypeName()
+ // Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+ for i, s := range typeName {
+ typeName[i] = CamelCase(s)
+ }
+ return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+ common
+ o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+ *descriptor.FileDescriptorProto
+ desc []*Descriptor // All the messages defined in this file.
+ enum []*EnumDescriptor // All the enums defined in this file.
+ ext []*ExtensionDescriptor // All the top-level extensions defined in this file.
+ imp []*ImportedDescriptor // All types defined in files publicly imported by this file.
+
+ // Comments, stored as a map of path (comma-separated integers) to the comment.
+ comments map[string]*descriptor.SourceCodeInfo_Location
+
+ // The full list of symbols that are exported,
+ // as a map from the exported object to its symbols.
+ // This is used for supporting public imports.
+ exported map[Object][]symbol
+
+ importPath GoImportPath // Import path of this file's package.
+ packageName GoPackageName // Name of this file's Go package.
+
+ proto3 bool // whether to generate proto3 code for this file
+}
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string {
+ h := sha256.Sum256([]byte(d.GetName()))
+ return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
+}
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
+ opt := d.GetOptions().GetGoPackage()
+ if opt == "" {
+ return "", "", false
+ }
+ // A semicolon-delimited suffix delimits the import path and package name.
+ sc := strings.Index(opt, ";")
+ if sc >= 0 {
+ return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
+ }
+ // The presence of a slash implies there's an import path.
+ slash := strings.LastIndex(opt, "/")
+ if slash >= 0 {
+ return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
+ }
+ return "", cleanPackageName(opt), true
+}
+
+// goFileName returns the output name for the generated Go file.
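// Editor's note (illustrative, not part of the vendored diff): how
// goPackageOption above reads the three forms of go_package:
//
//	option go_package = "example.com/foo/bar;quux"; // ("example.com/foo/bar", "quux", true)
//	option go_package = "example.com/foo/bar";      // ("example.com/foo/bar", "bar", true)
//	option go_package = "quux";                     // ("", "quux", true)
//	(option absent)                                 // ("", "", false)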
+func (d *FileDescriptor) goFileName(pathType pathType) string { + name := *d.Name + if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" { + name = name[:len(name)-len(ext)] + } + name += ".pb.go" + + if pathType == pathTypeSourceRelative { + return name + } + + // Does the file have a "go_package" option? + // If it does, it may override the filename. + if impPath, _, ok := d.goPackageOption(); ok && impPath != "" { + // Replace the existing dirname with the declared import path. + _, name = path.Split(name) + name = path.Join(string(impPath), name) + return name + } + + return name +} + +func (d *FileDescriptor) addExport(obj Object, sym symbol) { + d.exported[obj] = append(d.exported[obj], sym) +} + +// symbol is an interface representing an exported Go symbol. +type symbol interface { + // GenerateAlias should generate an appropriate alias + // for the symbol from the named package. + GenerateAlias(g *Generator, filename string, pkg GoPackageName) +} + +type messageSymbol struct { + sym string + hasExtensions, isMessageSet bool + oneofTypes []string +} + +type getterSymbol struct { + name string + typ string + typeName string // canonical name in proto world; empty for proto.Message and similar + genType bool // whether typ contains a generated type (message/group/enum) +} + +func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + g.P("// ", ms.sym, " from public import ", filename) + g.P("type ", ms.sym, " = ", pkg, ".", ms.sym) + for _, name := range ms.oneofTypes { + g.P("type ", name, " = ", pkg, ".", name) + } +} + +type enumSymbol struct { + name string + proto3 bool // Whether this came from a proto3 file. +} + +func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + s := es.name + g.P("// ", s, " from public import ", filename) + g.P("type ", s, " = ", pkg, ".", s) + g.P("var ", s, "_name = ", pkg, ".", s, "_name") + g.P("var ", s, "_value = ", pkg, ".", s, "_value") +} + +type constOrVarSymbol struct { + sym string + typ string // either "const" or "var" + cast string // if non-empty, a type cast is required (used for enums) +} + +func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) { + v := string(pkg) + "." + cs.sym + if cs.cast != "" { + v = cs.cast + "(" + v + ")" + } + g.P(cs.typ, " ", cs.sym, " = ", v) +} + +// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects. +type Object interface { + GoImportPath() GoImportPath + TypeName() []string + File() *FileDescriptor +} + +// Generator is the type whose methods generate the output, stored in the associated response structure. +type Generator struct { + *bytes.Buffer + + Request *plugin.CodeGeneratorRequest // The input. + Response *plugin.CodeGeneratorResponse // The output. + + Param map[string]string // Command-line parameters. + PackageImportPath string // Go import path of the package we're generating code for + ImportPrefix string // String to prefix to imported package file names. + ImportMap map[string]string // Mapping from .proto file name to import path + + Pkg map[string]string // The names under which we import support packages + + outputImportPath GoImportPath // Package we're generating code for. + allFiles []*FileDescriptor // All files in the tree + allFilesByName map[string]*FileDescriptor // All files by filename. + genFiles []*FileDescriptor // Those files we will generate output for. 
+ file *FileDescriptor // The file we are compiling now. + packageNames map[GoImportPath]GoPackageName // Imported package names in the current file. + usedPackages map[GoImportPath]bool // Packages used in current file. + usedPackageNames map[GoPackageName]bool // Package names used in the current file. + addedImports map[GoImportPath]bool // Additional imports to emit. + typeNameToObject map[string]Object // Key is a fully-qualified name in input syntax. + init []string // Lines to emit in the init function. + indent string + pathType pathType // How to generate output filenames. + writeOutput bool + annotateCode bool // whether to store annotations + annotations []*descriptor.GeneratedCodeInfo_Annotation // annotations to store +} + +type pathType int + +const ( + pathTypeImport pathType = iota + pathTypeSourceRelative +) + +// New creates a new generator and allocates the request and response protobufs. +func New() *Generator { + g := new(Generator) + g.Buffer = new(bytes.Buffer) + g.Request = new(plugin.CodeGeneratorRequest) + g.Response = new(plugin.CodeGeneratorResponse) + return g +} + +// Error reports a problem, including an error, and exits the program. +func (g *Generator) Error(err error, msgs ...string) { + s := strings.Join(msgs, " ") + ":" + err.Error() + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// Fail reports a problem and exits the program. +func (g *Generator) Fail(msgs ...string) { + s := strings.Join(msgs, " ") + log.Print("protoc-gen-go: error:", s) + os.Exit(1) +} + +// CommandLineParameters breaks the comma-separated list of key=value pairs +// in the parameter (a member of the request protobuf) into a key/value map. +// It then sets file name mappings defined by those entries. +func (g *Generator) CommandLineParameters(parameter string) { + g.Param = make(map[string]string) + for _, p := range strings.Split(parameter, ",") { + if i := strings.Index(p, "="); i < 0 { + g.Param[p] = "" + } else { + g.Param[p[0:i]] = p[i+1:] + } + } + + g.ImportMap = make(map[string]string) + pluginList := "none" // Default list of plugin names to enable (empty means all). + for k, v := range g.Param { + switch k { + case "import_prefix": + g.ImportPrefix = v + case "import_path": + g.PackageImportPath = v + case "paths": + switch v { + case "import": + g.pathType = pathTypeImport + case "source_relative": + g.pathType = pathTypeSourceRelative + default: + g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v)) + } + case "plugins": + pluginList = v + case "annotate_code": + if v == "true" { + g.annotateCode = true + } + default: + if len(k) > 0 && k[0] == 'M' { + g.ImportMap[k[1:]] = v + } + } + } + if pluginList != "" { + // Amend the set of plugins. + enabled := make(map[string]bool) + for _, name := range strings.Split(pluginList, "+") { + enabled[name] = true + } + var nplugins []Plugin + for _, p := range plugins { + if enabled[p.Name()] { + nplugins = append(nplugins, p) + } + } + plugins = nplugins + } +} + +// DefaultPackageName returns the package name printed for the object. +// If its file is in a different package, it returns the package name we're using for this file, plus ".". +// Otherwise it returns the empty string. +func (g *Generator) DefaultPackageName(obj Object) string { + importPath := obj.GoImportPath() + if importPath == g.outputImportPath { + return "" + } + return string(g.GoPackageName(importPath)) + "." +} + +// GoPackageName returns the name used for a package. 
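// Editor's note (illustrative, not part of the vendored diff): the parameter
// string parsed by CommandLineParameters above is the part of a protoc
// invocation before the output directory, e.g.
//
//	protoc --go_out=paths=source_relative,Mfoo/a.proto=example.com/foo/a,plugins=grpc:out/
//
// "M"-prefixed keys fill g.ImportMap, "paths" selects the pathType, and
// "plugins" filters the registered Plugin list ("foo/a.proto" and
// "example.com/foo/a" are hypothetical names).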
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName { + if name, ok := g.packageNames[importPath]; ok { + return name + } + name := cleanPackageName(baseName(string(importPath))) + for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + g.packageNames[importPath] = name + g.usedPackageNames[name] = true + return name +} + +// AddImport adds a package to the generated file's import section. +// It returns the name used for the package. +func (g *Generator) AddImport(importPath GoImportPath) GoPackageName { + g.addedImports[importPath] = true + return g.GoPackageName(importPath) +} + +var globalPackageNames = map[GoPackageName]bool{ + "fmt": true, + "math": true, + "proto": true, +} + +// Create and remember a guaranteed unique package name. Pkg is the candidate name. +// The FileDescriptor parameter is unused. +func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + name := cleanPackageName(pkg) + for i, orig := 1, name; globalPackageNames[name]; i++ { + name = orig + GoPackageName(strconv.Itoa(i)) + } + globalPackageNames[name] = true + return string(name) +} + +var isGoKeyword = map[string]bool{ + "break": true, + "case": true, + "chan": true, + "const": true, + "continue": true, + "default": true, + "else": true, + "defer": true, + "fallthrough": true, + "for": true, + "func": true, + "go": true, + "goto": true, + "if": true, + "import": true, + "interface": true, + "map": true, + "package": true, + "range": true, + "return": true, + "select": true, + "struct": true, + "switch": true, + "type": true, + "var": true, +} + +var isGoPredeclaredIdentifier = map[string]bool{ + "append": true, + "bool": true, + "byte": true, + "cap": true, + "close": true, + "complex": true, + "complex128": true, + "complex64": true, + "copy": true, + "delete": true, + "error": true, + "false": true, + "float32": true, + "float64": true, + "imag": true, + "int": true, + "int16": true, + "int32": true, + "int64": true, + "int8": true, + "iota": true, + "len": true, + "make": true, + "new": true, + "nil": true, + "panic": true, + "print": true, + "println": true, + "real": true, + "recover": true, + "rune": true, + "string": true, + "true": true, + "uint": true, + "uint16": true, + "uint32": true, + "uint64": true, + "uint8": true, + "uintptr": true, +} + +func cleanPackageName(name string) GoPackageName { + name = strings.Map(badToUnderscore, name) + // Identifier must not be keyword or predeclared identifier: insert _. + if isGoKeyword[name] { + name = "_" + name + } + // Identifier must not begin with digit: insert _. + if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) { + name = "_" + name + } + return GoPackageName(name) +} + +// defaultGoPackage returns the package name to use, +// derived from the import path of the package we're building code for. +func (g *Generator) defaultGoPackage() GoPackageName { + p := g.PackageImportPath + if i := strings.LastIndex(p, "/"); i >= 0 { + p = p[i+1:] + } + return cleanPackageName(p) +} + +// SetPackageNames sets the package name for this run. +// The package name must agree across all files being generated. +// It also defines unique package names for all imported files. 
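+//
+// The name is chosen with the following precedence: an explicit go_package
+// option, a go_package option from another file in the same package, the
+// import_path command-line flag, the proto package statement, and finally
+// the source file name.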
+func (g *Generator) SetPackageNames() { + g.outputImportPath = g.genFiles[0].importPath + + defaultPackageNames := make(map[GoImportPath]GoPackageName) + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + defaultPackageNames[f.importPath] = p + } + } + for _, f := range g.genFiles { + if _, p, ok := f.goPackageOption(); ok { + // Source file: option go_package = "quux/bar"; + f.packageName = p + } else if p, ok := defaultPackageNames[f.importPath]; ok { + // A go_package option in another file in the same package. + // + // This is a poor choice in general, since every source file should + // contain a go_package option. Supported mainly for historical + // compatibility. + f.packageName = p + } else if p := g.defaultGoPackage(); p != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets a package name for files which don't + // contain a go_package option. + f.packageName = p + } else if p := f.GetPackage(); p != "" { + // Source file: package quux.bar; + f.packageName = cleanPackageName(p) + } else { + // Source filename. + f.packageName = cleanPackageName(baseName(f.GetName())) + } + } + + // Check that all files have a consistent package name and import path. + for _, f := range g.genFiles[1:] { + if a, b := g.genFiles[0].importPath, f.importPath; a != b { + g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b)) + } + if a, b := g.genFiles[0].packageName, f.packageName; a != b { + g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b)) + } + } + + // Names of support packages. These never vary (if there are conflicts, + // we rename the conflicting package), so this could be removed someday. + g.Pkg = map[string]string{ + "fmt": "fmt", + "math": "math", + "proto": "proto", + } +} + +// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos +// and FileDescriptorProtos into file-referenced objects within the Generator. +// It also creates the list of files to generate and so should be called before GenerateAllFiles. +func (g *Generator) WrapTypes() { + g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile)) + g.allFilesByName = make(map[string]*FileDescriptor, len(g.allFiles)) + genFileNames := make(map[string]bool) + for _, n := range g.Request.FileToGenerate { + genFileNames[n] = true + } + for _, f := range g.Request.ProtoFile { + fd := &FileDescriptor{ + FileDescriptorProto: f, + exported: make(map[Object][]symbol), + proto3: fileIsProto3(f), + } + // The import path may be set in a number of ways. + if substitution, ok := g.ImportMap[f.GetName()]; ok { + // Command-line: M=foo.proto=quux/bar. + // + // Explicit mapping of source file to import path. + fd.importPath = GoImportPath(substitution) + } else if genFileNames[f.GetName()] && g.PackageImportPath != "" { + // Command-line: import_path=quux/bar. + // + // The import_path flag sets the import path for every file that + // we generate code for. + fd.importPath = GoImportPath(g.PackageImportPath) + } else if p, _, _ := fd.goPackageOption(); p != "" { + // Source file: option go_package = "quux/bar"; + // + // The go_package option sets the import path. Most users should use this. + fd.importPath = p + } else { + // Source filename. + // + // Last resort when nothing else is available. 
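+			// For example, "protos/foo.proto" maps to the import path "protos".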
+ fd.importPath = GoImportPath(path.Dir(f.GetName())) + } + // We must wrap the descriptors before we wrap the enums + fd.desc = wrapDescriptors(fd) + g.buildNestedDescriptors(fd.desc) + fd.enum = wrapEnumDescriptors(fd, fd.desc) + g.buildNestedEnums(fd.desc, fd.enum) + fd.ext = wrapExtensions(fd) + extractComments(fd) + g.allFiles = append(g.allFiles, fd) + g.allFilesByName[f.GetName()] = fd + } + for _, fd := range g.allFiles { + fd.imp = wrapImported(fd, g) + } + + g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate)) + for _, fileName := range g.Request.FileToGenerate { + fd := g.allFilesByName[fileName] + if fd == nil { + g.Fail("could not find file named", fileName) + } + g.genFiles = append(g.genFiles, fd) + } +} + +// Scan the descriptors in this file. For each one, build the slice of nested descriptors +func (g *Generator) buildNestedDescriptors(descs []*Descriptor) { + for _, desc := range descs { + if len(desc.NestedType) != 0 { + for _, nest := range descs { + if nest.parent == desc { + desc.nested = append(desc.nested, nest) + } + } + if len(desc.nested) != len(desc.NestedType) { + g.Fail("internal error: nesting failure for", desc.GetName()) + } + } + } +} + +func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) { + for _, desc := range descs { + if len(desc.EnumType) != 0 { + for _, enum := range enums { + if enum.parent == desc { + desc.enums = append(desc.enums, enum) + } + } + if len(desc.enums) != len(desc.EnumType) { + g.Fail("internal error: enum nesting failure for", desc.GetName()) + } + } + } +} + +// Construct the Descriptor +func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor { + d := &Descriptor{ + common: common{file}, + DescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + d.path = fmt.Sprintf("%d,%d", messagePath, index) + } else { + d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index) + } + + // The only way to distinguish a group from a message is whether + // the containing message has a TYPE_GROUP field that matches. + if parent != nil { + parts := d.TypeName() + if file.Package != nil { + parts = append([]string{*file.Package}, parts...) + } + exp := "." 
+ strings.Join(parts, ".") + for _, field := range parent.Field { + if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp { + d.group = true + break + } + } + } + + for _, field := range desc.Extension { + d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d}) + } + + return d +} + +// Return a slice of all the Descriptors defined within this file +func wrapDescriptors(file *FileDescriptor) []*Descriptor { + sl := make([]*Descriptor, 0, len(file.MessageType)+10) + for i, desc := range file.MessageType { + sl = wrapThisDescriptor(sl, desc, nil, file, i) + } + return sl +} + +// Wrap this Descriptor, recursively +func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor { + sl = append(sl, newDescriptor(desc, parent, file, index)) + me := sl[len(sl)-1] + for i, nested := range desc.NestedType { + sl = wrapThisDescriptor(sl, nested, me, file, i) + } + return sl +} + +// Construct the EnumDescriptor +func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor { + ed := &EnumDescriptor{ + common: common{file}, + EnumDescriptorProto: desc, + parent: parent, + index: index, + } + if parent == nil { + ed.path = fmt.Sprintf("%d,%d", enumPath, index) + } else { + ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index) + } + return ed +} + +// Return a slice of all the EnumDescriptors defined within this file +func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor { + sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10) + // Top-level enums. + for i, enum := range file.EnumType { + sl = append(sl, newEnumDescriptor(enum, nil, file, i)) + } + // Enums within messages. Enums within embedded messages appear in the outer-most message. + for _, nested := range descs { + for i, enum := range nested.EnumType { + sl = append(sl, newEnumDescriptor(enum, nested, file, i)) + } + } + return sl +} + +// Return a slice of all the top-level ExtensionDescriptors defined within this file. +func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor { + var sl []*ExtensionDescriptor + for _, field := range file.Extension { + sl = append(sl, &ExtensionDescriptor{common{file}, field, nil}) + } + return sl +} + +// Return a slice of all the types that are publicly imported into this file. +func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) { + for _, index := range file.PublicDependency { + df := g.fileByName(file.Dependency[index]) + for _, d := range df.desc { + if d.GetOptions().GetMapEntry() { + continue + } + sl = append(sl, &ImportedDescriptor{common{file}, d}) + } + for _, e := range df.enum { + sl = append(sl, &ImportedDescriptor{common{file}, e}) + } + for _, ext := range df.ext { + sl = append(sl, &ImportedDescriptor{common{file}, ext}) + } + } + return +} + +func extractComments(file *FileDescriptor) { + file.comments = make(map[string]*descriptor.SourceCodeInfo_Location) + for _, loc := range file.GetSourceCodeInfo().GetLocation() { + if loc.LeadingComments == nil { + continue + } + var p []string + for _, n := range loc.Path { + p = append(p, strconv.Itoa(int(n))) + } + file.comments[strings.Join(p, ",")] = loc + } +} + +// BuildTypeNameMap builds the map from fully qualified type names to objects. +// The key names for the map come from the input data, which puts a period at the beginning. 
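+// For example, message Bar nested in message Foo in package quux is keyed
+// as ".quux.Foo.Bar".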
+// It should be called after SetPackageNames and before GenerateAllFiles. +func (g *Generator) BuildTypeNameMap() { + g.typeNameToObject = make(map[string]Object) + for _, f := range g.allFiles { + // The names in this loop are defined by the proto world, not us, so the + // package name may be empty. If so, the dotted package name of X will + // be ".X"; otherwise it will be ".pkg.X". + dottedPkg := "." + f.GetPackage() + if dottedPkg != "." { + dottedPkg += "." + } + for _, enum := range f.enum { + name := dottedPkg + dottedSlice(enum.TypeName()) + g.typeNameToObject[name] = enum + } + for _, desc := range f.desc { + name := dottedPkg + dottedSlice(desc.TypeName()) + g.typeNameToObject[name] = desc + } + } +} + +// ObjectNamed, given a fully-qualified input type name as it appears in the input data, +// returns the descriptor for the message or enum with that name. +func (g *Generator) ObjectNamed(typeName string) Object { + o, ok := g.typeNameToObject[typeName] + if !ok { + g.Fail("can't find object with type", typeName) + } + return o +} + +// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated. +type AnnotatedAtoms struct { + source string + path string + atoms []interface{} +} + +// Annotate records the file name and proto AST path of a list of atoms +// so that a later call to P can emit a link from each atom to its origin. +func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms { + return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms} +} + +// printAtom prints the (atomic, non-annotation) argument to the generated output. +func (g *Generator) printAtom(v interface{}) { + switch v := v.(type) { + case string: + g.WriteString(v) + case *string: + g.WriteString(*v) + case bool: + fmt.Fprint(g, v) + case *bool: + fmt.Fprint(g, *v) + case int: + fmt.Fprint(g, v) + case *int32: + fmt.Fprint(g, *v) + case *int64: + fmt.Fprint(g, *v) + case float64: + fmt.Fprint(g, v) + case *float64: + fmt.Fprint(g, *v) + case GoPackageName: + g.WriteString(string(v)) + case GoImportPath: + g.WriteString(strconv.Quote(string(v))) + default: + g.Fail(fmt.Sprintf("unknown type in printer: %T", v)) + } +} + +// P prints the arguments to the generated output. It handles strings and int32s, plus +// handling indirections because they may be *string, etc. Any inputs of type AnnotatedAtoms may emit +// annotations in a .meta file in addition to outputting the atoms themselves (if g.annotateCode +// is true). +func (g *Generator) P(str ...interface{}) { + if !g.writeOutput { + return + } + g.WriteString(g.indent) + for _, v := range str { + switch v := v.(type) { + case *AnnotatedAtoms: + begin := int32(g.Len()) + for _, v := range v.atoms { + g.printAtom(v) + } + if g.annotateCode { + end := int32(g.Len()) + var path []int32 + for _, token := range strings.Split(v.path, ",") { + val, err := strconv.ParseInt(token, 10, 32) + if err != nil { + g.Fail("could not parse proto AST path: ", err.Error()) + } + path = append(path, int32(val)) + } + g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{ + Path: path, + SourceFile: &v.source, + Begin: &begin, + End: &end, + }) + } + default: + g.printAtom(v) + } + } + g.WriteByte('\n') +} + +// addInitf stores the given statement to be printed inside the file's init function. +// The statement is given as a format specifier and arguments. 
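+// For example, generateMessage registers each generated type with
+//	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)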
+func (g *Generator) addInitf(stmt string, a ...interface{}) { + g.init = append(g.init, fmt.Sprintf(stmt, a...)) +} + +// In Indents the output one tab stop. +func (g *Generator) In() { g.indent += "\t" } + +// Out unindents the output one tab stop. +func (g *Generator) Out() { + if len(g.indent) > 0 { + g.indent = g.indent[1:] + } +} + +// GenerateAllFiles generates the output for all the files we're outputting. +func (g *Generator) GenerateAllFiles() { + // Initialize the plugins + for _, p := range plugins { + p.Init(g) + } + // Generate the output. The generator runs for every file, even the files + // that we don't generate output for, so that we can collate the full list + // of exported symbols to support public imports. + genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles)) + for _, file := range g.genFiles { + genFileMap[file] = true + } + for _, file := range g.allFiles { + g.Reset() + g.annotations = nil + g.writeOutput = genFileMap[file] + g.generate(file) + if !g.writeOutput { + continue + } + fname := file.goFileName(g.pathType) + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(fname), + Content: proto.String(g.String()), + }) + if g.annotateCode { + // Store the generated code annotations in text, as the protoc plugin protocol requires that + // strings contain valid UTF-8. + g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{ + Name: proto.String(file.goFileName(g.pathType) + ".meta"), + Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})), + }) + } + } +} + +// Run all the plugins associated with the file. +func (g *Generator) runPlugins(file *FileDescriptor) { + for _, p := range plugins { + p.Generate(file) + } +} + +// Fill the response protocol buffer with the generated output for all the files we're +// supposed to generate. +func (g *Generator) generate(file *FileDescriptor) { + g.file = file + g.usedPackages = make(map[GoImportPath]bool) + g.packageNames = make(map[GoImportPath]GoPackageName) + g.usedPackageNames = make(map[GoPackageName]bool) + g.addedImports = make(map[GoImportPath]bool) + for name := range globalPackageNames { + g.usedPackageNames[name] = true + } + + g.P("// This is a compile-time assertion to ensure that this generated file") + g.P("// is compatible with the proto package it is being compiled against.") + g.P("// A compilation error at this line likely means your copy of the") + g.P("// proto package needs to be updated.") + g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package") + g.P() + + for _, td := range g.file.imp { + g.generateImported(td) + } + for _, enum := range g.file.enum { + g.generateEnum(enum) + } + for _, desc := range g.file.desc { + // Don't generate virtual messages for maps. + if desc.GetOptions().GetMapEntry() { + continue + } + g.generateMessage(desc) + } + for _, ext := range g.file.ext { + g.generateExtension(ext) + } + g.generateInitFunction() + g.generateFileDescriptor(file) + + // Run the plugins before the imports so we know which imports are necessary. + g.runPlugins(file) + + // Generate header and imports last, though they appear first in the output. + rem := g.Buffer + remAnno := g.annotations + g.Buffer = new(bytes.Buffer) + g.annotations = nil + g.generateHeader() + g.generateImports() + if !g.writeOutput { + return + } + // Adjust the offsets for annotations displaced by the header and imports. 
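+	// At this point the buffer holds only the header and imports, so
+	// g.Len() is exactly the number of bytes the body was shifted by.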
+ for _, anno := range remAnno { + *anno.Begin += int32(g.Len()) + *anno.End += int32(g.Len()) + g.annotations = append(g.annotations, anno) + } + g.Write(rem.Bytes()) + + // Reformat generated code and patch annotation locations. + fset := token.NewFileSet() + original := g.Bytes() + if g.annotateCode { + // make a copy independent of g; we'll need it after Reset. + original = append([]byte(nil), original...) + } + fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments) + if err != nil { + // Print out the bad code with line numbers. + // This should never happen in practice, but it can while changing generated code, + // so consider this a debugging aid. + var src bytes.Buffer + s := bufio.NewScanner(bytes.NewReader(original)) + for line := 1; s.Scan(); line++ { + fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes()) + } + g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String()) + } + ast.SortImports(fset, fileAST) + g.Reset() + err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST) + if err != nil { + g.Fail("generated Go source code could not be reformatted:", err.Error()) + } + if g.annotateCode { + m, err := remap.Compute(original, g.Bytes()) + if err != nil { + g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error()) + } + for _, anno := range g.annotations { + new, ok := m.Find(int(*anno.Begin), int(*anno.End)) + if !ok { + g.Fail("span in formatted generated Go source code could not be mapped back to the original code") + } + *anno.Begin = int32(new.Pos) + *anno.End = int32(new.End) + } + } +} + +// Generate the header, including package definition +func (g *Generator) generateHeader() { + g.P("// Code generated by protoc-gen-go. DO NOT EDIT.") + if g.file.GetOptions().GetDeprecated() { + g.P("// ", g.file.Name, " is a deprecated file.") + } else { + g.P("// source: ", g.file.Name) + } + g.P() + g.PrintComments(strconv.Itoa(packagePath)) + g.P() + g.P("package ", g.file.packageName) + g.P() +} + +// deprecationComment is the standard comment added to deprecated +// messages, fields, enums, and enum values. +var deprecationComment = "// Deprecated: Do not use." + +// PrintComments prints any comments from the source .proto file. +// The path is a comma-separated list of integers. +// It returns an indication of whether any comments were printed. +// See descriptor.proto for its format. +func (g *Generator) PrintComments(path string) bool { + if !g.writeOutput { + return false + } + if c, ok := g.makeComments(path); ok { + g.P(c) + return true + } + return false +} + +// makeComments generates the comment string for the field, no "\n" at the end +func (g *Generator) makeComments(path string) (string, bool) { + loc, ok := g.file.comments[path] + if !ok { + return "", false + } + w := new(bytes.Buffer) + nl := "" + for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") { + fmt.Fprintf(w, "%s//%s", nl, line) + nl = "\n" + } + return w.String(), true +} + +func (g *Generator) fileByName(filename string) *FileDescriptor { + return g.allFilesByName[filename] +} + +// weak returns whether the ith import of the current file is a weak import. 
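+// Weak imports are skipped when generateImports emits the import block.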
+func (g *Generator) weak(i int32) bool { + for _, j := range g.file.WeakDependency { + if j == i { + return true + } + } + return false +} + +// Generate the imports +func (g *Generator) generateImports() { + imports := make(map[GoImportPath]GoPackageName) + for i, s := range g.file.Dependency { + fd := g.fileByName(s) + importPath := fd.importPath + // Do not import our own package. + if importPath == g.file.importPath { + continue + } + // Do not import weak imports. + if g.weak(int32(i)) { + continue + } + // Do not import a package twice. + if _, ok := imports[importPath]; ok { + continue + } + // We need to import all the dependencies, even if we don't reference them, + // because other code and tools depend on having the full transitive closure + // of protocol buffer types in the binary. + packageName := g.GoPackageName(importPath) + if _, ok := g.usedPackages[importPath]; !ok { + packageName = "_" + } + imports[importPath] = packageName + } + for importPath := range g.addedImports { + imports[importPath] = g.GoPackageName(importPath) + } + // We almost always need a proto import. Rather than computing when we + // do, which is tricky when there's a plugin, just import it and + // reference it later. The same argument applies to the fmt and math packages. + g.P("import (") + g.P(g.Pkg["fmt"] + ` "fmt"`) + g.P(g.Pkg["math"] + ` "math"`) + g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto") + for importPath, packageName := range imports { + g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath) + } + g.P(")") + g.P() + // TODO: may need to worry about uniqueness across plugins + for _, p := range plugins { + p.GenerateImports(g.file) + g.P() + } + g.P("// Reference imports to suppress errors if they are not otherwise used.") + g.P("var _ = ", g.Pkg["proto"], ".Marshal") + g.P("var _ = ", g.Pkg["fmt"], ".Errorf") + g.P("var _ = ", g.Pkg["math"], ".Inf") + g.P() +} + +func (g *Generator) generateImported(id *ImportedDescriptor) { + df := id.o.File() + filename := *df.Name + if df.importPath == g.file.importPath { + // Don't generate type aliases for files in the same Go package as this one. + return + } + if !supportTypeAliases { + g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename)) + } + g.usedPackages[df.importPath] = true + + for _, sym := range df.exported[id.o] { + sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath)) + } + + g.P() +} + +// Generate the enum definitions for this EnumDescriptor. +func (g *Generator) generateEnum(enum *EnumDescriptor) { + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. 
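+	// For a nested enum, CamelCaseSlice joins the names with underscores,
+	// e.g. ["Outer", "Color"] becomes "Outer_Color".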
+ ccTypeName := CamelCaseSlice(typeName) + ccPrefix := enum.prefix() + + deprecatedEnum := "" + if enum.GetOptions().GetDeprecated() { + deprecatedEnum = deprecationComment + } + g.PrintComments(enum.path) + g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum) + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + for i, e := range enum.Value { + etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i) + g.PrintComments(etorPath) + + deprecatedValue := "" + if e.GetOptions().GetDeprecated() { + deprecatedValue = deprecationComment + } + + name := ccPrefix + *e.Name + g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.P(")") + g.P() + g.P("var ", ccTypeName, "_name = map[int32]string{") + generated := make(map[int32]bool) // avoid duplicate values + for _, e := range enum.Value { + duplicate := "" + if _, present := generated[*e.Number]; present { + duplicate = "// Duplicate value: " + } + g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",") + generated[*e.Number] = true + } + g.P("}") + g.P() + g.P("var ", ccTypeName, "_value = map[string]int32{") + for _, e := range enum.Value { + g.P(strconv.Quote(*e.Name), ": ", e.Number, ",") + } + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {") + g.P("p := new(", ccTypeName, ")") + g.P("*p = x") + g.P("return p") + g.P("}") + g.P() + } + + g.P("func (x ", ccTypeName, ") String() string {") + g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))") + g.P("}") + g.P() + + if !enum.proto3() { + g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {") + g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`) + g.P("if err != nil {") + g.P("return err") + g.P("}") + g.P("*x = ", ccTypeName, "(value)") + g.P("return nil") + g.P("}") + g.P() + } + + var indexes []string + for m := enum.parent; m != nil; m = m.parent { + // XXX: skip groups? + indexes = append([]string{strconv.Itoa(m.index)}, indexes...) + } + indexes = append(indexes, strconv.Itoa(enum.index)) + g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {") + g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}") + g.P("}") + g.P() + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + g.P() + } + + g.generateEnumRegistration(enum) +} + +// The tag is a string like "varint,2,opt,name=fieldname,def=7" that +// identifies details of the field for the protocol buffer marshaling and unmarshaling +// code. The fields are: +// wire encoding +// protocol tag number +// opt,req,rep for optional, required, or repeated +// packed whether the encoding is "packed" (optional; repeated primitives only) +// name= the original declared name +// enum= the name of the enum type if it is an enum-typed field. +// proto3 if this field is in a proto3 message +// def= string representation of the default value, if any. +// The default value must be in a representation that can be used at run-time +// to generate the default value. Thus bools become 0 and 1, for instance. 
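+//
+// For example, the proto2 field
+//	optional string name = 4 [default="x"];
+// produces the tag "bytes,4,opt,name=name,def=x".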
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string { + optrepreq := "" + switch { + case isOptional(field): + optrepreq = "opt" + case isRequired(field): + optrepreq = "req" + case isRepeated(field): + optrepreq = "rep" + } + var defaultValue string + if dv := field.DefaultValue; dv != nil { // set means an explicit default + defaultValue = *dv + // Some types need tweaking. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_BOOL: + if defaultValue == "true" { + defaultValue = "1" + } else { + defaultValue = "0" + } + case descriptor.FieldDescriptorProto_TYPE_STRING, + descriptor.FieldDescriptorProto_TYPE_BYTES: + // Nothing to do. Quoting is done for the whole tag. + case descriptor.FieldDescriptorProto_TYPE_ENUM: + // For enums we need to provide the integer constant. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + // It is an enum that was publicly imported. + // We need the underlying type. + obj = id.o + } + enum, ok := obj.(*EnumDescriptor) + if !ok { + log.Printf("obj is a %T", obj) + if id, ok := obj.(*ImportedDescriptor); ok { + log.Printf("id.o is a %T", id.o) + } + g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName())) + } + defaultValue = enum.integerValueAsString(defaultValue) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { + if f, err := strconv.ParseFloat(defaultValue, 32); err == nil { + defaultValue = fmt.Sprint(float32(f)) + } + } + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" { + if f, err := strconv.ParseFloat(defaultValue, 64); err == nil { + defaultValue = fmt.Sprint(f) + } + } + } + defaultValue = ",def=" + defaultValue + } + enum := "" + if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM { + // We avoid using obj.GoPackageName(), because we want to use the + // original (proto-world) package name. + obj := g.ObjectNamed(field.GetTypeName()) + if id, ok := obj.(*ImportedDescriptor); ok { + obj = id.o + } + enum = ",enum=" + if pkg := obj.File().GetPackage(); pkg != "" { + enum += pkg + "." + } + enum += CamelCaseSlice(obj.TypeName()) + } + packed := "" + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." + (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && isScalar(field)) { + packed = ",packed" + } + fieldName := field.GetName() + name := fieldName + if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP { + // We must use the type name for groups instead of + // the field name to preserve capitalization. + // type_name in FieldDescriptorProto is fully-qualified, + // but we only want the local part. + name = *field.TypeName + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[i+1:] + } + } + if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name { + // TODO: escaping might be needed, in which case + // perhaps this should be in its own "json" tag. 
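+		// The json= component is emitted only for non-extension fields
+		// whose JSON name differs from the proto field name.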
+ name += ",json=" + json + } + name = ",name=" + name + if message.proto3() { + name += ",proto3" + } + oneof := "" + if field.OneofIndex != nil { + oneof = ",oneof" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s", + wiretype, + field.GetNumber(), + optrepreq, + packed, + name, + enum, + oneof, + defaultValue)) +} + +func needsStar(typ descriptor.FieldDescriptorProto_Type) bool { + switch typ { + case descriptor.FieldDescriptorProto_TYPE_GROUP: + return false + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + return false + case descriptor.FieldDescriptorProto_TYPE_BYTES: + return false + } + return true +} + +// TypeName is the printed name appropriate for an item. If the object is in the current file, +// TypeName drops the package name and underscores the rest. +// Otherwise the object is from another package; and the result is the underscored +// package name followed by the item name. +// The result always has an initial capital. +func (g *Generator) TypeName(obj Object) string { + return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName()) +} + +// GoType returns a string representing the type name, and the wire type +func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) { + // TODO: Options. + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + typ, wire = "float64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + typ, wire = "float32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_INT64: + typ, wire = "int64", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT64: + typ, wire = "uint64", "varint" + case descriptor.FieldDescriptorProto_TYPE_INT32: + typ, wire = "int32", "varint" + case descriptor.FieldDescriptorProto_TYPE_UINT32: + typ, wire = "uint32", "varint" + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + typ, wire = "uint64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + typ, wire = "uint32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_BOOL: + typ, wire = "bool", "varint" + case descriptor.FieldDescriptorProto_TYPE_STRING: + typ, wire = "string", "bytes" + case descriptor.FieldDescriptorProto_TYPE_GROUP: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "group" + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = "*"+g.TypeName(desc), "bytes" + case descriptor.FieldDescriptorProto_TYPE_BYTES: + typ, wire = "[]byte", "bytes" + case descriptor.FieldDescriptorProto_TYPE_ENUM: + desc := g.ObjectNamed(field.GetTypeName()) + typ, wire = g.TypeName(desc), "varint" + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + typ, wire = "int32", "fixed32" + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + typ, wire = "int64", "fixed64" + case descriptor.FieldDescriptorProto_TYPE_SINT32: + typ, wire = "int32", "zigzag32" + case descriptor.FieldDescriptorProto_TYPE_SINT64: + typ, wire = "int64", "zigzag64" + default: + g.Fail("unknown type for", field.GetName()) + } + if isRepeated(field) { + typ = "[]" + typ + } else if message != nil && message.proto3() { + return + } else if field.OneofIndex != nil && message != nil { + return + } else if needsStar(*field.Type) { + typ = "*" + typ + } + return +} + +func (g *Generator) RecordTypeUse(t string) { + if _, ok := g.typeNameToObject[t]; !ok { + return + } + importPath := g.ObjectNamed(t).GoImportPath() + if importPath == g.outputImportPath { + // Don't record use of objects in our 
package.
+		return
+	}
+	g.AddImport(importPath)
+	g.usedPackages[importPath] = true
+}
+
+// Method names that may be generated.  Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+	"Reset",
+	"String",
+	"ProtoMessage",
+	"Marshal",
+	"Unmarshal",
+	"ExtensionRangeArray",
+	"ExtensionMap",
+	"Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+	"Any":       true,
+	"Duration":  true,
+	"Empty":     true,
+	"Struct":    true,
+	"Timestamp": true,
+
+	"Value":       true,
+	"ListValue":   true,
+	"DoubleValue": true,
+	"FloatValue":  true,
+	"Int64Value":  true,
+	"UInt64Value": true,
+	"Int32Value":  true,
+	"UInt32Value": true,
+	"BoolValue":   true,
+	"StringValue": true,
+	"BytesValue":  true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it's a built-in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName"
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+	if isRepeated(field) {
+		return "nil"
+	}
+	if def := field.GetDefaultValue(); def != "" {
+		defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+			return defaultConstant
+		}
+		return "append([]byte(nil), " + defaultConstant + "...)"
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "false"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return `""`
+	case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "nil"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		obj := g.ObjectNamed(field.GetTypeName())
+		var enum *EnumDescriptor
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			// The enum type has been publicly imported.
+			enum, _ = id.o.(*EnumDescriptor)
+		} else {
+			enum, _ = obj.(*EnumDescriptor)
+		}
+		if enum == nil {
+			log.Printf("don't know how to generate getter for %s", field.GetName())
+			return "nil"
+		}
+		if len(enum.Value) == 0 {
+			return "0 // empty enum"
+		}
+		first := enum.Value[0].GetName()
+		return g.DefaultPackageName(obj) + enum.prefix() + first
+	default:
+		return "0"
+	}
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+	return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them.
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant, a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are however handled separately.
+//
+// simpleField - a field that is neither weak nor oneof, possibly repeated
+// oneofField - field containing list of subfields:
+//   - oneofSubField - a field within the oneof
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+	goName string // Go struct name of the message, e.g. MessageName
+	message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+	goName     string // Go name of field, e.g. "FieldName" or "Descriptor_"
+	protoName  string // Name of field in proto language, e.g. "field_name" or "descriptor"
+	getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+	goType     string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+	tags       string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+	fullPath   string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+	return f.protoName
+}
+
+// getGoType returns the go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+	return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	deprecated    string                               // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+	comment       string                               // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+	g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+	star := ""
+	tname := f.goType
+	if needsStar(f.protoType) && tname[0] == '*' {
+		tname = tname[1:]
+		star = "*"
+	}
+	if f.deprecated != "" {
+		g.P(f.deprecated)
+	}
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+	if f.getterDef == "nil" { // Simpler getter
+		g.P("if m != nil {")
+		g.P("return m." + f.goName)
+		g.P("}")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	if mc.message.proto3() {
+		g.P("if m != nil {")
+	} else {
+		g.P("if m != nil && m." + f.goName + " != nil {")
+	}
+	g.P("return " + star + "m." + f.goName)
+	g.P("}")
+	g.P("return ", f.getterDef)
+	g.P("}")
+	g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+	// No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not
+// appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g "yoshi" or "5"
+	deprecated    string                               // Deprecation comment, if any.
+}
+
+// typedNil prints a nil cast to the pointer to this field.
+//   - for XXX_OneofWrappers
+func (f *oneofSubField) typedNil(g *Generator) {
+	g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofField represents the oneof at the top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+	fieldCommon
+	subFields []*oneofSubField // All the possible oneof fields
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n\\"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+	comment := f.comment
+	for _, sf := range f.subFields {
+		comment += "//\t*" + sf.oneofTypeName + "\n"
+	}
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field prints additional discriminators and interfaces
+// for the oneof, as well as the getters for the sub fields.
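+// The discriminator is an interface whose single method is unexported, so
+// only the wrapper types generated in the same package can satisfy it.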
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+	// The discriminator type
+	g.P("type ", f.goType, " interface {")
+	g.P(f.goType, "()")
+	g.P("}")
+	g.P()
+	// The subField types, fulfilling the discriminator type contract
+	for _, sf := range f.subFields {
+		g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+		g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+		g.P("}")
+		g.P()
+	}
+	for _, sf := range f.subFields {
+		g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+		g.P()
+	}
+	// Getter for the oneof field
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+	g.P("if m != nil { return m.", f.goName, " }")
+	g.P("return nil")
+	g.P("}")
+	g.P()
+	// Getters for each oneof
+	for _, sf := range f.subFields {
+		if sf.deprecated != "" {
+			g.P(sf.deprecated)
+		}
+		g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+		g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+		g.P("return x.", sf.goName)
+		g.P("}")
+		g.P("return ", sf.getterDef)
+		g.P("}")
+		g.P()
+	}
+}
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+	// No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
+	getter(g *Generator, mc *msgCtx) // print getter
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g "yoshi" or "5"
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
+	getGoType() string                                  // go type of the field as a string, e.g. "*int32"
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed,
+// which is only if the default value is explicit in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+	// Collect fields that can have defaults
+	dFields := []defField{}
+	for _, pf := range topLevelFields {
+		if f, ok := pf.(*oneofField); ok {
+			for _, osf := range f.subFields {
+				dFields = append(dFields, osf)
+			}
+			continue
+		}
+		dFields = append(dFields, pf.(defField))
+	}
+	for _, df := range dFields {
+		def := df.getProtoDef()
+		if def == "" {
+			continue
+		}
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+		typename := df.getGoType()
+		if typename[0] == '*' {
+			typename = typename[1:]
+		}
+		kind := "const "
+		switch {
+		case typename == "bool":
+		case typename == "string":
+			def = strconv.Quote(def)
+		case typename == "[]byte":
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+			kind = "var "
+		case def == "inf", def == "-inf", def == "nan":
+			// These names are known to, and defined by, the protocol language.
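+			// They map onto math.Inf and math.NaN; FLOAT fields get an
+			// additional float32 conversion below.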
+			switch def {
+			case "inf":
+				def = "math.Inf(1)"
+			case "-inf":
+				def = "math.Inf(-1)"
+			case "nan":
+				def = "math.NaN()"
+			}
+			if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+				def = "float32(" + def + ")"
+			}
+			kind = "var "
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if f, err := strconv.ParseFloat(def, 32); err == nil {
+				def = fmt.Sprint(float32(f))
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if f, err := strconv.ParseFloat(def, 64); err == nil {
+				def = fmt.Sprint(f)
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// Must be an enum. Need to construct the prefixed name.
+			obj := g.ObjectNamed(df.getProtoTypeName())
+			var enum *EnumDescriptor
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// The enum type has been publicly imported.
+				enum, _ = id.o.(*EnumDescriptor)
+			} else {
+				enum, _ = obj.(*EnumDescriptor)
+			}
+			if enum == nil {
+				log.Printf("don't know how to generate constant for %s", fieldname)
+				continue
+			}
+			def = g.DefaultPackageName(obj) + enum.prefix() + def
+		}
+		g.P(kind, fieldname, " ", typename, " = ", def)
+		g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+	}
+	g.P()
+}
+
+// generateInternalStructFields just adds the XXX_ fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+	g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+	if len(mc.message.ExtensionRange) > 0 {
+		messageset := ""
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+			messageset = "protobuf_messageset:\"1\" "
+		}
+		g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+	}
+	g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+	g.P("XXX_sizecache\tint32 `json:\"-\"`")
+}
+
+// generateOneofFuncs adds the XXX_OneofWrappers method for oneof support;
+// the wrapper types it returns are used by the proto package for
+// marshaling, unmarshaling and sizing.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+	ofields := []*oneofField{}
+	for _, f := range topLevelFields {
+		if o, ok := f.(*oneofField); ok {
+			ofields = append(ofields, o)
+		}
+	}
+	if len(ofields) == 0 {
+		return
+	}
+
+	// OneofFuncs
+	g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
+	g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
+	g.P("return []interface{}{")
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			sf.typedNil(g)
+		}
+	}
+	g.P("}")
+	g.P("}")
+	g.P()
+}
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+	comments := g.PrintComments(mc.message.path)
+
+	// Guarantee deprecation comments appear after user-provided comments.
+	if mc.message.GetOptions().GetDeprecated() {
+		if comments {
+			// Convention: Separate deprecation comments from original
+			// comments with an empty line.
+			g.P("//")
+		}
+		g.P(deprecationComment)
+	}
+
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+	for _, pf := range topLevelFields {
+		pf.decl(g, mc)
+	}
+	g.generateInternalStructFields(mc, topLevelFields)
+	g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.getter(g, mc)
+	}
+}
+
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.setter(g, mc)
+	}
+}
+
+// generateCommonMethods adds methods to the message that are not on a per-field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+	// Reset, String and ProtoMessage methods.
+	g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
+	g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+	g.P("func (*", mc.goName, ") ProtoMessage() {}")
+	var indexes []string
+	for m := mc.message; m != nil; m = m.parent {
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	// TODO: Revisit the decision to use a XXX_WellKnownType method
+	// if we change proto.MessageName to work with multiple equivalents.
+	if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
+		g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
+		g.P()
+	}
+
+	// Extension support methods
+	if len(mc.message.ExtensionRange) > 0 {
+		g.P()
+		g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+		for _, r := range mc.message.ExtensionRange {
+			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+			g.P("{Start: ", r.Start, ", End: ", end, "},")
+		}
+		g.P("}")
+		g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+		g.P("return extRange_", mc.goName)
+		g.P("}")
+		g.P()
+	}
+
+	// TODO: It does not scale to keep adding another method for every
+	// operation on protos that we want to switch over to using the
+	// table-driven approach. Instead, we should only add a single method
+	// that allows getting access to the *InternalMessageInfo struct and then
+	// calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
+
+	// Wrapper for table-driven marshaling and unmarshaling.
+	g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
+	g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
+	g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
+	g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
+	g.P("}")
+
+	g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
+	g.P()
+}
+
+// Generate the type, methods and default constant definitions for this Descriptor.
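+// The output order is: the struct itself, the common methods, the default
+// constants, the getters, the setters, and finally the oneof wrapper funcs.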
+func (g *Generator) generateMessage(message *Descriptor) { + topLevelFields := []topLevelField{} + oFields := make(map[int32]*oneofField) + // The full type name + typeName := message.TypeName() + // The full type name, CamelCased. + goTypeName := CamelCaseSlice(typeName) + + usedNames := make(map[string]bool) + for _, n := range methodNames { + usedNames[n] = true + } + + // allocNames finds a conflict-free variation of the given strings, + // consistently mutating their suffixes. + // It returns the same number of strings. + allocNames := func(ns ...string) []string { + Loop: + for { + for _, n := range ns { + if usedNames[n] { + for i := range ns { + ns[i] += "_" + } + continue Loop + } + } + for _, n := range ns { + usedNames[n] = true + } + return ns + } + } + + mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later + + // Build a structure more suitable for generating the text in one pass + for i, field := range message.Field { + // Allocate the getter and the field at the same time so name + // collisions create field/method consistent names. + // TODO: This allocation occurs based on the order of the fields + // in the proto file, meaning that a change in the field + // ordering can change generated Method/Field names. + base := CamelCase(*field.Name) + ns := allocNames(base, "Get"+base) + fieldName, fieldGetterName := ns[0], ns[1] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty") + + oneof := field.OneofIndex != nil + if oneof && oFields[*field.OneofIndex] == nil { + odp := message.OneofDecl[int(*field.OneofIndex)] + base := CamelCase(odp.GetName()) + fname := allocNames(base)[0] + + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex) + c, ok := g.makeComments(oneofFullPath) + if ok { + c += "\n//\n" + } + c += "// Types that are valid to be assigned to " + fname + ":\n" + // Generate the rest of this comment later, + // when we've computed any disambiguation. + + dname := "is" + goTypeName + "_" + fname + tag := `protobuf_oneof:"` + odp.GetName() + `"` + of := oneofField{ + fieldCommon: fieldCommon{ + goName: fname, + getterName: "Get"+fname, + goType: dname, + tags: tag, + protoName: odp.GetName(), + fullPath: oneofFullPath, + }, + comment: c, + } + topLevelFields = append(topLevelFields, &of) + oFields[*field.OneofIndex] = &of + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + // Figure out the Go types and tags for the key and value types. + keyField, valField := d.Field[0], d.Field[1] + keyType, keyWire := g.GoType(d, keyField) + valType, valWire := g.GoType(d, valField) + keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire) + + // We don't use stars, except for message-typed values. + // Message and enum types are the only two possibly foreign types used in maps, + // so record their use. They are not permitted as map keys. 
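+				// e.g. map<int32, OtherMessage> becomes map[int32]*OtherMessage,
+				// while map<string, string> becomes map[string]string.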
+ keyType = strings.TrimPrefix(keyType, "*") + switch *valField.Type { + case descriptor.FieldDescriptorProto_TYPE_ENUM: + valType = strings.TrimPrefix(valType, "*") + g.RecordTypeUse(valField.GetTypeName()) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + g.RecordTypeUse(valField.GetTypeName()) + default: + valType = strings.TrimPrefix(valType, "*") + } + + typename = fmt.Sprintf("map[%s]%s", keyType, valType) + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag) + } + } + + fieldDeprecated := "" + if field.GetOptions().GetDeprecated() { + fieldDeprecated = deprecationComment + } + + dvalue := g.getterDefault(field, goTypeName) + if oneof { + tname := goTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. + for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } + } + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } + } + if !ok { + tname += "_" + continue + } + break + } + + oneofField := oFields[*field.OneofIndex] + tag := "protobuf:" + g.goTag(message, field, wiretype) + sf := oneofSubField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i), + }, + protoTypeName: field.GetTypeName(), + fieldNumber: int(*field.Number), + protoType: *field.Type, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + oneofTypeName: tname, + deprecated: fieldDeprecated, + } + oneofField.subFields = append(oneofField.subFields, &sf) + g.RecordTypeUse(field.GetTypeName()) + continue + } + + fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i) + c, ok := g.makeComments(fieldFullPath) + if ok { + c += "\n" + } + rf := simpleField{ + fieldCommon: fieldCommon{ + goName: fieldName, + getterName: fieldGetterName, + goType: typename, + tags: tag, + protoName: field.GetName(), + fullPath: fieldFullPath, + }, + protoTypeName: field.GetTypeName(), + protoType: *field.Type, + deprecated: fieldDeprecated, + getterDef: dvalue, + protoDef: field.GetDefaultValue(), + comment: c, + } + var pf topLevelField = &rf + + topLevelFields = append(topLevelFields, pf) + g.RecordTypeUse(field.GetTypeName()) + } + + mc := &msgCtx{ + goName: goTypeName, + message: message, + } + + g.generateMessageStruct(mc, topLevelFields) + g.P() + g.generateCommonMethods(mc) + g.P() + g.generateDefaultConstants(mc, topLevelFields) + g.P() + g.generateGetters(mc, topLevelFields) + g.P() + g.generateSetters(mc, topLevelFields) + g.P() + g.generateOneofFuncs(mc, topLevelFields) + g.P() + + var oneofTypes []string + for _, f := range topLevelFields { + if of, ok := f.(*oneofField); ok { + for _, osf := range of.subFields { + oneofTypes = append(oneofTypes, osf.oneofTypeName) + } + } + } + + opts := message.Options + ms := &messageSymbol{ + sym: goTypeName, + hasExtensions: len(message.ExtensionRange) > 0, + isMessageSet: opts != nil && opts.GetMessageSetWireFormat(), + oneofTypes: oneofTypes, + } + g.file.addExport(message, ms) + + for _, ext := range message.ext { + g.generateExtension(ext) + } + + fullName := strings.Join(message.TypeName(), ".") + if g.file.Package != nil { + fullName = *g.file.Package + "." 
+ fullName +} + + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName) + // Register types for native map types. + for _, k := range mapFieldKeys(mapFieldTypes) { + fullName := strings.TrimPrefix(*k.TypeName, ".") + g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName) + } + +} + +type byTypeName []*descriptor.FieldDescriptorProto + +func (a byTypeName) Len() int { return len(a) } +func (a byTypeName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName } + +// mapFieldKeys returns the keys of m in a consistent order. +func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto { + keys := make([]*descriptor.FieldDescriptorProto, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Sort(byTypeName(keys)) + return keys +} + +var escapeChars = [256]byte{ + 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', +} + +// unescape reverses the "C" escaping that protoc does for default values of bytes fields. +// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape +// sequences are conveyed, unmodified, into the decoded result. +func unescape(s string) string { + // NB: Sadly, we can't use strconv.Unquote because protoc will escape both + // single and double quotes, but strconv.Unquote only allows one or the + // other (based on actual surrounding quotes of its input argument). + + var out []byte + for len(s) > 0 { + // regular character, or too short to be valid escape + if s[0] != '\\' || len(s) < 2 { + out = append(out, s[0]) + s = s[1:] + } else if c := escapeChars[s[1]]; c != 0 { + // escape sequence + out = append(out, c) + s = s[2:] + } else if s[1] == 'x' || s[1] == 'X' { + // hex escape, e.g., "\x80" + if len(s) < 4 { + // too short to be valid + out = append(out, s[:2]...) + s = s[2:] + continue + } + v, err := strconv.ParseUint(s[2:4], 16, 8) + if err != nil { + out = append(out, s[:4]...) + } else { + out = append(out, byte(v)) + } + s = s[4:] + } else if '0' <= s[1] && s[1] <= '7' { + // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" + // so consume up to 2 more bytes or up to end-of-string + n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(v)) + } + s = s[1+n:] + } else { + // bad escape, just propagate the slash as-is + out = append(out, s[0]) + s = s[1:] + } + } + + return string(out) +} + +func (g *Generator) generateExtension(ext *ExtensionDescriptor) { + ccTypeName := ext.DescName() + + extObj := g.ObjectNamed(*ext.Extendee) + var extDesc *Descriptor + if id, ok := extObj.(*ImportedDescriptor); ok { + // This is extending a publicly imported message. + // We need the underlying type for goTag.
+ extDesc = id.o.(*Descriptor) + } else { + extDesc = extObj.(*Descriptor) + } + extendedType := "*" + g.TypeName(extObj) // always use the original + field := ext.FieldDescriptorProto + fieldType, wireType := g.GoType(ext.parent, field) + tag := g.goTag(extDesc, field, wireType) + g.RecordTypeUse(*ext.Extendee) + if n := ext.FieldDescriptorProto.TypeName; n != nil { + // foreign extension type + g.RecordTypeUse(*n) + } + + typeName := ext.TypeName() + + // Special case for proto2 message sets: If this extension is extending + // proto2.bridge.MessageSet, and its final name component is "message_set_extension", + // then drop that last component. + // + // TODO: This should be implemented in the text formatter rather than the generator. + // In addition, the situation for when to apply this special case is implemented + // differently in other languages: + // https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560 + if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" { + typeName = typeName[:len(typeName)-1] + } + + // For text formatting, the package must be exactly what the .proto file declares, + // ignoring overrides such as the go_package option, and with no dot/underscore mapping. + extName := strings.Join(typeName, ".") + if g.file.Package != nil { + extName = *g.file.Package + "." + extName + } + + g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{") + g.P("ExtendedType: (", extendedType, ")(nil),") + g.P("ExtensionType: (", fieldType, ")(nil),") + g.P("Field: ", field.Number, ",") + g.P(`Name: "`, extName, `",`) + g.P("Tag: ", tag, ",") + g.P(`Filename: "`, g.file.GetName(), `",`) + + g.P("}") + g.P() + + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) + + g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) +} + +func (g *Generator) generateInitFunction() { + if len(g.init) == 0 { + return + } + g.P("func init() {") + for _, l := range g.init { + g.P(l) + } + g.P("}") + g.init = nil +} + +func (g *Generator) generateFileDescriptor(file *FileDescriptor) { + // Make a copy and trim source_code_info data. + // TODO: Trim this more when we know exactly what we need. + pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto) + pb.SourceCodeInfo = nil + + b, err := proto.Marshal(pb) + if err != nil { + g.Fail(err.Error()) + } + + var buf bytes.Buffer + w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression) + w.Write(b) + w.Close() + b = buf.Bytes() + + v := file.VarName() + g.P() + g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + g.P("var ", v, " = []byte{") + g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") + for len(b) > 0 { + n := 16 + if n > len(b) { + n = len(b) + } + + s := "" + for _, c := range b[:n] { + s += fmt.Sprintf("0x%02x,", c) + } + g.P(s) + + b = b[n:] + } + g.P("}") +} + +func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { + // We always print the full (proto-world) package name here. + pkg := enum.File().GetPackage() + if pkg != "" { + pkg += "." + } + // The full type name + typeName := enum.TypeName() + // The full type name, CamelCased. + ccTypeName := CamelCaseSlice(typeName) + g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) +} + +// And now lots of helper functions. + +// Is c an ASCII lower-case letter?
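+// Only ASCII matters here: .proto identifiers are restricted to ASCII letters, digits, and underscores.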
+func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} + +// Is c an ASCII digit? +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} + +// CamelCase returns the CamelCased name. +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +// There is a remote possibility of this rewrite causing a name collision, +// but it's so remote we're prepared to pretend it's nonexistent - since the +// C++ generator lowercases names, it's extremely unlikely to have two fields +// with different capitalizations. +// In short, _my_field_name_2 becomes XMyFieldName_2. +func CamelCase(s string) string { + if s == "" { + return "" + } + t := make([]byte, 0, 32) + i := 0 + if s[0] == '_' { + // Need a capital letter; drop the '_'. + t = append(t, 'X') + i++ + } + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + for ; i < len(s); i++ { + c := s[i] + if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) { + continue // Skip the underscore in s. + } + if isASCIIDigit(c) { + t = append(t, c) + continue + } + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c ^= ' ' // Make it a capital letter. + } + t = append(t, c) // Guaranteed not lower case. + // Accept lower case sequence that follows. + for i+1 < len(s) && isASCIILower(s[i+1]) { + i++ + t = append(t, s[i]) + } + } + return string(t) +} + +// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to +// be joined with "_". +func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) } + +// dottedSlice turns a sliced name into a dotted name. +func dottedSlice(elem []string) string { return strings.Join(elem, ".") } + +// Is this field optional? +func isOptional(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL +} + +// Is this field required? +func isRequired(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED +} + +// Is this field repeated? +func isRepeated(field *descriptor.FieldDescriptorProto) bool { + return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED +} + +// Is this field a scalar numeric type? 
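+// Note that strings, bytes, groups, and messages are not scalar by this definition; only the numeric types, bools, and enums are.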
+func isScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + +// badToUnderscore is the mapping function used to generate Go names from package names, +// which can be dotted in the input .proto file. It replaces non-identifier characters such as +// dot or dash with underscore. +func badToUnderscore(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' { + return r + } + return '_' +} + +// baseName returns the last path element of the name, with the last dotted suffix removed. +func baseName(name string) string { + // First, find the last element + if i := strings.LastIndex(name, "/"); i >= 0 { + name = name[i+1:] + } + // Now drop the suffix + if i := strings.LastIndex(name, "."); i >= 0 { + name = name[0:i] + } + return name +} + +// The SourceCodeInfo message describes the location of elements of a parsed +// .proto file by way of a "path", which is a sequence of integers that +// describe the route from a FileDescriptorProto to the relevant submessage. +// The path alternates between a field number of a repeated field, and an index +// into that repeated field. The constants below define the field numbers that +// are used. +// +// See descriptor.proto for more information about this. +const ( + // tag numbers in FileDescriptorProto + packagePath = 2 // package + messagePath = 4 // message_type + enumPath = 5 // enum_type + // tag numbers in DescriptorProto + messageFieldPath = 2 // field + messageMessagePath = 3 // nested_type + messageEnumPath = 4 // enum_type + messageOneofPath = 8 // oneof_decl + // tag numbers in EnumDescriptorProto + enumValuePath = 2 // value +) + +var supportTypeAliases bool + +func init() { + for _, tag := range build.Default.ReleaseTags { + if tag == "go1.9" { + supportTypeAliases = true + return + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go new file mode 100644 index 00000000..a9b61036 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go @@ -0,0 +1,117 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package remap handles tracking the locations of Go tokens in a source text +across a rewrite by the Go formatter. +*/ +package remap + +import ( + "fmt" + "go/scanner" + "go/token" +) + +// A Location represents a span of byte offsets in the source text. +type Location struct { + Pos, End int // End is exclusive +} + +// A Map represents a mapping between token locations in an input source text +// and locations in the corresponding output text. +type Map map[Location]Location + +// Find reports whether the specified span is recorded by m, and if so returns +// the new location it was mapped to. If the input span was not found, the +// returned location is the same as the input. +func (m Map) Find(pos, end int) (Location, bool) { + key := Location{ + Pos: pos, + End: end, + } + if loc, ok := m[key]; ok { + return loc, true + } + return key, false +} + +func (m Map) add(opos, oend, npos, nend int) { + m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend} +} + +// Compute constructs a location mapping from input to output. An error is +// reported if any of the tokens of output cannot be mapped. +func Compute(input, output []byte) (Map, error) { + itok := tokenize(input) + otok := tokenize(output) + if len(itok) != len(otok) { + return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok)) + } + m := make(Map) + for i, ti := range itok { + to := otok[i] + if ti.Token != to.Token { + return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to) + } + m.add(ti.pos, ti.end, to.pos, to.end) + } + return m, nil +} + +// tokinfo records the span and type of a source token.
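+// pos and end are zero-based byte offsets into the scanned source; end is exclusive, matching the Location convention above.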
+type tokinfo struct { + pos, end int + token.Token +} + +func tokenize(src []byte) []tokinfo { + fs := token.NewFileSet() + var s scanner.Scanner + s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments) + var info []tokinfo + for { + pos, next, lit := s.Scan() + switch next { + case token.SEMICOLON: + continue + } + info = append(info, tokinfo{ + pos: int(pos - 1), + end: int(pos + token.Pos(len(lit)) - 1), + Token: next, + }) + if next == token.EOF { + break + } + } + return info +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go new file mode 100644 index 00000000..61bfc10e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go @@ -0,0 +1,369 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/compiler/plugin.proto + +/* +Package plugin_go is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/compiler/plugin.proto + +It has these top-level messages: + Version + CodeGeneratorRequest + CodeGeneratorResponse +*/ +package plugin_go + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } +func (m *Version) Unmarshal(b []byte) error { + return xxx_messageInfo_Version.Unmarshal(m, b) +} +func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Version.Marshal(b, m, deterministic) +} +func (dst *Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_Version.Merge(dst, src) +} +func (m *Version) XXX_Size() int { + return xxx_messageInfo_Version.Size(m) +} +func (m *Version) XXX_DiscardUnknown() { + xxx_messageInfo_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_Version proto.InternalMessageInfo + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +type CodeGeneratorRequest struct { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"` + // The generator parameter passed on the command-line. + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } +func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorRequest) ProtoMessage() {} +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } +func (m *CodeGeneratorRequest) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b) +} +func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src) +} +func (m *CodeGeneratorRequest) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorRequest.Size(m) +} +func (m *CodeGeneratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo + +func (m *CodeGeneratorRequest) GetFileToGenerate() []string { + if m != nil { + return m.FileToGenerate + } + return nil +} + +func (m *CodeGeneratorRequest) GetParameter() string { + if m != nil && m.Parameter != nil { + return *m.Parameter + } + return "" +} + +func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto { + if m != nil { + return m.ProtoFile + } + return nil +} + +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +type CodeGeneratorResponse struct { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. 
+ Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } +func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse) ProtoMessage() {} +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (m *CodeGeneratorResponse) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src) +} +func (m *CodeGeneratorResponse) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse.Size(m) +} +func (m *CodeGeneratorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo + +func (m *CodeGeneratorResponse) GetError() string { + if m != nil && m.Error != nil { + return *m.Error + } + return "" +} + +func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File { + if m != nil { + return m.File + } + return nil +} + +// Represents a single generated file. +type CodeGeneratorResponse_File struct { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. 
+ // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"` + // The file contents. + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorResponse_File{} } +func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} +func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } +func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error { + return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b) +} +func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic) +} +func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) { + xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src) +} +func (m *CodeGeneratorResponse_File) XXX_Size() int { + return xxx_messageInfo_CodeGeneratorResponse_File.Size(m) +} +func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() { + xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m) +} + +var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo + +func (m *CodeGeneratorResponse_File) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetInsertionPoint() string { + if m != nil && m.InsertionPoint != nil { + return *m.InsertionPoint + } + return "" +} + +func (m *CodeGeneratorResponse_File) GetContent() string { + if m != nil && m.Content != nil { + return *m.Content + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") + proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") + proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") + proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") +} + +func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", 
fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41, + 0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2, + 0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30, + 0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa, + 0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91, + 0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63, + 0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb, + 0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55, + 0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8, + 0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1, + 0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f, + 0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d, + 0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2, + 0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a, + 0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2, + 0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d, + 0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda, + 0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed, + 0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34, + 0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79, + 0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45, + 0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4, + 0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e, + 0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92, + 0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d, + 0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden new file mode 100644 index 00000000..8953d0ff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden @@ -0,0 +1,83 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/compiler/plugin.proto +// DO NOT EDIT! + +package google_protobuf_compiler + +import proto "github.com/golang/protobuf/proto" +import "math" +import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor" + +// Reference proto and math imports to suppress error if they are not otherwise used. 
+var _ = proto.GetString +var _ = math.Inf + +type CodeGeneratorRequest struct { + FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"` + Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"` + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorRequest) Reset() { *this = CodeGeneratorRequest{} } +func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorRequest) ProtoMessage() {} + +func (this *CodeGeneratorRequest) GetParameter() string { + if this != nil && this.Parameter != nil { + return *this.Parameter + } + return "" +} + +type CodeGeneratorResponse struct { + Error *string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + File []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse) Reset() { *this = CodeGeneratorResponse{} } +func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse) ProtoMessage() {} + +func (this *CodeGeneratorResponse) GetError() string { + if this != nil && this.Error != nil { + return *this.Error + } + return "" +} + +type CodeGeneratorResponse_File struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"` + Content *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (this *CodeGeneratorResponse_File) Reset() { *this = CodeGeneratorResponse_File{} } +func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) } +func (*CodeGeneratorResponse_File) ProtoMessage() {} + +func (this *CodeGeneratorResponse_File) GetName() string { + if this != nil && this.Name != nil { + return *this.Name + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetInsertionPoint() string { + if this != nil && this.InsertionPoint != nil { + return *this.InsertionPoint + } + return "" +} + +func (this *CodeGeneratorResponse_File) GetContent() string { + if this != nil && this.Content != nil { + return *this.Content + } + return "" +} + +func init() { +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto new file mode 100644 index 00000000..5b557452 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto @@ -0,0 +1,167 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Author: kenton@google.com (Kenton Varda) +// +// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to +// change. +// +// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is +// just a program that reads a CodeGeneratorRequest from stdin and writes a +// CodeGeneratorResponse to stdout. +// +// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead +// of dealing with the raw protocol defined here. +// +// A plugin executable needs only to be placed somewhere in the path. The +// plugin should be named "protoc-gen-$NAME", and will then be used when the +// flag "--${NAME}_out" is passed to protoc. + +syntax = "proto2"; +package google.protobuf.compiler; +option java_package = "com.google.protobuf.compiler"; +option java_outer_classname = "PluginProtos"; + +option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go"; + +import "google/protobuf/descriptor.proto"; + +// The version number of protocol compiler. +message Version { + optional int32 major = 1; + optional int32 minor = 2; + optional int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + optional string suffix = 4; +} + +// An encoded CodeGeneratorRequest is written to the plugin's stdin. +message CodeGeneratorRequest { + // The .proto files that were explicitly listed on the command-line. The + // code generator should generate code only for these files. Each file's + // descriptor will be included in proto_file, below. + repeated string file_to_generate = 1; + + // The generator parameter passed on the command-line. + optional string parameter = 2; + + // FileDescriptorProtos for all files in files_to_generate and everything + // they import. The files will appear in topological order, so each file + // appears before any file that imports it. + // + // protoc guarantees that all proto_files will be written after + // the fields above, even though this is not technically guaranteed by the + // protobuf wire format. This theoretically could allow a plugin to stream + // in the FileDescriptorProtos and handle them one by one rather than read + // the entire set into memory at once. 
However, as of this writing, this + // is not similarly optimized on protoc's end -- it will store all fields in + // memory at once before sending them to the plugin. + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + repeated FileDescriptorProto proto_file = 15; + + // The version number of protocol compiler. + optional Version compiler_version = 3; + +} + +// The plugin writes an encoded CodeGeneratorResponse to stdout. +message CodeGeneratorResponse { + // Error message. If non-empty, code generation failed. The plugin process + // should exit with status code zero even if it reports an error in this way. + // + // This should be used to indicate errors in .proto files which prevent the + // code generator from generating correct code. Errors which indicate a + // problem in protoc itself -- such as the input CodeGeneratorRequest being + // unparseable -- should be reported by writing a message to stderr and + // exiting with a non-zero status code. + optional string error = 1; + + // Represents a single generated file. + message File { + // The file name, relative to the output directory. The name must not + // contain "." or ".." components and must be relative, not be absolute (so, + // the file cannot lie outside the output directory). "/" must be used as + // the path separator, not "\". + // + // If the name is omitted, the content will be appended to the previous + // file. This allows the generator to break large files into small chunks, + // and allows the generated text to be streamed back to protoc so that large + // files need not reside completely in memory at one time. Note that as of + // this writing protoc does not optimize for this -- it will read the entire + // CodeGeneratorResponse before writing files to disk. + optional string name = 1; + + // If non-empty, indicates that the named file should already exist, and the + // content here is to be inserted into that file at a defined insertion + // point. This feature allows a code generator to extend the output + // produced by another code generator. The original generator may provide + // insertion points by placing special annotations in the file that look + // like: + // @@protoc_insertion_point(NAME) + // The annotation can have arbitrary text before and after it on the line, + // which allows it to be placed in a comment. NAME should be replaced with + // an identifier naming the point -- this is what other generators will use + // as the insertion_point. Code inserted at this point will be placed + // immediately above the line containing the insertion point (thus multiple + // insertions to the same point will come out in the order they were added). + // The double-@ is intended to make it unlikely that the generated code + // could contain things that look like insertion points by accident. + // + // For example, the C++ code generator places the following line in the + // .pb.h files that it generates: + // // @@protoc_insertion_point(namespace_scope) + // This line appears within the scope of the file's package namespace, but + // outside of any particular class. Another plugin can then specify the + // insertion_point "namespace_scope" to generate additional classes or + // other declarations that should be placed in this scope. + // + // Note that if the line containing the insertion point begins with + // whitespace, the same whitespace will be added to every line of the + // inserted text. 
This is useful for languages like Python, where + // indentation matters. In these languages, the insertion point comment + // should be indented the same amount as any inserted code will need to be + // in order to work correctly in that context. + // + // The code generator that generates the initial file and the one which + // inserts into it must both run as part of a single invocation of protoc. + // Code generators are executed in the order in which they appear on the + // command line. + // + // If |insertion_point| is present, |name| must also be present. + optional string insertion_point = 2; + + // The file contents. + optional string content = 15; + } + repeated File file = 15; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go new file mode 100644 index 00000000..33daa73d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -0,0 +1,336 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package structpb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} + +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. 
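+ // In the JSON mapping these entries become the members of a JSON object.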
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{0} +} + +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Struct.Unmarshal(m, b) +} +func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Struct.Marshal(b, m, deterministic) +} +func (m *Struct) XXX_Merge(src proto.Message) { + xxx_messageInfo_Struct.Merge(m, src) +} +func (m *Struct) XXX_Size() int { + return xxx_messageInfo_Struct.Size(m) +} +func (m *Struct) XXX_DiscardUnknown() { + xxx_messageInfo_Struct.DiscardUnknown(m) +} + +var xxx_messageInfo_Struct proto.InternalMessageInfo + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of a value is expected to set one of these +// variants; absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{1} +} + +func (*Value) XXX_WellKnownType() string { return "Value" } + +func (m *Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Value.Unmarshal(m, b) +} +func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Value.Marshal(b, m, deterministic) +} +func (m *Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Value.Merge(m, src) +} +func (m *Value) XXX_Size() int { + return xxx_messageInfo_Value.Size(m) +} +func (m *Value) XXX_DiscardUnknown() { + xxx_messageInfo_Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Value proto.InternalMessageInfo + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} + +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} + +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type Value_StructValue struct { + StructValue *Struct
`protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"` +} + +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} + +func (*Value_NumberValue) isValue_Kind() {} + +func (*Value_StringValue) isValue_Kind() {} + +func (*Value_BoolValue) isValue_Kind() {} + +func (*Value_StructValue) isValue_Kind() {} + +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*Value) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. 
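+ // Unlike the unordered Struct map above, element order is preserved here.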
+ Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { + return fileDescriptor_df322afd6c9fb402, []int{2} +} + +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListValue.Unmarshal(m, b) +} +func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListValue.Marshal(b, m, deterministic) +} +func (m *ListValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListValue.Merge(m, src) +} +func (m *ListValue) XXX_Size() int { + return xxx_messageInfo_ListValue.Size(m) +} +func (m *ListValue) XXX_DiscardUnknown() { + xxx_messageInfo_ListValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ListValue proto.InternalMessageInfo + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") +} + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) } + +var fileDescriptor_df322afd6c9fb402 = []byte{ + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 
0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto new file mode 100644 index 00000000..7d7808e7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -0,0 +1,96 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "StructProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. 
The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 00000000..add19a1a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrappers
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is JSON number.
+type DoubleValue struct {
+	// The double value.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{0} +} + +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleValue.Unmarshal(m, b) +} +func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic) +} +func (m *DoubleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleValue.Merge(m, src) +} +func (m *DoubleValue) XXX_Size() int { + return xxx_messageInfo_DoubleValue.Size(m) +} +func (m *DoubleValue) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleValue proto.InternalMessageInfo + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{1} +} + +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FloatValue.Unmarshal(m, b) +} +func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic) +} +func (m *FloatValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_FloatValue.Merge(m, src) +} +func (m *FloatValue) XXX_Size() int { + return xxx_messageInfo_FloatValue.Size(m) +} +func (m *FloatValue) XXX_DiscardUnknown() { + xxx_messageInfo_FloatValue.DiscardUnknown(m) +} + +var xxx_messageInfo_FloatValue proto.InternalMessageInfo + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{2} +} + +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int64Value.Unmarshal(m, b) +} +func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic) +} +func (m *Int64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int64Value.Merge(m, src) +} +func (m *Int64Value) XXX_Size() int { + return xxx_messageInfo_Int64Value.Size(m) +} +func (m *Int64Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int64Value proto.InternalMessageInfo + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{3} +} + +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt64Value.Unmarshal(m, b) +} +func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic) +} +func (m *UInt64Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt64Value.Merge(m, src) +} +func (m *UInt64Value) XXX_Size() int { + return xxx_messageInfo_UInt64Value.Size(m) +} +func (m *UInt64Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt64Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt64Value proto.InternalMessageInfo + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. 
+ Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{4} +} + +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Int32Value.Unmarshal(m, b) +} +func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic) +} +func (m *Int32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_Int32Value.Merge(m, src) +} +func (m *Int32Value) XXX_Size() int { + return xxx_messageInfo_Int32Value.Size(m) +} +func (m *Int32Value) XXX_DiscardUnknown() { + xxx_messageInfo_Int32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_Int32Value proto.InternalMessageInfo + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{5} +} + +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UInt32Value.Unmarshal(m, b) +} +func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic) +} +func (m *UInt32Value) XXX_Merge(src proto.Message) { + xxx_messageInfo_UInt32Value.Merge(m, src) +} +func (m *UInt32Value) XXX_Size() int { + return xxx_messageInfo_UInt32Value.Size(m) +} +func (m *UInt32Value) XXX_DiscardUnknown() { + xxx_messageInfo_UInt32Value.DiscardUnknown(m) +} + +var xxx_messageInfo_UInt32Value proto.InternalMessageInfo + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. 
+ Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{6} +} + +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolValue.Unmarshal(m, b) +} +func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic) +} +func (m *BoolValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolValue.Merge(m, src) +} +func (m *BoolValue) XXX_Size() int { + return xxx_messageInfo_BoolValue.Size(m) +} +func (m *BoolValue) XXX_DiscardUnknown() { + xxx_messageInfo_BoolValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolValue proto.InternalMessageInfo + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{7} +} + +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringValue.Unmarshal(m, b) +} +func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringValue.Marshal(b, m, deterministic) +} +func (m *StringValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringValue.Merge(m, src) +} +func (m *StringValue) XXX_Size() int { + return xxx_messageInfo_StringValue.Size(m) +} +func (m *StringValue) XXX_DiscardUnknown() { + xxx_messageInfo_StringValue.DiscardUnknown(m) +} + +var xxx_messageInfo_StringValue proto.InternalMessageInfo + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5377b62bda767935, []int{8} +} + +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesValue.Unmarshal(m, b) +} +func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic) +} +func (m *BytesValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesValue.Merge(m, src) +} +func (m *BytesValue) XXX_Size() int { + return xxx_messageInfo_BytesValue.Size(m) +} +func (m *BytesValue) XXX_DiscardUnknown() { + xxx_messageInfo_BytesValue.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesValue proto.InternalMessageInfo + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) } + +var fileDescriptor_5377b62bda767935 = []byte{ + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 
0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto new file mode 100644 index 00000000..01947639 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -0,0 +1,118 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Wrappers for primitive (non-message) types. These types are useful +// for embedding primitives in the `google.protobuf.Any` type and for places +// where we need to distinguish between the absence of a primitive +// typed field and its default value. + +syntax = "proto3"; + +package google.protobuf; + +option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; +option go_package = "github.com/golang/protobuf/ptypes/wrappers"; +option java_package = "com.google.protobuf"; +option java_outer_classname = "WrappersProto"; +option java_multiple_files = true; +option objc_class_prefix = "GPB"; + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +message DoubleValue { + // The double value. + double value = 1; +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +message FloatValue { + // The float value. + float value = 1; +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +message Int64Value { + // The int64 value. + int64 value = 1; +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +message UInt64Value { + // The uint64 value. + uint64 value = 1; +} + +// Wrapper message for `int32`. 
+//
+// The JSON representation for `Int32Value` is JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` and `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/AUTHORS b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/AUTHORS
new file mode 100644
index 00000000..649da70b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/AUTHORS
@@ -0,0 +1,27 @@
+# This is the official list of certificate-transparency-go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+#
+# Names should be added to this file as:
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+#
+# Please keep the list sorted.
+
+Comodo CA Limited
+Ed Maste
+Fiaz Hossain
+Google Inc.
+Internet Security Research Group
+Jeff Trawick
+Katriel Cohn-Gordon
+Laël Cellier
+Mark Schloesser
+NORDUnet A/S
+Nicholas Galbreath
+Oliver Weidner
+PrimeKey Solutions AB
+Ruslan Kovalov
+Venafi, Inc.
+Vladimir Rutsky
+Ximin Luo
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
new file mode 100644
index 00000000..f4a3af45
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CHANGELOG.md
@@ -0,0 +1,378 @@
+# CERTIFICATE-TRANSPARENCY-GO Changelog
+
+## HEAD
+
+Not yet released.
+
+### CTFE
+
+The `reject_expired` and `reject_unexpired` configuration fields for the CTFE
+have been changed so that their behaviour reflects their names:
+
+- `reject_expired` only rejects expired certificates (i.e. it now allows
+  not-yet-valid certificates).
+- `reject_unexpired` only allows expired certificates (i.e. it now rejects
+  not-yet-valid certificates).
+
+(A minimal sketch of these semantics follows the Flags subsection below.)
+
+A `reject_extensions` configuration field for the CTFE was added; this allows
+submissions to be rejected if they contain an extension with any of the
+specified OIDs.
+
+A `frozen_sth` configuration field for the CTFE was added. This STH will be
+served permanently. It must be signed by the log's private key.
+
+A `/healthz` URL has been added, which responds with HTTP 200 OK and the string
+"ok" when the server is up.
+
+#### Flags
+
+The `ct_server` binary has these new flags:
+
+- `mask_internal_errors` - Removes error strings from HTTP 500 responses
+  (Internal Server Error)
+
+Removed default values for `--metrics_endpoint` and `--log_rpc_server` flags.
+This makes it easier to get the documented "unset" behaviour.
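The following is a minimal sketch of the `reject_expired` / `reject_unexpired` semantics described above. It is illustrative only, not the CTFE implementation: the package and the `rejectByValidity` helper and its parameter names are invented for this example, and only the standard library is used.

```go
package ctfe

import (
	"crypto/x509"
	"errors"
	"time"
)

// rejectByValidity sketches the documented semantics: reject_expired refuses
// certificates whose NotAfter is in the past, while reject_unexpired refuses
// any certificate that has not yet expired (including not-yet-valid ones).
func rejectByValidity(cert *x509.Certificate, rejectExpired, rejectUnexpired bool, now time.Time) error {
	expired := now.After(cert.NotAfter)
	if rejectExpired && expired {
		return errors.New("submission rejected: certificate has expired")
	}
	if rejectUnexpired && !expired {
		return errors.New("submission rejected: certificate has not yet expired")
	}
	return nil
}
```

With both flags unset, this check rejects nothing, which matches the documented defaults.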
+
+#### Metrics
+
+The CTFE exports these new metrics:
+
+- `is_mirror` - set to 1 for mirror logs (copies of logs hosted elsewhere)
+- `frozen_sth_timestamp` - time of the frozen Signed Tree Head in milliseconds
+  since the epoch
+
+#### Kubernetes
+
+Updated prometheus-to-sd to v0.5.2.
+
+A dedicated node pool is no longer required by the Kubernetes manifests.
+
+### Log Lists
+
+A new package has been created for parsing, searching and creating JSON log
+lists compatible with the
+[v2 schema](http://www.gstatic.com/ct/log_list/v2_beta/log_list_schema.json):
+`github.com/google/certificate-transparency-go/loglist2`.
+
+### Docker Images
+
+Our Docker images have been updated to use Go 1.11 and
+[Distroless base images](https://github.com/GoogleContainerTools/distroless).
+
+The CTFE Docker image now sets `ENTRYPOINT`.
+
+### Utilities / Libraries
+
+#### jsonclient
+
+The `jsonclient` package now copes with empty HTTP responses. The user-agent
+header it sends can now be specified.
+
+#### x509 and asn1 forks
+
+Merged upstream changes from Go 1.12 into the `asn1` and `x509` packages.
+
+Added a "lax" tag to `asn1` that applies recursively and makes some checks more
+relaxed:
+
+- parsePrintableString() copes with invalid PrintableString contents, e.g. use
+  of tagPrintableString when the string data is really ISO8859-1.
+- checkInteger() allows integers that are not minimally encoded (and so are
+  not correct DER).
+- OIDs are allowed to be empty.
+
+The following `x509` functions will now return `x509.NonFatalErrors` if ASN.1
+parsing fails in strict mode but succeeds in lax mode. Previously, they only
+attempted strict mode parsing.
+
+- `x509.ParseTBSCertificate()`
+- `x509.ParseCertificate()`
+- `x509.ParseCertificates()`
+
+The `x509` package will now treat a negative RSA modulus as a non-fatal error.
+
+The `x509` package now supports RSAES-OAEP and Ed25519 keys.
+
+#### ctclient
+
+The `ctclient` tool now defaults to using
+[all_logs_list.json](https://www.gstatic.com/ct/log_list/all_logs_list.json)
+instead of [log_list.json](https://www.gstatic.com/ct/log_list/log_list.json).
+This can be overridden using the `--log_list` flag.
+
+It can now perform inclusion checks on pre-certificates.
+
+It has these new commands:
+
+- `bisect` - Finds a log entry given a timestamp.
+
+It has these new flags:
+
+- `--chain` - Displays the entire certificate chain
+- `--dns_server` - The DNS server to direct queries to (system resolver by
+  default)
+- `--skip_https_verify` - Skips verification of the HTTPS connection
+- `--timestamp` - Timestamp to use for `bisect` and `inclusion` commands (for
+  `inclusion`, only if --leaf_hash is not used)
+
+It now accepts hex or base64-encoded strings for the `--tree_hash`,
+`--prev_hash` and `--leaf_hash` flags.
+
+#### certcheck
+
+The `certcheck` tool has these new flags:
+
+- `--check_time` - Check current validity of certificate (replaces
+  `--timecheck`)
+- `--check_name` - Check validity of certificate name
+- `--check_eku` - Check validity of EKU nesting
+- `--check_path_len` - Check validity of path length constraint
+- `--check_name_constraint` - Check name constraints
+- `--check_unknown_critical_exts` - Check for unknown critical extensions
+  (replaces `--ignore_unknown_critical_exts`)
+- `--strict` - Set non-zero exit code for non-fatal errors in parsing
+
+#### sctcheck
+
+The `sctcheck` tool has these new flags:
+
+- `--check_inclusion` - Checks that the SCT was honoured (i.e.
the + corresponding certificate was included in the issuing CT log) + +#### ct_hammer + +The `ct_hammer` tool has these new flags: + +- `--duplicate_chance` - Allows setting the probability of the hammer sending + a duplicate submission. + +## v1.0.21 - CTFE Logging / Path Options. Mirroring. RPKI. Non Fatal X.509 error improvements + +Published 2018-08-20 10:11:04 +0000 UTC + +### CTFE + +`CTFE` no longer prints certificate chains as long byte strings in messages when handler errors occur. This was obscuring the reason for the failure and wasn't particularly useful. + +`CTFE` now has a global log URL path prefix flag and a configuration proto for a log specific path. The latter should help for various migration strategies if existing C++ server logs are going to be converted to run on the new code. + +### Mirroring + +More progress has been made on log mirroring. We believe that it's now at the point where testing can begin. + +### Utilities / Libraries + +The `certcheck` and `ct_hammer` utilities have received more enhancements. + +`x509` and `x509util` now support Subject Information Access and additional extensions for [RPKI / RFC 3779](https://www.ietf.org/rfc/rfc3779.txt). + +`scanner` / `fixchain` and some other command line utilities now have better handling of non-fatal errors. + +Commit [3629d6846518309d22c16fee15d1007262a459d2](https://api.github.com/repos/google/certificate-transparency-go/commits/3629d6846518309d22c16fee15d1007262a459d2) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.21) + +## v1.0.20 - Minimal Gossip / Go 1.11 Fix / Utility Improvements + +Published 2018-07-05 09:21:34 +0000 UTC + +Enhancements have been made to various utilities including `scanner`, `sctcheck`, `loglist` and `x509util`. + +The `allow_verification_with_non_compliant_keys` flag has been removed from `signatures.go`. + +An implementation of Gossip has been added. See the `gossip/minimal` package for more information. + +An X.509 compatibility issue for Go 1.11 has been fixed. This should be backwards compatible with 1.10. + +Commit [37a384cd035e722ea46e55029093e26687138edf](https://api.github.com/repos/google/certificate-transparency-go/commits/37a384cd035e722ea46e55029093e26687138edf) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.20) + +## v1.0.19 - CTFE User Quota + +Published 2018-06-01 13:51:52 +0000 UTC + +CTFE now supports Trillian Log's explicit quota API; quota can be requested based on the remote user's IP, as well as per-issuing certificate in submitted chains. + +Commit [8736a411b4ff214ea20687e46c2b67d66ebd83fc](https://api.github.com/repos/google/certificate-transparency-go/commits/8736a411b4ff214ea20687e46c2b67d66ebd83fc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.19) + +## v1.0.18 - Adding Migration Tool / Client Additions / K8 Config + +Published 2018-06-01 14:28:20 +0000 UTC + +Work on a log migration tool (Migrillian) is in progress. This is not yet ready for production use but will provide features for mirroring and migrating logs. + +The `RequestLog` API allows for logging of SCTs when they are issued by CTFE. + +The CT Go client now supports `GetEntryAndProof`. Utilities have been switched over to use the `glog` package. 
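To illustrate the `GetEntryAndProof` client call mentioned in the v1.0.18 notes above, here is a minimal sketch using the repository's `client` and `jsonclient` packages. The log URI and the index/tree-size arguments are placeholders, and the empty `jsonclient.Options` is for brevity; treat the exact API shapes as assumptions about this version of the library.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	"github.com/google/certificate-transparency-go/client"
	"github.com/google/certificate-transparency-go/jsonclient"
)

func main() {
	// Placeholder log URI; a real CT log URL goes here.
	lc, err := client.New("https://ct.example.com/testlog", http.DefaultClient, jsonclient.Options{})
	if err != nil {
		log.Fatalf("failed to create log client: %v", err)
	}
	// Fetch entry 42 together with its inclusion proof against a tree of size 1000.
	resp, err := lc.GetEntryAndProof(context.Background(), 42, 1000)
	if err != nil {
		log.Fatalf("get-entry-and-proof failed: %v", err)
	}
	fmt.Printf("leaf: %d bytes, audit path: %d nodes\n", len(resp.LeafInput), len(resp.AuditPath))
}
```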
+
+Commit [77abf2dac5410a62c04ac1c662c6d0fa54afc2dc](https://api.github.com/repos/google/certificate-transparency-go/commits/77abf2dac5410a62c04ac1c662c6d0fa54afc2dc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.18)
+
+## v1.0.17 - Merkle verification / Tracing / Demo script / CORS
+
+Published 2018-06-01 14:25:16 +0000 UTC
+
+Now uses Merkle Tree verification from Trillian.
+
+The CT server now supports CORS.
+
+Request tracing added using OpenCensus. For GCE / K8 it just requires the flag to be enabled to export traces to Stackdriver. Other environments may differ.
+
+A demo script was added that goes through setting up a simple deployment suitable for development / demo purposes. This may be useful for those new to the project.
+
+Commit [3c3d22ce946447d047a03228ebb4a41e3e4eb15b](https://api.github.com/repos/google/certificate-transparency-go/commits/3c3d22ce946447d047a03228ebb4a41e3e4eb15b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.17)
+
+## v1.0.16 - Lifecycle test / Go 1.10.1
+
+Published 2018-06-01 14:22:23 +0000 UTC
+
+An integration test was added that goes through a create / drain queue / freeze lifecycle for a log.
+
+Changes to `x509` were merged from Go 1.10.1.
+
+Commit [a72423d09b410b80673fd1135ba1022d04bac6cd](https://api.github.com/repos/google/certificate-transparency-go/commits/a72423d09b410b80673fd1135ba1022d04bac6cd) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.16)
+
+## v1.0.15 - More control of verification, grpclb, stackdriver metrics
+
+Published 2018-06-01 14:20:32 +0000 UTC
+
+Facilities were added to the `x509` package to control whether verification checks are applied.
+
+Log server requests are now balanced using `gRPClb`.
+
+For Kubernetes, metrics can be published to Stackdriver monitoring.
+
+Commit [684d6eee6092774e54d301ccad0ed61bc8d010c1](https://api.github.com/repos/google/certificate-transparency-go/commits/684d6eee6092774e54d301ccad0ed61bc8d010c1) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.15)
+
+## v1.0.14 - SQLite Removed, LeafHashForLeaf
+
+Published 2018-06-01 14:15:37 +0000 UTC
+
+Support for SQLite was removed. The motivation was ongoing test flakiness caused by multi-user access. This database may work for an embedded scenario but is not suitable for use in a server environment.
+
+A `LeafHashForLeaf` client API was added and is now used by the CT client and integration tests (a usage sketch follows the v1.0.13 notes below).
+
+Commit [698cd6a661196db4b2e71437422178ffe8705006](https://api.github.com/repos/google/certificate-transparency-go/commits/698cd6a661196db4b2e71437422178ffe8705006) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.14)
+
+## v1.0.13 - Crypto changes, util updates, sync with trillian repo, loglist verification
+
+Published 2018-06-01 14:15:21 +0000 UTC
+
+Some of our custom crypto packages that were wrapping calls to the standard library have been removed, and the base features are now used directly.
+
+Updates were made to GCE ingress and health checks.
+
+The log list utility can verify signatures.
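As a usage sketch for the `LeafHashForLeaf` API noted under v1.0.14 above: the top-level `ct` package exposes `CreateX509MerkleTreeLeaf` and `LeafHashForLeaf` (assuming the package layout at this version). The DER bytes and timestamp below are placeholders, not real data.

```go
package main

import (
	"fmt"
	"log"

	ct "github.com/google/certificate-transparency-go"
)

func main() {
	// Placeholder: in practice this holds a complete DER-encoded X.509 certificate.
	der := []byte{0x30, 0x82 /* ... remaining DER bytes ... */}
	// Build an X.509 Merkle tree leaf for the certificate at a given timestamp.
	leaf := ct.CreateX509MerkleTreeLeaf(ct.ASN1Cert{Data: der}, 1512556025588)
	hash, err := ct.LeafHashForLeaf(leaf)
	if err != nil {
		log.Fatalf("failed to hash leaf: %v", err)
	}
	fmt.Printf("Merkle leaf hash: %x\n", hash[:])
}
```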
+
+Commit [480c3654a70c5383b9543ec784203030aedbd3a5](https://api.github.com/repos/google/certificate-transparency-go/commits/480c3654a70c5383b9543ec784203030aedbd3a5) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.13)
+
+## v1.0.12 - Client / util updates & CTFE fixes
+
+Published 2018-06-01 14:13:42 +0000 UTC
+
+The CT client can now use a JSON loglist to find logs (a parsing sketch follows the v1.0.6 entry below).
+
+CTFE had a fix applied for pre-issued precerts.
+
+A DNS client was added and the CT client was extended to support DNS retrieval.
+
+Commit [74c06c95e0b304a050a1c33764c8a01d653a16e3](https://api.github.com/repos/google/certificate-transparency-go/commits/74c06c95e0b304a050a1c33764c8a01d653a16e3) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.12)
+
+## v1.0.11 - Kubernetes CI / Integration fixes
+
+Published 2018-06-01 14:12:18 +0000 UTC
+
+Updates to Kubernetes configs, mostly related to running a CI instance.
+
+Commit [0856acca7e0ab7f082ae83a1fbb5d21160962efc](https://api.github.com/repos/google/certificate-transparency-go/commits/0856acca7e0ab7f082ae83a1fbb5d21160962efc) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.11)
+
+## v1.0.10 - More scanner, x509, utility and client fixes. CTFE updates
+
+Published 2018-06-01 14:09:47 +0000 UTC
+
+The CT client was using the wrong protobuf library package. To guard against this in the future, a check has been added to our lint config.
+
+The `x509` and `asn1` packages have had upstream fixes applied from Go 1.10rc1.
+
+Commit [1bec4527572c443752ad4f2830bef88be0533236](https://api.github.com/repos/google/certificate-transparency-go/commits/1bec4527572c443752ad4f2830bef88be0533236) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.10)
+
+## v1.0.9 - Scanner, x509, utility and client fixes
+
+Published 2018-06-01 14:11:13 +0000 UTC
+
+The `scanner` utility now displays throughput stats.
+
+Build instructions and README files were updated.
+
+The `certcheck` utility can be told to ignore unknown critical X.509 extensions.
+
+Commit [c06833528d04a94eed0c775104d1107bab9ae17c](https://api.github.com/repos/google/certificate-transparency-go/commits/c06833528d04a94eed0c775104d1107bab9ae17c) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.9)
+
+## v1.0.8 - Client fixes, align with trillian repo
+
+Published 2018-06-01 14:06:44 +0000 UTC
+
+
+
+Commit [e8b02c60f294b503dbb67de0868143f5d4935e56](https://api.github.com/repos/google/certificate-transparency-go/commits/e8b02c60f294b503dbb67de0868143f5d4935e56) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.8)
+
+## v1.0.7 - CTFE fixes
+
+Published 2018-06-01 14:06:13 +0000 UTC
+
+An issue was fixed with CTFE signature caching. In an unlikely set of circumstances this could lead to log mis-operation. While the chances of this are small, we recommend that versions prior to this one are not deployed.
+
+Commit [52c0590bd3b4b80c5497005b0f47e10557425eeb](https://api.github.com/repos/google/certificate-transparency-go/commits/52c0590bd3b4b80c5497005b0f47e10557425eeb) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.7)
+
+## v1.0.6 - crlcheck improvements / other fixes
+
+Published 2018-06-01 14:04:22 +0000 UTC
+
+The `crlcheck` utility has had several fixes and enhancements. Additionally, the `hammer` now supports temporal logs.
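As referenced from the v1.0.12 entry above, here is a brief sketch of parsing a JSON loglist with the repository's `loglist` package. The file path is a placeholder, and `loglist.NewFromJSON` and the `Log` fields shown are assumptions about the API at this version; they may differ between releases.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/google/certificate-transparency-go/loglist"
)

func main() {
	// Placeholder path; the list is normally fetched from
	// https://www.gstatic.com/ct/log_list/log_list.json.
	data, err := ioutil.ReadFile("log_list.json")
	if err != nil {
		log.Fatalf("failed to read log list: %v", err)
	}
	ll, err := loglist.NewFromJSON(data)
	if err != nil {
		log.Fatalf("failed to parse log list: %v", err)
	}
	// Print each known log's description and API endpoint.
	for _, l := range ll.Logs {
		fmt.Printf("%s -> %s\n", l.Description, l.URL)
	}
}
```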
+
+Commit [3955e4a00c42e83ff17ce25003976159c5d0f0f9](https://api.github.com/repos/google/certificate-transparency-go/commits/3955e4a00c42e83ff17ce25003976159c5d0f0f9) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.6)
+
+## v1.0.5 - X509 and asn1 fixes
+
+Published 2018-06-01 14:02:58 +0000 UTC
+
+This release is mostly fixes to the `x509` and `asn1` packages. Some command line utilities were also updated.
+
+Commit [ae40d07cce12f1227c6e658e61c9dddb7646f97b](https://api.github.com/repos/google/certificate-transparency-go/commits/ae40d07cce12f1227c6e658e61c9dddb7646f97b) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.5)
+
+## v1.0.4 - Multi log backend configs
+
+Published 2018-06-01 14:02:07 +0000 UTC
+
+Support was added to allow CTFE to use multiple backends, each serving a distinct set of logs. It allows for e.g. regional backend deployment with common frontend servers.
+
+Commit [62023ed90b41fa40854957b5dec7d9d73594723f](https://api.github.com/repos/google/certificate-transparency-go/commits/62023ed90b41fa40854957b5dec7d9d73594723f) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.4)
+
+## v1.0.3 - Hammer updates, use standard context
+
+Published 2018-06-01 14:01:11 +0000 UTC
+
+After the Go 1.9 migration, references to anything other than the standard `context` package have been removed. This is the only one that should be used from now on.
+
+Commit [b28beed8b9aceacc705e0ff4a11d435a310e3d97](https://api.github.com/repos/google/certificate-transparency-go/commits/b28beed8b9aceacc705e0ff4a11d435a310e3d97) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.3)
+
+## v1.0.2 - Go 1.9
+
+Published 2018-06-01 14:00:00 +0000 UTC
+
+Go 1.9 is now required to build the code.
+
+Commit [3aed33d672ee43f04b1e8a00b25ca3e2e2e74309](https://api.github.com/repos/google/certificate-transparency-go/commits/3aed33d672ee43f04b1e8a00b25ca3e2e2e74309) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.2)
+
+## v1.0.1 - Hammer and client improvements
+
+Published 2018-06-01 13:59:29 +0000 UTC
+
+
+
+Commit [c28796cc21776667fb05d6300e32d9517be96515](https://api.github.com/repos/google/certificate-transparency-go/commits/c28796cc21776667fb05d6300e32d9517be96515) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0.1)
+
+## v1.0 - First Trillian CT Release
+
+Published 2018-06-01 13:59:00 +0000 UTC
+
+This is the point that corresponds to the 1.0 release in the trillian repo.
+
+Commit [abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d](https://api.github.com/repos/google/certificate-transparency-go/commits/abb79e468b6f3bbd48d1ab0c9e68febf80d52c4d) Download [zip](https://api.github.com/repos/google/certificate-transparency-go/zipball/v1.0)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md
new file mode 100644
index 00000000..43de4c9d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CONTRIBUTING.md
@@ -0,0 +1,58 @@
+# How to contribute #
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+
+## Contributor License Agreement ##
+
+Contributions to any Google project must be accompanied by a Contributor
+License Agreement. This is not a copyright **assignment**; it simply gives
+Google permission to use and redistribute your contributions as part of the
+project.
+
+  * If you are an individual writing original source code and you're sure you
+    own the intellectual property, then you'll need to sign an [individual
+    CLA][].
+
+  * If you work for a company that wants to allow you to contribute your work,
+    then you'll need to sign a [corporate CLA][].
+
+You generally only need to submit a CLA once, so if you've already submitted
+one (even if it was for a different project), you probably don't need to do it
+again.
+
+[individual CLA]: https://developers.google.com/open-source/cla/individual
+[corporate CLA]: https://developers.google.com/open-source/cla/corporate
+
+Once your CLA is submitted (or if you already submitted one for
+another Google project), make a commit adding yourself to the
+[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
+of your first [pull request][].
+
+[AUTHORS]: AUTHORS
+[CONTRIBUTORS]: CONTRIBUTORS
+
+
+## Submitting a patch ##
+
+  1. It's generally best to start by opening a new issue describing the bug or
+     feature you're intending to fix. Even if you think it's relatively minor,
+     it's helpful to know what people are working on. Mention in the initial
+     issue that you are planning to work on that bug or feature so that it can
+     be assigned to you.
+
+  1. Follow the normal process of [forking][] the project, and set up a new
+     branch to work in. It's important that each group of changes be done in
+     separate branches in order to ensure that a pull request only includes the
+     commits related to that bug or feature.
+
+  1. Do your best to have [well-formed commit messages][] for each change.
+     This provides consistency throughout the project, and ensures that commit
+     messages are able to be formatted properly by various git tools.
+
+  1. Finally, push the commits to your fork and submit a [pull request][].
+
+[forking]: https://help.github.com/articles/fork-a-repo
+[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
+[pull request]: https://help.github.com/articles/creating-a-pull-request
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
new file mode 100644
index 00000000..8c99304d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/CONTRIBUTORS
@@ -0,0 +1,59 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# The agreement for individuals can be filled out on the web.
+# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. +# +# Names should be added to this file as: +# Name +# +# Please keep the list sorted. + +Adam Eijdenberg +Al Cutter +Ben Laurie +Chris Kennelly +David Drysdale +Deyan Bektchiev +Ed Maste +Emilia Kasper +Eran Messeri +Fiaz Hossain +Gary Belvin +Jeff Trawick +Joe Tsai +Kat Joyce +Katriel Cohn-Gordon +Kiril Nikolov +Konrad Kraszewski +Laël Cellier +Linus Nordberg +Mark Schloesser +Nicholas Galbreath +Oliver Weidner +Pascal Leroy +Paul Hadfield +Paul Lietar +Pavel Kalinnikov +Pierre Phaneuf +Rob Percival +Rob Stradling +Roland Shoemaker +Ruslan Kovalov +Samuel Lidén Borell +Tatiana Merkulova +Vladimir Rutsky +Ximin Luo diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..c3c0feb3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,16 @@ + + +### Checklist + + + +- [ ] I have updated the [CHANGELOG](CHANGELOG.md). + - Adjust the draft version number according to [semantic versioning](https://semver.org/) rules. +- [ ] I have updated [documentation](docs/) accordingly. 
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/README.md b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/README.md new file mode 100644 index 00000000..dc6d7245 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/README.md @@ -0,0 +1,138 @@ +# Certificate Transparency: Go Code + +[![Build Status](https://travis-ci.org/google/certificate-transparency-go.svg?branch=master)](https://travis-ci.org/google/certificate-transparency-go) +[![Go Report Card](https://goreportcard.com/badge/github.com/google/certificate-transparency-go)](https://goreportcard.com/report/github.com/google/certificate-transparency-go) +[![GoDoc](https://godoc.org/github.com/google/certificate-transparency-go?status.svg)](https://godoc.org/github.com/google/certificate-transparency-go) + +This repository holds Go code related to +[Certificate Transparency](https://www.certificate-transparency.org/) (CT). The +repository requires Go version 1.9. + + - [Repository Structure](#repository-structure) + - [Trillian CT Personality](#trillian-ct-personality) + - [Working on the Code](#working-on-the-code) + - [Running Codebase Checks](#running-codebase-checks) + - [Rebuilding Generated Code](#rebuilding-generated-code) + - [Updating Vendor Code](#updating-vendor-code) + +## Repository Structure + +The main parts of the repository are: + + - Encoding libraries: + - `asn1/` and `x509/` are forks of the upstream Go `encoding/asn1` and + `crypto/x509` libraries. We maintain separate forks of these packages + because CT is intended to act as an observatory of certificates across the + ecosystem; as such, we need to be able to process somewhat-malformed + certificates that the stricter upstream code would (correctly) reject. + Our `x509` fork also includes code for working with the + [pre-certificates defined in RFC 6962](https://tools.ietf.org/html/rfc6962#section-3.1). + - `tls` holds a library for processing TLS-encoded data as described in + [RFC 5246](https://tools.ietf.org/html/rfc5246). + - `x509util/` provides additional utilities for dealing with + `x509.Certificate`s. + - CT client libraries: + - The top-level `ct` package (in `.`) holds types and utilities for working + with CT data structures defined in + [RFC 6962](https://tools.ietf.org/html/rfc6962). + - `client/` and `jsonclient/` hold libraries that allow access to CT Logs + via HTTP entrypoints described in + [section 4 of RFC 6962](https://tools.ietf.org/html/rfc6962#section-4). + - `dnsclient/` has a library that allows access to CT Logs over + [DNS](https://github.com/google/certificate-transparency-rfcs/blob/master/dns/draft-ct-over-dns.md). + - `scanner/` holds a library for scanning the entire contents of an existing + CT Log. + - CT Personality for [Trillian](https://github.com/google/trillian): + - `trillian/` holds code that allows a Certificate Transparency Log to be + run using a Trillian Log as its back-end -- see + [below](#trillian-ct-personality). + - Command line tools: + - `./client/ctclient` allows interaction with a CT Log. + - `./ctutil/sctcheck` allows SCTs (signed certificate timestamps) from a CT + Log to be verified. + - `./scanner/scanlog` allows an existing CT Log to be scanned for certificates + of interest; please be polite when running this tool against a Log. 
+   - `./x509util/certcheck` allows display and verification of certificates.
+   - `./x509util/crlcheck` allows display and verification of certificate
+     revocation lists (CRLs).
+ - Other libraries related to CT:
+   - `ctutil/` holds utility functions for validating and verifying CT data
+     structures.
+   - `loglist/` has a library for reading
+     [JSON lists of CT Logs](https://www.certificate-transparency.org/known-logs).
+
+
+## Trillian CT Personality
+
+The `trillian/` subdirectory holds code and scripts for running a CT Log based
+on the [Trillian](https://github.com/google/trillian) general transparency Log,
+and is [documented separately](trillian/README.md).
+
+
+## Working on the Code
+
+Developers who want to make changes to the codebase need some additional
+dependencies and tools, described in the following sections. The
+[Travis configuration](.travis.yml) for the codebase is also a useful reference
+for the required tools and scripts, as it may be more up-to-date than this
+document.
+
+In order for the `go generate` command to work properly, the code must
+be checked out to the following location:
+`$GOPATH/src/github.com/google/certificate-transparency-go`
+
+
+### Running Codebase Checks
+
+The [`scripts/presubmit.sh`](scripts/presubmit.sh) script runs various tools
+and tests over the codebase; please ensure this script passes before sending
+pull requests for review.
+
+```bash
+# Install golangci-lint
+go get -u github.com/golangci/golangci-lint/cmd/golangci-lint
+cd $GOPATH/src/github.com/golangci/golangci-lint/cmd/golangci-lint
+go install -ldflags "-X 'main.version=$(git describe --tags)' -X 'main.commit=$(git rev-parse --short HEAD)' -X 'main.date=$(date)'"
+cd -
+
+# Run code generation, build, test and linters
+./scripts/presubmit.sh
+
+# Run build, test and linters but skip code generation
+./scripts/presubmit.sh --no-generate
+
+# Or just run the linters alone:
+golangci-lint run
+```
+
+### Rebuilding Generated Code
+
+Some of the CT Go code is autogenerated from other files:
+
+ - [Protocol buffer](https://developers.google.com/protocol-buffers/) message
+   definitions are converted to `.pb.go` implementations.
+ - A mock implementation of the Trillian gRPC API (in `trillian/mockclient`) is
+   created with [GoMock](https://github.com/golang/mock).
+
+Re-generating mock or protocol buffer files is only needed if you're changing
+the original files; if you do, you'll need to install the prerequisites:
+
+ - `mockgen` tool from https://github.com/golang/mock
+ - `protoc`, [Go support for protoc](https://github.com/golang/protobuf) (see
+   documentation linked from the
+   [protobuf site](https://github.com/google/protobuf))
+
+and run the following:
+
+```bash
+go generate -x ./...  # hunts for //go:generate comments and runs them
+```
+
+### Updating Vendor Code
+
+The codebase includes a couple of external projects under the `vendor/`
+subdirectory, to ensure that builds use a fixed version (typically because the
+upstream repository does not guarantee back-compatibility between the tip
+`master` branch and the current stable release). See
+[instructions in the Trillian repo](https://github.com/google/trillian#updating-vendor-code)
+for how to update vendored subtrees.
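+
+As a quick orientation to the client libraries listed above, the following is a
+minimal sketch of fetching a log's current Signed Tree Head (STH). It is an
+illustration only: the log URL is an example, and which `jsonclient.Options`
+fields you set depends on whether you want the log's responses verified.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/google/certificate-transparency-go/client"
+	"github.com/google/certificate-transparency-go/jsonclient"
+)
+
+func main() {
+	// Example log endpoint; any RFC 6962-compliant log URL works here.
+	// An empty jsonclient.Options means the log's signatures are not verified.
+	lc, err := client.New("https://ct.googleapis.com/pilot",
+		http.DefaultClient, jsonclient.Options{})
+	if err != nil {
+		panic(err)
+	}
+	// GetSTH hits the log's get-sth entrypoint (RFC 6962 section 4.3).
+	sth, err := lc.GetSTH(context.Background())
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("tree size: %d\n", sth.TreeSize)
+}
+```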
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/README.md b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/README.md new file mode 100644 index 00000000..a42ac4eb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/README.md @@ -0,0 +1,7 @@ +# Important Notice + +This is a fork of the `encoding/asn1` Go package. The original source can be found on +[GitHub](https://github.com/golang/go). + +Be careful about making local modifications to this code as it will +make maintenance harder in future. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go new file mode 100644 index 00000000..50fa2392 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/asn1.go @@ -0,0 +1,1166 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 implements parsing of DER-encoded ASN.1 data structures, +// as defined in ITU-T Rec X.690. +// +// See also ``A Layman's Guide to a Subset of ASN.1, BER, and DER,'' +// http://luca.ntop.org/Teaching/Appunti/asn1.html. +// +// This is a fork of the Go standard library ASN.1 implementation +// (encoding/asn1), with the aim of relaxing checks for various things +// that are common errors present in many X.509 certificates in the +// wild. +// +// Main differences: +// - Extra "lax" tag that recursively applies and relaxes some strict +// checks: +// - parsePrintableString() copes with invalid PrintableString contents, +// e.g. use of tagPrintableString when the string data is really +// ISO8859-1. +// - checkInteger() allows integers that are not minimally encoded (and +// so are not correct DER). +// - parseObjectIdentifier() allows zero-length OIDs. +// - Better diagnostics on which particular field causes errors. +package asn1 + +// ASN.1 is a syntax for specifying abstract objects and BER, DER, PER, XER etc +// are different encoding formats for those objects. Here, we'll be dealing +// with DER, the Distinguished Encoding Rules. DER is used in X.509 because +// it's fast to parse and, unlike BER, has a unique encoding for every object. +// When calculating hashes over objects, it's important that the resulting +// bytes be the same at both ends and DER removes this margin of error. +// +// ASN.1 is very complex and this package doesn't attempt to implement +// everything by any means. + +import ( + "errors" + "fmt" + "math" + "math/big" + "reflect" + "strconv" + "time" + "unicode/utf8" +) + +// A StructuralError suggests that the ASN.1 data is valid, but the Go type +// which is receiving it doesn't match. +type StructuralError struct { + Msg string + Field string +} + +func (e StructuralError) Error() string { + var prefix string + if e.Field != "" { + prefix = e.Field + ": " + } + return "asn1: structure error: " + prefix + e.Msg +} + +// A SyntaxError suggests that the ASN.1 data is invalid. +type SyntaxError struct { + Msg string + Field string +} + +func (e SyntaxError) Error() string { + var prefix string + if e.Field != "" { + prefix = e.Field + ": " + } + return "asn1: syntax error: " + prefix + e.Msg +} + +// We start by dealing with each of the primitive types in turn. 
+
+// BOOLEAN
+
+func parseBool(bytes []byte, fieldName string) (ret bool, err error) {
+	if len(bytes) != 1 {
+		err = SyntaxError{"invalid boolean", fieldName}
+		return
+	}
+
+	// DER demands that "If the encoding represents the boolean value TRUE,
+	// its single contents octet shall have all eight bits set to one."
+	// Thus only 0 and 255 are valid encoded values.
+	switch bytes[0] {
+	case 0:
+		ret = false
+	case 0xff:
+		ret = true
+	default:
+		err = SyntaxError{"invalid boolean", fieldName}
+	}
+
+	return
+}
+
+// INTEGER
+
+// checkInteger returns nil if the given bytes are a valid DER-encoded
+// INTEGER and an error otherwise.
+func checkInteger(bytes []byte, lax bool, fieldName string) error {
+	if len(bytes) == 0 {
+		return StructuralError{"empty integer", fieldName}
+	}
+	if len(bytes) == 1 {
+		return nil
+	}
+	if lax {
+		return nil
+	}
+	if (bytes[0] == 0 && bytes[1]&0x80 == 0) || (bytes[0] == 0xff && bytes[1]&0x80 == 0x80) {
+		return StructuralError{"integer not minimally-encoded", fieldName}
+	}
+	return nil
+}
+
+// parseInt64 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt64(bytes []byte, lax bool, fieldName string) (ret int64, err error) {
+	err = checkInteger(bytes, lax, fieldName)
+	if err != nil {
+		return
+	}
+	if len(bytes) > 8 {
+		// We'll overflow an int64 in this case.
+		err = StructuralError{"integer too large", fieldName}
+		return
+	}
+	for bytesRead := 0; bytesRead < len(bytes); bytesRead++ {
+		ret <<= 8
+		ret |= int64(bytes[bytesRead])
+	}
+
+	// Shift up and down in order to sign extend the result.
+	ret <<= 64 - uint8(len(bytes))*8
+	ret >>= 64 - uint8(len(bytes))*8
+	return
+}
+
+// parseInt32 treats the given bytes as a big-endian, signed integer and
+// returns the result.
+func parseInt32(bytes []byte, lax bool, fieldName string) (int32, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
+		return 0, err
+	}
+	ret64, err := parseInt64(bytes, lax, fieldName)
+	if err != nil {
+		return 0, err
+	}
+	if ret64 != int64(int32(ret64)) {
+		return 0, StructuralError{"integer too large", fieldName}
+	}
+	return int32(ret64), nil
+}
+
+var bigOne = big.NewInt(1)
+
+// parseBigInt treats the given bytes as a big-endian, signed integer and returns
+// the result.
+func parseBigInt(bytes []byte, lax bool, fieldName string) (*big.Int, error) {
+	if err := checkInteger(bytes, lax, fieldName); err != nil {
+		return nil, err
+	}
+	ret := new(big.Int)
+	if len(bytes) > 0 && bytes[0]&0x80 == 0x80 {
+		// This is a negative number.
+		notBytes := make([]byte, len(bytes))
+		for i := range notBytes {
+			notBytes[i] = ^bytes[i]
+		}
+		ret.SetBytes(notBytes)
+		ret.Add(ret, bigOne)
+		ret.Neg(ret)
+		return ret, nil
+	}
+	ret.SetBytes(bytes)
+	return ret, nil
+}
+
+// BIT STRING
+
+// BitString is the structure to use when you want an ASN.1 BIT STRING type. A
+// bit string is padded up to the nearest byte in memory and the number of
+// valid bits is recorded. Padding bits will be zero.
+type BitString struct {
+	Bytes     []byte // bits packed into bytes.
+	BitLength int    // length in bits.
+}
+
+// At returns the bit at the given index. If the index is out of range it
+// returns 0.
+func (b BitString) At(i int) int {
+	if i < 0 || i >= b.BitLength {
+		return 0
+	}
+	x := i / 8
+	y := 7 - uint(i%8)
+	return int(b.Bytes[x]>>y) & 1
+}
+
+// RightAlign returns a slice where the padding bits are at the beginning. The
+// slice may share memory with the BitString.
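+// For example, a BitString with Bytes {0xb0} and BitLength 4 right-aligns
+// to {0x0b}.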
+func (b BitString) RightAlign() []byte {
+	shift := uint(8 - (b.BitLength % 8))
+	if shift == 8 || len(b.Bytes) == 0 {
+		return b.Bytes
+	}
+
+	a := make([]byte, len(b.Bytes))
+	a[0] = b.Bytes[0] >> shift
+	for i := 1; i < len(b.Bytes); i++ {
+		a[i] = b.Bytes[i-1] << (8 - shift)
+		a[i] |= b.Bytes[i] >> shift
+	}
+
+	return a
+}
+
+// parseBitString parses an ASN.1 bit string from the given byte slice and returns it.
+func parseBitString(bytes []byte, fieldName string) (ret BitString, err error) {
+	if len(bytes) == 0 {
+		err = SyntaxError{"zero length BIT STRING", fieldName}
+		return
+	}
+	paddingBits := int(bytes[0])
+	if paddingBits > 7 ||
+		len(bytes) == 1 && paddingBits > 0 ||
+		bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {
+		err = SyntaxError{"invalid padding bits in BIT STRING", fieldName}
+		return
+	}
+	ret.BitLength = (len(bytes)-1)*8 - paddingBits
+	ret.Bytes = bytes[1:]
+	return
+}
+
+// OBJECT IDENTIFIER
+
+// An ObjectIdentifier represents an ASN.1 OBJECT IDENTIFIER.
+type ObjectIdentifier []int
+
+// Equal reports whether oi and other represent the same identifier.
+func (oi ObjectIdentifier) Equal(other ObjectIdentifier) bool {
+	if len(oi) != len(other) {
+		return false
+	}
+	for i := 0; i < len(oi); i++ {
+		if oi[i] != other[i] {
+			return false
+		}
+	}
+
+	return true
+}
+
+func (oi ObjectIdentifier) String() string {
+	var s string
+
+	for i, v := range oi {
+		if i > 0 {
+			s += "."
+		}
+		s += strconv.Itoa(v)
+	}
+
+	return s
+}
+
+// parseObjectIdentifier parses an OBJECT IDENTIFIER from the given bytes and
+// returns it. An object identifier is a sequence of variable length integers
+// that are assigned in a hierarchy.
+func parseObjectIdentifier(bytes []byte, lax bool, fieldName string) (s ObjectIdentifier, err error) {
+	if len(bytes) == 0 {
+		if lax {
+			return ObjectIdentifier{}, nil
+		}
+		err = SyntaxError{"zero length OBJECT IDENTIFIER", fieldName}
+		return
+	}
+
+	// In the worst case, we get two elements from the first byte (which is
+	// encoded differently) and then every varint is a single byte long.
+	s = make([]int, len(bytes)+1)
+
+	// The first varint is 40*value1 + value2:
+	// According to this packing, value1 can take the values 0, 1 and 2 only.
+	// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
+	// then there are no restrictions on value2.
+	v, offset, err := parseBase128Int(bytes, 0, fieldName)
+	if err != nil {
+		return
+	}
+	if v < 80 {
+		s[0] = v / 40
+		s[1] = v % 40
+	} else {
+		s[0] = 2
+		s[1] = v - 80
+	}
+
+	i := 2
+	for ; offset < len(bytes); i++ {
+		v, offset, err = parseBase128Int(bytes, offset, fieldName)
+		if err != nil {
+			return
+		}
+		s[i] = v
+	}
+	s = s[0:i]
+	return
+}
+
+// ENUMERATED
+
+// An Enumerated is represented as a plain int.
+type Enumerated int
+
+// FLAG
+
+// A Flag accepts any data and is set to true if present.
+type Flag bool
+
+// parseBase128Int parses a base-128 encoded int from the given offset in the
+// given byte slice. It returns the value and the new offset.
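+// For example, the bytes {0x81, 0x01} decode to the value 129.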
+func parseBase128Int(bytes []byte, initOffset int, fieldName string) (ret, offset int, err error) { + offset = initOffset + var ret64 int64 + for shifted := 0; offset < len(bytes); shifted++ { + // 5 * 7 bits per byte == 35 bits of data + // Thus the representation is either non-minimal or too large for an int32 + if shifted == 5 { + err = StructuralError{"base 128 integer too large", fieldName} + return + } + ret64 <<= 7 + b := bytes[offset] + ret64 |= int64(b & 0x7f) + offset++ + if b&0x80 == 0 { + ret = int(ret64) + // Ensure that the returned value fits in an int on all platforms + if ret64 > math.MaxInt32 { + err = StructuralError{"base 128 integer too large", fieldName} + } + return + } + } + err = SyntaxError{"truncated base 128 integer", fieldName} + return +} + +// UTCTime + +func parseUTCTime(bytes []byte) (ret time.Time, err error) { + s := string(bytes) + + formatStr := "0601021504Z0700" + ret, err = time.Parse(formatStr, s) + if err != nil { + formatStr = "060102150405Z0700" + ret, err = time.Parse(formatStr, s) + } + if err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + return + } + + if ret.Year() >= 2050 { + // UTCTime only encodes times prior to 2050. See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1 + ret = ret.AddDate(-100, 0, 0) + } + + return +} + +// parseGeneralizedTime parses the GeneralizedTime from the given byte slice +// and returns the resulting time. +func parseGeneralizedTime(bytes []byte) (ret time.Time, err error) { + const formatStr = "20060102150405Z0700" + s := string(bytes) + + if ret, err = time.Parse(formatStr, s); err != nil { + return + } + + if serialized := ret.Format(formatStr); serialized != s { + err = fmt.Errorf("asn1: time did not serialize back to the original value and may be invalid: given %q, but serialized as %q", s, serialized) + } + + return +} + +// NumericString + +// parseNumericString parses an ASN.1 NumericString from the given byte array +// and returns it. +func parseNumericString(bytes []byte, fieldName string) (ret string, err error) { + for _, b := range bytes { + if !isNumeric(b) { + return "", SyntaxError{"NumericString contains invalid character", fieldName} + } + } + return string(bytes), nil +} + +// isNumeric reports whether the given b is in the ASN.1 NumericString set. +func isNumeric(b byte) bool { + return '0' <= b && b <= '9' || + b == ' ' +} + +// PrintableString + +// parsePrintableString parses an ASN.1 PrintableString from the given byte +// array and returns it. 
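+// In lax mode, contents that are not valid PrintableString are re-interpreted
+// as ISO8859-1 or T.61 where plausible instead of being rejected.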
+func parsePrintableString(bytes []byte, lax bool, fieldName string) (ret string, err error) { + for _, b := range bytes { + if !isPrintable(b, allowAsterisk, allowAmpersand) { + if !lax { + err = SyntaxError{"PrintableString contains invalid character", fieldName} + } else { + // Might be an ISO8859-1 string stuffed in, check if it + // would be valid and assume that's what's happened if so, + // otherwise try T.61, failing that give up and just assign + // the bytes + switch { + case couldBeISO8859_1(bytes): + ret, err = iso8859_1ToUTF8(bytes), nil + case couldBeT61(bytes): + ret, err = parseT61String(bytes) + default: + err = SyntaxError{"PrintableString contains invalid character, couldn't determine correct String type", fieldName} + } + } + return + } + } + ret = string(bytes) + return +} + +type asteriskFlag bool +type ampersandFlag bool + +const ( + allowAsterisk asteriskFlag = true + rejectAsterisk asteriskFlag = false + + allowAmpersand ampersandFlag = true + rejectAmpersand ampersandFlag = false +) + +// isPrintable reports whether the given b is in the ASN.1 PrintableString set. +// If asterisk is allowAsterisk then '*' is also allowed, reflecting existing +// practice. If ampersand is allowAmpersand then '&' is allowed as well. +func isPrintable(b byte, asterisk asteriskFlag, ampersand ampersandFlag) bool { + return 'a' <= b && b <= 'z' || + 'A' <= b && b <= 'Z' || + '0' <= b && b <= '9' || + '\'' <= b && b <= ')' || + '+' <= b && b <= '/' || + b == ' ' || + b == ':' || + b == '=' || + b == '?' || + // This is technically not allowed in a PrintableString. + // However, x509 certificates with wildcard strings don't + // always use the correct string type so we permit it. + (bool(asterisk) && b == '*') || + // This is not technically allowed either. However, not + // only is it relatively common, but there are also a + // handful of CA certificates that contain it. At least + // one of which will not expire until 2027. + (bool(ampersand) && b == '&') +} + +// IA5String + +// parseIA5String parses an ASN.1 IA5String (ASCII string) from the given +// byte slice and returns it. +func parseIA5String(bytes []byte, fieldName string) (ret string, err error) { + for _, b := range bytes { + if b >= utf8.RuneSelf { + err = SyntaxError{"IA5String contains invalid character", fieldName} + return + } + } + ret = string(bytes) + return +} + +// T61String + +// parseT61String parses an ASN.1 T61String (8-bit clean string) from the given +// byte slice and returns it. +func parseT61String(bytes []byte) (ret string, err error) { + return string(bytes), nil +} + +// UTF8String + +// parseUTF8String parses an ASN.1 UTF8String (raw UTF-8) from the given byte +// array and returns it. +func parseUTF8String(bytes []byte) (ret string, err error) { + if !utf8.Valid(bytes) { + return "", errors.New("asn1: invalid UTF-8 string") + } + return string(bytes), nil +} + +// A RawValue represents an undecoded ASN.1 object. +type RawValue struct { + Class, Tag int + IsCompound bool + Bytes []byte + FullBytes []byte // includes the tag and length +} + +// RawContent is used to signal that the undecoded, DER data needs to be +// preserved for a struct. To use it, the first field of the struct must have +// this type. It's an error for any of the other fields to have this type. +type RawContent []byte + +// Tagging + +// parseTagAndLength parses an ASN.1 tag and length pair from the given offset +// into a byte slice. It returns the parsed data and the new offset. 
SET and +// SET OF (tag 17) are mapped to SEQUENCE and SEQUENCE OF (tag 16) since we +// don't distinguish between ordered and unordered objects in this code. +func parseTagAndLength(bytes []byte, initOffset int, fieldName string) (ret tagAndLength, offset int, err error) { + offset = initOffset + // parseTagAndLength should not be called without at least a single + // byte to read. Thus this check is for robustness: + if offset >= len(bytes) { + err = errors.New("asn1: internal error in parseTagAndLength") + return + } + b := bytes[offset] + offset++ + ret.class = int(b >> 6) + ret.isCompound = b&0x20 == 0x20 + ret.tag = int(b & 0x1f) + + // If the bottom five bits are set, then the tag number is actually base 128 + // encoded afterwards + if ret.tag == 0x1f { + ret.tag, offset, err = parseBase128Int(bytes, offset, fieldName) + if err != nil { + return + } + // Tags should be encoded in minimal form. + if ret.tag < 0x1f { + err = SyntaxError{"non-minimal tag", fieldName} + return + } + } + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length", fieldName} + return + } + b = bytes[offset] + offset++ + if b&0x80 == 0 { + // The length is encoded in the bottom 7 bits. + ret.length = int(b & 0x7f) + } else { + // Bottom 7 bits give the number of length bytes to follow. + numBytes := int(b & 0x7f) + if numBytes == 0 { + err = SyntaxError{"indefinite length found (not DER)", fieldName} + return + } + ret.length = 0 + for i := 0; i < numBytes; i++ { + if offset >= len(bytes) { + err = SyntaxError{"truncated tag or length", fieldName} + return + } + b = bytes[offset] + offset++ + if ret.length >= 1<<23 { + // We can't shift ret.length up without + // overflowing. + err = StructuralError{"length too large", fieldName} + return + } + ret.length <<= 8 + ret.length |= int(b) + if ret.length == 0 { + // DER requires that lengths be minimal. + err = StructuralError{"superfluous leading zeros in length", fieldName} + return + } + } + // Short lengths must be encoded in short form. + if ret.length < 0x80 { + err = StructuralError{"non-minimal length", fieldName} + return + } + } + + return +} + +// parseSequenceOf is used for SEQUENCE OF and SET OF values. It tries to parse +// a number of ASN.1 values from the given byte slice and returns them as a +// slice of Go values of the given type. +func parseSequenceOf(bytes []byte, sliceType reflect.Type, elemType reflect.Type, lax bool, fieldName string) (ret reflect.Value, err error) { + matchAny, expectedTag, compoundType, ok := getUniversalType(elemType) + if !ok { + err = StructuralError{"unknown Go type for slice", fieldName} + return + } + + // First we iterate over the input and count the number of elements, + // checking that the types are correct in each case. + numElements := 0 + for offset := 0; offset < len(bytes); { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset, fieldName) + if err != nil { + return + } + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString: + // We pretend that various other string types are + // PRINTABLE STRINGs so that a sequence of them can be + // parsed into a []string. + t.tag = TagPrintableString + case TagGeneralizedTime, TagUTCTime: + // Likewise, both time types are treated the same. 
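+			// (This folding only serves the element-type check in this
+			// counting pass; parseField below still reads the real tag
+			// from the wire.)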
+ t.tag = TagUTCTime + } + + if !matchAny && (t.class != ClassUniversal || t.isCompound != compoundType || t.tag != expectedTag) { + err = StructuralError{fmt.Sprintf("sequence tag mismatch (got:%+v, want:0/%d/%t)", t, expectedTag, compoundType), fieldName} + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"truncated sequence", fieldName} + return + } + offset += t.length + numElements++ + } + ret = reflect.MakeSlice(sliceType, numElements, numElements) + params := fieldParameters{lax: lax} + offset := 0 + for i := 0; i < numElements; i++ { + offset, err = parseField(ret.Index(i), bytes, offset, params) + if err != nil { + return + } + } + return +} + +var ( + bitStringType = reflect.TypeOf(BitString{}) + objectIdentifierType = reflect.TypeOf(ObjectIdentifier{}) + enumeratedType = reflect.TypeOf(Enumerated(0)) + flagType = reflect.TypeOf(Flag(false)) + timeType = reflect.TypeOf(time.Time{}) + rawValueType = reflect.TypeOf(RawValue{}) + rawContentsType = reflect.TypeOf(RawContent(nil)) + bigIntType = reflect.TypeOf(new(big.Int)) +) + +// invalidLength reports whether offset + length > sliceLength, or if the +// addition would overflow. +func invalidLength(offset, length, sliceLength int) bool { + return offset+length < offset || offset+length > sliceLength +} + +// Tests whether the data in |bytes| would be a valid ISO8859-1 string. +// Clearly, a sequence of bytes comprised solely of valid ISO8859-1 +// codepoints does not imply that the encoding MUST be ISO8859-1, rather that +// you would not encounter an error trying to interpret the data as such. +func couldBeISO8859_1(bytes []byte) bool { + for _, b := range bytes { + if b < 0x20 || (b >= 0x7F && b < 0xA0) { + return false + } + } + return true +} + +// Checks whether the data in |bytes| would be a valid T.61 string. +// Clearly, a sequence of bytes comprised solely of valid T.61 +// codepoints does not imply that the encoding MUST be T.61, rather that +// you would not encounter an error trying to interpret the data as such. +func couldBeT61(bytes []byte) bool { + for _, b := range bytes { + switch b { + case 0x00: + // Since we're guessing at (incorrect) encodings for a + // PrintableString, we'll err on the side of caution and disallow + // strings with a NUL in them, don't want to re-create a PayPal NUL + // situation in monitors. + fallthrough + case 0x23, 0x24, 0x5C, 0x5E, 0x60, 0x7B, 0x7D, 0x7E, 0xA5, 0xA6, 0xAC, 0xAD, 0xAE, 0xAF, + 0xB9, 0xBA, 0xC0, 0xC9, 0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, + 0xDA, 0xDB, 0xDC, 0xDE, 0xDF, 0xE5, 0xFF: + // These are all invalid code points in T.61, so it can't be a T.61 string. + return false + } + } + return true +} + +// Converts the data in |bytes| to the equivalent UTF-8 string. +func iso8859_1ToUTF8(bytes []byte) string { + buf := make([]rune, len(bytes)) + for i, b := range bytes { + buf[i] = rune(b) + } + return string(buf) +} + +// parseField is the main parsing function. Given a byte slice and an offset +// into the array, it will try to parse a suitable ASN.1 value out and store it +// in the given Value. +func parseField(v reflect.Value, bytes []byte, initOffset int, params fieldParameters) (offset int, err error) { + offset = initOffset + fieldType := v.Type() + + // If we have run out of data, it may be that there are optional elements at the end. + if offset == len(bytes) { + if !setDefaultValue(v, params) { + err = SyntaxError{"sequence truncated", params.name} + } + return + } + + // Deal with the ANY type. 
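+	// An empty interface matches any element: universal primitives are
+	// decoded into their natural Go types below, and unrecognized tags
+	// leave the resulting value as nil.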
+ if ifaceType := fieldType; ifaceType.Kind() == reflect.Interface && ifaceType.NumMethod() == 0 { + var t tagAndLength + t, offset, err = parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated", params.name} + return + } + var result interface{} + if !t.isCompound && t.class == ClassUniversal { + innerBytes := bytes[offset : offset+t.length] + switch t.tag { + case TagPrintableString: + result, err = parsePrintableString(innerBytes, params.lax, params.name) + case TagNumericString: + result, err = parseNumericString(innerBytes, params.name) + case TagIA5String: + result, err = parseIA5String(innerBytes, params.name) + case TagT61String: + result, err = parseT61String(innerBytes) + case TagUTF8String: + result, err = parseUTF8String(innerBytes) + case TagInteger: + result, err = parseInt64(innerBytes, params.lax, params.name) + case TagBitString: + result, err = parseBitString(innerBytes, params.name) + case TagOID: + result, err = parseObjectIdentifier(innerBytes, params.lax, params.name) + case TagUTCTime: + result, err = parseUTCTime(innerBytes) + case TagGeneralizedTime: + result, err = parseGeneralizedTime(innerBytes) + case TagOctetString: + result = innerBytes + default: + // If we don't know how to handle the type, we just leave Value as nil. + } + } + offset += t.length + if err != nil { + return + } + if result != nil { + v.Set(reflect.ValueOf(result)) + } + return + } + + t, offset, err := parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + if params.explicit { + expectedClass := ClassContextSpecific + if params.application { + expectedClass = ClassApplication + } + if offset == len(bytes) { + err = StructuralError{"explicit tag has no child", params.name} + return + } + if t.class == expectedClass && t.tag == *params.tag && (t.length == 0 || t.isCompound) { + if fieldType == rawValueType { + // The inner element should not be parsed for RawValues. + } else if t.length > 0 { + t, offset, err = parseTagAndLength(bytes, offset, params.name) + if err != nil { + return + } + } else { + if fieldType != flagType { + err = StructuralError{"zero length explicit tag was not an asn1.Flag", params.name} + return + } + v.SetBool(true) + return + } + } else { + // The tags didn't match, it might be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{"explicitly tagged member didn't match", params.name} + } + return + } + } + + matchAny, universalTag, compoundType, ok1 := getUniversalType(fieldType) + if !ok1 { + err = StructuralError{fmt.Sprintf("unknown Go type: %v", fieldType), params.name} + return + } + + // Special case for strings: all the ASN.1 string types map to the Go + // type string. getUniversalType returns the tag for PrintableString + // when it sees a string, so if we see a different string type on the + // wire, we change the universal type to match. + if universalTag == TagPrintableString { + if t.class == ClassUniversal { + switch t.tag { + case TagIA5String, TagGeneralString, TagT61String, TagUTF8String, TagNumericString: + universalTag = t.tag + } + } else if params.stringType != 0 { + universalTag = params.stringType + } + } + + // Special case for time: UTCTime and GeneralizedTime both map to the + // Go type time.Time. 
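+	// If the wire carries a GeneralizedTime where a time.Time is expected,
+	// adjust the expected universal tag so the match below succeeds.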
+ if universalTag == TagUTCTime && t.tag == TagGeneralizedTime && t.class == ClassUniversal { + universalTag = TagGeneralizedTime + } + + if params.set { + universalTag = TagSet + } + + matchAnyClassAndTag := matchAny + expectedClass := ClassUniversal + expectedTag := universalTag + + if !params.explicit && params.tag != nil { + expectedClass = ClassContextSpecific + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + if !params.explicit && params.application && params.tag != nil { + expectedClass = ClassApplication + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + if !params.explicit && params.private && params.tag != nil { + expectedClass = ClassPrivate + expectedTag = *params.tag + matchAnyClassAndTag = false + } + + // We have unwrapped any explicit tagging at this point. + if !matchAnyClassAndTag && (t.class != expectedClass || t.tag != expectedTag) || + (!matchAny && t.isCompound != compoundType) { + // Tags don't match. Again, it could be an optional element. + ok := setDefaultValue(v, params) + if ok { + offset = initOffset + } else { + err = StructuralError{fmt.Sprintf("tags don't match (%d vs %+v) %+v %s @%d", expectedTag, t, params, fieldType.Name(), offset), params.name} + } + return + } + if invalidLength(offset, t.length, len(bytes)) { + err = SyntaxError{"data truncated", params.name} + return + } + innerBytes := bytes[offset : offset+t.length] + offset += t.length + + // We deal with the structures defined in this package first. + switch fieldType { + case rawValueType: + result := RawValue{t.class, t.tag, t.isCompound, innerBytes, bytes[initOffset:offset]} + v.Set(reflect.ValueOf(result)) + return + case objectIdentifierType: + newSlice, err1 := parseObjectIdentifier(innerBytes, params.lax, params.name) + v.Set(reflect.MakeSlice(v.Type(), len(newSlice), len(newSlice))) + if err1 == nil { + reflect.Copy(v, reflect.ValueOf(newSlice)) + } + err = err1 + return + case bitStringType: + bs, err1 := parseBitString(innerBytes, params.name) + if err1 == nil { + v.Set(reflect.ValueOf(bs)) + } + err = err1 + return + case timeType: + var time time.Time + var err1 error + if universalTag == TagUTCTime { + time, err1 = parseUTCTime(innerBytes) + } else { + time, err1 = parseGeneralizedTime(innerBytes) + } + if err1 == nil { + v.Set(reflect.ValueOf(time)) + } + err = err1 + return + case enumeratedType: + parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name) + if err1 == nil { + v.SetInt(int64(parsedInt)) + } + err = err1 + return + case flagType: + v.SetBool(true) + return + case bigIntType: + parsedInt, err1 := parseBigInt(innerBytes, params.lax, params.name) + if err1 == nil { + v.Set(reflect.ValueOf(parsedInt)) + } + err = err1 + return + } + switch val := v; val.Kind() { + case reflect.Bool: + parsedBool, err1 := parseBool(innerBytes, params.name) + if err1 == nil { + val.SetBool(parsedBool) + } + err = err1 + return + case reflect.Int, reflect.Int32, reflect.Int64: + if val.Type().Size() == 4 { + parsedInt, err1 := parseInt32(innerBytes, params.lax, params.name) + if err1 == nil { + val.SetInt(int64(parsedInt)) + } + err = err1 + } else { + parsedInt, err1 := parseInt64(innerBytes, params.lax, params.name) + if err1 == nil { + val.SetInt(parsedInt) + } + err = err1 + } + return + // TODO(dfc) Add support for the remaining integer types + case reflect.Struct: + structType := fieldType + + for i := 0; i < structType.NumField(); i++ { + if structType.Field(i).PkgPath != "" { + err = StructuralError{"struct contains unexported fields", 
structType.Field(i).Name} + return + } + } + + if structType.NumField() > 0 && + structType.Field(0).Type == rawContentsType { + bytes := bytes[initOffset:offset] + val.Field(0).Set(reflect.ValueOf(RawContent(bytes))) + } + + innerOffset := 0 + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + if i == 0 && field.Type == rawContentsType { + continue + } + innerParams := parseFieldParameters(field.Tag.Get("asn1")) + innerParams.name = field.Name + innerParams.lax = params.lax + innerOffset, err = parseField(val.Field(i), innerBytes, innerOffset, innerParams) + if err != nil { + return + } + } + // We allow extra bytes at the end of the SEQUENCE because + // adding elements to the end has been used in X.509 as the + // version numbers have increased. + return + case reflect.Slice: + sliceType := fieldType + if sliceType.Elem().Kind() == reflect.Uint8 { + val.Set(reflect.MakeSlice(sliceType, len(innerBytes), len(innerBytes))) + reflect.Copy(val, reflect.ValueOf(innerBytes)) + return + } + newSlice, err1 := parseSequenceOf(innerBytes, sliceType, sliceType.Elem(), params.lax, params.name) + if err1 == nil { + val.Set(newSlice) + } + err = err1 + return + case reflect.String: + var v string + switch universalTag { + case TagPrintableString: + v, err = parsePrintableString(innerBytes, params.lax, params.name) + case TagNumericString: + v, err = parseNumericString(innerBytes, params.name) + case TagIA5String: + v, err = parseIA5String(innerBytes, params.name) + case TagT61String: + v, err = parseT61String(innerBytes) + case TagUTF8String: + v, err = parseUTF8String(innerBytes) + case TagGeneralString: + // GeneralString is specified in ISO-2022/ECMA-35, + // A brief review suggests that it includes structures + // that allow the encoding to change midstring and + // such. We give up and pass it as an 8-bit string. + v, err = parseT61String(innerBytes) + default: + err = SyntaxError{fmt.Sprintf("internal error: unknown string type %d", universalTag), params.name} + } + if err == nil { + val.SetString(v) + } + return + } + err = StructuralError{"unsupported: " + v.Type().String(), params.name} + return +} + +// canHaveDefaultValue reports whether k is a Kind that we will set a default +// value for. (A signed integer, essentially.) +func canHaveDefaultValue(k reflect.Kind) bool { + switch k { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return true + } + + return false +} + +// setDefaultValue is used to install a default value, from a tag string, into +// a Value. It is successful if the field was optional, even if a default value +// wasn't provided or it failed to install it into the Value. +func setDefaultValue(v reflect.Value, params fieldParameters) (ok bool) { + if !params.optional { + return + } + ok = true + if params.defaultValue == nil { + return + } + if canHaveDefaultValue(v.Kind()) { + v.SetInt(*params.defaultValue) + } + return +} + +// Unmarshal parses the DER-encoded ASN.1 data structure b +// and uses the reflect package to fill in an arbitrary value pointed at by val. +// Because Unmarshal uses the reflect package, the structs +// being written to must use upper case field names. +// +// An ASN.1 INTEGER can be written to an int, int32, int64, +// or *big.Int (from the math/big package). +// If the encoded value does not fit in the Go type, +// Unmarshal returns a parse error. +// +// An ASN.1 BIT STRING can be written to a BitString. +// +// An ASN.1 OCTET STRING can be written to a []byte. 
+// +// An ASN.1 OBJECT IDENTIFIER can be written to an +// ObjectIdentifier. +// +// An ASN.1 ENUMERATED can be written to an Enumerated. +// +// An ASN.1 UTCTIME or GENERALIZEDTIME can be written to a time.Time. +// +// An ASN.1 PrintableString, IA5String, or NumericString can be written to a string. +// +// Any of the above ASN.1 values can be written to an interface{}. +// The value stored in the interface has the corresponding Go type. +// For integers, that type is int64. +// +// An ASN.1 SEQUENCE OF x or SET OF x can be written +// to a slice if an x can be written to the slice's element type. +// +// An ASN.1 SEQUENCE or SET can be written to a struct +// if each of the elements in the sequence can be +// written to the corresponding element in the struct. +// +// The following tags on struct fields have special meaning to Unmarshal: +// +// application specifies that an APPLICATION tag is used +// private specifies that a PRIVATE tag is used +// default:x sets the default value for optional integer fields (only used if optional is also present) +// explicit specifies that an additional, explicit tag wraps the implicit one +// optional marks the field as ASN.1 OPTIONAL +// set causes a SET, rather than a SEQUENCE type to be expected +// tag:x specifies the ASN.1 tag number; implies ASN.1 CONTEXT SPECIFIC +// lax relax strict encoding checks for this field, and for any fields within it +// +// If the type of the first field of a structure is RawContent then the raw +// ASN1 contents of the struct will be stored in it. +// +// If the type name of a slice element ends with "SET" then it's treated as if +// the "set" tag was set on it. This can be used with nested slices where a +// struct tag cannot be given. +// +// Other ASN.1 types are not supported; if it encounters them, +// Unmarshal returns a parse error. +func Unmarshal(b []byte, val interface{}) (rest []byte, err error) { + return UnmarshalWithParams(b, val, "") +} + +// UnmarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. +func UnmarshalWithParams(b []byte, val interface{}, params string) (rest []byte, err error) { + v := reflect.ValueOf(val).Elem() + offset, err := parseField(v, b, 0, parseFieldParameters(params)) + if err != nil { + return nil, err + } + return b[offset:], nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/common.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/common.go new file mode 100644 index 00000000..2e117169 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/common.go @@ -0,0 +1,186 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "reflect" + "strconv" + "strings" +) + +// ASN.1 objects have metadata preceding them: +// the tag: the type of the object +// a flag denoting if this object is compound or not +// the class type: the namespace of the tag +// the length of the object, in bytes + +// Here are some standard tags and classes + +// ASN.1 tags represent the type of the following object. 
+const ( + TagBoolean = 1 + TagInteger = 2 + TagBitString = 3 + TagOctetString = 4 + TagNull = 5 + TagOID = 6 + TagEnum = 10 + TagUTF8String = 12 + TagSequence = 16 + TagSet = 17 + TagNumericString = 18 + TagPrintableString = 19 + TagT61String = 20 + TagIA5String = 22 + TagUTCTime = 23 + TagGeneralizedTime = 24 + TagGeneralString = 27 +) + +// ASN.1 class types represent the namespace of the tag. +const ( + ClassUniversal = 0 + ClassApplication = 1 + ClassContextSpecific = 2 + ClassPrivate = 3 +) + +type tagAndLength struct { + class, tag, length int + isCompound bool +} + +// ASN.1 has IMPLICIT and EXPLICIT tags, which can be translated as "instead +// of" and "in addition to". When not specified, every primitive type has a +// default tag in the UNIVERSAL class. +// +// For example: a BIT STRING is tagged [UNIVERSAL 3] by default (although ASN.1 +// doesn't actually have a UNIVERSAL keyword). However, by saying [IMPLICIT +// CONTEXT-SPECIFIC 42], that means that the tag is replaced by another. +// +// On the other hand, if it said [EXPLICIT CONTEXT-SPECIFIC 10], then an +// /additional/ tag would wrap the default tag. This explicit tag will have the +// compound flag set. +// +// (This is used in order to remove ambiguity with optional elements.) +// +// You can layer EXPLICIT and IMPLICIT tags to an arbitrary depth, however we +// don't support that here. We support a single layer of EXPLICIT or IMPLICIT +// tagging with tag strings on the fields of a structure. + +// fieldParameters is the parsed representation of tag string from a structure field. +type fieldParameters struct { + optional bool // true iff the field is OPTIONAL + explicit bool // true iff an EXPLICIT tag is in use. + application bool // true iff an APPLICATION tag is in use. + private bool // true iff a PRIVATE tag is in use. + defaultValue *int64 // a default value for INTEGER typed fields (maybe nil). + tag *int // the EXPLICIT or IMPLICIT tag (maybe nil). + stringType int // the string tag to use when marshaling. + timeType int // the time tag to use when marshaling. + set bool // true iff this should be encoded as a SET + omitEmpty bool // true iff this should be omitted if empty when marshaling. + lax bool // true iff unmarshalling should skip some error checks + name string // name of field for better diagnostics + + // Invariants: + // if explicit is set, tag is non-nil. +} + +// Given a tag string with the format specified in the package comment, +// parseFieldParameters will parse it into a fieldParameters structure, +// ignoring unknown parts of the string. 
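+//
+// For example, the tag string "optional,explicit,tag:0" produces a
+// fieldParameters with optional and explicit set and tag pointing at 0.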
+func parseFieldParameters(str string) (ret fieldParameters) { + for _, part := range strings.Split(str, ",") { + switch { + case part == "optional": + ret.optional = true + case part == "explicit": + ret.explicit = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "generalized": + ret.timeType = TagGeneralizedTime + case part == "utc": + ret.timeType = TagUTCTime + case part == "ia5": + ret.stringType = TagIA5String + case part == "printable": + ret.stringType = TagPrintableString + case part == "numeric": + ret.stringType = TagNumericString + case part == "utf8": + ret.stringType = TagUTF8String + case strings.HasPrefix(part, "default:"): + i, err := strconv.ParseInt(part[8:], 10, 64) + if err == nil { + ret.defaultValue = new(int64) + *ret.defaultValue = i + } + case strings.HasPrefix(part, "tag:"): + i, err := strconv.Atoi(part[4:]) + if err == nil { + ret.tag = new(int) + *ret.tag = i + } + case part == "set": + ret.set = true + case part == "application": + ret.application = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "private": + ret.private = true + if ret.tag == nil { + ret.tag = new(int) + } + case part == "omitempty": + ret.omitEmpty = true + case part == "lax": + ret.lax = true + } + } + return +} + +// Given a reflected Go type, getUniversalType returns the default tag number +// and expected compound flag. +func getUniversalType(t reflect.Type) (matchAny bool, tagNumber int, isCompound, ok bool) { + switch t { + case rawValueType: + return true, -1, false, true + case objectIdentifierType: + return false, TagOID, false, true + case bitStringType: + return false, TagBitString, false, true + case timeType: + return false, TagUTCTime, false, true + case enumeratedType: + return false, TagEnum, false, true + case bigIntType: + return false, TagInteger, false, true + } + switch t.Kind() { + case reflect.Bool: + return false, TagBoolean, false, true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return false, TagInteger, false, true + case reflect.Struct: + return false, TagSequence, true, true + case reflect.Slice: + if t.Elem().Kind() == reflect.Uint8 { + return false, TagOctetString, false, true + } + if strings.HasSuffix(t.Name(), "SET") { + return false, TagSet, true, true + } + return false, TagSequence, true, true + case reflect.String: + return false, TagPrintableString, false, true + } + return false, 0, false, false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go new file mode 100644 index 00000000..9801b065 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/asn1/marshal.go @@ -0,0 +1,691 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package asn1 + +import ( + "errors" + "fmt" + "math/big" + "reflect" + "time" + "unicode/utf8" +) + +var ( + byte00Encoder encoder = byteEncoder(0x00) + byteFFEncoder encoder = byteEncoder(0xff) +) + +// encoder represents an ASN.1 element that is waiting to be marshaled. +type encoder interface { + // Len returns the number of bytes needed to marshal this element. + Len() int + // Encode encodes this element by writing Len() bytes to dst. 
+	Encode(dst []byte)
+}
+
+type byteEncoder byte
+
+func (c byteEncoder) Len() int {
+	return 1
+}
+
+func (c byteEncoder) Encode(dst []byte) {
+	dst[0] = byte(c)
+}
+
+type bytesEncoder []byte
+
+func (b bytesEncoder) Len() int {
+	return len(b)
+}
+
+func (b bytesEncoder) Encode(dst []byte) {
+	if copy(dst, b) != len(b) {
+		panic("internal error")
+	}
+}
+
+type stringEncoder string
+
+func (s stringEncoder) Len() int {
+	return len(s)
+}
+
+func (s stringEncoder) Encode(dst []byte) {
+	if copy(dst, s) != len(s) {
+		panic("internal error")
+	}
+}
+
+type multiEncoder []encoder
+
+func (m multiEncoder) Len() int {
+	var size int
+	for _, e := range m {
+		size += e.Len()
+	}
+	return size
+}
+
+func (m multiEncoder) Encode(dst []byte) {
+	var off int
+	for _, e := range m {
+		e.Encode(dst[off:])
+		off += e.Len()
+	}
+}
+
+type taggedEncoder struct {
+	// scratch contains temporary space for encoding the tag and length of
+	// an element in order to avoid extra allocations.
+	scratch [8]byte
+	tag     encoder
+	body    encoder
+}
+
+func (t *taggedEncoder) Len() int {
+	return t.tag.Len() + t.body.Len()
+}
+
+func (t *taggedEncoder) Encode(dst []byte) {
+	t.tag.Encode(dst)
+	t.body.Encode(dst[t.tag.Len():])
+}
+
+type int64Encoder int64
+
+func (i int64Encoder) Len() int {
+	n := 1
+
+	for i > 127 {
+		n++
+		i >>= 8
+	}
+
+	for i < -128 {
+		n++
+		i >>= 8
+	}
+
+	return n
+}
+
+func (i int64Encoder) Encode(dst []byte) {
+	n := i.Len()
+
+	for j := 0; j < n; j++ {
+		dst[j] = byte(i >> uint((n-1-j)*8))
+	}
+}
+
+func base128IntLength(n int64) int {
+	if n == 0 {
+		return 1
+	}
+
+	l := 0
+	for i := n; i > 0; i >>= 7 {
+		l++
+	}
+
+	return l
+}
+
+func appendBase128Int(dst []byte, n int64) []byte {
+	l := base128IntLength(n)
+
+	for i := l - 1; i >= 0; i-- {
+		o := byte(n >> uint(i*7))
+		o &= 0x7f
+		if i != 0 {
+			o |= 0x80
+		}
+
+		dst = append(dst, o)
+	}
+
+	return dst
+}
+
+func makeBigInt(n *big.Int, fieldName string) (encoder, error) {
+	if n == nil {
+		return nil, StructuralError{"empty integer", fieldName}
+	}
+
+	if n.Sign() < 0 {
+		// A negative number has to be converted to two's-complement
+		// form. So we'll invert and subtract 1. If the
+		// most-significant-bit isn't set then we'll need to pad the
+		// beginning with 0xff in order to keep the number negative.
+		nMinus1 := new(big.Int).Neg(n)
+		nMinus1.Sub(nMinus1, bigOne)
+		bytes := nMinus1.Bytes()
+		for i := range bytes {
+			bytes[i] ^= 0xff
+		}
+		if len(bytes) == 0 || bytes[0]&0x80 == 0 {
+			return multiEncoder([]encoder{byteFFEncoder, bytesEncoder(bytes)}), nil
+		}
+		return bytesEncoder(bytes), nil
+	} else if n.Sign() == 0 {
+		// Zero is written as a single zero byte rather than no bytes.
+		return byte00Encoder, nil
+	} else {
+		bytes := n.Bytes()
+		if len(bytes) > 0 && bytes[0]&0x80 != 0 {
+			// We'll have to pad this with 0x00 in order to stop it
+			// looking like a negative number.
+			return multiEncoder([]encoder{byte00Encoder, bytesEncoder(bytes)}), nil
+		}
+		return bytesEncoder(bytes), nil
+	}
+}
+
+func appendLength(dst []byte, i int) []byte {
+	n := lengthLength(i)
+
+	for ; n > 0; n-- {
+		dst = append(dst, byte(i>>uint((n-1)*8)))
+	}
+
+	return dst
+}
+
+func lengthLength(i int) (numBytes int) {
+	numBytes = 1
+	for i > 255 {
+		numBytes++
+		i >>= 8
+	}
+	return
+}
+
+func appendTagAndLength(dst []byte, t tagAndLength) []byte {
+	b := uint8(t.class) << 6
+	if t.isCompound {
+		b |= 0x20
+	}
+	if t.tag >= 31 {
+		b |= 0x1f
+		dst = append(dst, b)
+		dst = appendBase128Int(dst, int64(t.tag))
+	} else {
+		b |= uint8(t.tag)
+		dst = append(dst, b)
+	}
+
+	if t.length >= 128 {
+		l := lengthLength(t.length)
+		dst = append(dst, 0x80|byte(l))
+		dst = appendLength(dst, t.length)
+	} else {
+		dst = append(dst, byte(t.length))
+	}
+
+	return dst
+}
+
+type bitStringEncoder BitString
+
+func (b bitStringEncoder) Len() int {
+	return len(b.Bytes) + 1
+}
+
+func (b bitStringEncoder) Encode(dst []byte) {
+	dst[0] = byte((8 - b.BitLength%8) % 8)
+	if copy(dst[1:], b.Bytes) != len(b.Bytes) {
+		panic("internal error")
+	}
+}
+
+type oidEncoder []int
+
+func (oid oidEncoder) Len() int {
+	l := base128IntLength(int64(oid[0]*40 + oid[1]))
+	for i := 2; i < len(oid); i++ {
+		l += base128IntLength(int64(oid[i]))
+	}
+	return l
+}
+
+func (oid oidEncoder) Encode(dst []byte) {
+	dst = appendBase128Int(dst[:0], int64(oid[0]*40+oid[1]))
+	for i := 2; i < len(oid); i++ {
+		dst = appendBase128Int(dst, int64(oid[i]))
+	}
+}
+
+func makeObjectIdentifier(oid []int, fieldName string) (e encoder, err error) {
+	if len(oid) < 2 || oid[0] > 2 || (oid[0] < 2 && oid[1] >= 40) {
+		return nil, StructuralError{"invalid object identifier", fieldName}
+	}
+
+	return oidEncoder(oid), nil
+}
+
+func makePrintableString(s, fieldName string) (e encoder, err error) {
+	for i := 0; i < len(s); i++ {
+		// The asterisk is often used in PrintableString, even though
+		// it is invalid. If a PrintableString was specifically
+		// requested then the asterisk is permitted by this code.
+		// Ampersand is allowed in parsing due to a handful of CA
+		// certificates, however when making new certificates
+		// it is rejected.
+		if !isPrintable(s[i], allowAsterisk, rejectAmpersand) {
+			return nil, StructuralError{"PrintableString contains invalid character", fieldName}
+		}
+	}
+
+	return stringEncoder(s), nil
+}
+
+func makeIA5String(s, fieldName string) (e encoder, err error) {
+	for i := 0; i < len(s); i++ {
+		if s[i] > 127 {
+			return nil, StructuralError{"IA5String contains invalid character", fieldName}
+		}
+	}
+
+	return stringEncoder(s), nil
+}
+
+func makeNumericString(s string, fieldName string) (e encoder, err error) {
+	for i := 0; i < len(s); i++ {
+		if !isNumeric(s[i]) {
+			return nil, StructuralError{"NumericString contains invalid character", fieldName}
+		}
+	}
+
+	return stringEncoder(s), nil
+}
+
+func makeUTF8String(s string) encoder {
+	return stringEncoder(s)
+}
+
+func appendTwoDigits(dst []byte, v int) []byte {
+	return append(dst, byte('0'+(v/10)%10), byte('0'+v%10))
+}
+
+func appendFourDigits(dst []byte, v int) []byte {
+	var bytes [4]byte
+	for i := range bytes {
+		bytes[3-i] = '0' + byte(v%10)
+		v /= 10
+	}
+	return append(dst, bytes[:]...)
+} + +func outsideUTCRange(t time.Time) bool { + year := t.Year() + return year < 1950 || year >= 2050 +} + +func makeUTCTime(t time.Time, fieldName string) (e encoder, err error) { + dst := make([]byte, 0, 18) + + dst, err = appendUTCTime(dst, t, fieldName) + if err != nil { + return nil, err + } + + return bytesEncoder(dst), nil +} + +func makeGeneralizedTime(t time.Time, fieldName string) (e encoder, err error) { + dst := make([]byte, 0, 20) + + dst, err = appendGeneralizedTime(dst, t, fieldName) + if err != nil { + return nil, err + } + + return bytesEncoder(dst), nil +} + +func appendUTCTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) { + year := t.Year() + + switch { + case 1950 <= year && year < 2000: + dst = appendTwoDigits(dst, year-1900) + case 2000 <= year && year < 2050: + dst = appendTwoDigits(dst, year-2000) + default: + return nil, StructuralError{"cannot represent time as UTCTime", fieldName} + } + + return appendTimeCommon(dst, t), nil +} + +func appendGeneralizedTime(dst []byte, t time.Time, fieldName string) (ret []byte, err error) { + year := t.Year() + if year < 0 || year > 9999 { + return nil, StructuralError{"cannot represent time as GeneralizedTime", fieldName} + } + + dst = appendFourDigits(dst, year) + + return appendTimeCommon(dst, t), nil +} + +func appendTimeCommon(dst []byte, t time.Time) []byte { + _, month, day := t.Date() + + dst = appendTwoDigits(dst, int(month)) + dst = appendTwoDigits(dst, day) + + hour, min, sec := t.Clock() + + dst = appendTwoDigits(dst, hour) + dst = appendTwoDigits(dst, min) + dst = appendTwoDigits(dst, sec) + + _, offset := t.Zone() + + switch { + case offset/60 == 0: + return append(dst, 'Z') + case offset > 0: + dst = append(dst, '+') + case offset < 0: + dst = append(dst, '-') + } + + offsetMinutes := offset / 60 + if offsetMinutes < 0 { + offsetMinutes = -offsetMinutes + } + + dst = appendTwoDigits(dst, offsetMinutes/60) + dst = appendTwoDigits(dst, offsetMinutes%60) + + return dst +} + +func stripTagAndLength(in []byte) []byte { + _, offset, err := parseTagAndLength(in, 0, "") + if err != nil { + return in + } + return in[offset:] +} + +func makeBody(value reflect.Value, params fieldParameters) (e encoder, err error) { + switch value.Type() { + case flagType: + return bytesEncoder(nil), nil + case timeType: + t := value.Interface().(time.Time) + if params.timeType == TagGeneralizedTime || outsideUTCRange(t) { + return makeGeneralizedTime(t, params.name) + } + return makeUTCTime(t, params.name) + case bitStringType: + return bitStringEncoder(value.Interface().(BitString)), nil + case objectIdentifierType: + return makeObjectIdentifier(value.Interface().(ObjectIdentifier), params.name) + case bigIntType: + return makeBigInt(value.Interface().(*big.Int), params.name) + } + + switch v := value; v.Kind() { + case reflect.Bool: + if v.Bool() { + return byteFFEncoder, nil + } + return byte00Encoder, nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return int64Encoder(v.Int()), nil + case reflect.Struct: + t := v.Type() + + for i := 0; i < t.NumField(); i++ { + if t.Field(i).PkgPath != "" { + return nil, StructuralError{"struct contains unexported fields", t.Field(i).Name} + } + } + + startingField := 0 + + n := t.NumField() + if n == 0 { + return bytesEncoder(nil), nil + } + + // If the first element of the structure is a non-empty + // RawContents, then we don't bother serializing the rest. 
+ if t.Field(0).Type == rawContentsType { + s := v.Field(0) + if s.Len() > 0 { + bytes := s.Bytes() + /* The RawContents will contain the tag and + * length fields but we'll also be writing + * those ourselves, so we strip them out of + * bytes */ + return bytesEncoder(stripTagAndLength(bytes)), nil + } + + startingField = 1 + } + + switch n1 := n - startingField; n1 { + case 0: + return bytesEncoder(nil), nil + case 1: + return makeField(v.Field(startingField), parseFieldParameters(t.Field(startingField).Tag.Get("asn1"))) + default: + m := make([]encoder, n1) + for i := 0; i < n1; i++ { + m[i], err = makeField(v.Field(i+startingField), parseFieldParameters(t.Field(i+startingField).Tag.Get("asn1"))) + if err != nil { + return nil, err + } + } + + return multiEncoder(m), nil + } + case reflect.Slice: + sliceType := v.Type() + if sliceType.Elem().Kind() == reflect.Uint8 { + return bytesEncoder(v.Bytes()), nil + } + + var fp fieldParameters + + switch l := v.Len(); l { + case 0: + return bytesEncoder(nil), nil + case 1: + return makeField(v.Index(0), fp) + default: + m := make([]encoder, l) + + for i := 0; i < l; i++ { + m[i], err = makeField(v.Index(i), fp) + if err != nil { + return nil, err + } + } + + return multiEncoder(m), nil + } + case reflect.String: + switch params.stringType { + case TagIA5String: + return makeIA5String(v.String(), params.name) + case TagPrintableString: + return makePrintableString(v.String(), params.name) + case TagNumericString: + return makeNumericString(v.String(), params.name) + default: + return makeUTF8String(v.String()), nil + } + } + + return nil, StructuralError{"unknown Go type", params.name} +} + +func makeField(v reflect.Value, params fieldParameters) (e encoder, err error) { + if !v.IsValid() { + return nil, fmt.Errorf("asn1: cannot marshal nil value") + } + // If the field is an interface{} then recurse into it. + if v.Kind() == reflect.Interface && v.Type().NumMethod() == 0 { + return makeField(v.Elem(), params) + } + + if v.Kind() == reflect.Slice && v.Len() == 0 && params.omitEmpty { + return bytesEncoder(nil), nil + } + + if params.optional && params.defaultValue != nil && canHaveDefaultValue(v.Kind()) { + defaultValue := reflect.New(v.Type()).Elem() + defaultValue.SetInt(*params.defaultValue) + + if reflect.DeepEqual(v.Interface(), defaultValue.Interface()) { + return bytesEncoder(nil), nil + } + } + + // If no default value is given then the zero value for the type is + // assumed to be the default value. This isn't obviously the correct + // behavior, but it's what Go has traditionally done. 
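+	// (Illustrative: an `asn1:"optional"` int field whose value is 0 is
+	// therefore treated as the default and omitted from the encoding.)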
+ if params.optional && params.defaultValue == nil { + if reflect.DeepEqual(v.Interface(), reflect.Zero(v.Type()).Interface()) { + return bytesEncoder(nil), nil + } + } + + if v.Type() == rawValueType { + rv := v.Interface().(RawValue) + if len(rv.FullBytes) != 0 { + return bytesEncoder(rv.FullBytes), nil + } + + t := new(taggedEncoder) + + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{rv.Class, rv.Tag, len(rv.Bytes), rv.IsCompound})) + t.body = bytesEncoder(rv.Bytes) + + return t, nil + } + + matchAny, tag, isCompound, ok := getUniversalType(v.Type()) + if !ok || matchAny { + return nil, StructuralError{fmt.Sprintf("unknown Go type: %v", v.Type()), params.name} + } + + if params.timeType != 0 && tag != TagUTCTime { + return nil, StructuralError{"explicit time type given to non-time member", params.name} + } + + if params.stringType != 0 && tag != TagPrintableString { + return nil, StructuralError{"explicit string type given to non-string member", params.name} + } + + switch tag { + case TagPrintableString: + if params.stringType == 0 { + // This is a string without an explicit string type. We'll use + // a PrintableString if the character set in the string is + // sufficiently limited, otherwise we'll use a UTF8String. + for _, r := range v.String() { + if r >= utf8.RuneSelf || !isPrintable(byte(r), rejectAsterisk, rejectAmpersand) { + if !utf8.ValidString(v.String()) { + return nil, errors.New("asn1: string not valid UTF-8") + } + tag = TagUTF8String + break + } + } + } else { + tag = params.stringType + } + case TagUTCTime: + if params.timeType == TagGeneralizedTime || outsideUTCRange(v.Interface().(time.Time)) { + tag = TagGeneralizedTime + } + } + + if params.set { + if tag != TagSequence { + return nil, StructuralError{"non sequence tagged as set", params.name} + } + tag = TagSet + } + + t := new(taggedEncoder) + + t.body, err = makeBody(v, params) + if err != nil { + return nil, err + } + + bodyLen := t.body.Len() + + class := ClassUniversal + if params.tag != nil { + if params.application { + class = ClassApplication + } else if params.private { + class = ClassPrivate + } else { + class = ClassContextSpecific + } + + if params.explicit { + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{ClassUniversal, tag, bodyLen, isCompound})) + + tt := new(taggedEncoder) + + tt.body = t + + tt.tag = bytesEncoder(appendTagAndLength(tt.scratch[:0], tagAndLength{ + class: class, + tag: *params.tag, + length: bodyLen + t.tag.Len(), + isCompound: true, + })) + + return tt, nil + } + + // implicit tag. + tag = *params.tag + } + + t.tag = bytesEncoder(appendTagAndLength(t.scratch[:0], tagAndLength{class, tag, bodyLen, isCompound})) + + return t, nil +} + +// Marshal returns the ASN.1 encoding of val. +// +// In addition to the struct tags recognised by Unmarshal, the following can be +// used: +// +// ia5: causes strings to be marshaled as ASN.1, IA5String values +// omitempty: causes empty slices to be skipped +// printable: causes strings to be marshaled as ASN.1, PrintableString values +// utf8: causes strings to be marshaled as ASN.1, UTF8String values +// utc: causes time.Time to be marshaled as ASN.1, UTCTime values +// generalized: causes time.Time to be marshaled as ASN.1, GeneralizedTime values +func Marshal(val interface{}) ([]byte, error) { + return MarshalWithParams(val, "") +} + +// MarshalWithParams allows field parameters to be specified for the +// top-level element. The form of the params is the same as the field tags. 
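+//
+// A hedged sketch of how the two entry points relate (the `point` type is
+// hypothetical):
+//
+//	type point struct{ X, Y int }
+//	der, err := Marshal(point{1, 2})                    // SEQUENCE of two INTEGERs
+//	der2, err := MarshalWithParams(point{1, 2}, "set")  // same body, tagged as a SET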
+func MarshalWithParams(val interface{}, params string) ([]byte, error) { + e, err := makeField(reflect.ValueOf(val), parseFieldParameters(params)) + if err != nil { + return nil, err + } + b := make([]byte, e.Len()) + e.Encode(b) + return b, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/gen.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/gen.go new file mode 100644 index 00000000..1d0c9a7f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/gen.go @@ -0,0 +1,17 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package configpb + +//go:generate protoc -I=. -I=$GOPATH/src --go_out=:. multilog.proto diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go new file mode 100644 index 00000000..ed5687bf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.pb.go @@ -0,0 +1,162 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: multilog.proto + +package configpb + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + timestamp "github.com/golang/protobuf/ptypes/timestamp" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. 
+type TemporalLogConfig struct { + Shard []*LogShardConfig `protobuf:"bytes,1,rep,name=shard,proto3" json:"shard,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TemporalLogConfig) Reset() { *m = TemporalLogConfig{} } +func (m *TemporalLogConfig) String() string { return proto.CompactTextString(m) } +func (*TemporalLogConfig) ProtoMessage() {} +func (*TemporalLogConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_33e545c6d900a512, []int{0} +} + +func (m *TemporalLogConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TemporalLogConfig.Unmarshal(m, b) +} +func (m *TemporalLogConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TemporalLogConfig.Marshal(b, m, deterministic) +} +func (m *TemporalLogConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_TemporalLogConfig.Merge(m, src) +} +func (m *TemporalLogConfig) XXX_Size() int { + return xxx_messageInfo_TemporalLogConfig.Size(m) +} +func (m *TemporalLogConfig) XXX_DiscardUnknown() { + xxx_messageInfo_TemporalLogConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_TemporalLogConfig proto.InternalMessageInfo + +func (m *TemporalLogConfig) GetShard() []*LogShardConfig { + if m != nil { + return m.Shard + } + return nil +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +type LogShardConfig struct { + Uri string `protobuf:"bytes,1,opt,name=uri,proto3" json:"uri,omitempty"` + // The log's public key in DER-encoded PKIX form. + PublicKeyDer []byte `protobuf:"bytes,2,opt,name=public_key_der,json=publicKeyDer,proto3" json:"public_key_der,omitempty"` + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + NotAfterStart *timestamp.Timestamp `protobuf:"bytes,3,opt,name=not_after_start,json=notAfterStart,proto3" json:"not_after_start,omitempty"` + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. 
+ NotAfterLimit *timestamp.Timestamp `protobuf:"bytes,4,opt,name=not_after_limit,json=notAfterLimit,proto3" json:"not_after_limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogShardConfig) Reset() { *m = LogShardConfig{} } +func (m *LogShardConfig) String() string { return proto.CompactTextString(m) } +func (*LogShardConfig) ProtoMessage() {} +func (*LogShardConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_33e545c6d900a512, []int{1} +} + +func (m *LogShardConfig) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogShardConfig.Unmarshal(m, b) +} +func (m *LogShardConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogShardConfig.Marshal(b, m, deterministic) +} +func (m *LogShardConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogShardConfig.Merge(m, src) +} +func (m *LogShardConfig) XXX_Size() int { + return xxx_messageInfo_LogShardConfig.Size(m) +} +func (m *LogShardConfig) XXX_DiscardUnknown() { + xxx_messageInfo_LogShardConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_LogShardConfig proto.InternalMessageInfo + +func (m *LogShardConfig) GetUri() string { + if m != nil { + return m.Uri + } + return "" +} + +func (m *LogShardConfig) GetPublicKeyDer() []byte { + if m != nil { + return m.PublicKeyDer + } + return nil +} + +func (m *LogShardConfig) GetNotAfterStart() *timestamp.Timestamp { + if m != nil { + return m.NotAfterStart + } + return nil +} + +func (m *LogShardConfig) GetNotAfterLimit() *timestamp.Timestamp { + if m != nil { + return m.NotAfterLimit + } + return nil +} + +func init() { + proto.RegisterType((*TemporalLogConfig)(nil), "configpb.TemporalLogConfig") + proto.RegisterType((*LogShardConfig)(nil), "configpb.LogShardConfig") +} + +func init() { proto.RegisterFile("multilog.proto", fileDescriptor_33e545c6d900a512) } + +var fileDescriptor_33e545c6d900a512 = []byte{ + // 241 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x8f, 0xb1, 0x4e, 0xc3, 0x30, + 0x14, 0x45, 0x65, 0x02, 0x08, 0xdc, 0x12, 0xc0, 0x93, 0xd5, 0x85, 0xa8, 0x62, 0xc8, 0xe4, 0x4a, + 0xe5, 0x0b, 0xa0, 0x6c, 0x64, 0x4a, 0xbb, 0x47, 0x4e, 0xeb, 0x18, 0x0b, 0x3b, 0xcf, 0x72, 0x5e, + 0x86, 0xfe, 0x25, 0x9f, 0x84, 0x1c, 0x2b, 0x43, 0x37, 0xb6, 0xa7, 0x77, 0xcf, 0xb9, 0xd2, 0xa5, + 0xb9, 0x1b, 0x2d, 0x1a, 0x0b, 0x5a, 0xf8, 0x00, 0x08, 0xec, 0xee, 0x08, 0x7d, 0x67, 0xb4, 0x6f, + 0x57, 0x2f, 0x1a, 0x40, 0x5b, 0xb5, 0x99, 0xfe, 0xed, 0xd8, 0x6d, 0xd0, 0x38, 0x35, 0xa0, 0x74, + 0x3e, 0xa1, 0xeb, 0x1d, 0x7d, 0x3e, 0x28, 0xe7, 0x21, 0x48, 0x5b, 0x81, 0xde, 0x4d, 0x1e, 0x13, + 0xf4, 0x66, 0xf8, 0x96, 0xe1, 0xc4, 0x49, 0x91, 0x95, 0x8b, 0x2d, 0x17, 0x73, 0x9f, 0xa8, 0x40, + 0xef, 0x63, 0x92, 0xc0, 0x3a, 0x61, 0xeb, 0x5f, 0x42, 0xf3, 0xcb, 0x84, 0x3d, 0xd1, 0x6c, 0x0c, + 0x86, 0x93, 0x82, 0x94, 0xf7, 0x75, 0x3c, 0xd9, 0x2b, 0xcd, 0xfd, 0xd8, 0x5a, 0x73, 0x6c, 0x7e, + 0xd4, 0xb9, 0x39, 0xa9, 0xc0, 0xaf, 0x0a, 0x52, 0x2e, 0xeb, 0x65, 0xfa, 0x7e, 0xa9, 0xf3, 0xa7, + 0x0a, 0xec, 0x83, 0x3e, 0xf6, 0x80, 0x8d, 0xec, 0x50, 0x85, 0x66, 0x40, 0x19, 0x90, 0x67, 0x05, + 0x29, 0x17, 0xdb, 0x95, 0x48, 0x53, 0xc4, 0x3c, 0x45, 0x1c, 0xe6, 0x29, 0xf5, 0x43, 0x0f, 0xf8, + 0x1e, 0x8d, 0x7d, 0x14, 0x2e, 0x3b, 0xac, 0x71, 0x06, 0xf9, 0xf5, 0xff, 0x3b, 0xaa, 0x28, 0xb4, + 0xb7, 0x13, 0xf2, 0xf6, 0x17, 0x00, 0x00, 0xff, 0xff, 0xf8, 0xd9, 0x50, 0x5b, 0x5b, 0x01, 0x00, + 0x00, +} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto new file mode 100644 index 00000000..b396a90a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/configpb/multilog.proto @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package configpb; + +import "google/protobuf/timestamp.proto"; + +// TemporalLogConfig is a set of LogShardConfig messages, whose +// time limits should be contiguous. +message TemporalLogConfig { + repeated LogShardConfig shard = 1; +} + +// LogShardConfig describes the acceptable date range for a single shard of a temporal +// log. +message LogShardConfig { + string uri = 1; + + // The log's public key in DER-encoded PKIX form. + bytes public_key_der = 2; + + // not_after_start defines the start of the range of acceptable NotAfter + // values, inclusive. + // Leaving this unset implies no lower bound to the range. + google.protobuf.Timestamp not_after_start = 3; + // not_after_limit defines the end of the range of acceptable NotAfter values, + // exclusive. + // Leaving this unset implies no upper bound to the range. + google.protobuf.Timestamp not_after_limit = 4; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/getentries.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/getentries.go new file mode 100644 index 00000000..d99b3a68 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/getentries.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "errors" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/x509" +) + +// GetRawEntries exposes the /ct/v1/get-entries result with only the JSON parsing done. 
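+// Both start and end are inclusive.
+//
+// Usage sketch (assumes an initialized LogClient c and a context ctx):
+//
+//	rsp, err := c.GetRawEntries(ctx, 0, 31) // fetch entries 0..31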
+func (c *LogClient) GetRawEntries(ctx context.Context, start, end int64) (*ct.GetEntriesResponse, error) { + if end < 0 { + return nil, errors.New("end should be >= 0") + } + if end < start { + return nil, errors.New("start should be <= end") + } + + params := map[string]string{ + "start": strconv.FormatInt(start, 10), + "end": strconv.FormatInt(end, 10), + } + if ctx == nil { + ctx = context.TODO() + } + + var resp ct.GetEntriesResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntriesPath, params, &resp); err != nil { + return nil, err + } + + return &resp, nil +} + +// GetEntries attempts to retrieve the entries in the sequence [start, end] from the CT log server +// (RFC6962 s4.6) as parsed [pre-]certificates for convenience, held in a slice of ct.LogEntry structures. +// However, this does mean that any certificate parsing failures will cause a failure of the whole +// retrieval operation; for more robust retrieval of parsed certificates, use GetRawEntries() and invoke +// ct.LogEntryFromLeaf() on each individual entry. +func (c *LogClient) GetEntries(ctx context.Context, start, end int64) ([]ct.LogEntry, error) { + resp, err := c.GetRawEntries(ctx, start, end) + if err != nil { + return nil, err + } + entries := make([]ct.LogEntry, len(resp.Entries)) + for i, entry := range resp.Entries { + index := start + int64(i) + logEntry, err := ct.LogEntryFromLeaf(index, &entry) + if x509.IsFatal(err) { + return nil, err + } + entries[i] = *logEntry + } + return entries, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/logclient.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/logclient.go new file mode 100644 index 00000000..2f6224dd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/logclient.go @@ -0,0 +1,255 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package client is a CT log client implementation and contains types and code +// for interacting with RFC6962-compliant CT Log instances. +// See http://tools.ietf.org/html/rfc6962 for details +package client + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strconv" + + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/tls" +) + +// LogClient represents a client for a given CT Log instance +type LogClient struct { + jsonclient.JSONClient +} + +// CheckLogClient is an interface that allows (just) checking of various log contents. +type CheckLogClient interface { + BaseURI() string + GetSTH(context.Context) (*ct.SignedTreeHead, error) + GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) + GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) +} + +// New constructs a new LogClient instance. 
+// |uri| is the base URI of the CT log instance to interact with, e.g.
+// https://ct.googleapis.com/pilot
+// |hc| is the underlying client to be used for HTTP requests to the CT log.
+// |opts| can be used to provide a custom logger interface and a public key
+// for signature verification.
+func New(uri string, hc *http.Client, opts jsonclient.Options) (*LogClient, error) {
+	logClient, err := jsonclient.New(uri, hc, opts)
+	if err != nil {
+		return nil, err
+	}
+	return &LogClient{*logClient}, err
+}
+
+// RspError represents a server error including HTTP information.
+type RspError = jsonclient.RspError
+
+// Attempts to add |chain| to the log, using the API end-point specified by
+// |path|. If the provided context expires before submission is complete, an
+// error will be returned.
+func (c *LogClient) addChainWithRetry(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+	var resp ct.AddChainResponse
+	var req ct.AddChainRequest
+	for _, link := range chain {
+		req.Chain = append(req.Chain, link.Data)
+	}
+
+	httpRsp, body, err := c.PostAndParseWithRetry(ctx, path, &req, &resp)
+	if err != nil {
+		return nil, err
+	}
+
+	var ds ct.DigitallySigned
+	if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil {
+		return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+	} else if len(rest) > 0 {
+		return nil, RspError{
+			Err:        fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)),
+			StatusCode: httpRsp.StatusCode,
+			Body:       body,
+		}
+	}
+
+	exts, err := base64.StdEncoding.DecodeString(resp.Extensions)
+	if err != nil {
+		return nil, RspError{
+			Err:        fmt.Errorf("invalid base64 data in Extensions (%q): %v", resp.Extensions, err),
+			StatusCode: httpRsp.StatusCode,
+			Body:       body,
+		}
+	}
+
+	var logID ct.LogID
+	copy(logID.KeyID[:], resp.ID)
+	sct := &ct.SignedCertificateTimestamp{
+		SCTVersion: resp.SCTVersion,
+		LogID:      logID,
+		Timestamp:  resp.Timestamp,
+		Extensions: ct.CTExtensions(exts),
+		Signature:  ds,
+	}
+	if err := c.VerifySCTSignature(*sct, ctype, chain); err != nil {
+		return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+	}
+	return sct, nil
+}
+
+// AddChain adds the (DER represented) X509 |chain| to the log.
+func (c *LogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+	return c.addChainWithRetry(ctx, ct.X509LogEntryType, ct.AddChainPath, chain)
+}
+
+// AddPreChain adds the (DER represented) Precertificate |chain| to the log.
+func (c *LogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) {
+	return c.addChainWithRetry(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain)
+}
+
+// AddJSON submits arbitrary data to the XJSON server.
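+// This endpoint is an experimental extension and, to the best of our
+// knowledge, not part of RFC 6962.
+//
+// Usage sketch (assumes an initialized LogClient c; the payload is
+// hypothetical):
+//
+//	sct, err := c.AddJSON(ctx, map[string]string{"answer": "42"})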
+func (c *LogClient) AddJSON(ctx context.Context, data interface{}) (*ct.SignedCertificateTimestamp, error) { + req := ct.AddJSONRequest{Data: data} + var resp ct.AddChainResponse + httpRsp, body, err := c.PostAndParse(ctx, ct.AddJSONPath, &req, &resp) + if err != nil { + return nil, err + } + var ds ct.DigitallySigned + if rest, err := tls.Unmarshal(resp.Signature, &ds); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } else if len(rest) > 0 { + return nil, RspError{ + Err: fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)), + StatusCode: httpRsp.StatusCode, + Body: body, + } + } + var logID ct.LogID + copy(logID.KeyID[:], resp.ID) + return &ct.SignedCertificateTimestamp{ + SCTVersion: resp.SCTVersion, + LogID: logID, + Timestamp: resp.Timestamp, + Extensions: ct.CTExtensions(resp.Extensions), + Signature: ds, + }, nil +} + +// GetSTH retrieves the current STH from the log. +// Returns a populated SignedTreeHead, or a non-nil error (which may be of type +// RspError if a raw http.Response is available). +func (c *LogClient) GetSTH(ctx context.Context) (*ct.SignedTreeHead, error) { + var resp ct.GetSTHResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetSTHPath, nil, &resp) + if err != nil { + return nil, err + } + + sth, err := resp.ToSignedTreeHead() + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + + if err := c.VerifySTHSignature(*sth); err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + return sth, nil +} + +// VerifySTHSignature checks the signature in sth, returning any error encountered or nil if verification is +// successful. +func (c *LogClient) VerifySTHSignature(sth ct.SignedTreeHead) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + return c.Verifier.VerifySTHSignature(sth) +} + +// VerifySCTSignature checks the signature in sct for the given LogEntryType, with associated certificate chain. +func (c *LogClient) VerifySCTSignature(sct ct.SignedCertificateTimestamp, ctype ct.LogEntryType, certData []ct.ASN1Cert) error { + if c.Verifier == nil { + // Can't verify signatures without a verifier + return nil + } + leaf, err := ct.MerkleTreeLeafFromRawChain(certData, ctype, sct.Timestamp) + if err != nil { + return fmt.Errorf("failed to build MerkleTreeLeaf: %v", err) + } + entry := ct.LogEntry{Leaf: *leaf} + return c.Verifier.VerifySCTSignature(sct, entry) +} + +// GetSTHConsistency retrieves the consistency proof between two snapshots. +func (c *LogClient) GetSTHConsistency(ctx context.Context, first, second uint64) ([][]byte, error) { + base10 := 10 + params := map[string]string{ + "first": strconv.FormatUint(first, base10), + "second": strconv.FormatUint(second, base10), + } + var resp ct.GetSTHConsistencyResponse + if _, _, err := c.GetAndParse(ctx, ct.GetSTHConsistencyPath, params, &resp); err != nil { + return nil, err + } + return resp.Consistency, nil +} + +// GetProofByHash returns an audit path for the hash of an SCT. 
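+// The hash is the RFC 6962 Merkle leaf hash, i.e. SHA-256 over a zero byte
+// followed by the serialized MerkleTreeLeaf. A hedged sketch (leafBytes and
+// sth are assumed to already be in scope; crypto/sha256 is imported):
+//
+//	leafHash := sha256.Sum256(append([]byte{0x00}, leafBytes...))
+//	proof, err := c.GetProofByHash(ctx, leafHash[:], sth.TreeSize)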
+func (c *LogClient) GetProofByHash(ctx context.Context, hash []byte, treeSize uint64) (*ct.GetProofByHashResponse, error) { + b64Hash := base64.StdEncoding.EncodeToString(hash) + base10 := 10 + params := map[string]string{ + "tree_size": strconv.FormatUint(treeSize, base10), + "hash": b64Hash, + } + var resp ct.GetProofByHashResponse + if _, _, err := c.GetAndParse(ctx, ct.GetProofByHashPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for a log. +func (c *LogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + var resp ct.GetRootsResponse + httpRsp, body, err := c.GetAndParse(ctx, ct.GetRootsPath, nil, &resp) + if err != nil { + return nil, err + } + var roots []ct.ASN1Cert + for _, cert64 := range resp.Certificates { + cert, err := base64.StdEncoding.DecodeString(cert64) + if err != nil { + return nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body} + } + roots = append(roots, ct.ASN1Cert{Data: cert}) + } + return roots, nil +} + +// GetEntryAndProof returns a log entry and audit path for the index of a leaf. +func (c *LogClient) GetEntryAndProof(ctx context.Context, index, treeSize uint64) (*ct.GetEntryAndProofResponse, error) { + base10 := 10 + params := map[string]string{ + "leaf_index": strconv.FormatUint(index, base10), + "tree_size": strconv.FormatUint(treeSize, base10), + } + var resp ct.GetEntryAndProofResponse + if _, _, err := c.GetAndParse(ctx, ct.GetEntryAndProofPath, params, &resp); err != nil { + return nil, err + } + return &resp, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/multilog.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/multilog.go new file mode 100644 index 00000000..9b0881e5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/client/multilog.go @@ -0,0 +1,221 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package client + +import ( + "context" + "crypto/sha256" + "errors" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + ct "github.com/google/certificate-transparency-go" + "github.com/google/certificate-transparency-go/client/configpb" + "github.com/google/certificate-transparency-go/jsonclient" + "github.com/google/certificate-transparency-go/x509" +) + +type interval struct { + lower *time.Time // nil => no lower bound + upper *time.Time // nil => no upper bound +} + +// TemporalLogConfigFromFile creates a TemporalLogConfig object from the given +// filename, which should contain text-protobuf encoded configuration data. 
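+//
+// A minimal sketch of such a file (URI and timestamps are hypothetical):
+//
+//	shard {
+//	  uri: "https://ct.example.com/2019"
+//	  not_after_start { seconds: 1546300800 }
+//	  not_after_limit { seconds: 1577836800 }
+//	}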
+func TemporalLogConfigFromFile(filename string) (*configpb.TemporalLogConfig, error) { + if len(filename) == 0 { + return nil, errors.New("log config filename empty") + } + + cfgText, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("failed to read log config: %v", err) + } + + var cfg configpb.TemporalLogConfig + if err := proto.UnmarshalText(string(cfgText), &cfg); err != nil { + return nil, fmt.Errorf("failed to parse log config: %v", err) + } + + if len(cfg.Shard) == 0 { + return nil, errors.New("empty log config found") + } + return &cfg, nil +} + +// AddLogClient is an interface that allows adding certificates and pre-certificates to a log. +// Both LogClient and TemporalLogClient implement this interface, which allows users to +// commonize code for adding certs to normal/temporal logs. +type AddLogClient interface { + AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) + GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) +} + +// TemporalLogClient allows [pre-]certificates to be uploaded to a temporal log. +type TemporalLogClient struct { + Clients []*LogClient + intervals []interval +} + +// NewTemporalLogClient builds a new client for interacting with a temporal log. +// The provided config should be contiguous and chronological. +func NewTemporalLogClient(cfg *configpb.TemporalLogConfig, hc *http.Client) (*TemporalLogClient, error) { + if len(cfg.GetShard()) == 0 { + return nil, errors.New("empty config") + } + + overall, err := shardInterval(cfg.Shard[0]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[0] invalid: %v", err) + } + intervals := make([]interval, 0, len(cfg.Shard)) + intervals = append(intervals, overall) + for i := 1; i < len(cfg.Shard); i++ { + interval, err := shardInterval(cfg.Shard[i]) + if err != nil { + return nil, fmt.Errorf("cfg.Shard[%d] invalid: %v", i, err) + } + if overall.upper == nil { + return nil, fmt.Errorf("cfg.Shard[%d] extends an interval with no upper bound", i) + } + if interval.lower == nil { + return nil, fmt.Errorf("cfg.Shard[%d] has no lower bound but extends an interval", i) + } + if !interval.lower.Equal(*overall.upper) { + return nil, fmt.Errorf("cfg.Shard[%d] starts at %v but previous interval ended at %v", i, interval.lower, overall.upper) + } + overall.upper = interval.upper + intervals = append(intervals, interval) + } + clients := make([]*LogClient, 0, len(cfg.Shard)) + for i, shard := range cfg.Shard { + opts := jsonclient.Options{UserAgent: "ct-go-multilog/1.0"} + opts.PublicKeyDER = shard.GetPublicKeyDer() + c, err := New(shard.Uri, hc, opts) + if err != nil { + return nil, fmt.Errorf("failed to create client for cfg.Shard[%d]: %v", i, err) + } + clients = append(clients, c) + } + tlc := TemporalLogClient{ + Clients: clients, + intervals: intervals, + } + return &tlc, nil +} + +// GetAcceptedRoots retrieves the set of acceptable root certificates for all +// of the shards of a temporal log (i.e. the union). 
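+// Roots served by more than one shard are deduplicated (keyed by the SHA-256
+// of their DER encoding) and returned once.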
+func (tlc *TemporalLogClient) GetAcceptedRoots(ctx context.Context) ([]ct.ASN1Cert, error) { + type result struct { + roots []ct.ASN1Cert + err error + } + results := make(chan result, len(tlc.Clients)) + for _, c := range tlc.Clients { + go func(c *LogClient) { + var r result + r.roots, r.err = c.GetAcceptedRoots(ctx) + results <- r + }(c) + } + + var allRoots []ct.ASN1Cert + seen := make(map[[sha256.Size]byte]bool) + for range tlc.Clients { + r := <-results + if r.err != nil { + return nil, r.err + } + for _, root := range r.roots { + h := sha256.Sum256(root.Data) + if seen[h] { + continue + } + seen[h] = true + allRoots = append(allRoots, root) + } + } + return allRoots, nil +} + +// AddChain adds the (DER represented) X509 chain to the appropriate log. +func (tlc *TemporalLogClient) AddChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.X509LogEntryType, ct.AddChainPath, chain) +} + +// AddPreChain adds the (DER represented) Precertificate chain to the appropriate log. +func (tlc *TemporalLogClient) AddPreChain(ctx context.Context, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + return tlc.addChain(ctx, ct.PrecertLogEntryType, ct.AddPreChainPath, chain) +} + +func (tlc *TemporalLogClient) addChain(ctx context.Context, ctype ct.LogEntryType, path string, chain []ct.ASN1Cert) (*ct.SignedCertificateTimestamp, error) { + // Parse the first entry in the chain + if len(chain) == 0 { + return nil, errors.New("missing chain") + } + cert, err := x509.ParseCertificate(chain[0].Data) + if err != nil { + return nil, fmt.Errorf("failed to parse initial chain entry: %v", err) + } + cidx, err := tlc.IndexByDate(cert.NotAfter) + if err != nil { + return nil, fmt.Errorf("failed to find log to process cert: %v", err) + } + return tlc.Clients[cidx].addChainWithRetry(ctx, ctype, path, chain) +} + +// IndexByDate returns the index of the Clients entry that is appropriate for the given +// date. 
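+// Shard intervals are inclusive of their lower bound and exclusive of their
+// upper bound, so a date equal to a shard boundary selects the later shard.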
+func (tlc *TemporalLogClient) IndexByDate(when time.Time) (int, error) { + for i, interval := range tlc.intervals { + if (interval.lower != nil) && when.Before(*interval.lower) { + continue + } + if (interval.upper != nil) && !when.Before(*interval.upper) { + continue + } + return i, nil + } + return -1, fmt.Errorf("no log found encompassing date %v", when) +} + +func shardInterval(cfg *configpb.LogShardConfig) (interval, error) { + var interval interval + if cfg.NotAfterStart != nil { + t, err := ptypes.Timestamp(cfg.NotAfterStart) + if err != nil { + return interval, fmt.Errorf("failed to parse NotAfterStart: %v", err) + } + interval.lower = &t + } + if cfg.NotAfterLimit != nil { + t, err := ptypes.Timestamp(cfg.NotAfterLimit) + if err != nil { + return interval, fmt.Errorf("failed to parse NotAfterLimit: %v", err) + } + interval.upper = &t + } + + if interval.lower != nil && interval.upper != nil && !(*interval.lower).Before(*interval.upper) { + return interval, errors.New("inverted interval") + } + return interval, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml new file mode 100644 index 00000000..6b70334a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild.yaml @@ -0,0 +1,46 @@ +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a +steps: +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - . + waitFor: ["-"] +- id: build_envsubst + name: gcr.io/cloud-builders/docker + args: + - build + - trillian/examples/deployment/docker/envsubst + - -t + - envsubst + waitFor: ["-"] +- id: envsubst_kubernetes_configs + name: envsubst + args: + - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - trillian/examples/deployment/kubernetes/ctfe-service.yaml + - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - PROJECT_ID=${PROJECT_ID} + - IMAGE_TAG=${COMMIT_SHA} + waitFor: + - build_envsubst +- id: update_kubernetes_configs_dryrun + name: gcr.io/cloud-builders/kubectl + args: + - apply + - --server-dry-run + - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} + - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME} + waitFor: + - envsubst_kubernetes_configs +images: +- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml new file mode 100644 index 00000000..12b4f00c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild_master.yaml @@ -0,0 +1,63 @@ +substitutions: + _CLUSTER_NAME: trillian-opensource-ci + _MASTER_ZONE: us-central1-a +steps: +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - . 
+ waitFor: ["-"] +- id: push_ctfe + name: gcr.io/cloud-builders/docker + args: + - push + - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + waitFor: + - build_ctfe +- id: tag_latest_ctfe + name: gcr.io/cloud-builders/gcloud + args: + - container + - images + - add-tag + - gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} + - gcr.io/${PROJECT_ID}/ctfe:latest + waitFor: + - push_ctfe +- id: build_envsubst + name: gcr.io/cloud-builders/docker + args: + - build + - trillian/examples/deployment/docker/envsubst + - -t + - envsubst + waitFor: ["-"] +- id: envsubst_kubernetes_configs + name: envsubst + args: + - trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - trillian/examples/deployment/kubernetes/ctfe-service.yaml + - trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - PROJECT_ID=${PROJECT_ID} + - IMAGE_TAG=${COMMIT_SHA} + waitFor: + - build_envsubst +- id: update_kubernetes_configs + name: gcr.io/cloud-builders/kubectl + args: + - apply + - -f=trillian/examples/deployment/kubernetes/ctfe-deployment.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-service.yaml + - -f=trillian/examples/deployment/kubernetes/ctfe-ingress.yaml + env: + - CLOUDSDK_COMPUTE_ZONE=${_MASTER_ZONE} + - CLOUDSDK_CONTAINER_CLUSTER=${_CLUSTER_NAME} + waitFor: + - envsubst_kubernetes_configs + - push_ctfe +images: +- gcr.io/${PROJECT_ID}/ctfe:${COMMIT_SHA} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml new file mode 100644 index 00000000..8c8c5ab6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/cloudbuild_tag.yaml @@ -0,0 +1,10 @@ +steps: +- id: build_ctfe + name: gcr.io/cloud-builders/docker + args: + - build + - --file=trillian/examples/deployment/docker/ctfe/Dockerfile + - --tag=gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME} + - . 
+images: +- gcr.io/${PROJECT_ID}/ctfe:${TAG_NAME} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/go.mod new file mode 100644 index 00000000..90f7e9ec --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/go.mod @@ -0,0 +1,62 @@ +module github.com/google/certificate-transparency-go + +go 1.12 + +require ( + contrib.go.opencensus.io/exporter/stackdriver v0.12.1 // indirect + github.com/bgentry/speakeasy v0.1.0 // indirect + github.com/coreos/bbolt v1.3.2 // indirect + github.com/coreos/etcd v3.3.13+incompatible + github.com/coreos/go-semver v0.3.0 // indirect + github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e // indirect + github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/dustin/go-humanize v1.0.0 // indirect + github.com/go-sql-driver/mysql v1.4.1 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b + github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef // indirect + github.com/golang/mock v1.2.0 + github.com/golang/protobuf v1.3.1 + github.com/golangci/golangci-lint v1.17.1 + github.com/google/btree v1.0.0 // indirect + github.com/google/go-cmp v0.3.0 + github.com/google/monologue v0.0.0-20190606152607-4b11a32b5934 + github.com/google/trillian v1.2.2-0.20190612132142-05461f4df60a + github.com/google/trillian-examples v0.0.0-20190603134952-4e75ba15216c + github.com/gorilla/websocket v1.4.0 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect + github.com/jonboulle/clockwork v0.1.0 // indirect + github.com/juju/ratelimit v1.0.1 + github.com/kylelemons/godebug v1.1.0 + // TODO(gbelvin): Remove when Trillian declares its dependencies with go.mod + github.com/letsencrypt/pkcs11key v2.0.1-0.20170608213348-396559074696+incompatible // indirect + github.com/lib/pq v1.1.1 // indirect + github.com/mattn/go-runewidth v0.0.4 // indirect + github.com/mattn/go-sqlite3 v1.10.0 + github.com/miekg/pkcs11 v1.0.2 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 + github.com/olekukonko/tablewriter v0.0.1 // indirect + github.com/prometheus/client_golang v0.9.4 + github.com/rs/cors v1.6.0 + github.com/sergi/go-diff v1.0.0 + github.com/soheilhy/cmux v0.1.4 // indirect + github.com/spf13/cobra v0.0.5 // indirect + github.com/stretchr/testify v1.3.0 + github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 // indirect + github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce + github.com/urfave/cli v1.20.0 // indirect + github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect + go.etcd.io/bbolt v1.3.2 // indirect + go.etcd.io/etcd v3.3.13+incompatible + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.10.0 // indirect + golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a + golang.org/x/net v0.0.0-20190606173856-1492cefac77f + google.golang.org/api v0.6.0 // indirect + google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6 + google.golang.org/grpc v1.21.1 + gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/go.sum 
b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/go.sum new file mode 100644 index 00000000..95c64387 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/go.sum @@ -0,0 +1,453 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +contrib.go.opencensus.io/exporter/stackdriver v0.12.1 h1:Dll2uFfOVI3fa8UzsHyP6z0M6fEc9ZTAMo+Y3z282Xg= +contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= +contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OpenPeeDeeP/depguard v0.0.0-20180806142446-a69c782687b2 h1:HTOmFEEYrWi4MW5ZKUx6xfeyM10Sx3kQF65xiQJMPYA= +github.com/OpenPeeDeeP/depguard v0.0.0-20180806142446-a69c782687b2/go.mod h1:7/4sitnI9YlQgTLLk734QlzXT8DuHVnAyztLplQjk+o= +github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/aws/aws-sdk-go v1.19.18 h1:Hb3+b9HCqrOrbAtFstUWg7H5TQ+/EcklJtE8VShVs8o= +github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4= +github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/fatih/color v1.6.0 h1:66qjqZk8kalYAvDRtM1AdAJQI0tj4Wrue3Eq3B3pmFU= +github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-critic/go-critic v0.0.0-20181204210945-1df300866540 h1:7CU1IXBpPvxpQ/NqJrpuMXMHAw+FB2vfqtRF8tgW9fw= +github.com/go-critic/go-critic v0.0.0-20181204210945-1df300866540/go.mod h1:+sE8vrLDS2M0pZkBk0wy6+nLdKexVDrl/jBqQOTDThA= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-lintpack/lintpack v0.5.2 h1:DI5mA3+eKdWeJ40nU4d6Wc26qmdG8RCi/btYq0TuRN0= +github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-toolsmith/astcast v1.0.0 h1:JojxlmI6STnFVG9yOImLeGREv8W2ocNUM+iOhR6jE7g= +github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4= +github.com/go-toolsmith/astcopy v1.0.0 h1:OMgl1b1MEpjFQ1m5ztEO06rz5CUd3oBv9RF7+DyvdG8= +github.com/go-toolsmith/astcopy v1.0.0/go.mod h1:vrgyG+5Bxrnz4MZWPF+pI4R8h3qKRjjyvV/DSez4WVQ= +github.com/go-toolsmith/astequal v0.0.0-20180903214952-dcb477bfacd6/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astequal v1.0.0 h1:4zxD8j3JRFNyLN46lodQuqz3xdKSrur7U/sr0SDS/gQ= +github.com/go-toolsmith/astequal v1.0.0/go.mod h1:H+xSiq0+LtiDC11+h1G32h7Of5O3CYFJ99GVbS5lDKY= +github.com/go-toolsmith/astfmt v0.0.0-20180903215011-8f8ee99c3086/go.mod h1:mP93XdblcopXwlyN4X4uodxXQhldPGZbcEJIimQHrkg= +github.com/go-toolsmith/astfmt v1.0.0 
h1:A0vDDXt+vsvLEdbMFJAUBI/uTbRw1ffOPnxsILnFL6k= +github.com/go-toolsmith/astfmt v1.0.0/go.mod h1:cnWmsOAuq4jJY6Ct5YWlVLmcmLMn1JUPuQIHCY7CJDw= +github.com/go-toolsmith/astinfo v0.0.0-20180906194353-9809ff7efb21/go.mod h1:dDStQCHtmZpYOmjRP/8gHHnCCch3Zz3oEgCdZVdtweU= +github.com/go-toolsmith/astp v0.0.0-20180903215135-0af7e3c24f30/go.mod h1:SV2ur98SGypH1UjcPpCatrV5hPazG6+IfNHbkDXBRrk= +github.com/go-toolsmith/astp v1.0.0 h1:alXE75TXgcmupDsMK1fRAy0YUzLzqPVvBKoyWV+KPXg= +github.com/go-toolsmith/astp v1.0.0/go.mod h1:RSyrtpVlfTFGDYRbrjyWP1pYu//tSFcvdYrA8meBmLI= +github.com/go-toolsmith/pkgload v0.0.0-20181119091011-e9e65178eee8/go.mod h1:WoMrjiy4zvdS+Bg6z9jZH82QXwkcgCBX6nOfnmdaHks= +github.com/go-toolsmith/pkgload v1.0.0 h1:4DFWWMXVfbcN5So1sBNW9+yeiMqLFGl1wFLTL5R0Tgg= +github.com/go-toolsmith/pkgload v1.0.0/go.mod h1:5eFArkbO80v7Z0kdngIxsRXRMTaX4Ilcwuh3clNrQJc= +github.com/go-toolsmith/strparse v1.0.0 h1:Vcw78DnpCAKlM20kSbAyO4mPfJn/lyYA4BJUDxe2Jb4= +github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslWct4wyljWhSRy8= +github.com/go-toolsmith/typep v1.0.0 h1:zKymWyA1TRYvqYrYDrfEMZULyrhcnGY3x7LDKU2XQaA= +github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.0.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0= +github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/errcheck v0.0.0-20181003203344-ef45e06d44b6 h1:i2jIkQFb8RG45DuQs+ElyROY848cSJIoIkBM+7XXypA= +github.com/golangci/errcheck v0.0.0-20181003203344-ef45e06d44b6/go.mod h1:DbHgvLiFKX1Sh2T1w8Q/h4NAI8MHIpzCdnBUDTXU3I0= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZBf8NjltjWihK2QfBBBZuv91cMFfDHw= +github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8= 
+github.com/golangci/go-tools v0.0.0-20180109140146-af6baa5dc196 h1:9rtVlONXLF1rJZzvLt4tfOXtnAFUEhxCJ64Ibzj6ECo= +github.com/golangci/go-tools v0.0.0-20180109140146-af6baa5dc196/go.mod h1:unzUULGw35sjyOYjUt0jMTXqHlZPpPc6e+xfO4cd6mM= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 h1:pe9JHs3cHHDQgOFXJJdYkK6fLz2PWyYtP4hthoCMvs8= +github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3/go.mod h1:JXrF4TWy4tXYn62/9x8Wm/K/dm06p8tCKwFRDPZG/1o= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee h1:J2XAy40+7yz70uaOiMbNnluTg7gyQhtGqLQncQh+4J8= +github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee/go.mod h1:ozx7R9SIwqmqf5pRP90DhR2Oay2UIjGuKheCBCNwAYU= +github.com/golangci/gofmt v0.0.0-20181105071733-0b8337e80d98 h1:ir6/L2ZOJfFrJlOTsuf/hlzdPuUwXV/VzkSlgS6f1vs= +github.com/golangci/gofmt v0.0.0-20181105071733-0b8337e80d98/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU= +github.com/golangci/golangci-lint v1.17.1 h1:lc8Hf9GPCjIr0hg3S/xhvFT1+Hydass8F1xchr8jkME= +github.com/golangci/golangci-lint v1.17.1/go.mod h1:+5sJSl2h3aly+fpmL2meSP8CaSKua2E4Twi9LPy7b1g= +github.com/golangci/gosec v0.0.0-20180901114220-66fb7fc33547 h1:qMomh8bv+kDazm1dSLZ9S3zZ2PJZMHL4ilfBjxFOlmI= +github.com/golangci/gosec v0.0.0-20180901114220-66fb7fc33547/go.mod h1:0qUabqiIQgfmlAmulqxyiGkkyF6/tOGSnY2cnPVwrzU= +github.com/golangci/ineffassign v0.0.0-20180808204949-42439a7714cc h1:XRFao922N8F3EcIXBSNX8Iywk+GI0dxD/8FicMX2D/c= +github.com/golangci/ineffassign v0.0.0-20180808204949-42439a7714cc/go.mod h1:e5tpTHCfVze+7EpLEozzMB3eafxo2KT5veNg1k6byQU= +github.com/golangci/lint-1 v0.0.0-20180610141402-ee948d087217 h1:r7vyX+SN24x6+5AnpnrRn/bdwBb7U+McZqCHOVtXDuk= +github.com/golangci/lint-1 v0.0.0-20180610141402-ee948d087217/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA= +github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770 h1:EL/O5HGrF7Jaq0yNhBLucz9hTuRzj2LdwGBOaENgxIk= +github.com/golangci/misspell v0.0.0-20180809174111-950f5d19e770/go.mod h1:dEbvlSfYbMQDtrpRMQU675gSDLDNa8sCPPChZ7PhiVA= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 h1:leSNB7iYzLYSSx3J/s5sVf4Drkc68W2wm4Ixh/mr0us= +github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bzsHdTM0bsB7+8mt0GUMvjCgwLpTapNZHU8AajI= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0 h1:HVfrLniijszjS1aiNg8JbBMO2+E1WIQ+j/gL4SQqGPg= +github.com/golangci/revgrep v0.0.0-20180526074752-d9c87f5ffaf0/go.mod h1:qOQCunEYvmd/TLamH+7LlVccLvUH5kZNhbCgTHoBbp4= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys= +github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4/go.mod h1:Izgrg8RkN3rCIMLGE9CyYmU9pY2Jer6DgANEnZ/L/cQ= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/martian 
v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/monologue v0.0.0-20190606152607-4b11a32b5934 h1:0+3qDY6030dpAiEdmBqIsz3lg2SgXAvPEEq2sjm5UBk= +github.com/google/monologue v0.0.0-20190606152607-4b11a32b5934/go.mod h1:6NTfaQoUpg5QmPsCUWLR3ig33FHrKXhTtWzF0DVdmuk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/trillian v1.2.2-0.20190612132142-05461f4df60a h1:Bn71r2jt5ObayLNUtMlCzNlKiw7o59esC9sz9ENjSe0= +github.com/google/trillian v1.2.2-0.20190612132142-05461f4df60a/go.mod h1:YPmUVn5NGwgnDUgqlVyFGMTgaWlnSvH7W5p+NdOG8UA= +github.com/google/trillian-examples v0.0.0-20190603134952-4e75ba15216c h1:dv2J28D109qglM6VfNzAXZ7VddBojviT5oMSs1yeDUY= +github.com/google/trillian-examples v0.0.0-20190603134952-4e75ba15216c/go.mod h1:WgL3XZ3pA8/9cm7yxqWrZE6iZkESB2ItGxy5Fo6k2lk= +github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3 h1:JVnpOZS+qxli+rgVl98ILOXVNbW+kb5wcxeGx8ShUIw= +github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6 
h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/juju/ratelimit v1.0.1 h1:+7AIFJVQ0EQgq/K9+0Krm7m530Du7tIz0METWzN0RgY= +github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v0.0.0-20161130080628-0de1eaf82fa3/go.mod h1:jxZFDH7ILpTPQTk+E2s+z4CUas9lVNjIuKR4c5/zKgM= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= +github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/letsencrypt/pkcs11key v2.0.1-0.20170608213348-396559074696+incompatible h1:GfzE+uq7odDW7nOmp1QWuilLEK7kJf8i84XcIfk3mKA= +github.com/letsencrypt/pkcs11key v2.0.1-0.20170608213348-396559074696+incompatible/go.mod h1:iGYXKqDXt0cpBthCHdr9ZdsQwyGlYFh/+8xa4WzIQ34= +github.com/lib/pq v1.1.1 h1:sJZmqHoEaY7f+NPP8pgLB/WxulyR3fewgCM2qaSlBb4= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= +github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.10.0 
h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/pkcs11 v1.0.2 h1:CIBkOawOtzJNE0B+EpRiUBzuVW7JEQAwdwhSS6YhIeg= +github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-ps v0.0.0-20170309133038-4fdf99ab2936/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= +github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/mozilla/tls-observatory v0.0.0-20180409132520-8791a200eb40/go.mod h1:SrKMQvPiws7F7iqYp8/TX+IhxCYhzr6N/1yb8cwHsGk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nbutton23/zxcvbn-go v0.0.0-20160627004424-a22cb81b2ecd/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663 h1:Ri1EhipkbhWsffPJ3IPlrb4SkTOPa2PfRXp3jchBczw= +github.com/nbutton23/zxcvbn-go v0.0.0-20171102151520-eafdab6b0663/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= +github.com/olekukonko/tablewriter v0.0.1 h1:b3iUnf1v+ppJiOfNX4yxxqfWKMQPZR5yoh8urCTFX88= +github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I= +github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pelletier/go-toml v1.1.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod 
h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.4 h1:Y8E/JaaPbmFSW2V81Ab/d8yZFYQQGbni1b1jPcG9Y6A= +github.com/prometheus/client_golang v0.9.4/go.mod h1:oCXIBxdI62A4cR6aTRJCgetEjecSIYzOEaeAn4iYEpM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/quasilyte/go-consistent v0.0.0-20190521200055-c6f3937de18c/go.mod h1:5STLWrekHfjyYwxBRVRXNOSewLJ3PWfDJd1VyTS21fI= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/cors v1.6.0 h1:G9tHG9lebljV9mfp9SNPDL36nCDxmo3zTlAf1YgvzmI= +github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/ryanuber/go-glob v0.0.0-20170128012129-256dc444b735/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e h1:MZM7FHLqUHYI0Y/mQAt3d2aYa0SiNms/hFqC9qJYolM= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041 h1:llrF3Fs4018ePo4+G/HV/uQUqEI1HMDjCeOf2V6puPc= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sourcegraph/go-diff 
v0.5.1 h1:gO6i5zugwzo1RVTvgvfwCOSVegNuvnNi6bAD1QCmkHs= +github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE= +github.com/spf13/afero v1.1.0/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.2.0/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v0.0.0-20180109140146-7c0cea34c8ec/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.0.2/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/timakin/bodyclose v0.0.0-20190407043127-4a873e97b2bb h1:lI9ufgFfvuqRctP9Ny8lDDLbSWCMxBPletcSqrnyFYM= +github.com/timakin/bodyclose v0.0.0-20190407043127-4a873e97b2bb/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.2.0/go.mod h1:4vX61m6KN+xDduDNwXrhIAVZaZaZiQ1luJk8LWSxF3s= +github.com/valyala/quicktemplate v1.1.1/go.mod h1:EH+4AkTd43SvgIbQHYu59/cJyxDoOVRUAfrukLPuGJ4= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= 
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v3.3.13+incompatible h1:jCejD5EMnlGxFvcGRyEV4VGlENZc7oPQX6o0t7n3xbw= +go.etcd.io/etcd v3.3.13+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a h1:YX8ljsm6wXlHZO+aRz9Exqr0evNhKRNe5K/gi+zKh4U= +golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20170915142106-8351a756f30f/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180911220305-26e67e76b6c3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190606173856-1492cefac77f h1:IWHgpgFqnL5AhBUBZSgBdjl2vkQUEzcY+JNKWfcgAU0= +golang.org/x/net v0.0.0-20190606173856-1492cefac77f/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20171026204733-164713f0dfce/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b h1:ag/x1USPSsqHud38I9BAC88qdNLDHHtQ4mlgQIZPPNA= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915090833-1cbadb444a80/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= 
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20170915040203-e531a2a1c15f/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181117154741-2ddaf7f79a09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190121143147-24cd39ecf745/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190311215038-5c2858a9cfe5/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190322203728-c1a832b0ad89/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd h1:7E3PabyysDSEjnaANKBgums/hyvMI/HoHQ50qZEzTrg= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g= +google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6 h1:XRqWpmQ5ACYxWuYX495S0sHawhPGOVrh62WzgXsQnWs= +google.golang.org/genproto v0.0.0-20190605220351-eb0b1bdb6ae6/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc 
v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= +gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= +mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphDJbHOQO1DFFFTeBo= +mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= +mvdan.cc/unparam v0.0.0-20190124213536-fbb59629db34 h1:B1LAOfRqg2QUyCdzfjf46quTSYUTAK5OCwbh6pljHbM= +mvdan.cc/unparam v0.0.0-20190124213536-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 h1:JPJh2pk3+X4lXAkZIk2RuE/7/FoK9maXw+TNPJhVS/c= +sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go new file mode 100644 index 00000000..0c969d09 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/jsonclient/backoff.go @@ -0,0 +1,72 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonclient
+
+import (
+    "sync"
+    "time"
+)
+
+type backoff struct {
+    mu         sync.RWMutex
+    multiplier uint
+    notBefore  time.Time
+}
+
+const (
+    // maximum backoff is 2^(maxMultiplier-1) = 128 seconds
+    maxMultiplier = 8
+)
+
+func (b *backoff) set(override *time.Duration) time.Duration {
+    b.mu.Lock()
+    defer b.mu.Unlock()
+    if b.notBefore.After(time.Now()) {
+        if override != nil {
+            // If existing backoff is set but override would be longer than
+            // it then set it to that.
+            notBefore := time.Now().Add(*override)
+            if notBefore.After(b.notBefore) {
+                b.notBefore = notBefore
+            }
+        }
+        return time.Until(b.notBefore)
+    }
+    var wait time.Duration
+    if override != nil {
+        wait = *override
+    } else {
+        if b.multiplier < maxMultiplier {
+            b.multiplier++
+        }
+        wait = time.Second * time.Duration(1<<(b.multiplier-1))
+    }
+    b.notBefore = time.Now().Add(wait)
+    return wait
+}
+
+func (b *backoff) decreaseMultiplier() {
+    b.mu.Lock()
+    defer b.mu.Unlock()
+    if b.multiplier > 0 {
+        b.multiplier--
+    }
+}
+
+func (b *backoff) until() time.Time {
+    b.mu.RLock()
+    defer b.mu.RUnlock()
+    return b.notBefore
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
new file mode 100644
index 00000000..a33099b6
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/jsonclient/client.go
@@ -0,0 +1,323 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jsonclient
+
+import (
+    "bytes"
+    "context"
+    "crypto"
+    "encoding/json"
+    "errors"
+    "fmt"
+    "io/ioutil"
+    "log"
+    "math/rand"
+    "net/http"
+    "net/url"
+    "strconv"
+    "strings"
+    "time"
+
+    "github.com/golang/glog"
+    ct "github.com/google/certificate-transparency-go"
+    "github.com/google/certificate-transparency-go/x509"
+    "golang.org/x/net/context/ctxhttp"
+)
+
+const maxJitter = 250 * time.Millisecond
+
+type backoffer interface {
+    // set adjusts/increases the current backoff interval (typically on retryable failure);
+    // if the optional parameter is provided, this will be used as the interval if it is greater
+    // than the currently set interval. Returns the current wait period so that it can be
+    // logged along with any error message.
+    set(*time.Duration) time.Duration
+    // decreaseMultiplier reduces the current backoff multiplier, typically on success.
+    decreaseMultiplier()
+    // until returns the time until which the client should wait before making a request;
+    // it may be in the past, in which case it should be ignored.
+    until() time.Time
+}
+
+// JSONClient provides common functionality for interacting with a JSON server
+// that uses cryptographic signatures.
+type JSONClient struct {
+    uri        string                // the base URI of the server. e.g. https://ct.googleapis.com/pilot
+    httpClient *http.Client          // used to interact with the server via HTTP
+    Verifier   *ct.SignatureVerifier // nil for no verification (e.g. no public key available)
+    logger     Logger                // interface to use for logging warnings and errors
+    backoff    backoffer             // object used to store and calculate backoff information
+    userAgent  string                // If set, this is sent as the UserAgent header.
+}
+
+// Logger is a simple logging interface used to log internal errors and warnings.
+type Logger interface {
+    // Printf formats and logs a message.
+    Printf(string, ...interface{})
+}
+
+// Options are the options for creating a new JSONClient.
+type Options struct {
+    // Interface to use for logging warnings and errors; if nil, the
+    // standard library log package will be used.
+    Logger Logger
+    // PEM format public key to use for signature verification.
+    PublicKey string
+    // DER format public key to use for signature verification.
+    PublicKeyDER []byte
+    // UserAgent, if set, will be sent as the User-Agent header with each request.
+    UserAgent string
+}
+
+// ParsePublicKey parses and returns the public key contained in opts.
+// If both opts.PublicKey and opts.PublicKeyDER are set, PublicKeyDER is used.
+// If neither is set, nil will be returned.
+func (opts *Options) ParsePublicKey() (crypto.PublicKey, error) {
+    if len(opts.PublicKeyDER) > 0 {
+        return x509.ParsePKIXPublicKey(opts.PublicKeyDER)
+    }
+
+    if opts.PublicKey != "" {
+        pubkey, _ /* keyhash */, rest, err := ct.PublicKeyFromPEM([]byte(opts.PublicKey))
+        if err != nil {
+            return nil, err
+        }
+        if len(rest) > 0 {
+            return nil, errors.New("extra data found after PEM key decoded")
+        }
+        return pubkey, nil
+    }
+
+    return nil, nil
+}
+
+type basicLogger struct{}
+
+func (bl *basicLogger) Printf(msg string, args ...interface{}) {
+    log.Printf(msg, args...)
+}
+
+// RspError represents an error that occurred when processing a response from a server,
+// and also includes key details from the http.Response that triggered the error.
+type RspError struct {
+    Err        error
+    StatusCode int
+    Body       []byte
+}
+
+// Error formats the RspError instance, focusing on the error.
+func (e RspError) Error() string {
+    return e.Err.Error()
+}
+
+// New constructs a new JSONClient instance, for the given base URI, using the
+// given http.Client object (if provided) and the Options object.
+// If opts does not specify a public key, signatures will not be verified.
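+//
+// For illustration only, a minimal sketch of typical use (the log URL is
+// hypothetical, ctx is an existing context.Context, and error handling is
+// elided):
+//
+//     client, _ := New("https://ct.example.com/testlog", nil, Options{UserAgent: "example/1.0"})
+//     var sth ct.GetSTHResponse
+//     _, _, _ = client.GetAndParse(ctx, "/ct/v1/get-sth", nil, &sth)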
+func New(uri string, hc *http.Client, opts Options) (*JSONClient, error) {
+    pubkey, err := opts.ParsePublicKey()
+    if err != nil {
+        return nil, fmt.Errorf("invalid public key: %v", err)
+    }
+
+    var verifier *ct.SignatureVerifier
+    if pubkey != nil {
+        var err error
+        verifier, err = ct.NewSignatureVerifier(pubkey)
+        if err != nil {
+            return nil, err
+        }
+    }
+
+    if hc == nil {
+        hc = new(http.Client)
+    }
+    logger := opts.Logger
+    if logger == nil {
+        logger = &basicLogger{}
+    }
+    return &JSONClient{
+        uri:        strings.TrimRight(uri, "/"),
+        httpClient: hc,
+        Verifier:   verifier,
+        logger:     logger,
+        backoff:    &backoff{},
+        userAgent:  opts.UserAgent,
+    }, nil
+}
+
+// BaseURI returns the base URI that the JSONClient makes queries to.
+func (c *JSONClient) BaseURI() string {
+    return c.uri
+}
+
+// GetAndParse makes an HTTP GET call to the given path, and attempts to parse
+// the response as a JSON representation of the rsp structure. Returns the
+// http.Response, the body of the response, and an error (which may be of
+// type RspError if the HTTP response was available).
+func (c *JSONClient) GetAndParse(ctx context.Context, path string, params map[string]string, rsp interface{}) (*http.Response, []byte, error) {
+    if ctx == nil {
+        return nil, nil, errors.New("context.Context required")
+    }
+    // Build a GET request with URL-encoded parameters.
+    vals := url.Values{}
+    for k, v := range params {
+        vals.Add(k, v)
+    }
+    fullURI := fmt.Sprintf("%s%s?%s", c.uri, path, vals.Encode())
+    glog.V(2).Infof("GET %s", fullURI)
+    httpReq, err := http.NewRequest(http.MethodGet, fullURI, nil)
+    if err != nil {
+        return nil, nil, err
+    }
+    if len(c.userAgent) != 0 {
+        httpReq.Header.Set("User-Agent", c.userAgent)
+    }
+
+    httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
+    if err != nil {
+        return nil, nil, err
+    }
+
+    // Read everything now so http.Client can reuse the connection.
+    body, err := ioutil.ReadAll(httpRsp.Body)
+    httpRsp.Body.Close()
+    if err != nil {
+        return nil, nil, RspError{Err: fmt.Errorf("failed to read response body: %v", err), StatusCode: httpRsp.StatusCode, Body: body}
+    }
+
+    if httpRsp.StatusCode != http.StatusOK {
+        return nil, nil, RspError{Err: fmt.Errorf("got HTTP Status %q", httpRsp.Status), StatusCode: httpRsp.StatusCode, Body: body}
+    }
+
+    if err := json.NewDecoder(bytes.NewReader(body)).Decode(rsp); err != nil {
+        return nil, nil, RspError{Err: err, StatusCode: httpRsp.StatusCode, Body: body}
+    }
+
+    return httpRsp, body, nil
+}
+
+// PostAndParse makes an HTTP POST call to the given path, including the request
+// parameters, and attempts to parse the response as a JSON representation of
+// the rsp structure. Returns the http.Response, the body of the response, and
+// an error (which may be of type RspError if the HTTP response was available).
+func (c *JSONClient) PostAndParse(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
+    if ctx == nil {
+        return nil, nil, errors.New("context.Context required")
+    }
+    // Build a POST request with JSON body.
+    postBody, err := json.Marshal(req)
+    if err != nil {
+        return nil, nil, err
+    }
+    fullURI := fmt.Sprintf("%s%s", c.uri, path)
+    glog.V(2).Infof("POST %s", fullURI)
+    httpReq, err := http.NewRequest(http.MethodPost, fullURI, bytes.NewReader(postBody))
+    if err != nil {
+        return nil, nil, err
+    }
+    if len(c.userAgent) != 0 {
+        httpReq.Header.Set("User-Agent", c.userAgent)
+    }
+    httpReq.Header.Set("Content-Type", "application/json")
+
+    httpRsp, err := ctxhttp.Do(ctx, c.httpClient, httpReq)
+
+    // Read all of the body, if there is one, so that the http.Client can do Keep-Alive.
+    var body []byte
+    if httpRsp != nil {
+        body, err = ioutil.ReadAll(httpRsp.Body)
+        httpRsp.Body.Close()
+    }
+    if err != nil {
+        if httpRsp != nil {
+            return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
+        }
+        return nil, nil, err
+    }
+
+    if httpRsp.StatusCode == http.StatusOK {
+        if err = json.Unmarshal(body, &rsp); err != nil {
+            return nil, nil, RspError{StatusCode: httpRsp.StatusCode, Body: body, Err: err}
+        }
+    }
+    return httpRsp, body, nil
+}
+
+// waitForBackoff blocks until the defined backoff interval has elapsed or the
+// context has expired; if the backoff's not-before time is already in the past
+// it returns immediately.
+func (c *JSONClient) waitForBackoff(ctx context.Context) error {
+    dur := time.Until(c.backoff.until().Add(time.Millisecond * time.Duration(rand.Intn(int(maxJitter.Seconds()*1000)))))
+    if dur < 0 {
+        dur = 0
+    }
+    backoffTimer := time.NewTimer(dur)
+    select {
+    case <-ctx.Done():
+        return ctx.Err()
+    case <-backoffTimer.C:
+    }
+    return nil
+}
+
+// PostAndParseWithRetry makes an HTTP POST call, but retries (with backoff) on
+// retriable errors; the caller should set a deadline on the provided context
+// to prevent infinite retries. Return values are as for PostAndParse.
+func (c *JSONClient) PostAndParseWithRetry(ctx context.Context, path string, req, rsp interface{}) (*http.Response, []byte, error) {
+    if ctx == nil {
+        return nil, nil, errors.New("context.Context required")
+    }
+    for {
+        httpRsp, body, err := c.PostAndParse(ctx, path, req, rsp)
+        if err != nil {
+            // Don't retry context errors.
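+            // A canceled or expired context can never succeed on retry, so
+            // such errors are returned to the caller immediately.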
+            if err == context.Canceled || err == context.DeadlineExceeded {
+                return nil, nil, err
+            }
+            wait := c.backoff.set(nil)
+            c.logger.Printf("Request failed, backing-off on %s for %s: %s", c.uri, wait, err)
+        } else {
+            switch {
+            case httpRsp.StatusCode == http.StatusOK:
+                return httpRsp, body, nil
+            case httpRsp.StatusCode == http.StatusRequestTimeout:
+                // Request timeout, retry immediately.
+                c.logger.Printf("Request timed out, retrying immediately")
+            case httpRsp.StatusCode == http.StatusServiceUnavailable:
+                var backoff *time.Duration
+                // Retry-After may be either a number of seconds as an int or an RFC 1123
+                // date string (RFC 7231 Section 7.1.3).
+                if retryAfter := httpRsp.Header.Get("Retry-After"); retryAfter != "" {
+                    if seconds, err := strconv.Atoi(retryAfter); err == nil {
+                        b := time.Duration(seconds) * time.Second
+                        backoff = &b
+                    } else if date, err := time.Parse(time.RFC1123, retryAfter); err == nil {
+                        b := time.Until(date)
+                        backoff = &b
+                    }
+                }
+                wait := c.backoff.set(backoff)
+                c.logger.Printf("Request failed, backing-off for %s: got HTTP status %s", wait, httpRsp.Status)
+            default:
+                return nil, nil, RspError{
+                    StatusCode: httpRsp.StatusCode,
+                    Body:       body,
+                    Err:        fmt.Errorf("got HTTP status %q", httpRsp.Status)}
+            }
+        }
+        if err := c.waitForBackoff(ctx); err != nil {
+            return nil, nil, err
+        }
+    }
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/serialization.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/serialization.go
new file mode 100644
index 00000000..a1b558d1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/serialization.go
@@ -0,0 +1,347 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ct
+
+import (
+    "crypto"
+    "crypto/sha256"
+    "encoding/json"
+    "fmt"
+    "strings"
+    "time"
+
+    "github.com/google/certificate-transparency-go/tls"
+    "github.com/google/certificate-transparency-go/x509"
+)
+
+// SerializeSCTSignatureInput serializes the passed in sct and log entry into
+// the correct format for signing.
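+// For V1 SCTs this is the TLS encoding of the digitally-signed struct from
+// RFC 6962 section 3.2: version, signature type, timestamp, entry type, the
+// entry itself, and any extensions.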
+func SerializeSCTSignatureInput(sct SignedCertificateTimestamp, entry LogEntry) ([]byte, error) {
+    switch sct.SCTVersion {
+    case V1:
+        input := CertificateTimestamp{
+            SCTVersion:    sct.SCTVersion,
+            SignatureType: CertificateTimestampSignatureType,
+            Timestamp:     sct.Timestamp,
+            EntryType:     entry.Leaf.TimestampedEntry.EntryType,
+            Extensions:    sct.Extensions,
+        }
+        switch entry.Leaf.TimestampedEntry.EntryType {
+        case X509LogEntryType:
+            input.X509Entry = entry.Leaf.TimestampedEntry.X509Entry
+        case PrecertLogEntryType:
+            input.PrecertEntry = &PreCert{
+                IssuerKeyHash:  entry.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash,
+                TBSCertificate: entry.Leaf.TimestampedEntry.PrecertEntry.TBSCertificate,
+            }
+        case XJSONLogEntryType:
+            input.JSONEntry = entry.Leaf.TimestampedEntry.JSONEntry
+        default:
+            return nil, fmt.Errorf("unsupported entry type %s", entry.Leaf.TimestampedEntry.EntryType)
+        }
+        return tls.Marshal(input)
+    default:
+        return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion)
+    }
+}
+
+// SerializeSTHSignatureInput serializes the passed in STH into the correct
+// format for signing.
+func SerializeSTHSignatureInput(sth SignedTreeHead) ([]byte, error) {
+    switch sth.Version {
+    case V1:
+        if len(sth.SHA256RootHash) != crypto.SHA256.Size() {
+            return nil, fmt.Errorf("invalid TreeHash length, got %d expected %d", len(sth.SHA256RootHash), crypto.SHA256.Size())
+        }
+
+        input := TreeHeadSignature{
+            Version:        sth.Version,
+            SignatureType:  TreeHashSignatureType,
+            Timestamp:      sth.Timestamp,
+            TreeSize:       sth.TreeSize,
+            SHA256RootHash: sth.SHA256RootHash,
+        }
+        return tls.Marshal(input)
+    default:
+        return nil, fmt.Errorf("unsupported STH version %d", sth.Version)
+    }
+}
+
+// CreateX509MerkleTreeLeaf generates a MerkleTreeLeaf for an X509 cert
+func CreateX509MerkleTreeLeaf(cert ASN1Cert, timestamp uint64) *MerkleTreeLeaf {
+    return &MerkleTreeLeaf{
+        Version:  V1,
+        LeafType: TimestampedEntryLeafType,
+        TimestampedEntry: &TimestampedEntry{
+            Timestamp: timestamp,
+            EntryType: X509LogEntryType,
+            X509Entry: &cert,
+        },
+    }
+}
+
+// CreateJSONMerkleTreeLeaf creates the merkle tree leaf for json data.
+func CreateJSONMerkleTreeLeaf(data interface{}, timestamp uint64) *MerkleTreeLeaf {
+    jsonData, err := json.Marshal(AddJSONRequest{Data: data})
+    if err != nil {
+        return nil
+    }
+    // Match the JSON serialization implemented by json-c
+    jsonStr := strings.Replace(string(jsonData), ":", ": ", -1)
+    jsonStr = strings.Replace(jsonStr, ",", ", ", -1)
+    jsonStr = strings.Replace(jsonStr, "{", "{ ", -1)
+    jsonStr = strings.Replace(jsonStr, "}", " }", -1)
+    jsonStr = strings.Replace(jsonStr, "/", `\/`, -1)
+    // TODO: Pending google/certificate-transparency#1243, replace with
+    // ObjectHash once supported by CT server.
+
+    return &MerkleTreeLeaf{
+        Version:  V1,
+        LeafType: TimestampedEntryLeafType,
+        TimestampedEntry: &TimestampedEntry{
+            Timestamp: timestamp,
+            EntryType: XJSONLogEntryType,
+            JSONEntry: &JSONDataEntry{Data: []byte(jsonStr)},
+        },
+    }
+}
+
+// MerkleTreeLeafFromRawChain generates a MerkleTreeLeaf from a chain (in DER-encoded form) and timestamp.
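+// Only the first three certificates in the chain are ever inspected: the
+// leaf, its issuer, and (when a precert was signed by a pre-issuer) the
+// pre-issuer's own issuer.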
+func MerkleTreeLeafFromRawChain(rawChain []ASN1Cert, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
+    // Need at most 3 of the chain
+    count := 3
+    if count > len(rawChain) {
+        count = len(rawChain)
+    }
+    chain := make([]*x509.Certificate, count)
+    for i := range chain {
+        cert, err := x509.ParseCertificate(rawChain[i].Data)
+        if x509.IsFatal(err) {
+            return nil, fmt.Errorf("failed to parse chain[%d] cert: %v", i, err)
+        }
+        chain[i] = cert
+    }
+    return MerkleTreeLeafFromChain(chain, etype, timestamp)
+}
+
+// MerkleTreeLeafFromChain generates a MerkleTreeLeaf from a chain and timestamp.
+func MerkleTreeLeafFromChain(chain []*x509.Certificate, etype LogEntryType, timestamp uint64) (*MerkleTreeLeaf, error) {
+    leaf := MerkleTreeLeaf{
+        Version:  V1,
+        LeafType: TimestampedEntryLeafType,
+        TimestampedEntry: &TimestampedEntry{
+            EntryType: etype,
+            Timestamp: timestamp,
+        },
+    }
+    if etype == X509LogEntryType {
+        leaf.TimestampedEntry.X509Entry = &ASN1Cert{Data: chain[0].Raw}
+        return &leaf, nil
+    }
+    if etype != PrecertLogEntryType {
+        return nil, fmt.Errorf("unknown LogEntryType %d", etype)
+    }
+
+    // Pre-certs are more complicated. First, parse the leaf pre-cert and its
+    // putative issuer.
+    if len(chain) < 2 {
+        return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+    }
+    issuer := chain[1]
+    cert := chain[0]
+
+    var preIssuer *x509.Certificate
+    if IsPreIssuer(issuer) {
+        // Replace the cert's issuance information with details from the pre-issuer.
+        preIssuer = issuer
+
+        // The issuer of the pre-cert is not going to be the issuer of the final
+        // cert. Change to use the final issuer's key hash.
+        if len(chain) < 3 {
+            return nil, fmt.Errorf("no issuer cert available for pre-issuer")
+        }
+        issuer = chain[2]
+    }
+
+    // Next, post-process the DER-encoded TBSCertificate, to remove the CT poison
+    // extension and possibly update the issuer field.
+    defangedTBS, err := x509.BuildPrecertTBS(cert.RawTBSCertificate, preIssuer)
+    if err != nil {
+        return nil, fmt.Errorf("failed to remove poison extension: %v", err)
+    }
+
+    leaf.TimestampedEntry.EntryType = PrecertLogEntryType
+    leaf.TimestampedEntry.PrecertEntry = &PreCert{
+        IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+        TBSCertificate: defangedTBS,
+    }
+    return &leaf, nil
+}
+
+// MerkleTreeLeafForEmbeddedSCT generates a MerkleTreeLeaf from a chain and an
+// SCT timestamp, where the leaf certificate at chain[0] is a certificate that
+// contains embedded SCTs. It is assumed that the timestamp provided is from
+// one of the SCTs embedded within the leaf certificate.
+func MerkleTreeLeafForEmbeddedSCT(chain []*x509.Certificate, timestamp uint64) (*MerkleTreeLeaf, error) {
+    // For building the leaf for a certificate and SCT where the SCT is embedded
+    // in the certificate, we need to build the original precertificate TBS
+    // data. First, parse the leaf cert and its issuer.
+    if len(chain) < 2 {
+        return nil, fmt.Errorf("no issuer cert available for precert leaf building")
+    }
+    issuer := chain[1]
+    cert := chain[0]
+
+    // Next, post-process the DER-encoded TBSCertificate, to remove the SCTList
+    // extension.
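+    // An embedded SCT cannot cover itself, so stripping the SCTList extension
+    // recovers the TBSCertificate as it looked when the SCT was issued.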
+    tbs, err := x509.RemoveSCTList(cert.RawTBSCertificate)
+    if err != nil {
+        return nil, fmt.Errorf("failed to remove SCT List extension: %v", err)
+    }
+
+    return &MerkleTreeLeaf{
+        Version:  V1,
+        LeafType: TimestampedEntryLeafType,
+        TimestampedEntry: &TimestampedEntry{
+            EntryType: PrecertLogEntryType,
+            Timestamp: timestamp,
+            PrecertEntry: &PreCert{
+                IssuerKeyHash:  sha256.Sum256(issuer.RawSubjectPublicKeyInfo),
+                TBSCertificate: tbs,
+            },
+        },
+    }, nil
+}
+
+// LeafHashForLeaf returns the leaf hash for a Merkle tree leaf.
+func LeafHashForLeaf(leaf *MerkleTreeLeaf) ([sha256.Size]byte, error) {
+    leafData, err := tls.Marshal(*leaf)
+    if err != nil {
+        return [sha256.Size]byte{}, fmt.Errorf("failed to tls-encode MerkleTreeLeaf: %s", err)
+    }
+
+    data := append([]byte{TreeLeafPrefix}, leafData...)
+    leafHash := sha256.Sum256(data)
+    return leafHash, nil
+}
+
+// IsPreIssuer indicates whether a certificate is a pre-cert issuer with the specific
+// certificate transparency extended key usage.
+func IsPreIssuer(issuer *x509.Certificate) bool {
+    for _, eku := range issuer.ExtKeyUsage {
+        if eku == x509.ExtKeyUsageCertificateTransparency {
+            return true
+        }
+    }
+    return false
+}
+
+// RawLogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data
+// after JSON parsing) into a RawLogEntry object (i.e. a TLS-parsed structure).
+func RawLogEntryFromLeaf(index int64, entry *LeafEntry) (*RawLogEntry, error) {
+    ret := RawLogEntry{Index: index}
+    if rest, err := tls.Unmarshal(entry.LeafInput, &ret.Leaf); err != nil {
+        return nil, fmt.Errorf("failed to unmarshal MerkleTreeLeaf: %v", err)
+    } else if len(rest) > 0 {
+        return nil, fmt.Errorf("MerkleTreeLeaf: trailing data %d bytes", len(rest))
+    }
+
+    switch eType := ret.Leaf.TimestampedEntry.EntryType; eType {
+    case X509LogEntryType:
+        var certChain CertificateChain
+        if rest, err := tls.Unmarshal(entry.ExtraData, &certChain); err != nil {
+            return nil, fmt.Errorf("failed to unmarshal CertificateChain: %v", err)
+        } else if len(rest) > 0 {
+            return nil, fmt.Errorf("CertificateChain: trailing data %d bytes", len(rest))
+        }
+        ret.Cert = *ret.Leaf.TimestampedEntry.X509Entry
+        ret.Chain = certChain.Entries
+
+    case PrecertLogEntryType:
+        var precertChain PrecertChainEntry
+        if rest, err := tls.Unmarshal(entry.ExtraData, &precertChain); err != nil {
+            return nil, fmt.Errorf("failed to unmarshal PrecertChainEntry: %v", err)
+        } else if len(rest) > 0 {
+            return nil, fmt.Errorf("PrecertChainEntry: trailing data %d bytes", len(rest))
+        }
+        ret.Cert = precertChain.PreCertificate
+        ret.Chain = precertChain.CertificateChain
+
+    default:
+        // TODO(pavelkalinnikov): Section 4.6 of RFC6962 implies that unknown types
+        // are not errors. We should revisit how we process this case.
+        return nil, fmt.Errorf("unknown entry type: %v", eType)
+    }
+
+    return &ret, nil
+}
+
+// ToLogEntry converts RawLogEntry to a LogEntry, which includes an x509-parsed
+// (pre-)certificate.
+//
+// Note that this function may return a valid LogEntry object and a non-nil
+// error value, when the error indicates a non-fatal parsing error.
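+//
+// For illustration, a caller would typically distinguish the two outcomes
+// along these lines (sketch only):
+//
+//     entry, err := rle.ToLogEntry()
+//     if entry == nil {
+//         // fatal: err explains why the entry could not be parsed
+//     } else if err != nil {
+//         // non-fatal: entry is usable, err records the parsing problem
+//     }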
+func (rle *RawLogEntry) ToLogEntry() (*LogEntry, error) { + var err error + entry := LogEntry{Index: rle.Index, Leaf: rle.Leaf, Chain: rle.Chain} + + switch eType := rle.Leaf.TimestampedEntry.EntryType; eType { + case X509LogEntryType: + entry.X509Cert, err = rle.Leaf.X509Certificate() + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse certificate: %v", err) + } + + case PrecertLogEntryType: + var tbsCert *x509.Certificate + tbsCert, err = rle.Leaf.Precertificate() + if x509.IsFatal(err) { + return nil, fmt.Errorf("failed to parse precertificate: %v", err) + } + entry.Precert = &Precertificate{ + Submitted: rle.Cert, + IssuerKeyHash: rle.Leaf.TimestampedEntry.PrecertEntry.IssuerKeyHash, + TBSCertificate: tbsCert, + } + + default: + return nil, fmt.Errorf("unknown entry type: %v", eType) + } + + // err may be non-nil for a non-fatal error. + return &entry, err +} + +// LogEntryFromLeaf converts a LeafEntry object (which has the raw leaf data +// after JSON parsing) into a LogEntry object (which includes x509.Certificate +// objects, after TLS and ASN.1 parsing). +// +// Note that this function may return a valid LogEntry object and a non-nil +// error value, when the error indicates a non-fatal parsing error. +func LogEntryFromLeaf(index int64, leaf *LeafEntry) (*LogEntry, error) { + rle, err := RawLogEntryFromLeaf(index, leaf) + if err != nil { + return nil, err + } + return rle.ToLogEntry() +} + +// TimestampToTime converts a timestamp in the style of RFC 6962 (milliseconds +// since UNIX epoch) to a Go Time. +func TimestampToTime(ts uint64) time.Time { + secs := int64(ts / 1000) + msecs := int64(ts % 1000) + return time.Unix(secs, msecs*1000000) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/signatures.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/signatures.go new file mode 100644 index 00000000..b4039ea5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/signatures.go @@ -0,0 +1,110 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ct + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/sha256" + "encoding/base64" + "encoding/pem" + "fmt" + "log" + + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +// AllowVerificationWithNonCompliantKeys may be set to true in order to allow +// SignatureVerifier to use keys which are technically non-compliant with +// RFC6962. +var AllowVerificationWithNonCompliantKeys = false + +// PublicKeyFromPEM parses a PEM formatted block and returns the public key contained within and any remaining unread bytes, or an error. 
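+//
+// A hedged example (pemBytes is a placeholder for PEM-encoded input):
+//
+//	pubKey, keyHash, rest, err := PublicKeyFromPEM(pemBytes)
+//	if err != nil {
+//		return err
+//	}
+//	_ = rest    // bytes remaining after the first PEM block
+//	_ = keyHash // SHA-256 of the DER key, as used for RFC 6962 log IDs
+//	_ = pubKey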
+func PublicKeyFromPEM(b []byte) (crypto.PublicKey, SHA256Hash, []byte, error) {
+	p, rest := pem.Decode(b)
+	if p == nil {
+		return nil, [sha256.Size]byte{}, rest, fmt.Errorf("no PEM block found in %s", string(b))
+	}
+	k, err := x509.ParsePKIXPublicKey(p.Bytes)
+	return k, sha256.Sum256(p.Bytes), rest, err
+}
+
+// PublicKeyFromB64 parses a base64-encoded public key.
+func PublicKeyFromB64(b64PubKey string) (crypto.PublicKey, error) {
+	der, err := base64.StdEncoding.DecodeString(b64PubKey)
+	if err != nil {
+		return nil, fmt.Errorf("error decoding public key: %s", err)
+	}
+	return x509.ParsePKIXPublicKey(der)
+}
+
+// SignatureVerifier can verify signatures on SCTs and STHs.
+type SignatureVerifier struct {
+	PubKey crypto.PublicKey
+}
+
+// NewSignatureVerifier creates a new SignatureVerifier using the passed in PublicKey.
+func NewSignatureVerifier(pk crypto.PublicKey) (*SignatureVerifier, error) {
+	switch pkType := pk.(type) {
+	case *rsa.PublicKey:
+		if pkType.N.BitLen() < 2048 {
+			e := fmt.Errorf("public key is RSA with < 2048 bits (size:%d)", pkType.N.BitLen())
+			if !AllowVerificationWithNonCompliantKeys {
+				return nil, e
+			}
+			log.Printf("WARNING: %v", e)
+		}
+	case *ecdsa.PublicKey:
+		params := *(pkType.Params())
+		if params != *elliptic.P256().Params() {
+			e := fmt.Errorf("public key is ECDSA, but not on the P256 curve")
+			if !AllowVerificationWithNonCompliantKeys {
+				return nil, e
+			}
+			log.Printf("WARNING: %v", e)
+		}
+	default:
+		return nil, fmt.Errorf("unsupported public key type %v", pkType)
+	}
+
+	return &SignatureVerifier{PubKey: pk}, nil
+}
+
+// VerifySignature verifies the given signature sig matches the data.
+func (s SignatureVerifier) VerifySignature(data []byte, sig tls.DigitallySigned) error {
+	return tls.VerifySignature(s.PubKey, data, sig)
+}
+
+// VerifySCTSignature verifies that the SCT's signature is valid for the given LogEntry.
+func (s SignatureVerifier) VerifySCTSignature(sct SignedCertificateTimestamp, entry LogEntry) error {
+	sctData, err := SerializeSCTSignatureInput(sct, entry)
+	if err != nil {
+		return err
+	}
+	return s.VerifySignature(sctData, tls.DigitallySigned(sct.Signature))
+}
+
+// VerifySTHSignature verifies that the STH's signature is valid.
+func (s SignatureVerifier) VerifySTHSignature(sth SignedTreeHead) error {
+	sthData, err := SerializeSTHSignatureInput(sth)
+	if err != nil {
+		return err
+	}
+	return s.VerifySignature(sthData, tls.DigitallySigned(sth.TreeHeadSignature))
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/signature.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/signature.go
new file mode 100644
index 00000000..bfdb016d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/signature.go
@@ -0,0 +1,152 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tls
+
+import (
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	_ "crypto/md5" // For registration side-effect
+	"crypto/rand"
+	"crypto/rsa"
+	_ "crypto/sha1"   // For registration side-effect
+	_ "crypto/sha256" // For registration side-effect
+	_ "crypto/sha512" // For registration side-effect
+	"errors"
+	"fmt"
+	"log"
+	"math/big"
+
+	"github.com/google/certificate-transparency-go/asn1"
+)
+
+type dsaSig struct {
+	R, S *big.Int
+}
+
+func generateHash(algo HashAlgorithm, data []byte) ([]byte, crypto.Hash, error) {
+	var hashType crypto.Hash
+	switch algo {
+	case MD5:
+		hashType = crypto.MD5
+	case SHA1:
+		hashType = crypto.SHA1
+	case SHA224:
+		hashType = crypto.SHA224
+	case SHA256:
+		hashType = crypto.SHA256
+	case SHA384:
+		hashType = crypto.SHA384
+	case SHA512:
+		hashType = crypto.SHA512
+	default:
+		return nil, hashType, fmt.Errorf("unsupported Algorithm.Hash in signature: %v", algo)
+	}
+
+	hasher := hashType.New()
+	if _, err := hasher.Write(data); err != nil {
+		return nil, hashType, fmt.Errorf("failed to write to hasher: %v", err)
+	}
+	return hasher.Sum([]byte{}), hashType, nil
+}
+
+// VerifySignature verifies that the passed in signature over data was created by the given PublicKey.
+func VerifySignature(pubKey crypto.PublicKey, data []byte, sig DigitallySigned) error {
+	hash, hashType, err := generateHash(sig.Algorithm.Hash, data)
+	if err != nil {
+		return err
+	}
+
+	switch sig.Algorithm.Signature {
+	case RSA:
+		rsaKey, ok := pubKey.(*rsa.PublicKey)
+		if !ok {
+			return fmt.Errorf("cannot verify RSA signature with %T key", pubKey)
+		}
+		if err := rsa.VerifyPKCS1v15(rsaKey, hashType, hash, sig.Signature); err != nil {
+			return fmt.Errorf("failed to verify rsa signature: %v", err)
+		}
+	case DSA:
+		dsaKey, ok := pubKey.(*dsa.PublicKey)
+		if !ok {
+			return fmt.Errorf("cannot verify DSA signature with %T key", pubKey)
+		}
+		var dsaSig dsaSig
+		rest, err := asn1.Unmarshal(sig.Signature, &dsaSig)
+		if err != nil {
+			return fmt.Errorf("failed to unmarshal DSA signature: %v", err)
+		}
+		if len(rest) != 0 {
+			log.Printf("Garbage following signature %v", rest)
+		}
+		if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+			return errors.New("DSA signature contained zero or negative values")
+		}
+		if !dsa.Verify(dsaKey, hash, dsaSig.R, dsaSig.S) {
+			return errors.New("failed to verify DSA signature")
+		}
+	case ECDSA:
+		ecdsaKey, ok := pubKey.(*ecdsa.PublicKey)
+		if !ok {
+			return fmt.Errorf("cannot verify ECDSA signature with %T key", pubKey)
+		}
+		var ecdsaSig dsaSig
+		rest, err := asn1.Unmarshal(sig.Signature, &ecdsaSig)
+		if err != nil {
+			return fmt.Errorf("failed to unmarshal ECDSA signature: %v", err)
+		}
+		if len(rest) != 0 {
+			log.Printf("Garbage following signature %v", rest)
+		}
+		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+			return errors.New("ECDSA signature contained zero or negative values")
+		}
+
+		if !ecdsa.Verify(ecdsaKey, hash, ecdsaSig.R, ecdsaSig.S) {
+			return errors.New("failed to verify ECDSA signature")
+		}
+	default:
+		// Report the signature algorithm (not the hash algorithm) in the error.
+		return fmt.Errorf("unsupported Algorithm.Signature in signature: %v", sig.Algorithm.Signature)
+	}
+	return nil
+}
+
+// CreateSignature builds a signature over the given data using the specified hash algorithm and private key.
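+//
+// A hedged round-trip sketch (key is a placeholder *ecdsa.PrivateKey; note
+// that the switch below expects value types, hence the dereference):
+//
+//	ds, err := CreateSignature(*key, SHA256, data)
+//	if err != nil {
+//		return err
+//	}
+//	err = VerifySignature(key.Public(), data, ds)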
+func CreateSignature(privKey crypto.PrivateKey, hashAlgo HashAlgorithm, data []byte) (DigitallySigned, error) { + var sig DigitallySigned + sig.Algorithm.Hash = hashAlgo + hash, hashType, err := generateHash(sig.Algorithm.Hash, data) + if err != nil { + return sig, err + } + + switch privKey := privKey.(type) { + case rsa.PrivateKey: + sig.Algorithm.Signature = RSA + sig.Signature, err = rsa.SignPKCS1v15(rand.Reader, &privKey, hashType, hash) + return sig, err + case ecdsa.PrivateKey: + sig.Algorithm.Signature = ECDSA + var ecdsaSig dsaSig + ecdsaSig.R, ecdsaSig.S, err = ecdsa.Sign(rand.Reader, &privKey, hash) + if err != nil { + return sig, err + } + sig.Signature, err = asn1.Marshal(ecdsaSig) + return sig, err + default: + return sig, fmt.Errorf("unsupported private key type %T", privKey) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/tls.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/tls.go new file mode 100644 index 00000000..1bcd3a37 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/tls.go @@ -0,0 +1,711 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tls implements functionality for dealing with TLS-encoded data, +// as defined in RFC 5246. This includes parsing and generation of TLS-encoded +// data, together with utility functions for dealing with the DigitallySigned +// TLS type. +package tls + +import ( + "bytes" + "encoding/binary" + "fmt" + "reflect" + "strconv" + "strings" +) + +// This file holds utility functions for TLS encoding/decoding data +// as per RFC 5246 section 4. + +// A structuralError suggests that the TLS data is valid, but the Go type +// which is receiving it doesn't match. +type structuralError struct { + field string + msg string +} + +func (e structuralError) Error() string { + var prefix string + if e.field != "" { + prefix = e.field + ": " + } + return "tls: structure error: " + prefix + e.msg +} + +// A syntaxError suggests that the TLS data is invalid. +type syntaxError struct { + field string + msg string +} + +func (e syntaxError) Error() string { + var prefix string + if e.field != "" { + prefix = e.field + ": " + } + return "tls: syntax error: " + prefix + e.msg +} + +// Uint24 is an unsigned 3-byte integer. +type Uint24 uint32 + +// Enum is an unsigned integer. +type Enum uint64 + +var ( + uint8Type = reflect.TypeOf(uint8(0)) + uint16Type = reflect.TypeOf(uint16(0)) + uint24Type = reflect.TypeOf(Uint24(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + enumType = reflect.TypeOf(Enum(0)) +) + +// Unmarshal parses the TLS-encoded data in b and uses the reflect package to +// fill in an arbitrary value pointed at by val. Because Unmarshal uses the +// reflect package, the structs being written to must use exported fields +// (upper case names). 
+//
+// The mappings between TLS types and Go types are as follows; some fields
+// must have tags (to indicate their encoded size).
+//
+//	TLS		Go		Required Tags
+//	opaque		byte / uint8
+//	uint8		byte / uint8
+//	uint16		uint16
+//	uint24		tls.Uint24
+//	uint32		uint32
+//	uint64		uint64
+//	enum		tls.Enum	size:S or maxval:N
+//	Type<N,M>	[]Type		minlen:N,maxlen:M
+//	opaque[N]	[N]byte / [N]uint8
+//	uint8[N]	[N]byte / [N]uint8
+//	struct { }	struct { }
+//	select(T) {
+//	 case e1: Type	*T		selector:Field,val:e1
+//	}
+//
+// TLS variants (RFC 5246 s4.6.1) are only supported when the value of the
+// associated enumeration type is available earlier in the same enclosing
+// struct, and each possible variant is marked with a selector tag (to
+// indicate which field selects the variants) and a val tag (to indicate
+// what value of the selector picks this particular field).
+//
+// For example, a TLS structure:
+//
+//	enum { e1(1), e2(2) } EnumType;
+//	struct {
+//	   EnumType sel;
+//	   select(sel) {
+//	      case e1: uint16
+//	      case e2: uint32
+//	   } data;
+//	} VariantItem;
+//
+// would have a corresponding Go type:
+//
+//	type VariantItem struct {
+//	   Sel    tls.Enum `tls:"maxval:2"`
+//	   Data16 *uint16  `tls:"selector:Sel,val:1"`
+//	   Data32 *uint32  `tls:"selector:Sel,val:2"`
+//	}
+//
+// TLS fixed-length vectors of types other than opaque or uint8 are not supported.
+//
+// For TLS variable-length vectors that are themselves used in other vectors,
+// create a single-field structure to represent the inner type. For example, for:
+//
+//	opaque InnerType<1..65535>;
+//	struct {
+//	  InnerType inners<1..65535>;
+//	} Something;
+//
+// convert to:
+//
+//	type InnerType struct {
+//	   Val []byte `tls:"minlen:1,maxlen:65535"`
+//	}
+//	type Something struct {
+//	   Inners []InnerType `tls:"minlen:1,maxlen:65535"`
+//	}
+//
+// If the encoded value does not fit in the Go type, Unmarshal returns a parse error.
+func Unmarshal(b []byte, val interface{}) ([]byte, error) {
+	return UnmarshalWithParams(b, val, "")
+}
+
+// UnmarshalWithParams allows field parameters to be specified for the
+// top-level element. The form of the params is the same as the field tags.
+func UnmarshalWithParams(b []byte, val interface{}, params string) ([]byte, error) {
+	info, err := fieldTagToFieldInfo(params, "")
+	if err != nil {
+		return nil, err
+	}
+	// The passed in interface{} is a pointer (to allow the value to be written
+	// to); extract the pointed-to object as a reflect.Value, so parseField
+	// can do various introspection things.
+	v := reflect.ValueOf(val).Elem()
+	offset, err := parseField(v, b, 0, info)
+	if err != nil {
+		return nil, err
+	}
+	return b[offset:], nil
+}
+
+// Return the number of bytes needed to encode values up to (and including) x.
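+// For example, byteCount(255) == 1, byteCount(256) == 2 and
+// byteCount(0xffffff) == 3, which matches the length-prefix widths implied
+// by tags such as `maxlen:16777215`.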
+func byteCount(x uint64) uint {
+	switch {
+	case x < 0x100:
+		return 1
+	case x < 0x10000:
+		return 2
+	case x < 0x1000000:
+		return 3
+	case x < 0x100000000:
+		return 4
+	case x < 0x10000000000:
+		return 5
+	case x < 0x1000000000000:
+		return 6
+	case x < 0x100000000000000:
+		return 7
+	default:
+		return 8
+	}
+}
+
+type fieldInfo struct {
+	count    uint // Number of bytes
+	countSet bool
+	minlen   uint64 // Only relevant for slices
+	maxlen   uint64 // Only relevant for slices
+	selector string // Only relevant for select sub-values
+	val      uint64 // Only relevant for select sub-values
+	name     string // Used for better error messages
+}
+
+func (i *fieldInfo) fieldName() string {
+	if i == nil {
+		return ""
+	}
+	return i.name
+}
+
+// Given a tag string, return a fieldInfo describing the field.
+func fieldTagToFieldInfo(str string, name string) (*fieldInfo, error) {
+	var info *fieldInfo
+	// Iterate over clauses in the tag, ignoring any that don't parse properly.
+	for _, part := range strings.Split(str, ",") {
+		switch {
+		case strings.HasPrefix(part, "maxval:"):
+			if v, err := strconv.ParseUint(part[7:], 10, 64); err == nil {
+				info = &fieldInfo{count: byteCount(v), countSet: true}
+			}
+		case strings.HasPrefix(part, "size:"):
+			if sz, err := strconv.ParseUint(part[5:], 10, 32); err == nil {
+				info = &fieldInfo{count: uint(sz), countSet: true}
+			}
+		case strings.HasPrefix(part, "maxlen:"):
+			v, err := strconv.ParseUint(part[7:], 10, 64)
+			if err != nil {
+				continue
+			}
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.count = byteCount(v)
+			info.countSet = true
+			info.maxlen = v
+		case strings.HasPrefix(part, "minlen:"):
+			v, err := strconv.ParseUint(part[7:], 10, 64)
+			if err != nil {
+				continue
+			}
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.minlen = v
+		case strings.HasPrefix(part, "selector:"):
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.selector = part[9:]
+		case strings.HasPrefix(part, "val:"):
+			v, err := strconv.ParseUint(part[4:], 10, 64)
+			if err != nil {
+				continue
+			}
+			if info == nil {
+				info = &fieldInfo{}
+			}
+			info.val = v
+		}
+	}
+	if info != nil {
+		info.name = name
+		if info.selector == "" {
+			if info.count < 1 {
+				return nil, structuralError{name, "field of unknown size in " + str}
+			} else if info.count > 8 {
+				return nil, structuralError{name, "specified size too large in " + str}
+			} else if info.minlen > info.maxlen {
+				return nil, structuralError{name, "specified length range inverted in " + str}
+			} else if info.val > 0 {
+				return nil, structuralError{name, "specified selector value but not field in " + str}
+			}
+		}
+	} else if name != "" {
+		info = &fieldInfo{name: name}
+	}
+	return info, nil
+}
+
+// Check that a value fits into a field described by a fieldInfo structure.
+func (i fieldInfo) check(val uint64, fldName string) error {
+	if val >= (1 << (8 * i.count)) {
+		return structuralError{fldName, fmt.Sprintf("value %d too large for size", val)}
+	}
+	if i.maxlen != 0 {
+		if val < i.minlen {
+			return structuralError{fldName, fmt.Sprintf("value %d too small for minimum %d", val, i.minlen)}
+		}
+		if val > i.maxlen {
+			return structuralError{fldName, fmt.Sprintf("value %d too large for maximum %d", val, i.maxlen)}
+		}
+	}
+	return nil
+}
+
+// readVarUint reads a big-endian unsigned integer of the given size in
+// bytes.
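+// For example, with info.count == 3 the input bytes {0x01, 0x02, 0x03}
+// decode to 0x010203 (66051).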
+func readVarUint(data []byte, info *fieldInfo) (uint64, error) {
+	if info == nil || !info.countSet {
+		return 0, structuralError{info.fieldName(), "no field size information available"}
+	}
+	if len(data) < int(info.count) {
+		return 0, syntaxError{info.fieldName(), "truncated variable-length integer"}
+	}
+	var result uint64
+	for i := uint(0); i < info.count; i++ {
+		result = (result << 8) | uint64(data[i])
+	}
+	if err := info.check(result, info.name); err != nil {
+		return 0, err
+	}
+	return result, nil
+}
+
+// parseField is the main parsing function. Given a byte slice and an offset
+// (in bytes) into the data, it will try to parse a suitable TLS value out
+// and store it in the given Value.
+func parseField(v reflect.Value, data []byte, initOffset int, info *fieldInfo) (int, error) {
+	offset := initOffset
+	rest := data[offset:]
+
+	fieldType := v.Type()
+	// First look for known fixed types.
+	switch fieldType {
+	case uint8Type:
+		if len(rest) < 1 {
+			return offset, syntaxError{info.fieldName(), "truncated uint8"}
+		}
+		v.SetUint(uint64(rest[0]))
+		offset++
+		return offset, nil
+	case uint16Type:
+		if len(rest) < 2 {
+			return offset, syntaxError{info.fieldName(), "truncated uint16"}
+		}
+		v.SetUint(uint64(binary.BigEndian.Uint16(rest)))
+		offset += 2
+		return offset, nil
+	case uint24Type:
+		if len(rest) < 3 {
+			return offset, syntaxError{info.fieldName(), "truncated uint24"}
+		}
+		// Read the 3 bytes relative to rest (i.e. data[offset:]), not data[0:].
+		v.SetUint(uint64(rest[0])<<16 | uint64(rest[1])<<8 | uint64(rest[2]))
+		offset += 3
+		return offset, nil
+	case uint32Type:
+		if len(rest) < 4 {
+			return offset, syntaxError{info.fieldName(), "truncated uint32"}
+		}
+		v.SetUint(uint64(binary.BigEndian.Uint32(rest)))
+		offset += 4
+		return offset, nil
+	case uint64Type:
+		if len(rest) < 8 {
+			return offset, syntaxError{info.fieldName(), "truncated uint64"}
+		}
+		v.SetUint(uint64(binary.BigEndian.Uint64(rest)))
+		offset += 8
+		return offset, nil
+	}
+
+	// Now deal with user-defined types.
+	switch v.Kind() {
+	case enumType.Kind():
+		// Assume that anything of the same kind as Enum is an Enum, so that
+		// users can alias types of their own to Enum.
+		val, err := readVarUint(rest, info)
+		if err != nil {
+			return offset, err
+		}
+		v.SetUint(val)
+		offset += int(info.count)
+		return offset, nil
+	case reflect.Struct:
+		structType := fieldType
+		// TLS includes a select(Enum) {..} construct, where the value of an enum
+		// indicates which variant field is present (like a C union). We require
+		// that the enum value be an earlier field in the same structure (the selector),
+		// and that each of the possible variant destination fields be pointers.
+		// So the Go mapping looks like:
+		//	type variantType struct {
+		//		Which tls.Enum `tls:"size:1"`              // this is the selector
+		//		Val1  *type1   `tls:"selector:Which,val:1"` // this is a destination
+		//		Val2  *type2   `tls:"selector:Which,val:2"` // this is a destination
+		//	}
+
+		// To deal with this, we track any enum-like fields and their values...
+		enums := make(map[string]uint64)
+		// .. and we track which selector names we've seen (in the destination field tags),
+		// and whether a destination for that selector has been chosen.
+		selectorSeen := make(map[string]bool)
+		for i := 0; i < structType.NumField(); i++ {
+			// Find information about this field.
+ tag := structType.Field(i).Tag.Get("tls") + fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name) + if err != nil { + return offset, err + } + + destination := v.Field(i) + if fieldInfo.selector != "" { + // This is a possible select(Enum) destination, so first check that the referenced + // selector field has already been seen earlier in the struct. + choice, ok := enums[fieldInfo.selector] + if !ok { + return offset, structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector} + } + if structType.Field(i).Type.Kind() != reflect.Ptr { + return offset, structuralError{fieldInfo.name, "choice field not a pointer type"} + } + // Is this the first mention of the selector field name? If so, remember it. + seen, ok := selectorSeen[fieldInfo.selector] + if !ok { + selectorSeen[fieldInfo.selector] = false + } + if choice != fieldInfo.val { + // This destination field was not the chosen one, so make it nil (we checked + // it was a pointer above). + v.Field(i).Set(reflect.Zero(structType.Field(i).Type)) + continue + } + if seen { + // We already saw a different destination field receive the value for this + // selector value, which indicates a badly annotated structure. + return offset, structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector} + } + selectorSeen[fieldInfo.selector] = true + // Make an object of the pointed-to type and parse into that. + v.Field(i).Set(reflect.New(structType.Field(i).Type.Elem())) + destination = v.Field(i).Elem() + } + offset, err = parseField(destination, data, offset, fieldInfo) + if err != nil { + return offset, err + } + + // Remember any possible tls.Enum values encountered in case they are selectors. + if structType.Field(i).Type.Kind() == enumType.Kind() { + enums[structType.Field(i).Name] = v.Field(i).Uint() + } + + } + + // Now we have seen all fields in the structure, check that all select(Enum) {..} selector + // fields found a destination to put their data in. + for selector, seen := range selectorSeen { + if !seen { + return offset, syntaxError{info.fieldName(), selector + ": unhandled value for selector"} + } + } + return offset, nil + case reflect.Array: + datalen := v.Len() + + if datalen > len(rest) { + return offset, syntaxError{info.fieldName(), "truncated array"} + } + inner := rest[:datalen] + offset += datalen + if fieldType.Elem().Kind() != reflect.Uint8 { + // Only byte/uint8 arrays are supported + return offset, structuralError{info.fieldName(), "unsupported array type: " + v.Type().String()} + } + reflect.Copy(v, reflect.ValueOf(inner)) + return offset, nil + + case reflect.Slice: + sliceType := fieldType + // Slices represent variable-length vectors, which are prefixed by a length field. + // The fieldInfo indicates the size of that length field. 
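+		// For example, a field tagged `tls:"minlen:1,maxlen:16777215"` is read
+		// as a 3-byte big-endian length followed by that many content bytes.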
+ varlen, err := readVarUint(rest, info) + if err != nil { + return offset, err + } + datalen := int(varlen) + offset += int(info.count) + rest = rest[info.count:] + + if datalen > len(rest) { + return offset, syntaxError{info.fieldName(), "truncated slice"} + } + inner := rest[:datalen] + offset += datalen + if fieldType.Elem().Kind() == reflect.Uint8 { + // Fast version for []byte + v.Set(reflect.MakeSlice(sliceType, datalen, datalen)) + reflect.Copy(v, reflect.ValueOf(inner)) + return offset, nil + } + + v.Set(reflect.MakeSlice(sliceType, 0, datalen)) + single := reflect.New(sliceType.Elem()) + for innerOffset := 0; innerOffset < len(inner); { + var err error + innerOffset, err = parseField(single.Elem(), inner, innerOffset, nil) + if err != nil { + return offset, err + } + v.Set(reflect.Append(v, single.Elem())) + } + return offset, nil + + default: + return offset, structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())} + } +} + +// Marshal returns the TLS encoding of val. +func Marshal(val interface{}) ([]byte, error) { + return MarshalWithParams(val, "") +} + +// MarshalWithParams returns the TLS encoding of val, and allows field +// parameters to be specified for the top-level element. The form +// of the params is the same as the field tags. +func MarshalWithParams(val interface{}, params string) ([]byte, error) { + info, err := fieldTagToFieldInfo(params, "") + if err != nil { + return nil, err + } + var out bytes.Buffer + v := reflect.ValueOf(val) + if err := marshalField(&out, v, info); err != nil { + return nil, err + } + return out.Bytes(), err +} + +func marshalField(out *bytes.Buffer, v reflect.Value, info *fieldInfo) error { + var prefix string + if info != nil && len(info.name) > 0 { + prefix = info.name + ": " + } + fieldType := v.Type() + // First look for known fixed types. + switch fieldType { + case uint8Type: + out.WriteByte(byte(v.Uint())) + return nil + case uint16Type: + scratch := make([]byte, 2) + binary.BigEndian.PutUint16(scratch, uint16(v.Uint())) + out.Write(scratch) + return nil + case uint24Type: + i := v.Uint() + if i > 0xffffff { + return structuralError{info.fieldName(), fmt.Sprintf("uint24 overflow %d", i)} + } + scratch := make([]byte, 4) + binary.BigEndian.PutUint32(scratch, uint32(i)) + out.Write(scratch[1:]) + return nil + case uint32Type: + scratch := make([]byte, 4) + binary.BigEndian.PutUint32(scratch, uint32(v.Uint())) + out.Write(scratch) + return nil + case uint64Type: + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(v.Uint())) + out.Write(scratch) + return nil + } + + // Now deal with user-defined types. + switch v.Kind() { + case enumType.Kind(): + i := v.Uint() + if info == nil { + return structuralError{info.fieldName(), "enum field tag missing"} + } + if err := info.check(i, prefix); err != nil { + return err + } + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, uint64(i)) + out.Write(scratch[(8 - info.count):]) + return nil + case reflect.Struct: + structType := fieldType + enums := make(map[string]uint64) // Values of any Enum fields + // The comment parseField() describes the mapping of the TLS select(Enum) {..} construct; + // here we have selector and source (rather than destination) fields. + + // Track which selector names we've seen (in the source field tags), and whether a source + // value for that selector has been processed. 
+		selectorSeen := make(map[string]bool)
+		for i := 0; i < structType.NumField(); i++ {
+			// Find information about this field.
+			tag := structType.Field(i).Tag.Get("tls")
+			fieldInfo, err := fieldTagToFieldInfo(tag, structType.Field(i).Name)
+			if err != nil {
+				return err
+			}
+
+			source := v.Field(i)
+			if fieldInfo.selector != "" {
+				// This field is a possible source for a select(Enum) {..}. First check
+				// the selector field name has been seen.
+				choice, ok := enums[fieldInfo.selector]
+				if !ok {
+					return structuralError{fieldInfo.name, "selector not seen: " + fieldInfo.selector}
+				}
+				if structType.Field(i).Type.Kind() != reflect.Ptr {
+					return structuralError{fieldInfo.name, "choice field not a pointer type"}
+				}
+				// Is this the first mention of the selector field name? If so, remember it.
+				seen, ok := selectorSeen[fieldInfo.selector]
+				if !ok {
+					selectorSeen[fieldInfo.selector] = false
+				}
+				if choice != fieldInfo.val {
+					// This source was not chosen; police that it should be nil.
+					if v.Field(i).Pointer() != uintptr(0) {
+						return structuralError{fieldInfo.name, "unchosen field is non-nil"}
+					}
+					continue
+				}
+				if seen {
+					// We already saw a different source field generate the value for this
+					// selector value, which indicates a badly annotated structure.
+					return structuralError{fieldInfo.name, "duplicate selector value for " + fieldInfo.selector}
+				}
+				selectorSeen[fieldInfo.selector] = true
+				if v.Field(i).Pointer() == uintptr(0) {
+					return structuralError{fieldInfo.name, "chosen field is nil"}
+				}
+				// Marshal from the pointed-to source object.
+				source = v.Field(i).Elem()
+			}
+
+			var fieldData bytes.Buffer
+			if err := marshalField(&fieldData, source, fieldInfo); err != nil {
+				return err
+			}
+			out.Write(fieldData.Bytes())
+
+			// Remember any tls.Enum values encountered in case they are selectors.
+			if structType.Field(i).Type.Kind() == enumType.Kind() {
+				enums[structType.Field(i).Name] = v.Field(i).Uint()
+			}
+		}
+		// Now we have seen all fields in the structure, check that all select(Enum) {..} selector
+		// fields found a source field to get their data from.
+		for selector, seen := range selectorSeen {
+			if !seen {
+				return syntaxError{info.fieldName(), selector + ": unhandled value for selector"}
+			}
+		}
+		return nil
+
+	case reflect.Array:
+		datalen := v.Len()
+		arrayType := fieldType
+		if arrayType.Elem().Kind() != reflect.Uint8 {
+			// Only byte/uint8 arrays are supported
+			return structuralError{info.fieldName(), "unsupported array type"}
+		}
+		bytes := make([]byte, datalen)
+		for i := 0; i < datalen; i++ {
+			bytes[i] = uint8(v.Index(i).Uint())
+		}
+		_, err := out.Write(bytes)
+		return err
+
+	case reflect.Slice:
+		if info == nil {
+			return structuralError{info.fieldName(), "slice field tag missing"}
+		}
+
+		sliceType := fieldType
+		if sliceType.Elem().Kind() == reflect.Uint8 {
+			// Fast version for []byte: first write the length as info.count bytes.
+			datalen := v.Len()
+			scratch := make([]byte, 8)
+			binary.BigEndian.PutUint64(scratch, uint64(datalen))
+			out.Write(scratch[(8 - info.count):])
+
+			if err := info.check(uint64(datalen), prefix); err != nil {
+				return err
+			}
+			// Then just write the data.
+			bytes := make([]byte, datalen)
+			for i := 0; i < datalen; i++ {
+				bytes[i] = uint8(v.Index(i).Uint())
+			}
+			_, err := out.Write(bytes)
+			return err
+		}
+		// General version: use a separate Buffer to write the slice entries into.
+ var innerBuf bytes.Buffer + for i := 0; i < v.Len(); i++ { + if err := marshalField(&innerBuf, v.Index(i), nil); err != nil { + return err + } + } + + // Now insert (and check) the size. + size := uint64(innerBuf.Len()) + if err := info.check(size, prefix); err != nil { + return err + } + scratch := make([]byte, 8) + binary.BigEndian.PutUint64(scratch, size) + out.Write(scratch[(8 - info.count):]) + + // Then copy the data. + _, err := out.Write(innerBuf.Bytes()) + return err + + default: + return structuralError{info.fieldName(), fmt.Sprintf("unsupported type: %s of kind %s", fieldType, v.Kind())} + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/types.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/types.go new file mode 100644 index 00000000..14471ad2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/tls/types.go @@ -0,0 +1,117 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tls + +import ( + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/rsa" + "fmt" +) + +// DigitallySigned gives information about a signature, including the algorithm used +// and the signature value. Defined in RFC 5246 s4.7. +type DigitallySigned struct { + Algorithm SignatureAndHashAlgorithm + Signature []byte `tls:"minlen:0,maxlen:65535"` +} + +func (d DigitallySigned) String() string { + return fmt.Sprintf("Signature: HashAlgo=%v SignAlgo=%v Value=%x", d.Algorithm.Hash, d.Algorithm.Signature, d.Signature) +} + +// SignatureAndHashAlgorithm gives information about the algorithms used for a +// signature. Defined in RFC 5246 s7.4.1.4.1. +type SignatureAndHashAlgorithm struct { + Hash HashAlgorithm `tls:"maxval:255"` + Signature SignatureAlgorithm `tls:"maxval:255"` +} + +// HashAlgorithm enum from RFC 5246 s7.4.1.4.1. +type HashAlgorithm Enum + +// HashAlgorithm constants from RFC 5246 s7.4.1.4.1. +const ( + None HashAlgorithm = 0 + MD5 HashAlgorithm = 1 + SHA1 HashAlgorithm = 2 + SHA224 HashAlgorithm = 3 + SHA256 HashAlgorithm = 4 + SHA384 HashAlgorithm = 5 + SHA512 HashAlgorithm = 6 +) + +func (h HashAlgorithm) String() string { + switch h { + case None: + return "None" + case MD5: + return "MD5" + case SHA1: + return "SHA1" + case SHA224: + return "SHA224" + case SHA256: + return "SHA256" + case SHA384: + return "SHA384" + case SHA512: + return "SHA512" + default: + return fmt.Sprintf("UNKNOWN(%d)", h) + } +} + +// SignatureAlgorithm enum from RFC 5246 s7.4.1.4.1. +type SignatureAlgorithm Enum + +// SignatureAlgorithm constants from RFC 5246 s7.4.1.4.1. 
+const ( + Anonymous SignatureAlgorithm = 0 + RSA SignatureAlgorithm = 1 + DSA SignatureAlgorithm = 2 + ECDSA SignatureAlgorithm = 3 +) + +func (s SignatureAlgorithm) String() string { + switch s { + case Anonymous: + return "Anonymous" + case RSA: + return "RSA" + case DSA: + return "DSA" + case ECDSA: + return "ECDSA" + default: + return fmt.Sprintf("UNKNOWN(%d)", s) + } +} + +// SignatureAlgorithmFromPubKey returns the algorithm used for this public key. +// ECDSA, RSA, and DSA keys are supported. Other key types will return Anonymous. +func SignatureAlgorithmFromPubKey(k crypto.PublicKey) SignatureAlgorithm { + switch k.(type) { + case *ecdsa.PublicKey: + return ECDSA + case *rsa.PublicKey: + return RSA + case *dsa.PublicKey: + return DSA + default: + return Anonymous + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/types.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/types.go new file mode 100644 index 00000000..64efc9d2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/types.go @@ -0,0 +1,545 @@ +// Copyright 2015 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package ct holds core types and utilities for Certificate Transparency. +package ct + +import ( + "crypto/sha256" + "encoding/base64" + "encoding/json" + "fmt" + + "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/x509" +) + +/////////////////////////////////////////////////////////////////////////////// +// The following structures represent those outlined in RFC6962; any section +// numbers mentioned refer to that RFC. +/////////////////////////////////////////////////////////////////////////////// + +// LogEntryType represents the LogEntryType enum from section 3.1: +// enum { x509_entry(0), precert_entry(1), (65535) } LogEntryType; +type LogEntryType tls.Enum // tls:"maxval:65535" + +// LogEntryType constants from section 3.1. +const ( + X509LogEntryType LogEntryType = 0 + PrecertLogEntryType LogEntryType = 1 + XJSONLogEntryType LogEntryType = 0x8000 // Experimental. Don't rely on this! +) + +func (e LogEntryType) String() string { + switch e { + case X509LogEntryType: + return "X509LogEntryType" + case PrecertLogEntryType: + return "PrecertLogEntryType" + case XJSONLogEntryType: + return "XJSONLogEntryType" + default: + return fmt.Sprintf("UnknownEntryType(%d)", e) + } +} + +// RFC6962 section 2.1 requires a prefix byte on hash inputs for second preimage resistance. +const ( + TreeLeafPrefix = byte(0x00) + TreeNodePrefix = byte(0x01) +) + +// MerkleLeafType represents the MerkleLeafType enum from section 3.4: +// enum { timestamped_entry(0), (255) } MerkleLeafType; +type MerkleLeafType tls.Enum // tls:"maxval:255" + +// TimestampedEntryLeafType is the only defined MerkleLeafType constant from section 3.4. 
+const TimestampedEntryLeafType MerkleLeafType = 0 // Entry type for an SCT + +func (m MerkleLeafType) String() string { + switch m { + case TimestampedEntryLeafType: + return "TimestampedEntryLeafType" + default: + return fmt.Sprintf("UnknownLeafType(%d)", m) + } +} + +// Version represents the Version enum from section 3.2: +// enum { v1(0), (255) } Version; +type Version tls.Enum // tls:"maxval:255" + +// CT Version constants from section 3.2. +const ( + V1 Version = 0 +) + +func (v Version) String() string { + switch v { + case V1: + return "V1" + default: + return fmt.Sprintf("UnknownVersion(%d)", v) + } +} + +// SignatureType differentiates STH signatures from SCT signatures, see section 3.2. +// enum { certificate_timestamp(0), tree_hash(1), (255) } SignatureType; +type SignatureType tls.Enum // tls:"maxval:255" + +// SignatureType constants from section 3.2. +const ( + CertificateTimestampSignatureType SignatureType = 0 + TreeHashSignatureType SignatureType = 1 +) + +func (st SignatureType) String() string { + switch st { + case CertificateTimestampSignatureType: + return "CertificateTimestamp" + case TreeHashSignatureType: + return "TreeHash" + default: + return fmt.Sprintf("UnknownSignatureType(%d)", st) + } +} + +// ASN1Cert type for holding the raw DER bytes of an ASN.1 Certificate +// (section 3.1). +type ASN1Cert struct { + Data []byte `tls:"minlen:1,maxlen:16777215"` +} + +// LogID holds the hash of the Log's public key (section 3.2). +// TODO(pphaneuf): Users should be migrated to the one in the logid package. +type LogID struct { + KeyID [sha256.Size]byte +} + +// PreCert represents a Precertificate (section 3.2). +type PreCert struct { + IssuerKeyHash [sha256.Size]byte + TBSCertificate []byte `tls:"minlen:1,maxlen:16777215"` // DER-encoded TBSCertificate +} + +// CTExtensions is a representation of the raw bytes of any CtExtension +// structure (see section 3.2). +// nolint: golint +type CTExtensions []byte // tls:"minlen:0,maxlen:65535"` + +// MerkleTreeNode represents an internal node in the CT tree. +type MerkleTreeNode []byte + +// ConsistencyProof represents a CT consistency proof (see sections 2.1.2 and +// 4.4). +type ConsistencyProof []MerkleTreeNode + +// AuditPath represents a CT inclusion proof (see sections 2.1.1 and 4.5). +type AuditPath []MerkleTreeNode + +// LeafInput represents a serialized MerkleTreeLeaf structure. +type LeafInput []byte + +// DigitallySigned is a local alias for tls.DigitallySigned so that we can +// attach a MarshalJSON method. +type DigitallySigned tls.DigitallySigned + +// FromBase64String populates the DigitallySigned structure from the base64 data passed in. +// Returns an error if the base64 data is invalid. +func (d *DigitallySigned) FromBase64String(b64 string) error { + raw, err := base64.StdEncoding.DecodeString(b64) + if err != nil { + return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err) + } + var ds tls.DigitallySigned + if rest, err := tls.Unmarshal(raw, &ds); err != nil { + return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err) + } else if len(rest) > 0 { + return fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + *d = DigitallySigned(ds) + return nil +} + +// Base64String returns the base64 representation of the DigitallySigned struct. +func (d DigitallySigned) Base64String() (string, error) { + b, err := tls.Marshal(d) + if err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(b), nil +} + +// MarshalJSON implements the json.Marshaller interface. 
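+// The output is a JSON string holding the base64 encoding of the
+// TLS-serialized structure, e.g. "BAMARzBF..." for a SHA256/ECDSA signature
+// (illustrative prefix only).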
+func (d DigitallySigned) MarshalJSON() ([]byte, error) {
+	b64, err := d.Base64String()
+	if err != nil {
+		return []byte{}, err
+	}
+	return []byte(`"` + b64 + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
+	var content string
+	if err := json.Unmarshal(b, &content); err != nil {
+		return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+	}
+	return d.FromBase64String(content)
+}
+
+// RawLogEntry represents the (TLS-parsed) contents of an entry in a CT log.
+type RawLogEntry struct {
+	// Index is a position of the entry in the log.
+	Index int64
+	// Leaf is a parsed Merkle leaf hash input.
+	Leaf MerkleTreeLeaf
+	// Cert is:
+	// - A certificate if Leaf.TimestampedEntry.EntryType is X509LogEntryType.
+	// - A precertificate if Leaf.TimestampedEntry.EntryType is
+	//   PrecertLogEntryType, in the form of a DER-encoded Certificate as
+	//   originally added (which includes the poison extension and a signature
+	//   generated over the pre-cert by the pre-cert issuer).
+	// - Empty otherwise.
+	Cert ASN1Cert
+	// Chain is the issuing certificate chain starting with the issuer of Cert,
+	// or an empty slice if Cert is empty.
+	Chain []ASN1Cert
+}
+
+// LogEntry represents the (parsed) contents of an entry in a CT log. This is described
+// in section 3.1, but note that this structure does *not* match the TLS structure
+// defined there (the TLS structure is never used directly in RFC6962).
+type LogEntry struct {
+	Index int64
+	Leaf  MerkleTreeLeaf
+	// Exactly one of the following three fields should be non-empty.
+	X509Cert *x509.Certificate // Parsed X.509 certificate
+	Precert  *Precertificate   // Extracted precertificate
+	JSONData []byte
+
+	// Chain holds the issuing certificate chain, starting with the
+	// issuer of the leaf certificate / pre-certificate.
+	Chain []ASN1Cert
+}
+
+// PrecertChainEntry holds a precertificate together with a validation chain
+// for it; see section 3.1.
+type PrecertChainEntry struct {
+	PreCertificate   ASN1Cert   `tls:"minlen:1,maxlen:16777215"`
+	CertificateChain []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
+}
+
+// CertificateChain holds a chain of certificates, as returned as extra data
+// for get-entries (section 4.6).
+type CertificateChain struct {
+	Entries []ASN1Cert `tls:"minlen:0,maxlen:16777215"`
+}
+
+// JSONDataEntry holds arbitrary data.
+type JSONDataEntry struct {
+	Data []byte `tls:"minlen:0,maxlen:16777215"`
+}
+
+// SHA256Hash represents the output from the SHA256 hash function.
+type SHA256Hash [sha256.Size]byte
+
+// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in.
+func (s *SHA256Hash) FromBase64String(b64 string) error {
+	bs, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return fmt.Errorf("failed to unbase64 LogID: %v", err)
+	}
+	if len(bs) != sha256.Size {
+		return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
+	}
+	copy(s[:], bs)
+	return nil
+}
+
+// Base64String returns the base64 representation of this SHA256Hash.
+func (s SHA256Hash) Base64String() string {
+	return base64.StdEncoding.EncodeToString(s[:])
+}
+
+// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
+func (s SHA256Hash) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + s.Base64String() + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (s *SHA256Hash) UnmarshalJSON(b []byte) error { + var content string + if err := json.Unmarshal(b, &content); err != nil { + return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err) + } + return s.FromBase64String(content) +} + +// SignedTreeHead represents the structure returned by the get-sth CT method +// after base64 decoding; see sections 3.5 and 4.3. +type SignedTreeHead struct { + Version Version `json:"sth_version"` // The version of the protocol to which the STH conforms + TreeSize uint64 `json:"tree_size"` // The number of entries in the new tree + Timestamp uint64 `json:"timestamp"` // The time at which the STH was created + SHA256RootHash SHA256Hash `json:"sha256_root_hash"` // The root hash of the log's Merkle tree + TreeHeadSignature DigitallySigned `json:"tree_head_signature"` // Log's signature over a TLS-encoded TreeHeadSignature + LogID SHA256Hash `json:"log_id"` // The SHA256 hash of the log's public key +} + +func (s SignedTreeHead) String() string { + sigStr, err := s.TreeHeadSignature.Base64String() + if err != nil { + sigStr = tls.DigitallySigned(s.TreeHeadSignature).String() + } + + // If the LogID field in the SignedTreeHead is empty, don't include it in + // the string. + var logIDStr string + if id, empty := s.LogID, (SHA256Hash{}); id != empty { + logIDStr = fmt.Sprintf("LogID:%s, ", id.Base64String()) + } + + return fmt.Sprintf("{%sTreeSize:%d, Timestamp:%d, SHA256RootHash:%q, TreeHeadSignature:%q}", + logIDStr, s.TreeSize, s.Timestamp, s.SHA256RootHash.Base64String(), sigStr) +} + +// TreeHeadSignature holds the data over which the signature in an STH is +// generated; see section 3.5 +type TreeHeadSignature struct { + Version Version `tls:"maxval:255"` + SignatureType SignatureType `tls:"maxval:255"` // == TreeHashSignatureType + Timestamp uint64 + TreeSize uint64 + SHA256RootHash SHA256Hash +} + +// SignedCertificateTimestamp represents the structure returned by the +// add-chain and add-pre-chain methods after base64 decoding; see sections +// 3.2, 4.1 and 4.2. +type SignedCertificateTimestamp struct { + SCTVersion Version `tls:"maxval:255"` + LogID LogID + Timestamp uint64 + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` + Signature DigitallySigned // Signature over TLS-encoded CertificateTimestamp +} + +// CertificateTimestamp is the collection of data that the signature in an +// SCT is over; see section 3.2. +type CertificateTimestamp struct { + SCTVersion Version `tls:"maxval:255"` + SignatureType SignatureType `tls:"maxval:255"` + Timestamp uint64 + EntryType LogEntryType `tls:"maxval:65535"` + X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` + PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` + JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` +} + +func (s SignedCertificateTimestamp) String() string { + return fmt.Sprintf("{Version:%d LogId:%s Timestamp:%d Extensions:'%s' Signature:%v}", s.SCTVersion, + base64.StdEncoding.EncodeToString(s.LogID.KeyID[:]), + s.Timestamp, + s.Extensions, + s.Signature) +} + +// TimestampedEntry is part of the MerkleTreeLeaf structure; see section 3.4. 
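+// Exactly one of X509Entry, PrecertEntry and JSONEntry is expected to be
+// non-nil after parsing, chosen by the EntryType selector via the tls
+// package's select(Enum) mapping.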
+type TimestampedEntry struct { + Timestamp uint64 + EntryType LogEntryType `tls:"maxval:65535"` + X509Entry *ASN1Cert `tls:"selector:EntryType,val:0"` + PrecertEntry *PreCert `tls:"selector:EntryType,val:1"` + JSONEntry *JSONDataEntry `tls:"selector:EntryType,val:32768"` + Extensions CTExtensions `tls:"minlen:0,maxlen:65535"` +} + +// MerkleTreeLeaf represents the deserialized structure of the hash input for the +// leaves of a log's Merkle tree; see section 3.4. +type MerkleTreeLeaf struct { + Version Version `tls:"maxval:255"` + LeafType MerkleLeafType `tls:"maxval:255"` + TimestampedEntry *TimestampedEntry `tls:"selector:LeafType,val:0"` +} + +// Precertificate represents the parsed CT Precertificate structure. +type Precertificate struct { + // DER-encoded pre-certificate as originally added, which includes a + // poison extension and a signature generated over the pre-cert by + // the pre-cert issuer (which might differ from the issuer of the final + // cert, see RFC6962 s3.1). + Submitted ASN1Cert + // SHA256 hash of the issuing key + IssuerKeyHash [sha256.Size]byte + // Parsed TBSCertificate structure, held in an x509.Certificate for convenience. + TBSCertificate *x509.Certificate +} + +// X509Certificate returns the X.509 Certificate contained within the +// MerkleTreeLeaf. +func (m *MerkleTreeLeaf) X509Certificate() (*x509.Certificate, error) { + if m.TimestampedEntry.EntryType != X509LogEntryType { + return nil, fmt.Errorf("cannot call X509Certificate on a MerkleTreeLeaf that is not an X509 entry") + } + return x509.ParseCertificate(m.TimestampedEntry.X509Entry.Data) +} + +// Precertificate returns the X.509 Precertificate contained within the MerkleTreeLeaf. +// +// The returned precertificate is embedded in an x509.Certificate, but is in the +// form stored internally in the log rather than the original submitted form +// (i.e. it does not include the poison extension and any changes to reflect the +// final certificate's issuer have been made; see x509.BuildPrecertTBS). +func (m *MerkleTreeLeaf) Precertificate() (*x509.Certificate, error) { + if m.TimestampedEntry.EntryType != PrecertLogEntryType { + return nil, fmt.Errorf("cannot call Precertificate on a MerkleTreeLeaf that is not a precert entry") + } + return x509.ParseTBSCertificate(m.TimestampedEntry.PrecertEntry.TBSCertificate) +} + +// APIEndpoint is a string that represents one of the Certificate Transparency +// Log API endpoints. +type APIEndpoint string + +// Certificate Transparency Log API endpoints; see section 4. +// WARNING: Should match the URI paths without the "/ct/v1/" prefix. If +// changing these constants, may need to change those too. +const ( + AddChainStr APIEndpoint = "add-chain" + AddPreChainStr APIEndpoint = "add-pre-chain" + GetSTHStr APIEndpoint = "get-sth" + GetEntriesStr APIEndpoint = "get-entries" + GetProofByHashStr APIEndpoint = "get-proof-by-hash" + GetSTHConsistencyStr APIEndpoint = "get-sth-consistency" + GetRootsStr APIEndpoint = "get-roots" + GetEntryAndProofStr APIEndpoint = "get-entry-and-proof" +) + +// URI paths for Log requests; see section 4. +// WARNING: Should match the API endpoints, with the "/ct/v1/" prefix. If +// changing these constants, may need to change those too. 
+const ( + AddChainPath = "/ct/v1/add-chain" + AddPreChainPath = "/ct/v1/add-pre-chain" + GetSTHPath = "/ct/v1/get-sth" + GetEntriesPath = "/ct/v1/get-entries" + GetProofByHashPath = "/ct/v1/get-proof-by-hash" + GetSTHConsistencyPath = "/ct/v1/get-sth-consistency" + GetRootsPath = "/ct/v1/get-roots" + GetEntryAndProofPath = "/ct/v1/get-entry-and-proof" + + AddJSONPath = "/ct/v1/add-json" // Experimental addition +) + +// AddChainRequest represents the JSON request body sent to the add-chain and +// add-pre-chain POST methods from sections 4.1 and 4.2. +type AddChainRequest struct { + Chain [][]byte `json:"chain"` +} + +// AddChainResponse represents the JSON response to the add-chain and +// add-pre-chain POST methods. +// An SCT represents a Log's promise to integrate a [pre-]certificate into the +// log within a defined period of time. +type AddChainResponse struct { + SCTVersion Version `json:"sct_version"` // SCT structure version + ID []byte `json:"id"` // Log ID + Timestamp uint64 `json:"timestamp"` // Timestamp of issuance + Extensions string `json:"extensions"` // Holder for any CT extensions + Signature []byte `json:"signature"` // Log signature for this SCT +} + +// AddJSONRequest represents the JSON request body sent to the add-json POST method. +// The corresponding response re-uses AddChainResponse. +// This is an experimental addition not covered by RFC6962. +type AddJSONRequest struct { + Data interface{} `json:"data"` +} + +// GetSTHResponse represents the JSON response to the get-sth GET method from section 4.3. +type GetSTHResponse struct { + TreeSize uint64 `json:"tree_size"` // Number of certs in the current tree + Timestamp uint64 `json:"timestamp"` // Time that the tree was created + SHA256RootHash []byte `json:"sha256_root_hash"` // Root hash of the tree + TreeHeadSignature []byte `json:"tree_head_signature"` // Log signature for this STH +} + +// ToSignedTreeHead creates a SignedTreeHead from the GetSTHResponse. +func (r *GetSTHResponse) ToSignedTreeHead() (*SignedTreeHead, error) { + sth := SignedTreeHead{ + TreeSize: r.TreeSize, + Timestamp: r.Timestamp, + } + + if len(r.SHA256RootHash) != sha256.Size { + return nil, fmt.Errorf("sha256_root_hash is invalid length, expected %d got %d", sha256.Size, len(r.SHA256RootHash)) + } + copy(sth.SHA256RootHash[:], r.SHA256RootHash) + + var ds DigitallySigned + if rest, err := tls.Unmarshal(r.TreeHeadSignature, &ds); err != nil { + return nil, fmt.Errorf("tls.Unmarshal(): %s", err) + } else if len(rest) > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after DigitallySigned", len(rest)) + } + sth.TreeHeadSignature = ds + + return &sth, nil +} + +// GetSTHConsistencyResponse represents the JSON response to the get-sth-consistency +// GET method from section 4.4. (The corresponding GET request has parameters 'first' and +// 'second'.) +type GetSTHConsistencyResponse struct { + Consistency [][]byte `json:"consistency"` +} + +// GetProofByHashResponse represents the JSON response to the get-proof-by-hash GET +// method from section 4.5. (The corresponding GET request has parameters 'hash' +// and 'tree_size'.) +type GetProofByHashResponse struct { + LeafIndex int64 `json:"leaf_index"` // The 0-based index of the end entity corresponding to the "hash" parameter. + AuditPath [][]byte `json:"audit_path"` // An array of base64-encoded Merkle Tree nodes proving the inclusion of the chosen certificate. +} + +// LeafEntry represents a leaf in the Log's Merkle tree, as returned by the get-entries +// GET method from section 4.6. 
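+// A LeafEntry is typically converted into a parsed form with
+// LogEntryFromLeaf or RawLogEntryFromLeaf, for example (sketch; i and le are
+// placeholders):
+//
+//	entry, err := LogEntryFromLeaf(i, &le)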
+type LeafEntry struct {
+	// LeafInput is a TLS-encoded MerkleTreeLeaf
+	LeafInput []byte `json:"leaf_input"`
+	// ExtraData holds (unsigned) extra data, normally the cert validation chain.
+	ExtraData []byte `json:"extra_data"`
+}
+
+// GetEntriesResponse represents the JSON response to the get-entries GET method
+// from section 4.6.
+type GetEntriesResponse struct {
+	Entries []LeafEntry `json:"entries"` // the list of returned entries
+}
+
+// GetRootsResponse represents the JSON response to the get-roots GET method from section 4.7.
+type GetRootsResponse struct {
+	Certificates []string `json:"certificates"`
+}
+
+// GetEntryAndProofResponse represents the JSON response to the get-entry-and-proof
+// GET method from section 4.8. (The corresponding GET request has parameters 'leaf_index'
+// and 'tree_size'.)
+type GetEntryAndProofResponse struct {
+	LeafInput []byte   `json:"leaf_input"` // the entry itself
+	ExtraData []byte   `json:"extra_data"` // any chain provided when the entry was added to the log
+	AuditPath [][]byte `json:"audit_path"` // the corresponding proof
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/README.md b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/README.md
new file mode 100644
index 00000000..6f22f5f8
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/README.md
@@ -0,0 +1,7 @@
+# Important Notice
+
+This is a fork of the `crypto/x509` Go package. The original source can be found on
+[GitHub](https://github.com/golang/go).
+
+Be careful about making local modifications to this code as it will
+make maintenance harder in future.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
new file mode 100644
index 00000000..4823d594
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/cert_pool.go
@@ -0,0 +1,159 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+	"encoding/pem"
+	"errors"
+	"runtime"
+)
+
+// CertPool is a set of certificates.
+type CertPool struct {
+	bySubjectKeyId map[string][]int
+	byName         map[string][]int
+	certs          []*Certificate
+}
+
+// NewCertPool returns a new, empty CertPool.
+func NewCertPool() *CertPool {
+	return &CertPool{
+		bySubjectKeyId: make(map[string][]int),
+		byName:         make(map[string][]int),
+	}
+}
+
+func (s *CertPool) copy() *CertPool {
+	p := &CertPool{
+		bySubjectKeyId: make(map[string][]int, len(s.bySubjectKeyId)),
+		byName:         make(map[string][]int, len(s.byName)),
+		certs:          make([]*Certificate, len(s.certs)),
+	}
+	for k, v := range s.bySubjectKeyId {
+		indexes := make([]int, len(v))
+		copy(indexes, v)
+		p.bySubjectKeyId[k] = indexes
+	}
+	for k, v := range s.byName {
+		indexes := make([]int, len(v))
+		copy(indexes, v)
+		p.byName[k] = indexes
+	}
+	copy(p.certs, s.certs)
+	return p
+}
+
+// SystemCertPool returns a copy of the system cert pool.
+//
+// Any mutations to the returned pool are not written to disk and do
+// not affect any other pool returned by SystemCertPool.
+//
+// New changes in the system cert pool might not be reflected
+// in subsequent calls.
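+//
+// A hedged usage sketch (pemBytes is a placeholder):
+//
+//	roots, err := SystemCertPool()
+//	if err != nil {
+//		roots = NewCertPool() // fall back to an explicitly built pool
+//	}
+//	roots.AppendCertsFromPEM(pemBytes)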
+func SystemCertPool() (*CertPool, error) { + if runtime.GOOS == "windows" { + // Issue 16736, 18609: + return nil, errors.New("crypto/x509: system root pool is not available on Windows") + } + + if sysRoots := systemRootsPool(); sysRoots != nil { + return sysRoots.copy(), nil + } + + return loadSystemRoots() +} + +// findPotentialParents returns the indexes of certificates in s which might +// have signed cert. The caller must not modify the returned slice. +func (s *CertPool) findPotentialParents(cert *Certificate) []int { + if s == nil { + return nil + } + + var candidates []int + if len(cert.AuthorityKeyId) > 0 { + candidates = s.bySubjectKeyId[string(cert.AuthorityKeyId)] + } + if len(candidates) == 0 { + candidates = s.byName[string(cert.RawIssuer)] + } + return candidates +} + +func (s *CertPool) contains(cert *Certificate) bool { + if s == nil { + return false + } + + candidates := s.byName[string(cert.RawSubject)] + for _, c := range candidates { + if s.certs[c].Equal(cert) { + return true + } + } + + return false +} + +// AddCert adds a certificate to a pool. +func (s *CertPool) AddCert(cert *Certificate) { + if cert == nil { + panic("adding nil Certificate to CertPool") + } + + // Check that the certificate isn't being added twice. + if s.contains(cert) { + return + } + + n := len(s.certs) + s.certs = append(s.certs, cert) + + if len(cert.SubjectKeyId) > 0 { + keyId := string(cert.SubjectKeyId) + s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n) + } + name := string(cert.RawSubject) + s.byName[name] = append(s.byName[name], n) +} + +// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates. +// It appends any certificates found to s and reports whether any certificates +// were successfully parsed. +// +// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set +// of root CAs in a format suitable for this function. +func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) { + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + + cert, err := ParseCertificate(block.Bytes) + if IsFatal(err) { + continue + } + + s.AddCert(cert) + ok = true + } + + return +} + +// Subjects returns a list of the DER-encoded subjects of +// all of the certificates in the pool. +func (s *CertPool) Subjects() [][]byte { + res := make([][]byte, len(s.certs)) + for i, c := range s.certs { + res[i] = c.RawSubject + } + return res +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/curves.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/curves.go new file mode 100644 index 00000000..0e2778cb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/curves.go @@ -0,0 +1,37 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/elliptic" + "math/big" + "sync" +) + +// This file holds ECC curves that are not supported by the main Go crypto/elliptic +// library, but which have been observed in certificates in the wild. 
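+//
+// Illustrative sketch (assumed usage): the curve returned by secp192r1()
+// below satisfies elliptic.Curve, so it can be used like any standard curve,
+// e.g. with crypto/ecdsa:
+//
+//	priv, err := ecdsa.GenerateKey(secp192r1(), rand.Reader)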
+ +var initonce sync.Once +var p192r1 *elliptic.CurveParams + +func initAllCurves() { + initSECP192R1() +} + +func initSECP192R1() { + // See SEC-2, section 2.2.2 + p192r1 = &elliptic.CurveParams{Name: "P-192"} + p192r1.P, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEFFFFFFFFFFFFFFFF", 16) + p192r1.N, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFFFFFFFFF99DEF836146BC9B1B4D22831", 16) + p192r1.B, _ = new(big.Int).SetString("64210519E59C80E70FA7E9AB72243049FEB8DEECC146B9B1", 16) + p192r1.Gx, _ = new(big.Int).SetString("188DA80EB03090F67CBF20EB43A18800F4FF0AFD82FF1012", 16) + p192r1.Gy, _ = new(big.Int).SetString("07192B95FFC8DA78631011ED6B24CDD573F977A11E794811", 16) + p192r1.BitSize = 192 +} + +func secp192r1() elliptic.Curve { + initonce.Do(initAllCurves) + return p192r1 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/error.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/error.go new file mode 100644 index 00000000..40b7ef7d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/error.go @@ -0,0 +1,236 @@ +package x509 + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +// Error implements the error interface and describes a single error in an X.509 certificate or CRL. +type Error struct { + ID ErrorID + Category ErrCategory + Summary string + Field string + SpecRef string + SpecText string + // Fatal indicates that parsing has been aborted. + Fatal bool +} + +func (err Error) Error() string { + var msg bytes.Buffer + if err.ID != ErrInvalidID { + if err.Fatal { + msg.WriteRune('E') + } else { + msg.WriteRune('W') + } + msg.WriteString(fmt.Sprintf("%03d: ", err.ID)) + } + msg.WriteString(err.Summary) + return msg.String() +} + +// VerboseError creates a more verbose error string, including spec details. +func (err Error) VerboseError() string { + var msg bytes.Buffer + msg.WriteString(err.Error()) + if len(err.Field) > 0 || err.Category != UnknownCategory || len(err.SpecRef) > 0 || len(err.SpecText) > 0 { + msg.WriteString(" (") + needSep := false + if len(err.Field) > 0 { + msg.WriteString(err.Field) + needSep = true + } + if err.Category != UnknownCategory { + if needSep { + msg.WriteString(": ") + } + msg.WriteString(err.Category.String()) + needSep = true + } + if len(err.SpecRef) > 0 { + if needSep { + msg.WriteString(": ") + } + msg.WriteString(err.SpecRef) + needSep = true + } + if len(err.SpecText) > 0 { + if needSep { + if len(err.SpecRef) > 0 { + msg.WriteString(", ") + } else { + msg.WriteString(": ") + } + } + msg.WriteString("'") + msg.WriteString(err.SpecText) + msg.WriteString("'") + } + msg.WriteString(")") + } + + return msg.String() +} + +// ErrCategory indicates the category of an x509.Error. +type ErrCategory int + +// ErrCategory values. 
+const ( + UnknownCategory ErrCategory = iota + // Errors in ASN.1 encoding + InvalidASN1Encoding + InvalidASN1Content + InvalidASN1DER + // Errors in ASN.1 relative to schema + InvalidValueRange + InvalidASN1Type + UnexpectedAdditionalData + // Errors in X.509 + PoorlyFormedCertificate // Fails a SHOULD clause + MalformedCertificate // Fails a MUST clause + PoorlyFormedCRL // Fails a SHOULD clause + MalformedCRL // Fails a MUST clause + // Errors relative to CA/Browser Forum guidelines + BaselineRequirementsFailure + EVRequirementsFailure + // Other errors + InsecureAlgorithm + UnrecognizedValue +) + +func (category ErrCategory) String() string { + switch category { + case InvalidASN1Encoding: + return "Invalid ASN.1 encoding" + case InvalidASN1Content: + return "Invalid ASN.1 content" + case InvalidASN1DER: + return "Invalid ASN.1 distinguished encoding" + case InvalidValueRange: + return "Invalid value for range given in schema" + case InvalidASN1Type: + return "Invalid ASN.1 type for schema" + case UnexpectedAdditionalData: + return "Unexpected additional data present" + case PoorlyFormedCertificate: + return "Certificate does not comply with SHOULD clause in spec" + case MalformedCertificate: + return "Certificate does not comply with MUST clause in spec" + case PoorlyFormedCRL: + return "Certificate Revocation List does not comply with SHOULD clause in spec" + case MalformedCRL: + return "Certificate Revocation List does not comply with MUST clause in spec" + case BaselineRequirementsFailure: + return "Certificate does not comply with CA/BF baseline requirements" + case EVRequirementsFailure: + return "Certificate does not comply with CA/BF EV requirements" + case InsecureAlgorithm: + return "Certificate uses an insecure algorithm" + case UnrecognizedValue: + return "Certificate uses an unrecognized value" + default: + return fmt.Sprintf("Unknown (%d)", category) + } +} + +// ErrorID is an identifier for an x509.Error, to allow filtering. +type ErrorID int + +// Errors implements the error interface and holds a collection of errors found in a certificate or CRL. +type Errors struct { + Errs []Error +} + +// Error converts to a string. +func (e *Errors) Error() string { + return e.combineErrors(Error.Error) +} + +// VerboseError creates a more verbose error string, including spec details. +func (e *Errors) VerboseError() string { + return e.combineErrors(Error.VerboseError) +} + +// Fatal indicates whether e includes a fatal error +func (e *Errors) Fatal() bool { + return (e.FirstFatal() != nil) +} + +// Empty indicates whether e has no errors. +func (e *Errors) Empty() bool { + if e == nil { + return true + } + return len(e.Errs) == 0 +} + +// FirstFatal returns the first fatal error in e, or nil +// if there is no fatal error. +func (e *Errors) FirstFatal() error { + if e == nil { + return nil + } + for _, err := range e.Errs { + if err.Fatal { + return err + } + } + return nil + +} + +// AddID adds the Error identified by the given id to an x509.Errors. 
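+//
+// Illustrative sketch (assumed usage; parseErr is a hypothetical error
+// value): the variadic args fill any format specifiers in the template
+// error's Summary:
+//
+//	var errs Errors
+//	errs.AddID(ErrInvalidCertList, parseErr) // Summary has a %v specifier
+//	errs.AddID(ErrTrailingCertList)          // no format arguments needed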
+func (e *Errors) AddID(id ErrorID, args ...interface{}) {
+	e.Errs = append(e.Errs, NewError(id, args...))
+}
+
+func (e Errors) combineErrors(errfn func(Error) string) string {
+	if len(e.Errs) == 0 {
+		return ""
+	}
+	if len(e.Errs) == 1 {
+		return errfn((e.Errs)[0])
+	}
+	var msg bytes.Buffer
+	msg.WriteString("Errors:")
+	for _, err := range e.Errs {
+		msg.WriteString("\n ")
+		msg.WriteString(errfn(err))
+	}
+	return msg.String()
+}
+
+// Filter creates a new Errors object with any entries from the filtered
+// list of IDs removed.
+func (e Errors) Filter(filtered []ErrorID) Errors {
+	var results Errors
+eloop:
+	for _, v := range e.Errs {
+		for _, f := range filtered {
+			if v.ID == f {
+				// Skip this entry; it is in the filtered list.
+				continue eloop
+			}
+		}
+		results.Errs = append(results.Errs, v)
+	}
+	return results
+}
+
+// ErrorFilter builds a list of error IDs (suitable for use with Errors.Filter) from a comma-separated string.
+func ErrorFilter(ignore string) []ErrorID {
+	var ids []ErrorID
+	filters := strings.Split(ignore, ",")
+	for _, f := range filters {
+		v, err := strconv.Atoi(f)
+		if err != nil {
+			continue
+		}
+		ids = append(ids, ErrorID(v))
+	}
+	return ids
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/errors.go
new file mode 100644
index 00000000..ec2fe06a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/errors.go
@@ -0,0 +1,302 @@
+package x509
+
+import "fmt"
+
+// To preserve error IDs, only append to this list, never insert.
+const (
+	ErrInvalidID ErrorID = iota
+	ErrInvalidCertList
+	ErrTrailingCertList
+	ErrUnexpectedlyCriticalCertListExtension
+	ErrUnexpectedlyNonCriticalCertListExtension
+	ErrInvalidCertListAuthKeyID
+	ErrTrailingCertListAuthKeyID
+	ErrInvalidCertListIssuerAltName
+	ErrInvalidCertListCRLNumber
+	ErrTrailingCertListCRLNumber
+	ErrNegativeCertListCRLNumber
+	ErrInvalidCertListDeltaCRL
+	ErrTrailingCertListDeltaCRL
+	ErrNegativeCertListDeltaCRL
+	ErrInvalidCertListIssuingDP
+	ErrTrailingCertListIssuingDP
+	ErrCertListIssuingDPMultipleTypes
+	ErrCertListIssuingDPInvalidFullName
+	ErrInvalidCertListFreshestCRL
+	ErrInvalidCertListAuthInfoAccess
+	ErrTrailingCertListAuthInfoAccess
+	ErrUnhandledCriticalCertListExtension
+	ErrUnexpectedlyCriticalRevokedCertExtension
+	ErrUnexpectedlyNonCriticalRevokedCertExtension
+	ErrInvalidRevocationReason
+	ErrTrailingRevocationReason
+	ErrInvalidRevocationInvalidityDate
+	ErrTrailingRevocationInvalidityDate
+	ErrInvalidRevocationIssuer
+	ErrUnhandledCriticalRevokedCertExtension
+
+	ErrMaxID
+)
+
+// idToError gives a template x509.Error for each defined ErrorID; where the Summary
+// field may hold format specifiers that take field parameters.
+var idToError map[ErrorID]Error
+
+var errorInfo = []Error{
+	{
+		ID:       ErrInvalidCertList,
+		Summary:  "x509: failed to parse CertificateList: %v",
+		Field:    "CertificateList",
+		SpecRef:  "RFC 5280 s5.1",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingCertList,
+		Summary:  "x509: trailing data after CertificateList",
+		Field:    "CertificateList",
+		SpecRef:  "RFC 5280 s5.1",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+
+	{
+		ID:       ErrUnexpectedlyCriticalCertListExtension,
+		Summary:  "x509: certificate list extension %v marked critical but expected to be non-critical",
+		Field:    "tbsCertList.crlExtensions.*.critical",
+		SpecRef:  "RFC 5280 s5.2",
+		Category: MalformedCRL,
+	},
+	{
+		ID:       ErrUnexpectedlyNonCriticalCertListExtension,
+		Summary:  "x509: certificate list extension %v marked non-critical but expected to be critical",
+		Field:    "tbsCertList.crlExtensions.*.critical",
+		SpecRef:  "RFC 5280 s5.2",
+		Category: MalformedCRL,
+	},
+
+	{
+		ID:       ErrInvalidCertListAuthKeyID,
+		Summary:  "x509: failed to unmarshal certificate-list authority key-id: %v",
+		Field:    "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
+		SpecRef:  "RFC 5280 s5.2.1",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingCertListAuthKeyID,
+		Summary:  "x509: trailing data after certificate list auth key ID",
+		Field:    "tbsCertList.crlExtensions.*.AuthorityKeyIdentifier",
+		SpecRef:  "RFC 5280 s5.2.1",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidCertListIssuerAltName,
+		Summary:  "x509: failed to parse CRL issuer alt name: %v",
+		Field:    "tbsCertList.crlExtensions.*.IssuerAltName",
+		SpecRef:  "RFC 5280 s5.2.2",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidCertListCRLNumber,
+		Summary:  "x509: failed to unmarshal certificate-list crl-number: %v",
+		Field:    "tbsCertList.crlExtensions.*.CRLNumber",
+		SpecRef:  "RFC 5280 s5.2.3",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingCertListCRLNumber,
+		Summary:  "x509: trailing data after certificate list crl-number",
+		Field:    "tbsCertList.crlExtensions.*.CRLNumber",
+		SpecRef:  "RFC 5280 s5.2.3",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrNegativeCertListCRLNumber,
+		Summary:  "x509: negative certificate list crl-number: %d",
+		Field:    "tbsCertList.crlExtensions.*.CRLNumber",
+		SpecRef:  "RFC 5280 s5.2.3",
+		Category: MalformedCRL,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidCertListDeltaCRL,
+		Summary:  "x509: failed to unmarshal certificate-list delta-crl: %v",
+		Field:    "tbsCertList.crlExtensions.*.BaseCRLNumber",
+		SpecRef:  "RFC 5280 s5.2.4",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingCertListDeltaCRL,
+		Summary:  "x509: trailing data after certificate list delta-crl",
+		Field:    "tbsCertList.crlExtensions.*.BaseCRLNumber",
+		SpecRef:  "RFC 5280 s5.2.4",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrNegativeCertListDeltaCRL,
+		Summary:  "x509: negative certificate list base-crl-number: %d",
+		Field:    "tbsCertList.crlExtensions.*.BaseCRLNumber",
+		SpecRef:  "RFC 5280 s5.2.4",
+		Category: MalformedCRL,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidCertListIssuingDP,
+		Summary:  "x509: failed to unmarshal certificate list issuing distribution point: %v",
+		Field:    "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
+		SpecRef:  "RFC 5280 s5.2.5",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingCertListIssuingDP,
+		Summary:  "x509: trailing data after certificate list issuing distribution point",
+		Field:    "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
+		SpecRef:  "RFC 5280 s5.2.5",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrCertListIssuingDPMultipleTypes,
+		Summary:  "x509: multiple cert types set in issuing-distribution-point: user:%v CA:%v attr:%v",
+		Field:    "tbsCertList.crlExtensions.*.IssuingDistributionPoint",
+		SpecRef:  "RFC 5280 s5.2.5",
+		SpecText: "at most one of onlyContainsUserCerts, onlyContainsCACerts, and onlyContainsAttributeCerts may be set to TRUE.",
+		Category: MalformedCRL,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrCertListIssuingDPInvalidFullName,
+		Summary:  "x509: failed to parse CRL issuing-distribution-point fullName: %v",
+		Field:    "tbsCertList.crlExtensions.*.IssuingDistributionPoint.distributionPoint",
+		SpecRef:  "RFC 5280 s5.2.5",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidCertListFreshestCRL,
+		Summary:  "x509: failed to unmarshal certificate list freshestCRL: %v",
+		Field:    "tbsCertList.crlExtensions.*.FreshestCRL",
+		SpecRef:  "RFC 5280 s5.2.6",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidCertListAuthInfoAccess,
+		Summary:  "x509: failed to unmarshal certificate list authority info access: %v",
+		Field:    "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
+		SpecRef:  "RFC 5280 s5.2.7",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingCertListAuthInfoAccess,
+		Summary:  "x509: trailing data after certificate list authority info access",
+		Field:    "tbsCertList.crlExtensions.*.AuthorityInfoAccess",
+		SpecRef:  "RFC 5280 s5.2.7",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrUnhandledCriticalCertListExtension,
+		Summary:  "x509: unhandled critical extension in certificate list: %v",
+		Field:    "tbsCertList.revokedCertificates.crlExtensions.*",
+		SpecRef:  "RFC 5280 s5.2",
+		SpecText: "If a CRL contains a critical extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of certificates.",
+		Category: MalformedCRL,
+		Fatal:    true,
+	},
+
+	{
+		ID:       ErrUnexpectedlyCriticalRevokedCertExtension,
+		Summary:  "x509: revoked certificate extension %v marked critical but expected to be non-critical",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
+		SpecRef:  "RFC 5280 s5.3",
+		Category: MalformedCRL,
+	},
+	{
+		ID:       ErrUnexpectedlyNonCriticalRevokedCertExtension,
+		Summary:  "x509: revoked certificate extension %v marked non-critical but expected to be critical",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.critical",
+		SpecRef:  "RFC 5280 s5.3",
+		Category: MalformedCRL,
+	},
+
+	{
+		ID:       ErrInvalidRevocationReason,
+		Summary:  "x509: failed to parse revocation reason: %v",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
+		SpecRef:  "RFC 5280 s5.3.1",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingRevocationReason,
+		Summary:  "x509: trailing data after revoked certificate reason",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.CRLReason",
+		SpecRef:  "RFC 5280 s5.3.1",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidRevocationInvalidityDate,
+		Summary:  "x509: failed to parse revoked certificate invalidity date: %v",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
+		SpecRef:  "RFC 5280 s5.3.2",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrTrailingRevocationInvalidityDate,
+		Summary:  "x509: trailing data after revoked certificate invalidity date",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.InvalidityDate",
+		SpecRef:  "RFC 5280 s5.3.2",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrInvalidRevocationIssuer,
+		Summary:  "x509: failed to parse revocation issuer %v",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*.CertificateIssuer",
+		SpecRef:  "RFC 5280 s5.3.3",
+		Category: InvalidASN1Content,
+		Fatal:    true,
+	},
+	{
+		ID:       ErrUnhandledCriticalRevokedCertExtension,
+		Summary:  "x509: unhandled critical extension in revoked certificate: %v",
+		Field:    "tbsCertList.revokedCertificates.crlEntryExtensions.*",
+		SpecRef:  "RFC 5280 s5.3",
+		SpecText: "If a CRL contains a critical CRL entry extension that the application cannot process, then the application MUST NOT use that CRL to determine the status of any certificates.",
+		Category: MalformedCRL,
+		Fatal:    true,
+	},
+}
+
+func init() {
+	idToError = make(map[ErrorID]Error, len(errorInfo))
+	for _, info := range errorInfo {
+		idToError[info.ID] = info
+	}
+}
+
+// NewError builds a new x509.Error based on the template for the given id.
+func NewError(id ErrorID, args ...interface{}) Error {
+	var err Error
+	if id >= ErrMaxID {
+		err.ID = id
+		err.Summary = fmt.Sprintf("Unknown error ID %v: args %+v", id, args)
+		err.Fatal = true
+	} else {
+		err = idToError[id]
+		err.Summary = fmt.Sprintf(err.Summary, args...)
+	}
+	return err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/names.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/names.go
new file mode 100644
index 00000000..3ff0b7d4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/names.go
@@ -0,0 +1,164 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/google/certificate-transparency-go/asn1"
+	"github.com/google/certificate-transparency-go/x509/pkix"
+)
+
+const (
+	// GeneralName tag values from RFC 5280, 4.2.1.6
+	tagOtherName     = 0
+	tagRFC822Name    = 1
+	tagDNSName       = 2
+	tagX400Address   = 3
+	tagDirectoryName = 4
+	tagEDIPartyName  = 5
+	tagURI           = 6
+	tagIPAddress     = 7
+	tagRegisteredID  = 8
+)
+
+// OtherName describes a name related to a certificate which is not in one
+// of the standard name formats. RFC 5280, 4.2.1.6:
+// OtherName ::= SEQUENCE {
+//      type-id    OBJECT IDENTIFIER,
+//      value      [0] EXPLICIT ANY DEFINED BY type-id }
+type OtherName struct {
+	TypeID asn1.ObjectIdentifier
+	Value  asn1.RawValue
+}
+
+// GeneralNames holds a collection of names related to a certificate.
+type GeneralNames struct {
+	DNSNames       []string
+	EmailAddresses []string
+	DirectoryNames []pkix.Name
+	URIs           []string
+	IPNets         []net.IPNet
+	RegisteredIDs  []asn1.ObjectIdentifier
+	OtherNames     []OtherName
+}
+
+// Len returns the total number of names in a GeneralNames object.
+func (gn GeneralNames) Len() int {
+	return (len(gn.DNSNames) + len(gn.EmailAddresses) + len(gn.DirectoryNames) +
+		len(gn.URIs) + len(gn.IPNets) + len(gn.RegisteredIDs) + len(gn.OtherNames))
+}
+
+// Empty indicates whether a GeneralNames object is empty.
+func (gn GeneralNames) Empty() bool { + return gn.Len() == 0 +} + +func parseGeneralNames(value []byte, gname *GeneralNames) error { + // RFC 5280, 4.2.1.6 + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + var rest []byte + if rest, err := asn1.Unmarshal(value, &seq); err != nil { + return fmt.Errorf("x509: failed to parse GeneralNames: %v", err) + } else if len(rest) != 0 { + return fmt.Errorf("x509: trailing data after GeneralNames") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return fmt.Errorf("x509: failed to parse GeneralNames sequence, tag %+v", seq) + } + + rest = seq.Bytes + for len(rest) > 0 { + var err error + rest, err = parseGeneralName(rest, gname, false) + if err != nil { + return fmt.Errorf("x509: failed to parse GeneralName: %v", err) + } + } + return nil +} + +func parseGeneralName(data []byte, gname *GeneralNames, withMask bool) ([]byte, error) { + var v asn1.RawValue + var rest []byte + var err error + rest, err = asn1.Unmarshal(data, &v) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames: %v", err) + } + switch v.Tag { + case tagOtherName: + if !v.IsCompound { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: not compound") + } + var other OtherName + v.FullBytes = append([]byte{}, v.FullBytes...) + v.FullBytes[0] = asn1.TagSequence | 0x20 + _, err = asn1.Unmarshal(v.FullBytes, &other) + if err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.otherName: %v", err) + } + gname.OtherNames = append(gname.OtherNames, other) + case tagRFC822Name: + gname.EmailAddresses = append(gname.EmailAddresses, string(v.Bytes)) + case tagDNSName: + dns := string(v.Bytes) + gname.DNSNames = append(gname.DNSNames, dns) + case tagDirectoryName: + var rdnSeq pkix.RDNSequence + if _, err := asn1.Unmarshal(v.Bytes, &rdnSeq); err != nil { + return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.directoryName: %v", err) + } + var dirName pkix.Name + dirName.FillFromRDNSequence(&rdnSeq) + gname.DirectoryNames = append(gname.DirectoryNames, dirName) + case tagURI: + gname.URIs = append(gname.URIs, string(v.Bytes)) + case tagIPAddress: + vlen := len(v.Bytes) + if withMask { + switch vlen { + case (2 * net.IPv4len), (2 * net.IPv6len): + ipNet := net.IPNet{IP: v.Bytes[0 : vlen/2], Mask: v.Bytes[vlen/2:]} + gname.IPNets = append(gname.IPNets, ipNet) + default: + return nil, fmt.Errorf("x509: invalid IP/mask length %d in GeneralNames.iPAddress", vlen) + } + } else { + switch vlen { + case net.IPv4len, net.IPv6len: + ipNet := net.IPNet{IP: v.Bytes} + gname.IPNets = append(gname.IPNets, ipNet) + default: + return nil, fmt.Errorf("x509: invalid IP length %d in GeneralNames.iPAddress", vlen) + } + } + case tagRegisteredID: + var oid asn1.ObjectIdentifier + v.FullBytes = append([]byte{}, v.FullBytes...) 
+		v.FullBytes[0] = asn1.TagOID
+		_, err = asn1.Unmarshal(v.FullBytes, &oid)
+		if err != nil {
+			return nil, fmt.Errorf("x509: failed to unmarshal GeneralNames.registeredID: %v", err)
+		}
+		gname.RegisteredIDs = append(gname.RegisteredIDs, oid)
+	default:
+		return nil, fmt.Errorf("x509: failed to unmarshal GeneralName: unknown tag %d", v.Tag)
+	}
+	return rest, nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go
new file mode 100644
index 00000000..fd86fc4b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/nilref_nil_darwin.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build cgo,!arm,!arm64,!ios,!go1.10
+
+package x509
+
+/*
+#cgo CFLAGS: -mmacosx-version-min=10.6
+#cgo LDFLAGS: -framework CoreFoundation -framework Security
+
+#include <CoreFoundation/CoreFoundation.h>
+*/
+import "C"
+
+// For Go versions before 1.10, nil values for Apple's CoreFoundation
+// CF*Ref types were represented by nil. See:
+// https://github.com/golang/go/commit/b868616b63a8
+func setNilCFRef(v *C.CFDataRef) {
+	*v = nil
+}
+
+func isNilCFRef(v C.CFDataRef) bool {
+	return v == nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go
new file mode 100644
index 00000000..c24724d8
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/nilref_zero_darwin.go
@@ -0,0 +1,26 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build cgo,!arm,!arm64,!ios,go1.10
+
+package x509
+
+/*
+#cgo CFLAGS: -mmacosx-version-min=10.6
+#cgo LDFLAGS: -framework CoreFoundation -framework Security
+
+#include <CoreFoundation/CoreFoundation.h>
+*/
+import "C"
+
+// For Go versions >= 1.10, nil values for Apple's CoreFoundation
+// CF*Ref types are represented by zero. See:
+// https://github.com/golang/go/commit/b868616b63a8
+func setNilCFRef(v *C.CFDataRef) {
+	*v = 0
+}
+
+func isNilCFRef(v C.CFDataRef) bool {
+	return v == 0
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
new file mode 100644
index 00000000..93d1e4a9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pem_decrypt.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+// RFC 1423 describes the encryption of PEM blocks. The algorithm used to
+// generate a key from the password was derived by looking at the OpenSSL
+// implementation.
+
+import (
+	"crypto/aes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/md5"
+	"encoding/hex"
+	"encoding/pem"
+	"errors"
+	"io"
+	"strings"
+)
+
+type PEMCipher int
+
+// Possible values for the EncryptPEMBlock encryption algorithm.
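+//
+// Illustrative sketch (assumed usage; der and password are hypothetical
+// byte slices): encrypting DER data under a password with one of these
+// ciphers and decrypting it again:
+//
+//	block, err := EncryptPEMBlock(rand.Reader, "RSA PRIVATE KEY", der, password, PEMCipherAES256)
+//	if err == nil {
+//		plain, err := DecryptPEMBlock(block, password)
+//		_, _ = plain, err
+//	}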
+const ( + _ PEMCipher = iota + PEMCipherDES + PEMCipher3DES + PEMCipherAES128 + PEMCipherAES192 + PEMCipherAES256 +) + +// rfc1423Algo holds a method for enciphering a PEM block. +type rfc1423Algo struct { + cipher PEMCipher + name string + cipherFunc func(key []byte) (cipher.Block, error) + keySize int + blockSize int +} + +// rfc1423Algos holds a slice of the possible ways to encrypt a PEM +// block. The ivSize numbers were taken from the OpenSSL source. +var rfc1423Algos = []rfc1423Algo{{ + cipher: PEMCipherDES, + name: "DES-CBC", + cipherFunc: des.NewCipher, + keySize: 8, + blockSize: des.BlockSize, +}, { + cipher: PEMCipher3DES, + name: "DES-EDE3-CBC", + cipherFunc: des.NewTripleDESCipher, + keySize: 24, + blockSize: des.BlockSize, +}, { + cipher: PEMCipherAES128, + name: "AES-128-CBC", + cipherFunc: aes.NewCipher, + keySize: 16, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES192, + name: "AES-192-CBC", + cipherFunc: aes.NewCipher, + keySize: 24, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES256, + name: "AES-256-CBC", + cipherFunc: aes.NewCipher, + keySize: 32, + blockSize: aes.BlockSize, +}, +} + +// deriveKey uses a key derivation function to stretch the password into a key +// with the number of bits our cipher requires. This algorithm was derived from +// the OpenSSL source. +func (c rfc1423Algo) deriveKey(password, salt []byte) []byte { + hash := md5.New() + out := make([]byte, c.keySize) + var digest []byte + + for i := 0; i < len(out); i += len(digest) { + hash.Reset() + hash.Write(digest) + hash.Write(password) + hash.Write(salt) + digest = hash.Sum(digest[:0]) + copy(out[i:], digest) + } + return out +} + +// IsEncryptedPEMBlock returns if the PEM block is password encrypted. +func IsEncryptedPEMBlock(b *pem.Block) bool { + _, ok := b.Headers["DEK-Info"] + return ok +} + +// IncorrectPasswordError is returned when an incorrect password is detected. +var IncorrectPasswordError = errors.New("x509: decryption password incorrect") + +// DecryptPEMBlock takes a password encrypted PEM block and the password used to +// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects +// the DEK-Info header to determine the algorithm used for decryption. If no +// DEK-Info header is present, an error is returned. If an incorrect password +// is detected an IncorrectPasswordError is returned. Because of deficiencies +// in the encrypted-PEM format, it's not always possible to detect an incorrect +// password. In these cases no error will be returned but the decrypted DER +// bytes will be random noise. +func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) { + dek, ok := b.Headers["DEK-Info"] + if !ok { + return nil, errors.New("x509: no DEK-Info header in block") + } + + idx := strings.Index(dek, ",") + if idx == -1 { + return nil, errors.New("x509: malformed DEK-Info header") + } + + mode, hexIV := dek[:idx], dek[idx+1:] + ciph := cipherByName(mode) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv, err := hex.DecodeString(hexIV) + if err != nil { + return nil, err + } + if len(iv) != ciph.blockSize { + return nil, errors.New("x509: incorrect IV size") + } + + // Based on the OpenSSL implementation. The salt is the first 8 bytes + // of the initialization vector. 
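+	// (This derivation appears to match OpenSSL's EVP_BytesToKey with an
+	// MD5 digest and a single iteration; see deriveKey above.)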
+ key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + + if len(b.Bytes)%block.BlockSize() != 0 { + return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size") + } + + data := make([]byte, len(b.Bytes)) + dec := cipher.NewCBCDecrypter(block, iv) + dec.CryptBlocks(data, b.Bytes) + + // Blocks are padded using a scheme where the last n bytes of padding are all + // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. + // For example: + // [x y z 2 2] + // [x y 7 7 7 7 7 7 7] + // If we detect a bad padding, we assume it is an invalid password. + dlen := len(data) + if dlen == 0 || dlen%ciph.blockSize != 0 { + return nil, errors.New("x509: invalid padding") + } + last := int(data[dlen-1]) + if dlen < last { + return nil, IncorrectPasswordError + } + if last == 0 || last > ciph.blockSize { + return nil, IncorrectPasswordError + } + for _, val := range data[dlen-last:] { + if int(val) != last { + return nil, IncorrectPasswordError + } + } + return data[:dlen-last], nil +} + +// EncryptPEMBlock returns a PEM block of the specified type holding the +// given DER-encoded data encrypted with the specified algorithm and +// password. +func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) { + ciph := cipherByKey(alg) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv := make([]byte, ciph.blockSize) + if _, err := io.ReadFull(rand, iv); err != nil { + return nil, errors.New("x509: cannot generate IV: " + err.Error()) + } + // The salt is the first 8 bytes of the initialization vector, + // matching the key derivation in DecryptPEMBlock. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + enc := cipher.NewCBCEncrypter(block, iv) + pad := ciph.blockSize - len(data)%ciph.blockSize + encrypted := make([]byte, len(data), len(data)+pad) + // We could save this copy by encrypting all the whole blocks in + // the data separately, but it doesn't seem worth the additional + // code. + copy(encrypted, data) + // See RFC 1423, Section 1.1. + for i := 0; i < pad; i++ { + encrypted = append(encrypted, byte(pad)) + } + enc.CryptBlocks(encrypted, encrypted) + + return &pem.Block{ + Type: blockType, + Headers: map[string]string{ + "Proc-Type": "4,ENCRYPTED", + "DEK-Info": ciph.name + "," + hex.EncodeToString(iv), + }, + Bytes: encrypted, + }, nil +} + +func cipherByName(name string) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.name == name { + return alg + } + } + return nil +} + +func cipherByKey(key PEMCipher) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.cipher == key { + return alg + } + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go new file mode 100644 index 00000000..e50e1a85 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkcs1.go @@ -0,0 +1,155 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package x509 + +import ( + "crypto/rsa" + "errors" + "math/big" + + "github.com/google/certificate-transparency-go/asn1" +) + +// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key. +type pkcs1PrivateKey struct { + Version int + N *big.Int + E int + D *big.Int + P *big.Int + Q *big.Int + // We ignore these values, if present, because rsa will calculate them. + Dp *big.Int `asn1:"optional"` + Dq *big.Int `asn1:"optional"` + Qinv *big.Int `asn1:"optional"` + + AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"` +} + +type pkcs1AdditionalRSAPrime struct { + Prime *big.Int + + // We ignore these values because rsa will calculate them. + Exp *big.Int + Coeff *big.Int +} + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key. +type pkcs1PublicKey struct { + N *big.Int + E int +} + +// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form. +func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) { + var priv pkcs1PrivateKey + rest, err := asn1.Unmarshal(der, &priv) + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + if err != nil { + return nil, err + } + + if priv.Version > 1 { + return nil, errors.New("x509: unsupported private key version") + } + + if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative value") + } + + key := new(rsa.PrivateKey) + key.PublicKey = rsa.PublicKey{ + E: priv.E, + N: priv.N, + } + + key.D = priv.D + key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes)) + key.Primes[0] = priv.P + key.Primes[1] = priv.Q + for i, a := range priv.AdditionalPrimes { + if a.Prime.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative prime") + } + key.Primes[i+2] = a.Prime + // We ignore the other two values because rsa will calculate + // them as needed. + } + + err = key.Validate() + if err != nil { + return nil, err + } + key.Precompute() + + return key, nil +} + +// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form. +func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte { + key.Precompute() + + version := 0 + if len(key.Primes) > 2 { + version = 1 + } + + priv := pkcs1PrivateKey{ + Version: version, + N: key.N, + E: key.PublicKey.E, + D: key.D, + P: key.Primes[0], + Q: key.Primes[1], + Dp: key.Precomputed.Dp, + Dq: key.Precomputed.Dq, + Qinv: key.Precomputed.Qinv, + } + + priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues)) + for i, values := range key.Precomputed.CRTValues { + priv.AdditionalPrimes[i].Prime = key.Primes[2+i] + priv.AdditionalPrimes[i].Exp = values.Exp + priv.AdditionalPrimes[i].Coeff = values.Coeff + } + + b, _ := asn1.Marshal(priv) + return b +} + +// ParsePKCS1PublicKey parses a PKCS#1 public key in ASN.1 DER form. +func ParsePKCS1PublicKey(der []byte) (*rsa.PublicKey, error) { + var pub pkcs1PublicKey + rest, err := asn1.Unmarshal(der, &pub) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + if pub.N.Sign() <= 0 || pub.E <= 0 { + return nil, errors.New("x509: public key contains zero or negative value") + } + if pub.E > 1<<31-1 { + return nil, errors.New("x509: public key contains large public exponent") + } + + return &rsa.PublicKey{ + E: pub.E, + N: pub.N, + }, nil +} + +// MarshalPKCS1PublicKey converts an RSA public key to PKCS#1, ASN.1 DER form. 
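+//
+// Illustrative sketch (assumed usage; priv is a hypothetical
+// *rsa.PrivateKey): round-tripping a public key through DER:
+//
+//	der := MarshalPKCS1PublicKey(&priv.PublicKey)
+//	pub, err := ParsePKCS1PublicKey(der)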
+func MarshalPKCS1PublicKey(key *rsa.PublicKey) []byte { + derBytes, _ := asn1.Marshal(pkcs1PublicKey{ + N: key.N, + E: key.E, + }) + return derBytes +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go new file mode 100644 index 00000000..c2d6327d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkcs8.go @@ -0,0 +1,136 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" + "golang.org/x/crypto/ed25519" +) + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See +// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn +// and RFC 5208. +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte + // optional attributes omitted. +} + +// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key. +// See RFC 5208. +func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) { + var privKey pkcs8 + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, err + } + switch { + case privKey.Algo.Algorithm.Equal(OIDPublicKeyRSA): + key, err = ParsePKCS1PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(OIDPublicKeyECDSA): + bytes := privKey.Algo.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil { + namedCurveOID = nil + } + key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(OIDPublicKeyEd25519): + key, err = ParseEd25519PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse Ed25519 private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + default: + return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm) + } +} + +// MarshalPKCS8PrivateKey converts a private key to PKCS#8 encoded form. +// The following key types are supported: *rsa.PrivateKey, *ecdsa.PrivateKey. +// Unsupported key types result in an error. +// +// See RFC 5208. 
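+//
+// Illustrative sketch (assumed usage; ecKey is a hypothetical
+// *ecdsa.PrivateKey):
+//
+//	der, err := MarshalPKCS8PrivateKey(ecKey)
+//	if err == nil {
+//		key, err := ParsePKCS8PrivateKey(der)
+//		_, _ = key, err
+//	}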
+func MarshalPKCS8PrivateKey(key interface{}) ([]byte, error) { + var privKey pkcs8 + + switch k := key.(type) { + case *rsa.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyRSA, + Parameters: asn1.NullRawValue, + } + privKey.PrivateKey = MarshalPKCS1PrivateKey(k) + + case *ecdsa.PrivateKey: + oid, ok := OIDFromNamedCurve(k.Curve) + if !ok { + return nil, errors.New("x509: unknown curve while marshalling to PKCS#8") + } + + oidBytes, err := asn1.Marshal(oid) + if err != nil { + return nil, errors.New("x509: failed to marshal curve OID: " + err.Error()) + } + + privKey.Algo = pkix.AlgorithmIdentifier{ + Algorithm: OIDPublicKeyECDSA, + Parameters: asn1.RawValue{ + FullBytes: oidBytes, + }, + } + + if privKey.PrivateKey, err = marshalECPrivateKeyWithOID(k, nil); err != nil { + return nil, errors.New("x509: failed to marshal EC private key while building PKCS#8: " + err.Error()) + } + + case ed25519.PrivateKey: + privKey.Algo = pkix.AlgorithmIdentifier{Algorithm: OIDPublicKeyEd25519} + var err error + if privKey.PrivateKey, err = MarshalEd25519PrivateKey(k); err != nil { + return nil, fmt.Errorf("x509: failed to marshal Ed25519 private key while building PKCS#8: %v", err) + } + + default: + return nil, fmt.Errorf("x509: unknown key type while marshalling PKCS#8: %T", key) + } + + return asn1.Marshal(privKey) +} + +// MarshalEd25519PrivateKey converts an Ed25519 private key to ASN.1 DER encoded form +// (as an OCTET STRING holding the key seed, as per RFC 8410 section 7). +func MarshalEd25519PrivateKey(key ed25519.PrivateKey) ([]byte, error) { + return asn1.Marshal(key.Seed()) +} + +// ParseEd25519PrivateKey returns an Ed25519 private key from its ASN.1 DER encoded form +// (as an OCTET STRING holding the key seed, as per RFC 8410 section 7). +func ParseEd25519PrivateKey(der []byte) (ed25519.PrivateKey, error) { + var keySeed []byte + if _, err := asn1.Unmarshal(der, &keySeed); err != nil { + return nil, err + } + if len(keySeed) != ed25519.SeedSize { + return nil, fmt.Errorf("x509: ed25519 seed length should be %d bytes, got %d", ed25519.SeedSize, len(keySeed)) + } + return ed25519.NewKeyFromSeed(keySeed), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go new file mode 100644 index 00000000..843fa1f2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/pkix/pkix.go @@ -0,0 +1,286 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkix contains shared, low level structures used for ASN.1 parsing +// and serialization of X.509 certificates, CRL and OCSP. +package pkix + +import ( + "encoding/hex" + "fmt" + "math/big" + "time" + + "github.com/google/certificate-transparency-go/asn1" +) + +// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.1.1.2. 
+type AlgorithmIdentifier struct { + Algorithm asn1.ObjectIdentifier + Parameters asn1.RawValue `asn1:"optional"` +} + +type RDNSequence []RelativeDistinguishedNameSET + +var attributeTypeNames = map[string]string{ + "2.5.4.6": "C", + "2.5.4.10": "O", + "2.5.4.11": "OU", + "2.5.4.3": "CN", + "2.5.4.5": "SERIALNUMBER", + "2.5.4.7": "L", + "2.5.4.8": "ST", + "2.5.4.9": "STREET", + "2.5.4.17": "POSTALCODE", +} + +// String returns a string representation of the sequence r, +// roughly following the RFC 2253 Distinguished Names syntax. +func (r RDNSequence) String() string { + s := "" + for i := 0; i < len(r); i++ { + rdn := r[len(r)-1-i] + if i > 0 { + s += "," + } + for j, tv := range rdn { + if j > 0 { + s += "+" + } + + oidString := tv.Type.String() + typeName, ok := attributeTypeNames[oidString] + if !ok { + derBytes, err := asn1.Marshal(tv.Value) + if err == nil { + s += oidString + "=#" + hex.EncodeToString(derBytes) + continue // No value escaping necessary. + } + + typeName = oidString + } + + valueString := fmt.Sprint(tv.Value) + escaped := make([]rune, 0, len(valueString)) + + for k, c := range valueString { + escape := false + + switch c { + case ',', '+', '"', '\\', '<', '>', ';': + escape = true + + case ' ': + escape = k == 0 || k == len(valueString)-1 + + case '#': + escape = k == 0 + } + + if escape { + escaped = append(escaped, '\\', c) + } else { + escaped = append(escaped, c) + } + } + + s += typeName + "=" + string(escaped) + } + } + + return s +} + +type RelativeDistinguishedNameSET []AttributeTypeAndValue + +// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in +// RFC 5280, Section 4.1.2.4. +type AttributeTypeAndValue struct { + Type asn1.ObjectIdentifier + Value interface{} +} + +// AttributeTypeAndValueSET represents a set of ASN.1 sequences of +// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10). +type AttributeTypeAndValueSET struct { + Type asn1.ObjectIdentifier + Value [][]AttributeTypeAndValue `asn1:"set"` +} + +// Extension represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.2. +type Extension struct { + Id asn1.ObjectIdentifier + Critical bool `asn1:"optional"` + Value []byte +} + +// Name represents an X.509 distinguished name. This only includes the common +// elements of a DN. When parsing, all elements are stored in Names and +// non-standard elements can be extracted from there. When marshaling, elements +// in ExtraNames are appended and override other values with the same OID. 
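+//
+// Illustrative sketch (assumed usage): building a minimal DN and rendering
+// it in RFC 2253 style; with this Name, String() would yield
+// "CN=example.com,O=Example Org":
+//
+//	n := Name{CommonName: "example.com", Organization: []string{"Example Org"}}
+//	s := n.String()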
+type Name struct { + Country, Organization, OrganizationalUnit []string + Locality, Province []string + StreetAddress, PostalCode []string + SerialNumber, CommonName string + + Names []AttributeTypeAndValue + ExtraNames []AttributeTypeAndValue +} + +func (n *Name) FillFromRDNSequence(rdns *RDNSequence) { + for _, rdn := range *rdns { + if len(rdn) == 0 { + continue + } + + for _, atv := range rdn { + n.Names = append(n.Names, atv) + value, ok := atv.Value.(string) + if !ok { + continue + } + + t := atv.Type + if len(t) == 4 && t[0] == OIDAttribute[0] && t[1] == OIDAttribute[1] && t[2] == OIDAttribute[2] { + switch t[3] { + case OIDCommonName[3]: + n.CommonName = value + case OIDSerialNumber[3]: + n.SerialNumber = value + case OIDCountry[3]: + n.Country = append(n.Country, value) + case OIDLocality[3]: + n.Locality = append(n.Locality, value) + case OIDProvince[3]: + n.Province = append(n.Province, value) + case OIDStreetAddress[3]: + n.StreetAddress = append(n.StreetAddress, value) + case OIDOrganization[3]: + n.Organization = append(n.Organization, value) + case OIDOrganizationalUnit[3]: + n.OrganizationalUnit = append(n.OrganizationalUnit, value) + case OIDPostalCode[3]: + n.PostalCode = append(n.PostalCode, value) + } + } + } + } +} + +var ( + OIDAttribute = asn1.ObjectIdentifier{2, 5, 4} + OIDCountry = asn1.ObjectIdentifier{2, 5, 4, 6} + OIDOrganization = asn1.ObjectIdentifier{2, 5, 4, 10} + OIDOrganizationalUnit = asn1.ObjectIdentifier{2, 5, 4, 11} + OIDCommonName = asn1.ObjectIdentifier{2, 5, 4, 3} + OIDSerialNumber = asn1.ObjectIdentifier{2, 5, 4, 5} + OIDLocality = asn1.ObjectIdentifier{2, 5, 4, 7} + OIDProvince = asn1.ObjectIdentifier{2, 5, 4, 8} + OIDStreetAddress = asn1.ObjectIdentifier{2, 5, 4, 9} + OIDPostalCode = asn1.ObjectIdentifier{2, 5, 4, 17} + + OIDPseudonym = asn1.ObjectIdentifier{2, 5, 4, 65} + OIDTitle = asn1.ObjectIdentifier{2, 5, 4, 12} + OIDDnQualifier = asn1.ObjectIdentifier{2, 5, 4, 46} + OIDName = asn1.ObjectIdentifier{2, 5, 4, 41} + OIDSurname = asn1.ObjectIdentifier{2, 5, 4, 4} + OIDGivenName = asn1.ObjectIdentifier{2, 5, 4, 42} + OIDInitials = asn1.ObjectIdentifier{2, 5, 4, 43} + OIDGenerationQualifier = asn1.ObjectIdentifier{2, 5, 4, 44} +) + +// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence +// and returns the new value. The relativeDistinguishedNameSET contains an +// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and +// search for AttributeTypeAndValue. 
+func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence { + if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) { + return in + } + + s := make([]AttributeTypeAndValue, len(values)) + for i, value := range values { + s[i].Type = oid + s[i].Value = value + } + + return append(in, s) +} + +func (n Name) ToRDNSequence() (ret RDNSequence) { + ret = n.appendRDNs(ret, n.Country, OIDCountry) + ret = n.appendRDNs(ret, n.Province, OIDProvince) + ret = n.appendRDNs(ret, n.Locality, OIDLocality) + ret = n.appendRDNs(ret, n.StreetAddress, OIDStreetAddress) + ret = n.appendRDNs(ret, n.PostalCode, OIDPostalCode) + ret = n.appendRDNs(ret, n.Organization, OIDOrganization) + ret = n.appendRDNs(ret, n.OrganizationalUnit, OIDOrganizationalUnit) + if len(n.CommonName) > 0 { + ret = n.appendRDNs(ret, []string{n.CommonName}, OIDCommonName) + } + if len(n.SerialNumber) > 0 { + ret = n.appendRDNs(ret, []string{n.SerialNumber}, OIDSerialNumber) + } + for _, atv := range n.ExtraNames { + ret = append(ret, []AttributeTypeAndValue{atv}) + } + + return ret +} + +// String returns the string form of n, roughly following +// the RFC 2253 Distinguished Names syntax. +func (n Name) String() string { + return n.ToRDNSequence().String() +} + +// oidInAttributeTypeAndValue reports whether a type with the given OID exists +// in atv. +func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool { + for _, a := range atv { + if a.Type.Equal(oid) { + return true + } + } + return false +} + +// CertificateList represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the +// signature. +type CertificateList struct { + TBSCertList TBSCertificateList + SignatureAlgorithm AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// HasExpired reports whether certList should have been updated by now. +func (certList *CertificateList) HasExpired(now time.Time) bool { + return !now.Before(certList.TBSCertList.NextUpdate) +} + +// TBSCertificateList represents the ASN.1 structure TBSCertList. See RFC +// 5280, section 5.1. +type TBSCertificateList struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0"` + Signature AlgorithmIdentifier + Issuer RDNSequence + ThisUpdate time.Time + NextUpdate time.Time `asn1:"optional"` + RevokedCertificates []RevokedCertificate `asn1:"optional"` + Extensions []Extension `asn1:"tag:0,optional,explicit"` +} + +// RevokedCertificate represents the unnamed ASN.1 structure that makes up the +// revokedCertificates member of the TBSCertList structure. See RFC +// 5280, section 5.1. +type RevokedCertificate struct { + SerialNumber *big.Int + RevocationTime time.Time + Extensions []Extension `asn1:"optional"` +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go new file mode 100644 index 00000000..3543e304 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/ptr_sysptr_windows.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.11 + +package x509 + +import ( + "syscall" + "unsafe" +) + +// For Go versions >= 1.11, the ExtraPolicyPara field in +// syscall.CertChainPolicyPara is of type syscall.Pointer. See: +// https://github.com/golang/go/commit/4869ec00e87ef + +func convertToPolicyParaType(p unsafe.Pointer) syscall.Pointer { + return (syscall.Pointer)(p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go new file mode 100644 index 00000000..3908833a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/ptr_uint_windows.go @@ -0,0 +1,17 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.11 + +package x509 + +import "unsafe" + +// For Go versions before 1.11, the ExtraPolicyPara field in +// syscall.CertChainPolicyPara was of type uintptr. See: +// https://github.com/golang/go/commit/4869ec00e87ef + +func convertToPolicyParaType(p unsafe.Pointer) uintptr { + return uintptr(p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/revoked.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/revoked.go new file mode 100644 index 00000000..fde74b94 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/revoked.go @@ -0,0 +1,365 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "encoding/pem" + "time" + + "github.com/google/certificate-transparency-go/asn1" + "github.com/google/certificate-transparency-go/x509/pkix" +) + +// OID values for CRL extensions (TBSCertList.Extensions), RFC 5280 s5.2. +var ( + OIDExtensionCRLNumber = asn1.ObjectIdentifier{2, 5, 29, 20} + OIDExtensionDeltaCRLIndicator = asn1.ObjectIdentifier{2, 5, 29, 27} + OIDExtensionIssuingDistributionPoint = asn1.ObjectIdentifier{2, 5, 29, 28} +) + +// OID values for CRL entry extensions (RevokedCertificate.Extensions), RFC 5280 s5.3 +var ( + OIDExtensionCRLReasons = asn1.ObjectIdentifier{2, 5, 29, 21} + OIDExtensionInvalidityDate = asn1.ObjectIdentifier{2, 5, 29, 24} + OIDExtensionCertificateIssuer = asn1.ObjectIdentifier{2, 5, 29, 29} +) + +// RevocationReasonCode represents the reason for a certificate revocation; see RFC 5280 s5.3.1. +type RevocationReasonCode asn1.Enumerated + +// RevocationReasonCode values. +var ( + Unspecified = RevocationReasonCode(0) + KeyCompromise = RevocationReasonCode(1) + CACompromise = RevocationReasonCode(2) + AffiliationChanged = RevocationReasonCode(3) + Superseded = RevocationReasonCode(4) + CessationOfOperation = RevocationReasonCode(5) + CertificateHold = RevocationReasonCode(6) + RemoveFromCRL = RevocationReasonCode(8) + PrivilegeWithdrawn = RevocationReasonCode(9) + AACompromise = RevocationReasonCode(10) +) + +// ReasonFlag holds a bitmask of applicable revocation reasons, from RFC 5280 s4.2.1.13 +type ReasonFlag int + +// ReasonFlag values. 
+const ( + UnusedFlag ReasonFlag = 1 << iota + KeyCompromiseFlag + CACompromiseFlag + AffiliationChangedFlag + SupersededFlag + CessationOfOperationFlag + CertificateHoldFlag + PrivilegeWithdrawnFlag + AACompromiseFlag +) + +// CertificateList represents the ASN.1 structure of the same name from RFC 5280, s5.1. +// It has the same content as pkix.CertificateList, but the contents include parsed versions +// of any extensions. +type CertificateList struct { + Raw asn1.RawContent + TBSCertList TBSCertList + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// ExpiredAt reports whether now is past the expiry time of certList. +func (certList *CertificateList) ExpiredAt(now time.Time) bool { + return now.After(certList.TBSCertList.NextUpdate) +} + +// Indication of whether extensions need to be critical or non-critical. Extensions that +// can be either are omitted from the map. +var listExtCritical = map[string]bool{ + // From RFC 5280... + OIDExtensionAuthorityKeyId.String(): false, // s5.2.1 + OIDExtensionIssuerAltName.String(): false, // s5.2.2 + OIDExtensionCRLNumber.String(): false, // s5.2.3 + OIDExtensionDeltaCRLIndicator.String(): true, // s5.2.4 + OIDExtensionIssuingDistributionPoint.String(): true, // s5.2.5 + OIDExtensionFreshestCRL.String(): false, // s5.2.6 + OIDExtensionAuthorityInfoAccess.String(): false, // s5.2.7 +} + +var certExtCritical = map[string]bool{ + // From RFC 5280... + OIDExtensionCRLReasons.String(): false, // s5.3.1 + OIDExtensionInvalidityDate.String(): false, // s5.3.2 + OIDExtensionCertificateIssuer.String(): true, // s5.3.3 +} + +// IssuingDistributionPoint represents the ASN.1 structure of the same +// name +type IssuingDistributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + OnlyContainsUserCerts bool `asn1:"optional,tag:1"` + OnlyContainsCACerts bool `asn1:"optional,tag:2"` + OnlySomeReasons asn1.BitString `asn1:"optional,tag:3"` + IndirectCRL bool `asn1:"optional,tag:4"` + OnlyContainsAttributeCerts bool `asn1:"optional,tag:5"` +} + +// TBSCertList represents the ASN.1 structure of the same name from RFC +// 5280, section 5.1. It has the same content as pkix.TBSCertificateList +// but the extensions are included in a parsed format. +type TBSCertList struct { + Raw asn1.RawContent + Version int + Signature pkix.AlgorithmIdentifier + Issuer pkix.RDNSequence + ThisUpdate time.Time + NextUpdate time.Time + RevokedCertificates []*RevokedCertificate + Extensions []pkix.Extension + // Cracked out extensions: + AuthorityKeyID []byte + IssuerAltNames GeneralNames + CRLNumber int + BaseCRLNumber int // -1 if no delta CRL present + IssuingDistributionPoint IssuingDistributionPoint + IssuingDPFullNames GeneralNames + FreshestCRLDistributionPoint []string + OCSPServer []string + IssuingCertificateURL []string +} + +// ParseCertificateList parses a CertificateList (e.g. a CRL) from the given +// bytes. It's often the case that PEM encoded CRLs will appear where they +// should be DER encoded, so this function will transparently handle PEM +// encoding as long as there isn't any leading garbage. +func ParseCertificateList(clBytes []byte) (*CertificateList, error) { + if bytes.HasPrefix(clBytes, pemCRLPrefix) { + block, _ := pem.Decode(clBytes) + if block != nil && block.Type == pemType { + clBytes = block.Bytes + } + } + return ParseCertificateListDER(clBytes) +} + +// ParseCertificateListDER parses a DER encoded CertificateList from the given bytes. 
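+// It accumulates parse problems in an Errors collection instead of failing
+// on the first issue.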
+// For non-fatal errors, this function returns both an error and a CertificateList
+// object.
+func ParseCertificateListDER(derBytes []byte) (*CertificateList, error) {
+	var errs Errors
+	// First parse the DER into the pkix structures.
+	pkixList := new(pkix.CertificateList)
+	if rest, err := asn1.Unmarshal(derBytes, pkixList); err != nil {
+		errs.AddID(ErrInvalidCertList, err)
+		return nil, &errs
+	} else if len(rest) != 0 {
+		errs.AddID(ErrTrailingCertList)
+		return nil, &errs
+	}
+
+	// Transcribe the revoked certs but crack out extensions.
+	revokedCerts := make([]*RevokedCertificate, len(pkixList.TBSCertList.RevokedCertificates))
+	for i, pkixRevoked := range pkixList.TBSCertList.RevokedCertificates {
+		revokedCerts[i] = parseRevokedCertificate(pkixRevoked, &errs)
+		if revokedCerts[i] == nil {
+			return nil, &errs
+		}
+	}
+
+	certList := CertificateList{
+		Raw: derBytes,
+		TBSCertList: TBSCertList{
+			Raw:                 pkixList.TBSCertList.Raw,
+			Version:             pkixList.TBSCertList.Version,
+			Signature:           pkixList.TBSCertList.Signature,
+			Issuer:              pkixList.TBSCertList.Issuer,
+			ThisUpdate:          pkixList.TBSCertList.ThisUpdate,
+			NextUpdate:          pkixList.TBSCertList.NextUpdate,
+			RevokedCertificates: revokedCerts,
+			Extensions:          pkixList.TBSCertList.Extensions,
+			CRLNumber:           -1,
+			BaseCRLNumber:       -1,
+		},
+		SignatureAlgorithm: pkixList.SignatureAlgorithm,
+		SignatureValue:     pkixList.SignatureValue,
+	}
+
+	// Now crack out extensions.
+	for _, e := range certList.TBSCertList.Extensions {
+		if expectCritical, present := listExtCritical[e.Id.String()]; present {
+			if e.Critical && !expectCritical {
+				errs.AddID(ErrUnexpectedlyCriticalCertListExtension, e.Id)
+			} else if !e.Critical && expectCritical {
+				errs.AddID(ErrUnexpectedlyNonCriticalCertListExtension, e.Id)
+			}
+		}
+		switch {
+		case e.Id.Equal(OIDExtensionAuthorityKeyId):
+			// RFC 5280 s5.2.1
+			var a authKeyId
+			if rest, err := asn1.Unmarshal(e.Value, &a); err != nil {
+				errs.AddID(ErrInvalidCertListAuthKeyID, err)
+			} else if len(rest) != 0 {
+				errs.AddID(ErrTrailingCertListAuthKeyID)
+			}
+			certList.TBSCertList.AuthorityKeyID = a.Id
+		case e.Id.Equal(OIDExtensionIssuerAltName):
+			// RFC 5280 s5.2.2
+			if err := parseGeneralNames(e.Value, &certList.TBSCertList.IssuerAltNames); err != nil {
+				errs.AddID(ErrInvalidCertListIssuerAltName, err)
+			}
+		case e.Id.Equal(OIDExtensionCRLNumber):
+			// RFC 5280 s5.2.3
+			if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.CRLNumber); err != nil {
+				errs.AddID(ErrInvalidCertListCRLNumber, err)
+			} else if len(rest) != 0 {
+				errs.AddID(ErrTrailingCertListCRLNumber)
+			}
+			if certList.TBSCertList.CRLNumber < 0 {
+				errs.AddID(ErrNegativeCertListCRLNumber, certList.TBSCertList.CRLNumber)
+			}
+		case e.Id.Equal(OIDExtensionDeltaCRLIndicator):
+			// RFC 5280 s5.2.4
+			if rest, err := asn1.Unmarshal(e.Value, &certList.TBSCertList.BaseCRLNumber); err != nil {
+				errs.AddID(ErrInvalidCertListDeltaCRL, err)
+			} else if len(rest) != 0 {
+				errs.AddID(ErrTrailingCertListDeltaCRL)
+			}
+			if certList.TBSCertList.BaseCRLNumber < 0 {
+				errs.AddID(ErrNegativeCertListDeltaCRL, certList.TBSCertList.BaseCRLNumber)
+			}
+		case e.Id.Equal(OIDExtensionIssuingDistributionPoint):
+			parseIssuingDistributionPoint(e.Value, &certList.TBSCertList.IssuingDistributionPoint, &certList.TBSCertList.IssuingDPFullNames, &errs)
+		case e.Id.Equal(OIDExtensionFreshestCRL):
+			// RFC 5280 s5.2.6
+			if err := parseDistributionPoints(e.Value, &certList.TBSCertList.FreshestCRLDistributionPoint); err != nil {
+				errs.AddID(ErrInvalidCertListFreshestCRL, err)
+				return nil, err
+			}
+		case e.Id.Equal(OIDExtensionAuthorityInfoAccess):
+			// RFC 5280 s5.2.7
+			var aia []accessDescription
+			if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil {
+				errs.AddID(ErrInvalidCertListAuthInfoAccess, err)
+			} else if len(rest) != 0 {
+				errs.AddID(ErrTrailingCertListAuthInfoAccess)
+			}
+
+			for _, v := range aia {
+				// GeneralName: uniformResourceIdentifier [6] IA5String
+				if v.Location.Tag != tagURI {
+					continue
+				}
+				switch {
+				case v.Method.Equal(OIDAuthorityInfoAccessOCSP):
+					certList.TBSCertList.OCSPServer = append(certList.TBSCertList.OCSPServer, string(v.Location.Bytes))
+				case v.Method.Equal(OIDAuthorityInfoAccessIssuers):
+					certList.TBSCertList.IssuingCertificateURL = append(certList.TBSCertList.IssuingCertificateURL, string(v.Location.Bytes))
+				}
+				// TODO(drysdale): cope with more possibilities
+			}
+		default:
+			if e.Critical {
+				errs.AddID(ErrUnhandledCriticalCertListExtension, e.Id)
+			}
+		}
+	}
+
+	if errs.Fatal() {
+		return nil, &errs
+	}
+	if errs.Empty() {
+		return &certList, nil
+	}
+	return &certList, &errs
+}
+
+func parseIssuingDistributionPoint(data []byte, idp *IssuingDistributionPoint, name *GeneralNames, errs *Errors) {
+	// RFC 5280 s5.2.5
+	if rest, err := asn1.Unmarshal(data, idp); err != nil {
+		errs.AddID(ErrInvalidCertListIssuingDP, err)
+	} else if len(rest) != 0 {
+		errs.AddID(ErrTrailingCertListIssuingDP)
+	}
+
+	typeCount := 0
+	if idp.OnlyContainsUserCerts {
+		typeCount++
+	}
+	if idp.OnlyContainsCACerts {
+		typeCount++
+	}
+	if idp.OnlyContainsAttributeCerts {
+		typeCount++
+	}
+	if typeCount > 1 {
+		errs.AddID(ErrCertListIssuingDPMultipleTypes, idp.OnlyContainsUserCerts, idp.OnlyContainsCACerts, idp.OnlyContainsAttributeCerts)
+	}
+	for _, fn := range idp.DistributionPoint.FullName {
+		if _, err := parseGeneralName(fn.FullBytes, name, false); err != nil {
+			errs.AddID(ErrCertListIssuingDPInvalidFullName, err)
+		}
+	}
+}
+
+// RevokedCertificate represents the unnamed ASN.1 structure that makes up the
+// revokedCertificates member of the TBSCertList structure from RFC 5280, s5.1.
+// It has the same content as pkix.RevokedCertificate but the extensions are
+// included in a parsed format.
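+//
+// RevocationReason is left as Unspecified (0) when a CRL entry carries no
+// reason extension. A typical traversal after parsing:
+//
+//	certList, err := ParseCertificateList(crlBytes)
+//	if err != nil {
+//		// A non-nil certList may still be returned for non-fatal errors.
+//	}
+//	for _, rc := range certList.TBSCertList.RevokedCertificates {
+//		fmt.Println(rc.SerialNumber, rc.RevocationReason)
+//	}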
+type RevokedCertificate struct { + pkix.RevokedCertificate + // Cracked out extensions: + RevocationReason RevocationReasonCode + InvalidityDate time.Time + Issuer GeneralNames +} + +func parseRevokedCertificate(pkixRevoked pkix.RevokedCertificate, errs *Errors) *RevokedCertificate { + result := RevokedCertificate{RevokedCertificate: pkixRevoked} + for _, e := range pkixRevoked.Extensions { + if expectCritical, present := certExtCritical[e.Id.String()]; present { + if e.Critical && !expectCritical { + errs.AddID(ErrUnexpectedlyCriticalRevokedCertExtension, e.Id) + } else if !e.Critical && expectCritical { + errs.AddID(ErrUnexpectedlyNonCriticalRevokedCertExtension, e.Id) + } + } + switch { + case e.Id.Equal(OIDExtensionCRLReasons): + // RFC 5280, s5.3.1 + var reason asn1.Enumerated + if rest, err := asn1.Unmarshal(e.Value, &reason); err != nil { + errs.AddID(ErrInvalidRevocationReason, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingRevocationReason) + } + result.RevocationReason = RevocationReasonCode(reason) + case e.Id.Equal(OIDExtensionInvalidityDate): + // RFC 5280, s5.3.2 + if rest, err := asn1.Unmarshal(e.Value, &result.InvalidityDate); err != nil { + errs.AddID(ErrInvalidRevocationInvalidityDate, err) + } else if len(rest) != 0 { + errs.AddID(ErrTrailingRevocationInvalidityDate) + } + case e.Id.Equal(OIDExtensionCertificateIssuer): + // RFC 5280, s5.3.3 + if err := parseGeneralNames(e.Value, &result.Issuer); err != nil { + errs.AddID(ErrInvalidRevocationIssuer, err) + } + default: + if e.Critical { + errs.AddID(ErrUnhandledCriticalRevokedCertExtension, e.Id) + } + } + } + return &result +} + +// CheckCertificateListSignature checks that the signature in crl is from c. +func (c *Certificate) CheckCertificateListSignature(crl *CertificateList) error { + algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root.go new file mode 100644 index 00000000..24029624 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root.go @@ -0,0 +1,25 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import "sync" + +var ( + once sync.Once + systemRoots *CertPool + systemRootsErr error +) + +func systemRootsPool() *CertPool { + once.Do(initSystemRoots) + return systemRoots +} + +func initSystemRoots() { + systemRoots, systemRootsErr = loadSystemRoots() + if systemRootsErr != nil { + systemRoots = nil + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go new file mode 100644 index 00000000..13719338 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_bsd.go @@ -0,0 +1,15 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd netbsd openbsd + +package x509 + +// Possible certificate files; stop after finding one. 
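+// The list is consulted in order, so on a system where several of these
+// paths exist only the first readable file contributes roots.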
+var certFiles = []string{
+	"/usr/local/etc/ssl/cert.pem",            // FreeBSD
+	"/etc/ssl/cert.pem",                      // OpenBSD
+	"/usr/local/share/certs/ca-root-nss.crt", // DragonFly
+	"/etc/openssl/certs/ca-certificates.crt", // NetBSD
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
new file mode 100644
index 00000000..ad8000d4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_cgo_darwin.go
@@ -0,0 +1,306 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build cgo,!arm,!arm64,!ios
+
+package x509
+
+/*
+#cgo CFLAGS: -mmacosx-version-min=10.10 -D__MAC_OS_X_VERSION_MAX_ALLOWED=101300
+#cgo LDFLAGS: -framework CoreFoundation -framework Security
+
+#include <errno.h>
+#include <sys/sysctl.h>
+
+#include <CoreFoundation/CoreFoundation.h>
+#include <Security/Security.h>
+
+static bool isSSLPolicy(SecPolicyRef policyRef) {
+	if (!policyRef) {
+		return false;
+	}
+	CFDictionaryRef properties = SecPolicyCopyProperties(policyRef);
+	if (properties == NULL) {
+		return false;
+	}
+	CFTypeRef value = NULL;
+	if (CFDictionaryGetValueIfPresent(properties, kSecPolicyOid, (const void **)&value)) {
+		CFRelease(properties);
+		return CFEqual(value, kSecPolicyAppleSSL);
+	}
+	CFRelease(properties);
+	return false;
+}
+
+// sslTrustSettingsResult obtains the final kSecTrustSettingsResult value
+// for a certificate in the user or admin domain, combining usage constraints
+// for the SSL SecTrustSettingsPolicy, ignoring SecTrustSettingsKeyUsage and
+// kSecTrustSettingsAllowedError.
+// https://developer.apple.com/documentation/security/1400261-sectrustsettingscopytrustsetting
+static SInt32 sslTrustSettingsResult(SecCertificateRef cert) {
+	CFArrayRef trustSettings = NULL;
+	OSStatus err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainUser, &trustSettings);
+
+	// According to Apple's SecTrustServer.c, "user trust settings overrule admin trust settings",
+	// but the rules of the override are unclear. Let's assume admin trust settings are applicable
+	// if and only if user trust settings fail to load or are NULL.
+	if (err != errSecSuccess || trustSettings == NULL) {
+		if (trustSettings != NULL) CFRelease(trustSettings);
+		err = SecTrustSettingsCopyTrustSettings(cert, kSecTrustSettingsDomainAdmin, &trustSettings);
+	}
+
+	// > no trust settings [...] means "this certificate must be verified to a known trusted certificate”
+	if (err != errSecSuccess || trustSettings == NULL) {
+		if (trustSettings != NULL) CFRelease(trustSettings);
+		return kSecTrustSettingsResultUnspecified;
+	}
+
+	// > An empty trust settings array means "always trust this certificate” with an
+	// > overall trust setting for the certificate of kSecTrustSettingsResultTrustRoot.
+	if (CFArrayGetCount(trustSettings) == 0) {
+		CFRelease(trustSettings);
+		return kSecTrustSettingsResultTrustRoot;
+	}
+
+	// kSecTrustSettingsResult is defined as CFSTR("kSecTrustSettingsResult"),
+	// but the Go linker's internal linking mode can't handle CFSTR relocations.
+	// Create our own dynamic string instead and release it below.
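+	// The same workaround applies to the kSecTrustSettingsPolicy and
+	// kSecTrustSettingsPolicyString keys looked up below.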
+ CFStringRef _kSecTrustSettingsResult = CFStringCreateWithCString( + NULL, "kSecTrustSettingsResult", kCFStringEncodingUTF8); + CFStringRef _kSecTrustSettingsPolicy = CFStringCreateWithCString( + NULL, "kSecTrustSettingsPolicy", kCFStringEncodingUTF8); + CFStringRef _kSecTrustSettingsPolicyString = CFStringCreateWithCString( + NULL, "kSecTrustSettingsPolicyString", kCFStringEncodingUTF8); + + CFIndex m; SInt32 result = 0; + for (m = 0; m < CFArrayGetCount(trustSettings); m++) { + CFDictionaryRef tSetting = (CFDictionaryRef)CFArrayGetValueAtIndex(trustSettings, m); + + // First, check if this trust setting applies to our policy. We assume + // only one will. The docs suggest that there might be multiple applying + // but don't explain how to combine them. + SecPolicyRef policyRef; + if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsPolicy, (const void**)&policyRef)) { + if (!isSSLPolicy(policyRef)) { + continue; + } + } else { + continue; + } + + if (CFDictionaryContainsKey(tSetting, _kSecTrustSettingsPolicyString)) { + // Restricted to a hostname, not a root. + continue; + } + + CFNumberRef cfNum; + if (CFDictionaryGetValueIfPresent(tSetting, _kSecTrustSettingsResult, (const void**)&cfNum)) { + CFNumberGetValue(cfNum, kCFNumberSInt32Type, &result); + } else { + // > If the value of the kSecTrustSettingsResult component is not + // > kSecTrustSettingsResultUnspecified for a usage constraints dictionary that has + // > no constraints, the default value kSecTrustSettingsResultTrustRoot is assumed. + result = kSecTrustSettingsResultTrustRoot; + } + + break; + } + + // If trust settings are present, but none of them match the policy... + // the docs don't tell us what to do. + // + // "Trust settings for a given use apply if any of the dictionaries in the + // certificate’s trust settings array satisfies the specified use." suggests + // that it's as if there were no trust settings at all, so we should probably + // fallback to the admin trust settings. TODO. + if (result == 0) { + result = kSecTrustSettingsResultUnspecified; + } + + CFRelease(_kSecTrustSettingsPolicy); + CFRelease(_kSecTrustSettingsPolicyString); + CFRelease(_kSecTrustSettingsResult); + CFRelease(trustSettings); + + return result; +} + +// isRootCertificate reports whether Subject and Issuer match. +static Boolean isRootCertificate(SecCertificateRef cert, CFErrorRef *errRef) { + CFDataRef subjectName = SecCertificateCopyNormalizedSubjectContent(cert, errRef); + if (*errRef != NULL) { + return false; + } + CFDataRef issuerName = SecCertificateCopyNormalizedIssuerContent(cert, errRef); + if (*errRef != NULL) { + CFRelease(subjectName); + return false; + } + Boolean equal = CFEqual(subjectName, issuerName); + CFRelease(subjectName); + CFRelease(issuerName); + return equal; +} + +// FetchPEMRootsCTX509 fetches the system's list of trusted X.509 root certificates +// for the kSecTrustSettingsPolicy SSL. +// +// On success it returns 0 and fills pemRoots with a CFDataRef that contains the extracted root +// certificates of the system. On failure, the function returns -1. +// Additionally, it fills untrustedPemRoots with certs that must be removed from pemRoots. +// +// Note: The CFDataRef returned in pemRoots and untrustedPemRoots must +// be released (using CFRelease) after we've consumed its content. 
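+//
+// The Go caller (loadSystemRoots, below) parses both lists and drops any
+// certificate that also appears in untrustedPemRoots from the final pool.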
+int FetchPEMRootsCTX509(CFDataRef *pemRoots, CFDataRef *untrustedPemRoots, bool debugDarwinRoots) {
+	int i;
+
+	if (debugDarwinRoots) {
+		printf("crypto/x509: kSecTrustSettingsResultInvalid = %d\n", kSecTrustSettingsResultInvalid);
+		printf("crypto/x509: kSecTrustSettingsResultTrustRoot = %d\n", kSecTrustSettingsResultTrustRoot);
+		printf("crypto/x509: kSecTrustSettingsResultTrustAsRoot = %d\n", kSecTrustSettingsResultTrustAsRoot);
+		printf("crypto/x509: kSecTrustSettingsResultDeny = %d\n", kSecTrustSettingsResultDeny);
+		printf("crypto/x509: kSecTrustSettingsResultUnspecified = %d\n", kSecTrustSettingsResultUnspecified);
+	}
+
+	// Get certificates from all domains, not just System; this lets
+	// the user add CAs to their "login" keychain, and Admins add
+	// to the "System" keychain.
+	SecTrustSettingsDomain domains[] = { kSecTrustSettingsDomainSystem,
+		kSecTrustSettingsDomainAdmin, kSecTrustSettingsDomainUser };
+
+	int numDomains = sizeof(domains)/sizeof(SecTrustSettingsDomain);
+	if (pemRoots == NULL) {
+		return -1;
+	}
+
+	CFMutableDataRef combinedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
+	CFMutableDataRef combinedUntrustedData = CFDataCreateMutable(kCFAllocatorDefault, 0);
+	for (i = 0; i < numDomains; i++) {
+		int j;
+		CFArrayRef certs = NULL;
+		OSStatus err = SecTrustSettingsCopyCertificates(domains[i], &certs);
+		if (err != noErr) {
+			continue;
+		}
+
+		CFIndex numCerts = CFArrayGetCount(certs);
+		for (j = 0; j < numCerts; j++) {
+			CFDataRef data = NULL;
+			CFArrayRef trustSettings = NULL;
+			SecCertificateRef cert = (SecCertificateRef)CFArrayGetValueAtIndex(certs, j);
+			if (cert == NULL) {
+				continue;
+			}
+
+			SInt32 result;
+			if (domains[i] == kSecTrustSettingsDomainSystem) {
+				// Certs found in the system domain are always trusted. If the user
+				// configures "Never Trust" on such a cert, it will also be found in the
+				// admin or user domain, causing it to be added to untrustedPemRoots. The
+				// Go code will then clean this up.
+				result = kSecTrustSettingsResultTrustRoot;
+			} else {
+				result = sslTrustSettingsResult(cert);
+				if (debugDarwinRoots) {
+					CFErrorRef errRef = NULL;
+					CFStringRef summary = SecCertificateCopyShortDescription(NULL, cert, &errRef);
+					if (errRef != NULL) {
+						printf("crypto/x509: SecCertificateCopyShortDescription failed\n");
+						CFRelease(errRef);
+						continue;
+					}
+
+					CFIndex length = CFStringGetLength(summary);
+					CFIndex maxSize = CFStringGetMaximumSizeForEncoding(length, kCFStringEncodingUTF8) + 1;
+					char *buffer = malloc(maxSize);
+					if (CFStringGetCString(summary, buffer, maxSize, kCFStringEncodingUTF8)) {
+						printf("crypto/x509: %s returned %d\n", buffer, (int)result);
+					}
+					free(buffer);
+					CFRelease(summary);
+				}
+			}
+
+			CFMutableDataRef appendTo;
+			// > Note the distinction between the results kSecTrustSettingsResultTrustRoot
+			// > and kSecTrustSettingsResultTrustAsRoot: The former can only be applied to
+			// > root (self-signed) certificates; the latter can only be applied to
+			// > non-root certificates.
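+			// Enforce that distinction below: a TrustRoot result is honored
+			// only for self-signed certs, TrustAsRoot only for non-self-signed
+			// certs, and mismatches are skipped.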
+ if (result == kSecTrustSettingsResultTrustRoot) { + CFErrorRef errRef = NULL; + if (!isRootCertificate(cert, &errRef) || errRef != NULL) { + if (errRef != NULL) CFRelease(errRef); + continue; + } + + appendTo = combinedData; + } else if (result == kSecTrustSettingsResultTrustAsRoot) { + CFErrorRef errRef = NULL; + if (isRootCertificate(cert, &errRef) || errRef != NULL) { + if (errRef != NULL) CFRelease(errRef); + continue; + } + + appendTo = combinedData; + } else if (result == kSecTrustSettingsResultDeny) { + appendTo = combinedUntrustedData; + } else if (result == kSecTrustSettingsResultUnspecified) { + continue; + } else { + continue; + } + + err = SecItemExport(cert, kSecFormatX509Cert, kSecItemPemArmour, NULL, &data); + if (err != noErr) { + continue; + } + if (data != NULL) { + CFDataAppendBytes(appendTo, CFDataGetBytePtr(data), CFDataGetLength(data)); + CFRelease(data); + } + } + CFRelease(certs); + } + *pemRoots = combinedData; + *untrustedPemRoots = combinedUntrustedData; + return 0; +} +*/ +import "C" +import ( + "errors" + "unsafe" +) + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + + var data C.CFDataRef + setNilCFRef(&data) + var untrustedData C.CFDataRef + setNilCFRef(&untrustedData) + err := C.FetchPEMRootsCTX509(&data, &untrustedData, C.bool(debugDarwinRoots)) + if err == -1 { + return nil, errors.New("crypto/x509: failed to load darwin system roots with cgo") + } + + defer C.CFRelease(C.CFTypeRef(data)) + buf := C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(data)), C.int(C.CFDataGetLength(data))) + roots.AppendCertsFromPEM(buf) + if isNilCFRef(untrustedData) { + return roots, nil + } + defer C.CFRelease(C.CFTypeRef(untrustedData)) + buf = C.GoBytes(unsafe.Pointer(C.CFDataGetBytePtr(untrustedData)), C.int(C.CFDataGetLength(untrustedData))) + untrustedRoots := NewCertPool() + untrustedRoots.AppendCertsFromPEM(buf) + + trustedRoots := NewCertPool() + for _, c := range roots.certs { + if !untrustedRoots.contains(c) { + trustedRoots.AddCert(c) + } + } + return trustedRoots, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go new file mode 100644 index 00000000..282835c1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_darwin.go @@ -0,0 +1,289 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run root_darwin_arm_gen.go -output root_darwin_armx.go + +package x509 + +import ( + "bufio" + "bytes" + "crypto/sha1" + "encoding/pem" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "os/user" + "path/filepath" + "strings" + "sync" +) + +var debugDarwinRoots = strings.Contains(os.Getenv("GODEBUG"), "x509roots=1") + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +// This code is only used when compiling without cgo. +// It is here, instead of root_nocgo_darwin.go, so that tests can check it +// even if the tests are run with cgo enabled. +// The linker will not include these unused functions in binaries built with cgo enabled. + +// execSecurityRoots finds the macOS list of trusted root certificates +// using only command-line tools. This is our fallback path when cgo isn't available. 
+//
+// The strategy is as follows:
+//
+// 1. Run "security trust-settings-export" and "security
+//    trust-settings-export -d" to discover the set of certs with some
+//    user-tweaked trust policy. We're too lazy to parse the XML
+//    (Issue 26830) to understand what the trust
+//    policy actually is. We just learn that there is _some_ policy.
+//
+// 2. Run "security find-certificate" to dump the list of system root
+//    CAs in PEM format.
+//
+// 3. For each dumped cert, conditionally verify it with "security
+//    verify-cert" if that cert was in the set discovered in Step 1.
+//    Without the Step 1 optimization, running "security verify-cert"
+//    150-200 times takes 3.5 seconds. With the optimization, the
+//    whole process takes about 180 milliseconds with 1 untrusted root
+//    CA. (Compared to 110ms in the cgo path)
+func execSecurityRoots() (*CertPool, error) {
+	hasPolicy, err := getCertsWithTrustPolicy()
+	if err != nil {
+		return nil, err
+	}
+	if debugDarwinRoots {
+		fmt.Printf("crypto/x509: %d certs have a trust policy\n", len(hasPolicy))
+	}
+
+	keychains := []string{"/Library/Keychains/System.keychain"}
+
+	// Note that this results in trusting roots from $HOME/... (the environment
+	// variable), which might not be expected.
+	u, err := user.Current()
+	if err != nil {
+		if debugDarwinRoots {
+			fmt.Printf("crypto/x509: can't get user home directory: %v\n", err)
+		}
+	} else {
+		keychains = append(keychains,
+			filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain"),
+
+			// Fresh installs of Sierra use a slightly different path for the login keychain
+			filepath.Join(u.HomeDir, "/Library/Keychains/login.keychain-db"),
+		)
+	}
+
+	type rootCandidate struct {
+		c      *Certificate
+		system bool
+	}
+
+	var (
+		mu          sync.Mutex
+		roots       = NewCertPool()
+		numVerified int // number of execs of 'security verify-cert', for debug stats
+		wg          sync.WaitGroup
+		verifyCh    = make(chan rootCandidate)
+	)
+
+	// Using 4 goroutines to pipe into verify-cert seems to be
+	// about the best we can do. The verify-cert binary seems to
+	// just RPC to another server with coarse locking anyway, so
+	// running 16 at a time for instance doesn't help at all. Due
+	// to the "if hasPolicy" check below, we will rarely (or never)
+	// call verify-cert on stock macOS systems. The hope is that we
+	// only call verify-cert when the user has tweaked their trust
+	// policy. These 4 goroutines are only defensive in the
+	// pathological case of many trust edits.
+	for i := 0; i < 4; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			for cert := range verifyCh {
+				sha1CapHex := fmt.Sprintf("%X", sha1.Sum(cert.c.Raw))
+
+				var valid bool
+				verifyChecks := 0
+				if hasPolicy[sha1CapHex] {
+					verifyChecks++
+					valid = verifyCertWithSystem(cert.c)
+				} else {
+					// Certificates not in SystemRootCertificates without user
+					// or admin trust settings are not trusted.
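+					// System roots, by contrast, are trusted here without
+					// running verify-cert.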
+ valid = cert.system + } + + mu.Lock() + numVerified += verifyChecks + if valid { + roots.AddCert(cert.c) + } + mu.Unlock() + } + }() + } + err = forEachCertInKeychains(keychains, func(cert *Certificate) { + verifyCh <- rootCandidate{c: cert, system: false} + }) + if err != nil { + close(verifyCh) + return nil, err + } + err = forEachCertInKeychains([]string{ + "/System/Library/Keychains/SystemRootCertificates.keychain", + }, func(cert *Certificate) { + verifyCh <- rootCandidate{c: cert, system: true} + }) + if err != nil { + close(verifyCh) + return nil, err + } + close(verifyCh) + wg.Wait() + + if debugDarwinRoots { + fmt.Printf("crypto/x509: ran security verify-cert %d times\n", numVerified) + } + + return roots, nil +} + +func forEachCertInKeychains(paths []string, f func(*Certificate)) error { + args := append([]string{"find-certificate", "-a", "-p"}, paths...) + cmd := exec.Command("/usr/bin/security", args...) + data, err := cmd.Output() + if err != nil { + return err + } + for len(data) > 0 { + var block *pem.Block + block, data = pem.Decode(data) + if block == nil { + break + } + if block.Type != "CERTIFICATE" || len(block.Headers) != 0 { + continue + } + cert, err := ParseCertificate(block.Bytes) + if err != nil { + continue + } + f(cert) + } + return nil +} + +func verifyCertWithSystem(cert *Certificate) bool { + data := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", Bytes: cert.Raw, + }) + + f, err := ioutil.TempFile("", "cert") + if err != nil { + fmt.Fprintf(os.Stderr, "can't create temporary file for cert: %v", err) + return false + } + defer os.Remove(f.Name()) + if _, err := f.Write(data); err != nil { + fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) + return false + } + if err := f.Close(); err != nil { + fmt.Fprintf(os.Stderr, "can't write temporary file for cert: %v", err) + return false + } + cmd := exec.Command("/usr/bin/security", "verify-cert", "-p", "ssl", "-c", f.Name(), "-l", "-L") + var stderr bytes.Buffer + if debugDarwinRoots { + cmd.Stderr = &stderr + } + if err := cmd.Run(); err != nil { + if debugDarwinRoots { + fmt.Printf("crypto/x509: verify-cert rejected %s: %q\n", cert.Subject, bytes.TrimSpace(stderr.Bytes())) + } + return false + } + if debugDarwinRoots { + fmt.Printf("crypto/x509: verify-cert approved %s\n", cert.Subject) + } + return true +} + +// getCertsWithTrustPolicy returns the set of certs that have a +// possibly-altered trust policy. The keys of the map are capitalized +// sha1 hex of the raw cert. +// They are the certs that should be checked against `security +// verify-cert` to see whether the user altered the default trust +// settings. This code is only used for cgo-disabled builds. +func getCertsWithTrustPolicy() (map[string]bool, error) { + set := map[string]bool{} + td, err := ioutil.TempDir("", "x509trustpolicy") + if err != nil { + return nil, err + } + defer os.RemoveAll(td) + run := func(file string, args ...string) error { + file = filepath.Join(td, file) + args = append(args, file) + cmd := exec.Command("/usr/bin/security", args...) + var stderr bytes.Buffer + cmd.Stderr = &stderr + if err := cmd.Run(); err != nil { + // If there are no trust settings, the + // `security trust-settings-export` command + // fails with: + // exit status 1, SecTrustSettingsCreateExternalRepresentation: No Trust Settings were found. + // Rather than match on English substrings that are probably + // localized on macOS, just interpret any failure to mean that + // there are no trust settings. 
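+			// The failure is still logged below when GODEBUG includes
+			// x509roots=1.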
+ if debugDarwinRoots { + fmt.Printf("crypto/x509: exec %q: %v, %s\n", cmd.Args, err, stderr.Bytes()) + } + return nil + } + + f, err := os.Open(file) + if err != nil { + return err + } + defer f.Close() + + // Gather all the runs of 40 capitalized hex characters. + br := bufio.NewReader(f) + var hexBuf bytes.Buffer + for { + b, err := br.ReadByte() + isHex := ('A' <= b && b <= 'F') || ('0' <= b && b <= '9') + if isHex { + hexBuf.WriteByte(b) + } else { + if hexBuf.Len() == 40 { + set[hexBuf.String()] = true + } + hexBuf.Reset() + } + if err == io.EOF { + break + } + if err != nil { + return err + } + } + + return nil + } + if err := run("user", "trust-settings-export"); err != nil { + return nil, fmt.Errorf("dump-trust-settings (user): %v", err) + } + if err := run("admin", "trust-settings-export", "-d"); err != nil { + return nil, fmt.Errorf("dump-trust-settings (admin): %v", err) + } + return set, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go new file mode 100644 index 00000000..fcbbd6b1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_darwin_armx.go @@ -0,0 +1,4313 @@ +// Code generated by root_darwin_arm_gen --output root_darwin_armx.go; DO NOT EDIT. + +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build cgo +// +build darwin +// +build arm arm64 ios + +package x509 + +func loadSystemRoots() (*CertPool, error) { + p := NewCertPool() + p.AppendCertsFromPEM([]byte(systemRootsPEM)) + return p, nil +} + +const systemRootsPEM = ` +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEb +MBkGA1UECAwSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRow +GAYDVQQKDBFDb21vZG8gQ0EgTGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmlj +YXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAwMFoXDTI4MTIzMTIzNTk1OVowezEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNVBAMM +GEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQua +BtDFcCLNSS1UY8y2bmhGC1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe +3M/vg4aijJRPn2jymJBGhCfHdr/jzDUsi14HZGWCwEiwqJH5YZ92IFCokcdmtet4 +YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszWY19zjNoFmag4qMsXeDZR +rOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjHYpy+g8cm +ez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQU +oBEKIz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wewYDVR0fBHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20v +QUFBQ2VydGlmaWNhdGVTZXJ2aWNlcy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29t +b2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2VzLmNybDANBgkqhkiG9w0BAQUF +AAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm7l3sAg9g1o1Q +GE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2 +G9w84FoVxp7Z8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsi +l2D4kF501KKaU73yqWjgom7C12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3 +smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UE +BhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8w 
+MzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDkyMjExMjIwMlowazELMAkGA1UEBhMC +SVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1 +ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENB +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNv +UTufClrJwkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX +4ay8IMKx4INRimlNAJZaby/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9 +KK3giq0itFZljoZUj5NDKd45RnijMCO6zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/ +gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1fYVEiVRvjRuPjPdA1Yprb +rxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2oxgkg4YQ +51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2F +be8lEfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxe +KF+w6D9Fz8+vm2/7hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4F +v6MGn8i1zeQf1xcGDXqVdFUNaBr8EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbn +fpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5jF66CyCU3nuDuP/jVo23Eek7 +jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLYiDrIn3hm7Ynz +ezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAL +e3KHwGCmSUyIWOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70 +jsNjLiNmsGe+b7bAEzlgqqI0JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDz +WochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKxK3JCaKygvU5a2hi/a5iB0P2avl4V +SM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+Xlff1ANATIGk0k9j +pwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC4yyX +X04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+Ok +fcvHlXHo2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7R +K4X9p2jIugErsWx0Hbhzlefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btU +ZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXemOR/qnuOf0GZvBeyqdn6/axag67XH/JJU +LysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9vwGYT7JZVEc+NHt4bVaT +LnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGDCCAwCgAwIBAgIBATANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQGEwJTRTEU +MBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3 +b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwHhcNMDAwNTMw +MTAzODMxWhcNMjAwNTMwMTAzODMxWjBlMQswCQYDVQQGEwJTRTEUMBIGA1UEChML +QWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFkZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYD +VQQDExhBZGRUcnVzdCBDbGFzcyAxIENBIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUA +A4IBDwAwggEKAoIBAQCWltQhSWDia+hBBwzexODcEyPNwTXH+9ZOEQpnXvUGW2ul +CDtbKRY654eyNAbFvAWlA3yCyykQruGIgb3WntP+LVbBFc7jJp0VLhD7Bo8wBN6n +tGO0/7Gcrjyvd7ZWxbWroulpOj0OM3kyP3CCkplhbY0wCI9xP6ZIVxn4JdxLZlyl +dI+Yrsj5wAYi56xz36Uu+1LcsRVlIPo1Zmne3yzxbrww2ywkEtvrNTVokMsAsJch +PXQhI2U0K7t4WaPW4XY5mqRJjox0r26kmqPZm9I4XJuiGMx1I4S+6+JNM3GOGvDC ++Mcdoq0Dlyz4zyXG9rgkMbFjXZJ/Y/AlyVMuH79NAgMBAAGjgdIwgc8wHQYDVR0O +BBYEFJWxtPCUtr3H2tERCSG+wa9J/RB7MAsGA1UdDwQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MIGPBgNVHSMEgYcwgYSAFJWxtPCUtr3H2tERCSG+wa9J/RB7oWmkZzBl +MQswCQYDVQQGEwJTRTEUMBIGA1UEChMLQWRkVHJ1c3QgQUIxHTAbBgNVBAsTFEFk +ZFRydXN0IFRUUCBOZXR3b3JrMSEwHwYDVQQDExhBZGRUcnVzdCBDbGFzcyAxIENB +IFJvb3SCAQEwDQYJKoZIhvcNAQEFBQADggEBACxtZBsfzQ3duQH6lmM0MkhHma6X +7f1yFqZzR1r0693p9db7RcwpiURdv0Y5PejuvE1Uhh4dbOMXJ0PhiVYrqW9yTkkz +43J8KiOavD7/KCrto/8cI7pDVwlnTUtiBi34/2ydYB7YHEt9tTEv2dB8Xfjea4MY +eDdXL+gzB2ffHsdrKpV2ro9Xo/D0UrSpUwjP4E/TelOL/bscVjby/rK25Xa71SJl +pz/+0WatC7xrmYbvP33zGDLKe8bjq2RGlfgmadlVg3sslgf/WSxEo8bl6ancoWOA +WiFeIc9TVPC6b4nbqKqVz4vjccweGyBECMB6tkD9xOQ14R0WHNC8K47Wcdk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIENjCCAx6gAwIBAgIBATANBgkqhkiG9w0BAQUFADBvMQswCQYDVQQGEwJTRTEU 
+MBIGA1UEChMLQWRkVHJ1c3QgQUIxJjAkBgNVBAsTHUFkZFRydXN0IEV4dGVybmFs +IFRUUCBOZXR3b3JrMSIwIAYDVQQDExlBZGRUcnVzdCBFeHRlcm5hbCBDQSBSb290 +MB4XDTAwMDUzMDEwNDgzOFoXDTIwMDUzMDEwNDgzOFowbzELMAkGA1UEBhMCU0Ux +FDASBgNVBAoTC0FkZFRydXN0IEFCMSYwJAYDVQQLEx1BZGRUcnVzdCBFeHRlcm5h +bCBUVFAgTmV0d29yazEiMCAGA1UEAxMZQWRkVHJ1c3QgRXh0ZXJuYWwgQ0EgUm9v +dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALf3GjPm8gAELTngTlvt +H7xsD821+iO2zt6bETOXpClMfZOfvUq8k+0DGuOPz+VtUFrWlymUWoCwSXrbLpX9 +uMq/NzgtHj6RQa1wVsfwTz/oMp50ysiQVOnGXw94nZpAPA6sYapeFI+eh6FqUNzX +mk6vBbOmcZSccbNQYArHE504B4YCqOmoaSYYkKtMsE8jqzpPhNjfzp/haW+710LX +a0Tkx63ubUFfclpxCDezeWWkWaCUN/cALw3CknLa0Dhy2xSoRcRdKn23tNbE7qzN +E0S3ySvdQwAl+mG5aWpYIxG3pzOPVnVZ9c0p10a3CitlttNCbxWyuHv77+ldU9U0 +WicCAwEAAaOB3DCB2TAdBgNVHQ4EFgQUrb2YejS0Jvf6xCZU7wO94CTLVBowCwYD +VR0PBAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIwSBkTCBjoAUrb2YejS0 +Jvf6xCZU7wO94CTLVBqhc6RxMG8xCzAJBgNVBAYTAlNFMRQwEgYDVQQKEwtBZGRU +cnVzdCBBQjEmMCQGA1UECxMdQWRkVHJ1c3QgRXh0ZXJuYWwgVFRQIE5ldHdvcmsx +IjAgBgNVBAMTGUFkZFRydXN0IEV4dGVybmFsIENBIFJvb3SCAQEwDQYJKoZIhvcN +AQEFBQADggEBALCb4IUlwtYj4g+WBpKdQZic2YR5gdkeWxQHIzZlj7DYd7usQWxH +YINRsPkyPef89iYTx4AWpb9a/IfPeHmJIZriTAcKhjW88t5RxNKWt9x+Tu5w/Rw5 +6wwCURQtjr0W4MHfRnXnJK3s9EK0hZNwEGe6nQY1ShjTK3rMUUKhemPR5ruhxSvC +Nr4TDea9Y355e6cJDUCrat2PisP29owaQgVR1EX1n6diIWgVIEM8med8vSTYqZEX +c4g/VhsxOBi0cQ+azcgOno4uG+GMmIPLHzHxREzGBHNJdmAPx/i9F4BrLunMTA5a +mnkPIAou1Z5jJh5VkpTYghdae9C8x49OhgQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFVTCCBD2gAwIBAgIEO/OB0DANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQGEwJj +aDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZpY2VzMSIwIAYDVQQLExlD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQDEw1BZG1pbi1Sb290LUNB +MB4XDTAxMTExNTA4NTEwN1oXDTIxMTExMDA3NTEwN1owbDELMAkGA1UEBhMCY2gx +DjAMBgNVBAoTBWFkbWluMREwDwYDVQQLEwhTZXJ2aWNlczEiMCAGA1UECxMZQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdGllczEWMBQGA1UEAxMNQWRtaW4tUm9vdC1DQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMvgr0QUIv5qF0nyXZ3PXAJi +C4C5Wr+oVTN7oxIkXkxvO0GJToM9n7OVJjSmzBL0zJ2HXj0MDRcvhSY+KiZZc6Go +vDvr5Ua481l7ILFeQAFtumeza+vvxeL5Nd0Maga2miiacLNAKXbAcUYRa0Ov5VZB +++YcOYNNt/aisWbJqA2y8He+NsEgJzK5zNdayvYXQTZN+7tVgWOck16Da3+4FXdy +fH1NCWtZlebtMKtERtkVAaVbiWW24CjZKAiVfggjsiLo3yVMPGj3budLx5D9hEEm +vlyDOtcjebca+AcZglppWMX/iHIrx7740y0zd6cWEqiLIcZCrnpkr/KzwO135GkC +AwEAAaOCAf0wggH5MA8GA1UdEwEB/wQFMAMBAf8wgZkGA1UdIASBkTCBjjCBiwYI +YIV0AREDAQAwfzArBggrBgEFBQcCAjAfGh1UaGlzIGlzIHRoZSBBZG1pbi1Sb290 +LUNBIENQUzBQBggrBgEFBQcCARZEaHR0cDovL3d3dy5pbmZvcm1hdGlrLmFkbWlu +LmNoL1BLSS9saW5rcy9DUFNfMl8xNl83NTZfMV8xN18zXzFfMC5wZGYwfwYDVR0f +BHgwdjB0oHKgcKRuMGwxFjAUBgNVBAMTDUFkbWluLVJvb3QtQ0ExIjAgBgNVBAsT +GUNlcnRpZmljYXRpb24gQXV0aG9yaXRpZXMxETAPBgNVBAsTCFNlcnZpY2VzMQ4w +DAYDVQQKEwVhZG1pbjELMAkGA1UEBhMCY2gwHQYDVR0OBBYEFIKf+iNzIPGXi7JM +Tb5CxX9mzWToMIGZBgNVHSMEgZEwgY6AFIKf+iNzIPGXi7JMTb5CxX9mzWTooXCk +bjBsMQswCQYDVQQGEwJjaDEOMAwGA1UEChMFYWRtaW4xETAPBgNVBAsTCFNlcnZp +Y2VzMSIwIAYDVQQLExlDZXJ0aWZpY2F0aW9uIEF1dGhvcml0aWVzMRYwFAYDVQQD +Ew1BZG1pbi1Sb290LUNBggQ784HQMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0B +AQUFAAOCAQEAeE96XCYRpy6umkPKXDWCRn7INo96ZrWpMggcDORuofHIwdTkgOeM +vWOxDN/yuT7CC3FAaUajbPRbDw0hRMcqKz0aC8CgwcyIyhw/rFK29mfNTG3EviP9 +QSsEbnelFnjpm1wjz4EaBiFjatwpUbI6+Zv3XbEt9QQXBn+c6DeFLe4xvC4B+MTr +a440xTk59pSYux8OHhEvqIwHCkiijGqZhTS3KmGFeBopaR+dJVBRBMoXwzk4B3Hn +0Zib1dEYFZa84vPJZyvxCbLOnPRDJgH6V2uQqbG+6DXVaf/wORVOvF/wzzv0viM/ +RWbEtJZdvo8N3sdtCULzifnxP/V0T9+4ZQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz 
+dCBDb21tZXJjaWFsMB4XDTEwMDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6EqdbDuKP +Hx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yr +ba0F8PrVC8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPAL +MeIrJmqbTFeurCA+ukV6BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1 +yHp52UKqK39c/s4mT6NmgTWvRLpUHhwwMmWd5jyTXlBOeuM61G7MGvv50jeuJCqr +VwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNVHQ4EFgQUnZPGU4teyq8/ +nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYG +XUPGhi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNj +vbz4YYCanrHOQnDiqX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivt +Z8SOyUOyXGsViQK8YvxO8rUzqrJv0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9g +N53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0khsUlHRUe072o0EclNmsxZt9YC +nlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVz +dCBOZXR3b3JraW5nMB4XDTEwMDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDEL +MAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZp +cm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SEHi3y +YJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbua +kCNrmreIdIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRL +QESxG9fhwoXA3hA/Pe24/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp +6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gbh+0t+nvujArjqWaJGctB+d1ENmHP4ndG +yH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNVHQ4EFgQUBx/S55zawm6i +QLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwDQYJ +KoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfO +tDIuUFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzu +QY0x2+c06lkh1QF612S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZ +Lgo/bNjR9eUJtGxUAArgFU2HdW23WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4u +olu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9/ZFvgrG+CJPbFEfxojfHRZ48 +x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMC +VVMxFDASBgNVBAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQ +cmVtaXVtIEVDQzAeFw0xMDAxMjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJ +BgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1UcnVzdDEgMB4GA1UEAwwXQWZmaXJt +VHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQNMF4bFZ0D +0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQN8O9 +ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0G +A1UdDgQWBBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/Vs +aobgxCd05DhT1wV/GzTjxi+zygk8N53X57hG8f2h4nECMEJZh0PUUd+60wkyWs6I +flc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKMeQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UE +BhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVz +dCBQcmVtaXVtMB4XDTEwMDEyOTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkG +A1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1U +cnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAxBLf +qV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtnBKAQ +JG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ 
++jjeRFcV5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrS +s8PhaJyJ+HoAVt70VZVs+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5 +HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmdGPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d7 +70O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5Rp9EixAqnOEhss/n/fauG +V+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NIS+LI+H+S +qHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S +5u046uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4Ia +C1nEWTJ3s7xgaVY5/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TX +OwF0lkLgAOIua+rF7nKsu7/+6qqo+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYE +FJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByvMiPIs0laUZx2 +KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B +8OWycvpEgjNC6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQ +MKSOyARiqcTtNd56l+0OOF6SL5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc +0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK+4w1IX2COPKpVJEZNZOUbWo6xbLQ +u4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmVBtWVyuEklut89pMF +u+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFgIxpH +YoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8 +GKa1qF60g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaO +RtGdFNrHF+QFlozEJLUbzxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6e +KeC2uAloGRwYQw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIIGDCCBgCgAwIBAgIGAT8vMXfmMA0GCSqGSIb3DQEBCwUAMIIBCjELMAkGA1UE +BhMCRVMxEjAQBgNVBAgMCUJhcmNlbG9uYTFYMFYGA1UEBwxPQmFyY2Vsb25hIChz +ZWUgY3VycmVudCBhZGRyZXNzIGF0IGh0dHA6Ly93d3cuYW5mLmVzL2VzL2FkZHJl +c3MtZGlyZWNjaW9uLmh0bWwgKTEnMCUGA1UECgweQU5GIEF1dG9yaWRhZCBkZSBD +ZXJ0aWZpY2FjaW9uMRcwFQYDVQQLDA5BTkYgQ2xhc2UgMSBDQTEaMBgGCSqGSIb3 +DQEJARYLaW5mb0BhbmYuZXMxEjAQBgNVBAUTCUc2MzI4NzUxMDEbMBkGA1UEAwwS +QU5GIEdsb2JhbCBSb290IENBMB4XDTEzMDYxMDE3NDUzOFoXDTMzMDYwNTE3NDUz +OFowggEKMQswCQYDVQQGEwJFUzESMBAGA1UECAwJQmFyY2Vsb25hMVgwVgYDVQQH +DE9CYXJjZWxvbmEgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgaHR0cDovL3d3dy5h +bmYuZXMvZXMvYWRkcmVzcy1kaXJlY2Npb24uaHRtbCApMScwJQYDVQQKDB5BTkYg +QXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xFzAVBgNVBAsMDkFORiBDbGFzZSAx +IENBMRowGAYJKoZIhvcNAQkBFgtpbmZvQGFuZi5lczESMBAGA1UEBRMJRzYzMjg3 +NTEwMRswGQYDVQQDDBJBTkYgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDHPi9xy4wynbcUbWjorVUgQKeUAVh937J7P37XmsfH +ZLOBZKIIlhhCtRwnDlg7x+BUvtJOTkIbEGMujDygUQ2s3HDYr5I41hTyM2Pl0cq2 +EuSGEbPIHb3dEX8NAguFexM0jqNjrreN3hM2/+TOkAxSdDJP2aMurlySC5zwl47K +ZLHtcVrkZnkDa0o5iN24hJT4vBDT4t2q9khQ+qb1D8KgCOb02r1PxWXu3vfd6Ha2 +mkdB97iGuEh5gO2n4yOmFS5goFlVA2UdPbbhJsb8oKVKDd+YdCKGQDCkQyG4AjmC +YiNm3UPG/qtftTH5cWri67DlLtm6fyUFOMmO6NSh0RtR745pL8GyWJUanyq/Q4bF +HQB21E+WtTsCaqjGaoFcrBunMypmCd+jUZXl27TYENRFbrwNdAh7m2UztcIyb+Sg +VJFyfvVsBQNvnp7GPimVxXZNc4VpxEXObRuPWQN1oZN/90PcZVqTia/SHzEyTryL +ckhiLG3jZiaFZ7pTZ5I9wti9Pn+4kOHvE3Y/4nEnUo4mTxPX9pOlinF+VCiybtV2 +u1KSlc+YaIM7VmuyndDZCJRXm3v0/qTE7t5A5fArZl9lvibigMbWB8fpD+c1GpGH +Eo8NRY0lkaM+DkIqQoaziIsz3IKJrfdKaq9bQMSlIfameKBZ8fNYTBZrH9KZAIhz +YwIDAQABo4IBfjCCAXowHQYDVR0OBBYEFIf6nt9SdnXsSUogb1twlo+d77sXMB8G +A1UdIwQYMBaAFIf6nt9SdnXsSUogb1twlo+d77sXMA8GA1UdEwEB/wQFMAMBAf8w +DgYDVR0PAQH/BAQDAgEGMIIBFQYDVR0RBIIBDDCCAQiCEWh0dHA6Ly93d3cuYW5m +LmVzgQtpbmZvQGFuZi5lc6SB5TCB4jE0MDIGA1UECQwrR3JhbiBWaWEgZGUgbGVz +IENvcnRzIENhdGFsYW5lcy4gOTk2LiAwODAxODESMBAGA1UEBwwJQmFyY2Vsb25h +MScwJQYDVQQKDB5BTkYgQXV0b3JpZGFkIGRlIENlcnRpZmljYWNpb24xEjAQBgNV +BAUTCUc2MzI4NzUxMDFZMFcGA1UECwxQSW5zY3JpdGEgZW4gZWwgTWluaXN0ZXJp 
+byBkZWwgSW50ZXJpb3IgZGUgRXNwYcOxYSBjb24gZWwgbnVtZXJvIG5hY2lvbmFs +IDE3MS40NDMwDQYJKoZIhvcNAQELBQADggIBAIgR9tFTZ9BCYg+HViMxOfF0MHN2 +Pe/eC128ARdS+GH8A4thtbqiH/SOYbWofO/0zssHhNKa5iQEj45lCAb8BANpWJMD +nWkPr6jq2+50a6d0MMgSS2l1rvjSF+3nIrEuicshHXSTi3q/vBLKr7uGKMVFaM68 +XAropIwk6ndlA0JseARSPsbetv7ALESMIZAxlHV1TcctYHd0bB3c/Jz+PLszJQqs +Cg/kBPo2D111OXZkIY8W/fJuG9veR783khAK2gUnC0zLLCNsYzEbdGt8zUmBsAsM +cGxqGm6B6vDXd65OxWqw13xdq/24+5R8Ng1PF9tvfjZkUFBF30CxjWur7P90WiKI +G7IGfr6BE1NgXlhEQQu4F+HizB1ypEPzGWltecXQ4yOzO+H0WfFTjLTYX6VSveyW +DQV18ixF8M4tHP/SwNE+yyv2b2JJ3/3RpxjtFlLk+opJ574x0gD/dMJuWTH0JqVY +3PbRfE1jIxFpk164Qz/Xp7H7w7f6xh+tQCkBs3PUYmnGIZcPwq44Q6JHlCNsKx4K +hxfggTvRCk4w79cUID45c2qDsRCqTPoOo/cbOpcfVhbH9LdMORpmuLwNogRZEUSE +fWpqR9q+0kcQf4zGSWIURIyDrogdpDgoHDxktqgMgc+qA4ZE2WQl1D8hmev53A46 +lUSrWUiWfDXtK3ux +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIIAeDltYNno+AwDQYJKoZIhvcNAQEMBQAwZzEbMBkGA1UE +AwwSQXBwbGUgUm9vdCBDQSAtIEcyMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0 +aW9uIEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMw +HhcNMTQwNDMwMTgxMDA5WhcNMzkwNDMwMTgxMDA5WjBnMRswGQYDVQQDDBJBcHBs +ZSBSb290IENBIC0gRzIxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBANgREkhI2imKScUcx+xuM23+TfvgHN6s +XuI2pyT5f1BrTM65MFQn5bPW7SXmMLYFN14UIhHF6Kob0vuy0gmVOKTvKkmMXT5x +ZgM4+xb1hYjkWpIMBDLyyED7Ul+f9sDx47pFoFDVEovy3d6RhiPw9bZyLgHaC/Yu +OQhfGaFjQQscp5TBhsRTL3b2CtcM0YM/GlMZ81fVJ3/8E7j4ko380yhDPLVoACVd +J2LT3VXdRCCQgzWTxb+4Gftr49wIQuavbfqeQMpOhYV4SbHXw8EwOTKrfl+q04tv +ny0aIWhwZ7Oj8ZhBbZF8+NfbqOdfIRqMM78xdLe40fTgIvS/cjTf94FNcX1RoeKz +8NMoFnNvzcytN31O661A4T+B/fc9Cj6i8b0xlilZ3MIZgIxbdMYs0xBTJh0UT8TU +gWY8h2czJxQI6bR3hDRSj4n4aJgXv8O7qhOTH11UL6jHfPsNFL4VPSQ08prcdUFm +IrQB1guvkJ4M6mL4m1k8COKWNORj3rw31OsMiANDC1CvoDTdUE0V+1ok2Az6DGOe +HwOx4e7hqkP0ZmUoNwIx7wHHHtHMn23KVDpA287PT0aLSmWaasZobNfMmRtHsHLD +d4/E92GcdB/O/WuhwpyUgquUoue9G7q5cDmVF8Up8zlYNPXEpMZ7YLlmQ1A/bmH8 +DvmGqmAMQ0uVAgMBAAGjQjBAMB0GA1UdDgQWBBTEmRNsGAPCe8CjoA1/coB6HHcm +jTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQwF +AAOCAgEAUabz4vS4PZO/Lc4Pu1vhVRROTtHlznldgX/+tvCHM/jvlOV+3Gp5pxy+ +8JS3ptEwnMgNCnWefZKVfhidfsJxaXwU6s+DDuQUQp50DhDNqxq6EWGBeNjxtUVA +eKuowM77fWM3aPbn+6/Gw0vsHzYmE1SGlHKy6gLti23kDKaQwFd1z4xCfVzmMX3z +ybKSaUYOiPjjLUKyOKimGY3xn83uamW8GrAlvacp/fQ+onVJv57byfenHmOZ4VxG +/5IFjPoeIPmGlFYl5bRXOJ3riGQUIUkhOb9iZqmxospvPyFgxYnURTbImHy99v6Z +SYA7LNKmp4gDBDEZt7Y6YUX6yfIjyGNzv1aJMbDZfGKnexWoiIqrOEDCzBL/FePw +N983csvMmOa/orz6JopxVtfnJBtIRD6e/J/JzBrsQzwBvDR4yGn1xuZW7AYJNpDr +FEobXsmII9oDMJELuDY++ee1KG++P+w8j2Ud5cAeh6Squpj9kuNsJnfdBrRkBof0 +Tta6SqoWqPQFZ2aWuuJVecMsXUmPgEkrihLHdoBR37q9ZV0+N0djMenl9MU/S60E +inpxLK8JQzcPqOMyT/RFtm2XNuyE9QoB6he7hY1Ck3DDUOUUi78/w0EP3SIEIwiK +um1xRKtzCTrJ+VKACd+66eYWyi4uTLLT3OUEVLLUNIAytbwPF+E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICQzCCAcmgAwIBAgIILcX8iNLFS5UwCgYIKoZIzj0EAwMwZzEbMBkGA1UEAwwS +QXBwbGUgUm9vdCBDQSAtIEczMSYwJAYDVQQLDB1BcHBsZSBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTETMBEGA1UECgwKQXBwbGUgSW5jLjELMAkGA1UEBhMCVVMwHhcN +MTQwNDMwMTgxOTA2WhcNMzkwNDMwMTgxOTA2WjBnMRswGQYDVQQDDBJBcHBsZSBS +b290IENBIC0gRzMxJjAkBgNVBAsMHUFwcGxlIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRMwEQYDVQQKDApBcHBsZSBJbmMuMQswCQYDVQQGEwJVUzB2MBAGByqGSM49 +AgEGBSuBBAAiA2IABJjpLz1AcqTtkyJygRMc3RCV8cWjTnHcFBbZDuWmBSp3ZHtf +TjjTuxxEtX/1H7YyYl3J6YRbTzBPEVoA/VhYDKX1DyxNB0cTddqXl5dvMVztK517 +IDvYuVTZXpmkOlEKMaNCMEAwHQYDVR0OBBYEFLuw3qFYM4iapIqZ3r6966/ayySr +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMAoGCCqGSM49BAMDA2gA 
+MGUCMQCD6cHEFl4aXTQY2e3v9GwOAEZLuN+yRhHFD/3meoyhpmvOwgPUnPWTxnS4 +at+qIxUCMG1mihDK1A3UT82NQz60imOlM27jbdoXt2QfyFMm+YhidDkLF1vLUagM +6BgD56KyKA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEuzCCA6OgAwIBAgIBAjANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQGEwJVUzET +MBEGA1UEChMKQXBwbGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxFjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwHhcNMDYwNDI1MjE0 +MDM2WhcNMzUwMjA5MjE0MDM2WjBiMQswCQYDVQQGEwJVUzETMBEGA1UEChMKQXBw +bGUgSW5jLjEmMCQGA1UECxMdQXBwbGUgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx +FjAUBgNVBAMTDUFwcGxlIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDkkakJH5HbHkdQ6wXtXnmELes2oldMVeyLGYne+Uts9QerIjAC6Bg+ ++FAJ039BqJj50cpmnCRrEdCju+QbKsMflZ56DKRHi1vUFjczy8QPTc4UadHJGXL1 +XQ7Vf1+b8iUDulWPTV0N8WQ1IxVLFVkds5T39pyez1C6wVhQZ48ItCD3y6wsIG9w +tj8BMIy3Q88PnT3zK0koGsj+zrW5DtleHNbLPbU6rfQPDgCSC7EhFi501TwN22IW +q6NxkkdTVcGvL0Gz+PvjcM3mo0xFfh9Ma1CWQYnEdGILEINBhzOKgbEwWOxaBDKM +aLOPHd5lc/9nXmW8Sdh2nzMUZaF3lMktAgMBAAGjggF6MIIBdjAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUK9BpR5R2Cf70a40uQKb3 +R01/CF4wHwYDVR0jBBgwFoAUK9BpR5R2Cf70a40uQKb3R01/CF4wggERBgNVHSAE +ggEIMIIBBDCCAQAGCSqGSIb3Y2QFATCB8jAqBggrBgEFBQcCARYeaHR0cHM6Ly93 +d3cuYXBwbGUuY29tL2FwcGxlY2EvMIHDBggrBgEFBQcCAjCBthqBs1JlbGlhbmNl +IG9uIHRoaXMgY2VydGlmaWNhdGUgYnkgYW55IHBhcnR5IGFzc3VtZXMgYWNjZXB0 +YW5jZSBvZiB0aGUgdGhlbiBhcHBsaWNhYmxlIHN0YW5kYXJkIHRlcm1zIGFuZCBj +b25kaXRpb25zIG9mIHVzZSwgY2VydGlmaWNhdGUgcG9saWN5IGFuZCBjZXJ0aWZp +Y2F0aW9uIHByYWN0aWNlIHN0YXRlbWVudHMuMA0GCSqGSIb3DQEBBQUAA4IBAQBc +NplMLXi37Yyb3PN3m/J20ncwT8EfhYOFG5k9RzfyqZtAjizUsZAS2L70c5vu0mQP +y3lPNNiiPvl4/2vIB+x9OYOLUyDTOMSxv5pPCmv/K/xZpwUJfBdAVhEedNO3iyM7 +R6PVbyTi69G3cN8PReEnyvFteO3ntRcXqNx+IjXKJdXZD9Zr1KIkIxH3oayPc4Fg +xhtbCS+SsvhESPBgOJ4V9T0mZyCKM2r3DYLP3uujL/lTaltkwGMzd/c6ByxW69oP +IQ7aunMZT7XZNn/Bh1XZp5m5MkL72NVxnn6hUrcbvZNCJBIqxw8dtk2cXmPIS4AX +UKqK1drk/NAJBzewdXUh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFujCCBKKgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhjELMAkGA1UEBhMCVVMx +HTAbBgNVBAoTFEFwcGxlIENvbXB1dGVyLCBJbmMuMS0wKwYDVQQLEyRBcHBsZSBD +b21wdXRlciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxKTAnBgNVBAMTIEFwcGxlIFJv +b3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MB4XDTA1MDIxMDAwMTgxNFoXDTI1MDIx +MDAwMTgxNFowgYYxCzAJBgNVBAYTAlVTMR0wGwYDVQQKExRBcHBsZSBDb21wdXRl +ciwgSW5jLjEtMCsGA1UECxMkQXBwbGUgQ29tcHV0ZXIgQ2VydGlmaWNhdGUgQXV0 +aG9yaXR5MSkwJwYDVQQDEyBBcHBsZSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOSRqQkfkdseR1DrBe1e +eYQt6zaiV0xV7IsZid75S2z1B6siMALoGD74UAnTf0GomPnRymacJGsR0KO75Bsq +wx+VnnoMpEeLW9QWNzPLxA9NzhRp0ckZcvVdDtV/X5vyJQO6VY9NXQ3xZDUjFUsV +WR2zlPf2nJ7PULrBWFBnjwi0IPfLrCwgb3C2PwEwjLdDzw+dPfMrSSgayP7OtbkO +2V4c1ss9tTqt9A8OAJILsSEWLnTVPA3bYharo3GSR1NVwa8vQbP4++NwzeajTEV+ +H0xrUJZBicR0YgsQg0GHM4qBsTBY7FoEMoxos48d3mVz/2deZbxJ2HafMxRloXeU +yS0CAwEAAaOCAi8wggIrMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/ +MB0GA1UdDgQWBBQr0GlHlHYJ/vRrjS5ApvdHTX8IXjAfBgNVHSMEGDAWgBQr0GlH +lHYJ/vRrjS5ApvdHTX8IXjCCASkGA1UdIASCASAwggEcMIIBGAYJKoZIhvdjZAUB +MIIBCTBBBggrBgEFBQcCARY1aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmlj +YXRlYXV0aG9yaXR5L3Rlcm1zLmh0bWwwgcMGCCsGAQUFBwICMIG2GoGzUmVsaWFu +Y2Ugb24gdGhpcyBjZXJ0aWZpY2F0ZSBieSBhbnkgcGFydHkgYXNzdW1lcyBhY2Nl +cHRhbmNlIG9mIHRoZSB0aGVuIGFwcGxpY2FibGUgc3RhbmRhcmQgdGVybXMgYW5k +IGNvbmRpdGlvbnMgb2YgdXNlLCBjZXJ0aWZpY2F0ZSBwb2xpY3kgYW5kIGNlcnRp +ZmljYXRpb24gcHJhY3RpY2Ugc3RhdGVtZW50cy4wRAYDVR0fBD0wOzA5oDegNYYz +aHR0cHM6Ly93d3cuYXBwbGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L3Jvb3Qu +Y3JsMFUGCCsGAQUFBwEBBEkwRzBFBggrBgEFBQcwAoY5aHR0cHM6Ly93d3cuYXBw 
+bGUuY29tL2NlcnRpZmljYXRlYXV0aG9yaXR5L2Nhc2lnbmVycy5odG1sMA0GCSqG +SIb3DQEBBQUAA4IBAQCd2i0oWC99dgS5BNM+zrdmY06PL9T+S61yvaM5xlJNBZhS +9YlRASR5vhoy9+VEi0tEBzmC1lrKtCBe2a4VXR2MHTK/ODFiSF3H4ZCx+CRA+F9Y +m1FdV53B5f88zHIhbsTp6aF31ywXJsM/65roCwO66bNKcuszCVut5mIxauivL9Wv +Hld2j383LS4CXN1jyfJxuCZA3xWNdUQ/eb3mHZnhQyw+rW++uaT+DjUZUWOxw961 +kj5ReAFziqQjyqSI8R5cH0EWLX6VCqrpiUGYGxrdyyC/R14MJsVVNU3GMIuZZxTH +CR+6R8faAQmHJEKVvRNgGQrv6n8Obs3BREM6StXj +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9zCCAt+gAwIBAgILMTI1MzcyODI4MjgwDQYJKoZIhvcNAQELBQAwWDELMAkG +A1UEBhMCSlAxHDAaBgNVBAoTE0phcGFuZXNlIEdvdmVybm1lbnQxDTALBgNVBAsT +BEdQS0kxHDAaBgNVBAMTE0FwcGxpY2F0aW9uQ0EyIFJvb3QwHhcNMTMwMzEyMTUw +MDAwWhcNMzMwMzEyMTUwMDAwWjBYMQswCQYDVQQGEwJKUDEcMBoGA1UEChMTSmFw +YW5lc2UgR292ZXJubWVudDENMAsGA1UECxMER1BLSTEcMBoGA1UEAxMTQXBwbGlj +YXRpb25DQTIgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKaq +rSVl1gAR1uh6dqr05rRL88zDUrSNrKZPtZJxb0a11a2LEiIXJc5F6BR6hZrkIxCo ++rFnUOVtR+BqiRPjrq418fRCxQX3TZd+PCj8sCaRHoweOBqW3FhEl2LjMsjRFUFN +dZh4vqtoqV7tR76kuo6hApfek3SZbWe0BSXulMjtqqS6MmxCEeu+yxcGkOGThchk +KM4fR8fAXWDudjbcMztR63vPctgPeKgZggiQPhqYjY60zxU2pm7dt+JNQCBT2XYq +0HisifBPizJtROouurCp64ndt295D6uBbrjmiykLWa+2SQ1RLKn9nShjZrhwlXOa +2Po7M7xCQhsyrLEy+z0CAwEAAaOBwTCBvjAdBgNVHQ4EFgQUVqesqgIdsqw9kA6g +by5Bxnbne9owDgYDVR0PAQH/BAQDAgEGMHwGA1UdEQR1MHOkcTBvMQswCQYDVQQG +EwJKUDEYMBYGA1UECgwP5pel5pys5Zu95pS/5bqcMRswGQYDVQQLDBLmlL/lupzo +qo3oqLzln7rnm6QxKTAnBgNVBAMMIOOCouODl+ODquOCseODvOOCt+ODp+ODs0NB +MiBSb290MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAH+aCXWs +B9FydC53VzDCBJzUgKaD56WgG5/+q/OAvdVKo6GPtkxgEefK4WCB10jBIFmlYTKL +nZ6X02aD2mUuWD7b5S+lzYxzplG+WCigeVxpL0PfY7KJR8q73rk0EWOgDiUX5Yf0 +HbCwpc9BqHTG6FPVQvSCLVMJEWgmcZR1E02qdog8dLHW40xPYsNJTE5t8XB+w3+m +Bcx4m+mB26jIx1ye/JKSLaaX8ji1bnOVDMA/zqaUMLX6BbfeniCq/BNkyYq6ZO/i +Y+TYmK5rtT6mVbgzPixy+ywRAPtbFi+E0hOe+gXFwctyTiLdhMpLvNIthhoEdlkf +SUJiOxMfFui61/0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UE +AwwVQXRvcyBUcnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQG +EwJERTAeFw0xMTA3MDcxNDU4MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMM +FUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsGA1UECgwEQXRvczELMAkGA1UEBhMC +REUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCVhTuXbyo7LjvPpvMp +Nb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr54rM +VD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+ +SZFhyBH+DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ +4J7sVaE3IqKHBAUsR320HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0L +cp2AMBYHlT8oDv3FdU9T1nSatCQujgKRz3bFmx5VdJx4IbHwLfELn8LVlhgf8FQi +eowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7Rl+lwrrw7GWzbITAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZbNshMBgG +A1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3 +DQEBCwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8j +vZfza1zv7v1Apt+hk6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kP +DpFrdRbhIfzYJsdHt6bPWHJxfrrhTZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pc +maHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a961qn8FYiqTxlVMYVqL2Gns2D +lmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G3mB/ufNPRJLv +KrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UE +BhMCRVMxQjBABgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1h +cHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEy +MzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIwQAYDVQQDDDlBdXRvcmlkYWQgZGUg 
+Q2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBBNjI2MzQwNjgwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDDUtd9 +thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQM +cas9UX4PB99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefG +L9ItWY16Ck6WaVICqjaY7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15i +NA9wBj4gGFrO93IbJWyTdBSTo3OxDqqHECNZXyAFGUftaI6SEspd/NYrspI8IM/h +X68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyIplD9amML9ZMWGxmPsu2b +m8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctXMbScyJCy +Z/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirja +EbsXLZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/T +KI8xWVvTyQKmtFLKbpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF +6NkBiDkal4ZkQdU7hwxu+g/GvUgUvzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVh +OSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1UdEwEB/wQIMAYBAf8CAQEwDgYD +VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNHDhpkLzCBpgYD +VR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBv +ACAAZABlACAAbABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBl +AGwAbwBuAGEAIAAwADgAMAAxADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF +661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx51tkljYyGOylMnfX40S2wBEqgLk9 +am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qkR71kMrv2JYSiJ0L1 +ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaPT481 +PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS +3a/DTg4fJl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5k +SeTy36LssUzAKh3ntLFlosS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF +3dvd6qJ2gHN99ZwExEWN57kci57q13XRcrHedUTnQn3iV2t93Jm8PYMo6oCTjcVM +ZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoRsaS8I8nkvof/uZS2+F0g +StRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTDKCOM/icz +Q0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQB +jLMi6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIJmzCCB4OgAwIBAgIBATANBgkqhkiG9w0BAQwFADCCAR4xPjA8BgNVBAMTNUF1 +dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIFJhaXogZGVsIEVzdGFkbyBWZW5lem9s +YW5vMQswCQYDVQQGEwJWRTEQMA4GA1UEBxMHQ2FyYWNhczEZMBcGA1UECBMQRGlz +dHJpdG8gQ2FwaXRhbDE2MDQGA1UEChMtU2lzdGVtYSBOYWNpb25hbCBkZSBDZXJ0 +aWZpY2FjaW9uIEVsZWN0cm9uaWNhMUMwQQYDVQQLEzpTdXBlcmludGVuZGVuY2lh +IGRlIFNlcnZpY2lvcyBkZSBDZXJ0aWZpY2FjaW9uIEVsZWN0cm9uaWNhMSUwIwYJ +KoZIhvcNAQkBFhZhY3JhaXpAc3VzY2VydGUuZ29iLnZlMB4XDTEwMTIyMjE4MDgy +MVoXDTMwMTIxNzIzNTk1OVowggEeMT4wPAYDVQQDEzVBdXRvcmlkYWQgZGUgQ2Vy +dGlmaWNhY2lvbiBSYWl6IGRlbCBFc3RhZG8gVmVuZXpvbGFubzELMAkGA1UEBhMC +VkUxEDAOBgNVBAcTB0NhcmFjYXMxGTAXBgNVBAgTEERpc3RyaXRvIENhcGl0YWwx +NjA0BgNVBAoTLVNpc3RlbWEgTmFjaW9uYWwgZGUgQ2VydGlmaWNhY2lvbiBFbGVj +dHJvbmljYTFDMEEGA1UECxM6U3VwZXJpbnRlbmRlbmNpYSBkZSBTZXJ2aWNpb3Mg +ZGUgQ2VydGlmaWNhY2lvbiBFbGVjdHJvbmljYTElMCMGCSqGSIb3DQEJARYWYWNy +YWl6QHN1c2NlcnRlLmdvYi52ZTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAME77xNS8ZlW47RsBeEaaRZhJoZ4rw785UAFCuPZOAVMqNS1wMYqzy95q6Gk +UO81ER/ugiQX/KMcq/4HBn83fwdYWxPZfwBfK7BP2p/JsFgzYeFP0BXOLmvoJIzl +Jb6FW+1MPwGBjuaZGFImWZsSmGUclb51mRYMZETh9/J5CLThR1exStxHQptwSzra +zNFpkQY/zmj7+YZNA9yDoroVFv6sybYOZ7OxNDo7zkSLo45I7gMwtxqWZ8VkJZkC +8+p0dX6mkhUT0QAV64Zc9HsZiH/oLhEkXjhrgZ28cF73MXIqLx1fyM4kPH1yOJi/ +R72nMwL7D+Sd6mZgI035TxuHXc2/uOwXfKrrTjaJDz8Jp6DdessOkxIgkKXRjP+F +K3ze3n4NUIRGhGRtyvEjK95/2g02t6PeYiYVGur6ruS49n0RAaSS0/LJb6XzaAAe +0mmO2evnEqxIKwy2mZRNPfAVW1l3wCnWiUwryBU6OsbFcFFrQm+00wOicXvOTHBM +aiCVAVZTb9RSLyi+LJ1llzJZO3pq3IRiiBj38Nooo+2ZNbMEciSgmig7YXaUcmud +SVQvLSL+Yw+SqawyezwZuASbp7d/0rutQ59d81zlbMt3J7yB567rT2IqIydQ8qBW 
+k+fmXzghX+/FidYsh/aK+zZ7Wy68kKHuzEw1Vqkat5DGs+VzAgMBAAGjggLeMIIC +2jASBgNVHRMBAf8ECDAGAQH/AgECMDcGA1UdEgQwMC6CD3N1c2NlcnRlLmdvYi52 +ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMB0GA1UdDgQWBBStuyIdxuDS +Aaj9dlBSk+2YwU2u0zCCAVAGA1UdIwSCAUcwggFDgBStuyIdxuDSAaj9dlBSk+2Y +wU2u06GCASakggEiMIIBHjE+MDwGA1UEAxM1QXV0b3JpZGFkIGRlIENlcnRpZmlj +YWNpb24gUmFpeiBkZWwgRXN0YWRvIFZlbmV6b2xhbm8xCzAJBgNVBAYTAlZFMRAw +DgYDVQQHEwdDYXJhY2FzMRkwFwYDVQQIExBEaXN0cml0byBDYXBpdGFsMTYwNAYD +VQQKEy1TaXN0ZW1hIE5hY2lvbmFsIGRlIENlcnRpZmljYWNpb24gRWxlY3Ryb25p +Y2ExQzBBBgNVBAsTOlN1cGVyaW50ZW5kZW5jaWEgZGUgU2VydmljaW9zIGRlIENl +cnRpZmljYWNpb24gRWxlY3Ryb25pY2ExJTAjBgkqhkiG9w0BCQEWFmFjcmFpekBz +dXNjZXJ0ZS5nb2IudmWCAQEwDgYDVR0PAQH/BAQDAgEGMDcGA1UdEQQwMC6CD3N1 +c2NlcnRlLmdvYi52ZaAbBgVghl4CAqASDBBSSUYtRy0yMDAwNDAzNi0wMFQGA1Ud +HwRNMEswJKAioCCGHmhodHA6Ly93d3cuc3VzY2VydGUuZ29iLnZlL2xjcjAjoCGg +H4YdbGRhcDovL2FjcmFpei5zdXNjZXJ0ZS5nb2IudmUwNwYIKwYBBQUHAQEEKzAp +MCcGCCsGAQUFBzABhhtoaHRwOi8vb2NzcC5zdXNjZXJ0ZS5nb2IudmUwQAYDVR0g +BDkwNzA1BgVghl4BAjAsMCoGCCsGAQUFBwIBFh5odHRwOi8vd3d3LnN1c2NlcnRl +LmdvYi52ZS9kcGMwDQYJKoZIhvcNAQEMBQADggIBAK4qy/zmZ9zBwfW3yOYtLcBT +Oy4szJyPz7/RhNH3bPVH7HbDTGpi6JZ4YXdXMBeJE5qBF4a590Kgj8Rlnltt+Rbo +OFQOU1UDqKuTdBsA//Zry5899fmn8jBUkg4nh09jhHHbLlaUScdz704Zz2+UVg7i +s/r3Legxap60KzmdrmTAE9VKte1TQRgavQwVX5/2mO/J+SCas//UngI+h8SyOucq +mjudYEgBrZaodUsagUfn/+AzFNrGLy+al+5nZeHb8JnCfLHWS0M9ZyhgoeO/czyn +99+5G93VWNv4zfc4KiavHZKrkn8F9pg0ycIZh+OwPT/RE2zq4gTazBMlP3ACIe/p +olkNaOEa8KvgzW96sjBZpMW49zFmyINYkcj+uaNCJrVGsXgdBmkuRGJNWFZ9r0cG +woIaxViFBypsz045r1ESfYPlfDOavBhZ/giR/Xocm9CHkPRY2BApMMR0DUCyGETg +Ql+L3kfdTKzuDjUp2DM9FqysQmaM81YDZufWkMhlZPfHwC7KbNougoLroa5Umeos +bqAXWmk46SwIdWRPLLqbUpDTKooynZKpSYIkkotdgJoVZUUCY+RCO8jsVPEU6ece +SxztNUm5UOta1OJPMwSAKRHOo3ilVb9c6lAixDdvV8MeNbqe6asM1mpCHWbJ/0rg +5Ls9Cxx8hracyp0ev7b0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ +RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD +VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX +DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y +ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy +VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr +mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr +IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK +mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu +XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy +dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye +jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1 +BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3 +DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92 +9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx +jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0 +Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz +ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS +R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIIKv++n6Lw6YcwDQYJKoZIhvcNAQEFBQAwKDELMAkGA1UE +BhMCQkUxGTAXBgNVBAMTEEJlbGdpdW0gUm9vdCBDQTIwHhcNMDcxMDA0MTAwMDAw +WhcNMjExMjE1MDgwMDAwWjAoMQswCQYDVQQGEwJCRTEZMBcGA1UEAxMQQmVsZ2l1 +bSBSb290IENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMZzQh6S +/3UPi790hqc/7bIYLS2X+an7mEoj39WN4IzGMhwWLQdC1i22bi+n9fzGhYJdld61 +IgDMqFNAn68KNaJ6x+HK92AQZw6nUHMXU5WfIp8MXW+2QbyM69odRr2nlL/zGsvU 
++40OHjPIltfsjFPekx40HopQcSZYtF3CiInaYNKJIT/e1wEYNm7hLHADBGXvmAYr +XR5i3FVr/mZkIV/4L+HXmymvb82fqgxG0YjFnaKVn6w/Fa7yYd/vw2uaItgscf1Y +HewApDgglVrH1Tdjuk+bqv5WRi5j2Qsj1Yr6tSPwiRuhFA0m2kHwOI8w7QUmecFL +TqG4flVSOmlGhHUCAwEAAaOBuzCBuDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zBCBgNVHSAEOzA5MDcGBWA4CQEBMC4wLAYIKwYBBQUHAgEWIGh0dHA6 +Ly9yZXBvc2l0b3J5LmVpZC5iZWxnaXVtLmJlMB0GA1UdDgQWBBSFiuv0xbu+DlkD +lN7WgAEV4xCcOTARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUhYrr9MW7 +vg5ZA5Te1oABFeMQnDkwDQYJKoZIhvcNAQEFBQADggEBAFHYhd27V2/MoGy1oyCc +UwnzSgEMdL8rs5qauhjyC4isHLMzr87lEwEnkoRYmhC598wUkmt0FoqW6FHvv/pK +JaeJtmMrXZRY0c8RcrYeuTlBFk0pvDVTC9rejg7NqZV3JcqUWumyaa7YwBO+mPyW +nIR/VRPmPIfjvCCkpDZoa01gZhz5v6yAlGYuuUGK02XThIAC71AdXkbc98m6tTR8 +KvPG2F9fVJ3bTc0R5/0UAoNmXsimABKgX77OFP67H6dh96tK8QYUn8pJQsKpvO2F +sauBQeYNxUJpU4c5nUwfAA4+Bw11V0SoU7Q2dmSZ3G7rPUZuFF1eR1ONeE3gJ7uO +hXY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMiBSb290IENBMB4XDTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1ow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1g1Lr +6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPV +L4O2fuPn9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC91 +1K2GScuVr1QGbNgGE41b/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHx +MlAQTn/0hpPshNOOvEu/XAFOBz3cFIqUCqTqc/sLUegTBxj6DvEr0VQVfTzh97QZ +QmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeffawrbD02TTqigzXsu8lkB +arcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgIzRFo1clr +Us3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLi +FRhnBkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRS +P/TizPJhk9H9Z2vXUq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN +9SG9dKpN6nIDSdvHXx1iY8f93ZHsM+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxP +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMmAd+BikoL1Rpzz +uvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAU18h +9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3t +OluwlN5E40EIosHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo ++fsicdl9sz1Gv7SEr5AcD48Saq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7 +KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYdDnkM/crqJIByw5c/8nerQyIKx+u2 +DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWDLfJ6v9r9jv6ly0Us +H8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0oyLQ +I+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK7 +5t98biGCwWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h +3PFaTWwyI0PurKju7koSCTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPz +Y11aWOIv4x3kqdbQCtCev9eBCfHJxyYNrJgWVqA= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEd +MBsGA1UECgwUQnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3Mg +Q2xhc3MgMyBSb290IENBMB4XDTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFow +TjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBhc3MgQVMtOTgzMTYzMzI3MSAw +HgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRHsJ8Y +ZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3E +N3coTRiR5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9 +tznDDgFHmV0ST9tD+leh7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX +0DJq1l1sDPGzbjniazEuOQAnFN44wOwZZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c 
+/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH2xc519woe2v1n/MuwU8X +KhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV/afmiSTY +zIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvS +O1UQRwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D +34xFMFbG02SrZvPAXpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgP +K9Dx2hzLabjKSWJtyNBjYt1gD1iqj6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3 +AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFEe4zf/lb+74suwv +Tg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAACAj +QTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXS +IGrs/CIBKM+GuIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2 +HJLw5QY33KbmkJs4j1xrG0aGQ0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsa +O5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8ZORK15FTAaggiG6cX0S5y2CBNOxv +033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2KSb12tjE8nVhz36u +dmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz6MkE +kbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg41 +3OEMXbugUZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvD +u79leNKGef9JOxqDDPDeeOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq +4/g7u9xN12TyUb7mqqta6THuBrxzvxNiCp/HuZc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAMMDmu5QkG4oMA0GCSqGSIb3DQEBBQUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIxMB4XDTEyMDcxOTA5MDY1NloXDTQy +MDcxOTA5MDY1NlowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx +EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjEw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCqw3j33Jijp1pedxiy3QRk +D2P9m5YJgNXoqqXinCaUOuiZc4yd39ffg/N4T0Dhf9Kn0uXKE5Pn7cZ3Xza1lK/o +OI7bm+V8u8yN63Vz4STN5qctGS7Y1oprFOsIYgrY3LMATcMjfF9DCCMyEtztDK3A +fQ+lekLZWnDZv6fXARz2m6uOt0qGeKAeVjGu74IKgEH3G8muqzIm1Cxr7X1r5OJe +IgpFy4QxTaz+29FHuvlglzmxZcfe+5nkCiKxLU3lSCZpq+Kq8/v8kiky6bM+TR8n +oc2OuRf7JT7JbvN32g0S9l3HuzYQ1VTW8+DiR0jm3hTaYVKvJrT1cU/J19IG32PK +/yHoWQbgCNWEFVP3Q+V8xaCJmGtzxmjOZd69fwX3se72V6FglcXM6pM6vpmumwKj +rckWtc7dXpl4fho5frLABaTAgqWjR56M6ly2vGfb5ipN0gTco65F97yLnByn1tUD +3AjLLhbKXEAz6GfDLuemROoRRRw1ZS0eRWEkG4IupZ0zXWX4Qfkuy5Q/H6MMMSRE +7cderVC6xkGbrPAXZcD4XW9boAo0PO7X6oifmPmvTiT6l7Jkdtqr9O3jw2Dv1fkC +yC2fg69naQanMVXVz0tv/wQFx1isXxYb5dKj6zHbHzMVTdDypVP1y+E9Tmgt2BLd +qvLmTZtJ5cUoobqwWsagtQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUiQq0OJMa5qvum5EY+fU8PjXQ04IwDQYJKoZI +hvcNAQEFBQADggIBADKL9p1Kyb4U5YysOMo6CdQbzoaz3evUuii+Eq5FLAR0rBNR +xVgYZk2C2tXck8An4b58n1KeElb21Zyp9HWc+jcSjxyT7Ff+Bw+r1RL3D65hXlaA +SfX8MPWbTx9BLxyE04nH4toCdu0Jz2zBuByDHBb6lM19oMgY0sidbvW9adRtPTXo +HqJPYNcHKfyyo6SdbhWSVhlMCrDpfNIZTUJG7L399ldb3Zh+pE3McgODWF3vkzpB +emOqfDqo9ayk0d2iLbYq/J8BjuIQscTK5GfbVSUZP/3oNn6z4eGBrxEWi1CXYBmC +AMBrTXO40RMHPuq2MU/wQppt4hF05ZSsjYSVPCGvxdpHyN85YmLLW1AL14FABZyb +7bq2ix4Eb5YgOe2kfSnbSM6C3NQCjR0EMVrHS/BsYVLXtFHCgWzN4funodKSds+x +DzdYpPJScWc/DIh4gInByLUfkmO+p3qKViwaqKactV2zY9ATIKHrkWzQjX2v3wvk +F7mGnjixlAxYjOBVqjtjbZqJYLhkKpLGN/R+Q0O3c+gB53+XD9fyexn9GtePyfqF +a3qdnom2piiZk4hA9z7NUaPK6u95RyG1/jLix8NRb76AdPCkwzryT+lf3xkK8jsT +Q6wxpLPn6/wY1gGp8yqPNg7rtLG8t0zJa7+h89n07eLw4+1knj0vllJPgFOL +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNV +BAYTAlNLMRMwEQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMu +MRkwFwYDVQQDExBDQSBEaXNpZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQy +MDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sxEzARBgNVBAcTCkJyYXRpc2xhdmEx 
+EzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERpc2lnIFJvb3QgUjIw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbCw3Oe +NcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNH +PWSb6WiaxswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3I +x2ymrdMxp7zo5eFm1tL7A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbe +QTg06ov80egEFGEtQX6sx3dOy1FU+16SGBsEWmjGycT6txOgmLcRK7fWV8x8nhfR +yyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqVg8NTEQxzHQuyRpDRQjrO +QG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa5Beny912 +H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJ +QfYEkoopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUD +i/ZnWejBBhG93c+AAk9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORs +nLMOPReisjQS1n6yqEm70XooQL6iFh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1 +rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5uQu0wDQYJKoZI +hvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqf +GopTpti72TVVsRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkb +lvdhuDvEK7Z4bLQjb/D907JedR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka ++elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W81k/BfDxujRNt+3vrMNDcTa/F1bal +TFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjxmHHEt38OFdAlab0i +nSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01utI3 +gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18Dr +G5gPcFw0sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3Os +zMOl6W8KjptlwlCFtaOgUxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8x +L4ysEr3vQCj8KWefshNPZiTEUxnpHikV7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNV +BAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4X +DTA3MDYyOTE1MTMwNVoXDTI3MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQ +BgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwIQ2VydGlnbmEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7qXOEm7RFHYeGifBZ4 +QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyHGxny +gQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbw +zBfsV1/pogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q +130yGLMLLGq/jj8UEYkgDncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2 +JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKfIrjxwo1p3Po6WAbfAgMBAAGjgbwwgbkw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQtCRZvgHyUtVF9lo53BEw +ZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJBgNVBAYT +AkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzj +AQ/JSP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG +9w0BAQUFAAOCAQEAhQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8h +bV6lUmPOEvjvKtpv6zf+EwLHyzs+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFnc +fca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1kluPBS1xp81HlDQwY9qcEQCYsuu +HWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY1gkIl2PlwS6w +t0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAk +BgNVBAMMHUNlcnRpbm9taXMgLSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4 +Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNl +cnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYwJAYDVQQDDB1DZXJ0 +aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jY 
+F1AMnmHawE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N +8y4oH3DfVS9O7cdxbwlyLu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWe +rP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K +/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92NjMD2AR5vpTESOH2VwnHu +7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9qc1pkIuVC +28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6 +lSTClrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1E +nn1So2+WLhl+HPNbxxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB +0iSVL1N6aaLwD4ZFjliCK0wi1F6g530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql09 +5gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna4NH4+ej9Uji29YnfAgMBAAGj +WzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBQN +jLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ +KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9s +ov3/4gbIOZ/xWqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZM +OH8oMDX/nyNTt7buFHAAQCvaR6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q +619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40nJ+U8/aGH88bc62UeYdocMMzpXDn +2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1BCxMjidPJC+iKunqj +o3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjvJL1v +nxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG +5ERQL1TEqkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWq +pdEdnV1j6CTmNhTih60bWfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZb +dsLLO7XSAPCjDuGtbkD326C00EauFddEwk01+dIL8hf2rGbVJLJP0RyZwG71fet0 +BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/vgt2Fl43N+bYdJeimUV5 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBATANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJGUjET +MBEGA1UEChMKQ2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxHTAb +BgNVBAMTFENlcnRpbm9taXMgLSBSb290IENBMB4XDTEzMTAyMTA5MTcxOFoXDTMz +MTAyMTA5MTcxOFowWjELMAkGA1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMx +FzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMR0wGwYDVQQDExRDZXJ0aW5vbWlzIC0g +Um9vdCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANTMCQosP5L2 +fxSeC5yaah1AMGT9qt8OHgZbn1CF6s2Nq0Nn3rD6foCWnoR4kkjW4znuzuRZWJfl +LieY6pOod5tK8O90gC3rMB+12ceAnGInkYjwSond3IjmFPnVAy//ldu9n+ws+hQV +WZUKxkd8aRi5pwP5ynapz8dvtF4F/u7BUrJ1Mofs7SlmO/NKFoL21prbcpjp3vDF +TKWrteoB4owuZH9kb/2jJZOLyKIOSY008B/sWEUuNKqEUL3nskoTuLAPrjhdsKkb +5nPJWqHZZkCqqU2mNAKthH6yI8H7KsZn9DS2sJVqM09xRLWtwHkziOC/7aOgFLSc +CbAK42C++PhmiM1b8XcF4LVzbsF9Ri6OSyemzTUK/eVNfaoqoynHWmgE6OXWk6Ri +wsXm9E/G+Z8ajYJJGYrKWUM66A0ywfRMEwNvbqY/kXPLynNvEiCL7sCCeN5LLsJJ +wx3tFvYk9CcbXFcx3FXuqB5vbKziRcxXV4p1VxngtViZSTYxPDMBbRZKzbgqg4SG +m/lg0h9tkQPTYKbVPZrdd5A9NaSfD171UkRpucC63M9933zZxKyGIjK8e2uR73r4 +F2iw4lNVYC2vPsKD2NkJK/DAZNuHi5HMkesE/Xa0lZrmFAYb1TQdvtj/dBxThZng +WVJKYe2InmtJiUZ+IFrZ50rlau7SZRFDAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTvkUz1pcMw6C8I6tNxIqSSaHh0 +2TAfBgNVHSMEGDAWgBTvkUz1pcMw6C8I6tNxIqSSaHh02TANBgkqhkiG9w0BAQsF +AAOCAgEAfj1U2iJdGlg+O1QnurrMyOMaauo++RLrVl89UM7g6kgmJs95Vn6RHJk/ +0KGRHCwPT5iVWVO90CLYiF2cN/z7ZMF4jIuaYAnq1fohX9B0ZedQxb8uuQsLrbWw +F6YSjNRieOpWauwK0kDDPAUwPk2Ut59KA9N9J0u2/kTO+hkzGm2kQtHdzMjI1xZS +g081lLMSVX3l4kLr5JyTCcBMWwerx20RoFAXlCOotQqSD7J6wWAsOMwaplv/8gzj +qh8c3LigkyfeY+N/IZ865Z764BNqdeuWXGKRlI5nU7aJ+BIJy29SWwNyhlCVCNSN +h4YVH5Uk2KRvms6knZtt0rJ2BobGVgjF6wnaNsIbW0G+YSrjcOa4pvi2WsS9Iff/ +ql+hbHY5ZtbqTFXhADObE5hjyW/QASAJN1LnDE8+zbz1X5YnpyACleAu6AdBBR8V +btaw5BngDwKTACdyxYvRVB9dSsNAl35VpnzBMwQUAR1JIGkLGZOdblgi90AMRgwj +Y/M50n92Uaf0yKHxDHYiI0ZSKS3io0EHVmmY0gUJvGnHWmHNj4FgFU2A3ZDifcRQ +8ow7bkrHxuaAKzyBvBGAFhAn1/DNP3nMcyrDflOR1m749fPH0FFNjkulW+YZFzvW +gQncItzujrnEj1PhZ7szuIgVRs/taTX/dQ1G885x4cVrhkIGuUE= +-----END 
CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYT +AlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBD +QTAeFw0wNjA3MDQxNzIwMDRaFw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJP +MREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7IJUqOtdu0KBuqV5Do +0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHHrfAQ +UySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5d +RdY4zTW2ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQ +OA7+j0xbm0bqQfWwCHTD0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwv +JoIQ4uNllAoEwF73XVv4EOLQunpL+943AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08C +AwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAcYwHQYDVR0O +BBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IBAQA+0hyJ +LjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecY +MnQ8SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ +44gx+FkagQnIl6Z0x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6I +Jd1hJyMctTEHBDa0GpC9oHRxUIltvBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNw +i/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7NzTogVZ96edhBiIL5VaZVDADlN +9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDDDCCAfSgAwIBAgIDAQAgMA0GCSqGSIb3DQEBBQUAMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTAeFw0wMjA2MTExMDQ2MzlaFw0yNzA2MTExMDQ2MzlaMD4xCzAJBgNVBAYTAlBM +MRswGQYDVQQKExJVbml6ZXRvIFNwLiB6IG8uby4xEjAQBgNVBAMTCUNlcnR1bSBD +QTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6xwS7TT3zNJc4YPk/E +jG+AanPIW1H4m9LcuwBcsaD8dQPugfCI7iNS6eYVM42sLQnFdvkrOYCJ5JdLkKWo +ePhzQ3ukYbDYWMzhbGZ+nPMJXlVjhNWo7/OxLjBos8Q82KxujZlakE403Daaj4GI +ULdtlkIJ89eVgw1BS7Bqa/j8D35in2fE7SZfECYPCE/wpFcozo+47UX2bu4lXapu +Ob7kky/ZR6By6/qmW6/KUz/iDsaWVhFu9+lmqSbYf5VT7QqFiLpPKaVCjF62/IUg +AKpoC6EahQGcxEZjgoi2IrHu/qpGWX7PNSzVttpd90gzFFS269lvzs2I1qsb2pY7 +HVkCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEA +uI3O7+cUus/usESSbLQ5PqKEbq24IXfS1HeCh+YgQYHu4vgRt2PRFze+GXYkHAQa +TOs9qmdvLdTN/mUxcMUbpgIKumB7bVjCmkn+YzILa+M6wKyrO7Do0wlRjBCDxjTg +xSvgGrZgFCdsMneMvLJymM/NzD+5yCRCFNZX/OYmQ6kd5YCQzgNUKD73P9P4Te1q +CjqTE5s7FCMTY5w/0YcneeVMUeMBrYVdGjux1XMQpNPyvG5k9VpWkKjHDkx0Dy5x +O/fIR/RpbxXyEV6DHpx8Uq79AtoSqFlnGNu8cN2bsWntgM6JQEhqDjXKKWYVIZQs +6GAqm4VKQPNriiTsBhYscw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCB +gDELMAkGA1UEBhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMu +QS4xJzAlBgNVBAsTHkNlcnR1bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIG +A1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29yayBDQSAyMCIYDzIwMTExMDA2MDgz +OTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQTDEiMCAGA1UEChMZ +VW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3 +b3JrIENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWA +DGSdhhuWZGc/IjoedQF97/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn +0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+oCgCXhVqqndwpyeI1B+twTUrWwbNWuKFB +OJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40bRr5HMNUuctHFY9rnY3lE +fktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2puTRZCr+E +Sv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1m +o130GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02i +sx7QBlrd9pPPV3WZ9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOW +OZV7bIBaTxNyxtd9KXpEulKkKtVBRgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgez +Tv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pyehizKV/Ma5ciSixqClnrDvFAS 
+adgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vMBhBgu4M1t15n +3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQ +F/xlhMcQSZDe28cmk4gmb3DWAl45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTf +CVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuAL55MYIR4PSFk1vtBHxgP58l1cb29 +XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMoclm2q8KMZiYcdywm +djWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tMpkT/ +WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jb +AoJnwTnbw3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksq +P/ujmv5zMnHCnsZy4YpoJ/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Ko +b7a6bINDd82Kkhehnlt4Fj1F4jNy3eFmypnTycUm/Q1oBEauttmbjL4ZvrHG8hnj +XALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLXis7VmFxWlgPF7ncGNf/P +5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7zAYspsbi +DrW5viSP +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBM +MSIwIAYDVQQKExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5D +ZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBU +cnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIyMTIwNzM3WhcNMjkxMjMxMTIwNzM3 +WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMg +Uy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MSIw +IAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rH +UV+rpDKmYYe2bg+G0jACl/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LM +TXPb865Px1bVWqeWifrzq2jUI4ZZJ88JJ7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVU +BBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4fOQtf/WsX+sWn7Et0brM +kUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0cvW0QM8x +AcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15y +sHhE49wcrwn9I0j6vSrEuVUEtRCjjSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfL +I9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1mS1FhIrlQgnXdAIv94nYmem8 +J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5ajZt3hrvJBW8qY +VoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJD +TjEwMC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9y +aXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkx +MjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEwMC4GA1UECgwnQ2hpbmEgRmluYW5j +aWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNBIEVWIFJP +T1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnVBU03 +sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpL +TIpTUnrD7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5 +/ZOkVIBMUtRSqy5J35DNuF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp +7hZZLDRJGqgG16iI0gNyejLi6mhNbiyWZXvKWfry4t3uMCz7zEasxGPrb382KzRz +EpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7xzbh72fROdOXW3NiGUgt +hxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9fpy25IGvP +a931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqot +aK8KgWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNg +TnYGmE69g60dWIolhdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfV +PKPtl8MeNPo4+QgO48BdK4PRVmrJtqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hv +cWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAfBgNVHSMEGDAWgBTj/i39KNAL +tbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAd 
+BgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObT +ej/tUxPQ4i9qecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdL +jOztUmCypAbqTuv0axn96/Ua4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBS +ESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sGE5uPhnEFtC+NiWYzKXZUmhH4J/qy +P5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfXBDrDMlI1Dlb4pd19 +xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjnaH9d +Ci77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN +5mydLIhyPDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe +/v5WOaHIz16eGWRGENoXkbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+Z +AAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3CekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ +5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xKTAnBgNVBAMTIENoYW1iZXJz +IG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEyMjk1MFoXDTM4MDcz +MTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBj +dXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIw +EAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEp +MCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW9 +28sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKAXuFixrYp4YFs8r/lfTJq +VKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorjh40G072Q +DuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR +5gN/ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfL +ZEFHcpOrUMPrCXZkNNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05a +Sd+pZgvMPMZ4fKecHePOjlO+Bd5gD2vlGts/4+EhySnB8esHnFIbAURRPHsl18Tl +UlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331lubKgdaX8ZSD6e2wsWsSaR6s ++12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ0wlf2eOKNcx5 +Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj +ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAx +hduub+84Mxh2EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNV +HQ4EFgQU+SSsD7K1+HnA+mCIG8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1 ++HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpN +YWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29t +L2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVy +ZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAt +IDIwMDiCCQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRV +HSAAMCowKAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20w +DQYJKoZIhvcNAQEFBQADggIBAJASryI1wqM58C7e6bXpeHxIvj99RZJe6dqxGfwW +PJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH3qLPaYRgM+gQDROpI9CF +5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbURWpGqOt1 +glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaH +FoI6M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2 +pSB7+R5KBWIBpih1YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MD +xvbxrN8y8NmBGuScvfaAFPDRLLmF9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QG +tjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcKzBIKinmwPQN/aUv0NCB9szTq +jktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvGnrDQWzilm1De +fhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg +OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZ +d0jQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEn 
+MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMg +b2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAxNjEzNDNaFw0zNzA5MzAxNjEzNDRa +MH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZpcm1hIFNBIENJRiBB +ODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3JnMSIw +IAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0B +AQEFAAOCAQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtb +unXF/KGIJPov7coISjlUxFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0d +BmpAPrMMhe5cG3nCYsS4No41XQEMIwRHNaqbYE6gZj3LJgqcQKH0XZi/caulAGgq +7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jWDA+wWFjbw2Y3npuRVDM3 +0pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFVd9oKDMyX +roDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIG +A1UdEwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5j +aGFtYmVyc2lnbi5vcmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p +26EpW1eLTXYGduHRooowDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIA +BzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hhbWJlcnNpZ24ub3JnMCcGA1Ud +EgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYDVR0gBFEwTzBN +BgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz +aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEB +AAxBl8IahsAifJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZd +p0AJPaxJRUXcLo0waLIJuvvDL8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi +1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wNUPf6s+xCX6ndbcj0dc97wXImsQEc +XCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/nADydb47kMgkdTXg0 +eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1erfu +tGWaIZDgqtCYvDi1czyL+Nw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDQzCCAiugAwIBAgIQX/h7KCtU3I1CoxW1aMmt/zANBgkqhkiG9w0BAQUFADA1 +MRYwFAYDVQQKEw1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENB +IDIwNDgwHhcNMDQwNTE0MjAxNzEyWhcNMjkwNTE0MjAyNTQyWjA1MRYwFAYDVQQK +Ew1DaXNjbyBTeXN0ZW1zMRswGQYDVQQDExJDaXNjbyBSb290IENBIDIwNDgwggEg +MA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCwmrmrp68Kd6ficba0ZmKUeIhH +xmJVhEAyv8CrLqUccda8bnuoqrpu0hWISEWdovyD0My5jOAmaHBKeN8hF570YQXJ +FcjPFto1YYmUQ6iEqDGYeJu5Tm8sUxJszR2tKyS7McQr/4NEb7Y9JHcJ6r8qqB9q +VvYgDxFUl4F1pyXOWWqCZe+36ufijXWLbvLdT6ZeYpzPEApk0E5tzivMW/VgpSdH +jWn0f84bcN5wGyDWbs2mAag8EtKpP6BrXruOIIt6keO1aO6g58QBdKhTCytKmg9l +Eg6CTY5j/e/rmxrbU6YTYK/CfdfHbBcl1HP7R2RQgYCUTOG/rksc35LtLgXfAgED +o1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUJ/PI +FR5umgIJFq0roIlgX9p7L6owEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEF +BQADggEBAJ2dhISjQal8dwy3U8pORFBi71R803UXHOjgxkhLtv5MOhmBVrBW7hmW +Yqpao2TB9k5UM8Z3/sUcuuVdJcr18JOagxEu5sv4dEX+5wW4q+ffy0vhN4TauYuX +cB7w4ovXsNgOnbFp1iqRe6lJT37mjpXYgyc81WhJDtSd9i7rp77rMKSsH0T8lasz +Bvt9YAretIpjsJyp8qS5UwGH0GikJ3+r/+n6yUA4iGe0OcaEb1fJU9u6ju7AQ7L4 +CYNu/2bPPu8Xs1gYJQk0XuPL1hS27PKSb3TkL4Eq1ZKR4OCXPDJoBYVL0fdX4lId +kxpUnwVwwEpxYB5DC2Ae/qPOgRnhCzU= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAw +PTELMAkGA1UEBhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFz +cyAyIFByaW1hcnkgQ0EwHhcNOTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9 +MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2VydHBsdXMxGzAZBgNVBAMTEkNsYXNz +IDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANxQ +ltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR5aiR +VhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyL +kcAbmXuZVg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCd +EgETjdyAYveVqUSISnFOYFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yas +H7WLO7dDWWuwJKZtkIvEcupdM5i3y95ee++U8Rs+yskhwcWYAqqi9lt3m/V+llU0 
+HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRMECDAGAQH/AgEKMAsGA1Ud +DwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJYIZIAYb4 +QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMu +Y29tL0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/ +AN9WM2K191EBkOvDP9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8 +yfFC82x/xXp8HVGIutIKPidd3i1RTtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMR +FcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+7UCmnYR0ObncHoUW2ikbhiMA +ybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW//1IMwrh3KWB +kJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 +l7+ijrRU +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDoTCCAomgAwIBAgIQKTZHquOKrIZKI1byyrdhrzANBgkqhkiG9w0BAQUFADBO +MQswCQYDVQQGEwJ1czEYMBYGA1UEChMPVS5TLiBHb3Zlcm5tZW50MQ0wCwYDVQQL +EwRGQkNBMRYwFAYDVQQDEw1Db21tb24gUG9saWN5MB4XDTA3MTAxNTE1NTgwMFoX +DTI3MTAxNTE2MDgwMFowTjELMAkGA1UEBhMCdXMxGDAWBgNVBAoTD1UuUy4gR292 +ZXJubWVudDENMAsGA1UECxMERkJDQTEWMBQGA1UEAxMNQ29tbW9uIFBvbGljeTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJeNvTMn5K1b+3i9L0dHbsd4 +6ZOcpN7JHP0vGzk4rEcXwH53KQA7Ax9oD81Npe53uCxiazH2+nIJfTApBnznfKM9 +hBiKHa4skqgf6F5PjY7rPxr4nApnnbBnTfAu0DDew5SwoM8uCjR/VAnTNr2kSVdS +c+md/uRIeUYbW40y5KVIZPMiDZKdCBW/YDyD90ciJSKtKXG3d+8XyaK2lF7IMJCk +FEhcVlcLQUwF1CpMP64Sm1kRdXAHImktLNMxzJJ+zM2kfpRHqpwJCPZLr1LoakCR +xVW9QLHIbVeGlRfmH3O+Ry4+i0wXubklHKVSFzYIWcBCvgortFZRPBtVyYyQd+sC +AwEAAaN7MHkwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFC9Yl9ipBZilVh/72at17wI8NjTHMBIGCSsGAQQBgjcVAQQFAgMBAAEwIwYJ +KwYBBAGCNxUCBBYEFHa3YJbdFFYprHWF03BjwbxHhhyLMA0GCSqGSIb3DQEBBQUA +A4IBAQBgrvNIFkBypgiIybxHLCRLXaCRc+1leJDwZ5B6pb8KrbYq+Zln34PFdx80 +CTj5fp5B4Ehg/uKqXYeI6oj9XEWyyWrafaStsU+/HA2fHprA1RRzOCuKeEBuMPdi +4c2Z/FFpZ2wR3bgQo2jeJqVW/TZsN5hs++58PGxrcD/3SDcJjwtCga1GRrgLgwb0 +Gzigf0/NC++DiYeXHIowZ9z9VKEDfgHLhUyxCynDvux84T8PCVI8L6eaSP436REG +WOE2QYrEtr+O3c5Ks7wawM36GpnScZv6z7zyxFSjiDV2zBssRm8MtNHDYXaSdBHq +S4CNHIkRi+xb/xfJSPzn4AYR4oRe +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCB +gTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNV +BAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEyMDEwMDAw +MDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEbMBkGA1UECBMSR3Jl +YXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFDT01P +RE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3 +UcEbVASY06m/weaKXTuH+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI +2GqGd0S7WWaXUF601CxwRM/aN5VCaTwwxHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8 +Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV4EajcNxo2f8ESIl33rXp ++2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA1KGzqSX+ +DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5O +nKVIrLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW +/zAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6g +PKA6hjhodHRwOi8vY3JsLmNvbW9kb2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9u +QXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOCAQEAPpiem/Yb6dc5t3iuHXIY +SdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CPOGEIqB6BCsAv +IC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4 +zJVSk/BwJVmcIGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5dd +BA6+C4OmF4O5MBKgxTMVBbkN+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IB +ZQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- 
+MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTEL +MAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UE +BxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMT +IkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwMzA2MDAw +MDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdy +ZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09N +T0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSR +FtSrYpn1PlILBs5BAH+X4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0J +cfRK9ChQtP6IHG4/bC8vCVlbpVsLM5niwz2J+Wos77LTBumjQjBAMB0GA1UdDgQW +BBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VGFAkK+qDm +fQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdv +GDeAU/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCB +hTELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G +A1UEBxMHU2FsZm9yZDEaMBgGA1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNV +BAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMTE5 +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0IxGzAZBgNVBAgT +EkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR +6FSS0gpWsawNJN3Fz0RndJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8X +pz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZFGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC +9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+5eNu/Nio5JIk2kNrYrhV +/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pGx8cgoLEf +Zd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z ++pUX2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7w +qP/0uK3pN/u6uPQLOvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZah +SL0896+1DSJMwBGB7FY79tOi4lu3sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVIC +u9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+CGCe01a60y1Dma/RMhnEw6abf +Fobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5WdYgGq/yapiq +crxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB +/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvl +wFTPoCWOAvn9sKIN9SCYPBMtrFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM +4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+nq6PK7o9mfjYcwlYRm6mnPTXJ9OV +2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSgtZx8jb8uk2Intzna +FxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwWsRqZ +CuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiK +boHGhfKppC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmcke +jkk9u+UJueBPSZI9FoJAzMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yL +S0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHqZJx64SIDqZxubw5lT2yHh17zbqD5daWb +QOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk527RH89elWsn2/x20Kk4yl +0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7ILaZRfyHB +NVOFBkpdn627G190 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDkzCCAnugAwIBAgIQFBOWgxRVjOp7Y+X8NId3RDANBgkqhkiG9w0BAQUFADA0 +MRMwEQYDVQQDEwpDb21TaWduIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQG +EwJJTDAeFw0wNDAzMjQxMTMyMThaFw0yOTAzMTkxNTAyMThaMDQxEzARBgNVBAMT +CkNvbVNpZ24gQ0ExEDAOBgNVBAoTB0NvbVNpZ24xCzAJBgNVBAYTAklMMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8ORUaSvTx49qROR+WCf4C9DklBKK +8Rs4OC8fMZwG1Cyn3gsqrhqg455qv588x26i+YtkbDqthVVRVKU4VbirgwTyP2Q2 +98CNQ0NqZtH3FyrV7zb6MBBC11PN+fozc0yz6YQgitZBJzXkOPqUm7h65HkfM/sb 
+2CEJKHxNGGleZIp6GZPKfuzzcuc3B1hZKKxC+cX/zT/npfo4sdAMx9lSGlPWgcxC +ejVb7Us6eva1jsz/D3zkYDaHL63woSV9/9JLEYhwVKZBqGdTUkJe5DSe5L6j7Kpi +Xd3DTKaCQeQzC6zJMw9kglcq/QytNuEMrkvF7zuZ2SOzW120V+x0cAwqTwIDAQAB +o4GgMIGdMAwGA1UdEwQFMAMBAf8wPQYDVR0fBDYwNDAyoDCgLoYsaHR0cDovL2Zl +ZGlyLmNvbXNpZ24uY28uaWwvY3JsL0NvbVNpZ25DQS5jcmwwDgYDVR0PAQH/BAQD +AgGGMB8GA1UdIwQYMBaAFEsBmz5WGmU2dst7l6qSBe4y5ygxMB0GA1UdDgQWBBRL +AZs+VhplNnbLe5eqkgXuMucoMTANBgkqhkiG9w0BAQUFAAOCAQEA0Nmlfv4pYEWd +foPPbrxHbvUanlR2QnG0PFg/LUAlQvaBnPGJEMgOqnhPOAlXsDzACPw1jvFIUY0M +cXS6hMTXcpuEfDhOZAYnKuGntewImbQKDdSFc8gS4TXt8QUxHXOZDOuWyt3T5oWq +8Ir7dcHyCTxlZWTzTNity4hp8+SDtwy9F1qWF8pb/627HOkthIDYIb6FUtnUdLlp +hbpN7Sgy6/lhSuTENh4Z3G+EER+V9YMoGKgzkkMn3V0TBEVPh9VGzT2ouvDzuFYk +Res3x+F2T3I5GN9+dHLHcy056mDmrRGiVod7w2ia/viMcKjfZTL0pECMocJEAw6U +AGegcQCCSA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGATCCA+mgAwIBAgIRAI9hcRW6eVgXjH0ROqzW264wDQYJKoZIhvcNAQELBQAw +RTEfMB0GA1UEAxMWQ29tU2lnbiBHbG9iYWwgUm9vdCBDQTEVMBMGA1UEChMMQ29t +U2lnbiBMdGQuMQswCQYDVQQGEwJJTDAeFw0xMTA3MTgxMDI0NTRaFw0zNjA3MTYx +MDI0NTVaMEUxHzAdBgNVBAMTFkNvbVNpZ24gR2xvYmFsIFJvb3QgQ0ExFTATBgNV +BAoTDENvbVNpZ24gTHRkLjELMAkGA1UEBhMCSUwwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQCyKClzKh3rm6n1nvigmV/VU1D4hSwYW2ro3VqpzpPo0Ph3 +3LguqjXd5juDwN4mpxTpD99d7Xu5X6KGTlMVtfN+bTbA4t3x7DU0Zqn0BE5XuOgs +3GLH41Vmr5wox1bShVpM+IsjcN4E/hMnDtt/Bkb5s33xCG+ohz5dlq0gA9qfr/g4 +O9lkHZXTCeYrmVzd/il4x79CqNvGkdL3um+OKYl8rg1dPtD8UsytMaDgBAopKR+W +igc16QJzCbvcinlETlrzP/Ny76BWPnAQgaYBULax/Q5thVU+N3sEOKp6uviTdD+X +O6i96gARU4H0xxPFI75PK/YdHrHjfjQevXl4J37FJfPMSHAbgPBhHC+qn/014DOx +46fEGXcdw2BFeIIIwbj2GH70VyJWmuk/xLMCHHpJ/nIF8w25BQtkPpkwESL6esaU +b1CyB4Vgjyf16/0nRiCAKAyC/DY/Yh+rDWtXK8c6QkXD2XamrVJo43DVNFqGZzbf +5bsUXqiVDOz71AxqqK+p4ek9374xPNMJ2rB5MLPAPycwI0bUuLHhLy6nAIFHLhut +TNI+6Y/soYpi5JSaEjcY7pxI8WIkUAzr2r+6UoT0vAdyOt7nt1y8844a7szo/aKf +woziHl2O1w6ZXUC30K+ptXVaOiW79pBDcbLZ9ZdbONhS7Ea3iH4HJNwktrBJLQID +AQABo4HrMIHoMA8GA1UdEwEB/wQFMAMBAf8wgYQGA1UdHwR9MHswPKA6oDiGNmh0 +dHA6Ly9mZWRpci5jb21zaWduLmNvLmlsL2NybC9jb21zaWduZ2xvYmFscm9vdGNh +LmNybDA7oDmgN4Y1aHR0cDovL2NybDEuY29tc2lnbi5jby5pbC9jcmwvY29tc2ln +bmdsb2JhbHJvb3RjYS5jcmwwDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBQCRZPY +DUhirGm6rgZbPvuqJpFQsTAfBgNVHSMEGDAWgBQCRZPYDUhirGm6rgZbPvuqJpFQ +sTANBgkqhkiG9w0BAQsFAAOCAgEAk1V5V9701xsfy4mfX+tP9Ln5e9h3N+QMwUfj +kr+k3e8iXOqADjTpUHeBkEee5tJq09ZLp/43F5tZ2eHdYq2ZEX7iWHCnOQet6Yw9 +SU1TahsrGDA6JJD9sdPFnNZooGsU1520e0zNB0dNWwxrWAmu4RsBxvEpWCJbvzQL +dOfyX85RWwli81OiVMBc5XvJ1mxsIIqli45oRynKtsWP7E+b0ISJ1n+XFLdQo/Nm +WA/5sDfT0F5YPzWdZymudMbXitimxC+n4oQE4mbQ4Zm718Iwg3pP9gMMcSc7Qc1J +kJHPH9O7gVubkKHuSYj9T3Ym6c6egL1pb4pz/uT7cT26Fiopc/jdqbe2EAfoJZkv +hlp/zdzOoXTWjiKNA5zmgWnZn943FuE9KMRyKtyi/ezJXCh8ypnqLIKxeFfZl69C +BwJsPXUTuqj8Fic0s3aZmmr7C4jXycP+Q8V+akMEIoHAxcd960b4wVWKqOcI/kZS +Q0cYqWOY1LNjznRt9lweWEfwDBL3FhrHOmD4++1N3FkkM4W+Q1b2WOL24clDMj+i +2n9Iw0lc1llHMSMvA5D0vpsXZpOgcCVahfXczQKi9wQ3oZyonJeWx4/rXdMtagAB +VBYGFuMEUEQtybI+eIbnp5peO2WAAblQI4eTy/jMVowe5tfMEXovV3sz9ULgmGb3 +DscLP1I= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAw +PDEbMBkGA1UEAxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWdu +MQswCQYDVQQGEwJJTDAeFw0wNDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwx +GzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBDQTEQMA4GA1UEChMHQ29tU2lnbjEL +MAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGtWhf +HZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs49oh +gHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sW 
+v+bznkqH7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ue +Mv5WJDmyVIRD9YTC2LxBkMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr +9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d19guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt +6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUwAwEB/zBEBgNVHR8EPTA7 +MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29tU2lnblNl +Y3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58 +ADsAj8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkq +hkiG9w0BAQUFAAOCAQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7p +iL1DRYHjZiM/EoZNGeQFsOY3wo3aBijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtC +dsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtpFhpFfTMDZflScZAmlaxMDPWL +kz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP51qJThRv4zdL +hfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz +OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgMjAwOTAeFw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NTha +ME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxJzAlBgNVBAMM +HkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOADER03 +UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42 +tSHKXzlABF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9R +ySPocq60vFYJfxLLHLGvKZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsM +lFqVlNpQmvH/pStmMaTJOKDfHR+4CS7zp+hnUquVH+BGPtikw8paxTGA6Eian5Rp +/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUCAwEAAaOCARowggEWMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ4PGEMA4G +A1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUy +MENBJTIwMiUyMDIwMDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRl +cmV2b2NhdGlvbmxpc3QwQ6BBoD+GPWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3Js +L2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAwOS5jcmwwDQYJKoZIhvcNAQEL +BQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm2H6NMLVwMeni +acfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4K +zCUqNQT4YJEVdT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8 +PIWmawomDeCTmGCufsYkl4phX5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3Y +Johw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRF +MRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBD +bGFzcyAzIENBIDIgRVYgMjAwOTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUw +NDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxELVRydXN0IEdtYkgxKjAoBgNV +BAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAwOTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfSegpn +ljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM0 +3TP1YtHhzRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6Z +qQTMFexgaDbtCHu39b+T7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lR +p75mpoo6Kr3HGrHhFPC+Oh25z1uxav60sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8 +HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure3511H3a6UCAwEAAaOCASQw +ggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyvcop9Ntea +HNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFw +Oi8vZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xh +c3MlMjAzJTIwQ0ElMjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1E +RT9jZXJ0aWZpY2F0ZXJldm9jYXRpb25saXN0MEagRKBChkBodHRwOi8vd3d3LmQt 
+dHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xhc3NfM19jYV8yX2V2XzIwMDku +Y3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+PPoeUSbrh/Yp +3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNF +CSuGdXzfX2lXANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7na +xpeG0ILD5EJt/rDiZE4OJudANCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqX +KVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVvw9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEc +MBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2Vj +IFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENB +IDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5MjM1OTAwWjBxMQswCQYDVQQGEwJE +RTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxl +U2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290 +IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEU +ha88EOQ5bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhC +QN/Po7qCWWqSG6wcmtoIKyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1Mjwr +rFDa1sPeg5TKqAyZMg4ISFZbavva4VhYAUlfckE8FQYBjl2tqriTtM2e66foai1S +NNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aKSe5TBY8ZTNXeWHmb0moc +QqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTVjlsB9WoH +txa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAP +BgNVHRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOC +AQEAlGRZrTlk5ynrE/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756Abrsp +tJh6sTtU6zkXR34ajgv8HzFZMQSyzhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpa +IzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8rZ7/gFnkm0W09juwzTkZmDLl +6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4Gdyd1Lx+4ivn+ +xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU +Cm26OWMohpLzGITY+9HPBVZkVw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzExMTEwMDAwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl +cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7c +JpSIqvTO9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYP +mDI2dsze3Tyoou9q+yHyUmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+ +wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4 +VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpyoeb6pNnVFzF1roV9Iq4/ +AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whfGHdPAgMB +AAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBRF66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYun +pyGd823IDzANBgkqhkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRC +dWKuh+vy1dneVrOfzM4UKLkNl2BcEkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTf +fwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38FnSbNd67IJKusm7Xi+fT8r87cm +NW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i8b5QZ7dsvfPx +H2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBl +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJv +b3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNl 
+cnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSA +n61UQbVH35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4Htecc +biJVMWWXvdMX0h5i89vqbFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9Hp +EgjAALAcKxHad3A2m67OeYfcgnDmCXRwVWmvo2ifv922ebPynXApVfSr/5Vh88lA +bx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OPYLfykqGxvYmJHzDNw6Yu +YjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+RnlTGNAgMB +AAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQW +BBTOw0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPI +QW5pJ6d1Ee88hjZv0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I +0jJmwYrA8y8678Dj1JGG0VDjA9tzd29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4Gni +lmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAWhsI6yLETcDbYz+70CjTVW0z9 +B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0MjomZmWzwPDCv +ON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3Qg +RzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJf +Zn4f5dwbRXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17Q +RSAPWXYQ1qAk8C3eNvJsKTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ +BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgFUaFNN6KDec6NHSrkhDAKBggqhkjOPQQD +AwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5FyYZ5eEJJZVrmDxxDnOOlY +JjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy1vUhZscv +6pZjamVFkpUBtA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBD +QTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAwMDAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j +b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsB +CSDMAZOnTjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97 +nh6Vfe63SKMI2tavegw5BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt +43C/dxC//AH2hdmoRBBYMql1GNXRor5H4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7P +T19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y7vrTC0LUq7dBMtoM1O/4 +gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQABo2MwYTAO +BgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbR +TLtm8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUw +DQYJKoZIhvcNAQEFBQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/Esr +hMAtudXH/vTBH1jLuG2cenTnmCmrEbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg +06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIttep3Sp+dWOIrWcBAI+0tKIJF +PnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886UAb3LujEV0ls +YSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBh +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBH +MjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVT +MRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5j 
+b20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI +2/Ou8jqJkTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx +1x7e/dfgy5SDN67sH0NO3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQ +q2EGnI/yuum06ZIya7XzV+hdG82MHauVBJVJ8zUtluNJbd134/tJS7SsVQepj5Wz +tCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyMUNGPHgm+F6HmIcr9g+UQ +vIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQABo0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV +5uNu5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY +1Yl9PMWLSn/pvtsrF9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4 +NeF22d+mQrvHRAiGfzZ0JFrabA0UWTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NG +Fdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBHQRFXGU7Aj64GxJUTFy8bJZ91 +8rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/iyK5S9kJRaTe +pLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQsw +CQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cu +ZGlnaWNlcnQuY29tMSAwHgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAe +Fw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAwMDBaMGExCzAJBgNVBAYTAlVTMRUw +EwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5kaWdpY2VydC5jb20x +IDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0CAQYF +K4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FG +fp4tn+6OYwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPO +Z9wj/wMco+I+o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAd +BgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNpYim8S8YwCgYIKoZIzj0EAwMDaAAwZQIx +AK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y3maTD/HMsQmP3Wyr+mt/ +oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34VOKa5Vt8 +sycX +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBs +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSswKQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5j +ZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAwMFoXDTMxMTExMDAwMDAwMFowbDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZMBcGA1UECxMQd3d3 +LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFuY2Ug +RVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm ++9S75S0tMqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTW +PNt0OKRKzE0lgvdKpVMSOO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEM +xChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFB +Ik5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQNAQTXKFx01p8VdteZOE3 +hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUeh10aUAsg +EsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaA +FLE+w2kD+L9HAdSYJhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3Nec +nzyIZgYIVyHbIUf4KmeqvxgydkAQV8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6z +eM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFpmyPInngiK3BD41VHMWEZ71jF +hS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkKmNEVX58Svnw2 +Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep ++OkuE6N36B9K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBi +MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3Qg +RzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1MTIwMDAwWjBiMQswCQYDVQQGEwJV 
+UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQu +Y29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3y +ithZwuEppz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1If +xp4VpX6+n6lXFllVcq9ok3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDV +ySAdYyktzuxeTsiT+CFhmzTrBcZe7FsavOvJz82sNEBfsXpm7nfISKhmV1efVFiO +DCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGYQJB5w3jHtrHEtWoYOAMQ +jdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6MUSaM0C/ +CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCi +EhtmmnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADM +fRyVw4/3IbKyEbe7f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QY +uKZ3AeEPlAwhHbJUKSWJbOUOUlFHdL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXK +chYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8oR7FwI+isX4KJpn15GkvmB0t +9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +hjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2 +SV1EY+CtnJYYZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd ++SeuMIW59mdNOj6PWTkiU0TryF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWc +fFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy7zBZLq7gcfJW5GqXb5JQbZaNaHqa +sjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iahixTXTBmyUEFxPT9N +cCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN5r5N +0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie +4u1Ki7wb/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mI +r/OSmbaz5mEP0oUA51Aa5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1 +/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tKG48BtieVU+i2iW1bvGjUI+iLUaJW+fCm +gKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP82Z+ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBb +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3Qx +ETAPBgNVBAsTCERTVCBBQ0VTMRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0w +MzExMjAyMTE5NThaFw0xNzExMjAyMTE5NThaMFsxCzAJBgNVBAYTAlVTMSAwHgYD +VQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UECxMIRFNUIEFDRVMx +FzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPu +ktKe1jzIDZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7 +gLFViYsx+tC3dr5BPTCapCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZH +fAjIgrrep4c9oW24MFbCswKBXy314powGCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4a +ahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPyMjwmR/onJALJfh1biEIT +ajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rk +c3QuY29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjto +dHRwOi8vd3d3LnRydXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMt +aW5kZXguaHRtbDAdBgNVHQ4EFgQUCXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZI +hvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V25FYrnJmQ6AgwbN99Pe7lv7Uk +QIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6tFr8hlxCBPeP/ +h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq +nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpR +rscL9yuwNwXsvFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf2 +9w4LTJxoeHtxMcfrHuBnQfO3oKfN5XozNmr6mis= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/ +MSQwIgYDVQQKExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMT +DkRTVCBSb290IENBIFgzMB4XDTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVow +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD 
+Ew5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +AN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmTrE4O +rz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEq +OLl5CjH9UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9b +xiqKqy69cK3FCxolkHRyxXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw +7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40dutolucbY38EVAjqr2m7xPi71XAicPNaD +aeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQMA0GCSqG +SIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69 +ikugdB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXr +AvHRAosZy5Q6XkjEGB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZz +R8srzJmwN0jP41ZL9c8PDHIyh8bwRLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5 +JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubSfZGL+T0yjWW06XyxV3bqxbYo +Ob8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDOzCCAiOgAwIBAgIRANAeRlAAACmMAAAAAgAAAAIwDQYJKoZIhvcNAQEFBQAw +PzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QgQ28uMRcwFQYDVQQD +Ew5EU1QgUm9vdCBDQSBYNDAeFw0wMDA5MTMwNjIyNTBaFw0yMDA5MTMwNjIyNTBa +MD8xJDAiBgNVBAoTG0RpZ2l0YWwgU2lnbmF0dXJlIFRydXN0IENvLjEXMBUGA1UE +AxMORFNUIFJvb3QgQ0EgWDQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCthX3OFEYY8gSeIYur0O4ypOT68HnDrjLfIutL5PZHRwQGjzCPb9PFo/ihboJ8 +RvfGhBAqpQCo47zwYEhpWm1jB+L/OE/dBBiyn98krfU2NiBKSom2J58RBeAwHGEy +cO+lewyjVvbDDLUy4CheY059vfMjPAftCRXjqSZIolQb9FdPcAoa90mFwB7rKniE +J7vppdrUScSS0+eBrHSUPLdvwyn4RGp+lSwbWYcbg5EpSpE0GRJdchic0YDjvIoC +YHpe7Rkj93PYRTQyU4bhC88ck8tMqbvRYqMRqR+vobbkrj5LLCOQCHV5WEoxWh+0 +E2SpIFe7RkV++MmpIAc0h1tZAgMBAAGjMjAwMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFPCD6nPIP1ubWzdf9UyPWvf0hki9MA0GCSqGSIb3DQEBBQUAA4IBAQCE +G85wl5eEWd7adH6XW/ikGN5salvpq/Fix6yVTzE6CrhlP5LBdkf6kx1bSPL18M45 +g0rw2zA/MWOhJ3+S6U+BE0zPGCuu8YQaZibR7snm3HiHUaZNMu5c8D0x0bcMxDjY +AVVcHCoNiL53Q4PLW27nbY6wwG0ffFKmgV3blxrYWfuUDgGpyPwHwkfVFvz9qjaV +mf12VJffL6W8omBPtgteb6UaT/k1oJ7YI0ldGf+ngpVbRhD+LC3cUtT6GO/BEPZu +8YTV/hbiDH5v3khVqMIeKT6o8IuXGG7F6a6vKwP1F1FwTXf4UC/ivhme7vdUH7B/ +Vv4AEbT8dNfEeFxrkDbh +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBC +aWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNV +BAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQDDB9FLVR1 +Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMwNTEyMDk0OFoXDTIz +MDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmExQDA+ +BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhp +em1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA4vU/kwVRHoViVF56C/UY +B4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vdhQd2h8y/L5VMzH2nPbxH +D5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5KCKpbknSF +Q9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEo +q1+gElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3D +k14opz8n8Y4e0ypQBaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcH +fC425lAcP9tDJMW/hkd5s3kc91r0E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsut +dEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gzrt48Ue7LE3wBf4QOXVGUnhMM +ti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAqjqFGOjGY5RH8 +zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUX +U8u3Zg5mTPj5dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6 
+Jyr+zE7S6E5UMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5 +XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQAF +Nzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAKkEh47U6YA5n+KGCR +HTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jOXKqY +GwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c +77NCR807VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3 ++GbHeJAAFS6LrVE1Uweoa2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WK +vJUawSg5TB9D0pH0clmKuVb8P7Sd2nCcdlqMQ1DujjByTd//SffGqWfZbawCEeI6 +FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEVKV0jq9BgoRJP3vQXzTLl +yb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gTDx4JnW2P +AJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpD +y4Q08ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8d +NL/+I5c30jn6PQ0GC7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE5zCCA8+gAwIBAgIBADANBgkqhkiG9w0BAQUFADCBjTELMAkGA1UEBhMCQ0Ex +EDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoTFEVj +aG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNlcnZp +Y2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMjAeFw0wNTEwMDYxMDQ5MTNa +Fw0zMDEwMDcxMDQ5MTNaMIGNMQswCQYDVQQGEwJDQTEQMA4GA1UECBMHT250YXJp +bzEQMA4GA1UEBxMHVG9yb250bzEdMBsGA1UEChMURWNob3dvcnggQ29ycG9yYXRp +b24xHzAdBgNVBAsTFkNlcnRpZmljYXRpb24gU2VydmljZXMxGjAYBgNVBAMTEUVj +aG93b3J4IFJvb3QgQ0EyMIIBIDANBgkqhkiG9w0BAQEFAAOCAQ0AMIIBCAKCAQEA +utU/5BkV15UBf+s+JQruKQxr77s3rjp/RpOtmhHILIiO5gsEWP8MMrfrVEiidjI6 +Qh6ans0KAWc2Dw0/j4qKAQzOSyAZgjcdypNTBZ7muv212DA2Pu41rXqwMrlBrVi/ +KTghfdLlNRu6JrC5y8HarrnRFSKF1Thbzz921kLDRoCi+FVs5eVuK5LvIfkhNAqA +byrTgO3T9zfZgk8upmEkANPDL1+8y7dGPB/d6lk0I5mv8PESKX02TlvwgRSIiTHR +k8++iOPLBWlGp7ZfqTEXkPUZhgrQQvxcrwCUo6mk8TqgxCDP5FgPoHFiPLef5szP +ZLBJDWp7GLyE1PmkQI6WiwIBA6OCAVAwggFMMA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBQ74YEboKs/OyGC1eISrq5QqxSlEzCBugYDVR0j +BIGyMIGvgBQ74YEboKs/OyGC1eISrq5QqxSlE6GBk6SBkDCBjTELMAkGA1UEBhMC +Q0ExEDAOBgNVBAgTB09udGFyaW8xEDAOBgNVBAcTB1Rvcm9udG8xHTAbBgNVBAoT +FEVjaG93b3J4IENvcnBvcmF0aW9uMR8wHQYDVQQLExZDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzMRowGAYDVQQDExFFY2hvd29yeCBSb290IENBMoIBADBQBgNVHSAESTBH +MEUGCysGAQQB+REKAQMBMDYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuZWNob3dv +cnguY29tL2NhL3Jvb3QyL2Nwcy5wZGYwDQYJKoZIhvcNAQEFBQADggEBAG+nrPi/ +0RpfEzrj02C6JGPUar4nbjIhcY6N7DWNeqBoUulBSIH/PYGNHYx7/lnJefiixPGE +7TQ5xPgElxb9bK8zoAApO7U33OubqZ7M7DlHnFeCoOoIAZnG1kuwKwD5CXKB2a74 +HzcqNnFW0IsBFCYqrVh/rQgJOzDA8POGbH0DeD0xjwBBooAolkKT+7ZItJF1Pb56 +QpDL9G+16F7GkmnKlAIYT3QTS3yFGYChnJcd+6txUPhKi9sSOOmAIaKHnkH9Scz+ +A2cSi4A3wUYXVatuVNHpRb2lygfH3SuCX9MU8Ure3zBlSU1LALtMqI4JmcQmQpIq +zIzvO2jHyu9PQqo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1 +MQswCQYDVQQGEwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1 +czEoMCYGA1UEAwwfRUUgQ2VydGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYG +CSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIwMTAxMDMwMTAxMDMwWhgPMjAzMDEy +MTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlBUyBTZXJ0aWZpdHNl +ZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRyZSBS +b290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEB +AQUAA4IBDwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUy +euuOF0+W2Ap7kaJjbMeMTC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvO +bntl8jixwKIy72KyaOBhU8E2lf/slLo2rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIw +WFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw93X2PaRka9ZP585ArQ/d +MtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtNP2MbRMNE +1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYD 
+VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/ +zQas8fElyalL1BSZMEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYB +BQUHAwMGCCsGAQUFBwMEBggrBgEFBQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEF +BQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+RjxY6hUFaTlrg4wCQiZrxTFGGV +v9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqMlIpPnTX/dqQG +E5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u +uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIW +iAYLtqZLICjU3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/v +GVCJYMzpJJUPwssd8m92kMfMdcGWxZ0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkG +A1UEBhMCVVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3 +d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVu +dHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEzMDEGA1UEAxMq +RW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRUMxMB4XDTEy +MTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYwFAYD +VQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0g +Zm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBD +ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEVDMTB2MBAGByqGSM49AgEGBSuBBAAi +A2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHyAsWfoPZb1YsGGYZPUxBt +ByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef9eNi1KlH +Bz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVC +R98crlOZF7ZvHH3hvxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nX +hTcGtXsI/esni0qU+eH6p44mCOh8kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50 +cnVzdC5uZXQvbGVnYWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3Qs +IEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVz +dCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwHhcNMDkwNzA3MTcy +NTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUVu +dHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwt +dGVybXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0 +aG9yaXplZCB1c2Ugb25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5IC0gRzIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP/vaCeb9zYQYKpSfYs1/T +RU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXzHHfV1IWN +cCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hW +wcKUs/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1 +U1+cPvQXLOZprE4yTGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0 +jaWvYkxN4FisZDQSA/i2jZRjJKRxAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ60B7vfec7aVHUbI2fkBJmqzAN +BgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5ZiXMRrEPR9RP/ +jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v +1fN2D807iDginWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4R +nAuknZoh8/CbCzB428Hch0P+vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmH +VHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xOe4pIb4tF9g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMC +VVMxFjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0 +Lm5ldC9DUFMgaXMgaW5jb3Jwb3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMW +KGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsGA1UEAxMkRW50cnVzdCBSb290IENl 
+cnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0MloXDTI2MTEyNzIw +NTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMTkw +NwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSBy +ZWZlcmVuY2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNV +BAMTJEVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBALaVtkNC+sZtKm9I35RMOVcF7sN5EUFo +Nu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYszA9u3g3s+IIRe7bJWKKf4 +4LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOwwCj0Yzfv9 +KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGI +rb68j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi +94DkZfs0Nw4pgHBNrziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOB +sDCBrTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAi +gA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1MzQyWjAfBgNVHSMEGDAWgBRo +kORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DHhmak8fdLQ/uE +vW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9t +O1KzKtvn1ISMY/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6Zua +AGAT/3B+XxFNSRuzFVJ7yVTav52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP +9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTSW3iDVuycNsMm4hH2Z0kdkquM++v/ +eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0tHuu2guQOHXvgR1m +0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXDCCA0SgAwIBAgIEOGO5ZjANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0xOTEy +MjQxODIwNTFaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG +A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo3QwcjARBglghkgBhvhCAQEEBAMCAAcwHwYDVR0jBBgwFoAUVeSB0RGA +vtiJuQijMfmhJAkWuXAwHQYDVR0OBBYEFFXkgdERgL7YibkIozH5oSQJFrlwMB0G +CSqGSIb2fQdBAAQQMA4bCFY1LjA6NC4wAwIEkDANBgkqhkiG9w0BAQUFAAOCAQEA +WUesIYSKF8mciVMeuoCFGsY8Tj6xnLZ8xpJdGGQC49MGCBFhfGPjK50xA3B20qMo +oPS7mmNz7W3lKtvtFKkrxjYR0CvrB4ul2p5cGZ1WEvVUKcgF7bISKo30Axv/55IQ +h7A6tcOdBTcSo8f0FbnVpDkWm1M6I5HxqIKiaohowXkCIryqptau37AUX7iH0N18 +f3v/rxzP5tsHrV7bhZ3QKw0z2wTR5klAEyt2+z7pnIkPFc4YsIV4IU9rTw76NmfN +B/L/CNDi3tm/Kq+4h4YhPATKt5Rof8886ZjXOP/swNlQ8C5LWK5Gb9Auw2DaclVy +vUxFnmG6v4SBkgPR0ml8xQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChML +RW50cnVzdC5uZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBp +bmNvcnAuIGJ5IHJlZi4gKGxpbWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5 +IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNVBAMTKkVudHJ1c3QubmV0IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQxNzUwNTFaFw0yOTA3 +MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3d3d3 +LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxp +YWIuKTElMCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEG 
+A1UEAxMqRW50cnVzdC5uZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgp +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArU1LqRKGsuqjIAcVFmQq +K0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOLGp18EzoOH1u3Hs/lJBQe +sYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSrhRSGlVuX +MlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVT +XTzWnLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/ +HoZdenoVve8AjhUiVBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH +4QIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJKoZIhvcNAQEFBQADggEBADub +j1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPyT/4xmf3IDExo +U8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5b +u/8j72gZyxKTJ1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+ +bYQLCIt+jerXmCHG8+c8eS9enNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/Er +fF6adulZkMV8gzURZVE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBe +MQswCQYDVQQGEwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0 +ZC4xKjAoBgNVBAsMIWVQS0kgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMxMjdaMF4xCzAJBgNVBAYTAlRXMSMw +IQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEqMCgGA1UECwwhZVBL +SSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEF +AAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAH +SyZbCUNsIZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAh +ijHyl3SJCRImHJ7K2RKilTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3X +DZoTM1PRYfl61dd4s5oz9wCGzh1NlDivqOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1 +TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX12ruOzjjK9SXDrkb5wdJ +fzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0OWQqraffA +sgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uU +WH1+ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLS +nT0IFaUQAS2zMnaolQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pH +dmX2Os+PYhcZewoozRrSgx4hxyy/vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJip +NiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXiZo1jDiVN1Rmy5nk3pyKdVDEC +AwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/QkqiMAwGA1UdEwQF +MAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGB +uvl2ICO1J2B01GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6Yl +PwZpVnPDimZI+ymBV3QGypzqKOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkP +JXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdVxrsStZf0X4OFunHB2WyBEXYKCrC/ +gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEPNXubrjlpC2JgQCA2 +j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+rGNm6 +5ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUB +o2M3IUxExJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS +/jQ6fbjpKdx2qcgw+BRxgMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2z +Gp1iro2C6pSe3VkQw63d4k3jMdXH7OjysP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTE +W9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmODBCEIZ43ygknQW/2xzQ+D +hNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEYDCCA0igAwIBAgICATAwDQYJKoZIhvcNAQELBQAwWTELMAkGA1UEBhMCVVMx +GDAWBgNVBAoTD1UuUy4gR292ZXJubWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UE +AxMYRmVkZXJhbCBDb21tb24gUG9saWN5IENBMB4XDTEwMTIwMTE2NDUyN1oXDTMw +MTIwMTE2NDUyN1owWTELMAkGA1UEBhMCVVMxGDAWBgNVBAoTD1UuUy4gR292ZXJu +bWVudDENMAsGA1UECxMERlBLSTEhMB8GA1UEAxMYRmVkZXJhbCBDb21tb24gUG9s +aWN5IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2HX7NRY0WkG/ +Wq9cMAQUHK14RLXqJup1YcfNNnn4fNi9KVFmWSHjeavUeL6wLbCh1bI1FiPQzB6+ 
+Duir3MPJ1hLXp3JoGDG4FyKyPn66CG3G/dFYLGmgA/Aqo/Y/ISU937cyxY4nsyOl +4FKzXZbpsLjFxZ+7xaBugkC7xScFNknWJidpDDSPzyd6KgqjQV+NHQOGgxXgVcHF +mCye7Bpy3EjBPvmE0oSCwRvDdDa3ucc2Mnr4MrbQNq4iGDGMUHMhnv6DOzCIJOPp +wX7e7ZjHH5IQip9bYi+dpLzVhW86/clTpyBLqtsgqyFOHQ1O5piF5asRR12dP8Qj +wOMUBm7+nQIDAQABo4IBMDCCASwwDwYDVR0TAQH/BAUwAwEB/zCB6QYIKwYBBQUH +AQsEgdwwgdkwPwYIKwYBBQUHMAWGM2h0dHA6Ly9odHRwLmZwa2kuZ292L2ZjcGNh +L2NhQ2VydHNJc3N1ZWRCeWZjcGNhLnA3YzCBlQYIKwYBBQUHMAWGgYhsZGFwOi8v +bGRhcC5mcGtpLmdvdi9jbj1GZWRlcmFsJTIwQ29tbW9uJTIwUG9saWN5JTIwQ0Es +b3U9RlBLSSxvPVUuUy4lMjBHb3Zlcm5tZW50LGM9VVM/Y0FDZXJ0aWZpY2F0ZTti +aW5hcnksY3Jvc3NDZXJ0aWZpY2F0ZVBhaXI7YmluYXJ5MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUrQx6dVzl85jEeZgOrCj9l/TnAvwwDQYJKoZIhvcNAQELBQAD +ggEBAI9z2uF/gLGH9uwsz9GEYx728Yi3mvIRte9UrYpuGDco71wb5O9Qt2wmGCMi +TR0mRyDpCZzicGJxqxHPkYnos/UqoEfAFMtOQsHdDA4b8Idb7OV316rgVNdF9IU+ +7LQd3nyKf1tNnJaK0KIyn9psMQz4pO9+c+iR3Ah6cFqgr2KBWfgAdKLI3VTKQVZH +venAT+0g3eOlCd+uKML80cgX2BLHb94u6b2akfI8WpQukSKAiaGMWMyDeiYZdQKl +Dn0KJnNR6obLB6jI/WNaNZvSr79PMUjBhHDbNXuaGQ/lj/RqDG8z2esccKIN47lQ +A2EC/0rskqTcLe4qNJMHtyznGI8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDVDCCAjygAwIBAgIDAjRWMA0GCSqGSIb3DQEBBQUAMEIxCzAJBgNVBAYTAlVT +MRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMRswGQYDVQQDExJHZW9UcnVzdCBHbG9i +YWwgQ0EwHhcNMDIwNTIxMDQwMDAwWhcNMjIwNTIxMDQwMDAwWjBCMQswCQYDVQQG +EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjEbMBkGA1UEAxMSR2VvVHJ1c3Qg +R2xvYmFsIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2swYYzD9 +9BcjGlZ+W988bDjkcbd4kdS8odhM+KhDtgPpTSEHCIjaWC9mOSm9BXiLnTjoBbdq +fnGk5sRgprDvgOSJKA+eJdbtg/OtppHHmMlCGDUUna2YRpIuT8rxh0PBFpVXLVDv +iS2Aelet8u5fa9IAjbkU+BQVNdnARqN7csiRv8lVK83Qlz6cJmTM386DGXHKTubU +1XupGc1V3sjs0l44U+VcT4wt/lAjNvxm5suOpDkZALeVAjmRCw7+OC7RHQWa9k0+ +bw8HHa8sHo9gOeL6NlMTOdReJivbPagUvTLrGAMoUgRx5aszPeE4uwc2hGKceeoW +MPRfwCvocWvk+QIDAQABo1MwUTAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTA +ephojYn7qwVkDBF9qn1luMrMTjAfBgNVHSMEGDAWgBTAephojYn7qwVkDBF9qn1l +uMrMTjANBgkqhkiG9w0BAQUFAAOCAQEANeMpauUvXVSOKVCUn5kaFOSPeCpilKIn +Z57QzxpeR+nBsqTP3UEaBU6bS+5Kb1VSsyShNwrrZHYqLizz/Tt1kL/6cdjHPTfS +tQWVYrmm3ok9Nns4d0iXrKYgjy6myQzCsplFAMfOEVEiIuCl6rYVSAlk6l5PdPcF +PseKUgzbFbS9bZvlxrFUaKnjaZC2mqUPuLk/IH2uSrW4nOQdtqvmlKXBx4Ot2/Un +hw4EbNX/3aBd7YdStysVAq45pmp06drE57xNNB6pXE0zX5IJL4hmXXeXxx12E6nV +5fEWCRE11azbJHFwLJhWC9kXtNHjUStedejV0NxPNO3CBWaAocvmMw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDEL +MAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChj +KSAyMDA3IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2 +MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykgMjAw +NyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNV +BAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBH +MjB2MBAGByqGSM49AgEGBSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcL +So17VDs6bl8VAsBQps8lL33KSLjHUGMcKiEIfJo22Av+0SbFWDEwKCXzXV2juLal +tJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+EVXVMAoG +CCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGT +qQ7mndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBucz +rD6ogRLQy7rQkgu2npaqBA+K +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCB +mDELMAkGA1UEBhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsT 
+MChjKSAyMDA4IEdlb1RydXN0IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIzNTk1OVowgZgxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg +MjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0 +BgNVBAMTLUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz ++uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5jK/BGvESyiaHAKAxJcCGVn2TAppMSAmUm +hsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdEc5IiaacDiGydY8hS2pgn +5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3CIShwiP/W +JmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exAL +DmKudlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZC +huOl1UcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFMR5yo6hTgMdHNxr2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IB +AQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9cr5HqQ6XErhK8WTTOd8lNNTB +zU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbEAp7aDHdlDkQN +kv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD +AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUH +SJsMC8tJP33st/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2G +spki4cErx5z481+oghLrGREt +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBY +MQswCQYDVQQGEwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMo +R2VvVHJ1c3QgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjEx +MjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgxCzAJBgNVBAYTAlVTMRYwFAYDVQQK +Ew1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQcmltYXJ5IENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9 +AWbK7hWNb6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjA +ZIVcFU2Ix7e64HXprQU9nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE0 +7e9GceBrAqg1cmuXm2bgyxx5X9gaBGgeRwLmnWDiNpcB3841kt++Z8dtd1k7j53W +kBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGttm/81w7a4DSwDRp35+MI +mO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4G +A1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJ +KoZIhvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ1 +6CePbJC/kRYkRj5KTs4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl +4b7UVXGYNTq+k+qurUKykG/g/CFNNWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6K +oKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHaFloxt/m0cYASSJlyc1pZU8Fj +UjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG1riR/aYNKxoU +AT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYD +VQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0 +IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3 +MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD +aGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMxNDBaFw0zODA3MzEx +MjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUgY3Vy +cmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAG +A1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAl +BgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAMDfVtPkOpt2RbQT2//BthmLN0EYlVJH6xed +KYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXfXjaOcNFccUMd2drvXNL7 +G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0ZJJ0YPP2 +zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4 +ddPB/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyG 
+HoiMvvKRhI9lNNgATH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2 +Id3UwD2ln58fQ1DJu7xsepeY7s2MH/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3V +yJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfeOx2YItaswTXbo6Al/3K1dh3e +beksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSFHTynyQbehP9r +6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh +wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsog +zCtLkykPAgMBAAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQW +BBS5CcqcHtvTbDprru1U8VuTBjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDpr +ru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UEBhMCRVUxQzBBBgNVBAcTOk1hZHJp +ZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJmaXJtYS5jb20vYWRk +cmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJmaXJt +YSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiC +CQDJzdPp1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCow +KAYIKwYBBQUHAgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZI +hvcNAQEFBQADggIBAICIf3DekijZBZRG/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZ +UohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6ReAJ3spED8IXDneRRXoz +X1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/sdZ7LoR/x +fxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVz +a2Mg9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yyd +Yhz2rXzdpjEetrHHfoUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMd +SqlapskD7+3056huirRXhOukP9DuqqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9O +AP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETrP3iZ8ntxPjzxmKfFGBI/5rso +M0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVqc5iJWzouE4ge +v8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z +09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEn +MCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQL +ExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENo +YW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYxNDE4WhcNMzcwOTMwMTYxNDE4WjB9 +MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgy +NzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEgMB4G +A1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUA +A4IBDQAwggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0 +Mi+ITaFgCPS3CU6gSS9J1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/s +QJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8Oby4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpV +eAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl6DJWk0aJqCWKZQbua795 +B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c8lCrEqWh +z0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0T +AQH/BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1i +ZXJzaWduLm9yZy9jaGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4w +TcbOX60Qq+UDpfqpFDAOBgNVHQ8BAf8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAH +MCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBjaGFtYmVyc2lnbi5vcmcwKgYD +VR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9yZzBbBgNVHSAE +VDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh +bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0B +AQUFAAOCAQEAPDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUM +bKGKfKX0j//U2K0X1S0E0T9YgOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXi +ryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJPJ7oKXqJ1/6v/2j1pReQvayZzKWG +VwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4IBHNfTIzSJRUTN3c +ecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREest2d/ +AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkG 
+A1UEBhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jv +b3QgQ0ExGzAZBgNVBAMTEkdsb2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAw +MDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNVBAYTAkJFMRkwFwYDVQQKExBHbG9i +YWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYDVQQDExJHbG9iYWxT +aWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaDuaZ +jc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavp +xy0Sy6scTHAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp +1Wrjsok6Vjk4bwY8iGlbKk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdG +snUOhugZitVtbNV4FpWi6cgKOOvyJBNPc1STE4U6G7weNLWLBYy5d4ux2x8gkasJ +U26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrXgzT/LCrBbBlDSgeF59N8 +9iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0B +AQUFAAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOz +yj1hTdNGCbM+w6DjY1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE +38NflNUVyRRBnMRddWQVDf9VMOyGj/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymP +AbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhHhm4qxFYxldBniYUr+WymXUad +DKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveCX4XSQRjbgbME +HMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIB4TCCAYegAwIBAgIRKjikHJYKBN5CsiilC+g0mAIwCgYIKoZIzj0EAwIwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI0MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI0MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEuMZ5049sJQ6fLjkZHAOkrprlOQcJ +FspjsbmG+IpXwVfOQvpzofdlQv8ewQCybnMO/8ch5RikqtlxP6jUuc6MHaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFFSwe61F +uOJAf/sKbvu+M8k8o4TVMAoGCCqGSM49BAMCA0gAMEUCIQDckqGgE6bPA7DmxCGX +kPoUVy0D7O48027KqGx2vKLeuwIgJ6iFJzWbVsaj8kfSt24bAgAXqmemFZHe+pTs +ewv4n4Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEk +MCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpH +bG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoX +DTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMbR2xvYmFsU2lnbiBFQ0MgUm9vdCBD +QSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQDEwpHbG9iYWxTaWdu +MHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6SFkc +8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8ke +hOvRnkmSh5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYI +KoZIzj0EAwMDaAAwZQIxAOVpEslu28YxuglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg +515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7yFz9SO8NdCKoCOJuxUnO +xwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDujCCAqKgAwIBAgILBAAAAAABD4Ym5g0wDQYJKoZIhvcNAQEFBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjIxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDYxMjE1MDgwMDAwWhcNMjExMjE1 +MDgwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMjETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAKbPJA6+Lm8omUVCxKs+IVSbC9N/hHD6ErPL +v4dfxn+G07IwXNb9rfF73OX4YJYJkhD10FPe+3t+c4isUoh7SqbKSaZeqKeMWhG8 +eoLrvozps6yWJQeXSpkqBy+0Hne/ig+1AnwblrjFuTosvNYSuetZfeLQBoZfXklq +tTleiDTsvHgMCJiEbKjNS7SgfQx5TfC4LcshytVsW33hoCmEofnTlEnLJGKRILzd +C9XZzPnqJworc5HGnRusyMvo4KD0L5CLTfuwNhv2GXqF4G3yYROIXJ/gkwpRl4pa +zq+r1feqCapgvdzZX99yqWATXgAByUr6P6TqBwMhAo6CygPCm48CAwEAAaOBnDCB +mTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUm+IH 
+V2ccHsBqBt5ZtJot39wZhi4wNgYDVR0fBC8wLTAroCmgJ4YlaHR0cDovL2NybC5n +bG9iYWxzaWduLm5ldC9yb290LXIyLmNybDAfBgNVHSMEGDAWgBSb4gdXZxwewGoG +3lm0mi3f3BmGLjANBgkqhkiG9w0BAQUFAAOCAQEAmYFThxxol4aR7OBKuEQLq4Gs +J0/WwbgcQ3izDJr86iw8bmEbTUsp9Z8FHSbBuOmDAGJFtqkIk7mpM0sYmsL4h4hO +291xNBrBVNpGP+DTKqttVCL1OmLNIG+6KYnX3ZHu01yiPqFbQfXf5WRDLenVOavS +ot+3i9DAgBkcRcAtjOj4LaR0VknFBbVPFd5uRHg5h6h+u/N5GJG79G+dwfCMNYxd +AfvDbbnvRG15RjF+Cv6pgsH/76tuIMRQyV+dTZsXjAzlAcmgQWpzU/qlULRuJQ/7 +TBj0/VLZjmmx6BEP3ojY+x1J96relc8geMJgEtslQIxq/H5COEBkEveegeGTLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4G +A1UECxMXR2xvYmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNp +Z24xEzARBgNVBAMTCkdsb2JhbFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4 +MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxTaWduIFJvb3QgQ0EgLSBSMzETMBEG +A1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2lnbjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWtiHL8 +RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsT +gHeMCOFJ0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmm +KPZpO/bLyCiR5Z2KYVc3rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zd +QQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjlOCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZ +XriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2xmmFghcCAwEAAaNCMEAw +DgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFI/wS3+o +LkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZU +RUm7lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMp +jjM5RcOO5LlXbKr8EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK +6fBdRoyV3XpYKBovHd7NADdBj+1EbddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQX +mcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18YIvDQVETI53O9zJrlAGomecs +Mx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7rkpeDMdmztcpH +WD9f +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEh +MB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBE +YWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3 +MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkGA1UEBhMCVVMxITAfBgNVBAoTGFRo +ZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28gRGFkZHkgQ2xhc3Mg +MiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQADggEN +ADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCA +PVYYYwhv2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6w +wdhFJ2+qN1j3hybX2C32qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXi +EqITLdiOr18SPaAIBQi2XKVlOARFmR6jYGB0xUGlcmIbYsUfb18aQr4CUWWoriMY +avx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmYvLEHZ6IVDd2gWMZEewo+ +YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0OBBYEFNLE +sNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h +/t2oatTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5 +IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmlj +YXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD +ggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wimPQoZ+YeAEW5p5JYXMP80kWNy +OO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKtI3lpjbi2Tc7P +TMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mER +dEr/VxqHD3VILs9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5Cuf +ReYNnyicsbkqWletNw+vHX/bvZ8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoT +EUdvRGFkZHkuY29tLCBJbmMuMTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRp 
+ZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIz +NTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQH +EwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8GA1UE +AxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKD +E6bFIEMBO4Tx5oVJnyfq9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH +/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD+qK+ihVqf94Lw7YZFAXK6sOoBJQ7Rnwy +DfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutdfMh8+7ArU6SSYmlRJQVh +GkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMlNAJWJwGR +tDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEA +AaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FDqahQcQZyi27/a9BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmX +WWcDYfF+OwYxdS2hII5PZYe096acvNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu +9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r5N9ss4UXnT3ZJE95kTXWXwTr +gIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYVN8Gb5DKj7Tjo +2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI +4uJEvlz36hz1 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFSzCCAzOgAwIBAgIRALZLiAfiI+7IXBKtpg4GofIwDQYJKoZIhvcNAQELBQAw +PzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dvdmVybm1lbnQgUm9vdCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xMjA5MjgwODU4NTFaFw0zNzEyMzExNTU5NTla +MD8xCzAJBgNVBAYTAlRXMTAwLgYDVQQKDCdHb3Zlcm5tZW50IFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC +AQC2/5c8gb4BWCQnr44BK9ZykjAyG1+bfNTUf+ihYHMwVxAA+lCWJP5Q5ow6ldFX +eYTVZ1MMKoI+GFy4MCYa1l7GLbIEUQ7v3wxjR+vEEghRK5lxXtVpe+FdyXcdIOxW +juVhYC386RyA3/pqg7sFtR4jEpyCygrzFB0g5AaPQySZn7YKk1pzGxY5vgW28Yyl +ZJKPBeRcdvc5w88tvQ7Yy6gOMZvJRg9nU0MEj8iyyIOAX7ryD6uBNaIgIZfOD4k0 +eA/PH07p+4woPN405+2f0mb1xcoxeNLOUNFggmOd4Ez3B66DNJ1JSUPUfr0t4urH +cWWACOQ2nnlwCjyHKenkkpTqBpIpJ3jmrdc96QoLXvTg1oadLXLLi2RW5vSueKWg +OTNYPNyoj420ai39iHPplVBzBN8RiD5C1gJ0+yzEb7xs1uCAb9GGpTJXA9ZN9E4K +mSJ2fkpAgvjJ5E7LUy3Hsbbi08J1J265DnGyNPy/HE7CPfg26QrMWJqhGIZO4uGq +s3NZbl6dtMIIr69c/aQCb/+4DbvVq9dunxpPkUDwH0ZVbaCSw4nNt7H/HLPLo5wK +4/7NqrwB7N1UypHdTxOHpPaY7/1J1lcqPKZc9mA3v9g+fk5oKiMyOr5u5CI9ByTP +isubXVGzMNJxbc5Gim18SjNE2hIvNkvy6fFRCW3bapcOFwIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBTVZx3gnHosnMvFmOcdByYqhux0zTAOBgNV +HQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAJA75cJTQijq9TFOjj2Rnk0J +89ixUuZPrAwxIbvx6pnMg/y2KOTshAcOD06Xu29oRo8OURWV+Do7H1+CDgxxDryR +T64zLiNB9CZrTxOH+nj2LsIPkQWXqmrBap+8hJ4IKifd2ocXhuGzyl3tOKkpboTe +Rmv8JxlQpRJ6jH1i/NrnzLyfSa8GuCcn8on3Fj0Y5r3e9YwSkZ/jBI3+BxQaWqw5 +ghvxOBnhY+OvbLamURfr+kvriyL2l/4QOl+UoEtTcT9a4RD4co+WgN2NApgAYT2N +vC2xR8zaXeEgp4wxXPHj2rkKhkfIoT0Hozymc26Uke1uJDr5yTDRB6iBfSZ9fYTf +hsmL5a4NHr6JSFEVg5iWL0rrczTXdM3Jb9DCuiv2mv6Z3WAUjhv5nDk8f0OJU+jl +wqu+Iq0nOJt3KLejY2OngeepaUXrjnhWzAWEx/uttjB8YwWfLYwkf0uLkvw4Hp+g +pVezbp3YZLhwmmBScMip0P/GnO0QYV7Ngw5u6E0CQUridgR51lQ/ipgyFKDdLZzn +uoJxo4ZVKZnSKdt1OvfbQ/+2W/u3fjWAjg1srnm3Ni2XUqGwB5wH5Ss2zQOXlL0t +DjQG/MAWifw3VOTWzz0TBPKR2ck2Lj7FWtClTILD/y58Jnb38/1FoqVuVa4uzM8s +iTTa9g3nkagQ6hed8vbs +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1Ix +RDBCBgNVBAoTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1 +dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1p +YyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIFJvb3RDQSAyMDExMB4XDTExMTIw +NjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYTAkdSMUQwQgYDVQQK +EztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIENl 
+cnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJl +c2VhcmNoIEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEB +BQADggEPADCCAQoCggEBAKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPz +dYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJ +fel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa71HFK9+WXesyHgLacEns +bgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u8yBRQlqD +75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSP +FEDH3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNV +HRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp +5dgTBCPuQSUwRwYDVR0eBEAwPqA8MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQu +b3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQub3JnMA0GCSqGSIb3DQEBBQUA +A4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVtXdMiKahsog2p +6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 +TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7 +dIsXRSZMFpGD/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8Acys +Nnq/onN694/BtZqhFLKPM58N7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXI +l7WdmplNsDz4SgCbZN2fOUvRJ9e4 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsx +FjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3Qg +Um9vdCBDQSAxMB4XDTAzMDUxNTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkG +A1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdr +b25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC +AQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1ApzQ +jVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEn +PzlTCeqrauh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjh +ZY4bXSNmO7ilMlHIhqqhqZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9 +nnV0ttgCXjqQesBCNnLsak3c78QA3xMYV18meMjWCnl3v/evt3a5pQuEF10Q6m/h +q5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNVHRMBAf8ECDAGAQH/AgED +MA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7ih9legYsC +mEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI3 +7piol7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clB +oiMBdDhViw+5LmeiIAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJs +EhTkYY2sEJCehFC78JZvRZ+K88psT/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpO +fMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilTc4afU9hDDl3WY4JxHYB0yvbi +AmvZWg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFHjCCBAagAwIBAgIEAKA3oDANBgkqhkiG9w0BAQsFADCBtzELMAkGA1UEBhMC +Q1oxOjA4BgNVBAMMMUkuQ0EgLSBRdWFsaWZpZWQgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHksIDA5LzIwMDkxLTArBgNVBAoMJFBydm7DrSBjZXJ0aWZpa2HEjW7DrSBh +dXRvcml0YSwgYS5zLjE9MDsGA1UECww0SS5DQSAtIEFjY3JlZGl0ZWQgUHJvdmlk +ZXIgb2YgQ2VydGlmaWNhdGlvbiBTZXJ2aWNlczAeFw0wOTA5MDEwMDAwMDBaFw0x +OTA5MDEwMDAwMDBaMIG3MQswCQYDVQQGEwJDWjE6MDgGA1UEAwwxSS5DQSAtIFF1 +YWxpZmllZCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSwgMDkvMjAwOTEtMCsGA1UE +CgwkUHJ2bsOtIGNlcnRpZmlrYcSNbsOtIGF1dG9yaXRhLCBhLnMuMT0wOwYDVQQL +DDRJLkNBIC0gQWNjcmVkaXRlZCBQcm92aWRlciBvZiBDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtTaEy0KC8M9l +4lSaWHMs4+sVV1LwzyJYiIQNeCrv1HHm/YpGIdY/Z640ceankjQvIX7m23BK4OSC +6KO8kZYA3zopOz6GFCOKV2PvLukbc+c2imF6kLHEv6qNA8WxhPbR3xKwlHDwB2yh +Wzo7V3QVgDRG83sugqQntKYC3LnlTGbJpNP+Az72gpO9AHUn/IBhFk4ksc8lYS2L +9GCy9CsmdKSBP78p9w8Lx7vDLqkDgt1/zBrcUWmSSb7AE/BPEeMryQV1IdI6nlGn +BhWkXOYf6GSdayJw86btuxC7viDKNrbp44HjQRaSxnp6O3eto1x4DfiYdw/YbJFe +7EjkxSQBywIDAQABo4IBLjCCASowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwgecGA1UdIASB3zCB3DCB2QYEVR0gADCB0DCBzQYIKwYBBQUHAgIwgcAa +gb1UZW50byBjZXJ0aWZpa2F0IGplIHZ5ZGFuIGpha28ga3ZhbGlmaWtvdmFueSBz 
+eXN0ZW1vdnkgY2VydGlmaWthdCBwb2RsZSB6YWtvbmEgYy4gMjI3LzIwMDAgU2Iu +IHYgcGxhdG5lbSB6bmVuaS9UaGlzIGlzIHF1YWxpZmllZCBzeXN0ZW0gY2VydGlm +aWNhdGUgYWNjb3JkaW5nIHRvIEN6ZWNoIEFjdCBOby4gMjI3LzIwMDAgQ29sbC4w +HQYDVR0OBBYEFHnL0CPpOmdwkXRP01Hi4CD94Sj7MA0GCSqGSIb3DQEBCwUAA4IB +AQB9laU214hYaBHPZftbDS/2dIGLWdmdSbj1OZbJ8LIPBMxYjPoEMqzAR74tw96T +i6aWRa5WdOWaS6I/qibEKFZhJAVXX5mkx2ewGFLJ+0Go+eTxnjLOnhVF2V2s+57b +m8c8j6/bS6Ij6DspcHEYpfjjh64hE2r0aSpZDjGzKFM6YpqsCJN8qYe2X1qmGMLQ +wvNdjG+nPzCJOOuUEypIWt555ZDLXqS5F7ZjBjlfyDZjEfS2Es9Idok8alf563Mi +9/o+Ba46wMYOkk3P1IlU0RqCajdbliioACKDztAqubONU1guZVzV8tuMASVzbJeL +/GAB7ECTwe1RuKrLYtglMKI9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBK +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVu +VHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQw +MTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MScw +JQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENBIDEwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ldhNlT +3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU ++ehcCuz/mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gp +S0l4PJNgiCL8mdo2yMKi1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1 +bVoE/c40yiTcdCMbXTMTEl3EASX2MN0CXZ/g1Ue9tOsbobtJSdifWwLziuQkkORi +T0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl3ZBWzvurpWCdxJ35UrCL +vYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzyNeVJSQjK +Vsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZK +dHzVWYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHT +c+XvvqDtMwt0viAgxGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hv +l7yTmvmcEpB4eoCHFddydJxVdHixuuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5N +iGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZIhvcNAQELBQAD +ggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwt +LRvM7Kqas6pgghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93 +nAbowacYXVKV7cndJZ5t+qntozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3 ++wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmVYjzlVYA211QC//G5Xc7UI2/YRYRK +W2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUXfeu+h1sXIFRRk0pT +AwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/rokTLq +l1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG +4iZZRHUe2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZ +mUlO+KWA2yUPHGNiiskzZ2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A +7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7RcGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBN +MQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVu +VHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcN +MzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJVUzESMBAGA1UEChMJSWRlblRydXN0 +MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBSb290IENBIDEwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTyP4o7 +ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGy +RBb06tD6Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlS +bdsHyo+1W/CD80/HLaXIrcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF +/YTLNiCBWS2ab21ISGHKTN9T0a9SvESfqy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R +3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoSmJxZZoY+rfGwyj4GD3vw +EUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFnol57plzy +9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9V +GxyhLrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ 
+2fjXctscvG29ZV/viDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsV +WaFHVCkugyhfHMKiq3IXAAaOReyL4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gD +W/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMwDQYJKoZIhvcN +AQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHV +DRDtfULAj+7AmgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9 +TaDKQGXSc3z1i9kKlT/YPyNtGtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8G +lwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFtm6/n6J91eEyrRjuazr8FGF1NFTwW +mhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMxNRF4eKLg6TCMf4Df +WN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4Mhn5 ++bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJ +tshquDDIajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhA +GaQdp/lLQzfcaFpPz+vCZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv +8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw +TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh +cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4 +WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu +ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc +h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+ +0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U +A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW +T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH +B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC +B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv +KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn +OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn +jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw +qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI +rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq +hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ +3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK +NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5 +ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur +TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC +jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc +oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq +4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA +mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d +emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXzCCA0egAwIBAgIBATANBgkqhkiG9w0BAQUFADCB0DELMAkGA1UEBhMCRVMx +SDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMuVml0 +b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwgTWVk +aXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6MRMw +EQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5wZS5j +b20wHhcNMDMwMTMwMjMwMDAwWhcNMTgwMTMwMjMwMDAwWjCB0DELMAkGA1UEBhMC +RVMxSDBGBgNVBAoTP0laRU5QRSBTLkEuIC0gQ0lGIEEtMDEzMzcyNjAtUk1lcmMu +Vml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFCMEAGA1UEBxM5QXZkYSBkZWwg 
+TWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAzIC0gMDEwMTAgVml0b3JpYS1HYXN0ZWl6 +MRMwEQYDVQQDEwpJemVucGUuY29tMR4wHAYJKoZIhvcNAQkBFg9JbmZvQGl6ZW5w +ZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1btoCXXhp3xIW +D+Bxl8nUCxkyiazWfpt0e68t+Qt9+lZjKZSdEw2Omj4qvr+ovRmDXO3iWpWVOWDl +3JHJjAzFCe8ZEBNDH+QNYwZHmPBaMYFOYFdbAFVHWvys152C308hcFJ6xWWGmjvl +2eMiEl9P2nR2LWue368DCu+ak7j3gjAXaCOdP1a7Bfr+RW3X2SC5R4Xyp8iHlL5J +PHJD/WBkLrezwzQPdACw8m9EG7q9kUwlNpL32mROujS3ZkT6mQTzJieLiE3X04s0 +uIUqVkk5MhjcHFf7al0N5CzjtTcnXYJKN2Z9EDVskk4olAdGi46eSoZXbjUOP5gk +Ej6wVZAXAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEG +MB0GA1UdDgQWBBTqVk/sPIOhFIh4gbIrBSLAB0FbQjANBgkqhkiG9w0BAQUFAAOC +AQEAYp7mEzzhw6o5Hf5+T5kcI+t4BJyiIWy7vHlLs/G8dLYXO81aN/Mzg928eMTR +TxxYZL8dd9uwsJ50TVfX6L0R4Dyw6wikh3fHRrat9ufXi63j5K91Ysr7aXqnF38d +iAgHYkrwC3kuxHBb9C0KBz6h8Q45/KCyN7d37wWAq38yyhPDlaOvyoE6bdUuK5hT +m5EYA5JmPyrhQ1moDOyueWBAjxzMEMj+OAY1H90cLv6wszsqerxRrdTOHBdv7MjB +EIpvEEQkXUxVXAzFuuT6m2t91Lfnwfl/IvljHaVC7DlyyhRYHD6D4Rx+4QKp4tWL +vpw6LkI+gKNJ/YdMCsRZQzEEFA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF8DCCA9igAwIBAgIPBuhGJy8fCo/RhFzjafbVMA0GCSqGSIb3DQEBBQUAMDgx +CzAJBgNVBAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXpl +bnBlLmNvbTAeFw0wNzEyMTMxMzA4MjdaFw0zNzEyMTMwODI3MjVaMDgxCzAJBgNV +BAYTAkVTMRQwEgYDVQQKDAtJWkVOUEUgUy5BLjETMBEGA1UEAwwKSXplbnBlLmNv +bTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMnTesoPHqynhugWZWqx +whtFMnGV2f4QW8yv56V5AY+Jw8ryVXH3d753lPNypCxE2J6SmxQ6oeckkAoKVo7F +2CaU4dlI4S0+2gpy3aOZFdqBoof0e24md4lYrdbrDLJBenNubdt6eEHpCIgSfocu +ZhFjbFT7PJ1ywLwu/8K33Q124zrX97RovqL144FuwUZvXY3gTcZUVYkaMzEKsVe5 +o4qYw+w7NMWVQWl+dcI8IMVhulFHoCCQk6GQS/NOfIVFVJrRBSZBsLVNHTO+xAPI +JXzBcNs79AktVCdIrC/hxKw+yMuSTFM5NyPs0wH54AlETU1kwOENWocivK0bo/4m +tRXzp/yEGensoYi0RGmEg/OJ0XQGqcwL1sLeJ4VQJsoXuMl6h1YsGgEebL4TrRCs +tST1OJGh1kva8bvS3ke18byB9llrzxlT6Y0Vy0rLqW9E5RtBz+GGp8rQap+8TI0G +M1qiheWQNaBiXBZO8OOi+gMatCxxs1gs3nsL2xoP694hHwZ3BgOwye+Z/MC5TwuG +KP7Suerj2qXDR2kS4Nvw9hmL7Xtw1wLW7YcYKCwEJEx35EiKGsY7mtQPyvp10gFA +Wo15v4vPS8+qFsGV5K1Mij4XkdSxYuWC5YAEpAN+jb/af6IPl08M0w3719Hlcn4c +yHf/W5oPt64FRuXxqBbsR6QXAgMBAAGjgfYwgfMwgbAGA1UdEQSBqDCBpYEPaW5m +b0BpemVucGUuY29tpIGRMIGOMUcwRQYDVQQKDD5JWkVOUEUgUy5BLiAtIENJRiBB +MDEzMzcyNjAtUk1lcmMuVml0b3JpYS1HYXN0ZWl6IFQxMDU1IEY2MiBTODFDMEEG +A1UECQw6QXZkYSBkZWwgTWVkaXRlcnJhbmVvIEV0b3JiaWRlYSAxNCAtIDAxMDEw +IFZpdG9yaWEtR2FzdGVpejAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUHRxlDqjyJXu0kc/ksbHmvVV0bAUwDQYJKoZIhvcNAQEFBQAD +ggIBAMeBRm8hGE+gBe/n1bqXUKJg7aWSFBpSm/nxiEqg3Hh10dUflU7F57dp5iL0 ++CmoKom+z892j+Mxc50m0xwbRxYpB2iEitL7sRskPtKYGCwkjq/2e+pEFhsqxPqg +l+nqbFik73WrAGLRne0TNtsiC7bw0fRue0aHwp28vb5CO7dz0JoqPLRbEhYArxk5 +ja2DUBzIgU+9Ag89njWW7u/kwgN8KRwCfr00J16vU9adF79XbOnQgxCvv11N75B7 +XSus7Op9ACYXzAJcY9cZGKfsK8eKPlgOiofmg59OsjQerFQJTx0CCzl+gQgVuaBp +E8gyK+OtbBPWg50jLbJtooiGfqgNASYJQNntKE6MkyQP2/EeTXp6WuKlWPHcj1+Z +ggwuz7LdmMySlD/5CbOlliVbN/UShUHiGUzGigjB3Bh6Dx4/glmimj4/+eAJn/3B +kUtdyXvWton83x18hqrNA/ILUpLxYm9/h+qrdslsUMIZgq+qHfUgKGgu1fxkN0/P +pUTEvnK0jHS0bKf68r10OEMr3q/53NjgnZ/cPcqlY0S/kqJPTIAcuxrDmkoEVU3K +7iYLHL8CxWTTnn7S05EcS6L1HOUXHA0MUqORH5zwIe0ClG+poEnK6EOMxPQ02nwi +o8ZmPrgbBYhdurz3vOXcFD2nhqi2WVIhA16L4wTtSyoeo09Q +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4 +MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6 +ZW5wZS5jb20wHhcNMDcxMjEzMTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYD +VQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5j 
+b20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ03rKDx6sp4boFmVq +scIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAKClaO +xdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6H +LmYRY2xU+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFX +uaOKmMPsOzTFlUFpfnXCPCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQD +yCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxTOTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+ +JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbKF7jJeodWLBoBHmy+E60Q +rLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK0GqfvEyN +BjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8L +hij+0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIB +QFqNeb+Lz0vPqhbBleStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+ +HMh3/1uaD7euBUbl8agW7EekFwIDAQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2lu +Zm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+SVpFTlBFIFMuQS4gLSBDSUYg +QTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBGNjIgUzgxQzBB +BgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUA +A4ICAQB4pgwWSp9MiDrAyw6lFn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWb +laQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbgakEyrkgPH7UIBzg/YsfqikuFgba56 +awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8qhT/AQKM6WfxZSzwo +JNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Csg1lw +LDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCT +VyvehQP5aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGk +LhObNA5me0mrZJfQRsN5nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJb +UjWumDqtujWTI6cfSN01RpiyEGjkpTHCClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/ +QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZoQ0iy2+tzJOeRf1SktoA+ +naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1ZWrOZyGls +QyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgIBBDANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQGEwJLUjEN +MAsGA1UECgwES0lTQTEuMCwGA1UECwwlS29yZWEgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkgQ2VudHJhbDEWMBQGA1UEAwwNS0lTQSBSb290Q0EgMTAeFw0wNTA4MjQw +ODA1NDZaFw0yNTA4MjQwODA1NDZaMGQxCzAJBgNVBAYTAktSMQ0wCwYDVQQKDARL +SVNBMS4wLAYDVQQLDCVLb3JlYSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBDZW50 +cmFsMRYwFAYDVQQDDA1LSVNBIFJvb3RDQSAxMIIBIDANBgkqhkiG9w0BAQEFAAOC +AQ0AMIIBCAKCAQEAvATk+hM58DSWIGtsaLv623f/J/es7C/n/fB/bW+MKs0lCVsk +9KFo/CjsySXirO3eyDOE9bClCTqnsUdIxcxPjHmc+QZXfd3uOPbPFLKc6tPAXXdi +8EcNuRpAU1xkcK8IWsD3z3X5bI1kKB4g/rcbGdNaZoNy4rCbvdMlFQ0yb2Q3lIVG +yHK+d9VuHygvx2nt54OJM1jT3qC/QOhDUO7cTWu8peqmyGGO9cNkrwYV3CmLP3WM +vHFE2/yttRcdbYmDz8Yzvb9Fov4Kn6MRXw+5H5wawkbMnChmn3AmPC7fqoD+jMUE +CSVPzZNHPDfqAmeS/vwiJFys0izgXAEzisEZ2wIBA6MyMDAwHQYDVR0OBBYEFL+2 +J9gDWnZlTGEBQVYx5Yt7OtnMMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEF +BQADggEBABOvUQveimpb5poKyLGQSk6hAp3MiNKrZr097LuxQpVqslxa/6FjZJap +aBV/JV6K+KRzwYCKhQoOUugy50X4TmWAkZl0Q+VFnUkq8JSV3enhMNITbslOsXfl +BM+tWh6UCVrXPAgcrnrpFDLBRa3SJkhyrKhB2vAhhzle3/xk/2F0KpzZm4tfwjeT +2KM3LzuTa7IbB6d/CVDv0zq+IWuKkDsnSlFOa56ch534eJAx7REnxqhZvvwYC/uO +fi5C4e3nCSG9uRPFVmf0JqZCQ5BEVLRxm3bkGhKsGigA35vB1fjbXKP4krG9tNT5 +UNkAAk/bg9ART6RCVmE6fhMy04Qfybo= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYD +VQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0 +ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0G +CSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTAeFw0wOTA2MTYxMTMwMThaFw0y +OTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UEBwwIQnVkYXBlc3Qx 
+FjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUtU3pp +Z25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvP +kd6mJviZpWNwrZuuyjNAfW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tc +cbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG0IMZfcChEhyVbUr02MelTTMuhTlAdX4U +fIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKApxn1ntxVUwOXewdI/5n7 +N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm1HxdrtbC +xkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1 ++rUCAwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G +A1UdDgQWBBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPM +Pcu1SCOhGnqmKrs0aDAbBgNVHREEFDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqG +SIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0olZMEyL/azXm4Q5DwpL7v8u8h +mLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfXI/OMn74dseGk +ddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c +2Pm2G2JwCz02yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5t +HMN1Rq41Bab2XD0h7lbwyYIiLXpUq3DDfSJlgnCW +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQG +EwJIVTERMA8GA1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3 +MDUGA1UECwwuVGFuw7pzw610dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNl +cnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBBcmFueSAoQ2xhc3MgR29sZCkgRsWR +dGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgxMjA2MTUwODIxWjCB +pzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxOZXRM +b2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlm +aWNhdGlvbiBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNz +IEdvbGQpIEbFkXRhbsO6c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAxCRec75LbRTDofTjl5Bu0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrT +lF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw/HpYzY6b7cNGbIRwXdrz +AZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAkH3B5r9s5 +VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRG +ILdwfzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2 +BJtr+UBdADTHLpl1neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAG +AQH/AgEEMA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2M +U9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwWqZw8UQCgwBEIBaeZ5m8BiFRh +bvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTtaYtOUZcTh5m2C ++C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2F +uLjbvrW5KfnaNwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2 +XjG4Kvte9nHfRCaexOYNkbQudZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBi +MQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMxMjM1OTU5WjBiMQswCQYDVQQGEwJV +UzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydO +ZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwz +c7MEL7xxjOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPP +OCwGJgl6cvf6UDL4wpPTaaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rl +mGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXTcrA/vGp97Eh/jcOrqnErU2lBUzS1sLnF +BgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc/Qzpf14Dl847ABSHJ3A4 +qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMBAAGjgZcw +gZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIB 
+BjAPBgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwu +bmV0c29sc3NsLmNvbS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3Jp +dHkuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc8 +6fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q4LqILPxFzBiwmZVRDuwduIj/ +h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/GGUsyfJj4akH +/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHN +pGxlaKFJdlxDydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCB +ijELMAkGA1UEBhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHly +aWdodCAoYykgMjAwNTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQSBDQTAeFw0w +NTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYDVQQGEwJDSDEQMA4G +A1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBX +SVNlS2V5IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAy0+zAJs9Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxR +VVuuk+g3/ytr6dTqvirdqFEr12bDYVxgAsj1znJ7O7jyTmUIms2kahnBAbtzptf2 +w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbDd50kc3vkDIzh2TbhmYsF +mQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ/yxViJGg +4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t9 +4B3RLoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQw +EAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOx +SPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vImMMkQyh2I+3QZH4VFvbBsUfk2 +ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4+vg1YFkCExh8 +vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa +hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZi +Fj4A4xylNoEYokxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ +/L7fCg0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBt +MQswCQYDVQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUg +Rm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9i +YWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAwMzJaFw0zOTEyMDExNTEwMzFaMG0x +CzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQLExlPSVNURSBG +b3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3 +HEokKtaXscriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGx +WuR51jIjK+FTzJlFXHtPrby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX +1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNk +u7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4oQnc/nSMbsrY9gBQHTC5P +99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvgGUpuuy9r +M2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUB +BAMCAQAwDQYJKoZIhvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrgh +cViXfa43FK8+5/ea4n32cZiZBKpDdHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5 +gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0VQreUGdNZtGn//3ZwLWoo4rO +ZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEuiHZeeevJuQHHf +aPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc 
+BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00 +MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakEPBtV +wedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWe +rNrwU8lmPNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF341 +68Xfuw6cwI2H44g4hWf6Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh +4Pw5qlPafX7PGglTvF0FBM+hSo+LdoINofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXp +UhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/lg6AnhF4EwfWQvTA9xO+o +abw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV7qJZjqlc +3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/G +KubX9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSt +hfbZxbGL0eUQMk1fiyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KO +Tk0k+17kBL5yG6YnLUlamXrXXAkgt3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOt +zCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZIhvcNAQELBQAD +ggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2 +cDMT/uFPpiN3GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUN +qXsCHKnQO18LwIE6PWThv6ctTr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5 +YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP+V04ikkwj+3x6xn0dxoxGE1nVGwv +b2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh3jRJjehZrJ3ydlo2 +8hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fawx/k +NSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNj +ZgKAvQU6O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhp +q1467HxpvMc7hU6eFbm0FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFt +nh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOVhMJKzRwuJIczYOXD +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00 +MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFhZiFf +qq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMW +n4rjyduYNM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ym +c5GQYaYDFCDy54ejiK2toIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+ +O7q414AB+6XrW7PFXmAqMaCvN+ggOp+oMiwMzAkd056OXbxMmO7FGmh77FOm6RQ1 +o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+lV0POKa2Mq1W/xPtbAd0j +IaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZoL1NesNKq +IcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz +8eQQsSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43eh +vNURG3YBZwjgQQvD6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l +7ZizlWNof/k19N+IxWA1ksB8aRxhlRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALG +cC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZIhvcNAQELBQAD +ggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RC +roijQ1h5fq7KpVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0Ga +W/ZZGYjeVYg3UQt4XAoeo0L9x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4n +lv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgzdWqTHBLmYF5vHX/JHyPLhGGfHoJE ++V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6XU/IyAgkwo1jwDQHV +csaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+NwmNtd +dbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNg 
+KCLjsZWDzYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeM +HVOyToV7BjjHLPj4sHKNJeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4 +WSr2Rz0ZiC3oheGe7IUIarFsNMkd7EgrO3jtZsSOeWmD3n+M +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMjAeFw0wNjExMjQxODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCa +GMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6XJxg +Fyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55J +WpzmM+Yklvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bB +rrcCaoF6qUWD4gXmuVbBlDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp ++ARz8un+XJiM9XOva7R+zdRcAitMOeGylZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1 +ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt66/3FsvbzSUr5R/7mp/i +Ucw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1JdxnwQ5hYIiz +PtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og +/zOhD7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UH +oycR7hYQe7xFSkyyBNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuI +yV77zGHcizN300QyNQliBJIWENieJ0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1Ud +EwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBQahGK8SEwzJQTU7tD2 +A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGUa6FJpEcwRTEL +MAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2f +BluornFdLwUvZ+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzn +g/iN/Ae42l9NLmeyhP3ZRPx3UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2Bl +fF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodmVjB3pjd4M1IQWK4/YY7yarHvGH5K +WWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK+JDSV6IZUaUtl0Ha +B0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrWIozc +hLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPR +TUIZ3Ph1WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWD +mbA4CD/pXvk1B+TJYm5Xf6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0Z +ohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y +4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8VCLAAVBpQ570su9t+Oza +8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQEL +BQAwSDELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAc +BgNVBAMTFVF1b1ZhZGlzIFJvb3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00 +MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMgRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286IxSR +/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNu +FoM7pmRLMon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXR +U7Ox7sWTaYI+FrUoRqHe6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+c +ra1AdHkrAj80//ogaX3T7mH1urPnMNA3I4ZyYUUpSFlob3emLoG+B01vr87ERROR +FHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3UVDmrJqMz6nWB2i3ND0/k +A9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f75li59wzw +eyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634Ryl +sSqiMd5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBp +VzgeAVuNVejH38DMdyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0Q +A4XN8f+MFrXBsj6IbGB/kE+V9/YtrQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ +ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZIhvcNAQELBQAD 
+ggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnI +FUBhynLWcKzSt/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5Wvv +oxXqA/4Ti2Tk08HS6IT7SdEQTXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFg +u/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9DuDcpmvJRPpq3t/O5jrFc/ZSXPsoaP +0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGibIh6BJpsQBJFxwAYf +3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmDhPbl +8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+ +DhcI00iX0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HN +PlopNLk9hM6xZdRZkZFWdSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ +ywaZWWDYWGWVjUTR939+J399roD1B0y2PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0x +GTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJv +b3QgQ0EgMzAeFw0wNjExMjQxOTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNV +BAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMRswGQYDVQQDExJRdW9W +YWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDM +V0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNggDhoB +4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUr +H556VOijKTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd +8lyyBTNvijbO0BNO/79KDDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9Cabwv +vWhDFlaJKjdhkf2mrk7AyxRllDdLkgbvBNDInIjbC3uBr7E9KsRlOni27tyAsdLT +mZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwpp5ijJUMv7/FfJuGITfhe +btfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8nT8KKdjc +T5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDt +WAEXMJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZ +c6tsgLjoC2SToJyMGf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A +4iLItLRkT9a6fUg+qGkM17uGcclzuD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYD +VR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHTBgkrBgEEAb5YAAMwgcUwgZMG +CCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmljYXRlIGNvbnN0 +aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVu +dC4wLQYIKwYBBQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2Nw +czALBgNVHQ8EBAMCAQYwHQYDVR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4G +A1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4ywLQoUmkRzBFMQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UEAxMSUXVvVmFkaXMg +Um9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZVqyM0 +7ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSem +d1o417+shvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd ++LJ2w/w4E6oM3kJpK27zPOuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B +4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadN +t54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp8kokUvd0/bpO5qgdAm6x +DYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBCbjPsMZ57 +k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6s +zHXug/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0j +Wy10QJLZYxkNc91pvGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeT +mJlglFwjz1onl14LBQaTNx47aTbrqZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK +4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF0DCCBLigAwIBAgIEOrZQizANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJC +TTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDElMCMGA1UECxMcUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTEuMCwGA1UEAxMlUXVvVmFkaXMgUm9vdCBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wMTAzMTkxODMzMzNaFw0yMTAzMTcxODMz +MzNaMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMSUw +IwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYDVQQDEyVR 
+dW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv2G1lVO6V/z68mcLOhrfEYBklbTRvM16z/Yp +li4kVEAkOPcahdxYTMukJ0KX0J+DisPkBgNbAKVRHnAEdOLB1Dqr1607BxgFjv2D +rOpm2RgbaIr1VxqYuvXtdj182d6UajtLF8HVj71lODqV0D1VNk7feVcxKh7YWWVJ +WCCYfqtffp/p1k3sg3Spx2zY7ilKhSoGFPlU5tPaZQeLYzcS19Dsw3sgQUSj7cug +F+FxZc4dZjH3dgEZyH0DWLaVSR2mEiboxgx24ONmy+pdpibu5cxfvWenAScOospU +xbF6lR1xHkopigPcakXBpBlebzbNw6Kwt/5cOOJSvPhEQ+aQuwIDAQABo4ICUjCC +Ak4wPQYIKwYBBQUHAQEEMTAvMC0GCCsGAQUFBzABhiFodHRwczovL29jc3AucXVv +dmFkaXNvZmZzaG9yZS5jb20wDwYDVR0TAQH/BAUwAwEB/zCCARoGA1UdIASCAREw +ggENMIIBCQYJKwYBBAG+WAABMIH7MIHUBggrBgEFBQcCAjCBxxqBxFJlbGlhbmNl +IG9uIHRoZSBRdW9WYWRpcyBSb290IENlcnRpZmljYXRlIGJ5IGFueSBwYXJ0eSBh +c3N1bWVzIGFjY2VwdGFuY2Ugb2YgdGhlIHRoZW4gYXBwbGljYWJsZSBzdGFuZGFy +ZCB0ZXJtcyBhbmQgY29uZGl0aW9ucyBvZiB1c2UsIGNlcnRpZmljYXRpb24gcHJh +Y3RpY2VzLCBhbmQgdGhlIFF1b1ZhZGlzIENlcnRpZmljYXRlIFBvbGljeS4wIgYI +KwYBBQUHAgEWFmh0dHA6Ly93d3cucXVvdmFkaXMuYm0wHQYDVR0OBBYEFItLbe3T +KbkGGew5Oanwl4Rqy+/fMIGuBgNVHSMEgaYwgaOAFItLbe3TKbkGGew5Oanwl4Rq +y+/foYGEpIGBMH8xCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBMaW1p +dGVkMSUwIwYDVQQLExxSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MS4wLAYD +VQQDEyVRdW9WYWRpcyBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggQ6tlCL +MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAitQUtf70mpKnGdSk +fnIYj9lofFIk3WdvOXrEql494liwTXCYhGHoG+NpGA7O+0dQoE7/8CQfvbLO9Sf8 +7C9TqnN7Az10buYWnuulLsS/VidQK2K6vkscPFVcQR0kvoIgR13VRH56FmjffU1R +cHhXHTMe/QKZnAzNCgVPx7uOpHX6Sm2xgI4JVrmcGmD+XcHXetwReNDWXcG31a0y +mQM6isxUJTkxgXsTIlG6Rmyhu576BGxJJnSP0nPrzDCi5upZIof4l/UO/erMkqQW +xFIY6iHOsfHmhIHluqmGKPJDWl0Snawe2ajlCmqnf6CHKc/yiU3U7MXi5nrQNiOK +SnQ2+Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBK +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +GTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkx +MjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3Qg +Q29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jxYDiJ +iQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa +/FHtaMbQbqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJ +jnIFHovdRIWCQtBJwB1g8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnI +HmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYVHDGA76oYa8J719rO+TMg1fW9ajMtgQT7 +sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi0XPnj3pDAgMBAAGjgZ0w +gZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1UdEwEB/wQF +MAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCsw +KaAnoCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsG +AQQBgjcVAQQDAgEAMA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0L +URYD7xh8yOOvaliTFGCRsoTciE6+OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXO +H0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cnCDpOGR86p1hcF895P4vkp9Mm +I50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/53CYNv6ZHdAbY +iNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBI +MQswCQYDVQQGEwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24x +FzAVBgNVBAMTDlNlY3VyZVRydXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIz +MTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAeBgNVBAoTF1NlY3VyZVRydXN0IENv +cnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCCASIwDQYJKoZIhvcN +AQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQXOZEz +Zum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO 
+0gMdA+9tDWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIao +wW8xQmxSPmjL8xk037uHGFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj +7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b01k/unK8RCSc43Oz969XL0Imnal0ugBS +8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmHursCAwEAAaOBnTCBmjAT +BgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCeg +JYYjaHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt3 +6Z3q059c4EVlew3KW+JwULKUBRSuSceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/ +3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHfmbx8IVQr5Fiiu1cprp6poxkm +D5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZnMUFdAvnZyPS +CPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDEl +MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMh +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIz +MloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09N +IFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNlY3VyaXR5IENvbW11 +bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSE +RMqm4miO/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gO +zXppFodEtZDkBp2uoQSXWHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5 +bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4zZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDF +MxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4bepJz11sS6/vmsJWXMY1 +VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK9U2vP9eC +OKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HW +tWS3irO4G8za+6xmiEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZ +q51ihPZRwSzJIxXYKLerJRO1RuGGAv8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDb +EJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnWmHyojf6GPgcWkuF75x3sM3Z+ +Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEWT1MKZPlO9L9O +VL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEY +MBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21t +dW5pY2F0aW9uIFJvb3RDQTEwHhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5 +WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMPU0VDT00gVHJ1c3QubmV0MScwJQYD +VQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw8yl8 +9f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJ +DKaVv0uMDPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9 +Ms+k2Y7CI9eNqPPYJayX5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/N +QV3Is00qVUarH9oe4kA92819uZKAnDfdDJZkndwi92SL32HeFZRSFaB9UslLqCHJ +xrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2JChzAgMBAAGjPzA9MB0G +A1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYwDwYDVR0T +AQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vG +kl3g0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfr +Uj94nK9NrvjVT8+amCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5 +Bw+SUEmK3TGXX8npN6o7WWWXlDLJs58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJU +JRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ6rBK+1YWc26sTfcioU+tHXot +RSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAiFL39vmwLAw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDEl 
+MCMGA1UEChMcU0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMe +U2VjdXJpdHkgQ29tbXVuaWNhdGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoX +DTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRy +dXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3VyaXR5IENvbW11bmlj +YXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANAV +OVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGr +zbl+dp+++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVM +VAX3NuRFg3sUZdbcDE3R3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQ +hNBqyjoGADdH5H5XTz+L62e4iKrFvlNVspHEfbmwhRkGeC7bYRr6hfVKkaHnFtWO +ojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1KEOtOghY6rCcMU/Gt1SSw +awNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8QIH4D5cs +OPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpF +coJxDjrSzG+ntKEju/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXc +okgfGT+Ok+vx+hfuzU7jBBJV1uXk3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8 +t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6qtnRGEmyR7jTV7JqR50S+kDFy +1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29mvVXIwAHIRc/ +SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDIDCCAgigAwIBAgIBHTANBgkqhkiG9w0BAQUFADA5MQswCQYDVQQGEwJGSTEP +MA0GA1UEChMGU29uZXJhMRkwFwYDVQQDExBTb25lcmEgQ2xhc3MyIENBMB4XDTAx +MDQwNjA3Mjk0MFoXDTIxMDQwNjA3Mjk0MFowOTELMAkGA1UEBhMCRkkxDzANBgNV +BAoTBlNvbmVyYTEZMBcGA1UEAxMQU29uZXJhIENsYXNzMiBDQTCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAJAXSjWdyvANlsdE+hY3/Ei9vX+ALTU74W+o +Z6m/AxxNjG8yR9VBaKQTBME1DJqEQ/xcHf+Js+gXGM2RX/uJ4+q/Tl18GybTdXnt +5oTjV+WtKcT0OijnpXuENmmz/V52vaMtmdOQTiMofRhj8VQ7Jp12W5dCsv+u8E7s +3TmVToMGf+dJQMjFAbJUWmYdPfz56TwKnoG4cPABi+QjVHzIrviQHgCWctRUz2Ej +vOr7nQKV0ba5cTppCD8PtOFCx4j1P5iop7oc4HFx71hXgVB6XGt0Rg6DA5jDjqhu +8nYybieDwnPz3BjotJPqdURrBGAgcVeHnfO+oJAjPYok4doh28MCAwEAAaMzMDEw +DwYDVR0TAQH/BAUwAwEB/zARBgNVHQ4ECgQISqCqWITTXjwwCwYDVR0PBAQDAgEG +MA0GCSqGSIb3DQEBBQUAA4IBAQBazof5FnIVV0sd2ZvnoiYw7JNn39Yt0jSv9zil +zqsWuasvfDXLrNAPtEwr/IDva4yRXzZ299uzGxnq9LIR/WFxRL8oszodv7ND6J+/ +3DEIcbCdjdY0RzKQxmUk96BKfARzjzlvF4xytb1LyHr4e4PDKE6cCepnP7JnBBvD +FNr450kkkdAdavphOe9r5yF1BgfYErQhIHBCcYHaPJo2vqZbDWpsmh+Re/n570K6 +Tk6ezAyNlNzZRZxe7EJQY670XcSxEtzKO6gunRRaBXW37Ndj4ro1tgQIkejanZz2 +ZrUYrAqmVCY0M9IbwdR/GjqOC6oybtv8TyWf2TLHllpwrN9M +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gRVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0y +MjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5MMR4wHAYDVQQKDBVTdGFhdCBkZXIg +TmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRlcmxhbmRlbiBFViBS +b290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkkSzrS +M4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nC +UiY4iKTWO0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3d +Z//BYY1jTw+bbRcwJu+r0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46p +rfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13l +pJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gVXJrm0w912fxBmJc+qiXb +j5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr08C+eKxC +KFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS +/ZbV0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0X +cgOPvZuM5l5Tnrmd74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH +1vI4gnPah1vlPNOePqc7nvQDs/nxfRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrP +px9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB 
+/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwaivsnuL8wbqg7 +MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u +2dfOWBfoqSmuc0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHS +v4ilf0X8rLiltTMMgsT7B/Zq5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTC +wPTxGfARKbalGAKb12NMcIxHowNDXLldRqANb/9Zjr7dn3LDWyvfjFvO5QxGbJKy +CqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tNf1zuacpzEPuKqf2e +vTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi5Dp6 +Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIa +Gl6I6lD4WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeL +eG9QgkRQP2YGiqtDhFZKDyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8 +FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGyeUN51q1veieQA6TqJIc/2b3Z6fJfUEkc +7uzXLg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oX +DTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ5291 +qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8Sp +uOUfiUtnvWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPU +Z5uW6M7XxgpT0GtJlvOjCwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvE +pMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiile7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp +5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCROME4HYYEhLoaJXhena/M +UGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpICT0ugpTN +GmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy +5V6548r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv +6q012iDTiIJh8BIitrzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEK +eN5KzlW/HdXZt1bv8Hb/C3m1r737qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6 +B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMBAAGjgZcwgZQwDwYDVR0TAQH/ +BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcCARYxaHR0cDov +L3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV +HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqG +SIb3DQEBCwUAA4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLyS +CZa59sCrI2AGeYwRTlHSeYAz+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen +5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwjf/ST7ZwaUb7dRUG/kSS0H4zpX897 +IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaNkqbG9AclVMwWVxJK +gnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfkCpYL ++63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxL +vJxxcypFURmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkm +bEgeqmiSBeGCc1qb3AdbCG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvk +N1trSt8sV4pAWja63XVECDdCcAz+3F4hoKOKwJCcaNpQ5kUQR3i2TtJlycM33+FC +Y7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoVIPVVYpbtbZNQvOSqeK3Z +ywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm66+KAQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIEAJiiOTANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJO +TDEeMBwGA1UECgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFh +dCBkZXIgTmVkZXJsYW5kZW4gUm9vdCBDQSAtIEczMB4XDTEzMTExNDExMjg0MloX +DTI4MTExMzIzMDAwMFowWjELMAkGA1UEBhMCTkwxHjAcBgNVBAoMFVN0YWF0IGRl +ciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5lZGVybGFuZGVuIFJv +b3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAL4yolQP +cPssXFnrbMSkUeiFKrPMSjTysF/zDsccPVMeiAho2G89rcKezIJnByeHaHE6n3WW +IkYFsO2tx1ueKt6c/DrGlaf1F2cY5y9JCAxcz+bMNO14+1Cx3Gsy8KL+tjzk7FqX +xz8ecAgwoNzFs21v0IJyEavSgWhZghe3eJJg+szeP4TrjTgzkApyI/o1zCZxMdFy 
+KJLZWyNtZrVtB0LrpjPOktvA9mxjeM3KTj215VKb8b475lRgsGYeCasH/lSJEULR +9yS6YHgamPfJEf0WwTUaVHXvQ9Plrk7O53vDxk5hUUurmkVLoR9BvUhTFXFkC4az +5S6+zqQbwSmEorXLCCN2QyIkHxcE1G6cxvx/K2Ya7Irl1s9N9WMJtxU51nus6+N8 +6U78dULI7ViVDAZCopz35HCz33JvWjdAidiFpNfxC95DGdRKWCyMijmev4SH8RY7 +Ngzp07TKbBlBUgmhHbBqv4LvcFEhMtwFdozL92TkA1CvjJFnq8Xy7ljY3r735zHP +bMk7ccHViLVlvMDoFxcHErVc0qsgk7TmgoNwNsXNo42ti+yjwUOH5kPiNL6VizXt +BznaqB16nzaeErAMZRKQFWDZJkBE41ZgpRDUajz9QdwOWke275dhdU/Z/seyHdTt +XUmzqWrLZoQT1Vyg3N9udwbRcXXIV2+vD3dbAgMBAAGjQjBAMA8GA1UdEwEB/wQF +MAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRUrfrHkleuyjWcLhL75Lpd +INyUVzANBgkqhkiG9w0BAQsFAAOCAgEAMJmdBTLIXg47mAE6iqTnB/d6+Oea31BD +U5cqPco8R5gu4RV78ZLzYdqQJRZlwJ9UXQ4DO1t3ApyEtg2YXzTdO2PCwyiBwpwp +LiniyMMB8jPqKqrMCQj3ZWfGzd/TtiunvczRDnBfuCPRy5FOCvTIeuXZYzbB1N/8 +Ipf3YF3qKS9Ysr1YvY2WTxB1v0h7PVGHoTx0IsL8B3+A3MSs/mrBcDCw6Y5p4ixp +gZQJut3+TcCDjJRYwEYgr5wfAvg1VUkvRtTA8KCWAg8zxXHzniN9lLf9OtMJgwYh +/WA9rjLA0u6NpvDntIJ8CsxwyXmA+P5M9zWEGYox+wrZ13+b8KKaa8MFSu1BYBQw +0aoRQm7TIwIEC8Zl3d1Sd9qBa7Ko+gE4uZbqKmxnl4mUnrzhVNXkanjvSr0rmj1A +fsbAddJu+2gw7OyLnflJNZoaLNmzlTnVHpL3prllL+U9bTpITAjc5CgSKL59NVzq +4BZ+Extq1z7XnvwtdbLBFNUjA9tbbws+eC8N3jONFrdI54OagQ97wUNNVQQXOEpR +1VmiiXTTn74eS9fGbbeIJG9gkaSChVtWQbzQRKtqE77RLFi3EjNYsjdj3BP1lB0/ +QFH1T/U67cjF68IeHRaVesd+QnGTbksVtzDfqu1XhUisHWrdOWnk4Xl4vs4Fv6EM +94B7IWcnMFk= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzEl +MCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMp +U3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQw +NjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBoMQswCQYDVQQGEwJVUzElMCMGA1UE +ChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZp +ZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqGSIb3 +DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf +8MOh2tTYbitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN ++lq2cwQlZut3f+dZxkqZJRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0 +X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVmepsZGD3/cVE8MC5fvj13c7JdBmzDI1aa +K4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSNF4Azbl5KXZnJHoe0nRrA +1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HFMIHCMB0G +A1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fR +zt0fhvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0 +YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBD +bGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8w +DQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGsafPzWdqbAYcaT1epoXkJKtv3 +L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLMPUxA2IGvd56D +eruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynp +VSJYACPq4xJDKVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEY +WQPJIrSPnNVeKtelttQKbfi3QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVs +ZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAw +MFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQgVGVj +aG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZp +Y2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL3twQP89o/8ArFvW59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMg +nLRJdzIpVv257IzdIvpy3Cdhl+72WoTsbhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1 
+HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNkN3mSwOxGXn/hbVNMYq/N +Hwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7NfZTD4p7dN +dloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0 +HZbUJtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0G +CSqGSIb3DQEBCwUAA4IBAQARWfolTwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjU +sHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx4mcujJUDJi5DnUox9g61DLu3 +4jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUwF5okxBDgBPfg +8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1 +mMpYjn0q7pBZc2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMx +EDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoT +HFN0YXJmaWVsZCBUZWNobm9sb2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVs +ZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNVBAYTAlVTMRAwDgYD +VQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFy +ZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2Vy +dmljZXMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20p +OsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm2 +8xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4PahHQUw2eeBGg6345AWh1K +Ts9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLPLJGmpufe +hRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk +6mFBrMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAw +DwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+q +AdcwKziIorhtSpzyEZGDMA0GCSqGSIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMI +bw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPPE95Dz+I0swSdHynVv/heyNXB +ve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTyxQGjhdByPq1z +qwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn +0q23KXB56jzaYyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCN +sSi6 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkgRzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1 +OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjEsMCoG +A1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgRzIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8Oo1XJ +JZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsD +vfOpL9HG4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnoo +D/Uefyf3lLE3PbfHkffiAez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/ +Q0kGi4xDuFby2X8hQxfqp0iVAXV16iulQ5XqFYSdCI0mblWbq9zSOdIxHWDirMxW +RST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbsO+wmETRIjfaAKxojAuuK +HDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8HvKTlXcxN +nw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM +0D4LnMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/i +UUjXuG+v+E5+M5iSFGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9 +Ha90OrInwMEePnWjFqmveiJdnxMaz6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHg +TuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE +AwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJKoZIhvcNAQEL +BQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K +2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfX 
+UfEpY9Z1zRbkJ4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl +6/2o1PXWT6RbdejF0mCy2wl+JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK +9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG/+gyRr61M3Z3qAFdlsHB1b6uJcDJ +HgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTcnIhT76IxW1hPkWLI +wpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/XldblhY +XzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5l +IxKVCCIcl85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoo +hdVddLHRDiBYmxOlsGOm7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulr +so8uBtjRkcfGEvRM/TAXw8HaOFvjqermobp573PYtlNXLfbQ4ddI +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEW +MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM2WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE +FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9j +ZXJ0LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3Js +LnN0YXJ0Y29tLm9yZy9zZnNjYS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFM +BgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUHAgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0 +Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRwOi8vY2VydC5zdGFy +dGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYgU3Rh +cnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlh +YmlsaXR5LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2Yg +dGhlIFN0YXJ0Q29tIENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFp +bGFibGUgYXQgaHR0cDovL2NlcnQuc3RhcnRjb20ub3JnL3BvbGljeS5wZGYwEQYJ +YIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNT +TCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOCAgEAFmyZ +9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8 +jhvh3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUW +FjgKXlf2Ysd6AgXmvB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJz +ewT4F+irsfMuXGRuczE6Eri8sxHkfY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1 +ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3fsNrarnDy0RLrHiQi+fHLB5L +EUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZEoalHmdkrQYu +L6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq +yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuC +O3NJo2pXh5Tl1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6V +um0ABj6y6koQOdjQK/W/7HW/lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkySh +NOsF/5oirpt9P/FlUQqmMGqz9IgcgA38corog14= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEW 
+MBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwg +Q2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0NjM3WhcNMzYwOTE3MTk0NjM2WjB9 +MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMi +U2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMgU3Rh +cnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUA +A4ICDwAwggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZk +pMyONvg45iPwbm2xPN1yo4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rf +OQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/C +Ji/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/deMotHweXMAEtcnn6RtYT +Kqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt2PZE4XNi +HzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMM +Av+Z6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w ++2OqqGwaVLRcJXrJosmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+ +Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3 +Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVcUjyJthkqcwEKDwOzEmDyei+B +26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT37uMdBNSSwID +AQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD +VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFul +F2mHMMo0aEPQQa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCC +ATgwLgYIKwYBBQUHAgEWImh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5w +ZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cuc3RhcnRzc2wuY29tL2ludGVybWVk +aWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENvbW1lcmNpYWwgKFN0 +YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0aGUg +c2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0 +aWZpY2F0aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93 +d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgG +CWCGSAGG+EIBDQQrFilTdGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5fPGFf59Jb2vKXfuM/gTF +wWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWmN3PH/UvS +Ta0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst +0OcNOrg+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNc +pRJvkrKTlMeIFw6Ttn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKl +CcWw0bdT82AUuoVpaiF8H3VhFyAXe2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVF +P0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA2MFrLH9ZXF2RsXAiV+uKa0hK +1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBsHvUwyKMQ5bLm +KhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE +JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ +8dCAWZvLMdibD4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnm +fyWl8kgAwKQB2j8= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4MTgyMjA2MjBaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9 +m2BtRsiMMW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdih +FvkcxC7mlSpnzNApbjyFNDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/ +TilftKaNXXsLmREDA/7n29uj/x2lzZAeAR81sH8A25Bvxn570e56eqeqDFdvpG3F +EzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkCb6dJtDZd0KTeByy2dbco +kdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn7uHbHaBu +HYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNF 
+vJbNcA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo +19AOeCMgkckkKmUpWyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjC +L3UcPX7ape8eYIVpQtPM+GP+HkM5haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJW +bjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNYMUJDLXT5xp6mig/p/r+D5kNX +JLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j +BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzc +K6FptWfUjNP9MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzf +ky9NfEBWMXrrpA9gzXrzvsMnjgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7Ik +Vh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQMbFamIp1TpBcahQq4FJHgmDmHtqB +sfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4HVtA4oJVwIHaM190e +3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtlvrsR +ls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ip +mXeascClOS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HH +b6D0jqTsNFFbjCYDcKF31QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksf +rK/7DZBaZmBwXarNeNQk7shBoJMBkpxqnvy5JMWzFYJ+vq6VK+uxwNrjAWALXmms +hFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCyx/yP2FS1k2Kdzs9Z+z0Y +zirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMWNY6E0F/6 +MBr1mmz0DlP5OlvRHA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF2TCCA8GgAwIBAgIQHp4o6Ejy5e/DfEoeWhhntjANBgkqhkiG9w0BAQsFADBk +MQswCQYDVQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0 +YWwgQ2VydGlmaWNhdGUgU2VydmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3Qg +Q0EgMjAeFw0xMTA2MjQwODM4MTRaFw0zMTA2MjUwNzM4MTRaMGQxCzAJBgNVBAYT +AmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGlnaXRhbCBDZXJ0aWZp +Y2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAyMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAlUJOhJ1R5tMJ6HJaI2nbeHCOFvEr +jw0DzpPMLgAIe6szjPTpQOYXTKueuEcUMncy3SgM3hhLX3af+Dk7/E6J2HzFZ++r +0rk0X2s682Q2zsKwzxNoysjL67XiPS4h3+os1OD5cJZM/2pYmLcX5BtS5X4HAB1f +2uY+lQS3aYg5oUFgJWFLlTloYhyxCwWJwDaCFCE/rtuh/bxvHGCGtlOUSbkrRsVP +ACu/obvLP+DHVxxX6NZp+MEkUp2IVd3Chy50I9AU/SpHWrumnf2U5NGKpV+GY3aF +y6//SSj8gO1MedK75MDvAe5QQQg1I3ArqRa0jG6F6bYRzzHdUyYb3y1aSgJA/MTA +tukxGggo5WDDH8SQjhBiYEQN7Aq+VRhxLKX0srwVYv8c474d2h5Xszx+zYIdkeNL +6yxSNLCK/RJOlrDrcH+eOfdmQrGrrFLadkBXeyq96G4DsguAhYidDMfCd7Camlf0 +uPoTXGiTOmekl9AbmbeGMktg2M7v0Ax/lZ9vh0+Hio5fCHyqW/xavqGRn1V9TrAL +acywlKinh/LTSlDcX3KwFnUey7QYYpqwpzmqm59m2I2mbJYV4+by+PGDYmy7Velh +k6M99bFXi08jsJvllGov34zflVEpYKELKeRcVVi3qPyZ7iVNTA6z00yPhOgpD/0Q +VAKFyPnlw4vP5w8CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0hBBYw +FDASBgdghXQBUwIBBgdghXQBUwIBMBIGA1UdEwEB/wQIMAYBAf8CAQcwHQYDVR0O +BBYEFE0mICKJS9PVpAqhb97iEoHF8TwuMB8GA1UdIwQYMBaAFE0mICKJS9PVpAqh +b97iEoHF8TwuMA0GCSqGSIb3DQEBCwUAA4ICAQAyCrKkG8t9voJXiblqf/P0wS4R +fbgZPnm3qKhyN2abGu2sEzsOv2LwnN+ee6FTSA5BesogpxcbtnjsQJHzQq0Qw1zv +/2BZf82Fo4s9SBwlAjxnffUy6S8w5X2lejjQ82YqZh6NM4OKb3xuqFp1mrjX2lhI +REeoTPpMSQpKwhI3qEAMw8jh0FcNlzKVxzqfl9NX+Ave5XLzo9v/tdhZsnPdTSpx +srpJ9csc1fV5yJmz/MFMdOO0vSk3FQQoHt5FRnDsr7p4DooqzgB53MBfGWcsa0vv +aGgLQ+OswWIJ76bdZWGgr4RVSJFSHMYlkSrQwSIjYVmvRRGFHQEkNI/Ps/8XciAT +woCqISxxOQ7Qj1zB09GOInJGTB2Wrk9xseEFKZZZ9LuedT3PDTcNYtsmjGOpI99n +Bjx8Oto0QuFmtEYE3saWmA9LSHokMnWRn6z3aOkquVVlzl1h0ydw2Df+n7mvoC5W +t6NlUe07qxS/TFED6F+KBZvuim6c779o+sjaC+NCydAXFJy3SuCvkychVSa1ZC+N +8f+mQAWFBVzKBxlcCxMoTFh/wqXvRdpg065lYZ1Tg3TCrvJcwhbtkj6EPnNgiLx2 +9CzP0H1907he0ZESEOnN3col49XtmS++dYFLJPlFRpTJKSFTnCZFqhMX5OfNeOI5 +wSsSnqaeG8XmDtkx2Q== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF4DCCA8igAwIBAgIRAPL6ZOJ0Y9ON/RAdBB92ylgwDQYJKoZIhvcNAQELBQAw +ZzELMAkGA1UEBhMCY2gxETAPBgNVBAoTCFN3aXNzY29tMSUwIwYDVQQLExxEaWdp 
+dGFsIENlcnRpZmljYXRlIFNlcnZpY2VzMR4wHAYDVQQDExVTd2lzc2NvbSBSb290 +IEVWIENBIDIwHhcNMTEwNjI0MDk0NTA4WhcNMzEwNjI1MDg0NTA4WjBnMQswCQYD +VQQGEwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2Vy +dGlmaWNhdGUgU2VydmljZXMxHjAcBgNVBAMTFVN3aXNzY29tIFJvb3QgRVYgQ0Eg +MjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMT3HS9X6lds93BdY7Bx +UglgRCgzo3pOCvrY6myLURYaVa5UJsTMRQdBTxB5f3HSek4/OE6zAMaVylvNwSqD +1ycfMQ4jFrclyxy0uYAyXhqdk/HoPGAsp15XGVhRXrwsVgu42O+LgrQ8uMIkqBPH +oCE2G3pXKSinLr9xJZDzRINpUKTk4RtiGZQJo/PDvO/0vezbE53PnUgJUmfANykR +HvvSEaeFGHR55E+FFOtSN+KxRdjMDUN/rhPSays/p8LiqG12W0OfvrSdsyaGOx9/ +5fLoZigWJdBLlzin5M8J0TbDC77aO0RYjb7xnglrPvMyxyuHxuxenPaHZa0zKcQv +idm5y8kDnftslFGXEBuGCxobP/YCfnvUxVFkKJ3106yDgYjTdLRZncHrYTNaRdHL +OdAGalNgHa/2+2m8atwBz735j9m9W8E6X47aD0upm50qKGsaCnw8qyIL5XctcfaC +NYGu+HuB5ur+rPQam3Rc6I8k9l2dRsQs0h4rIWqDJ2dVSqTjyDKXZpBy2uPUZC5f +46Fq9mDU5zXNysRojddxyNMkM3OxbPlq4SjbX8Y96L5V5jcb7STZDxmPX2MYWFCB +UWVv8p9+agTnNCRxunZLWB4ZvRVgRaoMEkABnRDixzgHcgplwLa7JSnaFp6LNYth +7eVxV4O1PHGf40+/fh6Bn0GXAgMBAAGjgYYwgYMwDgYDVR0PAQH/BAQDAgGGMB0G +A1UdIQQWMBQwEgYHYIV0AVMCAgYHYIV0AVMCAjASBgNVHRMBAf8ECDAGAQH/AgED +MB0GA1UdDgQWBBRF2aWBbj2ITY1x0kbBbkUe88SAnTAfBgNVHSMEGDAWgBRF2aWB +bj2ITY1x0kbBbkUe88SAnTANBgkqhkiG9w0BAQsFAAOCAgEAlDpzBp9SSzBc1P6x +XCX5145v9Ydkn+0UjrgEjihLj6p7jjm02Vj2e6E1CqGdivdj5eu9OYLU43otb98T +PLr+flaYC/NUn81ETm484T4VvwYmneTwkLbUwp4wLh/vx3rEUMfqe9pQy3omywC0 +Wqu1kx+AiYQElY2NfwmTv9SoqORjbdlk5LgpWgi/UOGED1V7XwgiG/W9mR4U9s70 +WBCCswo9GcG/W6uqmdjyMb3lOGbcWAXH7WMaLgqXfIeTK7KK4/HsGOV1timH59yL +Gn602MnTihdsfSlEvoqq9X46Lmgxk7lq2prg2+kupYTNHAq4Sgj5nPFhJpiTt3tm +7JFe3VE/23MPrQRYCd0EApUKPtN236YQHoA96M2kZNEzx5LH4k5E4wnJTsJdhw4S +nr8PyQUQ3nqjsTzyP6WqJ3mtMX0f/fwZacXduT98zca0wjAefm6S139hdlqP65VN +vBFuIXxZN5nQBrz5Bm0yFqXZaajh3DyAHmBR3NdUIR7KYndP+tiPsys6DXhyyWhB +WkdKwqPrGtcKqzwyVcgKEZzfdNbwQBUdyLmPtTbFr/giuMod89a2GQ+fYWVq6nTI +fI/DT11lgh/ZDYnadXL77/FHZxOzyNEZiCcmmpl5fx7kLD977vHeTYuWl8PVP3wb +I+2ksx0WckNLIOFZfsLorSa/ovc= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2ln +biBHb2xkIENBIC0gRzIwHhcNMDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBF +MQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMR8wHQYDVQQDExZT +d2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIIC +CgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUqt2/8 +76LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+ +bbqBHH5CjCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c +6bM8K8vzARO/Ws/BtQpgvd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqE +emA8atufK+ze3gE/bk3lUIbLtK/tREDFylqM2tIrfKjuvqblCqoOpd8FUrdVxyJd +MmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvRAiTysybUa9oEVeXBCsdt +MDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuendjIj3o02y +MszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69y +FGkOpeUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPi +aG59je883WX0XaxR7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxM +gI93e2CaHt+28kgeDrpOVG2Y4OGiGqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCB +qTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUWyV7 +lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64OfPAeGZe6Drn +8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe6 +45R88a7A3hfm5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczO +UYrHUDFu4Up+GC9pWbY9ZIEr44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5 +O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOfMke6UiI0HTJ6CVanfCU2qT1L2sCC 
+bwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6mGu6uLftIdxf+u+yv +GPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxpmo/a +77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCC +hdiDyyJkvC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid3 +92qgQmwLOM7XdVAyksLfKzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEpp +Ld6leNcG2mqeSz53OiATIgHQv2ieY2BrNU0LbbqhPcCT4H8js1WtciVORvnSFu+w +ZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6LqjviOvrv1vA+ACOzB2+htt +Qc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFejCCA2KgAwIBAgIJAN7E8kTzHab8MA0GCSqGSIb3DQEBCwUAMEoxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNVBAMTG1N3aXNzU2ln +biBHb2xkIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzMxNDdaFw0zNzA4MDQxMzMx +NDdaMEoxCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJDAiBgNV +BAMTG1N3aXNzU2lnbiBHb2xkIFJvb3QgQ0EgLSBHMzCCAiIwDQYJKoZIhvcNAQEB +BQADggIPADCCAgoCggIBAMPon8hlWp1nG8FFl7S0h0NbYWCAnvJ/XvlnRN1E+qu1 +q3f/KhlMzm/Ej0Gf4OLNcuDR1FJhQQkKvwpw++CDaWEpytsimlul5t0XlbBvhI46 +PmRaQfsbWPz9Kz6ypOasyYK8zvaV+Jd37Sb2WK6eJ+IPg+zFNljIe8/Vh6GphxoT +Z2EBbaZpnOKQ8StoZfPosHz8gj3erdgKAAlEeROc8P5udXvCvLNZAQt8xdUt8L// +bVfSSYHrtLNQrFv5CxUVjGn/ozkB7fzc3CeXjnuL1Wqm1uAdX80Bkeb1Ipi6LgkY +OG8TqIHS+yE35y20YueBkLDGeVm3Z3X+vo87+jbsr63ST3Q2AeVXqyMEzEpel89+ +xu+MzJUjaY3LOMcZ9taKABQeND1v2gwLw7qX/BFLUmE+vzNnUxC/eBsJwke6Hq9Y +9XWBf71W8etW19lpDAfpNzGwEhwy71bZvnorfL3TPbxqM006PFAQhyfHegpnU9t/ +gJvoniP6+Qg6i6GONFpIM19k05eGBxl9iJTOKnzFat+vvKmfzTqmurtU+X+P388O +WsStmryzOndzg0yTPJBotXxQlRHIgl6UcdBBGPvJxmXszom2ziKzEVs/4J0+Gxho +DaoDoWdZv2udvPjyZS+aQTpF2F7QNmxvOx5jtI6YTBPbIQ6fe+3qoKpxw+ujoNIl +AgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBRclwZGNKvfMMV8xQ1VcWYwtWCPnjAfBgNVHSMEGDAWgBRclwZGNKvfMMV8 +xQ1VcWYwtWCPnjANBgkqhkiG9w0BAQsFAAOCAgEAd0tN3uqFSqssJ9ZFx/FfIMFb +YO0Hy6Iz3DbPx5TxBsfV2s/NrYQ+/xJIf0HopWZXMMQd5KcaLy1Cwe9Gc7LV9Vr9 +Dnpr0sgxow1IlldlY1UYwPzkisyYhlurDIonN/ojaFlcJtehwcK5Tiz/KV7mlAu+ +zXJPleiP9ve4Pl7Oz54RyawDKUiKqbamNLmsQP/EtnM3scd/qVHbSypHX0AkB4gG +tySz+3/3sIsz+r8jdaNc/qplGsK+8X2BdwOBsY3XlQ16PEKYt4+pfVDh31IGmqBS +VHiDB2FSCTdeipynxlHRXGPRhNzC29L6Wxg2fWa81CiXL3WWHIQHrIuOUxG+JCGq +Z/LBrYic07B4Z3j101gDIApdIPG152XMDiDj1d/mLxkrhWjBBCbPj+0FU6HdBw7r +QSbHtKksW+NpPWbAYhvAqobAN8MxBIZwOb5rXyFAQaB/5dkPOEtwX0n4hbgrLqof +k0FD+PuydDwfS1dbt9RRoZJKzr4Qou7YFCJ7uUG9jemIqdGPAxpg/z+HiaCZJyJm +sD5onnKIUTidEz5FbQXlRrVz7UOGsRQKHrzaDb8eJFxmjw6+of3G62m8Q3nXA3b5 +3IeZuJjEzX9tEPkQvixC/pwpTYNrCr21jsRIiv0hB6aAfR+b6au9gmFECnEnX22b +kJ6u/zYks2gD1pWMa3M= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFwTCCA6mgAwIBAgIITrIAZwwDXU8wDQYJKoZIhvcNAQEFBQAwSTELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEjMCEGA1UEAxMaU3dpc3NTaWdu +IFBsYXRpbnVtIENBIC0gRzIwHhcNMDYxMDI1MDgzNjAwWhcNMzYxMDI1MDgzNjAw +WjBJMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dpc3NTaWduIEFHMSMwIQYDVQQD +ExpTd2lzc1NpZ24gUGxhdGludW0gQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAMrfogLi2vj8Bxax3mCq3pZcZB/HL37PZ/pEQtZ2Y5Wu669y +IIpFR4ZieIbWIDkm9K6j/SPnpZy1IiEZtzeTIsBQnIJ71NUERFzLtMKfkr4k2Htn +IuJpX+UFeNSH2XFwMyVTtIc7KZAoNppVRDBopIOXfw0enHb/FZ1glwCNioUD7IC+ +6ixuEFGSzH7VozPY1kneWCqv9hbrS3uQMpe5up1Y8fhXSQQeol0GcN1x2/ndi5ob +jM89o03Oy3z2u5yg+gnOI2Ky6Q0f4nIoj5+saCB9bzuohTEJfwvH6GXp43gOCWcw +izSC+13gzJ2BbWLuCB4ELE6b7P6pT1/9aXjvCR+htL/68++QHkwFix7qepF6w9fl ++zC8bBsQWJj3Gl/QKTIDE0ZNYWqFTFJ0LwYfexHihJfGmfNtf9dng34TaNhxKFrY +zt3oEBSa/m0jh26OWnA81Y0JAKeqvLAxN23IhBQeW71FYyBrS3SMvds6DsHPWhaP +pZjydomyExI7C3d3rLvlPClKknLKYRorXkzig3R3+jVIeoVNjZpTxN94ypeRSCtF +KwH3HBqi7Ri6Cr2D+m+8jVeTO9TUps4e8aCxzqv9KyiaTxvXw3LbpMS/XUz13XuW 
+ae5ogObnmLo2t/5u7Su9IPhlGdpVCX4l3P5hYnL5fhgC72O00Puv5TtjjGePAgMB +AAGjgawwgakwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFFCvzAeHFUdvOMW0ZdHelarp35zMMB8GA1UdIwQYMBaAFFCvzAeHFUdvOMW0 +ZdHelarp35zMMEYGA1UdIAQ/MD0wOwYJYIV0AVkBAQEBMC4wLAYIKwYBBQUHAgEW +IGh0dHA6Ly9yZXBvc2l0b3J5LnN3aXNzc2lnbi5jb20vMA0GCSqGSIb3DQEBBQUA +A4ICAQAIhab1Fgz8RBrBY+D5VUYI/HAcQiiWjrfFwUF1TglxeeVtlspLpYhg0DB0 +uMoI3LQwnkAHFmtllXcBrqS3NQuB2nEVqXQXOHtYyvkv+8Bldo1bAbl93oI9ZLi+ +FHSjClTTLJUYFzX1UWs/j6KWYTl4a0vlpqD4U99REJNi54Av4tHgvI42Rncz7Lj7 +jposiU0xEQ8mngS7twSNC/K5/FqdOxa3L8iYq/6KUFkuozv8KV2LwUvJ4ooTHbG/ +u0IdUt1O2BReEMYxB+9xJ/cbOQncguqLs5WGXv312l0xpuAxtpTmREl0xRbl9x8D +YSjFyMsSoEJL+WuICI20MhjzdZ/EfwBPBZWcoxcCw7NTm6ogOSkrZvqdr16zktK1 +puEa+S1BaYEUtLS17Yk9zvupnTVCRLEcFHOBzyoBNZox1S2PbYTfgE1X4z/FhHXa +icYwu+uPyyIIoK6q8QNsOktNCaUOcsZWayFCTiMlFGiudgp8DAdwZPmaL/YFOSbG +DI8Zf0NebvRbFS/bYV3mZy8/CJT5YLSYMdp08YSTcU1f+2BY0fvEwW2JorsgH51x +kcsymxM9Pn2SUjWskpSi0xjCfMfqr3YFFt1nJ8J+HAciIfNAChs0B0QTwoRqjt8Z +Wr9/6x3iGjjRXK9HkmuAtTClyY3YqzGBH9/CZjfTk6mFhnll0g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFgTCCA2mgAwIBAgIIIj+pFyDegZQwDQYJKoZIhvcNAQELBQAwTjELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEoMCYGA1UEAxMfU3dpc3NTaWdu +IFBsYXRpbnVtIFJvb3QgQ0EgLSBHMzAeFw0wOTA4MDQxMzM0MDRaFw0zNzA4MDQx +MzM0MDRaME4xCzAJBgNVBAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxKDAm +BgNVBAMTH1N3aXNzU2lnbiBQbGF0aW51bSBSb290IENBIC0gRzMwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCUoO8TG59EIBvNxaoiu9nyUj56Wlh35o2h +K8ncpPPksxOUAGKbHPJDUEOBfq8wNkmsGIkMGEW4PsdUbePYmllriholqba1Dbd9 +I/BffagHqfc+hi7IAU3c5jbtHeU3B2kSS+OD0QQcJPAfcHHnGe1zSG6VKxW2VuYC +31bpm/rqpu7gwsO64MzGyHvXbzqVmzqPvlss0qmgOD7WiOGxYhOO3KswZ82oaqZj +K4Kwy8c9Tu1y9n2rMk5lAusPmXT4HBoojA5FAJMsFJ9txxue9orce3jjtJRHHU0F +bYR6kFSynot1woDfhzk/n/tIVAeNoCn1+WBfWnLou5ugQuAIADSjFTwT49YaawKy +lCGjnUG8KmtOMzumlDj8PccrM7MuKwZ0rJsQb8VORfddoVYDLA1fer0e3h13kGva +pS2KTOnfQfTnS+x9lUKfTKkJD0OIPz2T5yv0ekjaaMTdEoAxGl0kVCamJCGzTK3a +Fwg2AlfGnIZwyXXJnnxh2HjmuegUafkcECgSXUt1ULo80GdwVVVWS/s9HNjbeU2X +37ie2xcs1TUHuFCp9473Vv96Z0NPINnKZtY4YEvulDHWDaJIm/80aZTGNfWWiO+q +ZsyBputMU/8ydKe2nZhXtLomqfEzM2J+OrADEVf/3G8RI60+xgrQzFS3LcKTHeXC +pozH2O9T9wIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUVio/kFj0F1oUstcIG4VbVGpUGigwHwYDVR0jBBgwFoAUVio/ +kFj0F1oUstcIG4VbVGpUGigwDQYJKoZIhvcNAQELBQADggIBAGztiudDqHknm7jP +hz5kOBiMEUKShjfgWMMb7gQu94TsgxBoDH94LZzCl442ThbYDuprSK1Pnl0NzA2p +PhiFfsxomTk11tifhsEy+01lsyIUS8iFZtoX/3GRrJxWV95xLFZCv/jNDvCi0//S +IhX70HgKfuGwWs6ON9upnueVz2PyLA3S+m/zyNX7ALf3NWcQ03tS7BAy+L/dXsmm +gqTxsL8dLt0l5L1N8DWpkQFH+BAClFvrPusNutUdYyylLqvn4x6j7kuqX7FmAbSC +WvlGS8fx+N8svv113ZY4mjc6bqXmMhVus5DAOYp0pZWgvg0uiXnNKVaOw15XUcQF +bwRVj4HpTL1ZRssqvE3JHfLGTwXkyAQN925P2sM6nNLC9enGJHoUPhxCMKgCRTGp +/FCp3NyGOA9bkz9/CE5qDSc6EHlWwxW4PgaG9tlwZ691eoviWMzGdU8yVcVsFAko +O/KV5GreLCgHraB9Byjd1Fqj6aZ8E4yZC1J429nR3z5aQ3Z/RmBTws3ndkd8Vc20 +OWQQW5VLNV1EgyTV4C4kDMGAbmkAgAZ3CmaCEAxRbzeJV9vzTOW4ue4jZpdgt1Ld +2Zb7uoo7oE3OXvBETJDMIU8bOphrjjGD+YMIUssZwTVr7qEVW4g/bazyNJJTpjAq +E9fmhqhd2ULSx52peovL3+6iMcLl +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UE +BhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWdu +IFNpbHZlciBDQSAtIEcyMB4XDTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0Nlow +RzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMY +U3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644N0Mv 
+Fz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7br +YT7QbNHm+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieF +nbAVlDLaYQ1HTWBCrpJH6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH +6ATK72oxh9TAtvmUcXtnZLi2kUpCe2UuMGoM9ZDulebyzYLs2aFK7PayS+VFheZt +eJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5hqAaEuSh6XzjZG6k4sIN/ +c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5FZGkECwJ +MoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRH +HTBsROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTf +jNFusB3hB48IHpmccelM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb6 +5i/4z3GcRm25xBWNOHkDRUjvxF3XCO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOB +rDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU +F6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRBtjpbO8tFnb0c +wpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIB +AHPGgeAn0i0P4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShp +WJHckRE1qTodvBqlYJ7YH39FkWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9 +xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L3XWgwF15kIwb4FDm3jH+mHtwX6WQ +2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx/uNncqCxv1yL5PqZ +IseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFaDGi8 +aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2X +em1ZqSqPe97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQR +dAtq/gsD/KNVV4n+SsuuWxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/ +OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJDIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+ +hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ubDgEj8Z+7fNzcbBGXJbLy +tGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFfjCCA2agAwIBAgIJAKqIsFoLsXabMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV +BAYTAkNIMRUwEwYDVQQKEwxTd2lzc1NpZ24gQUcxJjAkBgNVBAMTHVN3aXNzU2ln +biBTaWx2ZXIgUm9vdCBDQSAtIEczMB4XDTA5MDgwNDEzMTkxNFoXDTM3MDgwNDEz +MTkxNFowTDELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3aXNzU2lnbiBBRzEmMCQG +A1UEAxMdU3dpc3NTaWduIFNpbHZlciBSb290IENBIC0gRzMwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQC+h5sF5nF8Um9t7Dep6bPczF9/01DqIZsE8D2/ +vo7JpRQWMhDPmfzscK1INmckDBcy1inlSjmxN+umeAxsbxnKTvdR2hro+iE4bJWc +L9aLzDsCm78mmxFFtrg0Wh2mVEhSyJ14cc5ISsyneIPcaKtmHncH0zYYCNfUbWD4 +8HnTMzYJkmO3BJr1p5baRa90GvyC46hbDjo/UleYfrycjMHAslrfxH7+DKZUdoN+ +ut3nKvRKNk+HZS6lujmNWWEp89OOJHCMU5sRpUcHsnUFXA2E2UTZzckmRFduAn2V +AdSrJIbuPXD7V/qwKRTQnfLFl8sJyvHyPefYS5bpiC+eR1GKVGWYSNIS5FR3DAfm +vluc8d0Dfo2E/L7JYtX8yTroibVfwgVSYfCcPuwuTYxykY7IQ8GiKF71gCTc4i+H +O1MA5cvwsnyNeRmgiM14+MWKWnflBqzdSt7mcG6+r771sasOCLDboD+Uxb4Subx7 +J3m1MildrsUgI5IDe1Q5sIkiVG0S48N46jpA/aSTrOktiDzbpkdmTN/YF+0W3hrW +10Fmvx2A8aTgZBEpXgwnBWLr5cQEYtHEnwxqVdZYOJxmD537q1SAmZzsSdaCn9pF +1j9TBgO3/R/shn104KS06DK2qgcj+O8kQZ5jMHj0VN2O8Fo4jhJ/eMdvAlYhM864 +uK1pVQIDAQABo2MwYTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUoYxFkwoSYwunV18ySn3hIee3PmYwHwYDVR0jBBgwFoAUoYxFkwoS +YwunV18ySn3hIee3PmYwDQYJKoZIhvcNAQELBQADggIBAIeuYW1IOCrGHNxKLoR4 +ScAjKkW4NU3RBfq5BTPEZL3brVQWKrA+DVoo2qYagHMMxEFvr7g0tnfUW44dC4tG +kES1s+5JGInBSzSzhzV0op5FZ+1FcWa2uaElc9fCrIj70h2na9rAWubYWWQ0l2Ug +MTMDT86tCZ6u6cI+GHW0MyUSuwXsULpxQOK93ohGBSGEi6MrHuswMIm/EfVcRPiR +i0tZRQswDcoMT29jvgT+we3gh/7IzVa/5dyOetTWKU6A26ubP45lByL3RM2WHy3H +9Qm2mHD/ONxQFRGEO3+p8NgkVMgXjCsTSdaZf0XRD46/aXI3Uwf05q79Wz55uQbN +uIF4tE2g0DW65K7/00m8Ne1jxrP846thWgW2C+T/qSq+31ROwktcaNqjMqLJTVcY +UzRZPGaZ1zwCeKdMcdC/2/HEPOcB5gTyRPZIJjAzybEBGesC8cwh+joCMBedyF+A +P90lrAKb4xfevcqSFNJSgVPm6vwwZzKpYvaTFxUHMV4PG2n19Km3fC2z7YREMkco +BzuGaUWpxzaWkHJ02BKmcyPRTrm2ejrEKaFQBhG52fQmbmIIEiAW8AFXF9QFNmeX 
+61H5/zMkDAUPVr/vPRxSjoreaQ9aH/DVAzFEs5LG6nWorrvHYAOImP/HBIRSkIbh +tJOpUC/o69I2rDBgp9ADE7UK +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICqDCCAi2gAwIBAgIQIW4zpcvTiKRvKQe0JzzE2DAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAxIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATXZrUb266zYO5G6ohjdTsqlG3zXxL24w+etgoUU0hS +yNw6s8tIICYSTvqJhNTfkeQpfSgB2dsYQ2mhH7XThhbcx39nI9/fMTGDAzVwsUu3 +yBe7UcvclBfb6gk7dhLeqrWjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBRlwI0l9Qy6l3eQP54u4Fr1ztXh5DAKBggqhkjOPQQD +AwNpADBmAjEApa7jRlP4mDbjIvouKEkN7jB+M/PsP3FezFWJeJmssv3cHFwzjim5 +axfIEWi13IMHAjEAnMhE2mnCNsNUGRCFAtqdR+9B52wmnQk9922Q0QVEL7C8g5No +8gxFSTm/mQQc0xCg +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIQJDJ18h0v0gkz97RqytDzmDANBgkqhkiG9w0BAQsFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAxIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAx +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDHOddJZKmZgiJM6kXZBxbje/SD +6Jlz+muxNuCad6BAwoGNAcfMjL2Pffd543pMA03Z+/2HOCgs3ZqLVAjbZ/sbjP4o +ki++t7JIp4Gh2F6Iw8w5QEFa0dzl2hCfL9oBTf0uRnz5LicKaTfukaMbasxEvxvH +w9QRslBglwm9LiL1QYRmn81ApqkAgMEflZKf3vNI79sdd2H8f9/ulqRy0LY+/3gn +r8uSFWkI22MQ4uaXrG7crPaizh5HmbmJtxLmodTNWRFnw2+F2EJOKL5ZVVkElauP +N4C/DfD8HzpkMViBeNfiNfYgPym4jxZuPkjctUwH4fIa6n4KedaovetdhitNAgMB +AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBQzQejIORIVk0jyljIuWvXalF9TYDANBgkqhkiG9w0BAQsFAAOCAQEAFeNzV7EX +tl9JaUSm9l56Z6zS3nVJq/4lVcc6yUQVEG6/MWvL2QeTfxyFYwDjMhLgzMv7OWyP +4lPiPEAz2aSMR+atWPuJr+PehilWNCxFuBL6RIluLRQlKCQBZdbqUqwFblYSCT3Q +dPTXvQbKqDqNVkL6jXI+dPEDct+HG14OelWWLDi3mIXNTTNEyZSPWjEwN0ujOhKz +5zbRIWhLLTjmU64cJVYIVgNnhJ3Gw84kYsdMNs+wBkS39V8C3dlU6S+QTnrIToNA +DJqXPDe/v+z28LSFdyjBC8hnghAXOKK3Buqbvzr46SMHv3TgmDgVVXjucgBcGaP0 +0jPg/73RVDkpDw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICqDCCAi2gAwIBAgIQNBdlEkA7t1aALYDLeVWmHjAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTExMDA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAyIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAATR2UqOTA2ESlG6fO/TzPo6mrWnYxM9AeBJPvrBR8mS +szrX/m+c95o6D/UOCgrDP8jnEhSO1dVtmCyzcTIK6yq99tdqIAtnRZzSsr9TImYJ +XdsR8/EFM1ij4rjPfM2Cm72jQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBQ9MvM6qQyQhPmijGkGYVQvh3L+BTAKBggqhkjOPQQD +AwNpADBmAjEAyKapr0F/tckRQhZoaUxcuCcYtpjxwH+QbYfTjEYX8D5P/OqwCMR6 +S7wIL8fip29lAjEA1lnehs5fDspU1cbQFQ78i5Ry1I4AWFPPfrFLDeVQhuuea9// +KabYR9mglhjb8kWz 
+-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID9jCCAt6gAwIBAgIQZIKe/DcedF38l/+XyLH/QTANBgkqhkiG9w0BAQsFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAyIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTExMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAy +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNzOkFyGOFyz9AYxe9GPo15gRn +V2WYKaRPyVyPDzTS+NqoE2KquB5QZ3iwFkygOakVeq7t0qLA8JA3KRgmXOgNPLZs +ST/B4NzZS7YUGQum05bh1gnjGSYc+R9lS/kaQxwAg9bQqkmi1NvmYji6UBRDbfkx ++FYW2TgCkc/rbN27OU6Z4TBnRfHU8I3D3/7yOAchfQBeVkSz5GC9kSucq1sEcg+y +KNlyqwUgQiWpWwNqIBDMMfAr2jUs0Pual07wgksr2F82owstr2MNHSV/oW5cYqGN +KD6h/Bwg+AEvulWaEbAZ0shQeWsOagXXqgQ2sqPy4V93p3ec5R7c6d9qwWVdAgMB +AAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQW +BBSHjCCVyJhK0daABkqQNETfHE2/sDANBgkqhkiG9w0BAQsFAAOCAQEAgY6ypWaW +tyGltu9vI1pf24HFQqV4wWn99DzX+VxrcHIa/FqXTQCAiIiCisNxDY7FiZss7Y0L +0nJU9X3UXENX6fOupQIR9nYrgVfdfdp0MP1UR/bgFm6mtApI5ud1Bw8pGTnOefS2 +bMVfmdUfS/rfbSw8DVSAcPCIC4DPxmiiuB1w2XaM/O6lyc+tHc+ZJVdaYkXLFmu9 +Sc2lo4xpeSWuuExsi0BmSxY/zwIa3eFsawdhanYVKZl/G92IgMG/tY9zxaaWI4Sm +KIYkM2oBLldzJbZev4/mHWGoQClnHYebHX+bn5nNMdZUvmK7OaxoEkiRIKXLsd3+ +b/xa5IJVWa8xqQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICpzCCAi2gAwIBAgIQTHm1miicdjFk9YlE0JEC3jAKBggqhkjOPQQDAzCBlDEL +MAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYD +VQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBD +bGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0g +RzQwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UEBhMC +VVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZTeW1h +bnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAzIFB1 +YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAARXz+qzOU0/oSHgbi84csaHl/OFC0fnD1HI0fSZm8pZ +Zf9M+eoLtyXV0vbsMS0yYhLXdoan+jjJZdT+c+KEOfhMSWIT3brViKBfPchPsD+P +oVAR5JNGrcNfy/GkapVW6MCjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8E +BTADAQH/MB0GA1UdDgQWBBQknbzScfcdwiW+IvGJpSwVOzQeXjAKBggqhkjOPQQD +AwNoADBlAjEAuWZoZdsF0Dh9DvPIdWG40CjEsUozUVj78jwQyK5HeHbKZiQXhj5Q +Vm6lLZmIuL0kAjAD6qfnqDzqnWLGX1TamPR3vU+PGJyRXEdrQE0QHbPhicoLIsga +xcX+i93B3294n5E= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF9jCCA96gAwIBAgIQZWNxhdNvRcaPfzH5CYeSgjANBgkqhkiG9w0BAQwFADCB +lDELMAkGA1UEBhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8w +HQYDVQQLExZTeW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRl +YyBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzYwHhcNMTIxMDE4MDAwMDAwWhcNMzcxMjAxMjM1OTU5WjCBlDELMAkGA1UE +BhMCVVMxHTAbBgNVBAoTFFN5bWFudGVjIENvcnBvcmF0aW9uMR8wHQYDVQQLExZT +eW1hbnRlYyBUcnVzdCBOZXR3b3JrMUUwQwYDVQQDEzxTeW1hbnRlYyBDbGFzcyAz +IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzYwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC3DrL6TbyachX7d1vb/UMPywv3 +YC6zK34Mu1PyzE5l8xm7/zUd99Opu0Attd141Kb5N+qFBXttt+YTSwZ8+3ZjjyAd +LTgrBIXy6LDRX01KIclq2JTqHgJQpqqQB6BHIepm+QSg5oPwxPVeluInTWHDs8GM +IrZmoQDRVin77cF/JMo9+lqUsITDx7pDHP1kDvEo+0dZ8ibhMblE+avd+76+LDfj +rAsY0/wBovGkCjWCR0yrvYpe3xOF/CDMSFmvr0FvyyPNypOn3dVfyGQ7/wEDoApP +LW49hL6vyDKyUymQFfewBZoKPPa5BpDJpeFdoDuw/qi2v/WJKFckOiGGceTciotB +VeweMCRZ0cBZuHivqlp03iWAMJjtMERvIXAc2xJTDtamKGaTLB/MTzwbgcW59nhv 
+0DI6CHLbaw5GF4WU87zvvPekXo7p6bVk5bdLRRIsTDe3YEMKTXEGAJQmNXQfu3o5 +XE475rgD4seTi4QsJUlF3X8jlGAfy+nN9quX92Hn+39igcjcCjBcGHzmzu/Hbh6H +fLPpysh7avRo/IOlDFa0urKNSgrHl5fFiDAVPRAIVBVycmczM/R8t84AJ1NlziTx +WmTnNi/yLgLCl99y6AIeoPc9tftoYAP6M6nmEm0G4amoXU48/tnnAGWsthlNe4N/ +NEfq4RhtsYsceavnnQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUOXEIAD7eyIbnkP/k/SEPziQZFvYwDQYJKoZIhvcN +AQEMBQADggIBAFBriE1gSM5a4yLOZ3yEp80c/ekMA4w2rwqHDmquV64B0Da78v25 +c8FftaiuTKL6ScsHRhY2vePIVzh+OOS/JTNgxtw3nGO7XpgeGrKC8K6mdxGAREeh +KcXwszrOmPC47NMOgAZ3IzBM/3lkYyJbd5NDS3Wz2ztuO0rd8ciutTeKlYg6EGhw +OLlbcH7VQ8n8X0/l5ns27vAg7UdXEyYQXhQGDXt2B8LGLRb0rqdsD7yID08sAraj +1yLmmUc12I2lT4ESOhF9s8wLdfMecKMbA+r6mujmLjY5zJnOOj8Mt674Q5mwk25v +qtkPajGRu5zTtCj7g0x6c4JQZ9IOrO1gxbJdNZjPh34eWR0kvFa62qRa2MzmvB4Q +jxuMjvPB27e+1LBbZY8WaPNWxSoZFk0PuGWHbSSDuGLc4EdhGoh7zk5//dzGDVqa +pPO1TPbdMaboHREhMzAEYX0c4D5PjT+1ixIAWn2poQDUg+twuxj4pNIcgS23CBHI +Jnu21OUPA0Zy1CVAHr5JXW2T8VyyO3VUaTqg7kwiuqya4gitRWMFSlI1dsQ09V4H +Mq3cfCbRW4+t5OaqG3Wf61206MCpFXxOSgdy30bJ1JGSdVaw4e43NmUoxRXIK3bM +bW8Zg/T92hXiQeczeUaDV/nxpbZt07zXU+fucW14qZen7iCcGRVyFT0E +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDcTCCAlmgAwIBAgIVAOYJ/nrqAGiM4CS07SAbH+9StETRMA0GCSqGSIb3DQEB +BQUAMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGlj +emVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIgUk9PVCBDQTAeFw0xMTEyMDYx +MTEwNTdaFw0zMTEyMDYxMTEwNTdaMFAxCzAJBgNVBAYTAlBMMSgwJgYDVQQKDB9L +cmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRcwFQYDVQQDDA5TWkFGSVIg +Uk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKxHL49ZMTml +6g3wpYwrvQKkvc0Kc6oJ5sxfgmp1qZfluwbv88BdocHSiXlY8NzrVYzuWBp7J/9K +ULMAoWoTIzOQ6C9TNm4YbA9A1jdX1wYNL5Akylf8W5L/I4BXhT9KnlI6x+a7BVAm +nr/Ttl+utT/Asms2fRfEsF2vZPMxH4UFqOAhFjxTkmJWf2Cu4nvRQJHcttB+cEAo +ag/hERt/+tzo4URz6x6r19toYmxx4FjjBkUhWQw1X21re//Hof2+0YgiwYT84zLb +eqDqCOMOXxvH480yGDkh/QoazWX3U75HQExT/iJlwnu7I1V6HXztKIwCBjsxffbH +3jOshCJtywcCAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFFOSo33/gnbwM9TrkmdHYTMbaDsqMA0GCSqGSIb3DQEBBQUA +A4IBAQA5UFWd5EL/pBviIMm1zD2JLUCpp0mJG7JkwznIOzawhGmFFaxGoxAhQBEg +haP+E0KR66oAwVC6xe32QUVSHfWqWndzbODzLB8yj7WAR0cDM45ZngSBPBuFE3Wu +GLJX9g100ETfIX+4YBR/4NR/uvTnpnd9ete7Whl0ZfY94yuu4xQqB5QFv+P7IXXV +lTOjkjuGXEcyQAjQzbFaT9vIABSbeCXWBbjvOXukJy6WgAiclzGNSYprre8Ryydd +fmjW9HIGwsIO03EldivvqEYL1Hv1w/Pur+6FUEOaL68PEIUovfgwIB2BAw+vZDuw +cH0mX548PojGyg434cDjkSXa3mHF +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgxMDAxMTA0MDE0WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUd +AqSzm1nzHoqvNK38DcLZSBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiC +FoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/FvudocP05l03Sx5iRUKrERLMjfTlH6VJi +1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx9702cu+fjOlbpSD8DT6Iavq +jnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGVWOHAD3bZ +wI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/ +WSA2AHmgoCJrjNXyYdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhy +NsZt+U2e+iKo4YFWz827n+qrkRk4r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPAC 
+uvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNfvNoBYimipidx5joifsFvHZVw +IEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR3p1m0IvVVGb6 +g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlP +BSeOE6Fuwg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUx +KzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAd +BgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNl +YyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgxMDAxMTAyOTU2WhcNMzMxMDAxMjM1 +OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lzdGVtcyBFbnRlcnBy +aXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBDZW50 +ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN +8ELg63iIVl6bmlQdTQyK9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/ +RLyTPWGrTs0NvvAgJ1gORH8EGoel15YUNpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4 +hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZFiP0Zf3WHHx+xGwpzJFu5 +ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W0eDrXltM +EnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGj +QjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1 +A/d2O2GCahKqGFPrAyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOy +WL6ukK2YJ5f+AbGwUgC4TeQbIXQbfsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ +1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzTucpH9sry9uetuUg/vBa3wW30 +6gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7hP0HHRwA11fXT +91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4p +TpPDpFQUWw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAw +NzEUMBIGA1UECgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJv +b3QgQ0EgdjEwHhcNMDcxMDE4MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYD +VQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwWVGVsaWFTb25lcmEgUm9vdCBDQSB2 +MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+6yfwIaPzaSZVfp3F +VRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA3GV1 +7CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+X +Z75Ljo1kB1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+ +/jXh7VB7qTCNGdMJjmhnXb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs +81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxHoLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkm +dtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3F0fUTPHSiXk+TT2YqGHe +Oh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJoWjiUIMu +sDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4 +pgd7gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fs +slESl1MpWtTwEhDcTwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQ +arMCpgKIv7NHfirZ1fpoeDVNAgMBAAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYD +VR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qWDNXr+nuqF+gTEjANBgkqhkiG +9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNmzqjMDfz1mgbl +dxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1Tj +TQpgcmLNkQfWpb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBed +Y2gea+zDTYa4EzAvXUYNR0PVG6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7 +Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpcc41teyWRyu5FrgZLAMzTsVlQ2jqI +OylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOTJsjrDNYmiLbAJM+7 +vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2qReW +t88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcn +HL/EVlP6Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVx 
+SK236thZiNSQvxaz2emsWWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDEL +MAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMp +IDIwMDcgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAi +BgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMjAeFw0wNzExMDUwMDAw +MDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh +d3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBGb3Ig +YXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9v +dCBDQSAtIEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/ +BebfowJPDQfGAFG6DAJSLSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6 +papu+7qzcMBniKI11KOasf2twu8x+qi58/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8E +BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUmtgAMADna3+FGO6Lts6K +DPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUNG4k8VIZ3 +KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41ox +XZ3Krr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCB +rjELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNV +BAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0wODA0MDIwMDAwMDBa +Fw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhhd3Rl +LCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9u +MTgwNgYDVQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEcz +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsr8nLPvb2FvdeHsbnndm +gcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2AtP0LMqmsywCPLLEHd5N/8 +YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC+BsUa0Lf +b1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS9 +9irY7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2S +zhkGcuYMXDhpxwTWvGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUk +OQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNV +HQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJKoZIhvcNAQELBQADggEBABpA +2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweKA3rD6z8KLFIW +oCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu +t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7c +KUGRIjxpp7sC8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fM +m7v/OeZWYdMKp8RcTGB7BXcmer/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZu +MdRAGmI0Nj81Aa6sY6A= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCB +qTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMf +Q2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIw +MDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNV +BAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3MDAwMDAwWhcNMzYw +NzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5j +LjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYG +A1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl +IG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCsoPD7gFnUnMekz52hWXMJEEUMDSxuaPFs +W0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ1CRfBsDMRJSUjQJib+ta +3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGcq/gcfomk +6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6 +Sk/KaAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94J 
+NqR32HuHUETVPm4pafs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XP +r87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUFAAOCAQEAeRHAS7ORtvzw6WfU +DW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeEuzLlQRHAd9mz +YJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX +xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2 +/qxAeeWsEG89jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/ +LHbTY5xZ3Y+m4Q6gLkH3LpVHz7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7 +jVaMaA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIGHDCCBASgAwIBAgIES45gAzANBgkqhkiG9w0BAQsFADBFMQswCQYDVQQGEwJE +SzESMBAGA1UEChMJVFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQ +cmltYXJ5IENBMB4XDTEwMDMwMzEyNDEzNFoXDTM3MTIwMzEzMTEzNFowRTELMAkG +A1UEBhMCREsxEjAQBgNVBAoTCVRSVVNUMjQwODEiMCAGA1UEAxMZVFJVU1QyNDA4 +IE9DRVMgUHJpbWFyeSBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIB +AJlJodr3U1Fa+v8HnyACHV81/wLevLS0KUk58VIABl6Wfs3LLNoj5soVAZv4LBi5 +gs7E8CZ9w0F2CopW8vzM8i5HLKE4eedPdnaFqHiBZ0q5aaaQArW+qKJx1rT/AaXt +alMB63/yvJcYlXS2lpexk5H/zDBUXeEQyvfmK+slAySWT6wKxIPDwVapauFY9QaG ++VBhCa5jBstWS7A5gQfEvYqn6csZ3jW472kW6OFNz6ftBcTwufomGJBMkonf4ZLr +6t0AdRi9jflBPz3MNNRGxyjIuAmFqGocYFA/OODBRjvSHB2DygqQ8k+9tlpvzMRr +kU7jq3RKL+83G1dJ3/LTjCLz4ryEMIC/OJ/gNZfE0qXddpPtzflIPtUFVffXdbFV +1t6XZFhJ+wBHQCpJobq/BjqLWUA86upsDbfwnePtmIPRCemeXkY0qabC+2Qmd2Fe +xyZphwTyMnbqy6FG1tB65dYf3mOqStmLa3RcHn9+2dwNfUkh0tjO2FXD7drWcU0O +I9DW8oAypiPhm/QCjMU6j6t+0pzqJ/S0tdAo+BeiXK5hwk6aR+sRb608QfBbRAs3 +U/q8jSPByenggac2BtTN6cl+AA1Mfcgl8iXWNFVGegzd/VS9vINClJCe3FNVoUnR +YCKkj+x0fqxvBLopOkJkmuZw/yhgMxljUi2qYYGn90OzAgMBAAGjggESMIIBDjAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjARBgNVHSAECjAIMAYGBFUd +IAAwgZcGA1UdHwSBjzCBjDAsoCqgKIYmaHR0cDovL2NybC5vY2VzLnRydXN0MjQw +OC5jb20vb2Nlcy5jcmwwXKBaoFikVjBUMQswCQYDVQQGEwJESzESMBAGA1UEChMJ +VFJVU1QyNDA4MSIwIAYDVQQDExlUUlVTVDI0MDggT0NFUyBQcmltYXJ5IENBMQ0w +CwYDVQQDEwRDUkwxMB8GA1UdIwQYMBaAFPZt+LFIs0FDAduGROUYBbdezAY3MB0G +A1UdDgQWBBT2bfixSLNBQwHbhkTlGAW3XswGNzANBgkqhkiG9w0BAQsFAAOCAgEA +VPAQGrT7dIjD3/sIbQW86f9CBPu0c7JKN6oUoRUtKqgJ2KCdcB5ANhCoyznHpu3m +/dUfVUI5hc31CaPgZyY37hch1q4/c9INcELGZVE/FWfehkH+acpdNr7j8UoRZlkN +15b/0UUBfGeiiJG/ugo4llfoPrp8bUmXEGggK3wyqIPcJatPtHwlb6ympfC2b/Ld +v/0IdIOzIOm+A89Q0utx+1cOBq72OHy8gpGb6MfncVFMoL2fjP652Ypgtr8qN9Ka +/XOazktiIf+2Pzp7hLi92hRc9QMYexrV/nnFSQoWdU8TqULFUoZ3zTEC3F/g2yj+ +FhbrgXHGo5/A4O74X+lpbY2XV47aSuw+DzcPt/EhMj2of7SA55WSgbjPMbmNX0rb +oenSIte2HRFW5Tr2W+qqkc/StixgkKdyzGLoFx/xeTWdJkZKwyjqge2wJqws2upY +EiThhC497+/mTiSuXd69eVUwKyqYp9SD2rTtNmF6TCghRM/dNsJOl+osxDVGcwvt +WIVFF/Onlu5fu1NHXdqNEfzldKDUvCfii3L2iATTZyHwU9CALE+2eIA+PIaLgnM1 +1oCfUnYBkQurTrihvzz9PryCVkLxiqRmBVvUz+D4N5G/wvvKDS6t6cPCS+hqM482 +cbBsn0R9fFLO4El62S9eH1tqOzO20OAOK65yJIsOpSE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBF +MQswCQYDVQQGEwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQL +ExNUcnVzdGlzIEZQUyBSb290IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTEx +MzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNVBAoTD1RydXN0aXMgTGltaXRlZDEc +MBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQRUN+ +AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihH +iTHcDnlkH5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjj +vSkCqPoc4Vu5g6hBSLwacY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA +0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zto3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlB +OrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEAAaNTMFEwDwYDVR0TAQH/ 
+BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAdBgNVHQ4E +FgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01 +GX2cGE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmW +zaD+vkAMXBJV+JOCyinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP4 +1BIy+Q7DsdwyhEQsb8tGD+pmQQ9P8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZE +f1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHVl/9D7S3B2l0pKoU/rGXuhg8F +jZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYliB6XzCGcKQEN +ZetX2fNXlrtIzYE= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcx +EjAQBgNVBAoTCVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMT +VFdDQSBHbG9iYWwgUm9vdCBDQTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5 +NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQKEwlUQUlXQU4tQ0ExEDAOBgNVBAsT +B1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3QgQ0EwggIiMA0GCSqG +SIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2CnJfF +10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz +0ALfUPZVr2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfCh +MBwqoJimFb3u/Rk28OKRQ4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbH +zIh1HrtsBv+baz4X7GGqcXzGHaL3SekVtTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc +46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1WKKD+u4ZqyPpcC1jcxkt2 +yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99sy2sbZCi +laLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYP +oA/pyJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQA +BDzfuBSO6N+pjWxnkjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcE +qYSjMq+u7msXi7Kx/mzhkIyIqJdIzshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm +4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB +/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6gcFGn90xHNcgL +1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WF +H6vPNOw/KP4M8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNo +RI2T9GRwoD2dKAXDOXC4Ynsg/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+ +nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlglPx4mI88k1HtQJAH32RjJMtOcQWh +15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryPA9gK8kxkRr05YuWW +6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3mi4TW +nsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5j +wa19hAM8EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWz +aGHQRiapIVJpLesux+t3zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmy +KwbQBM0= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzES +MBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFU +V0NBIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMz +WhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJVEFJV0FO +LUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB +AQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFE +AcK0HMMxQhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HH +K3XLfJ+utdGdIzdjp9xCoi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeX +RfwZVzsrb+RH9JlF/h3x+JejiB03HFyP4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/z +rX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1ry+UPizgN7gr8/g+YnzAx +3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkq +hkiG9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeC +MErJk/9q56YAf4lCmtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdls +XebQ79NqZp4VKIV66IIArB6nCWlWQtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62D 
+lhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVYT0bf+215WfKEIlKuD8z7fDvn +aspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocnyYh0igzyXxfkZ +YiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRS +MRgwFgYDVQQHDA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJp +bGltc2VsIHZlIFRla25vbG9qaWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSw +VEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ryb25payB2ZSBLcmlwdG9sb2ppIEFy +YcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNVBAsMGkthbXUgU2Vy +dGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUgS8O2 +ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAe +Fw0wNzA4MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIx +GDAWBgNVBAcMD0dlYnplIC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmls +aW1zZWwgdmUgVGVrbm9sb2ppayBBcmHFn3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBU +QUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZlIEtyaXB0b2xvamkgQXJh +xZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2FtdSBTZXJ0 +aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7Zr +IFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4h +gb46ezzb8R1Sf1n68yJMlaCQvEhOEav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yK +O7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1xnnRFDDtG1hba+818qEhTsXO +fJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR6Oqeyjh1jmKw +lZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL +hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQID +AQABo0IwQDAdBgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/ +BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmP +NOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4N5EY3ATIZJkrGG2AA1nJrvhY0D7t +wyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLTy9LQQfMmNkqblWwM +7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYhLBOh +gLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5n +oN+J1q2MdqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUs +yZyQ2uypQjyttgI= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEPTCCAyWgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvzE/MD0GA1UEAww2VMOc +UktUUlVTVCBFbGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sx +c8SxMQswCQYDVQQGEwJUUjEPMA0GA1UEBwwGQW5rYXJhMV4wXAYDVQQKDFVUw5xS +S1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kg +SGl6bWV0bGVyaSBBLsWeLiAoYykgQXJhbMSxayAyMDA3MB4XDTA3MTIyNTE4Mzcx +OVoXDTE3MTIyMjE4MzcxOVowgb8xPzA9BgNVBAMMNlTDnFJLVFJVU1QgRWxla3Ry +b25payBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTELMAkGA1UEBhMC +VFIxDzANBgNVBAcMBkFua2FyYTFeMFwGA1UECgxVVMOcUktUUlVTVCBCaWxnaSDE +sGxldGnFn2ltIHZlIEJpbGnFn2ltIEfDvHZlbmxpxJ9pIEhpem1ldGxlcmkgQS7F +ni4gKGMpIEFyYWzEsWsgMjAwNzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAKu3PgqMyKVYFeaK7yc9SrToJdPNM8Ig3BnuiD9NYvDdE3ePYakqtdTyuTFY +KTsvP2qcb3N2Je40IIDu6rfwxArNK4aUyeNgsURSsloptJGXg9i3phQvKUmi8wUG ++7RP2qFsmmaf8EMJyupyj+sA1zU511YXRxcw9L6/P8JorzZAwan0qafoEGsIiveG +HtyaKhUG9qPw9ODHFNRRf8+0222vR5YXm3dx2KdxnSQM9pQ/hTEST7ruToK4uT6P +IzdezKKqdfcYbwnTrqdUKDT74eA7YH2gvnmJhsifLfkKS8RQouf9eRbHegsYz85M +733WB2+Y8a+xwXrXgTW4qhe04MsCAwEAAaNCMEAwHQYDVR0OBBYEFCnFkKslrxHk +Yb+j/4hhkeYO/pyBMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0G +CSqGSIb3DQEBBQUAA4IBAQAQDdr4Ouwo0RSVgrESLFF6QSU2TJ/sPx+EnWVUXKgW +AkD6bho3hO9ynYYKVZ1WKKxmLNA6VpM0ByWtCLCPyA8JWcqdmBzlVPi5RX9ql2+I +aE1KBiY3iAIOtsbWcpnOa3faYjGkVh+uX4132l32iPwa2Z61gfAyuOOI0JzzaqC5 +mxRZNTZPz/OOXl0XrRWV2N2y1RVuAE6zS89mlOTgzbUF2mNXi+WzqtvALhyQRNsa +XRik7r4EW5nVcV9VZWRi1aKbBFmGyGJ353yCRWo9F7/snXUMrqNvWtMvmDb08PUZ 
+qxFdyKbjKlhqQgnDvZImZjINXQhVdP+MmNAKpoRq0Tl9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFkjCCA3qgAwIBAgIBCDANBgkqhkiG9w0BAQUFADA6MQswCQYDVQQGEwJDTjER +MA8GA1UEChMIVW5pVHJ1c3QxGDAWBgNVBAMTD1VDQSBHbG9iYWwgUm9vdDAeFw0w +ODAxMDEwMDAwMDBaFw0zNzEyMzEwMDAwMDBaMDoxCzAJBgNVBAYTAkNOMREwDwYD +VQQKEwhVbmlUcnVzdDEYMBYGA1UEAxMPVUNBIEdsb2JhbCBSb290MIICIjANBgkq +hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA2rPlBlA/9nP3xDK/RqUlYjOHsGj+p9+I +A2N9Apb964fJ7uIIu527u+RBj8cwiQ9tJMAEbBSUgU2gDXRm8/CFr/hkGd656YGT +0CiFmUdCSiw8OCdKzP/5bBnXtfPvm65bNAbXj6ITBpyKhELVs6OQaG2BkO5NhOxM +cE4t3iQ5zhkAQ5N4+QiGHUPR9HK8BcBn+sBR0smFBySuOR56zUHSNqth6iur8CBV +mTxtLRwuLnWW2HKX4AzKaXPudSsVCeCObbvaE/9GqOgADKwHLx25urnRoPeZnnRc +GQVmMc8+KlL+b5/zub35wYH1N9ouTIElXfbZlJrTNYsgKDdfUet9Ysepk9H50DTL +qScmLCiQkjtVY7cXDlRzq6987DqrcDOsIfsiJrOGrCOp139tywgg8q9A9f9ER3Hd +J90TKKHqdjn5EKCgTUCkJ7JZFStsLSS3JGN490MYeg9NEePorIdCjedYcaSrbqLA +l3y74xNLytu7awj5abQEctXDRrl36v+6++nwOgw19o8PrgaEFt2UVdTvyie3AzzF +HCYq9TyopZWbhvGKiWf4xwxmse1Bv4KmAGg6IjTuHuvlb4l0T2qqaqhXZ1LUIGHB +zlPL/SR/XybfoQhplqCe/klD4tPq2sTxiDEhbhzhzfN1DiBEFsx9c3Q1RSw7gdQg +7LYJjD5IskkCAwEAAaOBojCBnzALBgNVHQ8EBAMCAQYwDAYDVR0TBAUwAwEB/zBj +BgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMDBggrBgEFBQcD +BAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcDBwYIKwYBBQUHAwgGCCsGAQUF +BwMJMB0GA1UdDgQWBBTZw9P4gJJnzF3SOqLXcaK0xDiALTANBgkqhkiG9w0BAQUF +AAOCAgEA0Ih5ygiq9ws0oE4Jwul+NUiJcIQjL1HDKy9e21NrW3UIKlS6Mg7VxnGF +sZdJgPaE0PC6t3GUyHlrpsVE6EKirSUtVy/m1jEp+hmJVCl+t35HNmktbjK81HXa +QnO4TuWDQHOyXd/URHOmYgvbqm4FjMh/Rk85hZCdvBtUKayl1/7lWFZXbSyZoUkh +1WHGjGHhdSTBAd0tGzbDLxLMC9Z4i3WA6UG5iLHKPKkWxk4V43I29tSgQYWvimVw +TbVEEFDs7d9t5tnGwBLxSzovc+k8qe4bqi81pZufTcU0hF8mFGmzI7GJchT46U1R +IgP/SobEHOh7eQrbRyWBfvw0hKxZuFhD5D1DCVR0wtD92e9uWfdyYJl2b/Unp7uD +pEqB7CmB9HdL4UISVdSGKhK28FWbAS7d9qjjGcPORy/AeGEYWsdl/J1GW1fcfA67 +loMQfFUYCQSu0feLKj6g5lDWMDbX54s4U+xJRODPpN/xU3uLWrb2EZBL1nXz/gLz +Ka/wI3J9FO2pXd96gZ6bkiL8HvgBRUGXx2sBYb4zaPKgZYRmvOAqpGjTcezHCN6j +w8k2SjTxF+KAryAhk5Qe5hXTVGLxtTgv48y5ZwSpuuXu+RBuyy5+E6+SFP7zJ3N7 +OPxzbbm5iPZujAv1/P8JDrMtXnt145Ik4ubhWD5LKAN1axibRww= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDhDCCAmygAwIBAgIBCTANBgkqhkiG9w0BAQUFADAzMQswCQYDVQQGEwJDTjER +MA8GA1UEChMIVW5pVHJ1c3QxETAPBgNVBAMTCFVDQSBSb290MB4XDTA0MDEwMTAw +MDAwMFoXDTI5MTIzMTAwMDAwMFowMzELMAkGA1UEBhMCQ04xETAPBgNVBAoTCFVu +aVRydXN0MREwDwYDVQQDEwhVQ0EgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEP +ADCCAQoCggEBALNdB8qGJn1r4vs4CQ7MgsJqGgCiFV/W6dQBt1YDAVmP9ThpJHbC +XivF9iu/r/tB/Q9a/KvXg3BNMJjRnrJ2u5LWu+kQKGkoNkTo8SzXWHwk1n8COvCB +a2FgP/Qz3m3l6ihST/ypHWN8C7rqrsRoRuTej8GnsrZYWm0dLNmMOreIy4XU9+gD +Xv2yTVDo1h//rgI/i0+WITyb1yXJHT/7mLFZ5PCpO6+zzYUs4mBGzG+OoOvwNMXx +QhhgrhLtRnUc5dipllq+3lrWeGeWW5N3UPJuG96WUUqm1ktDdSFmjXfsAoR2XEQQ +th1hbOSjIH23jboPkXXHjd+8AmCoKai9PUMCAwEAAaOBojCBnzALBgNVHQ8EBAMC +AQYwDAYDVR0TBAUwAwEB/zBjBgNVHSUEXDBaBggrBgEFBQcDAQYIKwYBBQUHAwIG +CCsGAQUFBwMDBggrBgEFBQcDBAYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEFBQcD +BwYIKwYBBQUHAwgGCCsGAQUFBwMJMB0GA1UdDgQWBBTbHzXza0z/QjFkm827Wh4d +SBC37jANBgkqhkiG9w0BAQUFAAOCAQEAOGy3iPGt+lg3dNHocN6cJ1nL5BXXoMNg +14iABMUwTD3UGusGXllH5rxmy+AI/Og17GJ9ysDawXiv5UZv+4mCI4/211NmVaDe +JRI7cTYWVRJ2+z34VFsxugAG+H1V5ad2g6pcSpemKijfvcZsCyOVjjN/Hl5AHxNU +LJzltQ7dFyiuawHTUin1Ih+QOfTcYmjwPIZH7LgFRbu3DJaUxmfLI3HQjnQi1kHr +A6i26r7EARK1s11AdgYg1GS4KUYGis4fk5oQ7vuqWrTcL9Ury/bXBYSYBZELhPc9 ++tb5evosFeo2gkO3t7jj83EB7UNDogVFwygFBzXjAaU4HoDU18PZ3g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDEL 
+MAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNl +eSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMT +JVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAwMjAx +MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNVBAgT +Ck5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVUaGUg +VVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlm +aWNhdGlvbiBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqflo +I+d61SRvU8Za2EurxtW20eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinng +o4N+LZfQYcTxmdwlkWOrfzCjtHDix6EznPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0G +A1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBBHU6+4WMB +zzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbW +RNZu9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCB +iDELMAkGA1UEBhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0pl +cnNleSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNV +BAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMTAw +MjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMCVVMxEzARBgNV +BAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIK +AoICAQCAEmUXNg7D2wiz0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B +3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2jY0K2dvKpOyuR+OJv0OwWIJAJPuLodMkY +tJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFnRghRy4YUVD+8M/5+bJz/ +Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O+T23LLb2 +VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT +79uq/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6 +c0Plfg6lZrEpfDKEY1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmT +Yo61Zs8liM2EuLE/pDkP2QKe6xJMlXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97l +c6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8yexDJtC/QV9AqURE9JnnV4ee +UB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+eLf8ZxXhyVeE +Hg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8G +A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPF +Up/L+M+ZBn8b2kMVn54CVVeWFPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KO +VWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ7l8wXEskEVX/JJpuXior7gtNn3/3 +ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQEg9zKC7F4iRO/Fjs +8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM8WcR +iQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYze +Sf7dNXGiFSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZ +XHlKYC6SQK5MNyosycdiyA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/ +qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9cJ2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRB +VXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGwsAvgnEzDHNb842m1R0aB +L6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gxQ+6IHdfG +jjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEXjCCA0agAwIBAgIQRL4Mi1AAIbQR0ypoBqmtaTANBgkqhkiG9w0BAQUFADCB +kzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xGzAZBgNVBAMTElVUTiAtIERBVEFDb3Jw +IFNHQzAeFw05OTA2MjQxODU3MjFaFw0xOTA2MjQxOTA2MzBaMIGTMQswCQYDVQQG +EwJVUzELMAkGA1UECBMCVVQxFzAVBgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYD +VQQKExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cu +dXNlcnRydXN0LmNvbTEbMBkGA1UEAxMSVVROIC0gREFUQUNvcnAgU0dDMIIBIjAN 
+BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3+5YEKIrblXEjr8uRgnn4AgPLit6 +E5Qbvfa2gI5lBZMAHryv4g+OGQ0SR+ysraP6LnD43m77VkIVni5c7yPeIbkFdicZ +D0/Ww5y0vpQZY/KmEQrrU0icvvIpOxboGqBMpsn0GFlowHDyUwDAXlCCpVZvNvlK +4ESGoE1O1kduSUrLZ9emxAW5jh70/P/N5zbgnAVssjMiFdC04MwXwLLA9P4yPykq +lXvY8qdOD1R8oQ2AswkDwf9c3V6aPryuvEeKaq5xyh+xKrhfQgUL7EYw0XILyulW +bfXv33i+Ybqypa4ETLyorGkVl73v67SMvzX41MPRKA5cOp9wGDMgd8SirwIDAQAB +o4GrMIGoMAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRT +MtGzz3/64PGgXYVOktKeRR20TzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8vY3Js +LnVzZXJ0cnVzdC5jb20vVVROLURBVEFDb3JwU0dDLmNybDAqBgNVHSUEIzAhBggr +BgEFBQcDAQYKKwYBBAGCNwoDAwYJYIZIAYb4QgQBMA0GCSqGSIb3DQEBBQUAA4IB +AQAnNZcAiosovcYzMB4p/OL31ZjUQLtgyr+rFywJNn9Q+kHcrpY6CiM+iVnJowft +Gzet/Hy+UUla3joKVAgWRcKZsYfNjGjgaQPpxE6YsjuMFrMOoAyYUJuTqXAJyCyj +j98C5OBxOvG0I3KgqgHf35g+FFCgMSa9KOlaMCZ1+XtgHI3zzVAmbQQnmt/VDUVH +KWss5nbZqSl9Mt3JNjy9rjXxEZ4du5A/EkdOjtd+D2JzHVImOBwYSf0wdJrE5SIv +2MCN7ZF6TACPcn9d2t0bi0Vr591pl6jFVkwPDPafepE39peC4N1xaf92P2BNPM/3 +mfnGV/TJVTl4uix5yaaIK/QI +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEojCCA4qgAwIBAgIQRL4Mi1AAJLQR0zYlJWfJiTANBgkqhkiG9w0BAQUFADCB +rjELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xNjA0BgNVBAMTLVVUTi1VU0VSRmlyc3Qt +Q2xpZW50IEF1dGhlbnRpY2F0aW9uIGFuZCBFbWFpbDAeFw05OTA3MDkxNzI4NTBa +Fw0xOTA3MDkxNzM2NThaMIGuMQswCQYDVQQGEwJVUzELMAkGA1UECBMCVVQxFzAV +BgNVBAcTDlNhbHQgTGFrZSBDaXR5MR4wHAYDVQQKExVUaGUgVVNFUlRSVVNUIE5l +dHdvcmsxITAfBgNVBAsTGGh0dHA6Ly93d3cudXNlcnRydXN0LmNvbTE2MDQGA1UE +AxMtVVROLVVTRVJGaXJzdC1DbGllbnQgQXV0aGVudGljYXRpb24gYW5kIEVtYWls +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsjmFpPJ9q0E7YkY3rs3B +YHW8OWX5ShpHornMSMxqmNVNNRm5pELlzkniii8efNIxB8dOtINknS4p1aJkxIW9 +hVE1eaROaJB7HHqkkqgX8pgV8pPMyaQylbsMTzC9mKALi+VuG6JG+ni8om+rWV6l +L8/K2m2qL+usobNqqrcuZzWLeeEeaYji5kbNoKXqvgvOdjp6Dpvq/NonWz1zHyLm +SGHGTPNpsaguG7bUMSAsvIKKjqQOpdeJQ/wWWq8dcdcRWdq6hw2v+vPhwvCkxWeM +1tZUOt4KpLoDd7NlyP0e03RiqhjKaJMeoYV+9Udly/hNVyh00jT/MLbu9mIwFIws +6wIDAQABo4G5MIG2MAsGA1UdDwQEAwIBxjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBSJgmd9xJ0mcABLtFBIfN49rgRufTBYBgNVHR8EUTBPME2gS6BJhkdodHRw +Oi8vY3JsLnVzZXJ0cnVzdC5jb20vVVROLVVTRVJGaXJzdC1DbGllbnRBdXRoZW50 +aWNhdGlvbmFuZEVtYWlsLmNybDAdBgNVHSUEFjAUBggrBgEFBQcDAgYIKwYBBQUH +AwQwDQYJKoZIhvcNAQEFBQADggEBALFtYV2mGn98q0rkMPxTbyUkxsrt4jFcKw7u +7mFVbwQ+zznexRtJlOTrIEy05p5QLnLZjfWqo7NK2lYcYJeA3IKirUq9iiv/Cwm0 +xtcgBEXkzYABurorbs6q15L+5K/r9CYdFip/bDCVNy8zEqx/3cfREYxRmLLQo5HQ +rfafnoOTHh1CuEava2bwm3/q4wMC5QJRwarVNZ1yQAOJujEdxRBoUp7fooXFXAim +eOZTT7Hot9MUnpOmw2TjrH5xzbyf6QMbzPvprDHBr3wVdAKZw7JHpsIyYdfHb0gk +USeh1YdV8nuPmD0Wnu51tvjQjvLzxq4oW6fw8zYX/MMF08oDSlQ= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEdDCCA1ygAwIBAgIQRL4Mi1AAJLQR0zYq/mUK/TANBgkqhkiG9w0BAQUFADCB +lzELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3Qt +SGFyZHdhcmUwHhcNOTkwNzA5MTgxMDQyWhcNMTkwNzA5MTgxOTIyWjCBlzELMAkG +A1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2UgQ2l0eTEe +MBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExhodHRwOi8v +d3d3LnVzZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdh +cmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn +0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlIwrthdBKWHTxqctU8EGc6Oe0rE81m65UJ +M6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFdtqdt++BxF2uiiPsA3/4a 
+MXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8i4fDidNd +oI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqI +DsjfPe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9Ksy +oUhbAgMBAAGjgbkwgbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFKFyXyYbKJhDlV0HN9WFlp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0 +dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNFUkZpcnN0LUhhcmR3YXJlLmNy +bDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUFBwMGBggrBgEF +BQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM +//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28Gpgoiskli +CE7/yMgUsogWXecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gE +CJChicsZUN/KHAG8HQQZexB2lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t +3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kniCrVWFCVH/A7HFe7fRQ5YiuayZSS +KqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67nfhmqA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEZjCCA06gAwIBAgIQRL4Mi1AAJLQR0zYt4LNfGzANBgkqhkiG9w0BAQUFADCB +lTELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAlVUMRcwFQYDVQQHEw5TYWx0IExha2Ug +Q2l0eTEeMBwGA1UEChMVVGhlIFVTRVJUUlVTVCBOZXR3b3JrMSEwHwYDVQQLExho +dHRwOi8vd3d3LnVzZXJ0cnVzdC5jb20xHTAbBgNVBAMTFFVUTi1VU0VSRmlyc3Qt +T2JqZWN0MB4XDTk5MDcwOTE4MzEyMFoXDTE5MDcwOTE4NDAzNlowgZUxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJVVDEXMBUGA1UEBxMOU2FsdCBMYWtlIENpdHkxHjAc +BgNVBAoTFVRoZSBVU0VSVFJVU1QgTmV0d29yazEhMB8GA1UECxMYaHR0cDovL3d3 +dy51c2VydHJ1c3QuY29tMR0wGwYDVQQDExRVVE4tVVNFUkZpcnN0LU9iamVjdDCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAM6qgT+jo2F4qjEAVZURnicP +HxzfOpuCaDDASmEd8S8O+r5596Uj71VRloTN2+O5bj4x2AogZ8f02b+U60cEPgLO +KqJdhwQJ9jCdGIqXsqoc/EHSoTbL+z2RuufZcDX65OeQw5ujm9M89RKZd7G3CeBo +5hy485RjiGpq/gt2yb70IuRnuasaXnfBhQfdDWy/7gbHd2pBnqcP1/vulBe3/IW+ +pKvEHDHd17bR5PDv3xaPslKT16HUiaEHLr/hARJCHhrh2JU022R5KP+6LhHC5ehb +kkj7RwvCbNqtMoNB86XlQXD9ZZBt+vpRxPm9lisZBCzTbafc8H9vg2XiaquHhnUC +AwEAAaOBrzCBrDALBgNVHQ8EBAMCAcYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E +FgQU2u1kdBScFDyr3ZmpvVsoTYs8ydgwQgYDVR0fBDswOTA3oDWgM4YxaHR0cDov +L2NybC51c2VydHJ1c3QuY29tL1VUTi1VU0VSRmlyc3QtT2JqZWN0LmNybDApBgNV +HSUEIjAgBggrBgEFBQcDAwYIKwYBBQUHAwgGCisGAQQBgjcKAwQwDQYJKoZIhvcN +AQEFBQADggEBAAgfUrE3RHjb/c652pWWmKpVZIC1WkDdIaXFwfNfLEzIR1pp6ujw +NTX00CXzyKakh0q9G7FzCL3Uw8q2NbtZhncxzaeAFK4T7/yxSPlrJSUtUbYsbUXB +mMiKVl0+7kNOPmsnjtA6S4ULX9Ptaqd1y9Fahy85dRNacrACgZ++8A+EVCBibGnU +4U3GDZlDAQ0Slox4nb9QorFEqmrPF3rPbw/U+CRVX/A0FklmPlBGyWNxODFiuGK5 +81OtbLUrohKqGU8J2l7nk8aOFAj+8DCAGKCGhU3IfdeLA/5u1fedFqySLKAj5ZyR +Uh+U3xeUc8OzwcFxBSAAeL0TUh2oPs0AH8g= +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCLW3VWhFSFCwDPrzhIzrGkMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDEgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAN2E1Lm0+afY8wR4 +nN493GwTFtl63SRRZsDHJlkNrAYIwpTRMx/wgzUfbhvI3qpuFU5UJ+/EbRrsC+MO +8ESlV8dAWB6jRx9x7GD2bZTIGDnt/kIYVt/kTEkQeE4BdjVjEjbdZrwBBDajVWjV +ojYJrKshJlQGrT/KFOCsyq0GHZXi+J3x4GD/wn91K0zM2v6HmSHquv4+VNfSWXjb +PG7PoBMAGrgnoeS+Z5bKoMWznN3JdZ7rMJpfo83ZrngZPyPpXNspva1VyBtUjGP2 
+6KbqxzcSXKMpHgLZ2x87tNcPVkeBFQRKr4Mn0cVYiMHd9qqnoxjaaKptEVHhv2Vr +n5Z20T0CAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAq2aN17O6x5q25lXQBfGfMY1a +qtmqRiYPce2lrVNWYgFHKkTp/j90CxObufRNG7LRX7K20ohcs5/Ny9Sn2WCVhDr4 +wTcdYcrnsMXlkdpUpqwxga6X3s0IrLjAl4B/bnKk52kTlWUfxJM8/XmPBNQ+T+r3 +ns7NZ3xPZQL/kYVUc8f/NveGLezQXk//EZ9yBta4GvFMDSZl4kSAHsef493oCtrs +pSCAaWihT37ha88HQfqDjrw43bAuEbFrskLMmrz5SCJ5ShkPshw+IHTZasO+8ih4 +E1Z5T21Q6huwtVexN2ZYI/PcD98Kh8TvhgXVOBRgmaNL3gaWcSzy27YfpO8/7g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGTCCAwECEGFwy0mMX5hFKeewptlQW3owDQYJKoZIhvcNAQEFBQAwgcoxCzAJ +BgNVBAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVy +aVNpZ24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24s +IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNp +Z24gQ2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0 +eSAtIEczMB4XDTk5MTAwMTAwMDAwMFoXDTM2MDcxNjIzNTk1OVowgcoxCzAJBgNV +BAYTAlVTMRcwFQYDVQQKEw5WZXJpU2lnbiwgSW5jLjEfMB0GA1UECxMWVmVyaVNp +Z24gVHJ1c3QgTmV0d29yazE6MDgGA1UECxMxKGMpIDE5OTkgVmVyaVNpZ24sIElu +Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTFFMEMGA1UEAxM8VmVyaVNpZ24g +Q2xhc3MgMiBQdWJsaWMgUHJpbWFyeSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt +IEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArwoNwtUs22e5LeWU +J92lvuCwTY+zYVY81nzD9M0+hsuiiOLh2KRpxbXiv8GmR1BeRjmL1Za6tW8UvxDO +JxOeBUebMXoT2B/Z0wI3i60sR/COgQanDTAM6/c8DyAd3HJG7qUCyFvDyVZpTMUY +wZF7C9UTAJu878NIPkZgIIUq1ZC2zYugzDLdt/1AVbJQHFauzI13TccgTacxdu9o +koqQHgiBVrKtaaNS0MscxCM9H5n+TOgWY47GCI72MfbS+uV23bUckqNJzc0BzWjN +qWm6o+sdDZykIKbBoMXRRkwXbdKsZj+WjOCE1Db/IlnF+RFgqF8EffIa9iVCYQ/E +Srg+iQIDAQABMA0GCSqGSIb3DQEBBQUAA4IBAQA0JhU8wI1NQ0kdvekhktdmnLfe +xbjQ5F1fdiLAJvmEOjr5jLX77GDx6M4EsMjdpwOPMPOY36TmpDHf0xwLRtxyID+u +7gU8pDM/CzmscHhzS5kr3zDCVLCoO1Wh/hYozUK9dG6A2ydEp85EXdQbkJgNHkKU +sQAsBNB0owIFImNjzYO1+8FtYmtpdf1dcEG59b98377BMnMiIYtYgXsVkXq642RI +sH/7NiXaldDxJBQX3RiAa0YjOVT1jmIJBB2UkKab5iXiQkWquJCtvgiPqQtCGJTP +cjnhsUPgKM+351psE2tJs//jGHyJizNdrDPXp/naOlXJWBD5qu9ats9LS98q +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwICEQCbfgZJoz5iudXukEhxKe9XMA0GCSqGSIb3DQEBBQUAMIHKMQsw +CQYDVQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZl +cmlTaWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWdu +LCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlT +aWduIENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkgLSBHMzAeFw05OTEwMDEwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMIHKMQswCQYD +VQQGEwJVUzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlT +aWduIFRydXN0IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAxOTk5IFZlcmlTaWduLCBJ +bmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxRTBDBgNVBAMTPFZlcmlTaWdu +IENsYXNzIDMgUHVibGljIFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg +LSBHMzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMu6nFL8eB8aHm8b +N3O9+MlrlBIwT/A2R/XQkQr1F8ilYcEWQE37imGQ5XYgwREGfassbqb1EUGO+i2t +KmFZpGcmTNDovFJbcCAEWNF6yaRpvIMXZK0Fi7zQWM6NjPXr8EJJC52XJ2cybuGu +kxUccLwgTS8Y3pKI6GyFVxEa6X7jJhFUokWWVYPKMIno3Nij7SqAP395ZVc+FSBm +CC+Vk7+qRy+oRpfwEuL+wgorUeZ25rdGt+INpsyow0xZVYnm6FNcHOqd8GIWC6fJ +Xwzw3sJ2zq/3avL6QaaiMxTJ5Xpj055iN9WFZZ4O5lMkdBteHRJTW8cs54NJOxWu +imi5V5cCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAERSWwauSCPc/L8my/uRan2Te +2yFPhpk0djZX3dAVL8WtfxUfN2JzPtTnX84XA9s1+ivbrmAJXx5fj267Cz3qWhMe +DGBvtcC1IyIuBwvLqXTLR7sdwdela8wv0kL9Sd2nic9TutoAWii/gt/4uhMdUIaC +/Y4wjylGsB49Ndo4YhYYSq3mtlFs3q9i6wHQHiT+eo8SGhJouPtmmRQURVyu565p +F4ErWjfJXir0xuKhXFSbplQAz/DxwceYMBo7Nhbbo27q/a2ywtrvAkcTisDxszGt +TxzhT5yvDwyd93gN2PQ1VoDat20Xj50egWTh/sVFuq1ruQp6Tk9LhO5L8X3dEQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- 
+MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjELMAkG +A1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJp +U2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwg +SW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2ln +biBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8Utpkmw4tXNherJI9/gHm +GUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGzrl0Bp3ve +fLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJ +aW1hZ2UvZ2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYj +aHR0cDovL2xvZ28udmVyaXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMW +kf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMDA2gAMGUCMGYhDBgmYFo4e1ZC +4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIxAJw9SDkjOVga +FRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCB +yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxW +ZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0 +aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCByjEL +MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZW +ZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2ln +biwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJp +U2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9y +aXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvJAgIKXo1 +nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKzj/i5Vbex +t0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIz +SdhDY2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQG +BO+QueQA5N06tRn/Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+ +rCpSx4/VBEnkjWNHiDxpg8v+R70rfk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/ +NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8E +BAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEwHzAH +BgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy +aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKv +MzEzMA0GCSqGSIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzE +p6B4Eq1iDkVwZMXnl2YtmAl+X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y +5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKEKQsTb47bDN0lAtukixlE0kF6BWlK +WE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiCKm0oHw0LxOXnGiYZ +4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vEZV8N +hnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCB +vTELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQL +ExZWZXJpU2lnbiBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJp +U2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVzZSBvbmx5MTgwNgYDVQQDEy9W +ZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAe +Fw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJVUzEX +MBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0 
+IE5ldHdvcmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9y +IGF1dGhvcml6ZWQgdXNlIG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNh +bCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj1mCOkdeQmIN65lgZOIzF +9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGPMiJhgsWH +H26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+H +LL729fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN +/BMReYTtXlT2NJ8IAfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPT +rJ9VAMf2CGqUuV/c4DPxhGD5WycRtPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1Ud +EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0GCCsGAQUFBwEMBGEwX6FdoFsw +WTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2Oa8PPgGrUSBgs +exkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud +DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4 +sAPmLGd75JR3Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+ +seQxIcaBlVZaDrHC1LGmWazxY8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz +4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTxP/jgdFcrGJ2BtMQo2pSXpXDrrB2+ +BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+PwGZsY6rp2aQW9IHR +lRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4mJO3 +7M2CYfE45k+XmCpajQ== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIDojCCAoqgAwIBAgIQE4Y1TR0/BvLB+WUF1ZAcYjANBgkqhkiG9w0BAQUFADBr +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNv +bW1lcmNlIFJvb3QwHhcNMDIwNjI2MDIxODM2WhcNMjIwNjI0MDAxNjEyWjBrMQsw +CQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRlcm5h +dGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xHDAaBgNVBAMTE1Zpc2EgZUNvbW1l +cmNlIFJvb3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvV95WHm6h +2mCxlCfLF9sHP4CFT8icttD0b0/Pmdjh28JIXDqsOTPHH2qLJj0rNfVIsZHBAk4E +lpF7sDPwsRROEW+1QK8bRaVK7362rPKgH1g/EkZgPI2h4H3PVz4zHvtH8aoVlwdV +ZqW1LS7YgFmypw23RuwhY/81q6UCzyr0TP579ZRdhE2o8mCP2w4lPJ9zcc+U30rq +299yOIzzlr3xF7zSujtFWsan9sYXiwGd/BmoKoMWuDpI/k4+oKsGGelT84ATB+0t +vz8KPFUgOSwsAGl0lUq8ILKpeeUYiZGo3BxN77t+Nwtd/jmliFKMAGzsGHxBvfaL +dXe6YJ2E5/4tAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQD +AgEGMB0GA1UdDgQWBBQVOIMPPyw/cDMezUb+B4wg4NfDtzANBgkqhkiG9w0BAQUF +AAOCAQEAX/FBfXxcCLkr4NWSR/pnXKUTwwMhmytMiUbPWU3J/qVAtmPN3XEolWcR +zCSs00Rsca4BIGsDoo8Ytyk6feUWYFN4PMCvFYP3j1IzJL1kk5fui/fbGKhtcbP3 +LBfQdCVp9/5rPJS+TUtBjE7ic9DjkCJzQ83z7+pzzkWKsKZJ/0x9nXGIxHYdkFsd +7v3M9+79YKWxehZx0RbQfBI8bGmX265fOZpwLwU8GUYEmSA20GBuYQa7FkKMcPcw +++DbZqMAAb3mLNqRX6BGi01qnD093QVG/na/oAo85ADmJ7f/hC3euiInlhBx6yLt +398znM/jra6O1I7mT1GvFpLgXPYHDw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIID+TCCAuGgAwIBAgIQW1fXqEywr9nTb0ugMbTW4jANBgkqhkiG9w0BAQUFADB5 +MQswCQYDVQQGEwJVUzENMAsGA1UEChMEVklTQTEvMC0GA1UECxMmVmlzYSBJbnRl +cm5hdGlvbmFsIFNlcnZpY2UgQXNzb2NpYXRpb24xKjAoBgNVBAMTIVZpc2EgSW5m +b3JtYXRpb24gRGVsaXZlcnkgUm9vdCBDQTAeFw0wNTA2MjcxNzQyNDJaFw0yNTA2 +MjkxNzQyNDJaMHkxCzAJBgNVBAYTAlVTMQ0wCwYDVQQKEwRWSVNBMS8wLQYDVQQL +EyZWaXNhIEludGVybmF0aW9uYWwgU2VydmljZSBBc3NvY2lhdGlvbjEqMCgGA1UE +AxMhVmlzYSBJbmZvcm1hdGlvbiBEZWxpdmVyeSBSb290IENBMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyREA4R/QkkfpLx0cYjga/EhIPZpchH0MZsRZ +FfP6C2ITtf/Wc+MtgD4yTK0yoiXvni3d+aCtEgK3GDvkdgYrgF76ROJFZwUQjQ9l +x42gRT05DbXvWFoy7dTglCZ9z/Tt2Cnktv9oxKgmkeHY/CyfpCBg1S8xth2JlGMR +0ug/GMO5zANuegZOv438p5Lt5So+du2Gl+RMFQqEPwqN5uJSqAe0VtmB4gWdQ8on +Bj2ZAM2R73QW7UW0Igt2vA4JaSiNtaAG/Y/58VXWHGgbq7rDtNK1R30X0kJV0rGA +ib3RSwB3LpG7bOjbIucV5mQgJoVjoA1e05w6g1x/KmNTmOGRVwIDAQABo30wezAP +BgNVHRMBAf8EBTADAQH/MDkGA1UdIAQyMDAwLgYFZ4EDAgEwJTAVBggrBgEFBQcC 
+ARYJMS4yLjMuNC41MAwGCCsGAQUFBwICMAAwDgYDVR0PAQH/BAQDAgEGMB0GA1Ud +DgQWBBRPitp2/2d3I5qmgH1924h1hfeBejANBgkqhkiG9w0BAQUFAAOCAQEACUW1 +QdUHdDJydgDPmYt+telnG/Su+DPaf1cregzlN43bJaJosMP7NwjoJY/H2He4XLWb +5rXEkl+xH1UyUwF7mtaUoxbGxEvt8hPZSTB4da2mzXgwKvXuHyzF5Qjy1hOB0/pS +WaF9ARpVKJJ7TOJQdGKBsF2Ty4fSCLqZLgfxbqwMsd9sysXI3rDXjIhekqvbgeLz +PqZr+pfgFhwCCLSMQWl5Ll3u7Qk9wR094DZ6jj6+JCVCRUS3HyabH4OlM0Vc2K+j +INsF/64Or7GNtRf9HYEJvrPxHINxl3JVwhYj4ASeaO4KwhVbwtw94Tc/XrGcexDo +c5lC3rAi4/UZqweYCw== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEGjCCAwKgAwIBAgIDAYagMA0GCSqGSIb3DQEBBQUAMIGjMQswCQYDVQQGEwJG +STEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0ZXJpa2Vz +a3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBTZXJ2aWNl +czEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJLIEdvdi4g +Um9vdCBDQTAeFw0wMjEyMTgxMzUzMDBaFw0yMzEyMTgxMzUxMDhaMIGjMQswCQYD +VQQGEwJGSTEQMA4GA1UECBMHRmlubGFuZDEhMB8GA1UEChMYVmFlc3RvcmVraXN0 +ZXJpa2Vza3VzIENBMSkwJwYDVQQLEyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBT +ZXJ2aWNlczEZMBcGA1UECxMQVmFybWVubmVwYWx2ZWx1dDEZMBcGA1UEAxMQVlJL +IEdvdi4gUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALCF +FdrIAzfQo0Y3bBseljDCWoUSZyPyu5/nioFgJ/gTqTy894aqqvTzJSm0/nWuHoGG +igWyHWWyOOi0zCia+xc28ZPVec7Bg4shT8MNrUHfeJ1I4x9CRPw8bSEga60ihCRC +jxdNwlAfZM0tOSJWiP2yY51U2kJpwMhP1xjiPshphJQ9LIDGfM6911Mf64i5psu7 +hVfvV3ZdDIvTXhJBnyHAOfQmbQj6OLOhd7HuFtjQaNq0mKWgZUZKa41+qk1guPjI +DfxxPu45h4G02fhukO4/DmHXHSto5i7hQkQmeCxY8n0Wf2HASSQqiYe2XS8pGfim +545SnkFLWg6quMJmQlMCAwEAAaNVMFMwDwYDVR0TAQH/BAUwAwEB/zARBglghkgB +hvhCAQEEBAMCAAcwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBTb6eGb0tEkC/yr +46Bn6q6cS3f0sDANBgkqhkiG9w0BAQUFAAOCAQEArX1ID1QRnljurw2bEi8hpM2b +uoRH5sklVSPj3xhYKizbXvfNVPVRJHtiZ+GxH0mvNNDrsczZog1Sf0JLiGCXzyVy +t08pLWKfT6HAVVdWDsRol5EfnGTCKTIB6dTI2riBmCguGMcs/OubUpbf9MiQGS0j +8/G7cdqehSO9Gu8u5Hp5t8OdhkktY7ktdM9lDzJmid87Ie4pbzlj2RXBbvbfgD5Q +eBmK3QOjFKU3p7UsfLYRh+cF8ry23tT/l4EohP7+bEaFEEGfTXWMB9SZZ291im/k +UJL2mdUQuMSpe/cXjUu/15WfCdxEDx4yw8DP03kN5Mc7h/CQNIghYkmSBAQfvA== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCB +gjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEk +MCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRY +UmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQxMTAxMTcx +NDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMxHjAcBgNVBAsTFXd3 +dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkgU2Vy +dmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS6 +38eMpSe2OAtp87ZOqCwuIR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCP +KZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMxfoArtYzAQDsRhtDLooY2YKTVMIJt2W7Q +DxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FEzG+gSqmUsE3a56k0enI4 +qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqsAxcZZPRa +JSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNVi +PvryxS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0P +BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASs +jVy16bYbMDYGA1UdHwQvMC0wK6ApoCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0 +eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQEwDQYJKoZIhvcNAQEFBQAD +ggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc/Kh4ZzXxHfAR +vbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLa +IR9NmXmd4c8nnxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSy +i6mx5O+aGtA9aZnuqCij4Tyz8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQ +O+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END 
CERTIFICATE----- +` diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_js.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_js.go new file mode 100644 index 00000000..70abb73f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_js.go @@ -0,0 +1,10 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build js,wasm + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go new file mode 100644 index 00000000..aa1785e4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_linux.go @@ -0,0 +1,14 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Gentoo etc. + "/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL 6 + "/etc/ssl/ca-bundle.pem", // OpenSUSE + "/etc/pki/tls/cacert.pem", // OpenELEC + "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // CentOS/RHEL 7 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go new file mode 100644 index 00000000..4413f647 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_nacl.go @@ -0,0 +1,8 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go new file mode 100644 index 00000000..2ac4666a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_nocgo_darwin.go @@ -0,0 +1,11 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !cgo + +package x509 + +func loadSystemRoots() (*CertPool, error) { + return execSecurityRoots() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go new file mode 100644 index 00000000..09f0e230 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_plan9.go @@ -0,0 +1,40 @@ +// Copyright 2012 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build plan9 + +package x509 + +import ( + "io/ioutil" + "os" +) + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/sys/lib/tls/ca.pem", +} + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + var bestErr error + for _, file := range certFiles { + data, err := ioutil.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + return roots, nil + } + if bestErr == nil || (os.IsNotExist(bestErr) && !os.IsNotExist(err)) { + bestErr = err + } + } + if bestErr == nil { + return roots, nil + } + return nil, bestErr +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go new file mode 100644 index 00000000..e6d4e613 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_solaris.go @@ -0,0 +1,12 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// Possible certificate files; stop after finding one. +var certFiles = []string{ + "/etc/certs/ca-certificates.crt", // Solaris 11.2+ + "/etc/ssl/certs/ca-certificates.crt", // Joyent SmartOS + "/etc/ssl/cacert.pem", // OmniOS +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go new file mode 100644 index 00000000..8e703623 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_unix.go @@ -0,0 +1,88 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build dragonfly freebsd js,wasm linux nacl netbsd openbsd solaris + +package x509 + +import ( + "io/ioutil" + "os" +) + +// Possible directories with certificate files; stop after successfully +// reading at least one file from a directory. +var certDirectories = []string{ + "/etc/ssl/certs", // SLES10/SLES11, https://golang.org/issue/12139 + "/system/etc/security/cacerts", // Android + "/usr/local/share/certs", // FreeBSD + "/etc/pki/tls/certs", // Fedora/RHEL + "/etc/openssl/certs", // NetBSD +} + +const ( + // certFileEnv is the environment variable which identifies where to locate + // the SSL certificate file. If set this overrides the system default. + certFileEnv = "SSL_CERT_FILE" + + // certDirEnv is the environment variable which identifies which directory + // to check for SSL certificate files. If set this overrides the system default. 
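+	//
+	// Illustrative example (a sketch, not part of the upstream source; the
+	// path and program name are hypothetical): starting a process as
+	//   SSL_CERT_FILE=/path/to/ca.pem ./program
+	// makes loadSystemRoots below consider only that file, and setting
+	// SSL_CERT_DIR narrows the directory scan in the same way.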
+ certDirEnv = "SSL_CERT_DIR" +) + +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + return nil, nil +} + +func loadSystemRoots() (*CertPool, error) { + roots := NewCertPool() + + files := certFiles + if f := os.Getenv(certFileEnv); f != "" { + files = []string{f} + } + + var firstErr error + for _, file := range files { + data, err := ioutil.ReadFile(file) + if err == nil { + roots.AppendCertsFromPEM(data) + break + } + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + } + + dirs := certDirectories + if d := os.Getenv(certDirEnv); d != "" { + dirs = []string{d} + } + + for _, directory := range dirs { + fis, err := ioutil.ReadDir(directory) + if err != nil { + if firstErr == nil && !os.IsNotExist(err) { + firstErr = err + } + continue + } + rootsAdded := false + for _, fi := range fis { + data, err := ioutil.ReadFile(directory + "/" + fi.Name()) + if err == nil && roots.AppendCertsFromPEM(data) { + rootsAdded = true + } + } + if rootsAdded { + return roots, nil + } + } + + if len(roots.certs) > 0 || firstErr == nil { + return roots, nil + } + + return nil, firstErr +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go new file mode 100644 index 00000000..304ad3a6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/root_windows.go @@ -0,0 +1,266 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "errors" + "syscall" + "unsafe" +) + +// Creates a new *syscall.CertContext representing the leaf certificate in an in-memory +// certificate store containing itself and all of the intermediate certificates specified +// in the opts.Intermediates CertPool. +// +// A pointer to the in-memory store is available in the returned CertContext's Store field. +// The store is automatically freed when the CertContext is freed using +// syscall.CertFreeCertificateContext. 
+func createStoreContext(leaf *Certificate, opts *VerifyOptions) (*syscall.CertContext, error) { + var storeCtx *syscall.CertContext + + leafCtx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &leaf.Raw[0], uint32(len(leaf.Raw))) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(leafCtx) + + handle, err := syscall.CertOpenStore(syscall.CERT_STORE_PROV_MEMORY, 0, 0, syscall.CERT_STORE_DEFER_CLOSE_UNTIL_LAST_FREE_FLAG, 0) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(handle, 0) + + err = syscall.CertAddCertificateContextToStore(handle, leafCtx, syscall.CERT_STORE_ADD_ALWAYS, &storeCtx) + if err != nil { + return nil, err + } + + if opts.Intermediates != nil { + for _, intermediate := range opts.Intermediates.certs { + ctx, err := syscall.CertCreateCertificateContext(syscall.X509_ASN_ENCODING|syscall.PKCS_7_ASN_ENCODING, &intermediate.Raw[0], uint32(len(intermediate.Raw))) + if err != nil { + return nil, err + } + + err = syscall.CertAddCertificateContextToStore(handle, ctx, syscall.CERT_STORE_ADD_ALWAYS, nil) + syscall.CertFreeCertificateContext(ctx) + if err != nil { + return nil, err + } + } + } + + return storeCtx, nil +} + +// extractSimpleChain extracts the final certificate chain from a CertSimpleChain. +func extractSimpleChain(simpleChain **syscall.CertSimpleChain, count int) (chain []*Certificate, err error) { + if simpleChain == nil || count == 0 { + return nil, errors.New("x509: invalid simple chain") + } + + simpleChains := (*[1 << 20]*syscall.CertSimpleChain)(unsafe.Pointer(simpleChain))[:] + lastChain := simpleChains[count-1] + elements := (*[1 << 20]*syscall.CertChainElement)(unsafe.Pointer(lastChain.Elements))[:] + for i := 0; i < int(lastChain.NumElements); i++ { + // Copy the buf, since ParseCertificate does not create its own copy. + cert := elements[i].CertContext + encodedCert := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:] + buf := make([]byte, cert.Length) + copy(buf, encodedCert[:]) + parsedCert, err := ParseCertificate(buf) + if err != nil { + return nil, err + } + chain = append(chain, parsedCert) + } + + return chain, nil +} + +// checkChainTrustStatus checks the trust status of the certificate chain, translating +// any errors it finds into Go errors in the process. +func checkChainTrustStatus(c *Certificate, chainCtx *syscall.CertChainContext) error { + if chainCtx.TrustStatus.ErrorStatus != syscall.CERT_TRUST_NO_ERROR { + status := chainCtx.TrustStatus.ErrorStatus + switch status { + case syscall.CERT_TRUST_IS_NOT_TIME_VALID: + return CertificateInvalidError{c, Expired, ""} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + return nil +} + +// checkChainSSLServerPolicy checks that the certificate chain in chainCtx is valid for +// use as a certificate chain for a SSL/TLS server. 
+func checkChainSSLServerPolicy(c *Certificate, chainCtx *syscall.CertChainContext, opts *VerifyOptions) error { + servernamep, err := syscall.UTF16PtrFromString(opts.DNSName) + if err != nil { + return err + } + sslPara := &syscall.SSLExtraCertChainPolicyPara{ + AuthType: syscall.AUTHTYPE_SERVER, + ServerName: servernamep, + } + sslPara.Size = uint32(unsafe.Sizeof(*sslPara)) + + para := &syscall.CertChainPolicyPara{ + ExtraPolicyPara: convertToPolicyParaType(unsafe.Pointer(sslPara)), + } + para.Size = uint32(unsafe.Sizeof(*para)) + + status := syscall.CertChainPolicyStatus{} + err = syscall.CertVerifyCertificateChainPolicy(syscall.CERT_CHAIN_POLICY_SSL, chainCtx, para, &status) + if err != nil { + return err + } + + // TODO(mkrautz): use the lChainIndex and lElementIndex fields + // of the CertChainPolicyStatus to provide proper context, instead + // using c. + if status.Error != 0 { + switch status.Error { + case syscall.CERT_E_EXPIRED: + return CertificateInvalidError{c, Expired, ""} + case syscall.CERT_E_CN_NO_MATCH: + return HostnameError{c, opts.DNSName} + case syscall.CERT_E_UNTRUSTEDROOT: + return UnknownAuthorityError{c, nil, nil} + default: + return UnknownAuthorityError{c, nil, nil} + } + } + + return nil +} + +// systemVerify is like Verify, except that it uses CryptoAPI calls +// to build certificate chains and verify them. +func (c *Certificate) systemVerify(opts *VerifyOptions) (chains [][]*Certificate, err error) { + hasDNSName := opts != nil && len(opts.DNSName) > 0 + + storeCtx, err := createStoreContext(c, opts) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateContext(storeCtx) + + para := new(syscall.CertChainPara) + para.Size = uint32(unsafe.Sizeof(*para)) + + // If there's a DNSName set in opts, assume we're verifying + // a certificate from a TLS server. + if hasDNSName { + oids := []*byte{ + &syscall.OID_PKIX_KP_SERVER_AUTH[0], + // Both IE and Chrome allow certificates with + // Server Gated Crypto as well. Some certificates + // in the wild require them. + &syscall.OID_SERVER_GATED_CRYPTO[0], + &syscall.OID_SGC_NETSCAPE[0], + } + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_OR + para.RequestedUsage.Usage.Length = uint32(len(oids)) + para.RequestedUsage.Usage.UsageIdentifiers = &oids[0] + } else { + para.RequestedUsage.Type = syscall.USAGE_MATCH_TYPE_AND + para.RequestedUsage.Usage.Length = 0 + para.RequestedUsage.Usage.UsageIdentifiers = nil + } + + var verifyTime *syscall.Filetime + if opts != nil && !opts.CurrentTime.IsZero() { + ft := syscall.NsecToFiletime(opts.CurrentTime.UnixNano()) + verifyTime = &ft + } + + // CertGetCertificateChain will traverse Windows's root stores + // in an attempt to build a verified certificate chain. Once + // it has found a verified chain, it stops. MSDN docs on + // CERT_CHAIN_CONTEXT: + // + // When a CERT_CHAIN_CONTEXT is built, the first simple chain + // begins with an end certificate and ends with a self-signed + // certificate. If that self-signed certificate is not a root + // or otherwise trusted certificate, an attempt is made to + // build a new chain. CTLs are used to create the new chain + // beginning with the self-signed certificate from the original + // chain as the end certificate of the new chain. This process + // continues building additional simple chains until the first + // self-signed certificate is a trusted certificate or until + // an additional simple chain cannot be built. + // + // The result is that we'll only get a single trusted chain to + // return to our caller. 
+ var chainCtx *syscall.CertChainContext + err = syscall.CertGetCertificateChain(syscall.Handle(0), storeCtx, verifyTime, storeCtx.Store, para, 0, 0, &chainCtx) + if err != nil { + return nil, err + } + defer syscall.CertFreeCertificateChain(chainCtx) + + err = checkChainTrustStatus(c, chainCtx) + if err != nil { + return nil, err + } + + if hasDNSName { + err = checkChainSSLServerPolicy(c, chainCtx, opts) + if err != nil { + return nil, err + } + } + + chain, err := extractSimpleChain(chainCtx.Chains, int(chainCtx.ChainCount)) + if err != nil { + return nil, err + } + + chains = append(chains, chain) + + return chains, nil +} + +func loadSystemRoots() (*CertPool, error) { + // TODO: restore this functionality on Windows. We tried to do + // it in Go 1.8 but had to revert it. See Issue 18609. + // Returning (nil, nil) was the old behavior, prior to CL 30578. + return nil, nil + + const CRYPT_E_NOT_FOUND = 0x80092004 + + store, err := syscall.CertOpenSystemStore(0, syscall.StringToUTF16Ptr("ROOT")) + if err != nil { + return nil, err + } + defer syscall.CertCloseStore(store, 0) + + roots := NewCertPool() + var cert *syscall.CertContext + for { + cert, err = syscall.CertEnumCertificatesInStore(store, cert) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + if errno == CRYPT_E_NOT_FOUND { + break + } + } + return nil, err + } + if cert == nil { + break + } + // Copy the buf, since ParseCertificate does not create its own copy. + buf := (*[1 << 20]byte)(unsafe.Pointer(cert.EncodedCert))[:] + buf2 := make([]byte, cert.Length) + copy(buf2, buf) + if c, err := ParseCertificate(buf2); err == nil { + roots.AddCert(c) + } + } + return roots, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/rpki.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/rpki.go new file mode 100644 index 00000000..520d6dc3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/rpki.go @@ -0,0 +1,242 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + + "github.com/google/certificate-transparency-go/asn1" +) + +// IPAddressPrefix describes an IP address prefix as an ASN.1 bit string, +// where the BitLength field holds the prefix length. +type IPAddressPrefix asn1.BitString + +// IPAddressRange describes an (inclusive) IP address range. +type IPAddressRange struct { + Min IPAddressPrefix + Max IPAddressPrefix +} + +// Most relevant values for AFI from: +// http://www.iana.org/assignments/address-family-numbers. +const ( + IPv4AddressFamilyIndicator = uint16(1) + IPv6AddressFamilyIndicator = uint16(2) +) + +// IPAddressFamilyBlocks describes a set of ranges of IP addresses. +type IPAddressFamilyBlocks struct { + // AFI holds an address family indicator from + // http://www.iana.org/assignments/address-family-numbers. + AFI uint16 + // SAFI holds a subsequent address family indicator from + // http://www.iana.org/assignments/safi-namespace. + SAFI byte + // InheritFromIssuer indicates that the set of addresses should + // be taken from the issuer's certificate. + InheritFromIssuer bool + // AddressPrefixes holds prefixes if InheritFromIssuer is false. + AddressPrefixes []IPAddressPrefix + // AddressRanges holds ranges if InheritFromIssuer is false. 
+ AddressRanges []IPAddressRange +} + +// Internal types for asn1 unmarshalling. +type ipAddressFamily struct { + AddressFamily []byte // 2-byte AFI plus optional 1 byte SAFI + Choice asn1.RawValue +} + +// Internally, use raw asn1.BitString rather than the IPAddressPrefix +// type alias (so that asn1.Unmarshal() decodes properly). +type ipAddressRange struct { + Min asn1.BitString + Max asn1.BitString +} + +func parseRPKIAddrBlocks(data []byte, nfe *NonFatalErrors) []*IPAddressFamilyBlocks { + // RFC 3779 2.2.3 + // IPAddrBlocks ::= SEQUENCE OF IPAddressFamily + // + // IPAddressFamily ::= SEQUENCE { -- AFI & optional SAFI -- + // addressFamily OCTET STRING (SIZE (2..3)), + // ipAddressChoice IPAddressChoice } + // + // IPAddressChoice ::= CHOICE { + // inherit NULL, -- inherit from issuer -- + // addressesOrRanges SEQUENCE OF IPAddressOrRange } + // + // IPAddressOrRange ::= CHOICE { + // addressPrefix IPAddress, + // addressRange IPAddressRange } + // + // IPAddressRange ::= SEQUENCE { + // min IPAddress, + // max IPAddress } + // + // IPAddress ::= BIT STRING + + var addrBlocks []ipAddressFamily + if rest, err := asn1.Unmarshal(data, &addrBlocks); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks extension: %v", err)) + return nil + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ipAddrBlocks extension")) + return nil + } + + var results []*IPAddressFamilyBlocks + for i, block := range addrBlocks { + var fam IPAddressFamilyBlocks + if l := len(block.AddressFamily); l < 2 || l > 3 { + nfe.AddError(fmt.Errorf("invalid address family length (%d) for ipAddrBlock.addressFamily", l)) + continue + } + fam.AFI = binary.BigEndian.Uint16(block.AddressFamily[0:2]) + if len(block.AddressFamily) > 2 { + fam.SAFI = block.AddressFamily[2] + } + // IPAddressChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit) + // tagging of the alternatives -- here, either NULL or SEQUENCE OF. + if bytes.Equal(block.Choice.FullBytes, asn1.NullBytes) { + fam.InheritFromIssuer = true + results = append(results, &fam) + continue + } + + var addrRanges []asn1.RawValue + if _, err := asn1.Unmarshal(block.Choice.FullBytes, &addrRanges); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges: %v", i, err)) + continue + } + for j, ar := range addrRanges { + // Each IPAddressOrRange is a CHOICE where the alternatives have distinct (implicit) + // tags -- here, either BIT STRING or SEQUENCE. 
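+			// Worked example (illustrative, not from the upstream source):
+			// the IPv4 prefix 10.0.0.0/8 arrives as a BIT STRING of 8 bits,
+			// DER-encoded as 03 02 00 0a, so the decoded IPAddressPrefix has
+			// one content byte (0x0a) and BitLength 8.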
+			switch ar.Tag {
+			case asn1.TagBitString:
+				// BIT STRING for single prefix IPAddress
+				var val asn1.BitString
+				if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
+					nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressPrefix: %v", i, j, err))
+					continue
+				}
+				fam.AddressPrefixes = append(fam.AddressPrefixes, IPAddressPrefix(val))
+
+			case asn1.TagSequence:
+				var val ipAddressRange
+				if _, err := asn1.Unmarshal(ar.FullBytes, &val); err != nil {
+					nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d].addressRange: %v", i, j, err))
+					continue
+				}
+				fam.AddressRanges = append(fam.AddressRanges, IPAddressRange{Min: IPAddressPrefix(val.Min), Max: IPAddressPrefix(val.Max)})
+
+			default:
+				nfe.AddError(fmt.Errorf("unexpected ASN.1 type in ipAddrBlocks[%d].ipAddressChoice.addressesOrRanges[%d]: %+v", i, j, ar))
+			}
+		}
+		results = append(results, &fam)
+	}
+	return results
+}
+
+// ASIDRange describes an inclusive range of AS Identifiers (AS numbers or routing
+// domain identifiers).
+type ASIDRange struct {
+	Min int
+	Max int
+}
+
+// ASIdentifiers describes a collection of AS Identifiers (AS numbers or routing
+// domain identifiers).
+type ASIdentifiers struct {
+	// InheritFromIssuer indicates that the set of AS identifiers should
+	// be taken from the issuer's certificate.
+	InheritFromIssuer bool
+	// ASIDs holds AS identifiers if InheritFromIssuer is false.
+	ASIDs []int
+	// ASIDRanges holds AS identifier ranges (inclusive) if InheritFromIssuer is false.
+	ASIDRanges []ASIDRange
+}
+
+type asIdentifiers struct {
+	ASNum asn1.RawValue `asn1:"optional,tag:0"`
+	RDI   asn1.RawValue `asn1:"optional,tag:1"`
+}
+
+func parseASIDChoice(val asn1.RawValue, nfe *NonFatalErrors) *ASIdentifiers {
+	// RFC 3779 2.3.2
+	//   ASIdentifierChoice  ::= CHOICE {
+	//      inherit              NULL, -- inherit from issuer --
+	//      asIdsOrRanges        SEQUENCE OF ASIdOrRange }
+	//   ASIdOrRange         ::= CHOICE {
+	//       id                  ASId,
+	//       range               ASRange }
+	//   ASRange             ::= SEQUENCE {
+	//       min                 ASId,
+	//       max                 ASId }
+	//   ASId                ::= INTEGER
+	if len(val.FullBytes) == 0 { // OPTIONAL
+		return nil
+	}
+	// ASIdentifierChoice is an ASN.1 CHOICE where the chosen alternative is indicated by (implicit)
+	// tagging of the alternatives -- here, either NULL or SEQUENCE OF.
+	if bytes.Equal(val.Bytes, asn1.NullBytes) {
+		return &ASIdentifiers{InheritFromIssuer: true}
+	}
+	var ids []asn1.RawValue
+	if rest, err := asn1.Unmarshal(val.Bytes, &ids); err != nil {
+		nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges: %v", err))
+		return nil
+	} else if len(rest) != 0 {
+		nfe.AddError(errors.New("trailing data after ASIdentifiers.asIdsOrRanges"))
+		return nil
+	}
+	var asID ASIdentifiers
+	for i, id := range ids {
+		// Each ASIdOrRange is a CHOICE where the alternatives have distinct (implicit)
+		// tags -- here, either INTEGER or SEQUENCE.
+		switch id.Tag {
+		case asn1.TagInteger:
+			var val int
+			if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
+				nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].id: %v", i, err))
+				continue
+			}
+			asID.ASIDs = append(asID.ASIDs, val)
+
+		case asn1.TagSequence:
+			var val ASIDRange
+			if _, err := asn1.Unmarshal(id.FullBytes, &val); err != nil {
+				nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers.asIdsOrRanges[%d].range: %v", i, err))
+				continue
+			}
+			asID.ASIDRanges = append(asID.ASIDRanges, val)
+
+		default:
+			nfe.AddError(fmt.Errorf("unexpected value in ASIdentifiers.asIdsOrRanges[%d]: %+v", i, id))
+		}
+	}
+	return &asID
+}
+
+func parseRPKIASIdentifiers(data []byte, nfe *NonFatalErrors) (*ASIdentifiers, *ASIdentifiers) {
+	// RFC 3779 2.3.2
+	//   ASIdentifiers       ::= SEQUENCE {
+	//       asnum               [0] EXPLICIT ASIdentifierChoice OPTIONAL,
+	//       rdi                 [1] EXPLICIT ASIdentifierChoice OPTIONAL}
+	var asIDs asIdentifiers
+	if rest, err := asn1.Unmarshal(data, &asIDs); err != nil {
+		nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal ASIdentifiers extension: %v", err))
+		return nil, nil
+	} else if len(rest) != 0 {
+		nfe.AddError(errors.New("trailing data after ASIdentifiers extension"))
+		return nil, nil
+	}
+	return parseASIDChoice(asIDs.ASNum, nfe), parseASIDChoice(asIDs.RDI, nfe)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/sec1.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
new file mode 100644
index 00000000..7c51e15c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/sec1.go
@@ -0,0 +1,113 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"errors"
+	"fmt"
+	"math/big"
+
+	"github.com/google/certificate-transparency-go/asn1"
+)
+
+const ecPrivKeyVersion = 1
+
+// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure.
+// References:
+//   RFC 5915
+//   SEC1 - http://www.secg.org/sec1-v2.pdf
+// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in
+// most cases it is not.
+type ecPrivateKey struct {
+	Version       int
+	PrivateKey    []byte
+	NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"`
+	PublicKey     asn1.BitString        `asn1:"optional,explicit,tag:1"`
+}
+
+// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure.
+func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) {
+	return parseECPrivateKey(nil, der)
+}
+
+// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format.
+func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) {
+	oid, ok := OIDFromNamedCurve(key.Curve)
+	if !ok {
+		return nil, errors.New("x509: unknown elliptic curve")
+	}
+
+	return marshalECPrivateKeyWithOID(key, oid)
+}
+
+// marshalECPrivateKeyWithOID marshals an EC private key into ASN.1, DER format and
+// sets the curve ID to the given OID, or omits it if OID is nil.
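+//
+// A minimal round-trip sketch using the exported wrappers above (hedged:
+// the variable names are illustrative, and rand.Reader is crypto/rand's):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	der, _ := MarshalECPrivateKey(priv) // RFC 5915 / SEC1 DER bytes
+//	back, _ := ParseECPrivateKey(der)   // recovers the same key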
+func marshalECPrivateKeyWithOID(key *ecdsa.PrivateKey, oid asn1.ObjectIdentifier) ([]byte, error) { + privateKeyBytes := key.D.Bytes() + paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8) + copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes) + + return asn1.Marshal(ecPrivateKey{ + Version: 1, + PrivateKey: paddedPrivateKey, + NamedCurveOID: oid, + PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)}, + }) +} + +// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. +// The OID for the named curve may be provided from another source (such as +// the PKCS8 container) - if it is provided then use this instead of the OID +// that may exist in the EC private key structure. +func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) { + var privKey ecPrivateKey + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, errors.New("x509: failed to parse EC private key: " + err.Error()) + } + if privKey.Version != ecPrivKeyVersion { + return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version) + } + + var nfe NonFatalErrors + var curve elliptic.Curve + if namedCurveOID != nil { + curve = namedCurveFromOID(*namedCurveOID, &nfe) + } else { + curve = namedCurveFromOID(privKey.NamedCurveOID, &nfe) + } + if curve == nil { + return nil, errors.New("x509: unknown elliptic curve") + } + + k := new(big.Int).SetBytes(privKey.PrivateKey) + curveOrder := curve.Params().N + if k.Cmp(curveOrder) >= 0 { + return nil, errors.New("x509: invalid elliptic curve private key value") + } + priv := new(ecdsa.PrivateKey) + priv.Curve = curve + priv.D = k + + privateKey := make([]byte, (curveOrder.BitLen()+7)/8) + + // Some private keys have leading zero padding. This is invalid + // according to [SEC1], but this code will ignore it. + for len(privKey.PrivateKey) > len(privateKey) { + if privKey.PrivateKey[0] != 0 { + return nil, errors.New("x509: invalid private key length") + } + privKey.PrivateKey = privKey.PrivateKey[1:] + } + + // Some private keys remove all leading zeros, this is also invalid + // according to [SEC1] but since OpenSSL used to do this, we ignore + // this too. 
+ copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey) + priv.X, priv.Y = curve.ScalarBaseMult(privateKey) + + return priv, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt new file mode 100644 index 00000000..b7fc9c51 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/test-dir.crt @@ -0,0 +1,31 @@ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIJAL8a/lsnspOqMA0GCSqGSIb3DQEBCwUAMEwxCzAJBgNV +BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz +dHMxETAPBgNVBAMMCHRlc3QtZGlyMB4XDTE3MDIwMTIzNTAyN1oXDTI3MDEzMDIz +NTAyN1owTDELMAkGA1UEBhMCVUsxEzARBgNVBAgMClRlc3QtU3RhdGUxFTATBgNV +BAoMDEdvbGFuZyBUZXN0czERMA8GA1UEAwwIdGVzdC1kaXIwggIiMA0GCSqGSIb3 +DQEBAQUAA4ICDwAwggIKAoICAQDzBoi43Yn30KN13PKFHu8LA4UmgCRToTukLItM +WK2Je45grs/axg9n3YJOXC6hmsyrkOnyBcx1xVNgSrOAll7fSjtChRIX72Xrloxu +XewtWVIrijqz6oylbvEmbRT3O8uynu5rF82Pmdiy8oiSfdywjKuPnE0hjV1ZSCql +MYcXqA+f0JFD8kMv4pbtxjGH8f2DkYQz+hHXLrJH4/MEYdVMQXoz/GDzLyOkrXBN +hpMaBBqg1p0P+tRdfLXuliNzA9vbZylzpF1YZ0gvsr0S5Y6LVtv7QIRygRuLY4kF +k+UYuFq8NrV8TykS7FVnO3tf4XcYZ7r2KV5FjYSrJtNNo85BV5c3xMD3fJ2XcOWk ++oD1ATdgAM3aKmSOxNtNItKKxBe1mkqDH41NbWx7xMad78gDznyeT0tjEOltN2bM +uXU1R/jgR/vq5Ec0AhXJyL/ziIcmuV2fSl/ZxT4ARD+16tgPiIx+welTf0v27/JY +adlfkkL5XsPRrbSguISrj7JeaO/gjG3KnDVHcZvYBpDfHqRhCgrosfe26TZcTXx2 +cRxOfvBjMz1zJAg+esuUzSkerreyRhzD7RpeZTwi6sxvx82MhYMbA3w1LtgdABio +9JRqZy3xqsIbNv7N46WO/qXL1UMRKb1UyHeW8g8btboz+B4zv1U0Nj+9qxPBbQui +dgL9LQIDAQABo1AwTjAdBgNVHQ4EFgQUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwHwYD +VR0jBBgwFoAUy0/0W8nwQfz2tO6AZ2jPkEiTzvUwDAYDVR0TBAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAgEAvEVnUYsIOt87rggmLPqEueynkuQ+562M8EDHSQl82zbe +xDCxeg3DvPgKb+RvaUdt1362z/szK10SoeMgx6+EQLoV9LiVqXwNqeYfixrhrdw3 +ppAhYYhymdkbUQCEMHypmXP1vPhAz4o8Bs+eES1M+zO6ErBiD7SqkmBElT+GixJC +6epC9ZQFs+dw3lPlbiZSsGE85sqc3VAs0/JgpL/pb1/Eg4s0FUhZD2C2uWdSyZGc +g0/v3aXJCp4j/9VoNhI1WXz3M45nysZIL5OQgXymLqJElQa1pZ3Wa4i/nidvT4AT +Xlxc/qijM8set/nOqp7hVd5J0uG6qdwLRILUddZ6OpXd7ZNi1EXg+Bpc7ehzGsDt +3UFGzYXDjxYnK2frQfjLS8stOQIqSrGthW6x0fdkVx0y8BByvd5J6+JmZl4UZfzA +m99VxXSt4B9x6BvnY7ktzcFDOjtuLc4B/7yg9fv1eQuStA4cHGGAttsCg1X/Kx8W +PvkkeH0UWDZ9vhH9K36703z89da6MWF+bz92B0+4HoOmlVaXRkvblsNaynJnL0LC +Ayry7QBxuh5cMnDdRwJB3AVJIiJ1GVpb7aGvBOnx+s2lwRv9HWtghb+cbwwktx1M +JHyBf3GZNSWTpKY7cD8V+NnBv3UuioOVVo+XAU4LF/bYUjdRpxWADJizNtZrtFo= +-----END CERTIFICATE----- diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt new file mode 100644 index 00000000..caa83b9f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/test-file.crt @@ -0,0 +1,32 @@ +-----BEGIN CERTIFICATE----- +MIIFbTCCA1WgAwIBAgIJAN338vEmMtLsMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNV +BAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYDVQQKDAxHb2xhbmcgVGVz +dHMxEjAQBgNVBAMMCXRlc3QtZmlsZTAeFw0xNzAyMDEyMzUyMDhaFw0yNzAxMzAy +MzUyMDhaME0xCzAJBgNVBAYTAlVLMRMwEQYDVQQIDApUZXN0LVN0YXRlMRUwEwYD +VQQKDAxHb2xhbmcgVGVzdHMxEjAQBgNVBAMMCXRlc3QtZmlsZTCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBAPMGiLjdiffQo3Xc8oUe7wsDhSaAJFOhO6Qs +i0xYrYl7jmCuz9rGD2fdgk5cLqGazKuQ6fIFzHXFU2BKs4CWXt9KO0KFEhfvZeuW +jG5d7C1ZUiuKOrPqjKVu8SZtFPc7y7Ke7msXzY+Z2LLyiJJ93LCMq4+cTSGNXVlI 
+KqUxhxeoD5/QkUPyQy/ilu3GMYfx/YORhDP6Edcuskfj8wRh1UxBejP8YPMvI6St +cE2GkxoEGqDWnQ/61F18te6WI3MD29tnKXOkXVhnSC+yvRLljotW2/tAhHKBG4tj +iQWT5Ri4Wrw2tXxPKRLsVWc7e1/hdxhnuvYpXkWNhKsm002jzkFXlzfEwPd8nZdw +5aT6gPUBN2AAzdoqZI7E200i0orEF7WaSoMfjU1tbHvExp3vyAPOfJ5PS2MQ6W03 +Zsy5dTVH+OBH++rkRzQCFcnIv/OIhya5XZ9KX9nFPgBEP7Xq2A+IjH7B6VN/S/bv +8lhp2V+SQvlew9GttKC4hKuPsl5o7+CMbcqcNUdxm9gGkN8epGEKCuix97bpNlxN +fHZxHE5+8GMzPXMkCD56y5TNKR6ut7JGHMPtGl5lPCLqzG/HzYyFgxsDfDUu2B0A +GKj0lGpnLfGqwhs2/s3jpY7+pcvVQxEpvVTId5byDxu1ujP4HjO/VTQ2P72rE8Ft +C6J2Av0tAgMBAAGjUDBOMB0GA1UdDgQWBBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAf +BgNVHSMEGDAWgBTLT/RbyfBB/Pa07oBnaM+QSJPO9TAMBgNVHRMEBTADAQH/MA0G +CSqGSIb3DQEBCwUAA4ICAQB3sCntCcQwhMgRPPyvOCMyTcQ/Iv+cpfxz2Ck14nlx +AkEAH2CH0ov5GWTt07/ur3aa5x+SAKi0J3wTD1cdiw4U/6Uin6jWGKKxvoo4IaeK +SbM8w/6eKx6UbmHx7PA/eRABY9tTlpdPCVgw7/o3WDr03QM+IAtatzvaCPPczake +pbdLwmBZB/v8V+6jUajy6jOgdSH0PyffGnt7MWgDETmNC6p/Xigp5eh+C8Fb4NGT +xgHES5PBC+sruWp4u22bJGDKTvYNdZHsnw/CaKQWNsQqwisxa3/8N5v+PCff/pxl +r05pE3PdHn9JrCl4iWdVlgtiI9BoPtQyDfa/OEFaScE8KYR8LxaAgdgp3zYncWls +BpwQ6Y/A2wIkhlD9eEp5Ib2hz7isXOs9UwjdriKqrBXqcIAE5M+YIk3+KAQKxAtd +4YsK3CSJ010uphr12YKqlScj4vuKFjuOtd5RyyMIxUG3lrrhAu2AzCeKCLdVgA8+ +75FrYMApUdvcjp4uzbBoED4XRQlx9kdFHVbYgmE/+yddBYJM8u4YlgAL0hW2/D8p +z9JWIfxVmjJnBnXaKGBuiUyZ864A3PJndP6EMMo7TzS2CDnfCYuJjvI0KvDjFNmc +rQA04+qfMSEz3nmKhbbZu4eYLzlADhfH8tT4GMtXf71WLA5AUHGf2Y4+HIHTsmHG +vQ== +-----END CERTIFICATE----- diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/verify.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/verify.go new file mode 100644 index 00000000..0cf3bf39 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/verify.go @@ -0,0 +1,1099 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/url" + "os" + "reflect" + "runtime" + "strings" + "time" + "unicode/utf8" +) + +// ignoreCN disables interpreting Common Name as a hostname. See issue 24151. +var ignoreCN = strings.Contains(os.Getenv("GODEBUG"), "x509ignoreCN=1") + +type InvalidReason int + +const ( + // NotAuthorizedToSign results when a certificate is signed by another + // which isn't marked as a CA certificate. + NotAuthorizedToSign InvalidReason = iota + // Expired results when a certificate has expired, based on the time + // given in the VerifyOptions. + Expired + // CANotAuthorizedForThisName results when an intermediate or root + // certificate has a name constraint which doesn't permit a DNS or + // other name (including IP address) in the leaf certificate. + CANotAuthorizedForThisName + // TooManyIntermediates results when a path length constraint is + // violated. + TooManyIntermediates + // IncompatibleUsage results when the certificate's key usage indicates + // that it may only be used for a different purpose. + IncompatibleUsage + // NameMismatch results when the subject name of a parent certificate + // does not match the issuer name in the child. + NameMismatch + // NameConstraintsWithoutSANs results when a leaf certificate doesn't + // contain a Subject Alternative Name extension, but a CA certificate + // contains name constraints, and the Common Name can be interpreted as + // a hostname. 
+	//
+	// You can avoid this error by setting the experimental GODEBUG environment
+	// variable to "x509ignoreCN=1", disabling Common Name matching entirely.
+	// This behavior might become the default in the future.
+	NameConstraintsWithoutSANs
+	// UnconstrainedName results when a CA certificate contains permitted
+	// name constraints, but leaf certificate contains a name of an
+	// unsupported or unconstrained type.
+	UnconstrainedName
+	// TooManyConstraints results when the number of comparison operations
+	// needed to check a certificate exceeds the limit set by
+	// VerifyOptions.MaxConstraintComparisions. This limit exists to
+	// prevent pathological certificates from consuming excessive amounts of
+	// CPU time to verify.
+	TooManyConstraints
+	// CANotAuthorizedForExtKeyUsage results when an intermediate or root
+	// certificate does not permit a requested extended key usage.
+	CANotAuthorizedForExtKeyUsage
+)
+
+// CertificateInvalidError results when an odd error occurs. Users of this
+// library probably want to handle all these errors uniformly.
+type CertificateInvalidError struct {
+	Cert   *Certificate
+	Reason InvalidReason
+	Detail string
+}
+
+func (e CertificateInvalidError) Error() string {
+	switch e.Reason {
+	case NotAuthorizedToSign:
+		return "x509: certificate is not authorized to sign other certificates"
+	case Expired:
+		return "x509: certificate has expired or is not yet valid"
+	case CANotAuthorizedForThisName:
+		return "x509: a root or intermediate certificate is not authorized to sign for this name: " + e.Detail
+	case CANotAuthorizedForExtKeyUsage:
+		return "x509: a root or intermediate certificate is not authorized for an extended key usage: " + e.Detail
+	case TooManyIntermediates:
+		return "x509: too many intermediates for path length constraint"
+	case IncompatibleUsage:
+		return "x509: certificate specifies an incompatible key usage"
+	case NameMismatch:
+		return "x509: issuer name does not match subject from issuing certificate"
+	case NameConstraintsWithoutSANs:
+		return "x509: issuer has name constraints but leaf doesn't have a SAN extension"
+	case UnconstrainedName:
+		return "x509: issuer has name constraints but leaf contains unknown or unconstrained name: " + e.Detail
+	}
+	return "x509: unknown error"
+}
+
+// HostnameError results when the set of authorized names doesn't match the
+// requested name.
+type HostnameError struct {
+	Certificate *Certificate
+	Host        string
+}
+
+func (h HostnameError) Error() string {
+	c := h.Certificate
+
+	if !c.hasSANExtension() && !validHostname(c.Subject.CommonName) &&
+		matchHostnames(toLowerCaseASCII(c.Subject.CommonName), toLowerCaseASCII(h.Host)) {
+		// This would have validated, if it weren't for the validHostname check on Common Name.
+ return "x509: Common Name is not a valid hostname: " + c.Subject.CommonName + } + + var valid string + if ip := net.ParseIP(h.Host); ip != nil { + // Trying to validate an IP + if len(c.IPAddresses) == 0 { + return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs" + } + for _, san := range c.IPAddresses { + if len(valid) > 0 { + valid += ", " + } + valid += san.String() + } + } else { + if c.commonNameAsHostname() { + valid = c.Subject.CommonName + } else { + valid = strings.Join(c.DNSNames, ", ") + } + } + + if len(valid) == 0 { + return "x509: certificate is not valid for any names, but wanted to match " + h.Host + } + return "x509: certificate is valid for " + valid + ", not " + h.Host +} + +// UnknownAuthorityError results when the certificate issuer is unknown +type UnknownAuthorityError struct { + Cert *Certificate + // hintErr contains an error that may be helpful in determining why an + // authority wasn't found. + hintErr error + // hintCert contains a possible authority certificate that was rejected + // because of the error in hintErr. + hintCert *Certificate +} + +func (e UnknownAuthorityError) Error() string { + s := "x509: certificate signed by unknown authority" + if e.hintErr != nil { + certName := e.hintCert.Subject.CommonName + if len(certName) == 0 { + if len(e.hintCert.Subject.Organization) > 0 { + certName = e.hintCert.Subject.Organization[0] + } else { + certName = "serial:" + e.hintCert.SerialNumber.String() + } + } + s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName) + } + return s +} + +// SystemRootsError results when we fail to load the system root certificates. +type SystemRootsError struct { + Err error +} + +func (se SystemRootsError) Error() string { + msg := "x509: failed to load system roots and no roots provided" + if se.Err != nil { + return msg + "; " + se.Err.Error() + } + return msg +} + +// errNotParsed is returned when a certificate without ASN.1 contents is +// verified. Platform-specific verification needs the ASN.1 contents. +var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate") + +// VerifyOptions contains parameters for Certificate.Verify. It's a structure +// because other PKIX verification APIs have ended up needing many options. +type VerifyOptions struct { + DNSName string + Intermediates *CertPool + Roots *CertPool // if nil, the system roots are used + CurrentTime time.Time // if zero, the current time is used + // Options to disable various verification checks. + DisableTimeChecks bool + DisableCriticalExtensionChecks bool + DisableNameChecks bool + DisableEKUChecks bool + DisablePathLenChecks bool + DisableNameConstraintChecks bool + // KeyUsage specifies which Extended Key Usage values are acceptable. A leaf + // certificate is accepted if it contains any of the listed values. An empty + // list means ExtKeyUsageServerAuth. To accept any key usage, include + // ExtKeyUsageAny. + // + // Certificate chains are required to nest these extended key usage values. + // (This matches the Windows CryptoAPI behavior, but not the spec.) + KeyUsages []ExtKeyUsage + // MaxConstraintComparisions is the maximum number of comparisons to + // perform when checking a given certificate's name constraints. If + // zero, a sensible default is used. This limit prevents pathological + // certificates from consuming excessive amounts of CPU time when + // validating. 
+ MaxConstraintComparisions int +} + +const ( + leafCertificate = iota + intermediateCertificate + rootCertificate +) + +// rfc2821Mailbox represents a “mailbox” (which is an email address to most +// people) by breaking it into the “local” (i.e. before the '@') and “domain” +// parts. +type rfc2821Mailbox struct { + local, domain string +} + +// parseRFC2821Mailbox parses an email address into local and domain parts, +// based on the ABNF for a “Mailbox” from RFC 2821. According to RFC 5280, +// Section 4.2.1.6 that's correct for an rfc822Name from a certificate: “The +// format of an rfc822Name is a "Mailbox" as defined in RFC 2821, Section 4.1.2”. +func parseRFC2821Mailbox(in string) (mailbox rfc2821Mailbox, ok bool) { + if len(in) == 0 { + return mailbox, false + } + + localPartBytes := make([]byte, 0, len(in)/2) + + if in[0] == '"' { + // Quoted-string = DQUOTE *qcontent DQUOTE + // non-whitespace-control = %d1-8 / %d11 / %d12 / %d14-31 / %d127 + // qcontent = qtext / quoted-pair + // qtext = non-whitespace-control / + // %d33 / %d35-91 / %d93-126 + // quoted-pair = ("\" text) / obs-qp + // text = %d1-9 / %d11 / %d12 / %d14-127 / obs-text + // + // (Names beginning with “obs-” are the obsolete syntax from RFC 2822, + // Section 4. Since it has been 16 years, we no longer accept that.) + in = in[1:] + QuotedString: + for { + if len(in) == 0 { + return mailbox, false + } + c := in[0] + in = in[1:] + + switch { + case c == '"': + break QuotedString + + case c == '\\': + // quoted-pair + if len(in) == 0 { + return mailbox, false + } + if in[0] == 11 || + in[0] == 12 || + (1 <= in[0] && in[0] <= 9) || + (14 <= in[0] && in[0] <= 127) { + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + } else { + return mailbox, false + } + + case c == 11 || + c == 12 || + // Space (char 32) is not allowed based on the + // BNF, but RFC 3696 gives an example that + // assumes that it is. Several “verified” + // errata continue to argue about this point. + // We choose to accept it. + c == 32 || + c == 33 || + c == 127 || + (1 <= c && c <= 8) || + (14 <= c && c <= 31) || + (35 <= c && c <= 91) || + (93 <= c && c <= 126): + // qtext + localPartBytes = append(localPartBytes, c) + + default: + return mailbox, false + } + } + } else { + // Atom ("." Atom)* + NextChar: + for len(in) > 0 { + // atext from RFC 2822, Section 3.2.4 + c := in[0] + + switch { + case c == '\\': + // Examples given in RFC 3696 suggest that + // escaped characters can appear outside of a + // quoted string. Several “verified” errata + // continue to argue the point. We choose to + // accept it. + in = in[1:] + if len(in) == 0 { + return mailbox, false + } + fallthrough + + case ('0' <= c && c <= '9') || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + c == '!' || c == '#' || c == '$' || c == '%' || + c == '&' || c == '\'' || c == '*' || c == '+' || + c == '-' || c == '/' || c == '=' || c == '?' || + c == '^' || c == '_' || c == '`' || c == '{' || + c == '|' || c == '}' || c == '~' || c == '.': + localPartBytes = append(localPartBytes, in[0]) + in = in[1:] + + default: + break NextChar + } + } + + if len(localPartBytes) == 0 { + return mailbox, false + } + + // From RFC 3696, Section 3: + // “period (".") may also appear, but may not be used to start + // or end the local part, nor may two or more consecutive + // periods appear.” + twoDots := []byte{'.', '.'} + if localPartBytes[0] == '.' || + localPartBytes[len(localPartBytes)-1] == '.' 
||
+			bytes.Contains(localPartBytes, twoDots) {
+			return mailbox, false
+		}
+	}
+
+	if len(in) == 0 || in[0] != '@' {
+		return mailbox, false
+	}
+	in = in[1:]
+
+	// The RFC specifies a format for domains, but that's known to be
+	// violated in practice so we accept that anything after an '@' is the
+	// domain part.
+	if _, ok := domainToReverseLabels(in); !ok {
+		return mailbox, false
+	}
+
+	mailbox.local = string(localPartBytes)
+	mailbox.domain = in
+	return mailbox, true
+}
+
+// domainToReverseLabels converts a textual domain name like foo.example.com to
+// the list of labels in reverse order, e.g. ["com", "example", "foo"].
+func domainToReverseLabels(domain string) (reverseLabels []string, ok bool) {
+	for len(domain) > 0 {
+		if i := strings.LastIndexByte(domain, '.'); i == -1 {
+			reverseLabels = append(reverseLabels, domain)
+			domain = ""
+		} else {
+			reverseLabels = append(reverseLabels, domain[i+1:len(domain)])
+			domain = domain[:i]
+		}
+	}
+
+	if len(reverseLabels) > 0 && len(reverseLabels[0]) == 0 {
+		// An empty label at the end indicates an absolute value.
+		return nil, false
+	}
+
+	for _, label := range reverseLabels {
+		if len(label) == 0 {
+			// Empty labels are otherwise invalid.
+			return nil, false
+		}
+
+		for _, c := range label {
+			if c < 33 || c > 126 {
+				// Invalid character.
+				return nil, false
+			}
+		}
+	}
+
+	return reverseLabels, true
+}
+
+func matchEmailConstraint(mailbox rfc2821Mailbox, constraint string) (bool, error) {
+	// If the constraint contains an @, then it specifies an exact mailbox
+	// name.
+	if strings.Contains(constraint, "@") {
+		constraintMailbox, ok := parseRFC2821Mailbox(constraint)
+		if !ok {
+			return false, fmt.Errorf("x509: internal error: cannot parse constraint %q", constraint)
+		}
+		return mailbox.local == constraintMailbox.local && strings.EqualFold(mailbox.domain, constraintMailbox.domain), nil
+	}
+
+	// Otherwise the constraint is like a DNS constraint of the domain part
+	// of the mailbox.
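+	// For example (illustrative): the constraint "example.com" accepts both
+	// "user@example.com" and "user@mail.example.com", while the constraint
+	// ".example.com" requires at least one extra label and so accepts only
+	// the latter.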
+ return matchDomainConstraint(mailbox.domain, constraint) +} + +func matchURIConstraint(uri *url.URL, constraint string) (bool, error) { + // From RFC 5280, Section 4.2.1.10: + // “a uniformResourceIdentifier that does not include an authority + // component with a host name specified as a fully qualified domain + // name (e.g., if the URI either does not include an authority + // component or includes an authority component in which the host name + // is specified as an IP address), then the application MUST reject the + // certificate.” + + host := uri.Host + if len(host) == 0 { + return false, fmt.Errorf("URI with empty host (%q) cannot be matched against constraints", uri.String()) + } + + if strings.Contains(host, ":") && !strings.HasSuffix(host, "]") { + var err error + host, _, err = net.SplitHostPort(uri.Host) + if err != nil { + return false, err + } + } + + if strings.HasPrefix(host, "[") && strings.HasSuffix(host, "]") || + net.ParseIP(host) != nil { + return false, fmt.Errorf("URI with IP (%q) cannot be matched against constraints", uri.String()) + } + + return matchDomainConstraint(host, constraint) +} + +func matchIPConstraint(ip net.IP, constraint *net.IPNet) (bool, error) { + if len(ip) != len(constraint.IP) { + return false, nil + } + + for i := range ip { + if mask := constraint.Mask[i]; ip[i]&mask != constraint.IP[i]&mask { + return false, nil + } + } + + return true, nil +} + +func matchDomainConstraint(domain, constraint string) (bool, error) { + // The meaning of zero length constraints is not specified, but this + // code follows NSS and accepts them as matching everything. + if len(constraint) == 0 { + return true, nil + } + + domainLabels, ok := domainToReverseLabels(domain) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", domain) + } + + // RFC 5280 says that a leading period in a domain name means that at + // least one label must be prepended, but only for URI and email + // constraints, not DNS constraints. The code also supports that + // behaviour for DNS constraints. + + mustHaveSubdomains := false + if constraint[0] == '.' { + mustHaveSubdomains = true + constraint = constraint[1:] + } + + constraintLabels, ok := domainToReverseLabels(constraint) + if !ok { + return false, fmt.Errorf("x509: internal error: cannot parse domain %q", constraint) + } + + if len(domainLabels) < len(constraintLabels) || + (mustHaveSubdomains && len(domainLabels) == len(constraintLabels)) { + return false, nil + } + + for i, constraintLabel := range constraintLabels { + if !strings.EqualFold(constraintLabel, domainLabels[i]) { + return false, nil + } + } + + return true, nil +} + +// checkNameConstraints checks that c permits a child certificate to claim the +// given name, of type nameType. The argument parsedName contains the parsed +// form of name, suitable for passing to the match function. The total number +// of comparisons is tracked in the given count and should not exceed the given +// limit. 
+func (c *Certificate) checkNameConstraints(count *int, + maxConstraintComparisons int, + nameType string, + name string, + parsedName interface{}, + match func(parsedName, constraint interface{}) (match bool, err error), + permitted, excluded interface{}) error { + + excludedValue := reflect.ValueOf(excluded) + + *count += excludedValue.Len() + if *count > maxConstraintComparisons { + return CertificateInvalidError{c, TooManyConstraints, ""} + } + + for i := 0; i < excludedValue.Len(); i++ { + constraint := excludedValue.Index(i).Interface() + match, err := match(parsedName, constraint) + if err != nil { + return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} + } + + if match { + return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is excluded by constraint %q", nameType, name, constraint)} + } + } + + permittedValue := reflect.ValueOf(permitted) + + *count += permittedValue.Len() + if *count > maxConstraintComparisons { + return CertificateInvalidError{c, TooManyConstraints, ""} + } + + ok := true + for i := 0; i < permittedValue.Len(); i++ { + constraint := permittedValue.Index(i).Interface() + + var err error + if ok, err = match(parsedName, constraint); err != nil { + return CertificateInvalidError{c, CANotAuthorizedForThisName, err.Error()} + } + + if ok { + break + } + } + + if !ok { + return CertificateInvalidError{c, CANotAuthorizedForThisName, fmt.Sprintf("%s %q is not permitted by any constraint", nameType, name)} + } + + return nil +} + +// isValid performs validity checks on c given that it is a candidate to append +// to the chain in currentChain. +func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { + if !opts.DisableCriticalExtensionChecks && len(c.UnhandledCriticalExtensions) > 0 { + return UnhandledCriticalExtension{ID: c.UnhandledCriticalExtensions[0]} + } + + if !opts.DisableNameChecks && len(currentChain) > 0 { + child := currentChain[len(currentChain)-1] + if !bytes.Equal(child.RawIssuer, c.RawSubject) { + return CertificateInvalidError{c, NameMismatch, ""} + } + } + + if !opts.DisableTimeChecks { + now := opts.CurrentTime + if now.IsZero() { + now = time.Now() + } + if now.Before(c.NotBefore) || now.After(c.NotAfter) { + return CertificateInvalidError{c, Expired, ""} + } + } + + maxConstraintComparisons := opts.MaxConstraintComparisions + if maxConstraintComparisons == 0 { + maxConstraintComparisons = 250000 + } + comparisonCount := 0 + + var leaf *Certificate + if certType == intermediateCertificate || certType == rootCertificate { + if len(currentChain) == 0 { + return errors.New("x509: internal error: empty chain when appending CA cert") + } + leaf = currentChain[0] + } + + checkNameConstraints := !opts.DisableNameConstraintChecks && (certType == intermediateCertificate || certType == rootCertificate) && c.hasNameConstraints() + if checkNameConstraints && leaf.commonNameAsHostname() { + // This is the deprecated, legacy case of depending on the commonName as + // a hostname. We don't enforce name constraints against the CN, but + // VerifyHostname will look for hostnames in there if there are no SANs. + // In order to ensure VerifyHostname will not accept an unchecked name, + // return an error here. 
+		return CertificateInvalidError{c, NameConstraintsWithoutSANs, ""}
+	} else if checkNameConstraints && leaf.hasSANExtension() {
+		err := forEachSAN(leaf.getSANExtension(), func(tag int, data []byte) error {
+			switch tag {
+			case nameTypeEmail:
+				name := string(data)
+				mailbox, ok := parseRFC2821Mailbox(name)
+				if !ok {
+					return fmt.Errorf("x509: cannot parse rfc822Name %q", name)
+				}
+
+				if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "email address", name, mailbox,
+					func(parsedName, constraint interface{}) (bool, error) {
+						return matchEmailConstraint(parsedName.(rfc2821Mailbox), constraint.(string))
+					}, c.PermittedEmailAddresses, c.ExcludedEmailAddresses); err != nil {
+					return err
+				}
+
+			case nameTypeDNS:
+				name := string(data)
+				if _, ok := domainToReverseLabels(name); !ok {
+					return fmt.Errorf("x509: cannot parse dnsName %q", name)
+				}
+
+				if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "DNS name", name, name,
+					func(parsedName, constraint interface{}) (bool, error) {
+						return matchDomainConstraint(parsedName.(string), constraint.(string))
+					}, c.PermittedDNSDomains, c.ExcludedDNSDomains); err != nil {
+					return err
+				}
+
+			case nameTypeURI:
+				name := string(data)
+				uri, err := url.Parse(name)
+				if err != nil {
+					return fmt.Errorf("x509: internal error: URI SAN %q failed to parse", name)
+				}
+
+				if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "URI", name, uri,
+					func(parsedName, constraint interface{}) (bool, error) {
+						return matchURIConstraint(parsedName.(*url.URL), constraint.(string))
+					}, c.PermittedURIDomains, c.ExcludedURIDomains); err != nil {
+					return err
+				}
+
+			case nameTypeIP:
+				ip := net.IP(data)
+				if l := len(ip); l != net.IPv4len && l != net.IPv6len {
+					return fmt.Errorf("x509: internal error: IP SAN %x failed to parse", data)
+				}
+
+				if err := c.checkNameConstraints(&comparisonCount, maxConstraintComparisons, "IP address", ip.String(), ip,
+					func(parsedName, constraint interface{}) (bool, error) {
+						return matchIPConstraint(parsedName.(net.IP), constraint.(*net.IPNet))
+					}, c.PermittedIPRanges, c.ExcludedIPRanges); err != nil {
+					return err
+				}
+
+			default:
+				// Unknown SAN types are ignored.
+			}
+
+			return nil
+		})
+
+		if err != nil {
+			return err
+		}
+	}
+
+	// KeyUsage status flags are ignored. From Engineering Security, Peter
+	// Gutmann: A European government CA marked its signing certificates as
+	// being valid for encryption only, but no-one noticed. Another
+	// European CA marked its signature keys as not being valid for
+	// signatures. A different CA marked its own trusted root certificate
+	// as being invalid for certificate signing. Another national CA
+	// distributed a certificate to be used to encrypt data for the
+	// country’s tax authority that was marked as only being usable for
+	// digital signatures but not for encryption. Yet another CA reversed
+	// the order of the bit flags in the keyUsage due to confusion over
+	// encoding endianness, essentially setting a random keyUsage in
+	// certificates that it issued. Another CA created a self-invalidating
+	// certificate by adding a certificate policy statement stipulating
+	// that the certificate had to be used strictly as specified in the
+	// keyUsage, and a keyUsage containing a flag indicating that the RSA
+	// encryption key could only be used for Diffie-Hellman key agreement.
+
+	if certType == intermediateCertificate && (!c.BasicConstraintsValid || !c.IsCA) {
+		return CertificateInvalidError{c, NotAuthorizedToSign, ""}
+	}
+
+	if !opts.DisablePathLenChecks && c.BasicConstraintsValid && c.MaxPathLen >= 0 {
+		numIntermediates := len(currentChain) - 1
+		if numIntermediates > c.MaxPathLen {
+			return CertificateInvalidError{c, TooManyIntermediates, ""}
+		}
+	}
+
+	return nil
+}
+
+// Verify attempts to verify c by building one or more chains from c to a
+// certificate in opts.Roots, using certificates in opts.Intermediates if
+// needed. If successful, it returns one or more chains where the first
+// element of the chain is c and the last element is from opts.Roots.
+//
+// If opts.Roots is nil and system roots are unavailable the returned error
+// will be of type SystemRootsError.
+//
+// Name constraints in the intermediates will be applied to all names claimed
+// in the chain, not just opts.DNSName. Thus it is invalid for a leaf to claim
+// example.com if an intermediate doesn't permit it, even if example.com is not
+// the name being validated. Note that DirectoryName constraints are not
+// supported.
+//
+// Extended Key Usage values are enforced down a chain, so an intermediate or
+// root that enumerates EKUs prevents a leaf from asserting an EKU not in that
+// list.
+//
+// WARNING: this function doesn't do any revocation checking.
+func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err error) {
+	// Platform-specific verification needs the ASN.1 contents so
+	// this makes the behavior consistent across platforms.
+	if len(c.Raw) == 0 {
+		return nil, errNotParsed
+	}
+	if opts.Intermediates != nil {
+		for _, intermediate := range opts.Intermediates.certs {
+			if len(intermediate.Raw) == 0 {
+				return nil, errNotParsed
+			}
+		}
+	}
+
+	// Use Windows's own verification and chain building.
+	if opts.Roots == nil && runtime.GOOS == "windows" {
+		return c.systemVerify(&opts)
+	}
+
+	if opts.Roots == nil {
+		opts.Roots = systemRootsPool()
+		if opts.Roots == nil {
+			return nil, SystemRootsError{systemRootsErr}
+		}
+	}
+
+	err = c.isValid(leafCertificate, nil, &opts)
+	if err != nil {
+		return
+	}
+
+	if len(opts.DNSName) > 0 {
+		err = c.VerifyHostname(opts.DNSName)
+		if err != nil {
+			return
+		}
+	}
+
+	var candidateChains [][]*Certificate
+	if opts.Roots.contains(c) {
+		candidateChains = append(candidateChains, []*Certificate{c})
+	} else {
+		if candidateChains, err = c.buildChains(nil, []*Certificate{c}, nil, &opts); err != nil {
+			return nil, err
+		}
+	}
+
+	keyUsages := opts.KeyUsages
+	if len(keyUsages) == 0 {
+		keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth}
+	}
+
+	// If any key usage is acceptable then we're done.
+	for _, usage := range keyUsages {
+		if usage == ExtKeyUsageAny {
+			return candidateChains, nil
+		}
+	}
+
+	for _, candidate := range candidateChains {
+		if opts.DisableEKUChecks || checkChainForKeyUsage(candidate, keyUsages) {
+			chains = append(chains, candidate)
+		}
+	}
+
+	if len(chains) == 0 {
+		return nil, CertificateInvalidError{c, IncompatibleUsage, ""}
+	}
+
+	return chains, nil
+}
+
+func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate {
+	n := make([]*Certificate, len(chain)+1)
+	copy(n, chain)
+	n[len(chain)] = cert
+	return n
+}
+
+// maxChainSignatureChecks is the maximum number of CheckSignatureFrom calls
+// that an invocation of buildChains will (transitively) make. Most chains are
+// less than 15 certificates long, so this leaves space for multiple chains and
+// for failed checks due to different intermediates having the same Subject.
+const maxChainSignatureChecks = 100
+
+func (c *Certificate) buildChains(cache map[*Certificate][][]*Certificate, currentChain []*Certificate, sigChecks *int, opts *VerifyOptions) (chains [][]*Certificate, err error) {
+	var (
+		hintErr  error
+		hintCert *Certificate
+	)
+
+	considerCandidate := func(certType int, candidate *Certificate) {
+		for _, cert := range currentChain {
+			if cert.Equal(candidate) {
+				return
+			}
+		}
+
+		if sigChecks == nil {
+			sigChecks = new(int)
+		}
+		*sigChecks++
+		if *sigChecks > maxChainSignatureChecks {
+			err = errors.New("x509: signature check attempts limit reached while verifying certificate chain")
+			return
+		}
+
+		if err := c.CheckSignatureFrom(candidate); err != nil {
+			if hintErr == nil {
+				hintErr = err
+				hintCert = candidate
+			}
+			return
+		}
+
+		err = candidate.isValid(certType, currentChain, opts)
+		if err != nil {
+			return
+		}
+
+		switch certType {
+		case rootCertificate:
+			chains = append(chains, appendToFreshChain(currentChain, candidate))
+		case intermediateCertificate:
+			if cache == nil {
+				cache = make(map[*Certificate][][]*Certificate)
+			}
+			childChains, ok := cache[candidate]
+			if !ok {
+				childChains, err = candidate.buildChains(cache, appendToFreshChain(currentChain, candidate), sigChecks, opts)
+				cache[candidate] = childChains
+			}
+			chains = append(chains, childChains...)
+		}
+	}
+
+	for _, rootNum := range opts.Roots.findPotentialParents(c) {
+		considerCandidate(rootCertificate, opts.Roots.certs[rootNum])
+	}
+	for _, intermediateNum := range opts.Intermediates.findPotentialParents(c) {
+		considerCandidate(intermediateCertificate, opts.Intermediates.certs[intermediateNum])
+	}
+
+	if len(chains) > 0 {
+		err = nil
+	}
+	if len(chains) == 0 && err == nil {
+		err = UnknownAuthorityError{c, hintErr, hintCert}
+	}
+
+	return
+}
+
+// validHostname reports whether host is a valid hostname that can be matched or
+// matched against according to RFC 6125 2.2, with some leniency to accommodate
+// legacy values.
+func validHostname(host string) bool {
+	host = strings.TrimSuffix(host, ".")
+
+	if len(host) == 0 {
+		return false
+	}
+
+	for i, part := range strings.Split(host, ".") {
+		if part == "" {
+			// Empty label.
+			return false
+		}
+		if i == 0 && part == "*" {
+			// Only allow full left-most wildcards, as those are the only ones
+			// we match, and matching literal '*' characters is probably never
+			// the expected behavior.
+			continue
+		}
+		for j, c := range part {
+			if 'a' <= c && c <= 'z' {
+				continue
+			}
+			if '0' <= c && c <= '9' {
+				continue
+			}
+			if 'A' <= c && c <= 'Z' {
+				continue
+			}
+			if c == '-' && j != 0 {
+				continue
+			}
+			if c == '_' || c == ':' {
+				// Not valid characters in hostnames, but commonly
+				// found in deployments outside the WebPKI.
+				continue
+			}
+			return false
+		}
+	}
+
+	return true
+}
+
+// commonNameAsHostname reports whether the Common Name field should be
+// considered the hostname that the certificate is valid for. This is a legacy
+// behavior, disabled if the Subject Alt Name extension is present.
+//
+// It applies the strict validHostname check to the Common Name field, so that
+// certificates without SANs can still be validated against CAs with name
+// constraints if there is no risk the CN would be matched as a hostname.
+// See NameConstraintsWithoutSANs and issue 24151.
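+//
+// For example (illustrative): a SAN-less certificate with CN "internal-db-1"
+// passes validHostname and may be matched as a hostname, while a CN such as
+// "Acme Intermediate CA" contains spaces, fails validHostname, and is never
+// treated as a hostname.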
+func (c *Certificate) commonNameAsHostname() bool { + return !ignoreCN && !c.hasSANExtension() && validHostname(c.Subject.CommonName) +} + +func matchHostnames(pattern, host string) bool { + host = strings.TrimSuffix(host, ".") + pattern = strings.TrimSuffix(pattern, ".") + + if len(pattern) == 0 || len(host) == 0 { + return false + } + + patternParts := strings.Split(pattern, ".") + hostParts := strings.Split(host, ".") + + if len(patternParts) != len(hostParts) { + return false + } + + for i, patternPart := range patternParts { + if i == 0 && patternPart == "*" { + continue + } + if patternPart != hostParts[i] { + return false + } + } + + return true +} + +// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use +// an explicitly ASCII function to avoid any sharp corners resulting from +// performing Unicode operations on DNS labels. +func toLowerCaseASCII(in string) string { + // If the string is already lower-case then there's nothing to do. + isAlreadyLowerCase := true + for _, c := range in { + if c == utf8.RuneError { + // If we get a UTF-8 error then there might be + // upper-case ASCII bytes in the invalid sequence. + isAlreadyLowerCase = false + break + } + if 'A' <= c && c <= 'Z' { + isAlreadyLowerCase = false + break + } + } + + if isAlreadyLowerCase { + return in + } + + out := []byte(in) + for i, c := range out { + if 'A' <= c && c <= 'Z' { + out[i] += 'a' - 'A' + } + } + return string(out) +} + +// VerifyHostname returns nil if c is a valid certificate for the named host. +// Otherwise it returns an error describing the mismatch. +func (c *Certificate) VerifyHostname(h string) error { + // IP addresses may be written in [ ]. + candidateIP := h + if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' { + candidateIP = h[1 : len(h)-1] + } + if ip := net.ParseIP(candidateIP); ip != nil { + // We only match IP addresses against IP SANs. + // See RFC 6125, Appendix B.2. + for _, candidate := range c.IPAddresses { + if ip.Equal(candidate) { + return nil + } + } + return HostnameError{c, candidateIP} + } + + lowered := toLowerCaseASCII(h) + + if c.commonNameAsHostname() { + if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) { + return nil + } + } else { + for _, match := range c.DNSNames { + if matchHostnames(toLowerCaseASCII(match), lowered) { + return nil + } + } + } + + return HostnameError{c, h} +} + +func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool { + usages := make([]ExtKeyUsage, len(keyUsages)) + copy(usages, keyUsages) + + if len(chain) == 0 { + return false + } + + usagesRemaining := len(usages) + + // We walk down the list and cross out any usages that aren't supported + // by each certificate. If we cross out all the usages, then the chain + // is unacceptable. + +NextCert: + for i := len(chain) - 1; i >= 0; i-- { + cert := chain[i] + if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 { + // The certificate doesn't have any extended key usage specified. + continue + } + + for _, usage := range cert.ExtKeyUsage { + if usage == ExtKeyUsageAny { + // The certificate is explicitly good for any usage. 
+				continue NextCert
+			}
+		}
+
+		const invalidUsage ExtKeyUsage = -1
+
+	NextRequestedUsage:
+		for i, requestedUsage := range usages {
+			if requestedUsage == invalidUsage {
+				continue
+			}
+
+			for _, usage := range cert.ExtKeyUsage {
+				if requestedUsage == usage {
+					continue NextRequestedUsage
+				} else if requestedUsage == ExtKeyUsageServerAuth &&
+					(usage == ExtKeyUsageNetscapeServerGatedCrypto ||
+						usage == ExtKeyUsageMicrosoftServerGatedCrypto) {
+					// In order to support COMODO
+					// certificate chains, we have to
+					// accept Netscape or Microsoft SGC
+					// usages as equal to ServerAuth.
+					continue NextRequestedUsage
+				}
+			}
+
+			usages[i] = invalidUsage
+			usagesRemaining--
+			if usagesRemaining == 0 {
+				return false
+			}
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/x509.go b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/x509.go
new file mode 100644
index 00000000..f70a5466
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/certificate-transparency-go/x509/x509.go
@@ -0,0 +1,3242 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package x509 parses X.509-encoded keys and certificates.
+//
+// On UNIX systems the environment variables SSL_CERT_FILE and SSL_CERT_DIR
+// can be used to override the system default locations for the SSL certificate
+// file and SSL certificate files directory, respectively.
+//
+// This is a fork of the Go library crypto/x509 package, primarily adapted for
+// use with Certificate Transparency. Main areas of difference are:
+//
+// - Life as a fork:
+//   - Rename OS-specific cgo code so it doesn't clash with main Go library.
+//   - Use local library imports (asn1, pkix) throughout.
+//   - Add version-specific wrappers for Go version-incompatible code (in
+//     nilref_*_darwin.go, ptr_*_windows.go).
+// - Laxer certificate parsing:
+//   - Add options to disable various validation checks (times, EKUs etc).
+//   - Use NonFatalErrors type for some errors and continue parsing; this
+//     can be checked with IsFatal(err).
+//   - Support for short bitlength ECDSA curves (in curves.go).
+// - Certificate Transparency specific function:
+//   - Parsing and marshaling of SCTList extension.
+//   - RemoveSCTList() function for rebuilding CT leaf entry.
+//   - Pre-certificate processing (RemoveCTPoison(), BuildPrecertTBS(),
+//     ParseTBSCertificate(), IsPrecertificate()).
+// - Revocation list processing:
+//   - Detailed CRL parsing (in revoked.go)
+//   - Detailed error recording mechanism (in error.go, errors.go)
+//   - Factor out parseDistributionPoints() for reuse.
+//   - Factor out and generalize GeneralNames parsing (in names.go)
+//   - Fix CRL commenting.
+// - RPKI support:
+//   - Support for SubjectInfoAccess extension
+//   - Support for RFC3779 extensions (in rpki.go)
+// - RSAES-OAEP support:
+//   - Support for parsing RSAES-OAEP public keys from certificates
+// - Ed25519 support:
+//   - Support for parsing and marshaling Ed25519 keys
+// - General improvements:
+//   - Export and use OID values throughout.
+//   - Export OIDFromNamedCurve().
+//   - Export SignatureAlgorithmFromAI().
+//   - Add OID value to UnhandledCriticalExtension error.
+//   - Minor typo/lint fixes.
+package x509
+
+import (
+	"bytes"
+	"crypto"
+	"crypto/dsa"
+	"crypto/ecdsa"
+	"crypto/elliptic"
+	"crypto/rsa"
+	_ "crypto/sha1"
+	_ "crypto/sha256"
+	_ "crypto/sha512"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io"
+	"math/big"
+	"net"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1"
+	"golang.org/x/crypto/ed25519"
+
+	"github.com/google/certificate-transparency-go/asn1"
+	"github.com/google/certificate-transparency-go/tls"
+	"github.com/google/certificate-transparency-go/x509/pkix"
+	"golang.org/x/crypto/cryptobyte"
+)
+
+// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo
+// in RFC 3280.
+type pkixPublicKey struct {
+	Algo      pkix.AlgorithmIdentifier
+	BitString asn1.BitString
+}
+
+// ParsePKIXPublicKey parses a DER encoded public key. These values are
+// typically found in PEM blocks with "BEGIN PUBLIC KEY".
+//
+// Supported key types include RSA, DSA, ECDSA, and Ed25519. Unknown key
+// types result in an error.
+//
+// On success, pub will be of type *rsa.PublicKey, *dsa.PublicKey,
+// *ecdsa.PublicKey, or ed25519.PublicKey.
+func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) {
+	var pki publicKeyInfo
+	if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil {
+		return nil, err
+	} else if len(rest) != 0 {
+		return nil, errors.New("x509: trailing data after ASN.1 of public-key")
+	}
+	algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm)
+	if algo == UnknownPublicKeyAlgorithm {
+		return nil, errors.New("x509: unknown public key algorithm")
+	}
+	var nfe NonFatalErrors
+	pub, err = parsePublicKey(algo, &pki, &nfe)
+	if err != nil {
+		return pub, err
+	}
+	// Treat non-fatal errors as fatal for this entrypoint.
+	if len(nfe.Errors) > 0 {
+		return nil, nfe.Errors[0]
+	}
+	return pub, nil
+}
+
+func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) {
+	switch pub := pub.(type) {
+	case *rsa.PublicKey:
+		publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{
+			N: pub.N,
+			E: pub.E,
+		})
+		if err != nil {
+			return nil, pkix.AlgorithmIdentifier{}, err
+		}
+		publicKeyAlgorithm.Algorithm = OIDPublicKeyRSA
+		// This is a NULL parameters value which is required by
+		// RFC 3279, Section 2.3.1.
+		publicKeyAlgorithm.Parameters = asn1.NullRawValue
+	case *ecdsa.PublicKey:
+		publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y)
+		oid, ok := OIDFromNamedCurve(pub.Curve)
+		if !ok {
+			return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve")
+		}
+		publicKeyAlgorithm.Algorithm = OIDPublicKeyECDSA
+		var paramBytes []byte
+		paramBytes, err = asn1.Marshal(oid)
+		if err != nil {
+			return
+		}
+		publicKeyAlgorithm.Parameters.FullBytes = paramBytes
+	case ed25519.PublicKey:
+		publicKeyBytes = pub
+		publicKeyAlgorithm.Algorithm = OIDPublicKeyEd25519
+		// RFC 8410 section 3: "...the parameters MUST be absent"
+	default:
+		return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: only RSA, ECDSA and Ed25519 public keys supported")
+	}
+
+	return publicKeyBytes, publicKeyAlgorithm, nil
+}
+
+// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format.
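+//
+// A minimal sketch of wrapping the result in a PEM block (pub is assumed to
+// be an *rsa.PublicKey or *ecdsa.PublicKey obtained elsewhere; encoding/pem
+// and os are assumed imports on the caller's side):
+//
+//	der, err := MarshalPKIXPublicKey(pub)
+//	if err != nil {
+//		// handle the error
+//	}
+//	pem.Encode(os.Stdout, &pem.Block{Type: "PUBLIC KEY", Bytes: der})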
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) {
+	var publicKeyBytes []byte
+	var publicKeyAlgorithm pkix.AlgorithmIdentifier
+	var err error
+
+	if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil {
+		return nil, err
+	}
+
+	pkix := pkixPublicKey{
+		Algo: publicKeyAlgorithm,
+		BitString: asn1.BitString{
+			Bytes:     publicKeyBytes,
+			BitLength: 8 * len(publicKeyBytes),
+		},
+	}
+
+	ret, _ := asn1.Marshal(pkix)
+	return ret, nil
+}
+
+// These structures reflect the ASN.1 structure of X.509 certificates:
+
+type certificate struct {
+	Raw                asn1.RawContent
+	TBSCertificate     tbsCertificate
+	SignatureAlgorithm pkix.AlgorithmIdentifier
+	SignatureValue     asn1.BitString
+}
+
+type tbsCertificate struct {
+	Raw                asn1.RawContent
+	Version            int `asn1:"optional,explicit,default:0,tag:0"`
+	SerialNumber       *big.Int
+	SignatureAlgorithm pkix.AlgorithmIdentifier
+	Issuer             asn1.RawValue
+	Validity           validity
+	Subject            asn1.RawValue
+	PublicKey          publicKeyInfo
+	UniqueId           asn1.BitString   `asn1:"optional,tag:1"`
+	SubjectUniqueId    asn1.BitString   `asn1:"optional,tag:2"`
+	Extensions         []pkix.Extension `asn1:"optional,explicit,tag:3"`
+}
+
+// RFC 4055, 4.1
+// The current ASN.1 parser does not support non-integer defaults so
+// the 'default:' tags here do nothing.
+type rsaesoaepAlgorithmParameters struct {
+	HashFunc    pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:0,default:sha1Identifier"`
+	MaskgenFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:1,default:mgf1SHA1Identifier"`
+	PSourceFunc pkix.AlgorithmIdentifier `asn1:"optional,explicit,tag:2,default:pSpecifiedEmptyIdentifier"`
+}
+
+type dsaAlgorithmParameters struct {
+	P, Q, G *big.Int
+}
+
+type dsaSignature struct {
+	R, S *big.Int
+}
+
+type ecdsaSignature dsaSignature
+
+type validity struct {
+	NotBefore, NotAfter time.Time
+}
+
+type publicKeyInfo struct {
+	Raw       asn1.RawContent
+	Algorithm pkix.AlgorithmIdentifier
+	PublicKey asn1.BitString
+}
+
+// RFC 5280, 4.2.1.1
+type authKeyId struct {
+	Id []byte `asn1:"optional,tag:0"`
+}
+
+// SignatureAlgorithm indicates the algorithm used to sign a certificate.
+type SignatureAlgorithm int
+
+// SignatureAlgorithm values:
+const (
+	UnknownSignatureAlgorithm SignatureAlgorithm = iota
+	MD2WithRSA
+	MD5WithRSA
+	SHA1WithRSA
+	SHA256WithRSA
+	SHA384WithRSA
+	SHA512WithRSA
+	DSAWithSHA1
+	DSAWithSHA256
+	ECDSAWithSHA1
+	ECDSAWithSHA256
+	ECDSAWithSHA384
+	ECDSAWithSHA512
+	SHA256WithRSAPSS
+	SHA384WithRSAPSS
+	SHA512WithRSAPSS
+)
+
+// RFC 4055, 6. Basic object identifiers
+var oidpSpecified = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 9}
+
+// These are the default parameters for an RSAES-OAEP pubkey.
+// The current ASN.1 parser does not support non-integer defaults so
+// these currently do nothing.
+var (
+	sha1Identifier = pkix.AlgorithmIdentifier{
+		Algorithm:  oidSHA1,
+		Parameters: asn1.NullRawValue,
+	}
+	mgf1SHA1Identifier = pkix.AlgorithmIdentifier{
+		Algorithm: oidMGF1,
+		// RFC 4055, 2.1 sha1Identifier
+		Parameters: asn1.RawValue{
+			Class:      asn1.ClassUniversal,
+			Tag:        asn1.TagSequence,
+			IsCompound: false,
+			Bytes:      []byte{6, 5, 43, 14, 3, 2, 26, 5, 0},
+			FullBytes:  []byte{16, 9, 6, 5, 43, 14, 3, 2, 26, 5, 0}},
+	}
+	pSpecifiedEmptyIdentifier = pkix.AlgorithmIdentifier{
+		Algorithm: oidpSpecified,
+		// RFC 4055, 4.1 nullOctetString
+		Parameters: asn1.RawValue{
+			Class:      asn1.ClassUniversal,
+			Tag:        asn1.TagOctetString,
+			IsCompound: false,
+			Bytes:      []byte{},
+			FullBytes:  []byte{4, 0}},
+	}
+)
+
+func (algo SignatureAlgorithm) isRSAPSS() bool {
+	switch algo {
+	case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS:
+		return true
+	default:
+		return false
+	}
+}
+
+func (algo SignatureAlgorithm) String() string {
+	for _, details := range signatureAlgorithmDetails {
+		if details.algo == algo {
+			return details.name
+		}
+	}
+	return strconv.Itoa(int(algo))
+}
+
+// PublicKeyAlgorithm indicates the algorithm used for a certificate's public key.
+type PublicKeyAlgorithm int
+
+// PublicKeyAlgorithm values:
+const (
+	UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota
+	RSA
+	DSA
+	ECDSA
+	RSAESOAEP
+	Ed25519
+)
+
+var publicKeyAlgoName = [...]string{
+	RSA:       "RSA",
+	DSA:       "DSA",
+	ECDSA:     "ECDSA",
+	RSAESOAEP: "RSAESOAEP",
+	Ed25519:   "Ed25519",
+}
+
+func (algo PublicKeyAlgorithm) String() string {
+	if 0 < algo && int(algo) < len(publicKeyAlgoName) {
+		return publicKeyAlgoName[algo]
+	}
+	return strconv.Itoa(int(algo))
+}
+
+// OIDs for signature algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= {
+//	iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 }
+//
+// RFC 3279 2.2.1 RSA Signature Algorithms
+//
+// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 }
+//
+// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 }
+//
+// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 }
+//
+// dsaWithSha1 OBJECT IDENTIFIER ::= {
+//	iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 }
+//
+// RFC 3279 2.2.3 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= {
+//	iso(1) member-body(2) us(840) ansi-x962(10045)
+//	signatures(4) ecdsa-with-SHA1(1)}
+//
+// RFC 4055 5 PKCS #1 Version 1.5
+//
+// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 }
+//
+// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 }
+//
+// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 }
+//
+// RFC 5758 3.1 DSA Signature Algorithms
+//
+// dsaWithSha256 OBJECT IDENTIFIER ::= {
+//	joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+//	csor(3) algorithms(4) id-dsa-with-sha2(3) 2}
+//
+// RFC 5758 3.2 ECDSA Signature Algorithm
+//
+// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+//	us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 }
+//
+// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+//	us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 }
+//
+// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2)
+//	us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 }
+
+var (
+	oidSignatureMD2WithRSA    = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2}
+	oidSignatureMD5WithRSA    = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4}
+	oidSignatureSHA1WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5}
+	oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11}
+	oidSignatureSHA384WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12}
+	oidSignatureSHA512WithRSA   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13}
+	oidSignatureRSAPSS          = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10}
+	oidSignatureDSAWithSHA1     = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3}
+	oidSignatureDSAWithSHA256   = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2}
+	oidSignatureECDSAWithSHA1   = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1}
+	oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2}
+	oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3}
+	oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4}
+
+	oidSHA1   = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 26}
+	oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1}
+	oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2}
+	oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3}
+
+	oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8}
+
+	// oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA
+	// but it's specified by ISO. Microsoft's makecert.exe has been known
+	// to produce certificates with this OID.
+	oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29}
+)
+
+var signatureAlgorithmDetails = []struct {
+	algo       SignatureAlgorithm
+	name       string
+	oid        asn1.ObjectIdentifier
+	pubKeyAlgo PublicKeyAlgorithm
+	hash       crypto.Hash
+}{
+	{MD2WithRSA, "MD2-RSA", oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */},
+	{MD5WithRSA, "MD5-RSA", oidSignatureMD5WithRSA, RSA, crypto.MD5},
+	{SHA1WithRSA, "SHA1-RSA", oidSignatureSHA1WithRSA, RSA, crypto.SHA1},
+	{SHA1WithRSA, "SHA1-RSA", oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1},
+	{SHA256WithRSA, "SHA256-RSA", oidSignatureSHA256WithRSA, RSA, crypto.SHA256},
+	{SHA384WithRSA, "SHA384-RSA", oidSignatureSHA384WithRSA, RSA, crypto.SHA384},
+	{SHA512WithRSA, "SHA512-RSA", oidSignatureSHA512WithRSA, RSA, crypto.SHA512},
+	{SHA256WithRSAPSS, "SHA256-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA256},
+	{SHA384WithRSAPSS, "SHA384-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA384},
+	{SHA512WithRSAPSS, "SHA512-RSAPSS", oidSignatureRSAPSS, RSA, crypto.SHA512},
+	{DSAWithSHA1, "DSA-SHA1", oidSignatureDSAWithSHA1, DSA, crypto.SHA1},
+	{DSAWithSHA256, "DSA-SHA256", oidSignatureDSAWithSHA256, DSA, crypto.SHA256},
+	{ECDSAWithSHA1, "ECDSA-SHA1", oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1},
+	{ECDSAWithSHA256, "ECDSA-SHA256", oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256},
+	{ECDSAWithSHA384, "ECDSA-SHA384", oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384},
+	{ECDSAWithSHA512, "ECDSA-SHA512", oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512},
+}
+
+// pssParameters reflects the parameters in an AlgorithmIdentifier that
+// specifies RSA PSS. See RFC 3447, Appendix A.2.3.
+type pssParameters struct {
+	// The following three fields are not marked as
+	// optional because the default values specify SHA-1,
+	// which is no longer suitable for use in signatures.
+	Hash         pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"`
+	MGF          pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"`
+	SaltLength   int                      `asn1:"explicit,tag:2"`
+	TrailerField int                      `asn1:"optional,explicit,tag:3,default:1"`
+}
+
+// rsaPSSParameters returns an asn1.RawValue suitable for use as the Parameters
+// in an AlgorithmIdentifier that specifies RSA PSS.
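+//
+// For example (restating the code that follows), rsaPSSParameters(crypto.SHA256)
+// yields parameters whose message hash and MGF1 hash are both SHA-256 and whose
+// salt length is 32, the SHA-256 digest size.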
+func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue {
+	var hashOID asn1.ObjectIdentifier
+
+	switch hashFunc {
+	case crypto.SHA256:
+		hashOID = oidSHA256
+	case crypto.SHA384:
+		hashOID = oidSHA384
+	case crypto.SHA512:
+		hashOID = oidSHA512
+	}
+
+	params := pssParameters{
+		Hash: pkix.AlgorithmIdentifier{
+			Algorithm:  hashOID,
+			Parameters: asn1.NullRawValue,
+		},
+		MGF: pkix.AlgorithmIdentifier{
+			Algorithm: oidMGF1,
+		},
+		SaltLength:   hashFunc.Size(),
+		TrailerField: 1,
+	}
+
+	mgf1Params := pkix.AlgorithmIdentifier{
+		Algorithm:  hashOID,
+		Parameters: asn1.NullRawValue,
+	}
+
+	var err error
+	params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params)
+	if err != nil {
+		panic(err)
+	}
+
+	serialized, err := asn1.Marshal(params)
+	if err != nil {
+		panic(err)
+	}
+
+	return asn1.RawValue{FullBytes: serialized}
+}
+
+// SignatureAlgorithmFromAI converts a PKIX algorithm identifier to the
+// equivalent local constant.
+func SignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm {
+	if !ai.Algorithm.Equal(oidSignatureRSAPSS) {
+		for _, details := range signatureAlgorithmDetails {
+			if ai.Algorithm.Equal(details.oid) {
+				return details.algo
+			}
+		}
+		return UnknownSignatureAlgorithm
+	}
+
+	// RSA PSS is special because it encodes important parameters
+	// in the Parameters.
+
+	var params pssParameters
+	if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, &params); err != nil {
+		return UnknownSignatureAlgorithm
+	}
+
+	var mgf1HashFunc pkix.AlgorithmIdentifier
+	if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil {
+		return UnknownSignatureAlgorithm
+	}
+
+	// PSS is greatly overburdened with options. This code forces them into
+	// three buckets by requiring that the MGF1 hash function always match the
+	// message hash function (as recommended in RFC 3447, Section 8.1), that the
+	// salt length matches the hash length, and that the trailer field has the
+	// default value.
+	if (len(params.Hash.Parameters.FullBytes) != 0 && !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes)) ||
+		!params.MGF.Algorithm.Equal(oidMGF1) ||
+		!mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) ||
+		(len(mgf1HashFunc.Parameters.FullBytes) != 0 && !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes)) ||
+		params.TrailerField != 1 {
+		return UnknownSignatureAlgorithm
+	}
+
+	switch {
+	case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32:
+		return SHA256WithRSAPSS
+	case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48:
+		return SHA384WithRSAPSS
+	case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64:
+		return SHA512WithRSAPSS
+	}
+
+	return UnknownSignatureAlgorithm
+}
+
+// RFC 3279, 2.3 Public Key Algorithms
+//
+// pkcs-1 OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840)
+//	rsadsi(113549) pkcs(1) 1 }
+//
+// rsaEncryption OBJECT IDENTIFIER ::= { pkcs-1 1 }
+//
+// id-dsa OBJECT IDENTIFIER ::= { iso(1) member-body(2) us(840)
+//	x9-57(10040) x9cm(4) 1 }
+//
+// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters
+//
+// id-ecPublicKey OBJECT IDENTIFIER ::= {
+//	iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 }
+var (
+	OIDPublicKeyRSA         = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
+	OIDPublicKeyRSAESOAEP   = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 7}
+	OIDPublicKeyDSA         = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1}
+	OIDPublicKeyECDSA       = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
+	OIDPublicKeyRSAObsolete = asn1.ObjectIdentifier{2, 5, 8, 1, 1}
+	// From RFC 8410, section 3
+	OIDPublicKeyEd25519 = asn1.ObjectIdentifier{1, 3, 101, 112}
+)
+
+func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm {
+	switch {
+	case oid.Equal(OIDPublicKeyRSA):
+		return RSA
+	case oid.Equal(OIDPublicKeyDSA):
+		return DSA
+	case oid.Equal(OIDPublicKeyECDSA):
+		return ECDSA
+	case oid.Equal(OIDPublicKeyRSAESOAEP):
+		return RSAESOAEP
+	case oid.Equal(OIDPublicKeyEd25519):
+		return Ed25519
+	}
+	return UnknownPublicKeyAlgorithm
+}
+
+// RFC 5480, 2.1.1.1. Named Curve
+//
+// secp224r1 OBJECT IDENTIFIER ::= {
+//	iso(1) identified-organization(3) certicom(132) curve(0) 33 }
+//
+// secp256r1 OBJECT IDENTIFIER ::= {
+//	iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+//	prime(1) 7 }
+//
+// secp384r1 OBJECT IDENTIFIER ::= {
+//	iso(1) identified-organization(3) certicom(132) curve(0) 34 }
+//
+// secp521r1 OBJECT IDENTIFIER ::= {
+//	iso(1) identified-organization(3) certicom(132) curve(0) 35 }
+//
+// secp192r1 OBJECT IDENTIFIER ::= {
+//	iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3)
+//	prime(1) 1 }
+//
+// NB: secp256r1 is equivalent to prime256v1,
+// secp192r1 is equivalent to ansix9p192r and prime192v1
+var (
+	OIDNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
+	OIDNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
+	OIDNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
+	OIDNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
+	OIDNamedCurveP192 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 1}
+)
+
+func namedCurveFromOID(oid asn1.ObjectIdentifier, nfe *NonFatalErrors) elliptic.Curve {
+	switch {
+	case oid.Equal(OIDNamedCurveP224):
+		return elliptic.P224()
+	case oid.Equal(OIDNamedCurveP256):
+		return elliptic.P256()
+	case oid.Equal(OIDNamedCurveP384):
+		return elliptic.P384()
+	case oid.Equal(OIDNamedCurveP521):
+		return elliptic.P521()
+	case oid.Equal(OIDNamedCurveP192):
+		nfe.AddError(errors.New("insecure curve (secp192r1) specified"))
+		return secp192r1()
+	}
+	return nil
+}
+
+// OIDFromNamedCurve returns the OID used to specify the use of the given
+// elliptic curve.
+func OIDFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
+	switch curve {
+	case elliptic.P224():
+		return OIDNamedCurveP224, true
+	case elliptic.P256():
+		return OIDNamedCurveP256, true
+	case elliptic.P384():
+		return OIDNamedCurveP384, true
+	case elliptic.P521():
+		return OIDNamedCurveP521, true
+	case secp192r1():
+		return OIDNamedCurveP192, true
+	}
+
+	return nil, false
+}
+
+// KeyUsage represents the set of actions that are valid for a given key. It's
+// a bitmap of the KeyUsage* constants.
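+//
+// Being a bitmap, values are combined with bitwise OR and tested with
+// bitwise AND (illustrative):
+//
+//	usage := KeyUsageCertSign | KeyUsageCRLSign
+//	if usage&KeyUsageCertSign != 0 {
+//		// the key may be used to verify certificate signatures
+//	}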
+type KeyUsage int
+
+// KeyUsage values:
+const (
+	KeyUsageDigitalSignature KeyUsage = 1 << iota
+	KeyUsageContentCommitment
+	KeyUsageKeyEncipherment
+	KeyUsageDataEncipherment
+	KeyUsageKeyAgreement
+	KeyUsageCertSign
+	KeyUsageCRLSign
+	KeyUsageEncipherOnly
+	KeyUsageDecipherOnly
+)
+
+// RFC 5280, 4.2.1.12 Extended Key Usage
+//
+// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 }
+//
+// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 }
+//
+// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 }
+// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 }
+// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 }
+// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 }
+// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 }
+// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 }
+var (
+	oidExtKeyUsageAny                            = asn1.ObjectIdentifier{2, 5, 29, 37, 0}
+	oidExtKeyUsageServerAuth                     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1}
+	oidExtKeyUsageClientAuth                     = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2}
+	oidExtKeyUsageCodeSigning                    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3}
+	oidExtKeyUsageEmailProtection                = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4}
+	oidExtKeyUsageIPSECEndSystem                 = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5}
+	oidExtKeyUsageIPSECTunnel                    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6}
+	oidExtKeyUsageIPSECUser                      = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7}
+	oidExtKeyUsageTimeStamping                   = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8}
+	oidExtKeyUsageOCSPSigning                    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9}
+	oidExtKeyUsageMicrosoftServerGatedCrypto     = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3}
+	oidExtKeyUsageNetscapeServerGatedCrypto      = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1}
+	oidExtKeyUsageMicrosoftCommercialCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 2, 1, 22}
+	oidExtKeyUsageMicrosoftKernelCodeSigning     = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1}
+	// RFC 6962 s3.1
+	oidExtKeyUsageCertificateTransparency = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 4}
+)
+
+// ExtKeyUsage represents an extended set of actions that are valid for a given key.
+// Each of the ExtKeyUsage* constants defines a unique action.
+type ExtKeyUsage int
+
+// ExtKeyUsage values:
+const (
+	ExtKeyUsageAny ExtKeyUsage = iota
+	ExtKeyUsageServerAuth
+	ExtKeyUsageClientAuth
+	ExtKeyUsageCodeSigning
+	ExtKeyUsageEmailProtection
+	ExtKeyUsageIPSECEndSystem
+	ExtKeyUsageIPSECTunnel
+	ExtKeyUsageIPSECUser
+	ExtKeyUsageTimeStamping
+	ExtKeyUsageOCSPSigning
+	ExtKeyUsageMicrosoftServerGatedCrypto
+	ExtKeyUsageNetscapeServerGatedCrypto
+	ExtKeyUsageMicrosoftCommercialCodeSigning
+	ExtKeyUsageMicrosoftKernelCodeSigning
+	ExtKeyUsageCertificateTransparency
+)
+
+// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID.
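+//
+// The helpers that follow translate in both directions (sketch):
+//
+//	if eku, ok := extKeyUsageFromOID(oidExtKeyUsageServerAuth); ok {
+//		_ = eku // ExtKeyUsageServerAuth
+//	}
+//	oid, _ := oidFromExtKeyUsage(ExtKeyUsageServerAuth)
+//	_ = oid // 1.3.6.1.5.5.7.3.1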
+var extKeyUsageOIDs = []struct {
+	extKeyUsage ExtKeyUsage
+	oid         asn1.ObjectIdentifier
+}{
+	{ExtKeyUsageAny, oidExtKeyUsageAny},
+	{ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth},
+	{ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth},
+	{ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning},
+	{ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection},
+	{ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem},
+	{ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel},
+	{ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser},
+	{ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping},
+	{ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning},
+	{ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto},
+	{ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto},
+	{ExtKeyUsageMicrosoftCommercialCodeSigning, oidExtKeyUsageMicrosoftCommercialCodeSigning},
+	{ExtKeyUsageMicrosoftKernelCodeSigning, oidExtKeyUsageMicrosoftKernelCodeSigning},
+	{ExtKeyUsageCertificateTransparency, oidExtKeyUsageCertificateTransparency},
+}
+
+func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) {
+	for _, pair := range extKeyUsageOIDs {
+		if oid.Equal(pair.oid) {
+			return pair.extKeyUsage, true
+		}
+	}
+	return
+}
+
+func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) {
+	for _, pair := range extKeyUsageOIDs {
+		if eku == pair.extKeyUsage {
+			return pair.oid, true
+		}
+	}
+	return
+}
+
+// SerializedSCT represents a single TLS-encoded signed certificate timestamp, from RFC6962 s3.3.
+type SerializedSCT struct {
+	Val []byte `tls:"minlen:1,maxlen:65535"`
+}
+
+// SignedCertificateTimestampList is a list of signed certificate timestamps, from RFC6962 s3.3.
+type SignedCertificateTimestampList struct {
+	SCTList []SerializedSCT `tls:"minlen:1,maxlen:65535"`
+}
+
+// A Certificate represents an X.509 certificate.
+type Certificate struct {
+	Raw                     []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature).
+	RawTBSCertificate       []byte // Certificate part of raw ASN.1 DER content.
+	RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+	RawSubject              []byte // DER encoded Subject
+	RawIssuer               []byte // DER encoded Issuer
+
+	Signature          []byte
+	SignatureAlgorithm SignatureAlgorithm
+
+	PublicKeyAlgorithm PublicKeyAlgorithm
+	PublicKey          interface{}
+
+	Version             int
+	SerialNumber        *big.Int
+	Issuer              pkix.Name
+	Subject             pkix.Name
+	NotBefore, NotAfter time.Time // Validity bounds.
+	KeyUsage            KeyUsage
+
+	// Extensions contains raw X.509 extensions. When parsing certificates,
+	// this can be used to extract non-critical extensions that are not
+	// parsed by this package. When marshaling certificates, the Extensions
+	// field is ignored, see ExtraExtensions.
+	Extensions []pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any
+	// marshaled certificates. Values override any extensions that would
+	// otherwise be produced based on the other fields. The ExtraExtensions
+	// field is not populated when parsing certificates, see Extensions.
+	ExtraExtensions []pkix.Extension
+
+	// UnhandledCriticalExtensions contains a list of extension IDs that
+	// were not (fully) processed when parsing. Verify will fail if this
+	// slice is non-empty, unless verification is delegated to an OS
+	// library which understands all the critical extensions.
+	//
+	// Users can access these extensions using Extensions and can remove
+	// elements from this slice if they believe that they have been
+	// handled.
+	UnhandledCriticalExtensions []asn1.ObjectIdentifier
+
+	ExtKeyUsage        []ExtKeyUsage           // Sequence of extended key usages.
+	UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
+
+	// BasicConstraintsValid indicates whether IsCA, MaxPathLen,
+	// and MaxPathLenZero are valid.
+	BasicConstraintsValid bool
+	IsCA                  bool
+
+	// MaxPathLen and MaxPathLenZero indicate the presence and
+	// value of the BasicConstraints' "pathLenConstraint".
+	//
+	// When parsing a certificate, a positive non-zero MaxPathLen
+	// means that the field was specified, -1 means it was unset,
+	// and MaxPathLenZero being true means that the field was
+	// explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false
+	// should be treated as equivalent to -1 (unset).
+	//
+	// When generating a certificate, an unset pathLenConstraint
+	// can be requested with either MaxPathLen == -1 or using the
+	// zero value for both MaxPathLen and MaxPathLenZero.
+	MaxPathLen int
+	// MaxPathLenZero indicates that BasicConstraintsValid==true
+	// and MaxPathLen==0 should be interpreted as an actual
+	// maximum path length of zero. Otherwise, that combination is
+	// interpreted as MaxPathLen not being set.
+	MaxPathLenZero bool
+
+	SubjectKeyId   []byte
+	AuthorityKeyId []byte
+
+	// RFC 5280, 4.2.2.1 (Authority Information Access)
+	OCSPServer            []string
+	IssuingCertificateURL []string
+
+	// Subject Information Access
+	SubjectTimestamps     []string
+	SubjectCARepositories []string
+
+	// Subject Alternative Name values. (Note that these values may not be valid
+	// if invalid values were contained within a parsed certificate. For
+	// example, an element of DNSNames may not be a valid DNS domain name.)
+	DNSNames       []string
+	EmailAddresses []string
+	IPAddresses    []net.IP
+	URIs           []*url.URL
+
+	// Name constraints
+	PermittedDNSDomainsCritical bool // if true then the name constraints are marked critical.
+	PermittedDNSDomains         []string
+	ExcludedDNSDomains          []string
+	PermittedIPRanges           []*net.IPNet
+	ExcludedIPRanges            []*net.IPNet
+	PermittedEmailAddresses     []string
+	ExcludedEmailAddresses      []string
+	PermittedURIDomains         []string
+	ExcludedURIDomains          []string
+
+	// CRL Distribution Points
+	CRLDistributionPoints []string
+
+	PolicyIdentifiers []asn1.ObjectIdentifier
+
+	RPKIAddressRanges                   []*IPAddressFamilyBlocks
+	RPKIASNumbers, RPKIRoutingDomainIDs *ASIdentifiers
+
+	// Certificate Transparency SCT extension contents; this is a TLS-encoded
+	// SignedCertificateTimestampList (RFC 6962 s3.3).
+	RawSCT  []byte
+	SCTList SignedCertificateTimestampList
+}
+
+// ErrUnsupportedAlgorithm results from attempting to perform an operation that
+// involves algorithms that are not currently implemented.
+var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
+
+// InsecureAlgorithmError results when the signature algorithm for a certificate
+// is known to be insecure.
+type InsecureAlgorithmError SignatureAlgorithm
+
+func (e InsecureAlgorithmError) Error() string {
+	return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e))
+}
+
+// ConstraintViolationError results when a requested usage is not permitted by
+// a certificate. For example: checking a signature when the public key isn't a
+// certificate signing key.
+type ConstraintViolationError struct{} + +func (ConstraintViolationError) Error() string { + return "x509: invalid signature: parent certificate cannot sign this kind of certificate" +} + +// Equal indicates whether two Certificate objects are equal (by comparing their +// DER-encoded values). +func (c *Certificate) Equal(other *Certificate) bool { + return bytes.Equal(c.Raw, other.Raw) +} + +// IsPrecertificate checks whether the certificate is a precertificate, by +// checking for the presence of the CT Poison extension. +func (c *Certificate) IsPrecertificate() bool { + if c == nil { + return false + } + for _, ext := range c.Extensions { + if ext.Id.Equal(OIDExtensionCTPoison) { + return true + } + } + return false +} + +func (c *Certificate) hasSANExtension() bool { + return oidInExtensions(OIDExtensionSubjectAltName, c.Extensions) +} + +// Entrust have a broken root certificate (CN=Entrust.net Certification +// Authority (2048)) which isn't marked as a CA certificate and is thus invalid +// according to PKIX. +// We recognise this certificate by its SubjectPublicKeyInfo and exempt it +// from the Basic Constraints requirement. +// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869 +// +// TODO(agl): remove this hack once their reissued root is sufficiently +// widespread. +var entrustBrokenSPKI = []byte{ + 0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09, + 0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, + 0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00, + 0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01, + 0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05, + 0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3, + 0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff, + 0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10, + 0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff, + 0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50, + 0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8, + 0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6, + 0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04, + 0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c, + 0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65, + 0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38, + 0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda, + 0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9, + 0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7, + 0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37, + 0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde, + 0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6, + 0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c, + 0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a, + 0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5, + 0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2, + 0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc, + 0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4, + 0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b, + 0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e, + 0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48, + 0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05, + 0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09, + 0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2, + 0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d, + 0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68, + 0x55, 0x02, 0x03, 0x01, 0x00, 0x01, +} + +// CheckSignatureFrom verifies that the signature on c is a valid signature +// from parent. 
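+//
+// Typical use when checking one link of a chain (sketch; child and issuer are
+// assumed to be previously parsed certificates):
+//
+//	if err := child.CheckSignatureFrom(issuer); err != nil {
+//		// issuer did not sign child
+//	}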
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) error {
+	// RFC 5280, 4.2.1.9:
+	// "If the basic constraints extension is not present in a version 3
+	// certificate, or the extension is present but the cA boolean is not
+	// asserted, then the certified public key MUST NOT be used to verify
+	// certificate signatures."
+	// (except for Entrust, see comment above entrustBrokenSPKI)
+	if (parent.Version == 3 && !parent.BasicConstraintsValid ||
+		parent.BasicConstraintsValid && !parent.IsCA) &&
+		!bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) {
+		return ConstraintViolationError{}
+	}
+
+	if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
+		return ConstraintViolationError{}
+	}
+
+	if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
+		return ErrUnsupportedAlgorithm
+	}
+
+	// TODO(agl): don't ignore the path length constraint.
+
+	return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
+}
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// c's public key.
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) error {
+	return checkSignature(algo, signed, signature, c.PublicKey)
+}
+
+func (c *Certificate) hasNameConstraints() bool {
+	return oidInExtensions(OIDExtensionNameConstraints, c.Extensions)
+}
+
+func (c *Certificate) getSANExtension() []byte {
+	for _, e := range c.Extensions {
+		if e.Id.Equal(OIDExtensionSubjectAltName) {
+			return e.Value
+		}
+	}
+
+	return nil
+}
+
+func signaturePublicKeyAlgoMismatchError(expectedPubKeyAlgo PublicKeyAlgorithm, pubKey interface{}) error {
+	return fmt.Errorf("x509: signature algorithm specifies an %s public key, but have public key of type %T", expectedPubKeyAlgo.String(), pubKey)
+}
+
+// checkSignature verifies that signature is a valid signature over signed from
+// a crypto.PublicKey.
+func checkSignature(algo SignatureAlgorithm, signed, signature []byte, publicKey crypto.PublicKey) (err error) { + var hashType crypto.Hash + var pubKeyAlgo PublicKeyAlgorithm + + for _, details := range signatureAlgorithmDetails { + if details.algo == algo { + hashType = details.hash + pubKeyAlgo = details.pubKeyAlgo + } + } + + switch hashType { + case crypto.Hash(0): + return ErrUnsupportedAlgorithm + case crypto.MD5: + return InsecureAlgorithmError(algo) + } + + if !hashType.Available() { + return ErrUnsupportedAlgorithm + } + h := hashType.New() + + h.Write(signed) + digest := h.Sum(nil) + + switch pub := publicKey.(type) { + case *rsa.PublicKey: + if pubKeyAlgo != RSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + if algo.isRSAPSS() { + return rsa.VerifyPSS(pub, hashType, digest, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}) + } else { + return rsa.VerifyPKCS1v15(pub, hashType, digest, signature) + } + case *dsa.PublicKey: + if pubKeyAlgo != DSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + dsaSig := new(dsaSignature) + if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after DSA signature") + } + if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 { + return errors.New("x509: DSA signature contained zero or negative values") + } + if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) { + return errors.New("x509: DSA verification failure") + } + return + case *ecdsa.PublicKey: + if pubKeyAlgo != ECDSA { + return signaturePublicKeyAlgoMismatchError(pubKeyAlgo, pub) + } + ecdsaSig := new(ecdsaSignature) + if rest, err := asn1.Unmarshal(signature, ecdsaSig); err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after ECDSA signature") + } + if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { + return errors.New("x509: ECDSA signature contained zero or negative values") + } + if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) { + return errors.New("x509: ECDSA verification failure") + } + return + } + return ErrUnsupportedAlgorithm +} + +// CheckCRLSignature checks that the signature in crl is from c. +func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error { + algo := SignatureAlgorithmFromAI(crl.SignatureAlgorithm) + return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign()) +} + +// UnhandledCriticalExtension results when the certificate contains an extension +// that is marked as critical but which is not handled by this library. +type UnhandledCriticalExtension struct { + ID asn1.ObjectIdentifier +} + +func (h UnhandledCriticalExtension) Error() string { + return fmt.Sprintf("x509: unhandled critical extension (%v)", h.ID) +} + +// removeExtension takes a DER-encoded TBSCertificate, removes the extension +// specified by oid (preserving the order of other extensions), and returns the +// result still as a DER-encoded TBSCertificate. This function will fail if +// there is not exactly 1 extension of the type specified by the oid present. 
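+//
+// For example, RemoveCTPoison (below) is implemented in terms of this
+// function, stripping the extension identified by OIDExtensionCTPoison from a
+// precertificate's TBSCertificate.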
+func removeExtension(tbsData []byte, oid asn1.ObjectIdentifier) ([]byte, error) { + var tbs tbsCertificate + rest, err := asn1.Unmarshal(tbsData, &tbs) + if err != nil { + return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) + } else if rLen := len(rest); rLen > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) + } + extAt := -1 + for i, ext := range tbs.Extensions { + if ext.Id.Equal(oid) { + if extAt != -1 { + return nil, errors.New("multiple extensions of specified type present") + } + extAt = i + } + } + if extAt == -1 { + return nil, errors.New("no extension of specified type present") + } + tbs.Extensions = append(tbs.Extensions[:extAt], tbs.Extensions[extAt+1:]...) + // Clear out the asn1.RawContent so the re-marshal operation sees the + // updated structure (rather than just copying the out-of-date DER data). + tbs.Raw = nil + + data, err := asn1.Marshal(tbs) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) + } + return data, nil +} + +// RemoveSCTList takes a DER-encoded TBSCertificate and removes the CT SCT +// extension that contains the SCT list (preserving the order of other +// extensions), and returns the result still as a DER-encoded TBSCertificate. +// This function will fail if there is not exactly 1 CT SCT extension present. +func RemoveSCTList(tbsData []byte) ([]byte, error) { + return removeExtension(tbsData, OIDExtensionCTSCT) +} + +// RemoveCTPoison takes a DER-encoded TBSCertificate and removes the CT poison +// extension (preserving the order of other extensions), and returns the result +// still as a DER-encoded TBSCertificate. This function will fail if there is +// not exactly 1 CT poison extension present. +func RemoveCTPoison(tbsData []byte) ([]byte, error) { + return BuildPrecertTBS(tbsData, nil) +} + +// BuildPrecertTBS builds a Certificate Transparency pre-certificate (RFC 6962 +// s3.1) from the given DER-encoded TBSCertificate, returning a DER-encoded +// TBSCertificate. +// +// This function removes the CT poison extension (there must be exactly 1 of +// these), preserving the order of other extensions. +// +// If preIssuer is provided, this should be a special intermediate certificate +// that was used to sign the precert (indicated by having the special +// CertificateTransparency extended key usage). In this case, the issuance +// information of the pre-cert is updated to reflect the next issuer in the +// chain, i.e. the issuer of this special intermediate: +// - The precert's Issuer is changed to the Issuer of the intermediate +// - The precert's AuthorityKeyId is changed to the AuthorityKeyId of the +// intermediate. +func BuildPrecertTBS(tbsData []byte, preIssuer *Certificate) ([]byte, error) { + data, err := removeExtension(tbsData, OIDExtensionCTPoison) + if err != nil { + return nil, err + } + + var tbs tbsCertificate + rest, err := asn1.Unmarshal(data, &tbs) + if err != nil { + return nil, fmt.Errorf("failed to parse TBSCertificate: %v", err) + } else if rLen := len(rest); rLen > 0 { + return nil, fmt.Errorf("trailing data (%d bytes) after TBSCertificate", rLen) + } + + if preIssuer != nil { + // Update the precert's Issuer field. Use the RawIssuer rather than the + // parsed Issuer to avoid any chance of ASN.1 differences (e.g. switching + // from UTF8String to PrintableString). + tbs.Issuer.FullBytes = preIssuer.RawIssuer + + // Also need to update the cert's AuthorityKeyID extension + // to that of the preIssuer. 
+ var issuerKeyID []byte + for _, ext := range preIssuer.Extensions { + if ext.Id.Equal(OIDExtensionAuthorityKeyId) { + issuerKeyID = ext.Value + break + } + } + + // Check the preIssuer has the CT EKU. + seenCTEKU := false + for _, eku := range preIssuer.ExtKeyUsage { + if eku == ExtKeyUsageCertificateTransparency { + seenCTEKU = true + break + } + } + if !seenCTEKU { + return nil, fmt.Errorf("issuer does not have CertificateTransparency extended key usage") + } + + keyAt := -1 + for i, ext := range tbs.Extensions { + if ext.Id.Equal(OIDExtensionAuthorityKeyId) { + keyAt = i + break + } + } + if keyAt >= 0 { + // PreCert has an auth-key-id; replace it with the value from the preIssuer + if issuerKeyID != nil { + tbs.Extensions[keyAt].Value = issuerKeyID + } else { + tbs.Extensions = append(tbs.Extensions[:keyAt], tbs.Extensions[keyAt+1:]...) + } + } else if issuerKeyID != nil { + // PreCert did not have an auth-key-id, but the preIssuer does, so add it at the end. + authKeyIDExt := pkix.Extension{ + Id: OIDExtensionAuthorityKeyId, + Critical: false, + Value: issuerKeyID, + } + tbs.Extensions = append(tbs.Extensions, authKeyIDExt) + } + + // Clear out the asn1.RawContent so the re-marshal operation sees the + // updated structure (rather than just copying the out-of-date DER data). + tbs.Raw = nil + } + + data, err = asn1.Marshal(tbs) + if err != nil { + return nil, fmt.Errorf("failed to re-marshal TBSCertificate: %v", err) + } + return data, nil +} + +type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` +} + +// RFC 5280, 4.2.1.4 +type policyInformation struct { + Policy asn1.ObjectIdentifier + // policyQualifiers omitted +} + +const ( + nameTypeEmail = 1 + nameTypeDNS = 2 + nameTypeURI = 6 + nameTypeIP = 7 +) + +// RFC 5280, 4.2.2.1 +type accessDescription struct { + Method asn1.ObjectIdentifier + Location asn1.RawValue +} + +// RFC 5280, 4.2.1.14 +type distributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + Reason asn1.BitString `asn1:"optional,tag:1"` + CRLIssuer asn1.RawValue `asn1:"optional,tag:2"` +} + +type distributionPointName struct { + FullName []asn1.RawValue `asn1:"optional,tag:0"` + RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` +} + +func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo, nfe *NonFatalErrors) (interface{}, error) { + asn1Data := keyData.PublicKey.RightAlign() + switch algo { + case RSA, RSAESOAEP: + // RSA public keys must have a NULL in the parameters. + // See RFC 3279, Section 2.3.1. 
+ if algo == RSA && !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
+ nfe.AddError(errors.New("x509: RSA key missing NULL parameters"))
+ }
+ if algo == RSAESOAEP {
+ // We only parse the parameters to ensure they are a valid encoding; the actual values are thrown out.
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(rsaesoaepAlgorithmParameters)
+ params.HashFunc = sha1Identifier
+ params.MaskgenFunc = mgf1SHA1Identifier
+ params.PSourceFunc = pSpecifiedEmptyIdentifier
+ rest, err := asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after RSAES-OAEP parameters")
+ }
+ }
+
+ p := new(pkcs1PublicKey)
+ rest, err := asn1.Unmarshal(asn1Data, p)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, p, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after RSA public key")
+ }
+
+ if p.N.Sign() <= 0 {
+ nfe.AddError(errors.New("x509: RSA modulus is not a positive number"))
+ }
+ if p.E <= 0 {
+ return nil, errors.New("x509: RSA public exponent is not a positive number")
+ }
+
+ // TODO(dkarch): Update to return the parameters once crypto/x509 has come up with permanent solution (https://github.com/golang/go/issues/30416)
+ pub := &rsa.PublicKey{
+ E: p.E,
+ N: p.N,
+ }
+ return pub, nil
+ case DSA:
+ var p *big.Int
+ rest, err := asn1.Unmarshal(asn1Data, &p)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &p, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after DSA public key")
+ }
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ params := new(dsaAlgorithmParameters)
+ rest, err = asn1.Unmarshal(paramsData, params)
+ if err != nil {
+ return nil, err
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after DSA parameters")
+ }
+ if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
+ return nil, errors.New("x509: zero or negative DSA parameter")
+ }
+ pub := &dsa.PublicKey{
+ Parameters: dsa.Parameters{
+ P: params.P,
+ Q: params.Q,
+ G: params.G,
+ },
+ Y: p,
+ }
+ return pub, nil
+ case ECDSA:
+ paramsData := keyData.Algorithm.Parameters.FullBytes
+ namedCurveOID := new(asn1.ObjectIdentifier)
+ rest, err := asn1.Unmarshal(paramsData, namedCurveOID)
+ if err != nil {
+ return nil, errors.New("x509: failed to parse ECDSA parameters as named curve")
+ }
+ if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after ECDSA parameters")
+ }
+ namedCurve := namedCurveFromOID(*namedCurveOID, nfe)
+ if namedCurve == nil {
+ return nil, fmt.Errorf("x509: unsupported elliptic curve %v", namedCurveOID)
+ }
+ x, y := elliptic.Unmarshal(namedCurve, asn1Data)
+ if x == nil {
+ return nil, errors.New("x509: failed to unmarshal elliptic curve point")
+ }
+ pub := &ecdsa.PublicKey{
+ Curve: namedCurve,
+ X: x,
+ Y: y,
+ }
+ return pub, nil
+ case Ed25519:
+ return ed25519.PublicKey(asn1Data), nil
+ default:
+ return nil, nil
+ }
+}
+
+// NonFatalErrors is an error type which can hold a number of other errors.
+// It's used to collect a range of non-fatal errors which occur while parsing
+// a certificate; that way we can still match on certs which technically are
+// invalid.
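+//
+// A caller-side sketch (der is assumed to hold a DER-encoded certificate;
+// ParseCertificate is defined later in this file):
+//
+//    cert, err := ParseCertificate(der)
+//    if err != nil {
+//        if nfe, ok := err.(NonFatalErrors); ok {
+//            _ = nfe.Errors // cert is still populated; inspect the issues
+//        } else {
+//            return err // fatal parse error, cert is nil
+//        }
+//    }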
+type NonFatalErrors struct {
+ Errors []error
+}
+
+// AddError adds an error to the list of errors contained by NonFatalErrors.
+func (e *NonFatalErrors) AddError(err error) {
+ e.Errors = append(e.Errors, err)
+}
+
+// Error returns a string consisting of the values of Error() from all of the
+// errors contained in |e|.
+func (e NonFatalErrors) Error() string {
+ r := "NonFatalErrors: "
+ for _, err := range e.Errors {
+ r += err.Error() + "; "
+ }
+ return r
+}
+
+// HasError reports whether |e| contains at least one error.
+func (e *NonFatalErrors) HasError() bool {
+ if e == nil {
+ return false
+ }
+ return len(e.Errors) > 0
+}
+
+// Append combines the contents of two NonFatalErrors instances.
+func (e *NonFatalErrors) Append(more *NonFatalErrors) *NonFatalErrors {
+ if e == nil {
+ return more
+ }
+ if more == nil {
+ return e
+ }
+ combined := NonFatalErrors{Errors: make([]error, 0, len(e.Errors)+len(more.Errors))}
+ combined.Errors = append(combined.Errors, e.Errors...)
+ combined.Errors = append(combined.Errors, more.Errors...)
+ return &combined
+}
+
+// IsFatal indicates whether an error is fatal.
+func IsFatal(err error) bool {
+ if err == nil {
+ return false
+ }
+ if _, ok := err.(NonFatalErrors); ok {
+ return false
+ }
+ if errs, ok := err.(*Errors); ok {
+ return errs.Fatal()
+ }
+ return true
+}
+
+func parseDistributionPoints(data []byte, crldp *[]string) error {
+ // CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
+ //
+ // DistributionPoint ::= SEQUENCE {
+ // distributionPoint [0] DistributionPointName OPTIONAL,
+ // reasons [1] ReasonFlags OPTIONAL,
+ // cRLIssuer [2] GeneralNames OPTIONAL }
+ //
+ // DistributionPointName ::= CHOICE {
+ // fullName [0] GeneralNames,
+ // nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
+
+ var cdp []distributionPoint
+ if rest, err := asn1.Unmarshal(data, &cdp); err != nil {
+ return err
+ } else if len(rest) != 0 {
+ return errors.New("x509: trailing data after X.509 CRL distribution point")
+ }
+
+ for _, dp := range cdp {
+ // Per RFC 5280, 4.2.1.13, one of distributionPoint or cRLIssuer may be empty.
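+ // Only fullName GeneralNames are collected below; points that carry
+ // only a cRLIssuer or a relative name are skipped.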
+ if len(dp.DistributionPoint.FullName) == 0 { + continue + } + + for _, fullName := range dp.DistributionPoint.FullName { + if fullName.Tag == 6 { + *crldp = append(*crldp, string(fullName.Bytes)) + } + } + } + return nil +} + +func forEachSAN(extension []byte, callback func(tag int, data []byte) error) error { + // RFC 5280, 4.2.1.6 + + // SubjectAltName ::= GeneralNames + // + // GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName + // + // GeneralName ::= CHOICE { + // otherName [0] OtherName, + // rfc822Name [1] IA5String, + // dNSName [2] IA5String, + // x400Address [3] ORAddress, + // directoryName [4] Name, + // ediPartyName [5] EDIPartyName, + // uniformResourceIdentifier [6] IA5String, + // iPAddress [7] OCTET STRING, + // registeredID [8] OBJECT IDENTIFIER } + var seq asn1.RawValue + rest, err := asn1.Unmarshal(extension, &seq) + if err != nil { + return err + } else if len(rest) != 0 { + return errors.New("x509: trailing data after X.509 extension") + } + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + return asn1.StructuralError{Msg: "bad SAN sequence"} + } + + rest = seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return err + } + + if err := callback(v.Tag, v.Bytes); err != nil { + return err + } + } + + return nil +} + +func parseSANExtension(value []byte, nfe *NonFatalErrors) (dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL, err error) { + err = forEachSAN(value, func(tag int, data []byte) error { + switch tag { + case nameTypeEmail: + emailAddresses = append(emailAddresses, string(data)) + case nameTypeDNS: + dnsNames = append(dnsNames, string(data)) + case nameTypeURI: + uri, err := url.Parse(string(data)) + if err != nil { + return fmt.Errorf("x509: cannot parse URI %q: %s", string(data), err) + } + if len(uri.Host) > 0 { + if _, ok := domainToReverseLabels(uri.Host); !ok { + return fmt.Errorf("x509: cannot parse URI %q: invalid domain", string(data)) + } + } + uris = append(uris, uri) + case nameTypeIP: + switch len(data) { + case net.IPv4len, net.IPv6len: + ipAddresses = append(ipAddresses, data) + default: + nfe.AddError(errors.New("x509: cannot parse IP address of length " + strconv.Itoa(len(data)))) + } + } + + return nil + }) + + return +} + +// isValidIPMask reports whether mask consists of zero or more 1 bits, followed by zero bits. 
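+// For example, {0xff, 0xf8, 0x00, 0x00} (a /13 mask) is accepted, while
+// {0xff, 0x00, 0xff, 0x00} is rejected because a 1 bit follows a zero byte.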
+func isValidIPMask(mask []byte) bool { + seenZero := false + + for _, b := range mask { + if seenZero { + if b != 0 { + return false + } + + continue + } + + switch b { + case 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe: + seenZero = true + case 0xff: + default: + return false + } + } + + return true +} + +func parseNameConstraintsExtension(out *Certificate, e pkix.Extension, nfe *NonFatalErrors) (unhandled bool, err error) { + // RFC 5280, 4.2.1.10 + + // NameConstraints ::= SEQUENCE { + // permittedSubtrees [0] GeneralSubtrees OPTIONAL, + // excludedSubtrees [1] GeneralSubtrees OPTIONAL } + // + // GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree + // + // GeneralSubtree ::= SEQUENCE { + // base GeneralName, + // minimum [0] BaseDistance DEFAULT 0, + // maximum [1] BaseDistance OPTIONAL } + // + // BaseDistance ::= INTEGER (0..MAX) + + outer := cryptobyte.String(e.Value) + var toplevel, permitted, excluded cryptobyte.String + var havePermitted, haveExcluded bool + if !outer.ReadASN1(&toplevel, cryptobyte_asn1.SEQUENCE) || + !outer.Empty() || + !toplevel.ReadOptionalASN1(&permitted, &havePermitted, cryptobyte_asn1.Tag(0).ContextSpecific().Constructed()) || + !toplevel.ReadOptionalASN1(&excluded, &haveExcluded, cryptobyte_asn1.Tag(1).ContextSpecific().Constructed()) || + !toplevel.Empty() { + return false, errors.New("x509: invalid NameConstraints extension") + } + + if !havePermitted && !haveExcluded || len(permitted) == 0 && len(excluded) == 0 { + // From RFC 5280, Section 4.2.1.10: + // “either the permittedSubtrees field + // or the excludedSubtrees MUST be + // present” + return false, errors.New("x509: empty name constraints extension") + } + + getValues := func(subtrees cryptobyte.String) (dnsNames []string, ips []*net.IPNet, emails, uriDomains []string, err error) { + for !subtrees.Empty() { + var seq, value cryptobyte.String + var tag cryptobyte_asn1.Tag + if !subtrees.ReadASN1(&seq, cryptobyte_asn1.SEQUENCE) || + !seq.ReadAnyASN1(&value, &tag) { + return nil, nil, nil, nil, fmt.Errorf("x509: invalid NameConstraints extension") + } + + var ( + dnsTag = cryptobyte_asn1.Tag(2).ContextSpecific() + emailTag = cryptobyte_asn1.Tag(1).ContextSpecific() + ipTag = cryptobyte_asn1.Tag(7).ContextSpecific() + uriTag = cryptobyte_asn1.Tag(6).ContextSpecific() + ) + + switch tag { + case dnsTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain + // itself, but that's not valid in a + // normal domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse dnsName constraint %q", domain)) + } + dnsNames = append(dnsNames, domain) + + case ipTag: + l := len(value) + var ip, mask []byte + + switch l { + case 8: + ip = value[:4] + mask = value[4:] + + case 32: + ip = value[:16] + mask = value[16:] + + default: + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained value of length %d", l) + } + + if !isValidIPMask(mask) { + return nil, nil, nil, nil, fmt.Errorf("x509: IP constraint contained invalid mask %x", mask) + } + + ips = append(ips, &net.IPNet{IP: net.IP(ip), Mask: net.IPMask(mask)}) + + case emailTag: + constraint := string(value) + if err := isIA5String(constraint); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + // If the constraint contains an @ then + // it specifies an exact mailbox name. + if strings.Contains(constraint, "@") { + if _, ok := parseRFC2821Mailbox(constraint); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)) + } + } else { + // Otherwise it's a domain name. + domain := constraint + if len(domain) > 0 && domain[0] == '.' { + domain = domain[1:] + } + if _, ok := domainToReverseLabels(domain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse rfc822Name constraint %q", constraint)) + } + } + emails = append(emails, constraint) + + case uriTag: + domain := string(value) + if err := isIA5String(domain); err != nil { + return nil, nil, nil, nil, errors.New("x509: invalid constraint value: " + err.Error()) + } + + if net.ParseIP(domain) != nil { + return nil, nil, nil, nil, fmt.Errorf("x509: failed to parse URI constraint %q: cannot be IP address", domain) + } + + trimmedDomain := domain + if len(trimmedDomain) > 0 && trimmedDomain[0] == '.' { + // constraints can have a leading + // period to exclude the domain itself, + // but that's not valid in a normal + // domain name. 
+ trimmedDomain = trimmedDomain[1:] + } + if _, ok := domainToReverseLabels(trimmedDomain); !ok { + nfe.AddError(fmt.Errorf("x509: failed to parse URI constraint %q", domain)) + } + uriDomains = append(uriDomains, domain) + + default: + unhandled = true + } + } + + return dnsNames, ips, emails, uriDomains, nil + } + + if out.PermittedDNSDomains, out.PermittedIPRanges, out.PermittedEmailAddresses, out.PermittedURIDomains, err = getValues(permitted); err != nil { + return false, err + } + if out.ExcludedDNSDomains, out.ExcludedIPRanges, out.ExcludedEmailAddresses, out.ExcludedURIDomains, err = getValues(excluded); err != nil { + return false, err + } + out.PermittedDNSDomainsCritical = e.Critical + + return unhandled, nil +} + +func parseCertificate(in *certificate) (*Certificate, error) { + var nfe NonFatalErrors + + out := new(Certificate) + out.Raw = in.Raw + out.RawTBSCertificate = in.TBSCertificate.Raw + out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw + out.RawSubject = in.TBSCertificate.Subject.FullBytes + out.RawIssuer = in.TBSCertificate.Issuer.FullBytes + + out.Signature = in.SignatureValue.RightAlign() + out.SignatureAlgorithm = SignatureAlgorithmFromAI(in.TBSCertificate.SignatureAlgorithm) + + out.PublicKeyAlgorithm = + getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm) + var err error + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey, &nfe) + if err != nil { + return nil, err + } + + out.Version = in.TBSCertificate.Version + 1 + out.SerialNumber = in.TBSCertificate.SerialNumber + + var issuer, subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Subject.FullBytes, &subject, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject") + } + if rest, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(in.TBSCertificate.Issuer.FullBytes, &issuer, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject") + } + + out.Issuer.FillFromRDNSequence(&issuer) + out.Subject.FillFromRDNSequence(&subject) + + out.NotBefore = in.TBSCertificate.Validity.NotBefore + out.NotAfter = in.TBSCertificate.Validity.NotAfter + + for _, e := range in.TBSCertificate.Extensions { + out.Extensions = append(out.Extensions, e) + unhandled := false + + if len(e.Id) == 4 && e.Id[0] == OIDExtensionArc[0] && e.Id[1] == OIDExtensionArc[1] && e.Id[2] == OIDExtensionArc[2] { + switch e.Id[3] { + case OIDExtensionKeyUsage[3]: + // RFC 5280, 4.2.1.3 + var usageBits asn1.BitString + if rest, err := asn1.Unmarshal(e.Value, &usageBits); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 KeyUsage") + } + + var usage int + for i := 0; i < 9; i++ { + if usageBits.At(i) != 0 { + usage |= 1 << uint(i) + } + } + out.KeyUsage = KeyUsage(usage) + + case OIDExtensionBasicConstraints[3]: + // RFC 5280, 4.2.1.9 + var constraints basicConstraints + if rest, err := asn1.Unmarshal(e.Value, &constraints); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 BasicConstraints") + } + 
+ out.BasicConstraintsValid = true + out.IsCA = constraints.IsCA + out.MaxPathLen = constraints.MaxPathLen + out.MaxPathLenZero = out.MaxPathLen == 0 + // TODO: map out.MaxPathLen to 0 if it has the -1 default value? (Issue 19285) + + case OIDExtensionSubjectAltName[3]: + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(e.Value, &nfe) + if err != nil { + return nil, err + } + + if len(out.DNSNames) == 0 && len(out.EmailAddresses) == 0 && len(out.IPAddresses) == 0 && len(out.URIs) == 0 { + // If we didn't parse anything then we do the critical check, below. + unhandled = true + } + + case OIDExtensionNameConstraints[3]: + unhandled, err = parseNameConstraintsExtension(out, e, &nfe) + if err != nil { + return nil, err + } + + case OIDExtensionCRLDistributionPoints[3]: + // RFC 5280, 4.2.1.13 + if err := parseDistributionPoints(e.Value, &out.CRLDistributionPoints); err != nil { + return nil, err + } + + case OIDExtensionAuthorityKeyId[3]: + // RFC 5280, 4.2.1.1 + var a authKeyId + if rest, err := asn1.Unmarshal(e.Value, &a); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 authority key-id") + } + out.AuthorityKeyId = a.Id + + case OIDExtensionExtendedKeyUsage[3]: + // RFC 5280, 4.2.1.12. Extended Key Usage + + // id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 } + // + // ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId + // + // KeyPurposeId ::= OBJECT IDENTIFIER + + var keyUsage []asn1.ObjectIdentifier + if len(e.Value) == 0 { + nfe.AddError(errors.New("x509: empty ExtendedKeyUsage")) + } else { + rest, err := asn1.Unmarshal(e.Value, &keyUsage) + if err != nil { + var laxErr error + rest, laxErr = asn1.UnmarshalWithParams(e.Value, &keyUsage, "lax") + if laxErr != nil { + return nil, laxErr + } + nfe.AddError(err) + } + if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 ExtendedKeyUsage") + } + } + + for _, u := range keyUsage { + if extKeyUsage, ok := extKeyUsageFromOID(u); ok { + out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage) + } else { + out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u) + } + } + + case OIDExtensionSubjectKeyId[3]: + // RFC 5280, 4.2.1.2 + var keyid []byte + if rest, err := asn1.Unmarshal(e.Value, &keyid); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 key-id") + } + out.SubjectKeyId = keyid + + case OIDExtensionCertificatePolicies[3]: + // RFC 5280 4.2.1.4: Certificate Policies + var policies []policyInformation + if rest, err := asn1.Unmarshal(e.Value, &policies); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 certificate policies") + } + out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies)) + for i, policy := range policies { + out.PolicyIdentifiers[i] = policy.Policy + } + + default: + // Unknown extensions are recorded if critical. 
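+ // (The e.Critical && unhandled check at the bottom of the loop
+ // turns these into UnhandledCriticalExtensions entries.)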
+ unhandled = true + } + } else if e.Id.Equal(OIDExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + var aia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &aia); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 authority information") + } + if len(aia) == 0 { + nfe.AddError(errors.New("x509: empty AuthorityInfoAccess extension")) + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(OIDAuthorityInfoAccessOCSP) { + out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes)) + } else if v.Method.Equal(OIDAuthorityInfoAccessIssuers) { + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(OIDExtensionSubjectInfoAccess) { + // RFC 5280 4.2.2.2: Subject Information Access + var sia []accessDescription + if rest, err := asn1.Unmarshal(e.Value, &sia); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 subject information") + } + if len(sia) == 0 { + nfe.AddError(errors.New("x509: empty SubjectInfoAccess extension")) + } + + for _, v := range sia { + // TODO(drysdale): cope with non-URI types of GeneralName + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(OIDSubjectInfoAccessTimestamp) { + out.SubjectTimestamps = append(out.SubjectTimestamps, string(v.Location.Bytes)) + } else if v.Method.Equal(OIDSubjectInfoAccessCARepo) { + out.SubjectCARepositories = append(out.SubjectCARepositories, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(OIDExtensionIPPrefixList) { + out.RPKIAddressRanges = parseRPKIAddrBlocks(e.Value, &nfe) + } else if e.Id.Equal(OIDExtensionASList) { + out.RPKIASNumbers, out.RPKIRoutingDomainIDs = parseRPKIASIdentifiers(e.Value, &nfe) + } else if e.Id.Equal(OIDExtensionCTSCT) { + if rest, err := asn1.Unmarshal(e.Value, &out.RawSCT); err != nil { + nfe.AddError(fmt.Errorf("failed to asn1.Unmarshal SCT list extension: %v", err)) + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after ASN1-encoded SCT list")) + } else { + if rest, err := tls.Unmarshal(out.RawSCT, &out.SCTList); err != nil { + nfe.AddError(fmt.Errorf("failed to tls.Unmarshal SCT list: %v", err)) + } else if len(rest) != 0 { + nfe.AddError(errors.New("trailing data after TLS-encoded SCT list")) + } + } + } else { + // Unknown extensions are recorded if critical. + unhandled = true + } + + if e.Critical && unhandled { + out.UnhandledCriticalExtensions = append(out.UnhandledCriticalExtensions, e.Id) + } + } + if nfe.HasError() { + return out, nfe + } + return out, nil +} + +// ParseTBSCertificate parses a single TBSCertificate from the given ASN.1 DER data. +// The parsed data is returned in a Certificate struct for ease of access. 
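+//
+// A minimal usage sketch (tbsDER is assumed to hold a DER-encoded
+// TBSCertificate, e.g. the output of BuildPrecertTBS above):
+//
+//    cert, err := ParseTBSCertificate(tbsDER)
+//    if err != nil && IsFatal(err) {
+//        return err
+//    }
+//    // on a NonFatalErrors value, cert is still populated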
+func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) {
+ var tbsCert tbsCertificate
+ var nfe NonFatalErrors
+ rest, err := asn1.Unmarshal(asn1Data, &tbsCert)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &tbsCert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+ ret, err := parseCertificate(&certificate{
+ Raw: tbsCert.Raw,
+ TBSCertificate: tbsCert})
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
+}
+
+// ParseCertificate parses a single certificate from the given ASN.1 DER data.
+// This function can return both a Certificate and an error (in which case the
+// error will be of type NonFatalErrors).
+func ParseCertificate(asn1Data []byte) (*Certificate, error) {
+ var cert certificate
+ var nfe NonFatalErrors
+ rest, err := asn1.Unmarshal(asn1Data, &cert)
+ if err != nil {
+ var laxErr error
+ rest, laxErr = asn1.UnmarshalWithParams(asn1Data, &cert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ if len(rest) > 0 {
+ return nil, asn1.SyntaxError{Msg: "trailing data"}
+ }
+ ret, err := parseCertificate(&cert)
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
+}
+
+// ParseCertificates parses one or more certificates from the given ASN.1 DER
+// data. The certificates must be concatenated with no intermediate padding.
+// This function can return both a slice of Certificate and an error (in which
+// case the error will be of type NonFatalErrors).
+func ParseCertificates(asn1Data []byte) ([]*Certificate, error) {
+ var v []*certificate
+ var nfe NonFatalErrors
+
+ for len(asn1Data) > 0 {
+ cert := new(certificate)
+ var err error
+ asn1Data, err = asn1.Unmarshal(asn1Data, cert)
+ if err != nil {
+ var laxErr error
+ asn1Data, laxErr = asn1.UnmarshalWithParams(asn1Data, cert, "lax")
+ if laxErr != nil {
+ return nil, laxErr
+ }
+ nfe.AddError(err)
+ }
+ v = append(v, cert)
+ }
+
+ ret := make([]*Certificate, len(v))
+ for i, ci := range v {
+ cert, err := parseCertificate(ci)
+ if err != nil {
+ errs, ok := err.(NonFatalErrors)
+ if !ok {
+ return nil, err
+ }
+ nfe.Errors = append(nfe.Errors, errs.Errors...)
+ }
+ ret[i] = cert
+ }
+
+ if nfe.HasError() {
+ return ret, nfe
+ }
+ return ret, nil
+}
+
+func reverseBitsInAByte(in byte) byte {
+ b1 := in>>4 | in<<4
+ b2 := b1>>2&0x33 | b1<<2&0xcc
+ b3 := b2>>1&0x55 | b2<<1&0xaa
+ return b3
+}
+
+// asn1BitLength returns the bit-length of bitString by considering the
+// most-significant bit in a byte to be the "first" bit. This convention
+// matches ASN.1, but differs from almost everything else.
+func asn1BitLength(bitString []byte) int {
+ bitLen := len(bitString) * 8
+
+ for i := range bitString {
+ b := bitString[len(bitString)-i-1]
+
+ for bit := uint(0); bit < 8; bit++ {
+ if (b>>bit)&1 == 1 {
+ return bitLen
+ }
+ bitLen--
+ }
+ }
+
+ return 0
+}
+
+// OID values for standard extensions from RFC 5280.
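+// For example, a membership test against a parsed certificate's extensions
+// might look like this (sketch):
+//
+//    for _, ext := range cert.Extensions {
+//        if ext.Id.Equal(OIDExtensionCTPoison) {
+//            // cert is a CT precertificate
+//        }
+//    }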
+var (
+ OIDExtensionArc = asn1.ObjectIdentifier{2, 5, 29} // id-ce RFC5280 s4.2.1
+ OIDExtensionSubjectKeyId = asn1.ObjectIdentifier{2, 5, 29, 14}
+ OIDExtensionKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 15}
+ OIDExtensionExtendedKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 37}
+ OIDExtensionAuthorityKeyId = asn1.ObjectIdentifier{2, 5, 29, 35}
+ OIDExtensionBasicConstraints = asn1.ObjectIdentifier{2, 5, 29, 19}
+ OIDExtensionSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17}
+ OIDExtensionCertificatePolicies = asn1.ObjectIdentifier{2, 5, 29, 32}
+ OIDExtensionNameConstraints = asn1.ObjectIdentifier{2, 5, 29, 30}
+ OIDExtensionCRLDistributionPoints = asn1.ObjectIdentifier{2, 5, 29, 31}
+ OIDExtensionIssuerAltName = asn1.ObjectIdentifier{2, 5, 29, 18}
+ OIDExtensionSubjectDirectoryAttributes = asn1.ObjectIdentifier{2, 5, 29, 9}
+ OIDExtensionInhibitAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 54}
+ OIDExtensionPolicyConstraints = asn1.ObjectIdentifier{2, 5, 29, 36}
+ OIDExtensionPolicyMappings = asn1.ObjectIdentifier{2, 5, 29, 33}
+ OIDExtensionFreshestCRL = asn1.ObjectIdentifier{2, 5, 29, 46}
+
+ OIDExtensionAuthorityInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1}
+ OIDExtensionSubjectInfoAccess = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}
+
+ // OIDExtensionCTPoison is defined in RFC 6962 s3.1.
+ OIDExtensionCTPoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3}
+ // OIDExtensionCTSCT is defined in RFC 6962 s3.3.
+ OIDExtensionCTSCT = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+ // OIDExtensionIPPrefixList is defined in RFC 3779 s2.
+ OIDExtensionIPPrefixList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 7}
+ // OIDExtensionASList is defined in RFC 3779 s3.
+ OIDExtensionASList = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 8}
+)
+
+var (
+ OIDAuthorityInfoAccessOCSP = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
+ OIDAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+ OIDSubjectInfoAccessTimestamp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 3}
+ OIDSubjectInfoAccessCARepo = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 5}
+ OIDAnyPolicy = asn1.ObjectIdentifier{2, 5, 29, 32, 0}
+)
+
+// oidInExtensions reports whether an extension with the given oid exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+ for _, e := range extensions {
+ if e.Id.Equal(oid) {
+ return true
+ }
+ }
+ return false
+}
+
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP, uris []*url.URL) (derBytes []byte, err error) {
+ var rawValues []asn1.RawValue
+ for _, name := range dnsNames {
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeDNS, Class: asn1.ClassContextSpecific, Bytes: []byte(name)})
+ }
+ for _, email := range emailAddresses {
+ rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeEmail, Class: asn1.ClassContextSpecific, Bytes: []byte(email)})
+ }
+ for _, rawIP := range ipAddresses {
+ // If possible, we always want to encode IPv4 addresses in 4 bytes.
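+ // (net.IP.To4 returns nil for IPv6 addresses, which therefore keep
+ // their 16-byte representation.)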
+ ip := rawIP.To4() + if ip == nil { + ip = rawIP + } + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeIP, Class: asn1.ClassContextSpecific, Bytes: ip}) + } + for _, uri := range uris { + rawValues = append(rawValues, asn1.RawValue{Tag: nameTypeURI, Class: asn1.ClassContextSpecific, Bytes: []byte(uri.String())}) + } + return asn1.Marshal(rawValues) +} + +func isIA5String(s string) error { + for _, r := range s { + if r >= utf8.RuneSelf { + return fmt.Errorf("x509: %q cannot be encoded as an IA5String", s) + } + } + + return nil +} + +func buildExtensions(template *Certificate, subjectIsEmpty bool, authorityKeyId []byte) (ret []pkix.Extension, err error) { + ret = make([]pkix.Extension, 12 /* maximum number of elements. */) + n := 0 + + if template.KeyUsage != 0 && + !oidInExtensions(OIDExtensionKeyUsage, template.ExtraExtensions) { + ret[n].Id = OIDExtensionKeyUsage + ret[n].Critical = true + + var a [2]byte + a[0] = reverseBitsInAByte(byte(template.KeyUsage)) + a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8)) + + l := 1 + if a[1] != 0 { + l = 2 + } + + bitString := a[:l] + ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: bitString, BitLength: asn1BitLength(bitString)}) + if err != nil { + return + } + n++ + } + + if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) && + !oidInExtensions(OIDExtensionExtendedKeyUsage, template.ExtraExtensions) { + ret[n].Id = OIDExtensionExtendedKeyUsage + + var oids []asn1.ObjectIdentifier + for _, u := range template.ExtKeyUsage { + if oid, ok := oidFromExtKeyUsage(u); ok { + oids = append(oids, oid) + } else { + panic("internal error") + } + } + + oids = append(oids, template.UnknownExtKeyUsage...) + + ret[n].Value, err = asn1.Marshal(oids) + if err != nil { + return + } + n++ + } + + if template.BasicConstraintsValid && !oidInExtensions(OIDExtensionBasicConstraints, template.ExtraExtensions) { + // Leaving MaxPathLen as zero indicates that no maximum path + // length is desired, unless MaxPathLenZero is set. A value of + // -1 causes encoding/asn1 to omit the value as desired. 
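+ // (basicConstraints.MaxPathLen is declared asn1:"optional,default:-1"
+ // above, so -1 round-trips to an absent field.)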
+ maxPathLen := template.MaxPathLen
+ if maxPathLen == 0 && !template.MaxPathLenZero {
+ maxPathLen = -1
+ }
+ ret[n].Id = OIDExtensionBasicConstraints
+ ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, maxPathLen})
+ ret[n].Critical = true
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(template.SubjectKeyId) > 0 && !oidInExtensions(OIDExtensionSubjectKeyId, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectKeyId
+ ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if len(authorityKeyId) > 0 && !oidInExtensions(OIDExtensionAuthorityKeyId, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionAuthorityKeyId
+ ret[n].Value, err = asn1.Marshal(authKeyId{authorityKeyId})
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
+ !oidInExtensions(OIDExtensionAuthorityInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionAuthorityInfoAccess
+ var aiaValues []accessDescription
+ for _, name := range template.OCSPServer {
+ aiaValues = append(aiaValues, accessDescription{
+ Method: OIDAuthorityInfoAccessOCSP,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+ })
+ }
+ for _, name := range template.IssuingCertificateURL {
+ aiaValues = append(aiaValues, accessDescription{
+ Method: OIDAuthorityInfoAccessIssuers,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(aiaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.SubjectTimestamps) > 0 || len(template.SubjectCARepositories) > 0) &&
+ !oidInExtensions(OIDExtensionSubjectInfoAccess, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectInfoAccess
+ var siaValues []accessDescription
+ for _, ts := range template.SubjectTimestamps {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessTimestamp,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(ts)},
+ })
+ }
+ for _, repo := range template.SubjectCARepositories {
+ siaValues = append(siaValues, accessDescription{
+ Method: OIDSubjectInfoAccessCARepo,
+ Location: asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(repo)},
+ })
+ }
+ ret[n].Value, err = asn1.Marshal(siaValues)
+ if err != nil {
+ return
+ }
+ n++
+ }
+
+ if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) &&
+ !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) {
+ ret[n].Id = OIDExtensionSubjectAltName
+ // From RFC 5280, Section 4.2.1.6:
+ // “If the subject field contains an empty sequence ... then
+ // subjectAltName extension ... 
is marked as critical” + ret[n].Critical = subjectIsEmpty + ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) + if err != nil { + return + } + n++ + } + + if len(template.PolicyIdentifiers) > 0 && + !oidInExtensions(OIDExtensionCertificatePolicies, template.ExtraExtensions) { + ret[n].Id = OIDExtensionCertificatePolicies + policies := make([]policyInformation, len(template.PolicyIdentifiers)) + for i, policy := range template.PolicyIdentifiers { + policies[i].Policy = policy + } + ret[n].Value, err = asn1.Marshal(policies) + if err != nil { + return + } + n++ + } + + if (len(template.PermittedDNSDomains) > 0 || len(template.ExcludedDNSDomains) > 0 || + len(template.PermittedIPRanges) > 0 || len(template.ExcludedIPRanges) > 0 || + len(template.PermittedEmailAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 || + len(template.PermittedURIDomains) > 0 || len(template.ExcludedURIDomains) > 0) && + !oidInExtensions(OIDExtensionNameConstraints, template.ExtraExtensions) { + ret[n].Id = OIDExtensionNameConstraints + ret[n].Critical = template.PermittedDNSDomainsCritical + + ipAndMask := func(ipNet *net.IPNet) []byte { + maskedIP := ipNet.IP.Mask(ipNet.Mask) + ipAndMask := make([]byte, 0, len(maskedIP)+len(ipNet.Mask)) + ipAndMask = append(ipAndMask, maskedIP...) + ipAndMask = append(ipAndMask, ipNet.Mask...) + return ipAndMask + } + + serialiseConstraints := func(dns []string, ips []*net.IPNet, emails []string, uriDomains []string) (der []byte, err error) { + var b cryptobyte.Builder + + for _, name := range dns { + if err = isIA5String(name); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(2).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(name)) + }) + }) + } + + for _, ipNet := range ips { + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(7).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes(ipAndMask(ipNet)) + }) + }) + } + + for _, email := range emails { + if err = isIA5String(email); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(email)) + }) + }) + } + + for _, uriDomain := range uriDomains { + if err = isIA5String(uriDomain); err != nil { + return nil, err + } + + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + b.AddASN1(cryptobyte_asn1.Tag(6).ContextSpecific(), func(b *cryptobyte.Builder) { + b.AddBytes([]byte(uriDomain)) + }) + }) + } + + return b.Bytes() + } + + permitted, err := serialiseConstraints(template.PermittedDNSDomains, template.PermittedIPRanges, template.PermittedEmailAddresses, template.PermittedURIDomains) + if err != nil { + return nil, err + } + + excluded, err := serialiseConstraints(template.ExcludedDNSDomains, template.ExcludedIPRanges, template.ExcludedEmailAddresses, template.ExcludedURIDomains) + if err != nil { + return nil, err + } + + var b cryptobyte.Builder + b.AddASN1(cryptobyte_asn1.SEQUENCE, func(b *cryptobyte.Builder) { + if len(permitted) > 0 { + b.AddASN1(cryptobyte_asn1.Tag(0).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddBytes(permitted) + }) + } + + if len(excluded) > 0 { + b.AddASN1(cryptobyte_asn1.Tag(1).ContextSpecific().Constructed(), func(b *cryptobyte.Builder) { + b.AddBytes(excluded) + }) + } + }) + + 
ret[n].Value, err = b.Bytes() + if err != nil { + return nil, err + } + n++ + } + + if len(template.CRLDistributionPoints) > 0 && + !oidInExtensions(OIDExtensionCRLDistributionPoints, template.ExtraExtensions) { + ret[n].Id = OIDExtensionCRLDistributionPoints + + var crlDp []distributionPoint + for _, name := range template.CRLDistributionPoints { + dp := distributionPoint{ + DistributionPoint: distributionPointName{ + FullName: []asn1.RawValue{ + asn1.RawValue{Tag: 6, Class: asn1.ClassContextSpecific, Bytes: []byte(name)}, + }, + }, + } + crlDp = append(crlDp, dp) + } + + ret[n].Value, err = asn1.Marshal(crlDp) + if err != nil { + return + } + n++ + } + + if (len(template.RawSCT) > 0 || len(template.SCTList.SCTList) > 0) && !oidInExtensions(OIDExtensionCTSCT, template.ExtraExtensions) { + rawSCT := template.RawSCT + if len(template.SCTList.SCTList) > 0 { + rawSCT, err = tls.Marshal(template.SCTList) + if err != nil { + return + } + } + ret[n].Id = OIDExtensionCTSCT + ret[n].Value, err = asn1.Marshal(rawSCT) + if err != nil { + return + } + n++ + } + + // Adding another extension here? Remember to update the maximum number + // of elements in the make() at the top of the function and the list of + // template fields used in CreateCertificate documentation. + + return append(ret[:n], template.ExtraExtensions...), nil +} + +func subjectBytes(cert *Certificate) ([]byte, error) { + if len(cert.RawSubject) > 0 { + return cert.RawSubject, nil + } + + return asn1.Marshal(cert.Subject.ToRDNSequence()) +} + +// signingParamsForPublicKey returns the parameters to use for signing with +// priv. If requestedSigAlgo is not zero then it overrides the default +// signature algorithm. +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.NullRawValue + + case *ecdsa.PublicKey: + pubType = ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + if requestedSigAlgo.isRSAPSS() { + sigAlgo.Parameters = rsaPSSParameters(hashFunc) + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// emptyASN1Subject is the ASN.1 DER encoding of an empty Subject, which is +// just an empty SEQUENCE. +var emptyASN1Subject = []byte{0x30, 0} + +// CreateCertificate creates a new X.509v3 certificate based on a template. 
+// The following members of template are used:
+// - SerialNumber
+// - Subject
+// - NotBefore, NotAfter
+// - SignatureAlgorithm
+// - For extensions:
+// - KeyUsage
+// - ExtKeyUsage, UnknownExtKeyUsage
+// - BasicConstraintsValid, IsCA, MaxPathLen, MaxPathLenZero
+// - SubjectKeyId
+// - AuthorityKeyId
+// - OCSPServer, IssuingCertificateURL
+// - SubjectTimestamps, SubjectCARepositories
+// - DNSNames, EmailAddresses, IPAddresses, URIs
+// - PolicyIdentifiers
+// - ExcludedDNSDomains, ExcludedIPRanges, ExcludedEmailAddresses, ExcludedURIDomains, PermittedDNSDomainsCritical,
+// PermittedDNSDomains, PermittedIPRanges, PermittedEmailAddresses, PermittedURIDomains
+// - CRLDistributionPoints
+// - RawSCT, SCTList
+// - ExtraExtensions
+//
+// The certificate is signed by parent. If parent is equal to template then the
+// certificate is self-signed. The parameter pub is the public key of the
+// signee and priv is the private key of the signer.
+//
+// The returned slice is the certificate in DER encoding.
+//
+// All key types that are implemented via crypto.Signer are supported (this
+// includes *rsa.PublicKey and *ecdsa.PublicKey.)
+//
+// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any,
+// unless the resulting certificate is self-signed. Otherwise the value from
+// template will be used.
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv interface{}) (cert []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ if template.SerialNumber == nil {
+ return nil, errors.New("x509: no SerialNumber given")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
+ if err != nil {
+ return nil, err
+ }
+
+ publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub)
+ if err != nil {
+ return nil, err
+ }
+
+ asn1Issuer, err := subjectBytes(parent)
+ if err != nil {
+ return
+ }
+
+ asn1Subject, err := subjectBytes(template)
+ if err != nil {
+ return
+ }
+
+ authorityKeyId := template.AuthorityKeyId
+ if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 {
+ authorityKeyId = parent.SubjectKeyId
+ }
+
+ extensions, err := buildExtensions(template, bytes.Equal(asn1Subject, emptyASN1Subject), authorityKeyId)
+ if err != nil {
+ return
+ }
+
+ encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
+ c := tbsCertificate{
+ Version: 2,
+ SerialNumber: template.SerialNumber,
+ SignatureAlgorithm: signatureAlgorithm,
+ Issuer: asn1.RawValue{FullBytes: asn1Issuer},
+ Validity: validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
+ Subject: asn1.RawValue{FullBytes: asn1Subject},
+ PublicKey: publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
+ Extensions: extensions,
+ }
+
+ tbsCertContents, err := asn1.Marshal(c)
+ if err != nil {
+ return
+ }
+
+ c.Raw = tbsCertContents
+
+ h := hashFunc.New()
+ h.Write(tbsCertContents)
+ digest := h.Sum(nil)
+
+ var signerOpts crypto.SignerOpts
+ signerOpts = hashFunc
+ if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() {
+ signerOpts = &rsa.PSSOptions{
+ SaltLength: rsa.PSSSaltLengthEqualsHash,
+ Hash: hashFunc,
+ }
+ }
+
+ var signature []byte
+ signature, err = key.Sign(rand, digest, signerOpts)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(certificate{
+ nil,
+ c,
+ signatureAlgorithm,
+ asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
+
+// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
+// CRL.
+var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
+
+// pemType is the type of a PEM encoded CRL.
+var pemType = "X509 CRL"
+
+// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
+// encoded CRLs will appear where they should be DER encoded, so this function
+// will transparently handle PEM encoding as long as there isn't any leading
+// garbage.
+func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) {
+ if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
+ block, _ := pem.Decode(crlBytes)
+ if block != nil && block.Type == pemType {
+ crlBytes = block.Bytes
+ }
+ }
+ return ParseDERCRL(crlBytes)
+}
+
+// ParseDERCRL parses a DER encoded CRL from the given bytes.
+func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) {
+ certList := new(pkix.CertificateList)
+ if rest, err := asn1.Unmarshal(derBytes, certList); err != nil {
+ return nil, err
+ } else if len(rest) != 0 {
+ return nil, errors.New("x509: trailing data after CRL")
+ }
+ return certList, nil
+}
+
+// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
+// contains the given list of revoked certificates.
+func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+ key, ok := priv.(crypto.Signer)
+ if !ok {
+ return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+ }
+
+ hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0)
+ if err != nil {
+ return nil, err
+ }
+
+ // Force revocation times to UTC per RFC 5280.
+ revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts))
+ for i, rc := range revokedCerts {
+ rc.RevocationTime = rc.RevocationTime.UTC()
+ revokedCertsUTC[i] = rc
+ }
+
+ tbsCertList := pkix.TBSCertificateList{
+ Version: 1,
+ Signature: signatureAlgorithm,
+ Issuer: c.Subject.ToRDNSequence(),
+ ThisUpdate: now.UTC(),
+ NextUpdate: expiry.UTC(),
+ RevokedCertificates: revokedCertsUTC,
+ }
+
+ // Authority Key Id
+ if len(c.SubjectKeyId) > 0 {
+ var aki pkix.Extension
+ aki.Id = OIDExtensionAuthorityKeyId
+ aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId})
+ if err != nil {
+ return
+ }
+ tbsCertList.Extensions = append(tbsCertList.Extensions, aki)
+ }
+
+ tbsCertListContents, err := asn1.Marshal(tbsCertList)
+ if err != nil {
+ return
+ }
+
+ h := hashFunc.New()
+ h.Write(tbsCertListContents)
+ digest := h.Sum(nil)
+
+ var signature []byte
+ signature, err = key.Sign(rand, digest, hashFunc)
+ if err != nil {
+ return
+ }
+
+ return asn1.Marshal(pkix.CertificateList{
+ TBSCertList: tbsCertList,
+ SignatureAlgorithm: signatureAlgorithm,
+ SignatureValue: asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+ })
+}
+
+// CertificateRequest represents a PKCS #10 certificate signature request.
+type CertificateRequest struct {
+ Raw []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature).
+ RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content.
+ RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo.
+ RawSubject []byte // DER encoded Subject.
+ + Version int + Signature []byte + SignatureAlgorithm SignatureAlgorithm + + PublicKeyAlgorithm PublicKeyAlgorithm + PublicKey interface{} + + Subject pkix.Name + + // Attributes contains the CSR attributes that can parse as + // pkix.AttributeTypeAndValueSET. + // + // Deprecated: use Extensions and ExtraExtensions instead for parsing and + // generating the requestedExtensions attribute. + Attributes []pkix.AttributeTypeAndValueSET + + // Extensions contains all requested extensions, in raw form. When parsing + // CSRs, this can be used to extract extensions that are not parsed by this + // package. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any CSR + // marshaled by CreateCertificateRequest. Values override any extensions + // that would otherwise be produced based on the other fields but are + // overridden by any extensions specified in Attributes. + // + // The ExtraExtensions field is not populated by ParseCertificateRequest, + // see Extensions instead. + ExtraExtensions []pkix.Extension + + // Subject Alternate Name values. + DNSNames []string + EmailAddresses []string + IPAddresses []net.IP + URIs []*url.URL +} + +// These structures reflect the ASN.1 structure of X.509 certificate +// signature requests (see RFC 2986): + +type tbsCertificateRequest struct { + Raw asn1.RawContent + Version int + Subject asn1.RawValue + PublicKey publicKeyInfo + RawAttributes []asn1.RawValue `asn1:"tag:0"` +} + +type certificateRequest struct { + Raw asn1.RawContent + TBSCSR tbsCertificateRequest + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +// oidExtensionRequest is a PKCS#9 OBJECT IDENTIFIER that indicates requested +// extensions in a CSR. +var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14} + +// newRawAttributes converts AttributeTypeAndValueSETs from a template +// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes. +func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) { + var rawAttributes []asn1.RawValue + b, err := asn1.Marshal(attributes) + if err != nil { + return nil, err + } + rest, err := asn1.Unmarshal(b, &rawAttributes) + if err != nil { + return nil, err + } + if len(rest) != 0 { + return nil, errors.New("x509: failed to unmarshal raw CSR Attributes") + } + return rawAttributes, nil +} + +// parseRawAttributes Unmarshals RawAttributes into AttributeTypeAndValueSETs. +func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET { + var attributes []pkix.AttributeTypeAndValueSET + for _, rawAttr := range rawAttributes { + var attr pkix.AttributeTypeAndValueSET + rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr) + // Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET + // (i.e.: challengePassword or unstructuredName). + if err == nil && len(rest) == 0 { + attributes = append(attributes, attr) + } + } + return attributes +} + +// parseCSRExtensions parses the attributes from a CSR and extracts any +// requested extensions. +func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) { + // pkcs10Attribute reflects the Attribute structure from RFC 2986, Section 4.1. 
+ type pkcs10Attribute struct { + Id asn1.ObjectIdentifier + Values []asn1.RawValue `asn1:"set"` + } + + var ret []pkix.Extension + for _, rawAttr := range rawAttributes { + var attr pkcs10Attribute + if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 { + // Ignore attributes that don't parse. + continue + } + + if !attr.Id.Equal(oidExtensionRequest) { + continue + } + + var extensions []pkix.Extension + if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil { + return nil, err + } + ret = append(ret, extensions...) + } + + return ret, nil +} + +// CreateCertificateRequest creates a new certificate request based on a +// template. The following members of template are used: +// +// - SignatureAlgorithm +// - Subject +// - DNSNames +// - EmailAddresses +// - IPAddresses +// - URIs +// - ExtraExtensions +// - Attributes (deprecated) +// +// priv is the private key to sign the CSR with, and the corresponding public +// key will be included in the CSR. It must implement crypto.Signer and its +// Public() method must return a *rsa.PublicKey or a *ecdsa.PublicKey. (A +// *rsa.PrivateKey or *ecdsa.PrivateKey satisfies this.) +// +// The returned slice is the certificate request in DER encoding. +func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + var hashFunc crypto.Hash + var sigAlgo pkix.AlgorithmIdentifier + hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public()) + if err != nil { + return nil, err + } + + var extensions []pkix.Extension + + if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0 || len(template.URIs) > 0) && + !oidInExtensions(OIDExtensionSubjectAltName, template.ExtraExtensions) { + sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses, template.URIs) + if err != nil { + return nil, err + } + + extensions = append(extensions, pkix.Extension{ + Id: OIDExtensionSubjectAltName, + Value: sanBytes, + }) + } + + extensions = append(extensions, template.ExtraExtensions...) + + // Make a copy of template.Attributes because we may alter it below. + attributes := make([]pkix.AttributeTypeAndValueSET, 0, len(template.Attributes)) + for _, attr := range template.Attributes { + values := make([][]pkix.AttributeTypeAndValue, len(attr.Value)) + copy(values, attr.Value) + attributes = append(attributes, pkix.AttributeTypeAndValueSET{ + Type: attr.Type, + Value: values, + }) + } + + extensionsAppended := false + if len(extensions) > 0 { + // Append the extensions to an existing attribute if possible. + for _, atvSet := range attributes { + if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 { + continue + } + + // specifiedExtensions contains all the extensions that we + // found specified via template.Attributes. 
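+ // Those take priority over the computed extensions merged in below.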
+ specifiedExtensions := make(map[string]bool) + + for _, atvs := range atvSet.Value { + for _, atv := range atvs { + specifiedExtensions[atv.Type.String()] = true + } + } + + newValue := make([]pkix.AttributeTypeAndValue, 0, len(atvSet.Value[0])+len(extensions)) + newValue = append(newValue, atvSet.Value[0]...) + + for _, e := range extensions { + if specifiedExtensions[e.Id.String()] { + // Attributes already contained a value for + // this extension and it takes priority. + continue + } + + newValue = append(newValue, pkix.AttributeTypeAndValue{ + // There is no place for the critical + // flag in an AttributeTypeAndValue. + Type: e.Id, + Value: e.Value, + }) + } + + atvSet.Value[0] = newValue + extensionsAppended = true + break + } + } + + rawAttributes, err := newRawAttributes(attributes) + if err != nil { + return + } + + // If not included in attributes, add a new attribute for the + // extensions. + if len(extensions) > 0 && !extensionsAppended { + attr := struct { + Type asn1.ObjectIdentifier + Value [][]pkix.Extension `asn1:"set"` + }{ + Type: oidExtensionRequest, + Value: [][]pkix.Extension{extensions}, + } + + b, err := asn1.Marshal(attr) + if err != nil { + return nil, errors.New("x509: failed to serialise extensions attribute: " + err.Error()) + } + + var rawValue asn1.RawValue + if _, err := asn1.Unmarshal(b, &rawValue); err != nil { + return nil, err + } + + rawAttributes = append(rawAttributes, rawValue) + } + + asn1Subject := template.RawSubject + if len(asn1Subject) == 0 { + asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence()) + if err != nil { + return nil, err + } + } + + tbsCSR := tbsCertificateRequest{ + Version: 0, // PKCS #10, RFC 2986 + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{ + Algorithm: publicKeyAlgorithm, + PublicKey: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: len(publicKeyBytes) * 8, + }, + }, + RawAttributes: rawAttributes, + } + + tbsCSRContents, err := asn1.Marshal(tbsCSR) + if err != nil { + return + } + tbsCSR.Raw = tbsCSRContents + + h := hashFunc.New() + h.Write(tbsCSRContents) + digest := h.Sum(nil) + + var signature []byte + signature, err = key.Sign(rand, digest, hashFunc) + if err != nil { + return + } + + return asn1.Marshal(certificateRequest{ + TBSCSR: tbsCSR, + SignatureAlgorithm: sigAlgo, + SignatureValue: asn1.BitString{ + Bytes: signature, + BitLength: len(signature) * 8, + }, + }) +} + +// ParseCertificateRequest parses a single certificate request from the +// given ASN.1 DER data. 
+func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) { + var csr certificateRequest + + rest, err := asn1.Unmarshal(asn1Data, &csr) + if err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + return parseCertificateRequest(&csr) +} + +func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) { + out := &CertificateRequest{ + Raw: in.Raw, + RawTBSCertificateRequest: in.TBSCSR.Raw, + RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw, + RawSubject: in.TBSCSR.Subject.FullBytes, + + Signature: in.SignatureValue.RightAlign(), + SignatureAlgorithm: SignatureAlgorithmFromAI(in.SignatureAlgorithm), + + PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm), + + Version: in.TBSCSR.Version, + Attributes: parseRawAttributes(in.TBSCSR.RawAttributes), + } + + var err error + var nfe NonFatalErrors + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey, &nfe) + if err != nil { + return nil, err + } + // Treat non-fatal errors as fatal here. + if len(nfe.Errors) > 0 { + return nil, nfe.Errors[0] + } + + var subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 Subject") + } + + out.Subject.FillFromRDNSequence(&subject) + + if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil { + return nil, err + } + + for _, extension := range out.Extensions { + if extension.Id.Equal(OIDExtensionSubjectAltName) { + out.DNSNames, out.EmailAddresses, out.IPAddresses, out.URIs, err = parseSANExtension(extension.Value, &nfe) + if err != nil { + return nil, err + } + } + } + + return out, nil +} + +// CheckSignature reports whether the signature on c is valid. 
+func (c *CertificateRequest) CheckSignature() error {
+	return checkSignature(c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature, c.PublicKey)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/AUTHORS
new file mode 100644
index 00000000..e8a225f3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/AUTHORS
@@ -0,0 +1,53 @@
+AUTHORS AND MAINTAINERS:
+
+MAIN DEVELOPERS:
+Graeme Connell
+
+AUTHORS:
+Nigel Tao
+Cole Mickens
+Ben Daglish
+Luis Martinez
+Remco Verhoef
+Hiroaki Kawai
+Lukas Lueg
+Laurent Hausermann
+Bill Green
+Christian Mäder
+Gernot Vormayr
+Vitor Garcia Graveto
+Elias Chavarria Reyes
+Daniel Rittweiler
+
+CONTRIBUTORS:
+Attila Oláh
+Vittus Mikiassen
+Matthias Radestock
+Matthew Sackman
+Loic Prylli
+Alexandre Fiori
+Adrian Tam
+Satoshi Matsumoto
+David Stainton
+Jesse Ward
+Kane Mathers
+Jose Selvi
+Yerden Zhumabekov
+
+-----------------------------------------------
+FORKED FROM github.com/akrennmair/gopcap
+ALL THE FOLLOWING ARE FOR THAT PROJECT
+
+MAIN DEVELOPERS:
+Andreas Krennmair
+
+CONTRIBUTORS:
+Andrea Nall
+Daniel Arndt
+Dustin Sallings
+Graeme Connell
+Guillaume Savary
+Mark Smith
+Miek Gieben
+Mike Bell
+Trevor Strohman
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/CONTRIBUTING.md b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/CONTRIBUTING.md
new file mode 100644
index 00000000..99ab7a2e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/CONTRIBUTING.md
@@ -0,0 +1,215 @@
+Contributing To gopacket
+========================
+
+So you've got some code and you'd like it to be part of gopacket... wonderful!
+We're happy to accept contributions, whether they're fixes to old protocols, new
+protocols entirely, or anything else you think would improve the gopacket
+library. This document is designed to help you to do just that.
+
+The first section deals with the plumbing: how to actually get a change
+submitted.
+
+The second section deals with coding style... Go is great in that it
+has a uniform style implemented by 'go fmt', but there are still some decisions
+we've made that go above and beyond, and if you follow them, they won't come up
+in your code review.
+
+The third section deals with some of the implementation decisions we've made,
+which may help you to understand the current code and which we may ask you to
+conform to (or provide compelling reasons for ignoring).
+
+Overall, we hope this document will help you to understand our system and write
+great code which fits in, and help us turn your code review around quickly
+so the code can make it into the master branch as soon as possible.
+
+
+How To Submit Code
+------------------
+
+We use github.com's Pull Request feature to receive code contributions from
+external contributors. See
+https://help.github.com/articles/creating-a-pull-request/ for details on
+how to create a request.
+
+Also, there's a local script `gc` in the base directory of GoPacket that
+runs a local set of checks, which should give you relatively high confidence
+that your pull won't fail github pull checks.
+
+```sh
+go get github.com/google/gopacket
+cd $GOROOT/src/pkg/github.com/google/gopacket
+git checkout -b <mynewfeature> # create a new branch to work from
+... code code code ...
+./gc # Run this to do local commits, it performs a number of checks
+```
+
+To sum up:
+
+* DO
+  + Pull down the latest version.
+  + Make a feature-specific branch.
+  + Code using the style and methods discussed in the rest of this document.
+  + Use the ./gc command to do local commits or check correctness.
+  + Push your new feature branch up to github.com, as a pull request.
+  + Handle comments and requests from reviewers, pushing new commits up to
+    your feature branch as problems are addressed.
+  + Put interesting comments and discussions into commit comments.
+* DON'T
+  + Push to someone else's branch without their permission.
+
+
+Coding Style
+------------
+
+* Go code must be run through `go fmt`, `go vet`, and `golint`
+* Follow http://golang.org/doc/effective_go.html as much as possible.
+  + In particular, http://golang.org/doc/effective_go.html#mixed-caps. Enums
+    should be CamelCase, with acronyms capitalized (TCPSourcePort, vs.
+    TcpSourcePort or TCP_SOURCE_PORT).
+* Bonus points for giving enum types a String() method.
+* Any exported types or functions should have commentary
+  (http://golang.org/doc/effective_go.html#commentary)
+
+
+Coding Methods And Implementation Notes
+---------------------------------------
+
+### Error Handling
+
+Many times, you'll be decoding a protocol and run across something bad, a packet
+corruption or the like. How do you handle this? First off, ALWAYS report the
+error. You can do this either by returning the error from the decode() function
+(most common), or if you're up for it you can implement and add an ErrorLayer
+through the packet builder (the first method is a simple shortcut that does
+exactly this, then stops any future decoding).
+
+Often, you'll already have decoded some part of your protocol by the time you
+hit your error. Use your own discretion to determine whether the stuff you've
+already decoded should be returned to the caller or not:
+
+```go
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+	prot := &MyProtocol{}
+	if len(data) < 10 {
+		// This error occurred before we did ANYTHING, so there's nothing in my
+		// protocol that the caller could possibly want. Just return the error.
+		return fmt.Errorf("Length %d less than 10", len(data))
+	}
+	prot.ImportantField1 = data[:5]
+	prot.ImportantField2 = data[5:10]
+	// At this point, we've already got enough information in 'prot' to
+	// warrant returning it to the caller, so we'll add it now.
+	p.AddLayer(prot)
+	if len(data) < 15 {
+		// We encountered an error later in the packet, but the caller already
+		// has the important info we've gleaned so far.
+		return fmt.Errorf("Length %d less than 15", len(data))
+	}
+	prot.ImportantField3 = data[10:15]
+	return nil // We've already added the layer, we can just return success.
+}
+```
+
+In general, our code follows the approach of returning the first error it
+encounters, and we don't trust any bytes after that first error.
+
+### What Is A Layer?
+
+The definition of a layer is up to the discretion of the coder. It should be
+something important enough that it's actually useful to the caller (IE: every
+TLV value should probably NOT be a layer). However, it can be more granular
+than a single protocol... IPv6 and SCTP both implement many layers to handle the
+various parts of the protocol. Use your best judgement, and prepare to defend
+your decisions during code review. ;)
+
+### Performance
+
+We strive to make gopacket as fast as possible while still providing lots of
+features. In general, this means:
+
+* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize
+  others on an as-needed basis (tons of MPLS on your network? Time to optimize
+  MPLS!)
+* Use fast operations. See the toplevel benchmark_test for benchmarks of some
+  of Go's underlying features and types.
+* Test your performance changes! You should use the ./gc script's --benchmark
+  flag to submit any performance-related changes. Use pcap/gopacket_benchmark
+  to test your change against a PCAP file based on your traffic patterns.
+* Don't be TOO hacky. Sometimes, removing an unused field from a struct causes
+  a huge performance hit, due to the way that Go currently handles its segmented
+  stack... don't be afraid to clean it up anyway. We'll trust the Go compiler
+  to get good enough over time to handle this. Also, this type of
+  compiler-specific optimization is very fragile; someone adding a field to an
+  entirely different struct elsewhere in the codebase could reverse any gains
+  you might achieve by aligning your allocations.
+* Try to minimize memory allocations. If possible, use []byte to reference
+  pieces of the input, instead of using string, which requires copying the bytes
+  into a new memory allocation.
+* Think hard about what should be evaluated lazily vs. not. In general, a
+  layer's struct should almost exactly mirror the layer's frame. Anything
+  that's more interesting should be a function. This may not always be
+  possible, but it's a good rule of thumb.
+* Don't fear micro-optimizations. With the above in mind, we welcome
+  micro-optimizations that we think will have positive/neutral impacts on the
+  majority of workloads. A prime example of this is pre-allocating certain
+  structs within a larger one:
+
+```go
+type MyProtocol struct {
+	// Most packets have 1-4 of VeryCommon, so we preallocate it here.
+	initialAllocation [4]uint32
+	VeryCommon        []uint32
+}
+
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+	prot := &MyProtocol{}
+	prot.VeryCommon = prot.initialAllocation[:0]
+	for len(data) > 4 {
+		field := binary.BigEndian.Uint32(data[:4])
+		data = data[4:]
+		// Since we're using the underlying initialAllocation, we won't need to
+		// allocate new memory for the following append unless we receive more
+		// than 16 bytes of data, which should be the uncommon case.
+		prot.VeryCommon = append(prot.VeryCommon, field)
+	}
+	p.AddLayer(prot)
+	if len(data) > 0 {
+		return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data))
+	}
+	return nil
+}
+```
+
+### Slices And Data
+
+If you're pulling a slice from the data you're decoding, don't copy it. Just
+use the slice itself.
+
+```go
+type MyProtocol struct {
+	A, B net.IP
+}
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+	p.AddLayer(&MyProtocol{
+		A: data[:4],
+		B: data[4:8],
+	})
+	return nil
+}
+```
+
+The caller has already agreed, by using this library, that they won't modify the
+set of bytes they pass in to the decoder, or the library has already copied the
+set of bytes to a read-only location. See DecodeOptions.NoCopy for more
+information.
+
+### Enums/Types
+
+If a protocol has an integer field (uint8, uint16, etc) with a couple of known
+values that mean something special, make it a type. This allows us to do really
+nice things like adding a String() function to them, so we can more easily
+display those to users. Check out layers/enums.go for one example, as well as
+layers/icmp.go for layer-specific enums.
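
A minimal sketch of that pattern (the `MyProtocolType` name and its values are invented here for illustration; the real definitions live in layers/enums.go):

```go
// MyProtocolType is a hypothetical one-byte protocol field with known values.
type MyProtocolType uint8

// Known MyProtocolType values.
const (
	MyProtocolTypeHello   MyProtocolType = 1
	MyProtocolTypeGoodbye MyProtocolType = 2
)

// String implements fmt.Stringer, so users see "Hello" instead of "1".
func (t MyProtocolType) String() string {
	switch t {
	case MyProtocolTypeHello:
		return "Hello"
	case MyProtocolTypeGoodbye:
		return "Goodbye"
	}
	return "Unknown"
}
```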
+
+When naming things, try for descriptiveness over succinctness. For example,
+choose DNSResponseRecord over DNSRR.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/LICENSE
new file mode 100644
index 00000000..2100d524
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Google, Inc. All rights reserved.
+Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Andreas Krennmair, Google, nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/README.md b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/README.md
new file mode 100644
index 00000000..efe462ee
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/README.md
@@ -0,0 +1,12 @@
+# GoPacket
+
+This library provides packet decoding capabilities for Go.
+See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
+
+[![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket)
+[![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket)
+
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.9 due to x/sys/unix dependencies.
+
+Originally forked from the gopcap project written by Andreas
+Krennmair (http://github.com/akrennmair/gopcap).
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/afpacket.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/afpacket.go
new file mode 100644
index 00000000..a0f9ba27
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/afpacket.go
@@ -0,0 +1,513 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build linux
+
+// Package afpacket provides Go bindings for MMap'd AF_PACKET socket reading.
+package afpacket
+
+// Couldn't have done this without:
+// http://lxr.free-electrons.com/source/Documentation/networking/packet_mmap.txt
+// http://codemonkeytips.blogspot.co.uk/2011/07/asynchronous-packet-socket-reading-with.html
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"time"
+	"unsafe"
+
+	"golang.org/x/net/bpf"
+	"golang.org/x/sys/unix"
+
+	"github.com/google/gopacket"
+)
+
+/*
+#include <linux/if_packet.h> // AF_PACKET, sockaddr_ll
+#include <linux/if_ether.h>  // ETH_P_ALL
+#include <sys/socket.h>      // socket()
+#include <unistd.h>          // close()
+#include <arpa/inet.h>       // htons()
+#include <sys/mman.h>        // mmap(), munmap()
+#include <poll.h>            // poll()
+*/
+import "C"
+
+var pageSize = unix.Getpagesize()
+
+// ErrPoll is returned by poll
+var ErrPoll = errors.New("packet poll failed")
+
+// ErrTimeout is returned on poll timeout
+var ErrTimeout = errors.New("packet poll timeout expired")
+
+// AncillaryVLAN structures are used to pass the captured VLAN
+// as ancillary data via CaptureInfo.
+type AncillaryVLAN struct {
+	// The VLAN VID provided by the kernel.
+	VLAN int
+}
+
+// Stats is a set of counters detailing the work TPacket has done so far.
+type Stats struct {
+	// Packets is the total number of packets returned to the caller.
+	Packets int64
+	// Polls is the number of blocking syscalls made waiting for packets.
+	// This should always be <= Packets, since with TPacket one syscall
+	// can (and often does) return many results.
+	Polls int64
+}
+
+// SocketStats is a struct where socket stats are stored
+type SocketStats C.struct_tpacket_stats
+
+// Packets returns the number of packets seen by this socket.
+func (s *SocketStats) Packets() uint {
+	return uint(s.tp_packets)
+}
+
+// Drops returns the number of packets dropped on this socket.
+func (s *SocketStats) Drops() uint {
+	return uint(s.tp_drops)
+}
+
+// SocketStatsV3 is a struct where socket stats for TPacketV3 are stored
+type SocketStatsV3 C.struct_tpacket_stats_v3
+
+// Packets returns the number of packets seen by this socket.
+func (s *SocketStatsV3) Packets() uint {
+	return uint(s.tp_packets)
+}
+
+// Drops returns the number of packets dropped on this socket.
+func (s *SocketStatsV3) Drops() uint {
+	return uint(s.tp_drops)
+}
+
+// QueueFreezes returns the number of queue freezes on this socket.
+func (s *SocketStatsV3) QueueFreezes() uint {
+	return uint(s.tp_freeze_q_cnt)
+}
+
+// TPacket implements packet receiving for Linux AF_PACKET versions 1, 2, and 3.
+type TPacket struct {
+	// stats is simple statistics on TPacket's run. This MUST be the first entry to ensure alignment for sync.atomic
+	stats Stats
+	// fd is the C file descriptor.
+	fd int
+	// ring points to the memory space of the ring buffer shared by tpacket and the kernel.
+	ring []byte
+	// rawring is the unsafe pointer that we use to poll for packets
+	rawring unsafe.Pointer
+	// opts contains read-only options for the TPacket object.
+	opts options
+	mu   sync.Mutex // guards below
+	// offset is the offset into the ring of the current header.
+	offset int
+	// current is the current header.
+	current header
+	// shouldReleasePacket is set to true whenever we return packet data, to make sure we remember to release that data back to the kernel.
+	shouldReleasePacket bool
+	// headerNextNeeded is set to true when the header needs to move to the next packet. There is no need to move it in case of a poll error.
+	headerNextNeeded bool
+	// tpVersion is the version of TPacket actually in use, set by setRequestedTPacketVersion.
+ tpVersion OptTPacketVersion + // Hackity hack hack hack. We need to return a pointer to the header with + // getTPacketHeader, and we don't want to allocate a v3wrapper every time, + // so we leave it in the TPacket object and return a pointer to it. + v3 v3wrapper + + statsMu sync.Mutex // guards stats below + // socketStats contains stats from the socket + socketStats SocketStats + // same as socketStats, but with an extra field freeze_q_cnt + socketStatsV3 SocketStatsV3 +} + +var _ gopacket.ZeroCopyPacketDataSource = &TPacket{} + +// bindToInterface binds the TPacket socket to a particular named interface. +func (h *TPacket) bindToInterface(ifaceName string) error { + ifIndex := 0 + // An empty string here means to listen to all interfaces + if ifaceName != "" { + iface, err := net.InterfaceByName(ifaceName) + if err != nil { + return fmt.Errorf("InterfaceByName: %v", err) + } + ifIndex = iface.Index + } + s := &unix.SockaddrLinklayer{ + Protocol: htons(uint16(unix.ETH_P_ALL)), + Ifindex: ifIndex, + } + return unix.Bind(h.fd, s) +} + +// setTPacketVersion asks the kernel to set TPacket to a particular version, and returns an error on failure. +func (h *TPacket) setTPacketVersion(version OptTPacketVersion) error { + if err := unix.SetsockoptInt(h.fd, unix.SOL_PACKET, unix.PACKET_VERSION, int(version)); err != nil { + return fmt.Errorf("setsockopt packet_version: %v", err) + } + return nil +} + +// setRequestedTPacketVersion tries to set TPacket to the requested version or versions. +func (h *TPacket) setRequestedTPacketVersion() error { + switch { + case (h.opts.version == TPacketVersionHighestAvailable || h.opts.version == TPacketVersion3) && h.setTPacketVersion(TPacketVersion3) == nil: + h.tpVersion = TPacketVersion3 + case (h.opts.version == TPacketVersionHighestAvailable || h.opts.version == TPacketVersion2) && h.setTPacketVersion(TPacketVersion2) == nil: + h.tpVersion = TPacketVersion2 + case (h.opts.version == TPacketVersionHighestAvailable || h.opts.version == TPacketVersion1) && h.setTPacketVersion(TPacketVersion1) == nil: + h.tpVersion = TPacketVersion1 + default: + return errors.New("no known tpacket versions work on this machine") + } + return nil +} + +// setUpRing sets up the shared-memory ring buffer between the user process and the kernel. 
+func (h *TPacket) setUpRing() (err error) { + totalSize := int(h.opts.framesPerBlock * h.opts.numBlocks * h.opts.frameSize) + switch h.tpVersion { + case TPacketVersion1, TPacketVersion2: + var tp C.struct_tpacket_req + tp.tp_block_size = C.uint(h.opts.blockSize) + tp.tp_block_nr = C.uint(h.opts.numBlocks) + tp.tp_frame_size = C.uint(h.opts.frameSize) + tp.tp_frame_nr = C.uint(h.opts.framesPerBlock * h.opts.numBlocks) + if err := setsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_RX_RING, unsafe.Pointer(&tp), unsafe.Sizeof(tp)); err != nil { + return fmt.Errorf("setsockopt packet_rx_ring: %v", err) + } + case TPacketVersion3: + var tp C.struct_tpacket_req3 + tp.tp_block_size = C.uint(h.opts.blockSize) + tp.tp_block_nr = C.uint(h.opts.numBlocks) + tp.tp_frame_size = C.uint(h.opts.frameSize) + tp.tp_frame_nr = C.uint(h.opts.framesPerBlock * h.opts.numBlocks) + tp.tp_retire_blk_tov = C.uint(h.opts.blockTimeout / time.Millisecond) + if err := setsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_RX_RING, unsafe.Pointer(&tp), unsafe.Sizeof(tp)); err != nil { + return fmt.Errorf("setsockopt packet_rx_ring v3: %v", err) + } + default: + return errors.New("invalid tpVersion") + } + h.ring, err = unix.Mmap(h.fd, 0, totalSize, unix.PROT_READ|unix.PROT_WRITE, unix.MAP_SHARED) + if err != nil { + return err + } + if h.ring == nil { + return errors.New("no ring") + } + h.rawring = unsafe.Pointer(&h.ring[0]) + return nil +} + +// Close cleans up the TPacket. It should not be used after the Close call. +func (h *TPacket) Close() { + if h.fd == -1 { + return // already closed. + } + if h.ring != nil { + unix.Munmap(h.ring) + } + h.ring = nil + unix.Close(h.fd) + h.fd = -1 + runtime.SetFinalizer(h, nil) +} + +// NewTPacket returns a new TPacket object for reading packets off the wire. +// Its behavior may be modified by passing in any/all of afpacket.Opt* to this +// function. +// If this function succeeds, the user should be sure to Close the returned +// TPacket when finished with it. +func NewTPacket(opts ...interface{}) (h *TPacket, err error) { + h = &TPacket{} + if h.opts, err = parseOptions(opts...); err != nil { + return nil, err + } + fd, err := unix.Socket(unix.AF_PACKET, int(h.opts.socktype), int(htons(unix.ETH_P_ALL))) + if err != nil { + return nil, err + } + h.fd = fd + if err = h.bindToInterface(h.opts.iface); err != nil { + goto errlbl + } + if err = h.setRequestedTPacketVersion(); err != nil { + goto errlbl + } + if err = h.setUpRing(); err != nil { + goto errlbl + } + // Clear stat counter from socket + if err = h.InitSocketStats(); err != nil { + goto errlbl + } + runtime.SetFinalizer(h, (*TPacket).Close) + return h, nil +errlbl: + h.Close() + return nil, err +} + +// SetBPF attaches a BPF filter to the underlying socket +func (h *TPacket) SetBPF(filter []bpf.RawInstruction) error { + var p unix.SockFprog + if len(filter) > int(^uint16(0)) { + return errors.New("filter too large") + } + p.Len = uint16(len(filter)) + p.Filter = (*unix.SockFilter)(unsafe.Pointer(&filter[0])) + + return setsockopt(h.fd, unix.SOL_SOCKET, unix.SO_ATTACH_FILTER, unsafe.Pointer(&p), unix.SizeofSockFprog) +} + +func (h *TPacket) releaseCurrentPacket() error { + h.current.clearStatus() + h.offset++ + h.shouldReleasePacket = false + return nil +} + +// ZeroCopyReadPacketData reads the next packet off the wire, and returns its data. +// The slice returned by ZeroCopyReadPacketData points to bytes owned by the +// TPacket. 
Each call to ZeroCopyReadPacketData invalidates any data previously
+// returned by ZeroCopyReadPacketData. Care must be taken not to keep pointers
+// to old bytes when using ZeroCopyReadPacketData... if you need to keep data past
+// the next time you call ZeroCopyReadPacketData, use ReadPacketData, which copies
+// the bytes into a new buffer for you.
+//  tp, _ := NewTPacket(...)
+//  data1, _, _ := tp.ZeroCopyReadPacketData()
+//  // do everything you want with data1 here, copying bytes out of it if you'd like to keep them around.
+//  data2, _, _ := tp.ZeroCopyReadPacketData() // invalidates bytes in data1
+func (h *TPacket) ZeroCopyReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) {
+	h.mu.Lock()
+retry:
+	if h.current == nil || !h.headerNextNeeded || !h.current.next() {
+		if h.shouldReleasePacket {
+			h.releaseCurrentPacket()
+		}
+		h.current = h.getTPacketHeader()
+		if err = h.pollForFirstPacket(h.current); err != nil {
+			h.headerNextNeeded = false
+			h.mu.Unlock()
+			return
+		}
+		// We received an empty block
+		if h.current.getLength() == 0 {
+			goto retry
+		}
+	}
+	data = h.current.getData(&h.opts)
+	ci.Timestamp = h.current.getTime()
+	ci.CaptureLength = len(data)
+	ci.Length = h.current.getLength()
+	ci.InterfaceIndex = h.current.getIfaceIndex()
+	vlan := h.current.getVLAN()
+	if vlan >= 0 {
+		ci.AncillaryData = append(ci.AncillaryData, AncillaryVLAN{vlan})
+	}
+	atomic.AddInt64(&h.stats.Packets, 1)
+	h.headerNextNeeded = true
+	h.mu.Unlock()
+
+	return
+}
+
+// Stats returns statistics on the packets the TPacket has seen so far.
+func (h *TPacket) Stats() (Stats, error) {
+	return Stats{
+		Polls:   atomic.LoadInt64(&h.stats.Polls),
+		Packets: atomic.LoadInt64(&h.stats.Packets),
+	}, nil
+}
+
+// InitSocketStats clears the socket counters and returns empty stats.
+func (h *TPacket) InitSocketStats() error {
+	if h.tpVersion == TPacketVersion3 {
+		socklen := unsafe.Sizeof(h.socketStatsV3)
+		var ssv3 SocketStatsV3
+
+		err := getsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_STATISTICS, unsafe.Pointer(&ssv3), &socklen)
+		if err != nil {
+			return err
+		}
+		h.socketStatsV3 = SocketStatsV3{}
+	} else {
+		socklen := unsafe.Sizeof(h.socketStats)
+		var ss SocketStats
+
+		err := getsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_STATISTICS, unsafe.Pointer(&ss), &socklen)
+		if err != nil {
+			return err
+		}
+		h.socketStats = SocketStats{}
+	}
+	return nil
+}
+
+// SocketStats saves stats from the socket to the TPacket instance.
+func (h *TPacket) SocketStats() (SocketStats, SocketStatsV3, error) { + h.statsMu.Lock() + defer h.statsMu.Unlock() + // We need to save the counters since asking for the stats will clear them + if h.tpVersion == TPacketVersion3 { + socklen := unsafe.Sizeof(h.socketStatsV3) + var ssv3 SocketStatsV3 + + err := getsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_STATISTICS, unsafe.Pointer(&ssv3), &socklen) + if err != nil { + return SocketStats{}, SocketStatsV3{}, err + } + + h.socketStatsV3.tp_packets += ssv3.tp_packets + h.socketStatsV3.tp_drops += ssv3.tp_drops + h.socketStatsV3.tp_freeze_q_cnt += ssv3.tp_freeze_q_cnt + return h.socketStats, h.socketStatsV3, nil + } + socklen := unsafe.Sizeof(h.socketStats) + var ss SocketStats + + err := getsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_STATISTICS, unsafe.Pointer(&ss), &socklen) + if err != nil { + return SocketStats{}, SocketStatsV3{}, err + } + + h.socketStats.tp_packets += ss.tp_packets + h.socketStats.tp_drops += ss.tp_drops + return h.socketStats, h.socketStatsV3, nil +} + +// ReadPacketDataTo reads packet data into a user-supplied buffer. +// This function reads up to the length of the passed-in slice. +// The number of bytes read into data will be returned in ci.CaptureLength, +// which is the minimum of the size of the passed-in buffer and the size of +// the captured packet. +func (h *TPacket) ReadPacketDataTo(data []byte) (ci gopacket.CaptureInfo, err error) { + var d []byte + d, ci, err = h.ZeroCopyReadPacketData() + if err != nil { + return + } + ci.CaptureLength = copy(data, d) + return +} + +// ReadPacketData reads the next packet, copies it into a new buffer, and returns +// that buffer. Since the buffer is allocated by ReadPacketData, it is safe for long-term +// use. This implements gopacket.PacketDataSource. +func (h *TPacket) ReadPacketData() (data []byte, ci gopacket.CaptureInfo, err error) { + var d []byte + d, ci, err = h.ZeroCopyReadPacketData() + if err != nil { + return + } + data = make([]byte, len(d)) + copy(data, d) + return +} + +func (h *TPacket) getTPacketHeader() header { + switch h.tpVersion { + case TPacketVersion1: + if h.offset >= h.opts.framesPerBlock*h.opts.numBlocks { + h.offset = 0 + } + position := uintptr(h.rawring) + uintptr(h.opts.frameSize*h.offset) + return (*v1header)(unsafe.Pointer(position)) + case TPacketVersion2: + if h.offset >= h.opts.framesPerBlock*h.opts.numBlocks { + h.offset = 0 + } + position := uintptr(h.rawring) + uintptr(h.opts.frameSize*h.offset) + return (*v2header)(unsafe.Pointer(position)) + case TPacketVersion3: + // TPacket3 uses each block to return values, instead of each frame. Hence we need to rotate when we hit #blocks, not #frames. 
+		if h.offset >= h.opts.numBlocks {
+			h.offset = 0
+		}
+		position := uintptr(h.rawring) + uintptr(h.opts.frameSize*h.offset*h.opts.framesPerBlock)
+		h.v3 = initV3Wrapper(unsafe.Pointer(position))
+		return &h.v3
+	}
+	panic("handle tpacket version is invalid")
+}
+
+func (h *TPacket) pollForFirstPacket(hdr header) error {
+	tm := int(h.opts.pollTimeout / time.Millisecond)
+	for hdr.getStatus()&unix.TP_STATUS_USER == 0 {
+		pollset := [1]unix.PollFd{
+			{
+				Fd:     int32(h.fd),
+				Events: unix.POLLIN,
+			},
+		}
+		n, err := unix.Poll(pollset[:], tm)
+		if n == 0 {
+			return ErrTimeout
+		}
+
+		atomic.AddInt64(&h.stats.Polls, 1)
+		if pollset[0].Revents&unix.POLLERR > 0 {
+			return ErrPoll
+		}
+		if err == syscall.EINTR {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	h.shouldReleasePacket = true
+	return nil
+}
+
+// FanoutType determines the type of fanout to use with a TPacket SetFanout call.
+type FanoutType int
+
+// FanoutType values.
+const (
+	FanoutHash FanoutType = unix.PACKET_FANOUT_HASH
+	// It appears that defrag only works with FanoutHash, see:
+	// http://lxr.free-electrons.com/source/net/packet/af_packet.c#L1204
+	FanoutHashWithDefrag FanoutType = unix.PACKET_FANOUT_FLAG_DEFRAG
+	FanoutLoadBalance    FanoutType = unix.PACKET_FANOUT_LB
+	FanoutCPU            FanoutType = unix.PACKET_FANOUT_CPU
+	FanoutRollover       FanoutType = unix.PACKET_FANOUT_ROLLOVER
+	FanoutRandom         FanoutType = unix.PACKET_FANOUT_RND
+	FanoutQueueMapping   FanoutType = unix.PACKET_FANOUT_QM
+	FanoutCBPF           FanoutType = unix.PACKET_FANOUT_CBPF
+	FanoutEBPF           FanoutType = unix.PACKET_FANOUT_EBPF
+)
+
+// SetFanout activates TPacket's fanout ability.
+// Use of Fanout requires creating multiple TPacket objects and passing the same
+// id/type to a SetFanout call on each. Note that this can be done cross-process,
+// so if two different processes both call SetFanout with the same type/id,
+// they'll share packets between them. The same should work for multiple TPacket
+// objects within the same process.
+func (h *TPacket) SetFanout(t FanoutType, id uint16) error {
+	h.mu.Lock()
+	defer h.mu.Unlock()
+	arg := C.int(t) << 16
+	arg |= C.int(id)
+	return setsockopt(h.fd, unix.SOL_PACKET, unix.PACKET_FANOUT, unsafe.Pointer(&arg), unsafe.Sizeof(arg))
+}
+
+// WritePacketData transmits a raw packet.
+func (h *TPacket) WritePacketData(pkt []byte) error {
+	_, err := unix.Write(h.fd, pkt)
+	return err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/header.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/header.go
new file mode 100644
index 00000000..61634e7a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/header.go
@@ -0,0 +1,195 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build linux
+
+package afpacket
+
+import (
+	"reflect"
+	"time"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// #include <linux/if_packet.h>
+// #include <linux/if_ether.h>
+// #define VLAN_HLEN 4
+import "C"
+
+// Our model of handling all TPacket versions is a little hacky, to say the
+// least. We use the header interface to handle interactions with the
+// tpacket1/tpacket2 packet header AND the tpacket3 block header. The big
+// difference is that tpacket3's block header implements the next() call to get
+// the next packet within the block, while v1/v2 just always return false.
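
As a hedged illustration of the contract this comment describes (the `mockV2` and `mockV3` types below are invented for the sketch and are not part of the package), a v1/v2-style header reports no further packets from next(), while a v3-style header walks the packets inside its block:

```go
package main

import "fmt"

// mockV2 mimics tpacket1/tpacket2 semantics: one packet per frame, so
// next() always reports that there is no further packet in this header.
type mockV2 struct{}

func (mockV2) next() bool { return false }

// mockV3 mimics tpacket3 block semantics: next() advances through the
// packets remaining in the current block.
type mockV3 struct{ used, numPkts int }

func (m *mockV3) next() bool { m.used++; return m.used < m.numPkts }

func main() {
	fmt.Println("v2 has another packet:", mockV2{}.next()) // always false
	v3 := &mockV3{numPkts: 3}
	for v3.next() {
		fmt.Println("v3: consumed another packet from this block")
	}
}
```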
+ +type header interface { + // getStatus returns the TPacket status of the current header. + getStatus() int + // clearStatus clears the status of the current header, releasing its + // underlying data back to the kernel for future use with new packets. + // Using the header after calling clearStatus is an error. clearStatus + // should only be called after next() returns false. + clearStatus() + // getTime returns the timestamp for the current packet pointed to by + // the header. + getTime() time.Time + // getData returns the packet data pointed to by the current header. + getData(opts *options) []byte + // getLength returns the total length of the packet. + getLength() int + // getIfaceIndex returns the index of the network interface + // where the packet was seen. The index can later be translated to a name. + getIfaceIndex() int + // getVLAN returns the VLAN of a packet if it was provided out-of-band + getVLAN() int + // next moves this header to point to the next packet it contains, + // returning true on success (in which case getTime and getData will + // return values for the new packet) or false if there are no more + // packets (in which case clearStatus should be called). + next() bool +} + +const tpacketAlignment = uint(unix.TPACKET_ALIGNMENT) + +func tpAlign(x int) int { + return int((uint(x) + tpacketAlignment - 1) &^ (tpacketAlignment - 1)) +} + +type v1header C.struct_tpacket_hdr +type v2header C.struct_tpacket2_hdr + +func makeSlice(start uintptr, length int) (data []byte) { + slice := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + slice.Data = start + slice.Len = length + slice.Cap = length + return +} + +func insertVlanHeader(data []byte, vlanTCI int, opts *options) []byte { + if vlanTCI == 0 || !opts.addVLANHeader { + return data + } + eth := make([]byte, 0, len(data)+C.VLAN_HLEN) + eth = append(eth, data[0:C.ETH_ALEN*2]...) + eth = append(eth, []byte{0x81, 0, byte((vlanTCI >> 8) & 0xff), byte(vlanTCI & 0xff)}...) + return append(eth, data[C.ETH_ALEN*2:]...) 
+} + +func (h *v1header) getVLAN() int { + return -1 +} +func (h *v1header) getStatus() int { + return int(h.tp_status) +} +func (h *v1header) clearStatus() { + h.tp_status = 0 +} +func (h *v1header) getTime() time.Time { + return time.Unix(int64(h.tp_sec), int64(h.tp_usec)*1000) +} +func (h *v1header) getData(opts *options) []byte { + return makeSlice(uintptr(unsafe.Pointer(h))+uintptr(h.tp_mac), int(h.tp_snaplen)) +} +func (h *v1header) getLength() int { + return int(h.tp_len) +} +func (h *v1header) getIfaceIndex() int { + ll := (*C.struct_sockaddr_ll)(unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(tpAlign(int(C.sizeof_struct_tpacket_hdr))))) + return int(ll.sll_ifindex) +} +func (h *v1header) next() bool { + return false +} + +func (h *v2header) getVLAN() int { + return -1 +} +func (h *v2header) getStatus() int { + return int(h.tp_status) +} +func (h *v2header) clearStatus() { + h.tp_status = 0 +} +func (h *v2header) getTime() time.Time { + return time.Unix(int64(h.tp_sec), int64(h.tp_nsec)) +} +func (h *v2header) getData(opts *options) []byte { + data := makeSlice(uintptr(unsafe.Pointer(h))+uintptr(h.tp_mac), int(h.tp_snaplen)) + return insertVlanHeader(data, int(h.tp_vlan_tci), opts) +} +func (h *v2header) getLength() int { + return int(h.tp_len) +} +func (h *v2header) getIfaceIndex() int { + ll := (*C.struct_sockaddr_ll)(unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(tpAlign(int(C.sizeof_struct_tpacket2_hdr))))) + return int(ll.sll_ifindex) +} +func (h *v2header) next() bool { + return false +} + +type v3wrapper struct { + block *C.struct_tpacket_block_desc + blockhdr *C.struct_tpacket_hdr_v1 + packet *C.struct_tpacket3_hdr + used C.__u32 +} + +func initV3Wrapper(block unsafe.Pointer) (w v3wrapper) { + w.block = (*C.struct_tpacket_block_desc)(block) + w.blockhdr = (*C.struct_tpacket_hdr_v1)(unsafe.Pointer(&w.block.hdr[0])) + w.packet = (*C.struct_tpacket3_hdr)(unsafe.Pointer(uintptr(block) + uintptr(w.blockhdr.offset_to_first_pkt))) + return +} + +func (w *v3wrapper) getVLAN() int { + if w.packet.tp_status&unix.TP_STATUS_VLAN_VALID != 0 { + hv1 := (*C.struct_tpacket_hdr_variant1)(unsafe.Pointer(&w.packet.anon0[0])) + return int(hv1.tp_vlan_tci & 0xfff) + } + return -1 +} + +func (w *v3wrapper) getStatus() int { + return int(w.blockhdr.block_status) +} +func (w *v3wrapper) clearStatus() { + w.blockhdr.block_status = 0 +} +func (w *v3wrapper) getTime() time.Time { + return time.Unix(int64(w.packet.tp_sec), int64(w.packet.tp_nsec)) +} +func (w *v3wrapper) getData(opts *options) []byte { + data := makeSlice(uintptr(unsafe.Pointer(w.packet))+uintptr(w.packet.tp_mac), int(w.packet.tp_snaplen)) + + hv1 := (*C.struct_tpacket_hdr_variant1)(unsafe.Pointer(&w.packet.anon0[0])) + return insertVlanHeader(data, int(hv1.tp_vlan_tci), opts) +} +func (w *v3wrapper) getLength() int { + return int(w.packet.tp_len) +} +func (w *v3wrapper) getIfaceIndex() int { + ll := (*C.struct_sockaddr_ll)(unsafe.Pointer(uintptr(unsafe.Pointer(w.packet)) + uintptr(tpAlign(int(C.sizeof_struct_tpacket3_hdr))))) + return int(ll.sll_ifindex) +} +func (w *v3wrapper) next() bool { + w.used++ + if w.used >= w.blockhdr.num_pkts { + return false + } + + next := uintptr(unsafe.Pointer(w.packet)) + if w.packet.tp_next_offset != 0 { + next += uintptr(w.packet.tp_next_offset) + } else { + next += uintptr(tpAlign(int(w.packet.tp_snaplen) + int(w.packet.tp_mac))) + } + w.packet = (*C.struct_tpacket3_hdr)(unsafe.Pointer(next)) + return true +} diff --git 
a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/options.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/options.go new file mode 100644 index 00000000..3e305c4d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/options.go @@ -0,0 +1,188 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// +build linux + +package afpacket + +import ( + "errors" + "fmt" + "time" + + "golang.org/x/sys/unix" +) + +// OptTPacketVersion is the version of TPacket to use. +// It can be passed into NewTPacket. +type OptTPacketVersion int + +// String returns a string representation of the version, generally of the form V#. +func (t OptTPacketVersion) String() string { + switch t { + case TPacketVersion1: + return "V1" + case TPacketVersion2: + return "V2" + case TPacketVersion3: + return "V3" + case TPacketVersionHighestAvailable: + return "HighestAvailable" + } + return "InvalidVersion" +} + +// OptSocketType is the socket type used to open the TPacket socket. +type OptSocketType int + +func (t OptSocketType) String() string { + switch t { + case SocketRaw: + return "SOCK_RAW" + case SocketDgram: + return "SOCK_DGRAM" + } + return "UnknownSocketType" +} + +// TPacket version numbers for use with NewHandle. +const ( + // TPacketVersionHighestAvailable tells NewHandle to use the highest available version of tpacket the kernel has available. + // This is the default, should a version number not be given in NewHandle's options. + TPacketVersionHighestAvailable = OptTPacketVersion(-1) + TPacketVersion1 = OptTPacketVersion(unix.TPACKET_V1) + TPacketVersion2 = OptTPacketVersion(unix.TPACKET_V2) + TPacketVersion3 = OptTPacketVersion(unix.TPACKET_V3) + tpacketVersionMax = TPacketVersion3 + tpacketVersionMin = -1 + // SocketRaw is the default socket type. It returns packet data + // including the link layer (ethernet headers, etc). + SocketRaw = OptSocketType(unix.SOCK_RAW) + // SocketDgram strips off the link layer when reading packets, and adds + // the link layer back automatically on packet writes (coming soon...) + SocketDgram = OptSocketType(unix.SOCK_DGRAM) +) + +// OptInterface is the specific interface to bind to. +// It can be passed into NewTPacket. +type OptInterface string + +// OptFrameSize is TPacket's tp_frame_size +// It can be passed into NewTPacket. +type OptFrameSize int + +// OptBlockSize is TPacket's tp_block_size +// It can be passed into NewTPacket. +type OptBlockSize int + +// OptNumBlocks is TPacket's tp_block_nr +// It can be passed into NewTPacket. +type OptNumBlocks int + +// OptBlockTimeout is TPacket v3's tp_retire_blk_tov. Note that it has only millisecond granularity, so must be >= 1 ms. +// It can be passed into NewTPacket. +type OptBlockTimeout time.Duration + +// OptPollTimeout is the number of milliseconds that poll() should block waiting for a file +// descriptor to become ready. Specifying a negative value in time‐out means an infinite timeout. +type OptPollTimeout time.Duration + +// OptAddVLANHeader modifies the packet data that comes back from the +// kernel by adding in the VLAN header that the NIC stripped. AF_PACKET by +// default uses VLAN offloading, in which the NIC strips the VLAN header off of +// the packet before handing it to the kernel. 
This means that, even if a +// packet has an 802.1q header on the wire, it'll show up without one by the +// time it goes through AF_PACKET. If this option is true, the VLAN header is +// added back in before the packet is returned. Note that this potentially has +// a large performance hit, especially in otherwise zero-copy operation. +// +// Note that if you do not need to have a "real" VLAN layer, it may be +// preferable to use the VLAN ID provided by the AncillaryVLAN struct +// in CaptureInfo.AncillaryData, which is populated out-of-band and has +// negligible performance impact. Such ancillary data will automatically +// be provided if available. +type OptAddVLANHeader bool + +// Default constants used by options. +const ( + DefaultFrameSize = 4096 // Default value for OptFrameSize. + DefaultBlockSize = DefaultFrameSize * 128 // Default value for OptBlockSize. + DefaultNumBlocks = 128 // Default value for OptNumBlocks. + DefaultBlockTimeout = 64 * time.Millisecond // Default value for OptBlockTimeout. + DefaultPollTimeout = -1 * time.Millisecond // Default value for OptPollTimeout. This blocks forever. +) + +type options struct { + frameSize int + framesPerBlock int + blockSize int + numBlocks int + addVLANHeader bool + blockTimeout time.Duration + pollTimeout time.Duration + version OptTPacketVersion + socktype OptSocketType + iface string +} + +var defaultOpts = options{ + frameSize: DefaultFrameSize, + blockSize: DefaultBlockSize, + numBlocks: DefaultNumBlocks, + blockTimeout: DefaultBlockTimeout, + pollTimeout: DefaultPollTimeout, + version: TPacketVersionHighestAvailable, + socktype: SocketRaw, +} + +func parseOptions(opts ...interface{}) (ret options, err error) { + ret = defaultOpts + for _, opt := range opts { + switch v := opt.(type) { + case OptFrameSize: + ret.frameSize = int(v) + case OptBlockSize: + ret.blockSize = int(v) + case OptNumBlocks: + ret.numBlocks = int(v) + case OptBlockTimeout: + ret.blockTimeout = time.Duration(v) + case OptPollTimeout: + ret.pollTimeout = time.Duration(v) + case OptTPacketVersion: + ret.version = v + case OptInterface: + ret.iface = string(v) + case OptSocketType: + ret.socktype = v + case OptAddVLANHeader: + ret.addVLANHeader = bool(v) + default: + err = errors.New("unknown type in options") + return + } + } + if err = ret.check(); err != nil { + return + } + ret.framesPerBlock = ret.blockSize / ret.frameSize + return +} +func (o options) check() error { + switch { + case o.blockSize%pageSize != 0: + return fmt.Errorf("block size %d must be divisible by page size %d", o.blockSize, pageSize) + case o.blockSize%o.frameSize != 0: + return fmt.Errorf("block size %d must be divisible by frame size %d", o.blockSize, o.frameSize) + case o.numBlocks < 1: + return fmt.Errorf("num blocks %d must be >= 1", o.numBlocks) + case o.blockTimeout < time.Millisecond: + return fmt.Errorf("block timeout %v must be > %v", o.blockTimeout, time.Millisecond) + case o.version < tpacketVersionMin || o.version > tpacketVersionMax: + return fmt.Errorf("tpacket version %v is invalid", o.version) + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/sockopt_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/sockopt_linux.go new file mode 100644 index 00000000..ab35f499 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/afpacket/sockopt_linux.go @@ -0,0 +1,52 @@ +// Copyright 2012 Google, Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// +build linux + +package afpacket + +import ( + "errors" + "syscall" + "unsafe" + + "golang.org/x/sys/unix" +) + +const maxOptSize = 8192 + +var errSockoptTooBig = errors.New("socket option too big") + +// setsockopt provides access to the setsockopt syscall. +func setsockopt(fd, level, name int, val unsafe.Pointer, vallen uintptr) error { + if vallen > maxOptSize { + return errSockoptTooBig + } + slice := (*[maxOptSize]byte)(val)[:] + return syscall.SetsockoptString(fd, level, name, string(slice[:vallen])) +} + +// getsockopt provides access to the getsockopt syscall. +func getsockopt(fd, level, name int, val unsafe.Pointer, vallen *uintptr) error { + s, err := unix.GetsockoptString(fd, level, name) + if err != nil { + return err + } + rcvLen := uintptr(len(s)) + if rcvLen > *vallen { + return errSockoptTooBig + } + copy((*[maxOptSize]byte)(val)[:rcvLen], s) + *vallen = rcvLen + return nil +} + +// htons converts a short (uint16) from host-to-network byte order. +// Thanks to mikioh for this neat trick: +// https://github.com/mikioh/-stdyng/blob/master/afpacket.go +func htons(i uint16) uint16 { + return (i<<8)&0xff00 | i>>8 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/base.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/base.go new file mode 100644 index 00000000..91e150c2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/base.go @@ -0,0 +1,178 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" +) + +// Layer represents a single decoded packet layer (using either the +// OSI or TCP/IP definition of a layer). When decoding, a packet's data is +// broken up into a number of layers. The caller may call LayerType() to +// figure out which type of layer they've received from the packet. Optionally, +// they may then use a type assertion to get the actual layer type for deep +// inspection of the data. +type Layer interface { + // LayerType is the gopacket type for this layer. + LayerType() LayerType + // LayerContents returns the set of bytes that make up this layer. + LayerContents() []byte + // LayerPayload returns the set of bytes contained within this layer, not + // including the layer itself. + LayerPayload() []byte +} + +// Payload is a Layer containing the payload of a packet. The definition of +// what constitutes the payload of a packet depends on previous layers; for +// TCP and UDP, we stop decoding above layer 4 and return the remaining +// bytes as a Payload. Payload is an ApplicationLayer. +type Payload []byte + +// LayerType returns LayerTypePayload +func (p Payload) LayerType() LayerType { return LayerTypePayload } + +// LayerContents returns the bytes making up this layer. +func (p Payload) LayerContents() []byte { return []byte(p) } + +// LayerPayload returns the payload within this layer. +func (p Payload) LayerPayload() []byte { return nil } + +// Payload returns this layer as bytes. +func (p Payload) Payload() []byte { return []byte(p) } + +// String implements fmt.Stringer. +func (p Payload) String() string { return fmt.Sprintf("%d byte(s)", len(p)) } + +// GoString implements fmt.GoStringer. 
+func (p Payload) GoString() string { return LongBytesGoString([]byte(p)) } + +// CanDecode implements DecodingLayer. +func (p Payload) CanDecode() LayerClass { return LayerTypePayload } + +// NextLayerType implements DecodingLayer. +func (p Payload) NextLayerType() LayerType { return LayerTypeZero } + +// DecodeFromBytes implements DecodingLayer. +func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error { + *p = Payload(data) + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error { + bytes, err := b.PrependBytes(len(p)) + if err != nil { + return err + } + copy(bytes, p) + return nil +} + +// decodePayload decodes data by returning it all in a Payload layer. +func decodePayload(data []byte, p PacketBuilder) error { + payload := &Payload{} + if err := payload.DecodeFromBytes(data, p); err != nil { + return err + } + p.AddLayer(payload) + p.SetApplicationLayer(payload) + return nil +} + +// Fragment is a Layer containing a fragment of a larger frame, used by layers +// like IPv4 and IPv6 that allow for fragmentation of their payloads. +type Fragment []byte + +// LayerType returns LayerTypeFragment +func (p *Fragment) LayerType() LayerType { return LayerTypeFragment } + +// LayerContents implements Layer. +func (p *Fragment) LayerContents() []byte { return []byte(*p) } + +// LayerPayload implements Layer. +func (p *Fragment) LayerPayload() []byte { return nil } + +// Payload returns this layer as a byte slice. +func (p *Fragment) Payload() []byte { return []byte(*p) } + +// String implements fmt.Stringer. +func (p *Fragment) String() string { return fmt.Sprintf("%d byte(s)", len(*p)) } + +// CanDecode implements DecodingLayer. +func (p *Fragment) CanDecode() LayerClass { return LayerTypeFragment } + +// NextLayerType implements DecodingLayer. +func (p *Fragment) NextLayerType() LayerType { return LayerTypeZero } + +// DecodeFromBytes implements DecodingLayer. +func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error { + *p = Fragment(data) + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error { + bytes, err := b.PrependBytes(len(*p)) + if err != nil { + return err + } + copy(bytes, *p) + return nil +} + +// decodeFragment decodes data by returning it all in a Fragment layer. +func decodeFragment(data []byte, p PacketBuilder) error { + payload := &Fragment{} + if err := payload.DecodeFromBytes(data, p); err != nil { + return err + } + p.AddLayer(payload) + p.SetApplicationLayer(payload) + return nil +} + +// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their +// corresponding OSI layers, as best as possible. 
+ +// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2) +type LinkLayer interface { + Layer + LinkFlow() Flow +} + +// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI +// layer 3) +type NetworkLayer interface { + Layer + NetworkFlow() Flow +} + +// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI +// layer 4) +type TransportLayer interface { + Layer + TransportFlow() Flow +} + +// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI +// layer 7), also known as the packet payload. +type ApplicationLayer interface { + Layer + Payload() []byte +} + +// ErrorLayer is a packet layer created when decoding of the packet has failed. +// Its payload is all the bytes that we were unable to decode, and the returned +// error details why the decoding failed. +type ErrorLayer interface { + Layer + Error() error +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/decode.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/decode.go new file mode 100644 index 00000000..2633f848 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/decode.go @@ -0,0 +1,157 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "errors" +) + +// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata. +type DecodeFeedback interface { + // SetTruncated should be called if during decoding you notice that a packet + // is shorter than internal layer variables (HeaderLength, or the like) say it + // should be. It sets packet.Metadata().Truncated. + SetTruncated() +} + +type nilDecodeFeedback struct{} + +func (nilDecodeFeedback) SetTruncated() {} + +// NilDecodeFeedback implements DecodeFeedback by doing nothing. +var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{} + +// PacketBuilder is used by layer decoders to store the layers they've decoded, +// and to defer future decoding via NextDecoder. +// Typically, the pattern for use is: +// func (m *myDecoder) Decode(data []byte, p PacketBuilder) error { +// if myLayer, err := myDecodingLogic(data); err != nil { +// return err +// } else { +// p.AddLayer(myLayer) +// } +// // maybe do this, if myLayer is a LinkLayer +// p.SetLinkLayer(myLayer) +// return p.NextDecoder(nextDecoder) +// } +type PacketBuilder interface { + DecodeFeedback + // AddLayer should be called by a decoder immediately upon successful + // decoding of a layer. + AddLayer(l Layer) + // The following functions set the various specific layers in the final + // packet. Note that if many layers call SetX, the first call is kept and all + // other calls are ignored. + SetLinkLayer(LinkLayer) + SetNetworkLayer(NetworkLayer) + SetTransportLayer(TransportLayer) + SetApplicationLayer(ApplicationLayer) + SetErrorLayer(ErrorLayer) + // NextDecoder should be called by a decoder when they're done decoding a + // packet layer but not done with decoding the entire packet. The next + // decoder will be called to decode the last AddLayer's LayerPayload. + // Because of this, NextDecoder must only be called once all other + // PacketBuilder calls have been made. Set*Layer and AddLayer calls after + // NextDecoder calls will behave incorrectly. + NextDecoder(next Decoder) error + // DumpPacketData is used solely for decoding. 
If you come across an error + // you need to diagnose while processing a packet, call this and your packet's + // data will be dumped to stderr so you can create a test. This should never + // be called from a production decoder. + DumpPacketData() + // DecodeOptions returns the decode options + DecodeOptions() *DecodeOptions +} + +// Decoder is an interface for logic to decode a packet layer. Users may +// implement a Decoder to handle their own strange packet types, or may use one +// of the many decoders available in the 'layers' subpackage to decode things +// for them. +type Decoder interface { + // Decode decodes the bytes of a packet, sending decoded values and other + // information to PacketBuilder, and returning an error if unsuccessful. See + // the PacketBuilder documentation for more details. + Decode([]byte, PacketBuilder) error +} + +// DecodeFunc wraps a function to make it a Decoder. +type DecodeFunc func([]byte, PacketBuilder) error + +// Decode implements Decoder by calling itself. +func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error { + // function, call thyself. + return d(data, p) +} + +// DecodePayload is a Decoder that returns a Payload layer containing all +// remaining bytes. +var DecodePayload Decoder = DecodeFunc(decodePayload) + +// DecodeUnknown is a Decoder that returns an Unknown layer containing all +// remaining bytes, useful if you run up against a layer that you're unable to +// decode yet. This layer is considered an ErrorLayer. +var DecodeUnknown Decoder = DecodeFunc(decodeUnknown) + +// DecodeFragment is a Decoder that returns a Fragment layer containing all +// remaining bytes. +var DecodeFragment Decoder = DecodeFunc(decodeFragment) + +// LayerTypeZero is an invalid layer type, but can be used to determine whether +// layer type has actually been set correctly. +var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown}) + +// LayerTypeDecodeFailure is the layer type for the default error layer. +var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown}) + +// LayerTypePayload is the layer type for a payload that we don't try to decode +// but treat as a success, IE: an application-level payload. +var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload}) + +// LayerTypeFragment is the layer type for a fragment of a layer transported +// by an underlying layer that supports fragmentation. +var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment}) + +// DecodeFailure is a packet layer created if decoding of the packet data failed +// for some reason. It implements ErrorLayer. LayerContents will be the entire +// set of bytes that failed to parse, and Error will return the reason parsing +// failed. +type DecodeFailure struct { + data []byte + err error + stack []byte +} + +// Error returns the error encountered during decoding. +func (d *DecodeFailure) Error() error { return d.err } + +// LayerContents implements Layer. +func (d *DecodeFailure) LayerContents() []byte { return d.data } + +// LayerPayload implements Layer. +func (d *DecodeFailure) LayerPayload() []byte { return nil } + +// String implements fmt.Stringer. +func (d *DecodeFailure) String() string { + return "Packet decoding error: " + d.Error().Error() +} + +// Dump implements Dumper. 
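+// It returns the stack trace recorded when the decode failure was created,
+// if one was stored.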
+func (d *DecodeFailure) Dump() (s string) {
+	if d.stack != nil {
+		s = string(d.stack)
+	}
+	return
+}
+
+// LayerType returns LayerTypeDecodeFailure
+func (d *DecodeFailure) LayerType() LayerType { return LayerTypeDecodeFailure }
+
+// decodeUnknown "decodes" unsupported data types by returning an error.
+// This decoder will thus always return a DecodeFailure layer.
+func decodeUnknown(data []byte, p PacketBuilder) error {
+	return errors.New("Layer type not currently supported")
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/doc.go
new file mode 100644
index 00000000..b46e43df
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/doc.go
@@ -0,0 +1,432 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package gopacket provides packet decoding for the Go language.
+
+gopacket contains many sub-packages with additional functionality you may find
+useful, including:
+
+ * layers: You'll probably use this every time. This contains the logic
+     built into gopacket for decoding packet protocols. Note that all example
+     code below assumes that you have imported both gopacket and
+     gopacket/layers.
+ * pcap: C bindings to use libpcap to read packets off the wire.
+ * pfring: C bindings to use PF_RING to read packets off the wire.
+ * afpacket: C bindings for Linux's AF_PACKET to read packets off the wire.
+ * tcpassembly: TCP stream reassembly
+
+Also, if you're looking to dive right into code, see the examples subdirectory
+for numerous simple binaries built using gopacket libraries.
+
+Minimum go version required is 1.5 except for pcapgo/EthernetHandle, afpacket,
+and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Basic Usage
+
+gopacket takes in packet data as a []byte and decodes it into a packet with
+a non-zero number of "layers". Each layer corresponds to a protocol
+within the bytes. Once a packet has been decoded, the layers of the packet
+can be requested from the packet.
+
+ // Decode a packet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ // Get the TCP layer from this packet
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+   fmt.Println("This is a TCP packet!")
+   // Get actual TCP data from this layer
+   tcp, _ := tcpLayer.(*layers.TCP)
+   fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort)
+ }
+ // Iterate over all layers, printing out each layer type
+ for _, layer := range packet.Layers() {
+   fmt.Println("PACKET LAYER:", layer.LayerType())
+ }
+
+Packets can be decoded from a number of starting points. Many of our base
+types implement Decoder, which allow us to decode packets for which
+we don't have full data.
+
+ // Decode an ethernet packet
+ ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default)
+ // Decode an IPv6 header and everything it contains
+ ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default)
+ // Decode a TCP header and its payload
+ tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default)
+
+
+Reading Packets From A Source
+
+Most of the time, you won't just have a []byte of packet data lying around.
+Instead, you'll want to read packets in from somewhere (file, interface, etc)
+and process them. To do that, you'll want to build a PacketSource.
+
+First, you'll need to construct an object that implements the PacketDataSource
+interface. There are implementations of this interface bundled with gopacket
+in the gopacket/pcap and gopacket/pfring subpackages... see their documentation
+for more information on their usage. Once you have a PacketDataSource, you can
+pass it into NewPacketSource, along with a Decoder of your choice, to create
+a PacketSource.
+
+Once you have a PacketSource, you can read packets from it in multiple ways.
+See the docs for PacketSource for more details. The easiest method is the
+Packets function, which returns a channel, then asynchronously writes new
+packets into that channel, closing the channel if the packetSource hits an
+end-of-file.
+
+  packetSource := ... // construct using pcap or pfring
+  for packet := range packetSource.Packets() {
+    handlePacket(packet) // do something with each packet
+  }
+
+You can change the decoding options of the packetSource by setting fields in
+packetSource.DecodeOptions... see the following sections for more details.
+
+
+Lazy Decoding
+
+gopacket optionally decodes packet data lazily, meaning it
+only decodes a packet layer when it needs to handle a function call.
+
+ // Create a packet, but don't actually decode anything yet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Now, decode the packet up to the first IPv4 layer found but no further.
+ // If no IPv4 layer was found, the whole packet will be decoded looking for
+ // it.
+ ip4 := packet.Layer(layers.LayerTypeIPv4)
+ // Decode all layers and return them. The layers up to the first IPv4 layer
+ // are already decoded, and will not require decoding a second time.
+ layers := packet.Layers()
+
+Lazily-decoded packets are not concurrency-safe. Since layers have not all been
+decoded, each call to Layer() or Layers() has the potential to mutate the packet
+in order to decode the next layer. If a packet is used
+in multiple goroutines concurrently, don't use gopacket.Lazy. Then gopacket
+will decode the packet fully, and all future function calls won't mutate the
+object.
+
+
+NoCopy Decoding
+
+By default, gopacket will copy the slice passed to NewPacket and store the
+copy within the packet, so future mutations to the bytes underlying the slice
+don't affect the packet and its layers. If you can guarantee that the
+underlying slice bytes won't be changed, you can use NoCopy to tell
+gopacket.NewPacket, and it'll use the passed-in slice itself.
+
+ // This channel returns new byte slices, each of which points to a new
+ // memory location that's guaranteed immutable for the duration of the
+ // packet.
+ for data := range myByteSliceChannel {
+   p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+   doSomethingWithPacket(p)
+ }
+
+The fastest method of decoding is to use both Lazy and NoCopy, but note from
+the many caveats above that for some implementations either or both may be
+dangerous.
+
+
+Pointers To Known Layers
+
+During decoding, certain layers are stored in the packet as well-known
+layer types. For example, IPv4 and IPv6 are both considered NetworkLayer
+layers, while TCP and UDP are both TransportLayer layers. We support 4
+layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly
+analogous to layers 2, 3, 4, and 7 of the OSI model). To access these,
+you can use the packet.LinkLayer, packet.NetworkLayer,
+packet.TransportLayer, and packet.ApplicationLayer functions. Each of
+these functions returns a corresponding interface
+(gopacket.{Link,Network,Transport,Application}Layer). The first three
+provide methods for getting src/dst addresses for that particular layer,
+while the final layer provides a Payload function to get payload data.
+This is helpful, for example, to get payloads for all packets regardless
+of their underlying data type:
+
+ // Get packets from some source
+ for packet := range someSource {
+   if app := packet.ApplicationLayer(); app != nil {
+     if strings.Contains(string(app.Payload()), "magic string") {
+       fmt.Println("Found magic string in a packet!")
+     }
+   }
+ }
+
+A particularly useful layer is ErrorLayer, which is set whenever there's
+an error parsing part of the packet.
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ if err := packet.ErrorLayer(); err != nil {
+   fmt.Println("Error decoding some part of the packet:", err)
+ }
+
+Note that we don't return an error from NewPacket because we may have decoded
+a number of layers successfully before running into our erroneous layer. You
+may still be able to get your Ethernet and IPv4 layers correctly, even if
+your TCP layer is malformed.
+
+
+Flow And Endpoint
+
+gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol
+independent manner the fact that a packet is coming from A and going to B.
+The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide
+methods for extracting their flow information, without worrying about the type
+of the underlying Layer.
+
+A Flow is a simple object made up of a set of two Endpoints, one source and one
+destination. It details the sender and receiver of the Layer of the Packet.
+
+An Endpoint is a hashable representation of a source or destination. For
+example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4
+IP packet. A Flow can be broken into Endpoints, and Endpoints can be combined
+into Flows:
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ netFlow := packet.NetworkLayer().NetworkFlow()
+ src, dst := netFlow.Endpoints()
+ reverseFlow, _ := gopacket.FlowFromEndpoints(dst, src)
+
+Both Endpoint and Flow objects can be used as map keys, and the equality
+operator can compare them, so you can easily group together all packets
+based on endpoint criteria:
+
+ flows := map[gopacket.Endpoint]chan gopacket.Packet{}
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Send all TCP packets to channels based on their destination port.
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+   tcp := tcpLayer.(*layers.TCP)
+   flows[tcp.TransportFlow().Dst()] <- packet
+ }
+ // Look for all packets with the same source and destination network address
+ if net := packet.NetworkLayer(); net != nil {
+   src, dst := net.NetworkFlow().Endpoints()
+   if src == dst {
+     fmt.Printf("Fishy packet has same network source and dst: %s\n", src)
+   }
+ }
+ // Find all packets coming from UDP port 1000 to UDP port 500
+ interestingFlow, _ := gopacket.FlowFromEndpoints(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
+ if t := packet.TransportLayer(); t != nil && t.TransportFlow() == interestingFlow {
+   fmt.Println("Found that UDP flow I was looking for!")
+ }
+
+For load-balancing purposes, both Flow and Endpoint have FastHash() functions,
+which provide quick, non-cryptographic hashes of their contents. Of particular
+importance is the fact that Flow FastHash() is symmetric: A->B will have the same
+hash as B->A. An example usage could be:
+
+ var channels [8]chan gopacket.Packet
+ for i := 0; i < 8; i++ {
+   channels[i] = make(chan gopacket.Packet)
+   go packetHandler(channels[i])
+ }
+ for packet := range getPackets() {
+   if net := packet.NetworkLayer(); net != nil {
+     channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet
+   }
+ }
+
+This allows us to split up a packet stream while still making sure that each
+stream sees all packets for a flow (and its bidirectional opposite).
+
+
+Implementing Your Own Decoder
+
+If your network has some strange encapsulation, you can implement your own
+decoder. In this example, we handle Ethernet packets which are encapsulated
+in a 4-byte header.
+
+ // Create a layer type, should be unique and high, so it doesn't conflict,
+ // giving it a name and a decoder to use.
+ var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)})
+
+ // Implement my layer
+ type MyLayer struct {
+   StrangeHeader []byte
+   payload []byte
+ }
+ func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType }
+ func (m MyLayer) LayerContents() []byte { return m.StrangeHeader }
+ func (m MyLayer) LayerPayload() []byte { return m.payload }
+
+ // Now implement a decoder... this one strips off the first 4 bytes of the
+ // packet.
+ func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error {
+   // Create my layer
+   p.AddLayer(&MyLayer{data[:4], data[4:]})
+   // Determine how to handle the rest of the packet
+   return p.NextDecoder(layers.LayerTypeEthernet)
+ }
+
+ // Finally, decode your packets:
+ p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy)
+
+See the docs for Decoder and PacketBuilder for more details on how coding
+decoders works, or look at RegisterLayerType and RegisterEndpointType to see how
+to add layer/endpoint types to gopacket.
+
+
+Fast Decoding With DecodingLayerParser
+
+TLDR: DecodingLayerParser takes about 10% of the time as NewPacket to decode
+packet data, but only for known packet stacks.
+
+Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow
+due to its need to allocate a new packet and every respective layer. It's very
+versatile and can handle all known layer types, but sometimes you really only
+care about a specific set of layers regardless, so that versatility is wasted.
+
+DecodingLayerParser avoids memory allocation altogether by decoding packet
+layers directly into preallocated objects, which you can then reference to get
+the packet's information. A quick example:
+
+ func main() {
+   var eth layers.Ethernet
+   var ip4 layers.IPv4
+   var ip6 layers.IPv6
+   var tcp layers.TCP
+   parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp)
+   decoded := []gopacket.LayerType{}
+   for packetData := range somehowGetPacketData() {
+     if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+       fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+       continue
+     }
+     for _, layerType := range decoded {
+       switch layerType {
+       case layers.LayerTypeIPv6:
+         fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+       case layers.LayerTypeIPv4:
+         fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+       }
+     }
+   }
+ }
+
+The important thing to note here is that the parser is modifying the passed in
+layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly
+speeding up the decoding process. It's even branching based on layer type...
+it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack. However, it won't
+handle any other type... since no other decoders were passed in, an (eth, ip4,
+udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet,
+LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't
+decode a UDP packet).
+
+Unfortunately, not all layers can be used by DecodingLayerParser... only those
+implementing the DecodingLayer interface are usable. Also, it's possible to
+create DecodingLayers that are not themselves Layers... see
+layers.IPv6ExtensionSkipper for an example of this.
+
+Faster And Customized Decoding with DecodingLayerContainer
+
+By default, DecodingLayerParser uses a native map to store and search for a layer
+to decode. Though versatile, in some cases this solution may not be
+optimal. For example, if you have only a few layers, faster operations may be
+provided by sparse array indexing or a linear array scan.
+
+To accommodate these scenarios, the DecodingLayerContainer interface is introduced
+along with its implementations: DecodingLayerSparse, DecodingLayerArray and
+DecodingLayerMap. You can specify a container implementation to
+DecodingLayerParser with the SetDecodingLayerContainer method. Example:
+
+ dlp := gopacket.NewDecodingLayerParser(LayerTypeEthernet)
+ dlp.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
+ var eth layers.Ethernet
+ dlp.AddDecodingLayer(&eth)
+ // ... add layers and use DecodingLayerParser as usual...
+
+To skip one level of indirection (though sacrificing some capabilities) you may
+also use DecodingLayerContainer as a decoding tool as it is. In this case you have to
+handle unknown layer types and layer panics by yourself. Example:
+
+ func main() {
+   var eth layers.Ethernet
+   var ip4 layers.IPv4
+   var ip6 layers.IPv6
+   var tcp layers.TCP
+   dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerArray(nil))
+   dlc = dlc.Put(&eth)
+   dlc = dlc.Put(&ip4)
+   dlc = dlc.Put(&ip6)
+   dlc = dlc.Put(&tcp)
+   // you may specify some meaningful DecodeFeedback
+   decoder := dlc.LayersDecoder(LayerTypeEthernet, gopacket.NilDecodeFeedback)
+   decoded := make([]gopacket.LayerType, 0, 20)
+   for packetData := range somehowGetPacketData() {
+     lt, err := decoder(packetData, &decoded)
+     if err != nil {
+       fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+       continue
+     }
+     if lt != gopacket.LayerTypeZero {
+       fmt.Fprintf(os.Stderr, "unknown layer type: %v\n", lt)
+       continue
+     }
+     for _, layerType := range decoded {
+       // examine decoded layertypes just as already shown above
+     }
+   }
+ }
+
+DecodingLayerSparse is the fastest, but it is most effective when the LayerType
+values that the layers in use can decode are not large; otherwise it would lead
+to a bigger memory footprint. DecodingLayerArray is very compact and primarily
+usable if the number of decoding layers is not big (up to ~10-15, but please do
+your own benchmarks). DecodingLayerMap is the most versatile one and is used by
+DecodingLayerParser by default. Please refer to tests and benchmarks in the layers
+subpackage to further examine usage examples and performance measurements.
+
+You may also choose to implement your own DecodingLayerContainer if you want to
+make use of your own internal packet decoding logic.
+
+Creating Packet Data
+
+As well as offering the ability to decode packet data, gopacket will allow you
+to create packets from scratch, as well. A number of gopacket layers implement
+the SerializableLayer interface; these layers can be serialized to a []byte in
+the following manner:
+
+ ip := &layers.IPv4{
+   SrcIP: net.IP{1, 2, 3, 4},
+   DstIP: net.IP{5, 6, 7, 8},
+   // etc...
+ }
+ buf := gopacket.NewSerializeBuffer()
+ opts := gopacket.SerializeOptions{} // See SerializeOptions for more details.
+ err := ip.SerializeTo(buf, opts)
+ if err != nil { panic(err) }
+ fmt.Println(buf.Bytes()) // prints out a byte slice containing the serialized IPv4 layer.
+
+SerializeTo PREPENDS the given layer onto the SerializeBuffer, treating the
+current buffer's Bytes() slice as the payload of the serializing layer.
+Therefore, you can serialize an entire packet by serializing a set of layers in
+reverse order (Payload, then TCP, then IP, then Ethernet, for example). The
+SerializeBuffer's SerializeLayers function is a helper that does exactly that.
+
+To generate an (empty and useless, because no fields are set)
+Ethernet(IPv4(TCP(Payload))) packet, for example, you can run:
+
+ buf := gopacket.NewSerializeBuffer()
+ opts := gopacket.SerializeOptions{}
+ gopacket.SerializeLayers(buf, opts,
+   &layers.Ethernet{},
+   &layers.IPv4{},
+   &layers.TCP{},
+   gopacket.Payload([]byte{1, 2, 3, 4}))
+ packetData := buf.Bytes()
+
+A Final Note
+
+If you use gopacket, you'll almost definitely want to make sure gopacket/layers
+is imported, since when imported it sets all the LayerType variables and fills
+in a lot of interesting variables/maps (DecodersByLayerName, etc). Therefore,
+it's recommended that even if you don't use any layers functions directly, you still import with:
+
+ import (
+   _ "github.com/google/gopacket/layers"
+ )
+*/
+package gopacket
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/flows.go
new file mode 100644
index 00000000..a00c8839
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/flows.go
@@ -0,0 +1,236 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// MaxEndpointSize determines the maximum size in bytes of an endpoint address.
+//
+// Endpoints/Flows have a problem: They need to be hashable. Therefore, they
+// can't use a byte slice. The two obvious choices are to use a string or a
+// byte array. Strings work great, but string creation requires memory
+// allocation, which can be slow. Arrays work great, but have a fixed size. We
+// originally used the former, now we've switched to the latter. Use of a fixed
+// byte-array doubles the speed of constructing a flow (due to not needing to
+// allocate). This is a huge increase... too much for us to pass up.
+//
+// The end result of this, though, is that an endpoint/flow can't be created
+// using more than MaxEndpointSize bytes per address.
+const MaxEndpointSize = 16
+
+// Endpoint is the set of bytes used to address packets at various layers.
+// See LinkLayer, NetworkLayer, and TransportLayer specifications.
+// Endpoints are usable as map keys.
+type Endpoint struct {
+	typ EndpointType
+	len int
+	raw [MaxEndpointSize]byte
+}
+
+// EndpointType returns the endpoint type associated with this endpoint.
+func (a Endpoint) EndpointType() EndpointType { return a.typ }
+
+// Raw returns the raw bytes of this endpoint.
These aren't human-readable +// most of the time, but they are faster than calling String. +func (a Endpoint) Raw() []byte { return a.raw[:a.len] } + +// LessThan provides a stable ordering for all endpoints. It sorts first based +// on the EndpointType of an endpoint, then based on the raw bytes of that +// endpoint. +// +// For some endpoints, the actual comparison may not make sense, however this +// ordering does provide useful information for most Endpoint types. +// Ordering is based first on endpoint type, then on raw endpoint bytes. +// Endpoint bytes are sorted lexicographically. +func (a Endpoint) LessThan(b Endpoint) bool { + return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0) +} + +// fnvHash is used by our FastHash functions, and implements the FNV hash +// created by Glenn Fowler, Landon Curt Noll, and Phong Vo. +// See http://isthe.com/chongo/tech/comp/fnv/. +func fnvHash(s []byte) (h uint64) { + h = fnvBasis + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= fnvPrime + } + return +} + +const fnvBasis = 14695981039346656037 +const fnvPrime = 1099511628211 + +// FastHash provides a quick hashing function for an endpoint, useful if you'd +// like to split up endpoints by modulos or other load-balancing techniques. +// It uses a variant of Fowler-Noll-Vo hashing. +// +// The output of FastHash is not guaranteed to remain the same through future +// code revisions, so should not be used to key values in persistent storage. +func (a Endpoint) FastHash() (h uint64) { + h = fnvHash(a.raw[:a.len]) + h ^= uint64(a.typ) + h *= fnvPrime + return +} + +// NewEndpoint creates a new Endpoint object. +// +// The size of raw must be less than MaxEndpointSize, otherwise this function +// will panic. +func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) { + e.len = len(raw) + if e.len > MaxEndpointSize { + panic("raw byte length greater than MaxEndpointSize") + } + e.typ = typ + copy(e.raw[:], raw) + return +} + +// EndpointTypeMetadata is used to register a new endpoint type. +type EndpointTypeMetadata struct { + // Name is the string returned by an EndpointType's String function. + Name string + // Formatter is called from an Endpoint's String function to format the raw + // bytes in an Endpoint into a human-readable string. + Formatter func([]byte) string +} + +// EndpointType is the type of a gopacket Endpoint. This type determines how +// the bytes stored in the endpoint should be interpreted. +type EndpointType int64 + +var endpointTypes = map[EndpointType]EndpointTypeMetadata{} + +// RegisterEndpointType creates a new EndpointType and registers it globally. +// It MUST be passed a unique number, or it will panic. Numbers 0-999 are +// reserved for gopacket's use. +func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType { + t := EndpointType(num) + if _, ok := endpointTypes[t]; ok { + panic("Endpoint type number already in use") + } + endpointTypes[t] = meta + return t +} + +func (e EndpointType) String() string { + if t, ok := endpointTypes[e]; ok { + return t.Name + } + return strconv.Itoa(int(e)) +} + +func (a Endpoint) String() string { + if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil { + return t.Formatter(a.raw[:a.len]) + } + return fmt.Sprintf("%v:%v", a.typ, a.raw) +} + +// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint. +// Flows are usable as map keys. 
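+// Like Endpoint, a Flow stores its two addresses inline in fixed-size
+// arrays, so it is comparable with == and cheap to copy.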
+type Flow struct { + typ EndpointType + slen, dlen int + src, dst [MaxEndpointSize]byte +} + +// FlowFromEndpoints creates a new flow by pasting together two endpoints. +// The endpoints must have the same EndpointType, or this function will return +// an error. +func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) { + if src.typ != dst.typ { + err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ) + return + } + return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil +} + +// FastHash provides a quick hashing function for a flow, useful if you'd +// like to split up flows by modulos or other load-balancing techniques. +// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide +// with its reverse flow. IE: the flow A->B will have the same hash as the flow +// B->A. +// +// The output of FastHash is not guaranteed to remain the same through future +// code revisions, so should not be used to key values in persistent storage. +func (f Flow) FastHash() (h uint64) { + // This combination must be commutative. We don't use ^, since that would + // give the same hash for all A->A flows. + h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen]) + h ^= uint64(f.typ) + h *= fnvPrime + return +} + +// String returns a human-readable representation of this flow, in the form +// "Src->Dst" +func (f Flow) String() string { + s, d := f.Endpoints() + return fmt.Sprintf("%v->%v", s, d) +} + +// EndpointType returns the EndpointType for this Flow. +func (f Flow) EndpointType() EndpointType { + return f.typ +} + +// Endpoints returns the two Endpoints for this flow. +func (f Flow) Endpoints() (src, dst Endpoint) { + return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst} +} + +// Src returns the source Endpoint for this flow. +func (f Flow) Src() (src Endpoint) { + src, _ = f.Endpoints() + return +} + +// Dst returns the destination Endpoint for this flow. +func (f Flow) Dst() (dst Endpoint) { + _, dst = f.Endpoints() + return +} + +// Reverse returns a new flow with endpoints reversed. +func (f Flow) Reverse() Flow { + return Flow{f.typ, f.dlen, f.slen, f.dst, f.src} +} + +// NewFlow creates a new flow. +// +// src and dst must have length <= MaxEndpointSize, otherwise NewFlow will +// panic. +func NewFlow(t EndpointType, src, dst []byte) (f Flow) { + f.slen = len(src) + f.dlen = len(dst) + if f.slen > MaxEndpointSize || f.dlen > MaxEndpointSize { + panic("flow raw byte length greater than MaxEndpointSize") + } + f.typ = t + copy(f.src[:], src) + copy(f.dst[:], dst) + return +} + +// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints +// that are specified incorrectly during creation. +var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string { + return fmt.Sprintf("%v", b) +}}) + +// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid. +var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil) + +// InvalidFlow is a singleton Flow of type EndpointInvalid. +var InvalidFlow = NewFlow(EndpointInvalid, nil, nil) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/gc b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/gc new file mode 100755 index 00000000..b1d8d2e1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/gc @@ -0,0 +1,288 @@ +#!/bin/bash +# Copyright 2012 Google, Inc. All rights reserved. 
+
+# This script provides a simple way to run benchmarks against previous code and
+# keep a log of how benchmarks change over time. When used with the --benchmark
+# flag, it runs benchmarks from the current code and from the last commit run
+# with --benchmark, then stores the results in the git commit description. We
+# rerun the old benchmarks along with the new ones, since there's no guarantee
+# that git commits will happen on the same machine, so machine differences could
+# cause wildly inaccurate results.
+#
+# If you're making changes to 'gopacket' which could cause performance changes,
+# you may be requested to use this commit script to make sure your changes don't
+# have large detrimental effects (or to show off how awesome your performance
+# improvements are).
+#
+# If not run with the --benchmark flag, this script is still very useful... it
+# makes sure all the correct go formatting, building, and testing work as
+# expected.
+
+function Usage {
+  cat <<EOF
+USAGE:  $0 [--benchmark <regexp>] [--root] [--gen] <git commit flags...>
+
+--benchmark: Run benchmark comparisons against last benchmark'd commit
+--root: Run tests that require root privileges
+--gen: Generate code for MACs/ports by pulling down external data
+
+Note, some 'git commit' flags are necessary, if all else fails, pass in -a
+EOF
+  exit 1
+}
+
+BENCH=""
+GEN=""
+ROOT=""
+while [ ! -z "$1" ]; do
+  case "$1" in
+  "--benchmark")
+    BENCH="$2"
+    shift
+    shift
+    ;;
+  "--gen")
+    GEN="yes"
+    shift
+    ;;
+  "--root")
+    ROOT="yes"
+    shift
+    ;;
+  "--help")
+    Usage
+    ;;
+  "-h")
+    Usage
+    ;;
+  "help")
+    Usage
+    ;;
+  *)
+    break
+    ;;
+  esac
+done
+
+function Root {
+  if [ ! -z "$ROOT" ]; then
+    local exec="$1"
+    # Some folks (like me) keep source code in places inaccessible by root (like
+    # NFS), so to make sure things run smoothly we copy them to a /tmp location.
+    local tmpfile="$(mktemp -t gopacket_XXXXXXXX)"
+    echo "Running root test executable $exec as $tmpfile"
+    cp "$exec" "$tmpfile"
+    chmod a+x "$tmpfile"
+    shift
+    sudo "$tmpfile" "$@"
+  fi
+}
+
+if [ "$#" -eq "0" ]; then
+  Usage
+fi
+
+cd $(dirname $0)
+
+# Check for copyright notices.
+for filename in $(find ./ -type f -name '*.go'); do
+  if ! head -n 1 "$filename" | grep -q Copyright; then
+    echo "File '$filename' may not have copyright notice"
+    exit 1
+  fi
+done
+
+set -e
+set -x
+
+if [ ! -z "$ROOT" ]; then
+  echo "Running SUDO to get root privileges for root tests"
+  sudo echo "have root"
+fi
+
+if [ ! -z "$GEN" ]; then
+  pushd macs
+  go run gen.go | gofmt > valid_mac_prefixes.go
+  popd
+  pushd layers
+  go run gen.go | gofmt > iana_ports.go
+  go run gen2.go | gofmt > enums_generated.go
+  popd
+fi
+
+# Make sure everything is formatted, compiles, and tests pass.
+go fmt ./...
+go test -i ./... 2>/dev/null >/dev/null || true
+go test
+go build
+pushd examples/bytediff
+go build
+popd
+if [ -f /usr/include/pcap.h ]; then
+  pushd pcap
+  go test ./...
+  go build ./...
+  go build pcap_tester.go
+  Root pcap_tester --mode=basic
+  Root pcap_tester --mode=filtered
+  Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources"
+  popd
+  pushd examples/afpacket
+  go build
+  popd
+  pushd examples/pcapdump
+  go build
+  popd
+  pushd examples/arpscan
+  go build
+  popd
+  pushd examples/bidirectional
+  go build
+  popd
+  pushd examples/synscan
+  go build
+  popd
+  pushd examples/httpassembly
+  go build
+  popd
+  pushd examples/statsassembly
+  go build
+  popd
+fi
+pushd macs
+go test ./...
+gofmt -w gen.go
+go build gen.go
+popd
+pushd tcpassembly
+go test ./...
+popd
+pushd reassembly
+go test ./...
+popd
+pushd layers
+gofmt -w gen.go
+go build gen.go
+go test ./...
+popd
+pushd pcapgo
+go test ./...
+go build ./...
+popd
+if [ -f /usr/include/linux/if_packet.h ]; then
+  if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then
+    pushd afpacket
+    go build ./...
+    go test ./...
+    popd
+  fi
+fi
+if [ -f /usr/include/pfring.h ]; then
+  pushd pfring
+  go test ./...
+  go build ./...
+  popd
+  pushd examples/pfdump
+  go build
+  popd
+fi
+pushd ip4defrag
+go test ./...
+popd
+pushd defrag
+go test ./...
+popd
+
+for travis_script in `ls .travis.*.sh`; do
+  ./$travis_script
+done
+
+# Run our initial commit
+git commit "$@"
+
+if [ -z "$BENCH" ]; then
+  set +x
+  echo "We're not benchmarking and we've committed... we're done!"
+  exit
+fi
+
+### If we get here, we want to run benchmarks from current commit, and compare
+### them to benchmarks from the last --benchmark commit.
+
+# Get our current branch.
+BRANCH="$(git branch | grep '^*' | awk '{print $2}')"
+
+# File we're going to build our commit description in.
+COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)"
+
+# Add the word "BENCH" to the start of the git commit.
+echo -n "BENCH " > $COMMIT_FILE
+
+# Get the current description... there must be an easier way.
+git log -n 1 | grep '^    ' | sed 's/^    //' >> $COMMIT_FILE
+
+# Get the commit sha for the last benchmark commit
+PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}')
+
+## Run current benchmarks
+
+cat >> $COMMIT_FILE <<EOF
+
+BENCHMARK_MARKER_DO_NOT_CHANGE
+Benchmarks for current commit:
+EOF
+
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+pcap benchmarks for current commit:
+EOF
+if [ -f /usr/include/pcap.h ]; then
+  go test --test.bench="$BENCH" ./pcap 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset to last benchmark commit, run benchmarks
+
+git checkout $PREV
+
+cat >> $COMMIT_FILE <<EOF
+
+Benchmarks for previous benchmarked commit:
+EOF
+
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+pcap benchmarks for previous benchmarked commit:
+EOF
+if [ -f /usr/include/pcap.h ]; then
+  go test --test.bench="$BENCH" ./pcap 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset back to the most recent commit, edit the commit message by appending
+## benchmark results.
+git checkout $BRANCH +git commit --amend -F $COMMIT_FILE diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/go.mod new file mode 100644 index 00000000..99e99f4d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/go.mod @@ -0,0 +1,8 @@ +module github.com/google/gopacket + +go 1.12 + +require ( + golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 + golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/go.sum new file mode 100644 index 00000000..2b289429 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/go.sum @@ -0,0 +1,7 @@ +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998= +golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layerclass.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layerclass.go new file mode 100644 index 00000000..775cd098 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layerclass.go @@ -0,0 +1,107 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +// LayerClass is a set of LayerTypes, used for grabbing one of a number of +// different types from a packet. +type LayerClass interface { + // Contains returns true if the given layer type should be considered part + // of this layer class. + Contains(LayerType) bool + // LayerTypes returns the set of all layer types in this layer class. + // Note that this may not be a fast operation on all LayerClass + // implementations. + LayerTypes() []LayerType +} + +// Contains implements LayerClass. +func (l LayerType) Contains(a LayerType) bool { + return l == a +} + +// LayerTypes implements LayerClass. +func (l LayerType) LayerTypes() []LayerType { + return []LayerType{l} +} + +// LayerClassSlice implements a LayerClass with a slice. +type LayerClassSlice []bool + +// Contains returns true if the given layer type should be considered part +// of this layer class. +func (s LayerClassSlice) Contains(t LayerType) bool { + return int(t) < len(s) && s[t] +} + +// LayerTypes returns all layer types in this LayerClassSlice. +// Because of LayerClassSlice's implementation, this could be quite slow. +func (s LayerClassSlice) LayerTypes() (all []LayerType) { + for i := 0; i < len(s); i++ { + if s[i] { + all = append(all, LayerType(i)) + } + } + return +} + +// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of +// size max(types) and setting slice[t] to true for each type t. 
Note, if +// you implement your own LayerType and give it a high value, this WILL create +// a very large slice. +func NewLayerClassSlice(types []LayerType) LayerClassSlice { + var max LayerType + for _, typ := range types { + if typ > max { + max = typ + } + } + t := make([]bool, int(max+1)) + for _, typ := range types { + t[typ] = true + } + return t +} + +// LayerClassMap implements a LayerClass with a map. +type LayerClassMap map[LayerType]bool + +// Contains returns true if the given layer type should be considered part +// of this layer class. +func (m LayerClassMap) Contains(t LayerType) bool { + return m[t] +} + +// LayerTypes returns all layer types in this LayerClassMap. +func (m LayerClassMap) LayerTypes() (all []LayerType) { + for t := range m { + all = append(all, t) + } + return +} + +// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each +// type in types. +func NewLayerClassMap(types []LayerType) LayerClassMap { + m := LayerClassMap{} + for _, typ := range types { + m[typ] = true + } + return m +} + +// NewLayerClass creates a LayerClass, attempting to be smart about which type +// it creates based on which types are passed in. +func NewLayerClass(types []LayerType) LayerClass { + for _, typ := range types { + if typ > maxLayerType { + // NewLayerClassSlice could create a very large object, so instead create + // a map. + return NewLayerClassMap(types) + } + } + return NewLayerClassSlice(types) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/arp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/arp.go new file mode 100644 index 00000000..0775ac0b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/arp.go @@ -0,0 +1,118 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// Potential values for ARP.Operation. +const ( + ARPRequest = 1 + ARPReply = 2 +) + +// ARP is a ARP packet header. +type ARP struct { + BaseLayer + AddrType LinkType + Protocol EthernetType + HwAddressSize uint8 + ProtAddressSize uint8 + Operation uint16 + SourceHwAddress []byte + SourceProtAddress []byte + DstHwAddress []byte + DstProtAddress []byte +} + +// LayerType returns LayerTypeARP +func (arp *ARP) LayerType() gopacket.LayerType { return LayerTypeARP } + +// DecodeFromBytes decodes the given bytes into this layer. 
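+// The expected wire format is 8 fixed header bytes (hardware and protocol
+// types, address sizes, and operation), followed by the two variable-length
+// hardware/protocol address pairs.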
+func (arp *ARP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return fmt.Errorf("ARP length %d too short", len(data)) + } + arp.AddrType = LinkType(binary.BigEndian.Uint16(data[0:2])) + arp.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4])) + arp.HwAddressSize = data[4] + arp.ProtAddressSize = data[5] + arp.Operation = binary.BigEndian.Uint16(data[6:8]) + arpLength := 8 + 2*arp.HwAddressSize + 2*arp.ProtAddressSize + if len(data) < int(arpLength) { + df.SetTruncated() + return fmt.Errorf("ARP length %d too short, %d expected", len(data), arpLength) + } + arp.SourceHwAddress = data[8 : 8+arp.HwAddressSize] + arp.SourceProtAddress = data[8+arp.HwAddressSize : 8+arp.HwAddressSize+arp.ProtAddressSize] + arp.DstHwAddress = data[8+arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+arp.ProtAddressSize] + arp.DstProtAddress = data[8+2*arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+2*arp.ProtAddressSize] + + arp.Contents = data[:arpLength] + arp.Payload = data[arpLength:] + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (arp *ARP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + size := 8 + len(arp.SourceHwAddress) + len(arp.SourceProtAddress) + len(arp.DstHwAddress) + len(arp.DstProtAddress) + bytes, err := b.PrependBytes(size) + if err != nil { + return err + } + if opts.FixLengths { + if len(arp.SourceHwAddress) != len(arp.DstHwAddress) { + return errors.New("mismatched hardware address sizes") + } + arp.HwAddressSize = uint8(len(arp.SourceHwAddress)) + if len(arp.SourceProtAddress) != len(arp.DstProtAddress) { + return errors.New("mismatched prot address sizes") + } + arp.ProtAddressSize = uint8(len(arp.SourceProtAddress)) + } + binary.BigEndian.PutUint16(bytes, uint16(arp.AddrType)) + binary.BigEndian.PutUint16(bytes[2:], uint16(arp.Protocol)) + bytes[4] = arp.HwAddressSize + bytes[5] = arp.ProtAddressSize + binary.BigEndian.PutUint16(bytes[6:], arp.Operation) + start := 8 + for _, addr := range [][]byte{ + arp.SourceHwAddress, + arp.SourceProtAddress, + arp.DstHwAddress, + arp.DstProtAddress, + } { + copy(bytes[start:], addr) + start += len(addr) + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (arp *ARP) CanDecode() gopacket.LayerClass { + return LayerTypeARP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (arp *ARP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeARP(data []byte, p gopacket.PacketBuilder) error { + + arp := &ARP{} + return decodingLayerDecoder(arp, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/asf.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/asf.go new file mode 100644 index 00000000..d698bd0e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/asf.go @@ -0,0 +1,166 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. 
+ +package layers + +// This file implements the ASF RMCP payload specified in section 3.2.2.3 of +// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +const ( + // ASFRMCPEnterprise is the IANA-assigned Enterprise Number of the ASF-RMCP. + ASFRMCPEnterprise uint32 = 4542 +) + +// ASFDataIdentifier encapsulates fields used to uniquely identify the format of +// the data block. +// +// While the enterprise number is almost always 4542 (ASF-RMCP), we support +// registering layers using structs of this type as a key in case any users are +// using OEM-extensions. +type ASFDataIdentifier struct { + + // Enterprise is the IANA Enterprise Number associated with the entity that + // defines the message type. A list can be found at + // https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers. + // This can be thought of as the namespace for the message type. + Enterprise uint32 + + // Type is the message type, defined by the entity associated with the + // enterprise above. No pressure, but in the context of EN 4542, 1 byte is + // the difference between sending a ping and telling a machine to do an + // unconditional power down (0x80 and 0x12 respectively). + Type uint8 +} + +// LayerType returns the payload layer type corresponding to an ASF message +// type. +func (a ASFDataIdentifier) LayerType() gopacket.LayerType { + if lt := asfDataLayerTypes[a]; lt != 0 { + return lt + } + + // some layer types don't have a payload, e.g. ASF-RMCP Presence Ping. + return gopacket.LayerTypePayload +} + +// RegisterASFLayerType allows specifying that the data block of ASF packets +// with a given enterprise number and type should be processed by a given layer +// type. This overrides any existing registrations, including defaults. +func RegisterASFLayerType(a ASFDataIdentifier, l gopacket.LayerType) { + asfDataLayerTypes[a] = l +} + +var ( + // ASFDataIdentifierPresencePong is the message type of the response to a + // Presence Ping message. It indicates the sender is ASF-RMCP-aware. + ASFDataIdentifierPresencePong = ASFDataIdentifier{ + Enterprise: ASFRMCPEnterprise, + Type: 0x40, + } + + // ASFDataIdentifierPresencePing is a message type sent to a managed client + // to solicit a Presence Pong response. Clients may ignore this if the RMCP + // version is unsupported. Sending this message with a sequence number <255 + // is the recommended way of finding out whether an implementation sends + // RMCP ACKs (e.g. iDRAC does, Super Micro does not). + // + // Systems implementing IPMI must respond to this ping to conform to the + // spec, so it is a good substitute for an ICMP ping. + ASFDataIdentifierPresencePing = ASFDataIdentifier{ + Enterprise: ASFRMCPEnterprise, + Type: 0x80, + } + + // asfDataLayerTypes is used to find the next layer for a given ASF header. + asfDataLayerTypes = map[ASFDataIdentifier]gopacket.LayerType{ + ASFDataIdentifierPresencePong: LayerTypeASFPresencePong, + } +) + +// ASF defines ASF's generic RMCP message Data block format. See section +// 3.2.2.3. +type ASF struct { + BaseLayer + ASFDataIdentifier + + // Tag is used to match request/response pairs. The tag of a response is set + // to that of the message it is responding to. If a message is + // unidirectional, i.e. not part of a request/response pair, this is set to + // 255. + Tag uint8 + + // 1 byte reserved, set to 0x00. + + // Length is the length of this layer's payload in bytes. 
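+	// The 8-byte ASF header itself is not included in this count; SerializeTo
+	// with opts.FixLengths sets it to the length of the payload alone.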
+ Length uint8 +} + +// LayerType returns LayerTypeASF. It partially satisfies Layer and +// SerializableLayer. +func (*ASF) LayerType() gopacket.LayerType { + return LayerTypeASF +} + +// CanDecode returns LayerTypeASF. It partially satisfies DecodingLayer. +func (a *ASF) CanDecode() gopacket.LayerClass { + return a.LayerType() +} + +// DecodeFromBytes makes the layer represent the provided bytes. It partially +// satisfies DecodingLayer. +func (a *ASF) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return fmt.Errorf("invalid ASF data header, length %v less than 8", + len(data)) + } + + a.BaseLayer.Contents = data[:8] + a.BaseLayer.Payload = data[8:] + + a.Enterprise = binary.BigEndian.Uint32(data[:4]) + a.Type = uint8(data[4]) + a.Tag = uint8(data[5]) + // 1 byte reserved + a.Length = uint8(data[7]) + return nil +} + +// NextLayerType returns the layer type corresponding to the message type of +// this ASF data layer. This partially satisfies DecodingLayer. +func (a *ASF) NextLayerType() gopacket.LayerType { + return a.ASFDataIdentifier.LayerType() +} + +// SerializeTo writes the serialized fom of this layer into the SerializeBuffer, +// partially satisfying SerializableLayer. +func (a *ASF) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + payload := b.Bytes() + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + binary.BigEndian.PutUint32(bytes[:4], a.Enterprise) + bytes[4] = uint8(a.Type) + bytes[5] = a.Tag + bytes[6] = 0x00 + if opts.FixLengths { + a.Length = uint8(len(payload)) + } + bytes[7] = a.Length + return nil +} + +// decodeASF decodes the byte slice into an RMCP-ASF data struct. +func decodeASF(data []byte, p gopacket.PacketBuilder) error { + return decodingLayerDecoder(&ASF{}, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/asf_presencepong.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/asf_presencepong.go new file mode 100644 index 00000000..e9a8baf1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/asf_presencepong.go @@ -0,0 +1,194 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. + +package layers + +// This file implements the RMCP ASF Presence Pong message, specified in section +// 3.2.4.3 of +// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf. It +// also contains non-competing elements from IPMI v2.0, specified in section +// 13.2.4 of +// https://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/ipmi-intelligent-platform-mgt-interface-spec-2nd-gen-v2-0-spec-update.pdf. + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +type ( + // ASFEntity is the type of individual entities that a Presence Pong + // response can indicate support of. The entities currently implemented by + // the spec are IPMI and ASFv1. + ASFEntity uint8 + + // ASFInteraction is the type of individual interactions that a Presence + // Pong response can indicate support for. The interactions currently + // implemented by the spec are RMCP security extensions. Although not + // specified, IPMI uses this field to indicate support for DASH, which is + // supported as well. 
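+	//
+	// Each supported interaction is advertised as a single bit; see the
+	// ASFPresencePongInteraction* constants below.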
+ ASFInteraction uint8 +) + +const ( + // ASFDCMIEnterprise is the IANA-assigned Enterprise Number of the Data + // Center Manageability Interface Forum. The Presence Pong response's + // Enterprise field being set to this value indicates support for DCMI. The + // DCMI spec regards the OEM field as reserved, so these should be null. + ASFDCMIEnterprise uint32 = 36465 + + // ASFPresencePongEntityIPMI ANDs with Presence Pong's supported entities + // field if the managed system supports IPMI. + ASFPresencePongEntityIPMI ASFEntity = 1 << 7 + + // ASFPresencePongEntityASFv1 ANDs with Presence Pong's supported entities + // field if the managed system supports ASF v1.0. + ASFPresencePongEntityASFv1 ASFEntity = 1 + + // ASFPresencePongInteractionSecurityExtensions ANDs with Presence Pong's + // supported interactions field if the managed system supports RMCP v2.0 + // security extensions. See section 3.2.3. + ASFPresencePongInteractionSecurityExtensions ASFInteraction = 1 << 7 + + // ASFPresencePongInteractionDASH ANDs with Presence Pong's supported + // interactions field if the managed system supports DMTF DASH. See + // https://www.dmtf.org/standards/dash. + ASFPresencePongInteractionDASH ASFInteraction = 1 << 5 +) + +// ASFPresencePong defines the structure of a Presence Pong message's payload. +// See section 3.2.4.3. +type ASFPresencePong struct { + BaseLayer + + // Enterprise is the IANA Enterprise Number of an entity that has defined + // OEM-specific capabilities for the managed client. If no such capabilities + // exist, this is set to ASF's IANA Enterprise Number. + Enterprise uint32 + + // OEM identifies OEM-specific capabilities. Its structure is defined by the + // OEM. This is set to 0s if no OEM-specific capabilities exist. This + // implementation does not change byte order from the wire for this field. + OEM [4]byte + + // We break out entities and interactions into separate booleans as + // discovery is the entire point of this type of message, so we assume they + // are accessed. It also makes gopacket's default layer printing more + // useful. + + // IPMI is true if IPMI is supported by the managed system. There is no + // explicit version in the specification, however given the dates, this is + // assumed to be IPMI v1.0. Support for IPMI is contained in the "supported + // entities" field of the presence pong payload. + IPMI bool + + // ASFv1 indicates support for ASF v1.0. This seems somewhat redundant as + // ASF must be supported in order to receive a response. This is contained + // in the "supported entities" field of the presence pong payload. + ASFv1 bool + + // SecurityExtensions indicates support for RMCP Security Extensions, + // specified in ASF v2.0. This will always be false for v1.x + // implementations. This is contained in the "supported interactions" field + // of the presence pong payload. This field is defined in ASF v1.0, but has + // no useful value. + SecurityExtensions bool + + // DASH is true if DMTF DASH is supported. This is not specified in ASF + // v2.0, but in IPMI v2.0, however the former does not preclude it, so we + // support it. + DASH bool + + // 6 bytes reserved after the entities and interactions fields, set to 0s. +} + +// SupportsDCMI returns whether the Presence Pong message indicates support for +// the Data Center Management Interface, which is an extension of IPMI v2.0. 
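+// Support is indicated by the Enterprise field holding the DCMI Enterprise
+// Number while both the IPMI and ASFv1 entities are advertised.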
+func (a *ASFPresencePong) SupportsDCMI() bool {
+	return a.Enterprise == ASFDCMIEnterprise && a.IPMI && a.ASFv1
+}
+
+// LayerType returns LayerTypeASFPresencePong. It partially satisfies Layer and
+// SerializableLayer.
+func (*ASFPresencePong) LayerType() gopacket.LayerType {
+	return LayerTypeASFPresencePong
+}
+
+// CanDecode returns LayerTypeASFPresencePong. It partially satisfies
+// DecodingLayer.
+func (a *ASFPresencePong) CanDecode() gopacket.LayerClass {
+	return a.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (a *ASFPresencePong) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 16 {
+		df.SetTruncated()
+		return fmt.Errorf("invalid ASF presence pong payload, length %v less than 16",
+			len(data))
+	}
+
+	a.BaseLayer.Contents = data[:16]
+	a.BaseLayer.Payload = data[16:]
+
+	a.Enterprise = binary.BigEndian.Uint32(data[:4])
+	copy(a.OEM[:], data[4:8]) // N.B. no byte order change
+	a.IPMI = data[8]&uint8(ASFPresencePongEntityIPMI) != 0
+	a.ASFv1 = data[8]&uint8(ASFPresencePongEntityASFv1) != 0
+	a.SecurityExtensions = data[9]&uint8(ASFPresencePongInteractionSecurityExtensions) != 0
+	a.DASH = data[9]&uint8(ASFPresencePongInteractionDASH) != 0
+	// ignore remaining 6 bytes; should be set to 0s
+	return nil
+}
+
+// NextLayerType returns LayerTypePayload, as there are no further layers to
+// decode. This partially satisfies DecodingLayer.
+func (a *ASFPresencePong) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (a *ASFPresencePong) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(16)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint32(bytes[:4], a.Enterprise)
+
+	copy(bytes[4:8], a.OEM[:])
+
+	bytes[8] = 0
+	if a.IPMI {
+		bytes[8] |= uint8(ASFPresencePongEntityIPMI)
+	}
+	if a.ASFv1 {
+		bytes[8] |= uint8(ASFPresencePongEntityASFv1)
+	}
+
+	bytes[9] = 0
+	if a.SecurityExtensions {
+		bytes[9] |= uint8(ASFPresencePongInteractionSecurityExtensions)
+	}
+	if a.DASH {
+		bytes[9] |= uint8(ASFPresencePongInteractionDASH)
+	}
+
+	// zero-out remaining 6 bytes
+	for i := 10; i < len(bytes); i++ {
+		bytes[i] = 0x00
+	}
+
+	return nil
+}
+
+// decodeASFPresencePong decodes the byte slice into an RMCP-ASF Presence Pong
+// struct.
+func decodeASFPresencePong(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&ASFPresencePong{}, data, p)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/base.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/base.go
new file mode 100644
index 00000000..cd59b467
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/base.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// BaseLayer is a convenience struct which implements the LayerData and
+// LayerPayload functions of the Layer interface.
+type BaseLayer struct {
+	// Contents is the set of bytes that make up this layer. E.g., for an
+	// Ethernet packet, this would be the set of bytes making up the
+	// Ethernet frame.
+	Contents []byte
+	// Payload is the set of bytes contained by (but not part of) this
+	// Layer. Again, to take Ethernet as an example, this would be the
+	// set of bytes encapsulated by the Ethernet protocol.
+	Payload []byte
+}
+
+// LayerContents returns the bytes of the packet layer.
+func (b *BaseLayer) LayerContents() []byte { return b.Contents }
+
+// LayerPayload returns the bytes contained within the packet layer.
+func (b *BaseLayer) LayerPayload() []byte { return b.Payload }
+
+type layerDecodingLayer interface {
+	gopacket.Layer
+	DecodeFromBytes([]byte, gopacket.DecodeFeedback) error
+	NextLayerType() gopacket.LayerType
+}
+
+func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error {
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	next := d.NextLayerType()
+	if next == gopacket.LayerTypeZero {
+		return nil
+	}
+	return p.NextDecoder(next)
+}
+
+// hacky way to zero out memory... there must be a better way?
+var lotsOfZeros [1024]byte
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/bfd.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/bfd.go
new file mode 100644
index 00000000..43030fb6
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/bfd.go
@@ -0,0 +1,481 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// BFD Control Packet Format
+// -------------------------
+// The current version of BFD's RFC (RFC 5880) contains the following
+// diagram for the BFD Control packet format:
+//
+//     0                   1                   2                   3
+//     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |Vers |  Diag   |Sta|P|F|C|A|D|M|  Detect Mult  |    Length     |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                       My Discriminator                        |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                      Your Discriminator                       |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                    Desired Min TX Interval                    |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                   Required Min RX Interval                    |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                 Required Min Echo RX Interval                 |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// An optional Authentication Section MAY be present:
+//
+//     0                   1                   2                   3
+//     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |   Auth Type   |   Auth Len    |    Authentication Data...     |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Simple Password Authentication Section Format
+// ---------------------------------------------
+//     0                   1                   2                   3
+//     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |   Auth Type   |   Auth Len    |  Auth Key ID  |  Password...  |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                              ...                              |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Keyed MD5 and Meticulous Keyed MD5 Authentication Section Format
+// ----------------------------------------------------------------
+//     0                   1                   2                   3
+//     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |   Auth Type   |   Auth Len    |  Auth Key ID  |   Reserved    |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                        Sequence Number                        |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                      Auth Key/Digest...                       |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                              ...                              |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+// Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section Format
+// ------------------------------------------------------------------
+//     0                   1                   2                   3
+//     0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |   Auth Type   |   Auth Len    |  Auth Key ID  |   Reserved    |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                        Sequence Number                        |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                       Auth Key/Hash...                        |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//    |                              ...                              |
+//    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// From https://tools.ietf.org/rfc/rfc5880.txt
+const bfdMinimumRecordSizeInBytes int = 24
+
+// BFDVersion represents the version as decoded from the BFD control message
+type BFDVersion uint8
+
+// BFDDiagnostic represents diagnostic information about a BFD session
+type BFDDiagnostic uint8
+
+// constants that define BFDDiagnostic flags
+const (
+	BFDDiagnosticNone               BFDDiagnostic = 0 // No Diagnostic
+	BFDDiagnosticTimeExpired        BFDDiagnostic = 1 // Control Detection Time Expired
+	BFDDiagnosticEchoFailed         BFDDiagnostic = 2 // Echo Function Failed
+	BFDDiagnosticNeighborSignalDown BFDDiagnostic = 3 // Neighbor Signaled Session Down
+	BFDDiagnosticForwardPlaneReset  BFDDiagnostic = 4 // Forwarding Plane Reset
+	BFDDiagnosticPathDown           BFDDiagnostic = 5 // Path Down
+	BFDDiagnosticConcatPathDown     BFDDiagnostic = 6 // Concatenated Path Down
+	BFDDiagnosticAdminDown          BFDDiagnostic = 7 // Administratively Down
+	BFDDiagnosticRevConcatPathDown  BFDDiagnostic = 8 // Reverse Concatenated Path Down
+)
+
+// String returns a string version of BFDDiagnostic
+func (bd BFDDiagnostic) String() string {
+	switch bd {
+	default:
+		return "Unknown"
+	case BFDDiagnosticNone:
+		return "None"
+	case BFDDiagnosticTimeExpired:
+		return "Control Detection Time Expired"
+	case BFDDiagnosticEchoFailed:
+		return "Echo Function Failed"
+	case BFDDiagnosticNeighborSignalDown:
+		return "Neighbor Signaled Session Down"
+	case BFDDiagnosticForwardPlaneReset:
+		return "Forwarding Plane Reset"
+	case BFDDiagnosticPathDown:
+		return "Path Down"
+	case BFDDiagnosticConcatPathDown:
+		return "Concatenated Path Down"
+	case BFDDiagnosticAdminDown:
+		return "Administratively Down"
+	case BFDDiagnosticRevConcatPathDown:
+		return "Reverse Concatenated Path Down"
+	}
+}
+
+// BFDState represents the state of a BFD session
+type BFDState uint8
+
+// constants that define BFDState
+const (
+	BFDStateAdminDown BFDState = 0
+	BFDStateDown      BFDState = 1
+	BFDStateInit      BFDState = 2
+	BFDStateUp        BFDState = 3
+)
+
+// String returns a string version of BFDState
+func (s BFDState) String() string {
+	switch s {
+	default:
+		return "Unknown"
+	case BFDStateAdminDown:
+		return "Admin Down"
+	case BFDStateDown:
+		return "Down"
+	case BFDStateInit:
+		return "Init"
+	case BFDStateUp:
+		return "Up"
+	}
+}
+
+// BFDDetectMultiplier is the multiplier that, applied to the negotiated
+// transmit interval, provides the Detection Time for the receiving system
+// in Asynchronous mode.
+type BFDDetectMultiplier uint8
+
+// BFDDiscriminator is a unique, nonzero discriminator value used
+// to demultiplex multiple BFD sessions between the same pair of systems.
+type BFDDiscriminator uint32
+
+// BFDTimeInterval represents a time interval in microseconds
+type BFDTimeInterval uint32
+
+// BFDAuthType represents the authentication used in the BFD session
+type BFDAuthType uint8
+
+// constants that define the BFDAuthType
+const (
+	BFDAuthTypeNone                BFDAuthType = 0 // No Auth
+	BFDAuthTypePassword            BFDAuthType = 1 // Simple Password
+	BFDAuthTypeKeyedMD5            BFDAuthType = 2 // Keyed MD5
+	BFDAuthTypeMeticulousKeyedMD5  BFDAuthType = 3 // Meticulous Keyed MD5
+	BFDAuthTypeKeyedSHA1           BFDAuthType = 4 // Keyed SHA1
+	BFDAuthTypeMeticulousKeyedSHA1 BFDAuthType = 5 // Meticulous Keyed SHA1
+)
+
+// String returns a string version of BFDAuthType
+func (at BFDAuthType) String() string {
+	switch at {
+	default:
+		return "Unknown"
+	case BFDAuthTypeNone:
+		return "No Authentication"
+	case BFDAuthTypePassword:
+		return "Simple Password"
+	case BFDAuthTypeKeyedMD5:
+		return "Keyed MD5"
+	case BFDAuthTypeMeticulousKeyedMD5:
+		return "Meticulous Keyed MD5"
+	case BFDAuthTypeKeyedSHA1:
+		return "Keyed SHA1"
+	case BFDAuthTypeMeticulousKeyedSHA1:
+		return "Meticulous Keyed SHA1"
+	}
+}
+
+// BFDAuthKeyID represents the authentication key ID in use for
+// this packet. This allows multiple keys to be active simultaneously.
+type BFDAuthKeyID uint8
+
+// BFDAuthSequenceNumber represents the sequence number for this packet.
+// For Keyed Authentication, this value is incremented occasionally. For
+// Meticulous Keyed Authentication, this value is incremented for each
+// successive packet transmitted for a session. This provides protection
+// against replay attacks.
+type BFDAuthSequenceNumber uint32
+
+// BFDAuthData represents the authentication key or digest
+type BFDAuthData []byte
+
+// BFDAuthHeader represents authentication data used in the BFD session
+type BFDAuthHeader struct {
+	AuthType       BFDAuthType
+	KeyID          BFDAuthKeyID
+	SequenceNumber BFDAuthSequenceNumber
+	Data           BFDAuthData
+}
+
+// Length returns the data length of the BFDAuthHeader based on the
+// authentication type
+func (h *BFDAuthHeader) Length() int {
+	switch h.AuthType {
+	case BFDAuthTypePassword:
+		return 3 + len(h.Data)
+	case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+		return 8 + len(h.Data)
+	case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+		return 8 + len(h.Data)
+	default:
+		return 0
+	}
+}
+
+// BFD represents a BFD control message packet whose payload contains
+// the control information required for a BFD session.
+//
+// References
+// ----------
+//
+// Wikipedia's BFD entry:
+// https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection
+// This is the best place to get an overview of BFD.
+//
+// RFC 5880 "Bidirectional Forwarding Detection (BFD)" (2010)
+// https://tools.ietf.org/html/rfc5880
+// This is the original BFD specification.
+//
+// RFC 5881 "Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop)" (2010)
+// https://tools.ietf.org/html/rfc5881
+// Describes the use of the Bidirectional Forwarding Detection (BFD)
+// protocol over IPv4 and IPv6 for single IP hops.
+type BFD struct {
+	BaseLayer // Stores the packet bytes and payload bytes.
+
+	Version                   BFDVersion          // Version of the BFD protocol.
+	Diagnostic                BFDDiagnostic       // Diagnostic code for last state change
+	State                     BFDState            // Current state
+	Poll                      bool                // Requesting verification
+	Final                     bool                // Responding to a received BFD Control packet that had the Poll (P) bit set.
+	ControlPlaneIndependent   bool                // BFD implementation does not share fate with its control plane
+	AuthPresent               bool                // Authentication Section is present and the session is to be authenticated
+	Demand                    bool                // Demand mode is active
+	Multipoint                bool                // For future point-to-multipoint extensions. Must always be zero
+	DetectMultiplier          BFDDetectMultiplier // Detection time multiplier
+	MyDiscriminator           BFDDiscriminator    // A unique, nonzero discriminator value
+	YourDiscriminator         BFDDiscriminator    // discriminator received from the remote system.
+	DesiredMinTxInterval      BFDTimeInterval     // Minimum interval, in microseconds, the local system would like to use when transmitting BFD Control packets
+	RequiredMinRxInterval     BFDTimeInterval     // Minimum interval, in microseconds, between received BFD Control packets that this system is capable of supporting
+	RequiredMinEchoRxInterval BFDTimeInterval     // Minimum interval, in microseconds, between received BFD Echo packets that this system is capable of supporting
+	AuthHeader                *BFDAuthHeader      // Authentication data, variable length.
+}
+
+// Length returns the data length of a BFD Control message which
+// changes based on the presence and type of authentication
+// contained in the message
+func (d *BFD) Length() int {
+	if d.AuthPresent && (d.AuthHeader != nil) {
+		return bfdMinimumRecordSizeInBytes + d.AuthHeader.Length()
+	}
+
+	return bfdMinimumRecordSizeInBytes
+}
+
+// LayerType returns the layer type of the BFD object, which is LayerTypeBFD.
+func (d *BFD) LayerType() gopacket.LayerType {
+	return LayerTypeBFD
+}
+
+// decodeBFD analyses a byte slice and attempts to decode it as a BFD
+// control packet
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non-nil).
+//
+// This function is employed in layertypes.go to register the BFD layer.
+func decodeBFD(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &BFD{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return nil
+}
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a BFD
+// control packet.
+//
+// On success, it loads the BFD object with information about the packet
+// and returns nil.
+// On failure, it returns a non-nil error.
+func (d *BFD) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a BFD record, then return an error.
+	if len(data) < bfdMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("BFD packet too short")
+	}
+
+	pLen := uint8(data[3])
+	if len(data) != int(pLen) {
+		return errors.New("BFD packet length does not match")
+	}
+
+	// BFD type embeds type BaseLayer which contains two fields:
+	// Contents is supposed to contain the bytes of the data at this level.
+	// Payload is supposed to contain the payload of this level.
+	// Here we set the baselayer to be the bytes of the BFD record.
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	// Extract the fields from the block of bytes.
+	// To make sense of this, refer to the packet diagram
+	// above and the section on endian conventions.
+
+	// The first few fields are all packed into the first 32 bits. Unpack them.
+	d.Version = BFDVersion(((data[0] & 0xE0) >> 5))
+	d.Diagnostic = BFDDiagnostic(data[0] & 0x1F)
+	data = data[1:]
+
+	d.State = BFDState((data[0] & 0xC0) >> 6)
+	d.Poll = data[0]&0x20 != 0
+	d.Final = data[0]&0x10 != 0
+	d.ControlPlaneIndependent = data[0]&0x08 != 0
+	d.AuthPresent = data[0]&0x04 != 0
+	d.Demand = data[0]&0x02 != 0
+	d.Multipoint = data[0]&0x01 != 0
+	data = data[1:]
+
+	data, d.DetectMultiplier = data[1:], BFDDetectMultiplier(data[0])
+	data, _ = data[1:], uint8(data[0]) // Consume length
+
+	// The remaining fields can just be copied in big endian order.
+	data, d.MyDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+	data, d.YourDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+	data, d.DesiredMinTxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+	data, d.RequiredMinRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+	data, d.RequiredMinEchoRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+
+	if d.AuthPresent && (len(data) > 2) {
+		d.AuthHeader = &BFDAuthHeader{}
+		data, d.AuthHeader.AuthType = data[1:], BFDAuthType(data[0])
+		data, _ = data[1:], uint8(data[0]) // Consume length
+		data, d.AuthHeader.KeyID = data[1:], BFDAuthKeyID(data[0])
+
+		switch d.AuthHeader.AuthType {
+		case BFDAuthTypePassword:
+			d.AuthHeader.Data = BFDAuthData(data)
+		case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+			// Skipped reserved byte
+			data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+			d.AuthHeader.Data = BFDAuthData(data)
+		case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+			// Skipped reserved byte
+			data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+			d.AuthHeader.Data = BFDAuthData(data)
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *BFD) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(bfdMinimumRecordSizeInBytes)
+	if err != nil {
+		return err
+	}
+
+	// Pack the first few fields into the first 32 bits.
+	data[0] = byte(byte(d.Version<<5) | byte(d.Diagnostic))
+	h := uint8(0)
+	h |= (uint8(d.State) << 6)
+	h |= (uint8(bool2uint8(d.Poll)) << 5)
+	h |= (uint8(bool2uint8(d.Final)) << 4)
+	h |= (uint8(bool2uint8(d.ControlPlaneIndependent)) << 3)
+	h |= (uint8(bool2uint8(d.AuthPresent)) << 2)
+	h |= (uint8(bool2uint8(d.Demand)) << 1)
+	h |= uint8(bool2uint8(d.Multipoint))
+	data[1] = byte(h)
+	data[2] = byte(d.DetectMultiplier)
+	data[3] = byte(d.Length())
+
+	// The remaining fields can just be copied in big endian order.
+	binary.BigEndian.PutUint32(data[4:], uint32(d.MyDiscriminator))
+	binary.BigEndian.PutUint32(data[8:], uint32(d.YourDiscriminator))
+	binary.BigEndian.PutUint32(data[12:], uint32(d.DesiredMinTxInterval))
+	binary.BigEndian.PutUint32(data[16:], uint32(d.RequiredMinRxInterval))
+	binary.BigEndian.PutUint32(data[20:], uint32(d.RequiredMinEchoRxInterval))
+
+	if d.AuthPresent && (d.AuthHeader != nil) {
+		auth, err := b.AppendBytes(int(d.AuthHeader.Length()))
+		if err != nil {
+			return err
+		}
+
+		auth[0] = byte(d.AuthHeader.AuthType)
+		auth[1] = byte(d.AuthHeader.Length())
+		auth[2] = byte(d.AuthHeader.KeyID)
+
+		switch d.AuthHeader.AuthType {
+		case BFDAuthTypePassword:
+			copy(auth[3:], d.AuthHeader.Data)
+		case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+			auth[3] = byte(0)
+			binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+			copy(auth[8:], d.AuthHeader.Data)
+		case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+			auth[3] = byte(0)
+			binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+			copy(auth[8:], d.AuthHeader.Data)
+		}
+	}
+
+	return nil
+}
+
+// CanDecode returns a set of layers that BFD objects can decode.
+// As BFD objects can only decode the BFD layer, we can return just that
+// layer; a single layer type itself implements LayerClass.
+func (d *BFD) CanDecode() gopacket.LayerClass {
+	return LayerTypeBFD
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (BFD) layer. As BFD packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *BFD) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns an empty byte slice as BFD packets do not carry a payload
+func (d *BFD) Payload() []byte {
+	return nil
+}
+
+// bool2uint8 converts a bool to uint8
+func bool2uint8(b bool) uint8 {
+	if b {
+		return 1
+	}
+	return 0
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/cdp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/cdp.go
new file mode 100644
index 00000000..d67203eb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/cdp.go
@@ -0,0 +1,651 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://search.cpan.org/~mchapman/Net-CDP-0.09/lib/Net/CDP.pm
+// https://code.google.com/p/ladvd/
+// http://anonsvn.wireshark.org/viewvc/releases/wireshark-1.8.6/epan/dissectors/packet-cdp.c
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// CDPTLVType is the type of each TLV value in a CiscoDiscovery packet.
+type CDPTLVType uint16
+
+// CDPTLVType values.
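+//
+// As an illustrative usage sketch (not upstream code; frame is assumed to be
+// a captured 802.3 frame carrying CDP over LLC/SNAP), the layers defined in
+// this file are reached through gopacket's normal decoding chain:
+//
+//	pkt := gopacket.NewPacket(frame, LayerTypeEthernet, gopacket.Default)
+//	if l := pkt.Layer(LayerTypeCiscoDiscoveryInfo); l != nil {
+//		info := l.(*CiscoDiscoveryInfo)
+//		fmt.Println(info.DeviceID, info.PortID, info.Platform)
+//	}
+//
+// The TLV type constants follow.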
+const ( + CDPTLVDevID CDPTLVType = 0x0001 + CDPTLVAddress CDPTLVType = 0x0002 + CDPTLVPortID CDPTLVType = 0x0003 + CDPTLVCapabilities CDPTLVType = 0x0004 + CDPTLVVersion CDPTLVType = 0x0005 + CDPTLVPlatform CDPTLVType = 0x0006 + CDPTLVIPPrefix CDPTLVType = 0x0007 + CDPTLVHello CDPTLVType = 0x0008 + CDPTLVVTPDomain CDPTLVType = 0x0009 + CDPTLVNativeVLAN CDPTLVType = 0x000a + CDPTLVFullDuplex CDPTLVType = 0x000b + CDPTLVVLANReply CDPTLVType = 0x000e + CDPTLVVLANQuery CDPTLVType = 0x000f + CDPTLVPower CDPTLVType = 0x0010 + CDPTLVMTU CDPTLVType = 0x0011 + CDPTLVExtendedTrust CDPTLVType = 0x0012 + CDPTLVUntrustedCOS CDPTLVType = 0x0013 + CDPTLVSysName CDPTLVType = 0x0014 + CDPTLVSysOID CDPTLVType = 0x0015 + CDPTLVMgmtAddresses CDPTLVType = 0x0016 + CDPTLVLocation CDPTLVType = 0x0017 + CDPTLVExternalPortID CDPTLVType = 0x0018 + CDPTLVPowerRequested CDPTLVType = 0x0019 + CDPTLVPowerAvailable CDPTLVType = 0x001a + CDPTLVPortUnidirectional CDPTLVType = 0x001b + CDPTLVEnergyWise CDPTLVType = 0x001d + CDPTLVSparePairPOE CDPTLVType = 0x001f +) + +// CiscoDiscoveryValue is a TLV value inside a CiscoDiscovery packet layer. +type CiscoDiscoveryValue struct { + Type CDPTLVType + Length uint16 + Value []byte +} + +// CiscoDiscovery is a packet layer containing the Cisco Discovery Protocol. +// See http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#31885 +type CiscoDiscovery struct { + BaseLayer + Version byte + TTL byte + Checksum uint16 + Values []CiscoDiscoveryValue +} + +// CDPCapability is the set of capabilities advertised by a CDP device. +type CDPCapability uint32 + +// CDPCapability values. +const ( + CDPCapMaskRouter CDPCapability = 0x0001 + CDPCapMaskTBBridge CDPCapability = 0x0002 + CDPCapMaskSPBridge CDPCapability = 0x0004 + CDPCapMaskSwitch CDPCapability = 0x0008 + CDPCapMaskHost CDPCapability = 0x0010 + CDPCapMaskIGMPFilter CDPCapability = 0x0020 + CDPCapMaskRepeater CDPCapability = 0x0040 + CDPCapMaskPhone CDPCapability = 0x0080 + CDPCapMaskRemote CDPCapability = 0x0100 +) + +// CDPCapabilities represents the capabilities of a device +type CDPCapabilities struct { + L3Router bool + TBBridge bool + SPBridge bool + L2Switch bool + IsHost bool + IGMPFilter bool + L1Repeater bool + IsPhone bool + RemotelyManaged bool +} + +// CDP Power-over-Ethernet values. +const ( + CDPPoEFourWire byte = 0x01 + CDPPoEPDArch byte = 0x02 + CDPPoEPDRequest byte = 0x04 + CDPPoEPSE byte = 0x08 +) + +// CDPSparePairPoE provides information on PoE. +type CDPSparePairPoE struct { + PSEFourWire bool // Supported / Not supported + PDArchShared bool // Shared / Independent + PDRequestOn bool // On / Off + PSEOn bool // On / Off +} + +// CDPVLANDialogue encapsulates a VLAN Query/Reply +type CDPVLANDialogue struct { + ID uint8 + VLAN uint16 +} + +// CDPPowerDialogue encapsulates a Power Query/Reply +type CDPPowerDialogue struct { + ID uint16 + MgmtID uint16 + Values []uint32 +} + +// CDPLocation provides location information for a CDP device. +type CDPLocation struct { + Type uint8 // Undocumented + Location string +} + +// CDPHello is a Cisco Hello message (undocumented, hence the "Unknown" fields) +type CDPHello struct { + OUI []byte + ProtocolID uint16 + ClusterMaster net.IP + Unknown1 net.IP + Version byte + SubVersion byte + Status byte + Unknown2 byte + ClusterCommander net.HardwareAddr + SwitchMAC net.HardwareAddr + Unknown3 byte + ManagementVLAN uint16 +} + +// CDPEnergyWiseSubtype is used within CDP to define TLV values. +type CDPEnergyWiseSubtype uint32 + +// CDPEnergyWiseSubtype values. 
+const ( + CDPEnergyWiseRole CDPEnergyWiseSubtype = 0x00000007 + CDPEnergyWiseDomain CDPEnergyWiseSubtype = 0x00000008 + CDPEnergyWiseName CDPEnergyWiseSubtype = 0x00000009 + CDPEnergyWiseReplyTo CDPEnergyWiseSubtype = 0x00000017 +) + +// CDPEnergyWise is used by CDP to monitor and control power usage. +type CDPEnergyWise struct { + EncryptedData []byte + Unknown1 uint32 + SequenceNumber uint32 + ModelNumber string + Unknown2 uint16 + HardwareID string + SerialNum string + Unknown3 []byte + Role string + Domain string + Name string + ReplyUnknown1 []byte + ReplyPort []byte + ReplyAddress []byte + ReplyUnknown2 []byte + ReplyUnknown3 []byte +} + +// CiscoDiscoveryInfo represents the decoded details for a set of CiscoDiscoveryValues +type CiscoDiscoveryInfo struct { + BaseLayer + CDPHello + DeviceID string + Addresses []net.IP + PortID string + Capabilities CDPCapabilities + Version string + Platform string + IPPrefixes []net.IPNet + VTPDomain string + NativeVLAN uint16 + FullDuplex bool + VLANReply CDPVLANDialogue + VLANQuery CDPVLANDialogue + PowerConsumption uint16 + MTU uint32 + ExtendedTrust uint8 + UntrustedCOS uint8 + SysName string + SysOID string + MgmtAddresses []net.IP + Location CDPLocation + PowerRequest CDPPowerDialogue + PowerAvailable CDPPowerDialogue + SparePairPoe CDPSparePairPoE + EnergyWise CDPEnergyWise + Unknown []CiscoDiscoveryValue +} + +// LayerType returns gopacket.LayerTypeCiscoDiscovery. +func (c *CiscoDiscovery) LayerType() gopacket.LayerType { + return LayerTypeCiscoDiscovery +} + +func decodeCiscoDiscovery(data []byte, p gopacket.PacketBuilder) error { + c := &CiscoDiscovery{ + Version: data[0], + TTL: data[1], + Checksum: binary.BigEndian.Uint16(data[2:4]), + } + if c.Version != 1 && c.Version != 2 { + return fmt.Errorf("Invalid CiscoDiscovery version number %d", c.Version) + } + var err error + c.Values, err = decodeCiscoDiscoveryTLVs(data[4:]) + if err != nil { + return err + } + c.Contents = data[0:4] + c.Payload = data[4:] + p.AddLayer(c) + return p.NextDecoder(gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)) +} + +// LayerType returns gopacket.LayerTypeCiscoDiscoveryInfo. +func (c *CiscoDiscoveryInfo) LayerType() gopacket.LayerType { + return LayerTypeCiscoDiscoveryInfo +} + +func decodeCiscoDiscoveryTLVs(data []byte) (values []CiscoDiscoveryValue, err error) { + for len(data) > 0 { + val := CiscoDiscoveryValue{ + Type: CDPTLVType(binary.BigEndian.Uint16(data[:2])), + Length: binary.BigEndian.Uint16(data[2:4]), + } + if val.Length < 4 { + err = fmt.Errorf("Invalid CiscoDiscovery value length %d", val.Length) + break + } + val.Value = data[4:val.Length] + values = append(values, val) + data = data[val.Length:] + } + return +} + +func decodeCiscoDiscoveryInfo(data []byte, p gopacket.PacketBuilder) error { + var err error + info := &CiscoDiscoveryInfo{BaseLayer: BaseLayer{Contents: data}} + p.AddLayer(info) + values, err := decodeCiscoDiscoveryTLVs(data) + if err != nil { // Unlikely, as parent decode will fail, but better safe... 
+ return err + } + for _, val := range values { + switch val.Type { + case CDPTLVDevID: + info.DeviceID = string(val.Value) + case CDPTLVAddress: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.Addresses, err = decodeAddresses(val.Value) + if err != nil { + return err + } + case CDPTLVPortID: + info.PortID = string(val.Value) + case CDPTLVCapabilities: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + val := CDPCapability(binary.BigEndian.Uint32(val.Value[0:4])) + info.Capabilities.L3Router = (val&CDPCapMaskRouter > 0) + info.Capabilities.TBBridge = (val&CDPCapMaskTBBridge > 0) + info.Capabilities.SPBridge = (val&CDPCapMaskSPBridge > 0) + info.Capabilities.L2Switch = (val&CDPCapMaskSwitch > 0) + info.Capabilities.IsHost = (val&CDPCapMaskHost > 0) + info.Capabilities.IGMPFilter = (val&CDPCapMaskIGMPFilter > 0) + info.Capabilities.L1Repeater = (val&CDPCapMaskRepeater > 0) + info.Capabilities.IsPhone = (val&CDPCapMaskPhone > 0) + info.Capabilities.RemotelyManaged = (val&CDPCapMaskRemote > 0) + case CDPTLVVersion: + info.Version = string(val.Value) + case CDPTLVPlatform: + info.Platform = string(val.Value) + case CDPTLVIPPrefix: + v := val.Value + l := len(v) + if l%5 == 0 && l >= 5 { + for len(v) > 0 { + _, ipnet, _ := net.ParseCIDR(fmt.Sprintf("%d.%d.%d.%d/%d", v[0], v[1], v[2], v[3], v[4])) + info.IPPrefixes = append(info.IPPrefixes, *ipnet) + v = v[5:] + } + } else { + return fmt.Errorf("Invalid TLV %v length %d", val.Type, len(val.Value)) + } + case CDPTLVHello: + if err = checkCDPTLVLen(val, 32); err != nil { + return err + } + v := val.Value + info.CDPHello.OUI = v[0:3] + info.CDPHello.ProtocolID = binary.BigEndian.Uint16(v[3:5]) + info.CDPHello.ClusterMaster = v[5:9] + info.CDPHello.Unknown1 = v[9:13] + info.CDPHello.Version = v[13] + info.CDPHello.SubVersion = v[14] + info.CDPHello.Status = v[15] + info.CDPHello.Unknown2 = v[16] + info.CDPHello.ClusterCommander = v[17:23] + info.CDPHello.SwitchMAC = v[23:29] + info.CDPHello.Unknown3 = v[29] + info.CDPHello.ManagementVLAN = binary.BigEndian.Uint16(v[30:32]) + case CDPTLVVTPDomain: + info.VTPDomain = string(val.Value) + case CDPTLVNativeVLAN: + if err = checkCDPTLVLen(val, 2); err != nil { + return err + } + info.NativeVLAN = binary.BigEndian.Uint16(val.Value[0:2]) + case CDPTLVFullDuplex: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + info.FullDuplex = (val.Value[0] == 1) + case CDPTLVVLANReply: + if err = checkCDPTLVLen(val, 3); err != nil { + return err + } + info.VLANReply.ID = uint8(val.Value[0]) + info.VLANReply.VLAN = binary.BigEndian.Uint16(val.Value[1:3]) + case CDPTLVVLANQuery: + if err = checkCDPTLVLen(val, 3); err != nil { + return err + } + info.VLANQuery.ID = uint8(val.Value[0]) + info.VLANQuery.VLAN = binary.BigEndian.Uint16(val.Value[1:3]) + case CDPTLVPower: + if err = checkCDPTLVLen(val, 2); err != nil { + return err + } + info.PowerConsumption = binary.BigEndian.Uint16(val.Value[0:2]) + case CDPTLVMTU: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.MTU = binary.BigEndian.Uint32(val.Value[0:4]) + case CDPTLVExtendedTrust: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + info.ExtendedTrust = uint8(val.Value[0]) + case CDPTLVUntrustedCOS: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + info.UntrustedCOS = uint8(val.Value[0]) + case CDPTLVSysName: + info.SysName = string(val.Value) + case CDPTLVSysOID: + info.SysOID = string(val.Value) + case CDPTLVMgmtAddresses: + if err = checkCDPTLVLen(val, 4); err 
!= nil { + return err + } + info.MgmtAddresses, err = decodeAddresses(val.Value) + if err != nil { + return err + } + case CDPTLVLocation: + if err = checkCDPTLVLen(val, 2); err != nil { + return err + } + info.Location.Type = uint8(val.Value[0]) + info.Location.Location = string(val.Value[1:]) + + // case CDPTLVLExternalPortID: + // Undocumented + case CDPTLVPowerRequested: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.PowerRequest.ID = binary.BigEndian.Uint16(val.Value[0:2]) + info.PowerRequest.MgmtID = binary.BigEndian.Uint16(val.Value[2:4]) + for n := 4; n < len(val.Value); n += 4 { + info.PowerRequest.Values = append(info.PowerRequest.Values, binary.BigEndian.Uint32(val.Value[n:n+4])) + } + case CDPTLVPowerAvailable: + if err = checkCDPTLVLen(val, 4); err != nil { + return err + } + info.PowerAvailable.ID = binary.BigEndian.Uint16(val.Value[0:2]) + info.PowerAvailable.MgmtID = binary.BigEndian.Uint16(val.Value[2:4]) + for n := 4; n < len(val.Value); n += 4 { + info.PowerAvailable.Values = append(info.PowerAvailable.Values, binary.BigEndian.Uint32(val.Value[n:n+4])) + } + // case CDPTLVPortUnidirectional + // Undocumented + case CDPTLVEnergyWise: + if err = checkCDPTLVLen(val, 72); err != nil { + return err + } + info.EnergyWise.EncryptedData = val.Value[0:20] + info.EnergyWise.Unknown1 = binary.BigEndian.Uint32(val.Value[20:24]) + info.EnergyWise.SequenceNumber = binary.BigEndian.Uint32(val.Value[24:28]) + info.EnergyWise.ModelNumber = string(val.Value[28:44]) + info.EnergyWise.Unknown2 = binary.BigEndian.Uint16(val.Value[44:46]) + info.EnergyWise.HardwareID = string(val.Value[46:49]) + info.EnergyWise.SerialNum = string(val.Value[49:60]) + info.EnergyWise.Unknown3 = val.Value[60:68] + tlvLen := binary.BigEndian.Uint16(val.Value[68:70]) + tlvNum := binary.BigEndian.Uint16(val.Value[70:72]) + data := val.Value[72:] + if len(data) < int(tlvLen) { + return fmt.Errorf("Invalid TLV length %d vs %d", tlvLen, len(data)) + } + numSeen := 0 + for len(data) > 8 { + numSeen++ + if numSeen > int(tlvNum) { // Too many TLV's ? + return fmt.Errorf("Too many TLV's - wanted %d, saw %d", tlvNum, numSeen) + } + tType := CDPEnergyWiseSubtype(binary.BigEndian.Uint32(data[0:4])) + tLen := int(binary.BigEndian.Uint32(data[4:8])) + if tLen > len(data)-8 { + return fmt.Errorf("Invalid TLV length %d vs %d", tLen, len(data)-8) + } + data = data[8:] + switch tType { + case CDPEnergyWiseRole: + info.EnergyWise.Role = string(data[:]) + case CDPEnergyWiseDomain: + info.EnergyWise.Domain = string(data[:]) + case CDPEnergyWiseName: + info.EnergyWise.Name = string(data[:]) + case CDPEnergyWiseReplyTo: + if len(data) >= 18 { + info.EnergyWise.ReplyUnknown1 = data[0:2] + info.EnergyWise.ReplyPort = data[2:4] + info.EnergyWise.ReplyAddress = data[4:8] + info.EnergyWise.ReplyUnknown2 = data[8:10] + info.EnergyWise.ReplyUnknown3 = data[10:14] + } + } + data = data[tLen:] + } + case CDPTLVSparePairPOE: + if err = checkCDPTLVLen(val, 1); err != nil { + return err + } + v := val.Value[0] + info.SparePairPoe.PSEFourWire = (v&CDPPoEFourWire > 0) + info.SparePairPoe.PDArchShared = (v&CDPPoEPDArch > 0) + info.SparePairPoe.PDRequestOn = (v&CDPPoEPDRequest > 0) + info.SparePairPoe.PSEOn = (v&CDPPoEPSE > 0) + default: + info.Unknown = append(info.Unknown, val) + } + } + return nil +} + +// CDP Protocol Types +const ( + CDPProtocolTypeNLPID byte = 1 + CDPProtocolType802_2 byte = 2 +) + +// CDPAddressType is used to define TLV values within CDP addresses. +type CDPAddressType uint64 + +// CDP Address types. 
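+//
+// The Addresses TLV body parsed by decodeAddresses below is laid out as a
+// 4-byte address count followed, per address, by a protocol type, a protocol
+// length, the protocol ID itself, a 2-byte address length, and the address
+// bytes. A hand-built sketch holding the single IPv4 address 192.0.2.1
+// (illustrative only, not upstream code):
+//
+//	addrTLV := []byte{
+//		0x00, 0x00, 0x00, 0x01, // one address follows
+//		0x01,       // protocol type: NLPID
+//		0x01,       // protocol length: 1
+//		0xcc,       // protocol: IPv4
+//		0x00, 0x04, // address length: 4
+//		192, 0, 2, 1, // the address
+//	}
+//	ips, err := decodeAddresses(addrTLV)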
+const ( + CDPAddressTypeCLNP CDPAddressType = 0x81 + CDPAddressTypeIPV4 CDPAddressType = 0xcc + CDPAddressTypeIPV6 CDPAddressType = 0xaaaa030000000800 + CDPAddressTypeDECNET CDPAddressType = 0xaaaa030000006003 + CDPAddressTypeAPPLETALK CDPAddressType = 0xaaaa03000000809b + CDPAddressTypeIPX CDPAddressType = 0xaaaa030000008137 + CDPAddressTypeVINES CDPAddressType = 0xaaaa0300000080c4 + CDPAddressTypeXNS CDPAddressType = 0xaaaa030000000600 + CDPAddressTypeAPOLLO CDPAddressType = 0xaaaa030000008019 +) + +func decodeAddresses(v []byte) (addresses []net.IP, err error) { + numaddr := int(binary.BigEndian.Uint32(v[0:4])) + if numaddr < 1 { + return nil, fmt.Errorf("Invalid Address TLV number %d", numaddr) + } + v = v[4:] + if len(v) < numaddr*8 { + return nil, fmt.Errorf("Invalid Address TLV length %d", len(v)) + } + for i := 0; i < numaddr; i++ { + prottype := v[0] + if prottype != CDPProtocolTypeNLPID && prottype != CDPProtocolType802_2 { // invalid protocol type + return nil, fmt.Errorf("Invalid Address Protocol %d", prottype) + } + protlen := int(v[1]) + if (prottype == CDPProtocolTypeNLPID && protlen != 1) || + (prottype == CDPProtocolType802_2 && protlen != 3 && protlen != 8) { // invalid length + return nil, fmt.Errorf("Invalid Address Protocol length %d", protlen) + } + plen := make([]byte, 8) + copy(plen[8-protlen:], v[2:2+protlen]) + protocol := CDPAddressType(binary.BigEndian.Uint64(plen)) + v = v[2+protlen:] + addrlen := binary.BigEndian.Uint16(v[0:2]) + ab := v[2 : 2+addrlen] + if protocol == CDPAddressTypeIPV4 && addrlen == 4 { + addresses = append(addresses, net.IPv4(ab[0], ab[1], ab[2], ab[3])) + } else if protocol == CDPAddressTypeIPV6 && addrlen == 16 { + addresses = append(addresses, net.IP(ab)) + } else { + // only handle IPV4 & IPV6 for now + } + v = v[2+addrlen:] + if len(v) < 8 { + break + } + } + return +} + +func (t CDPTLVType) String() (s string) { + switch t { + case CDPTLVDevID: + s = "Device ID" + case CDPTLVAddress: + s = "Addresses" + case CDPTLVPortID: + s = "Port ID" + case CDPTLVCapabilities: + s = "Capabilities" + case CDPTLVVersion: + s = "Software Version" + case CDPTLVPlatform: + s = "Platform" + case CDPTLVIPPrefix: + s = "IP Prefix" + case CDPTLVHello: + s = "Protocol Hello" + case CDPTLVVTPDomain: + s = "VTP Management Domain" + case CDPTLVNativeVLAN: + s = "Native VLAN" + case CDPTLVFullDuplex: + s = "Full Duplex" + case CDPTLVVLANReply: + s = "VoIP VLAN Reply" + case CDPTLVVLANQuery: + s = "VLANQuery" + case CDPTLVPower: + s = "Power consumption" + case CDPTLVMTU: + s = "MTU" + case CDPTLVExtendedTrust: + s = "Extended Trust Bitmap" + case CDPTLVUntrustedCOS: + s = "Untrusted Port CoS" + case CDPTLVSysName: + s = "System Name" + case CDPTLVSysOID: + s = "System OID" + case CDPTLVMgmtAddresses: + s = "Management Addresses" + case CDPTLVLocation: + s = "Location" + case CDPTLVExternalPortID: + s = "External Port ID" + case CDPTLVPowerRequested: + s = "Power Requested" + case CDPTLVPowerAvailable: + s = "Power Available" + case CDPTLVPortUnidirectional: + s = "Port Unidirectional" + case CDPTLVEnergyWise: + s = "Energy Wise" + case CDPTLVSparePairPOE: + s = "Spare Pair POE" + default: + s = "Unknown" + } + return +} + +func (a CDPAddressType) String() (s string) { + switch a { + case CDPAddressTypeCLNP: + s = "Connectionless Network Protocol" + case CDPAddressTypeIPV4: + s = "IPv4" + case CDPAddressTypeIPV6: + s = "IPv6" + case CDPAddressTypeDECNET: + s = "DECnet Phase IV" + case CDPAddressTypeAPPLETALK: + s = "Apple Talk" + case CDPAddressTypeIPX: + s = 
"Novell IPX" + case CDPAddressTypeVINES: + s = "Banyan VINES" + case CDPAddressTypeXNS: + s = "Xerox Network Systems" + case CDPAddressTypeAPOLLO: + s = "Apollo" + default: + s = "Unknown" + } + return +} + +func (t CDPEnergyWiseSubtype) String() (s string) { + switch t { + case CDPEnergyWiseRole: + s = "Role" + case CDPEnergyWiseDomain: + s = "Domain" + case CDPEnergyWiseName: + s = "Name" + case CDPEnergyWiseReplyTo: + s = "ReplyTo" + default: + s = "Unknown" + } + return +} + +func checkCDPTLVLen(v CiscoDiscoveryValue, l int) (err error) { + if len(v.Value) < l { + err = fmt.Errorf("Invalid TLV %v length %d", v.Type, len(v.Value)) + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ctp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ctp.go new file mode 100644 index 00000000..82875845 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ctp.go @@ -0,0 +1,109 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// EthernetCTPFunction is the function code used by the EthernetCTP protocol to identify each +// EthernetCTP layer. +type EthernetCTPFunction uint16 + +// EthernetCTPFunction values. +const ( + EthernetCTPFunctionReply EthernetCTPFunction = 1 + EthernetCTPFunctionForwardData EthernetCTPFunction = 2 +) + +// EthernetCTP implements the EthernetCTP protocol, see http://www.mit.edu/people/jhawk/ctp.html. +// We split EthernetCTP up into the top-level EthernetCTP layer, followed by zero or more +// EthernetCTPForwardData layers, followed by a final EthernetCTPReply layer. +type EthernetCTP struct { + BaseLayer + SkipCount uint16 +} + +// LayerType returns gopacket.LayerTypeEthernetCTP. +func (c *EthernetCTP) LayerType() gopacket.LayerType { + return LayerTypeEthernetCTP +} + +// EthernetCTPForwardData is the ForwardData layer inside EthernetCTP. See EthernetCTP's docs for more +// details. +type EthernetCTPForwardData struct { + BaseLayer + Function EthernetCTPFunction + ForwardAddress []byte +} + +// LayerType returns gopacket.LayerTypeEthernetCTPForwardData. +func (c *EthernetCTPForwardData) LayerType() gopacket.LayerType { + return LayerTypeEthernetCTPForwardData +} + +// ForwardEndpoint returns the EthernetCTPForwardData ForwardAddress as an endpoint. +func (c *EthernetCTPForwardData) ForwardEndpoint() gopacket.Endpoint { + return gopacket.NewEndpoint(EndpointMAC, c.ForwardAddress) +} + +// EthernetCTPReply is the Reply layer inside EthernetCTP. See EthernetCTP's docs for more details. +type EthernetCTPReply struct { + BaseLayer + Function EthernetCTPFunction + ReceiptNumber uint16 + Data []byte +} + +// LayerType returns gopacket.LayerTypeEthernetCTPReply. +func (c *EthernetCTPReply) LayerType() gopacket.LayerType { + return LayerTypeEthernetCTPReply +} + +// Payload returns the EthernetCTP reply's Data bytes. 
+func (c *EthernetCTPReply) Payload() []byte { return c.Data }
+
+func decodeEthernetCTP(data []byte, p gopacket.PacketBuilder) error {
+	c := &EthernetCTP{
+		SkipCount: binary.LittleEndian.Uint16(data[:2]),
+		BaseLayer: BaseLayer{data[:2], data[2:]},
+	}
+	if c.SkipCount%2 != 0 {
+		return fmt.Errorf("EthernetCTP skip count is odd: %d", c.SkipCount)
+	}
+	p.AddLayer(c)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+}
+
+// decodeEthernetCTPFromFunctionType reads in the first 2 bytes to determine the EthernetCTP
+// layer type to decode next, then decodes based on that.
+func decodeEthernetCTPFromFunctionType(data []byte, p gopacket.PacketBuilder) error {
+	function := EthernetCTPFunction(binary.LittleEndian.Uint16(data[:2]))
+	switch function {
+	case EthernetCTPFunctionReply:
+		reply := &EthernetCTPReply{
+			Function:      function,
+			ReceiptNumber: binary.LittleEndian.Uint16(data[2:4]),
+			Data:          data[4:],
+			BaseLayer:     BaseLayer{data, nil},
+		}
+		p.AddLayer(reply)
+		p.SetApplicationLayer(reply)
+		return nil
+	case EthernetCTPFunctionForwardData:
+		forward := &EthernetCTPForwardData{
+			Function:       function,
+			ForwardAddress: data[2:8],
+			BaseLayer:      BaseLayer{data[:8], data[8:]},
+		}
+		p.AddLayer(forward)
+		return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+	}
+	return fmt.Errorf("Unknown EthernetCTP function type %v", function)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv4.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv4.go
new file mode 100644
index 00000000..ed5f53ee
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv4.go
@@ -0,0 +1,589 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// DHCPOp represents a bootp operation
+type DHCPOp byte
+
+// bootp operations
+const (
+	DHCPOpRequest DHCPOp = 1
+	DHCPOpReply   DHCPOp = 2
+)
+
+// String returns a string version of a DHCPOp.
+func (o DHCPOp) String() string {
+	switch o {
+	case DHCPOpRequest:
+		return "Request"
+	case DHCPOpReply:
+		return "Reply"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPMsgType represents a DHCP operation
+type DHCPMsgType byte
+
+// Constants that represent DHCP operations
+const (
+	DHCPMsgTypeUnspecified DHCPMsgType = iota
+	DHCPMsgTypeDiscover
+	DHCPMsgTypeOffer
+	DHCPMsgTypeRequest
+	DHCPMsgTypeDecline
+	DHCPMsgTypeAck
+	DHCPMsgTypeNak
+	DHCPMsgTypeRelease
+	DHCPMsgTypeInform
+)
+
+// String returns a string version of a DHCPMsgType.
+func (o DHCPMsgType) String() string {
+	switch o {
+	case DHCPMsgTypeUnspecified:
+		return "Unspecified"
+	case DHCPMsgTypeDiscover:
+		return "Discover"
+	case DHCPMsgTypeOffer:
+		return "Offer"
+	case DHCPMsgTypeRequest:
+		return "Request"
+	case DHCPMsgTypeDecline:
+		return "Decline"
+	case DHCPMsgTypeAck:
+		return "Ack"
+	case DHCPMsgTypeNak:
+		return "Nak"
+	case DHCPMsgTypeRelease:
+		return "Release"
+	case DHCPMsgTypeInform:
+		return "Inform"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPMagic is the RFC 2131 "magic cookie" for DHCP.
+var DHCPMagic uint32 = 0x63825363
+
+// DHCPv4 contains data for a single DHCP packet.
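+//
+// A minimal decoding sketch (not upstream code; udpPayload is assumed to be
+// the payload of a UDP datagram on port 67/68):
+//
+//	dhcp := &DHCPv4{}
+//	if err := dhcp.DecodeFromBytes(udpPayload, gopacket.NilDecodeFeedback); err == nil {
+//		fmt.Println(dhcp.Operation, dhcp.ClientHWAddr, dhcp.Options)
+//	}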
+type DHCPv4 struct { + BaseLayer + Operation DHCPOp + HardwareType LinkType + HardwareLen uint8 + HardwareOpts uint8 + Xid uint32 + Secs uint16 + Flags uint16 + ClientIP net.IP + YourClientIP net.IP + NextServerIP net.IP + RelayAgentIP net.IP + ClientHWAddr net.HardwareAddr + ServerName []byte + File []byte + Options DHCPOptions +} + +// DHCPOptions is used to get nicely printed option lists which would normally +// be cut off after 5 options. +type DHCPOptions []DHCPOption + +// String returns a string version of the options list. +func (o DHCPOptions) String() string { + buf := &bytes.Buffer{} + buf.WriteByte('[') + for i, opt := range o { + buf.WriteString(opt.String()) + if i+1 != len(o) { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String() +} + +// LayerType returns gopacket.LayerTypeDHCPv4 +func (d *DHCPv4) LayerType() gopacket.LayerType { return LayerTypeDHCPv4 } + +// DecodeFromBytes decodes the given bytes into this layer. +func (d *DHCPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 240 { + df.SetTruncated() + return fmt.Errorf("DHCPv4 length %d too short", len(data)) + } + d.Options = d.Options[:0] + d.Operation = DHCPOp(data[0]) + d.HardwareType = LinkType(data[1]) + d.HardwareLen = data[2] + d.HardwareOpts = data[3] + d.Xid = binary.BigEndian.Uint32(data[4:8]) + d.Secs = binary.BigEndian.Uint16(data[8:10]) + d.Flags = binary.BigEndian.Uint16(data[10:12]) + d.ClientIP = net.IP(data[12:16]) + d.YourClientIP = net.IP(data[16:20]) + d.NextServerIP = net.IP(data[20:24]) + d.RelayAgentIP = net.IP(data[24:28]) + d.ClientHWAddr = net.HardwareAddr(data[28 : 28+d.HardwareLen]) + d.ServerName = data[44:108] + d.File = data[108:236] + if binary.BigEndian.Uint32(data[236:240]) != DHCPMagic { + return InvalidMagicCookie + } + + if len(data) <= 240 { + // DHCP Packet could have no option (??) + return nil + } + + options := data[240:] + + stop := len(options) + start := 0 + for start < stop { + o := DHCPOption{} + if err := o.decode(options[start:]); err != nil { + return err + } + if o.Type == DHCPOptEnd { + break + } + d.Options = append(d.Options, o) + // Check if the option is a single byte pad + if o.Type == DHCPOptPad { + start++ + } else { + start += int(o.Length) + 2 + } + } + return nil +} + +// Len returns the length of a DHCPv4 packet. +func (d *DHCPv4) Len() uint16 { + n := uint16(240) + for _, o := range d.Options { + if o.Type == DHCPOptPad { + n++ + } else { + n += uint16(o.Length) + 2 + } + } + n++ // for opt end + return n +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
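+//
+// A serialization sketch (not upstream code; all field values illustrative;
+// FixLengths recomputes HardwareLen from ClientHWAddr):
+//
+//	d := &DHCPv4{
+//		Operation:    DHCPOpRequest,
+//		HardwareType: LinkTypeEthernet,
+//		Xid:          0x12345678,
+//		ClientIP:     net.IP{0, 0, 0, 0},
+//		YourClientIP: net.IP{0, 0, 0, 0},
+//		NextServerIP: net.IP{0, 0, 0, 0},
+//		RelayAgentIP: net.IP{0, 0, 0, 0},
+//		ClientHWAddr: net.HardwareAddr{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01},
+//		Options: DHCPOptions{
+//			NewDHCPOption(DHCPOptMessageType, []byte{byte(DHCPMsgTypeDiscover)}),
+//		},
+//	}
+//	buf := gopacket.NewSerializeBuffer()
+//	err := d.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true})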
+func (d *DHCPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + plen := int(d.Len()) + + data, err := b.PrependBytes(plen) + if err != nil { + return err + } + + data[0] = byte(d.Operation) + data[1] = byte(d.HardwareType) + if opts.FixLengths { + d.HardwareLen = uint8(len(d.ClientHWAddr)) + } + data[2] = d.HardwareLen + data[3] = d.HardwareOpts + binary.BigEndian.PutUint32(data[4:8], d.Xid) + binary.BigEndian.PutUint16(data[8:10], d.Secs) + binary.BigEndian.PutUint16(data[10:12], d.Flags) + copy(data[12:16], d.ClientIP.To4()) + copy(data[16:20], d.YourClientIP.To4()) + copy(data[20:24], d.NextServerIP.To4()) + copy(data[24:28], d.RelayAgentIP.To4()) + copy(data[28:44], d.ClientHWAddr) + copy(data[44:108], d.ServerName) + copy(data[108:236], d.File) + binary.BigEndian.PutUint32(data[236:240], DHCPMagic) + + if len(d.Options) > 0 { + offset := 240 + for _, o := range d.Options { + if err := o.encode(data[offset:]); err != nil { + return err + } + // A pad option is only a single byte + if o.Type == DHCPOptPad { + offset++ + } else { + offset += 2 + len(o.Data) + } + } + optend := NewDHCPOption(DHCPOptEnd, nil) + if err := optend.encode(data[offset:]); err != nil { + return err + } + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (d *DHCPv4) CanDecode() gopacket.LayerClass { + return LayerTypeDHCPv4 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (d *DHCPv4) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeDHCPv4(data []byte, p gopacket.PacketBuilder) error { + dhcp := &DHCPv4{} + err := dhcp.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(dhcp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +// DHCPOpt represents a DHCP option or parameter from RFC-2132 +type DHCPOpt byte + +// Constants for the DHCPOpt options. 
+const ( + DHCPOptPad DHCPOpt = 0 + DHCPOptSubnetMask DHCPOpt = 1 // 4, net.IP + DHCPOptTimeOffset DHCPOpt = 2 // 4, int32 (signed seconds from UTC) + DHCPOptRouter DHCPOpt = 3 // n*4, [n]net.IP + DHCPOptTimeServer DHCPOpt = 4 // n*4, [n]net.IP + DHCPOptNameServer DHCPOpt = 5 // n*4, [n]net.IP + DHCPOptDNS DHCPOpt = 6 // n*4, [n]net.IP + DHCPOptLogServer DHCPOpt = 7 // n*4, [n]net.IP + DHCPOptCookieServer DHCPOpt = 8 // n*4, [n]net.IP + DHCPOptLPRServer DHCPOpt = 9 // n*4, [n]net.IP + DHCPOptImpressServer DHCPOpt = 10 // n*4, [n]net.IP + DHCPOptResLocServer DHCPOpt = 11 // n*4, [n]net.IP + DHCPOptHostname DHCPOpt = 12 // n, string + DHCPOptBootfileSize DHCPOpt = 13 // 2, uint16 + DHCPOptMeritDumpFile DHCPOpt = 14 // >1, string + DHCPOptDomainName DHCPOpt = 15 // n, string + DHCPOptSwapServer DHCPOpt = 16 // n*4, [n]net.IP + DHCPOptRootPath DHCPOpt = 17 // n, string + DHCPOptExtensionsPath DHCPOpt = 18 // n, string + DHCPOptIPForwarding DHCPOpt = 19 // 1, bool + DHCPOptSourceRouting DHCPOpt = 20 // 1, bool + DHCPOptPolicyFilter DHCPOpt = 21 // 8*n, [n]{net.IP/net.IP} + DHCPOptDatagramMTU DHCPOpt = 22 // 2, uint16 + DHCPOptDefaultTTL DHCPOpt = 23 // 1, byte + DHCPOptPathMTUAgingTimeout DHCPOpt = 24 // 4, uint32 + DHCPOptPathPlateuTableOption DHCPOpt = 25 // 2*n, []uint16 + DHCPOptInterfaceMTU DHCPOpt = 26 // 2, uint16 + DHCPOptAllSubsLocal DHCPOpt = 27 // 1, bool + DHCPOptBroadcastAddr DHCPOpt = 28 // 4, net.IP + DHCPOptMaskDiscovery DHCPOpt = 29 // 1, bool + DHCPOptMaskSupplier DHCPOpt = 30 // 1, bool + DHCPOptRouterDiscovery DHCPOpt = 31 // 1, bool + DHCPOptSolicitAddr DHCPOpt = 32 // 4, net.IP + DHCPOptStaticRoute DHCPOpt = 33 // n*8, [n]{net.IP/net.IP} -- note the 2nd is router not mask + DHCPOptARPTrailers DHCPOpt = 34 // 1, bool + DHCPOptARPTimeout DHCPOpt = 35 // 4, uint32 + DHCPOptEthernetEncap DHCPOpt = 36 // 1, bool + DHCPOptTCPTTL DHCPOpt = 37 // 1, byte + DHCPOptTCPKeepAliveInt DHCPOpt = 38 // 4, uint32 + DHCPOptTCPKeepAliveGarbage DHCPOpt = 39 // 1, bool + DHCPOptNISDomain DHCPOpt = 40 // n, string + DHCPOptNISServers DHCPOpt = 41 // 4*n, [n]net.IP + DHCPOptNTPServers DHCPOpt = 42 // 4*n, [n]net.IP + DHCPOptVendorOption DHCPOpt = 43 // n, [n]byte // may be encapsulated. + DHCPOptNetBIOSTCPNS DHCPOpt = 44 // 4*n, [n]net.IP + DHCPOptNetBIOSTCPDDS DHCPOpt = 45 // 4*n, [n]net.IP + DHCPOptNETBIOSTCPNodeType DHCPOpt = 46 // 1, magic byte + DHCPOptNetBIOSTCPScope DHCPOpt = 47 // n, string + DHCPOptXFontServer DHCPOpt = 48 // n, string + DHCPOptXDisplayManager DHCPOpt = 49 // n, string + DHCPOptRequestIP DHCPOpt = 50 // 4, net.IP + DHCPOptLeaseTime DHCPOpt = 51 // 4, uint32 + DHCPOptExtOptions DHCPOpt = 52 // 1, 1/2/3 + DHCPOptMessageType DHCPOpt = 53 // 1, 1-7 + DHCPOptServerID DHCPOpt = 54 // 4, net.IP + DHCPOptParamsRequest DHCPOpt = 55 // n, []byte + DHCPOptMessage DHCPOpt = 56 // n, 3 + DHCPOptMaxMessageSize DHCPOpt = 57 // 2, uint16 + DHCPOptT1 DHCPOpt = 58 // 4, uint32 + DHCPOptT2 DHCPOpt = 59 // 4, uint32 + DHCPOptClassID DHCPOpt = 60 // n, []byte + DHCPOptClientID DHCPOpt = 61 // n >= 2, []byte + DHCPOptDomainSearch DHCPOpt = 119 // n, string + DHCPOptSIPServers DHCPOpt = 120 // n, url + DHCPOptClasslessStaticRoute DHCPOpt = 121 // + DHCPOptEnd DHCPOpt = 255 +) + +// String returns a string version of a DHCPOpt. 
+func (o DHCPOpt) String() string { + switch o { + case DHCPOptPad: + return "(padding)" + case DHCPOptSubnetMask: + return "SubnetMask" + case DHCPOptTimeOffset: + return "TimeOffset" + case DHCPOptRouter: + return "Router" + case DHCPOptTimeServer: + return "rfc868" // old time server protocol stringified to dissuade confusion w. NTP + case DHCPOptNameServer: + return "ien116" // obscure nameserver protocol stringified to dissuade confusion w. DNS + case DHCPOptDNS: + return "DNS" + case DHCPOptLogServer: + return "mitLCS" // MIT LCS server protocol yada yada w. Syslog + case DHCPOptCookieServer: + return "CookieServer" + case DHCPOptLPRServer: + return "LPRServer" + case DHCPOptImpressServer: + return "ImpressServer" + case DHCPOptResLocServer: + return "ResourceLocationServer" + case DHCPOptHostname: + return "Hostname" + case DHCPOptBootfileSize: + return "BootfileSize" + case DHCPOptMeritDumpFile: + return "MeritDumpFile" + case DHCPOptDomainName: + return "DomainName" + case DHCPOptSwapServer: + return "SwapServer" + case DHCPOptRootPath: + return "RootPath" + case DHCPOptExtensionsPath: + return "ExtensionsPath" + case DHCPOptIPForwarding: + return "IPForwarding" + case DHCPOptSourceRouting: + return "SourceRouting" + case DHCPOptPolicyFilter: + return "PolicyFilter" + case DHCPOptDatagramMTU: + return "DatagramMTU" + case DHCPOptDefaultTTL: + return "DefaultTTL" + case DHCPOptPathMTUAgingTimeout: + return "PathMTUAgingTimeout" + case DHCPOptPathPlateuTableOption: + return "PathPlateuTableOption" + case DHCPOptInterfaceMTU: + return "InterfaceMTU" + case DHCPOptAllSubsLocal: + return "AllSubsLocal" + case DHCPOptBroadcastAddr: + return "BroadcastAddress" + case DHCPOptMaskDiscovery: + return "MaskDiscovery" + case DHCPOptMaskSupplier: + return "MaskSupplier" + case DHCPOptRouterDiscovery: + return "RouterDiscovery" + case DHCPOptSolicitAddr: + return "SolicitAddr" + case DHCPOptStaticRoute: + return "StaticRoute" + case DHCPOptARPTrailers: + return "ARPTrailers" + case DHCPOptARPTimeout: + return "ARPTimeout" + case DHCPOptEthernetEncap: + return "EthernetEncap" + case DHCPOptTCPTTL: + return "TCPTTL" + case DHCPOptTCPKeepAliveInt: + return "TCPKeepAliveInt" + case DHCPOptTCPKeepAliveGarbage: + return "TCPKeepAliveGarbage" + case DHCPOptNISDomain: + return "NISDomain" + case DHCPOptNISServers: + return "NISServers" + case DHCPOptNTPServers: + return "NTPServers" + case DHCPOptVendorOption: + return "VendorOption" + case DHCPOptNetBIOSTCPNS: + return "NetBIOSOverTCPNS" + case DHCPOptNetBIOSTCPDDS: + return "NetBiosOverTCPDDS" + case DHCPOptNETBIOSTCPNodeType: + return "NetBIOSOverTCPNodeType" + case DHCPOptNetBIOSTCPScope: + return "NetBIOSOverTCPScope" + case DHCPOptXFontServer: + return "XFontServer" + case DHCPOptXDisplayManager: + return "XDisplayManager" + case DHCPOptEnd: + return "(end)" + case DHCPOptSIPServers: + return "SipServers" + case DHCPOptRequestIP: + return "RequestIP" + case DHCPOptLeaseTime: + return "LeaseTime" + case DHCPOptExtOptions: + return "ExtOpts" + case DHCPOptMessageType: + return "MessageType" + case DHCPOptServerID: + return "ServerID" + case DHCPOptParamsRequest: + return "ParamsRequest" + case DHCPOptMessage: + return "Message" + case DHCPOptMaxMessageSize: + return "MaxDHCPSize" + case DHCPOptT1: + return "Timer1" + case DHCPOptT2: + return "Timer2" + case DHCPOptClassID: + return "ClassID" + case DHCPOptClientID: + return "ClientID" + case DHCPOptDomainSearch: + return "DomainSearch" + case DHCPOptClasslessStaticRoute: + return 
"ClasslessStaticRoute" + default: + return "Unknown" + } +} + +// DHCPOption rerpresents a DHCP option. +type DHCPOption struct { + Type DHCPOpt + Length uint8 + Data []byte +} + +// String returns a string version of a DHCP Option. +func (o DHCPOption) String() string { + switch o.Type { + + case DHCPOptHostname, DHCPOptMeritDumpFile, DHCPOptDomainName, DHCPOptRootPath, + DHCPOptExtensionsPath, DHCPOptNISDomain, DHCPOptNetBIOSTCPScope, DHCPOptXFontServer, + DHCPOptXDisplayManager, DHCPOptMessage, DHCPOptDomainSearch: // string + return fmt.Sprintf("Option(%s:%s)", o.Type, string(o.Data)) + + case DHCPOptMessageType: + if len(o.Data) != 1 { + return fmt.Sprintf("Option(%s:INVALID)", o.Type) + } + return fmt.Sprintf("Option(%s:%s)", o.Type, DHCPMsgType(o.Data[0])) + + case DHCPOptSubnetMask, DHCPOptServerID, DHCPOptBroadcastAddr, + DHCPOptSolicitAddr, DHCPOptRequestIP: // net.IP + if len(o.Data) < 4 { + return fmt.Sprintf("Option(%s:INVALID)", o.Type) + } + return fmt.Sprintf("Option(%s:%s)", o.Type, net.IP(o.Data)) + + case DHCPOptT1, DHCPOptT2, DHCPOptLeaseTime, DHCPOptPathMTUAgingTimeout, + DHCPOptARPTimeout, DHCPOptTCPKeepAliveInt: // uint32 + if len(o.Data) != 4 { + return fmt.Sprintf("Option(%s:INVALID)", o.Type) + } + return fmt.Sprintf("Option(%s:%d)", o.Type, + uint32(o.Data[0])<<24|uint32(o.Data[1])<<16|uint32(o.Data[2])<<8|uint32(o.Data[3])) + + case DHCPOptParamsRequest: + buf := &bytes.Buffer{} + buf.WriteString(fmt.Sprintf("Option(%s:", o.Type)) + for i, v := range o.Data { + buf.WriteString(DHCPOpt(v).String()) + if i+1 != len(o.Data) { + buf.WriteByte(',') + } + } + buf.WriteString(")") + return buf.String() + + default: + return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data) + } +} + +// NewDHCPOption constructs a new DHCPOption with a given type and data. +func NewDHCPOption(t DHCPOpt, data []byte) DHCPOption { + o := DHCPOption{Type: t} + if data != nil { + o.Data = data + o.Length = uint8(len(data)) + } + return o +} + +func (o *DHCPOption) encode(b []byte) error { + switch o.Type { + case DHCPOptPad, DHCPOptEnd: + b[0] = byte(o.Type) + default: + b[0] = byte(o.Type) + b[1] = o.Length + copy(b[2:], o.Data) + } + return nil +} + +func (o *DHCPOption) decode(data []byte) error { + if len(data) < 1 { + // Pad/End have a length of 1 + return DecOptionNotEnoughData + } + o.Type = DHCPOpt(data[0]) + switch o.Type { + case DHCPOptPad, DHCPOptEnd: + o.Data = nil + default: + if len(data) < 2 { + return DecOptionNotEnoughData + } + o.Length = data[1] + if int(o.Length) > len(data[2:]) { + return DecOptionMalformed + } + o.Data = data[2 : 2+int(o.Length)] + } + return nil +} + +// DHCPv4Error is used for constant errors for DHCPv4. It is needed for test asserts. +type DHCPv4Error string + +// DHCPv4Error implements error interface. 
+func (d DHCPv4Error) Error() string { + return string(d) +} + +const ( + // DecOptionNotEnoughData is returned when there is not enough data during option's decode process + DecOptionNotEnoughData = DHCPv4Error("Not enough data to decode") + // DecOptionMalformed is returned when the option is malformed + DecOptionMalformed = DHCPv4Error("Option is malformed") + // InvalidMagicCookie is returned when Magic cookie is missing into BOOTP header + InvalidMagicCookie = DHCPv4Error("Bad DHCP header") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv6.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv6.go new file mode 100644 index 00000000..03e56488 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv6.go @@ -0,0 +1,349 @@ +// Copyright 2018 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +// DHCPv6MsgType represents a DHCPv6 operation +type DHCPv6MsgType byte + +// Constants that represent DHCP operations +const ( + DHCPv6MsgTypeUnspecified DHCPv6MsgType = iota + DHCPv6MsgTypeSolicit + DHCPv6MsgTypeAdverstise + DHCPv6MsgTypeRequest + DHCPv6MsgTypeConfirm + DHCPv6MsgTypeRenew + DHCPv6MsgTypeRebind + DHCPv6MsgTypeReply + DHCPv6MsgTypeRelease + DHCPv6MsgTypeDecline + DHCPv6MsgTypeReconfigure + DHCPv6MsgTypeInformationRequest + DHCPv6MsgTypeRelayForward + DHCPv6MsgTypeRelayReply +) + +// String returns a string version of a DHCPv6MsgType. +func (o DHCPv6MsgType) String() string { + switch o { + case DHCPv6MsgTypeUnspecified: + return "Unspecified" + case DHCPv6MsgTypeSolicit: + return "Solicit" + case DHCPv6MsgTypeAdverstise: + return "Adverstise" + case DHCPv6MsgTypeRequest: + return "Request" + case DHCPv6MsgTypeConfirm: + return "Confirm" + case DHCPv6MsgTypeRenew: + return "Renew" + case DHCPv6MsgTypeRebind: + return "Rebind" + case DHCPv6MsgTypeReply: + return "Reply" + case DHCPv6MsgTypeRelease: + return "Release" + case DHCPv6MsgTypeDecline: + return "Decline" + case DHCPv6MsgTypeReconfigure: + return "Reconfigure" + case DHCPv6MsgTypeInformationRequest: + return "InformationRequest" + case DHCPv6MsgTypeRelayForward: + return "RelayForward" + case DHCPv6MsgTypeRelayReply: + return "RelayReply" + default: + return "Unknown" + } +} + +// DHCPv6 contains data for a single DHCP packet. +type DHCPv6 struct { + BaseLayer + MsgType DHCPv6MsgType + HopCount uint8 + LinkAddr net.IP + PeerAddr net.IP + TransactionID []byte + Options DHCPv6Options +} + +// LayerType returns gopacket.LayerTypeDHCPv6 +func (d *DHCPv6) LayerType() gopacket.LayerType { return LayerTypeDHCPv6 } + +// DecodeFromBytes decodes the given bytes into this layer. 
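The decoder that follows distinguishes two fixed headers: relay messages (RelayForward/RelayReply) carry msg-type(1) + hop-count(1) + link-address(16) + peer-address(16), so options start at offset 34, while every other message is msg-type(1) + transaction-id(3), with options at offset 4. A hedged, standalone sketch on an invented Solicit header:

package main

import "fmt"

func main() {
	// Invented client/server message: type 1 (Solicit) plus a 3-byte transaction ID.
	data := []byte{0x01, 0xaa, 0xbb, 0xcc} // options, if any, would follow

	msgType := data[0]
	if msgType == 12 || msgType == 13 { // RelayForward / RelayReply
		// Relay header: hop count at [1], link address [2:18],
		// peer address [18:34]; options start at offset 34.
		fmt.Println("relay message")
	} else {
		fmt.Printf("msg type %d, transaction id % x, options start at offset 4\n",
			msgType, data[1:4])
	}
}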
+func (d *DHCPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("DHCPv6 length %d too short", len(data)) + } + d.BaseLayer = BaseLayer{Contents: data} + d.Options = d.Options[:0] + d.MsgType = DHCPv6MsgType(data[0]) + + offset := 0 + if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply { + if len(data) < 34 { + df.SetTruncated() + return fmt.Errorf("DHCPv6 length %d too short for message type %d", len(data), d.MsgType) + } + d.HopCount = data[1] + d.LinkAddr = net.IP(data[2:18]) + d.PeerAddr = net.IP(data[18:34]) + offset = 34 + } else { + d.TransactionID = data[1:4] + offset = 4 + } + + stop := len(data) + for offset < stop { + o := DHCPv6Option{} + if err := o.decode(data[offset:]); err != nil { + return err + } + d.Options = append(d.Options, o) + offset += int(o.Length) + 4 // 2 from option code, 2 from option length + } + + return nil +} + +// Len returns the length of a DHCPv6 packet. +func (d *DHCPv6) Len() int { + n := 1 + if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply { + n += 33 + } else { + n += 3 + } + + for _, o := range d.Options { + n += int(o.Length) + 4 // 2 from option code, 2 from option length + } + + return n +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (d *DHCPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + plen := int(d.Len()) + + data, err := b.PrependBytes(plen) + if err != nil { + return err + } + + offset := 0 + data[0] = byte(d.MsgType) + if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply { + data[1] = byte(d.HopCount) + copy(data[2:18], d.LinkAddr.To16()) + copy(data[18:34], d.PeerAddr.To16()) + offset = 34 + } else { + copy(data[1:4], d.TransactionID) + offset = 4 + } + + if len(d.Options) > 0 { + for _, o := range d.Options { + if err := o.encode(data[offset:], opts); err != nil { + return err + } + offset += int(o.Length) + 4 // 2 from option code, 2 from option length + } + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (d *DHCPv6) CanDecode() gopacket.LayerClass { + return LayerTypeDHCPv6 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (d *DHCPv6) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeDHCPv6(data []byte, p gopacket.PacketBuilder) error { + dhcp := &DHCPv6{} + err := dhcp.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(dhcp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +// DHCPv6StatusCode represents a DHCP status code - RFC-3315 +type DHCPv6StatusCode uint16 + +// Constants for the DHCPv6StatusCode. +const ( + DHCPv6StatusCodeSuccess DHCPv6StatusCode = iota + DHCPv6StatusCodeUnspecFail + DHCPv6StatusCodeNoAddrsAvail + DHCPv6StatusCodeNoBinding + DHCPv6StatusCodeNotOnLink + DHCPv6StatusCodeUseMulticast +) + +// String returns a string version of a DHCPv6StatusCode. 
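Before the status-code stringer below, the arithmetic behind the `int(o.Length) + 4` stride shared by DecodeFromBytes and Len above: every DHCPv6 option contributes a 2-byte code, a 2-byte length, and Length payload bytes. A standalone sketch (the two options are invented) walking a buffer with the same stride:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Two invented options back to back: code 14 (RapidCommit, empty payload)
	// and code 8 (ElapsedTime, 2-byte payload).
	opts := []byte{
		0x00, 0x0e, 0x00, 0x00, // code=14, len=0
		0x00, 0x08, 0x00, 0x02, 0x0b, 0xb8, // code=8, len=2, data=0x0bb8
	}

	for off := 0; off+4 <= len(opts); {
		code := binary.BigEndian.Uint16(opts[off : off+2])
		length := int(binary.BigEndian.Uint16(opts[off+2 : off+4]))
		fmt.Printf("option %d, %d payload byte(s)\n", code, length)
		off += 4 + length // the same stride the decoder uses
	}
}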
+func (o DHCPv6StatusCode) String() string {
+	switch o {
+	case DHCPv6StatusCodeSuccess:
+		return "Success"
+	case DHCPv6StatusCodeUnspecFail:
+		return "UnspecifiedFailure"
+	case DHCPv6StatusCodeNoAddrsAvail:
+		return "NoAddressAvailable"
+	case DHCPv6StatusCodeNoBinding:
+		return "NoBinding"
+	case DHCPv6StatusCodeNotOnLink:
+		return "NotOnLink"
+	case DHCPv6StatusCodeUseMulticast:
+		return "UseMulticast"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6DUIDType represents the type of a DHCPv6 DUID - RFC-3315
+type DHCPv6DUIDType uint16
+
+// Constants for the DHCPv6DUIDType.
+const (
+	DHCPv6DUIDTypeLLT DHCPv6DUIDType = iota + 1
+	DHCPv6DUIDTypeEN
+	DHCPv6DUIDTypeLL
+)
+
+// String returns a string version of a DHCPv6DUIDType.
+func (o DHCPv6DUIDType) String() string {
+	switch o {
+	case DHCPv6DUIDTypeLLT:
+		return "LLT"
+	case DHCPv6DUIDTypeEN:
+		return "EN"
+	case DHCPv6DUIDTypeLL:
+		return "LL"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6DUID is the DHCP Unique Identifier, as defined in RFC 3315, section 9 (https://tools.ietf.org/html/rfc3315#page-19)
+type DHCPv6DUID struct {
+	Type DHCPv6DUIDType
+	// LLT, LL
+	HardwareType []byte
+	// EN
+	EnterpriseNumber []byte
+	// LLT
+	Time []byte
+	// LLT, LL
+	LinkLayerAddress net.HardwareAddr
+	// EN
+	Identifier []byte
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6DUID
+func (d *DHCPv6DUID) DecodeFromBytes(data []byte) error {
+	if len(data) < 2 {
+		return fmt.Errorf("not enough bytes to decode: %d", len(data))
+	}
+
+	d.Type = DHCPv6DUIDType(binary.BigEndian.Uint16(data[:2]))
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		// guard against DUIDs too short for their declared type
+		if len(data) < 4 {
+			return fmt.Errorf("not enough bytes to decode: %d", len(data))
+		}
+		d.HardwareType = data[2:4]
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		if len(data) < 8 {
+			return fmt.Errorf("not enough bytes to decode: %d", len(data))
+		}
+		d.Time = data[4:8]
+		d.LinkLayerAddress = net.HardwareAddr(data[8:])
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		if len(data) < 6 {
+			return fmt.Errorf("not enough bytes to decode: %d", len(data))
+		}
+		d.EnterpriseNumber = data[2:6]
+		d.Identifier = data[6:]
+	} else { // DHCPv6DUIDTypeLL
+		d.LinkLayerAddress = net.HardwareAddr(data[4:])
+	}
+
+	return nil
+}
+
+// Encode encodes the DHCPv6DUID in a slice of bytes
+func (d *DHCPv6DUID) Encode() []byte {
+	length := d.Len()
+	data := make([]byte, length)
+	binary.BigEndian.PutUint16(data[0:2], uint16(d.Type))
+
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		copy(data[2:4], d.HardwareType)
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		copy(data[4:8], d.Time)
+		copy(data[8:], d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		copy(data[2:6], d.EnterpriseNumber)
+		copy(data[6:], d.Identifier)
+	} else {
+		copy(data[4:], d.LinkLayerAddress)
+	}
+
+	return data
+}
+
+// Len returns the length of the DHCPv6DUID, respecting the type
+func (d *DHCPv6DUID) Len() int {
+	length := 2 // d.Type
+	if d.Type == DHCPv6DUIDTypeLLT {
+		length += 2 /*HardwareType*/ + 4 /*d.Time*/ + len(d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		length += 4 /*d.EnterpriseNumber*/ + len(d.Identifier)
+	} else { // LL
+		length += 2 /*d.HardwareType*/ + len(d.LinkLayerAddress)
+	}
+
+	return length
+}
+
+func (d *DHCPv6DUID) String() string {
+	duid := "Type: " + d.Type.String() + ", "
+	if d.Type == DHCPv6DUIDTypeLLT {
+		duid += fmt.Sprintf("HardwareType: %v, Time: %v, LinkLayerAddress: %v", d.HardwareType, d.Time, d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		duid += fmt.Sprintf("EnterpriseNumber: %v, Identifier: %v", d.EnterpriseNumber, d.Identifier)
+	} else { // DHCPv6DUIDTypeLL
+		duid += fmt.Sprintf("HardwareType: %v, LinkLayerAddress: %v", d.HardwareType, d.LinkLayerAddress)
+	}
+	return duid
+}
+
+func decodeDHCPv6DUID(data
[]byte) (*DHCPv6DUID, error) { + duid := &DHCPv6DUID{} + err := duid.DecodeFromBytes(data) + if err != nil { + return nil, err + } + return duid, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv6_options.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv6_options.go new file mode 100644 index 00000000..0c05e35f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dhcpv6_options.go @@ -0,0 +1,621 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "github.com/google/gopacket" +) + +// DHCPv6Opt represents a DHCP option or parameter from RFC-3315 +type DHCPv6Opt uint16 + +// Constants for the DHCPv6Opt options. +const ( + DHCPv6OptClientID DHCPv6Opt = 1 + DHCPv6OptServerID DHCPv6Opt = 2 + DHCPv6OptIANA DHCPv6Opt = 3 + DHCPv6OptIATA DHCPv6Opt = 4 + DHCPv6OptIAAddr DHCPv6Opt = 5 + DHCPv6OptOro DHCPv6Opt = 6 + DHCPv6OptPreference DHCPv6Opt = 7 + DHCPv6OptElapsedTime DHCPv6Opt = 8 + DHCPv6OptRelayMessage DHCPv6Opt = 9 + DHCPv6OptAuth DHCPv6Opt = 11 + DHCPv6OptUnicast DHCPv6Opt = 12 + DHCPv6OptStatusCode DHCPv6Opt = 13 + DHCPv6OptRapidCommit DHCPv6Opt = 14 + DHCPv6OptUserClass DHCPv6Opt = 15 + DHCPv6OptVendorClass DHCPv6Opt = 16 + DHCPv6OptVendorOpts DHCPv6Opt = 17 + DHCPv6OptInterfaceID DHCPv6Opt = 18 + DHCPv6OptReconfigureMessage DHCPv6Opt = 19 + DHCPv6OptReconfigureAccept DHCPv6Opt = 20 + + // RFC 3319 Session Initiation Protocol (SIP) + DHCPv6OptSIPServersDomainList DHCPv6Opt = 21 + DHCPv6OptSIPServersAddressList DHCPv6Opt = 22 + + // RFC 3646 DNS Configuration + DHCPv6OptDNSServers DHCPv6Opt = 23 + DHCPv6OptDomainList DHCPv6Opt = 24 + + // RFC 3633 Prefix Delegation + DHCPv6OptIAPD DHCPv6Opt = 25 + DHCPv6OptIAPrefix DHCPv6Opt = 26 + + // RFC 3898 Network Information Service (NIS) + DHCPv6OptNISServers DHCPv6Opt = 27 + DHCPv6OptNISPServers DHCPv6Opt = 28 + DHCPv6OptNISDomainName DHCPv6Opt = 29 + DHCPv6OptNISPDomainName DHCPv6Opt = 30 + + // RFC 4075 Simple Network Time Protocol (SNTP) + DHCPv6OptSNTPServers DHCPv6Opt = 31 + + // RFC 4242 Information Refresh Time Option + DHCPv6OptInformationRefreshTime DHCPv6Opt = 32 + + // RFC 4280 Broadcast and Multicast Control Servers + DHCPv6OptBCMCSServerDomainNameList DHCPv6Opt = 33 + DHCPv6OptBCMCSServerAddressList DHCPv6Opt = 34 + + // RFC 4776 Civic Address ConfigurationOption + DHCPv6OptGeoconfCivic DHCPv6Opt = 36 + + // RFC 4649 Relay Agent Remote-ID + DHCPv6OptRemoteID DHCPv6Opt = 37 + + // RFC 4580 Relay Agent Subscriber-ID + DHCPv6OptSubscriberID DHCPv6Opt = 38 + + // RFC 4704 Client Full Qualified Domain Name (FQDN) + DHCPv6OptClientFQDN DHCPv6Opt = 39 + + // RFC 5192 Protocol for Carrying Authentication for Network Access (PANA) + DHCPv6OptPanaAgent DHCPv6Opt = 40 + + // RFC 4833 Timezone Options + DHCPv6OptNewPOSIXTimezone DHCPv6Opt = 41 + DHCPv6OptNewTZDBTimezone DHCPv6Opt = 42 + + // RFC 4994 Relay Agent Echo Request + DHCPv6OptEchoRequestOption DHCPv6Opt = 43 + + // RFC 5007 Leasequery + DHCPv6OptLQQuery DHCPv6Opt = 44 + DHCPv6OptCLTTime DHCPv6Opt = 45 + DHCPv6OptClientData DHCPv6Opt = 46 + DHCPv6OptLQRelayData DHCPv6Opt = 47 + DHCPv6OptLQClientLink DHCPv6Opt = 48 + + // RFC 6610 Home Information Discovery in Mobile IPv6 (MIPv6) + DHCPv6OptMIP6HNIDF DHCPv6Opt = 49 + 
DHCPv6OptMIP6VDINF DHCPv6Opt = 50 + DHCPv6OptMIP6IDINF DHCPv6Opt = 69 + DHCPv6OptMIP6UDINF DHCPv6Opt = 70 + DHCPv6OptMIP6HNP DHCPv6Opt = 71 + DHCPv6OptMIP6HAA DHCPv6Opt = 72 + DHCPv6OptMIP6HAF DHCPv6Opt = 73 + + // RFC 5223 Discovering Location-to-Service Translation (LoST) Servers + DHCPv6OptV6LOST DHCPv6Opt = 51 + + // RFC 5417 Control And Provisioning of Wireless Access Points (CAPWAP) + DHCPv6OptCAPWAPACV6 DHCPv6Opt = 52 + + // RFC 5460 Bulk Leasequery + DHCPv6OptRelayID DHCPv6Opt = 53 + + // RFC 5678 IEEE 802.21 Mobility Services (MoS) Discovery + DHCPv6OptIPv6AddressMoS DHCPv6Opt = 54 + DHCPv6OptIPv6FQDNMoS DHCPv6Opt = 55 + + // RFC 5908 NTP Server Option + DHCPv6OptNTPServer DHCPv6Opt = 56 + + // RFC 5986 Discovering the Local Location Information Server (LIS) + DHCPv6OptV6AccessDomain DHCPv6Opt = 57 + + // RFC 5986 SIP User Agent + DHCPv6OptSIPUACSList DHCPv6Opt = 58 + + // RFC 5970 Options for Network Boot + DHCPv6OptBootFileURL DHCPv6Opt = 59 + DHCPv6OptBootFileParam DHCPv6Opt = 60 + DHCPv6OptClientArchType DHCPv6Opt = 61 + DHCPv6OptNII DHCPv6Opt = 62 + + // RFC 6225 Coordinate-Based Location Configuration Information + DHCPv6OptGeolocation DHCPv6Opt = 63 + + // RFC 6334 Dual-Stack Lite + DHCPv6OptAFTRName DHCPv6Opt = 64 + + // RFC 6440 EAP Re-authentication Protocol (ERP) + DHCPv6OptERPLocalDomainName DHCPv6Opt = 65 + + // RFC 6422 Relay-Supplied DHCP Options + DHCPv6OptRSOO DHCPv6Opt = 66 + + // RFC 6603 Prefix Exclude Option for DHCPv6-based Prefix Delegation + DHCPv6OptPDExclude DHCPv6Opt = 67 + + // RFC 6607 Virtual Subnet Selection + DHCPv6OptVSS DHCPv6Opt = 68 + + // RFC 6731 Improved Recursive DNS Server Selection for Multi-Interfaced Nodes + DHCPv6OptRDNSSSelection DHCPv6Opt = 74 + + // RFC 6784 Kerberos Options for DHCPv6 + DHCPv6OptKRBPrincipalName DHCPv6Opt = 75 + DHCPv6OptKRBRealmName DHCPv6Opt = 76 + DHCPv6OptKRBKDC DHCPv6Opt = 77 + + // RFC 6939 Client Link-Layer Address Option + DHCPv6OptClientLinkLayerAddress DHCPv6Opt = 79 + + // RFC 6977 Triggering DHCPv6 Reconfiguration from Relay Agents + DHCPv6OptLinkAddress DHCPv6Opt = 80 + + // RFC 7037 RADIUS Option for the DHCPv6 Relay Agent + DHCPv6OptRADIUS DHCPv6Opt = 81 + + // RFC 7083 Modification to Default Values of SOL_MAX_RT and INF_MAX_RT + DHCPv6OptSolMaxRt DHCPv6Opt = 82 + DHCPv6OptInfMaxRt DHCPv6Opt = 83 + + // RFC 7078 Distributing Address Selection Policy + DHCPv6OptAddrSel DHCPv6Opt = 84 + DHCPv6OptAddrSelTable DHCPv6Opt = 85 + + // RFC 7291 DHCP Options for the Port Control Protocol (PCP) + DHCPv6OptV6PCPServer DHCPv6Opt = 86 + + // RFC 7341 DHCPv4-over-DHCPv6 (DHCP 4o6) Transport + DHCPv6OptDHCPv4Message DHCPv6Opt = 87 + DHCPv6OptDHCPv4OverDHCPv6Server DHCPv6Opt = 88 + + // RFC 7598 Configuration of Softwire Address and Port-Mapped Clients + DHCPv6OptS46Rule DHCPv6Opt = 89 + DHCPv6OptS46BR DHCPv6Opt = 90 + DHCPv6OptS46DMR DHCPv6Opt = 91 + DHCPv6OptS46V4V4Bind DHCPv6Opt = 92 + DHCPv6OptS46PortParameters DHCPv6Opt = 93 + DHCPv6OptS46ContMAPE DHCPv6Opt = 94 + DHCPv6OptS46ContMAPT DHCPv6Opt = 95 + DHCPv6OptS46ContLW DHCPv6Opt = 96 + + // RFC 7600 IPv4 Residual Deployment via IPv6 + DHCPv6Opt4RD DHCPv6Opt = 97 + DHCPv6Opt4RDMapRule DHCPv6Opt = 98 + DHCPv6Opt4RDNonMapRule DHCPv6Opt = 99 + + // RFC 7653 Active Leasequery + DHCPv6OptLQBaseTime DHCPv6Opt = 100 + DHCPv6OptLQStartTime DHCPv6Opt = 101 + DHCPv6OptLQEndTime DHCPv6Opt = 102 + + // RFC 7710 Captive-Portal Identification + DHCPv6OptCaptivePortal DHCPv6Opt = 103 + + // RFC 7774 Multicast Protocol for Low-Power and Lossy Networks (MPL) Parameter 
Configuration + DHCPv6OptMPLParameters DHCPv6Opt = 104 + + // RFC 7839 Access-Network-Identifier (ANI) + DHCPv6OptANIATT DHCPv6Opt = 105 + DHCPv6OptANINetworkName DHCPv6Opt = 106 + DHCPv6OptANIAPName DHCPv6Opt = 107 + DHCPv6OptANIAPBSSID DHCPv6Opt = 108 + DHCPv6OptANIOperatorID DHCPv6Opt = 109 + DHCPv6OptANIOperatorRealm DHCPv6Opt = 110 + + // RFC 8026 Unified IPv4-in-IPv6 Softwire Customer Premises Equipment (CPE) + DHCPv6OptS46Priority DHCPv6Opt = 111 + + // draft-ietf-opsawg-mud-25 Manufacturer Usage Description (MUD) + DHCPv6OptMUDURLV6 DHCPv6Opt = 112 + + // RFC 8115 IPv4-Embedded Multicast and Unicast IPv6 Prefixes + DHCPv6OptV6Prefix64 DHCPv6Opt = 113 + + // RFC 8156 DHCPv6 Failover Protocol + DHCPv6OptFBindingStatus DHCPv6Opt = 114 + DHCPv6OptFConnectFlags DHCPv6Opt = 115 + DHCPv6OptFDNSRemovalInfo DHCPv6Opt = 116 + DHCPv6OptFDNSHostName DHCPv6Opt = 117 + DHCPv6OptFDNSZoneName DHCPv6Opt = 118 + DHCPv6OptFDNSFlags DHCPv6Opt = 119 + DHCPv6OptFExpirationTime DHCPv6Opt = 120 + DHCPv6OptFMaxUnacknowledgedBNDUPD DHCPv6Opt = 121 + DHCPv6OptFMCLT DHCPv6Opt = 122 + DHCPv6OptFPartnerLifetime DHCPv6Opt = 123 + DHCPv6OptFPartnerLifetimeSent DHCPv6Opt = 124 + DHCPv6OptFPartnerDownTime DHCPv6Opt = 125 + DHCPv6OptFPartnerRawCltTime DHCPv6Opt = 126 + DHCPv6OptFProtocolVersion DHCPv6Opt = 127 + DHCPv6OptFKeepaliveTime DHCPv6Opt = 128 + DHCPv6OptFReconfigureData DHCPv6Opt = 129 + DHCPv6OptFRelationshipName DHCPv6Opt = 130 + DHCPv6OptFServerFlags DHCPv6Opt = 131 + DHCPv6OptFServerState DHCPv6Opt = 132 + DHCPv6OptFStartTimeOfState DHCPv6Opt = 133 + DHCPv6OptFStateExpirationTime DHCPv6Opt = 134 + + // RFC 8357 Generalized UDP Source Port for DHCP Relay + DHCPv6OptRelayPort DHCPv6Opt = 135 + + // draft-ietf-netconf-zerotouch-25 Zero Touch Provisioning for Networking Devices + DHCPv6OptV6ZeroTouchRedirect DHCPv6Opt = 136 + + // RFC 6153 Access Network Discovery and Selection Function (ANDSF) Discovery + DHCPv6OptIPV6AddressANDSF DHCPv6Opt = 143 +) + +// String returns a string version of a DHCPv6Opt. 
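Ahead of the stringer that follows, note what this table is indexed by on the wire: an Option Request option (DHCPv6OptOro, code 6) carries a packed list of 2-byte option codes, which is exactly how the Oro branch of DHCPv6Option.String later walks its data. A minimal standalone sketch with an invented payload:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Invented ORO payload requesting options 23 (DNS servers) and 24 (domain list).
	payload := []byte{0x00, 0x17, 0x00, 0x18}

	for i := 0; i+2 <= len(payload); i += 2 {
		fmt.Println("requested option", binary.BigEndian.Uint16(payload[i:i+2]))
	}
}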
+func (o DHCPv6Opt) String() string { + switch o { + case DHCPv6OptClientID: + return "ClientID" + case DHCPv6OptServerID: + return "ServerID" + case DHCPv6OptIANA: + return "IA_NA" + case DHCPv6OptIATA: + return "IA_TA" + case DHCPv6OptIAAddr: + return "IAAddr" + case DHCPv6OptOro: + return "Oro" + case DHCPv6OptPreference: + return "Preference" + case DHCPv6OptElapsedTime: + return "ElapsedTime" + case DHCPv6OptRelayMessage: + return "RelayMessage" + case DHCPv6OptAuth: + return "Auth" + case DHCPv6OptUnicast: + return "Unicast" + case DHCPv6OptStatusCode: + return "StatusCode" + case DHCPv6OptRapidCommit: + return "RapidCommit" + case DHCPv6OptUserClass: + return "UserClass" + case DHCPv6OptVendorClass: + return "VendorClass" + case DHCPv6OptVendorOpts: + return "VendorOpts" + case DHCPv6OptInterfaceID: + return "InterfaceID" + case DHCPv6OptReconfigureMessage: + return "ReconfigureMessage" + case DHCPv6OptReconfigureAccept: + return "ReconfigureAccept" + case DHCPv6OptSIPServersDomainList: + return "SIPServersDomainList" + case DHCPv6OptSIPServersAddressList: + return "SIPServersAddressList" + case DHCPv6OptDNSServers: + return "DNSRecursiveNameServer" + case DHCPv6OptDomainList: + return "DomainSearchList" + case DHCPv6OptIAPD: + return "IdentityAssociationPrefixDelegation" + case DHCPv6OptIAPrefix: + return "IAPDPrefix" + case DHCPv6OptNISServers: + return "NISServers" + case DHCPv6OptNISPServers: + return "NISv2Servers" + case DHCPv6OptNISDomainName: + return "NISDomainName" + case DHCPv6OptNISPDomainName: + return "NISv2DomainName" + case DHCPv6OptSNTPServers: + return "SNTPServers" + case DHCPv6OptInformationRefreshTime: + return "InformationRefreshTime" + case DHCPv6OptBCMCSServerDomainNameList: + return "BCMCSControlServersDomainNameList" + case DHCPv6OptBCMCSServerAddressList: + return "BCMCSControlServersAddressList" + case DHCPv6OptGeoconfCivic: + return "CivicAddress" + case DHCPv6OptRemoteID: + return "RelayAgentRemoteID" + case DHCPv6OptSubscriberID: + return "RelayAgentSubscriberID" + case DHCPv6OptClientFQDN: + return "ClientFQDN" + case DHCPv6OptPanaAgent: + return "PANAAuthenticationAgent" + case DHCPv6OptNewPOSIXTimezone: + return "NewPOSIXTimezone" + case DHCPv6OptNewTZDBTimezone: + return "NewTZDBTimezone" + case DHCPv6OptEchoRequestOption: + return "EchoRequest" + case DHCPv6OptLQQuery: + return "LeasequeryQuery" + case DHCPv6OptClientData: + return "LeasequeryClientData" + case DHCPv6OptCLTTime: + return "LeasequeryClientLastTransactionTime" + case DHCPv6OptLQRelayData: + return "LeasequeryRelayData" + case DHCPv6OptLQClientLink: + return "LeasequeryClientLink" + case DHCPv6OptMIP6HNIDF: + return "MIPv6HomeNetworkIDFQDN" + case DHCPv6OptMIP6VDINF: + return "MIPv6VisitedHomeNetworkInformation" + case DHCPv6OptMIP6IDINF: + return "MIPv6IdentifiedHomeNetworkInformation" + case DHCPv6OptMIP6UDINF: + return "MIPv6UnrestrictedHomeNetworkInformation" + case DHCPv6OptMIP6HNP: + return "MIPv6HomeNetworkPrefix" + case DHCPv6OptMIP6HAA: + return "MIPv6HomeAgentAddress" + case DHCPv6OptMIP6HAF: + return "MIPv6HomeAgentFQDN" + case DHCPv6OptV6LOST: + return "LoST Server" + case DHCPv6OptCAPWAPACV6: + return "CAPWAPAccessControllerV6" + case DHCPv6OptRelayID: + return "LeasequeryRelayID" + case DHCPv6OptIPv6AddressMoS: + return "MoSIPv6Address" + case DHCPv6OptIPv6FQDNMoS: + return "MoSDomainNameList" + case DHCPv6OptNTPServer: + return "NTPServer" + case DHCPv6OptV6AccessDomain: + return "AccessNetworkDomainName" + case DHCPv6OptSIPUACSList: + return 
"SIPUserAgentConfigurationServiceDomains" + case DHCPv6OptBootFileURL: + return "BootFileURL" + case DHCPv6OptBootFileParam: + return "BootFileParameters" + case DHCPv6OptClientArchType: + return "ClientSystemArchitectureType" + case DHCPv6OptNII: + return "ClientNetworkInterfaceIdentifier" + case DHCPv6OptGeolocation: + return "Geolocation" + case DHCPv6OptAFTRName: + return "AFTRName" + case DHCPv6OptERPLocalDomainName: + return "AFTRName" + case DHCPv6OptRSOO: + return "RSOOption" + case DHCPv6OptPDExclude: + return "PrefixExclude" + case DHCPv6OptVSS: + return "VirtualSubnetSelection" + case DHCPv6OptRDNSSSelection: + return "RDNSSSelection" + case DHCPv6OptKRBPrincipalName: + return "KerberosPrincipalName" + case DHCPv6OptKRBRealmName: + return "KerberosRealmName" + case DHCPv6OptKRBKDC: + return "KerberosKDC" + case DHCPv6OptClientLinkLayerAddress: + return "ClientLinkLayerAddress" + case DHCPv6OptLinkAddress: + return "LinkAddress" + case DHCPv6OptRADIUS: + return "RADIUS" + case DHCPv6OptSolMaxRt: + return "SolMaxRt" + case DHCPv6OptInfMaxRt: + return "InfMaxRt" + case DHCPv6OptAddrSel: + return "AddressSelection" + case DHCPv6OptAddrSelTable: + return "AddressSelectionTable" + case DHCPv6OptV6PCPServer: + return "PCPServer" + case DHCPv6OptDHCPv4Message: + return "DHCPv4Message" + case DHCPv6OptDHCPv4OverDHCPv6Server: + return "DHCP4o6ServerAddress" + case DHCPv6OptS46Rule: + return "S46Rule" + case DHCPv6OptS46BR: + return "S46BR" + case DHCPv6OptS46DMR: + return "S46DMR" + case DHCPv6OptS46V4V4Bind: + return "S46IPv4IPv6AddressBinding" + case DHCPv6OptS46PortParameters: + return "S46PortParameters" + case DHCPv6OptS46ContMAPE: + return "S46MAPEContainer" + case DHCPv6OptS46ContMAPT: + return "S46MAPTContainer" + case DHCPv6OptS46ContLW: + return "S46Lightweight4Over6Container" + case DHCPv6Opt4RD: + return "4RD" + case DHCPv6Opt4RDMapRule: + return "4RDMapRule" + case DHCPv6Opt4RDNonMapRule: + return "4RDNonMapRule" + case DHCPv6OptLQBaseTime: + return "LQBaseTime" + case DHCPv6OptLQStartTime: + return "LQStartTime" + case DHCPv6OptLQEndTime: + return "LQEndTime" + case DHCPv6OptCaptivePortal: + return "CaptivePortal" + case DHCPv6OptMPLParameters: + return "MPLParameterConfiguration" + case DHCPv6OptANIATT: + return "ANIAccessTechnologyType" + case DHCPv6OptANINetworkName: + return "ANINetworkName" + case DHCPv6OptANIAPName: + return "ANIAccessPointName" + case DHCPv6OptANIAPBSSID: + return "ANIAccessPointBSSID" + case DHCPv6OptANIOperatorID: + return "ANIOperatorIdentifier" + case DHCPv6OptANIOperatorRealm: + return "ANIOperatorRealm" + case DHCPv6OptS46Priority: + return "S64Priority" + case DHCPv6OptMUDURLV6: + return "ManufacturerUsageDescriptionURL" + case DHCPv6OptV6Prefix64: + return "V6Prefix64" + case DHCPv6OptFBindingStatus: + return "FailoverBindingStatus" + case DHCPv6OptFConnectFlags: + return "FailoverConnectFlags" + case DHCPv6OptFDNSRemovalInfo: + return "FailoverDNSRemovalInfo" + case DHCPv6OptFDNSHostName: + return "FailoverDNSHostName" + case DHCPv6OptFDNSZoneName: + return "FailoverDNSZoneName" + case DHCPv6OptFDNSFlags: + return "FailoverDNSFlags" + case DHCPv6OptFExpirationTime: + return "FailoverExpirationTime" + case DHCPv6OptFMaxUnacknowledgedBNDUPD: + return "FailoverMaxUnacknowledgedBNDUPDMessages" + case DHCPv6OptFMCLT: + return "FailoverMaximumClientLeadTime" + case DHCPv6OptFPartnerLifetime: + return "FailoverPartnerLifetime" + case DHCPv6OptFPartnerLifetimeSent: + return "FailoverPartnerLifetimeSent" + case DHCPv6OptFPartnerDownTime: + return 
"FailoverPartnerDownTime" + case DHCPv6OptFPartnerRawCltTime: + return "FailoverPartnerRawClientLeadTime" + case DHCPv6OptFProtocolVersion: + return "FailoverProtocolVersion" + case DHCPv6OptFKeepaliveTime: + return "FailoverKeepaliveTime" + case DHCPv6OptFReconfigureData: + return "FailoverReconfigureData" + case DHCPv6OptFRelationshipName: + return "FailoverRelationshipName" + case DHCPv6OptFServerFlags: + return "FailoverServerFlags" + case DHCPv6OptFServerState: + return "FailoverServerState" + case DHCPv6OptFStartTimeOfState: + return "FailoverStartTimeOfState" + case DHCPv6OptFStateExpirationTime: + return "FailoverStateExpirationTime" + case DHCPv6OptRelayPort: + return "RelayPort" + case DHCPv6OptV6ZeroTouchRedirect: + return "ZeroTouch" + case DHCPv6OptIPV6AddressANDSF: + return "ANDSFIPv6Address" + default: + return fmt.Sprintf("Unknown(%d)", uint16(o)) + } +} + +// DHCPv6Options is used to get nicely printed option lists which would normally +// be cut off after 5 options. +type DHCPv6Options []DHCPv6Option + +// String returns a string version of the options list. +func (o DHCPv6Options) String() string { + buf := &bytes.Buffer{} + buf.WriteByte('[') + for i, opt := range o { + buf.WriteString(opt.String()) + if i+1 != len(o) { + buf.WriteString(", ") + } + } + buf.WriteByte(']') + return buf.String() +} + +// DHCPv6Option rerpresents a DHCP option. +type DHCPv6Option struct { + Code DHCPv6Opt + Length uint16 + Data []byte +} + +// String returns a string version of a DHCP Option. +func (o DHCPv6Option) String() string { + switch o.Code { + case DHCPv6OptClientID, DHCPv6OptServerID: + duid, err := decodeDHCPv6DUID(o.Data) + if err != nil { + return fmt.Sprintf("Option(%s:INVALID)", o.Code) + } + return fmt.Sprintf("Option(%s:[%s])", o.Code, duid.String()) + case DHCPv6OptOro: + options := "" + for i := 0; i < int(o.Length); i += 2 { + if options != "" { + options += "," + } + option := DHCPv6Opt(binary.BigEndian.Uint16(o.Data[i : i+2])) + options += option.String() + } + return fmt.Sprintf("Option(%s:[%s])", o.Code, options) + default: + return fmt.Sprintf("Option(%s:%v)", o.Code, o.Data) + } +} + +// NewDHCPv6Option constructs a new DHCPv6Option with a given type and data. +func NewDHCPv6Option(code DHCPv6Opt, data []byte) DHCPv6Option { + o := DHCPv6Option{Code: code} + if data != nil { + o.Data = data + o.Length = uint16(len(data)) + } + + return o +} + +func (o *DHCPv6Option) encode(b []byte, opts gopacket.SerializeOptions) error { + binary.BigEndian.PutUint16(b[0:2], uint16(o.Code)) + if opts.FixLengths { + binary.BigEndian.PutUint16(b[2:4], uint16(len(o.Data))) + } else { + binary.BigEndian.PutUint16(b[2:4], o.Length) + } + copy(b[4:], o.Data) + + return nil +} + +func (o *DHCPv6Option) decode(data []byte) error { + if len(data) < 2 { + return errors.New("not enough data to decode") + } + o.Code = DHCPv6Opt(binary.BigEndian.Uint16(data[0:2])) + if len(data) < 3 { + return errors.New("not enough data to decode") + } + o.Length = binary.BigEndian.Uint16(data[2:4]) + o.Data = data[4 : 4+o.Length] + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dns.go new file mode 100644 index 00000000..0fe8d48f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dns.go @@ -0,0 +1,1083 @@ +// Copyright 2014, 2018 GoPacket Authors. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "strings" + + "github.com/google/gopacket" +) + +// DNSClass defines the class associated with a request/response. Different DNS +// classes can be thought of as an array of parallel namespace trees. +type DNSClass uint16 + +// DNSClass known values. +const ( + DNSClassIN DNSClass = 1 // Internet + DNSClassCS DNSClass = 2 // the CSNET class (Obsolete) + DNSClassCH DNSClass = 3 // the CHAOS class + DNSClassHS DNSClass = 4 // Hesiod [Dyer 87] + DNSClassAny DNSClass = 255 // AnyClass +) + +func (dc DNSClass) String() string { + switch dc { + default: + return "Unknown" + case DNSClassIN: + return "IN" + case DNSClassCS: + return "CS" + case DNSClassCH: + return "CH" + case DNSClassHS: + return "HS" + case DNSClassAny: + return "Any" + } +} + +// DNSType defines the type of data being requested/returned in a +// question/answer. +type DNSType uint16 + +// DNSType known values. +const ( + DNSTypeA DNSType = 1 // a host address + DNSTypeNS DNSType = 2 // an authoritative name server + DNSTypeMD DNSType = 3 // a mail destination (Obsolete - use MX) + DNSTypeMF DNSType = 4 // a mail forwarder (Obsolete - use MX) + DNSTypeCNAME DNSType = 5 // the canonical name for an alias + DNSTypeSOA DNSType = 6 // marks the start of a zone of authority + DNSTypeMB DNSType = 7 // a mailbox domain name (EXPERIMENTAL) + DNSTypeMG DNSType = 8 // a mail group member (EXPERIMENTAL) + DNSTypeMR DNSType = 9 // a mail rename domain name (EXPERIMENTAL) + DNSTypeNULL DNSType = 10 // a null RR (EXPERIMENTAL) + DNSTypeWKS DNSType = 11 // a well known service description + DNSTypePTR DNSType = 12 // a domain name pointer + DNSTypeHINFO DNSType = 13 // host information + DNSTypeMINFO DNSType = 14 // mailbox or mail list information + DNSTypeMX DNSType = 15 // mail exchange + DNSTypeTXT DNSType = 16 // text strings + DNSTypeAAAA DNSType = 28 // a IPv6 host address [RFC3596] + DNSTypeSRV DNSType = 33 // server discovery [RFC2782] [RFC6195] + DNSTypeOPT DNSType = 41 // OPT Pseudo-RR [RFC6891] + DNSTypeURI DNSType = 256 // URI RR [RFC7553] +) + +func (dt DNSType) String() string { + switch dt { + default: + return "Unknown" + case DNSTypeA: + return "A" + case DNSTypeNS: + return "NS" + case DNSTypeMD: + return "MD" + case DNSTypeMF: + return "MF" + case DNSTypeCNAME: + return "CNAME" + case DNSTypeSOA: + return "SOA" + case DNSTypeMB: + return "MB" + case DNSTypeMG: + return "MG" + case DNSTypeMR: + return "MR" + case DNSTypeNULL: + return "NULL" + case DNSTypeWKS: + return "WKS" + case DNSTypePTR: + return "PTR" + case DNSTypeHINFO: + return "HINFO" + case DNSTypeMINFO: + return "MINFO" + case DNSTypeMX: + return "MX" + case DNSTypeTXT: + return "TXT" + case DNSTypeAAAA: + return "AAAA" + case DNSTypeSRV: + return "SRV" + case DNSTypeOPT: + return "OPT" + case DNSTypeURI: + return "URI" + } +} + +// DNSResponseCode provides response codes for question answers. +type DNSResponseCode uint8 + +// DNSResponseCode known values. 
+const ( + DNSResponseCodeNoErr DNSResponseCode = 0 // No error + DNSResponseCodeFormErr DNSResponseCode = 1 // Format Error [RFC1035] + DNSResponseCodeServFail DNSResponseCode = 2 // Server Failure [RFC1035] + DNSResponseCodeNXDomain DNSResponseCode = 3 // Non-Existent Domain [RFC1035] + DNSResponseCodeNotImp DNSResponseCode = 4 // Not Implemented [RFC1035] + DNSResponseCodeRefused DNSResponseCode = 5 // Query Refused [RFC1035] + DNSResponseCodeYXDomain DNSResponseCode = 6 // Name Exists when it should not [RFC2136] + DNSResponseCodeYXRRSet DNSResponseCode = 7 // RR Set Exists when it should not [RFC2136] + DNSResponseCodeNXRRSet DNSResponseCode = 8 // RR Set that should exist does not [RFC2136] + DNSResponseCodeNotAuth DNSResponseCode = 9 // Server Not Authoritative for zone [RFC2136] + DNSResponseCodeNotZone DNSResponseCode = 10 // Name not contained in zone [RFC2136] + DNSResponseCodeBadVers DNSResponseCode = 16 // Bad OPT Version [RFC2671] + DNSResponseCodeBadSig DNSResponseCode = 16 // TSIG Signature Failure [RFC2845] + DNSResponseCodeBadKey DNSResponseCode = 17 // Key not recognized [RFC2845] + DNSResponseCodeBadTime DNSResponseCode = 18 // Signature out of time window [RFC2845] + DNSResponseCodeBadMode DNSResponseCode = 19 // Bad TKEY Mode [RFC2930] + DNSResponseCodeBadName DNSResponseCode = 20 // Duplicate key name [RFC2930] + DNSResponseCodeBadAlg DNSResponseCode = 21 // Algorithm not supported [RFC2930] + DNSResponseCodeBadTruc DNSResponseCode = 22 // Bad Truncation [RFC4635] + DNSResponseCodeBadCookie DNSResponseCode = 23 // Bad/missing Server Cookie [RFC7873] +) + +func (drc DNSResponseCode) String() string { + switch drc { + default: + return "Unknown" + case DNSResponseCodeNoErr: + return "No Error" + case DNSResponseCodeFormErr: + return "Format Error" + case DNSResponseCodeServFail: + return "Server Failure " + case DNSResponseCodeNXDomain: + return "Non-Existent Domain" + case DNSResponseCodeNotImp: + return "Not Implemented" + case DNSResponseCodeRefused: + return "Query Refused" + case DNSResponseCodeYXDomain: + return "Name Exists when it should not" + case DNSResponseCodeYXRRSet: + return "RR Set Exists when it should not" + case DNSResponseCodeNXRRSet: + return "RR Set that should exist does not" + case DNSResponseCodeNotAuth: + return "Server Not Authoritative for zone" + case DNSResponseCodeNotZone: + return "Name not contained in zone" + case DNSResponseCodeBadVers: + return "Bad OPT Version" + case DNSResponseCodeBadKey: + return "Key not recognized" + case DNSResponseCodeBadTime: + return "Signature out of time window" + case DNSResponseCodeBadMode: + return "Bad TKEY Mode" + case DNSResponseCodeBadName: + return "Duplicate key name" + case DNSResponseCodeBadAlg: + return "Algorithm not supported" + case DNSResponseCodeBadTruc: + return "Bad Truncation" + case DNSResponseCodeBadCookie: + return "Bad Cookie" + } +} + +// DNSOpCode defines a set of different operation types. +type DNSOpCode uint8 + +// DNSOpCode known values. 
+const ( + DNSOpCodeQuery DNSOpCode = 0 // Query [RFC1035] + DNSOpCodeIQuery DNSOpCode = 1 // Inverse Query Obsolete [RFC3425] + DNSOpCodeStatus DNSOpCode = 2 // Status [RFC1035] + DNSOpCodeNotify DNSOpCode = 4 // Notify [RFC1996] + DNSOpCodeUpdate DNSOpCode = 5 // Update [RFC2136] +) + +func (doc DNSOpCode) String() string { + switch doc { + default: + return "Unknown" + case DNSOpCodeQuery: + return "Query" + case DNSOpCodeIQuery: + return "Inverse Query" + case DNSOpCodeStatus: + return "Status" + case DNSOpCodeNotify: + return "Notify" + case DNSOpCodeUpdate: + return "Update" + } +} + +// DNS is specified in RFC 1034 / RFC 1035 +// +---------------------+ +// | Header | +// +---------------------+ +// | Question | the question for the name server +// +---------------------+ +// | Answer | RRs answering the question +// +---------------------+ +// | Authority | RRs pointing toward an authority +// +---------------------+ +// | Additional | RRs holding additional information +// +---------------------+ +// +// DNS Header +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | ID | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// |QR| Opcode |AA|TC|RD|RA| Z | RCODE | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | QDCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | ANCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | NSCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | ARCOUNT | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// DNS contains data from a single Domain Name Service packet. +type DNS struct { + BaseLayer + + // Header fields + ID uint16 + QR bool + OpCode DNSOpCode + + AA bool // Authoritative answer + TC bool // Truncated + RD bool // Recursion desired + RA bool // Recursion available + Z uint8 // Reserved for future use + + ResponseCode DNSResponseCode + QDCount uint16 // Number of questions to expect + ANCount uint16 // Number of answers to expect + NSCount uint16 // Number of authorities to expect + ARCount uint16 // Number of additional records to expect + + // Entries + Questions []DNSQuestion + Answers []DNSResourceRecord + Authorities []DNSResourceRecord + Additionals []DNSResourceRecord + + // buffer for doing name decoding. We use a single reusable buffer to avoid + // name decoding on a single object via multiple DecodeFromBytes calls + // requiring constant allocation of small byte slices. + buffer []byte +} + +// LayerType returns gopacket.LayerTypeDNS. +func (d *DNS) LayerType() gopacket.LayerType { return LayerTypeDNS } + +// decodeDNS decodes the byte slice into a DNS type. It also +// setups the application Layer in PacketBuilder. +func decodeDNS(data []byte, p gopacket.PacketBuilder) error { + d := &DNS{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(d) + p.SetApplicationLayer(d) + return nil +} + +// DecodeFromBytes decodes the slice into the DNS struct. 
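The bit diagram above maps directly onto the mask-and-shift expressions in the decoder that follows. A standalone walk through the two flag bytes of an invented response header:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Invented response header: ID=0x1234, QR=1, OpCode=0, AA=1, RCODE=3 (NXDomain),
	// QDCOUNT=1, all other counts zero.
	hdr := []byte{0x12, 0x34, 0x84, 0x03, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}

	id := binary.BigEndian.Uint16(hdr[:2])
	qr := hdr[2]&0x80 != 0         // top bit of byte 2
	opcode := (hdr[2] >> 3) & 0x0f // next four bits
	aa := hdr[2]&0x04 != 0
	rcode := hdr[3] & 0x0f // low nibble of byte 3
	qdcount := binary.BigEndian.Uint16(hdr[4:6])

	fmt.Println(id, qr, opcode, aa, rcode, qdcount)
	// Output: 4660 true 0 true 3 1
}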
+func (d *DNS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + d.buffer = d.buffer[:0] + + if len(data) < 12 { + df.SetTruncated() + return errDNSPacketTooShort + } + + // since there are no further layers, the baselayer's content is + // pointing to this layer + d.BaseLayer = BaseLayer{Contents: data[:len(data)]} + d.ID = binary.BigEndian.Uint16(data[:2]) + d.QR = data[2]&0x80 != 0 + d.OpCode = DNSOpCode(data[2]>>3) & 0x0F + d.AA = data[2]&0x04 != 0 + d.TC = data[2]&0x02 != 0 + d.RD = data[2]&0x01 != 0 + d.RA = data[3]&0x80 != 0 + d.Z = uint8(data[3]>>4) & 0x7 + d.ResponseCode = DNSResponseCode(data[3] & 0xF) + d.QDCount = binary.BigEndian.Uint16(data[4:6]) + d.ANCount = binary.BigEndian.Uint16(data[6:8]) + d.NSCount = binary.BigEndian.Uint16(data[8:10]) + d.ARCount = binary.BigEndian.Uint16(data[10:12]) + + d.Questions = d.Questions[:0] + d.Answers = d.Answers[:0] + d.Authorities = d.Authorities[:0] + d.Additionals = d.Additionals[:0] + + offset := 12 + var err error + for i := 0; i < int(d.QDCount); i++ { + var q DNSQuestion + if offset, err = q.decode(data, offset, df, &d.buffer); err != nil { + return err + } + d.Questions = append(d.Questions, q) + } + + // For some horrible reason, if we do the obvious thing in this loop: + // var r DNSResourceRecord + // if blah := r.decode(blah); err != nil { + // return err + // } + // d.Foo = append(d.Foo, r) + // the Go compiler thinks that 'r' escapes to the heap, causing a malloc for + // every Answer, Authority, and Additional. To get around this, we do + // something really silly: we append an empty resource record to our slice, + // then use the last value in the slice to call decode. Since the value is + // already in the slice, there's no WAY it can escape... on the other hand our + // code is MUCH uglier :( + for i := 0; i < int(d.ANCount); i++ { + d.Answers = append(d.Answers, DNSResourceRecord{}) + if offset, err = d.Answers[i].decode(data, offset, df, &d.buffer); err != nil { + d.Answers = d.Answers[:i] // strip off erroneous value + return err + } + } + for i := 0; i < int(d.NSCount); i++ { + d.Authorities = append(d.Authorities, DNSResourceRecord{}) + if offset, err = d.Authorities[i].decode(data, offset, df, &d.buffer); err != nil { + d.Authorities = d.Authorities[:i] // strip off erroneous value + return err + } + } + for i := 0; i < int(d.ARCount); i++ { + d.Additionals = append(d.Additionals, DNSResourceRecord{}) + if offset, err = d.Additionals[i].decode(data, offset, df, &d.buffer); err != nil { + d.Additionals = d.Additionals[:i] // strip off erroneous value + return err + } + // extract extended RCODE from OPT RRs, RFC 6891 section 6.1.3 + if d.Additionals[i].Type == DNSTypeOPT { + d.ResponseCode = DNSResponseCode(uint8(d.ResponseCode) | uint8(d.Additionals[i].TTL>>20&0xF0)) + } + } + + if uint16(len(d.Questions)) != d.QDCount { + return errDecodeQueryBadQDCount + } else if uint16(len(d.Answers)) != d.ANCount { + return errDecodeQueryBadANCount + } else if uint16(len(d.Authorities)) != d.NSCount { + return errDecodeQueryBadNSCount + } else if uint16(len(d.Additionals)) != d.ARCount { + return errDecodeQueryBadARCount + } + return nil +} + +// CanDecode implements gopacket.DecodingLayer. +func (d *DNS) CanDecode() gopacket.LayerClass { + return LayerTypeDNS +} + +// NextLayerType implements gopacket.DecodingLayer. +func (d *DNS) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// Payload returns nil. 
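One subtlety in DecodeFromBytes above is worth working through before the trivial Payload accessor below: for OPT pseudo-records, RFC 6891 section 6.1.3 stores the upper eight bits of the response code in bits 24-31 of the record's TTL, and the expression TTL>>20&0xF0 lifts the low four of those bits into the high nibble of the 8-bit code the struct keeps. A sketch of the arithmetic, using BADVERS (16) as the invented example:

package main

import "fmt"

func main() {
	hdr := uint16(0x0)           // low four RCODE bits from the DNS header
	optTTL := uint32(0x01 << 24) // EXTENDED-RCODE byte in the OPT TTL's top 8 bits

	full := uint16(optTTL>>24)<<4 | hdr // full 12-bit RCODE: 16 (BADVERS)

	// What the layer stores (8 bits), using the same TTL>>20&0xF0 trick
	// as the decoder above:
	stored := uint8(hdr) | uint8(optTTL>>20&0xF0)

	fmt.Println(full, stored) // 16 16
}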
+func (d *DNS) Payload() []byte { + return nil +} + +func b2i(b bool) int { + if b { + return 1 + } + return 0 +} + +func recSize(rr *DNSResourceRecord) int { + switch rr.Type { + case DNSTypeA: + return 4 + case DNSTypeAAAA: + return 16 + case DNSTypeNS: + return len(rr.NS) + 2 + case DNSTypeCNAME: + return len(rr.CNAME) + 2 + case DNSTypePTR: + return len(rr.PTR) + 2 + case DNSTypeSOA: + return len(rr.SOA.MName) + 2 + len(rr.SOA.RName) + 2 + 20 + case DNSTypeMX: + return 2 + len(rr.MX.Name) + 2 + case DNSTypeTXT: + l := len(rr.TXTs) + for _, txt := range rr.TXTs { + l += len(txt) + } + return l + case DNSTypeSRV: + return 6 + len(rr.SRV.Name) + 2 + case DNSTypeURI: + return 4 + len(rr.URI.Target) + case DNSTypeOPT: + l := len(rr.OPT) * 4 + for _, opt := range rr.OPT { + l += len(opt.Data) + } + return l + } + + return 0 +} + +func computeSize(recs []DNSResourceRecord) int { + sz := 0 + for _, rr := range recs { + v := len(rr.Name) + + if v == 0 { + sz += v + 11 + } else { + sz += v + 12 + } + + sz += recSize(&rr) + } + return sz +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (d *DNS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + dsz := 0 + for _, q := range d.Questions { + dsz += len(q.Name) + 6 + } + dsz += computeSize(d.Answers) + dsz += computeSize(d.Authorities) + dsz += computeSize(d.Additionals) + + bytes, err := b.PrependBytes(12 + dsz) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, d.ID) + bytes[2] = byte((b2i(d.QR) << 7) | (int(d.OpCode) << 3) | (b2i(d.AA) << 2) | (b2i(d.TC) << 1) | b2i(d.RD)) + bytes[3] = byte((b2i(d.RA) << 7) | (int(d.Z) << 4) | int(d.ResponseCode)) + + if opts.FixLengths { + d.QDCount = uint16(len(d.Questions)) + d.ANCount = uint16(len(d.Answers)) + d.NSCount = uint16(len(d.Authorities)) + d.ARCount = uint16(len(d.Additionals)) + } + binary.BigEndian.PutUint16(bytes[4:], d.QDCount) + binary.BigEndian.PutUint16(bytes[6:], d.ANCount) + binary.BigEndian.PutUint16(bytes[8:], d.NSCount) + binary.BigEndian.PutUint16(bytes[10:], d.ARCount) + + off := 12 + for _, qd := range d.Questions { + n := qd.encode(bytes, off) + off += n + } + + for i := range d.Answers { + // done this way so we can modify DNSResourceRecord to fix + // lengths if requested + qa := &d.Answers[i] + n, err := qa.encode(bytes, off, opts) + if err != nil { + return err + } + off += n + } + + for i := range d.Authorities { + qa := &d.Authorities[i] + n, err := qa.encode(bytes, off, opts) + if err != nil { + return err + } + off += n + } + for i := range d.Additionals { + qa := &d.Additionals[i] + n, err := qa.encode(bytes, off, opts) + if err != nil { + return err + } + off += n + } + + return nil +} + +const maxRecursionLevel = 255 + +func decodeName(data []byte, offset int, buffer *[]byte, level int) ([]byte, int, error) { + if level > maxRecursionLevel { + return nil, 0, errMaxRecursion + } else if offset >= len(data) { + return nil, 0, errDNSNameOffsetTooHigh + } else if offset < 0 { + return nil, 0, errDNSNameOffsetNegative + } + start := len(*buffer) + index := offset + if data[index] == 0x00 { + return nil, index + 1, nil + } +loop: + for data[index] != 0x00 { + switch data[index] & 0xc0 { + default: + /* RFC 1035 + A domain name represented as a sequence of labels, where + each label consists of a length octet followed by that + number of octets. The domain name terminates with the + zero length octet for the null label of the root. 
Note + that this field may be an odd number of octets; no + padding is used. + */ + index2 := index + int(data[index]) + 1 + if index2-offset > 255 { + return nil, 0, errDNSNameTooLong + } else if index2 < index+1 || index2 > len(data) { + return nil, 0, errDNSNameInvalidIndex + } + *buffer = append(*buffer, '.') + *buffer = append(*buffer, data[index+1:index2]...) + index = index2 + + case 0xc0: + /* RFC 1035 + The pointer takes the form of a two octet sequence. + + The first two bits are ones. This allows a pointer to + be distinguished from a label, since the label must + begin with two zero bits because labels are restricted + to 63 octets or less. (The 10 and 01 combinations are + reserved for future use.) The OFFSET field specifies + an offset from the start of the message (i.e., the + first octet of the ID field in the domain header). A + zero offset specifies the first byte of the ID field, + etc. + + The compression scheme allows a domain name in a message to be + represented as either: + - a sequence of labels ending in a zero octet + - a pointer + - a sequence of labels ending with a pointer + */ + if index+2 > len(data) { + return nil, 0, errDNSPointerOffsetTooHigh + } + offsetp := int(binary.BigEndian.Uint16(data[index:index+2]) & 0x3fff) + if offsetp > len(data) { + return nil, 0, errDNSPointerOffsetTooHigh + } + // This looks a little tricky, but actually isn't. Because of how + // decodeName is written, calling it appends the decoded name to the + // current buffer. We already have the start of the buffer, then, so + // once this call is done buffer[start:] will contain our full name. + _, _, err := decodeName(data, offsetp, buffer, level+1) + if err != nil { + return nil, 0, err + } + index++ // pointer is two bytes, so add an extra byte here. + break loop + /* EDNS, or other DNS option ? */ + case 0x40: // RFC 2673 + return nil, 0, fmt.Errorf("qname '0x40' - RFC 2673 unsupported yet (data=%x index=%d)", + data[index], index) + + case 0x80: + return nil, 0, fmt.Errorf("qname '0x80' unsupported yet (data=%x index=%d)", + data[index], index) + } + if index >= len(data) { + return nil, 0, errDNSIndexOutOfRange + } + } + if len(*buffer) <= start { + return (*buffer)[start:], index + 1, nil + } + return (*buffer)[start+1:], index + 1, nil +} + +// DNSQuestion wraps a single request (question) within a DNS query. 
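Before the DNSQuestion struct below, the RFC 1035 rules quoted inside decodeName above are easiest to see on concrete bytes: a label is a length byte followed by that many octets, a zero byte ends the name, and a byte with the top two bits set (0xc0) starts a two-byte pointer whose low 14 bits are an offset back into the message. The hypothetical readName below re-creates the idea on invented data; unlike decodeName it keeps no shared buffer and, for brevity, omits the recursion cap (maxRecursionLevel) and bounds checks the real decoder enforces.

package main

import (
	"encoding/binary"
	"fmt"
)

// readName follows length-prefixed labels and 0xc0 compression pointers.
func readName(msg []byte, off int) string {
	name := ""
	for {
		b := msg[off]
		switch {
		case b == 0x00: // root label: end of name
			return name
		case b&0xc0 == 0xc0: // pointer: 14-bit offset into msg
			ptr := int(binary.BigEndian.Uint16(msg[off:off+2]) & 0x3fff)
			rest := readName(msg, ptr)
			if name == "" {
				return rest
			}
			return name + "." + rest
		default: // plain label of b bytes
			if name != "" {
				name += "."
			}
			name += string(msg[off+1 : off+1+int(b)])
			off += 1 + int(b)
		}
	}
}

func main() {
	// Invented message fragment: "example.com" at offset 0, then "www"
	// followed by a pointer back to offset 0.
	msg := []byte{
		7, 'e', 'x', 'a', 'm', 'p', 'l', 'e', 3, 'c', 'o', 'm', 0, // offset 0
		3, 'w', 'w', 'w', 0xc0, 0x00, // offset 13
	}
	fmt.Println(readName(msg, 0))  // example.com
	fmt.Println(readName(msg, 13)) // www.example.com
}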
+type DNSQuestion struct { + Name []byte + Type DNSType + Class DNSClass +} + +func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) { + name, endq, err := decodeName(data, offset, buffer, 1) + if err != nil { + return 0, err + } + + q.Name = name + q.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2])) + q.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4])) + + return endq + 4, nil +} + +func (q *DNSQuestion) encode(data []byte, offset int) int { + noff := encodeName(q.Name, data, offset) + nSz := noff - offset + binary.BigEndian.PutUint16(data[noff:], uint16(q.Type)) + binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class)) + return nSz + 4 +} + +// DNSResourceRecord +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | | +// / / +// / NAME / +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TYPE | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | CLASS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TTL | +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | RDLENGTH | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--| +// / RDATA / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// DNSResourceRecord wraps the data from a single DNS resource within a +// response. +type DNSResourceRecord struct { + // Header + Name []byte + Type DNSType + Class DNSClass + TTL uint32 + + // RDATA Raw Values + DataLength uint16 + Data []byte + + // RDATA Decoded Values + IP net.IP + NS, CNAME, PTR []byte + TXTs [][]byte + SOA DNSSOA + SRV DNSSRV + MX DNSMX + OPT []DNSOPT // See RFC 6891, section 6.1.2 + URI DNSURI + + // Undecoded TXT for backward compatibility + TXT []byte +} + +// decode decodes the resource record, returning the total length of the record. +func (rr *DNSResourceRecord) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) { + name, endq, err := decodeName(data, offset, buffer, 1) + if err != nil { + return 0, err + } + + rr.Name = name + rr.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2])) + rr.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4])) + rr.TTL = binary.BigEndian.Uint32(data[endq+4 : endq+8]) + rr.DataLength = binary.BigEndian.Uint16(data[endq+8 : endq+10]) + end := endq + 10 + int(rr.DataLength) + if end > len(data) { + return 0, errDecodeRecordLength + } + rr.Data = data[endq+10 : end] + + if err = rr.decodeRData(data, endq+10, buffer); err != nil { + return 0, err + } + + return endq + 10 + int(rr.DataLength), nil +} + +func encodeName(name []byte, data []byte, offset int) int { + l := 0 + for i := range name { + if name[i] == '.' 
{ + data[offset+i-l] = byte(l) + l = 0 + } else { + // skip one to write the length + data[offset+i+1] = name[i] + l++ + } + } + + if len(name) == 0 { + data[offset] = 0x00 // terminal + return offset + 1 + } + + // length for final portion + data[offset+len(name)-l] = byte(l) + data[offset+len(name)+1] = 0x00 // terminal + return offset + len(name) + 2 +} + +func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) { + + noff := encodeName(rr.Name, data, offset) + nSz := noff - offset + + binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type)) + binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class)) + binary.BigEndian.PutUint32(data[noff+4:], uint32(rr.TTL)) + + switch rr.Type { + case DNSTypeA: + copy(data[noff+10:], rr.IP.To4()) + case DNSTypeAAAA: + copy(data[noff+10:], rr.IP) + case DNSTypeNS: + encodeName(rr.NS, data, noff+10) + case DNSTypeCNAME: + encodeName(rr.CNAME, data, noff+10) + case DNSTypePTR: + encodeName(rr.PTR, data, noff+10) + case DNSTypeSOA: + noff2 := encodeName(rr.SOA.MName, data, noff+10) + noff2 = encodeName(rr.SOA.RName, data, noff2) + binary.BigEndian.PutUint32(data[noff2:], rr.SOA.Serial) + binary.BigEndian.PutUint32(data[noff2+4:], rr.SOA.Refresh) + binary.BigEndian.PutUint32(data[noff2+8:], rr.SOA.Retry) + binary.BigEndian.PutUint32(data[noff2+12:], rr.SOA.Expire) + binary.BigEndian.PutUint32(data[noff2+16:], rr.SOA.Minimum) + case DNSTypeMX: + binary.BigEndian.PutUint16(data[noff+10:], rr.MX.Preference) + encodeName(rr.MX.Name, data, noff+12) + case DNSTypeTXT: + noff2 := noff + 10 + for _, txt := range rr.TXTs { + data[noff2] = byte(len(txt)) + copy(data[noff2+1:], txt) + noff2 += 1 + len(txt) + } + case DNSTypeSRV: + binary.BigEndian.PutUint16(data[noff+10:], rr.SRV.Priority) + binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight) + binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port) + encodeName(rr.SRV.Name, data, noff+16) + case DNSTypeURI: + binary.BigEndian.PutUint16(data[noff+10:], rr.URI.Priority) + binary.BigEndian.PutUint16(data[noff+12:], rr.URI.Weight) + copy(data[noff+14:], rr.URI.Target) + case DNSTypeOPT: + noff2 := noff + 10 + for _, opt := range rr.OPT { + binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code)) + binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data))) + copy(data[noff2+4:], opt.Data) + noff2 += 4 + len(opt.Data) + } + default: + return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type) + } + + // DataLength + dSz := recSize(rr) + binary.BigEndian.PutUint16(data[noff+8:], uint16(dSz)) + + if opts.FixLengths { + rr.DataLength = uint16(dSz) + } + + return nSz + 10 + dSz, nil +} + +func (rr *DNSResourceRecord) String() string { + + if rr.Type == DNSTypeOPT { + opts := make([]string, len(rr.OPT)) + for i, opt := range rr.OPT { + opts[i] = opt.String() + } + return "OPT " + strings.Join(opts, ",") + } + if rr.Type == DNSTypeURI { + return fmt.Sprintf("URI %d %d %s", rr.URI.Priority, rr.URI.Weight, string(rr.URI.Target)) + } + if rr.Class == DNSClassIN { + switch rr.Type { + case DNSTypeA, DNSTypeAAAA: + return rr.IP.String() + case DNSTypeNS: + return "NS " + string(rr.NS) + case DNSTypeCNAME: + return "CNAME " + string(rr.CNAME) + case DNSTypePTR: + return "PTR " + string(rr.PTR) + case DNSTypeTXT: + return "TXT " + string(rr.TXT) + } + } + + return fmt.Sprintf("<%v, %v>", rr.Class, rr.Type) +} + +func decodeCharacterStrings(data []byte) ([][]byte, error) { + strings := make([][]byte, 0, 1) + end := len(data) + for index, index2 
:= 0, 0; index != end; index = index2 { + index2 = index + 1 + int(data[index]) // index increases by 1..256 and does not overflow + if index2 > end { + return nil, errCharStringMissData + } + strings = append(strings, data[index+1:index2]) + } + return strings, nil +} + +func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) { + allOPT := []DNSOPT{} + end := len(data) + + if offset == end { + return allOPT, nil // There is no data to read + } + + if offset+4 > end { + return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset) + } + + for i := offset; i < end; { + opt := DNSOPT{} + opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2])) + l := binary.BigEndian.Uint16(data[i+2 : i+4]) + if i+4+int(l) > end { + return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l) + } + opt.Data = data[i+4 : i+4+int(l)] + allOPT = append(allOPT, opt) + i += int(l) + 4 + } + return allOPT, nil +} + +func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error { + switch rr.Type { + case DNSTypeA: + rr.IP = rr.Data + case DNSTypeAAAA: + rr.IP = rr.Data + case DNSTypeTXT, DNSTypeHINFO: + rr.TXT = rr.Data + txts, err := decodeCharacterStrings(rr.Data) + if err != nil { + return err + } + rr.TXTs = txts + case DNSTypeNS: + name, _, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.NS = name + case DNSTypeCNAME: + name, _, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.CNAME = name + case DNSTypePTR: + name, _, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.PTR = name + case DNSTypeSOA: + name, endq, err := decodeName(data, offset, buffer, 1) + if err != nil { + return err + } + rr.SOA.MName = name + name, endq, err = decodeName(data, endq, buffer, 1) + if err != nil { + return err + } + rr.SOA.RName = name + rr.SOA.Serial = binary.BigEndian.Uint32(data[endq : endq+4]) + rr.SOA.Refresh = binary.BigEndian.Uint32(data[endq+4 : endq+8]) + rr.SOA.Retry = binary.BigEndian.Uint32(data[endq+8 : endq+12]) + rr.SOA.Expire = binary.BigEndian.Uint32(data[endq+12 : endq+16]) + rr.SOA.Minimum = binary.BigEndian.Uint32(data[endq+16 : endq+20]) + case DNSTypeMX: + rr.MX.Preference = binary.BigEndian.Uint16(data[offset : offset+2]) + name, _, err := decodeName(data, offset+2, buffer, 1) + if err != nil { + return err + } + rr.MX.Name = name + case DNSTypeURI: + rr.URI.Priority = binary.BigEndian.Uint16(data[offset : offset+2]) + rr.URI.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + rr.URI.Target = rr.Data[4:] + case DNSTypeSRV: + rr.SRV.Priority = binary.BigEndian.Uint16(data[offset : offset+2]) + rr.SRV.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + rr.SRV.Port = binary.BigEndian.Uint16(data[offset+4 : offset+6]) + name, _, err := decodeName(data, offset+6, buffer, 1) + if err != nil { + return err + } + rr.SRV.Name = name + case DNSTypeOPT: + allOPT, err := decodeOPTs(data, offset) + if err != nil { + return err + } + rr.OPT = allOPT + } + return nil +} + +// DNSSOA is a Start of Authority record. Each domain requires a SOA record at +// the cutover where a domain is delegated from its parent. +type DNSSOA struct { + MName, RName []byte + Serial, Refresh, Retry, Expire, Minimum uint32 +} + +// DNSSRV is a Service record, defining a location (hostname/port) of a +// server/service. 
+type DNSSRV struct { + Priority, Weight, Port uint16 + Name []byte +} + +// DNSMX is a mail exchange record, defining a mail server for a recipient's +// domain. +type DNSMX struct { + Preference uint16 + Name []byte +} + +// DNSURI is a URI record, defining a target (URI) of a server/service +type DNSURI struct { + Priority, Weight uint16 + Target []byte +} + +// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2 +type DNSOptionCode uint16 + +func (doc DNSOptionCode) String() string { + switch doc { + default: + return "Unknown" + case DNSOptionCodeNSID: + return "NSID" + case DNSOptionCodeDAU: + return "DAU" + case DNSOptionCodeDHU: + return "DHU" + case DNSOptionCodeN3U: + return "N3U" + case DNSOptionCodeEDNSClientSubnet: + return "EDNSClientSubnet" + case DNSOptionCodeEDNSExpire: + return "EDNSExpire" + case DNSOptionCodeCookie: + return "Cookie" + case DNSOptionCodeEDNSKeepAlive: + return "EDNSKeepAlive" + case DNSOptionCodePadding: + return "CodePadding" + case DNSOptionCodeChain: + return "CodeChain" + case DNSOptionCodeEDNSKeyTag: + return "CodeEDNSKeyTag" + case DNSOptionCodeEDNSClientTag: + return "EDNSClientTag" + case DNSOptionCodeEDNSServerTag: + return "EDNSServerTag" + case DNSOptionCodeDeviceID: + return "DeviceID" + } +} + +// DNSOptionCode known values. See IANA +const ( + DNSOptionCodeNSID DNSOptionCode = 3 + DNSOptionCodeDAU DNSOptionCode = 5 + DNSOptionCodeDHU DNSOptionCode = 6 + DNSOptionCodeN3U DNSOptionCode = 7 + DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8 + DNSOptionCodeEDNSExpire DNSOptionCode = 9 + DNSOptionCodeCookie DNSOptionCode = 10 + DNSOptionCodeEDNSKeepAlive DNSOptionCode = 11 + DNSOptionCodePadding DNSOptionCode = 12 + DNSOptionCodeChain DNSOptionCode = 13 + DNSOptionCodeEDNSKeyTag DNSOptionCode = 14 + DNSOptionCodeEDNSClientTag DNSOptionCode = 16 + DNSOptionCodeEDNSServerTag DNSOptionCode = 17 + DNSOptionCodeDeviceID DNSOptionCode = 26946 +) + +// DNSOPT is a DNS Option, see RFC6891, section 6.1.2 +type DNSOPT struct { + Code DNSOptionCode + Data []byte +} + +func (opt DNSOPT) String() string { + return fmt.Sprintf("%s=%x", opt.Code, opt.Data) +} + +var ( + errMaxRecursion = errors.New("max DNS recursion level hit") + + errDNSNameOffsetTooHigh = errors.New("dns name offset too high") + errDNSNameOffsetNegative = errors.New("dns name offset is negative") + errDNSPacketTooShort = errors.New("DNS packet too short") + errDNSNameTooLong = errors.New("dns name is too long") + errDNSNameInvalidIndex = errors.New("dns name uncomputable: invalid index") + errDNSPointerOffsetTooHigh = errors.New("dns offset pointer too high") + errDNSIndexOutOfRange = errors.New("dns index walked out of range") + errDNSNameHasNoData = errors.New("no dns data found for name") + + errCharStringMissData = errors.New("Insufficient data for a <character-string>") + + errDecodeRecordLength = errors.New("resource record length exceeds data") + + errDecodeQueryBadQDCount = errors.New("Invalid query decoding, not the right number of questions") + errDecodeQueryBadANCount = errors.New("Invalid query decoding, not the right number of answers") + errDecodeQueryBadNSCount = errors.New("Invalid query decoding, not the right number of authorities") + errDecodeQueryBadARCount = errors.New("Invalid query decoding, not the right number of additionals info") +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/doc.go new file mode 100644 index 00000000..3c882c3f --- 
/dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/doc.go @@ -0,0 +1,61 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +/* +Package layers provides decoding layers for many common protocols. + +The layers package contains decode implementations for a number of different +types of packet layers. Users of gopacket will almost always want to also use +layers to actually decode packet data into useful pieces. To see the set of +protocols that gopacket/layers is currently able to decode, +look at the set of LayerTypes defined in the Variables sections. The +layers package also defines endpoints for many of the common packet layers +that have source/destination addresses associated with them, for example IPv4/6 +(IPs) and TCP/UDP (ports). +Finally, layers contains a number of useful enumerations (IPProtocol, +EthernetType, LinkType, PPPType, etc...). Many of these implement the +gopacket.Decoder interface, so they can be passed into gopacket as decoders. + +Most common protocol layers are named using acronyms or other industry-common +names (IPv4, TCP, PPP). Some of the less common ones have their names expanded +(CiscoDiscoveryProtocol). +For certain protocols, sub-parts of the protocol are split out into their own +layers (SCTP, for example). This is done mostly in cases where portions of the +protocol may fulfill the capabilities of interesting layers (SCTPData implements +ApplicationLayer, while base SCTP implements TransportLayer), or possibly +because splitting a protocol into a few layers makes decoding easier. + +This package is meant to be used with its parent, +http://github.com/google/gopacket. + +Port Types + +Instead of using raw uint16 or uint8 values for ports, we use a different port +type for every protocol, for example TCPPort and UDPPort. This allows us to +override string behavior for each port, which we do by setting up port name +maps (TCPPortNames, UDPPortNames, etc...). Well-known ports are annotated with +their protocol names, and their String function displays these names: + + p := TCPPort(80) + fmt.Printf("Number: %d String: %v", p, p) + // Prints: "Number: 80 String: 80(http)" + +Modifying Decode Behavior + +layers links together decoding through its enumerations. For example, after +decoding layer type Ethernet, it uses Ethernet.EthernetType as its next decoder. +All enumerations that act as decoders, like EthernetType, can be modified by +users depending on their preferences. For example, if you have a spiffy new +IPv4 decoder that works way better than the one built into layers, you can do +this: + + var mySpiffyIPv4Decoder gopacket.Decoder = ... + layers.EthernetTypeMetadata[EthernetTypeIPv4].DecodeWith = mySpiffyIPv4Decoder + +This will make all future ethernet packets use your new decoder to decode IPv4 +packets, instead of the built-in decoder used by gopacket. +*/ +package layers diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dot11.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dot11.go new file mode 100644 index 00000000..4f6c6a83 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dot11.go @@ -0,0 +1,2106 @@ +// Copyright 2014 Google, Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// See http://standards.ieee.org/findstds/standard/802.11-2012.html for info on +// all of the layers in this file. + +package layers + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "net" + + "github.com/google/gopacket" +) + +// Dot11Flags contains the set of 8 flags in the IEEE 802.11 frame control +// header, all in one place. +type Dot11Flags uint8 + +const ( + Dot11FlagsToDS Dot11Flags = 1 << iota + Dot11FlagsFromDS + Dot11FlagsMF + Dot11FlagsRetry + Dot11FlagsPowerManagement + Dot11FlagsMD + Dot11FlagsWEP + Dot11FlagsOrder +) + +func (d Dot11Flags) ToDS() bool { + return d&Dot11FlagsToDS != 0 +} +func (d Dot11Flags) FromDS() bool { + return d&Dot11FlagsFromDS != 0 +} +func (d Dot11Flags) MF() bool { + return d&Dot11FlagsMF != 0 +} +func (d Dot11Flags) Retry() bool { + return d&Dot11FlagsRetry != 0 +} +func (d Dot11Flags) PowerManagement() bool { + return d&Dot11FlagsPowerManagement != 0 +} +func (d Dot11Flags) MD() bool { + return d&Dot11FlagsMD != 0 +} +func (d Dot11Flags) WEP() bool { + return d&Dot11FlagsWEP != 0 +} +func (d Dot11Flags) Order() bool { + return d&Dot11FlagsOrder != 0 +} + +// String provides a human readable string for Dot11Flags. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Flags value, not its string. +func (a Dot11Flags) String() string { + var out bytes.Buffer + if a.ToDS() { + out.WriteString("TO-DS,") + } + if a.FromDS() { + out.WriteString("FROM-DS,") + } + if a.MF() { + out.WriteString("MF,") + } + if a.Retry() { + out.WriteString("Retry,") + } + if a.PowerManagement() { + out.WriteString("PowerManagement,") + } + if a.MD() { + out.WriteString("MD,") + } + if a.WEP() { + out.WriteString("WEP,") + } + if a.Order() { + out.WriteString("Order,") + } + + if length := out.Len(); length > 0 { + return string(out.Bytes()[:length-1]) // strip final comma + } + return "" +} + +type Dot11Reason uint16 + +// TODO: Verify these reasons, and append more reasons if necessary. + +const ( + Dot11ReasonReserved Dot11Reason = 1 + Dot11ReasonUnspecified Dot11Reason = 2 + Dot11ReasonAuthExpired Dot11Reason = 3 + Dot11ReasonDeauthStLeaving Dot11Reason = 4 + Dot11ReasonInactivity Dot11Reason = 5 + Dot11ReasonApFull Dot11Reason = 6 + Dot11ReasonClass2FromNonAuth Dot11Reason = 7 + Dot11ReasonClass3FromNonAss Dot11Reason = 8 + Dot11ReasonDisasStLeaving Dot11Reason = 9 + Dot11ReasonStNotAuth Dot11Reason = 10 +) + +// String provides a human readable string for Dot11Reason. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Reason value, not its string. +func (a Dot11Reason) String() string { + switch a { + case Dot11ReasonReserved: + return "Reserved" + case Dot11ReasonUnspecified: + return "Unspecified" + case Dot11ReasonAuthExpired: + return "Auth. expired" + case Dot11ReasonDeauthStLeaving: + return "Deauth. st. leaving" + case Dot11ReasonInactivity: + return "Inactivity" + case Dot11ReasonApFull: + return "Ap. full" + case Dot11ReasonClass2FromNonAuth: + return "Class2 from non auth." + case Dot11ReasonClass3FromNonAss: + return "Class3 from non ass." + case Dot11ReasonDisasStLeaving: + return "Disass st. leaving" + case Dot11ReasonStNotAuth: + return "St. not auth." 
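+ // Reason codes other than the constants above (which stop at 10) are
+ // reported via the default branch below.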
+ default: + return "Unknown reason" + } +} + +type Dot11Status uint16 + +const ( + Dot11StatusSuccess Dot11Status = 0 + Dot11StatusFailure Dot11Status = 1 // Unspecified failure + Dot11StatusCannotSupportAllCapabilities Dot11Status = 10 // Cannot support all requested capabilities in the Capability Information field + Dot11StatusInabilityExistsAssociation Dot11Status = 11 // Reassociation denied due to inability to confirm that association exists + Dot11StatusAssociationDenied Dot11Status = 12 // Association denied due to reason outside the scope of this standard + Dot11StatusAlgorithmUnsupported Dot11Status = 13 // Responding station does not support the specified authentication algorithm + Dot11StatusOufOfExpectedSequence Dot11Status = 14 // Received an Authentication frame with authentication transaction sequence number out of expected sequence + Dot11StatusChallengeFailure Dot11Status = 15 // Authentication rejected because of challenge failure + Dot11StatusTimeout Dot11Status = 16 // Authentication rejected due to timeout waiting for next frame in sequence + Dot11StatusAPUnableToHandle Dot11Status = 17 // Association denied because AP is unable to handle additional associated stations + Dot11StatusRateUnsupported Dot11Status = 18 // Association denied due to requesting station not supporting all of the data rates in the BSSBasicRateSet parameter +) + +// String provides a human readable string for Dot11Status. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Status value, not its string. +func (a Dot11Status) String() string { + switch a { + case Dot11StatusSuccess: + return "success" + case Dot11StatusFailure: + return "failure" + case Dot11StatusCannotSupportAllCapabilities: + return "cannot-support-all-capabilities" + case Dot11StatusInabilityExistsAssociation: + return "inability-exists-association" + case Dot11StatusAssociationDenied: + return "association-denied" + case Dot11StatusAlgorithmUnsupported: + return "algorithm-unsupported" + case Dot11StatusOufOfExpectedSequence: + return "out-of-expected-sequence" + case Dot11StatusChallengeFailure: + return "challenge-failure" + case Dot11StatusTimeout: + return "timeout" + case Dot11StatusAPUnableToHandle: + return "ap-unable-to-handle" + case Dot11StatusRateUnsupported: + return "rate-unsupported" + default: + return "unknown status" + } +} + +type Dot11AckPolicy uint8 + +const ( + Dot11AckPolicyNormal Dot11AckPolicy = 0 + Dot11AckPolicyNone Dot11AckPolicy = 1 + Dot11AckPolicyNoExplicit Dot11AckPolicy = 2 + Dot11AckPolicyBlock Dot11AckPolicy = 3 +) + +// String provides a human readable string for Dot11AckPolicy. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11AckPolicy value, not its string. +func (a Dot11AckPolicy) String() string { + switch a { + case Dot11AckPolicyNormal: + return "normal-ack" + case Dot11AckPolicyNone: + return "no-ack" + case Dot11AckPolicyNoExplicit: + return "no-explicit-ack" + case Dot11AckPolicyBlock: + return "block-ack" + default: + return "unknown-ack-policy" + } +} + +type Dot11Algorithm uint16 + +const ( + Dot11AlgorithmOpen Dot11Algorithm = 0 + Dot11AlgorithmSharedKey Dot11Algorithm = 1 +) + +// String provides a human readable string for Dot11Algorithm. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11Algorithm value, not its string. 
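+//
+// For example (a quick sketch):
+//
+//	Dot11AlgorithmSharedKey.String() // "shared-key"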
+func (a Dot11Algorithm) String() string { + switch a { + case Dot11AlgorithmOpen: + return "open" + case Dot11AlgorithmSharedKey: + return "shared-key" + default: + return "unknown-algorithm" + } +} + +type Dot11InformationElementID uint8 + +const ( + Dot11InformationElementIDSSID Dot11InformationElementID = 0 + Dot11InformationElementIDRates Dot11InformationElementID = 1 + Dot11InformationElementIDFHSet Dot11InformationElementID = 2 + Dot11InformationElementIDDSSet Dot11InformationElementID = 3 + Dot11InformationElementIDCFSet Dot11InformationElementID = 4 + Dot11InformationElementIDTIM Dot11InformationElementID = 5 + Dot11InformationElementIDIBSSSet Dot11InformationElementID = 6 + Dot11InformationElementIDCountryInfo Dot11InformationElementID = 7 + Dot11InformationElementIDHoppingPatternParam Dot11InformationElementID = 8 + Dot11InformationElementIDHoppingPatternTable Dot11InformationElementID = 9 + Dot11InformationElementIDRequest Dot11InformationElementID = 10 + Dot11InformationElementIDQBSSLoadElem Dot11InformationElementID = 11 + Dot11InformationElementIDEDCAParamSet Dot11InformationElementID = 12 + Dot11InformationElementIDTrafficSpec Dot11InformationElementID = 13 + Dot11InformationElementIDTrafficClass Dot11InformationElementID = 14 + Dot11InformationElementIDSchedule Dot11InformationElementID = 15 + Dot11InformationElementIDChallenge Dot11InformationElementID = 16 + Dot11InformationElementIDPowerConst Dot11InformationElementID = 32 + Dot11InformationElementIDPowerCapability Dot11InformationElementID = 33 + Dot11InformationElementIDTPCRequest Dot11InformationElementID = 34 + Dot11InformationElementIDTPCReport Dot11InformationElementID = 35 + Dot11InformationElementIDSupportedChannels Dot11InformationElementID = 36 + Dot11InformationElementIDSwitchChannelAnnounce Dot11InformationElementID = 37 + Dot11InformationElementIDMeasureRequest Dot11InformationElementID = 38 + Dot11InformationElementIDMeasureReport Dot11InformationElementID = 39 + Dot11InformationElementIDQuiet Dot11InformationElementID = 40 + Dot11InformationElementIDIBSSDFS Dot11InformationElementID = 41 + Dot11InformationElementIDERPInfo Dot11InformationElementID = 42 + Dot11InformationElementIDTSDelay Dot11InformationElementID = 43 + Dot11InformationElementIDTCLASProcessing Dot11InformationElementID = 44 + Dot11InformationElementIDHTCapabilities Dot11InformationElementID = 45 + Dot11InformationElementIDQOSCapability Dot11InformationElementID = 46 + Dot11InformationElementIDERPInfo2 Dot11InformationElementID = 47 + Dot11InformationElementIDRSNInfo Dot11InformationElementID = 48 + Dot11InformationElementIDESRates Dot11InformationElementID = 50 + Dot11InformationElementIDAPChannelReport Dot11InformationElementID = 51 + Dot11InformationElementIDNeighborReport Dot11InformationElementID = 52 + Dot11InformationElementIDRCPI Dot11InformationElementID = 53 + Dot11InformationElementIDMobilityDomain Dot11InformationElementID = 54 + Dot11InformationElementIDFastBSSTrans Dot11InformationElementID = 55 + Dot11InformationElementIDTimeoutInt Dot11InformationElementID = 56 + Dot11InformationElementIDRICData Dot11InformationElementID = 57 + Dot11InformationElementIDDSERegisteredLoc Dot11InformationElementID = 58 + Dot11InformationElementIDSuppOperatingClass Dot11InformationElementID = 59 + Dot11InformationElementIDExtChanSwitchAnnounce Dot11InformationElementID = 60 + Dot11InformationElementIDHTInfo Dot11InformationElementID = 61 + Dot11InformationElementIDSecChanOffset Dot11InformationElementID = 62 + 
Dot11InformationElementIDBSSAverageAccessDelay Dot11InformationElementID = 63 + Dot11InformationElementIDAntenna Dot11InformationElementID = 64 + Dot11InformationElementIDRSNI Dot11InformationElementID = 65 + Dot11InformationElementIDMeasurePilotTrans Dot11InformationElementID = 66 + Dot11InformationElementIDBSSAvailAdmCapacity Dot11InformationElementID = 67 + Dot11InformationElementIDBSSACAccDelayWAPIParam Dot11InformationElementID = 68 + Dot11InformationElementIDTimeAdvertisement Dot11InformationElementID = 69 + Dot11InformationElementIDRMEnabledCapabilities Dot11InformationElementID = 70 + Dot11InformationElementIDMultipleBSSID Dot11InformationElementID = 71 + Dot11InformationElementID2040BSSCoExist Dot11InformationElementID = 72 + Dot11InformationElementID2040BSSIntChanReport Dot11InformationElementID = 73 + Dot11InformationElementIDOverlapBSSScanParam Dot11InformationElementID = 74 + Dot11InformationElementIDRICDescriptor Dot11InformationElementID = 75 + Dot11InformationElementIDManagementMIC Dot11InformationElementID = 76 + Dot11InformationElementIDEventRequest Dot11InformationElementID = 78 + Dot11InformationElementIDEventReport Dot11InformationElementID = 79 + Dot11InformationElementIDDiagnosticRequest Dot11InformationElementID = 80 + Dot11InformationElementIDDiagnosticReport Dot11InformationElementID = 81 + Dot11InformationElementIDLocationParam Dot11InformationElementID = 82 + Dot11InformationElementIDNonTransBSSIDCapability Dot11InformationElementID = 83 + Dot11InformationElementIDSSIDList Dot11InformationElementID = 84 + Dot11InformationElementIDMultipleBSSIDIndex Dot11InformationElementID = 85 + Dot11InformationElementIDFMSDescriptor Dot11InformationElementID = 86 + Dot11InformationElementIDFMSRequest Dot11InformationElementID = 87 + Dot11InformationElementIDFMSResponse Dot11InformationElementID = 88 + Dot11InformationElementIDQOSTrafficCapability Dot11InformationElementID = 89 + Dot11InformationElementIDBSSMaxIdlePeriod Dot11InformationElementID = 90 + Dot11InformationElementIDTFSRequest Dot11InformationElementID = 91 + Dot11InformationElementIDTFSResponse Dot11InformationElementID = 92 + Dot11InformationElementIDWNMSleepMode Dot11InformationElementID = 93 + Dot11InformationElementIDTIMBroadcastRequest Dot11InformationElementID = 94 + Dot11InformationElementIDTIMBroadcastResponse Dot11InformationElementID = 95 + Dot11InformationElementIDCollInterferenceReport Dot11InformationElementID = 96 + Dot11InformationElementIDChannelUsage Dot11InformationElementID = 97 + Dot11InformationElementIDTimeZone Dot11InformationElementID = 98 + Dot11InformationElementIDDMSRequest Dot11InformationElementID = 99 + Dot11InformationElementIDDMSResponse Dot11InformationElementID = 100 + Dot11InformationElementIDLinkIdentifier Dot11InformationElementID = 101 + Dot11InformationElementIDWakeupSchedule Dot11InformationElementID = 102 + Dot11InformationElementIDChannelSwitchTiming Dot11InformationElementID = 104 + Dot11InformationElementIDPTIControl Dot11InformationElementID = 105 + Dot11InformationElementIDPUBufferStatus Dot11InformationElementID = 106 + Dot11InformationElementIDInterworking Dot11InformationElementID = 107 + Dot11InformationElementIDAdvertisementProtocol Dot11InformationElementID = 108 + Dot11InformationElementIDExpBWRequest Dot11InformationElementID = 109 + Dot11InformationElementIDQOSMapSet Dot11InformationElementID = 110 + Dot11InformationElementIDRoamingConsortium Dot11InformationElementID = 111 + Dot11InformationElementIDEmergencyAlertIdentifier Dot11InformationElementID = 112 + 
Dot11InformationElementIDMeshConfiguration Dot11InformationElementID = 113 + Dot11InformationElementIDMeshID Dot11InformationElementID = 114 + Dot11InformationElementIDMeshLinkMetricReport Dot11InformationElementID = 115 + Dot11InformationElementIDCongestionNotification Dot11InformationElementID = 116 + Dot11InformationElementIDMeshPeeringManagement Dot11InformationElementID = 117 + Dot11InformationElementIDMeshChannelSwitchParam Dot11InformationElementID = 118 + Dot11InformationElementIDMeshAwakeWindows Dot11InformationElementID = 119 + Dot11InformationElementIDBeaconTiming Dot11InformationElementID = 120 + Dot11InformationElementIDMCCAOPSetupRequest Dot11InformationElementID = 121 + Dot11InformationElementIDMCCAOPSetupReply Dot11InformationElementID = 122 + Dot11InformationElementIDMCCAOPAdvertisement Dot11InformationElementID = 123 + Dot11InformationElementIDMCCAOPTeardown Dot11InformationElementID = 124 + Dot11InformationElementIDGateAnnouncement Dot11InformationElementID = 125 + Dot11InformationElementIDRootAnnouncement Dot11InformationElementID = 126 + Dot11InformationElementIDExtCapability Dot11InformationElementID = 127 + Dot11InformationElementIDAgereProprietary Dot11InformationElementID = 128 + Dot11InformationElementIDPathRequest Dot11InformationElementID = 130 + Dot11InformationElementIDPathReply Dot11InformationElementID = 131 + Dot11InformationElementIDPathError Dot11InformationElementID = 132 + Dot11InformationElementIDCiscoCCX1CKIPDeviceName Dot11InformationElementID = 133 + Dot11InformationElementIDCiscoCCX2 Dot11InformationElementID = 136 + Dot11InformationElementIDProxyUpdate Dot11InformationElementID = 137 + Dot11InformationElementIDProxyUpdateConfirmation Dot11InformationElementID = 138 + Dot11InformationElementIDAuthMeshPerringExch Dot11InformationElementID = 139 + Dot11InformationElementIDMIC Dot11InformationElementID = 140 + Dot11InformationElementIDDestinationURI Dot11InformationElementID = 141 + Dot11InformationElementIDUAPSDCoexistence Dot11InformationElementID = 142 + Dot11InformationElementIDWakeupSchedule80211ad Dot11InformationElementID = 143 + Dot11InformationElementIDExtendedSchedule Dot11InformationElementID = 144 + Dot11InformationElementIDSTAAvailability Dot11InformationElementID = 145 + Dot11InformationElementIDDMGTSPEC Dot11InformationElementID = 146 + Dot11InformationElementIDNextDMGATI Dot11InformationElementID = 147 + Dot11InformationElementIDDMSCapabilities Dot11InformationElementID = 148 + Dot11InformationElementIDCiscoUnknown95 Dot11InformationElementID = 149 + Dot11InformationElementIDVendor2 Dot11InformationElementID = 150 + Dot11InformationElementIDDMGOperating Dot11InformationElementID = 151 + Dot11InformationElementIDDMGBSSParamChange Dot11InformationElementID = 152 + Dot11InformationElementIDDMGBeamRefinement Dot11InformationElementID = 153 + Dot11InformationElementIDChannelMeasFeedback Dot11InformationElementID = 154 + Dot11InformationElementIDAwakeWindow Dot11InformationElementID = 157 + Dot11InformationElementIDMultiBand Dot11InformationElementID = 158 + Dot11InformationElementIDADDBAExtension Dot11InformationElementID = 159 + Dot11InformationElementIDNEXTPCPList Dot11InformationElementID = 160 + Dot11InformationElementIDPCPHandover Dot11InformationElementID = 161 + Dot11InformationElementIDDMGLinkMargin Dot11InformationElementID = 162 + Dot11InformationElementIDSwitchingStream Dot11InformationElementID = 163 + Dot11InformationElementIDSessionTransmission Dot11InformationElementID = 164 + Dot11InformationElementIDDynamicTonePairReport 
Dot11InformationElementID = 165 + Dot11InformationElementIDClusterReport Dot11InformationElementID = 166 + Dot11InformationElementIDRelayCapabilities Dot11InformationElementID = 167 + Dot11InformationElementIDRelayTransferParameter Dot11InformationElementID = 168 + Dot11InformationElementIDBeamlinkMaintenance Dot11InformationElementID = 169 + Dot11InformationElementIDMultipleMacSublayers Dot11InformationElementID = 170 + Dot11InformationElementIDUPID Dot11InformationElementID = 171 + Dot11InformationElementIDDMGLinkAdaptionAck Dot11InformationElementID = 172 + Dot11InformationElementIDSymbolProprietary Dot11InformationElementID = 173 + Dot11InformationElementIDMCCAOPAdvertOverview Dot11InformationElementID = 174 + Dot11InformationElementIDQuietPeriodRequest Dot11InformationElementID = 175 + Dot11InformationElementIDQuietPeriodResponse Dot11InformationElementID = 177 + Dot11InformationElementIDECPACPolicy Dot11InformationElementID = 182 + Dot11InformationElementIDClusterTimeOffset Dot11InformationElementID = 183 + Dot11InformationElementIDAntennaSectorID Dot11InformationElementID = 190 + Dot11InformationElementIDVHTCapabilities Dot11InformationElementID = 191 + Dot11InformationElementIDVHTOperation Dot11InformationElementID = 192 + Dot11InformationElementIDExtendedBSSLoad Dot11InformationElementID = 193 + Dot11InformationElementIDWideBWChannelSwitch Dot11InformationElementID = 194 + Dot11InformationElementIDVHTTxPowerEnvelope Dot11InformationElementID = 195 + Dot11InformationElementIDChannelSwitchWrapper Dot11InformationElementID = 196 + Dot11InformationElementIDOperatingModeNotification Dot11InformationElementID = 199 + Dot11InformationElementIDUPSIM Dot11InformationElementID = 200 + Dot11InformationElementIDReducedNeighborReport Dot11InformationElementID = 201 + Dot11InformationElementIDTVHTOperation Dot11InformationElementID = 202 + Dot11InformationElementIDDeviceLocation Dot11InformationElementID = 204 + Dot11InformationElementIDWhiteSpaceMap Dot11InformationElementID = 205 + Dot11InformationElementIDFineTuningMeasureParams Dot11InformationElementID = 206 + Dot11InformationElementIDVendor Dot11InformationElementID = 221 +) + +// String provides a human readable string for Dot11InformationElementID. +// This string is possibly subject to change over time; if you're storing this +// persistently, you should probably store the Dot11InformationElementID value, +// not its string. 
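+//
+// These IDs tag 802.11 information elements, which are serialized as a
+// one-byte ID, a one-byte length, and then the payload (see
+// Dot11InformationElement.DecodeFromBytes below). A sketch of an SSID
+// element carrying "test":
+//
+//	ie := []byte{0x00, 0x04, 't', 'e', 's', 't'} // ID=0 (SSID), Length=4, Info="test"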
+func (a Dot11InformationElementID) String() string { + switch a { + case Dot11InformationElementIDSSID: + return "SSID parameter set" + case Dot11InformationElementIDRates: + return "Supported Rates" + case Dot11InformationElementIDFHSet: + return "FH Parameter set" + case Dot11InformationElementIDDSSet: + return "DS Parameter set" + case Dot11InformationElementIDCFSet: + return "CF Parameter set" + case Dot11InformationElementIDTIM: + return "Traffic Indication Map (TIM)" + case Dot11InformationElementIDIBSSSet: + return "IBSS Parameter set" + case Dot11InformationElementIDCountryInfo: + return "Country Information" + case Dot11InformationElementIDHoppingPatternParam: + return "Hopping Pattern Parameters" + case Dot11InformationElementIDHoppingPatternTable: + return "Hopping Pattern Table" + case Dot11InformationElementIDRequest: + return "Request" + case Dot11InformationElementIDQBSSLoadElem: + return "QBSS Load Element" + case Dot11InformationElementIDEDCAParamSet: + return "EDCA Parameter Set" + case Dot11InformationElementIDTrafficSpec: + return "Traffic Specification" + case Dot11InformationElementIDTrafficClass: + return "Traffic Classification" + case Dot11InformationElementIDSchedule: + return "Schedule" + case Dot11InformationElementIDChallenge: + return "Challenge text" + case Dot11InformationElementIDPowerConst: + return "Power Constraint" + case Dot11InformationElementIDPowerCapability: + return "Power Capability" + case Dot11InformationElementIDTPCRequest: + return "TPC Request" + case Dot11InformationElementIDTPCReport: + return "TPC Report" + case Dot11InformationElementIDSupportedChannels: + return "Supported Channels" + case Dot11InformationElementIDSwitchChannelAnnounce: + return "Channel Switch Announcement" + case Dot11InformationElementIDMeasureRequest: + return "Measurement Request" + case Dot11InformationElementIDMeasureReport: + return "Measurement Report" + case Dot11InformationElementIDQuiet: + return "Quiet" + case Dot11InformationElementIDIBSSDFS: + return "IBSS DFS" + case Dot11InformationElementIDERPInfo: + return "ERP Information" + case Dot11InformationElementIDTSDelay: + return "TS Delay" + case Dot11InformationElementIDTCLASProcessing: + return "TCLAS Processing" + case Dot11InformationElementIDHTCapabilities: + return "HT Capabilities (802.11n D1.10)" + case Dot11InformationElementIDQOSCapability: + return "QOS Capability" + case Dot11InformationElementIDERPInfo2: + return "ERP Information-2" + case Dot11InformationElementIDRSNInfo: + return "RSN Information" + case Dot11InformationElementIDESRates: + return "Extended Supported Rates" + case Dot11InformationElementIDAPChannelReport: + return "AP Channel Report" + case Dot11InformationElementIDNeighborReport: + return "Neighbor Report" + case Dot11InformationElementIDRCPI: + return "RCPI" + case Dot11InformationElementIDMobilityDomain: + return "Mobility Domain" + case Dot11InformationElementIDFastBSSTrans: + return "Fast BSS Transition" + case Dot11InformationElementIDTimeoutInt: + return "Timeout Interval" + case Dot11InformationElementIDRICData: + return "RIC Data" + case Dot11InformationElementIDDSERegisteredLoc: + return "DSE Registered Location" + case Dot11InformationElementIDSuppOperatingClass: + return "Supported Operating Classes" + case Dot11InformationElementIDExtChanSwitchAnnounce: + return "Extended Channel Switch Announcement" + case Dot11InformationElementIDHTInfo: + return "HT Information (802.11n D1.10)" + case Dot11InformationElementIDSecChanOffset: + return "Secondary Channel Offset 
(802.11n D1.10)" + case Dot11InformationElementIDBSSAverageAccessDelay: + return "BSS Average Access Delay" + case Dot11InformationElementIDAntenna: + return "Antenna" + case Dot11InformationElementIDRSNI: + return "RSNI" + case Dot11InformationElementIDMeasurePilotTrans: + return "Measurement Pilot Transmission" + case Dot11InformationElementIDBSSAvailAdmCapacity: + return "BSS Available Admission Capacity" + case Dot11InformationElementIDBSSACAccDelayWAPIParam: + return "BSS AC Access Delay/WAPI Parameter Set" + case Dot11InformationElementIDTimeAdvertisement: + return "Time Advertisement" + case Dot11InformationElementIDRMEnabledCapabilities: + return "RM Enabled Capabilities" + case Dot11InformationElementIDMultipleBSSID: + return "Multiple BSSID" + case Dot11InformationElementID2040BSSCoExist: + return "20/40 BSS Coexistence" + case Dot11InformationElementID2040BSSIntChanReport: + return "20/40 BSS Intolerant Channel Report" + case Dot11InformationElementIDOverlapBSSScanParam: + return "Overlapping BSS Scan Parameters" + case Dot11InformationElementIDRICDescriptor: + return "RIC Descriptor" + case Dot11InformationElementIDManagementMIC: + return "Management MIC" + case Dot11InformationElementIDEventRequest: + return "Event Request" + case Dot11InformationElementIDEventReport: + return "Event Report" + case Dot11InformationElementIDDiagnosticRequest: + return "Diagnostic Request" + case Dot11InformationElementIDDiagnosticReport: + return "Diagnostic Report" + case Dot11InformationElementIDLocationParam: + return "Location Parameters" + case Dot11InformationElementIDNonTransBSSIDCapability: + return "Non Transmitted BSSID Capability" + case Dot11InformationElementIDSSIDList: + return "SSID List" + case Dot11InformationElementIDMultipleBSSIDIndex: + return "Multiple BSSID Index" + case Dot11InformationElementIDFMSDescriptor: + return "FMS Descriptor" + case Dot11InformationElementIDFMSRequest: + return "FMS Request" + case Dot11InformationElementIDFMSResponse: + return "FMS Response" + case Dot11InformationElementIDQOSTrafficCapability: + return "QoS Traffic Capability" + case Dot11InformationElementIDBSSMaxIdlePeriod: + return "BSS Max Idle Period" + case Dot11InformationElementIDTFSRequest: + return "TFS Request" + case Dot11InformationElementIDTFSResponse: + return "TFS Response" + case Dot11InformationElementIDWNMSleepMode: + return "WNM-Sleep Mode" + case Dot11InformationElementIDTIMBroadcastRequest: + return "TIM Broadcast Request" + case Dot11InformationElementIDTIMBroadcastResponse: + return "TIM Broadcast Response" + case Dot11InformationElementIDCollInterferenceReport: + return "Collocated Interference Report" + case Dot11InformationElementIDChannelUsage: + return "Channel Usage" + case Dot11InformationElementIDTimeZone: + return "Time Zone" + case Dot11InformationElementIDDMSRequest: + return "DMS Request" + case Dot11InformationElementIDDMSResponse: + return "DMS Response" + case Dot11InformationElementIDLinkIdentifier: + return "Link Identifier" + case Dot11InformationElementIDWakeupSchedule: + return "Wakeup Schedule" + case Dot11InformationElementIDChannelSwitchTiming: + return "Channel Switch Timing" + case Dot11InformationElementIDPTIControl: + return "PTI Control" + case Dot11InformationElementIDPUBufferStatus: + return "PU Buffer Status" + case Dot11InformationElementIDInterworking: + return "Interworking" + case Dot11InformationElementIDAdvertisementProtocol: + return "Advertisement Protocol" + case Dot11InformationElementIDExpBWRequest: + return "Expedited Bandwidth 
Request" + case Dot11InformationElementIDQOSMapSet: + return "QoS Map Set" + case Dot11InformationElementIDRoamingConsortium: + return "Roaming Consortium" + case Dot11InformationElementIDEmergencyAlertIdentifier: + return "Emergency Alert Identifier" + case Dot11InformationElementIDMeshConfiguration: + return "Mesh Configuration" + case Dot11InformationElementIDMeshID: + return "Mesh ID" + case Dot11InformationElementIDMeshLinkMetricReport: + return "Mesh Link Metric Report" + case Dot11InformationElementIDCongestionNotification: + return "Congestion Notification" + case Dot11InformationElementIDMeshPeeringManagement: + return "Mesh Peering Management" + case Dot11InformationElementIDMeshChannelSwitchParam: + return "Mesh Channel Switch Parameters" + case Dot11InformationElementIDMeshAwakeWindows: + return "Mesh Awake Windows" + case Dot11InformationElementIDBeaconTiming: + return "Beacon Timing" + case Dot11InformationElementIDMCCAOPSetupRequest: + return "MCCAOP Setup Request" + case Dot11InformationElementIDMCCAOPSetupReply: + return "MCCAOP SETUP Reply" + case Dot11InformationElementIDMCCAOPAdvertisement: + return "MCCAOP Advertisement" + case Dot11InformationElementIDMCCAOPTeardown: + return "MCCAOP Teardown" + case Dot11InformationElementIDGateAnnouncement: + return "Gate Announcement" + case Dot11InformationElementIDRootAnnouncement: + return "Root Announcement" + case Dot11InformationElementIDExtCapability: + return "Extended Capabilities" + case Dot11InformationElementIDAgereProprietary: + return "Agere Proprietary" + case Dot11InformationElementIDPathRequest: + return "Path Request" + case Dot11InformationElementIDPathReply: + return "Path Reply" + case Dot11InformationElementIDPathError: + return "Path Error" + case Dot11InformationElementIDCiscoCCX1CKIPDeviceName: + return "Cisco CCX1 CKIP + Device Name" + case Dot11InformationElementIDCiscoCCX2: + return "Cisco CCX2" + case Dot11InformationElementIDProxyUpdate: + return "Proxy Update" + case Dot11InformationElementIDProxyUpdateConfirmation: + return "Proxy Update Confirmation" + case Dot11InformationElementIDAuthMeshPerringExch: + return "Auhenticated Mesh Perring Exchange" + case Dot11InformationElementIDMIC: + return "MIC (Message Integrity Code)" + case Dot11InformationElementIDDestinationURI: + return "Destination URI" + case Dot11InformationElementIDUAPSDCoexistence: + return "U-APSD Coexistence" + case Dot11InformationElementIDWakeupSchedule80211ad: + return "Wakeup Schedule 802.11ad" + case Dot11InformationElementIDExtendedSchedule: + return "Extended Schedule" + case Dot11InformationElementIDSTAAvailability: + return "STA Availability" + case Dot11InformationElementIDDMGTSPEC: + return "DMG TSPEC" + case Dot11InformationElementIDNextDMGATI: + return "Next DMG ATI" + case Dot11InformationElementIDDMSCapabilities: + return "DMG Capabilities" + case Dot11InformationElementIDCiscoUnknown95: + return "Cisco Unknown 95" + case Dot11InformationElementIDVendor2: + return "Vendor Specific" + case Dot11InformationElementIDDMGOperating: + return "DMG Operating" + case Dot11InformationElementIDDMGBSSParamChange: + return "DMG BSS Parameter Change" + case Dot11InformationElementIDDMGBeamRefinement: + return "DMG Beam Refinement" + case Dot11InformationElementIDChannelMeasFeedback: + return "Channel Measurement Feedback" + case Dot11InformationElementIDAwakeWindow: + return "Awake Window" + case Dot11InformationElementIDMultiBand: + return "Multi Band" + case Dot11InformationElementIDADDBAExtension: + return "ADDBA Extension" + 
case Dot11InformationElementIDNEXTPCPList: + return "NEXTPCP List" + case Dot11InformationElementIDPCPHandover: + return "PCP Handover" + case Dot11InformationElementIDDMGLinkMargin: + return "DMG Link Margin" + case Dot11InformationElementIDSwitchingStream: + return "Switching Stream" + case Dot11InformationElementIDSessionTransmission: + return "Session Transmission" + case Dot11InformationElementIDDynamicTonePairReport: + return "Dynamic Tone Pairing Report" + case Dot11InformationElementIDClusterReport: + return "Cluster Report" + case Dot11InformationElementIDRelayCapabilities: + return "Relay Capabilities" + case Dot11InformationElementIDRelayTransferParameter: + return "Relay Transfer Parameter" + case Dot11InformationElementIDBeamlinkMaintenance: + return "Beamlink Maintenance" + case Dot11InformationElementIDMultipleMacSublayers: + return "Multiple MAC Sublayers" + case Dot11InformationElementIDUPID: + return "U-PID" + case Dot11InformationElementIDDMGLinkAdaptionAck: + return "DMG Link Adaption Acknowledgment" + case Dot11InformationElementIDSymbolProprietary: + return "Symbol Proprietary" + case Dot11InformationElementIDMCCAOPAdvertOverview: + return "MCCAOP Advertisement Overview" + case Dot11InformationElementIDQuietPeriodRequest: + return "Quiet Period Request" + case Dot11InformationElementIDQuietPeriodResponse: + return "Quiet Period Response" + case Dot11InformationElementIDECPACPolicy: + return "ECPAC Policy" + case Dot11InformationElementIDClusterTimeOffset: + return "Cluster Time Offset" + case Dot11InformationElementIDAntennaSectorID: + return "Antenna Sector ID" + case Dot11InformationElementIDVHTCapabilities: + return "VHT Capabilities (IEEE Std 802.11ac/D3.1)" + case Dot11InformationElementIDVHTOperation: + return "VHT Operation (IEEE Std 802.11ac/D3.1)" + case Dot11InformationElementIDExtendedBSSLoad: + return "Extended BSS Load" + case Dot11InformationElementIDWideBWChannelSwitch: + return "Wide Bandwidth Channel Switch" + case Dot11InformationElementIDVHTTxPowerEnvelope: + return "VHT Tx Power Envelope (IEEE Std 802.11ac/D5.0)" + case Dot11InformationElementIDChannelSwitchWrapper: + return "Channel Switch Wrapper" + case Dot11InformationElementIDOperatingModeNotification: + return "Operating Mode Notification" + case Dot11InformationElementIDUPSIM: + return "UP SIM" + case Dot11InformationElementIDReducedNeighborReport: + return "Reduced Neighbor Report" + case Dot11InformationElementIDTVHTOperation: + return "TVHT Op" + case Dot11InformationElementIDDeviceLocation: + return "Device Location" + case Dot11InformationElementIDWhiteSpaceMap: + return "White Space Map" + case Dot11InformationElementIDFineTuningMeasureParams: + return "Fine Tuning Measure Parameters" + case Dot11InformationElementIDVendor: + return "Vendor" + default: + return "Unknown information element id" + } +} + +// Dot11 provides an IEEE 802.11 base packet header. +// See http://standards.ieee.org/findstds/standard/802.11-2012.html +// for excruciating detail. 
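+//
+// A usage sketch (illustrative only, assuming a capture whose link type
+// starts at the 802.11 header):
+//
+//	pkt := gopacket.NewPacket(frame, LayerTypeDot11, gopacket.Default)
+//	if l := pkt.Layer(LayerTypeDot11); l != nil {
+//		d := l.(*Dot11)
+//		fmt.Println(d.Type, d.Address1)
+//	}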
+type Dot11 struct { + BaseLayer + Type Dot11Type + Proto uint8 + Flags Dot11Flags + DurationID uint16 + Address1 net.HardwareAddr + Address2 net.HardwareAddr + Address3 net.HardwareAddr + Address4 net.HardwareAddr + SequenceNumber uint16 + FragmentNumber uint16 + Checksum uint32 + QOS *Dot11QOS + HTControl *Dot11HTControl + DataLayer gopacket.Layer +} + +type Dot11QOS struct { + TID uint8 /* Traffic IDentifier */ + EOSP bool /* End of service period */ + AckPolicy Dot11AckPolicy + TXOP uint8 +} + +type Dot11HTControl struct { + ACConstraint bool + RDGMorePPDU bool + + VHT *Dot11HTControlVHT + HT *Dot11HTControlHT +} + +type Dot11HTControlHT struct { + LinkAdapationControl *Dot11LinkAdapationControl + CalibrationPosition uint8 + CalibrationSequence uint8 + CSISteering uint8 + NDPAnnouncement bool + DEI bool +} + +type Dot11HTControlVHT struct { + MRQ bool + UnsolicitedMFB bool + MSI *uint8 + MFB Dot11HTControlMFB + CompressedMSI *uint8 + STBCIndication bool + MFSI *uint8 + GID *uint8 + CodingType *Dot11CodingType + FbTXBeamformed bool +} + +type Dot11HTControlMFB struct { + NumSTS uint8 + VHTMCS uint8 + BW uint8 + SNR int8 +} + +type Dot11LinkAdapationControl struct { + TRQ bool + MRQ bool + MSI uint8 + MFSI uint8 + ASEL *Dot11ASEL + MFB *uint8 +} + +type Dot11ASEL struct { + Command uint8 + Data uint8 +} + +type Dot11CodingType uint8 + +const ( + Dot11CodingTypeBCC = 0 + Dot11CodingTypeLDPC = 1 +) + +func (a Dot11CodingType) String() string { + switch a { + case Dot11CodingTypeBCC: + return "BCC" + case Dot11CodingTypeLDPC: + return "LDPC" + default: + return "Unknown coding type" + } +} + +func (m *Dot11HTControlMFB) NoFeedBackPresent() bool { + return m.VHTMCS == 15 && m.NumSTS == 7 +} + +func decodeDot11(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11{} + err := d.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(d) + if d.DataLayer != nil { + p.AddLayer(d.DataLayer) + } + return p.NextDecoder(d.NextLayerType()) +} + +func (m *Dot11) LayerType() gopacket.LayerType { return LayerTypeDot11 } +func (m *Dot11) CanDecode() gopacket.LayerClass { return LayerTypeDot11 } +func (m *Dot11) NextLayerType() gopacket.LayerType { + if m.DataLayer != nil { + if m.Flags.WEP() { + return LayerTypeDot11WEP + } + return m.DataLayer.(gopacket.DecodingLayer).NextLayerType() + } + return m.Type.LayerType() +} + +func createU8(x uint8) *uint8 { + return &x +} + +var dataDecodeMap = map[Dot11Type]func() gopacket.DecodingLayer{ + Dot11TypeData: func() gopacket.DecodingLayer { return &Dot11Data{} }, + Dot11TypeDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataCFAck{} }, + Dot11TypeDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataCFPoll{} }, + Dot11TypeDataCFAckPoll: func() gopacket.DecodingLayer { return &Dot11DataCFAckPoll{} }, + Dot11TypeDataNull: func() gopacket.DecodingLayer { return &Dot11DataNull{} }, + Dot11TypeDataCFAckNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckNoData{} }, + Dot11TypeDataCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFPollNoData{} }, + Dot11TypeDataCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataCFAckPollNoData{} }, + Dot11TypeDataQOSData: func() gopacket.DecodingLayer { return &Dot11DataQOSData{} }, + Dot11TypeDataQOSDataCFAck: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAck{} }, + Dot11TypeDataQOSDataCFPoll: func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFPoll{} }, + Dot11TypeDataQOSDataCFAckPoll: func() gopacket.DecodingLayer { return 
&Dot11DataQOSDataCFAckPoll{} }, + Dot11TypeDataQOSNull: func() gopacket.DecodingLayer { return &Dot11DataQOSNull{} }, + Dot11TypeDataQOSCFPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFPollNoData{} }, + Dot11TypeDataQOSCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFAckPollNoData{} }, +} + +func (m *Dot11) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 10 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), 10) + } + m.Type = Dot11Type((data[0])&0xFC) >> 2 + + m.DataLayer = nil + m.Proto = uint8(data[0]) & 0x0003 + m.Flags = Dot11Flags(data[1]) + m.DurationID = binary.LittleEndian.Uint16(data[2:4]) + m.Address1 = net.HardwareAddr(data[4:10]) + + offset := 10 + + mainType := m.Type.MainType() + + switch mainType { + case Dot11TypeCtrl: + switch m.Type { + case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck: + if len(data) < offset+6 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + m.Address2 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + } + case Dot11TypeMgmt, Dot11TypeData: + if len(data) < offset+14 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+14) + } + m.Address2 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + m.Address3 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + + m.SequenceNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0xFFF0) >> 4 + m.FragmentNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0x000F) + offset += 2 + } + + if mainType == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() { + if len(data) < offset+6 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + m.Address4 = net.HardwareAddr(data[offset : offset+6]) + offset += 6 + } + + if m.Type.QOS() { + if len(data) < offset+2 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + m.QOS = &Dot11QOS{ + TID: (uint8(data[offset]) & 0x0F), + EOSP: (uint8(data[offset]) & 0x10) == 0x10, + AckPolicy: Dot11AckPolicy((uint8(data[offset]) & 0x60) >> 5), + TXOP: uint8(data[offset+1]), + } + offset += 2 + } + if m.Flags.Order() && (m.Type.QOS() || mainType == Dot11TypeMgmt) { + if len(data) < offset+4 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6) + } + + htc := &Dot11HTControl{ + ACConstraint: data[offset+3]&0x40 != 0, + RDGMorePPDU: data[offset+3]&0x80 != 0, + } + m.HTControl = htc + + if data[offset]&0x1 != 0 { // VHT Variant + vht := &Dot11HTControlVHT{} + htc.VHT = vht + vht.MRQ = data[offset]&0x4 != 0 + vht.UnsolicitedMFB = data[offset+3]&0x20 != 0 + vht.MFB = Dot11HTControlMFB{ + NumSTS: uint8(data[offset+1] >> 1 & 0x7), + VHTMCS: uint8(data[offset+1] >> 4 & 0xF), + BW: uint8(data[offset+2] & 0x3), + SNR: int8((-(data[offset+2] >> 2 & 0x20))+data[offset+2]>>2&0x1F) + 22, + } + + if vht.UnsolicitedMFB { + if !vht.MFB.NoFeedBackPresent() { + vht.CompressedMSI = createU8(data[offset] >> 3 & 0x3) + vht.STBCIndication = data[offset]&0x20 != 0 + vht.CodingType = (*Dot11CodingType)(createU8(data[offset+3] >> 3 & 0x1)) + vht.FbTXBeamformed = data[offset+3]&0x10 != 0 + vht.GID = createU8( + data[offset]>>6 + + (data[offset+1] & 0x1 << 2) + + data[offset+3]&0x7<<3) + } + } else { + if vht.MRQ { + vht.MSI = createU8((data[offset] 
>> 3) & 0x07) + } + vht.MFSI = createU8(data[offset]>>6 + (data[offset+1] & 0x1 << 2)) + } + + } else { // HT Variant + ht := &Dot11HTControlHT{} + htc.HT = ht + + lac := &Dot11LinkAdapationControl{} + ht.LinkAdapationControl = lac + lac.TRQ = data[offset]&0x2 != 0 + lac.MFSI = data[offset]>>6&0x3 + data[offset+1]&0x1<<3 + if data[offset]&0x3C == 0x38 { // ASEL + lac.ASEL = &Dot11ASEL{ + Command: data[offset+1] >> 1 & 0x7, + Data: data[offset+1] >> 4 & 0xF, + } + } else { + lac.MRQ = data[offset]&0x4 != 0 + if lac.MRQ { + lac.MSI = data[offset] >> 3 & 0x7 + } + lac.MFB = createU8(data[offset+1] >> 1) + } + ht.CalibrationPosition = data[offset+2] & 0x3 + ht.CalibrationSequence = data[offset+2] >> 2 & 0x3 + ht.CSISteering = data[offset+2] >> 6 & 0x3 + ht.NDPAnnouncement = data[offset+3]&0x1 != 0 + if mainType != Dot11TypeMgmt { + ht.DEI = data[offset+3]&0x20 != 0 + } + } + + offset += 4 + } + + if len(data) < offset+4 { + df.SetTruncated() + return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4) + } + + m.BaseLayer = BaseLayer{ + Contents: data[0:offset], + Payload: data[offset : len(data)-4], + } + + if mainType == Dot11TypeData { + l := dataDecodeMap[m.Type]() + err := l.DecodeFromBytes(m.BaseLayer.Payload, df) + if err != nil { + return err + } + m.DataLayer = l.(gopacket.Layer) + } + + m.Checksum = binary.LittleEndian.Uint32(data[len(data)-4 : len(data)]) + return nil +} + +func (m *Dot11) ChecksumValid() bool { + // only for CTRL and MGMT frames + h := crc32.NewIEEE() + h.Write(m.Contents) + h.Write(m.Payload) + return m.Checksum == h.Sum32() +} + +func (m Dot11) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(24) + + if err != nil { + return err + } + + buf[0] = (uint8(m.Type) << 2) | m.Proto + buf[1] = uint8(m.Flags) + + binary.LittleEndian.PutUint16(buf[2:4], m.DurationID) + + copy(buf[4:10], m.Address1) + + offset := 10 + + switch m.Type.MainType() { + case Dot11TypeCtrl: + switch m.Type { + case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck: + copy(buf[offset:offset+6], m.Address2) + offset += 6 + } + case Dot11TypeMgmt, Dot11TypeData: + copy(buf[offset:offset+6], m.Address2) + offset += 6 + copy(buf[offset:offset+6], m.Address3) + offset += 6 + + binary.LittleEndian.PutUint16(buf[offset:offset+2], (m.SequenceNumber<<4)|m.FragmentNumber) + offset += 2 + } + + if m.Type.MainType() == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() { + copy(buf[offset:offset+6], m.Address4) + offset += 6 + } + + return nil +} + +// Dot11Mgmt is a base for all IEEE 802.11 management layers. +type Dot11Mgmt struct { + BaseLayer +} + +func (m *Dot11Mgmt) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } +func (m *Dot11Mgmt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +// Dot11Ctrl is a base for all IEEE 802.11 control layers. 
+type Dot11Ctrl struct { + BaseLayer +} + +func (m *Dot11Ctrl) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } + +func (m *Dot11Ctrl) LayerType() gopacket.LayerType { return LayerTypeDot11Ctrl } +func (m *Dot11Ctrl) CanDecode() gopacket.LayerClass { return LayerTypeDot11Ctrl } +func (m *Dot11Ctrl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeDot11Ctrl(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11Ctrl{} + return decodingLayerDecoder(d, data, p) +} + +// Dot11WEP contains WEP encrypted IEEE 802.11 data. +type Dot11WEP struct { + BaseLayer +} + +func (m *Dot11WEP) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } + +func (m *Dot11WEP) LayerType() gopacket.LayerType { return LayerTypeDot11WEP } +func (m *Dot11WEP) CanDecode() gopacket.LayerClass { return LayerTypeDot11WEP } +func (m *Dot11WEP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeDot11WEP(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11WEP{} + return decodingLayerDecoder(d, data, p) +} + +// Dot11Data is a base for all IEEE 802.11 data layers. +type Dot11Data struct { + BaseLayer +} + +func (m *Dot11Data) NextLayerType() gopacket.LayerType { + return LayerTypeLLC +} + +func (m *Dot11Data) LayerType() gopacket.LayerType { return LayerTypeDot11Data } +func (m *Dot11Data) CanDecode() gopacket.LayerClass { return LayerTypeDot11Data } +func (m *Dot11Data) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Payload = data + return nil +} + +func decodeDot11Data(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11Data{} + return decodingLayerDecoder(d, data, p) +} + +type Dot11DataCFAck struct { + Dot11Data +} + +func decodeDot11DataCFAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck } +func (m *Dot11DataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAck } +func (m *Dot11DataCFAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFPoll struct { + Dot11Data +} + +func decodeDot11DataCFPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll } +func (m *Dot11DataCFPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPoll } +func (m *Dot11DataCFPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFAckPoll struct { + Dot11Data +} + +func decodeDot11DataCFAckPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAckPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAckPoll) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckPoll } +func (m *Dot11DataCFAckPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckPoll } +func (m *Dot11DataCFAckPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataNull struct { + Dot11Data +} + +func decodeDot11DataNull(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataNull{} + return decodingLayerDecoder(d, 
data, p) +} + +func (m *Dot11DataNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataNull } +func (m *Dot11DataNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataNull } +func (m *Dot11DataNull) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFAckNoData struct { + Dot11Data +} + +func decodeDot11DataCFAckNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAckNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAckNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFAckNoData } +func (m *Dot11DataCFAckNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckNoData } +func (m *Dot11DataCFAckNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFPollNoData struct { + Dot11Data +} + +func decodeDot11DataCFPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFPollNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPollNoData } +func (m *Dot11DataCFPollNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPollNoData } +func (m *Dot11DataCFPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataCFAckPollNoData struct { + Dot11Data +} + +func decodeDot11DataCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataCFAckPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataCFAckPollNoData) LayerType() gopacket.LayerType { + return LayerTypeDot11DataCFAckPollNoData +} +func (m *Dot11DataCFAckPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataCFAckPollNoData +} +func (m *Dot11DataCFAckPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Data.DecodeFromBytes(data, df) +} + +type Dot11DataQOS struct { + Dot11Ctrl +} + +func (m *Dot11DataQOS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.BaseLayer = BaseLayer{Payload: data} + return nil +} + +type Dot11DataQOSData struct { + Dot11DataQOS +} + +func decodeDot11DataQOSData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSData) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSData } +func (m *Dot11DataQOSData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSData } + +func (m *Dot11DataQOSData) NextLayerType() gopacket.LayerType { + return LayerTypeDot11Data +} + +type Dot11DataQOSDataCFAck struct { + Dot11DataQOS +} + +func decodeDot11DataQOSDataCFAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSDataCFAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSDataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSDataCFAck } +func (m *Dot11DataQOSDataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSDataCFAck } +func (m *Dot11DataQOSDataCFAck) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck } + +type Dot11DataQOSDataCFPoll struct { + Dot11DataQOS +} + +func decodeDot11DataQOSDataCFPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSDataCFPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m 
*Dot11DataQOSDataCFPoll) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSDataCFPoll +} +func (m *Dot11DataQOSDataCFPoll) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSDataCFPoll +} +func (m *Dot11DataQOSDataCFPoll) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll } + +type Dot11DataQOSDataCFAckPoll struct { + Dot11DataQOS +} + +func decodeDot11DataQOSDataCFAckPoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSDataCFAckPoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSDataCFAckPoll) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSDataCFAckPoll +} +func (m *Dot11DataQOSDataCFAckPoll) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSDataCFAckPoll +} +func (m *Dot11DataQOSDataCFAckPoll) NextLayerType() gopacket.LayerType { + return LayerTypeDot11DataCFAckPoll +} + +type Dot11DataQOSNull struct { + Dot11DataQOS +} + +func decodeDot11DataQOSNull(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSNull{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSNull) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSNull } +func (m *Dot11DataQOSNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSNull } +func (m *Dot11DataQOSNull) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataNull } + +type Dot11DataQOSCFPollNoData struct { + Dot11DataQOS +} + +func decodeDot11DataQOSCFPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSCFPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSCFPollNoData) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSCFPollNoData +} +func (m *Dot11DataQOSCFPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSCFPollNoData +} +func (m *Dot11DataQOSCFPollNoData) NextLayerType() gopacket.LayerType { + return LayerTypeDot11DataCFPollNoData +} + +type Dot11DataQOSCFAckPollNoData struct { + Dot11DataQOS +} + +func decodeDot11DataQOSCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11DataQOSCFAckPollNoData{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11DataQOSCFAckPollNoData) LayerType() gopacket.LayerType { + return LayerTypeDot11DataQOSCFAckPollNoData +} +func (m *Dot11DataQOSCFAckPollNoData) CanDecode() gopacket.LayerClass { + return LayerTypeDot11DataQOSCFAckPollNoData +} +func (m *Dot11DataQOSCFAckPollNoData) NextLayerType() gopacket.LayerType { + return LayerTypeDot11DataCFAckPollNoData +} + +type Dot11InformationElement struct { + BaseLayer + ID Dot11InformationElementID + Length uint8 + OUI []byte + Info []byte +} + +func (m *Dot11InformationElement) LayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11InformationElement) CanDecode() gopacket.LayerClass { + return LayerTypeDot11InformationElement +} + +func (m *Dot11InformationElement) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +func (m *Dot11InformationElement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), 2) + } + m.ID = Dot11InformationElementID(data[0]) + m.Length = data[1] + offset := int(2) + + if len(data) < offset+int(m.Length) { + df.SetTruncated() + return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), offset+int(m.Length)) + } + 
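// ID 221 (0xDD) marks a vendor-specific element: its Info begins with a
+ // 3-byte OUI (followed, by common convention, by a 1-byte vendor type),
+ // and the decoder below exposes those first four bytes together as OUI. +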
if m.ID == 221 { + // Vendor extension + m.OUI = data[offset : offset+4] + m.Info = data[offset+4 : offset+int(m.Length)] + } else { + m.Info = data[offset : offset+int(m.Length)] + } + + offset += int(m.Length) + + m.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]} + return nil +} + +func (d *Dot11InformationElement) String() string { + if d.ID == 0 { + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, SSID: %v)", d.ID, d.Length, string(d.Info)) + } else if d.ID == 1 { + rates := "" + for i := 0; i < len(d.Info); i++ { + if d.Info[i]&0x80 == 0 { + rates += fmt.Sprintf("%.1f ", float32(d.Info[i])*0.5) + } else { + rates += fmt.Sprintf("%.1f* ", float32(d.Info[i]&0x7F)*0.5) + } + } + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Rates: %s Mbit)", d.ID, d.Length, rates) + } else if d.ID == 221 { + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, OUI: %X, Info: %X)", d.ID, d.Length, d.OUI, d.Info) + } else { + return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Info: %X)", d.ID, d.Length, d.Info) + } +} + +func (m Dot11InformationElement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + length := len(m.Info) + len(m.OUI) + if buf, err := b.PrependBytes(2 + length); err != nil { + return err + } else { + buf[0] = uint8(m.ID) + buf[1] = uint8(length) + copy(buf[2:], m.OUI) + copy(buf[2+len(m.OUI):], m.Info) + } + return nil +} + +func decodeDot11InformationElement(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11InformationElement{} + return decodingLayerDecoder(d, data, p) +} + +type Dot11CtrlCTS struct { + Dot11Ctrl +} + +func decodeDot11CtrlCTS(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlCTS{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlCTS) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlCTS +} +func (m *Dot11CtrlCTS) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlCTS +} +func (m *Dot11CtrlCTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlRTS struct { + Dot11Ctrl +} + +func decodeDot11CtrlRTS(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlRTS{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlRTS) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlRTS +} +func (m *Dot11CtrlRTS) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlRTS +} +func (m *Dot11CtrlRTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlBlockAckReq struct { + Dot11Ctrl +} + +func decodeDot11CtrlBlockAckReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlBlockAckReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlBlockAckReq) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlBlockAckReq +} +func (m *Dot11CtrlBlockAckReq) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlBlockAckReq +} +func (m *Dot11CtrlBlockAckReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlBlockAck struct { + Dot11Ctrl +} + +func decodeDot11CtrlBlockAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlBlockAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlBlockAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlBlockAck } +func (m 
*Dot11CtrlBlockAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlBlockAck } +func (m *Dot11CtrlBlockAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlPowersavePoll struct { + Dot11Ctrl +} + +func decodeDot11CtrlPowersavePoll(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlPowersavePoll{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlPowersavePoll) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlPowersavePoll +} +func (m *Dot11CtrlPowersavePoll) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlPowersavePoll +} +func (m *Dot11CtrlPowersavePoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlAck struct { + Dot11Ctrl +} + +func decodeDot11CtrlAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlAck) LayerType() gopacket.LayerType { return LayerTypeDot11CtrlAck } +func (m *Dot11CtrlAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlAck } +func (m *Dot11CtrlAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlCFEnd struct { + Dot11Ctrl +} + +func decodeDot11CtrlCFEnd(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlCFEnd{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlCFEnd) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlCFEnd +} +func (m *Dot11CtrlCFEnd) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlCFEnd +} +func (m *Dot11CtrlCFEnd) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11CtrlCFEndAck struct { + Dot11Ctrl +} + +func decodeDot11CtrlCFEndAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11CtrlCFEndAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11CtrlCFEndAck) LayerType() gopacket.LayerType { + return LayerTypeDot11CtrlCFEndAck +} +func (m *Dot11CtrlCFEndAck) CanDecode() gopacket.LayerClass { + return LayerTypeDot11CtrlCFEndAck +} +func (m *Dot11CtrlCFEndAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + return m.Dot11Ctrl.DecodeFromBytes(data, df) +} + +type Dot11MgmtAssociationReq struct { + Dot11Mgmt + CapabilityInfo uint16 + ListenInterval uint16 +} + +func decodeDot11MgmtAssociationReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAssociationReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAssociationReq) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtAssociationReq +} +func (m *Dot11MgmtAssociationReq) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtAssociationReq +} +func (m *Dot11MgmtAssociationReq) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtAssociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtAssociationReq length %v too short, %v required", len(data), 4) + } + m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2]) + m.ListenInterval = binary.LittleEndian.Uint16(data[2:4]) + m.Payload = data[4:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtAssociationReq) SerializeTo(b gopacket.SerializeBuffer, opts 
gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(4) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo) + binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval) + + return nil +} + +type Dot11MgmtAssociationResp struct { + Dot11Mgmt + CapabilityInfo uint16 + Status Dot11Status + AID uint16 +} + +func decodeDot11MgmtAssociationResp(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAssociationResp{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAssociationResp) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtAssociationResp +} +func (m *Dot11MgmtAssociationResp) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtAssociationResp +} +func (m *Dot11MgmtAssociationResp) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtAssociationResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 6 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtAssociationResp length %v too short, %v required", len(data), 6) + } + m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2]) + m.Status = Dot11Status(binary.LittleEndian.Uint16(data[2:4])) + m.AID = binary.LittleEndian.Uint16(data[4:6]) + m.Payload = data[6:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtAssociationResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(6) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo) + binary.LittleEndian.PutUint16(buf[2:4], uint16(m.Status)) + binary.LittleEndian.PutUint16(buf[4:6], m.AID) + + return nil +} + +type Dot11MgmtReassociationReq struct { + Dot11Mgmt + CapabilityInfo uint16 + ListenInterval uint16 + CurrentApAddress net.HardwareAddr +} + +func decodeDot11MgmtReassociationReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtReassociationReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtReassociationReq) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtReassociationReq +} +func (m *Dot11MgmtReassociationReq) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtReassociationReq +} +func (m *Dot11MgmtReassociationReq) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtReassociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 10 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtReassociationReq length %v too short, %v required", len(data), 10) + } + m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2]) + m.ListenInterval = binary.LittleEndian.Uint16(data[2:4]) + m.CurrentApAddress = net.HardwareAddr(data[4:10]) + m.Payload = data[10:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtReassociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(10) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo) + binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval) + + copy(buf[4:10], m.CurrentApAddress) + + return nil +} + +type Dot11MgmtReassociationResp struct { + Dot11Mgmt +} + +func decodeDot11MgmtReassociationResp(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtReassociationResp{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtReassociationResp) LayerType() 
gopacket.LayerType { + return LayerTypeDot11MgmtReassociationResp +} +func (m *Dot11MgmtReassociationResp) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtReassociationResp +} +func (m *Dot11MgmtReassociationResp) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +type Dot11MgmtProbeReq struct { + Dot11Mgmt +} + +func decodeDot11MgmtProbeReq(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtProbeReq{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtProbeReq) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeReq } +func (m *Dot11MgmtProbeReq) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeReq } +func (m *Dot11MgmtProbeReq) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +type Dot11MgmtProbeResp struct { + Dot11Mgmt + Timestamp uint64 + Interval uint16 + Flags uint16 +} + +func decodeDot11MgmtProbeResp(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtProbeResp{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtProbeResp) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtProbeResp } +func (m *Dot11MgmtProbeResp) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeResp } +func (m *Dot11MgmtProbeResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + df.SetTruncated() + + return fmt.Errorf("Dot11MgmtProbeResp length %v too short, %v required", len(data), 12) + } + + m.Timestamp = binary.LittleEndian.Uint64(data[0:8]) + m.Interval = binary.LittleEndian.Uint16(data[8:10]) + m.Flags = binary.LittleEndian.Uint16(data[10:12]) + m.Payload = data[12:] + + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m *Dot11MgmtProbeResp) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} + +func (m Dot11MgmtProbeResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(12) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp) + binary.LittleEndian.PutUint16(buf[8:10], m.Interval) + binary.LittleEndian.PutUint16(buf[10:12], m.Flags) + + return nil +} + +type Dot11MgmtMeasurementPilot struct { + Dot11Mgmt +} + +func decodeDot11MgmtMeasurementPilot(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtMeasurementPilot{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtMeasurementPilot) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtMeasurementPilot +} +func (m *Dot11MgmtMeasurementPilot) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtMeasurementPilot +} + +type Dot11MgmtBeacon struct { + Dot11Mgmt + Timestamp uint64 + Interval uint16 + Flags uint16 +} + +func decodeDot11MgmtBeacon(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtBeacon{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtBeacon) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtBeacon } +func (m *Dot11MgmtBeacon) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtBeacon } +func (m *Dot11MgmtBeacon) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtBeacon length %v too short, %v required", len(data), 12) + } + m.Timestamp = binary.LittleEndian.Uint64(data[0:8]) + m.Interval = binary.LittleEndian.Uint16(data[8:10]) + m.Flags = binary.LittleEndian.Uint16(data[10:12]) + m.Payload = data[12:] + return 
m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m *Dot11MgmtBeacon) NextLayerType() gopacket.LayerType { return LayerTypeDot11InformationElement } + +func (m Dot11MgmtBeacon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(12) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp) + binary.LittleEndian.PutUint16(buf[8:10], m.Interval) + binary.LittleEndian.PutUint16(buf[10:12], m.Flags) + + return nil +} + +type Dot11MgmtATIM struct { + Dot11Mgmt +} + +func decodeDot11MgmtATIM(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtATIM{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtATIM) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtATIM } +func (m *Dot11MgmtATIM) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtATIM } + +type Dot11MgmtDisassociation struct { + Dot11Mgmt + Reason Dot11Reason +} + +func decodeDot11MgmtDisassociation(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtDisassociation{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtDisassociation) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtDisassociation +} +func (m *Dot11MgmtDisassociation) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtDisassociation +} +func (m *Dot11MgmtDisassociation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtDisassociation length %v too short, %v required", len(data), 2) + } + m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2])) + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtDisassociation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(2) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason)) + + return nil +} + +type Dot11MgmtAuthentication struct { + Dot11Mgmt + Algorithm Dot11Algorithm + Sequence uint16 + Status Dot11Status +} + +func decodeDot11MgmtAuthentication(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAuthentication{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAuthentication) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtAuthentication +} +func (m *Dot11MgmtAuthentication) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtAuthentication +} +func (m *Dot11MgmtAuthentication) NextLayerType() gopacket.LayerType { + return LayerTypeDot11InformationElement +} +func (m *Dot11MgmtAuthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 6 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtAuthentication length %v too short, %v required", len(data), 6) + } + m.Algorithm = Dot11Algorithm(binary.LittleEndian.Uint16(data[0:2])) + m.Sequence = binary.LittleEndian.Uint16(data[2:4]) + m.Status = Dot11Status(binary.LittleEndian.Uint16(data[4:6])) + m.Payload = data[6:] + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtAuthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(6) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Algorithm)) + binary.LittleEndian.PutUint16(buf[2:4], m.Sequence) + binary.LittleEndian.PutUint16(buf[4:6], uint16(m.Status)) + + return nil +} + +type Dot11MgmtDeauthentication struct { + Dot11Mgmt + Reason 
Dot11Reason +} + +func decodeDot11MgmtDeauthentication(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtDeauthentication{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtDeauthentication) LayerType() gopacket.LayerType { + return LayerTypeDot11MgmtDeauthentication +} +func (m *Dot11MgmtDeauthentication) CanDecode() gopacket.LayerClass { + return LayerTypeDot11MgmtDeauthentication +} +func (m *Dot11MgmtDeauthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Dot11MgmtDeauthentication length %v too short, %v required", len(data), 2) + } + m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2])) + return m.Dot11Mgmt.DecodeFromBytes(data, df) +} + +func (m Dot11MgmtDeauthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(2) + + if err != nil { + return err + } + + binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason)) + + return nil +} + +type Dot11MgmtAction struct { + Dot11Mgmt +} + +func decodeDot11MgmtAction(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtAction{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtAction) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtAction } +func (m *Dot11MgmtAction) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtAction } + +type Dot11MgmtActionNoAck struct { + Dot11Mgmt +} + +func decodeDot11MgmtActionNoAck(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtActionNoAck{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtActionNoAck) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtActionNoAck } +func (m *Dot11MgmtActionNoAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtActionNoAck } + +type Dot11MgmtArubaWLAN struct { + Dot11Mgmt +} + +func decodeDot11MgmtArubaWLAN(data []byte, p gopacket.PacketBuilder) error { + d := &Dot11MgmtArubaWLAN{} + return decodingLayerDecoder(d, data, p) +} + +func (m *Dot11MgmtArubaWLAN) LayerType() gopacket.LayerType { return LayerTypeDot11MgmtArubaWLAN } +func (m *Dot11MgmtArubaWLAN) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtArubaWLAN } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dot1q.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dot1q.go new file mode 100644 index 00000000..5cdd2f8d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/dot1q.go @@ -0,0 +1,75 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// Dot1Q is the packet layer for 802.1Q VLAN headers. +type Dot1Q struct { + BaseLayer + Priority uint8 + DropEligible bool + VLANIdentifier uint16 + Type EthernetType +} + +// LayerType returns gopacket.LayerTypeDot1Q +func (d *Dot1Q) LayerType() gopacket.LayerType { return LayerTypeDot1Q } + +// DecodeFromBytes decodes the given bytes into this layer. 
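+// The 4-byte 802.1Q tag packs, in order: a 3-bit priority (PCP), a 1-bit
+// drop-eligible flag (DEI, formerly CFI), a 12-bit VLAN identifier, and the
+// 2-byte EtherType of the encapsulated frame, which is why the masks
+// 0xE0, 0x10, and 0x0FFF appear below.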
+func (d *Dot1Q) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("802.1Q tag length %d too short", len(data)) + } + d.Priority = (data[0] & 0xE0) >> 5 + d.DropEligible = data[0]&0x10 != 0 + d.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF + d.Type = EthernetType(binary.BigEndian.Uint16(data[2:4])) + d.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (d *Dot1Q) CanDecode() gopacket.LayerClass { + return LayerTypeDot1Q +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (d *Dot1Q) NextLayerType() gopacket.LayerType { + return d.Type.LayerType() +} + +func decodeDot1Q(data []byte, p gopacket.PacketBuilder) error { + d := &Dot1Q{} + return decodingLayerDecoder(d, data, p) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (d *Dot1Q) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + if d.VLANIdentifier > 0xFFF { + return fmt.Errorf("vlan identifier %v is too high", d.VLANIdentifier) + } + firstBytes := uint16(d.Priority)<<13 | d.VLANIdentifier + if d.DropEligible { + firstBytes |= 0x1000 + } + binary.BigEndian.PutUint16(bytes, firstBytes) + binary.BigEndian.PutUint16(bytes[2:], uint16(d.Type)) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/eap.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/eap.go new file mode 100644 index 00000000..54238e8c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/eap.go @@ -0,0 +1,114 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +type EAPCode uint8 +type EAPType uint8 + +const ( + EAPCodeRequest EAPCode = 1 + EAPCodeResponse EAPCode = 2 + EAPCodeSuccess EAPCode = 3 + EAPCodeFailure EAPCode = 4 + + // EAPTypeNone means that this EAP layer has no Type or TypeData. + // Success and Failure EAPs will have this set. + EAPTypeNone EAPType = 0 + + EAPTypeIdentity EAPType = 1 + EAPTypeNotification EAPType = 2 + EAPTypeNACK EAPType = 3 + EAPTypeOTP EAPType = 4 + EAPTypeTokenCard EAPType = 5 +) + +// EAP defines an Extensible Authentication Protocol (rfc 3748) layer. +type EAP struct { + BaseLayer + Code EAPCode + Id uint8 + Length uint16 + Type EAPType + TypeData []byte +} + +// LayerType returns LayerTypeEAP. +func (e *EAP) LayerType() gopacket.LayerType { return LayerTypeEAP } + +// DecodeFromBytes decodes the given bytes into this layer. 
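+// Per RFC 3748, the first four bytes are Code, Identifier, and a big-endian
+// Length covering the whole packet; only packets with Length > 4 (Requests
+// and Responses) carry a Type byte and Type-Data.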
+func (e *EAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("EAP length %d too short", len(data)) + } + e.Code = EAPCode(data[0]) + e.Id = data[1] + e.Length = binary.BigEndian.Uint16(data[2:4]) + if len(data) < int(e.Length) { + df.SetTruncated() + return fmt.Errorf("EAP length %d too short, %d expected", len(data), e.Length) + } + switch { + case e.Length > 4: + e.Type = EAPType(data[4]) + e.TypeData = data[5:] + case e.Length == 4: + e.Type = 0 + e.TypeData = nil + default: + return fmt.Errorf("invalid EAP length %d", e.Length) + } + e.BaseLayer.Contents = data[:e.Length] + e.BaseLayer.Payload = data[e.Length:] // Should be 0 bytes + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (e *EAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + size := len(e.TypeData) + 4 + if size > 4 { + size++ + } + if opts.FixLengths { + e.Length = uint16(size) + } + bytes, err := b.PrependBytes(size) + if err != nil { + return err + } + bytes[0] = byte(e.Code) + bytes[1] = e.Id + binary.BigEndian.PutUint16(bytes[2:], e.Length) + if size > 4 { + bytes[4] = byte(e.Type) + copy(bytes[5:], e.TypeData) + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (e *EAP) CanDecode() gopacket.LayerClass { + return LayerTypeEAP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (e *EAP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +func decodeEAP(data []byte, p gopacket.PacketBuilder) error { + e := &EAP{} + return decodingLayerDecoder(e, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/eapol.go new file mode 100644 index 00000000..902598a2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/eapol.go @@ -0,0 +1,302 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// EAPOL defines an EAP over LAN (802.1x) layer. +type EAPOL struct { + BaseLayer + Version uint8 + Type EAPOLType + Length uint16 +} + +// LayerType returns LayerTypeEAPOL. +func (e *EAPOL) LayerType() gopacket.LayerType { return LayerTypeEAPOL } + +// DecodeFromBytes decodes the given bytes into this layer. 
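+// The EAPOL header is four bytes: protocol Version, packet Type, and a
+// big-endian Length of the body that follows; the body (for example an EAP
+// packet or an EAPOL-Key frame) is left as this layer's payload.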
+func (e *EAPOL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return fmt.Errorf("EAPOL length %d too short", len(data)) + } + e.Version = data[0] + e.Type = EAPOLType(data[1]) + e.Length = binary.BigEndian.Uint16(data[2:4]) + e.BaseLayer = BaseLayer{data[:4], data[4:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (e *EAPOL) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + bytes[0] = e.Version + bytes[1] = byte(e.Type) + binary.BigEndian.PutUint16(bytes[2:], e.Length) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (e *EAPOL) CanDecode() gopacket.LayerClass { + return LayerTypeEAPOL +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (e *EAPOL) NextLayerType() gopacket.LayerType { + return e.Type.LayerType() +} + +func decodeEAPOL(data []byte, p gopacket.PacketBuilder) error { + e := &EAPOL{} + return decodingLayerDecoder(e, data, p) +} + +// EAPOLKeyDescriptorType is an enumeration of key descriptor types +// as specified by 802.1x in the EAPOL-Key frame +type EAPOLKeyDescriptorType uint8 + +// Enumeration of EAPOLKeyDescriptorType +const ( + EAPOLKeyDescriptorTypeRC4 EAPOLKeyDescriptorType = 1 + EAPOLKeyDescriptorTypeDot11 EAPOLKeyDescriptorType = 2 + EAPOLKeyDescriptorTypeWPA EAPOLKeyDescriptorType = 254 +) + +func (kdt EAPOLKeyDescriptorType) String() string { + switch kdt { + case EAPOLKeyDescriptorTypeRC4: + return "RC4" + case EAPOLKeyDescriptorTypeDot11: + return "802.11" + case EAPOLKeyDescriptorTypeWPA: + return "WPA" + default: + return fmt.Sprintf("unknown descriptor type %d", kdt) + } +} + +// EAPOLKeyDescriptorVersion is an enumeration of versions specifying the +// encryption algorithm for the key data and the authentication for the +// message integrity code (MIC) +type EAPOLKeyDescriptorVersion uint8 + +// Enumeration of EAPOLKeyDescriptorVersion +const ( + EAPOLKeyDescriptorVersionOther EAPOLKeyDescriptorVersion = 0 + EAPOLKeyDescriptorVersionRC4HMACMD5 EAPOLKeyDescriptorVersion = 1 + EAPOLKeyDescriptorVersionAESHMACSHA1 EAPOLKeyDescriptorVersion = 2 + EAPOLKeyDescriptorVersionAES128CMAC EAPOLKeyDescriptorVersion = 3 +) + +func (v EAPOLKeyDescriptorVersion) String() string { + switch v { + case EAPOLKeyDescriptorVersionOther: + return "Other" + case EAPOLKeyDescriptorVersionRC4HMACMD5: + return "RC4-HMAC-MD5" + case EAPOLKeyDescriptorVersionAESHMACSHA1: + return "AES-HMAC-SHA1-128" + case EAPOLKeyDescriptorVersionAES128CMAC: + return "AES-128-CMAC" + default: + return fmt.Sprintf("unknown version %d", v) + } +} + +// EAPOLKeyType is an enumeration of key derivation types describing +// the purpose of the keys being derived. 
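+// Pairwise exchanges derive keys protecting unicast traffic between station
+// and authenticator; Group/SMK exchanges cover group (broadcast/multicast)
+// and SMK key derivation.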
+type EAPOLKeyType uint8 + +// Enumeration of EAPOLKeyType +const ( + EAPOLKeyTypeGroupSMK EAPOLKeyType = 0 + EAPOLKeyTypePairwise EAPOLKeyType = 1 +) + +func (kt EAPOLKeyType) String() string { + switch kt { + case EAPOLKeyTypeGroupSMK: + return "Group/SMK" + case EAPOLKeyTypePairwise: + return "Pairwise" + default: + return fmt.Sprintf("unknown key type %d", kt) + } +} + +// EAPOLKey defines an EAPOL-Key frame for 802.1x authentication +type EAPOLKey struct { + BaseLayer + KeyDescriptorType EAPOLKeyDescriptorType + KeyDescriptorVersion EAPOLKeyDescriptorVersion + KeyType EAPOLKeyType + KeyIndex uint8 + Install bool + KeyACK bool + KeyMIC bool + Secure bool + MICError bool + Request bool + HasEncryptedKeyData bool + SMKMessage bool + KeyLength uint16 + ReplayCounter uint64 + Nonce []byte + IV []byte + RSC uint64 + ID uint64 + MIC []byte + KeyDataLength uint16 + EncryptedKeyData []byte +} + +// LayerType returns LayerTypeEAPOLKey. +func (ek *EAPOLKey) LayerType() gopacket.LayerType { + return LayerTypeEAPOLKey +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (ek *EAPOLKey) CanDecode() gopacket.LayerClass { + return LayerTypeEAPOLKey +} + +// NextLayerType returns layers.LayerTypeDot11InformationElement if the key +// data exists and is unencrypted, otherwise it does not expect a next layer. +func (ek *EAPOLKey) NextLayerType() gopacket.LayerType { + if !ek.HasEncryptedKeyData && ek.KeyDataLength > 0 { + return LayerTypeDot11InformationElement + } + return gopacket.LayerTypePayload +} + +const eapolKeyFrameLen = 95 + +// DecodeFromBytes decodes the given bytes into this layer. +func (ek *EAPOLKey) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < eapolKeyFrameLen { + df.SetTruncated() + return fmt.Errorf("EAPOLKey length %v too short, %v required", + len(data), eapolKeyFrameLen) + } + + ek.KeyDescriptorType = EAPOLKeyDescriptorType(data[0]) + + info := binary.BigEndian.Uint16(data[1:3]) + ek.KeyDescriptorVersion = EAPOLKeyDescriptorVersion(info & 0x0007) + ek.KeyType = EAPOLKeyType((info & 0x0008) >> 3) + ek.KeyIndex = uint8((info & 0x0030) >> 4) + ek.Install = (info & 0x0040) != 0 + ek.KeyACK = (info & 0x0080) != 0 + ek.KeyMIC = (info & 0x0100) != 0 + ek.Secure = (info & 0x0200) != 0 + ek.MICError = (info & 0x0400) != 0 + ek.Request = (info & 0x0800) != 0 + ek.HasEncryptedKeyData = (info & 0x1000) != 0 + ek.SMKMessage = (info & 0x2000) != 0 + + ek.KeyLength = binary.BigEndian.Uint16(data[3:5]) + ek.ReplayCounter = binary.BigEndian.Uint64(data[5:13]) + + ek.Nonce = data[13:45] + ek.IV = data[45:61] + ek.RSC = binary.BigEndian.Uint64(data[61:69]) + ek.ID = binary.BigEndian.Uint64(data[69:77]) + ek.MIC = data[77:93] + + ek.KeyDataLength = binary.BigEndian.Uint16(data[93:95]) + + totalLength := eapolKeyFrameLen + int(ek.KeyDataLength) + if len(data) < totalLength { + df.SetTruncated() + return fmt.Errorf("EAPOLKey data length %d too short, %d required", + len(data)-eapolKeyFrameLen, ek.KeyDataLength) + } + + if ek.HasEncryptedKeyData { + ek.EncryptedKeyData = data[eapolKeyFrameLen:totalLength] + ek.BaseLayer = BaseLayer{ + Contents: data[:totalLength], + Payload: data[totalLength:], + } + } else { + ek.BaseLayer = BaseLayer{ + Contents: data[:eapolKeyFrameLen], + Payload: data[eapolKeyFrameLen:], + } + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
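+// Serialization mirrors DecodeFromBytes: the fixed 95-byte descriptor is
+// prepended, the boolean flags are packed back into the key-information
+// bitfield, and any encrypted key data is appended after the length field.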
+func (ek *EAPOLKey) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(eapolKeyFrameLen + len(ek.EncryptedKeyData)) + if err != nil { + return err + } + + buf[0] = byte(ek.KeyDescriptorType) + + var info uint16 + info |= uint16(ek.KeyDescriptorVersion) + info |= uint16(ek.KeyType) << 3 + info |= uint16(ek.KeyIndex) << 4 + if ek.Install { + info |= 0x0040 + } + if ek.KeyACK { + info |= 0x0080 + } + if ek.KeyMIC { + info |= 0x0100 + } + if ek.Secure { + info |= 0x0200 + } + if ek.MICError { + info |= 0x0400 + } + if ek.Request { + info |= 0x0800 + } + if ek.HasEncryptedKeyData { + info |= 0x1000 + } + if ek.SMKMessage { + info |= 0x2000 + } + binary.BigEndian.PutUint16(buf[1:3], info) + + binary.BigEndian.PutUint16(buf[3:5], ek.KeyLength) + binary.BigEndian.PutUint64(buf[5:13], ek.ReplayCounter) + + copy(buf[13:45], ek.Nonce) + copy(buf[45:61], ek.IV) + binary.BigEndian.PutUint64(buf[61:69], ek.RSC) + binary.BigEndian.PutUint64(buf[69:77], ek.ID) + copy(buf[77:93], ek.MIC) + + binary.BigEndian.PutUint16(buf[93:95], ek.KeyDataLength) + if len(ek.EncryptedKeyData) > 0 { + copy(buf[95:95+len(ek.EncryptedKeyData)], ek.EncryptedKeyData) + } + + return nil +} + +func decodeEAPOLKey(data []byte, p gopacket.PacketBuilder) error { + ek := &EAPOLKey{} + return decodingLayerDecoder(ek, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/endpoints.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/endpoints.go new file mode 100644 index 00000000..4c91cc33 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/endpoints.go @@ -0,0 +1,97 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" + "net" + "strconv" +) + +var ( + // We use two different endpoint types for IPv4 vs IPv6 addresses, so that + // ordering with endpointA.LessThan(endpointB) sanely groups all IPv4 + // addresses and all IPv6 addresses, such that IPv6 > IPv4 for all addresses. 
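+ // Each RegisterEndpointType call below ties a unique integer to a name and
+ // a Formatter that renders the endpoint's raw bytes for display (an IP or
+ // MAC address, or a port number).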
+ EndpointIPv4 = gopacket.RegisterEndpointType(1, gopacket.EndpointTypeMetadata{Name: "IPv4", Formatter: func(b []byte) string { + return net.IP(b).String() + }}) + EndpointIPv6 = gopacket.RegisterEndpointType(2, gopacket.EndpointTypeMetadata{Name: "IPv6", Formatter: func(b []byte) string { + return net.IP(b).String() + }}) + + EndpointMAC = gopacket.RegisterEndpointType(3, gopacket.EndpointTypeMetadata{Name: "MAC", Formatter: func(b []byte) string { + return net.HardwareAddr(b).String() + }}) + EndpointTCPPort = gopacket.RegisterEndpointType(4, gopacket.EndpointTypeMetadata{Name: "TCP", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointUDPPort = gopacket.RegisterEndpointType(5, gopacket.EndpointTypeMetadata{Name: "UDP", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointSCTPPort = gopacket.RegisterEndpointType(6, gopacket.EndpointTypeMetadata{Name: "SCTP", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointRUDPPort = gopacket.RegisterEndpointType(7, gopacket.EndpointTypeMetadata{Name: "RUDP", Formatter: func(b []byte) string { + return strconv.Itoa(int(b[0])) + }}) + EndpointUDPLitePort = gopacket.RegisterEndpointType(8, gopacket.EndpointTypeMetadata{Name: "UDPLite", Formatter: func(b []byte) string { + return strconv.Itoa(int(binary.BigEndian.Uint16(b))) + }}) + EndpointPPP = gopacket.RegisterEndpointType(9, gopacket.EndpointTypeMetadata{Name: "PPP", Formatter: func([]byte) string { + return "point" + }}) +) + +// NewIPEndpoint creates a new IP (v4 or v6) endpoint from a net.IP address. +// It returns gopacket.InvalidEndpoint if the IP address is invalid. +func NewIPEndpoint(a net.IP) gopacket.Endpoint { + ipv4 := a.To4() + if ipv4 != nil { + return gopacket.NewEndpoint(EndpointIPv4, []byte(ipv4)) + } + + ipv6 := a.To16() + if ipv6 != nil { + return gopacket.NewEndpoint(EndpointIPv6, []byte(ipv6)) + } + + return gopacket.InvalidEndpoint +} + +// NewMACEndpoint returns a new MAC address endpoint. +func NewMACEndpoint(a net.HardwareAddr) gopacket.Endpoint { + return gopacket.NewEndpoint(EndpointMAC, []byte(a)) +} +func newPortEndpoint(t gopacket.EndpointType, p uint16) gopacket.Endpoint { + return gopacket.NewEndpoint(t, []byte{byte(p >> 8), byte(p)}) +} + +// NewTCPPortEndpoint returns an endpoint based on a TCP port. +func NewTCPPortEndpoint(p TCPPort) gopacket.Endpoint { + return newPortEndpoint(EndpointTCPPort, uint16(p)) +} + +// NewUDPPortEndpoint returns an endpoint based on a UDP port. +func NewUDPPortEndpoint(p UDPPort) gopacket.Endpoint { + return newPortEndpoint(EndpointUDPPort, uint16(p)) +} + +// NewSCTPPortEndpoint returns an endpoint based on a SCTP port. +func NewSCTPPortEndpoint(p SCTPPort) gopacket.Endpoint { + return newPortEndpoint(EndpointSCTPPort, uint16(p)) +} + +// NewRUDPPortEndpoint returns an endpoint based on a RUDP port. +func NewRUDPPortEndpoint(p RUDPPort) gopacket.Endpoint { + return gopacket.NewEndpoint(EndpointRUDPPort, []byte{byte(p)}) +} + +// NewUDPLitePortEndpoint returns an endpoint based on a UDPLite port. 
+func NewUDPLitePortEndpoint(p UDPLitePort) gopacket.Endpoint { + return newPortEndpoint(EndpointUDPLitePort, uint16(p)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/enums.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/enums.go new file mode 100644 index 00000000..d9955dca --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/enums.go @@ -0,0 +1,450 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + "fmt" + "runtime" + + "github.com/google/gopacket" +) + +// EnumMetadata keeps track of a set of metadata for each enumeration value +// for protocol enumerations. +type EnumMetadata struct { + // DecodeWith is the decoder to use to decode this protocol's data. + DecodeWith gopacket.Decoder + // Name is the name of the enumeration value. + Name string + // LayerType is the layer type implied by the given enum. + LayerType gopacket.LayerType +} + +// errorFunc returns a decoder that spits out a specific error message. +func errorFunc(msg string) gopacket.Decoder { + var e = errors.New(msg) + return gopacket.DecodeFunc(func([]byte, gopacket.PacketBuilder) error { + return e + }) +} + +// EthernetType is an enumeration of ethernet type values, and acts as a decoder +// for any type it supports. +type EthernetType uint16 + +const ( + // EthernetTypeLLC is not an actual ethernet type. It is instead a + // placeholder we use in Ethernet frames that use the 802.3 standard of + // srcmac|dstmac|length|LLC instead of srcmac|dstmac|ethertype. + EthernetTypeLLC EthernetType = 0 + EthernetTypeIPv4 EthernetType = 0x0800 + EthernetTypeARP EthernetType = 0x0806 + EthernetTypeIPv6 EthernetType = 0x86DD + EthernetTypeCiscoDiscovery EthernetType = 0x2000 + EthernetTypeNortelDiscovery EthernetType = 0x01a2 + EthernetTypeTransparentEthernetBridging EthernetType = 0x6558 + EthernetTypeDot1Q EthernetType = 0x8100 + EthernetTypePPP EthernetType = 0x880b + EthernetTypePPPoEDiscovery EthernetType = 0x8863 + EthernetTypePPPoESession EthernetType = 0x8864 + EthernetTypeMPLSUnicast EthernetType = 0x8847 + EthernetTypeMPLSMulticast EthernetType = 0x8848 + EthernetTypeEAPOL EthernetType = 0x888e + EthernetTypeQinQ EthernetType = 0x88a8 + EthernetTypeLinkLayerDiscovery EthernetType = 0x88cc + EthernetTypeEthernetCTP EthernetType = 0x9000 +) + +// IPProtocol is an enumeration of IP protocol values, and acts as a decoder +// for any type it supports. 
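+// The numeric values are the IANA-assigned protocol numbers carried in the
+// IPv4 Protocol field and the IPv6 Next Header field.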
+type IPProtocol uint8 + +const ( + IPProtocolIPv6HopByHop IPProtocol = 0 + IPProtocolICMPv4 IPProtocol = 1 + IPProtocolIGMP IPProtocol = 2 + IPProtocolIPv4 IPProtocol = 4 + IPProtocolTCP IPProtocol = 6 + IPProtocolUDP IPProtocol = 17 + IPProtocolRUDP IPProtocol = 27 + IPProtocolIPv6 IPProtocol = 41 + IPProtocolIPv6Routing IPProtocol = 43 + IPProtocolIPv6Fragment IPProtocol = 44 + IPProtocolGRE IPProtocol = 47 + IPProtocolESP IPProtocol = 50 + IPProtocolAH IPProtocol = 51 + IPProtocolICMPv6 IPProtocol = 58 + IPProtocolNoNextHeader IPProtocol = 59 + IPProtocolIPv6Destination IPProtocol = 60 + IPProtocolOSPF IPProtocol = 89 + IPProtocolIPIP IPProtocol = 94 + IPProtocolEtherIP IPProtocol = 97 + IPProtocolVRRP IPProtocol = 112 + IPProtocolSCTP IPProtocol = 132 + IPProtocolUDPLite IPProtocol = 136 + IPProtocolMPLSInIP IPProtocol = 137 +) + +// LinkType is an enumeration of link types, and acts as a decoder for any +// link type it supports. +type LinkType uint8 + +const ( + // According to pcap-linktype(7) and http://www.tcpdump.org/linktypes.html + LinkTypeNull LinkType = 0 + LinkTypeEthernet LinkType = 1 + LinkTypeAX25 LinkType = 3 + LinkTypeTokenRing LinkType = 6 + LinkTypeArcNet LinkType = 7 + LinkTypeSLIP LinkType = 8 + LinkTypePPP LinkType = 9 + LinkTypeFDDI LinkType = 10 + LinkTypePPP_HDLC LinkType = 50 + LinkTypePPPEthernet LinkType = 51 + LinkTypeATM_RFC1483 LinkType = 100 + LinkTypeRaw LinkType = 101 + LinkTypeC_HDLC LinkType = 104 + LinkTypeIEEE802_11 LinkType = 105 + LinkTypeFRelay LinkType = 107 + LinkTypeLoop LinkType = 108 + LinkTypeLinuxSLL LinkType = 113 + LinkTypeLTalk LinkType = 114 + LinkTypePFLog LinkType = 117 + LinkTypePrismHeader LinkType = 119 + LinkTypeIPOverFC LinkType = 122 + LinkTypeSunATM LinkType = 123 + LinkTypeIEEE80211Radio LinkType = 127 + LinkTypeARCNetLinux LinkType = 129 + LinkTypeIPOver1394 LinkType = 138 + LinkTypeMTP2Phdr LinkType = 139 + LinkTypeMTP2 LinkType = 140 + LinkTypeMTP3 LinkType = 141 + LinkTypeSCCP LinkType = 142 + LinkTypeDOCSIS LinkType = 143 + LinkTypeLinuxIRDA LinkType = 144 + LinkTypeLinuxLAPD LinkType = 177 + LinkTypeLinuxUSB LinkType = 220 + LinkTypeFC2 LinkType = 224 + LinkTypeFC2Framed LinkType = 225 + LinkTypeIPv4 LinkType = 228 + LinkTypeIPv6 LinkType = 229 +) + +// PPPoECode is the PPPoE code enum, taken from http://tools.ietf.org/html/rfc2516 +type PPPoECode uint8 + +const ( + PPPoECodePADI PPPoECode = 0x09 + PPPoECodePADO PPPoECode = 0x07 + PPPoECodePADR PPPoECode = 0x19 + PPPoECodePADS PPPoECode = 0x65 + PPPoECodePADT PPPoECode = 0xA7 + PPPoECodeSession PPPoECode = 0x00 +) + +// PPPType is an enumeration of PPP type values, and acts as a decoder for any +// type it supports. +type PPPType uint16 + +const ( + PPPTypeIPv4 PPPType = 0x0021 + PPPTypeIPv6 PPPType = 0x0057 + PPPTypeMPLSUnicast PPPType = 0x0281 + PPPTypeMPLSMulticast PPPType = 0x0283 +) + +// SCTPChunkType is an enumeration of chunk types inside SCTP packets. 
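+// The values follow the chunk type assignments of RFC 4960.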
+type SCTPChunkType uint8 + +const ( + SCTPChunkTypeData SCTPChunkType = 0 + SCTPChunkTypeInit SCTPChunkType = 1 + SCTPChunkTypeInitAck SCTPChunkType = 2 + SCTPChunkTypeSack SCTPChunkType = 3 + SCTPChunkTypeHeartbeat SCTPChunkType = 4 + SCTPChunkTypeHeartbeatAck SCTPChunkType = 5 + SCTPChunkTypeAbort SCTPChunkType = 6 + SCTPChunkTypeShutdown SCTPChunkType = 7 + SCTPChunkTypeShutdownAck SCTPChunkType = 8 + SCTPChunkTypeError SCTPChunkType = 9 + SCTPChunkTypeCookieEcho SCTPChunkType = 10 + SCTPChunkTypeCookieAck SCTPChunkType = 11 + SCTPChunkTypeShutdownComplete SCTPChunkType = 14 +) + +// FDDIFrameControl is an enumeration of FDDI frame control bytes. +type FDDIFrameControl uint8 + +const ( + FDDIFrameControlLLC FDDIFrameControl = 0x50 +) + +// EAPOLType is an enumeration of EAPOL packet types. +type EAPOLType uint8 + +const ( + EAPOLTypeEAP EAPOLType = 0 + EAPOLTypeStart EAPOLType = 1 + EAPOLTypeLogOff EAPOLType = 2 + EAPOLTypeKey EAPOLType = 3 + EAPOLTypeASFAlert EAPOLType = 4 +) + +// ProtocolFamily is the set of values defined as PF_* in sys/socket.h +type ProtocolFamily uint8 + +const ( + ProtocolFamilyIPv4 ProtocolFamily = 2 + // BSDs use different values for INET6... glory be. These values taken from + // tcpdump 4.3.0. + ProtocolFamilyIPv6BSD ProtocolFamily = 24 + ProtocolFamilyIPv6FreeBSD ProtocolFamily = 28 + ProtocolFamilyIPv6Darwin ProtocolFamily = 30 + ProtocolFamilyIPv6Linux ProtocolFamily = 10 +) + +// Dot11Type is a combination of IEEE 802.11 frame's Type and Subtype fields. +// By combining these two fields together into a single type, we're able to +// provide a String function that correctly displays the subtype given the +// top-level type. +// +// If you just care about the top-level type, use the MainType function. +type Dot11Type uint8 + +// MainType strips the subtype information from the given type, +// returning just the overarching type (Mgmt, Ctrl, Data, Reserved). +func (d Dot11Type) MainType() Dot11Type { + return d & dot11TypeMask +} + +func (d Dot11Type) QOS() bool { + return d&dot11QOSMask == Dot11TypeDataQOSData +} + +const ( + Dot11TypeMgmt Dot11Type = 0x00 + Dot11TypeCtrl Dot11Type = 0x01 + Dot11TypeData Dot11Type = 0x02 + Dot11TypeReserved Dot11Type = 0x03 + dot11TypeMask = 0x03 + dot11QOSMask = 0x23 + + // The following are type/subtype conglomerations. 
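+ // Each constant packs the frame's 4-bit Subtype above its 2-bit Type, i.e.
+ // value = subtype<<2 | type: Dot11TypeMgmtProbeReq (0x10) is subtype 4 of
+ // type 0 (management), Dot11TypeDataQOSData (0x22) is subtype 8 of type 2
+ // (data).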
+ + // Management + Dot11TypeMgmtAssociationReq Dot11Type = 0x00 + Dot11TypeMgmtAssociationResp Dot11Type = 0x04 + Dot11TypeMgmtReassociationReq Dot11Type = 0x08 + Dot11TypeMgmtReassociationResp Dot11Type = 0x0c + Dot11TypeMgmtProbeReq Dot11Type = 0x10 + Dot11TypeMgmtProbeResp Dot11Type = 0x14 + Dot11TypeMgmtMeasurementPilot Dot11Type = 0x18 + Dot11TypeMgmtBeacon Dot11Type = 0x20 + Dot11TypeMgmtATIM Dot11Type = 0x24 + Dot11TypeMgmtDisassociation Dot11Type = 0x28 + Dot11TypeMgmtAuthentication Dot11Type = 0x2c + Dot11TypeMgmtDeauthentication Dot11Type = 0x30 + Dot11TypeMgmtAction Dot11Type = 0x34 + Dot11TypeMgmtActionNoAck Dot11Type = 0x38 + + // Control + Dot11TypeCtrlWrapper Dot11Type = 0x1d + Dot11TypeCtrlBlockAckReq Dot11Type = 0x21 + Dot11TypeCtrlBlockAck Dot11Type = 0x25 + Dot11TypeCtrlPowersavePoll Dot11Type = 0x29 + Dot11TypeCtrlRTS Dot11Type = 0x2d + Dot11TypeCtrlCTS Dot11Type = 0x31 + Dot11TypeCtrlAck Dot11Type = 0x35 + Dot11TypeCtrlCFEnd Dot11Type = 0x39 + Dot11TypeCtrlCFEndAck Dot11Type = 0x3d + + // Data + Dot11TypeDataCFAck Dot11Type = 0x06 + Dot11TypeDataCFPoll Dot11Type = 0x0a + Dot11TypeDataCFAckPoll Dot11Type = 0x0e + Dot11TypeDataNull Dot11Type = 0x12 + Dot11TypeDataCFAckNoData Dot11Type = 0x16 + Dot11TypeDataCFPollNoData Dot11Type = 0x1a + Dot11TypeDataCFAckPollNoData Dot11Type = 0x1e + Dot11TypeDataQOSData Dot11Type = 0x22 + Dot11TypeDataQOSDataCFAck Dot11Type = 0x26 + Dot11TypeDataQOSDataCFPoll Dot11Type = 0x2a + Dot11TypeDataQOSDataCFAckPoll Dot11Type = 0x2e + Dot11TypeDataQOSNull Dot11Type = 0x32 + Dot11TypeDataQOSCFPollNoData Dot11Type = 0x3a + Dot11TypeDataQOSCFAckPollNoData Dot11Type = 0x3e +) + +// Decode a raw v4 or v6 IP packet. +func decodeIPv4or6(data []byte, p gopacket.PacketBuilder) error { + version := data[0] >> 4 + switch version { + case 4: + return decodeIPv4(data, p) + case 6: + return decodeIPv6(data, p) + } + return fmt.Errorf("Invalid IP packet version %v", version) +} + +func initActualTypeData() { + // Each of the XXXTypeMetadata arrays contains mappings of how to handle enum + // values for various enum types in gopacket/layers. + // These arrays are actually created by gen2.go and stored in + // enums_generated.go. + // + // So, EthernetTypeMetadata[2] contains information on how to handle EthernetType + // 2, including which name to give it and which decoder to use to decode + // packet data of that type. These arrays are filled by default with all of the + // protocols gopacket/layers knows how to handle, but users of the library can + // add new decoders or override existing ones. For example, if you write a better + // TCP decoder, you can override IPProtocolMetadata[IPProtocolTCP].DecodeWith + // with your new decoder, and all gopacket/layers decoding will use your new + // decoder whenever they encounter that IPProtocol. + + // Here we link up all enumerations with their respective names and decoders. 
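+ // As the comment above notes, user code may swap in its own decoder; for
+ // example, given a hypothetical decodeMyTCP with the signature
+ // func([]byte, gopacket.PacketBuilder) error, one could write:
+ //
+ //	layers.IPProtocolMetadata[layers.IPProtocolTCP] = layers.EnumMetadata{
+ //		DecodeWith: gopacket.DecodeFunc(decodeMyTCP),
+ //		Name:       "TCP",
+ //		LayerType:  layers.LayerTypeTCP,
+ //	}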
+ EthernetTypeMetadata[EthernetTypeLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC", LayerType: LayerTypeLLC} + EthernetTypeMetadata[EthernetTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + EthernetTypeMetadata[EthernetTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + EthernetTypeMetadata[EthernetTypeARP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeARP), Name: "ARP", LayerType: LayerTypeARP} + EthernetTypeMetadata[EthernetTypeDot1Q] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q} + EthernetTypeMetadata[EthernetTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP", LayerType: LayerTypePPP} + EthernetTypeMetadata[EthernetTypePPPoEDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoEDiscovery", LayerType: LayerTypePPPoE} + EthernetTypeMetadata[EthernetTypePPPoESession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoESession", LayerType: LayerTypePPPoE} + EthernetTypeMetadata[EthernetTypeEthernetCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernetCTP), Name: "EthernetCTP", LayerType: LayerTypeEthernetCTP} + EthernetTypeMetadata[EthernetTypeCiscoDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeCiscoDiscovery), Name: "CiscoDiscovery", LayerType: LayerTypeCiscoDiscovery} + EthernetTypeMetadata[EthernetTypeNortelDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeNortelDiscovery), Name: "NortelDiscovery", LayerType: LayerTypeNortelDiscovery} + EthernetTypeMetadata[EthernetTypeLinkLayerDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinkLayerDiscovery), Name: "LinkLayerDiscovery", LayerType: LayerTypeLinkLayerDiscovery} + EthernetTypeMetadata[EthernetTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast", LayerType: LayerTypeMPLS} + EthernetTypeMetadata[EthernetTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast", LayerType: LayerTypeMPLS} + EthernetTypeMetadata[EthernetTypeEAPOL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOL), Name: "EAPOL", LayerType: LayerTypeEAPOL} + EthernetTypeMetadata[EthernetTypeQinQ] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q} + EthernetTypeMetadata[EthernetTypeTransparentEthernetBridging] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "TransparentEthernetBridging", LayerType: LayerTypeEthernet} + + IPProtocolMetadata[IPProtocolIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + IPProtocolMetadata[IPProtocolTCP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeTCP), Name: "TCP", LayerType: LayerTypeTCP} + IPProtocolMetadata[IPProtocolUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDP), Name: "UDP", LayerType: LayerTypeUDP} + IPProtocolMetadata[IPProtocolICMPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv4), Name: "ICMPv4", LayerType: LayerTypeICMPv4} + IPProtocolMetadata[IPProtocolICMPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv6), Name: "ICMPv6", LayerType: LayerTypeICMPv6} + IPProtocolMetadata[IPProtocolSCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTP), Name: "SCTP", LayerType: LayerTypeSCTP} + 
IPProtocolMetadata[IPProtocolIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + IPProtocolMetadata[IPProtocolIPIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + IPProtocolMetadata[IPProtocolEtherIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEtherIP), Name: "EtherIP", LayerType: LayerTypeEtherIP} + IPProtocolMetadata[IPProtocolRUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRUDP), Name: "RUDP", LayerType: LayerTypeRUDP} + IPProtocolMetadata[IPProtocolGRE] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeGRE), Name: "GRE", LayerType: LayerTypeGRE} + IPProtocolMetadata[IPProtocolIPv6HopByHop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6HopByHop), Name: "IPv6HopByHop", LayerType: LayerTypeIPv6HopByHop} + IPProtocolMetadata[IPProtocolIPv6Routing] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Routing), Name: "IPv6Routing", LayerType: LayerTypeIPv6Routing} + IPProtocolMetadata[IPProtocolIPv6Fragment] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Fragment), Name: "IPv6Fragment", LayerType: LayerTypeIPv6Fragment} + IPProtocolMetadata[IPProtocolIPv6Destination] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Destination), Name: "IPv6Destination", LayerType: LayerTypeIPv6Destination} + IPProtocolMetadata[IPProtocolOSPF] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeOSPF), Name: "OSPF", LayerType: LayerTypeOSPF} + IPProtocolMetadata[IPProtocolAH] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecAH), Name: "IPSecAH", LayerType: LayerTypeIPSecAH} + IPProtocolMetadata[IPProtocolESP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecESP), Name: "IPSecESP", LayerType: LayerTypeIPSecESP} + IPProtocolMetadata[IPProtocolUDPLite] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDPLite), Name: "UDPLite", LayerType: LayerTypeUDPLite} + IPProtocolMetadata[IPProtocolMPLSInIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLS", LayerType: LayerTypeMPLS} + IPProtocolMetadata[IPProtocolNoNextHeader] = EnumMetadata{DecodeWith: gopacket.DecodePayload, Name: "NoNextHeader", LayerType: gopacket.LayerTypePayload} + IPProtocolMetadata[IPProtocolIGMP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIGMP), Name: "IGMP", LayerType: LayerTypeIGMP} + IPProtocolMetadata[IPProtocolVRRP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeVRRP), Name: "VRRP", LayerType: LayerTypeVRRP} + + SCTPChunkTypeMetadata[SCTPChunkTypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPData), Name: "Data"} + SCTPChunkTypeMetadata[SCTPChunkTypeInit] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "Init"} + SCTPChunkTypeMetadata[SCTPChunkTypeInitAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "InitAck"} + SCTPChunkTypeMetadata[SCTPChunkTypeSack] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPSack), Name: "Sack"} + SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeat] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "Heartbeat"} + SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeatAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "HeartbeatAck"} + SCTPChunkTypeMetadata[SCTPChunkTypeAbort] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Abort"} + SCTPChunkTypeMetadata[SCTPChunkTypeError] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Error"} + 
SCTPChunkTypeMetadata[SCTPChunkTypeShutdown] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdown), Name: "Shutdown"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdownAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdownAck), Name: "ShutdownAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeCookieEcho] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPCookieEcho), Name: "CookieEcho"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeCookieAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "CookieAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdownComplete] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "ShutdownComplete"}
+
+	PPPTypeMetadata[PPPTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4"}
+	PPPTypeMetadata[PPPTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6"}
+	PPPTypeMetadata[PPPTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast"}
+	PPPTypeMetadata[PPPTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast"}
+
+	PPPoECodeMetadata[PPPoECodeSession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+
+	LinkTypeMetadata[LinkTypeEthernet] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "Ethernet"}
+	LinkTypeMetadata[LinkTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+	LinkTypeMetadata[LinkTypeFDDI] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeFDDI), Name: "FDDI"}
+	LinkTypeMetadata[LinkTypeNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Null"}
+	LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "Dot11"}
+	LinkTypeMetadata[LinkTypeLoop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Loop"}
+	LinkTypeMetadata[LinkTypeRaw] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	// See https://github.com/the-tcpdump-group/libpcap/blob/170f717e6e818cdc4bcbbfd906b63088eaa88fa0/pcap/dlt.h#L85
+	// Or https://github.com/wireshark/wireshark/blob/854cfe53efe44080609c78053ecfb2342ad84a08/wiretap/pcap-common.c#L508
+	if runtime.GOOS == "openbsd" {
+		LinkTypeMetadata[14] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	} else {
+		LinkTypeMetadata[12] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	}
+	LinkTypeMetadata[LinkTypePFLog] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePFLog), Name: "PFLog"}
+	LinkTypeMetadata[LinkTypeIEEE80211Radio] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRadioTap), Name: "RadioTap"}
+	LinkTypeMetadata[LinkTypeLinuxUSB] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSB), Name: "USB"}
+	LinkTypeMetadata[LinkTypeLinuxSLL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinuxSLL), Name: "Linux SLL"}
+	LinkTypeMetadata[LinkTypePrismHeader] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePrismHeader), Name: "Prism"}
+
+	FDDIFrameControlMetadata[FDDIFrameControlLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC"}
+
+	EAPOLTypeMetadata[EAPOLTypeEAP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAP), Name: "EAP", LayerType: LayerTypeEAP}
+	EAPOLTypeMetadata[EAPOLTypeKey] = EnumMetadata{DecodeWith:
gopacket.DecodeFunc(decodeEAPOLKey), Name: "EAPOLKey", LayerType: LayerTypeEAPOLKey} + + ProtocolFamilyMetadata[ProtocolFamilyIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4} + ProtocolFamilyMetadata[ProtocolFamilyIPv6BSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + ProtocolFamilyMetadata[ProtocolFamilyIPv6FreeBSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + ProtocolFamilyMetadata[ProtocolFamilyIPv6Darwin] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + ProtocolFamilyMetadata[ProtocolFamilyIPv6Linux] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6} + + Dot11TypeMetadata[Dot11TypeMgmtAssociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq), Name: "MgmtAssociationReq", LayerType: LayerTypeDot11MgmtAssociationReq} + Dot11TypeMetadata[Dot11TypeMgmtAssociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp), Name: "MgmtAssociationResp", LayerType: LayerTypeDot11MgmtAssociationResp} + Dot11TypeMetadata[Dot11TypeMgmtReassociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq), Name: "MgmtReassociationReq", LayerType: LayerTypeDot11MgmtReassociationReq} + Dot11TypeMetadata[Dot11TypeMgmtReassociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp), Name: "MgmtReassociationResp", LayerType: LayerTypeDot11MgmtReassociationResp} + Dot11TypeMetadata[Dot11TypeMgmtProbeReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeReq), Name: "MgmtProbeReq", LayerType: LayerTypeDot11MgmtProbeReq} + Dot11TypeMetadata[Dot11TypeMgmtProbeResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeResp), Name: "MgmtProbeResp", LayerType: LayerTypeDot11MgmtProbeResp} + Dot11TypeMetadata[Dot11TypeMgmtMeasurementPilot] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot), Name: "MgmtMeasurementPilot", LayerType: LayerTypeDot11MgmtMeasurementPilot} + Dot11TypeMetadata[Dot11TypeMgmtBeacon] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtBeacon), Name: "MgmtBeacon", LayerType: LayerTypeDot11MgmtBeacon} + Dot11TypeMetadata[Dot11TypeMgmtATIM] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtATIM), Name: "MgmtATIM", LayerType: LayerTypeDot11MgmtATIM} + Dot11TypeMetadata[Dot11TypeMgmtDisassociation] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDisassociation), Name: "MgmtDisassociation", LayerType: LayerTypeDot11MgmtDisassociation} + Dot11TypeMetadata[Dot11TypeMgmtAuthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAuthentication), Name: "MgmtAuthentication", LayerType: LayerTypeDot11MgmtAuthentication} + Dot11TypeMetadata[Dot11TypeMgmtDeauthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication), Name: "MgmtDeauthentication", LayerType: LayerTypeDot11MgmtDeauthentication} + Dot11TypeMetadata[Dot11TypeMgmtAction] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAction), Name: "MgmtAction", LayerType: LayerTypeDot11MgmtAction} + Dot11TypeMetadata[Dot11TypeMgmtActionNoAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck), Name: "MgmtActionNoAck", LayerType: LayerTypeDot11MgmtActionNoAck} + 
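// Note on the Dot11Type values registered above and below: each constant
+	// packs the 802.11 frame's subtype and type into one byte as
+	// value = subtype<<2 | type, with the 2-bit main type (00 Mgmt, 01 Ctrl,
+	// 10 Data) in the low bits and the 4-bit subtype above it. For example,
+	// Dot11TypeMgmtBeacon = 8<<2 | 0 = 0x20 and Dot11TypeCtrlRTS = 11<<2 | 1 = 0x2d.
+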
Dot11TypeMetadata[Dot11TypeCtrl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "Ctrl", LayerType: LayerTypeDot11Ctrl} + Dot11TypeMetadata[Dot11TypeCtrlWrapper] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "CtrlWrapper", LayerType: LayerTypeDot11Ctrl} + Dot11TypeMetadata[Dot11TypeCtrlBlockAckReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq), Name: "CtrlBlockAckReq", LayerType: LayerTypeDot11CtrlBlockAckReq} + Dot11TypeMetadata[Dot11TypeCtrlBlockAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAck), Name: "CtrlBlockAck", LayerType: LayerTypeDot11CtrlBlockAck} + Dot11TypeMetadata[Dot11TypeCtrlPowersavePoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll), Name: "CtrlPowersavePoll", LayerType: LayerTypeDot11CtrlPowersavePoll} + Dot11TypeMetadata[Dot11TypeCtrlRTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlRTS), Name: "CtrlRTS", LayerType: LayerTypeDot11CtrlRTS} + Dot11TypeMetadata[Dot11TypeCtrlCTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCTS), Name: "CtrlCTS", LayerType: LayerTypeDot11CtrlCTS} + Dot11TypeMetadata[Dot11TypeCtrlAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlAck), Name: "CtrlAck", LayerType: LayerTypeDot11CtrlAck} + Dot11TypeMetadata[Dot11TypeCtrlCFEnd] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEnd), Name: "CtrlCFEnd", LayerType: LayerTypeDot11CtrlCFEnd} + Dot11TypeMetadata[Dot11TypeCtrlCFEndAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck), Name: "CtrlCFEndAck", LayerType: LayerTypeDot11CtrlCFEndAck} + Dot11TypeMetadata[Dot11TypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Data), Name: "Data", LayerType: LayerTypeDot11Data} + Dot11TypeMetadata[Dot11TypeDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAck), Name: "DataCFAck", LayerType: LayerTypeDot11DataCFAck} + Dot11TypeMetadata[Dot11TypeDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPoll), Name: "DataCFPoll", LayerType: LayerTypeDot11DataCFPoll} + Dot11TypeMetadata[Dot11TypeDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPoll), Name: "DataCFAckPoll", LayerType: LayerTypeDot11DataCFAckPoll} + Dot11TypeMetadata[Dot11TypeDataNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataNull), Name: "DataNull", LayerType: LayerTypeDot11DataNull} + Dot11TypeMetadata[Dot11TypeDataCFAckNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckNoData), Name: "DataCFAckNoData", LayerType: LayerTypeDot11DataCFAckNoData} + Dot11TypeMetadata[Dot11TypeDataCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPollNoData), Name: "DataCFPollNoData", LayerType: LayerTypeDot11DataCFPollNoData} + Dot11TypeMetadata[Dot11TypeDataCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPollNoData), Name: "DataCFAckPollNoData", LayerType: LayerTypeDot11DataCFAckPollNoData} + Dot11TypeMetadata[Dot11TypeDataQOSData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSData), Name: "DataQOSData", LayerType: LayerTypeDot11DataQOSData} + Dot11TypeMetadata[Dot11TypeDataQOSDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck), Name: "DataQOSDataCFAck", LayerType: LayerTypeDot11DataQOSDataCFAck} + Dot11TypeMetadata[Dot11TypeDataQOSDataCFPoll] = EnumMetadata{DecodeWith: 
gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll), Name: "DataQOSDataCFPoll", LayerType: LayerTypeDot11DataQOSDataCFPoll} + Dot11TypeMetadata[Dot11TypeDataQOSDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll), Name: "DataQOSDataCFAckPoll", LayerType: LayerTypeDot11DataQOSDataCFAckPoll} + Dot11TypeMetadata[Dot11TypeDataQOSNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSNull), Name: "DataQOSNull", LayerType: LayerTypeDot11DataQOSNull} + Dot11TypeMetadata[Dot11TypeDataQOSCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData), Name: "DataQOSCFPollNoData", LayerType: LayerTypeDot11DataQOSCFPollNoData} + Dot11TypeMetadata[Dot11TypeDataQOSCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData), Name: "DataQOSCFAckPollNoData", LayerType: LayerTypeDot11DataQOSCFAckPollNoData} + + USBTransportTypeMetadata[USBTransportTypeInterrupt] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBInterrupt), Name: "Interrupt", LayerType: LayerTypeUSBInterrupt} + USBTransportTypeMetadata[USBTransportTypeControl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBControl), Name: "Control", LayerType: LayerTypeUSBControl} + USBTransportTypeMetadata[USBTransportTypeBulk] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBBulk), Name: "Bulk", LayerType: LayerTypeUSBBulk} +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/enums_generated.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/enums_generated.go new file mode 100644 index 00000000..bf77aac5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/enums_generated.go @@ -0,0 +1,434 @@ +// Copyright 2012 Google, Inc. All rights reserved. + +package layers + +// Created by gen2.go, don't edit manually +// Generated at 2017-10-23 10:20:24.458771856 -0600 MDT m=+0.001159033 + +import ( + "fmt" + + "github.com/google/gopacket" +) + +func init() { + initUnknownTypesForLinkType() + initUnknownTypesForEthernetType() + initUnknownTypesForPPPType() + initUnknownTypesForIPProtocol() + initUnknownTypesForSCTPChunkType() + initUnknownTypesForPPPoECode() + initUnknownTypesForFDDIFrameControl() + initUnknownTypesForEAPOLType() + initUnknownTypesForProtocolFamily() + initUnknownTypesForDot11Type() + initUnknownTypesForUSBTransportType() + initActualTypeData() +} + +// Decoder calls LinkTypeMetadata.DecodeWith's decoder. +func (a LinkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return LinkTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns LinkTypeMetadata.Name. +func (a LinkType) String() string { + return LinkTypeMetadata[a].Name +} + +// LayerType returns LinkTypeMetadata.LayerType. 
+func (a LinkType) LayerType() gopacket.LayerType { + return LinkTypeMetadata[a].LayerType +} + +type errorDecoderForLinkType int + +func (a *errorDecoderForLinkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForLinkType) Error() string { + return fmt.Sprintf("Unable to decode LinkType %d", int(*a)) +} + +var errorDecodersForLinkType [256]errorDecoderForLinkType +var LinkTypeMetadata [256]EnumMetadata + +func initUnknownTypesForLinkType() { + for i := 0; i < 256; i++ { + errorDecodersForLinkType[i] = errorDecoderForLinkType(i) + LinkTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForLinkType[i], + Name: "UnknownLinkType", + } + } +} + +// Decoder calls EthernetTypeMetadata.DecodeWith's decoder. +func (a EthernetType) Decode(data []byte, p gopacket.PacketBuilder) error { + return EthernetTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns EthernetTypeMetadata.Name. +func (a EthernetType) String() string { + return EthernetTypeMetadata[a].Name +} + +// LayerType returns EthernetTypeMetadata.LayerType. +func (a EthernetType) LayerType() gopacket.LayerType { + return EthernetTypeMetadata[a].LayerType +} + +type errorDecoderForEthernetType int + +func (a *errorDecoderForEthernetType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForEthernetType) Error() string { + return fmt.Sprintf("Unable to decode EthernetType %d", int(*a)) +} + +var errorDecodersForEthernetType [65536]errorDecoderForEthernetType +var EthernetTypeMetadata [65536]EnumMetadata + +func initUnknownTypesForEthernetType() { + for i := 0; i < 65536; i++ { + errorDecodersForEthernetType[i] = errorDecoderForEthernetType(i) + EthernetTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForEthernetType[i], + Name: "UnknownEthernetType", + } + } +} + +// Decoder calls PPPTypeMetadata.DecodeWith's decoder. +func (a PPPType) Decode(data []byte, p gopacket.PacketBuilder) error { + return PPPTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns PPPTypeMetadata.Name. +func (a PPPType) String() string { + return PPPTypeMetadata[a].Name +} + +// LayerType returns PPPTypeMetadata.LayerType. +func (a PPPType) LayerType() gopacket.LayerType { + return PPPTypeMetadata[a].LayerType +} + +type errorDecoderForPPPType int + +func (a *errorDecoderForPPPType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForPPPType) Error() string { + return fmt.Sprintf("Unable to decode PPPType %d", int(*a)) +} + +var errorDecodersForPPPType [65536]errorDecoderForPPPType +var PPPTypeMetadata [65536]EnumMetadata + +func initUnknownTypesForPPPType() { + for i := 0; i < 65536; i++ { + errorDecodersForPPPType[i] = errorDecoderForPPPType(i) + PPPTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForPPPType[i], + Name: "UnknownPPPType", + } + } +} + +// Decoder calls IPProtocolMetadata.DecodeWith's decoder. +func (a IPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error { + return IPProtocolMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns IPProtocolMetadata.Name. +func (a IPProtocol) String() string { + return IPProtocolMetadata[a].Name +} + +// LayerType returns IPProtocolMetadata.LayerType. 
+func (a IPProtocol) LayerType() gopacket.LayerType { + return IPProtocolMetadata[a].LayerType +} + +type errorDecoderForIPProtocol int + +func (a *errorDecoderForIPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForIPProtocol) Error() string { + return fmt.Sprintf("Unable to decode IPProtocol %d", int(*a)) +} + +var errorDecodersForIPProtocol [256]errorDecoderForIPProtocol +var IPProtocolMetadata [256]EnumMetadata + +func initUnknownTypesForIPProtocol() { + for i := 0; i < 256; i++ { + errorDecodersForIPProtocol[i] = errorDecoderForIPProtocol(i) + IPProtocolMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForIPProtocol[i], + Name: "UnknownIPProtocol", + } + } +} + +// Decoder calls SCTPChunkTypeMetadata.DecodeWith's decoder. +func (a SCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return SCTPChunkTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns SCTPChunkTypeMetadata.Name. +func (a SCTPChunkType) String() string { + return SCTPChunkTypeMetadata[a].Name +} + +// LayerType returns SCTPChunkTypeMetadata.LayerType. +func (a SCTPChunkType) LayerType() gopacket.LayerType { + return SCTPChunkTypeMetadata[a].LayerType +} + +type errorDecoderForSCTPChunkType int + +func (a *errorDecoderForSCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForSCTPChunkType) Error() string { + return fmt.Sprintf("Unable to decode SCTPChunkType %d", int(*a)) +} + +var errorDecodersForSCTPChunkType [256]errorDecoderForSCTPChunkType +var SCTPChunkTypeMetadata [256]EnumMetadata + +func initUnknownTypesForSCTPChunkType() { + for i := 0; i < 256; i++ { + errorDecodersForSCTPChunkType[i] = errorDecoderForSCTPChunkType(i) + SCTPChunkTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForSCTPChunkType[i], + Name: "UnknownSCTPChunkType", + } + } +} + +// Decoder calls PPPoECodeMetadata.DecodeWith's decoder. +func (a PPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error { + return PPPoECodeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns PPPoECodeMetadata.Name. +func (a PPPoECode) String() string { + return PPPoECodeMetadata[a].Name +} + +// LayerType returns PPPoECodeMetadata.LayerType. +func (a PPPoECode) LayerType() gopacket.LayerType { + return PPPoECodeMetadata[a].LayerType +} + +type errorDecoderForPPPoECode int + +func (a *errorDecoderForPPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForPPPoECode) Error() string { + return fmt.Sprintf("Unable to decode PPPoECode %d", int(*a)) +} + +var errorDecodersForPPPoECode [256]errorDecoderForPPPoECode +var PPPoECodeMetadata [256]EnumMetadata + +func initUnknownTypesForPPPoECode() { + for i := 0; i < 256; i++ { + errorDecodersForPPPoECode[i] = errorDecoderForPPPoECode(i) + PPPoECodeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForPPPoECode[i], + Name: "UnknownPPPoECode", + } + } +} + +// Decoder calls FDDIFrameControlMetadata.DecodeWith's decoder. +func (a FDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error { + return FDDIFrameControlMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns FDDIFrameControlMetadata.Name. +func (a FDDIFrameControl) String() string { + return FDDIFrameControlMetadata[a].Name +} + +// LayerType returns FDDIFrameControlMetadata.LayerType. 
+func (a FDDIFrameControl) LayerType() gopacket.LayerType { + return FDDIFrameControlMetadata[a].LayerType +} + +type errorDecoderForFDDIFrameControl int + +func (a *errorDecoderForFDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForFDDIFrameControl) Error() string { + return fmt.Sprintf("Unable to decode FDDIFrameControl %d", int(*a)) +} + +var errorDecodersForFDDIFrameControl [256]errorDecoderForFDDIFrameControl +var FDDIFrameControlMetadata [256]EnumMetadata + +func initUnknownTypesForFDDIFrameControl() { + for i := 0; i < 256; i++ { + errorDecodersForFDDIFrameControl[i] = errorDecoderForFDDIFrameControl(i) + FDDIFrameControlMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForFDDIFrameControl[i], + Name: "UnknownFDDIFrameControl", + } + } +} + +// Decoder calls EAPOLTypeMetadata.DecodeWith's decoder. +func (a EAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error { + return EAPOLTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns EAPOLTypeMetadata.Name. +func (a EAPOLType) String() string { + return EAPOLTypeMetadata[a].Name +} + +// LayerType returns EAPOLTypeMetadata.LayerType. +func (a EAPOLType) LayerType() gopacket.LayerType { + return EAPOLTypeMetadata[a].LayerType +} + +type errorDecoderForEAPOLType int + +func (a *errorDecoderForEAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForEAPOLType) Error() string { + return fmt.Sprintf("Unable to decode EAPOLType %d", int(*a)) +} + +var errorDecodersForEAPOLType [256]errorDecoderForEAPOLType +var EAPOLTypeMetadata [256]EnumMetadata + +func initUnknownTypesForEAPOLType() { + for i := 0; i < 256; i++ { + errorDecodersForEAPOLType[i] = errorDecoderForEAPOLType(i) + EAPOLTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForEAPOLType[i], + Name: "UnknownEAPOLType", + } + } +} + +// Decoder calls ProtocolFamilyMetadata.DecodeWith's decoder. +func (a ProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error { + return ProtocolFamilyMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns ProtocolFamilyMetadata.Name. +func (a ProtocolFamily) String() string { + return ProtocolFamilyMetadata[a].Name +} + +// LayerType returns ProtocolFamilyMetadata.LayerType. +func (a ProtocolFamily) LayerType() gopacket.LayerType { + return ProtocolFamilyMetadata[a].LayerType +} + +type errorDecoderForProtocolFamily int + +func (a *errorDecoderForProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForProtocolFamily) Error() string { + return fmt.Sprintf("Unable to decode ProtocolFamily %d", int(*a)) +} + +var errorDecodersForProtocolFamily [256]errorDecoderForProtocolFamily +var ProtocolFamilyMetadata [256]EnumMetadata + +func initUnknownTypesForProtocolFamily() { + for i := 0; i < 256; i++ { + errorDecodersForProtocolFamily[i] = errorDecoderForProtocolFamily(i) + ProtocolFamilyMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForProtocolFamily[i], + Name: "UnknownProtocolFamily", + } + } +} + +// Decoder calls Dot11TypeMetadata.DecodeWith's decoder. +func (a Dot11Type) Decode(data []byte, p gopacket.PacketBuilder) error { + return Dot11TypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns Dot11TypeMetadata.Name. +func (a Dot11Type) String() string { + return Dot11TypeMetadata[a].Name +} + +// LayerType returns Dot11TypeMetadata.LayerType. 
+func (a Dot11Type) LayerType() gopacket.LayerType { + return Dot11TypeMetadata[a].LayerType +} + +type errorDecoderForDot11Type int + +func (a *errorDecoderForDot11Type) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForDot11Type) Error() string { + return fmt.Sprintf("Unable to decode Dot11Type %d", int(*a)) +} + +var errorDecodersForDot11Type [256]errorDecoderForDot11Type +var Dot11TypeMetadata [256]EnumMetadata + +func initUnknownTypesForDot11Type() { + for i := 0; i < 256; i++ { + errorDecodersForDot11Type[i] = errorDecoderForDot11Type(i) + Dot11TypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForDot11Type[i], + Name: "UnknownDot11Type", + } + } +} + +// Decoder calls USBTransportTypeMetadata.DecodeWith's decoder. +func (a USBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error { + return USBTransportTypeMetadata[a].DecodeWith.Decode(data, p) +} + +// String returns USBTransportTypeMetadata.Name. +func (a USBTransportType) String() string { + return USBTransportTypeMetadata[a].Name +} + +// LayerType returns USBTransportTypeMetadata.LayerType. +func (a USBTransportType) LayerType() gopacket.LayerType { + return USBTransportTypeMetadata[a].LayerType +} + +type errorDecoderForUSBTransportType int + +func (a *errorDecoderForUSBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error { + return a +} +func (a *errorDecoderForUSBTransportType) Error() string { + return fmt.Sprintf("Unable to decode USBTransportType %d", int(*a)) +} + +var errorDecodersForUSBTransportType [256]errorDecoderForUSBTransportType +var USBTransportTypeMetadata [256]EnumMetadata + +func initUnknownTypesForUSBTransportType() { + for i := 0; i < 256; i++ { + errorDecodersForUSBTransportType[i] = errorDecoderForUSBTransportType(i) + USBTransportTypeMetadata[i] = EnumMetadata{ + DecodeWith: &errorDecodersForUSBTransportType[i], + Name: "UnknownUSBTransportType", + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/etherip.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/etherip.go new file mode 100644 index 00000000..5b7b7229 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/etherip.go @@ -0,0 +1,45 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// EtherIP is the struct for storing RFC 3378 EtherIP packet headers. +type EtherIP struct { + BaseLayer + Version uint8 + Reserved uint16 +} + +// LayerType returns gopacket.LayerTypeEtherIP. +func (e *EtherIP) LayerType() gopacket.LayerType { return LayerTypeEtherIP } + +// DecodeFromBytes decodes the given bytes into this layer. +func (e *EtherIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + e.Version = data[0] >> 4 + e.Reserved = binary.BigEndian.Uint16(data[:2]) & 0x0fff + e.BaseLayer = BaseLayer{data[:2], data[2:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (e *EtherIP) CanDecode() gopacket.LayerClass { + return LayerTypeEtherIP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. 
+func (e *EtherIP) NextLayerType() gopacket.LayerType {
+	return LayerTypeEthernet
+}
+
+func decodeEtherIP(data []byte, p gopacket.PacketBuilder) error {
+	e := &EtherIP{}
+	return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ethernet.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ethernet.go
new file mode 100644
index 00000000..b73748f2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ethernet.go
@@ -0,0 +1,123 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/google/gopacket"
+	"net"
+)
+
+// EthernetBroadcast is the broadcast MAC address used by Ethernet.
+var EthernetBroadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// Ethernet is the layer for Ethernet frame headers.
+type Ethernet struct {
+	BaseLayer
+	SrcMAC, DstMAC net.HardwareAddr
+	EthernetType   EthernetType
+	// Length is only set if a length field exists within this header. Ethernet
+	// headers follow two different standards, one that uses an EthernetType,
+	// the other which defines a length that is followed by an LLC header
+	// (802.3). If the former is the case, we set EthernetType and Length stays
+	// 0. In the latter case, we set Length and EthernetType = EthernetTypeLLC.
+	Length uint16
+}
+
+// LayerType returns LayerTypeEthernet
+func (e *Ethernet) LayerType() gopacket.LayerType { return LayerTypeEthernet }
+
+func (e *Ethernet) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, e.SrcMAC, e.DstMAC)
+}
+
+func (eth *Ethernet) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 14 {
+		return errors.New("Ethernet packet too small")
+	}
+	eth.DstMAC = net.HardwareAddr(data[0:6])
+	eth.SrcMAC = net.HardwareAddr(data[6:12])
+	eth.EthernetType = EthernetType(binary.BigEndian.Uint16(data[12:14]))
+	eth.BaseLayer = BaseLayer{data[:14], data[14:]}
+	eth.Length = 0
+	if eth.EthernetType < 0x0600 { // values below 0x0600 are 802.3 length fields, not EtherTypes
+		eth.Length = uint16(eth.EthernetType)
+		eth.EthernetType = EthernetTypeLLC
+		if cmp := len(eth.Payload) - int(eth.Length); cmp < 0 {
+			df.SetTruncated()
+		} else if cmp > 0 {
+			// Strip off bytes at the end, since we have too many bytes
+			eth.Payload = eth.Payload[:len(eth.Payload)-cmp]
+		}
+		// fmt.Println(eth)
+	}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (eth *Ethernet) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if len(eth.DstMAC) != 6 { + return fmt.Errorf("invalid dst MAC: %v", eth.DstMAC) + } + if len(eth.SrcMAC) != 6 { + return fmt.Errorf("invalid src MAC: %v", eth.SrcMAC) + } + payload := b.Bytes() + bytes, err := b.PrependBytes(14) + if err != nil { + return err + } + copy(bytes, eth.DstMAC) + copy(bytes[6:], eth.SrcMAC) + if eth.Length != 0 || eth.EthernetType == EthernetTypeLLC { + if opts.FixLengths { + eth.Length = uint16(len(payload)) + } + if eth.EthernetType != EthernetTypeLLC { + return fmt.Errorf("ethernet type %v not compatible with length value %v", eth.EthernetType, eth.Length) + } else if eth.Length > 0x0600 { + return fmt.Errorf("invalid ethernet length %v", eth.Length) + } + binary.BigEndian.PutUint16(bytes[12:], eth.Length) + } else { + binary.BigEndian.PutUint16(bytes[12:], uint16(eth.EthernetType)) + } + length := len(b.Bytes()) + if length < 60 { + // Pad out to 60 bytes. + padding, err := b.AppendBytes(60 - length) + if err != nil { + return err + } + copy(padding, lotsOfZeros[:]) + } + return nil +} + +func (eth *Ethernet) CanDecode() gopacket.LayerClass { + return LayerTypeEthernet +} + +func (eth *Ethernet) NextLayerType() gopacket.LayerType { + return eth.EthernetType.LayerType() +} + +func decodeEthernet(data []byte, p gopacket.PacketBuilder) error { + eth := &Ethernet{} + err := eth.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(eth) + p.SetLinkLayer(eth) + return p.NextDecoder(eth.EthernetType) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/fddi.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/fddi.go new file mode 100644 index 00000000..ed9e1957 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/fddi.go @@ -0,0 +1,41 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "github.com/google/gopacket" + "net" +) + +// FDDI contains the header for FDDI frames. +type FDDI struct { + BaseLayer + FrameControl FDDIFrameControl + Priority uint8 + SrcMAC, DstMAC net.HardwareAddr +} + +// LayerType returns LayerTypeFDDI. +func (f *FDDI) LayerType() gopacket.LayerType { return LayerTypeFDDI } + +// LinkFlow returns a new flow of type EndpointMAC. +func (f *FDDI) LinkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointMAC, f.SrcMAC, f.DstMAC) +} + +func decodeFDDI(data []byte, p gopacket.PacketBuilder) error { + f := &FDDI{ + FrameControl: FDDIFrameControl(data[0] & 0xF8), + Priority: data[0] & 0x07, + SrcMAC: net.HardwareAddr(data[1:7]), + DstMAC: net.HardwareAddr(data[7:13]), + BaseLayer: BaseLayer{data[:13], data[13:]}, + } + p.SetLinkLayer(f) + p.AddLayer(f) + return p.NextDecoder(f.FrameControl) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gen_linted.sh b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gen_linted.sh new file mode 100644 index 00000000..75c701f4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gen_linted.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +for i in *.go; do golint $i | grep -q . 
|| echo $i; done > .linted
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/geneve.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/geneve.go
new file mode 100644
index 00000000..72fe7c77
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/geneve.go
@@ -0,0 +1,110 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// Geneve is specified here https://tools.ietf.org/html/draft-ietf-nvo3-geneve-03
+// Geneve Header:
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Ver|  Opt Len  |O|C|    Rsvd.  |          Protocol Type        |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |        Virtual Network Identifier (VNI)       |    Reserved   |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                    Variable Length Options                    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type Geneve struct {
+	BaseLayer
+	Version        uint8        // 2 bits
+	OptionsLength  uint8        // 6 bits
+	OAMPacket      bool         // 1 bit
+	CriticalOption bool         // 1 bit
+	Protocol       EthernetType // 16 bits
+	VNI            uint32       // 24 bits
+	Options        []*GeneveOption
+}
+
+// Geneve Tunnel Options
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |          Option Class         |      Type     |R|R|R| Length  |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                      Variable Option Data                     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type GeneveOption struct {
+	Class  uint16 // 16 bits
+	Type   uint8  // 8 bits
+	Flags  uint8  // 3 bits
+	Length uint8  // 5 bits
+	Data   []byte
+}
+
+// LayerType returns LayerTypeGeneve
+func (gn *Geneve) LayerType() gopacket.LayerType { return LayerTypeGeneve }
+
+func decodeGeneveOption(data []byte, gn *Geneve) (*GeneveOption, uint8) {
+	opt := &GeneveOption{}
+
+	opt.Class = binary.BigEndian.Uint16(data[0:2])
+	opt.Type = data[2]
+	opt.Flags = data[3] >> 5          // top 3 bits are the reserved flag bits
+	opt.Length = (data[3]&0x1f)*4 + 4 // 5-bit length in 4-byte multiples, plus the 4-byte option header
+
+	opt.Data = make([]byte, opt.Length-4)
+	copy(opt.Data, data[4:opt.Length])
+
+	return opt, opt.Length
+}
+
+func (gn *Geneve) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 { // fixed header is 8 bytes
+		df.SetTruncated()
+		return errors.New("geneve packet too short")
+	}
+
+	gn.Version = data[0] >> 6 // Ver is the top 2 bits of the first byte
+	gn.OptionsLength = (data[0] & 0x3f) * 4
+
+	gn.OAMPacket = data[1]&0x80 > 0
+	gn.CriticalOption = data[1]&0x40 > 0
+	gn.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+
+	var buf [4]byte
+	copy(buf[1:], data[4:7]) // VNI is 24 bits; left-pad to 32 for the big-endian read
+	gn.VNI = binary.BigEndian.Uint32(buf[:])
+
+	offset, length := uint8(8), int32(gn.OptionsLength)
+	if len(data) < int(length)+8 {
+		df.SetTruncated()
+		return errors.New("geneve packet too short")
+	}
+
+	for length > 0 {
+		opt, len := decodeGeneveOption(data[offset:], gn)
+		gn.Options = append(gn.Options, opt)
+
+		length -= int32(len)
+		offset += len
+	}
+
+	gn.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+
+	return nil
+}
+
+func (gn *Geneve) NextLayerType() gopacket.LayerType {
+	return gn.Protocol.LayerType()
+}
+
+func decodeGeneve(data []byte, p gopacket.PacketBuilder) error {
+	gn := &Geneve{}
+	return decodingLayerDecoder(gn, data, p)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gre.go
b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gre.go new file mode 100644 index 00000000..9c5e7d24 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gre.go @@ -0,0 +1,200 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + + "github.com/google/gopacket" +) + +// GRE is a Generic Routing Encapsulation header. +type GRE struct { + BaseLayer + ChecksumPresent, RoutingPresent, KeyPresent, SeqPresent, StrictSourceRoute, AckPresent bool + RecursionControl, Flags, Version uint8 + Protocol EthernetType + Checksum, Offset uint16 + Key, Seq, Ack uint32 + *GRERouting +} + +// GRERouting is GRE routing information, present if the RoutingPresent flag is +// set. +type GRERouting struct { + AddressFamily uint16 + SREOffset, SRELength uint8 + RoutingInformation []byte + Next *GRERouting +} + +// LayerType returns gopacket.LayerTypeGRE. +func (g *GRE) LayerType() gopacket.LayerType { return LayerTypeGRE } + +// DecodeFromBytes decodes the given bytes into this layer. +func (g *GRE) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + g.ChecksumPresent = data[0]&0x80 != 0 + g.RoutingPresent = data[0]&0x40 != 0 + g.KeyPresent = data[0]&0x20 != 0 + g.SeqPresent = data[0]&0x10 != 0 + g.StrictSourceRoute = data[0]&0x08 != 0 + g.AckPresent = data[1]&0x80 != 0 + g.RecursionControl = data[0] & 0x7 + g.Flags = data[1] >> 3 + g.Version = data[1] & 0x7 + g.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4])) + offset := 4 + if g.ChecksumPresent || g.RoutingPresent { + g.Checksum = binary.BigEndian.Uint16(data[offset : offset+2]) + g.Offset = binary.BigEndian.Uint16(data[offset+2 : offset+4]) + offset += 4 + } + if g.KeyPresent { + g.Key = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + } + if g.SeqPresent { + g.Seq = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + } + if g.RoutingPresent { + tail := &g.GRERouting + for { + sre := &GRERouting{ + AddressFamily: binary.BigEndian.Uint16(data[offset : offset+2]), + SREOffset: data[offset+2], + SRELength: data[offset+3], + } + sre.RoutingInformation = data[offset+4 : offset+4+int(sre.SRELength)] + offset += 4 + int(sre.SRELength) + if sre.AddressFamily == 0 && sre.SRELength == 0 { + break + } + (*tail) = sre + tail = &sre.Next + } + } + if g.AckPresent { + g.Ack = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + } + g.BaseLayer = BaseLayer{data[:offset], data[offset:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the SerializationBuffer, +// implementing gopacket.SerializableLayer. See the docs for gopacket.SerializableLayer for more info. +func (g *GRE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + size := 4 + if g.ChecksumPresent || g.RoutingPresent { + size += 4 + } + if g.KeyPresent { + size += 4 + } + if g.SeqPresent { + size += 4 + } + if g.RoutingPresent { + r := g.GRERouting + for r != nil { + size += 4 + int(r.SRELength) + r = r.Next + } + size += 4 + } + if g.AckPresent { + size += 4 + } + buf, err := b.PrependBytes(size) + if err != nil { + return err + } + // Reset any potentially dirty memory in the first 2 bytes, as these use OR to set flags. 
+ buf[0] = 0 + buf[1] = 0 + if g.ChecksumPresent { + buf[0] |= 0x80 + } + if g.RoutingPresent { + buf[0] |= 0x40 + } + if g.KeyPresent { + buf[0] |= 0x20 + } + if g.SeqPresent { + buf[0] |= 0x10 + } + if g.StrictSourceRoute { + buf[0] |= 0x08 + } + if g.AckPresent { + buf[1] |= 0x80 + } + buf[0] |= g.RecursionControl + buf[1] |= g.Flags << 3 + buf[1] |= g.Version + binary.BigEndian.PutUint16(buf[2:4], uint16(g.Protocol)) + offset := 4 + if g.ChecksumPresent || g.RoutingPresent { + // Don't write the checksum value yet, as we may need to compute it, + // which requires the entire header be complete. + // Instead we zeroize the memory in case it is dirty. + buf[offset] = 0 + buf[offset+1] = 0 + binary.BigEndian.PutUint16(buf[offset+2:offset+4], g.Offset) + offset += 4 + } + if g.KeyPresent { + binary.BigEndian.PutUint32(buf[offset:offset+4], g.Key) + offset += 4 + } + if g.SeqPresent { + binary.BigEndian.PutUint32(buf[offset:offset+4], g.Seq) + offset += 4 + } + if g.RoutingPresent { + sre := g.GRERouting + for sre != nil { + binary.BigEndian.PutUint16(buf[offset:offset+2], sre.AddressFamily) + buf[offset+2] = sre.SREOffset + buf[offset+3] = sre.SRELength + copy(buf[offset+4:offset+4+int(sre.SRELength)], sre.RoutingInformation) + offset += 4 + int(sre.SRELength) + sre = sre.Next + } + // Terminate routing field with a "NULL" SRE. + binary.BigEndian.PutUint32(buf[offset:offset+4], 0) + } + if g.AckPresent { + binary.BigEndian.PutUint32(buf[offset:offset+4], g.Ack) + offset += 4 + } + if g.ChecksumPresent { + if opts.ComputeChecksums { + g.Checksum = tcpipChecksum(b.Bytes(), 0) + } + + binary.BigEndian.PutUint16(buf[4:6], g.Checksum) + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (g *GRE) CanDecode() gopacket.LayerClass { + return LayerTypeGRE +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (g *GRE) NextLayerType() gopacket.LayerType { + return g.Protocol.LayerType() +} + +func decodeGRE(data []byte, p gopacket.PacketBuilder) error { + g := &GRE{} + return decodingLayerDecoder(g, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gtp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gtp.go new file mode 100644 index 00000000..0ec8a6a0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/gtp.go @@ -0,0 +1,181 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. +// + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +const gtpMinimumSizeInBytes int = 8 + +// GTPExtensionHeader is used to carry extra data and enable future extensions of the GTP without the need to use another version number. +type GTPExtensionHeader struct { + Type uint8 + Content []byte +} + +// GTPv1U protocol is used to exchange user data over GTP tunnels across the Sx interfaces. 
+// Defined in https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=1595 +type GTPv1U struct { + BaseLayer + Version uint8 + ProtocolType uint8 + Reserved uint8 + ExtensionHeaderFlag bool + SequenceNumberFlag bool + NPDUFlag bool + MessageType uint8 + MessageLength uint16 + TEID uint32 + SequenceNumber uint16 + NPDU uint8 + GTPExtensionHeaders []GTPExtensionHeader +} + +// LayerType returns LayerTypeGTPV1U +func (g *GTPv1U) LayerType() gopacket.LayerType { return LayerTypeGTPv1U } + +// DecodeFromBytes analyses a byte slice and attempts to decode it as a GTPv1U packet +func (g *GTPv1U) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + hLen := gtpMinimumSizeInBytes + dLen := len(data) + if dLen < hLen { + return fmt.Errorf("GTP packet too small: %d bytes", dLen) + } + g.Version = (data[0] >> 5) & 0x07 + g.ProtocolType = (data[0] >> 4) & 0x01 + g.Reserved = (data[0] >> 3) & 0x01 + g.SequenceNumberFlag = ((data[0] >> 1) & 0x01) == 1 + g.NPDUFlag = (data[0] & 0x01) == 1 + g.ExtensionHeaderFlag = ((data[0] >> 2) & 0x01) == 1 + g.MessageType = data[1] + g.MessageLength = binary.BigEndian.Uint16(data[2:4]) + pLen := 8 + g.MessageLength + if uint16(dLen) < pLen { + return fmt.Errorf("GTP packet too small: %d bytes", dLen) + } + // Field used to multiplex different connections in the same GTP tunnel. + g.TEID = binary.BigEndian.Uint32(data[4:8]) + cIndex := uint16(hLen) + if g.SequenceNumberFlag || g.NPDUFlag || g.ExtensionHeaderFlag { + hLen += 4 + cIndex += 4 + if dLen < hLen { + return fmt.Errorf("GTP packet too small: %d bytes", dLen) + } + if g.SequenceNumberFlag { + g.SequenceNumber = binary.BigEndian.Uint16(data[8:10]) + } + if g.NPDUFlag { + g.NPDU = data[10] + } + if g.ExtensionHeaderFlag { + extensionFlag := true + for extensionFlag { + extensionType := uint8(data[cIndex-1]) + extensionLength := uint(data[cIndex]) + if extensionLength == 0 { + return fmt.Errorf("GTP packet with invalid extension header") + } + // extensionLength is in 4-octet units + lIndex := cIndex + (uint16(extensionLength) * 4) + if uint16(dLen) < lIndex { + fmt.Println(dLen, lIndex) + return fmt.Errorf("GTP packet with small extension header: %d bytes", dLen) + } + content := data[cIndex+1 : lIndex-1] + eh := GTPExtensionHeader{Type: extensionType, Content: content} + g.GTPExtensionHeaders = append(g.GTPExtensionHeaders, eh) + cIndex = lIndex + // Check if coming bytes are from an extension header + extensionFlag = data[cIndex-1] != 0 + + } + } + } + g.BaseLayer = BaseLayer{Contents: data[:cIndex], Payload: data[cIndex:]} + return nil + +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (g *GTPv1U) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(gtpMinimumSizeInBytes)
+	if err != nil {
+		return err
+	}
+	data[0] |= (g.Version << 5)
+	data[0] |= (1 << 4) // ProtocolType bit: 1 = GTP, 0 = GTP'
+	if len(g.GTPExtensionHeaders) > 0 {
+		data[0] |= 0x04
+		g.ExtensionHeaderFlag = true
+	}
+	if g.SequenceNumberFlag {
+		data[0] |= 0x02
+	}
+	if g.NPDUFlag {
+		data[0] |= 0x01
+	}
+	data[1] = g.MessageType
+	binary.BigEndian.PutUint16(data[2:4], g.MessageLength)
+	binary.BigEndian.PutUint32(data[4:8], g.TEID)
+	if g.ExtensionHeaderFlag || g.SequenceNumberFlag || g.NPDUFlag {
+		data, err := b.AppendBytes(4)
+		if err != nil {
+			return err
+		}
+		binary.BigEndian.PutUint16(data[:2], g.SequenceNumber)
+		data[2] = g.NPDU
+		for _, eh := range g.GTPExtensionHeaders {
+			data[len(data)-1] = eh.Type
+			lContent := len(eh.Content)
+			// extensionLength is in 4-octet units
+			extensionLength := (lContent + 2) / 4
+			// Get two extra bytes for the next extension header type and length
+			data, err = b.AppendBytes(lContent + 2)
+			if err != nil {
+				return err
+			}
+			data[0] = byte(extensionLength)
+			copy(data[1:lContent+1], eh.Content)
+		}
+	}
+	return nil
+
+}
+
+// CanDecode returns a set of layers that GTP objects can decode.
+func (g *GTPv1U) CanDecode() gopacket.LayerClass {
+	return LayerTypeGTPv1U
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to decode after this one.
+func (g *GTPv1U) NextLayerType() gopacket.LayerType {
+	version := uint8(g.LayerPayload()[0]) >> 4
+	if version == 4 {
+		return LayerTypeIPv4
+	} else if version == 6 {
+		return LayerTypeIPv6
+	} else {
+		return LayerTypePPP
+	}
+}
+
+func decodeGTPv1u(data []byte, p gopacket.PacketBuilder) error {
+	gtp := &GTPv1U{}
+	err := gtp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(gtp)
+	return p.NextDecoder(gtp.NextLayerType())
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/iana_ports.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/iana_ports.go
new file mode 100644
index 00000000..ddcf3ecd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/iana_ports.go
@@ -0,0 +1,11351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at 2017-10-23 09:57:28.214859163 -0600 MDT m=+1.011679290
+// Fetched from "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml"
+
+// TCPPortNames contains the port names for all TCP ports.
+var TCPPortNames = tcpPortNames
+
+// UDPPortNames contains the port names for all UDP ports.
+var UDPPortNames = udpPortNames
+
+// SCTPPortNames contains the port names for all SCTP ports.
+var SCTPPortNames = sctpPortNames
+
+var tcpPortNames = map[TCPPort]string{
+	1:   "tcpmux",
+	2:   "compressnet",
+	3:   "compressnet",
+	5:   "rje",
+	7:   "echo",
+	9:   "discard",
+	11:  "systat",
+	13:  "daytime",
+	17:  "qotd",
+	18:  "msp",
+	19:  "chargen",
+	20:  "ftp-data",
+	21:  "ftp",
+	22:  "ssh",
+	23:  "telnet",
+	25:  "smtp",
+	27:  "nsw-fe",
+	29:  "msg-icp",
+	31:  "msg-auth",
+	33:  "dsp",
+	37:  "time",
+	38:  "rap",
+	39:  "rlp",
+	41:  "graphics",
+	42:  "name",
+	43:  "nicname",
+	44:  "mpm-flags",
+	45:  "mpm",
+	46:  "mpm-snd",
+	48:  "auditd",
+	49:  "tacacs",
+	50:  "re-mail-ck",
+	52:  "xns-time",
+	53:  "domain",
+	54:  "xns-ch",
+	55:  "isi-gl",
+	56:  "xns-auth",
+	58:  "xns-mail",
+	62:  "acas",
+	63:  "whoispp",
+	64:  "covia",
+	65:  "tacacs-ds",
+	66:  "sql-net",
+	67:  "bootps",
+	68:  "bootpc",
+	69:  "tftp",
+	70:  "gopher",
+	71:  "netrjs-1",
+	72:  "netrjs-2",
+	73:  "netrjs-3",
+	74:  "netrjs-4",
+	76:  "deos",
+	78:  "vettcp",
+	79:  "finger",
+	80:  "http",
+	82:  "xfer",
+	83:  "mit-ml-dev",
+	84:  "ctf",
+	85:  "mit-ml-dev",
+	86:  "mfcobol",
+	88:  "kerberos",
+	89:  "su-mit-tg",
+	90:  "dnsix",
+	91:  "mit-dov",
+	92:  "npp",
+	93:  "dcp",
+	94:  "objcall",
+	95:  "supdup",
+	96:  "dixie",
+	97:  "swift-rvf",
+	98:  "tacnews",
+	99:  "metagram",
+	101: "hostname",
+	102: "iso-tsap",
+	103: "gppitnp",
+	104: "acr-nema",
+	105: "cso",
+	106: "3com-tsmux",
+	107: "rtelnet",
+	108: "snagas",
+	109: "pop2",
+	110: "pop3",
+	111: "sunrpc",
+	112: "mcidas",
+	113: "ident",
+	115: "sftp",
+	116: "ansanotify",
+	117: "uucp-path",
+	118: "sqlserv",
+	119: "nntp",
+	120: "cfdptkt",
+	121: "erpc",
+	122: "smakynet",
+	123: "ntp",
+	124: "ansatrader",
+	125: "locus-map",
+	126: "nxedit",
+	127: "locus-con",
+	128: "gss-xlicen",
+	129: "pwdgen",
+	130: "cisco-fna",
+	131: "cisco-tna",
+	132: "cisco-sys",
+	133: "statsrv",
+	134: "ingres-net",
+	135: "epmap",
+	136: "profile",
+	137: "netbios-ns",
+	138: "netbios-dgm",
+	139: "netbios-ssn",
+	140: "emfis-data",
+	141: "emfis-cntl",
+	142: "bl-idm",
+	143: "imap",
+	144: "uma",
+	145: "uaac",
+	146: "iso-tp0",
+	147: "iso-ip",
+	148: "jargon",
+	149: "aed-512",
+	150: "sql-net",
+	151: "hems",
+	152: "bftp",
+	153: "sgmp",
+	154: "netsc-prod",
+	155: "netsc-dev",
+	156: "sqlsrv",
+	157: "knet-cmp",
+	158: "pcmail-srv",
+	159: "nss-routing",
+	160: "sgmp-traps",
+	161: "snmp",
+	162: "snmptrap",
+	163: "cmip-man",
+	164: "cmip-agent",
+	165: "xns-courier",
+	166: "s-net",
+	167: "namp",
+	168: "rsvd",
+	169: "send",
+	170: "print-srv",
+	171: "multiplex",
+	172: "cl-1",
+	173: "xyplex-mux",
+	174: "mailq",
+	175: "vmnet",
+	176: "genrad-mux",
+	177: "xdmcp",
+	178: "nextstep",
+	179: "bgp",
+	180: "ris",
+	181: "unify",
+	182: "audit",
+	183: "ocbinder",
+	184: "ocserver",
+	185: "remote-kis",
+	186: "kis",
+	187: "aci",
+	188: "mumps",
+	189: "qft",
+	190: "gacp",
+	191: "prospero",
+	192: "osu-nms",
+	193: "srmp",
+	194: "irc",
+	195: "dn6-nlm-aud",
+	196: "dn6-smm-red",
+	197: "dls",
+	198: "dls-mon",
+	199: "smux",
+	200: "src",
+	201: "at-rtmp",
+	202: "at-nbp",
+	203: "at-3",
+	204: "at-echo",
+	205: "at-5",
+	206: "at-zis",
+	207: "at-7",
+	208: "at-8",
+	209: "qmtp",
+	210: "z39-50",
+	211: "914c-g",
+	212: "anet",
+	213: "ipx",
+	214: "vmpwscs",
+	215: "softpc",
+	216: "CAIlic",
+	217: "dbase",
+	218: "mpp",
+	219: "uarps",
+	220: "imap3",
+	221: "fln-spx",
+	222: "rsh-spx",
+	223: "cdc",
+	224: "masqdialer",
+	242: "direct",
+	243: "sur-meas",
+	244: "inbusiness",
+	245: "link",
+	246: "dsp3270",
+	247: "subntbcst-tftp",
+	248: "bhfhs",
+	256: "rap",
+	257: "set",
+	259: "esro-gen",
+	260: "openport",
+	261: "nsiiops",
+	262: "arcisdms",
+	263: "hdap",
+	264: "bgmp",
+	265: "x-bone-ctl",
+	266: "sst",
+	267: "td-service",
+	268: "td-replica",
+	269: "manet",
+	271: "pt-tls",
+	280: "http-mgmt",
+	281: "personal-link",
+	282: "cableport-ax",
+	283: "rescap",
+	284: "corerjd",
+	286: "fxp",
+	287: "k-block",
+	308: "novastorbakcup",
+	309: "entrusttime",
+	310: "bhmds",
+	311: "asip-webadmin",
+	312: "vslmp",
+	313: "magenta-logic",
+	314: "opalis-robot",
+	315: "dpsi",
+	316: "decauth",
+	317: "zannet",
+	318: "pkix-timestamp",
+	319: "ptp-event",
+	320: "ptp-general",
+	321: "pip",
+	322: "rtsps",
+	323: "rpki-rtr",
+	324: "rpki-rtr-tls",
+	333: "texar",
+	344: "pdap",
+	345: "pawserv",
+	346: "zserv",
+	347: "fatserv",
+	348: "csi-sgwp",
+	349: "mftp",
+	350: "matip-type-a",
+	351: "matip-type-b",
+	352: "dtag-ste-sb",
+	353: "ndsauth",
+	354: "bh611",
+	355: "datex-asn",
+	356: "cloanto-net-1",
+	357: "bhevent",
+	358: "shrinkwrap",
+	359: "nsrmp",
+	360: "scoi2odialog",
+	361: "semantix",
+	362: "srssend",
+	363: "rsvp-tunnel",
+	364: "aurora-cmgr",
+	365: "dtk",
+	366: "odmr",
+	367: "mortgageware",
+	368: "qbikgdp",
+	369: "rpc2portmap",
+	370: "codaauth2",
+	371: "clearcase",
+	372: "ulistproc",
+	373: "legent-1",
+	374: "legent-2",
+	375: "hassle",
+	376: "nip",
+	377: "tnETOS",
+	378: "dsETOS",
+	379: "is99c",
+	380: "is99s",
+	381: "hp-collector",
+	382: "hp-managed-node",
+	383: "hp-alarm-mgr",
+	384: "arns",
+	385: "ibm-app",
+	386: "asa",
+	387: "aurp",
+	388: "unidata-ldm",
+	389: "ldap",
+	390: "uis",
+	391: "synotics-relay",
+	392: "synotics-broker",
+	393: "meta5",
+	394: "embl-ndt",
+	395: "netcp",
+	396: "netware-ip",
+	397: "mptn",
+	398: "kryptolan",
+	399: "iso-tsap-c2",
+	400: "osb-sd",
+	401: "ups",
+	402: "genie",
+	403: "decap",
+	404: "nced",
+	405: "ncld",
+	406: "imsp",
+	407: "timbuktu",
+	408: "prm-sm",
+	409: "prm-nm",
+	410: "decladebug",
+	411: "rmt",
+	412: "synoptics-trap",
+	413: "smsp",
+	414: "infoseek",
+	415: "bnet",
+	416: "silverplatter",
+	417: "onmux",
+	418: "hyper-g",
+	419: "ariel1",
+	420: "smpte",
+	421: "ariel2",
+	422: "ariel3",
+	423: "opc-job-start",
+	424: "opc-job-track",
+	425: "icad-el",
+	426: "smartsdp",
+	427: "svrloc",
+	428: "ocs-cmu",
+	429: "ocs-amu",
+	430: "utmpsd",
+	431: "utmpcd",
+	432: "iasd",
+	433: "nnsp",
+	434: "mobileip-agent",
+	435: "mobilip-mn",
+	436: "dna-cml",
+	437: "comscm",
+	438: "dsfgw",
+	439: "dasp",
+	440: "sgcp",
+	441: "decvms-sysmgt",
+	442: "cvc-hostd",
+	443: "https",
+	444: "snpp",
+	445: "microsoft-ds",
+	446: "ddm-rdb",
+	447: "ddm-dfm",
+	448: "ddm-ssl",
+	449: "as-servermap",
+	450: "tserver",
+	451: "sfs-smp-net",
+	452: "sfs-config",
+	453: "creativeserver",
+	454: "contentserver",
+	455: "creativepartnr",
+	456: "macon-tcp",
+	457: "scohelp",
+	458: "appleqtc",
+	459: "ampr-rcmd",
+	460: "skronk",
+	461: "datasurfsrv",
+	462: "datasurfsrvsec",
+	463: "alpes",
+	464: "kpasswd",
+	465: "urd",
+	466: "digital-vrc",
+	467: "mylex-mapd",
+	468: "photuris",
+	469: "rcp",
+	470: "scx-proxy",
+	471: "mondex",
+	472: "ljk-login",
+	473: "hybrid-pop",
+	474: "tn-tl-w1",
+	475: "tcpnethaspsrv",
+	476: "tn-tl-fd1",
+	477: "ss7ns",
+	478: "spsc",
+	479: "iafserver",
+	480: "iafdbase",
+	481: "ph",
+	482: "bgs-nsi",
+	483: "ulpnet",
+	484: "integra-sme",
+	485: "powerburst",
+	486: "avian",
+	487: "saft",
+	488: "gss-http",
+	489: "nest-protocol",
+	490: "micom-pfs",
+	491: "go-login",
+	492: "ticf-1",
+	493: "ticf-2",
+	494: "pov-ray",
+	495: "intecourier",
+	496: "pim-rp-disc",
+	497: "retrospect",
+	498: "siam",
+	499: "iso-ill",
+	500: "isakmp",
+	501: "stmf",
+	502: "mbap",
+	503: "intrinsa",
+	504: "citadel",
+	505: "mailbox-lm",
+	506: "ohimsrv",
+	507: "crs",
+	508: "xvttp",
+	509: "snare",
+	510: "fcp",
+	511: "passgo",
+	512: "exec",
+	513: "login",
+	514: "shell",
+	515: "printer",
+	516: "videotex",
+	517: "talk",
+	518: "ntalk",
+	519: "utime",
+	520: "efs",
+	521: "ripng",
+	522: "ulp",
+	523: "ibm-db2",
+	524: "ncp",
+	525: "timed",
+	526: "tempo",
+	527: "stx",
+	528: "custix",
+	529: "irc-serv",
+	530: "courier",
+	531: "conference",
+	532: "netnews",
+	533: "netwall",
+	534: "windream",
+	535: "iiop",
+	536: "opalis-rdv",
+	537: "nmsp",
+	538: "gdomap",
+	539: "apertus-ldp",
+	540: "uucp",
+	541: "uucp-rlogin",
+	542: "commerce",
+	543: "klogin",
+	544: "kshell",
+	545: "appleqtcsrvr",
+	546: "dhcpv6-client",
+	547: "dhcpv6-server",
+	548: "afpovertcp",
+	549: "idfp",
+	550: "new-rwho",
+	551: "cybercash",
+	552: "devshr-nts",
+	553: "pirp",
+	554: "rtsp",
+	555: "dsf",
+	556: "remotefs",
+	557: "openvms-sysipc",
+	558: "sdnskmp",
+	559: "teedtap",
+	560: "rmonitor",
+	561: "monitor",
+	562: "chshell",
+	563: "nntps",
+	564: "9pfs",
+	565: "whoami",
+	566: "streettalk",
+	567: "banyan-rpc",
+	568: "ms-shuttle",
+	569: "ms-rome",
+	570: "meter",
+	571: "meter",
+	572: "sonar",
+	573: "banyan-vip",
+	574: "ftp-agent",
+	575: "vemmi",
+	576: "ipcd",
+	577: "vnas",
+	578: "ipdd",
+	579: "decbsrv",
+	580: "sntp-heartbeat",
+	581: "bdp",
+	582: "scc-security",
+	583: "philips-vc",
+	584: "keyserver",
+	586: "password-chg",
+	587: "submission",
+	588: "cal",
+	589: "eyelink",
+	590: "tns-cml",
+	591: "http-alt",
+	592: "eudora-set",
+	593: "http-rpc-epmap",
+	594: "tpip",
+	595: "cab-protocol",
+	596: "smsd",
+	597: "ptcnameservice",
+	598: "sco-websrvrmg3",
+	599: "acp",
+	600: "ipcserver",
+	601: "syslog-conn",
+	602: "xmlrpc-beep",
+	603: "idxp",
+	604: "tunnel",
+	605: "soap-beep",
+	606: "urm",
+	607: "nqs",
+	608: "sift-uft",
+	609: "npmp-trap",
+	610: "npmp-local",
+	611: "npmp-gui",
+	612: "hmmp-ind",
+	613: "hmmp-op",
+	614: "sshell",
+	615: "sco-inetmgr",
+	616: "sco-sysmgr",
+	617: "sco-dtmgr",
+	618: "dei-icda",
+	619: "compaq-evm",
+	620: "sco-websrvrmgr",
+	621: "escp-ip",
+	622: "collaborator",
+	623: "oob-ws-http",
+	624: "cryptoadmin",
+	625: "dec-dlm",
+	626: "asia",
+	627: "passgo-tivoli",
+	628: "qmqp",
+	629: "3com-amp3",
+	630: "rda",
+	631: "ipp",
+	632: "bmpp",
+	633: "servstat",
+	634: "ginad",
+	635: "rlzdbase",
+	636: "ldaps",
+	637: "lanserver",
+	638: "mcns-sec",
+	639: "msdp",
+	640: "entrust-sps",
+	641: "repcmd",
+	642: "esro-emsdp",
+	643: "sanity",
+	644: "dwr",
+	645: "pssc",
+	646: "ldp",
+	647: "dhcp-failover",
+	648: "rrp",
+	649: "cadview-3d",
+	650: "obex",
+	651: "ieee-mms",
+	652: "hello-port",
+	653: "repscmd",
+	654: "aodv",
+	655: "tinc",
+	656: "spmp",
+	657: "rmc",
+	658: "tenfold",
+	660: "mac-srvr-admin",
+	661: "hap",
+	662: "pftp",
+	663: "purenoise",
+	664: "oob-ws-https",
+	665: "sun-dr",
+	666: "mdqs",
+	667: "disclose",
+	668: "mecomm",
+	669: "meregister",
+	670: "vacdsm-sws",
+	671: "vacdsm-app",
+	672: "vpps-qua",
+	673: "cimplex",
+	674: "acap",
+	675: "dctp",
+	676: "vpps-via",
+	677: "vpp",
+	678: "ggf-ncp",
+	679: "mrm",
+	680: "entrust-aaas",
+	681: "entrust-aams",
+	682: "xfr",
+	683: "corba-iiop",
+	684: "corba-iiop-ssl",
+	685: "mdc-portmapper",
+	686: "hcp-wismar",
+	687: "asipregistry",
+	688: "realm-rusd",
+	689: "nmap",
+	690: "vatp",
+	691: "msexch-routing",
+	692: "hyperwave-isp",
"hyperwave-isp", + 693: "connendp", + 694: "ha-cluster", + 695: "ieee-mms-ssl", + 696: "rushd", + 697: "uuidgen", + 698: "olsr", + 699: "accessnetwork", + 700: "epp", + 701: "lmp", + 702: "iris-beep", + 704: "elcsd", + 705: "agentx", + 706: "silc", + 707: "borland-dsj", + 709: "entrust-kmsh", + 710: "entrust-ash", + 711: "cisco-tdp", + 712: "tbrpf", + 713: "iris-xpc", + 714: "iris-xpcs", + 715: "iris-lwz", + 729: "netviewdm1", + 730: "netviewdm2", + 731: "netviewdm3", + 741: "netgw", + 742: "netrcs", + 744: "flexlm", + 747: "fujitsu-dev", + 748: "ris-cm", + 749: "kerberos-adm", + 750: "rfile", + 751: "pump", + 752: "qrh", + 753: "rrh", + 754: "tell", + 758: "nlogin", + 759: "con", + 760: "ns", + 761: "rxe", + 762: "quotad", + 763: "cycleserv", + 764: "omserv", + 765: "webster", + 767: "phonebook", + 769: "vid", + 770: "cadlock", + 771: "rtip", + 772: "cycleserv2", + 773: "submit", + 774: "rpasswd", + 775: "entomb", + 776: "wpages", + 777: "multiling-http", + 780: "wpgs", + 800: "mdbs-daemon", + 801: "device", + 802: "mbap-s", + 810: "fcp-udp", + 828: "itm-mcell-s", + 829: "pkix-3-ca-ra", + 830: "netconf-ssh", + 831: "netconf-beep", + 832: "netconfsoaphttp", + 833: "netconfsoapbeep", + 847: "dhcp-failover2", + 848: "gdoi", + 853: "domain-s", + 854: "dlep", + 860: "iscsi", + 861: "owamp-control", + 862: "twamp-control", + 873: "rsync", + 886: "iclcnet-locate", + 887: "iclcnet-svinfo", + 888: "accessbuilder", + 900: "omginitialrefs", + 901: "smpnameres", + 902: "ideafarm-door", + 903: "ideafarm-panic", + 910: "kink", + 911: "xact-backup", + 912: "apex-mesh", + 913: "apex-edge", + 953: "rndc", + 989: "ftps-data", + 990: "ftps", + 991: "nas", + 992: "telnets", + 993: "imaps", + 995: "pop3s", + 996: "vsinet", + 997: "maitrd", + 998: "busboy", + 999: "garcon", + 1000: "cadlock2", + 1001: "webpush", + 1010: "surf", + 1021: "exp1", + 1022: "exp2", + 1025: "blackjack", + 1026: "cap", + 1029: "solid-mux", + 1033: "netinfo-local", + 1034: "activesync", + 1035: "mxxrlogin", + 1036: "nsstp", + 1037: "ams", + 1038: "mtqp", + 1039: "sbl", + 1040: "netarx", + 1041: "danf-ak2", + 1042: "afrog", + 1043: "boinc-client", + 1044: "dcutility", + 1045: "fpitp", + 1046: "wfremotertm", + 1047: "neod1", + 1048: "neod2", + 1049: "td-postman", + 1050: "cma", + 1051: "optima-vnet", + 1052: "ddt", + 1053: "remote-as", + 1054: "brvread", + 1055: "ansyslmd", + 1056: "vfo", + 1057: "startron", + 1058: "nim", + 1059: "nimreg", + 1060: "polestar", + 1061: "kiosk", + 1062: "veracity", + 1063: "kyoceranetdev", + 1064: "jstel", + 1065: "syscomlan", + 1066: "fpo-fns", + 1067: "instl-boots", + 1068: "instl-bootc", + 1069: "cognex-insight", + 1070: "gmrupdateserv", + 1071: "bsquare-voip", + 1072: "cardax", + 1073: "bridgecontrol", + 1074: "warmspotMgmt", + 1075: "rdrmshc", + 1076: "dab-sti-c", + 1077: "imgames", + 1078: "avocent-proxy", + 1079: "asprovatalk", + 1080: "socks", + 1081: "pvuniwien", + 1082: "amt-esd-prot", + 1083: "ansoft-lm-1", + 1084: "ansoft-lm-2", + 1085: "webobjects", + 1086: "cplscrambler-lg", + 1087: "cplscrambler-in", + 1088: "cplscrambler-al", + 1089: "ff-annunc", + 1090: "ff-fms", + 1091: "ff-sm", + 1092: "obrpd", + 1093: "proofd", + 1094: "rootd", + 1095: "nicelink", + 1096: "cnrprotocol", + 1097: "sunclustermgr", + 1098: "rmiactivation", + 1099: "rmiregistry", + 1100: "mctp", + 1101: "pt2-discover", + 1102: "adobeserver-1", + 1103: "adobeserver-2", + 1104: "xrl", + 1105: "ftranhc", + 1106: "isoipsigport-1", + 1107: "isoipsigport-2", + 1108: "ratio-adp", + 1110: "webadmstart", + 1111: "lmsocialserver", + 
1112: "icp", + 1113: "ltp-deepspace", + 1114: "mini-sql", + 1115: "ardus-trns", + 1116: "ardus-cntl", + 1117: "ardus-mtrns", + 1118: "sacred", + 1119: "bnetgame", + 1120: "bnetfile", + 1121: "rmpp", + 1122: "availant-mgr", + 1123: "murray", + 1124: "hpvmmcontrol", + 1125: "hpvmmagent", + 1126: "hpvmmdata", + 1127: "kwdb-commn", + 1128: "saphostctrl", + 1129: "saphostctrls", + 1130: "casp", + 1131: "caspssl", + 1132: "kvm-via-ip", + 1133: "dfn", + 1134: "aplx", + 1135: "omnivision", + 1136: "hhb-gateway", + 1137: "trim", + 1138: "encrypted-admin", + 1139: "evm", + 1140: "autonoc", + 1141: "mxomss", + 1142: "edtools", + 1143: "imyx", + 1144: "fuscript", + 1145: "x9-icue", + 1146: "audit-transfer", + 1147: "capioverlan", + 1148: "elfiq-repl", + 1149: "bvtsonar", + 1150: "blaze", + 1151: "unizensus", + 1152: "winpoplanmess", + 1153: "c1222-acse", + 1154: "resacommunity", + 1155: "nfa", + 1156: "iascontrol-oms", + 1157: "iascontrol", + 1158: "dbcontrol-oms", + 1159: "oracle-oms", + 1160: "olsv", + 1161: "health-polling", + 1162: "health-trap", + 1163: "sddp", + 1164: "qsm-proxy", + 1165: "qsm-gui", + 1166: "qsm-remote", + 1167: "cisco-ipsla", + 1168: "vchat", + 1169: "tripwire", + 1170: "atc-lm", + 1171: "atc-appserver", + 1172: "dnap", + 1173: "d-cinema-rrp", + 1174: "fnet-remote-ui", + 1175: "dossier", + 1176: "indigo-server", + 1177: "dkmessenger", + 1178: "sgi-storman", + 1179: "b2n", + 1180: "mc-client", + 1181: "3comnetman", + 1182: "accelenet", + 1183: "llsurfup-http", + 1184: "llsurfup-https", + 1185: "catchpole", + 1186: "mysql-cluster", + 1187: "alias", + 1188: "hp-webadmin", + 1189: "unet", + 1190: "commlinx-avl", + 1191: "gpfs", + 1192: "caids-sensor", + 1193: "fiveacross", + 1194: "openvpn", + 1195: "rsf-1", + 1196: "netmagic", + 1197: "carrius-rshell", + 1198: "cajo-discovery", + 1199: "dmidi", + 1200: "scol", + 1201: "nucleus-sand", + 1202: "caiccipc", + 1203: "ssslic-mgr", + 1204: "ssslog-mgr", + 1205: "accord-mgc", + 1206: "anthony-data", + 1207: "metasage", + 1208: "seagull-ais", + 1209: "ipcd3", + 1210: "eoss", + 1211: "groove-dpp", + 1212: "lupa", + 1213: "mpc-lifenet", + 1214: "kazaa", + 1215: "scanstat-1", + 1216: "etebac5", + 1217: "hpss-ndapi", + 1218: "aeroflight-ads", + 1219: "aeroflight-ret", + 1220: "qt-serveradmin", + 1221: "sweetware-apps", + 1222: "nerv", + 1223: "tgp", + 1224: "vpnz", + 1225: "slinkysearch", + 1226: "stgxfws", + 1227: "dns2go", + 1228: "florence", + 1229: "zented", + 1230: "periscope", + 1231: "menandmice-lpm", + 1232: "first-defense", + 1233: "univ-appserver", + 1234: "search-agent", + 1235: "mosaicsyssvc1", + 1236: "bvcontrol", + 1237: "tsdos390", + 1238: "hacl-qs", + 1239: "nmsd", + 1240: "instantia", + 1241: "nessus", + 1242: "nmasoverip", + 1243: "serialgateway", + 1244: "isbconference1", + 1245: "isbconference2", + 1246: "payrouter", + 1247: "visionpyramid", + 1248: "hermes", + 1249: "mesavistaco", + 1250: "swldy-sias", + 1251: "servergraph", + 1252: "bspne-pcc", + 1253: "q55-pcc", + 1254: "de-noc", + 1255: "de-cache-query", + 1256: "de-server", + 1257: "shockwave2", + 1258: "opennl", + 1259: "opennl-voice", + 1260: "ibm-ssd", + 1261: "mpshrsv", + 1262: "qnts-orb", + 1263: "dka", + 1264: "prat", + 1265: "dssiapi", + 1266: "dellpwrappks", + 1267: "epc", + 1268: "propel-msgsys", + 1269: "watilapp", + 1270: "opsmgr", + 1271: "excw", + 1272: "cspmlockmgr", + 1273: "emc-gateway", + 1274: "t1distproc", + 1275: "ivcollector", + 1277: "miva-mqs", + 1278: "dellwebadmin-1", + 1279: "dellwebadmin-2", + 1280: "pictrography", + 1281: "healthd", + 1282: 
"emperion", + 1283: "productinfo", + 1284: "iee-qfx", + 1285: "neoiface", + 1286: "netuitive", + 1287: "routematch", + 1288: "navbuddy", + 1289: "jwalkserver", + 1290: "winjaserver", + 1291: "seagulllms", + 1292: "dsdn", + 1293: "pkt-krb-ipsec", + 1294: "cmmdriver", + 1295: "ehtp", + 1296: "dproxy", + 1297: "sdproxy", + 1298: "lpcp", + 1299: "hp-sci", + 1300: "h323hostcallsc", + 1301: "ci3-software-1", + 1302: "ci3-software-2", + 1303: "sftsrv", + 1304: "boomerang", + 1305: "pe-mike", + 1306: "re-conn-proto", + 1307: "pacmand", + 1308: "odsi", + 1309: "jtag-server", + 1310: "husky", + 1311: "rxmon", + 1312: "sti-envision", + 1313: "bmc-patroldb", + 1314: "pdps", + 1315: "els", + 1316: "exbit-escp", + 1317: "vrts-ipcserver", + 1318: "krb5gatekeeper", + 1319: "amx-icsp", + 1320: "amx-axbnet", + 1321: "pip", + 1322: "novation", + 1323: "brcd", + 1324: "delta-mcp", + 1325: "dx-instrument", + 1326: "wimsic", + 1327: "ultrex", + 1328: "ewall", + 1329: "netdb-export", + 1330: "streetperfect", + 1331: "intersan", + 1332: "pcia-rxp-b", + 1333: "passwrd-policy", + 1334: "writesrv", + 1335: "digital-notary", + 1336: "ischat", + 1337: "menandmice-dns", + 1338: "wmc-log-svc", + 1339: "kjtsiteserver", + 1340: "naap", + 1341: "qubes", + 1342: "esbroker", + 1343: "re101", + 1344: "icap", + 1345: "vpjp", + 1346: "alta-ana-lm", + 1347: "bbn-mmc", + 1348: "bbn-mmx", + 1349: "sbook", + 1350: "editbench", + 1351: "equationbuilder", + 1352: "lotusnote", + 1353: "relief", + 1354: "XSIP-network", + 1355: "intuitive-edge", + 1356: "cuillamartin", + 1357: "pegboard", + 1358: "connlcli", + 1359: "ftsrv", + 1360: "mimer", + 1361: "linx", + 1362: "timeflies", + 1363: "ndm-requester", + 1364: "ndm-server", + 1365: "adapt-sna", + 1366: "netware-csp", + 1367: "dcs", + 1368: "screencast", + 1369: "gv-us", + 1370: "us-gv", + 1371: "fc-cli", + 1372: "fc-ser", + 1373: "chromagrafx", + 1374: "molly", + 1375: "bytex", + 1376: "ibm-pps", + 1377: "cichlid", + 1378: "elan", + 1379: "dbreporter", + 1380: "telesis-licman", + 1381: "apple-licman", + 1382: "udt-os", + 1383: "gwha", + 1384: "os-licman", + 1385: "atex-elmd", + 1386: "checksum", + 1387: "cadsi-lm", + 1388: "objective-dbc", + 1389: "iclpv-dm", + 1390: "iclpv-sc", + 1391: "iclpv-sas", + 1392: "iclpv-pm", + 1393: "iclpv-nls", + 1394: "iclpv-nlc", + 1395: "iclpv-wsm", + 1396: "dvl-activemail", + 1397: "audio-activmail", + 1398: "video-activmail", + 1399: "cadkey-licman", + 1400: "cadkey-tablet", + 1401: "goldleaf-licman", + 1402: "prm-sm-np", + 1403: "prm-nm-np", + 1404: "igi-lm", + 1405: "ibm-res", + 1406: "netlabs-lm", + 1407: "tibet-server", + 1408: "sophia-lm", + 1409: "here-lm", + 1410: "hiq", + 1411: "af", + 1412: "innosys", + 1413: "innosys-acl", + 1414: "ibm-mqseries", + 1415: "dbstar", + 1416: "novell-lu6-2", + 1417: "timbuktu-srv1", + 1418: "timbuktu-srv2", + 1419: "timbuktu-srv3", + 1420: "timbuktu-srv4", + 1421: "gandalf-lm", + 1422: "autodesk-lm", + 1423: "essbase", + 1424: "hybrid", + 1425: "zion-lm", + 1426: "sais", + 1427: "mloadd", + 1428: "informatik-lm", + 1429: "nms", + 1430: "tpdu", + 1431: "rgtp", + 1432: "blueberry-lm", + 1433: "ms-sql-s", + 1434: "ms-sql-m", + 1435: "ibm-cics", + 1436: "saism", + 1437: "tabula", + 1438: "eicon-server", + 1439: "eicon-x25", + 1440: "eicon-slp", + 1441: "cadis-1", + 1442: "cadis-2", + 1443: "ies-lm", + 1444: "marcam-lm", + 1445: "proxima-lm", + 1446: "ora-lm", + 1447: "apri-lm", + 1448: "oc-lm", + 1449: "peport", + 1450: "dwf", + 1451: "infoman", + 1452: "gtegsc-lm", + 1453: "genie-lm", + 1454: "interhdl-elmd", + 1455: 
"esl-lm", + 1456: "dca", + 1457: "valisys-lm", + 1458: "nrcabq-lm", + 1459: "proshare1", + 1460: "proshare2", + 1461: "ibm-wrless-lan", + 1462: "world-lm", + 1463: "nucleus", + 1464: "msl-lmd", + 1465: "pipes", + 1466: "oceansoft-lm", + 1467: "csdmbase", + 1468: "csdm", + 1469: "aal-lm", + 1470: "uaiact", + 1471: "csdmbase", + 1472: "csdm", + 1473: "openmath", + 1474: "telefinder", + 1475: "taligent-lm", + 1476: "clvm-cfg", + 1477: "ms-sna-server", + 1478: "ms-sna-base", + 1479: "dberegister", + 1480: "pacerforum", + 1481: "airs", + 1482: "miteksys-lm", + 1483: "afs", + 1484: "confluent", + 1485: "lansource", + 1486: "nms-topo-serv", + 1487: "localinfosrvr", + 1488: "docstor", + 1489: "dmdocbroker", + 1490: "insitu-conf", + 1492: "stone-design-1", + 1493: "netmap-lm", + 1494: "ica", + 1495: "cvc", + 1496: "liberty-lm", + 1497: "rfx-lm", + 1498: "sybase-sqlany", + 1499: "fhc", + 1500: "vlsi-lm", + 1501: "saiscm", + 1502: "shivadiscovery", + 1503: "imtc-mcs", + 1504: "evb-elm", + 1505: "funkproxy", + 1506: "utcd", + 1507: "symplex", + 1508: "diagmond", + 1509: "robcad-lm", + 1510: "mvx-lm", + 1511: "3l-l1", + 1512: "wins", + 1513: "fujitsu-dtc", + 1514: "fujitsu-dtcns", + 1515: "ifor-protocol", + 1516: "vpad", + 1517: "vpac", + 1518: "vpvd", + 1519: "vpvc", + 1520: "atm-zip-office", + 1521: "ncube-lm", + 1522: "ricardo-lm", + 1523: "cichild-lm", + 1524: "ingreslock", + 1525: "orasrv", + 1526: "pdap-np", + 1527: "tlisrv", + 1529: "coauthor", + 1530: "rap-service", + 1531: "rap-listen", + 1532: "miroconnect", + 1533: "virtual-places", + 1534: "micromuse-lm", + 1535: "ampr-info", + 1536: "ampr-inter", + 1537: "sdsc-lm", + 1538: "3ds-lm", + 1539: "intellistor-lm", + 1540: "rds", + 1541: "rds2", + 1542: "gridgen-elmd", + 1543: "simba-cs", + 1544: "aspeclmd", + 1545: "vistium-share", + 1546: "abbaccuray", + 1547: "laplink", + 1548: "axon-lm", + 1549: "shivahose", + 1550: "3m-image-lm", + 1551: "hecmtl-db", + 1552: "pciarray", + 1553: "sna-cs", + 1554: "caci-lm", + 1555: "livelan", + 1556: "veritas-pbx", + 1557: "arbortext-lm", + 1558: "xingmpeg", + 1559: "web2host", + 1560: "asci-val", + 1561: "facilityview", + 1562: "pconnectmgr", + 1563: "cadabra-lm", + 1564: "pay-per-view", + 1565: "winddlb", + 1566: "corelvideo", + 1567: "jlicelmd", + 1568: "tsspmap", + 1569: "ets", + 1570: "orbixd", + 1571: "rdb-dbs-disp", + 1572: "chip-lm", + 1573: "itscomm-ns", + 1574: "mvel-lm", + 1575: "oraclenames", + 1576: "moldflow-lm", + 1577: "hypercube-lm", + 1578: "jacobus-lm", + 1579: "ioc-sea-lm", + 1580: "tn-tl-r1", + 1581: "mil-2045-47001", + 1582: "msims", + 1583: "simbaexpress", + 1584: "tn-tl-fd2", + 1585: "intv", + 1586: "ibm-abtact", + 1587: "pra-elmd", + 1588: "triquest-lm", + 1589: "vqp", + 1590: "gemini-lm", + 1591: "ncpm-pm", + 1592: "commonspace", + 1593: "mainsoft-lm", + 1594: "sixtrak", + 1595: "radio", + 1596: "radio-sm", + 1597: "orbplus-iiop", + 1598: "picknfs", + 1599: "simbaservices", + 1600: "issd", + 1601: "aas", + 1602: "inspect", + 1603: "picodbc", + 1604: "icabrowser", + 1605: "slp", + 1606: "slm-api", + 1607: "stt", + 1608: "smart-lm", + 1609: "isysg-lm", + 1610: "taurus-wh", + 1611: "ill", + 1612: "netbill-trans", + 1613: "netbill-keyrep", + 1614: "netbill-cred", + 1615: "netbill-auth", + 1616: "netbill-prod", + 1617: "nimrod-agent", + 1618: "skytelnet", + 1619: "xs-openstorage", + 1620: "faxportwinport", + 1621: "softdataphone", + 1622: "ontime", + 1623: "jaleosnd", + 1624: "udp-sr-port", + 1625: "svs-omagent", + 1626: "shockwave", + 1627: "t128-gateway", + 1628: "lontalk-norm", + 1629: 
"lontalk-urgnt", + 1630: "oraclenet8cman", + 1631: "visitview", + 1632: "pammratc", + 1633: "pammrpc", + 1634: "loaprobe", + 1635: "edb-server1", + 1636: "isdc", + 1637: "islc", + 1638: "ismc", + 1639: "cert-initiator", + 1640: "cert-responder", + 1641: "invision", + 1642: "isis-am", + 1643: "isis-ambc", + 1644: "saiseh", + 1645: "sightline", + 1646: "sa-msg-port", + 1647: "rsap", + 1648: "concurrent-lm", + 1649: "kermit", + 1650: "nkd", + 1651: "shiva-confsrvr", + 1652: "xnmp", + 1653: "alphatech-lm", + 1654: "stargatealerts", + 1655: "dec-mbadmin", + 1656: "dec-mbadmin-h", + 1657: "fujitsu-mmpdc", + 1658: "sixnetudr", + 1659: "sg-lm", + 1660: "skip-mc-gikreq", + 1661: "netview-aix-1", + 1662: "netview-aix-2", + 1663: "netview-aix-3", + 1664: "netview-aix-4", + 1665: "netview-aix-5", + 1666: "netview-aix-6", + 1667: "netview-aix-7", + 1668: "netview-aix-8", + 1669: "netview-aix-9", + 1670: "netview-aix-10", + 1671: "netview-aix-11", + 1672: "netview-aix-12", + 1673: "proshare-mc-1", + 1674: "proshare-mc-2", + 1675: "pdp", + 1676: "netcomm1", + 1677: "groupwise", + 1678: "prolink", + 1679: "darcorp-lm", + 1680: "microcom-sbp", + 1681: "sd-elmd", + 1682: "lanyon-lantern", + 1683: "ncpm-hip", + 1684: "snaresecure", + 1685: "n2nremote", + 1686: "cvmon", + 1687: "nsjtp-ctrl", + 1688: "nsjtp-data", + 1689: "firefox", + 1690: "ng-umds", + 1691: "empire-empuma", + 1692: "sstsys-lm", + 1693: "rrirtr", + 1694: "rrimwm", + 1695: "rrilwm", + 1696: "rrifmm", + 1697: "rrisat", + 1698: "rsvp-encap-1", + 1699: "rsvp-encap-2", + 1700: "mps-raft", + 1701: "l2f", + 1702: "deskshare", + 1703: "hb-engine", + 1704: "bcs-broker", + 1705: "slingshot", + 1706: "jetform", + 1707: "vdmplay", + 1708: "gat-lmd", + 1709: "centra", + 1710: "impera", + 1711: "pptconference", + 1712: "registrar", + 1713: "conferencetalk", + 1714: "sesi-lm", + 1715: "houdini-lm", + 1716: "xmsg", + 1717: "fj-hdnet", + 1718: "h323gatedisc", + 1719: "h323gatestat", + 1720: "h323hostcall", + 1721: "caicci", + 1722: "hks-lm", + 1723: "pptp", + 1724: "csbphonemaster", + 1725: "iden-ralp", + 1726: "iberiagames", + 1727: "winddx", + 1728: "telindus", + 1729: "citynl", + 1730: "roketz", + 1731: "msiccp", + 1732: "proxim", + 1733: "siipat", + 1734: "cambertx-lm", + 1735: "privatechat", + 1736: "street-stream", + 1737: "ultimad", + 1738: "gamegen1", + 1739: "webaccess", + 1740: "encore", + 1741: "cisco-net-mgmt", + 1742: "3Com-nsd", + 1743: "cinegrfx-lm", + 1744: "ncpm-ft", + 1745: "remote-winsock", + 1746: "ftrapid-1", + 1747: "ftrapid-2", + 1748: "oracle-em1", + 1749: "aspen-services", + 1750: "sslp", + 1751: "swiftnet", + 1752: "lofr-lm", + 1753: "predatar-comms", + 1754: "oracle-em2", + 1755: "ms-streaming", + 1756: "capfast-lmd", + 1757: "cnhrp", + 1758: "tftp-mcast", + 1759: "spss-lm", + 1760: "www-ldap-gw", + 1761: "cft-0", + 1762: "cft-1", + 1763: "cft-2", + 1764: "cft-3", + 1765: "cft-4", + 1766: "cft-5", + 1767: "cft-6", + 1768: "cft-7", + 1769: "bmc-net-adm", + 1770: "bmc-net-svc", + 1771: "vaultbase", + 1772: "essweb-gw", + 1773: "kmscontrol", + 1774: "global-dtserv", + 1775: "vdab", + 1776: "femis", + 1777: "powerguardian", + 1778: "prodigy-intrnet", + 1779: "pharmasoft", + 1780: "dpkeyserv", + 1781: "answersoft-lm", + 1782: "hp-hcip", + 1784: "finle-lm", + 1785: "windlm", + 1786: "funk-logger", + 1787: "funk-license", + 1788: "psmond", + 1789: "hello", + 1790: "nmsp", + 1791: "ea1", + 1792: "ibm-dt-2", + 1793: "rsc-robot", + 1794: "cera-bcm", + 1795: "dpi-proxy", + 1796: "vocaltec-admin", + 1797: "uma", + 1798: "etp", + 1799: 
"netrisk", + 1800: "ansys-lm", + 1801: "msmq", + 1802: "concomp1", + 1803: "hp-hcip-gwy", + 1804: "enl", + 1805: "enl-name", + 1806: "musiconline", + 1807: "fhsp", + 1808: "oracle-vp2", + 1809: "oracle-vp1", + 1810: "jerand-lm", + 1811: "scientia-sdb", + 1812: "radius", + 1813: "radius-acct", + 1814: "tdp-suite", + 1815: "mmpft", + 1816: "harp", + 1817: "rkb-oscs", + 1818: "etftp", + 1819: "plato-lm", + 1820: "mcagent", + 1821: "donnyworld", + 1822: "es-elmd", + 1823: "unisys-lm", + 1824: "metrics-pas", + 1825: "direcpc-video", + 1826: "ardt", + 1827: "asi", + 1828: "itm-mcell-u", + 1829: "optika-emedia", + 1830: "net8-cman", + 1831: "myrtle", + 1832: "tht-treasure", + 1833: "udpradio", + 1834: "ardusuni", + 1835: "ardusmul", + 1836: "ste-smsc", + 1837: "csoft1", + 1838: "talnet", + 1839: "netopia-vo1", + 1840: "netopia-vo2", + 1841: "netopia-vo3", + 1842: "netopia-vo4", + 1843: "netopia-vo5", + 1844: "direcpc-dll", + 1845: "altalink", + 1846: "tunstall-pnc", + 1847: "slp-notify", + 1848: "fjdocdist", + 1849: "alpha-sms", + 1850: "gsi", + 1851: "ctcd", + 1852: "virtual-time", + 1853: "vids-avtp", + 1854: "buddy-draw", + 1855: "fiorano-rtrsvc", + 1856: "fiorano-msgsvc", + 1857: "datacaptor", + 1858: "privateark", + 1859: "gammafetchsvr", + 1860: "sunscalar-svc", + 1861: "lecroy-vicp", + 1862: "mysql-cm-agent", + 1863: "msnp", + 1864: "paradym-31port", + 1865: "entp", + 1866: "swrmi", + 1867: "udrive", + 1868: "viziblebrowser", + 1869: "transact", + 1870: "sunscalar-dns", + 1871: "canocentral0", + 1872: "canocentral1", + 1873: "fjmpjps", + 1874: "fjswapsnp", + 1875: "westell-stats", + 1876: "ewcappsrv", + 1877: "hp-webqosdb", + 1878: "drmsmc", + 1879: "nettgain-nms", + 1880: "vsat-control", + 1881: "ibm-mqseries2", + 1882: "ecsqdmn", + 1883: "mqtt", + 1884: "idmaps", + 1885: "vrtstrapserver", + 1886: "leoip", + 1887: "filex-lport", + 1888: "ncconfig", + 1889: "unify-adapter", + 1890: "wilkenlistener", + 1891: "childkey-notif", + 1892: "childkey-ctrl", + 1893: "elad", + 1894: "o2server-port", + 1896: "b-novative-ls", + 1897: "metaagent", + 1898: "cymtec-port", + 1899: "mc2studios", + 1900: "ssdp", + 1901: "fjicl-tep-a", + 1902: "fjicl-tep-b", + 1903: "linkname", + 1904: "fjicl-tep-c", + 1905: "sugp", + 1906: "tpmd", + 1907: "intrastar", + 1908: "dawn", + 1909: "global-wlink", + 1910: "ultrabac", + 1911: "mtp", + 1912: "rhp-iibp", + 1913: "armadp", + 1914: "elm-momentum", + 1915: "facelink", + 1916: "persona", + 1917: "noagent", + 1918: "can-nds", + 1919: "can-dch", + 1920: "can-ferret", + 1921: "noadmin", + 1922: "tapestry", + 1923: "spice", + 1924: "xiip", + 1925: "discovery-port", + 1926: "egs", + 1927: "videte-cipc", + 1928: "emsd-port", + 1929: "bandwiz-system", + 1930: "driveappserver", + 1931: "amdsched", + 1932: "ctt-broker", + 1933: "xmapi", + 1934: "xaapi", + 1935: "macromedia-fcs", + 1936: "jetcmeserver", + 1937: "jwserver", + 1938: "jwclient", + 1939: "jvserver", + 1940: "jvclient", + 1941: "dic-aida", + 1942: "res", + 1943: "beeyond-media", + 1944: "close-combat", + 1945: "dialogic-elmd", + 1946: "tekpls", + 1947: "sentinelsrm", + 1948: "eye2eye", + 1949: "ismaeasdaqlive", + 1950: "ismaeasdaqtest", + 1951: "bcs-lmserver", + 1952: "mpnjsc", + 1953: "rapidbase", + 1954: "abr-api", + 1955: "abr-secure", + 1956: "vrtl-vmf-ds", + 1957: "unix-status", + 1958: "dxadmind", + 1959: "simp-all", + 1960: "nasmanager", + 1961: "bts-appserver", + 1962: "biap-mp", + 1963: "webmachine", + 1964: "solid-e-engine", + 1965: "tivoli-npm", + 1966: "slush", + 1967: "sns-quote", + 1968: "lipsinc", + 
1969: "lipsinc1", + 1970: "netop-rc", + 1971: "netop-school", + 1972: "intersys-cache", + 1973: "dlsrap", + 1974: "drp", + 1975: "tcoflashagent", + 1976: "tcoregagent", + 1977: "tcoaddressbook", + 1978: "unisql", + 1979: "unisql-java", + 1980: "pearldoc-xact", + 1981: "p2pq", + 1982: "estamp", + 1983: "lhtp", + 1984: "bb", + 1985: "hsrp", + 1986: "licensedaemon", + 1987: "tr-rsrb-p1", + 1988: "tr-rsrb-p2", + 1989: "tr-rsrb-p3", + 1990: "stun-p1", + 1991: "stun-p2", + 1992: "stun-p3", + 1993: "snmp-tcp-port", + 1994: "stun-port", + 1995: "perf-port", + 1996: "tr-rsrb-port", + 1997: "gdp-port", + 1998: "x25-svc-port", + 1999: "tcp-id-port", + 2000: "cisco-sccp", + 2001: "dc", + 2002: "globe", + 2003: "brutus", + 2004: "mailbox", + 2005: "berknet", + 2006: "invokator", + 2007: "dectalk", + 2008: "conf", + 2009: "news", + 2010: "search", + 2011: "raid-cc", + 2012: "ttyinfo", + 2013: "raid-am", + 2014: "troff", + 2015: "cypress", + 2016: "bootserver", + 2017: "cypress-stat", + 2018: "terminaldb", + 2019: "whosockami", + 2020: "xinupageserver", + 2021: "servexec", + 2022: "down", + 2023: "xinuexpansion3", + 2024: "xinuexpansion4", + 2025: "ellpack", + 2026: "scrabble", + 2027: "shadowserver", + 2028: "submitserver", + 2029: "hsrpv6", + 2030: "device2", + 2031: "mobrien-chat", + 2032: "blackboard", + 2033: "glogger", + 2034: "scoremgr", + 2035: "imsldoc", + 2036: "e-dpnet", + 2037: "applus", + 2038: "objectmanager", + 2039: "prizma", + 2040: "lam", + 2041: "interbase", + 2042: "isis", + 2043: "isis-bcast", + 2044: "rimsl", + 2045: "cdfunc", + 2046: "sdfunc", + 2047: "dls", + 2048: "dls-monitor", + 2049: "shilp", + 2050: "av-emb-config", + 2051: "epnsdp", + 2052: "clearvisn", + 2053: "lot105-ds-upd", + 2054: "weblogin", + 2055: "iop", + 2056: "omnisky", + 2057: "rich-cp", + 2058: "newwavesearch", + 2059: "bmc-messaging", + 2060: "teleniumdaemon", + 2061: "netmount", + 2062: "icg-swp", + 2063: "icg-bridge", + 2064: "icg-iprelay", + 2065: "dlsrpn", + 2066: "aura", + 2067: "dlswpn", + 2068: "avauthsrvprtcl", + 2069: "event-port", + 2070: "ah-esp-encap", + 2071: "acp-port", + 2072: "msync", + 2073: "gxs-data-port", + 2074: "vrtl-vmf-sa", + 2075: "newlixengine", + 2076: "newlixconfig", + 2077: "tsrmagt", + 2078: "tpcsrvr", + 2079: "idware-router", + 2080: "autodesk-nlm", + 2081: "kme-trap-port", + 2082: "infowave", + 2083: "radsec", + 2084: "sunclustergeo", + 2085: "ada-cip", + 2086: "gnunet", + 2087: "eli", + 2088: "ip-blf", + 2089: "sep", + 2090: "lrp", + 2091: "prp", + 2092: "descent3", + 2093: "nbx-cc", + 2094: "nbx-au", + 2095: "nbx-ser", + 2096: "nbx-dir", + 2097: "jetformpreview", + 2098: "dialog-port", + 2099: "h2250-annex-g", + 2100: "amiganetfs", + 2101: "rtcm-sc104", + 2102: "zephyr-srv", + 2103: "zephyr-clt", + 2104: "zephyr-hm", + 2105: "minipay", + 2106: "mzap", + 2107: "bintec-admin", + 2108: "comcam", + 2109: "ergolight", + 2110: "umsp", + 2111: "dsatp", + 2112: "idonix-metanet", + 2113: "hsl-storm", + 2114: "newheights", + 2115: "kdm", + 2116: "ccowcmr", + 2117: "mentaclient", + 2118: "mentaserver", + 2119: "gsigatekeeper", + 2120: "qencp", + 2121: "scientia-ssdb", + 2122: "caupc-remote", + 2123: "gtp-control", + 2124: "elatelink", + 2125: "lockstep", + 2126: "pktcable-cops", + 2127: "index-pc-wb", + 2128: "net-steward", + 2129: "cs-live", + 2130: "xds", + 2131: "avantageb2b", + 2132: "solera-epmap", + 2133: "zymed-zpp", + 2134: "avenue", + 2135: "gris", + 2136: "appworxsrv", + 2137: "connect", + 2138: "unbind-cluster", + 2139: "ias-auth", + 2140: "ias-reg", + 2141: "ias-admind", + 
2142: "tdmoip", + 2143: "lv-jc", + 2144: "lv-ffx", + 2145: "lv-pici", + 2146: "lv-not", + 2147: "lv-auth", + 2148: "veritas-ucl", + 2149: "acptsys", + 2150: "dynamic3d", + 2151: "docent", + 2152: "gtp-user", + 2153: "ctlptc", + 2154: "stdptc", + 2155: "brdptc", + 2156: "trp", + 2157: "xnds", + 2158: "touchnetplus", + 2159: "gdbremote", + 2160: "apc-2160", + 2161: "apc-2161", + 2162: "navisphere", + 2163: "navisphere-sec", + 2164: "ddns-v3", + 2165: "x-bone-api", + 2166: "iwserver", + 2167: "raw-serial", + 2168: "easy-soft-mux", + 2169: "brain", + 2170: "eyetv", + 2171: "msfw-storage", + 2172: "msfw-s-storage", + 2173: "msfw-replica", + 2174: "msfw-array", + 2175: "airsync", + 2176: "rapi", + 2177: "qwave", + 2178: "bitspeer", + 2179: "vmrdp", + 2180: "mc-gt-srv", + 2181: "eforward", + 2182: "cgn-stat", + 2183: "cgn-config", + 2184: "nvd", + 2185: "onbase-dds", + 2186: "gtaua", + 2187: "ssmc", + 2188: "radware-rpm", + 2189: "radware-rpm-s", + 2190: "tivoconnect", + 2191: "tvbus", + 2192: "asdis", + 2193: "drwcs", + 2197: "mnp-exchange", + 2198: "onehome-remote", + 2199: "onehome-help", + 2200: "ici", + 2201: "ats", + 2202: "imtc-map", + 2203: "b2-runtime", + 2204: "b2-license", + 2205: "jps", + 2206: "hpocbus", + 2207: "hpssd", + 2208: "hpiod", + 2209: "rimf-ps", + 2210: "noaaport", + 2211: "emwin", + 2212: "leecoposserver", + 2213: "kali", + 2214: "rpi", + 2215: "ipcore", + 2216: "vtu-comms", + 2217: "gotodevice", + 2218: "bounzza", + 2219: "netiq-ncap", + 2220: "netiq", + 2221: "ethernet-ip-s", + 2222: "EtherNet-IP-1", + 2223: "rockwell-csp2", + 2224: "efi-mg", + 2225: "rcip-itu", + 2226: "di-drm", + 2227: "di-msg", + 2228: "ehome-ms", + 2229: "datalens", + 2230: "queueadm", + 2231: "wimaxasncp", + 2232: "ivs-video", + 2233: "infocrypt", + 2234: "directplay", + 2235: "sercomm-wlink", + 2236: "nani", + 2237: "optech-port1-lm", + 2238: "aviva-sna", + 2239: "imagequery", + 2240: "recipe", + 2241: "ivsd", + 2242: "foliocorp", + 2243: "magicom", + 2244: "nmsserver", + 2245: "hao", + 2246: "pc-mta-addrmap", + 2247: "antidotemgrsvr", + 2248: "ums", + 2249: "rfmp", + 2250: "remote-collab", + 2251: "dif-port", + 2252: "njenet-ssl", + 2253: "dtv-chan-req", + 2254: "seispoc", + 2255: "vrtp", + 2256: "pcc-mfp", + 2257: "simple-tx-rx", + 2258: "rcts", + 2260: "apc-2260", + 2261: "comotionmaster", + 2262: "comotionback", + 2263: "ecwcfg", + 2264: "apx500api-1", + 2265: "apx500api-2", + 2266: "mfserver", + 2267: "ontobroker", + 2268: "amt", + 2269: "mikey", + 2270: "starschool", + 2271: "mmcals", + 2272: "mmcal", + 2273: "mysql-im", + 2274: "pcttunnell", + 2275: "ibridge-data", + 2276: "ibridge-mgmt", + 2277: "bluectrlproxy", + 2278: "s3db", + 2279: "xmquery", + 2280: "lnvpoller", + 2281: "lnvconsole", + 2282: "lnvalarm", + 2283: "lnvstatus", + 2284: "lnvmaps", + 2285: "lnvmailmon", + 2286: "nas-metering", + 2287: "dna", + 2288: "netml", + 2289: "dict-lookup", + 2290: "sonus-logging", + 2291: "eapsp", + 2292: "mib-streaming", + 2293: "npdbgmngr", + 2294: "konshus-lm", + 2295: "advant-lm", + 2296: "theta-lm", + 2297: "d2k-datamover1", + 2298: "d2k-datamover2", + 2299: "pc-telecommute", + 2300: "cvmmon", + 2301: "cpq-wbem", + 2302: "binderysupport", + 2303: "proxy-gateway", + 2304: "attachmate-uts", + 2305: "mt-scaleserver", + 2306: "tappi-boxnet", + 2307: "pehelp", + 2308: "sdhelp", + 2309: "sdserver", + 2310: "sdclient", + 2311: "messageservice", + 2312: "wanscaler", + 2313: "iapp", + 2314: "cr-websystems", + 2315: "precise-sft", + 2316: "sent-lm", + 2317: "attachmate-g32", + 2318: "cadencecontrol", + 
2319: "infolibria", + 2320: "siebel-ns", + 2321: "rdlap", + 2322: "ofsd", + 2323: "3d-nfsd", + 2324: "cosmocall", + 2325: "ansysli", + 2326: "idcp", + 2327: "xingcsm", + 2328: "netrix-sftm", + 2329: "nvd", + 2330: "tscchat", + 2331: "agentview", + 2332: "rcc-host", + 2333: "snapp", + 2334: "ace-client", + 2335: "ace-proxy", + 2336: "appleugcontrol", + 2337: "ideesrv", + 2338: "norton-lambert", + 2339: "3com-webview", + 2340: "wrs-registry", + 2341: "xiostatus", + 2342: "manage-exec", + 2343: "nati-logos", + 2344: "fcmsys", + 2345: "dbm", + 2346: "redstorm-join", + 2347: "redstorm-find", + 2348: "redstorm-info", + 2349: "redstorm-diag", + 2350: "psbserver", + 2351: "psrserver", + 2352: "pslserver", + 2353: "pspserver", + 2354: "psprserver", + 2355: "psdbserver", + 2356: "gxtelmd", + 2357: "unihub-server", + 2358: "futrix", + 2359: "flukeserver", + 2360: "nexstorindltd", + 2361: "tl1", + 2362: "digiman", + 2363: "mediacntrlnfsd", + 2364: "oi-2000", + 2365: "dbref", + 2366: "qip-login", + 2367: "service-ctrl", + 2368: "opentable", + 2370: "l3-hbmon", + 2371: "hp-rda", + 2372: "lanmessenger", + 2373: "remographlm", + 2374: "hydra", + 2375: "docker", + 2376: "docker-s", + 2377: "swarm", + 2379: "etcd-client", + 2380: "etcd-server", + 2381: "compaq-https", + 2382: "ms-olap3", + 2383: "ms-olap4", + 2384: "sd-request", + 2385: "sd-data", + 2386: "virtualtape", + 2387: "vsamredirector", + 2388: "mynahautostart", + 2389: "ovsessionmgr", + 2390: "rsmtp", + 2391: "3com-net-mgmt", + 2392: "tacticalauth", + 2393: "ms-olap1", + 2394: "ms-olap2", + 2395: "lan900-remote", + 2396: "wusage", + 2397: "ncl", + 2398: "orbiter", + 2399: "fmpro-fdal", + 2400: "opequus-server", + 2401: "cvspserver", + 2402: "taskmaster2000", + 2403: "taskmaster2000", + 2404: "iec-104", + 2405: "trc-netpoll", + 2406: "jediserver", + 2407: "orion", + 2408: "railgun-webaccl", + 2409: "sns-protocol", + 2410: "vrts-registry", + 2411: "netwave-ap-mgmt", + 2412: "cdn", + 2413: "orion-rmi-reg", + 2414: "beeyond", + 2415: "codima-rtp", + 2416: "rmtserver", + 2417: "composit-server", + 2418: "cas", + 2419: "attachmate-s2s", + 2420: "dslremote-mgmt", + 2421: "g-talk", + 2422: "crmsbits", + 2423: "rnrp", + 2424: "kofax-svr", + 2425: "fjitsuappmgr", + 2426: "vcmp", + 2427: "mgcp-gateway", + 2428: "ott", + 2429: "ft-role", + 2430: "venus", + 2431: "venus-se", + 2432: "codasrv", + 2433: "codasrv-se", + 2434: "pxc-epmap", + 2435: "optilogic", + 2436: "topx", + 2437: "unicontrol", + 2438: "msp", + 2439: "sybasedbsynch", + 2440: "spearway", + 2441: "pvsw-inet", + 2442: "netangel", + 2443: "powerclientcsf", + 2444: "btpp2sectrans", + 2445: "dtn1", + 2446: "bues-service", + 2447: "ovwdb", + 2448: "hpppssvr", + 2449: "ratl", + 2450: "netadmin", + 2451: "netchat", + 2452: "snifferclient", + 2453: "madge-ltd", + 2454: "indx-dds", + 2455: "wago-io-system", + 2456: "altav-remmgt", + 2457: "rapido-ip", + 2458: "griffin", + 2459: "community", + 2460: "ms-theater", + 2461: "qadmifoper", + 2462: "qadmifevent", + 2463: "lsi-raid-mgmt", + 2464: "direcpc-si", + 2465: "lbm", + 2466: "lbf", + 2467: "high-criteria", + 2468: "qip-msgd", + 2469: "mti-tcs-comm", + 2470: "taskman-port", + 2471: "seaodbc", + 2472: "c3", + 2473: "aker-cdp", + 2474: "vitalanalysis", + 2475: "ace-server", + 2476: "ace-svr-prop", + 2477: "ssm-cvs", + 2478: "ssm-cssps", + 2479: "ssm-els", + 2480: "powerexchange", + 2481: "giop", + 2482: "giop-ssl", + 2483: "ttc", + 2484: "ttc-ssl", + 2485: "netobjects1", + 2486: "netobjects2", + 2487: "pns", + 2488: "moy-corp", + 2489: "tsilb", + 2490: 
"qip-qdhcp", + 2491: "conclave-cpp", + 2492: "groove", + 2493: "talarian-mqs", + 2494: "bmc-ar", + 2495: "fast-rem-serv", + 2496: "dirgis", + 2497: "quaddb", + 2498: "odn-castraq", + 2499: "unicontrol", + 2500: "rtsserv", + 2501: "rtsclient", + 2502: "kentrox-prot", + 2503: "nms-dpnss", + 2504: "wlbs", + 2505: "ppcontrol", + 2506: "jbroker", + 2507: "spock", + 2508: "jdatastore", + 2509: "fjmpss", + 2510: "fjappmgrbulk", + 2511: "metastorm", + 2512: "citrixima", + 2513: "citrixadmin", + 2514: "facsys-ntp", + 2515: "facsys-router", + 2516: "maincontrol", + 2517: "call-sig-trans", + 2518: "willy", + 2519: "globmsgsvc", + 2520: "pvsw", + 2521: "adaptecmgr", + 2522: "windb", + 2523: "qke-llc-v3", + 2524: "optiwave-lm", + 2525: "ms-v-worlds", + 2526: "ema-sent-lm", + 2527: "iqserver", + 2528: "ncr-ccl", + 2529: "utsftp", + 2530: "vrcommerce", + 2531: "ito-e-gui", + 2532: "ovtopmd", + 2533: "snifferserver", + 2534: "combox-web-acc", + 2535: "madcap", + 2536: "btpp2audctr1", + 2537: "upgrade", + 2538: "vnwk-prapi", + 2539: "vsiadmin", + 2540: "lonworks", + 2541: "lonworks2", + 2542: "udrawgraph", + 2543: "reftek", + 2544: "novell-zen", + 2545: "sis-emt", + 2546: "vytalvaultbrtp", + 2547: "vytalvaultvsmp", + 2548: "vytalvaultpipe", + 2549: "ipass", + 2550: "ads", + 2551: "isg-uda-server", + 2552: "call-logging", + 2553: "efidiningport", + 2554: "vcnet-link-v10", + 2555: "compaq-wcp", + 2556: "nicetec-nmsvc", + 2557: "nicetec-mgmt", + 2558: "pclemultimedia", + 2559: "lstp", + 2560: "labrat", + 2561: "mosaixcc", + 2562: "delibo", + 2563: "cti-redwood", + 2564: "hp-3000-telnet", + 2565: "coord-svr", + 2566: "pcs-pcw", + 2567: "clp", + 2568: "spamtrap", + 2569: "sonuscallsig", + 2570: "hs-port", + 2571: "cecsvc", + 2572: "ibp", + 2573: "trustestablish", + 2574: "blockade-bpsp", + 2575: "hl7", + 2576: "tclprodebugger", + 2577: "scipticslsrvr", + 2578: "rvs-isdn-dcp", + 2579: "mpfoncl", + 2580: "tributary", + 2581: "argis-te", + 2582: "argis-ds", + 2583: "mon", + 2584: "cyaserv", + 2585: "netx-server", + 2586: "netx-agent", + 2587: "masc", + 2588: "privilege", + 2589: "quartus-tcl", + 2590: "idotdist", + 2591: "maytagshuffle", + 2592: "netrek", + 2593: "mns-mail", + 2594: "dts", + 2595: "worldfusion1", + 2596: "worldfusion2", + 2597: "homesteadglory", + 2598: "citriximaclient", + 2599: "snapd", + 2600: "hpstgmgr", + 2601: "discp-client", + 2602: "discp-server", + 2603: "servicemeter", + 2604: "nsc-ccs", + 2605: "nsc-posa", + 2606: "netmon", + 2607: "connection", + 2608: "wag-service", + 2609: "system-monitor", + 2610: "versa-tek", + 2611: "lionhead", + 2612: "qpasa-agent", + 2613: "smntubootstrap", + 2614: "neveroffline", + 2615: "firepower", + 2616: "appswitch-emp", + 2617: "cmadmin", + 2618: "priority-e-com", + 2619: "bruce", + 2620: "lpsrecommender", + 2621: "miles-apart", + 2622: "metricadbc", + 2623: "lmdp", + 2624: "aria", + 2625: "blwnkl-port", + 2626: "gbjd816", + 2627: "moshebeeri", + 2628: "dict", + 2629: "sitaraserver", + 2630: "sitaramgmt", + 2631: "sitaradir", + 2632: "irdg-post", + 2633: "interintelli", + 2634: "pk-electronics", + 2635: "backburner", + 2636: "solve", + 2637: "imdocsvc", + 2638: "sybaseanywhere", + 2639: "aminet", + 2640: "ami-control", + 2641: "hdl-srv", + 2642: "tragic", + 2643: "gte-samp", + 2644: "travsoft-ipx-t", + 2645: "novell-ipx-cmd", + 2646: "and-lm", + 2647: "syncserver", + 2648: "upsnotifyprot", + 2649: "vpsipport", + 2650: "eristwoguns", + 2651: "ebinsite", + 2652: "interpathpanel", + 2653: "sonus", + 2654: "corel-vncadmin", + 2655: "unglue", + 2656: "kana", + 
2657: "sns-dispatcher", + 2658: "sns-admin", + 2659: "sns-query", + 2660: "gcmonitor", + 2661: "olhost", + 2662: "bintec-capi", + 2663: "bintec-tapi", + 2664: "patrol-mq-gm", + 2665: "patrol-mq-nm", + 2666: "extensis", + 2667: "alarm-clock-s", + 2668: "alarm-clock-c", + 2669: "toad", + 2670: "tve-announce", + 2671: "newlixreg", + 2672: "nhserver", + 2673: "firstcall42", + 2674: "ewnn", + 2675: "ttc-etap", + 2676: "simslink", + 2677: "gadgetgate1way", + 2678: "gadgetgate2way", + 2679: "syncserverssl", + 2680: "pxc-sapxom", + 2681: "mpnjsomb", + 2683: "ncdloadbalance", + 2684: "mpnjsosv", + 2685: "mpnjsocl", + 2686: "mpnjsomg", + 2687: "pq-lic-mgmt", + 2688: "md-cg-http", + 2689: "fastlynx", + 2690: "hp-nnm-data", + 2691: "itinternet", + 2692: "admins-lms", + 2694: "pwrsevent", + 2695: "vspread", + 2696: "unifyadmin", + 2697: "oce-snmp-trap", + 2698: "mck-ivpip", + 2699: "csoft-plusclnt", + 2700: "tqdata", + 2701: "sms-rcinfo", + 2702: "sms-xfer", + 2703: "sms-chat", + 2704: "sms-remctrl", + 2705: "sds-admin", + 2706: "ncdmirroring", + 2707: "emcsymapiport", + 2708: "banyan-net", + 2709: "supermon", + 2710: "sso-service", + 2711: "sso-control", + 2712: "aocp", + 2713: "raventbs", + 2714: "raventdm", + 2715: "hpstgmgr2", + 2716: "inova-ip-disco", + 2717: "pn-requester", + 2718: "pn-requester2", + 2719: "scan-change", + 2720: "wkars", + 2721: "smart-diagnose", + 2722: "proactivesrvr", + 2723: "watchdog-nt", + 2724: "qotps", + 2725: "msolap-ptp2", + 2726: "tams", + 2727: "mgcp-callagent", + 2728: "sqdr", + 2729: "tcim-control", + 2730: "nec-raidplus", + 2731: "fyre-messanger", + 2732: "g5m", + 2733: "signet-ctf", + 2734: "ccs-software", + 2735: "netiq-mc", + 2736: "radwiz-nms-srv", + 2737: "srp-feedback", + 2738: "ndl-tcp-ois-gw", + 2739: "tn-timing", + 2740: "alarm", + 2741: "tsb", + 2742: "tsb2", + 2743: "murx", + 2744: "honyaku", + 2745: "urbisnet", + 2746: "cpudpencap", + 2747: "fjippol-swrly", + 2748: "fjippol-polsvr", + 2749: "fjippol-cnsl", + 2750: "fjippol-port1", + 2751: "fjippol-port2", + 2752: "rsisysaccess", + 2753: "de-spot", + 2754: "apollo-cc", + 2755: "expresspay", + 2756: "simplement-tie", + 2757: "cnrp", + 2758: "apollo-status", + 2759: "apollo-gms", + 2760: "sabams", + 2761: "dicom-iscl", + 2762: "dicom-tls", + 2763: "desktop-dna", + 2764: "data-insurance", + 2765: "qip-audup", + 2766: "compaq-scp", + 2767: "uadtc", + 2768: "uacs", + 2769: "exce", + 2770: "veronica", + 2771: "vergencecm", + 2772: "auris", + 2773: "rbakcup1", + 2774: "rbakcup2", + 2775: "smpp", + 2776: "ridgeway1", + 2777: "ridgeway2", + 2778: "gwen-sonya", + 2779: "lbc-sync", + 2780: "lbc-control", + 2781: "whosells", + 2782: "everydayrc", + 2783: "aises", + 2784: "www-dev", + 2785: "aic-np", + 2786: "aic-oncrpc", + 2787: "piccolo", + 2788: "fryeserv", + 2789: "media-agent", + 2790: "plgproxy", + 2791: "mtport-regist", + 2792: "f5-globalsite", + 2793: "initlsmsad", + 2795: "livestats", + 2796: "ac-tech", + 2797: "esp-encap", + 2798: "tmesis-upshot", + 2799: "icon-discover", + 2800: "acc-raid", + 2801: "igcp", + 2802: "veritas-tcp1", + 2803: "btprjctrl", + 2804: "dvr-esm", + 2805: "wta-wsp-s", + 2806: "cspuni", + 2807: "cspmulti", + 2808: "j-lan-p", + 2809: "corbaloc", + 2810: "netsteward", + 2811: "gsiftp", + 2812: "atmtcp", + 2813: "llm-pass", + 2814: "llm-csv", + 2815: "lbc-measure", + 2816: "lbc-watchdog", + 2817: "nmsigport", + 2818: "rmlnk", + 2819: "fc-faultnotify", + 2820: "univision", + 2821: "vrts-at-port", + 2822: "ka0wuc", + 2823: "cqg-netlan", + 2824: "cqg-netlan-1", + 2826: "slc-systemlog", + 
2827: "slc-ctrlrloops", + 2828: "itm-lm", + 2829: "silkp1", + 2830: "silkp2", + 2831: "silkp3", + 2832: "silkp4", + 2833: "glishd", + 2834: "evtp", + 2835: "evtp-data", + 2836: "catalyst", + 2837: "repliweb", + 2838: "starbot", + 2839: "nmsigport", + 2840: "l3-exprt", + 2841: "l3-ranger", + 2842: "l3-hawk", + 2843: "pdnet", + 2844: "bpcp-poll", + 2845: "bpcp-trap", + 2846: "aimpp-hello", + 2847: "aimpp-port-req", + 2848: "amt-blc-port", + 2849: "fxp", + 2850: "metaconsole", + 2851: "webemshttp", + 2852: "bears-01", + 2853: "ispipes", + 2854: "infomover", + 2855: "msrp", + 2856: "cesdinv", + 2857: "simctlp", + 2858: "ecnp", + 2859: "activememory", + 2860: "dialpad-voice1", + 2861: "dialpad-voice2", + 2862: "ttg-protocol", + 2863: "sonardata", + 2864: "astromed-main", + 2865: "pit-vpn", + 2866: "iwlistener", + 2867: "esps-portal", + 2868: "npep-messaging", + 2869: "icslap", + 2870: "daishi", + 2871: "msi-selectplay", + 2872: "radix", + 2874: "dxmessagebase1", + 2875: "dxmessagebase2", + 2876: "sps-tunnel", + 2877: "bluelance", + 2878: "aap", + 2879: "ucentric-ds", + 2880: "synapse", + 2881: "ndsp", + 2882: "ndtp", + 2883: "ndnp", + 2884: "flashmsg", + 2885: "topflow", + 2886: "responselogic", + 2887: "aironetddp", + 2888: "spcsdlobby", + 2889: "rsom", + 2890: "cspclmulti", + 2891: "cinegrfx-elmd", + 2892: "snifferdata", + 2893: "vseconnector", + 2894: "abacus-remote", + 2895: "natuslink", + 2896: "ecovisiong6-1", + 2897: "citrix-rtmp", + 2898: "appliance-cfg", + 2899: "powergemplus", + 2900: "quicksuite", + 2901: "allstorcns", + 2902: "netaspi", + 2903: "suitcase", + 2904: "m2ua", + 2905: "m3ua", + 2906: "caller9", + 2907: "webmethods-b2b", + 2908: "mao", + 2909: "funk-dialout", + 2910: "tdaccess", + 2911: "blockade", + 2912: "epicon", + 2913: "boosterware", + 2914: "gamelobby", + 2915: "tksocket", + 2916: "elvin-server", + 2917: "elvin-client", + 2918: "kastenchasepad", + 2919: "roboer", + 2920: "roboeda", + 2921: "cesdcdman", + 2922: "cesdcdtrn", + 2923: "wta-wsp-wtp-s", + 2924: "precise-vip", + 2926: "mobile-file-dl", + 2927: "unimobilectrl", + 2928: "redstone-cpss", + 2929: "amx-webadmin", + 2930: "amx-weblinx", + 2931: "circle-x", + 2932: "incp", + 2933: "4-tieropmgw", + 2934: "4-tieropmcli", + 2935: "qtp", + 2936: "otpatch", + 2937: "pnaconsult-lm", + 2938: "sm-pas-1", + 2939: "sm-pas-2", + 2940: "sm-pas-3", + 2941: "sm-pas-4", + 2942: "sm-pas-5", + 2943: "ttnrepository", + 2944: "megaco-h248", + 2945: "h248-binary", + 2946: "fjsvmpor", + 2947: "gpsd", + 2948: "wap-push", + 2949: "wap-pushsecure", + 2950: "esip", + 2951: "ottp", + 2952: "mpfwsas", + 2953: "ovalarmsrv", + 2954: "ovalarmsrv-cmd", + 2955: "csnotify", + 2956: "ovrimosdbman", + 2957: "jmact5", + 2958: "jmact6", + 2959: "rmopagt", + 2960: "dfoxserver", + 2961: "boldsoft-lm", + 2962: "iph-policy-cli", + 2963: "iph-policy-adm", + 2964: "bullant-srap", + 2965: "bullant-rap", + 2966: "idp-infotrieve", + 2967: "ssc-agent", + 2968: "enpp", + 2969: "essp", + 2970: "index-net", + 2971: "netclip", + 2972: "pmsm-webrctl", + 2973: "svnetworks", + 2974: "signal", + 2975: "fjmpcm", + 2976: "cns-srv-port", + 2977: "ttc-etap-ns", + 2978: "ttc-etap-ds", + 2979: "h263-video", + 2980: "wimd", + 2981: "mylxamport", + 2982: "iwb-whiteboard", + 2983: "netplan", + 2984: "hpidsadmin", + 2985: "hpidsagent", + 2986: "stonefalls", + 2987: "identify", + 2988: "hippad", + 2989: "zarkov", + 2990: "boscap", + 2991: "wkstn-mon", + 2992: "avenyo", + 2993: "veritas-vis1", + 2994: "veritas-vis2", + 2995: "idrs", + 2996: "vsixml", + 2997: "rebol", + 2998: 
"realsecure", + 2999: "remoteware-un", + 3000: "hbci", + 3001: "origo-native", + 3002: "exlm-agent", + 3003: "cgms", + 3004: "csoftragent", + 3005: "geniuslm", + 3006: "ii-admin", + 3007: "lotusmtap", + 3008: "midnight-tech", + 3009: "pxc-ntfy", + 3010: "gw", + 3011: "trusted-web", + 3012: "twsdss", + 3013: "gilatskysurfer", + 3014: "broker-service", + 3015: "nati-dstp", + 3016: "notify-srvr", + 3017: "event-listener", + 3018: "srvc-registry", + 3019: "resource-mgr", + 3020: "cifs", + 3021: "agriserver", + 3022: "csregagent", + 3023: "magicnotes", + 3024: "nds-sso", + 3025: "arepa-raft", + 3026: "agri-gateway", + 3027: "LiebDevMgmt-C", + 3028: "LiebDevMgmt-DM", + 3029: "LiebDevMgmt-A", + 3030: "arepa-cas", + 3031: "eppc", + 3032: "redwood-chat", + 3033: "pdb", + 3034: "osmosis-aeea", + 3035: "fjsv-gssagt", + 3036: "hagel-dump", + 3037: "hp-san-mgmt", + 3038: "santak-ups", + 3039: "cogitate", + 3040: "tomato-springs", + 3041: "di-traceware", + 3042: "journee", + 3043: "brp", + 3044: "epp", + 3045: "responsenet", + 3046: "di-ase", + 3047: "hlserver", + 3048: "pctrader", + 3049: "nsws", + 3050: "gds-db", + 3051: "galaxy-server", + 3052: "apc-3052", + 3053: "dsom-server", + 3054: "amt-cnf-prot", + 3055: "policyserver", + 3056: "cdl-server", + 3057: "goahead-fldup", + 3058: "videobeans", + 3059: "qsoft", + 3060: "interserver", + 3061: "cautcpd", + 3062: "ncacn-ip-tcp", + 3063: "ncadg-ip-udp", + 3064: "rprt", + 3065: "slinterbase", + 3066: "netattachsdmp", + 3067: "fjhpjp", + 3068: "ls3bcast", + 3069: "ls3", + 3070: "mgxswitch", + 3071: "xplat-replicate", + 3072: "csd-monitor", + 3073: "vcrp", + 3074: "xbox", + 3075: "orbix-locator", + 3076: "orbix-config", + 3077: "orbix-loc-ssl", + 3078: "orbix-cfg-ssl", + 3079: "lv-frontpanel", + 3080: "stm-pproc", + 3081: "tl1-lv", + 3082: "tl1-raw", + 3083: "tl1-telnet", + 3084: "itm-mccs", + 3085: "pcihreq", + 3086: "jdl-dbkitchen", + 3087: "asoki-sma", + 3088: "xdtp", + 3089: "ptk-alink", + 3090: "stss", + 3091: "1ci-smcs", + 3093: "rapidmq-center", + 3094: "rapidmq-reg", + 3095: "panasas", + 3096: "ndl-aps", + 3098: "umm-port", + 3099: "chmd", + 3100: "opcon-xps", + 3101: "hp-pxpib", + 3102: "slslavemon", + 3103: "autocuesmi", + 3104: "autocuelog", + 3105: "cardbox", + 3106: "cardbox-http", + 3107: "business", + 3108: "geolocate", + 3109: "personnel", + 3110: "sim-control", + 3111: "wsynch", + 3112: "ksysguard", + 3113: "cs-auth-svr", + 3114: "ccmad", + 3115: "mctet-master", + 3116: "mctet-gateway", + 3117: "mctet-jserv", + 3118: "pkagent", + 3119: "d2000kernel", + 3120: "d2000webserver", + 3121: "pcmk-remote", + 3122: "vtr-emulator", + 3123: "edix", + 3124: "beacon-port", + 3125: "a13-an", + 3127: "ctx-bridge", + 3128: "ndl-aas", + 3129: "netport-id", + 3130: "icpv2", + 3131: "netbookmark", + 3132: "ms-rule-engine", + 3133: "prism-deploy", + 3134: "ecp", + 3135: "peerbook-port", + 3136: "grubd", + 3137: "rtnt-1", + 3138: "rtnt-2", + 3139: "incognitorv", + 3140: "ariliamulti", + 3141: "vmodem", + 3142: "rdc-wh-eos", + 3143: "seaview", + 3144: "tarantella", + 3145: "csi-lfap", + 3146: "bears-02", + 3147: "rfio", + 3148: "nm-game-admin", + 3149: "nm-game-server", + 3150: "nm-asses-admin", + 3151: "nm-assessor", + 3152: "feitianrockey", + 3153: "s8-client-port", + 3154: "ccmrmi", + 3155: "jpegmpeg", + 3156: "indura", + 3157: "e3consultants", + 3158: "stvp", + 3159: "navegaweb-port", + 3160: "tip-app-server", + 3161: "doc1lm", + 3162: "sflm", + 3163: "res-sap", + 3164: "imprs", + 3165: "newgenpay", + 3166: "sossecollector", + 3167: "nowcontact", + 3168: 
"poweronnud", + 3169: "serverview-as", + 3170: "serverview-asn", + 3171: "serverview-gf", + 3172: "serverview-rm", + 3173: "serverview-icc", + 3174: "armi-server", + 3175: "t1-e1-over-ip", + 3176: "ars-master", + 3177: "phonex-port", + 3178: "radclientport", + 3179: "h2gf-w-2m", + 3180: "mc-brk-srv", + 3181: "bmcpatrolagent", + 3182: "bmcpatrolrnvu", + 3183: "cops-tls", + 3184: "apogeex-port", + 3185: "smpppd", + 3186: "iiw-port", + 3187: "odi-port", + 3188: "brcm-comm-port", + 3189: "pcle-infex", + 3190: "csvr-proxy", + 3191: "csvr-sslproxy", + 3192: "firemonrcc", + 3193: "spandataport", + 3194: "magbind", + 3195: "ncu-1", + 3196: "ncu-2", + 3197: "embrace-dp-s", + 3198: "embrace-dp-c", + 3199: "dmod-workspace", + 3200: "tick-port", + 3201: "cpq-tasksmart", + 3202: "intraintra", + 3203: "netwatcher-mon", + 3204: "netwatcher-db", + 3205: "isns", + 3206: "ironmail", + 3207: "vx-auth-port", + 3208: "pfu-prcallback", + 3209: "netwkpathengine", + 3210: "flamenco-proxy", + 3211: "avsecuremgmt", + 3212: "surveyinst", + 3213: "neon24x7", + 3214: "jmq-daemon-1", + 3215: "jmq-daemon-2", + 3216: "ferrari-foam", + 3217: "unite", + 3218: "smartpackets", + 3219: "wms-messenger", + 3220: "xnm-ssl", + 3221: "xnm-clear-text", + 3222: "glbp", + 3223: "digivote", + 3224: "aes-discovery", + 3225: "fcip-port", + 3226: "isi-irp", + 3227: "dwnmshttp", + 3228: "dwmsgserver", + 3229: "global-cd-port", + 3230: "sftdst-port", + 3231: "vidigo", + 3232: "mdtp", + 3233: "whisker", + 3234: "alchemy", + 3235: "mdap-port", + 3236: "apparenet-ts", + 3237: "apparenet-tps", + 3238: "apparenet-as", + 3239: "apparenet-ui", + 3240: "triomotion", + 3241: "sysorb", + 3242: "sdp-id-port", + 3243: "timelot", + 3244: "onesaf", + 3245: "vieo-fe", + 3246: "dvt-system", + 3247: "dvt-data", + 3248: "procos-lm", + 3249: "ssp", + 3250: "hicp", + 3251: "sysscanner", + 3252: "dhe", + 3253: "pda-data", + 3254: "pda-sys", + 3255: "semaphore", + 3256: "cpqrpm-agent", + 3257: "cpqrpm-server", + 3258: "ivecon-port", + 3259: "epncdp2", + 3260: "iscsi-target", + 3261: "winshadow", + 3262: "necp", + 3263: "ecolor-imager", + 3264: "ccmail", + 3265: "altav-tunnel", + 3266: "ns-cfg-server", + 3267: "ibm-dial-out", + 3268: "msft-gc", + 3269: "msft-gc-ssl", + 3270: "verismart", + 3271: "csoft-prev", + 3272: "user-manager", + 3273: "sxmp", + 3274: "ordinox-server", + 3275: "samd", + 3276: "maxim-asics", + 3277: "awg-proxy", + 3278: "lkcmserver", + 3279: "admind", + 3280: "vs-server", + 3281: "sysopt", + 3282: "datusorb", + 3283: "Apple Remote Desktop (Net Assistant)", + 3284: "4talk", + 3285: "plato", + 3286: "e-net", + 3287: "directvdata", + 3288: "cops", + 3289: "enpc", + 3290: "caps-lm", + 3291: "sah-lm", + 3292: "cart-o-rama", + 3293: "fg-fps", + 3294: "fg-gip", + 3295: "dyniplookup", + 3296: "rib-slm", + 3297: "cytel-lm", + 3298: "deskview", + 3299: "pdrncs", + 3300: "ceph", + 3302: "mcs-fastmail", + 3303: "opsession-clnt", + 3304: "opsession-srvr", + 3305: "odette-ftp", + 3306: "mysql", + 3307: "opsession-prxy", + 3308: "tns-server", + 3309: "tns-adv", + 3310: "dyna-access", + 3311: "mcns-tel-ret", + 3312: "appman-server", + 3313: "uorb", + 3314: "uohost", + 3315: "cdid", + 3316: "aicc-cmi", + 3317: "vsaiport", + 3318: "ssrip", + 3319: "sdt-lmd", + 3320: "officelink2000", + 3321: "vnsstr", + 3326: "sftu", + 3327: "bbars", + 3328: "egptlm", + 3329: "hp-device-disc", + 3330: "mcs-calypsoicf", + 3331: "mcs-messaging", + 3332: "mcs-mailsvr", + 3333: "dec-notes", + 3334: "directv-web", + 3335: "directv-soft", + 3336: "directv-tick", + 3337: 
"directv-catlg", + 3338: "anet-b", + 3339: "anet-l", + 3340: "anet-m", + 3341: "anet-h", + 3342: "webtie", + 3343: "ms-cluster-net", + 3344: "bnt-manager", + 3345: "influence", + 3346: "trnsprntproxy", + 3347: "phoenix-rpc", + 3348: "pangolin-laser", + 3349: "chevinservices", + 3350: "findviatv", + 3351: "btrieve", + 3352: "ssql", + 3353: "fatpipe", + 3354: "suitjd", + 3355: "ordinox-dbase", + 3356: "upnotifyps", + 3357: "adtech-test", + 3358: "mpsysrmsvr", + 3359: "wg-netforce", + 3360: "kv-server", + 3361: "kv-agent", + 3362: "dj-ilm", + 3363: "nati-vi-server", + 3364: "creativeserver", + 3365: "contentserver", + 3366: "creativepartnr", + 3372: "tip2", + 3373: "lavenir-lm", + 3374: "cluster-disc", + 3375: "vsnm-agent", + 3376: "cdbroker", + 3377: "cogsys-lm", + 3378: "wsicopy", + 3379: "socorfs", + 3380: "sns-channels", + 3381: "geneous", + 3382: "fujitsu-neat", + 3383: "esp-lm", + 3384: "hp-clic", + 3385: "qnxnetman", + 3386: "gprs-data", + 3387: "backroomnet", + 3388: "cbserver", + 3389: "ms-wbt-server", + 3390: "dsc", + 3391: "savant", + 3392: "efi-lm", + 3393: "d2k-tapestry1", + 3394: "d2k-tapestry2", + 3395: "dyna-lm", + 3396: "printer-agent", + 3397: "cloanto-lm", + 3398: "mercantile", + 3399: "csms", + 3400: "csms2", + 3401: "filecast", + 3402: "fxaengine-net", + 3405: "nokia-ann-ch1", + 3406: "nokia-ann-ch2", + 3407: "ldap-admin", + 3408: "BESApi", + 3409: "networklens", + 3410: "networklenss", + 3411: "biolink-auth", + 3412: "xmlblaster", + 3413: "svnet", + 3414: "wip-port", + 3415: "bcinameservice", + 3416: "commandport", + 3417: "csvr", + 3418: "rnmap", + 3419: "softaudit", + 3420: "ifcp-port", + 3421: "bmap", + 3422: "rusb-sys-port", + 3423: "xtrm", + 3424: "xtrms", + 3425: "agps-port", + 3426: "arkivio", + 3427: "websphere-snmp", + 3428: "twcss", + 3429: "gcsp", + 3430: "ssdispatch", + 3431: "ndl-als", + 3432: "osdcp", + 3433: "opnet-smp", + 3434: "opencm", + 3435: "pacom", + 3436: "gc-config", + 3437: "autocueds", + 3438: "spiral-admin", + 3439: "hri-port", + 3440: "ans-console", + 3441: "connect-client", + 3442: "connect-server", + 3443: "ov-nnm-websrv", + 3444: "denali-server", + 3445: "monp", + 3446: "3comfaxrpc", + 3447: "directnet", + 3448: "dnc-port", + 3449: "hotu-chat", + 3450: "castorproxy", + 3451: "asam", + 3452: "sabp-signal", + 3453: "pscupd", + 3454: "mira", + 3455: "prsvp", + 3456: "vat", + 3457: "vat-control", + 3458: "d3winosfi", + 3459: "integral", + 3460: "edm-manager", + 3461: "edm-stager", + 3462: "edm-std-notify", + 3463: "edm-adm-notify", + 3464: "edm-mgr-sync", + 3465: "edm-mgr-cntrl", + 3466: "workflow", + 3467: "rcst", + 3468: "ttcmremotectrl", + 3469: "pluribus", + 3470: "jt400", + 3471: "jt400-ssl", + 3472: "jaugsremotec-1", + 3473: "jaugsremotec-2", + 3474: "ttntspauto", + 3475: "genisar-port", + 3476: "nppmp", + 3477: "ecomm", + 3478: "stun", + 3479: "twrpc", + 3480: "plethora", + 3481: "cleanerliverc", + 3482: "vulture", + 3483: "slim-devices", + 3484: "gbs-stp", + 3485: "celatalk", + 3486: "ifsf-hb-port", + 3487: "ltctcp", + 3488: "fs-rh-srv", + 3489: "dtp-dia", + 3490: "colubris", + 3491: "swr-port", + 3492: "tvdumtray-port", + 3493: "nut", + 3494: "ibm3494", + 3495: "seclayer-tcp", + 3496: "seclayer-tls", + 3497: "ipether232port", + 3498: "dashpas-port", + 3499: "sccip-media", + 3500: "rtmp-port", + 3501: "isoft-p2p", + 3502: "avinstalldisc", + 3503: "lsp-ping", + 3504: "ironstorm", + 3505: "ccmcomm", + 3506: "apc-3506", + 3507: "nesh-broker", + 3508: "interactionweb", + 3509: "vt-ssl", + 3510: "xss-port", + 3511: "webmail-2", + 3512: 
"aztec", + 3513: "arcpd", + 3514: "must-p2p", + 3515: "must-backplane", + 3516: "smartcard-port", + 3517: "802-11-iapp", + 3518: "artifact-msg", + 3519: "nvmsgd", + 3520: "galileolog", + 3521: "mc3ss", + 3522: "nssocketport", + 3523: "odeumservlink", + 3524: "ecmport", + 3525: "eisport", + 3526: "starquiz-port", + 3527: "beserver-msg-q", + 3528: "jboss-iiop", + 3529: "jboss-iiop-ssl", + 3530: "gf", + 3531: "joltid", + 3532: "raven-rmp", + 3533: "raven-rdp", + 3534: "urld-port", + 3535: "ms-la", + 3536: "snac", + 3537: "ni-visa-remote", + 3538: "ibm-diradm", + 3539: "ibm-diradm-ssl", + 3540: "pnrp-port", + 3541: "voispeed-port", + 3542: "hacl-monitor", + 3543: "qftest-lookup", + 3544: "teredo", + 3545: "camac", + 3547: "symantec-sim", + 3548: "interworld", + 3549: "tellumat-nms", + 3550: "ssmpp", + 3551: "apcupsd", + 3552: "taserver", + 3553: "rbr-discovery", + 3554: "questnotify", + 3555: "razor", + 3556: "sky-transport", + 3557: "personalos-001", + 3558: "mcp-port", + 3559: "cctv-port", + 3560: "iniserve-port", + 3561: "bmc-onekey", + 3562: "sdbproxy", + 3563: "watcomdebug", + 3564: "esimport", + 3565: "m2pa", + 3566: "quest-data-hub", + 3567: "dof-eps", + 3568: "dof-tunnel-sec", + 3569: "mbg-ctrl", + 3570: "mccwebsvr-port", + 3571: "megardsvr-port", + 3572: "megaregsvrport", + 3573: "tag-ups-1", + 3574: "dmaf-server", + 3575: "ccm-port", + 3576: "cmc-port", + 3577: "config-port", + 3578: "data-port", + 3579: "ttat3lb", + 3580: "nati-svrloc", + 3581: "kfxaclicensing", + 3582: "press", + 3583: "canex-watch", + 3584: "u-dbap", + 3585: "emprise-lls", + 3586: "emprise-lsc", + 3587: "p2pgroup", + 3588: "sentinel", + 3589: "isomair", + 3590: "wv-csp-sms", + 3591: "gtrack-server", + 3592: "gtrack-ne", + 3593: "bpmd", + 3594: "mediaspace", + 3595: "shareapp", + 3596: "iw-mmogame", + 3597: "a14", + 3598: "a15", + 3599: "quasar-server", + 3600: "trap-daemon", + 3601: "visinet-gui", + 3602: "infiniswitchcl", + 3603: "int-rcv-cntrl", + 3604: "bmc-jmx-port", + 3605: "comcam-io", + 3606: "splitlock", + 3607: "precise-i3", + 3608: "trendchip-dcp", + 3609: "cpdi-pidas-cm", + 3610: "echonet", + 3611: "six-degrees", + 3612: "hp-dataprotect", + 3613: "alaris-disc", + 3614: "sigma-port", + 3615: "start-network", + 3616: "cd3o-protocol", + 3617: "sharp-server", + 3618: "aairnet-1", + 3619: "aairnet-2", + 3620: "ep-pcp", + 3621: "ep-nsp", + 3622: "ff-lr-port", + 3623: "haipe-discover", + 3624: "dist-upgrade", + 3625: "volley", + 3626: "bvcdaemon-port", + 3627: "jamserverport", + 3628: "ept-machine", + 3629: "escvpnet", + 3630: "cs-remote-db", + 3631: "cs-services", + 3632: "distcc", + 3633: "wacp", + 3634: "hlibmgr", + 3635: "sdo", + 3636: "servistaitsm", + 3637: "scservp", + 3638: "ehp-backup", + 3639: "xap-ha", + 3640: "netplay-port1", + 3641: "netplay-port2", + 3642: "juxml-port", + 3643: "audiojuggler", + 3644: "ssowatch", + 3645: "cyc", + 3646: "xss-srv-port", + 3647: "splitlock-gw", + 3648: "fjcp", + 3649: "nmmp", + 3650: "prismiq-plugin", + 3651: "xrpc-registry", + 3652: "vxcrnbuport", + 3653: "tsp", + 3654: "vaprtm", + 3655: "abatemgr", + 3656: "abatjss", + 3657: "immedianet-bcn", + 3658: "ps-ams", + 3659: "apple-sasl", + 3660: "can-nds-ssl", + 3661: "can-ferret-ssl", + 3662: "pserver", + 3663: "dtp", + 3664: "ups-engine", + 3665: "ent-engine", + 3666: "eserver-pap", + 3667: "infoexch", + 3668: "dell-rm-port", + 3669: "casanswmgmt", + 3670: "smile", + 3671: "efcp", + 3672: "lispworks-orb", + 3673: "mediavault-gui", + 3674: "wininstall-ipc", + 3675: "calltrax", + 3676: "va-pacbase", + 3677: "roverlog", 
+ 3678: "ipr-dglt", + 3679: "Escale (Newton Dock)", + 3680: "npds-tracker", + 3681: "bts-x73", + 3682: "cas-mapi", + 3683: "bmc-ea", + 3684: "faxstfx-port", + 3685: "dsx-agent", + 3686: "tnmpv2", + 3687: "simple-push", + 3688: "simple-push-s", + 3689: "daap", + 3690: "svn", + 3691: "magaya-network", + 3692: "intelsync", + 3693: "easl", + 3695: "bmc-data-coll", + 3696: "telnetcpcd", + 3697: "nw-license", + 3698: "sagectlpanel", + 3699: "kpn-icw", + 3700: "lrs-paging", + 3701: "netcelera", + 3702: "ws-discovery", + 3703: "adobeserver-3", + 3704: "adobeserver-4", + 3705: "adobeserver-5", + 3706: "rt-event", + 3707: "rt-event-s", + 3708: "sun-as-iiops", + 3709: "ca-idms", + 3710: "portgate-auth", + 3711: "edb-server2", + 3712: "sentinel-ent", + 3713: "tftps", + 3714: "delos-dms", + 3715: "anoto-rendezv", + 3716: "wv-csp-sms-cir", + 3717: "wv-csp-udp-cir", + 3718: "opus-services", + 3719: "itelserverport", + 3720: "ufastro-instr", + 3721: "xsync", + 3722: "xserveraid", + 3723: "sychrond", + 3724: "blizwow", + 3725: "na-er-tip", + 3726: "array-manager", + 3727: "e-mdu", + 3728: "e-woa", + 3729: "fksp-audit", + 3730: "client-ctrl", + 3731: "smap", + 3732: "m-wnn", + 3733: "multip-msg", + 3734: "synel-data", + 3735: "pwdis", + 3736: "rs-rmi", + 3737: "xpanel", + 3738: "versatalk", + 3739: "launchbird-lm", + 3740: "heartbeat", + 3741: "wysdma", + 3742: "cst-port", + 3743: "ipcs-command", + 3744: "sasg", + 3745: "gw-call-port", + 3746: "linktest", + 3747: "linktest-s", + 3748: "webdata", + 3749: "cimtrak", + 3750: "cbos-ip-port", + 3751: "gprs-cube", + 3752: "vipremoteagent", + 3753: "nattyserver", + 3754: "timestenbroker", + 3755: "sas-remote-hlp", + 3756: "canon-capt", + 3757: "grf-port", + 3758: "apw-registry", + 3759: "exapt-lmgr", + 3760: "adtempusclient", + 3761: "gsakmp", + 3762: "gbs-smp", + 3763: "xo-wave", + 3764: "mni-prot-rout", + 3765: "rtraceroute", + 3766: "sitewatch-s", + 3767: "listmgr-port", + 3768: "rblcheckd", + 3769: "haipe-otnk", + 3770: "cindycollab", + 3771: "paging-port", + 3772: "ctp", + 3773: "ctdhercules", + 3774: "zicom", + 3775: "ispmmgr", + 3776: "dvcprov-port", + 3777: "jibe-eb", + 3778: "c-h-it-port", + 3779: "cognima", + 3780: "nnp", + 3781: "abcvoice-port", + 3782: "iso-tp0s", + 3783: "bim-pem", + 3784: "bfd-control", + 3785: "bfd-echo", + 3786: "upstriggervsw", + 3787: "fintrx", + 3788: "isrp-port", + 3789: "remotedeploy", + 3790: "quickbooksrds", + 3791: "tvnetworkvideo", + 3792: "sitewatch", + 3793: "dcsoftware", + 3794: "jaus", + 3795: "myblast", + 3796: "spw-dialer", + 3797: "idps", + 3798: "minilock", + 3799: "radius-dynauth", + 3800: "pwgpsi", + 3801: "ibm-mgr", + 3802: "vhd", + 3803: "soniqsync", + 3804: "iqnet-port", + 3805: "tcpdataserver", + 3806: "wsmlb", + 3807: "spugna", + 3808: "sun-as-iiops-ca", + 3809: "apocd", + 3810: "wlanauth", + 3811: "amp", + 3812: "neto-wol-server", + 3813: "rap-ip", + 3814: "neto-dcs", + 3815: "lansurveyorxml", + 3816: "sunlps-http", + 3817: "tapeware", + 3818: "crinis-hb", + 3819: "epl-slp", + 3820: "scp", + 3821: "pmcp", + 3822: "acp-discovery", + 3823: "acp-conduit", + 3824: "acp-policy", + 3825: "ffserver", + 3826: "warmux", + 3827: "netmpi", + 3828: "neteh", + 3829: "neteh-ext", + 3830: "cernsysmgmtagt", + 3831: "dvapps", + 3832: "xxnetserver", + 3833: "aipn-auth", + 3834: "spectardata", + 3835: "spectardb", + 3836: "markem-dcp", + 3837: "mkm-discovery", + 3838: "sos", + 3839: "amx-rms", + 3840: "flirtmitmir", + 3841: "shiprush-db-svr", + 3842: "nhci", + 3843: "quest-agent", + 3844: "rnm", + 3845: "v-one-spp", + 3846: 
"an-pcp", + 3847: "msfw-control", + 3848: "item", + 3849: "spw-dnspreload", + 3850: "qtms-bootstrap", + 3851: "spectraport", + 3852: "sse-app-config", + 3853: "sscan", + 3854: "stryker-com", + 3855: "opentrac", + 3856: "informer", + 3857: "trap-port", + 3858: "trap-port-mom", + 3859: "nav-port", + 3860: "sasp", + 3861: "winshadow-hd", + 3862: "giga-pocket", + 3863: "asap-tcp", + 3864: "asap-tcp-tls", + 3865: "xpl", + 3866: "dzdaemon", + 3867: "dzoglserver", + 3868: "diameter", + 3869: "ovsam-mgmt", + 3870: "ovsam-d-agent", + 3871: "avocent-adsap", + 3872: "oem-agent", + 3873: "fagordnc", + 3874: "sixxsconfig", + 3875: "pnbscada", + 3876: "dl-agent", + 3877: "xmpcr-interface", + 3878: "fotogcad", + 3879: "appss-lm", + 3880: "igrs", + 3881: "idac", + 3882: "msdts1", + 3883: "vrpn", + 3884: "softrack-meter", + 3885: "topflow-ssl", + 3886: "nei-management", + 3887: "ciphire-data", + 3888: "ciphire-serv", + 3889: "dandv-tester", + 3890: "ndsconnect", + 3891: "rtc-pm-port", + 3892: "pcc-image-port", + 3893: "cgi-starapi", + 3894: "syam-agent", + 3895: "syam-smc", + 3896: "sdo-tls", + 3897: "sdo-ssh", + 3898: "senip", + 3899: "itv-control", + 3900: "udt-os", + 3901: "nimsh", + 3902: "nimaux", + 3903: "charsetmgr", + 3904: "omnilink-port", + 3905: "mupdate", + 3906: "topovista-data", + 3907: "imoguia-port", + 3908: "hppronetman", + 3909: "surfcontrolcpa", + 3910: "prnrequest", + 3911: "prnstatus", + 3912: "gbmt-stars", + 3913: "listcrt-port", + 3914: "listcrt-port-2", + 3915: "agcat", + 3916: "wysdmc", + 3917: "aftmux", + 3918: "pktcablemmcops", + 3919: "hyperip", + 3920: "exasoftport1", + 3921: "herodotus-net", + 3922: "sor-update", + 3923: "symb-sb-port", + 3924: "mpl-gprs-port", + 3925: "zmp", + 3926: "winport", + 3927: "natdataservice", + 3928: "netboot-pxe", + 3929: "smauth-port", + 3930: "syam-webserver", + 3931: "msr-plugin-port", + 3932: "dyn-site", + 3933: "plbserve-port", + 3934: "sunfm-port", + 3935: "sdp-portmapper", + 3936: "mailprox", + 3937: "dvbservdsc", + 3938: "dbcontrol-agent", + 3939: "aamp", + 3940: "xecp-node", + 3941: "homeportal-web", + 3942: "srdp", + 3943: "tig", + 3944: "sops", + 3945: "emcads", + 3946: "backupedge", + 3947: "ccp", + 3948: "apdap", + 3949: "drip", + 3950: "namemunge", + 3951: "pwgippfax", + 3952: "i3-sessionmgr", + 3953: "xmlink-connect", + 3954: "adrep", + 3955: "p2pcommunity", + 3956: "gvcp", + 3957: "mqe-broker", + 3958: "mqe-agent", + 3959: "treehopper", + 3960: "bess", + 3961: "proaxess", + 3962: "sbi-agent", + 3963: "thrp", + 3964: "sasggprs", + 3965: "ati-ip-to-ncpe", + 3966: "bflckmgr", + 3967: "ppsms", + 3968: "ianywhere-dbns", + 3969: "landmarks", + 3970: "lanrevagent", + 3971: "lanrevserver", + 3972: "iconp", + 3973: "progistics", + 3974: "citysearch", + 3975: "airshot", + 3976: "opswagent", + 3977: "opswmanager", + 3978: "secure-cfg-svr", + 3979: "smwan", + 3980: "acms", + 3981: "starfish", + 3982: "eis", + 3983: "eisp", + 3984: "mapper-nodemgr", + 3985: "mapper-mapethd", + 3986: "mapper-ws-ethd", + 3987: "centerline", + 3988: "dcs-config", + 3989: "bv-queryengine", + 3990: "bv-is", + 3991: "bv-smcsrv", + 3992: "bv-ds", + 3993: "bv-agent", + 3995: "iss-mgmt-ssl", + 3996: "abcsoftware", + 3997: "agentsease-db", + 3998: "dnx", + 3999: "nvcnet", + 4000: "terabase", + 4001: "newoak", + 4002: "pxc-spvr-ft", + 4003: "pxc-splr-ft", + 4004: "pxc-roid", + 4005: "pxc-pin", + 4006: "pxc-spvr", + 4007: "pxc-splr", + 4008: "netcheque", + 4009: "chimera-hwm", + 4010: "samsung-unidex", + 4011: "altserviceboot", + 4012: "pda-gate", + 4013: "acl-manager", + 
4014: "taiclock", + 4015: "talarian-mcast1", + 4016: "talarian-mcast2", + 4017: "talarian-mcast3", + 4018: "talarian-mcast4", + 4019: "talarian-mcast5", + 4020: "trap", + 4021: "nexus-portal", + 4022: "dnox", + 4023: "esnm-zoning", + 4024: "tnp1-port", + 4025: "partimage", + 4026: "as-debug", + 4027: "bxp", + 4028: "dtserver-port", + 4029: "ip-qsig", + 4030: "jdmn-port", + 4031: "suucp", + 4032: "vrts-auth-port", + 4033: "sanavigator", + 4034: "ubxd", + 4035: "wap-push-http", + 4036: "wap-push-https", + 4037: "ravehd", + 4038: "fazzt-ptp", + 4039: "fazzt-admin", + 4040: "yo-main", + 4041: "houston", + 4042: "ldxp", + 4043: "nirp", + 4044: "ltp", + 4045: "npp", + 4046: "acp-proto", + 4047: "ctp-state", + 4049: "wafs", + 4050: "cisco-wafs", + 4051: "cppdp", + 4052: "interact", + 4053: "ccu-comm-1", + 4054: "ccu-comm-2", + 4055: "ccu-comm-3", + 4056: "lms", + 4057: "wfm", + 4058: "kingfisher", + 4059: "dlms-cosem", + 4060: "dsmeter-iatc", + 4061: "ice-location", + 4062: "ice-slocation", + 4063: "ice-router", + 4064: "ice-srouter", + 4065: "avanti-cdp", + 4066: "pmas", + 4067: "idp", + 4068: "ipfltbcst", + 4069: "minger", + 4070: "tripe", + 4071: "aibkup", + 4072: "zieto-sock", + 4073: "iRAPP", + 4074: "cequint-cityid", + 4075: "perimlan", + 4076: "seraph", + 4078: "cssp", + 4079: "santools", + 4080: "lorica-in", + 4081: "lorica-in-sec", + 4082: "lorica-out", + 4083: "lorica-out-sec", + 4085: "ezmessagesrv", + 4087: "applusservice", + 4088: "npsp", + 4089: "opencore", + 4090: "omasgport", + 4091: "ewinstaller", + 4092: "ewdgs", + 4093: "pvxpluscs", + 4094: "sysrqd", + 4095: "xtgui", + 4096: "bre", + 4097: "patrolview", + 4098: "drmsfsd", + 4099: "dpcp", + 4100: "igo-incognito", + 4101: "brlp-0", + 4102: "brlp-1", + 4103: "brlp-2", + 4104: "brlp-3", + 4105: "shofar", + 4106: "synchronite", + 4107: "j-ac", + 4108: "accel", + 4109: "izm", + 4110: "g2tag", + 4111: "xgrid", + 4112: "apple-vpns-rp", + 4113: "aipn-reg", + 4114: "jomamqmonitor", + 4115: "cds", + 4116: "smartcard-tls", + 4117: "hillrserv", + 4118: "netscript", + 4119: "assuria-slm", + 4120: "minirem", + 4121: "e-builder", + 4122: "fprams", + 4123: "z-wave", + 4124: "tigv2", + 4125: "opsview-envoy", + 4126: "ddrepl", + 4127: "unikeypro", + 4128: "nufw", + 4129: "nuauth", + 4130: "fronet", + 4131: "stars", + 4132: "nuts-dem", + 4133: "nuts-bootp", + 4134: "nifty-hmi", + 4135: "cl-db-attach", + 4136: "cl-db-request", + 4137: "cl-db-remote", + 4138: "nettest", + 4139: "thrtx", + 4140: "cedros-fds", + 4141: "oirtgsvc", + 4142: "oidocsvc", + 4143: "oidsr", + 4145: "vvr-control", + 4146: "tgcconnect", + 4147: "vrxpservman", + 4148: "hhb-handheld", + 4149: "agslb", + 4150: "PowerAlert-nsa", + 4151: "menandmice-noh", + 4152: "idig-mux", + 4153: "mbl-battd", + 4154: "atlinks", + 4155: "bzr", + 4156: "stat-results", + 4157: "stat-scanner", + 4158: "stat-cc", + 4159: "nss", + 4160: "jini-discovery", + 4161: "omscontact", + 4162: "omstopology", + 4163: "silverpeakpeer", + 4164: "silverpeakcomm", + 4165: "altcp", + 4166: "joost", + 4167: "ddgn", + 4168: "pslicser", + 4169: "iadt", + 4170: "d-cinema-csp", + 4171: "ml-svnet", + 4172: "pcoip", + 4174: "smcluster", + 4175: "bccp", + 4176: "tl-ipcproxy", + 4177: "wello", + 4178: "storman", + 4179: "MaxumSP", + 4180: "httpx", + 4181: "macbak", + 4182: "pcptcpservice", + 4183: "cyborgnet", + 4184: "universe-suite", + 4185: "wcpp", + 4186: "boxbackupstore", + 4187: "csc-proxy", + 4188: "vatata", + 4189: "pcep", + 4190: "sieve", + 4192: "azeti", + 4193: "pvxplusio", + 4197: "hctl", + 4199: "eims-admin", + 
4300: "corelccam", + 4301: "d-data", + 4302: "d-data-control", + 4303: "srcp", + 4304: "owserver", + 4305: "batman", + 4306: "pinghgl", + 4307: "trueconf", + 4308: "compx-lockview", + 4309: "dserver", + 4310: "mirrtex", + 4311: "p6ssmc", + 4312: "pscl-mgt", + 4313: "perrla", + 4314: "choiceview-agt", + 4316: "choiceview-clt", + 4320: "fdt-rcatp", + 4321: "rwhois", + 4322: "trim-event", + 4323: "trim-ice", + 4325: "geognosisman", + 4326: "geognosis", + 4327: "jaxer-web", + 4328: "jaxer-manager", + 4329: "publiqare-sync", + 4330: "dey-sapi", + 4331: "ktickets-rest", + 4333: "ahsp", + 4334: "netconf-ch-ssh", + 4335: "netconf-ch-tls", + 4336: "restconf-ch-tls", + 4340: "gaia", + 4341: "lisp-data", + 4342: "lisp-cons", + 4343: "unicall", + 4344: "vinainstall", + 4345: "m4-network-as", + 4346: "elanlm", + 4347: "lansurveyor", + 4348: "itose", + 4349: "fsportmap", + 4350: "net-device", + 4351: "plcy-net-svcs", + 4352: "pjlink", + 4353: "f5-iquery", + 4354: "qsnet-trans", + 4355: "qsnet-workst", + 4356: "qsnet-assist", + 4357: "qsnet-cond", + 4358: "qsnet-nucl", + 4359: "omabcastltkm", + 4360: "matrix-vnet", + 4368: "wxbrief", + 4369: "epmd", + 4370: "elpro-tunnel", + 4371: "l2c-control", + 4372: "l2c-data", + 4373: "remctl", + 4374: "psi-ptt", + 4375: "tolteces", + 4376: "bip", + 4377: "cp-spxsvr", + 4378: "cp-spxdpy", + 4379: "ctdb", + 4389: "xandros-cms", + 4390: "wiegand", + 4391: "apwi-imserver", + 4392: "apwi-rxserver", + 4393: "apwi-rxspooler", + 4395: "omnivisionesx", + 4396: "fly", + 4400: "ds-srv", + 4401: "ds-srvr", + 4402: "ds-clnt", + 4403: "ds-user", + 4404: "ds-admin", + 4405: "ds-mail", + 4406: "ds-slp", + 4407: "nacagent", + 4408: "slscc", + 4409: "netcabinet-com", + 4410: "itwo-server", + 4411: "found", + 4413: "avi-nms", + 4414: "updog", + 4415: "brcd-vr-req", + 4416: "pjj-player", + 4417: "workflowdir", + 4419: "cbp", + 4420: "nvm-express", + 4421: "scaleft", + 4422: "tsepisp", + 4423: "thingkit", + 4425: "netrockey6", + 4426: "beacon-port-2", + 4427: "drizzle", + 4428: "omviserver", + 4429: "omviagent", + 4430: "rsqlserver", + 4431: "wspipe", + 4432: "l-acoustics", + 4433: "vop", + 4442: "saris", + 4443: "pharos", + 4444: "krb524", + 4445: "upnotifyp", + 4446: "n1-fwp", + 4447: "n1-rmgmt", + 4448: "asc-slmd", + 4449: "privatewire", + 4450: "camp", + 4451: "ctisystemmsg", + 4452: "ctiprogramload", + 4453: "nssalertmgr", + 4454: "nssagentmgr", + 4455: "prchat-user", + 4456: "prchat-server", + 4457: "prRegister", + 4458: "mcp", + 4484: "hpssmgmt", + 4485: "assyst-dr", + 4486: "icms", + 4487: "prex-tcp", + 4488: "awacs-ice", + 4500: "ipsec-nat-t", + 4535: "ehs", + 4536: "ehs-ssl", + 4537: "wssauthsvc", + 4538: "swx-gate", + 4545: "worldscores", + 4546: "sf-lm", + 4547: "lanner-lm", + 4548: "synchromesh", + 4549: "aegate", + 4550: "gds-adppiw-db", + 4551: "ieee-mih", + 4552: "menandmice-mon", + 4553: "icshostsvc", + 4554: "msfrs", + 4555: "rsip", + 4556: "dtn-bundle", + 4559: "hylafax", + 4563: "amahi-anywhere", + 4566: "kwtc", + 4567: "tram", + 4568: "bmc-reporting", + 4569: "iax", + 4570: "deploymentmap", + 4573: "cardifftec-back", + 4590: "rid", + 4591: "l3t-at-an", + 4593: "ipt-anri-anri", + 4594: "ias-session", + 4595: "ias-paging", + 4596: "ias-neighbor", + 4597: "a21-an-1xbs", + 4598: "a16-an-an", + 4599: "a17-an-an", + 4600: "piranha1", + 4601: "piranha2", + 4602: "mtsserver", + 4603: "menandmice-upg", + 4604: "irp", + 4605: "sixchat", + 4658: "playsta2-app", + 4659: "playsta2-lob", + 4660: "smaclmgr", + 4661: "kar2ouche", + 4662: "oms", + 4663: "noteit", + 4664: "ems", + 
4665: "contclientms", + 4666: "eportcomm", + 4667: "mmacomm", + 4668: "mmaeds", + 4669: "eportcommdata", + 4670: "light", + 4671: "acter", + 4672: "rfa", + 4673: "cxws", + 4674: "appiq-mgmt", + 4675: "dhct-status", + 4676: "dhct-alerts", + 4677: "bcs", + 4678: "traversal", + 4679: "mgesupervision", + 4680: "mgemanagement", + 4681: "parliant", + 4682: "finisar", + 4683: "spike", + 4684: "rfid-rp1", + 4685: "autopac", + 4686: "msp-os", + 4687: "nst", + 4688: "mobile-p2p", + 4689: "altovacentral", + 4690: "prelude", + 4691: "mtn", + 4692: "conspiracy", + 4700: "netxms-agent", + 4701: "netxms-mgmt", + 4702: "netxms-sync", + 4703: "npqes-test", + 4704: "assuria-ins", + 4711: "trinity-dist", + 4725: "truckstar", + 4727: "fcis", + 4728: "capmux", + 4730: "gearman", + 4731: "remcap", + 4733: "resorcs", + 4737: "ipdr-sp", + 4738: "solera-lpn", + 4739: "ipfix", + 4740: "ipfixs", + 4741: "lumimgrd", + 4742: "sicct", + 4743: "openhpid", + 4744: "ifsp", + 4745: "fmp", + 4749: "profilemac", + 4750: "ssad", + 4751: "spocp", + 4752: "snap", + 4753: "simon", + 4756: "RDCenter", + 4774: "converge", + 4784: "bfd-multi-ctl", + 4786: "smart-install", + 4787: "sia-ctrl-plane", + 4788: "xmcp", + 4800: "iims", + 4801: "iwec", + 4802: "ilss", + 4803: "notateit", + 4827: "htcp", + 4837: "varadero-0", + 4838: "varadero-1", + 4839: "varadero-2", + 4840: "opcua-tcp", + 4841: "quosa", + 4842: "gw-asv", + 4843: "opcua-tls", + 4844: "gw-log", + 4845: "wcr-remlib", + 4846: "contamac-icm", + 4847: "wfc", + 4848: "appserv-http", + 4849: "appserv-https", + 4850: "sun-as-nodeagt", + 4851: "derby-repli", + 4867: "unify-debug", + 4868: "phrelay", + 4869: "phrelaydbg", + 4870: "cc-tracking", + 4871: "wired", + 4876: "tritium-can", + 4877: "lmcs", + 4879: "wsdl-event", + 4880: "hislip", + 4883: "wmlserver", + 4884: "hivestor", + 4885: "abbs", + 4894: "lyskom", + 4899: "radmin-port", + 4900: "hfcs", + 4901: "flr-agent", + 4902: "magiccontrol", + 4912: "lutap", + 4913: "lutcp", + 4914: "bones", + 4915: "frcs", + 4940: "eq-office-4940", + 4941: "eq-office-4941", + 4942: "eq-office-4942", + 4949: "munin", + 4950: "sybasesrvmon", + 4951: "pwgwims", + 4952: "sagxtsds", + 4953: "dbsyncarbiter", + 4969: "ccss-qmm", + 4970: "ccss-qsm", + 4971: "burp", + 4984: "webyast", + 4985: "gerhcs", + 4986: "mrip", + 4987: "smar-se-port1", + 4988: "smar-se-port2", + 4989: "parallel", + 4990: "busycal", + 4991: "vrt", + 4999: "hfcs-manager", + 5000: "commplex-main", + 5001: "commplex-link", + 5002: "rfe", + 5003: "fmpro-internal", + 5004: "avt-profile-1", + 5005: "avt-profile-2", + 5006: "wsm-server", + 5007: "wsm-server-ssl", + 5008: "synapsis-edge", + 5009: "winfs", + 5010: "telelpathstart", + 5011: "telelpathattack", + 5012: "nsp", + 5013: "fmpro-v6", + 5015: "fmwp", + 5020: "zenginkyo-1", + 5021: "zenginkyo-2", + 5022: "mice", + 5023: "htuilsrv", + 5024: "scpi-telnet", + 5025: "scpi-raw", + 5026: "strexec-d", + 5027: "strexec-s", + 5028: "qvr", + 5029: "infobright", + 5030: "surfpass", + 5032: "signacert-agent", + 5033: "jtnetd-server", + 5034: "jtnetd-status", + 5042: "asnaacceler8db", + 5043: "swxadmin", + 5044: "lxi-evntsvc", + 5045: "osp", + 5048: "texai", + 5049: "ivocalize", + 5050: "mmcc", + 5051: "ita-agent", + 5052: "ita-manager", + 5053: "rlm", + 5054: "rlm-admin", + 5055: "unot", + 5056: "intecom-ps1", + 5057: "intecom-ps2", + 5059: "sds", + 5060: "sip", + 5061: "sips", + 5062: "na-localise", + 5063: "csrpc", + 5064: "ca-1", + 5065: "ca-2", + 5066: "stanag-5066", + 5067: "authentx", + 5068: "bitforestsrv", + 5069: "i-net-2000-npr", + 
5070: "vtsas", + 5071: "powerschool", + 5072: "ayiya", + 5073: "tag-pm", + 5074: "alesquery", + 5075: "pvaccess", + 5080: "onscreen", + 5081: "sdl-ets", + 5082: "qcp", + 5083: "qfp", + 5084: "llrp", + 5085: "encrypted-llrp", + 5086: "aprigo-cs", + 5087: "biotic", + 5093: "sentinel-lm", + 5094: "hart-ip", + 5099: "sentlm-srv2srv", + 5100: "socalia", + 5101: "talarian-tcp", + 5102: "oms-nonsecure", + 5103: "actifio-c2c", + 5106: "actifioudsagent", + 5107: "actifioreplic", + 5111: "taep-as-svc", + 5112: "pm-cmdsvr", + 5114: "ev-services", + 5115: "autobuild", + 5117: "gradecam", + 5120: "barracuda-bbs", + 5133: "nbt-pc", + 5134: "ppactivation", + 5135: "erp-scale", + 5137: "ctsd", + 5145: "rmonitor-secure", + 5146: "social-alarm", + 5150: "atmp", + 5151: "esri-sde", + 5152: "sde-discovery", + 5153: "toruxserver", + 5154: "bzflag", + 5155: "asctrl-agent", + 5156: "rugameonline", + 5157: "mediat", + 5161: "snmpssh", + 5162: "snmpssh-trap", + 5163: "sbackup", + 5164: "vpa", + 5165: "ife-icorp", + 5166: "winpcs", + 5167: "scte104", + 5168: "scte30", + 5172: "pcoip-mgmt", + 5190: "aol", + 5191: "aol-1", + 5192: "aol-2", + 5193: "aol-3", + 5194: "cpscomm", + 5195: "ampl-lic", + 5196: "ampl-tableproxy", + 5197: "tunstall-lwp", + 5200: "targus-getdata", + 5201: "targus-getdata1", + 5202: "targus-getdata2", + 5203: "targus-getdata3", + 5209: "nomad", + 5215: "noteza", + 5221: "3exmp", + 5222: "xmpp-client", + 5223: "hpvirtgrp", + 5224: "hpvirtctrl", + 5225: "hp-server", + 5226: "hp-status", + 5227: "perfd", + 5228: "hpvroom", + 5229: "jaxflow", + 5230: "jaxflow-data", + 5231: "crusecontrol", + 5232: "csedaemon", + 5233: "enfs", + 5234: "eenet", + 5235: "galaxy-network", + 5236: "padl2sim", + 5237: "mnet-discovery", + 5245: "downtools", + 5248: "caacws", + 5249: "caaclang2", + 5250: "soagateway", + 5251: "caevms", + 5252: "movaz-ssc", + 5253: "kpdp", + 5254: "logcabin", + 5264: "3com-njack-1", + 5265: "3com-njack-2", + 5269: "xmpp-server", + 5270: "cartographerxmp", + 5271: "cuelink", + 5272: "pk", + 5280: "xmpp-bosh", + 5281: "undo-lm", + 5282: "transmit-port", + 5298: "presence", + 5299: "nlg-data", + 5300: "hacl-hb", + 5301: "hacl-gs", + 5302: "hacl-cfg", + 5303: "hacl-probe", + 5304: "hacl-local", + 5305: "hacl-test", + 5306: "sun-mc-grp", + 5307: "sco-aip", + 5308: "cfengine", + 5309: "jprinter", + 5310: "outlaws", + 5312: "permabit-cs", + 5313: "rrdp", + 5314: "opalis-rbt-ipc", + 5315: "hacl-poll", + 5316: "hpbladems", + 5317: "hpdevms", + 5318: "pkix-cmc", + 5320: "bsfserver-zn", + 5321: "bsfsvr-zn-ssl", + 5343: "kfserver", + 5344: "xkotodrcp", + 5349: "stuns", + 5352: "dns-llq", + 5353: "mdns", + 5354: "mdnsresponder", + 5355: "llmnr", + 5356: "ms-smlbiz", + 5357: "wsdapi", + 5358: "wsdapi-s", + 5359: "ms-alerter", + 5360: "ms-sideshow", + 5361: "ms-s-sideshow", + 5362: "serverwsd2", + 5363: "net-projection", + 5397: "stresstester", + 5398: "elektron-admin", + 5399: "securitychase", + 5400: "excerpt", + 5401: "excerpts", + 5402: "mftp", + 5403: "hpoms-ci-lstn", + 5404: "hpoms-dps-lstn", + 5405: "netsupport", + 5406: "systemics-sox", + 5407: "foresyte-clear", + 5408: "foresyte-sec", + 5409: "salient-dtasrv", + 5410: "salient-usrmgr", + 5411: "actnet", + 5412: "continuus", + 5413: "wwiotalk", + 5414: "statusd", + 5415: "ns-server", + 5416: "sns-gateway", + 5417: "sns-agent", + 5418: "mcntp", + 5419: "dj-ice", + 5420: "cylink-c", + 5421: "netsupport2", + 5422: "salient-mux", + 5423: "virtualuser", + 5424: "beyond-remote", + 5425: "br-channel", + 5426: "devbasic", + 5427: "sco-peer-tta", + 5428: 
"telaconsole", + 5429: "base", + 5430: "radec-corp", + 5431: "park-agent", + 5432: "postgresql", + 5433: "pyrrho", + 5434: "sgi-arrayd", + 5435: "sceanics", + 5443: "spss", + 5445: "smbdirect", + 5450: "tiepie", + 5453: "surebox", + 5454: "apc-5454", + 5455: "apc-5455", + 5456: "apc-5456", + 5461: "silkmeter", + 5462: "ttl-publisher", + 5463: "ttlpriceproxy", + 5464: "quailnet", + 5465: "netops-broker", + 5470: "apsolab-col", + 5471: "apsolab-cols", + 5472: "apsolab-tag", + 5473: "apsolab-tags", + 5475: "apsolab-data", + 5500: "fcp-addr-srvr1", + 5501: "fcp-addr-srvr2", + 5502: "fcp-srvr-inst1", + 5503: "fcp-srvr-inst2", + 5504: "fcp-cics-gw1", + 5505: "checkoutdb", + 5506: "amc", + 5507: "psl-management", + 5550: "cbus", + 5553: "sgi-eventmond", + 5554: "sgi-esphttp", + 5555: "personal-agent", + 5556: "freeciv", + 5557: "farenet", + 5565: "hpe-dp-bura", + 5566: "westec-connect", + 5567: "dof-dps-mc-sec", + 5568: "sdt", + 5569: "rdmnet-ctrl", + 5573: "sdmmp", + 5574: "lsi-bobcat", + 5575: "ora-oap", + 5579: "fdtracks", + 5580: "tmosms0", + 5581: "tmosms1", + 5582: "fac-restore", + 5583: "tmo-icon-sync", + 5584: "bis-web", + 5585: "bis-sync", + 5586: "att-mt-sms", + 5597: "ininmessaging", + 5598: "mctfeed", + 5599: "esinstall", + 5600: "esmmanager", + 5601: "esmagent", + 5602: "a1-msc", + 5603: "a1-bs", + 5604: "a3-sdunode", + 5605: "a4-sdunode", + 5618: "efr", + 5627: "ninaf", + 5628: "htrust", + 5629: "symantec-sfdb", + 5630: "precise-comm", + 5631: "pcanywheredata", + 5632: "pcanywherestat", + 5633: "beorl", + 5634: "xprtld", + 5635: "sfmsso", + 5636: "sfm-db-server", + 5637: "cssc", + 5638: "flcrs", + 5639: "ics", + 5646: "vfmobile", + 5666: "nrpe", + 5670: "filemq", + 5671: "amqps", + 5672: "amqp", + 5673: "jms", + 5674: "hyperscsi-port", + 5675: "v5ua", + 5676: "raadmin", + 5677: "questdb2-lnchr", + 5678: "rrac", + 5679: "dccm", + 5680: "auriga-router", + 5681: "ncxcp", + 5688: "ggz", + 5689: "qmvideo", + 5693: "rbsystem", + 5696: "kmip", + 5700: "supportassist", + 5705: "storageos", + 5713: "proshareaudio", + 5714: "prosharevideo", + 5715: "prosharedata", + 5716: "prosharerequest", + 5717: "prosharenotify", + 5718: "dpm", + 5719: "dpm-agent", + 5720: "ms-licensing", + 5721: "dtpt", + 5722: "msdfsr", + 5723: "omhs", + 5724: "omsdk", + 5725: "ms-ilm", + 5726: "ms-ilm-sts", + 5727: "asgenf", + 5728: "io-dist-data", + 5729: "openmail", + 5730: "unieng", + 5741: "ida-discover1", + 5742: "ida-discover2", + 5743: "watchdoc-pod", + 5744: "watchdoc", + 5745: "fcopy-server", + 5746: "fcopys-server", + 5747: "tunatic", + 5748: "tunalyzer", + 5750: "rscd", + 5755: "openmailg", + 5757: "x500ms", + 5766: "openmailns", + 5767: "s-openmail", + 5768: "openmailpxy", + 5769: "spramsca", + 5770: "spramsd", + 5771: "netagent", + 5777: "dali-port", + 5780: "vts-rpc", + 5781: "3par-evts", + 5782: "3par-mgmt", + 5783: "3par-mgmt-ssl", + 5785: "3par-rcopy", + 5793: "xtreamx", + 5813: "icmpd", + 5814: "spt-automation", + 5841: "shiprush-d-ch", + 5842: "reversion", + 5859: "wherehoo", + 5863: "ppsuitemsg", + 5868: "diameters", + 5883: "jute", + 5900: "rfb", + 5910: "cm", + 5911: "cpdlc", + 5912: "fis", + 5913: "ads-c", + 5963: "indy", + 5968: "mppolicy-v5", + 5969: "mppolicy-mgr", + 5984: "couchdb", + 5985: "wsman", + 5986: "wsmans", + 5987: "wbem-rmi", + 5988: "wbem-http", + 5989: "wbem-https", + 5990: "wbem-exp-https", + 5991: "nuxsl", + 5992: "consul-insight", + 5993: "cim-rs", + 5999: "cvsup", + 6064: "ndl-ahp-svc", + 6065: "winpharaoh", + 6066: "ewctsp", + 6068: "gsmp-ancp", + 6069: "trip", + 6070: 
"messageasap", + 6071: "ssdtp", + 6072: "diagnose-proc", + 6073: "directplay8", + 6074: "max", + 6075: "dpm-acm", + 6076: "msft-dpm-cert", + 6077: "iconstructsrv", + 6084: "reload-config", + 6085: "konspire2b", + 6086: "pdtp", + 6087: "ldss", + 6088: "doglms", + 6099: "raxa-mgmt", + 6100: "synchronet-db", + 6101: "synchronet-rtc", + 6102: "synchronet-upd", + 6103: "rets", + 6104: "dbdb", + 6105: "primaserver", + 6106: "mpsserver", + 6107: "etc-control", + 6108: "sercomm-scadmin", + 6109: "globecast-id", + 6110: "softcm", + 6111: "spc", + 6112: "dtspcd", + 6113: "dayliteserver", + 6114: "wrspice", + 6115: "xic", + 6116: "xtlserv", + 6117: "daylitetouch", + 6121: "spdy", + 6122: "bex-webadmin", + 6123: "backup-express", + 6124: "pnbs", + 6130: "damewaremobgtwy", + 6133: "nbt-wol", + 6140: "pulsonixnls", + 6141: "meta-corp", + 6142: "aspentec-lm", + 6143: "watershed-lm", + 6144: "statsci1-lm", + 6145: "statsci2-lm", + 6146: "lonewolf-lm", + 6147: "montage-lm", + 6148: "ricardo-lm", + 6149: "tal-pod", + 6159: "efb-aci", + 6160: "ecmp", + 6161: "patrol-ism", + 6162: "patrol-coll", + 6163: "pscribe", + 6200: "lm-x", + 6209: "qmtps", + 6222: "radmind", + 6241: "jeol-nsdtp-1", + 6242: "jeol-nsdtp-2", + 6243: "jeol-nsdtp-3", + 6244: "jeol-nsdtp-4", + 6251: "tl1-raw-ssl", + 6252: "tl1-ssh", + 6253: "crip", + 6267: "gld", + 6268: "grid", + 6269: "grid-alt", + 6300: "bmc-grx", + 6301: "bmc-ctd-ldap", + 6306: "ufmp", + 6315: "scup", + 6316: "abb-escp", + 6317: "nav-data-cmd", + 6320: "repsvc", + 6321: "emp-server1", + 6322: "emp-server2", + 6324: "hrd-ncs", + 6325: "dt-mgmtsvc", + 6326: "dt-vra", + 6343: "sflow", + 6344: "streletz", + 6346: "gnutella-svc", + 6347: "gnutella-rtr", + 6350: "adap", + 6355: "pmcs", + 6360: "metaedit-mu", + 6370: "metaedit-se", + 6379: "redis", + 6382: "metatude-mds", + 6389: "clariion-evr01", + 6390: "metaedit-ws", + 6417: "faxcomservice", + 6418: "syserverremote", + 6419: "svdrp", + 6420: "nim-vdrshell", + 6421: "nim-wan", + 6432: "pgbouncer", + 6442: "tarp", + 6443: "sun-sr-https", + 6444: "sge-qmaster", + 6445: "sge-execd", + 6446: "mysql-proxy", + 6455: "skip-cert-recv", + 6456: "skip-cert-send", + 6464: "ieee11073-20701", + 6471: "lvision-lm", + 6480: "sun-sr-http", + 6481: "servicetags", + 6482: "ldoms-mgmt", + 6483: "SunVTS-RMI", + 6484: "sun-sr-jms", + 6485: "sun-sr-iiop", + 6486: "sun-sr-iiops", + 6487: "sun-sr-iiop-aut", + 6488: "sun-sr-jmx", + 6489: "sun-sr-admin", + 6500: "boks", + 6501: "boks-servc", + 6502: "boks-servm", + 6503: "boks-clntd", + 6505: "badm-priv", + 6506: "badm-pub", + 6507: "bdir-priv", + 6508: "bdir-pub", + 6509: "mgcs-mfp-port", + 6510: "mcer-port", + 6513: "netconf-tls", + 6514: "syslog-tls", + 6515: "elipse-rec", + 6543: "lds-distrib", + 6544: "lds-dump", + 6547: "apc-6547", + 6548: "apc-6548", + 6549: "apc-6549", + 6550: "fg-sysupdate", + 6551: "sum", + 6558: "xdsxdm", + 6566: "sane-port", + 6568: "canit-store", + 6579: "affiliate", + 6580: "parsec-master", + 6581: "parsec-peer", + 6582: "parsec-game", + 6583: "joaJewelSuite", + 6600: "mshvlm", + 6601: "mstmg-sstp", + 6602: "wsscomfrmwk", + 6619: "odette-ftps", + 6620: "kftp-data", + 6621: "kftp", + 6622: "mcftp", + 6623: "ktelnet", + 6624: "datascaler-db", + 6625: "datascaler-ctl", + 6626: "wago-service", + 6627: "nexgen", + 6628: "afesc-mc", + 6629: "nexgen-aux", + 6632: "mxodbc-connect", + 6640: "ovsdb", + 6653: "openflow", + 6655: "pcs-sf-ui-man", + 6656: "emgmsg", + 6670: "vocaltec-gold", + 6671: "p4p-portal", + 6672: "vision-server", + 6673: "vision-elmd", + 6678: "vfbp", + 6679: 
"osaut", + 6687: "clever-ctrace", + 6688: "clever-tcpip", + 6689: "tsa", + 6690: "cleverdetect", + 6697: "ircs-u", + 6701: "kti-icad-srvr", + 6702: "e-design-net", + 6703: "e-design-web", + 6714: "ibprotocol", + 6715: "fibotrader-com", + 6716: "princity-agent", + 6767: "bmc-perf-agent", + 6768: "bmc-perf-mgrd", + 6769: "adi-gxp-srvprt", + 6770: "plysrv-http", + 6771: "plysrv-https", + 6777: "ntz-tracker", + 6778: "ntz-p2p-storage", + 6785: "dgpf-exchg", + 6786: "smc-jmx", + 6787: "smc-admin", + 6788: "smc-http", + 6789: "radg", + 6790: "hnmp", + 6791: "hnm", + 6801: "acnet", + 6817: "pentbox-sim", + 6831: "ambit-lm", + 6841: "netmo-default", + 6842: "netmo-http", + 6850: "iccrushmore", + 6868: "acctopus-cc", + 6888: "muse", + 6900: "rtimeviewer", + 6901: "jetstream", + 6935: "ethoscan", + 6936: "xsmsvc", + 6946: "bioserver", + 6951: "otlp", + 6961: "jmact3", + 6962: "jmevt2", + 6963: "swismgr1", + 6964: "swismgr2", + 6965: "swistrap", + 6966: "swispol", + 6969: "acmsoda", + 6970: "conductor", + 6997: "MobilitySrv", + 6998: "iatp-highpri", + 6999: "iatp-normalpri", + 7000: "afs3-fileserver", + 7001: "afs3-callback", + 7002: "afs3-prserver", + 7003: "afs3-vlserver", + 7004: "afs3-kaserver", + 7005: "afs3-volser", + 7006: "afs3-errors", + 7007: "afs3-bos", + 7008: "afs3-update", + 7009: "afs3-rmtsys", + 7010: "ups-onlinet", + 7011: "talon-disc", + 7012: "talon-engine", + 7013: "microtalon-dis", + 7014: "microtalon-com", + 7015: "talon-webserver", + 7016: "spg", + 7017: "grasp", + 7018: "fisa-svc", + 7019: "doceri-ctl", + 7020: "dpserve", + 7021: "dpserveadmin", + 7022: "ctdp", + 7023: "ct2nmcs", + 7024: "vmsvc", + 7025: "vmsvc-2", + 7030: "op-probe", + 7031: "iposplanet", + 7070: "arcp", + 7071: "iwg1", + 7073: "martalk", + 7080: "empowerid", + 7099: "lazy-ptop", + 7100: "font-service", + 7101: "elcn", + 7117: "rothaga", + 7121: "virprot-lm", + 7128: "scenidm", + 7129: "scenccs", + 7161: "cabsm-comm", + 7162: "caistoragemgr", + 7163: "cacsambroker", + 7164: "fsr", + 7165: "doc-server", + 7166: "aruba-server", + 7167: "casrmagent", + 7168: "cnckadserver", + 7169: "ccag-pib", + 7170: "nsrp", + 7171: "drm-production", + 7172: "metalbend", + 7173: "zsecure", + 7174: "clutild", + 7200: "fodms", + 7201: "dlip", + 7202: "pon-ictp", + 7215: "PS-Server", + 7216: "PS-Capture-Pro", + 7227: "ramp", + 7228: "citrixupp", + 7229: "citrixuppg", + 7236: "display", + 7237: "pads", + 7244: "frc-hicp", + 7262: "cnap", + 7272: "watchme-7272", + 7273: "oma-rlp", + 7274: "oma-rlp-s", + 7275: "oma-ulp", + 7276: "oma-ilp", + 7277: "oma-ilp-s", + 7278: "oma-dcdocbs", + 7279: "ctxlic", + 7280: "itactionserver1", + 7281: "itactionserver2", + 7282: "mzca-action", + 7283: "genstat", + 7365: "lcm-server", + 7391: "mindfilesys", + 7392: "mrssrendezvous", + 7393: "nfoldman", + 7394: "fse", + 7395: "winqedit", + 7397: "hexarc", + 7400: "rtps-discovery", + 7401: "rtps-dd-ut", + 7402: "rtps-dd-mt", + 7410: "ionixnetmon", + 7411: "daqstream", + 7421: "mtportmon", + 7426: "pmdmgr", + 7427: "oveadmgr", + 7428: "ovladmgr", + 7429: "opi-sock", + 7430: "xmpv7", + 7431: "pmd", + 7437: "faximum", + 7443: "oracleas-https", + 7471: "sttunnel", + 7473: "rise", + 7474: "neo4j", + 7478: "openit", + 7491: "telops-lmd", + 7500: "silhouette", + 7501: "ovbus", + 7508: "adcp", + 7509: "acplt", + 7510: "ovhpas", + 7511: "pafec-lm", + 7542: "saratoga", + 7543: "atul", + 7544: "nta-ds", + 7545: "nta-us", + 7546: "cfs", + 7547: "cwmp", + 7548: "tidp", + 7549: "nls-tl", + 7551: "controlone-con", + 7560: "sncp", + 7563: "cfw", + 7566: "vsi-omega", + 
7569: "dell-eql-asm", + 7570: "aries-kfinder", + 7574: "coherence", + 7588: "sun-lm", + 7606: "mipi-debug", + 7624: "indi", + 7626: "simco", + 7627: "soap-http", + 7628: "zen-pawn", + 7629: "xdas", + 7630: "hawk", + 7631: "tesla-sys-msg", + 7633: "pmdfmgt", + 7648: "cuseeme", + 7672: "imqstomp", + 7673: "imqstomps", + 7674: "imqtunnels", + 7675: "imqtunnel", + 7676: "imqbrokerd", + 7677: "sun-user-https", + 7680: "pando-pub", + 7683: "dmt", + 7687: "bolt", + 7689: "collaber", + 7697: "klio", + 7700: "em7-secom", + 7707: "sync-em7", + 7708: "scinet", + 7720: "medimageportal", + 7724: "nsdeepfreezectl", + 7725: "nitrogen", + 7726: "freezexservice", + 7727: "trident-data", + 7728: "osvr", + 7734: "smip", + 7738: "aiagent", + 7741: "scriptview", + 7742: "msss", + 7743: "sstp-1", + 7744: "raqmon-pdu", + 7747: "prgp", + 7775: "inetfs", + 7777: "cbt", + 7778: "interwise", + 7779: "vstat", + 7781: "accu-lmgr", + 7786: "minivend", + 7787: "popup-reminders", + 7789: "office-tools", + 7794: "q3ade", + 7797: "pnet-conn", + 7798: "pnet-enc", + 7799: "altbsdp", + 7800: "asr", + 7801: "ssp-client", + 7810: "rbt-wanopt", + 7845: "apc-7845", + 7846: "apc-7846", + 7847: "csoauth", + 7869: "mobileanalyzer", + 7870: "rbt-smc", + 7871: "mdm", + 7878: "owms", + 7880: "pss", + 7887: "ubroker", + 7900: "mevent", + 7901: "tnos-sp", + 7902: "tnos-dp", + 7903: "tnos-dps", + 7913: "qo-secure", + 7932: "t2-drm", + 7933: "t2-brm", + 7962: "generalsync", + 7967: "supercell", + 7979: "micromuse-ncps", + 7980: "quest-vista", + 7981: "sossd-collect", + 7982: "sossd-agent", + 7997: "pushns", + 7999: "irdmi2", + 8000: "irdmi", + 8001: "vcom-tunnel", + 8002: "teradataordbms", + 8003: "mcreport", + 8005: "mxi", + 8006: "wpl-analytics", + 8007: "warppipe", + 8008: "http-alt", + 8019: "qbdb", + 8020: "intu-ec-svcdisc", + 8021: "intu-ec-client", + 8022: "oa-system", + 8025: "ca-audit-da", + 8026: "ca-audit-ds", + 8032: "pro-ed", + 8033: "mindprint", + 8034: "vantronix-mgmt", + 8040: "ampify", + 8041: "enguity-xccetp", + 8042: "fs-agent", + 8043: "fs-server", + 8044: "fs-mgmt", + 8051: "rocrail", + 8052: "senomix01", + 8053: "senomix02", + 8054: "senomix03", + 8055: "senomix04", + 8056: "senomix05", + 8057: "senomix06", + 8058: "senomix07", + 8059: "senomix08", + 8066: "toad-bi-appsrvr", + 8067: "infi-async", + 8070: "ucs-isc", + 8074: "gadugadu", + 8077: "mles", + 8080: "http-alt", + 8081: "sunproxyadmin", + 8082: "us-cli", + 8083: "us-srv", + 8086: "d-s-n", + 8087: "simplifymedia", + 8088: "radan-http", + 8090: "opsmessaging", + 8091: "jamlink", + 8097: "sac", + 8100: "xprint-server", + 8101: "ldoms-migr", + 8102: "kz-migr", + 8115: "mtl8000-matrix", + 8116: "cp-cluster", + 8117: "purityrpc", + 8118: "privoxy", + 8121: "apollo-data", + 8122: "apollo-admin", + 8128: "paycash-online", + 8129: "paycash-wbp", + 8130: "indigo-vrmi", + 8131: "indigo-vbcp", + 8132: "dbabble", + 8140: "puppet", + 8148: "isdd", + 8153: "quantastor", + 8160: "patrol", + 8161: "patrol-snmp", + 8162: "lpar2rrd", + 8181: "intermapper", + 8182: "vmware-fdm", + 8183: "proremote", + 8184: "itach", + 8190: "gcp-rphy", + 8191: "limnerpressure", + 8192: "spytechphone", + 8194: "blp1", + 8195: "blp2", + 8199: "vvr-data", + 8200: "trivnet1", + 8201: "trivnet2", + 8204: "lm-perfworks", + 8205: "lm-instmgr", + 8206: "lm-dta", + 8207: "lm-sserver", + 8208: "lm-webwatcher", + 8230: "rexecj", + 8243: "synapse-nhttps", + 8270: "robot-remote", + 8276: "pando-sec", + 8280: "synapse-nhttp", + 8282: "libelle", + 8292: "blp3", + 8293: "hiperscan-id", + 8294: "blp4", + 8300: 
"tmi", + 8301: "amberon", + 8313: "hub-open-net", + 8320: "tnp-discover", + 8321: "tnp", + 8322: "garmin-marine", + 8351: "server-find", + 8376: "cruise-enum", + 8377: "cruise-swroute", + 8378: "cruise-config", + 8379: "cruise-diags", + 8380: "cruise-update", + 8383: "m2mservices", + 8400: "cvd", + 8401: "sabarsd", + 8402: "abarsd", + 8403: "admind", + 8404: "svcloud", + 8405: "svbackup", + 8415: "dlpx-sp", + 8416: "espeech", + 8417: "espeech-rtp", + 8423: "aritts", + 8442: "cybro-a-bus", + 8443: "pcsync-https", + 8444: "pcsync-http", + 8445: "copy", + 8450: "npmp", + 8457: "nexentamv", + 8470: "cisco-avp", + 8471: "pim-port", + 8472: "otv", + 8473: "vp2p", + 8474: "noteshare", + 8500: "fmtp", + 8501: "cmtp-mgt", + 8502: "ftnmtp", + 8554: "rtsp-alt", + 8555: "d-fence", + 8567: "dof-tunnel", + 8600: "asterix", + 8610: "canon-mfnp", + 8611: "canon-bjnp1", + 8612: "canon-bjnp2", + 8613: "canon-bjnp3", + 8614: "canon-bjnp4", + 8615: "imink", + 8665: "monetra", + 8666: "monetra-admin", + 8675: "msi-cps-rm", + 8686: "sun-as-jmxrmi", + 8688: "openremote-ctrl", + 8699: "vnyx", + 8711: "nvc", + 8733: "ibus", + 8750: "dey-keyneg", + 8763: "mc-appserver", + 8764: "openqueue", + 8765: "ultraseek-http", + 8766: "amcs", + 8770: "dpap", + 8778: "uec", + 8786: "msgclnt", + 8787: "msgsrvr", + 8793: "acd-pm", + 8800: "sunwebadmin", + 8804: "truecm", + 8873: "dxspider", + 8880: "cddbp-alt", + 8881: "galaxy4d", + 8883: "secure-mqtt", + 8888: "ddi-tcp-1", + 8889: "ddi-tcp-2", + 8890: "ddi-tcp-3", + 8891: "ddi-tcp-4", + 8892: "ddi-tcp-5", + 8893: "ddi-tcp-6", + 8894: "ddi-tcp-7", + 8899: "ospf-lite", + 8900: "jmb-cds1", + 8901: "jmb-cds2", + 8910: "manyone-http", + 8911: "manyone-xml", + 8912: "wcbackup", + 8913: "dragonfly", + 8937: "twds", + 8953: "ub-dns-control", + 8954: "cumulus-admin", + 8980: "nod-provider", + 8989: "sunwebadmins", + 8990: "http-wmap", + 8991: "https-wmap", + 8997: "oracle-ms-ens", + 8998: "canto-roboflow", + 8999: "bctp", + 9000: "cslistener", + 9001: "etlservicemgr", + 9002: "dynamid", + 9005: "golem", + 9008: "ogs-server", + 9009: "pichat", + 9010: "sdr", + 9020: "tambora", + 9021: "panagolin-ident", + 9022: "paragent", + 9023: "swa-1", + 9024: "swa-2", + 9025: "swa-3", + 9026: "swa-4", + 9050: "versiera", + 9051: "fio-cmgmt", + 9060: "CardWeb-IO", + 9080: "glrpc", + 9083: "emc-pp-mgmtsvc", + 9084: "aurora", + 9085: "ibm-rsyscon", + 9086: "net2display", + 9087: "classic", + 9088: "sqlexec", + 9089: "sqlexec-ssl", + 9090: "websm", + 9091: "xmltec-xmlmail", + 9092: "XmlIpcRegSvc", + 9093: "copycat", + 9100: "hp-pdl-datastr", + 9101: "bacula-dir", + 9102: "bacula-fd", + 9103: "bacula-sd", + 9104: "peerwire", + 9105: "xadmin", + 9106: "astergate", + 9107: "astergatefax", + 9119: "mxit", + 9122: "grcmp", + 9123: "grcp", + 9131: "dddp", + 9160: "apani1", + 9161: "apani2", + 9162: "apani3", + 9163: "apani4", + 9164: "apani5", + 9191: "sun-as-jpda", + 9200: "wap-wsp", + 9201: "wap-wsp-wtp", + 9202: "wap-wsp-s", + 9203: "wap-wsp-wtp-s", + 9204: "wap-vcard", + 9205: "wap-vcal", + 9206: "wap-vcard-s", + 9207: "wap-vcal-s", + 9208: "rjcdb-vcards", + 9209: "almobile-system", + 9210: "oma-mlp", + 9211: "oma-mlp-s", + 9212: "serverviewdbms", + 9213: "serverstart", + 9214: "ipdcesgbs", + 9215: "insis", + 9216: "acme", + 9217: "fsc-port", + 9222: "teamcoherence", + 9255: "mon", + 9278: "pegasus", + 9279: "pegasus-ctl", + 9280: "pgps", + 9281: "swtp-port1", + 9282: "swtp-port2", + 9283: "callwaveiam", + 9284: "visd", + 9285: "n2h2server", + 9287: "cumulus", + 9292: "armtechdaemon", + 9293: "storview", 
+ 9294: "armcenterhttp", + 9295: "armcenterhttps", + 9300: "vrace", + 9306: "sphinxql", + 9312: "sphinxapi", + 9318: "secure-ts", + 9321: "guibase", + 9343: "mpidcmgr", + 9344: "mphlpdmc", + 9345: "rancher", + 9346: "ctechlicensing", + 9374: "fjdmimgr", + 9380: "boxp", + 9387: "d2dconfig", + 9388: "d2ddatatrans", + 9389: "adws", + 9390: "otp", + 9396: "fjinvmgr", + 9397: "mpidcagt", + 9400: "sec-t4net-srv", + 9401: "sec-t4net-clt", + 9402: "sec-pc2fax-srv", + 9418: "git", + 9443: "tungsten-https", + 9444: "wso2esb-console", + 9445: "mindarray-ca", + 9450: "sntlkeyssrvr", + 9500: "ismserver", + 9535: "mngsuite", + 9536: "laes-bf", + 9555: "trispen-sra", + 9592: "ldgateway", + 9593: "cba8", + 9594: "msgsys", + 9595: "pds", + 9596: "mercury-disc", + 9597: "pd-admin", + 9598: "vscp", + 9599: "robix", + 9600: "micromuse-ncpw", + 9612: "streamcomm-ds", + 9614: "iadt-tls", + 9616: "erunbook-agent", + 9617: "erunbook-server", + 9618: "condor", + 9628: "odbcpathway", + 9629: "uniport", + 9630: "peoctlr", + 9631: "peocoll", + 9640: "pqsflows", + 9666: "zoomcp", + 9667: "xmms2", + 9668: "tec5-sdctp", + 9694: "client-wakeup", + 9695: "ccnx", + 9700: "board-roar", + 9747: "l5nas-parchan", + 9750: "board-voip", + 9753: "rasadv", + 9762: "tungsten-http", + 9800: "davsrc", + 9801: "sstp-2", + 9802: "davsrcs", + 9875: "sapv1", + 9876: "sd", + 9888: "cyborg-systems", + 9889: "gt-proxy", + 9898: "monkeycom", + 9900: "iua", + 9909: "domaintime", + 9911: "sype-transport", + 9925: "xybrid-cloud", + 9950: "apc-9950", + 9951: "apc-9951", + 9952: "apc-9952", + 9953: "acis", + 9954: "hinp", + 9955: "alljoyn-stm", + 9966: "odnsp", + 9978: "xybrid-rt", + 9979: "visweather", + 9981: "pumpkindb", + 9987: "dsm-scm-target", + 9988: "nsesrvr", + 9990: "osm-appsrvr", + 9991: "osm-oev", + 9992: "palace-1", + 9993: "palace-2", + 9994: "palace-3", + 9995: "palace-4", + 9996: "palace-5", + 9997: "palace-6", + 9998: "distinct32", + 9999: "distinct", + 10000: "ndmp", + 10001: "scp-config", + 10002: "documentum", + 10003: "documentum-s", + 10004: "emcrmirccd", + 10005: "emcrmird", + 10006: "netapp-sync", + 10007: "mvs-capacity", + 10008: "octopus", + 10009: "swdtp-sv", + 10010: "rxapi", + 10020: "abb-hw", + 10050: "zabbix-agent", + 10051: "zabbix-trapper", + 10055: "qptlmd", + 10080: "amanda", + 10081: "famdc", + 10100: "itap-ddtp", + 10101: "ezmeeting-2", + 10102: "ezproxy-2", + 10103: "ezrelay", + 10104: "swdtp", + 10107: "bctp-server", + 10110: "nmea-0183", + 10113: "netiq-endpoint", + 10114: "netiq-qcheck", + 10115: "netiq-endpt", + 10116: "netiq-voipa", + 10117: "iqrm", + 10125: "cimple", + 10128: "bmc-perf-sd", + 10129: "bmc-gms", + 10160: "qb-db-server", + 10161: "snmptls", + 10162: "snmptls-trap", + 10200: "trisoap", + 10201: "rsms", + 10252: "apollo-relay", + 10260: "axis-wimp-port", + 10261: "tile-ml", + 10288: "blocks", + 10321: "cosir", + 10540: "MOS-lower", + 10541: "MOS-upper", + 10542: "MOS-aux", + 10543: "MOS-soap", + 10544: "MOS-soap-opt", + 10548: "serverdocs", + 10631: "printopia", + 10800: "gap", + 10805: "lpdg", + 10809: "nbd", + 10860: "helix", + 10880: "bveapi", + 10933: "octopustentacle", + 10990: "rmiaux", + 11000: "irisa", + 11001: "metasys", + 11095: "weave", + 11103: "origo-sync", + 11104: "netapp-icmgmt", + 11105: "netapp-icdata", + 11106: "sgi-lk", + 11109: "sgi-dmfmgr", + 11110: "sgi-soap", + 11111: "vce", + 11112: "dicom", + 11161: "suncacao-snmp", + 11162: "suncacao-jmxmp", + 11163: "suncacao-rmi", + 11164: "suncacao-csa", + 11165: "suncacao-websvc", + 11172: "oemcacao-jmxmp", + 11173: 
"t5-straton", + 11174: "oemcacao-rmi", + 11175: "oemcacao-websvc", + 11201: "smsqp", + 11202: "dcsl-backup", + 11208: "wifree", + 11211: "memcache", + 11319: "imip", + 11320: "imip-channels", + 11321: "arena-server", + 11367: "atm-uhas", + 11371: "hkp", + 11489: "asgcypresstcps", + 11600: "tempest-port", + 11623: "emc-xsw-dconfig", + 11720: "h323callsigalt", + 11723: "emc-xsw-dcache", + 11751: "intrepid-ssl", + 11796: "lanschool", + 11876: "xoraya", + 11967: "sysinfo-sp", + 12000: "entextxid", + 12001: "entextnetwk", + 12002: "entexthigh", + 12003: "entextmed", + 12004: "entextlow", + 12005: "dbisamserver1", + 12006: "dbisamserver2", + 12007: "accuracer", + 12008: "accuracer-dbms", + 12010: "edbsrvr", + 12012: "vipera", + 12013: "vipera-ssl", + 12109: "rets-ssl", + 12121: "nupaper-ss", + 12168: "cawas", + 12172: "hivep", + 12300: "linogridengine", + 12302: "rads", + 12321: "warehouse-sss", + 12322: "warehouse", + 12345: "italk", + 12753: "tsaf", + 12865: "netperf", + 13160: "i-zipqd", + 13216: "bcslogc", + 13217: "rs-pias", + 13218: "emc-vcas-tcp", + 13223: "powwow-client", + 13224: "powwow-server", + 13400: "doip-data", + 13720: "bprd", + 13721: "bpdbm", + 13722: "bpjava-msvc", + 13724: "vnetd", + 13782: "bpcd", + 13783: "vopied", + 13785: "nbdb", + 13786: "nomdb", + 13818: "dsmcc-config", + 13819: "dsmcc-session", + 13820: "dsmcc-passthru", + 13821: "dsmcc-download", + 13822: "dsmcc-ccp", + 13823: "bmdss", + 13894: "ucontrol", + 13929: "dta-systems", + 13930: "medevolve", + 14000: "scotty-ft", + 14001: "sua", + 14033: "sage-best-com1", + 14034: "sage-best-com2", + 14141: "vcs-app", + 14142: "icpp", + 14143: "icpps", + 14145: "gcm-app", + 14149: "vrts-tdd", + 14150: "vcscmd", + 14154: "vad", + 14250: "cps", + 14414: "ca-web-update", + 14500: "xpra", + 14936: "hde-lcesrvr-1", + 14937: "hde-lcesrvr-2", + 15000: "hydap", + 15002: "onep-tls", + 15345: "xpilot", + 15363: "3link", + 15555: "cisco-snat", + 15660: "bex-xr", + 15740: "ptp", + 15999: "programmar", + 16000: "fmsas", + 16001: "fmsascon", + 16002: "gsms", + 16020: "jwpc", + 16021: "jwpc-bin", + 16161: "sun-sea-port", + 16162: "solaris-audit", + 16309: "etb4j", + 16310: "pduncs", + 16311: "pdefmns", + 16360: "netserialext1", + 16361: "netserialext2", + 16367: "netserialext3", + 16368: "netserialext4", + 16384: "connected", + 16385: "rdgs", + 16619: "xoms", + 16665: "axon-tunnel", + 16789: "cadsisvr", + 16900: "newbay-snc-mc", + 16950: "sgcip", + 16991: "intel-rci-mp", + 16992: "amt-soap-http", + 16993: "amt-soap-https", + 16994: "amt-redir-tcp", + 16995: "amt-redir-tls", + 17007: "isode-dua", + 17184: "vestasdlp", + 17185: "soundsvirtual", + 17219: "chipper", + 17220: "avtp", + 17221: "avdecc", + 17223: "isa100-gci", + 17225: "trdp-md", + 17234: "integrius-stp", + 17235: "ssh-mgmt", + 17500: "db-lsp", + 17555: "ailith", + 17729: "ea", + 17754: "zep", + 17755: "zigbee-ip", + 17756: "zigbee-ips", + 17777: "sw-orion", + 18000: "biimenu", + 18104: "radpdf", + 18136: "racf", + 18181: "opsec-cvp", + 18182: "opsec-ufp", + 18183: "opsec-sam", + 18184: "opsec-lea", + 18185: "opsec-omi", + 18186: "ohsc", + 18187: "opsec-ela", + 18241: "checkpoint-rtm", + 18242: "iclid", + 18243: "clusterxl", + 18262: "gv-pf", + 18463: "ac-cluster", + 18634: "rds-ib", + 18635: "rds-ip", + 18668: "vdmmesh", + 18769: "ique", + 18881: "infotos", + 18888: "apc-necmp", + 19000: "igrid", + 19007: "scintilla", + 19020: "j-link", + 19191: "opsec-uaa", + 19194: "ua-secureagent", + 19220: "cora", + 19283: "keysrvr", + 19315: "keyshadow", + 19398: "mtrgtrans", + 19410: 
"hp-sco", + 19411: "hp-sca", + 19412: "hp-sessmon", + 19539: "fxuptp", + 19540: "sxuptp", + 19541: "jcp", + 19998: "iec-104-sec", + 19999: "dnp-sec", + 20000: "dnp", + 20001: "microsan", + 20002: "commtact-http", + 20003: "commtact-https", + 20005: "openwebnet", + 20013: "ss-idi", + 20014: "opendeploy", + 20034: "nburn-id", + 20046: "tmophl7mts", + 20048: "mountd", + 20049: "nfsrdma", + 20057: "avesterra", + 20167: "tolfab", + 20202: "ipdtp-port", + 20222: "ipulse-ics", + 20480: "emwavemsg", + 20670: "track", + 20999: "athand-mmp", + 21000: "irtrans", + 21010: "notezilla-lan", + 21221: "aigairserver", + 21553: "rdm-tfs", + 21554: "dfserver", + 21590: "vofr-gateway", + 21800: "tvpm", + 21845: "webphone", + 21846: "netspeak-is", + 21847: "netspeak-cs", + 21848: "netspeak-acd", + 21849: "netspeak-cps", + 22000: "snapenetio", + 22001: "optocontrol", + 22002: "optohost002", + 22003: "optohost003", + 22004: "optohost004", + 22005: "optohost004", + 22125: "dcap", + 22128: "gsidcap", + 22222: "easyengine", + 22273: "wnn6", + 22305: "cis", + 22335: "shrewd-control", + 22343: "cis-secure", + 22347: "wibukey", + 22350: "codemeter", + 22351: "codemeter-cmwan", + 22537: "caldsoft-backup", + 22555: "vocaltec-wconf", + 22763: "talikaserver", + 22800: "aws-brf", + 22951: "brf-gw", + 23000: "inovaport1", + 23001: "inovaport2", + 23002: "inovaport3", + 23003: "inovaport4", + 23004: "inovaport5", + 23005: "inovaport6", + 23053: "gntp", + 23294: "5afe-dir", + 23333: "elxmgmt", + 23400: "novar-dbase", + 23401: "novar-alarm", + 23402: "novar-global", + 23456: "aequus", + 23457: "aequus-alt", + 23546: "areaguard-neo", + 24000: "med-ltp", + 24001: "med-fsp-rx", + 24002: "med-fsp-tx", + 24003: "med-supp", + 24004: "med-ovw", + 24005: "med-ci", + 24006: "med-net-svc", + 24242: "filesphere", + 24249: "vista-4gl", + 24321: "ild", + 24386: "intel-rci", + 24465: "tonidods", + 24554: "binkp", + 24577: "bilobit", + 24666: "sdtvwcam", + 24676: "canditv", + 24677: "flashfiler", + 24678: "proactivate", + 24680: "tcc-http", + 24754: "cslg", + 24922: "find", + 25000: "icl-twobase1", + 25001: "icl-twobase2", + 25002: "icl-twobase3", + 25003: "icl-twobase4", + 25004: "icl-twobase5", + 25005: "icl-twobase6", + 25006: "icl-twobase7", + 25007: "icl-twobase8", + 25008: "icl-twobase9", + 25009: "icl-twobase10", + 25576: "sauterdongle", + 25604: "idtp", + 25793: "vocaltec-hos", + 25900: "tasp-net", + 25901: "niobserver", + 25902: "nilinkanalyst", + 25903: "niprobe", + 26000: "quake", + 26133: "scscp", + 26208: "wnn6-ds", + 26257: "cockroach", + 26260: "ezproxy", + 26261: "ezmeeting", + 26262: "k3software-svr", + 26263: "k3software-cli", + 26486: "exoline-tcp", + 26487: "exoconfig", + 26489: "exonet", + 27345: "imagepump", + 27442: "jesmsjc", + 27504: "kopek-httphead", + 27782: "ars-vista", + 27876: "astrolink", + 27999: "tw-auth-key", + 28000: "nxlmd", + 28001: "pqsp", + 28200: "voxelstorm", + 28240: "siemensgsm", + 28589: "bosswave", + 29167: "otmp", + 29999: "bingbang", + 30000: "ndmps", + 30001: "pago-services1", + 30002: "pago-services2", + 30003: "amicon-fpsu-ra", + 30100: "rwp", + 30260: "kingdomsonline", + 30400: "gs-realtime", + 30999: "ovobs", + 31016: "ka-sddp", + 31020: "autotrac-acp", + 31400: "pace-licensed", + 31416: "xqosd", + 31457: "tetrinet", + 31620: "lm-mon", + 31685: "dsx-monitor", + 31765: "gamesmith-port", + 31948: "iceedcp-tx", + 31949: "iceedcp-rx", + 32034: "iracinghelper", + 32249: "t1distproc60", + 32400: "plex", + 32483: "apm-link", + 32635: "sec-ntb-clnt", + 32636: "DMExpress", + 32767: "filenet-powsrm", 
+ 32768: "filenet-tms", + 32769: "filenet-rpc", + 32770: "filenet-nch", + 32771: "filenet-rmi", + 32772: "filenet-pa", + 32773: "filenet-cm", + 32774: "filenet-re", + 32775: "filenet-pch", + 32776: "filenet-peior", + 32777: "filenet-obrok", + 32801: "mlsn", + 32811: "retp", + 32896: "idmgratm", + 33060: "mysqlx", + 33123: "aurora-balaena", + 33331: "diamondport", + 33333: "dgi-serv", + 33334: "speedtrace", + 33434: "traceroute", + 33656: "snip-slave", + 34249: "turbonote-2", + 34378: "p-net-local", + 34379: "p-net-remote", + 34567: "dhanalakshmi", + 34962: "profinet-rt", + 34963: "profinet-rtm", + 34964: "profinet-cm", + 34980: "ethercat", + 35000: "heathview", + 35001: "rt-viewer", + 35002: "rt-sound", + 35003: "rt-devicemapper", + 35004: "rt-classmanager", + 35005: "rt-labtracker", + 35006: "rt-helper", + 35100: "axio-disc", + 35354: "kitim", + 35355: "altova-lm", + 35356: "guttersnex", + 35357: "openstack-id", + 36001: "allpeers", + 36524: "febooti-aw", + 36602: "observium-agent", + 36700: "mapx", + 36865: "kastenxpipe", + 37475: "neckar", + 37483: "gdrive-sync", + 37601: "eftp", + 37654: "unisys-eportal", + 38000: "ivs-database", + 38001: "ivs-insertion", + 38002: "cresco-control", + 38201: "galaxy7-data", + 38202: "fairview", + 38203: "agpolicy", + 38800: "sruth", + 38865: "secrmmsafecopya", + 39681: "turbonote-1", + 40000: "safetynetp", + 40404: "sptx", + 40841: "cscp", + 40842: "csccredir", + 40843: "csccfirewall", + 41111: "fs-qos", + 41121: "tentacle", + 41230: "z-wave-s", + 41794: "crestron-cip", + 41795: "crestron-ctp", + 41796: "crestron-cips", + 41797: "crestron-ctps", + 42508: "candp", + 42509: "candrp", + 42510: "caerpc", + 43000: "recvr-rc", + 43188: "reachout", + 43189: "ndm-agent-port", + 43190: "ip-provision", + 43191: "noit-transport", + 43210: "shaperai", + 43439: "eq3-update", + 43440: "ew-mgmt", + 43441: "ciscocsdb", + 44123: "z-wave-tunnel", + 44321: "pmcd", + 44322: "pmcdproxy", + 44323: "pmwebapi", + 44444: "cognex-dataman", + 44553: "rbr-debug", + 44818: "EtherNet-IP-2", + 44900: "m3da", + 45000: "asmp", + 45001: "asmps", + 45002: "rs-status", + 45045: "synctest", + 45054: "invision-ag", + 45514: "cloudcheck", + 45678: "eba", + 45824: "dai-shell", + 45825: "qdb2service", + 45966: "ssr-servermgr", + 46336: "inedo", + 46998: "spremotetablet", + 46999: "mediabox", + 47000: "mbus", + 47001: "winrm", + 47557: "dbbrowse", + 47624: "directplaysrvr", + 47806: "ap", + 47808: "bacnet", + 48000: "nimcontroller", + 48001: "nimspooler", + 48002: "nimhub", + 48003: "nimgtw", + 48004: "nimbusdb", + 48005: "nimbusdbctrl", + 48049: "3gpp-cbsp", + 48050: "weandsf", + 48128: "isnetserv", + 48129: "blp5", + 48556: "com-bardac-dw", + 48619: "iqobject", + 48653: "robotraconteur", + 49000: "matahari", + 49001: "nusrp", +} +var udpPortNames = map[UDPPort]string{ + 1: "tcpmux", + 2: "compressnet", + 3: "compressnet", + 5: "rje", + 7: "echo", + 9: "discard", + 11: "systat", + 13: "daytime", + 17: "qotd", + 18: "msp", + 19: "chargen", + 20: "ftp-data", + 21: "ftp", + 22: "ssh", + 23: "telnet", + 25: "smtp", + 27: "nsw-fe", + 29: "msg-icp", + 31: "msg-auth", + 33: "dsp", + 37: "time", + 38: "rap", + 39: "rlp", + 41: "graphics", + 42: "name", + 43: "nicname", + 44: "mpm-flags", + 45: "mpm", + 46: "mpm-snd", + 48: "auditd", + 49: "tacacs", + 50: "re-mail-ck", + 52: "xns-time", + 53: "domain", + 54: "xns-ch", + 55: "isi-gl", + 56: "xns-auth", + 58: "xns-mail", + 62: "acas", + 63: "whoispp", + 64: "covia", + 65: "tacacs-ds", + 66: "sql-net", + 67: "bootps", + 68: "bootpc", + 69: "tftp", + 70: 
"gopher", + 71: "netrjs-1", + 72: "netrjs-2", + 73: "netrjs-3", + 74: "netrjs-4", + 76: "deos", + 78: "vettcp", + 79: "finger", + 80: "http", + 82: "xfer", + 83: "mit-ml-dev", + 84: "ctf", + 85: "mit-ml-dev", + 86: "mfcobol", + 88: "kerberos", + 89: "su-mit-tg", + 90: "dnsix", + 91: "mit-dov", + 92: "npp", + 93: "dcp", + 94: "objcall", + 95: "supdup", + 96: "dixie", + 97: "swift-rvf", + 98: "tacnews", + 99: "metagram", + 101: "hostname", + 102: "iso-tsap", + 103: "gppitnp", + 104: "acr-nema", + 105: "cso", + 106: "3com-tsmux", + 107: "rtelnet", + 108: "snagas", + 109: "pop2", + 110: "pop3", + 111: "sunrpc", + 112: "mcidas", + 113: "auth", + 115: "sftp", + 116: "ansanotify", + 117: "uucp-path", + 118: "sqlserv", + 119: "nntp", + 120: "cfdptkt", + 121: "erpc", + 122: "smakynet", + 123: "ntp", + 124: "ansatrader", + 125: "locus-map", + 126: "nxedit", + 127: "locus-con", + 128: "gss-xlicen", + 129: "pwdgen", + 130: "cisco-fna", + 131: "cisco-tna", + 132: "cisco-sys", + 133: "statsrv", + 134: "ingres-net", + 135: "epmap", + 136: "profile", + 137: "netbios-ns", + 138: "netbios-dgm", + 139: "netbios-ssn", + 140: "emfis-data", + 141: "emfis-cntl", + 142: "bl-idm", + 143: "imap", + 144: "uma", + 145: "uaac", + 146: "iso-tp0", + 147: "iso-ip", + 148: "jargon", + 149: "aed-512", + 150: "sql-net", + 151: "hems", + 152: "bftp", + 153: "sgmp", + 154: "netsc-prod", + 155: "netsc-dev", + 156: "sqlsrv", + 157: "knet-cmp", + 158: "pcmail-srv", + 159: "nss-routing", + 160: "sgmp-traps", + 161: "snmp", + 162: "snmptrap", + 163: "cmip-man", + 164: "cmip-agent", + 165: "xns-courier", + 166: "s-net", + 167: "namp", + 168: "rsvd", + 169: "send", + 170: "print-srv", + 171: "multiplex", + 172: "cl-1", + 173: "xyplex-mux", + 174: "mailq", + 175: "vmnet", + 176: "genrad-mux", + 177: "xdmcp", + 178: "nextstep", + 179: "bgp", + 180: "ris", + 181: "unify", + 182: "audit", + 183: "ocbinder", + 184: "ocserver", + 185: "remote-kis", + 186: "kis", + 187: "aci", + 188: "mumps", + 189: "qft", + 190: "gacp", + 191: "prospero", + 192: "osu-nms", + 193: "srmp", + 194: "irc", + 195: "dn6-nlm-aud", + 196: "dn6-smm-red", + 197: "dls", + 198: "dls-mon", + 199: "smux", + 200: "src", + 201: "at-rtmp", + 202: "at-nbp", + 203: "at-3", + 204: "at-echo", + 205: "at-5", + 206: "at-zis", + 207: "at-7", + 208: "at-8", + 209: "qmtp", + 210: "z39-50", + 211: "914c-g", + 212: "anet", + 213: "ipx", + 214: "vmpwscs", + 215: "softpc", + 216: "CAIlic", + 217: "dbase", + 218: "mpp", + 219: "uarps", + 220: "imap3", + 221: "fln-spx", + 222: "rsh-spx", + 223: "cdc", + 224: "masqdialer", + 242: "direct", + 243: "sur-meas", + 244: "inbusiness", + 245: "link", + 246: "dsp3270", + 247: "subntbcst-tftp", + 248: "bhfhs", + 256: "rap", + 257: "set", + 259: "esro-gen", + 260: "openport", + 261: "nsiiops", + 262: "arcisdms", + 263: "hdap", + 264: "bgmp", + 265: "x-bone-ctl", + 266: "sst", + 267: "td-service", + 268: "td-replica", + 269: "manet", + 270: "gist", + 280: "http-mgmt", + 281: "personal-link", + 282: "cableport-ax", + 283: "rescap", + 284: "corerjd", + 286: "fxp", + 287: "k-block", + 308: "novastorbakcup", + 309: "entrusttime", + 310: "bhmds", + 311: "asip-webadmin", + 312: "vslmp", + 313: "magenta-logic", + 314: "opalis-robot", + 315: "dpsi", + 316: "decauth", + 317: "zannet", + 318: "pkix-timestamp", + 319: "ptp-event", + 320: "ptp-general", + 321: "pip", + 322: "rtsps", + 333: "texar", + 344: "pdap", + 345: "pawserv", + 346: "zserv", + 347: "fatserv", + 348: "csi-sgwp", + 349: "mftp", + 350: "matip-type-a", + 351: "matip-type-b", + 352: 
"dtag-ste-sb", + 353: "ndsauth", + 354: "bh611", + 355: "datex-asn", + 356: "cloanto-net-1", + 357: "bhevent", + 358: "shrinkwrap", + 359: "nsrmp", + 360: "scoi2odialog", + 361: "semantix", + 362: "srssend", + 363: "rsvp-tunnel", + 364: "aurora-cmgr", + 365: "dtk", + 366: "odmr", + 367: "mortgageware", + 368: "qbikgdp", + 369: "rpc2portmap", + 370: "codaauth2", + 371: "clearcase", + 372: "ulistproc", + 373: "legent-1", + 374: "legent-2", + 375: "hassle", + 376: "nip", + 377: "tnETOS", + 378: "dsETOS", + 379: "is99c", + 380: "is99s", + 381: "hp-collector", + 382: "hp-managed-node", + 383: "hp-alarm-mgr", + 384: "arns", + 385: "ibm-app", + 386: "asa", + 387: "aurp", + 388: "unidata-ldm", + 389: "ldap", + 390: "uis", + 391: "synotics-relay", + 392: "synotics-broker", + 393: "meta5", + 394: "embl-ndt", + 395: "netcp", + 396: "netware-ip", + 397: "mptn", + 398: "kryptolan", + 399: "iso-tsap-c2", + 400: "osb-sd", + 401: "ups", + 402: "genie", + 403: "decap", + 404: "nced", + 405: "ncld", + 406: "imsp", + 407: "timbuktu", + 408: "prm-sm", + 409: "prm-nm", + 410: "decladebug", + 411: "rmt", + 412: "synoptics-trap", + 413: "smsp", + 414: "infoseek", + 415: "bnet", + 416: "silverplatter", + 417: "onmux", + 418: "hyper-g", + 419: "ariel1", + 420: "smpte", + 421: "ariel2", + 422: "ariel3", + 423: "opc-job-start", + 424: "opc-job-track", + 425: "icad-el", + 426: "smartsdp", + 427: "svrloc", + 428: "ocs-cmu", + 429: "ocs-amu", + 430: "utmpsd", + 431: "utmpcd", + 432: "iasd", + 433: "nnsp", + 434: "mobileip-agent", + 435: "mobilip-mn", + 436: "dna-cml", + 437: "comscm", + 438: "dsfgw", + 439: "dasp", + 440: "sgcp", + 441: "decvms-sysmgt", + 442: "cvc-hostd", + 443: "https", + 444: "snpp", + 445: "microsoft-ds", + 446: "ddm-rdb", + 447: "ddm-dfm", + 448: "ddm-ssl", + 449: "as-servermap", + 450: "tserver", + 451: "sfs-smp-net", + 452: "sfs-config", + 453: "creativeserver", + 454: "contentserver", + 455: "creativepartnr", + 456: "macon-udp", + 457: "scohelp", + 458: "appleqtc", + 459: "ampr-rcmd", + 460: "skronk", + 461: "datasurfsrv", + 462: "datasurfsrvsec", + 463: "alpes", + 464: "kpasswd", + 465: "igmpv3lite", + 466: "digital-vrc", + 467: "mylex-mapd", + 468: "photuris", + 469: "rcp", + 470: "scx-proxy", + 471: "mondex", + 472: "ljk-login", + 473: "hybrid-pop", + 474: "tn-tl-w2", + 475: "tcpnethaspsrv", + 476: "tn-tl-fd1", + 477: "ss7ns", + 478: "spsc", + 479: "iafserver", + 480: "iafdbase", + 481: "ph", + 482: "bgs-nsi", + 483: "ulpnet", + 484: "integra-sme", + 485: "powerburst", + 486: "avian", + 487: "saft", + 488: "gss-http", + 489: "nest-protocol", + 490: "micom-pfs", + 491: "go-login", + 492: "ticf-1", + 493: "ticf-2", + 494: "pov-ray", + 495: "intecourier", + 496: "pim-rp-disc", + 497: "retrospect", + 498: "siam", + 499: "iso-ill", + 500: "isakmp", + 501: "stmf", + 502: "mbap", + 503: "intrinsa", + 504: "citadel", + 505: "mailbox-lm", + 506: "ohimsrv", + 507: "crs", + 508: "xvttp", + 509: "snare", + 510: "fcp", + 511: "passgo", + 512: "comsat", + 513: "who", + 514: "syslog", + 515: "printer", + 516: "videotex", + 517: "talk", + 518: "ntalk", + 519: "utime", + 520: "router", + 521: "ripng", + 522: "ulp", + 523: "ibm-db2", + 524: "ncp", + 525: "timed", + 526: "tempo", + 527: "stx", + 528: "custix", + 529: "irc-serv", + 530: "courier", + 531: "conference", + 532: "netnews", + 533: "netwall", + 534: "windream", + 535: "iiop", + 536: "opalis-rdv", + 537: "nmsp", + 538: "gdomap", + 539: "apertus-ldp", + 540: "uucp", + 541: "uucp-rlogin", + 542: "commerce", + 543: "klogin", + 544: "kshell", + 545: 
"appleqtcsrvr", + 546: "dhcpv6-client", + 547: "dhcpv6-server", + 548: "afpovertcp", + 549: "idfp", + 550: "new-rwho", + 551: "cybercash", + 552: "devshr-nts", + 553: "pirp", + 554: "rtsp", + 555: "dsf", + 556: "remotefs", + 557: "openvms-sysipc", + 558: "sdnskmp", + 559: "teedtap", + 560: "rmonitor", + 561: "monitor", + 562: "chshell", + 563: "nntps", + 564: "9pfs", + 565: "whoami", + 566: "streettalk", + 567: "banyan-rpc", + 568: "ms-shuttle", + 569: "ms-rome", + 570: "meter", + 571: "meter", + 572: "sonar", + 573: "banyan-vip", + 574: "ftp-agent", + 575: "vemmi", + 576: "ipcd", + 577: "vnas", + 578: "ipdd", + 579: "decbsrv", + 580: "sntp-heartbeat", + 581: "bdp", + 582: "scc-security", + 583: "philips-vc", + 584: "keyserver", + 586: "password-chg", + 587: "submission", + 588: "cal", + 589: "eyelink", + 590: "tns-cml", + 591: "http-alt", + 592: "eudora-set", + 593: "http-rpc-epmap", + 594: "tpip", + 595: "cab-protocol", + 596: "smsd", + 597: "ptcnameservice", + 598: "sco-websrvrmg3", + 599: "acp", + 600: "ipcserver", + 601: "syslog-conn", + 602: "xmlrpc-beep", + 603: "idxp", + 604: "tunnel", + 605: "soap-beep", + 606: "urm", + 607: "nqs", + 608: "sift-uft", + 609: "npmp-trap", + 610: "npmp-local", + 611: "npmp-gui", + 612: "hmmp-ind", + 613: "hmmp-op", + 614: "sshell", + 615: "sco-inetmgr", + 616: "sco-sysmgr", + 617: "sco-dtmgr", + 618: "dei-icda", + 619: "compaq-evm", + 620: "sco-websrvrmgr", + 621: "escp-ip", + 622: "collaborator", + 623: "asf-rmcp", + 624: "cryptoadmin", + 625: "dec-dlm", + 626: "asia", + 627: "passgo-tivoli", + 628: "qmqp", + 629: "3com-amp3", + 630: "rda", + 631: "ipp", + 632: "bmpp", + 633: "servstat", + 634: "ginad", + 635: "rlzdbase", + 636: "ldaps", + 637: "lanserver", + 638: "mcns-sec", + 639: "msdp", + 640: "entrust-sps", + 641: "repcmd", + 642: "esro-emsdp", + 643: "sanity", + 644: "dwr", + 645: "pssc", + 646: "ldp", + 647: "dhcp-failover", + 648: "rrp", + 649: "cadview-3d", + 650: "obex", + 651: "ieee-mms", + 652: "hello-port", + 653: "repscmd", + 654: "aodv", + 655: "tinc", + 656: "spmp", + 657: "rmc", + 658: "tenfold", + 660: "mac-srvr-admin", + 661: "hap", + 662: "pftp", + 663: "purenoise", + 664: "asf-secure-rmcp", + 665: "sun-dr", + 666: "mdqs", + 667: "disclose", + 668: "mecomm", + 669: "meregister", + 670: "vacdsm-sws", + 671: "vacdsm-app", + 672: "vpps-qua", + 673: "cimplex", + 674: "acap", + 675: "dctp", + 676: "vpps-via", + 677: "vpp", + 678: "ggf-ncp", + 679: "mrm", + 680: "entrust-aaas", + 681: "entrust-aams", + 682: "xfr", + 683: "corba-iiop", + 684: "corba-iiop-ssl", + 685: "mdc-portmapper", + 686: "hcp-wismar", + 687: "asipregistry", + 688: "realm-rusd", + 689: "nmap", + 690: "vatp", + 691: "msexch-routing", + 692: "hyperwave-isp", + 693: "connendp", + 694: "ha-cluster", + 695: "ieee-mms-ssl", + 696: "rushd", + 697: "uuidgen", + 698: "olsr", + 699: "accessnetwork", + 700: "epp", + 701: "lmp", + 702: "iris-beep", + 704: "elcsd", + 705: "agentx", + 706: "silc", + 707: "borland-dsj", + 709: "entrust-kmsh", + 710: "entrust-ash", + 711: "cisco-tdp", + 712: "tbrpf", + 713: "iris-xpc", + 714: "iris-xpcs", + 715: "iris-lwz", + 716: "pana", + 729: "netviewdm1", + 730: "netviewdm2", + 731: "netviewdm3", + 741: "netgw", + 742: "netrcs", + 744: "flexlm", + 747: "fujitsu-dev", + 748: "ris-cm", + 749: "kerberos-adm", + 750: "loadav", + 751: "pump", + 752: "qrh", + 753: "rrh", + 754: "tell", + 758: "nlogin", + 759: "con", + 760: "ns", + 761: "rxe", + 762: "quotad", + 763: "cycleserv", + 764: "omserv", + 765: "webster", + 767: "phonebook", + 769: "vid", + 
770: "cadlock", + 771: "rtip", + 772: "cycleserv2", + 773: "notify", + 774: "acmaint-dbd", + 775: "acmaint-transd", + 776: "wpages", + 777: "multiling-http", + 780: "wpgs", + 800: "mdbs-daemon", + 801: "device", + 802: "mbap-s", + 810: "fcp-udp", + 828: "itm-mcell-s", + 829: "pkix-3-ca-ra", + 830: "netconf-ssh", + 831: "netconf-beep", + 832: "netconfsoaphttp", + 833: "netconfsoapbeep", + 847: "dhcp-failover2", + 848: "gdoi", + 853: "domain-s", + 854: "dlep", + 860: "iscsi", + 861: "owamp-control", + 862: "twamp-control", + 873: "rsync", + 886: "iclcnet-locate", + 887: "iclcnet-svinfo", + 888: "accessbuilder", + 900: "omginitialrefs", + 901: "smpnameres", + 902: "ideafarm-door", + 903: "ideafarm-panic", + 910: "kink", + 911: "xact-backup", + 912: "apex-mesh", + 913: "apex-edge", + 989: "ftps-data", + 990: "ftps", + 991: "nas", + 992: "telnets", + 993: "imaps", + 995: "pop3s", + 996: "vsinet", + 997: "maitrd", + 998: "puparp", + 999: "applix", + 1000: "cadlock2", + 1010: "surf", + 1021: "exp1", + 1022: "exp2", + 1025: "blackjack", + 1026: "cap", + 1027: "6a44", + 1029: "solid-mux", + 1033: "netinfo-local", + 1034: "activesync", + 1035: "mxxrlogin", + 1036: "nsstp", + 1037: "ams", + 1038: "mtqp", + 1039: "sbl", + 1040: "netarx", + 1041: "danf-ak2", + 1042: "afrog", + 1043: "boinc-client", + 1044: "dcutility", + 1045: "fpitp", + 1046: "wfremotertm", + 1047: "neod1", + 1048: "neod2", + 1049: "td-postman", + 1050: "cma", + 1051: "optima-vnet", + 1052: "ddt", + 1053: "remote-as", + 1054: "brvread", + 1055: "ansyslmd", + 1056: "vfo", + 1057: "startron", + 1058: "nim", + 1059: "nimreg", + 1060: "polestar", + 1061: "kiosk", + 1062: "veracity", + 1063: "kyoceranetdev", + 1064: "jstel", + 1065: "syscomlan", + 1066: "fpo-fns", + 1067: "instl-boots", + 1068: "instl-bootc", + 1069: "cognex-insight", + 1070: "gmrupdateserv", + 1071: "bsquare-voip", + 1072: "cardax", + 1073: "bridgecontrol", + 1074: "warmspotMgmt", + 1075: "rdrmshc", + 1076: "dab-sti-c", + 1077: "imgames", + 1078: "avocent-proxy", + 1079: "asprovatalk", + 1080: "socks", + 1081: "pvuniwien", + 1082: "amt-esd-prot", + 1083: "ansoft-lm-1", + 1084: "ansoft-lm-2", + 1085: "webobjects", + 1086: "cplscrambler-lg", + 1087: "cplscrambler-in", + 1088: "cplscrambler-al", + 1089: "ff-annunc", + 1090: "ff-fms", + 1091: "ff-sm", + 1092: "obrpd", + 1093: "proofd", + 1094: "rootd", + 1095: "nicelink", + 1096: "cnrprotocol", + 1097: "sunclustermgr", + 1098: "rmiactivation", + 1099: "rmiregistry", + 1100: "mctp", + 1101: "pt2-discover", + 1102: "adobeserver-1", + 1103: "adobeserver-2", + 1104: "xrl", + 1105: "ftranhc", + 1106: "isoipsigport-1", + 1107: "isoipsigport-2", + 1108: "ratio-adp", + 1110: "nfsd-keepalive", + 1111: "lmsocialserver", + 1112: "icp", + 1113: "ltp-deepspace", + 1114: "mini-sql", + 1115: "ardus-trns", + 1116: "ardus-cntl", + 1117: "ardus-mtrns", + 1118: "sacred", + 1119: "bnetgame", + 1120: "bnetfile", + 1121: "rmpp", + 1122: "availant-mgr", + 1123: "murray", + 1124: "hpvmmcontrol", + 1125: "hpvmmagent", + 1126: "hpvmmdata", + 1127: "kwdb-commn", + 1128: "saphostctrl", + 1129: "saphostctrls", + 1130: "casp", + 1131: "caspssl", + 1132: "kvm-via-ip", + 1133: "dfn", + 1134: "aplx", + 1135: "omnivision", + 1136: "hhb-gateway", + 1137: "trim", + 1138: "encrypted-admin", + 1139: "evm", + 1140: "autonoc", + 1141: "mxomss", + 1142: "edtools", + 1143: "imyx", + 1144: "fuscript", + 1145: "x9-icue", + 1146: "audit-transfer", + 1147: "capioverlan", + 1148: "elfiq-repl", + 1149: "bvtsonar", + 1150: "blaze", + 1151: "unizensus", + 1152: 
"winpoplanmess", + 1153: "c1222-acse", + 1154: "resacommunity", + 1155: "nfa", + 1156: "iascontrol-oms", + 1157: "iascontrol", + 1158: "dbcontrol-oms", + 1159: "oracle-oms", + 1160: "olsv", + 1161: "health-polling", + 1162: "health-trap", + 1163: "sddp", + 1164: "qsm-proxy", + 1165: "qsm-gui", + 1166: "qsm-remote", + 1167: "cisco-ipsla", + 1168: "vchat", + 1169: "tripwire", + 1170: "atc-lm", + 1171: "atc-appserver", + 1172: "dnap", + 1173: "d-cinema-rrp", + 1174: "fnet-remote-ui", + 1175: "dossier", + 1176: "indigo-server", + 1177: "dkmessenger", + 1178: "sgi-storman", + 1179: "b2n", + 1180: "mc-client", + 1181: "3comnetman", + 1182: "accelenet-data", + 1183: "llsurfup-http", + 1184: "llsurfup-https", + 1185: "catchpole", + 1186: "mysql-cluster", + 1187: "alias", + 1188: "hp-webadmin", + 1189: "unet", + 1190: "commlinx-avl", + 1191: "gpfs", + 1192: "caids-sensor", + 1193: "fiveacross", + 1194: "openvpn", + 1195: "rsf-1", + 1196: "netmagic", + 1197: "carrius-rshell", + 1198: "cajo-discovery", + 1199: "dmidi", + 1200: "scol", + 1201: "nucleus-sand", + 1202: "caiccipc", + 1203: "ssslic-mgr", + 1204: "ssslog-mgr", + 1205: "accord-mgc", + 1206: "anthony-data", + 1207: "metasage", + 1208: "seagull-ais", + 1209: "ipcd3", + 1210: "eoss", + 1211: "groove-dpp", + 1212: "lupa", + 1213: "mpc-lifenet", + 1214: "kazaa", + 1215: "scanstat-1", + 1216: "etebac5", + 1217: "hpss-ndapi", + 1218: "aeroflight-ads", + 1219: "aeroflight-ret", + 1220: "qt-serveradmin", + 1221: "sweetware-apps", + 1222: "nerv", + 1223: "tgp", + 1224: "vpnz", + 1225: "slinkysearch", + 1226: "stgxfws", + 1227: "dns2go", + 1228: "florence", + 1229: "zented", + 1230: "periscope", + 1231: "menandmice-lpm", + 1232: "first-defense", + 1233: "univ-appserver", + 1234: "search-agent", + 1235: "mosaicsyssvc1", + 1236: "bvcontrol", + 1237: "tsdos390", + 1238: "hacl-qs", + 1239: "nmsd", + 1240: "instantia", + 1241: "nessus", + 1242: "nmasoverip", + 1243: "serialgateway", + 1244: "isbconference1", + 1245: "isbconference2", + 1246: "payrouter", + 1247: "visionpyramid", + 1248: "hermes", + 1249: "mesavistaco", + 1250: "swldy-sias", + 1251: "servergraph", + 1252: "bspne-pcc", + 1253: "q55-pcc", + 1254: "de-noc", + 1255: "de-cache-query", + 1256: "de-server", + 1257: "shockwave2", + 1258: "opennl", + 1259: "opennl-voice", + 1260: "ibm-ssd", + 1261: "mpshrsv", + 1262: "qnts-orb", + 1263: "dka", + 1264: "prat", + 1265: "dssiapi", + 1266: "dellpwrappks", + 1267: "epc", + 1268: "propel-msgsys", + 1269: "watilapp", + 1270: "opsmgr", + 1271: "excw", + 1272: "cspmlockmgr", + 1273: "emc-gateway", + 1274: "t1distproc", + 1275: "ivcollector", + 1277: "miva-mqs", + 1278: "dellwebadmin-1", + 1279: "dellwebadmin-2", + 1280: "pictrography", + 1281: "healthd", + 1282: "emperion", + 1283: "productinfo", + 1284: "iee-qfx", + 1285: "neoiface", + 1286: "netuitive", + 1287: "routematch", + 1288: "navbuddy", + 1289: "jwalkserver", + 1290: "winjaserver", + 1291: "seagulllms", + 1292: "dsdn", + 1293: "pkt-krb-ipsec", + 1294: "cmmdriver", + 1295: "ehtp", + 1296: "dproxy", + 1297: "sdproxy", + 1298: "lpcp", + 1299: "hp-sci", + 1300: "h323hostcallsc", + 1301: "ci3-software-1", + 1302: "ci3-software-2", + 1303: "sftsrv", + 1304: "boomerang", + 1305: "pe-mike", + 1306: "re-conn-proto", + 1307: "pacmand", + 1308: "odsi", + 1309: "jtag-server", + 1310: "husky", + 1311: "rxmon", + 1312: "sti-envision", + 1313: "bmc-patroldb", + 1314: "pdps", + 1315: "els", + 1316: "exbit-escp", + 1317: "vrts-ipcserver", + 1318: "krb5gatekeeper", + 1319: "amx-icsp", + 1320: "amx-axbnet", + 1321: 
"pip", + 1322: "novation", + 1323: "brcd", + 1324: "delta-mcp", + 1325: "dx-instrument", + 1326: "wimsic", + 1327: "ultrex", + 1328: "ewall", + 1329: "netdb-export", + 1330: "streetperfect", + 1331: "intersan", + 1332: "pcia-rxp-b", + 1333: "passwrd-policy", + 1334: "writesrv", + 1335: "digital-notary", + 1336: "ischat", + 1337: "menandmice-dns", + 1338: "wmc-log-svc", + 1339: "kjtsiteserver", + 1340: "naap", + 1341: "qubes", + 1342: "esbroker", + 1343: "re101", + 1344: "icap", + 1345: "vpjp", + 1346: "alta-ana-lm", + 1347: "bbn-mmc", + 1348: "bbn-mmx", + 1349: "sbook", + 1350: "editbench", + 1351: "equationbuilder", + 1352: "lotusnote", + 1353: "relief", + 1354: "XSIP-network", + 1355: "intuitive-edge", + 1356: "cuillamartin", + 1357: "pegboard", + 1358: "connlcli", + 1359: "ftsrv", + 1360: "mimer", + 1361: "linx", + 1362: "timeflies", + 1363: "ndm-requester", + 1364: "ndm-server", + 1365: "adapt-sna", + 1366: "netware-csp", + 1367: "dcs", + 1368: "screencast", + 1369: "gv-us", + 1370: "us-gv", + 1371: "fc-cli", + 1372: "fc-ser", + 1373: "chromagrafx", + 1374: "molly", + 1375: "bytex", + 1376: "ibm-pps", + 1377: "cichlid", + 1378: "elan", + 1379: "dbreporter", + 1380: "telesis-licman", + 1381: "apple-licman", + 1382: "udt-os", + 1383: "gwha", + 1384: "os-licman", + 1385: "atex-elmd", + 1386: "checksum", + 1387: "cadsi-lm", + 1388: "objective-dbc", + 1389: "iclpv-dm", + 1390: "iclpv-sc", + 1391: "iclpv-sas", + 1392: "iclpv-pm", + 1393: "iclpv-nls", + 1394: "iclpv-nlc", + 1395: "iclpv-wsm", + 1396: "dvl-activemail", + 1397: "audio-activmail", + 1398: "video-activmail", + 1399: "cadkey-licman", + 1400: "cadkey-tablet", + 1401: "goldleaf-licman", + 1402: "prm-sm-np", + 1403: "prm-nm-np", + 1404: "igi-lm", + 1405: "ibm-res", + 1406: "netlabs-lm", + 1408: "sophia-lm", + 1409: "here-lm", + 1410: "hiq", + 1411: "af", + 1412: "innosys", + 1413: "innosys-acl", + 1414: "ibm-mqseries", + 1415: "dbstar", + 1416: "novell-lu6-2", + 1417: "timbuktu-srv1", + 1418: "timbuktu-srv2", + 1419: "timbuktu-srv3", + 1420: "timbuktu-srv4", + 1421: "gandalf-lm", + 1422: "autodesk-lm", + 1423: "essbase", + 1424: "hybrid", + 1425: "zion-lm", + 1426: "sais", + 1427: "mloadd", + 1428: "informatik-lm", + 1429: "nms", + 1430: "tpdu", + 1431: "rgtp", + 1432: "blueberry-lm", + 1433: "ms-sql-s", + 1434: "ms-sql-m", + 1435: "ibm-cics", + 1436: "saism", + 1437: "tabula", + 1438: "eicon-server", + 1439: "eicon-x25", + 1440: "eicon-slp", + 1441: "cadis-1", + 1442: "cadis-2", + 1443: "ies-lm", + 1444: "marcam-lm", + 1445: "proxima-lm", + 1446: "ora-lm", + 1447: "apri-lm", + 1448: "oc-lm", + 1449: "peport", + 1450: "dwf", + 1451: "infoman", + 1452: "gtegsc-lm", + 1453: "genie-lm", + 1454: "interhdl-elmd", + 1455: "esl-lm", + 1456: "dca", + 1457: "valisys-lm", + 1458: "nrcabq-lm", + 1459: "proshare1", + 1460: "proshare2", + 1461: "ibm-wrless-lan", + 1462: "world-lm", + 1463: "nucleus", + 1464: "msl-lmd", + 1465: "pipes", + 1466: "oceansoft-lm", + 1467: "csdmbase", + 1468: "csdm", + 1469: "aal-lm", + 1470: "uaiact", + 1471: "csdmbase", + 1472: "csdm", + 1473: "openmath", + 1474: "telefinder", + 1475: "taligent-lm", + 1476: "clvm-cfg", + 1477: "ms-sna-server", + 1478: "ms-sna-base", + 1479: "dberegister", + 1480: "pacerforum", + 1481: "airs", + 1482: "miteksys-lm", + 1483: "afs", + 1484: "confluent", + 1485: "lansource", + 1486: "nms-topo-serv", + 1487: "localinfosrvr", + 1488: "docstor", + 1489: "dmdocbroker", + 1490: "insitu-conf", + 1492: "stone-design-1", + 1493: "netmap-lm", + 1494: "ica", + 1495: "cvc", + 1496: "liberty-lm", + 
1497: "rfx-lm", + 1498: "sybase-sqlany", + 1499: "fhc", + 1500: "vlsi-lm", + 1501: "saiscm", + 1502: "shivadiscovery", + 1503: "imtc-mcs", + 1504: "evb-elm", + 1505: "funkproxy", + 1506: "utcd", + 1507: "symplex", + 1508: "diagmond", + 1509: "robcad-lm", + 1510: "mvx-lm", + 1511: "3l-l1", + 1512: "wins", + 1513: "fujitsu-dtc", + 1514: "fujitsu-dtcns", + 1515: "ifor-protocol", + 1516: "vpad", + 1517: "vpac", + 1518: "vpvd", + 1519: "vpvc", + 1520: "atm-zip-office", + 1521: "ncube-lm", + 1522: "ricardo-lm", + 1523: "cichild-lm", + 1524: "ingreslock", + 1525: "orasrv", + 1526: "pdap-np", + 1527: "tlisrv", + 1528: "ngr-t", + 1529: "coauthor", + 1530: "rap-service", + 1531: "rap-listen", + 1532: "miroconnect", + 1533: "virtual-places", + 1534: "micromuse-lm", + 1535: "ampr-info", + 1536: "ampr-inter", + 1537: "sdsc-lm", + 1538: "3ds-lm", + 1539: "intellistor-lm", + 1540: "rds", + 1541: "rds2", + 1542: "gridgen-elmd", + 1543: "simba-cs", + 1544: "aspeclmd", + 1545: "vistium-share", + 1546: "abbaccuray", + 1547: "laplink", + 1548: "axon-lm", + 1549: "shivasound", + 1550: "3m-image-lm", + 1551: "hecmtl-db", + 1552: "pciarray", + 1553: "sna-cs", + 1554: "caci-lm", + 1555: "livelan", + 1556: "veritas-pbx", + 1557: "arbortext-lm", + 1558: "xingmpeg", + 1559: "web2host", + 1560: "asci-val", + 1561: "facilityview", + 1562: "pconnectmgr", + 1563: "cadabra-lm", + 1564: "pay-per-view", + 1565: "winddlb", + 1566: "corelvideo", + 1567: "jlicelmd", + 1568: "tsspmap", + 1569: "ets", + 1570: "orbixd", + 1571: "rdb-dbs-disp", + 1572: "chip-lm", + 1573: "itscomm-ns", + 1574: "mvel-lm", + 1575: "oraclenames", + 1576: "moldflow-lm", + 1577: "hypercube-lm", + 1578: "jacobus-lm", + 1579: "ioc-sea-lm", + 1580: "tn-tl-r2", + 1581: "mil-2045-47001", + 1582: "msims", + 1583: "simbaexpress", + 1584: "tn-tl-fd2", + 1585: "intv", + 1586: "ibm-abtact", + 1587: "pra-elmd", + 1588: "triquest-lm", + 1589: "vqp", + 1590: "gemini-lm", + 1591: "ncpm-pm", + 1592: "commonspace", + 1593: "mainsoft-lm", + 1594: "sixtrak", + 1595: "radio", + 1596: "radio-bc", + 1597: "orbplus-iiop", + 1598: "picknfs", + 1599: "simbaservices", + 1600: "issd", + 1601: "aas", + 1602: "inspect", + 1603: "picodbc", + 1604: "icabrowser", + 1605: "slp", + 1606: "slm-api", + 1607: "stt", + 1608: "smart-lm", + 1609: "isysg-lm", + 1610: "taurus-wh", + 1611: "ill", + 1612: "netbill-trans", + 1613: "netbill-keyrep", + 1614: "netbill-cred", + 1615: "netbill-auth", + 1616: "netbill-prod", + 1617: "nimrod-agent", + 1618: "skytelnet", + 1619: "xs-openstorage", + 1620: "faxportwinport", + 1621: "softdataphone", + 1622: "ontime", + 1623: "jaleosnd", + 1624: "udp-sr-port", + 1625: "svs-omagent", + 1626: "shockwave", + 1627: "t128-gateway", + 1628: "lontalk-norm", + 1629: "lontalk-urgnt", + 1630: "oraclenet8cman", + 1631: "visitview", + 1632: "pammratc", + 1633: "pammrpc", + 1634: "loaprobe", + 1635: "edb-server1", + 1636: "isdc", + 1637: "islc", + 1638: "ismc", + 1639: "cert-initiator", + 1640: "cert-responder", + 1641: "invision", + 1642: "isis-am", + 1643: "isis-ambc", + 1644: "saiseh", + 1645: "sightline", + 1646: "sa-msg-port", + 1647: "rsap", + 1648: "concurrent-lm", + 1649: "kermit", + 1650: "nkd", + 1651: "shiva-confsrvr", + 1652: "xnmp", + 1653: "alphatech-lm", + 1654: "stargatealerts", + 1655: "dec-mbadmin", + 1656: "dec-mbadmin-h", + 1657: "fujitsu-mmpdc", + 1658: "sixnetudr", + 1659: "sg-lm", + 1660: "skip-mc-gikreq", + 1661: "netview-aix-1", + 1662: "netview-aix-2", + 1663: "netview-aix-3", + 1664: "netview-aix-4", + 1665: "netview-aix-5", + 1666: 
"netview-aix-6", + 1667: "netview-aix-7", + 1668: "netview-aix-8", + 1669: "netview-aix-9", + 1670: "netview-aix-10", + 1671: "netview-aix-11", + 1672: "netview-aix-12", + 1673: "proshare-mc-1", + 1674: "proshare-mc-2", + 1675: "pdp", + 1676: "netcomm2", + 1677: "groupwise", + 1678: "prolink", + 1679: "darcorp-lm", + 1680: "microcom-sbp", + 1681: "sd-elmd", + 1682: "lanyon-lantern", + 1683: "ncpm-hip", + 1684: "snaresecure", + 1685: "n2nremote", + 1686: "cvmon", + 1687: "nsjtp-ctrl", + 1688: "nsjtp-data", + 1689: "firefox", + 1690: "ng-umds", + 1691: "empire-empuma", + 1692: "sstsys-lm", + 1693: "rrirtr", + 1694: "rrimwm", + 1695: "rrilwm", + 1696: "rrifmm", + 1697: "rrisat", + 1698: "rsvp-encap-1", + 1699: "rsvp-encap-2", + 1700: "mps-raft", + 1701: "l2f", + 1702: "deskshare", + 1703: "hb-engine", + 1704: "bcs-broker", + 1705: "slingshot", + 1706: "jetform", + 1707: "vdmplay", + 1708: "gat-lmd", + 1709: "centra", + 1710: "impera", + 1711: "pptconference", + 1712: "registrar", + 1713: "conferencetalk", + 1714: "sesi-lm", + 1715: "houdini-lm", + 1716: "xmsg", + 1717: "fj-hdnet", + 1718: "h323gatedisc", + 1719: "h323gatestat", + 1720: "h323hostcall", + 1721: "caicci", + 1722: "hks-lm", + 1723: "pptp", + 1724: "csbphonemaster", + 1725: "iden-ralp", + 1726: "iberiagames", + 1727: "winddx", + 1728: "telindus", + 1729: "citynl", + 1730: "roketz", + 1731: "msiccp", + 1732: "proxim", + 1733: "siipat", + 1734: "cambertx-lm", + 1735: "privatechat", + 1736: "street-stream", + 1737: "ultimad", + 1738: "gamegen1", + 1739: "webaccess", + 1740: "encore", + 1741: "cisco-net-mgmt", + 1742: "3Com-nsd", + 1743: "cinegrfx-lm", + 1744: "ncpm-ft", + 1745: "remote-winsock", + 1746: "ftrapid-1", + 1747: "ftrapid-2", + 1748: "oracle-em1", + 1749: "aspen-services", + 1750: "sslp", + 1751: "swiftnet", + 1752: "lofr-lm", + 1754: "oracle-em2", + 1755: "ms-streaming", + 1756: "capfast-lmd", + 1757: "cnhrp", + 1758: "tftp-mcast", + 1759: "spss-lm", + 1760: "www-ldap-gw", + 1761: "cft-0", + 1762: "cft-1", + 1763: "cft-2", + 1764: "cft-3", + 1765: "cft-4", + 1766: "cft-5", + 1767: "cft-6", + 1768: "cft-7", + 1769: "bmc-net-adm", + 1770: "bmc-net-svc", + 1771: "vaultbase", + 1772: "essweb-gw", + 1773: "kmscontrol", + 1774: "global-dtserv", + 1776: "femis", + 1777: "powerguardian", + 1778: "prodigy-intrnet", + 1779: "pharmasoft", + 1780: "dpkeyserv", + 1781: "answersoft-lm", + 1782: "hp-hcip", + 1784: "finle-lm", + 1785: "windlm", + 1786: "funk-logger", + 1787: "funk-license", + 1788: "psmond", + 1789: "hello", + 1790: "nmsp", + 1791: "ea1", + 1792: "ibm-dt-2", + 1793: "rsc-robot", + 1794: "cera-bcm", + 1795: "dpi-proxy", + 1796: "vocaltec-admin", + 1797: "uma", + 1798: "etp", + 1799: "netrisk", + 1800: "ansys-lm", + 1801: "msmq", + 1802: "concomp1", + 1803: "hp-hcip-gwy", + 1804: "enl", + 1805: "enl-name", + 1806: "musiconline", + 1807: "fhsp", + 1808: "oracle-vp2", + 1809: "oracle-vp1", + 1810: "jerand-lm", + 1811: "scientia-sdb", + 1812: "radius", + 1813: "radius-acct", + 1814: "tdp-suite", + 1815: "mmpft", + 1816: "harp", + 1817: "rkb-oscs", + 1818: "etftp", + 1819: "plato-lm", + 1820: "mcagent", + 1821: "donnyworld", + 1822: "es-elmd", + 1823: "unisys-lm", + 1824: "metrics-pas", + 1825: "direcpc-video", + 1826: "ardt", + 1827: "asi", + 1828: "itm-mcell-u", + 1829: "optika-emedia", + 1830: "net8-cman", + 1831: "myrtle", + 1832: "tht-treasure", + 1833: "udpradio", + 1834: "ardusuni", + 1835: "ardusmul", + 1836: "ste-smsc", + 1837: "csoft1", + 1838: "talnet", + 1839: "netopia-vo1", + 1840: "netopia-vo2", + 1841: 
"netopia-vo3", + 1842: "netopia-vo4", + 1843: "netopia-vo5", + 1844: "direcpc-dll", + 1845: "altalink", + 1846: "tunstall-pnc", + 1847: "slp-notify", + 1848: "fjdocdist", + 1849: "alpha-sms", + 1850: "gsi", + 1851: "ctcd", + 1852: "virtual-time", + 1853: "vids-avtp", + 1854: "buddy-draw", + 1855: "fiorano-rtrsvc", + 1856: "fiorano-msgsvc", + 1857: "datacaptor", + 1858: "privateark", + 1859: "gammafetchsvr", + 1860: "sunscalar-svc", + 1861: "lecroy-vicp", + 1862: "mysql-cm-agent", + 1863: "msnp", + 1864: "paradym-31port", + 1865: "entp", + 1866: "swrmi", + 1867: "udrive", + 1868: "viziblebrowser", + 1869: "transact", + 1870: "sunscalar-dns", + 1871: "canocentral0", + 1872: "canocentral1", + 1873: "fjmpjps", + 1874: "fjswapsnp", + 1875: "westell-stats", + 1876: "ewcappsrv", + 1877: "hp-webqosdb", + 1878: "drmsmc", + 1879: "nettgain-nms", + 1880: "vsat-control", + 1881: "ibm-mqseries2", + 1882: "ecsqdmn", + 1883: "mqtt", + 1884: "idmaps", + 1885: "vrtstrapserver", + 1886: "leoip", + 1887: "filex-lport", + 1888: "ncconfig", + 1889: "unify-adapter", + 1890: "wilkenlistener", + 1891: "childkey-notif", + 1892: "childkey-ctrl", + 1893: "elad", + 1894: "o2server-port", + 1896: "b-novative-ls", + 1897: "metaagent", + 1898: "cymtec-port", + 1899: "mc2studios", + 1900: "ssdp", + 1901: "fjicl-tep-a", + 1902: "fjicl-tep-b", + 1903: "linkname", + 1904: "fjicl-tep-c", + 1905: "sugp", + 1906: "tpmd", + 1907: "intrastar", + 1908: "dawn", + 1909: "global-wlink", + 1910: "ultrabac", + 1911: "mtp", + 1912: "rhp-iibp", + 1913: "armadp", + 1914: "elm-momentum", + 1915: "facelink", + 1916: "persona", + 1917: "noagent", + 1918: "can-nds", + 1919: "can-dch", + 1920: "can-ferret", + 1921: "noadmin", + 1922: "tapestry", + 1923: "spice", + 1924: "xiip", + 1925: "discovery-port", + 1926: "egs", + 1927: "videte-cipc", + 1928: "emsd-port", + 1929: "bandwiz-system", + 1930: "driveappserver", + 1931: "amdsched", + 1932: "ctt-broker", + 1933: "xmapi", + 1934: "xaapi", + 1935: "macromedia-fcs", + 1936: "jetcmeserver", + 1937: "jwserver", + 1938: "jwclient", + 1939: "jvserver", + 1940: "jvclient", + 1941: "dic-aida", + 1942: "res", + 1943: "beeyond-media", + 1944: "close-combat", + 1945: "dialogic-elmd", + 1946: "tekpls", + 1947: "sentinelsrm", + 1948: "eye2eye", + 1949: "ismaeasdaqlive", + 1950: "ismaeasdaqtest", + 1951: "bcs-lmserver", + 1952: "mpnjsc", + 1953: "rapidbase", + 1954: "abr-api", + 1955: "abr-secure", + 1956: "vrtl-vmf-ds", + 1957: "unix-status", + 1958: "dxadmind", + 1959: "simp-all", + 1960: "nasmanager", + 1961: "bts-appserver", + 1962: "biap-mp", + 1963: "webmachine", + 1964: "solid-e-engine", + 1965: "tivoli-npm", + 1966: "slush", + 1967: "sns-quote", + 1968: "lipsinc", + 1969: "lipsinc1", + 1970: "netop-rc", + 1971: "netop-school", + 1972: "intersys-cache", + 1973: "dlsrap", + 1974: "drp", + 1975: "tcoflashagent", + 1976: "tcoregagent", + 1977: "tcoaddressbook", + 1978: "unisql", + 1979: "unisql-java", + 1980: "pearldoc-xact", + 1981: "p2pq", + 1982: "estamp", + 1983: "lhtp", + 1984: "bb", + 1985: "hsrp", + 1986: "licensedaemon", + 1987: "tr-rsrb-p1", + 1988: "tr-rsrb-p2", + 1989: "tr-rsrb-p3", + 1990: "stun-p1", + 1991: "stun-p2", + 1992: "stun-p3", + 1993: "snmp-tcp-port", + 1994: "stun-port", + 1995: "perf-port", + 1996: "tr-rsrb-port", + 1997: "gdp-port", + 1998: "x25-svc-port", + 1999: "tcp-id-port", + 2000: "cisco-sccp", + 2001: "wizard", + 2002: "globe", + 2003: "brutus", + 2004: "emce", + 2005: "oracle", + 2006: "raid-cd", + 2007: "raid-am", + 2008: "terminaldb", + 2009: "whosockami", + 2010: 
"pipe-server", + 2011: "servserv", + 2012: "raid-ac", + 2013: "raid-cd", + 2014: "raid-sf", + 2015: "raid-cs", + 2016: "bootserver", + 2017: "bootclient", + 2018: "rellpack", + 2019: "about", + 2020: "xinupageserver", + 2021: "xinuexpansion1", + 2022: "xinuexpansion2", + 2023: "xinuexpansion3", + 2024: "xinuexpansion4", + 2025: "xribs", + 2026: "scrabble", + 2027: "shadowserver", + 2028: "submitserver", + 2029: "hsrpv6", + 2030: "device2", + 2031: "mobrien-chat", + 2032: "blackboard", + 2033: "glogger", + 2034: "scoremgr", + 2035: "imsldoc", + 2036: "e-dpnet", + 2037: "applus", + 2038: "objectmanager", + 2039: "prizma", + 2040: "lam", + 2041: "interbase", + 2042: "isis", + 2043: "isis-bcast", + 2044: "rimsl", + 2045: "cdfunc", + 2046: "sdfunc", + 2047: "dls", + 2048: "dls-monitor", + 2049: "shilp", + 2050: "av-emb-config", + 2051: "epnsdp", + 2052: "clearvisn", + 2053: "lot105-ds-upd", + 2054: "weblogin", + 2055: "iop", + 2056: "omnisky", + 2057: "rich-cp", + 2058: "newwavesearch", + 2059: "bmc-messaging", + 2060: "teleniumdaemon", + 2061: "netmount", + 2062: "icg-swp", + 2063: "icg-bridge", + 2064: "icg-iprelay", + 2065: "dlsrpn", + 2066: "aura", + 2067: "dlswpn", + 2068: "avauthsrvprtcl", + 2069: "event-port", + 2070: "ah-esp-encap", + 2071: "acp-port", + 2072: "msync", + 2073: "gxs-data-port", + 2074: "vrtl-vmf-sa", + 2075: "newlixengine", + 2076: "newlixconfig", + 2077: "tsrmagt", + 2078: "tpcsrvr", + 2079: "idware-router", + 2080: "autodesk-nlm", + 2081: "kme-trap-port", + 2082: "infowave", + 2083: "radsec", + 2084: "sunclustergeo", + 2085: "ada-cip", + 2086: "gnunet", + 2087: "eli", + 2088: "ip-blf", + 2089: "sep", + 2090: "lrp", + 2091: "prp", + 2092: "descent3", + 2093: "nbx-cc", + 2094: "nbx-au", + 2095: "nbx-ser", + 2096: "nbx-dir", + 2097: "jetformpreview", + 2098: "dialog-port", + 2099: "h2250-annex-g", + 2100: "amiganetfs", + 2101: "rtcm-sc104", + 2102: "zephyr-srv", + 2103: "zephyr-clt", + 2104: "zephyr-hm", + 2105: "minipay", + 2106: "mzap", + 2107: "bintec-admin", + 2108: "comcam", + 2109: "ergolight", + 2110: "umsp", + 2111: "dsatp", + 2112: "idonix-metanet", + 2113: "hsl-storm", + 2114: "newheights", + 2115: "kdm", + 2116: "ccowcmr", + 2117: "mentaclient", + 2118: "mentaserver", + 2119: "gsigatekeeper", + 2120: "qencp", + 2121: "scientia-ssdb", + 2122: "caupc-remote", + 2123: "gtp-control", + 2124: "elatelink", + 2125: "lockstep", + 2126: "pktcable-cops", + 2127: "index-pc-wb", + 2128: "net-steward", + 2129: "cs-live", + 2130: "xds", + 2131: "avantageb2b", + 2132: "solera-epmap", + 2133: "zymed-zpp", + 2134: "avenue", + 2135: "gris", + 2136: "appworxsrv", + 2137: "connect", + 2138: "unbind-cluster", + 2139: "ias-auth", + 2140: "ias-reg", + 2141: "ias-admind", + 2142: "tdmoip", + 2143: "lv-jc", + 2144: "lv-ffx", + 2145: "lv-pici", + 2146: "lv-not", + 2147: "lv-auth", + 2148: "veritas-ucl", + 2149: "acptsys", + 2150: "dynamic3d", + 2151: "docent", + 2152: "gtp-user", + 2153: "ctlptc", + 2154: "stdptc", + 2155: "brdptc", + 2156: "trp", + 2157: "xnds", + 2158: "touchnetplus", + 2159: "gdbremote", + 2160: "apc-2160", + 2161: "apc-2161", + 2162: "navisphere", + 2163: "navisphere-sec", + 2164: "ddns-v3", + 2165: "x-bone-api", + 2166: "iwserver", + 2167: "raw-serial", + 2168: "easy-soft-mux", + 2169: "brain", + 2170: "eyetv", + 2171: "msfw-storage", + 2172: "msfw-s-storage", + 2173: "msfw-replica", + 2174: "msfw-array", + 2175: "airsync", + 2176: "rapi", + 2177: "qwave", + 2178: "bitspeer", + 2179: "vmrdp", + 2180: "mc-gt-srv", + 2181: "eforward", + 2182: "cgn-stat", + 2183: 
"cgn-config", + 2184: "nvd", + 2185: "onbase-dds", + 2186: "gtaua", + 2187: "ssmd", + 2190: "tivoconnect", + 2191: "tvbus", + 2192: "asdis", + 2193: "drwcs", + 2197: "mnp-exchange", + 2198: "onehome-remote", + 2199: "onehome-help", + 2200: "ici", + 2201: "ats", + 2202: "imtc-map", + 2203: "b2-runtime", + 2204: "b2-license", + 2205: "jps", + 2206: "hpocbus", + 2207: "hpssd", + 2208: "hpiod", + 2209: "rimf-ps", + 2210: "noaaport", + 2211: "emwin", + 2212: "leecoposserver", + 2213: "kali", + 2214: "rpi", + 2215: "ipcore", + 2216: "vtu-comms", + 2217: "gotodevice", + 2218: "bounzza", + 2219: "netiq-ncap", + 2220: "netiq", + 2221: "ethernet-ip-s", + 2222: "EtherNet-IP-1", + 2223: "rockwell-csp2", + 2224: "efi-mg", + 2226: "di-drm", + 2227: "di-msg", + 2228: "ehome-ms", + 2229: "datalens", + 2230: "queueadm", + 2231: "wimaxasncp", + 2232: "ivs-video", + 2233: "infocrypt", + 2234: "directplay", + 2235: "sercomm-wlink", + 2236: "nani", + 2237: "optech-port1-lm", + 2238: "aviva-sna", + 2239: "imagequery", + 2240: "recipe", + 2241: "ivsd", + 2242: "foliocorp", + 2243: "magicom", + 2244: "nmsserver", + 2245: "hao", + 2246: "pc-mta-addrmap", + 2247: "antidotemgrsvr", + 2248: "ums", + 2249: "rfmp", + 2250: "remote-collab", + 2251: "dif-port", + 2252: "njenet-ssl", + 2253: "dtv-chan-req", + 2254: "seispoc", + 2255: "vrtp", + 2256: "pcc-mfp", + 2257: "simple-tx-rx", + 2258: "rcts", + 2260: "apc-2260", + 2261: "comotionmaster", + 2262: "comotionback", + 2263: "ecwcfg", + 2264: "apx500api-1", + 2265: "apx500api-2", + 2266: "mfserver", + 2267: "ontobroker", + 2268: "amt", + 2269: "mikey", + 2270: "starschool", + 2271: "mmcals", + 2272: "mmcal", + 2273: "mysql-im", + 2274: "pcttunnell", + 2275: "ibridge-data", + 2276: "ibridge-mgmt", + 2277: "bluectrlproxy", + 2278: "s3db", + 2279: "xmquery", + 2280: "lnvpoller", + 2281: "lnvconsole", + 2282: "lnvalarm", + 2283: "lnvstatus", + 2284: "lnvmaps", + 2285: "lnvmailmon", + 2286: "nas-metering", + 2287: "dna", + 2288: "netml", + 2289: "dict-lookup", + 2290: "sonus-logging", + 2291: "eapsp", + 2292: "mib-streaming", + 2293: "npdbgmngr", + 2294: "konshus-lm", + 2295: "advant-lm", + 2296: "theta-lm", + 2297: "d2k-datamover1", + 2298: "d2k-datamover2", + 2299: "pc-telecommute", + 2300: "cvmmon", + 2301: "cpq-wbem", + 2302: "binderysupport", + 2303: "proxy-gateway", + 2304: "attachmate-uts", + 2305: "mt-scaleserver", + 2306: "tappi-boxnet", + 2307: "pehelp", + 2308: "sdhelp", + 2309: "sdserver", + 2310: "sdclient", + 2311: "messageservice", + 2312: "wanscaler", + 2313: "iapp", + 2314: "cr-websystems", + 2315: "precise-sft", + 2316: "sent-lm", + 2317: "attachmate-g32", + 2318: "cadencecontrol", + 2319: "infolibria", + 2320: "siebel-ns", + 2321: "rdlap", + 2322: "ofsd", + 2323: "3d-nfsd", + 2324: "cosmocall", + 2325: "ansysli", + 2326: "idcp", + 2327: "xingcsm", + 2328: "netrix-sftm", + 2329: "nvd", + 2330: "tscchat", + 2331: "agentview", + 2332: "rcc-host", + 2333: "snapp", + 2334: "ace-client", + 2335: "ace-proxy", + 2336: "appleugcontrol", + 2337: "ideesrv", + 2338: "norton-lambert", + 2339: "3com-webview", + 2340: "wrs-registry", + 2341: "xiostatus", + 2342: "manage-exec", + 2343: "nati-logos", + 2344: "fcmsys", + 2345: "dbm", + 2346: "redstorm-join", + 2347: "redstorm-find", + 2348: "redstorm-info", + 2349: "redstorm-diag", + 2350: "psbserver", + 2351: "psrserver", + 2352: "pslserver", + 2353: "pspserver", + 2354: "psprserver", + 2355: "psdbserver", + 2356: "gxtelmd", + 2357: "unihub-server", + 2358: "futrix", + 2359: "flukeserver", + 2360: "nexstorindltd", + 2361: 
"tl1", + 2362: "digiman", + 2363: "mediacntrlnfsd", + 2364: "oi-2000", + 2365: "dbref", + 2366: "qip-login", + 2367: "service-ctrl", + 2368: "opentable", + 2370: "l3-hbmon", + 2372: "lanmessenger", + 2381: "compaq-https", + 2382: "ms-olap3", + 2383: "ms-olap4", + 2384: "sd-capacity", + 2385: "sd-data", + 2386: "virtualtape", + 2387: "vsamredirector", + 2388: "mynahautostart", + 2389: "ovsessionmgr", + 2390: "rsmtp", + 2391: "3com-net-mgmt", + 2392: "tacticalauth", + 2393: "ms-olap1", + 2394: "ms-olap2", + 2395: "lan900-remote", + 2396: "wusage", + 2397: "ncl", + 2398: "orbiter", + 2399: "fmpro-fdal", + 2400: "opequus-server", + 2401: "cvspserver", + 2402: "taskmaster2000", + 2403: "taskmaster2000", + 2404: "iec-104", + 2405: "trc-netpoll", + 2406: "jediserver", + 2407: "orion", + 2409: "sns-protocol", + 2410: "vrts-registry", + 2411: "netwave-ap-mgmt", + 2412: "cdn", + 2413: "orion-rmi-reg", + 2414: "beeyond", + 2415: "codima-rtp", + 2416: "rmtserver", + 2417: "composit-server", + 2418: "cas", + 2419: "attachmate-s2s", + 2420: "dslremote-mgmt", + 2421: "g-talk", + 2422: "crmsbits", + 2423: "rnrp", + 2424: "kofax-svr", + 2425: "fjitsuappmgr", + 2426: "vcmp", + 2427: "mgcp-gateway", + 2428: "ott", + 2429: "ft-role", + 2430: "venus", + 2431: "venus-se", + 2432: "codasrv", + 2433: "codasrv-se", + 2434: "pxc-epmap", + 2435: "optilogic", + 2436: "topx", + 2437: "unicontrol", + 2438: "msp", + 2439: "sybasedbsynch", + 2440: "spearway", + 2441: "pvsw-inet", + 2442: "netangel", + 2443: "powerclientcsf", + 2444: "btpp2sectrans", + 2445: "dtn1", + 2446: "bues-service", + 2447: "ovwdb", + 2448: "hpppssvr", + 2449: "ratl", + 2450: "netadmin", + 2451: "netchat", + 2452: "snifferclient", + 2453: "madge-ltd", + 2454: "indx-dds", + 2455: "wago-io-system", + 2456: "altav-remmgt", + 2457: "rapido-ip", + 2458: "griffin", + 2459: "community", + 2460: "ms-theater", + 2461: "qadmifoper", + 2462: "qadmifevent", + 2463: "lsi-raid-mgmt", + 2464: "direcpc-si", + 2465: "lbm", + 2466: "lbf", + 2467: "high-criteria", + 2468: "qip-msgd", + 2469: "mti-tcs-comm", + 2470: "taskman-port", + 2471: "seaodbc", + 2472: "c3", + 2473: "aker-cdp", + 2474: "vitalanalysis", + 2475: "ace-server", + 2476: "ace-svr-prop", + 2477: "ssm-cvs", + 2478: "ssm-cssps", + 2479: "ssm-els", + 2480: "powerexchange", + 2481: "giop", + 2482: "giop-ssl", + 2483: "ttc", + 2484: "ttc-ssl", + 2485: "netobjects1", + 2486: "netobjects2", + 2487: "pns", + 2488: "moy-corp", + 2489: "tsilb", + 2490: "qip-qdhcp", + 2491: "conclave-cpp", + 2492: "groove", + 2493: "talarian-mqs", + 2494: "bmc-ar", + 2495: "fast-rem-serv", + 2496: "dirgis", + 2497: "quaddb", + 2498: "odn-castraq", + 2499: "unicontrol", + 2500: "rtsserv", + 2501: "rtsclient", + 2502: "kentrox-prot", + 2503: "nms-dpnss", + 2504: "wlbs", + 2505: "ppcontrol", + 2506: "jbroker", + 2507: "spock", + 2508: "jdatastore", + 2509: "fjmpss", + 2510: "fjappmgrbulk", + 2511: "metastorm", + 2512: "citrixima", + 2513: "citrixadmin", + 2514: "facsys-ntp", + 2515: "facsys-router", + 2516: "maincontrol", + 2517: "call-sig-trans", + 2518: "willy", + 2519: "globmsgsvc", + 2520: "pvsw", + 2521: "adaptecmgr", + 2522: "windb", + 2523: "qke-llc-v3", + 2524: "optiwave-lm", + 2525: "ms-v-worlds", + 2526: "ema-sent-lm", + 2527: "iqserver", + 2528: "ncr-ccl", + 2529: "utsftp", + 2530: "vrcommerce", + 2531: "ito-e-gui", + 2532: "ovtopmd", + 2533: "snifferserver", + 2534: "combox-web-acc", + 2535: "madcap", + 2536: "btpp2audctr1", + 2537: "upgrade", + 2538: "vnwk-prapi", + 2539: "vsiadmin", + 2540: "lonworks", + 2541: 
"lonworks2", + 2542: "udrawgraph", + 2543: "reftek", + 2544: "novell-zen", + 2545: "sis-emt", + 2546: "vytalvaultbrtp", + 2547: "vytalvaultvsmp", + 2548: "vytalvaultpipe", + 2549: "ipass", + 2550: "ads", + 2551: "isg-uda-server", + 2552: "call-logging", + 2553: "efidiningport", + 2554: "vcnet-link-v10", + 2555: "compaq-wcp", + 2556: "nicetec-nmsvc", + 2557: "nicetec-mgmt", + 2558: "pclemultimedia", + 2559: "lstp", + 2560: "labrat", + 2561: "mosaixcc", + 2562: "delibo", + 2563: "cti-redwood", + 2564: "hp-3000-telnet", + 2565: "coord-svr", + 2566: "pcs-pcw", + 2567: "clp", + 2568: "spamtrap", + 2569: "sonuscallsig", + 2570: "hs-port", + 2571: "cecsvc", + 2572: "ibp", + 2573: "trustestablish", + 2574: "blockade-bpsp", + 2575: "hl7", + 2576: "tclprodebugger", + 2577: "scipticslsrvr", + 2578: "rvs-isdn-dcp", + 2579: "mpfoncl", + 2580: "tributary", + 2581: "argis-te", + 2582: "argis-ds", + 2583: "mon", + 2584: "cyaserv", + 2585: "netx-server", + 2586: "netx-agent", + 2587: "masc", + 2588: "privilege", + 2589: "quartus-tcl", + 2590: "idotdist", + 2591: "maytagshuffle", + 2592: "netrek", + 2593: "mns-mail", + 2594: "dts", + 2595: "worldfusion1", + 2596: "worldfusion2", + 2597: "homesteadglory", + 2598: "citriximaclient", + 2599: "snapd", + 2600: "hpstgmgr", + 2601: "discp-client", + 2602: "discp-server", + 2603: "servicemeter", + 2604: "nsc-ccs", + 2605: "nsc-posa", + 2606: "netmon", + 2607: "connection", + 2608: "wag-service", + 2609: "system-monitor", + 2610: "versa-tek", + 2611: "lionhead", + 2612: "qpasa-agent", + 2613: "smntubootstrap", + 2614: "neveroffline", + 2615: "firepower", + 2616: "appswitch-emp", + 2617: "cmadmin", + 2618: "priority-e-com", + 2619: "bruce", + 2620: "lpsrecommender", + 2621: "miles-apart", + 2622: "metricadbc", + 2623: "lmdp", + 2624: "aria", + 2625: "blwnkl-port", + 2626: "gbjd816", + 2627: "moshebeeri", + 2628: "dict", + 2629: "sitaraserver", + 2630: "sitaramgmt", + 2631: "sitaradir", + 2632: "irdg-post", + 2633: "interintelli", + 2634: "pk-electronics", + 2635: "backburner", + 2636: "solve", + 2637: "imdocsvc", + 2638: "sybaseanywhere", + 2639: "aminet", + 2640: "ami-control", + 2641: "hdl-srv", + 2642: "tragic", + 2643: "gte-samp", + 2644: "travsoft-ipx-t", + 2645: "novell-ipx-cmd", + 2646: "and-lm", + 2647: "syncserver", + 2648: "upsnotifyprot", + 2649: "vpsipport", + 2650: "eristwoguns", + 2651: "ebinsite", + 2652: "interpathpanel", + 2653: "sonus", + 2654: "corel-vncadmin", + 2655: "unglue", + 2656: "kana", + 2657: "sns-dispatcher", + 2658: "sns-admin", + 2659: "sns-query", + 2660: "gcmonitor", + 2661: "olhost", + 2662: "bintec-capi", + 2663: "bintec-tapi", + 2664: "patrol-mq-gm", + 2665: "patrol-mq-nm", + 2666: "extensis", + 2667: "alarm-clock-s", + 2668: "alarm-clock-c", + 2669: "toad", + 2670: "tve-announce", + 2671: "newlixreg", + 2672: "nhserver", + 2673: "firstcall42", + 2674: "ewnn", + 2675: "ttc-etap", + 2676: "simslink", + 2677: "gadgetgate1way", + 2678: "gadgetgate2way", + 2679: "syncserverssl", + 2680: "pxc-sapxom", + 2681: "mpnjsomb", + 2683: "ncdloadbalance", + 2684: "mpnjsosv", + 2685: "mpnjsocl", + 2686: "mpnjsomg", + 2687: "pq-lic-mgmt", + 2688: "md-cg-http", + 2689: "fastlynx", + 2690: "hp-nnm-data", + 2691: "itinternet", + 2692: "admins-lms", + 2694: "pwrsevent", + 2695: "vspread", + 2696: "unifyadmin", + 2697: "oce-snmp-trap", + 2698: "mck-ivpip", + 2699: "csoft-plusclnt", + 2700: "tqdata", + 2701: "sms-rcinfo", + 2702: "sms-xfer", + 2703: "sms-chat", + 2704: "sms-remctrl", + 2705: "sds-admin", + 2706: "ncdmirroring", + 2707: "emcsymapiport", 
+ 2708: "banyan-net", + 2709: "supermon", + 2710: "sso-service", + 2711: "sso-control", + 2712: "aocp", + 2713: "raventbs", + 2714: "raventdm", + 2715: "hpstgmgr2", + 2716: "inova-ip-disco", + 2717: "pn-requester", + 2718: "pn-requester2", + 2719: "scan-change", + 2720: "wkars", + 2721: "smart-diagnose", + 2722: "proactivesrvr", + 2723: "watchdog-nt", + 2724: "qotps", + 2725: "msolap-ptp2", + 2726: "tams", + 2727: "mgcp-callagent", + 2728: "sqdr", + 2729: "tcim-control", + 2730: "nec-raidplus", + 2731: "fyre-messanger", + 2732: "g5m", + 2733: "signet-ctf", + 2734: "ccs-software", + 2735: "netiq-mc", + 2736: "radwiz-nms-srv", + 2737: "srp-feedback", + 2738: "ndl-tcp-ois-gw", + 2739: "tn-timing", + 2740: "alarm", + 2741: "tsb", + 2742: "tsb2", + 2743: "murx", + 2744: "honyaku", + 2745: "urbisnet", + 2746: "cpudpencap", + 2747: "fjippol-swrly", + 2748: "fjippol-polsvr", + 2749: "fjippol-cnsl", + 2750: "fjippol-port1", + 2751: "fjippol-port2", + 2752: "rsisysaccess", + 2753: "de-spot", + 2754: "apollo-cc", + 2755: "expresspay", + 2756: "simplement-tie", + 2757: "cnrp", + 2758: "apollo-status", + 2759: "apollo-gms", + 2760: "sabams", + 2761: "dicom-iscl", + 2762: "dicom-tls", + 2763: "desktop-dna", + 2764: "data-insurance", + 2765: "qip-audup", + 2766: "compaq-scp", + 2767: "uadtc", + 2768: "uacs", + 2769: "exce", + 2770: "veronica", + 2771: "vergencecm", + 2772: "auris", + 2773: "rbakcup1", + 2774: "rbakcup2", + 2775: "smpp", + 2776: "ridgeway1", + 2777: "ridgeway2", + 2778: "gwen-sonya", + 2779: "lbc-sync", + 2780: "lbc-control", + 2781: "whosells", + 2782: "everydayrc", + 2783: "aises", + 2784: "www-dev", + 2785: "aic-np", + 2786: "aic-oncrpc", + 2787: "piccolo", + 2788: "fryeserv", + 2789: "media-agent", + 2790: "plgproxy", + 2791: "mtport-regist", + 2792: "f5-globalsite", + 2793: "initlsmsad", + 2795: "livestats", + 2796: "ac-tech", + 2797: "esp-encap", + 2798: "tmesis-upshot", + 2799: "icon-discover", + 2800: "acc-raid", + 2801: "igcp", + 2802: "veritas-udp1", + 2803: "btprjctrl", + 2804: "dvr-esm", + 2805: "wta-wsp-s", + 2806: "cspuni", + 2807: "cspmulti", + 2808: "j-lan-p", + 2809: "corbaloc", + 2810: "netsteward", + 2811: "gsiftp", + 2812: "atmtcp", + 2813: "llm-pass", + 2814: "llm-csv", + 2815: "lbc-measure", + 2816: "lbc-watchdog", + 2817: "nmsigport", + 2818: "rmlnk", + 2819: "fc-faultnotify", + 2820: "univision", + 2821: "vrts-at-port", + 2822: "ka0wuc", + 2823: "cqg-netlan", + 2824: "cqg-netlan-1", + 2826: "slc-systemlog", + 2827: "slc-ctrlrloops", + 2828: "itm-lm", + 2829: "silkp1", + 2830: "silkp2", + 2831: "silkp3", + 2832: "silkp4", + 2833: "glishd", + 2834: "evtp", + 2835: "evtp-data", + 2836: "catalyst", + 2837: "repliweb", + 2838: "starbot", + 2839: "nmsigport", + 2840: "l3-exprt", + 2841: "l3-ranger", + 2842: "l3-hawk", + 2843: "pdnet", + 2844: "bpcp-poll", + 2845: "bpcp-trap", + 2846: "aimpp-hello", + 2847: "aimpp-port-req", + 2848: "amt-blc-port", + 2849: "fxp", + 2850: "metaconsole", + 2851: "webemshttp", + 2852: "bears-01", + 2853: "ispipes", + 2854: "infomover", + 2856: "cesdinv", + 2857: "simctlp", + 2858: "ecnp", + 2859: "activememory", + 2860: "dialpad-voice1", + 2861: "dialpad-voice2", + 2862: "ttg-protocol", + 2863: "sonardata", + 2864: "astromed-main", + 2865: "pit-vpn", + 2866: "iwlistener", + 2867: "esps-portal", + 2868: "npep-messaging", + 2869: "icslap", + 2870: "daishi", + 2871: "msi-selectplay", + 2872: "radix", + 2874: "dxmessagebase1", + 2875: "dxmessagebase2", + 2876: "sps-tunnel", + 2877: "bluelance", + 2878: "aap", + 2879: "ucentric-ds", + 2880: 
"synapse", + 2881: "ndsp", + 2882: "ndtp", + 2883: "ndnp", + 2884: "flashmsg", + 2885: "topflow", + 2886: "responselogic", + 2887: "aironetddp", + 2888: "spcsdlobby", + 2889: "rsom", + 2890: "cspclmulti", + 2891: "cinegrfx-elmd", + 2892: "snifferdata", + 2893: "vseconnector", + 2894: "abacus-remote", + 2895: "natuslink", + 2896: "ecovisiong6-1", + 2897: "citrix-rtmp", + 2898: "appliance-cfg", + 2899: "powergemplus", + 2900: "quicksuite", + 2901: "allstorcns", + 2902: "netaspi", + 2903: "suitcase", + 2904: "m2ua", + 2906: "caller9", + 2907: "webmethods-b2b", + 2908: "mao", + 2909: "funk-dialout", + 2910: "tdaccess", + 2911: "blockade", + 2912: "epicon", + 2913: "boosterware", + 2914: "gamelobby", + 2915: "tksocket", + 2916: "elvin-server", + 2917: "elvin-client", + 2918: "kastenchasepad", + 2919: "roboer", + 2920: "roboeda", + 2921: "cesdcdman", + 2922: "cesdcdtrn", + 2923: "wta-wsp-wtp-s", + 2924: "precise-vip", + 2926: "mobile-file-dl", + 2927: "unimobilectrl", + 2928: "redstone-cpss", + 2929: "amx-webadmin", + 2930: "amx-weblinx", + 2931: "circle-x", + 2932: "incp", + 2933: "4-tieropmgw", + 2934: "4-tieropmcli", + 2935: "qtp", + 2936: "otpatch", + 2937: "pnaconsult-lm", + 2938: "sm-pas-1", + 2939: "sm-pas-2", + 2940: "sm-pas-3", + 2941: "sm-pas-4", + 2942: "sm-pas-5", + 2943: "ttnrepository", + 2944: "megaco-h248", + 2945: "h248-binary", + 2946: "fjsvmpor", + 2947: "gpsd", + 2948: "wap-push", + 2949: "wap-pushsecure", + 2950: "esip", + 2951: "ottp", + 2952: "mpfwsas", + 2953: "ovalarmsrv", + 2954: "ovalarmsrv-cmd", + 2955: "csnotify", + 2956: "ovrimosdbman", + 2957: "jmact5", + 2958: "jmact6", + 2959: "rmopagt", + 2960: "dfoxserver", + 2961: "boldsoft-lm", + 2962: "iph-policy-cli", + 2963: "iph-policy-adm", + 2964: "bullant-srap", + 2965: "bullant-rap", + 2966: "idp-infotrieve", + 2967: "ssc-agent", + 2968: "enpp", + 2969: "essp", + 2970: "index-net", + 2971: "netclip", + 2972: "pmsm-webrctl", + 2973: "svnetworks", + 2974: "signal", + 2975: "fjmpcm", + 2976: "cns-srv-port", + 2977: "ttc-etap-ns", + 2978: "ttc-etap-ds", + 2979: "h263-video", + 2980: "wimd", + 2981: "mylxamport", + 2982: "iwb-whiteboard", + 2983: "netplan", + 2984: "hpidsadmin", + 2985: "hpidsagent", + 2986: "stonefalls", + 2987: "identify", + 2988: "hippad", + 2989: "zarkov", + 2990: "boscap", + 2991: "wkstn-mon", + 2992: "avenyo", + 2993: "veritas-vis1", + 2994: "veritas-vis2", + 2995: "idrs", + 2996: "vsixml", + 2997: "rebol", + 2998: "realsecure", + 2999: "remoteware-un", + 3000: "hbci", + 3002: "exlm-agent", + 3003: "cgms", + 3004: "csoftragent", + 3005: "geniuslm", + 3006: "ii-admin", + 3007: "lotusmtap", + 3008: "midnight-tech", + 3009: "pxc-ntfy", + 3010: "ping-pong", + 3011: "trusted-web", + 3012: "twsdss", + 3013: "gilatskysurfer", + 3014: "broker-service", + 3015: "nati-dstp", + 3016: "notify-srvr", + 3017: "event-listener", + 3018: "srvc-registry", + 3019: "resource-mgr", + 3020: "cifs", + 3021: "agriserver", + 3022: "csregagent", + 3023: "magicnotes", + 3024: "nds-sso", + 3025: "arepa-raft", + 3026: "agri-gateway", + 3027: "LiebDevMgmt-C", + 3028: "LiebDevMgmt-DM", + 3029: "LiebDevMgmt-A", + 3030: "arepa-cas", + 3031: "eppc", + 3032: "redwood-chat", + 3033: "pdb", + 3034: "osmosis-aeea", + 3035: "fjsv-gssagt", + 3036: "hagel-dump", + 3037: "hp-san-mgmt", + 3038: "santak-ups", + 3039: "cogitate", + 3040: "tomato-springs", + 3041: "di-traceware", + 3042: "journee", + 3043: "brp", + 3044: "epp", + 3045: "responsenet", + 3046: "di-ase", + 3047: "hlserver", + 3048: "pctrader", + 3049: "nsws", + 3050: "gds-db", + 
3051: "galaxy-server", + 3052: "apc-3052", + 3053: "dsom-server", + 3054: "amt-cnf-prot", + 3055: "policyserver", + 3056: "cdl-server", + 3057: "goahead-fldup", + 3058: "videobeans", + 3059: "qsoft", + 3060: "interserver", + 3061: "cautcpd", + 3062: "ncacn-ip-tcp", + 3063: "ncadg-ip-udp", + 3064: "rprt", + 3065: "slinterbase", + 3066: "netattachsdmp", + 3067: "fjhpjp", + 3068: "ls3bcast", + 3069: "ls3", + 3070: "mgxswitch", + 3072: "csd-monitor", + 3073: "vcrp", + 3074: "xbox", + 3075: "orbix-locator", + 3076: "orbix-config", + 3077: "orbix-loc-ssl", + 3078: "orbix-cfg-ssl", + 3079: "lv-frontpanel", + 3080: "stm-pproc", + 3081: "tl1-lv", + 3082: "tl1-raw", + 3083: "tl1-telnet", + 3084: "itm-mccs", + 3085: "pcihreq", + 3086: "jdl-dbkitchen", + 3087: "asoki-sma", + 3088: "xdtp", + 3089: "ptk-alink", + 3090: "stss", + 3091: "1ci-smcs", + 3093: "rapidmq-center", + 3094: "rapidmq-reg", + 3095: "panasas", + 3096: "ndl-aps", + 3098: "umm-port", + 3099: "chmd", + 3100: "opcon-xps", + 3101: "hp-pxpib", + 3102: "slslavemon", + 3103: "autocuesmi", + 3104: "autocuetime", + 3105: "cardbox", + 3106: "cardbox-http", + 3107: "business", + 3108: "geolocate", + 3109: "personnel", + 3110: "sim-control", + 3111: "wsynch", + 3112: "ksysguard", + 3113: "cs-auth-svr", + 3114: "ccmad", + 3115: "mctet-master", + 3116: "mctet-gateway", + 3117: "mctet-jserv", + 3118: "pkagent", + 3119: "d2000kernel", + 3120: "d2000webserver", + 3122: "vtr-emulator", + 3123: "edix", + 3124: "beacon-port", + 3125: "a13-an", + 3127: "ctx-bridge", + 3128: "ndl-aas", + 3129: "netport-id", + 3130: "icpv2", + 3131: "netbookmark", + 3132: "ms-rule-engine", + 3133: "prism-deploy", + 3134: "ecp", + 3135: "peerbook-port", + 3136: "grubd", + 3137: "rtnt-1", + 3138: "rtnt-2", + 3139: "incognitorv", + 3140: "ariliamulti", + 3141: "vmodem", + 3142: "rdc-wh-eos", + 3143: "seaview", + 3144: "tarantella", + 3145: "csi-lfap", + 3146: "bears-02", + 3147: "rfio", + 3148: "nm-game-admin", + 3149: "nm-game-server", + 3150: "nm-asses-admin", + 3151: "nm-assessor", + 3152: "feitianrockey", + 3153: "s8-client-port", + 3154: "ccmrmi", + 3155: "jpegmpeg", + 3156: "indura", + 3157: "e3consultants", + 3158: "stvp", + 3159: "navegaweb-port", + 3160: "tip-app-server", + 3161: "doc1lm", + 3162: "sflm", + 3163: "res-sap", + 3164: "imprs", + 3165: "newgenpay", + 3166: "sossecollector", + 3167: "nowcontact", + 3168: "poweronnud", + 3169: "serverview-as", + 3170: "serverview-asn", + 3171: "serverview-gf", + 3172: "serverview-rm", + 3173: "serverview-icc", + 3174: "armi-server", + 3175: "t1-e1-over-ip", + 3176: "ars-master", + 3177: "phonex-port", + 3178: "radclientport", + 3179: "h2gf-w-2m", + 3180: "mc-brk-srv", + 3181: "bmcpatrolagent", + 3182: "bmcpatrolrnvu", + 3183: "cops-tls", + 3184: "apogeex-port", + 3185: "smpppd", + 3186: "iiw-port", + 3187: "odi-port", + 3188: "brcm-comm-port", + 3189: "pcle-infex", + 3190: "csvr-proxy", + 3191: "csvr-sslproxy", + 3192: "firemonrcc", + 3193: "spandataport", + 3194: "magbind", + 3195: "ncu-1", + 3196: "ncu-2", + 3197: "embrace-dp-s", + 3198: "embrace-dp-c", + 3199: "dmod-workspace", + 3200: "tick-port", + 3201: "cpq-tasksmart", + 3202: "intraintra", + 3203: "netwatcher-mon", + 3204: "netwatcher-db", + 3205: "isns", + 3206: "ironmail", + 3207: "vx-auth-port", + 3208: "pfu-prcallback", + 3209: "netwkpathengine", + 3210: "flamenco-proxy", + 3211: "avsecuremgmt", + 3212: "surveyinst", + 3213: "neon24x7", + 3214: "jmq-daemon-1", + 3215: "jmq-daemon-2", + 3216: "ferrari-foam", + 3217: "unite", + 3218: "smartpackets", + 3219: 
"wms-messenger", + 3220: "xnm-ssl", + 3221: "xnm-clear-text", + 3222: "glbp", + 3223: "digivote", + 3224: "aes-discovery", + 3225: "fcip-port", + 3226: "isi-irp", + 3227: "dwnmshttp", + 3228: "dwmsgserver", + 3229: "global-cd-port", + 3230: "sftdst-port", + 3231: "vidigo", + 3232: "mdtp", + 3233: "whisker", + 3234: "alchemy", + 3235: "mdap-port", + 3236: "apparenet-ts", + 3237: "apparenet-tps", + 3238: "apparenet-as", + 3239: "apparenet-ui", + 3240: "triomotion", + 3241: "sysorb", + 3242: "sdp-id-port", + 3243: "timelot", + 3244: "onesaf", + 3245: "vieo-fe", + 3246: "dvt-system", + 3247: "dvt-data", + 3248: "procos-lm", + 3249: "ssp", + 3250: "hicp", + 3251: "sysscanner", + 3252: "dhe", + 3253: "pda-data", + 3254: "pda-sys", + 3255: "semaphore", + 3256: "cpqrpm-agent", + 3257: "cpqrpm-server", + 3258: "ivecon-port", + 3259: "epncdp2", + 3260: "iscsi-target", + 3261: "winshadow", + 3262: "necp", + 3263: "ecolor-imager", + 3264: "ccmail", + 3265: "altav-tunnel", + 3266: "ns-cfg-server", + 3267: "ibm-dial-out", + 3268: "msft-gc", + 3269: "msft-gc-ssl", + 3270: "verismart", + 3271: "csoft-prev", + 3272: "user-manager", + 3273: "sxmp", + 3274: "ordinox-server", + 3275: "samd", + 3276: "maxim-asics", + 3277: "awg-proxy", + 3278: "lkcmserver", + 3279: "admind", + 3280: "vs-server", + 3281: "sysopt", + 3282: "datusorb", + 3283: "Apple Remote Desktop (Net Assistant)", + 3284: "4talk", + 3285: "plato", + 3286: "e-net", + 3287: "directvdata", + 3288: "cops", + 3289: "enpc", + 3290: "caps-lm", + 3291: "sah-lm", + 3292: "cart-o-rama", + 3293: "fg-fps", + 3294: "fg-gip", + 3295: "dyniplookup", + 3296: "rib-slm", + 3297: "cytel-lm", + 3298: "deskview", + 3299: "pdrncs", + 3302: "mcs-fastmail", + 3303: "opsession-clnt", + 3304: "opsession-srvr", + 3305: "odette-ftp", + 3306: "mysql", + 3307: "opsession-prxy", + 3308: "tns-server", + 3309: "tns-adv", + 3310: "dyna-access", + 3311: "mcns-tel-ret", + 3312: "appman-server", + 3313: "uorb", + 3314: "uohost", + 3315: "cdid", + 3316: "aicc-cmi", + 3317: "vsaiport", + 3318: "ssrip", + 3319: "sdt-lmd", + 3320: "officelink2000", + 3321: "vnsstr", + 3326: "sftu", + 3327: "bbars", + 3328: "egptlm", + 3329: "hp-device-disc", + 3330: "mcs-calypsoicf", + 3331: "mcs-messaging", + 3332: "mcs-mailsvr", + 3333: "dec-notes", + 3334: "directv-web", + 3335: "directv-soft", + 3336: "directv-tick", + 3337: "directv-catlg", + 3338: "anet-b", + 3339: "anet-l", + 3340: "anet-m", + 3341: "anet-h", + 3342: "webtie", + 3343: "ms-cluster-net", + 3344: "bnt-manager", + 3345: "influence", + 3346: "trnsprntproxy", + 3347: "phoenix-rpc", + 3348: "pangolin-laser", + 3349: "chevinservices", + 3350: "findviatv", + 3351: "btrieve", + 3352: "ssql", + 3353: "fatpipe", + 3354: "suitjd", + 3355: "ordinox-dbase", + 3356: "upnotifyps", + 3357: "adtech-test", + 3358: "mpsysrmsvr", + 3359: "wg-netforce", + 3360: "kv-server", + 3361: "kv-agent", + 3362: "dj-ilm", + 3363: "nati-vi-server", + 3364: "creativeserver", + 3365: "contentserver", + 3366: "creativepartnr", + 3372: "tip2", + 3373: "lavenir-lm", + 3374: "cluster-disc", + 3375: "vsnm-agent", + 3376: "cdbroker", + 3377: "cogsys-lm", + 3378: "wsicopy", + 3379: "socorfs", + 3380: "sns-channels", + 3381: "geneous", + 3382: "fujitsu-neat", + 3383: "esp-lm", + 3384: "hp-clic", + 3385: "qnxnetman", + 3386: "gprs-sig", + 3387: "backroomnet", + 3388: "cbserver", + 3389: "ms-wbt-server", + 3390: "dsc", + 3391: "savant", + 3392: "efi-lm", + 3393: "d2k-tapestry1", + 3394: "d2k-tapestry2", + 3395: "dyna-lm", + 3396: "printer-agent", + 3397: "cloanto-lm", + 
3398: "mercantile", + 3399: "csms", + 3400: "csms2", + 3401: "filecast", + 3402: "fxaengine-net", + 3405: "nokia-ann-ch1", + 3406: "nokia-ann-ch2", + 3407: "ldap-admin", + 3408: "BESApi", + 3409: "networklens", + 3410: "networklenss", + 3411: "biolink-auth", + 3412: "xmlblaster", + 3413: "svnet", + 3414: "wip-port", + 3415: "bcinameservice", + 3416: "commandport", + 3417: "csvr", + 3418: "rnmap", + 3419: "softaudit", + 3420: "ifcp-port", + 3421: "bmap", + 3422: "rusb-sys-port", + 3423: "xtrm", + 3424: "xtrms", + 3425: "agps-port", + 3426: "arkivio", + 3427: "websphere-snmp", + 3428: "twcss", + 3429: "gcsp", + 3430: "ssdispatch", + 3431: "ndl-als", + 3432: "osdcp", + 3433: "opnet-smp", + 3434: "opencm", + 3435: "pacom", + 3436: "gc-config", + 3437: "autocueds", + 3438: "spiral-admin", + 3439: "hri-port", + 3440: "ans-console", + 3441: "connect-client", + 3442: "connect-server", + 3443: "ov-nnm-websrv", + 3444: "denali-server", + 3445: "monp", + 3446: "3comfaxrpc", + 3447: "directnet", + 3448: "dnc-port", + 3449: "hotu-chat", + 3450: "castorproxy", + 3451: "asam", + 3452: "sabp-signal", + 3453: "pscupd", + 3454: "mira", + 3455: "prsvp", + 3456: "vat", + 3457: "vat-control", + 3458: "d3winosfi", + 3459: "integral", + 3460: "edm-manager", + 3461: "edm-stager", + 3462: "edm-std-notify", + 3463: "edm-adm-notify", + 3464: "edm-mgr-sync", + 3465: "edm-mgr-cntrl", + 3466: "workflow", + 3467: "rcst", + 3468: "ttcmremotectrl", + 3469: "pluribus", + 3470: "jt400", + 3471: "jt400-ssl", + 3472: "jaugsremotec-1", + 3473: "jaugsremotec-2", + 3474: "ttntspauto", + 3475: "genisar-port", + 3476: "nppmp", + 3477: "ecomm", + 3478: "stun", + 3479: "twrpc", + 3480: "plethora", + 3481: "cleanerliverc", + 3482: "vulture", + 3483: "slim-devices", + 3484: "gbs-stp", + 3485: "celatalk", + 3486: "ifsf-hb-port", + 3487: "ltcudp", + 3488: "fs-rh-srv", + 3489: "dtp-dia", + 3490: "colubris", + 3491: "swr-port", + 3492: "tvdumtray-port", + 3493: "nut", + 3494: "ibm3494", + 3495: "seclayer-tcp", + 3496: "seclayer-tls", + 3497: "ipether232port", + 3498: "dashpas-port", + 3499: "sccip-media", + 3500: "rtmp-port", + 3501: "isoft-p2p", + 3502: "avinstalldisc", + 3503: "lsp-ping", + 3504: "ironstorm", + 3505: "ccmcomm", + 3506: "apc-3506", + 3507: "nesh-broker", + 3508: "interactionweb", + 3509: "vt-ssl", + 3510: "xss-port", + 3511: "webmail-2", + 3512: "aztec", + 3513: "arcpd", + 3514: "must-p2p", + 3515: "must-backplane", + 3516: "smartcard-port", + 3517: "802-11-iapp", + 3518: "artifact-msg", + 3519: "galileo", + 3520: "galileolog", + 3521: "mc3ss", + 3522: "nssocketport", + 3523: "odeumservlink", + 3524: "ecmport", + 3525: "eisport", + 3526: "starquiz-port", + 3527: "beserver-msg-q", + 3528: "jboss-iiop", + 3529: "jboss-iiop-ssl", + 3530: "gf", + 3531: "joltid", + 3532: "raven-rmp", + 3533: "raven-rdp", + 3534: "urld-port", + 3535: "ms-la", + 3536: "snac", + 3537: "ni-visa-remote", + 3538: "ibm-diradm", + 3539: "ibm-diradm-ssl", + 3540: "pnrp-port", + 3541: "voispeed-port", + 3542: "hacl-monitor", + 3543: "qftest-lookup", + 3544: "teredo", + 3545: "camac", + 3547: "symantec-sim", + 3548: "interworld", + 3549: "tellumat-nms", + 3550: "ssmpp", + 3551: "apcupsd", + 3552: "taserver", + 3553: "rbr-discovery", + 3554: "questnotify", + 3555: "razor", + 3556: "sky-transport", + 3557: "personalos-001", + 3558: "mcp-port", + 3559: "cctv-port", + 3560: "iniserve-port", + 3561: "bmc-onekey", + 3562: "sdbproxy", + 3563: "watcomdebug", + 3564: "esimport", + 3567: "dof-eps", + 3568: "dof-tunnel-sec", + 3569: "mbg-ctrl", + 3570: 
"mccwebsvr-port", + 3571: "megardsvr-port", + 3572: "megaregsvrport", + 3573: "tag-ups-1", + 3574: "dmaf-caster", + 3575: "ccm-port", + 3576: "cmc-port", + 3577: "config-port", + 3578: "data-port", + 3579: "ttat3lb", + 3580: "nati-svrloc", + 3581: "kfxaclicensing", + 3582: "press", + 3583: "canex-watch", + 3584: "u-dbap", + 3585: "emprise-lls", + 3586: "emprise-lsc", + 3587: "p2pgroup", + 3588: "sentinel", + 3589: "isomair", + 3590: "wv-csp-sms", + 3591: "gtrack-server", + 3592: "gtrack-ne", + 3593: "bpmd", + 3594: "mediaspace", + 3595: "shareapp", + 3596: "iw-mmogame", + 3597: "a14", + 3598: "a15", + 3599: "quasar-server", + 3600: "trap-daemon", + 3601: "visinet-gui", + 3602: "infiniswitchcl", + 3603: "int-rcv-cntrl", + 3604: "bmc-jmx-port", + 3605: "comcam-io", + 3606: "splitlock", + 3607: "precise-i3", + 3608: "trendchip-dcp", + 3609: "cpdi-pidas-cm", + 3610: "echonet", + 3611: "six-degrees", + 3612: "hp-dataprotect", + 3613: "alaris-disc", + 3614: "sigma-port", + 3615: "start-network", + 3616: "cd3o-protocol", + 3617: "sharp-server", + 3618: "aairnet-1", + 3619: "aairnet-2", + 3620: "ep-pcp", + 3621: "ep-nsp", + 3622: "ff-lr-port", + 3623: "haipe-discover", + 3624: "dist-upgrade", + 3625: "volley", + 3626: "bvcdaemon-port", + 3627: "jamserverport", + 3628: "ept-machine", + 3629: "escvpnet", + 3630: "cs-remote-db", + 3631: "cs-services", + 3632: "distcc", + 3633: "wacp", + 3634: "hlibmgr", + 3635: "sdo", + 3636: "servistaitsm", + 3637: "scservp", + 3638: "ehp-backup", + 3639: "xap-ha", + 3640: "netplay-port1", + 3641: "netplay-port2", + 3642: "juxml-port", + 3643: "audiojuggler", + 3644: "ssowatch", + 3645: "cyc", + 3646: "xss-srv-port", + 3647: "splitlock-gw", + 3648: "fjcp", + 3649: "nmmp", + 3650: "prismiq-plugin", + 3651: "xrpc-registry", + 3652: "vxcrnbuport", + 3653: "tsp", + 3654: "vaprtm", + 3655: "abatemgr", + 3656: "abatjss", + 3657: "immedianet-bcn", + 3658: "ps-ams", + 3659: "apple-sasl", + 3660: "can-nds-ssl", + 3661: "can-ferret-ssl", + 3662: "pserver", + 3663: "dtp", + 3664: "ups-engine", + 3665: "ent-engine", + 3666: "eserver-pap", + 3667: "infoexch", + 3668: "dell-rm-port", + 3669: "casanswmgmt", + 3670: "smile", + 3671: "efcp", + 3672: "lispworks-orb", + 3673: "mediavault-gui", + 3674: "wininstall-ipc", + 3675: "calltrax", + 3676: "va-pacbase", + 3677: "roverlog", + 3678: "ipr-dglt", + 3679: "Escale (Newton Dock)", + 3680: "npds-tracker", + 3681: "bts-x73", + 3682: "cas-mapi", + 3683: "bmc-ea", + 3684: "faxstfx-port", + 3685: "dsx-agent", + 3686: "tnmpv2", + 3687: "simple-push", + 3688: "simple-push-s", + 3689: "daap", + 3690: "svn", + 3691: "magaya-network", + 3692: "intelsync", + 3695: "bmc-data-coll", + 3696: "telnetcpcd", + 3697: "nw-license", + 3698: "sagectlpanel", + 3699: "kpn-icw", + 3700: "lrs-paging", + 3701: "netcelera", + 3702: "ws-discovery", + 3703: "adobeserver-3", + 3704: "adobeserver-4", + 3705: "adobeserver-5", + 3706: "rt-event", + 3707: "rt-event-s", + 3708: "sun-as-iiops", + 3709: "ca-idms", + 3710: "portgate-auth", + 3711: "edb-server2", + 3712: "sentinel-ent", + 3713: "tftps", + 3714: "delos-dms", + 3715: "anoto-rendezv", + 3716: "wv-csp-sms-cir", + 3717: "wv-csp-udp-cir", + 3718: "opus-services", + 3719: "itelserverport", + 3720: "ufastro-instr", + 3721: "xsync", + 3722: "xserveraid", + 3723: "sychrond", + 3724: "blizwow", + 3725: "na-er-tip", + 3726: "array-manager", + 3727: "e-mdu", + 3728: "e-woa", + 3729: "fksp-audit", + 3730: "client-ctrl", + 3731: "smap", + 3732: "m-wnn", + 3733: "multip-msg", + 3734: "synel-data", + 3735: "pwdis", + 3736: 
"rs-rmi", + 3738: "versatalk", + 3739: "launchbird-lm", + 3740: "heartbeat", + 3741: "wysdma", + 3742: "cst-port", + 3743: "ipcs-command", + 3744: "sasg", + 3745: "gw-call-port", + 3746: "linktest", + 3747: "linktest-s", + 3748: "webdata", + 3749: "cimtrak", + 3750: "cbos-ip-port", + 3751: "gprs-cube", + 3752: "vipremoteagent", + 3753: "nattyserver", + 3754: "timestenbroker", + 3755: "sas-remote-hlp", + 3756: "canon-capt", + 3757: "grf-port", + 3758: "apw-registry", + 3759: "exapt-lmgr", + 3760: "adtempusclient", + 3761: "gsakmp", + 3762: "gbs-smp", + 3763: "xo-wave", + 3764: "mni-prot-rout", + 3765: "rtraceroute", + 3767: "listmgr-port", + 3768: "rblcheckd", + 3769: "haipe-otnk", + 3770: "cindycollab", + 3771: "paging-port", + 3772: "ctp", + 3773: "ctdhercules", + 3774: "zicom", + 3775: "ispmmgr", + 3776: "dvcprov-port", + 3777: "jibe-eb", + 3778: "c-h-it-port", + 3779: "cognima", + 3780: "nnp", + 3781: "abcvoice-port", + 3782: "iso-tp0s", + 3783: "bim-pem", + 3784: "bfd-control", + 3785: "bfd-echo", + 3786: "upstriggervsw", + 3787: "fintrx", + 3788: "isrp-port", + 3789: "remotedeploy", + 3790: "quickbooksrds", + 3791: "tvnetworkvideo", + 3792: "sitewatch", + 3793: "dcsoftware", + 3794: "jaus", + 3795: "myblast", + 3796: "spw-dialer", + 3797: "idps", + 3798: "minilock", + 3799: "radius-dynauth", + 3800: "pwgpsi", + 3801: "ibm-mgr", + 3802: "vhd", + 3803: "soniqsync", + 3804: "iqnet-port", + 3805: "tcpdataserver", + 3806: "wsmlb", + 3807: "spugna", + 3808: "sun-as-iiops-ca", + 3809: "apocd", + 3810: "wlanauth", + 3811: "amp", + 3812: "neto-wol-server", + 3813: "rap-ip", + 3814: "neto-dcs", + 3815: "lansurveyorxml", + 3816: "sunlps-http", + 3817: "tapeware", + 3818: "crinis-hb", + 3819: "epl-slp", + 3820: "scp", + 3821: "pmcp", + 3822: "acp-discovery", + 3823: "acp-conduit", + 3824: "acp-policy", + 3825: "ffserver", + 3826: "warmux", + 3827: "netmpi", + 3828: "neteh", + 3829: "neteh-ext", + 3830: "cernsysmgmtagt", + 3831: "dvapps", + 3832: "xxnetserver", + 3833: "aipn-auth", + 3834: "spectardata", + 3835: "spectardb", + 3836: "markem-dcp", + 3837: "mkm-discovery", + 3838: "sos", + 3839: "amx-rms", + 3840: "flirtmitmir", + 3842: "nhci", + 3843: "quest-agent", + 3844: "rnm", + 3845: "v-one-spp", + 3846: "an-pcp", + 3847: "msfw-control", + 3848: "item", + 3849: "spw-dnspreload", + 3850: "qtms-bootstrap", + 3851: "spectraport", + 3852: "sse-app-config", + 3853: "sscan", + 3854: "stryker-com", + 3855: "opentrac", + 3856: "informer", + 3857: "trap-port", + 3858: "trap-port-mom", + 3859: "nav-port", + 3860: "sasp", + 3861: "winshadow-hd", + 3862: "giga-pocket", + 3863: "asap-udp", + 3865: "xpl", + 3866: "dzdaemon", + 3867: "dzoglserver", + 3869: "ovsam-mgmt", + 3870: "ovsam-d-agent", + 3871: "avocent-adsap", + 3872: "oem-agent", + 3873: "fagordnc", + 3874: "sixxsconfig", + 3875: "pnbscada", + 3876: "dl-agent", + 3877: "xmpcr-interface", + 3878: "fotogcad", + 3879: "appss-lm", + 3880: "igrs", + 3881: "idac", + 3882: "msdts1", + 3883: "vrpn", + 3884: "softrack-meter", + 3885: "topflow-ssl", + 3886: "nei-management", + 3887: "ciphire-data", + 3888: "ciphire-serv", + 3889: "dandv-tester", + 3890: "ndsconnect", + 3891: "rtc-pm-port", + 3892: "pcc-image-port", + 3893: "cgi-starapi", + 3894: "syam-agent", + 3895: "syam-smc", + 3896: "sdo-tls", + 3897: "sdo-ssh", + 3898: "senip", + 3899: "itv-control", + 3900: "udt-os", + 3901: "nimsh", + 3902: "nimaux", + 3903: "charsetmgr", + 3904: "omnilink-port", + 3905: "mupdate", + 3906: "topovista-data", + 3907: "imoguia-port", + 3908: "hppronetman", + 3909: 
"surfcontrolcpa", + 3910: "prnrequest", + 3911: "prnstatus", + 3912: "gbmt-stars", + 3913: "listcrt-port", + 3914: "listcrt-port-2", + 3915: "agcat", + 3916: "wysdmc", + 3917: "aftmux", + 3918: "pktcablemmcops", + 3919: "hyperip", + 3920: "exasoftport1", + 3921: "herodotus-net", + 3922: "sor-update", + 3923: "symb-sb-port", + 3924: "mpl-gprs-port", + 3925: "zmp", + 3926: "winport", + 3927: "natdataservice", + 3928: "netboot-pxe", + 3929: "smauth-port", + 3930: "syam-webserver", + 3931: "msr-plugin-port", + 3932: "dyn-site", + 3933: "plbserve-port", + 3934: "sunfm-port", + 3935: "sdp-portmapper", + 3936: "mailprox", + 3937: "dvbservdsc", + 3938: "dbcontrol-agent", + 3939: "aamp", + 3940: "xecp-node", + 3941: "homeportal-web", + 3942: "srdp", + 3943: "tig", + 3944: "sops", + 3945: "emcads", + 3946: "backupedge", + 3947: "ccp", + 3948: "apdap", + 3949: "drip", + 3950: "namemunge", + 3951: "pwgippfax", + 3952: "i3-sessionmgr", + 3953: "xmlink-connect", + 3954: "adrep", + 3955: "p2pcommunity", + 3956: "gvcp", + 3957: "mqe-broker", + 3958: "mqe-agent", + 3959: "treehopper", + 3960: "bess", + 3961: "proaxess", + 3962: "sbi-agent", + 3963: "thrp", + 3964: "sasggprs", + 3965: "ati-ip-to-ncpe", + 3966: "bflckmgr", + 3967: "ppsms", + 3968: "ianywhere-dbns", + 3969: "landmarks", + 3970: "lanrevagent", + 3971: "lanrevserver", + 3972: "iconp", + 3973: "progistics", + 3974: "citysearch", + 3975: "airshot", + 3976: "opswagent", + 3977: "opswmanager", + 3978: "secure-cfg-svr", + 3979: "smwan", + 3980: "acms", + 3981: "starfish", + 3982: "eis", + 3983: "eisp", + 3984: "mapper-nodemgr", + 3985: "mapper-mapethd", + 3986: "mapper-ws-ethd", + 3987: "centerline", + 3988: "dcs-config", + 3989: "bv-queryengine", + 3990: "bv-is", + 3991: "bv-smcsrv", + 3992: "bv-ds", + 3993: "bv-agent", + 3995: "iss-mgmt-ssl", + 3996: "abcsoftware", + 3997: "agentsease-db", + 3998: "dnx", + 3999: "nvcnet", + 4000: "terabase", + 4001: "newoak", + 4002: "pxc-spvr-ft", + 4003: "pxc-splr-ft", + 4004: "pxc-roid", + 4005: "pxc-pin", + 4006: "pxc-spvr", + 4007: "pxc-splr", + 4008: "netcheque", + 4009: "chimera-hwm", + 4010: "samsung-unidex", + 4011: "altserviceboot", + 4012: "pda-gate", + 4013: "acl-manager", + 4014: "taiclock", + 4015: "talarian-mcast1", + 4016: "talarian-mcast2", + 4017: "talarian-mcast3", + 4018: "talarian-mcast4", + 4019: "talarian-mcast5", + 4020: "trap", + 4021: "nexus-portal", + 4022: "dnox", + 4023: "esnm-zoning", + 4024: "tnp1-port", + 4025: "partimage", + 4026: "as-debug", + 4027: "bxp", + 4028: "dtserver-port", + 4029: "ip-qsig", + 4030: "jdmn-port", + 4031: "suucp", + 4032: "vrts-auth-port", + 4033: "sanavigator", + 4034: "ubxd", + 4035: "wap-push-http", + 4036: "wap-push-https", + 4037: "ravehd", + 4038: "fazzt-ptp", + 4039: "fazzt-admin", + 4040: "yo-main", + 4041: "houston", + 4042: "ldxp", + 4043: "nirp", + 4044: "ltp", + 4045: "npp", + 4046: "acp-proto", + 4047: "ctp-state", + 4049: "wafs", + 4050: "cisco-wafs", + 4051: "cppdp", + 4052: "interact", + 4053: "ccu-comm-1", + 4054: "ccu-comm-2", + 4055: "ccu-comm-3", + 4056: "lms", + 4057: "wfm", + 4058: "kingfisher", + 4059: "dlms-cosem", + 4060: "dsmeter-iatc", + 4061: "ice-location", + 4062: "ice-slocation", + 4063: "ice-router", + 4064: "ice-srouter", + 4065: "avanti-cdp", + 4066: "pmas", + 4067: "idp", + 4068: "ipfltbcst", + 4069: "minger", + 4070: "tripe", + 4071: "aibkup", + 4072: "zieto-sock", + 4073: "iRAPP", + 4074: "cequint-cityid", + 4075: "perimlan", + 4076: "seraph", + 4077: "ascomalarm", + 4079: "santools", + 4080: "lorica-in", + 4081: 
"lorica-in-sec", + 4082: "lorica-out", + 4083: "lorica-out-sec", + 4084: "fortisphere-vm", + 4086: "ftsync", + 4089: "opencore", + 4090: "omasgport", + 4091: "ewinstaller", + 4092: "ewdgs", + 4093: "pvxpluscs", + 4094: "sysrqd", + 4095: "xtgui", + 4096: "bre", + 4097: "patrolview", + 4098: "drmsfsd", + 4099: "dpcp", + 4100: "igo-incognito", + 4101: "brlp-0", + 4102: "brlp-1", + 4103: "brlp-2", + 4104: "brlp-3", + 4105: "shofar", + 4106: "synchronite", + 4107: "j-ac", + 4108: "accel", + 4109: "izm", + 4110: "g2tag", + 4111: "xgrid", + 4112: "apple-vpns-rp", + 4113: "aipn-reg", + 4114: "jomamqmonitor", + 4115: "cds", + 4116: "smartcard-tls", + 4117: "hillrserv", + 4118: "netscript", + 4119: "assuria-slm", + 4121: "e-builder", + 4122: "fprams", + 4123: "z-wave", + 4124: "tigv2", + 4125: "opsview-envoy", + 4126: "ddrepl", + 4127: "unikeypro", + 4128: "nufw", + 4129: "nuauth", + 4130: "fronet", + 4131: "stars", + 4132: "nuts-dem", + 4133: "nuts-bootp", + 4134: "nifty-hmi", + 4135: "cl-db-attach", + 4136: "cl-db-request", + 4137: "cl-db-remote", + 4138: "nettest", + 4139: "thrtx", + 4140: "cedros-fds", + 4141: "oirtgsvc", + 4142: "oidocsvc", + 4143: "oidsr", + 4145: "vvr-control", + 4146: "tgcconnect", + 4147: "vrxpservman", + 4148: "hhb-handheld", + 4149: "agslb", + 4150: "PowerAlert-nsa", + 4151: "menandmice-noh", + 4152: "idig-mux", + 4153: "mbl-battd", + 4154: "atlinks", + 4155: "bzr", + 4156: "stat-results", + 4157: "stat-scanner", + 4158: "stat-cc", + 4159: "nss", + 4160: "jini-discovery", + 4161: "omscontact", + 4162: "omstopology", + 4163: "silverpeakpeer", + 4164: "silverpeakcomm", + 4165: "altcp", + 4166: "joost", + 4167: "ddgn", + 4168: "pslicser", + 4169: "iadt-disc", + 4172: "pcoip", + 4173: "mma-discovery", + 4174: "sm-disc", + 4177: "wello", + 4178: "storman", + 4179: "MaxumSP", + 4180: "httpx", + 4181: "macbak", + 4182: "pcptcpservice", + 4183: "cyborgnet", + 4184: "universe-suite", + 4185: "wcpp", + 4188: "vatata", + 4191: "dsmipv6", + 4192: "azeti-bd", + 4197: "hctl", + 4199: "eims-admin", + 4300: "corelccam", + 4301: "d-data", + 4302: "d-data-control", + 4303: "srcp", + 4304: "owserver", + 4305: "batman", + 4306: "pinghgl", + 4307: "trueconf", + 4308: "compx-lockview", + 4309: "dserver", + 4310: "mirrtex", + 4320: "fdt-rcatp", + 4321: "rwhois", + 4322: "trim-event", + 4323: "trim-ice", + 4325: "geognosisman", + 4326: "geognosis", + 4327: "jaxer-web", + 4328: "jaxer-manager", + 4333: "ahsp", + 4340: "gaia", + 4341: "lisp-data", + 4342: "lisp-control", + 4343: "unicall", + 4344: "vinainstall", + 4345: "m4-network-as", + 4346: "elanlm", + 4347: "lansurveyor", + 4348: "itose", + 4349: "fsportmap", + 4350: "net-device", + 4351: "plcy-net-svcs", + 4352: "pjlink", + 4353: "f5-iquery", + 4354: "qsnet-trans", + 4355: "qsnet-workst", + 4356: "qsnet-assist", + 4357: "qsnet-cond", + 4358: "qsnet-nucl", + 4359: "omabcastltkm", + 4361: "nacnl", + 4362: "afore-vdp-disc", + 4366: "shadowstream", + 4368: "wxbrief", + 4369: "epmd", + 4370: "elpro-tunnel", + 4371: "l2c-disc", + 4372: "l2c-data", + 4373: "remctl", + 4375: "tolteces", + 4376: "bip", + 4377: "cp-spxsvr", + 4378: "cp-spxdpy", + 4379: "ctdb", + 4389: "xandros-cms", + 4390: "wiegand", + 4394: "apwi-disc", + 4395: "omnivisionesx", + 4400: "ds-srv", + 4401: "ds-srvr", + 4402: "ds-clnt", + 4403: "ds-user", + 4404: "ds-admin", + 4405: "ds-mail", + 4406: "ds-slp", + 4412: "smallchat", + 4413: "avi-nms-disc", + 4416: "pjj-player-disc", + 4418: "axysbridge", + 4420: "nvm-express", + 4425: "netrockey6", + 4426: "beacon-port-2", + 4430: 
"rsqlserver", + 4432: "l-acoustics", + 4441: "netblox", + 4442: "saris", + 4443: "pharos", + 4444: "krb524", + 4445: "upnotifyp", + 4446: "n1-fwp", + 4447: "n1-rmgmt", + 4448: "asc-slmd", + 4449: "privatewire", + 4450: "camp", + 4451: "ctisystemmsg", + 4452: "ctiprogramload", + 4453: "nssalertmgr", + 4454: "nssagentmgr", + 4455: "prchat-user", + 4456: "prchat-server", + 4457: "prRegister", + 4458: "mcp", + 4484: "hpssmgmt", + 4486: "icms", + 4488: "awacs-ice", + 4500: "ipsec-nat-t", + 4534: "armagetronad", + 4535: "ehs", + 4536: "ehs-ssl", + 4537: "wssauthsvc", + 4538: "swx-gate", + 4545: "worldscores", + 4546: "sf-lm", + 4547: "lanner-lm", + 4548: "synchromesh", + 4549: "aegate", + 4550: "gds-adppiw-db", + 4551: "ieee-mih", + 4552: "menandmice-mon", + 4554: "msfrs", + 4555: "rsip", + 4556: "dtn-bundle", + 4557: "mtcevrunqss", + 4558: "mtcevrunqman", + 4559: "hylafax", + 4566: "kwtc", + 4567: "tram", + 4568: "bmc-reporting", + 4569: "iax", + 4591: "l3t-at-an", + 4592: "hrpd-ith-at-an", + 4593: "ipt-anri-anri", + 4594: "ias-session", + 4595: "ias-paging", + 4596: "ias-neighbor", + 4597: "a21-an-1xbs", + 4598: "a16-an-an", + 4599: "a17-an-an", + 4600: "piranha1", + 4601: "piranha2", + 4621: "ventoso", + 4658: "playsta2-app", + 4659: "playsta2-lob", + 4660: "smaclmgr", + 4661: "kar2ouche", + 4662: "oms", + 4663: "noteit", + 4664: "ems", + 4665: "contclientms", + 4666: "eportcomm", + 4667: "mmacomm", + 4668: "mmaeds", + 4669: "eportcommdata", + 4670: "light", + 4671: "acter", + 4672: "rfa", + 4673: "cxws", + 4674: "appiq-mgmt", + 4675: "dhct-status", + 4676: "dhct-alerts", + 4677: "bcs", + 4678: "traversal", + 4679: "mgesupervision", + 4680: "mgemanagement", + 4681: "parliant", + 4682: "finisar", + 4683: "spike", + 4684: "rfid-rp1", + 4685: "autopac", + 4686: "msp-os", + 4687: "nst", + 4688: "mobile-p2p", + 4689: "altovacentral", + 4690: "prelude", + 4691: "mtn", + 4692: "conspiracy", + 4700: "netxms-agent", + 4701: "netxms-mgmt", + 4702: "netxms-sync", + 4711: "trinity-dist", + 4725: "truckstar", + 4726: "a26-fap-fgw", + 4727: "fcis-disc", + 4728: "capmux", + 4729: "gsmtap", + 4730: "gearman", + 4732: "ohmtrigger", + 4737: "ipdr-sp", + 4738: "solera-lpn", + 4739: "ipfix", + 4740: "ipfixs", + 4741: "lumimgrd", + 4742: "sicct-sdp", + 4743: "openhpid", + 4744: "ifsp", + 4745: "fmp", + 4746: "intelliadm-disc", + 4747: "buschtrommel", + 4749: "profilemac", + 4750: "ssad", + 4751: "spocp", + 4752: "snap", + 4753: "simon-disc", + 4754: "gre-in-udp", + 4755: "gre-udp-dtls", + 4784: "bfd-multi-ctl", + 4785: "cncp", + 4789: "vxlan", + 4790: "vxlan-gpe", + 4791: "roce", + 4800: "iims", + 4801: "iwec", + 4802: "ilss", + 4803: "notateit-disc", + 4804: "aja-ntv4-disc", + 4827: "htcp", + 4837: "varadero-0", + 4838: "varadero-1", + 4839: "varadero-2", + 4840: "opcua-udp", + 4841: "quosa", + 4842: "gw-asv", + 4843: "opcua-tls", + 4844: "gw-log", + 4845: "wcr-remlib", + 4846: "contamac-icm", + 4847: "wfc", + 4848: "appserv-http", + 4849: "appserv-https", + 4850: "sun-as-nodeagt", + 4851: "derby-repli", + 4867: "unify-debug", + 4868: "phrelay", + 4869: "phrelaydbg", + 4870: "cc-tracking", + 4871: "wired", + 4876: "tritium-can", + 4877: "lmcs", + 4878: "inst-discovery", + 4881: "socp-t", + 4882: "socp-c", + 4884: "hivestor", + 4885: "abbs", + 4894: "lyskom", + 4899: "radmin-port", + 4900: "hfcs", + 4914: "bones", + 4936: "an-signaling", + 4937: "atsc-mh-ssc", + 4940: "eq-office-4940", + 4941: "eq-office-4941", + 4942: "eq-office-4942", + 4949: "munin", + 4950: "sybasesrvmon", + 4951: "pwgwims", + 4952: 
"sagxtsds", + 4969: "ccss-qmm", + 4970: "ccss-qsm", + 4980: "ctxs-vpp", + 4986: "mrip", + 4987: "smar-se-port1", + 4988: "smar-se-port2", + 4989: "parallel", + 4990: "busycal", + 4991: "vrt", + 4999: "hfcs-manager", + 5000: "commplex-main", + 5001: "commplex-link", + 5002: "rfe", + 5003: "fmpro-internal", + 5004: "avt-profile-1", + 5005: "avt-profile-2", + 5006: "wsm-server", + 5007: "wsm-server-ssl", + 5008: "synapsis-edge", + 5009: "winfs", + 5010: "telelpathstart", + 5011: "telelpathattack", + 5012: "nsp", + 5013: "fmpro-v6", + 5014: "onpsocket", + 5020: "zenginkyo-1", + 5021: "zenginkyo-2", + 5022: "mice", + 5023: "htuilsrv", + 5024: "scpi-telnet", + 5025: "scpi-raw", + 5026: "strexec-d", + 5027: "strexec-s", + 5029: "infobright", + 5030: "surfpass", + 5031: "dmp", + 5042: "asnaacceler8db", + 5043: "swxadmin", + 5044: "lxi-evntsvc", + 5046: "vpm-udp", + 5047: "iscape", + 5049: "ivocalize", + 5050: "mmcc", + 5051: "ita-agent", + 5052: "ita-manager", + 5053: "rlm-disc", + 5055: "unot", + 5056: "intecom-ps1", + 5057: "intecom-ps2", + 5058: "locus-disc", + 5059: "sds", + 5060: "sip", + 5061: "sips", + 5062: "na-localise", + 5064: "ca-1", + 5065: "ca-2", + 5066: "stanag-5066", + 5067: "authentx", + 5069: "i-net-2000-npr", + 5070: "vtsas", + 5071: "powerschool", + 5072: "ayiya", + 5073: "tag-pm", + 5074: "alesquery", + 5078: "pixelpusher", + 5079: "cp-spxrpts", + 5080: "onscreen", + 5081: "sdl-ets", + 5082: "qcp", + 5083: "qfp", + 5084: "llrp", + 5085: "encrypted-llrp", + 5092: "magpie", + 5093: "sentinel-lm", + 5094: "hart-ip", + 5099: "sentlm-srv2srv", + 5100: "socalia", + 5101: "talarian-udp", + 5102: "oms-nonsecure", + 5104: "tinymessage", + 5105: "hughes-ap", + 5111: "taep-as-svc", + 5112: "pm-cmdsvr", + 5116: "emb-proj-cmd", + 5120: "barracuda-bbs", + 5133: "nbt-pc", + 5136: "minotaur-sa", + 5137: "ctsd", + 5145: "rmonitor-secure", + 5150: "atmp", + 5151: "esri-sde", + 5152: "sde-discovery", + 5154: "bzflag", + 5155: "asctrl-agent", + 5164: "vpa-disc", + 5165: "ife-icorp", + 5166: "winpcs", + 5167: "scte104", + 5168: "scte30", + 5190: "aol", + 5191: "aol-1", + 5192: "aol-2", + 5193: "aol-3", + 5200: "targus-getdata", + 5201: "targus-getdata1", + 5202: "targus-getdata2", + 5203: "targus-getdata3", + 5223: "hpvirtgrp", + 5224: "hpvirtctrl", + 5225: "hp-server", + 5226: "hp-status", + 5227: "perfd", + 5234: "eenet", + 5235: "galaxy-network", + 5236: "padl2sim", + 5237: "mnet-discovery", + 5245: "downtools-disc", + 5246: "capwap-control", + 5247: "capwap-data", + 5248: "caacws", + 5249: "caaclang2", + 5250: "soagateway", + 5251: "caevms", + 5252: "movaz-ssc", + 5264: "3com-njack-1", + 5265: "3com-njack-2", + 5270: "cartographerxmp", + 5271: "cuelink-disc", + 5272: "pk", + 5282: "transmit-port", + 5298: "presence", + 5299: "nlg-data", + 5300: "hacl-hb", + 5301: "hacl-gs", + 5302: "hacl-cfg", + 5303: "hacl-probe", + 5304: "hacl-local", + 5305: "hacl-test", + 5306: "sun-mc-grp", + 5307: "sco-aip", + 5308: "cfengine", + 5309: "jprinter", + 5310: "outlaws", + 5312: "permabit-cs", + 5313: "rrdp", + 5314: "opalis-rbt-ipc", + 5315: "hacl-poll", + 5343: "kfserver", + 5344: "xkotodrcp", + 5349: "stuns", + 5350: "pcp-multicast", + 5351: "pcp", + 5352: "dns-llq", + 5353: "mdns", + 5354: "mdnsresponder", + 5355: "llmnr", + 5356: "ms-smlbiz", + 5357: "wsdapi", + 5358: "wsdapi-s", + 5359: "ms-alerter", + 5360: "ms-sideshow", + 5361: "ms-s-sideshow", + 5362: "serverwsd2", + 5363: "net-projection", + 5364: "kdnet", + 5397: "stresstester", + 5398: "elektron-admin", + 5399: "securitychase", + 5400: 
"excerpt", + 5401: "excerpts", + 5402: "mftp", + 5403: "hpoms-ci-lstn", + 5404: "hpoms-dps-lstn", + 5405: "netsupport", + 5406: "systemics-sox", + 5407: "foresyte-clear", + 5408: "foresyte-sec", + 5409: "salient-dtasrv", + 5410: "salient-usrmgr", + 5411: "actnet", + 5412: "continuus", + 5413: "wwiotalk", + 5414: "statusd", + 5415: "ns-server", + 5416: "sns-gateway", + 5417: "sns-agent", + 5418: "mcntp", + 5419: "dj-ice", + 5420: "cylink-c", + 5421: "netsupport2", + 5422: "salient-mux", + 5423: "virtualuser", + 5424: "beyond-remote", + 5425: "br-channel", + 5426: "devbasic", + 5427: "sco-peer-tta", + 5428: "telaconsole", + 5429: "base", + 5430: "radec-corp", + 5431: "park-agent", + 5432: "postgresql", + 5433: "pyrrho", + 5434: "sgi-arrayd", + 5435: "sceanics", + 5436: "pmip6-cntl", + 5437: "pmip6-data", + 5443: "spss", + 5450: "tiepie-disc", + 5453: "surebox", + 5454: "apc-5454", + 5455: "apc-5455", + 5456: "apc-5456", + 5461: "silkmeter", + 5462: "ttl-publisher", + 5463: "ttlpriceproxy", + 5464: "quailnet", + 5465: "netops-broker", + 5474: "apsolab-rpc", + 5500: "fcp-addr-srvr1", + 5501: "fcp-addr-srvr2", + 5502: "fcp-srvr-inst1", + 5503: "fcp-srvr-inst2", + 5504: "fcp-cics-gw1", + 5505: "checkoutdb", + 5506: "amc", + 5553: "sgi-eventmond", + 5554: "sgi-esphttp", + 5555: "personal-agent", + 5556: "freeciv", + 5567: "dof-dps-mc-sec", + 5568: "sdt", + 5569: "rdmnet-device", + 5573: "sdmmp", + 5580: "tmosms0", + 5581: "tmosms1", + 5582: "fac-restore", + 5583: "tmo-icon-sync", + 5584: "bis-web", + 5585: "bis-sync", + 5597: "ininmessaging", + 5598: "mctfeed", + 5599: "esinstall", + 5600: "esmmanager", + 5601: "esmagent", + 5602: "a1-msc", + 5603: "a1-bs", + 5604: "a3-sdunode", + 5605: "a4-sdunode", + 5627: "ninaf", + 5628: "htrust", + 5629: "symantec-sfdb", + 5630: "precise-comm", + 5631: "pcanywheredata", + 5632: "pcanywherestat", + 5633: "beorl", + 5634: "xprtld", + 5670: "zre-disc", + 5671: "amqps", + 5672: "amqp", + 5673: "jms", + 5674: "hyperscsi-port", + 5675: "v5ua", + 5676: "raadmin", + 5677: "questdb2-lnchr", + 5678: "rrac", + 5679: "dccm", + 5680: "auriga-router", + 5681: "ncxcp", + 5682: "brightcore", + 5683: "coap", + 5684: "coaps", + 5687: "gog-multiplayer", + 5688: "ggz", + 5689: "qmvideo", + 5713: "proshareaudio", + 5714: "prosharevideo", + 5715: "prosharedata", + 5716: "prosharerequest", + 5717: "prosharenotify", + 5718: "dpm", + 5719: "dpm-agent", + 5720: "ms-licensing", + 5721: "dtpt", + 5722: "msdfsr", + 5723: "omhs", + 5724: "omsdk", + 5728: "io-dist-group", + 5729: "openmail", + 5730: "unieng", + 5741: "ida-discover1", + 5742: "ida-discover2", + 5743: "watchdoc-pod", + 5744: "watchdoc", + 5745: "fcopy-server", + 5746: "fcopys-server", + 5747: "tunatic", + 5748: "tunalyzer", + 5750: "rscd", + 5755: "openmailg", + 5757: "x500ms", + 5766: "openmailns", + 5767: "s-openmail", + 5768: "openmailpxy", + 5769: "spramsca", + 5770: "spramsd", + 5771: "netagent", + 5777: "dali-port", + 5781: "3par-evts", + 5782: "3par-mgmt", + 5783: "3par-mgmt-ssl", + 5784: "ibar", + 5785: "3par-rcopy", + 5786: "cisco-redu", + 5787: "waascluster", + 5793: "xtreamx", + 5794: "spdp", + 5813: "icmpd", + 5814: "spt-automation", + 5859: "wherehoo", + 5863: "ppsuitemsg", + 5900: "rfb", + 5910: "cm", + 5911: "cpdlc", + 5912: "fis", + 5913: "ads-c", + 5963: "indy", + 5968: "mppolicy-v5", + 5969: "mppolicy-mgr", + 5984: "couchdb", + 5985: "wsman", + 5986: "wsmans", + 5987: "wbem-rmi", + 5988: "wbem-http", + 5989: "wbem-https", + 5990: "wbem-exp-https", + 5991: "nuxsl", + 5992: "consul-insight", + 5999: "cvsup", 
+ 6064: "ndl-ahp-svc", + 6065: "winpharaoh", + 6066: "ewctsp", + 6069: "trip", + 6070: "messageasap", + 6071: "ssdtp", + 6072: "diagnose-proc", + 6073: "directplay8", + 6074: "max", + 6080: "gue", + 6081: "geneve", + 6082: "p25cai", + 6083: "miami-bcast", + 6085: "konspire2b", + 6086: "pdtp", + 6087: "ldss", + 6088: "doglms-notify", + 6100: "synchronet-db", + 6101: "synchronet-rtc", + 6102: "synchronet-upd", + 6103: "rets", + 6104: "dbdb", + 6105: "primaserver", + 6106: "mpsserver", + 6107: "etc-control", + 6108: "sercomm-scadmin", + 6109: "globecast-id", + 6110: "softcm", + 6111: "spc", + 6112: "dtspcd", + 6118: "tipc", + 6122: "bex-webadmin", + 6123: "backup-express", + 6124: "pnbs", + 6133: "nbt-wol", + 6140: "pulsonixnls", + 6141: "meta-corp", + 6142: "aspentec-lm", + 6143: "watershed-lm", + 6144: "statsci1-lm", + 6145: "statsci2-lm", + 6146: "lonewolf-lm", + 6147: "montage-lm", + 6148: "ricardo-lm", + 6149: "tal-pod", + 6160: "ecmp-data", + 6161: "patrol-ism", + 6162: "patrol-coll", + 6163: "pscribe", + 6200: "lm-x", + 6201: "thermo-calc", + 6209: "qmtps", + 6222: "radmind", + 6241: "jeol-nsddp-1", + 6242: "jeol-nsddp-2", + 6243: "jeol-nsddp-3", + 6244: "jeol-nsddp-4", + 6251: "tl1-raw-ssl", + 6252: "tl1-ssh", + 6253: "crip", + 6268: "grid", + 6269: "grid-alt", + 6300: "bmc-grx", + 6301: "bmc-ctd-ldap", + 6306: "ufmp", + 6315: "scup-disc", + 6316: "abb-escp", + 6317: "nav-data", + 6320: "repsvc", + 6321: "emp-server1", + 6322: "emp-server2", + 6324: "hrd-ns-disc", + 6343: "sflow", + 6346: "gnutella-svc", + 6347: "gnutella-rtr", + 6350: "adap", + 6355: "pmcs", + 6360: "metaedit-mu", + 6363: "ndn", + 6370: "metaedit-se", + 6382: "metatude-mds", + 6389: "clariion-evr01", + 6390: "metaedit-ws", + 6417: "faxcomservice", + 6419: "svdrp-disc", + 6420: "nim-vdrshell", + 6421: "nim-wan", + 6443: "sun-sr-https", + 6444: "sge-qmaster", + 6445: "sge-execd", + 6446: "mysql-proxy", + 6455: "skip-cert-recv", + 6456: "skip-cert-send", + 6464: "ieee11073-20701", + 6471: "lvision-lm", + 6480: "sun-sr-http", + 6481: "servicetags", + 6482: "ldoms-mgmt", + 6483: "SunVTS-RMI", + 6484: "sun-sr-jms", + 6485: "sun-sr-iiop", + 6486: "sun-sr-iiops", + 6487: "sun-sr-iiop-aut", + 6488: "sun-sr-jmx", + 6489: "sun-sr-admin", + 6500: "boks", + 6501: "boks-servc", + 6502: "boks-servm", + 6503: "boks-clntd", + 6505: "badm-priv", + 6506: "badm-pub", + 6507: "bdir-priv", + 6508: "bdir-pub", + 6509: "mgcs-mfp-port", + 6510: "mcer-port", + 6511: "dccp-udp", + 6514: "syslog-tls", + 6515: "elipse-rec", + 6543: "lds-distrib", + 6544: "lds-dump", + 6547: "apc-6547", + 6548: "apc-6548", + 6549: "apc-6549", + 6550: "fg-sysupdate", + 6551: "sum", + 6558: "xdsxdm", + 6566: "sane-port", + 6568: "rp-reputation", + 6579: "affiliate", + 6580: "parsec-master", + 6581: "parsec-peer", + 6582: "parsec-game", + 6583: "joaJewelSuite", + 6619: "odette-ftps", + 6620: "kftp-data", + 6621: "kftp", + 6622: "mcftp", + 6623: "ktelnet", + 6626: "wago-service", + 6627: "nexgen", + 6628: "afesc-mc", + 6629: "nexgen-aux", + 6633: "cisco-vpath-tun", + 6634: "mpls-pm", + 6635: "mpls-udp", + 6636: "mpls-udp-dtls", + 6653: "openflow", + 6657: "palcom-disc", + 6670: "vocaltec-gold", + 6671: "p4p-portal", + 6672: "vision-server", + 6673: "vision-elmd", + 6678: "vfbp-disc", + 6679: "osaut", + 6689: "tsa", + 6696: "babel", + 6701: "kti-icad-srvr", + 6702: "e-design-net", + 6703: "e-design-web", + 6714: "ibprotocol", + 6715: "fibotrader-com", + 6767: "bmc-perf-agent", + 6768: "bmc-perf-mgrd", + 6769: "adi-gxp-srvprt", + 6770: "plysrv-http", + 6771: 
"plysrv-https", + 6784: "bfd-lag", + 6785: "dgpf-exchg", + 6786: "smc-jmx", + 6787: "smc-admin", + 6788: "smc-http", + 6790: "hnmp", + 6791: "hnm", + 6801: "acnet", + 6831: "ambit-lm", + 6841: "netmo-default", + 6842: "netmo-http", + 6850: "iccrushmore", + 6868: "acctopus-st", + 6888: "muse", + 6935: "ethoscan", + 6936: "xsmsvc", + 6946: "bioserver", + 6951: "otlp", + 6961: "jmact3", + 6962: "jmevt2", + 6963: "swismgr1", + 6964: "swismgr2", + 6965: "swistrap", + 6966: "swispol", + 6969: "acmsoda", + 6997: "MobilitySrv", + 6998: "iatp-highpri", + 6999: "iatp-normalpri", + 7000: "afs3-fileserver", + 7001: "afs3-callback", + 7002: "afs3-prserver", + 7003: "afs3-vlserver", + 7004: "afs3-kaserver", + 7005: "afs3-volser", + 7006: "afs3-errors", + 7007: "afs3-bos", + 7008: "afs3-update", + 7009: "afs3-rmtsys", + 7010: "ups-onlinet", + 7011: "talon-disc", + 7012: "talon-engine", + 7013: "microtalon-dis", + 7014: "microtalon-com", + 7015: "talon-webserver", + 7016: "spg", + 7017: "grasp", + 7019: "doceri-view", + 7020: "dpserve", + 7021: "dpserveadmin", + 7022: "ctdp", + 7023: "ct2nmcs", + 7024: "vmsvc", + 7025: "vmsvc-2", + 7030: "op-probe", + 7040: "quest-disc", + 7070: "arcp", + 7071: "iwg1", + 7080: "empowerid", + 7088: "zixi-transport", + 7095: "jdp-disc", + 7099: "lazy-ptop", + 7100: "font-service", + 7101: "elcn", + 7107: "aes-x170", + 7121: "virprot-lm", + 7128: "scenidm", + 7129: "scenccs", + 7161: "cabsm-comm", + 7162: "caistoragemgr", + 7163: "cacsambroker", + 7164: "fsr", + 7165: "doc-server", + 7166: "aruba-server", + 7169: "ccag-pib", + 7170: "nsrp", + 7171: "drm-production", + 7174: "clutild", + 7181: "janus-disc", + 7200: "fodms", + 7201: "dlip", + 7227: "ramp", + 7235: "aspcoordination", + 7244: "frc-hicp-disc", + 7262: "cnap", + 7272: "watchme-7272", + 7273: "oma-rlp", + 7274: "oma-rlp-s", + 7275: "oma-ulp", + 7276: "oma-ilp", + 7277: "oma-ilp-s", + 7278: "oma-dcdocbs", + 7279: "ctxlic", + 7280: "itactionserver1", + 7281: "itactionserver2", + 7282: "mzca-alert", + 7365: "lcm-server", + 7391: "mindfilesys", + 7392: "mrssrendezvous", + 7393: "nfoldman", + 7394: "fse", + 7395: "winqedit", + 7397: "hexarc", + 7400: "rtps-discovery", + 7401: "rtps-dd-ut", + 7402: "rtps-dd-mt", + 7410: "ionixnetmon", + 7411: "daqstream", + 7421: "mtportmon", + 7426: "pmdmgr", + 7427: "oveadmgr", + 7428: "ovladmgr", + 7429: "opi-sock", + 7430: "xmpv7", + 7431: "pmd", + 7437: "faximum", + 7443: "oracleas-https", + 7473: "rise", + 7491: "telops-lmd", + 7500: "silhouette", + 7501: "ovbus", + 7510: "ovhpas", + 7511: "pafec-lm", + 7542: "saratoga", + 7543: "atul", + 7544: "nta-ds", + 7545: "nta-us", + 7546: "cfs", + 7547: "cwmp", + 7548: "tidp", + 7549: "nls-tl", + 7550: "cloudsignaling", + 7560: "sncp", + 7566: "vsi-omega", + 7570: "aries-kfinder", + 7574: "coherence-disc", + 7588: "sun-lm", + 7606: "mipi-debug", + 7624: "indi", + 7627: "soap-http", + 7628: "zen-pawn", + 7629: "xdas", + 7633: "pmdfmgt", + 7648: "cuseeme", + 7674: "imqtunnels", + 7675: "imqtunnel", + 7676: "imqbrokerd", + 7677: "sun-user-https", + 7680: "pando-pub", + 7689: "collaber", + 7697: "klio", + 7707: "sync-em7", + 7708: "scinet", + 7720: "medimageportal", + 7724: "nsdeepfreezectl", + 7725: "nitrogen", + 7726: "freezexservice", + 7727: "trident-data", + 7728: "osvr", + 7734: "smip", + 7738: "aiagent", + 7741: "scriptview", + 7743: "sstp-1", + 7744: "raqmon-pdu", + 7747: "prgp", + 7777: "cbt", + 7778: "interwise", + 7779: "vstat", + 7781: "accu-lmgr", + 7784: "s-bfd", + 7786: "minivend", + 7787: "popup-reminders", + 7789: 
"office-tools", + 7794: "q3ade", + 7797: "pnet-conn", + 7798: "pnet-enc", + 7799: "altbsdp", + 7800: "asr", + 7801: "ssp-client", + 7802: "vns-tp", + 7810: "rbt-wanopt", + 7845: "apc-7845", + 7846: "apc-7846", + 7872: "mipv6tls", + 7880: "pss", + 7887: "ubroker", + 7900: "mevent", + 7901: "tnos-sp", + 7902: "tnos-dp", + 7903: "tnos-dps", + 7913: "qo-secure", + 7932: "t2-drm", + 7933: "t2-brm", + 7962: "generalsync", + 7967: "supercell", + 7979: "micromuse-ncps", + 7980: "quest-vista", + 7982: "sossd-disc", + 7998: "usicontentpush", + 7999: "irdmi2", + 8000: "irdmi", + 8001: "vcom-tunnel", + 8002: "teradataordbms", + 8003: "mcreport", + 8005: "mxi", + 8006: "wpl-disc", + 8007: "warppipe", + 8008: "http-alt", + 8019: "qbdb", + 8020: "intu-ec-svcdisc", + 8021: "intu-ec-client", + 8022: "oa-system", + 8025: "ca-audit-da", + 8026: "ca-audit-ds", + 8032: "pro-ed", + 8033: "mindprint", + 8034: "vantronix-mgmt", + 8040: "ampify", + 8041: "enguity-xccetp", + 8052: "senomix01", + 8053: "senomix02", + 8054: "senomix03", + 8055: "senomix04", + 8056: "senomix05", + 8057: "senomix06", + 8058: "senomix07", + 8059: "senomix08", + 8060: "aero", + 8074: "gadugadu", + 8080: "http-alt", + 8081: "sunproxyadmin", + 8082: "us-cli", + 8083: "us-srv", + 8086: "d-s-n", + 8087: "simplifymedia", + 8088: "radan-http", + 8097: "sac", + 8100: "xprint-server", + 8115: "mtl8000-matrix", + 8116: "cp-cluster", + 8118: "privoxy", + 8121: "apollo-data", + 8122: "apollo-admin", + 8128: "paycash-online", + 8129: "paycash-wbp", + 8130: "indigo-vrmi", + 8131: "indigo-vbcp", + 8132: "dbabble", + 8148: "isdd", + 8149: "eor-game", + 8160: "patrol", + 8161: "patrol-snmp", + 8182: "vmware-fdm", + 8184: "itach", + 8192: "spytechphone", + 8194: "blp1", + 8195: "blp2", + 8199: "vvr-data", + 8200: "trivnet1", + 8201: "trivnet2", + 8202: "aesop", + 8204: "lm-perfworks", + 8205: "lm-instmgr", + 8206: "lm-dta", + 8207: "lm-sserver", + 8208: "lm-webwatcher", + 8230: "rexecj", + 8231: "hncp-udp-port", + 8232: "hncp-dtls-port", + 8243: "synapse-nhttps", + 8276: "pando-sec", + 8280: "synapse-nhttp", + 8282: "libelle-disc", + 8292: "blp3", + 8294: "blp4", + 8300: "tmi", + 8301: "amberon", + 8320: "tnp-discover", + 8321: "tnp", + 8322: "garmin-marine", + 8351: "server-find", + 8376: "cruise-enum", + 8377: "cruise-swroute", + 8378: "cruise-config", + 8379: "cruise-diags", + 8380: "cruise-update", + 8383: "m2mservices", + 8384: "marathontp", + 8400: "cvd", + 8401: "sabarsd", + 8402: "abarsd", + 8403: "admind", + 8416: "espeech", + 8417: "espeech-rtp", + 8442: "cybro-a-bus", + 8443: "pcsync-https", + 8444: "pcsync-http", + 8445: "copy-disc", + 8450: "npmp", + 8472: "otv", + 8473: "vp2p", + 8474: "noteshare", + 8500: "fmtp", + 8501: "cmtp-av", + 8503: "lsp-self-ping", + 8554: "rtsp-alt", + 8555: "d-fence", + 8567: "dof-tunnel", + 8600: "asterix", + 8609: "canon-cpp-disc", + 8610: "canon-mfnp", + 8611: "canon-bjnp1", + 8612: "canon-bjnp2", + 8613: "canon-bjnp3", + 8614: "canon-bjnp4", + 8675: "msi-cps-rm-disc", + 8686: "sun-as-jmxrmi", + 8732: "dtp-net", + 8733: "ibus", + 8763: "mc-appserver", + 8764: "openqueue", + 8765: "ultraseek-http", + 8766: "amcs", + 8770: "dpap", + 8786: "msgclnt", + 8787: "msgsrvr", + 8793: "acd-pm", + 8800: "sunwebadmin", + 8804: "truecm", + 8805: "pfcp", + 8808: "ssports-bcast", + 8873: "dxspider", + 8880: "cddbp-alt", + 8883: "secure-mqtt", + 8888: "ddi-udp-1", + 8889: "ddi-udp-2", + 8890: "ddi-udp-3", + 8891: "ddi-udp-4", + 8892: "ddi-udp-5", + 8893: "ddi-udp-6", + 8894: "ddi-udp-7", + 8899: "ospf-lite", + 8900: 
"jmb-cds1", + 8901: "jmb-cds2", + 8910: "manyone-http", + 8911: "manyone-xml", + 8912: "wcbackup", + 8913: "dragonfly", + 8954: "cumulus-admin", + 8980: "nod-provider", + 8981: "nod-client", + 8989: "sunwebadmins", + 8990: "http-wmap", + 8991: "https-wmap", + 8999: "bctp", + 9000: "cslistener", + 9001: "etlservicemgr", + 9002: "dynamid", + 9007: "ogs-client", + 9009: "pichat", + 9020: "tambora", + 9021: "panagolin-ident", + 9022: "paragent", + 9023: "swa-1", + 9024: "swa-2", + 9025: "swa-3", + 9026: "swa-4", + 9060: "CardWeb-RT", + 9080: "glrpc", + 9084: "aurora", + 9085: "ibm-rsyscon", + 9086: "net2display", + 9087: "classic", + 9088: "sqlexec", + 9089: "sqlexec-ssl", + 9090: "websm", + 9091: "xmltec-xmlmail", + 9092: "XmlIpcRegSvc", + 9100: "hp-pdl-datastr", + 9101: "bacula-dir", + 9102: "bacula-fd", + 9103: "bacula-sd", + 9104: "peerwire", + 9105: "xadmin", + 9106: "astergate-disc", + 9119: "mxit", + 9131: "dddp", + 9160: "apani1", + 9161: "apani2", + 9162: "apani3", + 9163: "apani4", + 9164: "apani5", + 9191: "sun-as-jpda", + 9200: "wap-wsp", + 9201: "wap-wsp-wtp", + 9202: "wap-wsp-s", + 9203: "wap-wsp-wtp-s", + 9204: "wap-vcard", + 9205: "wap-vcal", + 9206: "wap-vcard-s", + 9207: "wap-vcal-s", + 9208: "rjcdb-vcards", + 9209: "almobile-system", + 9210: "oma-mlp", + 9211: "oma-mlp-s", + 9212: "serverviewdbms", + 9213: "serverstart", + 9214: "ipdcesgbs", + 9215: "insis", + 9216: "acme", + 9217: "fsc-port", + 9222: "teamcoherence", + 9255: "mon", + 9277: "traingpsdata", + 9278: "pegasus", + 9279: "pegasus-ctl", + 9280: "pgps", + 9281: "swtp-port1", + 9282: "swtp-port2", + 9283: "callwaveiam", + 9284: "visd", + 9285: "n2h2server", + 9286: "n2receive", + 9287: "cumulus", + 9292: "armtechdaemon", + 9293: "storview", + 9294: "armcenterhttp", + 9295: "armcenterhttps", + 9300: "vrace", + 9318: "secure-ts", + 9321: "guibase", + 9343: "mpidcmgr", + 9344: "mphlpdmc", + 9346: "ctechlicensing", + 9374: "fjdmimgr", + 9380: "boxp", + 9396: "fjinvmgr", + 9397: "mpidcagt", + 9400: "sec-t4net-srv", + 9401: "sec-t4net-clt", + 9402: "sec-pc2fax-srv", + 9418: "git", + 9443: "tungsten-https", + 9444: "wso2esb-console", + 9450: "sntlkeyssrvr", + 9500: "ismserver", + 9522: "sma-spw", + 9535: "mngsuite", + 9536: "laes-bf", + 9555: "trispen-sra", + 9592: "ldgateway", + 9593: "cba8", + 9594: "msgsys", + 9595: "pds", + 9596: "mercury-disc", + 9597: "pd-admin", + 9598: "vscp", + 9599: "robix", + 9600: "micromuse-ncpw", + 9612: "streamcomm-ds", + 9618: "condor", + 9628: "odbcpathway", + 9629: "uniport", + 9632: "mc-comm", + 9667: "xmms2", + 9668: "tec5-sdctp", + 9694: "client-wakeup", + 9695: "ccnx", + 9700: "board-roar", + 9747: "l5nas-parchan", + 9750: "board-voip", + 9753: "rasadv", + 9762: "tungsten-http", + 9800: "davsrc", + 9801: "sstp-2", + 9802: "davsrcs", + 9875: "sapv1", + 9878: "kca-service", + 9888: "cyborg-systems", + 9889: "gt-proxy", + 9898: "monkeycom", + 9899: "sctp-tunneling", + 9900: "iua", + 9901: "enrp", + 9903: "multicast-ping", + 9909: "domaintime", + 9911: "sype-transport", + 9950: "apc-9950", + 9951: "apc-9951", + 9952: "apc-9952", + 9953: "acis", + 9955: "alljoyn-mcm", + 9956: "alljoyn", + 9966: "odnsp", + 9987: "dsm-scm-target", + 9990: "osm-appsrvr", + 9991: "osm-oev", + 9992: "palace-1", + 9993: "palace-2", + 9994: "palace-3", + 9995: "palace-4", + 9996: "palace-5", + 9997: "palace-6", + 9998: "distinct32", + 9999: "distinct", + 10000: "ndmp", + 10001: "scp-config", + 10002: "documentum", + 10003: "documentum-s", + 10007: "mvs-capacity", + 10008: "octopus", + 10009: "swdtp-sv", + 10050: 
"zabbix-agent", + 10051: "zabbix-trapper", + 10080: "amanda", + 10081: "famdc", + 10100: "itap-ddtp", + 10101: "ezmeeting-2", + 10102: "ezproxy-2", + 10103: "ezrelay", + 10104: "swdtp", + 10107: "bctp-server", + 10110: "nmea-0183", + 10111: "nmea-onenet", + 10113: "netiq-endpoint", + 10114: "netiq-qcheck", + 10115: "netiq-endpt", + 10116: "netiq-voipa", + 10117: "iqrm", + 10128: "bmc-perf-sd", + 10160: "qb-db-server", + 10161: "snmpdtls", + 10162: "snmpdtls-trap", + 10200: "trisoap", + 10201: "rscs", + 10252: "apollo-relay", + 10253: "eapol-relay", + 10260: "axis-wimp-port", + 10288: "blocks", + 10439: "bngsync", + 10500: "hip-nat-t", + 10540: "MOS-lower", + 10541: "MOS-upper", + 10542: "MOS-aux", + 10543: "MOS-soap", + 10544: "MOS-soap-opt", + 10800: "gap", + 10805: "lpdg", + 10810: "nmc-disc", + 10860: "helix", + 10880: "bveapi", + 10990: "rmiaux", + 11000: "irisa", + 11001: "metasys", + 10023: "cefd-vmp", + 11095: "weave", + 11106: "sgi-lk", + 11108: "myq-termlink", + 11111: "vce", + 11112: "dicom", + 11161: "suncacao-snmp", + 11162: "suncacao-jmxmp", + 11163: "suncacao-rmi", + 11164: "suncacao-csa", + 11165: "suncacao-websvc", + 11171: "snss", + 11201: "smsqp", + 11208: "wifree", + 11211: "memcache", + 11319: "imip", + 11320: "imip-channels", + 11321: "arena-server", + 11367: "atm-uhas", + 11371: "hkp", + 11430: "lsdp", + 11600: "tempest-port", + 11720: "h323callsigalt", + 11723: "emc-xsw-dcache", + 11751: "intrepid-ssl", + 11796: "lanschool-mpt", + 11876: "xoraya", + 11877: "x2e-disc", + 11967: "sysinfo-sp", + 12000: "entextxid", + 12001: "entextnetwk", + 12002: "entexthigh", + 12003: "entextmed", + 12004: "entextlow", + 12005: "dbisamserver1", + 12006: "dbisamserver2", + 12007: "accuracer", + 12008: "accuracer-dbms", + 12009: "ghvpn", + 12012: "vipera", + 12013: "vipera-ssl", + 12109: "rets-ssl", + 12121: "nupaper-ss", + 12168: "cawas", + 12172: "hivep", + 12300: "linogridengine", + 12321: "warehouse-sss", + 12322: "warehouse", + 12345: "italk", + 12753: "tsaf", + 13160: "i-zipqd", + 13216: "bcslogc", + 13217: "rs-pias", + 13218: "emc-vcas-udp", + 13223: "powwow-client", + 13224: "powwow-server", + 13400: "doip-disc", + 13720: "bprd", + 13721: "bpdbm", + 13722: "bpjava-msvc", + 13724: "vnetd", + 13782: "bpcd", + 13783: "vopied", + 13785: "nbdb", + 13786: "nomdb", + 13818: "dsmcc-config", + 13819: "dsmcc-session", + 13820: "dsmcc-passthru", + 13821: "dsmcc-download", + 13822: "dsmcc-ccp", + 13894: "ucontrol", + 13929: "dta-systems", + 14000: "scotty-ft", + 14001: "sua", + 14002: "scotty-disc", + 14033: "sage-best-com1", + 14034: "sage-best-com2", + 14141: "vcs-app", + 14142: "icpp", + 14145: "gcm-app", + 14149: "vrts-tdd", + 14154: "vad", + 14250: "cps", + 14414: "ca-web-update", + 14936: "hde-lcesrvr-1", + 14937: "hde-lcesrvr-2", + 15000: "hydap", + 15118: "v2g-secc", + 15345: "xpilot", + 15363: "3link", + 15555: "cisco-snat", + 15660: "bex-xr", + 15740: "ptp", + 15998: "2ping", + 16003: "alfin", + 16161: "sun-sea-port", + 16309: "etb4j", + 16310: "pduncs", + 16311: "pdefmns", + 16360: "netserialext1", + 16361: "netserialext2", + 16367: "netserialext3", + 16368: "netserialext4", + 16384: "connected", + 16666: "vtp", + 16900: "newbay-snc-mc", + 16950: "sgcip", + 16991: "intel-rci-mp", + 16992: "amt-soap-http", + 16993: "amt-soap-https", + 16994: "amt-redir-tcp", + 16995: "amt-redir-tls", + 17007: "isode-dua", + 17185: "soundsvirtual", + 17219: "chipper", + 17220: "avtp", + 17221: "avdecc", + 17222: "cpsp", + 17224: "trdp-pd", + 17225: "trdp-md", + 17234: "integrius-stp", + 17235: 
"ssh-mgmt", + 17500: "db-lsp-disc", + 17729: "ea", + 17754: "zep", + 17755: "zigbee-ip", + 17756: "zigbee-ips", + 18000: "biimenu", + 18181: "opsec-cvp", + 18182: "opsec-ufp", + 18183: "opsec-sam", + 18184: "opsec-lea", + 18185: "opsec-omi", + 18186: "ohsc", + 18187: "opsec-ela", + 18241: "checkpoint-rtm", + 18262: "gv-pf", + 18463: "ac-cluster", + 18634: "rds-ib", + 18635: "rds-ip", + 18668: "vdmmesh-disc", + 18769: "ique", + 18881: "infotos", + 18888: "apc-necmp", + 19000: "igrid", + 19007: "scintilla", + 19191: "opsec-uaa", + 19194: "ua-secureagent", + 19220: "cora-disc", + 19283: "keysrvr", + 19315: "keyshadow", + 19398: "mtrgtrans", + 19410: "hp-sco", + 19411: "hp-sca", + 19412: "hp-sessmon", + 19539: "fxuptp", + 19540: "sxuptp", + 19541: "jcp", + 19788: "mle", + 19999: "dnp-sec", + 20000: "dnp", + 20001: "microsan", + 20002: "commtact-http", + 20003: "commtact-https", + 20005: "openwebnet", + 20012: "ss-idi-disc", + 20014: "opendeploy", + 20034: "nburn-id", + 20046: "tmophl7mts", + 20048: "mountd", + 20049: "nfsrdma", + 20167: "tolfab", + 20202: "ipdtp-port", + 20222: "ipulse-ics", + 20480: "emwavemsg", + 20670: "track", + 20999: "athand-mmp", + 21000: "irtrans", + 21554: "dfserver", + 21590: "vofr-gateway", + 21800: "tvpm", + 21845: "webphone", + 21846: "netspeak-is", + 21847: "netspeak-cs", + 21848: "netspeak-acd", + 21849: "netspeak-cps", + 22000: "snapenetio", + 22001: "optocontrol", + 22002: "optohost002", + 22003: "optohost003", + 22004: "optohost004", + 22005: "optohost004", + 22273: "wnn6", + 22305: "cis", + 22335: "shrewd-stream", + 22343: "cis-secure", + 22347: "wibukey", + 22350: "codemeter", + 22555: "vocaltec-phone", + 22763: "talikaserver", + 22800: "aws-brf", + 22951: "brf-gw", + 23000: "inovaport1", + 23001: "inovaport2", + 23002: "inovaport3", + 23003: "inovaport4", + 23004: "inovaport5", + 23005: "inovaport6", + 23272: "s102", + 23294: "5afe-disc", + 23333: "elxmgmt", + 23400: "novar-dbase", + 23401: "novar-alarm", + 23402: "novar-global", + 24000: "med-ltp", + 24001: "med-fsp-rx", + 24002: "med-fsp-tx", + 24003: "med-supp", + 24004: "med-ovw", + 24005: "med-ci", + 24006: "med-net-svc", + 24242: "filesphere", + 24249: "vista-4gl", + 24321: "ild", + 24322: "hid", + 24386: "intel-rci", + 24465: "tonidods", + 24554: "binkp", + 24577: "bilobit-update", + 24676: "canditv", + 24677: "flashfiler", + 24678: "proactivate", + 24680: "tcc-http", + 24850: "assoc-disc", + 24922: "find", + 25000: "icl-twobase1", + 25001: "icl-twobase2", + 25002: "icl-twobase3", + 25003: "icl-twobase4", + 25004: "icl-twobase5", + 25005: "icl-twobase6", + 25006: "icl-twobase7", + 25007: "icl-twobase8", + 25008: "icl-twobase9", + 25009: "icl-twobase10", + 25793: "vocaltec-hos", + 25900: "tasp-net", + 25901: "niobserver", + 25902: "nilinkanalyst", + 25903: "niprobe", + 25954: "bf-game", + 25955: "bf-master", + 26000: "quake", + 26133: "scscp", + 26208: "wnn6-ds", + 26260: "ezproxy", + 26261: "ezmeeting", + 26262: "k3software-svr", + 26263: "k3software-cli", + 26486: "exoline-udp", + 26487: "exoconfig", + 26489: "exonet", + 27345: "imagepump", + 27442: "jesmsjc", + 27504: "kopek-httphead", + 27782: "ars-vista", + 27999: "tw-auth-key", + 28000: "nxlmd", + 28119: "a27-ran-ran", + 28200: "voxelstorm", + 28240: "siemensgsm", + 29167: "otmp", + 30001: "pago-services1", + 30002: "pago-services2", + 30003: "amicon-fpsu-ra", + 30004: "amicon-fpsu-s", + 30260: "kingdomsonline", + 30832: "samsung-disc", + 30999: "ovobs", + 31016: "ka-kdp", + 31029: "yawn", + 31416: "xqosd", + 31457: "tetrinet", + 31620: 
"lm-mon", + 31765: "gamesmith-port", + 31948: "iceedcp-tx", + 31949: "iceedcp-rx", + 32034: "iracinghelper", + 32249: "t1distproc60", + 32483: "apm-link", + 32635: "sec-ntb-clnt", + 32636: "DMExpress", + 32767: "filenet-powsrm", + 32768: "filenet-tms", + 32769: "filenet-rpc", + 32770: "filenet-nch", + 32771: "filenet-rmi", + 32772: "filenet-pa", + 32773: "filenet-cm", + 32774: "filenet-re", + 32775: "filenet-pch", + 32776: "filenet-peior", + 32777: "filenet-obrok", + 32801: "mlsn", + 32896: "idmgratm", + 33123: "aurora-balaena", + 33331: "diamondport", + 33334: "speedtrace-disc", + 33434: "traceroute", + 33656: "snip-slave", + 34249: "turbonote-2", + 34378: "p-net-local", + 34379: "p-net-remote", + 34567: "edi_service", + 34962: "profinet-rt", + 34963: "profinet-rtm", + 34964: "profinet-cm", + 34980: "ethercat", + 35001: "rt-viewer", + 35004: "rt-classmanager", + 35100: "axio-disc", + 35355: "altova-lm-disc", + 36001: "allpeers", + 36411: "wlcp", + 36865: "kastenxpipe", + 37475: "neckar", + 37654: "unisys-eportal", + 38002: "crescoctrl-disc", + 38201: "galaxy7-data", + 38202: "fairview", + 38203: "agpolicy", + 39681: "turbonote-1", + 40000: "safetynetp", + 40023: "k-patentssensor", + 40841: "cscp", + 40842: "csccredir", + 40843: "csccfirewall", + 40853: "ortec-disc", + 41111: "fs-qos", + 41230: "z-wave-s", + 41794: "crestron-cip", + 41795: "crestron-ctp", + 42508: "candp", + 42509: "candrp", + 42510: "caerpc", + 43000: "recvr-rc-disc", + 43188: "reachout", + 43189: "ndm-agent-port", + 43190: "ip-provision", + 43210: "shaperai-disc", + 43439: "eq3-config", + 43440: "ew-disc-cmd", + 43441: "ciscocsdb", + 44321: "pmcd", + 44322: "pmcdproxy", + 44544: "domiq", + 44553: "rbr-debug", + 44600: "asihpi", + 44818: "EtherNet-IP-2", + 44900: "m3da-disc", + 45000: "asmp-mon", + 45054: "invision-ag", + 45514: "cloudcheck-ping", + 45678: "eba", + 45825: "qdb2service", + 45966: "ssr-servermgr", + 46999: "mediabox", + 47000: "mbus", + 47100: "jvl-mactalk", + 47557: "dbbrowse", + 47624: "directplaysrvr", + 47806: "ap", + 47808: "bacnet", + 47809: "presonus-ucnet", + 48000: "nimcontroller", + 48001: "nimspooler", + 48002: "nimhub", + 48003: "nimgtw", + 48128: "isnetserv", + 48129: "blp5", + 48556: "com-bardac-dw", + 48619: "iqobject", + 48653: "robotraconteur", + 49001: "nusdp-disc", +} +var sctpPortNames = map[SCTPPort]string{ + 9: "discard", + 20: "ftp-data", + 21: "ftp", + 22: "ssh", + 80: "http", + 179: "bgp", + 443: "https", + 1021: "exp1", + 1022: "exp2", + 1167: "cisco-ipsla", + 1720: "h323hostcall", + 2049: "nfs", + 2225: "rcip-itu", + 2904: "m2ua", + 2905: "m3ua", + 2944: "megaco-h248", + 2945: "h248-binary", + 3097: "itu-bicc-stc", + 3565: "m2pa", + 3863: "asap-sctp", + 3864: "asap-sctp-tls", + 3868: "diameter", + 4333: "ahsp", + 4502: "a25-fap-fgw", + 4711: "trinity-dist", + 4739: "ipfix", + 4740: "ipfixs", + 5060: "sip", + 5061: "sips", + 5090: "car", + 5091: "cxtp", + 5215: "noteza", + 5445: "smbdirect", + 5672: "amqp", + 5675: "v5ua", + 5868: "diameters", + 5910: "cm", + 5911: "cpdlc", + 5912: "fis", + 5913: "ads-c", + 6704: "frc-hp", + 6705: "frc-mp", + 6706: "frc-lp", + 6970: "conductor-mpx", + 7626: "simco", + 7701: "nfapi", + 7728: "osvr", + 8471: "pim-port", + 9082: "lcs-ap", + 9084: "aurora", + 9900: "iua", + 9901: "enrp-sctp", + 9902: "enrp-sctp-tls", + 11997: "wmereceiving", + 11998: "wmedistribution", + 11999: "wmereporting", + 14001: "sua", + 20049: "nfsrdma", + 25471: "rna", + 29118: "sgsap", + 29168: "sbcap", + 29169: "iuhsctpassoc", + 30100: "rwp", + 36412: "s1-control", + 36422: 
"x2-control", + 36423: "slmap", + 36424: "nq-ap", + 36443: "m2ap", + 36444: "m3ap", + 36462: "xw-control", + 38412: "ng-control", + 38422: "xn-control", + 38472: "f1-control", +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp4.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp4.go new file mode 100644 index 00000000..bd3f03f0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp4.go @@ -0,0 +1,267 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + + "github.com/google/gopacket" +) + +const ( + ICMPv4TypeEchoReply = 0 + ICMPv4TypeDestinationUnreachable = 3 + ICMPv4TypeSourceQuench = 4 + ICMPv4TypeRedirect = 5 + ICMPv4TypeEchoRequest = 8 + ICMPv4TypeRouterAdvertisement = 9 + ICMPv4TypeRouterSolicitation = 10 + ICMPv4TypeTimeExceeded = 11 + ICMPv4TypeParameterProblem = 12 + ICMPv4TypeTimestampRequest = 13 + ICMPv4TypeTimestampReply = 14 + ICMPv4TypeInfoRequest = 15 + ICMPv4TypeInfoReply = 16 + ICMPv4TypeAddressMaskRequest = 17 + ICMPv4TypeAddressMaskReply = 18 +) + +const ( + // DestinationUnreachable + ICMPv4CodeNet = 0 + ICMPv4CodeHost = 1 + ICMPv4CodeProtocol = 2 + ICMPv4CodePort = 3 + ICMPv4CodeFragmentationNeeded = 4 + ICMPv4CodeSourceRoutingFailed = 5 + ICMPv4CodeNetUnknown = 6 + ICMPv4CodeHostUnknown = 7 + ICMPv4CodeSourceIsolated = 8 + ICMPv4CodeNetAdminProhibited = 9 + ICMPv4CodeHostAdminProhibited = 10 + ICMPv4CodeNetTOS = 11 + ICMPv4CodeHostTOS = 12 + ICMPv4CodeCommAdminProhibited = 13 + ICMPv4CodeHostPrecedence = 14 + ICMPv4CodePrecedenceCutoff = 15 + + // TimeExceeded + ICMPv4CodeTTLExceeded = 0 + ICMPv4CodeFragmentReassemblyTimeExceeded = 1 + + // ParameterProblem + ICMPv4CodePointerIndicatesError = 0 + ICMPv4CodeMissingOption = 1 + ICMPv4CodeBadLength = 2 + + // Redirect + // ICMPv4CodeNet = same as for DestinationUnreachable + // ICMPv4CodeHost = same as for DestinationUnreachable + ICMPv4CodeTOSNet = 2 + ICMPv4CodeTOSHost = 3 +) + +type icmpv4TypeCodeInfoStruct struct { + typeStr string + codeStr *map[uint8]string +} + +var ( + icmpv4TypeCodeInfo = map[uint8]icmpv4TypeCodeInfoStruct{ + ICMPv4TypeDestinationUnreachable: icmpv4TypeCodeInfoStruct{ + "DestinationUnreachable", &map[uint8]string{ + ICMPv4CodeNet: "Net", + ICMPv4CodeHost: "Host", + ICMPv4CodeProtocol: "Protocol", + ICMPv4CodePort: "Port", + ICMPv4CodeFragmentationNeeded: "FragmentationNeeded", + ICMPv4CodeSourceRoutingFailed: "SourceRoutingFailed", + ICMPv4CodeNetUnknown: "NetUnknown", + ICMPv4CodeHostUnknown: "HostUnknown", + ICMPv4CodeSourceIsolated: "SourceIsolated", + ICMPv4CodeNetAdminProhibited: "NetAdminProhibited", + ICMPv4CodeHostAdminProhibited: "HostAdminProhibited", + ICMPv4CodeNetTOS: "NetTOS", + ICMPv4CodeHostTOS: "HostTOS", + ICMPv4CodeCommAdminProhibited: "CommAdminProhibited", + ICMPv4CodeHostPrecedence: "HostPrecedence", + ICMPv4CodePrecedenceCutoff: "PrecedenceCutoff", + }, + }, + ICMPv4TypeTimeExceeded: icmpv4TypeCodeInfoStruct{ + "TimeExceeded", &map[uint8]string{ + ICMPv4CodeTTLExceeded: "TTLExceeded", + ICMPv4CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded", + }, + }, + ICMPv4TypeParameterProblem: icmpv4TypeCodeInfoStruct{ + "ParameterProblem", &map[uint8]string{ + 
ICMPv4CodePointerIndicatesError: "PointerIndicatesError", + ICMPv4CodeMissingOption: "MissingOption", + ICMPv4CodeBadLength: "BadLength", + }, + }, + ICMPv4TypeSourceQuench: icmpv4TypeCodeInfoStruct{ + "SourceQuench", nil, + }, + ICMPv4TypeRedirect: icmpv4TypeCodeInfoStruct{ + "Redirect", &map[uint8]string{ + ICMPv4CodeNet: "Net", + ICMPv4CodeHost: "Host", + ICMPv4CodeTOSNet: "TOS+Net", + ICMPv4CodeTOSHost: "TOS+Host", + }, + }, + ICMPv4TypeEchoRequest: icmpv4TypeCodeInfoStruct{ + "EchoRequest", nil, + }, + ICMPv4TypeEchoReply: icmpv4TypeCodeInfoStruct{ + "EchoReply", nil, + }, + ICMPv4TypeTimestampRequest: icmpv4TypeCodeInfoStruct{ + "TimestampRequest", nil, + }, + ICMPv4TypeTimestampReply: icmpv4TypeCodeInfoStruct{ + "TimestampReply", nil, + }, + ICMPv4TypeInfoRequest: icmpv4TypeCodeInfoStruct{ + "InfoRequest", nil, + }, + ICMPv4TypeInfoReply: icmpv4TypeCodeInfoStruct{ + "InfoReply", nil, + }, + ICMPv4TypeRouterSolicitation: icmpv4TypeCodeInfoStruct{ + "RouterSolicitation", nil, + }, + ICMPv4TypeRouterAdvertisement: icmpv4TypeCodeInfoStruct{ + "RouterAdvertisement", nil, + }, + ICMPv4TypeAddressMaskRequest: icmpv4TypeCodeInfoStruct{ + "AddressMaskRequest", nil, + }, + ICMPv4TypeAddressMaskReply: icmpv4TypeCodeInfoStruct{ + "AddressMaskReply", nil, + }, + } +) + +type ICMPv4TypeCode uint16 + +// Type returns the ICMPv4 type field. +func (a ICMPv4TypeCode) Type() uint8 { + return uint8(a >> 8) +} + +// Code returns the ICMPv4 code field. +func (a ICMPv4TypeCode) Code() uint8 { + return uint8(a) +} + +func (a ICMPv4TypeCode) String() string { + t, c := a.Type(), a.Code() + strInfo, ok := icmpv4TypeCodeInfo[t] + if !ok { + // Unknown ICMPv4 type field + return fmt.Sprintf("%d(%d)", t, c) + } + typeStr := strInfo.typeStr + if strInfo.codeStr == nil && c == 0 { + // The ICMPv4 type does not make use of the code field + return fmt.Sprintf("%s", strInfo.typeStr) + } + if strInfo.codeStr == nil && c != 0 { + // The ICMPv4 type does not make use of the code field, but it is present anyway + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + codeStr, ok := (*strInfo.codeStr)[c] + if !ok { + // We don't know this ICMPv4 code; print the numerical value + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + return fmt.Sprintf("%s(%s)", typeStr, codeStr) +} + +func (a ICMPv4TypeCode) GoString() string { + t := reflect.TypeOf(a) + return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code()) +} + +// SerializeTo writes the ICMPv4TypeCode value to the 'bytes' buffer. +func (a ICMPv4TypeCode) SerializeTo(bytes []byte) { + binary.BigEndian.PutUint16(bytes, uint16(a)) +} + +// CreateICMPv4TypeCode is a convenience function to create an ICMPv4TypeCode +// gopacket type from the ICMPv4 type and code values. +func CreateICMPv4TypeCode(typ uint8, code uint8) ICMPv4TypeCode { + return ICMPv4TypeCode(binary.BigEndian.Uint16([]byte{typ, code})) +} + +// ICMPv4 is the layer for IPv4 ICMP packet data. +type ICMPv4 struct { + BaseLayer + TypeCode ICMPv4TypeCode + Checksum uint16 + Id uint16 + Seq uint16 +} + +// LayerType returns LayerTypeICMPv4. +func (i *ICMPv4) LayerType() gopacket.LayerType { return LayerTypeICMPv4 } + +// DecodeFromBytes decodes the given bytes into this layer. 
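+// A minimal usage sketch (illustrative, not part of the upstream file),
+// assuming `raw` holds at least the 8-byte ICMPv4 header:
+//
+//	var icmp ICMPv4
+//	if err := icmp.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err == nil {
+//		fmt.Println(icmp.TypeCode, icmp.Id, icmp.Seq)
+//	}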
+func (i *ICMPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + df.SetTruncated() + return errors.New("ICMP layer less than 8 bytes for ICMPv4 packet") + } + i.TypeCode = CreateICMPv4TypeCode(data[0], data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.Id = binary.BigEndian.Uint16(data[4:6]) + i.Seq = binary.BigEndian.Uint16(data[6:8]) + i.BaseLayer = BaseLayer{data[:8], data[8:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + i.TypeCode.SerializeTo(bytes) + binary.BigEndian.PutUint16(bytes[4:], i.Id) + binary.BigEndian.PutUint16(bytes[6:], i.Seq) + if opts.ComputeChecksums { + bytes[2] = 0 + bytes[3] = 0 + i.Checksum = tcpipChecksum(b.Bytes(), 0) + } + binary.BigEndian.PutUint16(bytes[2:], i.Checksum) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv4) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv4 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv4) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func decodeICMPv4(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv4{} + return decodingLayerDecoder(i, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp6.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp6.go new file mode 100644 index 00000000..09afd11a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp6.go @@ -0,0 +1,266 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "reflect" + + "github.com/google/gopacket" +) + +const ( + // The following are from RFC 4443 + ICMPv6TypeDestinationUnreachable = 1 + ICMPv6TypePacketTooBig = 2 + ICMPv6TypeTimeExceeded = 3 + ICMPv6TypeParameterProblem = 4 + ICMPv6TypeEchoRequest = 128 + ICMPv6TypeEchoReply = 129 + + // The following are from RFC 4861 + ICMPv6TypeRouterSolicitation = 133 + ICMPv6TypeRouterAdvertisement = 134 + ICMPv6TypeNeighborSolicitation = 135 + ICMPv6TypeNeighborAdvertisement = 136 + ICMPv6TypeRedirect = 137 + + // The following are from RFC 2710 + ICMPv6TypeMLDv1MulticastListenerQueryMessage = 130 + ICMPv6TypeMLDv1MulticastListenerReportMessage = 131 + ICMPv6TypeMLDv1MulticastListenerDoneMessage = 132 + + // The following are from RFC 3810 + ICMPv6TypeMLDv2MulticastListenerReportMessageV2 = 143 +) + +const ( + // DestinationUnreachable + ICMPv6CodeNoRouteToDst = 0 + ICMPv6CodeAdminProhibited = 1 + ICMPv6CodeBeyondScopeOfSrc = 2 + ICMPv6CodeAddressUnreachable = 3 + ICMPv6CodePortUnreachable = 4 + ICMPv6CodeSrcAddressFailedPolicy = 5 + ICMPv6CodeRejectRouteToDst = 6 + + // TimeExceeded + ICMPv6CodeHopLimitExceeded = 0 + ICMPv6CodeFragmentReassemblyTimeExceeded = 1 + + // ParameterProblem + ICMPv6CodeErroneousHeaderField = 0 + ICMPv6CodeUnrecognizedNextHeader = 1 + ICMPv6CodeUnrecognizedIPv6Option = 2 +) + +type icmpv6TypeCodeInfoStruct struct { + typeStr string + codeStr *map[uint8]string +} + +var ( + icmpv6TypeCodeInfo = map[uint8]icmpv6TypeCodeInfoStruct{ + ICMPv6TypeDestinationUnreachable: icmpv6TypeCodeInfoStruct{ + "DestinationUnreachable", &map[uint8]string{ + ICMPv6CodeNoRouteToDst: "NoRouteToDst", + ICMPv6CodeAdminProhibited: "AdminProhibited", + ICMPv6CodeBeyondScopeOfSrc: "BeyondScopeOfSrc", + ICMPv6CodeAddressUnreachable: "AddressUnreachable", + ICMPv6CodePortUnreachable: "PortUnreachable", + ICMPv6CodeSrcAddressFailedPolicy: "SrcAddressFailedPolicy", + ICMPv6CodeRejectRouteToDst: "RejectRouteToDst", + }, + }, + ICMPv6TypePacketTooBig: icmpv6TypeCodeInfoStruct{ + "PacketTooBig", nil, + }, + ICMPv6TypeTimeExceeded: icmpv6TypeCodeInfoStruct{ + "TimeExceeded", &map[uint8]string{ + ICMPv6CodeHopLimitExceeded: "HopLimitExceeded", + ICMPv6CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded", + }, + }, + ICMPv6TypeParameterProblem: icmpv6TypeCodeInfoStruct{ + "ParameterProblem", &map[uint8]string{ + ICMPv6CodeErroneousHeaderField: "ErroneousHeaderField", + ICMPv6CodeUnrecognizedNextHeader: "UnrecognizedNextHeader", + ICMPv6CodeUnrecognizedIPv6Option: "UnrecognizedIPv6Option", + }, + }, + ICMPv6TypeEchoRequest: icmpv6TypeCodeInfoStruct{ + "EchoRequest", nil, + }, + ICMPv6TypeEchoReply: icmpv6TypeCodeInfoStruct{ + "EchoReply", nil, + }, + ICMPv6TypeRouterSolicitation: icmpv6TypeCodeInfoStruct{ + "RouterSolicitation", nil, + }, + ICMPv6TypeRouterAdvertisement: icmpv6TypeCodeInfoStruct{ + "RouterAdvertisement", nil, + }, + ICMPv6TypeNeighborSolicitation: icmpv6TypeCodeInfoStruct{ + "NeighborSolicitation", nil, + }, + ICMPv6TypeNeighborAdvertisement: icmpv6TypeCodeInfoStruct{ + "NeighborAdvertisement", nil, + }, + ICMPv6TypeRedirect: icmpv6TypeCodeInfoStruct{ + "Redirect", nil, + }, + } +) + +type ICMPv6TypeCode uint16 + +// Type returns the ICMPv6 type field. +func (a ICMPv6TypeCode) Type() uint8 { + return uint8(a >> 8) +} + +// Code returns the ICMPv6 code field. 
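+// The code occupies the low byte of the 16-bit ICMPv6TypeCode value and the
+// type the high byte, so Type()/Code() round-trip with CreateICMPv6TypeCode.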
+func (a ICMPv6TypeCode) Code() uint8 { + return uint8(a) +} + +func (a ICMPv6TypeCode) String() string { + t, c := a.Type(), a.Code() + strInfo, ok := icmpv6TypeCodeInfo[t] + if !ok { + // Unknown ICMPv6 type field + return fmt.Sprintf("%d(%d)", t, c) + } + typeStr := strInfo.typeStr + if strInfo.codeStr == nil && c == 0 { + // The ICMPv6 type does not make use of the code field + return fmt.Sprintf("%s", strInfo.typeStr) + } + if strInfo.codeStr == nil && c != 0 { + // The ICMPv6 type does not make use of the code field, but it is present anyway + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + codeStr, ok := (*strInfo.codeStr)[c] + if !ok { + // We don't know this ICMPv6 code; print the numerical value + return fmt.Sprintf("%s(Code: %d)", typeStr, c) + } + return fmt.Sprintf("%s(%s)", typeStr, codeStr) +} + +func (a ICMPv6TypeCode) GoString() string { + t := reflect.TypeOf(a) + return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code()) +} + +// SerializeTo writes the ICMPv6TypeCode value to the 'bytes' buffer. +func (a ICMPv6TypeCode) SerializeTo(bytes []byte) { + binary.BigEndian.PutUint16(bytes, uint16(a)) +} + +// CreateICMPv6TypeCode is a convenience function to create an ICMPv6TypeCode +// gopacket type from the ICMPv6 type and code values. +func CreateICMPv6TypeCode(typ uint8, code uint8) ICMPv6TypeCode { + return ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{typ, code})) +} + +// ICMPv6 is the layer for IPv6 ICMP packet data +type ICMPv6 struct { + BaseLayer + TypeCode ICMPv6TypeCode + Checksum uint16 + // TypeBytes is deprecated and always nil. See the different ICMPv6 message types + // instead (e.g. ICMPv6TypeRouterSolicitation). + TypeBytes []byte + tcpipchecksum +} + +// LayerType returns LayerTypeICMPv6. +func (i *ICMPv6) LayerType() gopacket.LayerType { return LayerTypeICMPv6 } + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less then 4 bytes for ICMPv6 packet") + } + i.TypeCode = CreateICMPv6TypeCode(data[0], data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.BaseLayer = BaseLayer{data[:4], data[4:]} + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + i.TypeCode.SerializeTo(bytes) + + if opts.ComputeChecksums { + bytes[2] = 0 + bytes[3] = 0 + csum, err := i.computeChecksum(b.Bytes(), IPProtocolICMPv6) + if err != nil { + return err + } + i.Checksum = csum + } + binary.BigEndian.PutUint16(bytes[2:], i.Checksum) + + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6 +} + +// NextLayerType returns the layer type contained by this DecodingLayer. 
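To illustrate the type/code packing implemented above: ICMPv6TypeCode stores the type in the high byte and the code in the low byte, and String() falls back to a numeric form for unknown types. A small sketch, assuming the constants defined earlier in this file:

package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	tc := layers.CreateICMPv6TypeCode(layers.ICMPv6TypeDestinationUnreachable, layers.ICMPv6CodePortUnreachable)
	fmt.Println(tc.Type(), tc.Code()) // 1 4
	fmt.Println(tc)                   // DestinationUnreachable(PortUnreachable)

	// An unknown type falls back to the numeric "type(code)" form.
	fmt.Println(layers.CreateICMPv6TypeCode(200, 7)) // 200(7)
}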
+func (i *ICMPv6) NextLayerType() gopacket.LayerType { + switch i.TypeCode.Type() { + case ICMPv6TypeEchoRequest: + return LayerTypeICMPv6Echo + case ICMPv6TypeEchoReply: + return LayerTypeICMPv6Echo + case ICMPv6TypeRouterSolicitation: + return LayerTypeICMPv6RouterSolicitation + case ICMPv6TypeRouterAdvertisement: + return LayerTypeICMPv6RouterAdvertisement + case ICMPv6TypeNeighborSolicitation: + return LayerTypeICMPv6NeighborSolicitation + case ICMPv6TypeNeighborAdvertisement: + return LayerTypeICMPv6NeighborAdvertisement + case ICMPv6TypeRedirect: + return LayerTypeICMPv6Redirect + case ICMPv6TypeMLDv1MulticastListenerQueryMessage: // Same Code for MLDv1 Query and MLDv2 Query + if len(i.Payload) > 20 { // Only payload size differs + return LayerTypeMLDv2MulticastListenerQuery + } else { + return LayerTypeMLDv1MulticastListenerQuery + } + case ICMPv6TypeMLDv1MulticastListenerDoneMessage: + return LayerTypeMLDv1MulticastListenerDone + case ICMPv6TypeMLDv1MulticastListenerReportMessage: + return LayerTypeMLDv1MulticastListenerReport + case ICMPv6TypeMLDv2MulticastListenerReportMessageV2: + return LayerTypeMLDv2MulticastListenerReport + } + + return gopacket.LayerTypePayload +} + +func decodeICMPv6(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6{} + return decodingLayerDecoder(i, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp6msg.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp6msg.go new file mode 100644 index 00000000..d9268db0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/icmp6msg.go @@ -0,0 +1,578 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "net" + "time" + + "github.com/google/gopacket" +) + +// Based on RFC 4861 + +// ICMPv6Opt indicate how to decode the data associated with each ICMPv6Option. +type ICMPv6Opt uint8 + +const ( + _ ICMPv6Opt = iota + + // ICMPv6OptSourceAddress contains the link-layer address of the sender of + // the packet. It is used in the Neighbor Solicitation, Router + // Solicitation, and Router Advertisement packets. Must be ignored for other + // Neighbor discovery messages. + ICMPv6OptSourceAddress + + // ICMPv6OptTargetAddress contains the link-layer address of the target. It + // is used in Neighbor Advertisement and Redirect packets. Must be ignored + // for other Neighbor discovery messages. + ICMPv6OptTargetAddress + + // ICMPv6OptPrefixInfo provides hosts with on-link prefixes and prefixes + // for Address Autoconfiguration. The Prefix Information option appears in + // Router Advertisement packets and MUST be silently ignored for other + // messages. + ICMPv6OptPrefixInfo + + // ICMPv6OptRedirectedHeader is used in Redirect messages and contains all + // or part of the packet that is being redirected. + ICMPv6OptRedirectedHeader + + // ICMPv6OptMTU is used in Router Advertisement messages to ensure that all + // nodes on a link use the same MTU value in those cases where the link MTU + // is not well known. This option MUST be silently ignored for other + // Neighbor Discovery messages. + ICMPv6OptMTU +) + +// ICMPv6Echo represents the structure of a ping. 
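The NextLayerType switch above is what lets a DecodingLayerParser chain from the 4-byte ICMPv6 header into the message-specific layers defined in icmp6msg.go. A sketch of that chaining for an echo request, assuming the upstream gopacket import path; the raw bytes are hand-built (checksum left zero, since decoding does not validate it):

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Echo request: type 128, code 0, checksum 0, identifier 1, sequence 1.
	raw := []byte{128, 0, 0, 0, 0, 1, 0, 1}

	var icmp6 layers.ICMPv6
	var echo layers.ICMPv6Echo
	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeICMPv6, &icmp6, &echo)
	decoded := []gopacket.LayerType{}
	if err := parser.DecodeLayers(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded, echo.Identifier, echo.SeqNumber) // [ICMPv6 ICMPv6Echo] 1 1
}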
+type ICMPv6Echo struct { + BaseLayer + Identifier uint16 + SeqNumber uint16 +} + +// ICMPv6RouterSolicitation is sent by hosts to find routers. +type ICMPv6RouterSolicitation struct { + BaseLayer + Options ICMPv6Options +} + +// ICMPv6RouterAdvertisement is sent by routers in response to Solicitation. +type ICMPv6RouterAdvertisement struct { + BaseLayer + HopLimit uint8 + Flags uint8 + RouterLifetime uint16 + ReachableTime uint32 + RetransTimer uint32 + Options ICMPv6Options +} + +// ICMPv6NeighborSolicitation is sent to request the link-layer address of a +// target node. +type ICMPv6NeighborSolicitation struct { + BaseLayer + TargetAddress net.IP + Options ICMPv6Options +} + +// ICMPv6NeighborAdvertisement is sent by nodes in response to Solicitation. +type ICMPv6NeighborAdvertisement struct { + BaseLayer + Flags uint8 + TargetAddress net.IP + Options ICMPv6Options +} + +// ICMPv6Redirect is sent by routers to inform hosts of a better first-hop node +// on the path to a destination. +type ICMPv6Redirect struct { + BaseLayer + TargetAddress net.IP + DestinationAddress net.IP + Options ICMPv6Options +} + +// ICMPv6Option contains the type and data for a single option. +type ICMPv6Option struct { + Type ICMPv6Opt + Data []byte +} + +// ICMPv6Options is a slice of ICMPv6Option. +type ICMPv6Options []ICMPv6Option + +func (i ICMPv6Opt) String() string { + switch i { + case ICMPv6OptSourceAddress: + return "SourceAddress" + case ICMPv6OptTargetAddress: + return "TargetAddress" + case ICMPv6OptPrefixInfo: + return "PrefixInfo" + case ICMPv6OptRedirectedHeader: + return "RedirectedHeader" + case ICMPv6OptMTU: + return "MTU" + default: + return fmt.Sprintf("Unknown(%d)", i) + } +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6Echo) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6Echo +} + +// LayerType returns LayerTypeICMPv6Echo. +func (i *ICMPv6Echo) LayerType() gopacket.LayerType { + return LayerTypeICMPv6Echo +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6Echo) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6Echo) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less then 4 bytes for ICMPv6 Echo") + } + i.Identifier = binary.BigEndian.Uint16(data[0:2]) + i.SeqNumber = binary.BigEndian.Uint16(data[2:4]) + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6Echo) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(4) + if err != nil { + return err + } + + binary.BigEndian.PutUint16(buf, i.Identifier) + binary.BigEndian.PutUint16(buf[2:], i.SeqNumber) + return nil +} + +// LayerType returns LayerTypeICMPv6. +func (i *ICMPv6RouterSolicitation) LayerType() gopacket.LayerType { + return LayerTypeICMPv6RouterSolicitation +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6RouterSolicitation) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (i *ICMPv6RouterSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + // first 4 bytes are reserved followed by options + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less then 4 bytes for ICMPv6 router solicitation") + } + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[4:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6RouterSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(4) + if err != nil { + return err + } + + copy(buf, lotsOfZeros[:4]) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6RouterSolicitation) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6RouterSolicitation +} + +// LayerType returns LayerTypeICMPv6RouterAdvertisement. +func (i *ICMPv6RouterAdvertisement) LayerType() gopacket.LayerType { + return LayerTypeICMPv6RouterAdvertisement +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6RouterAdvertisement) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6RouterAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + df.SetTruncated() + return errors.New("ICMP layer less then 12 bytes for ICMPv6 router advertisement") + } + + i.HopLimit = uint8(data[0]) + // M, O bit followed by 6 reserved bits + i.Flags = uint8(data[1]) + i.RouterLifetime = binary.BigEndian.Uint16(data[2:4]) + i.ReachableTime = binary.BigEndian.Uint32(data[4:8]) + i.RetransTimer = binary.BigEndian.Uint32(data[8:12]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[12:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6RouterAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(12) + if err != nil { + return err + } + + buf[0] = byte(i.HopLimit) + buf[1] = byte(i.Flags) + binary.BigEndian.PutUint16(buf[2:], i.RouterLifetime) + binary.BigEndian.PutUint32(buf[4:], i.ReachableTime) + binary.BigEndian.PutUint32(buf[8:], i.RetransTimer) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6RouterAdvertisement) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6RouterAdvertisement +} + +// ManagedAddressConfig is true when addresses are available via DHCPv6. If +// set, the OtherConfig flag is redundant. +func (i *ICMPv6RouterAdvertisement) ManagedAddressConfig() bool { + return i.Flags&0x80 != 0 +} + +// OtherConfig is true when there is other configuration information available +// via DHCPv6. For example, DNS-related information. 
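The two accessors above (and OtherConfig just below) read the M and O bits out of the raw flags byte of a router advertisement. A tiny sketch of the bit layout:

package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	// Flags holds the raw M/O byte: bit 7 = Managed (DHCPv6 addresses),
	// bit 6 = Other (other DHCPv6 configuration, e.g. DNS).
	ra := layers.ICMPv6RouterAdvertisement{Flags: 0xC0}
	fmt.Println(ra.ManagedAddressConfig(), ra.OtherConfig()) // true true

	ra.Flags = 0x40
	fmt.Println(ra.ManagedAddressConfig(), ra.OtherConfig()) // false true
}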
+func (i *ICMPv6RouterAdvertisement) OtherConfig() bool { + return i.Flags&0x40 != 0 +} + +// LayerType returns LayerTypeICMPv6NeighborSolicitation. +func (i *ICMPv6NeighborSolicitation) LayerType() gopacket.LayerType { + return LayerTypeICMPv6NeighborSolicitation +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6NeighborSolicitation) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6NeighborSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return errors.New("ICMP layer less then 20 bytes for ICMPv6 neighbor solicitation") + } + + i.TargetAddress = net.IP(data[4:20]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[20:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6NeighborSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + copy(buf, lotsOfZeros[:4]) + copy(buf[4:], i.TargetAddress) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6NeighborSolicitation) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6NeighborSolicitation +} + +// LayerType returns LayerTypeICMPv6NeighborAdvertisement. +func (i *ICMPv6NeighborAdvertisement) LayerType() gopacket.LayerType { + return LayerTypeICMPv6NeighborAdvertisement +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6NeighborAdvertisement) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6NeighborAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return errors.New("ICMP layer less then 20 bytes for ICMPv6 neighbor advertisement") + } + + i.Flags = uint8(data[0]) + i.TargetAddress = net.IP(data[4:20]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[20:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6NeighborAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + buf[0] = byte(i.Flags) + copy(buf[1:], lotsOfZeros[:3]) + copy(buf[4:], i.TargetAddress) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6NeighborAdvertisement) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6NeighborAdvertisement +} + +// Router indicates whether the sender is a router or not. 
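A usage sketch for the neighbor solicitation serializer above: options are serialized first and the fixed part (4 reserved bytes plus the 16-byte target) is prepended, so the total comes out at 28 bytes for one source link-layer option. The MAC and target address are arbitrary illustrative values.

package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	mac := net.HardwareAddr{0x02, 0x00, 0x00, 0x00, 0x00, 0x01}
	ns := &layers.ICMPv6NeighborSolicitation{
		TargetAddress: net.ParseIP("fe80::1"),
		Options: layers.ICMPv6Options{
			{Type: layers.ICMPv6OptSourceAddress, Data: mac},
		},
	}
	buf := gopacket.NewSerializeBuffer()
	if err := ns.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
		panic(err)
	}
	// 4 reserved bytes + 16-byte target + 8-byte option
	// (type, length in 8-octet units, 6-byte MAC).
	fmt.Println(len(buf.Bytes())) // 28
}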
+func (i *ICMPv6NeighborAdvertisement) Router() bool { + return i.Flags&0x80 != 0 +} + +// Solicited indicates whether the advertisement was solicited or not. +func (i *ICMPv6NeighborAdvertisement) Solicited() bool { + return i.Flags&0x40 != 0 +} + +// Override indicates whether the advertisement should Override an existing +// cache entry. +func (i *ICMPv6NeighborAdvertisement) Override() bool { + return i.Flags&0x20 != 0 +} + +// LayerType returns LayerTypeICMPv6Redirect. +func (i *ICMPv6Redirect) LayerType() gopacket.LayerType { + return LayerTypeICMPv6Redirect +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *ICMPv6Redirect) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *ICMPv6Redirect) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 36 { + df.SetTruncated() + return errors.New("ICMP layer less then 36 bytes for ICMPv6 redirect") + } + + i.TargetAddress = net.IP(data[4:20]) + i.DestinationAddress = net.IP(data[20:36]) + i.BaseLayer = BaseLayer{data, nil} // assume no payload + + // truncate old options + i.Options = i.Options[:0] + + return i.Options.DecodeFromBytes(data[36:], df) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6Redirect) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := i.Options.SerializeTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(36) + if err != nil { + return err + } + + copy(buf, lotsOfZeros[:4]) + copy(buf[4:], i.TargetAddress) + copy(buf[20:], i.DestinationAddress) + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (i *ICMPv6Redirect) CanDecode() gopacket.LayerClass { + return LayerTypeICMPv6Redirect +} + +func (i ICMPv6Option) String() string { + hd := hex.EncodeToString(i.Data) + if len(hd) > 0 { + hd = " 0x" + hd + } + + switch i.Type { + case ICMPv6OptSourceAddress, ICMPv6OptTargetAddress: + return fmt.Sprintf("ICMPv6Option(%s:%v)", + i.Type, + net.HardwareAddr(i.Data)) + case ICMPv6OptPrefixInfo: + if len(i.Data) == 30 { + prefixLen := uint8(i.Data[0]) + onLink := (i.Data[1]&0x80 != 0) + autonomous := (i.Data[1]&0x40 != 0) + validLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[2:6])) * time.Second + preferredLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[6:10])) * time.Second + + prefix := net.IP(i.Data[14:]) + + return fmt.Sprintf("ICMPv6Option(%s:%v/%v:%t:%t:%v:%v)", + i.Type, + prefix, prefixLen, + onLink, autonomous, + validLifetime, preferredLifetime) + } + case ICMPv6OptRedirectedHeader: + // could invoke IP decoder on data... probably best not to + break + case ICMPv6OptMTU: + if len(i.Data) == 6 { + return fmt.Sprintf("ICMPv6Option(%s:%v)", + i.Type, + binary.BigEndian.Uint32(i.Data[2:])) + } + + } + return fmt.Sprintf("ICMPv6Option(%s:%s)", i.Type, hd) +} + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (i *ICMPv6Options) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + for len(data) > 0 { + if len(data) < 2 { + df.SetTruncated() + return errors.New("ICMP layer less then 2 bytes for ICMPv6 message option") + } + + // unit is 8 octets, convert to bytes + length := int(data[1]) * 8 + + if length == 0 { + df.SetTruncated() + return errors.New("ICMPv6 message option with length 0") + } + + if len(data) < length { + df.SetTruncated() + return fmt.Errorf("ICMP layer only %v bytes for ICMPv6 message option with length %v", len(data), length) + } + + o := ICMPv6Option{ + Type: ICMPv6Opt(data[0]), + Data: data[2:length], + } + + // chop off option we just consumed + data = data[length:] + + *i = append(*i, o) + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *ICMPv6Options) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + for _, opt := range []ICMPv6Option(*i) { + length := len(opt.Data) + 2 + buf, err := b.PrependBytes(length) + if err != nil { + return err + } + + buf[0] = byte(opt.Type) + buf[1] = byte(length / 8) + copy(buf[2:], opt.Data) + } + + return nil +} + +func decodeICMPv6Echo(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6Echo{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6RouterSolicitation(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6RouterSolicitation{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6RouterAdvertisement(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6RouterAdvertisement{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6NeighborSolicitation(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6NeighborSolicitation{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6NeighborAdvertisement(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6NeighborAdvertisement{} + return decodingLayerDecoder(i, data, p) +} + +func decodeICMPv6Redirect(data []byte, p gopacket.PacketBuilder) error { + i := &ICMPv6Redirect{} + return decodingLayerDecoder(i, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/igmp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/igmp.go new file mode 100644 index 00000000..d0084153 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/igmp.go @@ -0,0 +1,355 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
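The options decoder above walks TLVs whose length byte counts 8-octet units. A minimal sketch of decoding a single source link-layer address option, assuming gopacket's NilDecodeFeedback helper; the MAC bytes are illustrative:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Type 1 (SourceAddress), length 1 (i.e. 8 bytes), then the 6-byte MAC.
	raw := []byte{0x01, 0x01, 0x02, 0x00, 0x00, 0x00, 0x00, 0x01}

	var opts layers.ICMPv6Options
	if err := opts.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	fmt.Println(opts[0]) // ICMPv6Option(SourceAddress:02:00:00:00:00:01)
}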
+ +package layers + +import ( + "encoding/binary" + "errors" + "net" + "time" + + "github.com/google/gopacket" +) + +type IGMPType uint8 + +const ( + IGMPMembershipQuery IGMPType = 0x11 // General or group specific query + IGMPMembershipReportV1 IGMPType = 0x12 // Version 1 Membership Report + IGMPMembershipReportV2 IGMPType = 0x16 // Version 2 Membership Report + IGMPLeaveGroup IGMPType = 0x17 // Leave Group + IGMPMembershipReportV3 IGMPType = 0x22 // Version 3 Membership Report +) + +// String conversions for IGMP message types +func (i IGMPType) String() string { + switch i { + case IGMPMembershipQuery: + return "IGMP Membership Query" + case IGMPMembershipReportV1: + return "IGMPv1 Membership Report" + case IGMPMembershipReportV2: + return "IGMPv2 Membership Report" + case IGMPMembershipReportV3: + return "IGMPv3 Membership Report" + case IGMPLeaveGroup: + return "Leave Group" + default: + return "" + } +} + +type IGMPv3GroupRecordType uint8 + +const ( + IGMPIsIn IGMPv3GroupRecordType = 0x01 // Type MODE_IS_INCLUDE, source addresses x + IGMPIsEx IGMPv3GroupRecordType = 0x02 // Type MODE_IS_EXCLUDE, source addresses x + IGMPToIn IGMPv3GroupRecordType = 0x03 // Type CHANGE_TO_INCLUDE_MODE, source addresses x + IGMPToEx IGMPv3GroupRecordType = 0x04 // Type CHANGE_TO_EXCLUDE_MODE, source addresses x + IGMPAllow IGMPv3GroupRecordType = 0x05 // Type ALLOW_NEW_SOURCES, source addresses x + IGMPBlock IGMPv3GroupRecordType = 0x06 // Type BLOCK_OLD_SOURCES, source addresses x +) + +func (i IGMPv3GroupRecordType) String() string { + switch i { + case IGMPIsIn: + return "MODE_IS_INCLUDE" + case IGMPIsEx: + return "MODE_IS_EXCLUDE" + case IGMPToIn: + return "CHANGE_TO_INCLUDE_MODE" + case IGMPToEx: + return "CHANGE_TO_EXCLUDE_MODE" + case IGMPAllow: + return "ALLOW_NEW_SOURCES" + case IGMPBlock: + return "BLOCK_OLD_SOURCES" + default: + return "" + } +} + +// IGMP represents an IGMPv3 message. +type IGMP struct { + BaseLayer + Type IGMPType + MaxResponseTime time.Duration + Checksum uint16 + GroupAddress net.IP + SupressRouterProcessing bool + RobustnessValue uint8 + IntervalTime time.Duration + SourceAddresses []net.IP + NumberOfGroupRecords uint16 + NumberOfSources uint16 + GroupRecords []IGMPv3GroupRecord + Version uint8 // IGMP protocol version +} + +// IGMPv1or2 stores header details for an IGMPv1 or IGMPv2 packet. +// +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type | Max Resp Time | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Group Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +type IGMPv1or2 struct { + BaseLayer + Type IGMPType // IGMP message type + MaxResponseTime time.Duration // meaningful only in Membership Query messages + Checksum uint16 // 16-bit checksum of entire ip payload + GroupAddress net.IP // either 0 or an IP multicast address + Version uint8 +} + +// decodeResponse dissects IGMPv1 or IGMPv2 packet. 
+func (i *IGMPv1or2) decodeResponse(data []byte) error { + if len(data) < 8 { + return errors.New("IGMP packet too small") + } + + i.MaxResponseTime = igmpTimeDecode(data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.GroupAddress = net.IP(data[4:8]) + + return nil +} + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 0x22 | Reserved | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reserved | Number of Group Records (M) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Group Record [1] . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Group Record [2] . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Group Record [M] . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Record Type | Aux Data Len | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Multicast Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Source Address [1] | +// +- -+ +// | Source Address [2] | +// +- -+ +// | Source Address [N] | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . Auxiliary Data . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// IGMPv3GroupRecord stores individual group records for a V3 Membership Report message. +type IGMPv3GroupRecord struct { + Type IGMPv3GroupRecordType + AuxDataLen uint8 // this should always be 0 as per IGMPv3 spec. + NumberOfSources uint16 + MulticastAddress net.IP + SourceAddresses []net.IP + AuxData uint32 // NOT USED +} + +func (i *IGMP) decodeIGMPv3MembershipReport(data []byte) error { + if len(data) < 8 { + return errors.New("IGMPv3 Membership Report too small #1") + } + + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.NumberOfGroupRecords = binary.BigEndian.Uint16(data[6:8]) + + recordOffset := 8 + for j := 0; j < int(i.NumberOfGroupRecords); j++ { + if len(data) < recordOffset+8 { + return errors.New("IGMPv3 Membership Report too small #2") + } + + var gr IGMPv3GroupRecord + gr.Type = IGMPv3GroupRecordType(data[recordOffset]) + gr.AuxDataLen = data[recordOffset+1] + gr.NumberOfSources = binary.BigEndian.Uint16(data[recordOffset+2 : recordOffset+4]) + gr.MulticastAddress = net.IP(data[recordOffset+4 : recordOffset+8]) + + if len(data) < recordOffset+8+int(gr.NumberOfSources)*4 { + return errors.New("IGMPv3 Membership Report too small #3") + } + + // append source address records. 
+ for i := 0; i < int(gr.NumberOfSources); i++ { + sourceAddr := net.IP(data[recordOffset+8+i*4 : recordOffset+12+i*4]) + gr.SourceAddresses = append(gr.SourceAddresses, sourceAddr) + } + + i.GroupRecords = append(i.GroupRecords, gr) + recordOffset += 8 + 4*int(gr.NumberOfSources) + } + return nil +} + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Type = 0x11 | Max Resp Code | Checksum | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Group Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Resv |S| QRV | QQIC | Number of Sources (N) | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Source Address [1] | +// +- -+ +// | Source Address [2] | +// +- . -+ +// | Source Address [N] | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// +// decodeIGMPv3MembershipQuery parses the IGMPv3 message of type 0x11 +func (i *IGMP) decodeIGMPv3MembershipQuery(data []byte) error { + if len(data) < 12 { + return errors.New("IGMPv3 Membership Query too small #1") + } + + i.MaxResponseTime = igmpTimeDecode(data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.SupressRouterProcessing = data[8]&0x8 != 0 + i.GroupAddress = net.IP(data[4:8]) + i.RobustnessValue = data[8] & 0x7 + i.IntervalTime = igmpTimeDecode(data[9]) + i.NumberOfSources = binary.BigEndian.Uint16(data[10:12]) + + if len(data) < 12+int(i.NumberOfSources)*4 { + return errors.New("IGMPv3 Membership Query too small #2") + } + + for j := 0; j < int(i.NumberOfSources); j++ { + i.SourceAddresses = append(i.SourceAddresses, net.IP(data[12+j*4:16+j*4])) + } + + return nil +} + +// igmpTimeDecode decodes the duration created by the given byte, using the +// algorithm in http://www.rfc-base.org/txt/rfc-3376.txt section 4.1.1. +func igmpTimeDecode(t uint8) time.Duration { + if t&0x80 == 0 { + return time.Millisecond * 100 * time.Duration(t) + } + mant := (t & 0x70) >> 4 + exp := t & 0x0F + return time.Millisecond * 100 * time.Duration((mant|0x10)<<(exp+3)) +} + +// LayerType returns LayerTypeIGMP for the V1,2,3 message protocol formats. +func (i *IGMP) LayerType() gopacket.LayerType { return LayerTypeIGMP } +func (i *IGMPv1or2) LayerType() gopacket.LayerType { return LayerTypeIGMP } + +func (i *IGMPv1or2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 8 { + return errors.New("IGMP Packet too small") + } + + i.Type = IGMPType(data[0]) + i.MaxResponseTime = igmpTimeDecode(data[1]) + i.Checksum = binary.BigEndian.Uint16(data[2:4]) + i.GroupAddress = net.IP(data[4:8]) + + return nil +} + +func (i *IGMPv1or2) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +func (i *IGMPv1or2) CanDecode() gopacket.LayerClass { + return LayerTypeIGMP +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (i *IGMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 1 { + return errors.New("IGMP packet is too small") + } + + // common IGMP header values between versions 1..3 of IGMP specification.. + i.Type = IGMPType(data[0]) + + switch i.Type { + case IGMPMembershipQuery: + i.decodeIGMPv3MembershipQuery(data) + case IGMPMembershipReportV3: + i.decodeIGMPv3MembershipReport(data) + default: + return errors.New("unsupported IGMP type") + } + + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. 
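As the decodeIGMP dispatcher below makes explicit, the IGMP version is inferred from the message type, the packet length, and the max-response byte: an 8-byte query with a zero max-response byte is IGMPv1, a non-zero one is IGMPv2, and a 12-byte-or-larger query is IGMPv3. A sketch of that dispatch through the packet API, assuming the upstream import path; the group address is the all-hosts group:

package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// 8-byte membership query with a non-zero max-response byte, so
	// decodeIGMP classifies it as IGMPv2.
	raw := []byte{0x11, 0x64, 0x00, 0x00, 224, 0, 0, 1}

	packet := gopacket.NewPacket(raw, layers.LayerTypeIGMP, gopacket.Default)
	if igmp, ok := packet.Layer(layers.LayerTypeIGMP).(*layers.IGMPv1or2); ok {
		fmt.Println(igmp.Version, igmp.GroupAddress) // 2 224.0.0.1
	}
}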
+func (i *IGMP) CanDecode() gopacket.LayerClass { + return LayerTypeIGMP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (i *IGMP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// decodeIGMP will parse IGMP v1,2 or 3 protocols. Checks against the +// IGMP type are performed against byte[0], logic then iniitalizes and +// passes the appropriate struct (IGMP or IGMPv1or2) to +// decodingLayerDecoder. +func decodeIGMP(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 1 { + return errors.New("IGMP packet is too small") + } + + // byte 0 contains IGMP message type. + switch IGMPType(data[0]) { + case IGMPMembershipQuery: + // IGMPv3 Membership Query payload is >= 12 + if len(data) >= 12 { + i := &IGMP{Version: 3} + return decodingLayerDecoder(i, data, p) + } else if len(data) == 8 { + i := &IGMPv1or2{} + if data[1] == 0x00 { + i.Version = 1 // IGMPv1 has a query length of 8 and MaxResp = 0 + } else { + i.Version = 2 // IGMPv2 has a query length of 8 and MaxResp != 0 + } + + return decodingLayerDecoder(i, data, p) + } + case IGMPMembershipReportV3: + i := &IGMP{Version: 3} + return decodingLayerDecoder(i, data, p) + case IGMPMembershipReportV1: + i := &IGMPv1or2{Version: 1} + return decodingLayerDecoder(i, data, p) + case IGMPLeaveGroup, IGMPMembershipReportV2: + // leave group and Query Report v2 used in IGMPv2 only. + i := &IGMPv1or2{Version: 2} + return decodingLayerDecoder(i, data, p) + default: + } + + return errors.New("Unable to determine IGMP type.") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ip4.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ip4.go new file mode 100644 index 00000000..2b3c0c6b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ip4.go @@ -0,0 +1,325 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + "strings" + + "github.com/google/gopacket" +) + +type IPv4Flag uint8 + +const ( + IPv4EvilBit IPv4Flag = 1 << 2 // http://tools.ietf.org/html/rfc3514 ;) + IPv4DontFragment IPv4Flag = 1 << 1 + IPv4MoreFragments IPv4Flag = 1 << 0 +) + +func (f IPv4Flag) String() string { + var s []string + if f&IPv4EvilBit != 0 { + s = append(s, "Evil") + } + if f&IPv4DontFragment != 0 { + s = append(s, "DF") + } + if f&IPv4MoreFragments != 0 { + s = append(s, "MF") + } + return strings.Join(s, "|") +} + +// IPv4 is the header of an IP packet. 
+type IPv4 struct { + BaseLayer + Version uint8 + IHL uint8 + TOS uint8 + Length uint16 + Id uint16 + Flags IPv4Flag + FragOffset uint16 + TTL uint8 + Protocol IPProtocol + Checksum uint16 + SrcIP net.IP + DstIP net.IP + Options []IPv4Option + Padding []byte +} + +// LayerType returns LayerTypeIPv4 +func (i *IPv4) LayerType() gopacket.LayerType { return LayerTypeIPv4 } +func (i *IPv4) NetworkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointIPv4, i.SrcIP, i.DstIP) +} + +type IPv4Option struct { + OptionType uint8 + OptionLength uint8 + OptionData []byte +} + +func (i IPv4Option) String() string { + return fmt.Sprintf("IPv4Option(%v:%v)", i.OptionType, i.OptionData) +} + +// for the current ipv4 options, return the number of bytes (including +// padding that the options used) +func (ip *IPv4) getIPv4OptionSize() uint8 { + optionSize := uint8(0) + for _, opt := range ip.Options { + switch opt.OptionType { + case 0: + // this is the end of option lists + optionSize++ + case 1: + // this is the padding + optionSize++ + default: + optionSize += opt.OptionLength + + } + } + // make sure the options are aligned to 32 bit boundary + if (optionSize % 4) != 0 { + optionSize += 4 - (optionSize % 4) + } + return optionSize +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (ip *IPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + optionLength := ip.getIPv4OptionSize() + bytes, err := b.PrependBytes(20 + int(optionLength)) + if err != nil { + return err + } + if opts.FixLengths { + ip.IHL = 5 + (optionLength / 4) + ip.Length = uint16(len(b.Bytes())) + } + bytes[0] = (ip.Version << 4) | ip.IHL + bytes[1] = ip.TOS + binary.BigEndian.PutUint16(bytes[2:], ip.Length) + binary.BigEndian.PutUint16(bytes[4:], ip.Id) + binary.BigEndian.PutUint16(bytes[6:], ip.flagsfrags()) + bytes[8] = ip.TTL + bytes[9] = byte(ip.Protocol) + if err := ip.AddressTo4(); err != nil { + return err + } + copy(bytes[12:16], ip.SrcIP) + copy(bytes[16:20], ip.DstIP) + + curLocation := 20 + // Now, we will encode the options + for _, opt := range ip.Options { + switch opt.OptionType { + case 0: + // this is the end of option lists + bytes[curLocation] = 0 + curLocation++ + case 1: + // this is the padding + bytes[curLocation] = 1 + curLocation++ + default: + bytes[curLocation] = opt.OptionType + bytes[curLocation+1] = opt.OptionLength + + // sanity checking to protect us from buffer overrun + if len(opt.OptionData) > int(opt.OptionLength-2) { + return errors.New("option length is smaller than length of option data") + } + copy(bytes[curLocation+2:curLocation+int(opt.OptionLength)], opt.OptionData) + curLocation += int(opt.OptionLength) + } + } + + if opts.ComputeChecksums { + ip.Checksum = checksum(bytes) + } + binary.BigEndian.PutUint16(bytes[10:], ip.Checksum) + return nil +} + +func checksum(bytes []byte) uint16 { + // Clear checksum bytes + bytes[10] = 0 + bytes[11] = 0 + + // Compute checksum + var csum uint32 + for i := 0; i < len(bytes); i += 2 { + csum += uint32(bytes[i]) << 8 + csum += uint32(bytes[i+1]) + } + for { + // Break when sum is less or equals to 0xFFFF + if csum <= 65535 { + break + } + // Add carry to the sum + csum = (csum >> 16) + uint32(uint16(csum)) + } + // Flip all the bits + return ^uint16(csum) +} + +func (ip *IPv4) flagsfrags() (ff uint16) { + ff |= uint16(ip.Flags) << 13 + ff |= ip.FragOffset + return +} + +// DecodeFromBytes decodes the given bytes into this layer. 
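A round-trip sketch for the IPv4 layer defined above: FixLengths derives IHL and total Length, ComputeChecksums fills in the RFC 1071 ones'-complement header checksum via the checksum helper, and DecodeFromBytes recovers the fields. Addresses and payload are illustrative.

package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	ip := &layers.IPv4{
		Version:  4,
		TTL:      64,
		Protocol: layers.IPProtocolUDP,
		SrcIP:    net.IP{10, 0, 0, 1},
		DstIP:    net.IP{10, 0, 0, 2},
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, ip, gopacket.Payload([]byte("data"))); err != nil {
		panic(err)
	}

	var decoded layers.IPv4
	if err := decoded.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	// 24 (20-byte header + 4-byte payload), 5, and the computed header checksum.
	fmt.Println(decoded.Length, decoded.IHL, decoded.Checksum)
}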
+func (ip *IPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return fmt.Errorf("Invalid ip4 header. Length %d less than 20", len(data)) + } + flagsfrags := binary.BigEndian.Uint16(data[6:8]) + + ip.Version = uint8(data[0]) >> 4 + ip.IHL = uint8(data[0]) & 0x0F + ip.TOS = data[1] + ip.Length = binary.BigEndian.Uint16(data[2:4]) + ip.Id = binary.BigEndian.Uint16(data[4:6]) + ip.Flags = IPv4Flag(flagsfrags >> 13) + ip.FragOffset = flagsfrags & 0x1FFF + ip.TTL = data[8] + ip.Protocol = IPProtocol(data[9]) + ip.Checksum = binary.BigEndian.Uint16(data[10:12]) + ip.SrcIP = data[12:16] + ip.DstIP = data[16:20] + ip.Options = ip.Options[:0] + ip.Padding = nil + // Set up an initial guess for contents/payload... we'll reset these soon. + ip.BaseLayer = BaseLayer{Contents: data} + + // This code is added for the following enviroment: + // * Windows 10 with TSO option activated. ( tested on Hyper-V, RealTek ethernet driver ) + if ip.Length == 0 { + // If using TSO(TCP Segmentation Offload), length is zero. + // The actual packet length is the length of data. + ip.Length = uint16(len(data)) + } + + if ip.Length < 20 { + return fmt.Errorf("Invalid (too small) IP length (%d < 20)", ip.Length) + } else if ip.IHL < 5 { + return fmt.Errorf("Invalid (too small) IP header length (%d < 5)", ip.IHL) + } else if int(ip.IHL*4) > int(ip.Length) { + return fmt.Errorf("Invalid IP header length > IP length (%d > %d)", ip.IHL, ip.Length) + } + if cmp := len(data) - int(ip.Length); cmp > 0 { + data = data[:ip.Length] + } else if cmp < 0 { + df.SetTruncated() + if int(ip.IHL)*4 > len(data) { + return errors.New("Not all IP header bytes available") + } + } + ip.Contents = data[:ip.IHL*4] + ip.Payload = data[ip.IHL*4:] + // From here on, data contains the header options. + data = data[20 : ip.IHL*4] + // Pull out IP options + for len(data) > 0 { + if ip.Options == nil { + // Pre-allocate to avoid growing the slice too much. + ip.Options = make([]IPv4Option, 0, 4) + } + opt := IPv4Option{OptionType: data[0]} + switch opt.OptionType { + case 0: // End of options + opt.OptionLength = 1 + ip.Options = append(ip.Options, opt) + ip.Padding = data[1:] + return nil + case 1: // 1 byte padding + opt.OptionLength = 1 + data = data[1:] + ip.Options = append(ip.Options, opt) + default: + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Invalid ip4 option length. Length %d less than 2", len(data)) + } + opt.OptionLength = data[1] + if len(data) < int(opt.OptionLength) { + df.SetTruncated() + return fmt.Errorf("IP option length exceeds remaining IP header size, option type %v length %v", opt.OptionType, opt.OptionLength) + } + if opt.OptionLength <= 2 { + return fmt.Errorf("Invalid IP option type %v length %d. 
Must be greater than 2", opt.OptionType, opt.OptionLength) + } + opt.OptionData = data[2:opt.OptionLength] + data = data[opt.OptionLength:] + ip.Options = append(ip.Options, opt) + } + } + return nil +} + +func (i *IPv4) CanDecode() gopacket.LayerClass { + return LayerTypeIPv4 +} + +func (i *IPv4) NextLayerType() gopacket.LayerType { + if i.Flags&IPv4MoreFragments != 0 || i.FragOffset != 0 { + return gopacket.LayerTypeFragment + } + return i.Protocol.LayerType() +} + +func decodeIPv4(data []byte, p gopacket.PacketBuilder) error { + ip := &IPv4{} + err := ip.DecodeFromBytes(data, p) + p.AddLayer(ip) + p.SetNetworkLayer(ip) + if err != nil { + return err + } + return p.NextDecoder(ip.NextLayerType()) +} + +func checkIPv4Address(addr net.IP) (net.IP, error) { + if c := addr.To4(); c != nil { + return c, nil + } + if len(addr) == net.IPv6len { + return nil, errors.New("address is IPv6") + } + return nil, fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv4len) +} + +func (ip *IPv4) AddressTo4() error { + var src, dst net.IP + + if addr, err := checkIPv4Address(ip.SrcIP); err != nil { + return fmt.Errorf("Invalid source IPv4 address (%s)", err) + } else { + src = addr + } + if addr, err := checkIPv4Address(ip.DstIP); err != nil { + return fmt.Errorf("Invalid destination IPv4 address (%s)", err) + } else { + dst = addr + } + ip.SrcIP = src + ip.DstIP = dst + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ip6.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ip6.go new file mode 100644 index 00000000..70e9c8d5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ip6.go @@ -0,0 +1,707 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +const ( + // IPv6HopByHopOptionJumbogram code as defined in RFC 2675 + IPv6HopByHopOptionJumbogram = 0xC2 +) + +const ( + ipv6MaxPayloadLength = 65535 +) + +// IPv6 is the layer for the IPv6 header. +type IPv6 struct { + // http://www.networksorcery.com/enp/protocol/ipv6.htm + BaseLayer + Version uint8 + TrafficClass uint8 + FlowLabel uint32 + Length uint16 + NextHeader IPProtocol + HopLimit uint8 + SrcIP net.IP + DstIP net.IP + HopByHop *IPv6HopByHop + // hbh will be pointed to by HopByHop if that layer exists. 
+ hbh IPv6HopByHop +} + +// LayerType returns LayerTypeIPv6 +func (ipv6 *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 } + +// NetworkFlow returns this new Flow (EndpointIPv6, SrcIP, DstIP) +func (ipv6 *IPv6) NetworkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointIPv6, ipv6.SrcIP, ipv6.DstIP) +} + +// Search for Jumbo Payload TLV in IPv6HopByHop and return (length, true) if found +func getIPv6HopByHopJumboLength(hopopts *IPv6HopByHop) (uint32, bool, error) { + var tlv *IPv6HopByHopOption + + for _, t := range hopopts.Options { + if t.OptionType == IPv6HopByHopOptionJumbogram { + tlv = t + break + } + } + if tlv == nil { + // Not found + return 0, false, nil + } + if len(tlv.OptionData) != 4 { + return 0, false, errors.New("Jumbo length TLV data must have length 4") + } + l := binary.BigEndian.Uint32(tlv.OptionData) + if l <= ipv6MaxPayloadLength { + return 0, false, fmt.Errorf("Jumbo length cannot be less than %d", ipv6MaxPayloadLength+1) + } + // Found + return l, true, nil +} + +// Adds zero-valued Jumbo TLV to IPv6 header if it does not exist +// (if necessary add hop-by-hop header) +func addIPv6JumboOption(ip6 *IPv6) { + var tlv *IPv6HopByHopOption + + if ip6.HopByHop == nil { + // Add IPv6 HopByHop + ip6.HopByHop = &IPv6HopByHop{} + ip6.HopByHop.NextHeader = ip6.NextHeader + ip6.HopByHop.HeaderLength = 0 + ip6.NextHeader = IPProtocolIPv6HopByHop + } + for _, t := range ip6.HopByHop.Options { + if t.OptionType == IPv6HopByHopOptionJumbogram { + tlv = t + break + } + } + if tlv == nil { + // Add Jumbo TLV + tlv = &IPv6HopByHopOption{} + ip6.HopByHop.Options = append(ip6.HopByHop.Options, tlv) + } + tlv.SetJumboLength(0) +} + +// Set jumbo length in serialized IPv6 payload (starting with HopByHop header) +func setIPv6PayloadJumboLength(hbh []byte) error { + pLen := len(hbh) + if pLen < 8 { + //HopByHop is minimum 8 bytes + return fmt.Errorf("Invalid IPv6 payload (length %d)", pLen) + } + hbhLen := int((hbh[1] + 1) * 8) + if hbhLen > pLen { + return fmt.Errorf("Invalid hop-by-hop length (length: %d, payload: %d", hbhLen, pLen) + } + offset := 2 //start with options + for offset < hbhLen { + opt := hbh[offset] + if opt == 0 { + //Pad1 + offset++ + continue + } + optLen := int(hbh[offset+1]) + if opt == IPv6HopByHopOptionJumbogram { + if optLen == 4 { + binary.BigEndian.PutUint32(hbh[offset+2:], uint32(pLen)) + return nil + } + return fmt.Errorf("Jumbo TLV too short (%d bytes)", optLen) + } + offset += 2 + optLen + } + return errors.New("Jumbo TLV not found") +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
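The jumbogram helpers above come together during serialization: with FixLengths set and a payload over 65535 bytes, SerializeTo adds a hop-by-hop header with a jumbo TLV, writes the real payload length into that TLV, and leaves the fixed header's length field at zero. A sketch under those assumptions (addresses illustrative, no-next-header used to terminate the chain):

package main

import (
	"encoding/binary"
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	ip6 := &layers.IPv6{
		Version:    6,
		HopLimit:   64,
		NextHeader: layers.IPProtocolNoNextHeader,
		SrcIP:      net.ParseIP("fe80::1"),
		DstIP:      net.ParseIP("fe80::2"),
	}
	payload := make([]byte, 70000) // > 65535, so a jumbogram is required

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true}
	if err := gopacket.SerializeLayers(buf, opts, ip6, gopacket.Payload(payload)); err != nil {
		panic(err)
	}
	out := buf.Bytes()
	// Fixed header carries length 0; the hop-by-hop jumbo TLV holds the real value.
	// 40-byte IPv6 header + 8-byte hop-by-hop header + 70000-byte payload.
	fmt.Println(binary.BigEndian.Uint16(out[4:6]), len(out)) // 0 70048
}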
+func (ipv6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var jumbo bool + var err error + + payload := b.Bytes() + pLen := len(payload) + if pLen > ipv6MaxPayloadLength { + jumbo = true + if opts.FixLengths { + // We need to set the length later because the hop-by-hop header may + // not exist or else need padding, so pLen may yet change + addIPv6JumboOption(ipv6) + } else if ipv6.HopByHop == nil { + return fmt.Errorf("Cannot fit payload length of %d into IPv6 packet", pLen) + } else { + _, ok, err := getIPv6HopByHopJumboLength(ipv6.HopByHop) + if err != nil { + return err + } + if !ok { + return errors.New("Missing jumbo length hop-by-hop option") + } + } + } + + hbhAlreadySerialized := false + if ipv6.HopByHop != nil { + for _, l := range b.Layers() { + if l == LayerTypeIPv6HopByHop { + hbhAlreadySerialized = true + break + } + } + } + if ipv6.HopByHop != nil && !hbhAlreadySerialized { + if ipv6.NextHeader != IPProtocolIPv6HopByHop { + // Just fix it instead of throwing an error + ipv6.NextHeader = IPProtocolIPv6HopByHop + } + err = ipv6.HopByHop.SerializeTo(b, opts) + if err != nil { + return err + } + payload = b.Bytes() + pLen = len(payload) + if opts.FixLengths && jumbo { + err := setIPv6PayloadJumboLength(payload) + if err != nil { + return err + } + } + } + + if !jumbo && pLen > ipv6MaxPayloadLength { + return errors.New("Cannot fit payload into IPv6 header") + } + bytes, err := b.PrependBytes(40) + if err != nil { + return err + } + bytes[0] = (ipv6.Version << 4) | (ipv6.TrafficClass >> 4) + bytes[1] = (ipv6.TrafficClass << 4) | uint8(ipv6.FlowLabel>>16) + binary.BigEndian.PutUint16(bytes[2:], uint16(ipv6.FlowLabel)) + if opts.FixLengths { + if jumbo { + ipv6.Length = 0 + } else { + ipv6.Length = uint16(pLen) + } + } + binary.BigEndian.PutUint16(bytes[4:], ipv6.Length) + bytes[6] = byte(ipv6.NextHeader) + bytes[7] = byte(ipv6.HopLimit) + if err := ipv6.AddressTo16(); err != nil { + return err + } + copy(bytes[8:], ipv6.SrcIP) + copy(bytes[24:], ipv6.DstIP) + return nil +} + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (ipv6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 40 { + df.SetTruncated() + return fmt.Errorf("Invalid ip6 header. Length %d less than 40", len(data)) + } + ipv6.Version = uint8(data[0]) >> 4 + ipv6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF) + ipv6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF + ipv6.Length = binary.BigEndian.Uint16(data[4:6]) + ipv6.NextHeader = IPProtocol(data[6]) + ipv6.HopLimit = data[7] + ipv6.SrcIP = data[8:24] + ipv6.DstIP = data[24:40] + ipv6.HopByHop = nil + ipv6.BaseLayer = BaseLayer{data[:40], data[40:]} + + // We treat a HopByHop IPv6 option as part of the IPv6 packet, since its + // options are crucial for understanding what's actually happening per packet. 
+ if ipv6.NextHeader == IPProtocolIPv6HopByHop { + err := ipv6.hbh.DecodeFromBytes(ipv6.Payload, df) + if err != nil { + return err + } + ipv6.HopByHop = &ipv6.hbh + pEnd, jumbo, err := getIPv6HopByHopJumboLength(ipv6.HopByHop) + if err != nil { + return err + } + if jumbo && ipv6.Length == 0 { + pEnd := int(pEnd) + if pEnd > len(ipv6.Payload) { + df.SetTruncated() + pEnd = len(ipv6.Payload) + } + ipv6.Payload = ipv6.Payload[:pEnd] + return nil + } else if jumbo && ipv6.Length != 0 { + return errors.New("IPv6 has jumbo length and IPv6 length is not 0") + } else if !jumbo && ipv6.Length == 0 { + return errors.New("IPv6 length 0, but HopByHop header does not have jumbogram option") + } else { + ipv6.Payload = ipv6.Payload[ipv6.hbh.ActualLength:] + } + } + + if ipv6.Length == 0 { + return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ipv6.NextHeader) + } + + pEnd := int(ipv6.Length) + if pEnd > len(ipv6.Payload) { + df.SetTruncated() + pEnd = len(ipv6.Payload) + } + ipv6.Payload = ipv6.Payload[:pEnd] + + return nil +} + +// CanDecode implementation according to gopacket.DecodingLayer +func (ipv6 *IPv6) CanDecode() gopacket.LayerClass { + return LayerTypeIPv6 +} + +// NextLayerType implementation according to gopacket.DecodingLayer +func (ipv6 *IPv6) NextLayerType() gopacket.LayerType { + if ipv6.HopByHop != nil { + return ipv6.HopByHop.NextHeader.LayerType() + } + return ipv6.NextHeader.LayerType() +} + +func decodeIPv6(data []byte, p gopacket.PacketBuilder) error { + ip6 := &IPv6{} + err := ip6.DecodeFromBytes(data, p) + p.AddLayer(ip6) + p.SetNetworkLayer(ip6) + if ip6.HopByHop != nil { + p.AddLayer(ip6.HopByHop) + } + if err != nil { + return err + } + return p.NextDecoder(ip6.NextLayerType()) +} + +type ipv6HeaderTLVOption struct { + OptionType, OptionLength uint8 + ActualLength int + OptionData []byte + OptionAlignment [2]uint8 // Xn+Y = [2]uint8{X, Y} +} + +func (h *ipv6HeaderTLVOption) serializeTo(data []byte, fixLengths bool, dryrun bool) int { + if fixLengths { + h.OptionLength = uint8(len(h.OptionData)) + } + length := int(h.OptionLength) + 2 + if !dryrun { + data[0] = h.OptionType + data[1] = h.OptionLength + copy(data[2:], h.OptionData) + } + return length +} + +func decodeIPv6HeaderTLVOption(data []byte) (h *ipv6HeaderTLVOption) { + h = &ipv6HeaderTLVOption{} + if data[0] == 0 { + h.ActualLength = 1 + return + } + h.OptionType = data[0] + h.OptionLength = data[1] + h.ActualLength = int(h.OptionLength) + 2 + h.OptionData = data[2:h.ActualLength] + return +} + +func serializeTLVOptionPadding(data []byte, padLength int) { + if padLength <= 0 { + return + } + if padLength == 1 { + data[0] = 0x0 + return + } + tlvLength := uint8(padLength) - 2 + data[0] = 0x1 + data[1] = tlvLength + if tlvLength != 0 { + for k := range data[2:] { + data[k+2] = 0x0 + } + } + return +} + +// If buf is 'nil' do a serialize dry run +func serializeIPv6HeaderTLVOptions(buf []byte, options []*ipv6HeaderTLVOption, fixLengths bool) int { + var l int + + dryrun := buf == nil + length := 2 + for _, opt := range options { + if fixLengths { + x := int(opt.OptionAlignment[0]) + y := int(opt.OptionAlignment[1]) + if x != 0 { + n := length / x + offset := x*n + y + if offset < length { + offset += x + } + if length != offset { + pad := offset - length + if !dryrun { + serializeTLVOptionPadding(buf[length-2:], pad) + } + length += pad + } + } + } + if dryrun { + l = opt.serializeTo(nil, fixLengths, true) + } else { + l = opt.serializeTo(buf[length-2:], fixLengths, false) + } + length += l + } + if 
fixLengths { + pad := length % 8 + if pad != 0 { + if !dryrun { + serializeTLVOptionPadding(buf[length-2:], pad) + } + length += pad + } + } + return length - 2 +} + +type ipv6ExtensionBase struct { + BaseLayer + NextHeader IPProtocol + HeaderLength uint8 + ActualLength int +} + +func decodeIPv6ExtensionBase(data []byte, df gopacket.DecodeFeedback) (i ipv6ExtensionBase, returnedErr error) { + if len(data) < 2 { + df.SetTruncated() + return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than 2", len(data)) + } + i.NextHeader = IPProtocol(data[0]) + i.HeaderLength = data[1] + i.ActualLength = int(i.HeaderLength)*8 + 8 + if len(data) < i.ActualLength { + return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than specified length %d", len(data), i.ActualLength) + } + i.Contents = data[:i.ActualLength] + i.Payload = data[i.ActualLength:] + return +} + +// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6 +// extensions. You can use it with a DecodingLayerParser to handle IPv6 stacks +// which may or may not have extensions. +type IPv6ExtensionSkipper struct { + NextHeader IPProtocol + BaseLayer +} + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + extension, err := decodeIPv6ExtensionBase(data, df) + if err != nil { + return err + } + i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]} + i.NextHeader = extension.NextHeader + return nil +} + +// CanDecode implementation according to gopacket.DecodingLayer +func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass { + return LayerClassIPv6Extension +} + +// NextLayerType implementation according to gopacket.DecodingLayer +func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType { + return i.NextHeader.LayerType() +} + +// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension. +type IPv6HopByHopOption ipv6HeaderTLVOption + +// IPv6HopByHop is the IPv6 hop-by-hop extension. +type IPv6HopByHop struct { + ipv6ExtensionBase + Options []*IPv6HopByHopOption +} + +// LayerType returns LayerTypeIPv6HopByHop. 
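IPv6ExtensionSkipper, defined above, is meant for DecodingLayerParser stacks that should step over any extension header without modeling it. A sketch with a hand-built IPv6 packet carrying one destination-options extension (next header 60) that ends the chain (next header 59, PadN filler); byte values are illustrative:

package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// IPv6 header: version 6, payload length 8, next header 60 (dest opts), hop limit 64.
	raw := []byte{0x60, 0x00, 0x00, 0x00, 0x00, 0x08, 0x3C, 0x40}
	raw = append(raw, net.ParseIP("fe80::1").To16()...)
	raw = append(raw, net.ParseIP("fe80::2").To16()...)
	// 8-byte extension: next header 59 (none), length 0, PadN filling 6 bytes.
	raw = append(raw, 0x3B, 0x00, 0x01, 0x04, 0x00, 0x00, 0x00, 0x00)

	var ip6 layers.IPv6
	var ext layers.IPv6ExtensionSkipper
	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeIPv6, &ip6, &ext)
	decoded := []gopacket.LayerType{}
	if err := parser.DecodeLayers(raw, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(decoded) // [IPv6 IPv6Destination]
}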
+func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop } + +// SerializeTo implementation according to gopacket.SerializableLayer +func (i *IPv6HopByHop) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var bytes []byte + var err error + + o := make([]*ipv6HeaderTLVOption, 0, len(i.Options)) + for _, v := range i.Options { + o = append(o, (*ipv6HeaderTLVOption)(v)) + } + + l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths) + bytes, err = b.PrependBytes(l) + if err != nil { + return err + } + serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths) + + length := len(bytes) + 2 + if length%8 != 0 { + return errors.New("IPv6HopByHop actual length must be multiple of 8") + } + bytes, err = b.PrependBytes(2) + if err != nil { + return err + } + bytes[0] = uint8(i.NextHeader) + if opts.FixLengths { + i.HeaderLength = uint8((length / 8) - 1) + } + bytes[1] = uint8(i.HeaderLength) + return nil +} + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + var err error + i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df) + if err != nil { + return err + } + offset := 2 + for offset < i.ActualLength { + opt := decodeIPv6HeaderTLVOption(data[offset:]) + i.Options = append(i.Options, (*IPv6HopByHopOption)(opt)) + offset += opt.ActualLength + } + return nil +} + +func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error { + i := &IPv6HopByHop{} + err := i.DecodeFromBytes(data, p) + p.AddLayer(i) + if err != nil { + return err + } + return p.NextDecoder(i.NextHeader) +} + +// SetJumboLength adds the IPv6HopByHopOptionJumbogram with the given length +func (o *IPv6HopByHopOption) SetJumboLength(len uint32) { + o.OptionType = IPv6HopByHopOptionJumbogram + o.OptionLength = 4 + o.ActualLength = 6 + if o.OptionData == nil { + o.OptionData = make([]byte, 4) + } + binary.BigEndian.PutUint32(o.OptionData, len) + o.OptionAlignment = [2]uint8{4, 2} +} + +// IPv6Routing is the IPv6 routing extension. +type IPv6Routing struct { + ipv6ExtensionBase + RoutingType uint8 + SegmentsLeft uint8 + // This segment is supposed to be zero according to RFC2460, the second set of + // 4 bytes in the extension. + Reserved []byte + // SourceRoutingIPs is the set of IPv6 addresses requested for source routing, + // set only if RoutingType == 0. + SourceRoutingIPs []net.IP +} + +// LayerType returns LayerTypeIPv6Routing. +func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing } + +func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error { + base, err := decodeIPv6ExtensionBase(data, p) + if err != nil { + return err + } + i := &IPv6Routing{ + ipv6ExtensionBase: base, + RoutingType: data[2], + SegmentsLeft: data[3], + Reserved: data[4:8], + } + switch i.RoutingType { + case 0: // Source routing + if (i.ActualLength-8)%16 != 0 { + return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", i.ActualLength) + } + for d := i.Contents[8:]; len(d) >= 16; d = d[16:] { + i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16])) + } + default: + return fmt.Errorf("Unknown IPv6 routing header type %d", i.RoutingType) + } + p.AddLayer(i) + return p.NextDecoder(i.NextHeader) +} + +// IPv6Fragment is the IPv6 fragment header, used for packet +// fragmentation/defragmentation. 
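SetJumboLength, shown above, fills in the jumbo TLV fields directly; the OptionAlignment value [2]uint8{4, 2} encodes the RFC 2675 "4n+2" placement rule that serializeIPv6HeaderTLVOptions satisfies with Pad1/PadN. A tiny sketch:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	var opt layers.IPv6HopByHopOption
	opt.SetJumboLength(100000)
	// Option type 0xC2, 4-byte big-endian payload length, 4n+2 alignment.
	fmt.Printf("type=%#x len=%d value=%d\n",
		opt.OptionType, opt.OptionLength, binary.BigEndian.Uint32(opt.OptionData))
}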
+type IPv6Fragment struct { + BaseLayer + NextHeader IPProtocol + // Reserved1 is bits [8-16), from least to most significant, 0-indexed + Reserved1 uint8 + FragmentOffset uint16 + // Reserved2 is bits [29-31), from least to most significant, 0-indexed + Reserved2 uint8 + MoreFragments bool + Identification uint32 +} + +// LayerType returns LayerTypeIPv6Fragment. +func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment } + +func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 8 { + p.SetTruncated() + return fmt.Errorf("Invalid ip6-fragment header. Length %d less than 8", len(data)) + } + i := &IPv6Fragment{ + BaseLayer: BaseLayer{data[:8], data[8:]}, + NextHeader: IPProtocol(data[0]), + Reserved1: data[1], + FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3, + Reserved2: data[3] & 0x6 >> 1, + MoreFragments: data[3]&0x1 != 0, + Identification: binary.BigEndian.Uint32(data[4:8]), + } + p.AddLayer(i) + return p.NextDecoder(gopacket.DecodeFragment) +} + +// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension. +type IPv6DestinationOption ipv6HeaderTLVOption + +// IPv6Destination is the IPv6 destination options header. +type IPv6Destination struct { + ipv6ExtensionBase + Options []*IPv6DestinationOption +} + +// LayerType returns LayerTypeIPv6Destination. +func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination } + +// DecodeFromBytes implementation according to gopacket.DecodingLayer +func (i *IPv6Destination) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + var err error + i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df) + if err != nil { + return err + } + offset := 2 + for offset < i.ActualLength { + opt := decodeIPv6HeaderTLVOption(data[offset:]) + i.Options = append(i.Options, (*IPv6DestinationOption)(opt)) + offset += opt.ActualLength + } + return nil +} + +func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error { + i := &IPv6Destination{} + err := i.DecodeFromBytes(data, p) + p.AddLayer(i) + if err != nil { + return err + } + return p.NextDecoder(i.NextHeader) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var bytes []byte + var err error + + o := make([]*ipv6HeaderTLVOption, 0, len(i.Options)) + for _, v := range i.Options { + o = append(o, (*ipv6HeaderTLVOption)(v)) + } + + l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths) + bytes, err = b.PrependBytes(l) + if err != nil { + return err + } + serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths) + + length := len(bytes) + 2 + if length%8 != 0 { + return errors.New("IPv6Destination actual length must be multiple of 8") + } + bytes, err = b.PrependBytes(2) + if err != nil { + return err + } + bytes[0] = uint8(i.NextHeader) + if opts.FixLengths { + i.HeaderLength = uint8((length / 8) - 1) + } + bytes[1] = uint8(i.HeaderLength) + return nil +} + +func checkIPv6Address(addr net.IP) error { + if len(addr) == net.IPv6len { + return nil + } + if len(addr) == net.IPv4len { + return errors.New("address is IPv4") + } + return fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv6len) +} + +// AddressTo16 ensures IPv6.SrcIP and IPv6.DstIP are actually IPv6 addresses (i.e. 
16 byte addresses) +func (ipv6 *IPv6) AddressTo16() error { + if err := checkIPv6Address(ipv6.SrcIP); err != nil { + return fmt.Errorf("Invalid source IPv6 address (%s)", err) + } + if err := checkIPv6Address(ipv6.DstIP); err != nil { + return fmt.Errorf("Invalid destination IPv6 address (%s)", err) + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ipsec.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ipsec.go new file mode 100644 index 00000000..19163fa3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ipsec.go @@ -0,0 +1,68 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// IPSecAH is the authentication header for IPv4/6 defined in +// http://tools.ietf.org/html/rfc2402 +type IPSecAH struct { + // While the auth header can be used for both IPv4 and v6, its format is that of + // an IPv6 extension (NextHeader, PayloadLength, etc...), so we use ipv6ExtensionBase + // to build it. + ipv6ExtensionBase + Reserved uint16 + SPI, Seq uint32 + AuthenticationData []byte +} + +// LayerType returns LayerTypeIPSecAH. +func (i *IPSecAH) LayerType() gopacket.LayerType { return LayerTypeIPSecAH } + +func decodeIPSecAH(data []byte, p gopacket.PacketBuilder) error { + i := &IPSecAH{ + ipv6ExtensionBase: ipv6ExtensionBase{ + NextHeader: IPProtocol(data[0]), + HeaderLength: data[1], + }, + Reserved: binary.BigEndian.Uint16(data[2:4]), + SPI: binary.BigEndian.Uint32(data[4:8]), + Seq: binary.BigEndian.Uint32(data[8:12]), + } + i.ActualLength = (int(i.HeaderLength) + 2) * 4 + i.AuthenticationData = data[12:i.ActualLength] + i.Contents = data[:i.ActualLength] + i.Payload = data[i.ActualLength:] + p.AddLayer(i) + return p.NextDecoder(i.NextHeader) +} + +// IPSecESP is the encapsulating security payload defined in +// http://tools.ietf.org/html/rfc2406 +type IPSecESP struct { + BaseLayer + SPI, Seq uint32 + // Encrypted contains the encrypted set of bytes sent in an ESP + Encrypted []byte +} + +// LayerType returns LayerTypeIPSecESP. +func (i *IPSecESP) LayerType() gopacket.LayerType { return LayerTypeIPSecESP } + +func decodeIPSecESP(data []byte, p gopacket.PacketBuilder) error { + i := &IPSecESP{ + BaseLayer: BaseLayer{data, nil}, + SPI: binary.BigEndian.Uint32(data[:4]), + Seq: binary.BigEndian.Uint32(data[4:8]), + Encrypted: data[8:], + } + p.AddLayer(i) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/layertypes.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/layertypes.go new file mode 100644 index 00000000..f66fd9b2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/layertypes.go @@ -0,0 +1,221 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
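The IPv6ExtensionSkipper defined above exists so that a gopacket.DecodingLayerParser can step over any chain of IPv6 extension headers without allocating a separate layer per extension. A minimal sketch of that fast path follows; the program, the empty `raw` byte slice, and the chosen Ethernet/IPv6/TCP layer stack are all assumptions for illustration, not part of the vendored code:

package main

import (
    "fmt"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers"
)

func main() {
    var (
        eth layers.Ethernet
        ip6 layers.IPv6
        ext layers.IPv6ExtensionSkipper // steps over hop-by-hop, routing, fragment, destination
        tcp layers.TCP
    )
    // The parser reuses the four layer values above for every packet
    // instead of allocating fresh layers each time.
    parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip6, &ext, &tcp)
    decoded := []gopacket.LayerType{}
    raw := []byte{ /* raw packet bytes captured elsewhere */ }
    if err := parser.DecodeLayers(raw, &decoded); err != nil {
        fmt.Println("decoding stopped:", err) // expected for the empty sketch input
    }
    for _, t := range decoded {
        fmt.Println("decoded:", t)
    }
}

Because CanDecode returns LayerClassIPv6Extension, a single skipper value handles every extension type grouped into that LayerClass in the registration table that follows.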
+ +package layers + +import ( + "github.com/google/gopacket" +) + +var ( + LayerTypeARP = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: gopacket.DecodeFunc(decodeARP)}) + LayerTypeCiscoDiscovery = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: gopacket.DecodeFunc(decodeCiscoDiscovery)}) + LayerTypeEthernetCTP = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: gopacket.DecodeFunc(decodeEthernetCTP)}) + LayerTypeEthernetCTPForwardData = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil}) + LayerTypeEthernetCTPReply = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil}) + LayerTypeDot1Q = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: gopacket.DecodeFunc(decodeDot1Q)}) + LayerTypeEtherIP = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: gopacket.DecodeFunc(decodeEtherIP)}) + LayerTypeEthernet = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: gopacket.DecodeFunc(decodeEthernet)}) + LayerTypeGRE = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: gopacket.DecodeFunc(decodeGRE)}) + LayerTypeICMPv4 = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: gopacket.DecodeFunc(decodeICMPv4)}) + LayerTypeIPv4 = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: gopacket.DecodeFunc(decodeIPv4)}) + LayerTypeIPv6 = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: gopacket.DecodeFunc(decodeIPv6)}) + LayerTypeLLC = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: gopacket.DecodeFunc(decodeLLC)}) + LayerTypeSNAP = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: gopacket.DecodeFunc(decodeSNAP)}) + LayerTypeMPLS = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: gopacket.DecodeFunc(decodeMPLS)}) + LayerTypePPP = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: gopacket.DecodeFunc(decodePPP)}) + LayerTypePPPoE = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: gopacket.DecodeFunc(decodePPPoE)}) + LayerTypeRUDP = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: gopacket.DecodeFunc(decodeRUDP)}) + LayerTypeSCTP = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: gopacket.DecodeFunc(decodeSCTP)}) + LayerTypeSCTPUnknownChunkType = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil}) + LayerTypeSCTPData = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil}) + LayerTypeSCTPInit = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil}) + LayerTypeSCTPSack = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil}) + LayerTypeSCTPHeartbeat = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil}) + LayerTypeSCTPError = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil}) + LayerTypeSCTPShutdown = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil}) + LayerTypeSCTPShutdownAck = 
gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil}) + LayerTypeSCTPCookieEcho = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil}) + LayerTypeSCTPEmptyLayer = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil}) + LayerTypeSCTPInitAck = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil}) + LayerTypeSCTPHeartbeatAck = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil}) + LayerTypeSCTPAbort = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil}) + LayerTypeSCTPShutdownComplete = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil}) + LayerTypeSCTPCookieAck = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil}) + LayerTypeTCP = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: gopacket.DecodeFunc(decodeTCP)}) + LayerTypeUDP = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: gopacket.DecodeFunc(decodeUDP)}) + LayerTypeIPv6HopByHop = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: gopacket.DecodeFunc(decodeIPv6HopByHop)}) + LayerTypeIPv6Routing = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: gopacket.DecodeFunc(decodeIPv6Routing)}) + LayerTypeIPv6Fragment = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: gopacket.DecodeFunc(decodeIPv6Fragment)}) + LayerTypeIPv6Destination = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: gopacket.DecodeFunc(decodeIPv6Destination)}) + LayerTypeIPSecAH = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: gopacket.DecodeFunc(decodeIPSecAH)}) + LayerTypeIPSecESP = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: gopacket.DecodeFunc(decodeIPSecESP)}) + LayerTypeUDPLite = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: gopacket.DecodeFunc(decodeUDPLite)}) + LayerTypeFDDI = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: gopacket.DecodeFunc(decodeFDDI)}) + LayerTypeLoopback = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: gopacket.DecodeFunc(decodeLoopback)}) + LayerTypeEAP = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: gopacket.DecodeFunc(decodeEAP)}) + LayerTypeEAPOL = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: gopacket.DecodeFunc(decodeEAPOL)}) + LayerTypeICMPv6 = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: gopacket.DecodeFunc(decodeICMPv6)}) + LayerTypeLinkLayerDiscovery = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: gopacket.DecodeFunc(decodeLinkLayerDiscovery)}) + LayerTypeCiscoDiscoveryInfo = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)}) + LayerTypeLinkLayerDiscoveryInfo = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil}) + LayerTypeNortelDiscovery = gopacket.RegisterLayerType(61, 
gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: gopacket.DecodeFunc(decodeNortelDiscovery)}) + LayerTypeIGMP = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: gopacket.DecodeFunc(decodeIGMP)}) + LayerTypePFLog = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: gopacket.DecodeFunc(decodePFLog)}) + LayerTypeRadioTap = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: gopacket.DecodeFunc(decodeRadioTap)}) + LayerTypeDot11 = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: gopacket.DecodeFunc(decodeDot11)}) + LayerTypeDot11Ctrl = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: gopacket.DecodeFunc(decodeDot11Ctrl)}) + LayerTypeDot11Data = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: gopacket.DecodeFunc(decodeDot11Data)}) + LayerTypeDot11DataCFAck = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)}) + LayerTypeDot11DataCFPoll = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)}) + LayerTypeDot11DataCFAckPoll = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)}) + LayerTypeDot11DataNull = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: gopacket.DecodeFunc(decodeDot11DataNull)}) + LayerTypeDot11DataCFAckNoData = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)}) + LayerTypeDot11DataCFPollNoData = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)}) + LayerTypeDot11DataCFAckPollNoData = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)}) + LayerTypeDot11DataQOSData = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSData)}) + LayerTypeDot11DataQOSDataCFAck = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)}) + LayerTypeDot11DataQOSDataCFPoll = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)}) + LayerTypeDot11DataQOSDataCFAckPoll = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)}) + LayerTypeDot11DataQOSNull = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSNull)}) + LayerTypeDot11DataQOSCFPollNoData = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)}) + LayerTypeDot11DataQOSCFAckPollNoData = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)}) + LayerTypeDot11InformationElement = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", 
Decoder: gopacket.DecodeFunc(decodeDot11InformationElement)}) + LayerTypeDot11CtrlCTS = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCTS)}) + LayerTypeDot11CtrlRTS = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlRTS)}) + LayerTypeDot11CtrlBlockAckReq = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)}) + LayerTypeDot11CtrlBlockAck = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAck)}) + LayerTypeDot11CtrlPowersavePoll = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)}) + LayerTypeDot11CtrlAck = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlAck)}) + LayerTypeDot11CtrlCFEnd = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEnd)}) + LayerTypeDot11CtrlCFEndAck = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)}) + LayerTypeDot11MgmtAssociationReq = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)}) + LayerTypeDot11MgmtAssociationResp = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)}) + LayerTypeDot11MgmtReassociationReq = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)}) + LayerTypeDot11MgmtReassociationResp = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)}) + LayerTypeDot11MgmtProbeReq = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeReq)}) + LayerTypeDot11MgmtProbeResp = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeResp)}) + LayerTypeDot11MgmtMeasurementPilot = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)}) + LayerTypeDot11MgmtBeacon = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: gopacket.DecodeFunc(decodeDot11MgmtBeacon)}) + LayerTypeDot11MgmtATIM = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: gopacket.DecodeFunc(decodeDot11MgmtATIM)}) + LayerTypeDot11MgmtDisassociation = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDisassociation)}) + LayerTypeDot11MgmtAuthentication = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAuthentication)}) + LayerTypeDot11MgmtDeauthentication = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: 
"Dot11MgmtDeauthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)}) + LayerTypeDot11MgmtAction = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAction)}) + LayerTypeDot11MgmtActionNoAck = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck)}) + LayerTypeDot11MgmtArubaWLAN = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)}) + LayerTypeDot11WEP = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: gopacket.DecodeFunc(decodeDot11WEP)}) + LayerTypeDNS = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: gopacket.DecodeFunc(decodeDNS)}) + LayerTypeUSB = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: gopacket.DecodeFunc(decodeUSB)}) + LayerTypeUSBRequestBlockSetup = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: gopacket.DecodeFunc(decodeUSBRequestBlockSetup)}) + LayerTypeUSBControl = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: gopacket.DecodeFunc(decodeUSBControl)}) + LayerTypeUSBInterrupt = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: gopacket.DecodeFunc(decodeUSBInterrupt)}) + LayerTypeUSBBulk = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: gopacket.DecodeFunc(decodeUSBBulk)}) + LayerTypeLinuxSLL = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: gopacket.DecodeFunc(decodeLinuxSLL)}) + LayerTypeSFlow = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: gopacket.DecodeFunc(decodeSFlow)}) + LayerTypePrismHeader = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: gopacket.DecodeFunc(decodePrismHeader)}) + LayerTypeVXLAN = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: gopacket.DecodeFunc(decodeVXLAN)}) + LayerTypeNTP = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: gopacket.DecodeFunc(decodeNTP)}) + LayerTypeDHCPv4 = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: gopacket.DecodeFunc(decodeDHCPv4)}) + LayerTypeVRRP = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: gopacket.DecodeFunc(decodeVRRP)}) + LayerTypeGeneve = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: gopacket.DecodeFunc(decodeGeneve)}) + LayerTypeSTP = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: gopacket.DecodeFunc(decodeSTP)}) + LayerTypeBFD = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: gopacket.DecodeFunc(decodeBFD)}) + LayerTypeOSPF = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: gopacket.DecodeFunc(decodeOSPF)}) + LayerTypeICMPv6RouterSolicitation = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)}) + LayerTypeICMPv6RouterAdvertisement = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: 
gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)}) + LayerTypeICMPv6NeighborSolicitation = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)}) + LayerTypeICMPv6NeighborAdvertisement = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)}) + LayerTypeICMPv6Redirect = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: gopacket.DecodeFunc(decodeICMPv6Redirect)}) + LayerTypeGTPv1U = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: gopacket.DecodeFunc(decodeGTPv1u)}) + LayerTypeEAPOLKey = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: gopacket.DecodeFunc(decodeEAPOLKey)}) + LayerTypeLCM = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: gopacket.DecodeFunc(decodeLCM)}) + LayerTypeICMPv6Echo = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: gopacket.DecodeFunc(decodeICMPv6Echo)}) + LayerTypeSIP = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: gopacket.DecodeFunc(decodeSIP)}) + LayerTypeDHCPv6 = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: gopacket.DecodeFunc(decodeDHCPv6)}) + LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)}) + LayerTypeMLDv1MulticastListenerDone = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)}) + LayerTypeMLDv1MulticastListenerQuery = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)}) + LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)}) + LayerTypeMLDv2MulticastListenerQuery = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)}) + LayerTypeTLS = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: gopacket.DecodeFunc(decodeTLS)}) + LayerTypeModbusTCP = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: gopacket.DecodeFunc(decodeModbusTCP)}) + LayerTypeRMCP = gopacket.RegisterLayerType(142, gopacket.LayerTypeMetadata{Name: "RMCP", Decoder: gopacket.DecodeFunc(decodeRMCP)}) + LayerTypeASF = gopacket.RegisterLayerType(143, gopacket.LayerTypeMetadata{Name: "ASF", Decoder: gopacket.DecodeFunc(decodeASF)}) + LayerTypeASFPresencePong = gopacket.RegisterLayerType(144, gopacket.LayerTypeMetadata{Name: "ASFPresencePong", Decoder: gopacket.DecodeFunc(decodeASFPresencePong)}) +) + +var ( + // LayerClassIPNetwork contains TCP/IP network layer types. + LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeIPv4, + LayerTypeIPv6, + }) + // LayerClassIPTransport contains TCP/IP transport layer types. 
+ LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeTCP, + LayerTypeUDP, + LayerTypeSCTP, + }) + // LayerClassIPControl contains TCP/IP control protocols. + LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeICMPv4, + LayerTypeICMPv6, + }) + // LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP + // layer). + LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeSCTPUnknownChunkType, + LayerTypeSCTPData, + LayerTypeSCTPInit, + LayerTypeSCTPSack, + LayerTypeSCTPHeartbeat, + LayerTypeSCTPError, + LayerTypeSCTPShutdown, + LayerTypeSCTPShutdownAck, + LayerTypeSCTPCookieEcho, + LayerTypeSCTPEmptyLayer, + LayerTypeSCTPInitAck, + LayerTypeSCTPHeartbeatAck, + LayerTypeSCTPAbort, + LayerTypeSCTPShutdownComplete, + LayerTypeSCTPCookieAck, + }) + // LayerClassIPv6Extension contains IPv6 extension headers. + LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeIPv6HopByHop, + LayerTypeIPv6Routing, + LayerTypeIPv6Fragment, + LayerTypeIPv6Destination, + }) + LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeIPSecAH, + LayerTypeIPSecESP, + }) + // LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol + // messages. + LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeICMPv6RouterSolicitation, + LayerTypeICMPv6RouterAdvertisement, + LayerTypeICMPv6NeighborSolicitation, + LayerTypeICMPv6NeighborAdvertisement, + LayerTypeICMPv6Redirect, + }) + // LayerClassMLDv1 contains multicast listener discovery protocol + LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeMLDv1MulticastListenerQuery, + LayerTypeMLDv1MulticastListenerReport, + LayerTypeMLDv1MulticastListenerDone, + }) + // LayerClassMLDv2 contains multicast listener discovery protocol v2 + LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{ + LayerTypeMLDv1MulticastListenerReport, + LayerTypeMLDv1MulticastListenerDone, + LayerTypeMLDv2MulticastListenerReport, + LayerTypeMLDv1MulticastListenerQuery, + LayerTypeMLDv2MulticastListenerQuery, + }) +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/lcm.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/lcm.go new file mode 100644 index 00000000..5fe9fa54 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/lcm.go @@ -0,0 +1,213 @@ +// Copyright 2018 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + + "github.com/google/gopacket" +) + +const ( + // LCMShortHeaderMagic is the LCM small message header magic number + LCMShortHeaderMagic uint32 = 0x4c433032 + // LCMFragmentedHeaderMagic is the LCM fragmented message header magic number + LCMFragmentedHeaderMagic uint32 = 0x4c433033 +) + +// LCM (Lightweight Communications and Marshalling) is a set of libraries and +// tools for message passing and data marshalling, targeted at real-time systems +// where high-bandwidth and low latency are critical. It provides a +// publish/subscribe message passing model and automatic +// marshalling/unmarshalling code generation with bindings for applications in a +// variety of programming languages. 
+// +// References +// https://lcm-proj.github.io/ +// https://github.com/lcm-proj/lcm +type LCM struct { + // Common (short & fragmented header) fields + Magic uint32 + SequenceNumber uint32 + // Fragmented header only fields + PayloadSize uint32 + FragmentOffset uint32 + FragmentNumber uint16 + TotalFragments uint16 + // Common field + ChannelName string + // Gopacket helper fields + Fragmented bool + fingerprint LCMFingerprint + contents []byte + payload []byte +} + +// LCMFingerprint is the type of an LCM fingerprint. +type LCMFingerprint uint64 + +var ( + // lcmLayerTypes contains a map of all LCM fingerprints that we support and + // their LayerType + lcmLayerTypes = map[LCMFingerprint]gopacket.LayerType{} + layerTypeIndex = 1001 +) + +// RegisterLCMLayerType allows users to register decoders for the underlying +// LCM payload. This is done based on the fingerprint that every LCM message +// contains and which identifies it uniquely. If num is not the zero value it +// will be used when registering with RegisterLayerType towards gopacket, +// otherwise an incremental value starting from 1001 will be used. +func RegisterLCMLayerType(num int, name string, fingerprint LCMFingerprint, + decoder gopacket.Decoder) gopacket.LayerType { + metadata := gopacket.LayerTypeMetadata{Name: name, Decoder: decoder} + + if num == 0 { + num = layerTypeIndex + layerTypeIndex++ + } + + lcmLayerTypes[fingerprint] = gopacket.RegisterLayerType(num, metadata) + + return lcmLayerTypes[fingerprint] +} + +// SupportedLCMFingerprints returns a slice of all LCM fingerprints that have +// been registered so far. +func SupportedLCMFingerprints() []LCMFingerprint { + fingerprints := make([]LCMFingerprint, 0, len(lcmLayerTypes)) + for fp := range lcmLayerTypes { + fingerprints = append(fingerprints, fp) + } + return fingerprints +} + +// GetLCMLayerType returns the underlying LCM message's LayerType. +// This LayerType has to be registered by using RegisterLCMLayerType. +func GetLCMLayerType(fingerprint LCMFingerprint) gopacket.LayerType { + layerType, ok := lcmLayerTypes[fingerprint] + if !ok { + return gopacket.LayerTypePayload + } + + return layerType +} + +func decodeLCM(data []byte, p gopacket.PacketBuilder) error { + lcm := &LCM{} + + err := lcm.DecodeFromBytes(data, p) + if err != nil { + return err + } + + p.AddLayer(lcm) + p.SetApplicationLayer(lcm) + + return p.NextDecoder(lcm.NextLayerType()) +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (lcm *LCM) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + offset := 0 + + lcm.Magic = binary.BigEndian.Uint32(data[offset:4]) + offset += 4 + + if lcm.Magic != LCMShortHeaderMagic && lcm.Magic != LCMFragmentedHeaderMagic { + return fmt.Errorf("Received LCM header magic %v does not match known "+ + "LCM magic numbers. Dropping packet.", lcm.Magic) + } + + lcm.SequenceNumber = binary.BigEndian.Uint32(data[offset:8]) + offset += 4 + + if lcm.Magic == LCMFragmentedHeaderMagic { + lcm.Fragmented = true + + lcm.PayloadSize = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + + lcm.FragmentOffset = binary.BigEndian.Uint32(data[offset : offset+4]) + offset += 4 + + lcm.FragmentNumber = binary.BigEndian.Uint16(data[offset : offset+2]) + offset += 2 + + lcm.TotalFragments = binary.BigEndian.Uint16(data[offset : offset+2]) + offset += 2 + } else { + lcm.Fragmented = false + } + + if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) { + buffer := make([]byte, 0) + for _, b := range data[offset:] { + offset++ + + if b == 0 { + break + } + + buffer = append(buffer, b) + } + + lcm.ChannelName = string(buffer) + } + + lcm.fingerprint = LCMFingerprint( + binary.BigEndian.Uint64(data[offset : offset+8])) + + lcm.contents = data[:offset] + lcm.payload = data[offset:] + + return nil +} + +// CanDecode returns a set of layers that LCM objects can decode. +// As LCM objects can only decode the LCM layer, we just return that layer. +func (lcm LCM) CanDecode() gopacket.LayerClass { + return LayerTypeLCM +} + +// NextLayerType specifies the LCM payload layer type following this header. +// As LCM packets are serialized structs with unique fingerprints for each unique +// combination of data types, lookup of the correct layer type is based on that +// fingerprint. +func (lcm LCM) NextLayerType() gopacket.LayerType { + if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) { + return GetLCMLayerType(lcm.fingerprint) + } + + return gopacket.LayerTypeFragment +} + +// LayerType returns LayerTypeLCM. +func (lcm LCM) LayerType() gopacket.LayerType { + return LayerTypeLCM +} + +// LayerContents returns the contents of the LCM header. +func (lcm LCM) LayerContents() []byte { + return lcm.contents +} + +// LayerPayload returns the payload following this LCM header. +func (lcm LCM) LayerPayload() []byte { + return lcm.payload +} + +// Payload returns the payload following this LCM header. +func (lcm LCM) Payload() []byte { + return lcm.LayerPayload() +} + +// Fingerprint returns the LCM fingerprint of the underlying message. +func (lcm LCM) Fingerprint() LCMFingerprint { + return lcm.fingerprint +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/linux_sll.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/linux_sll.go new file mode 100644 index 00000000..85a4f8bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/linux_sll.go @@ -0,0 +1,98 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree.
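RegisterLCMLayerType and GetLCMLayerType above are the extension point for application-defined LCM messages. A minimal sketch of registering a custom decoder follows; the fingerprint value, the "ExampleLCMMessage" name, and the no-op decoder body are invented for illustration (real fingerprints come from the LCM code generator for a concrete message type):

package main

import (
    "fmt"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers"
)

// exampleFingerprint is a hypothetical fingerprint, not a real message's value.
const exampleFingerprint layers.LCMFingerprint = 0x1122334455667788

func main() {
    // Passing num == 0 asks the library to assign the next free
    // layer-type number, starting from 1001.
    lt := layers.RegisterLCMLayerType(0, "ExampleLCMMessage", exampleFingerprint,
        gopacket.DecodeFunc(func(data []byte, p gopacket.PacketBuilder) error {
            // A real decoder would unmarshal data into the generated
            // message struct and add it via p.AddLayer.
            return nil
        }))
    fmt.Println("registered as:", lt)
    // LCM.NextLayerType resolves unfragmented payloads through the same table;
    // fragmented payloads are routed to gopacket.LayerTypeFragment instead.
    fmt.Println("lookup matches:", layers.GetLCMLayerType(exampleFingerprint) == lt)
}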
+ +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "net" + + "github.com/google/gopacket" +) + +type LinuxSLLPacketType uint16 + +const ( + LinuxSLLPacketTypeHost LinuxSLLPacketType = 0 // To us + LinuxSLLPacketTypeBroadcast LinuxSLLPacketType = 1 // To all + LinuxSLLPacketTypeMulticast LinuxSLLPacketType = 2 // To group + LinuxSLLPacketTypeOtherhost LinuxSLLPacketType = 3 // To someone else + LinuxSLLPacketTypeOutgoing LinuxSLLPacketType = 4 // Outgoing of any type + // These ones are invisible by user level + LinuxSLLPacketTypeLoopback LinuxSLLPacketType = 5 // MC/BRD frame looped back + LinuxSLLPacketTypeFastroute LinuxSLLPacketType = 6 // Fastrouted frame +) + +func (l LinuxSLLPacketType) String() string { + switch l { + case LinuxSLLPacketTypeHost: + return "host" + case LinuxSLLPacketTypeBroadcast: + return "broadcast" + case LinuxSLLPacketTypeMulticast: + return "multicast" + case LinuxSLLPacketTypeOtherhost: + return "otherhost" + case LinuxSLLPacketTypeOutgoing: + return "outgoing" + case LinuxSLLPacketTypeLoopback: + return "loopback" + case LinuxSLLPacketTypeFastroute: + return "fastroute" + } + return fmt.Sprintf("Unknown(%d)", int(l)) +} + +type LinuxSLL struct { + BaseLayer + PacketType LinuxSLLPacketType + AddrLen uint16 + Addr net.HardwareAddr + EthernetType EthernetType + AddrType uint16 +} + +// LayerType returns LayerTypeLinuxSLL. +func (sll *LinuxSLL) LayerType() gopacket.LayerType { return LayerTypeLinuxSLL } + +func (sll *LinuxSLL) CanDecode() gopacket.LayerClass { + return LayerTypeLinuxSLL +} + +func (sll *LinuxSLL) LinkFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointMAC, sll.Addr, nil) +} + +func (sll *LinuxSLL) NextLayerType() gopacket.LayerType { + return sll.EthernetType.LayerType() +} + +func (sll *LinuxSLL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 16 { + return errors.New("Linux SLL packet too small") + } + sll.PacketType = LinuxSLLPacketType(binary.BigEndian.Uint16(data[0:2])) + sll.AddrType = binary.BigEndian.Uint16(data[2:4]) + sll.AddrLen = binary.BigEndian.Uint16(data[4:6]) + + sll.Addr = net.HardwareAddr(data[6 : sll.AddrLen+6]) + sll.EthernetType = EthernetType(binary.BigEndian.Uint16(data[14:16])) + sll.BaseLayer = BaseLayer{data[:16], data[16:]} + + return nil +} + +func decodeLinuxSLL(data []byte, p gopacket.PacketBuilder) error { + sll := &LinuxSLL{} + if err := sll.DecodeFromBytes(data, p); err != nil { + return err + } + p.AddLayer(sll) + p.SetLinkLayer(sll) + return p.NextDecoder(sll.EthernetType) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/llc.go new file mode 100644 index 00000000..cad68036 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/llc.go @@ -0,0 +1,193 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +// LLC is the layer used for 802.2 Logical Link Control headers. 
+// See http://standards.ieee.org/getieee802/download/802.2-1998.pdf +type LLC struct { + BaseLayer + DSAP uint8 + IG bool // true means group, false means individual + SSAP uint8 + CR bool // true means response, false means command + Control uint16 +} + +// LayerType returns gopacket.LayerTypeLLC. +func (l *LLC) LayerType() gopacket.LayerType { return LayerTypeLLC } + +// DecodeFromBytes decodes the given bytes into this layer. +func (l *LLC) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 3 { + return errors.New("LLC header too small") + } + l.DSAP = data[0] & 0xFE + l.IG = data[0]&0x1 != 0 + l.SSAP = data[1] & 0xFE + l.CR = data[1]&0x1 != 0 + l.Control = uint16(data[2]) + + if l.Control&0x1 == 0 || l.Control&0x3 == 0x1 { + if len(data) < 4 { + return errors.New("LLC header too small") + } + l.Control = l.Control<<8 | uint16(data[3]) + l.Contents = data[:4] + l.Payload = data[4:] + } else { + l.Contents = data[:3] + l.Payload = data[3:] + } + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (l *LLC) CanDecode() gopacket.LayerClass { + return LayerTypeLLC +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (l *LLC) NextLayerType() gopacket.LayerType { + switch { + case l.DSAP == 0xAA && l.SSAP == 0xAA: + return LayerTypeSNAP + case l.DSAP == 0x42 && l.SSAP == 0x42: + return LayerTypeSTP + } + return gopacket.LayerTypeZero // Not implemented +} + +// SNAP is used inside LLC. See +// http://standards.ieee.org/getieee802/download/802-2001.pdf. +// From http://en.wikipedia.org/wiki/Subnetwork_Access_Protocol: +// "[T]he Subnetwork Access Protocol (SNAP) is a mechanism for multiplexing, +// on networks using IEEE 802.2 LLC, more protocols than can be distinguished +// by the 8-bit 802.2 Service Access Point (SAP) fields." +type SNAP struct { + BaseLayer + OrganizationalCode []byte + Type EthernetType +} + +// LayerType returns gopacket.LayerTypeSNAP. +func (s *SNAP) LayerType() gopacket.LayerType { return LayerTypeSNAP } + +// DecodeFromBytes decodes the given bytes into this layer. +func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 5 { + return errors.New("SNAP header too small") + } + s.OrganizationalCode = data[:3] + s.Type = EthernetType(binary.BigEndian.Uint16(data[3:5])) + s.BaseLayer = BaseLayer{data[:5], data[5:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (s *SNAP) CanDecode() gopacket.LayerClass { + return LayerTypeSNAP +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (s *SNAP) NextLayerType() gopacket.LayerType { + // See BUG(gconnel) in decodeSNAP + return s.Type.LayerType() +} + +func decodeLLC(data []byte, p gopacket.PacketBuilder) error { + l := &LLC{} + err := l.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(l) + return p.NextDecoder(l.NextLayerType()) +} + +func decodeSNAP(data []byte, p gopacket.PacketBuilder) error { + s := &SNAP{} + err := s.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(s) + // BUG(gconnell): When decoding SNAP, we treat the SNAP type as an Ethernet + // type. This may not actually be an ethernet type in all cases, + // depending on the organizational code. Right now, we don't check. 
+ return p.NextDecoder(s.Type) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (l *LLC) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var igFlag, crFlag byte + var length int + + if l.Control&0xFF00 != 0 { + length = 4 + } else { + length = 3 + } + + if l.DSAP&0x1 != 0 { + return errors.New("DSAP value invalid, should not include IG flag bit") + } + + if l.SSAP&0x1 != 0 { + return errors.New("SSAP value invalid, should not include CR flag bit") + } + + if buf, err := b.PrependBytes(length); err != nil { + return err + } else { + igFlag = 0 + if l.IG { + igFlag = 0x1 + } + + crFlag = 0 + if l.CR { + crFlag = 0x1 + } + + buf[0] = l.DSAP + igFlag + buf[1] = l.SSAP + crFlag + + if length == 4 { + buf[2] = uint8(l.Control >> 8) + buf[3] = uint8(l.Control) + } else { + buf[2] = uint8(l.Control) + } + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (s *SNAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if buf, err := b.PrependBytes(5); err != nil { + return err + } else { + buf[0] = s.OrganizationalCode[0] + buf[1] = s.OrganizationalCode[1] + buf[2] = s.OrganizationalCode[2] + binary.BigEndian.PutUint16(buf[3:5], uint16(s.Type)) + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/lldp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/lldp.go new file mode 100644 index 00000000..e1282603 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/lldp.go @@ -0,0 +1,1585 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// LLDPTLVType is the type of each TLV value in a LinkLayerDiscovery packet. +type LLDPTLVType byte + +const ( + LLDPTLVEnd LLDPTLVType = 0 + LLDPTLVChassisID LLDPTLVType = 1 + LLDPTLVPortID LLDPTLVType = 2 + LLDPTLVTTL LLDPTLVType = 3 + LLDPTLVPortDescription LLDPTLVType = 4 + LLDPTLVSysName LLDPTLVType = 5 + LLDPTLVSysDescription LLDPTLVType = 6 + LLDPTLVSysCapabilities LLDPTLVType = 7 + LLDPTLVMgmtAddress LLDPTLVType = 8 + LLDPTLVOrgSpecific LLDPTLVType = 127 +) + +// LinkLayerDiscoveryValue is a TLV value inside a LinkLayerDiscovery packet layer. 
+type LinkLayerDiscoveryValue struct { + Type LLDPTLVType + Length uint16 + Value []byte +} + +func (c *LinkLayerDiscoveryValue) len() int { + return 0 +} + +// LLDPChassisIDSubType specifies the value type for a single LLDPChassisID.ID +type LLDPChassisIDSubType byte + +// LLDP Chassis Types +const ( + LLDPChassisIDSubTypeReserved LLDPChassisIDSubType = 0 + LLDPChassisIDSubTypeChassisComp LLDPChassisIDSubType = 1 + LLDPChassisIDSubtypeIfaceAlias LLDPChassisIDSubType = 2 + LLDPChassisIDSubTypePortComp LLDPChassisIDSubType = 3 + LLDPChassisIDSubTypeMACAddr LLDPChassisIDSubType = 4 + LLDPChassisIDSubTypeNetworkAddr LLDPChassisIDSubType = 5 + LLDPChassisIDSubtypeIfaceName LLDPChassisIDSubType = 6 + LLDPChassisIDSubTypeLocal LLDPChassisIDSubType = 7 +) + +type LLDPChassisID struct { + Subtype LLDPChassisIDSubType + ID []byte +} + +func (c *LLDPChassisID) serialize() []byte { + + var buf = make([]byte, c.serializedLen()) + idLen := uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1) // TLV type takes the top 7 bits, length the low 9 bits; +1 to length for the subtype byte + binary.BigEndian.PutUint16(buf[0:2], idLen) + buf[2] = byte(c.Subtype) + copy(buf[3:], c.ID) + return buf +} + +func (c *LLDPChassisID) serializedLen() int { + return len(c.ID) + 3 // +2 for type and length, +1 for subtype +} + +// LLDPPortIDSubType specifies the value type for a single LLDPPortID.ID +type LLDPPortIDSubType byte + +// LLDP PortID types +const ( + LLDPPortIDSubtypeReserved LLDPPortIDSubType = 0 + LLDPPortIDSubtypeIfaceAlias LLDPPortIDSubType = 1 + LLDPPortIDSubtypePortComp LLDPPortIDSubType = 2 + LLDPPortIDSubtypeMACAddr LLDPPortIDSubType = 3 + LLDPPortIDSubtypeNetworkAddr LLDPPortIDSubType = 4 + LLDPPortIDSubtypeIfaceName LLDPPortIDSubType = 5 + LLDPPortIDSubtypeAgentCircuitID LLDPPortIDSubType = 6 + LLDPPortIDSubtypeLocal LLDPPortIDSubType = 7 +) + +type LLDPPortID struct { + Subtype LLDPPortIDSubType + ID []byte +} + +func (c *LLDPPortID) serialize() []byte { + + var buf = make([]byte, c.serializedLen()) + idLen := uint16(LLDPTLVPortID)<<9 | uint16(len(c.ID)+1) // TLV type takes the top 7 bits, length the low 9 bits; +1 to length for the subtype byte + binary.BigEndian.PutUint16(buf[0:2], idLen) + buf[2] = byte(c.Subtype) + copy(buf[3:], c.ID) + return buf +} + +func (c *LLDPPortID) serializedLen() int { + return len(c.ID) + 3 // +2 for type and length, +1 for subtype +} + +// LinkLayerDiscovery is a packet layer containing the Link Layer Discovery Protocol. +// See http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf +// ChassisID, PortID and TTL are mandatory TLVs.
Other values can be decoded +// with DecodeValues() +type LinkLayerDiscovery struct { + BaseLayer + ChassisID LLDPChassisID + PortID LLDPPortID + TTL uint16 + Values []LinkLayerDiscoveryValue +} + +type IEEEOUI uint32 + +// http://standards.ieee.org/develop/regauth/oui/oui.txt +const ( + IEEEOUI8021 IEEEOUI = 0x0080c2 + IEEEOUI8023 IEEEOUI = 0x00120f + IEEEOUI80211 IEEEOUI = 0x000fac + IEEEOUI8021Qbg IEEEOUI = 0x0013BF + IEEEOUICisco2 IEEEOUI = 0x000142 + IEEEOUIMedia IEEEOUI = 0x0012bb // TR-41 + IEEEOUIProfinet IEEEOUI = 0x000ecf + IEEEOUIDCBX IEEEOUI = 0x001b21 +) + +// LLDPOrgSpecificTLV is an Organisation-specific TLV +type LLDPOrgSpecificTLV struct { + OUI IEEEOUI + SubType uint8 + Info []byte +} + +// LLDPCapabilities Types +const ( + LLDPCapsOther uint16 = 1 << 0 + LLDPCapsRepeater uint16 = 1 << 1 + LLDPCapsBridge uint16 = 1 << 2 + LLDPCapsWLANAP uint16 = 1 << 3 + LLDPCapsRouter uint16 = 1 << 4 + LLDPCapsPhone uint16 = 1 << 5 + LLDPCapsDocSis uint16 = 1 << 6 + LLDPCapsStationOnly uint16 = 1 << 7 + LLDPCapsCVLAN uint16 = 1 << 8 + LLDPCapsSVLAN uint16 = 1 << 9 + LLDPCapsTmpr uint16 = 1 << 10 +) + +// LLDPCapabilities represents the capabilities of a device +type LLDPCapabilities struct { + Other bool + Repeater bool + Bridge bool + WLANAP bool + Router bool + Phone bool + DocSis bool + StationOnly bool + CVLAN bool + SVLAN bool + TMPR bool +} + +type LLDPSysCapabilities struct { + SystemCap LLDPCapabilities + EnabledCap LLDPCapabilities +} + +type IANAAddressFamily byte + +// LLDP Management Address Subtypes +// http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml +const ( + IANAAddressFamilyReserved IANAAddressFamily = 0 + IANAAddressFamilyIPV4 IANAAddressFamily = 1 + IANAAddressFamilyIPV6 IANAAddressFamily = 2 + IANAAddressFamilyNSAP IANAAddressFamily = 3 + IANAAddressFamilyHDLC IANAAddressFamily = 4 + IANAAddressFamilyBBN1822 IANAAddressFamily = 5 + IANAAddressFamily802 IANAAddressFamily = 6 + IANAAddressFamilyE163 IANAAddressFamily = 7 + IANAAddressFamilyE164 IANAAddressFamily = 8 + IANAAddressFamilyF69 IANAAddressFamily = 9 + IANAAddressFamilyX121 IANAAddressFamily = 10 + IANAAddressFamilyIPX IANAAddressFamily = 11 + IANAAddressFamilyAtalk IANAAddressFamily = 12 + IANAAddressFamilyDecnet IANAAddressFamily = 13 + IANAAddressFamilyBanyan IANAAddressFamily = 14 + IANAAddressFamilyE164NSAP IANAAddressFamily = 15 + IANAAddressFamilyDNS IANAAddressFamily = 16 + IANAAddressFamilyDistname IANAAddressFamily = 17 + IANAAddressFamilyASNumber IANAAddressFamily = 18 + IANAAddressFamilyXTPIPV4 IANAAddressFamily = 19 + IANAAddressFamilyXTPIPV6 IANAAddressFamily = 20 + IANAAddressFamilyXTP IANAAddressFamily = 21 + IANAAddressFamilyFcWWPN IANAAddressFamily = 22 + IANAAddressFamilyFcWWNN IANAAddressFamily = 23 + IANAAddressFamilyGWID IANAAddressFamily = 24 + IANAAddressFamilyL2VPN IANAAddressFamily = 25 +) + +type LLDPInterfaceSubtype byte + +// LLDP Interface Subtypes +const ( + LLDPInterfaceSubtypeUnknown LLDPInterfaceSubtype = 1 + LLDPInterfaceSubtypeifIndex LLDPInterfaceSubtype = 2 + LLDPInterfaceSubtypeSysPort LLDPInterfaceSubtype = 3 +) + +type LLDPMgmtAddress struct { + Subtype IANAAddressFamily + Address []byte + InterfaceSubtype LLDPInterfaceSubtype + InterfaceNumber uint32 + OID string +} + +// LinkLayerDiscoveryInfo represents the decoded details for a set of LinkLayerDiscoveryValues +// Organisation-specific TLV's can be decoded using the various Decode() methods +type LinkLayerDiscoveryInfo struct { + BaseLayer + PortDescription string + SysName string + 
SysDescription string + SysCapabilities LLDPSysCapabilities + MgmtAddress LLDPMgmtAddress + OrgTLVs []LLDPOrgSpecificTLV // Private TLVs + Unknown []LinkLayerDiscoveryValue // undecoded TLVs +} + +/// IEEE 802.1 TLV Subtypes +const ( + LLDP8021SubtypePortVLANID uint8 = 1 + LLDP8021SubtypeProtocolVLANID uint8 = 2 + LLDP8021SubtypeVLANName uint8 = 3 + LLDP8021SubtypeProtocolIdentity uint8 = 4 + LLDP8021SubtypeVDIUsageDigest uint8 = 5 + LLDP8021SubtypeManagementVID uint8 = 6 + LLDP8021SubtypeLinkAggregation uint8 = 7 +) + +// VLAN Port Protocol ID options +const ( + LLDPProtocolVLANIDCapability byte = 1 << 1 + LLDPProtocolVLANIDStatus byte = 1 << 2 +) + +type PortProtocolVLANID struct { + Supported bool + Enabled bool + ID uint16 +} + +type VLANName struct { + ID uint16 + Name string +} + +type ProtocolIdentity []byte + +// LACP options +const ( + LLDPAggregationCapability byte = 1 << 0 + LLDPAggregationStatus byte = 1 << 1 +) + +// IEEE 802 Link Aggregation parameters +type LLDPLinkAggregation struct { + Supported bool + Enabled bool + PortID uint32 +} + +// LLDPInfo8021 represents the information carried in 802.1 Org-specific TLVs +type LLDPInfo8021 struct { + PVID uint16 + PPVIDs []PortProtocolVLANID + VLANNames []VLANName + ProtocolIdentities []ProtocolIdentity + VIDUsageDigest uint32 + ManagementVID uint16 + LinkAggregation LLDPLinkAggregation +} + +// IEEE 802.3 TLV Subtypes +const ( + LLDP8023SubtypeMACPHY uint8 = 1 + LLDP8023SubtypeMDIPower uint8 = 2 + LLDP8023SubtypeLinkAggregation uint8 = 3 + LLDP8023SubtypeMTU uint8 = 4 +) + +// MACPHY options +const ( + LLDPMACPHYCapability byte = 1 << 0 + LLDPMACPHYStatus byte = 1 << 1 +) + +// From IANA-MAU-MIB (introduced by RFC 4836) - dot3MauType +const ( + LLDPMAUTypeUnknown uint16 = 0 + LLDPMAUTypeAUI uint16 = 1 + LLDPMAUType10Base5 uint16 = 2 + LLDPMAUTypeFOIRL uint16 = 3 + LLDPMAUType10Base2 uint16 = 4 + LLDPMAUType10BaseT uint16 = 5 + LLDPMAUType10BaseFP uint16 = 6 + LLDPMAUType10BaseFB uint16 = 7 + LLDPMAUType10BaseFL uint16 = 8 + LLDPMAUType10BROAD36 uint16 = 9 + LLDPMAUType10BaseT_HD uint16 = 10 + LLDPMAUType10BaseT_FD uint16 = 11 + LLDPMAUType10BaseFL_HD uint16 = 12 + LLDPMAUType10BaseFL_FD uint16 = 13 + LLDPMAUType100BaseT4 uint16 = 14 + LLDPMAUType100BaseTX_HD uint16 = 15 + LLDPMAUType100BaseTX_FD uint16 = 16 + LLDPMAUType100BaseFX_HD uint16 = 17 + LLDPMAUType100BaseFX_FD uint16 = 18 + LLDPMAUType100BaseT2_HD uint16 = 19 + LLDPMAUType100BaseT2_FD uint16 = 20 + LLDPMAUType1000BaseX_HD uint16 = 21 + LLDPMAUType1000BaseX_FD uint16 = 22 + LLDPMAUType1000BaseLX_HD uint16 = 23 + LLDPMAUType1000BaseLX_FD uint16 = 24 + LLDPMAUType1000BaseSX_HD uint16 = 25 + LLDPMAUType1000BaseSX_FD uint16 = 26 + LLDPMAUType1000BaseCX_HD uint16 = 27 + LLDPMAUType1000BaseCX_FD uint16 = 28 + LLDPMAUType1000BaseT_HD uint16 = 29 + LLDPMAUType1000BaseT_FD uint16 = 30 + LLDPMAUType10GBaseX uint16 = 31 + LLDPMAUType10GBaseLX4 uint16 = 32 + LLDPMAUType10GBaseR uint16 = 33 + LLDPMAUType10GBaseER uint16 = 34 + LLDPMAUType10GBaseLR uint16 = 35 + LLDPMAUType10GBaseSR uint16 = 36 + LLDPMAUType10GBaseW uint16 = 37 + LLDPMAUType10GBaseEW uint16 = 38 + LLDPMAUType10GBaseLW uint16 = 39 + LLDPMAUType10GBaseSW uint16 = 40 + LLDPMAUType10GBaseCX4 uint16 = 41 + LLDPMAUType2BaseTL uint16 = 42 + LLDPMAUType10PASS_TS uint16 = 43 + LLDPMAUType100BaseBX10D uint16 = 44 + LLDPMAUType100BaseBX10U uint16 = 45 + LLDPMAUType100BaseLX10 uint16 = 46 + LLDPMAUType1000BaseBX10D uint16 = 47 + LLDPMAUType1000BaseBX10U uint16 = 48 + LLDPMAUType1000BaseLX10 uint16 = 49 + LLDPMAUType1000BasePX10D 
uint16 = 50 + LLDPMAUType1000BasePX10U uint16 = 51 + LLDPMAUType1000BasePX20D uint16 = 52 + LLDPMAUType1000BasePX20U uint16 = 53 + LLDPMAUType10GBaseT uint16 = 54 + LLDPMAUType10GBaseLRM uint16 = 55 + LLDPMAUType1000BaseKX uint16 = 56 + LLDPMAUType10GBaseKX4 uint16 = 57 + LLDPMAUType10GBaseKR uint16 = 58 + LLDPMAUType10_1GBasePRX_D1 uint16 = 59 + LLDPMAUType10_1GBasePRX_D2 uint16 = 60 + LLDPMAUType10_1GBasePRX_D3 uint16 = 61 + LLDPMAUType10_1GBasePRX_U1 uint16 = 62 + LLDPMAUType10_1GBasePRX_U2 uint16 = 63 + LLDPMAUType10_1GBasePRX_U3 uint16 = 64 + LLDPMAUType10GBasePR_D1 uint16 = 65 + LLDPMAUType10GBasePR_D2 uint16 = 66 + LLDPMAUType10GBasePR_D3 uint16 = 67 + LLDPMAUType10GBasePR_U1 uint16 = 68 + LLDPMAUType10GBasePR_U3 uint16 = 69 +) + +// From RFC 3636 - ifMauAutoNegCapAdvertisedBits +const ( + LLDPMAUPMDOther uint16 = 1 << 15 + LLDPMAUPMD10BaseT uint16 = 1 << 14 + LLDPMAUPMD10BaseT_FD uint16 = 1 << 13 + LLDPMAUPMD100BaseT4 uint16 = 1 << 12 + LLDPMAUPMD100BaseTX uint16 = 1 << 11 + LLDPMAUPMD100BaseTX_FD uint16 = 1 << 10 + LLDPMAUPMD100BaseT2 uint16 = 1 << 9 + LLDPMAUPMD100BaseT2_FD uint16 = 1 << 8 + LLDPMAUPMDFDXPAUSE uint16 = 1 << 7 + LLDPMAUPMDFDXAPAUSE uint16 = 1 << 6 + LLDPMAUPMDFDXSPAUSE uint16 = 1 << 5 + LLDPMAUPMDFDXBPAUSE uint16 = 1 << 4 + LLDPMAUPMD1000BaseX uint16 = 1 << 3 + LLDPMAUPMD1000BaseX_FD uint16 = 1 << 2 + LLDPMAUPMD1000BaseT uint16 = 1 << 1 + LLDPMAUPMD1000BaseT_FD uint16 = 1 << 0 +) + +// Inverted ifMauAutoNegCapAdvertisedBits if required +// (Some manufacturers misinterpreted the spec - +// see https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=1455) +const ( + LLDPMAUPMDOtherInv uint16 = 1 << 0 + LLDPMAUPMD10BaseTInv uint16 = 1 << 1 + LLDPMAUPMD10BaseT_FDInv uint16 = 1 << 2 + LLDPMAUPMD100BaseT4Inv uint16 = 1 << 3 + LLDPMAUPMD100BaseTXInv uint16 = 1 << 4 + LLDPMAUPMD100BaseTX_FDInv uint16 = 1 << 5 + LLDPMAUPMD100BaseT2Inv uint16 = 1 << 6 + LLDPMAUPMD100BaseT2_FDInv uint16 = 1 << 7 + LLDPMAUPMDFDXPAUSEInv uint16 = 1 << 8 + LLDPMAUPMDFDXAPAUSEInv uint16 = 1 << 9 + LLDPMAUPMDFDXSPAUSEInv uint16 = 1 << 10 + LLDPMAUPMDFDXBPAUSEInv uint16 = 1 << 11 + LLDPMAUPMD1000BaseXInv uint16 = 1 << 12 + LLDPMAUPMD1000BaseX_FDInv uint16 = 1 << 13 + LLDPMAUPMD1000BaseTInv uint16 = 1 << 14 + LLDPMAUPMD1000BaseT_FDInv uint16 = 1 << 15 +) + +type LLDPMACPHYConfigStatus struct { + AutoNegSupported bool + AutoNegEnabled bool + AutoNegCapability uint16 + MAUType uint16 +} + +// MDI Power options +const ( + LLDPMDIPowerPortClass byte = 1 << 0 + LLDPMDIPowerCapability byte = 1 << 1 + LLDPMDIPowerStatus byte = 1 << 2 + LLDPMDIPowerPairsAbility byte = 1 << 3 +) + +type LLDPPowerType byte + +type LLDPPowerSource byte + +type LLDPPowerPriority byte + +const ( + LLDPPowerPriorityUnknown LLDPPowerPriority = 0 + LLDPPowerPriorityMedium LLDPPowerPriority = 1 + LLDPPowerPriorityHigh LLDPPowerPriority = 2 + LLDPPowerPriorityLow LLDPPowerPriority = 3 +) + +type LLDPPowerViaMDI8023 struct { + PortClassPSE bool // false = PD + PSESupported bool + PSEEnabled bool + PSEPairsAbility bool + PSEPowerPair uint8 + PSEClass uint8 + Type LLDPPowerType + Source LLDPPowerSource + Priority LLDPPowerPriority + Requested uint16 // 1-510 Watts + Allocated uint16 // 1-510 Watts +} + +// LLDPInfo8023 represents the information carried in 802.3 Org-specific TLVs +type LLDPInfo8023 struct { + MACPHYConfigStatus LLDPMACPHYConfigStatus + PowerViaMDI LLDPPowerViaMDI8023 + LinkAggregation LLDPLinkAggregation + MTU uint16 +} + +// IEEE 802.1Qbg TLV Subtypes +const ( + LLDP8021QbgEVB uint8 = 0 + LLDP8021QbgCDCP uint8 = 1 + 
LLDP8021QbgVDP uint8 = 2 + LLDP8021QbgEVB22 uint8 = 13 +) + +// LLDPEVBCapabilities Types +const ( + LLDPEVBCapsSTD uint16 = 1 << 7 + LLDPEVBCapsRR uint16 = 1 << 6 + LLDPEVBCapsRTE uint16 = 1 << 2 + LLDPEVBCapsECP uint16 = 1 << 1 + LLDPEVBCapsVDP uint16 = 1 << 0 +) + +// LLDPEVBCapabilities represents the EVB capabilities of a device +type LLDPEVBCapabilities struct { + StandardBridging bool + ReflectiveRelay bool + RetransmissionTimerExponent bool + EdgeControlProtocol bool + VSIDiscoveryProtocol bool +} + +type LLDPEVBSettings struct { + Supported LLDPEVBCapabilities + Enabled LLDPEVBCapabilities + SupportedVSIs uint16 + ConfiguredVSIs uint16 + RTEExponent uint8 +} + +// LLDPInfo8021Qbg represents the information carried in 802.1Qbg Org-specific TLVs +type LLDPInfo8021Qbg struct { + EVBSettings LLDPEVBSettings +} + +type LLDPMediaSubtype uint8 + +// Media TLV Subtypes +const ( + LLDPMediaTypeCapabilities LLDPMediaSubtype = 1 + LLDPMediaTypeNetwork LLDPMediaSubtype = 2 + LLDPMediaTypeLocation LLDPMediaSubtype = 3 + LLDPMediaTypePower LLDPMediaSubtype = 4 + LLDPMediaTypeHardware LLDPMediaSubtype = 5 + LLDPMediaTypeFirmware LLDPMediaSubtype = 6 + LLDPMediaTypeSoftware LLDPMediaSubtype = 7 + LLDPMediaTypeSerial LLDPMediaSubtype = 8 + LLDPMediaTypeManufacturer LLDPMediaSubtype = 9 + LLDPMediaTypeModel LLDPMediaSubtype = 10 + LLDPMediaTypeAssetID LLDPMediaSubtype = 11 +) + +type LLDPMediaClass uint8 + +// Media Class Values +const ( + LLDPMediaClassUndefined LLDPMediaClass = 0 + LLDPMediaClassEndpointI LLDPMediaClass = 1 + LLDPMediaClassEndpointII LLDPMediaClass = 2 + LLDPMediaClassEndpointIII LLDPMediaClass = 3 + LLDPMediaClassNetwork LLDPMediaClass = 4 +) + +// LLDPMediaCapabilities Types +const ( + LLDPMediaCapsLLDP uint16 = 1 << 0 + LLDPMediaCapsNetwork uint16 = 1 << 1 + LLDPMediaCapsLocation uint16 = 1 << 2 + LLDPMediaCapsPowerPSE uint16 = 1 << 3 + LLDPMediaCapsPowerPD uint16 = 1 << 4 + LLDPMediaCapsInventory uint16 = 1 << 5 +) + +// LLDPMediaCapabilities represents the LLDP Media capabilities of a device +type LLDPMediaCapabilities struct { + Capabilities bool + NetworkPolicy bool + Location bool + PowerPSE bool + PowerPD bool + Inventory bool + Class LLDPMediaClass +} + +type LLDPApplicationType uint8 + +const ( + LLDPAppTypeReserved LLDPApplicationType = 0 + LLDPAppTypeVoice LLDPApplicationType = 1 + LLDPappTypeVoiceSignaling LLDPApplicationType = 2 + LLDPappTypeGuestVoice LLDPApplicationType = 3 + LLDPappTypeGuestVoiceSignaling LLDPApplicationType = 4 + LLDPappTypeSoftphoneVoice LLDPApplicationType = 5 + LLDPappTypeVideoConferencing LLDPApplicationType = 6 + LLDPappTypeStreamingVideo LLDPApplicationType = 7 + LLDPappTypeVideoSignaling LLDPApplicationType = 8 +) + +type LLDPNetworkPolicy struct { + ApplicationType LLDPApplicationType + Defined bool + Tagged bool + VLANId uint16 + L2Priority uint16 + DSCPValue uint8 +} + +type LLDPLocationFormat uint8 + +const ( + LLDPLocationFormatInvalid LLDPLocationFormat = 0 + LLDPLocationFormatCoordinate LLDPLocationFormat = 1 + LLDPLocationFormatAddress LLDPLocationFormat = 2 + LLDPLocationFormatECS LLDPLocationFormat = 3 +) + +type LLDPLocationAddressWhat uint8 + +const ( + LLDPLocationAddressWhatDHCP LLDPLocationAddressWhat = 0 + LLDPLocationAddressWhatNetwork LLDPLocationAddressWhat = 1 + LLDPLocationAddressWhatClient LLDPLocationAddressWhat = 2 +) + +type LLDPLocationAddressType uint8 + +const ( + LLDPLocationAddressTypeLanguage LLDPLocationAddressType = 0 + LLDPLocationAddressTypeNational LLDPLocationAddressType = 1 + 
LLDPLocationAddressTypeCounty LLDPLocationAddressType = 2
+	LLDPLocationAddressTypeCity LLDPLocationAddressType = 3
+	LLDPLocationAddressTypeCityDivision LLDPLocationAddressType = 4
+	LLDPLocationAddressTypeNeighborhood LLDPLocationAddressType = 5
+	LLDPLocationAddressTypeStreet LLDPLocationAddressType = 6
+	LLDPLocationAddressTypeLeadingStreet LLDPLocationAddressType = 16
+	LLDPLocationAddressTypeTrailingStreet LLDPLocationAddressType = 17
+	LLDPLocationAddressTypeStreetSuffix LLDPLocationAddressType = 18
+	LLDPLocationAddressTypeHouseNum LLDPLocationAddressType = 19
+	LLDPLocationAddressTypeHouseSuffix LLDPLocationAddressType = 20
+	LLDPLocationAddressTypeLandmark LLDPLocationAddressType = 21
+	LLDPLocationAddressTypeAdditional LLDPLocationAddressType = 22
+	LLDPLocationAddressTypeName LLDPLocationAddressType = 23
+	LLDPLocationAddressTypePostal LLDPLocationAddressType = 24
+	LLDPLocationAddressTypeBuilding LLDPLocationAddressType = 25
+	LLDPLocationAddressTypeUnit LLDPLocationAddressType = 26
+	LLDPLocationAddressTypeFloor LLDPLocationAddressType = 27
+	LLDPLocationAddressTypeRoom LLDPLocationAddressType = 28
+	LLDPLocationAddressTypePlace LLDPLocationAddressType = 29
+	LLDPLocationAddressTypeScript LLDPLocationAddressType = 128
+)
+
+type LLDPLocationCoordinate struct {
+	LatitudeResolution uint8
+	Latitude uint64
+	LongitudeResolution uint8
+	Longitude uint64
+	AltitudeType uint8
+	AltitudeResolution uint16
+	Altitude uint32
+	Datum uint8
+}
+
+type LLDPLocationAddressLine struct {
+	Type LLDPLocationAddressType
+	Value string
+}
+
+type LLDPLocationAddress struct {
+	What LLDPLocationAddressWhat
+	CountryCode string
+	AddressLines []LLDPLocationAddressLine
+}
+
+type LLDPLocationECS struct {
+	ELIN string
+}
+
+// LLDPLocation represents a physical location.
+// Only one of the embedded types will contain values, depending on Format.
+type LLDPLocation struct {
+	Format LLDPLocationFormat
+	Coordinate LLDPLocationCoordinate
+	Address LLDPLocationAddress
+	ECS LLDPLocationECS
+}
+
+type LLDPPowerViaMDI struct {
+	Type LLDPPowerType
+	Source LLDPPowerSource
+	Priority LLDPPowerPriority
+	Value uint16
+}
+
+// LLDPInfoMedia represents the information carried in TR-41 Org-specific TLVs
+type LLDPInfoMedia struct {
+	MediaCapabilities LLDPMediaCapabilities
+	NetworkPolicy LLDPNetworkPolicy
+	Location LLDPLocation
+	PowerViaMDI LLDPPowerViaMDI
+	HardwareRevision string
+	FirmwareRevision string
+	SoftwareRevision string
+	SerialNumber string
+	Manufacturer string
+	Model string
+	AssetID string
+}
+
+type LLDPCisco2Subtype uint8
+
+// Cisco2 TLV Subtypes
+const (
+	LLDPCisco2PowerViaMDI LLDPCisco2Subtype = 1
+)
+
+const (
+	LLDPCiscoPSESupport uint8 = 1 << 0
+	LLDPCiscoArchShared uint8 = 1 << 1
+	LLDPCiscoPDSparePair uint8 = 1 << 2
+	LLDPCiscoPSESparePair uint8 = 1 << 3
+)
+
+// LLDPInfoCisco2 represents the information carried in Cisco Org-specific TLVs
+type LLDPInfoCisco2 struct {
+	PSEFourWirePoESupported bool
+	PDSparePairArchitectureShared bool
+	PDRequestSparePairPoEOn bool
+	PSESparePairPoEOn bool
+}
+
+// Profinet Subtypes
+type LLDPProfinetSubtype uint8
+
+const (
+	LLDPProfinetPNIODelay LLDPProfinetSubtype = 1
+	LLDPProfinetPNIOPortStatus LLDPProfinetSubtype = 2
+	LLDPProfinetPNIOMRPPortStatus LLDPProfinetSubtype = 4
+	LLDPProfinetPNIOChassisMAC LLDPProfinetSubtype = 5
+	LLDPProfinetPNIOPTCPStatus LLDPProfinetSubtype = 6
+)
+
+type LLDPPNIODelay struct {
+	RXLocal uint32
+	RXRemote uint32
+	TXLocal uint32
+	TXRemote uint32
+	CableLocal uint32
+}
+
+type LLDPPNIOPortStatus struct {
+	Class2 uint16
+	Class3 uint16
+}
+
+type LLDPPNIOMRPPortStatus struct {
+	UUID []byte
+	Status uint16
+}
+
+type LLDPPNIOPTCPStatus struct {
+	MasterAddress []byte
+	SubdomainUUID []byte
+	IRDataUUID []byte
+	PeriodValid bool
+	PeriodLength uint32
+	RedPeriodValid bool
+	RedPeriodBegin uint32
+	OrangePeriodValid bool
+	OrangePeriodBegin uint32
+	GreenPeriodValid bool
+	GreenPeriodBegin uint32
+}
+
+// LLDPInfoProfinet represents the information carried in Profinet Org-specific TLVs
+type LLDPInfoProfinet struct {
+	PNIODelay LLDPPNIODelay
+	PNIOPortStatus LLDPPNIOPortStatus
+	PNIOMRPPortStatus LLDPPNIOMRPPortStatus
+	ChassisMAC []byte
+	PNIOPTCPStatus LLDPPNIOPTCPStatus
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscovery.
+func (c *LinkLayerDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscovery
+}
+
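As a usage aside (an illustration by the editor, not part of the vendored patch): the SerializeTo implementation that follows writes only the mandatory Chassis ID, Port ID, and TTL TLVs, then the closing End TLV. The standalone sketch below shows how it might be driven; the MAC address and interface name are made-up example values, and the LLDPChassisID/LLDPPortID struct types are defined earlier in lldp.go, outside this excerpt.

    package main

    import (
    	"fmt"

    	"github.com/google/gopacket"
    	"github.com/google/gopacket/layers"
    )

    func main() {
    	// Build a minimal LLDPDU: mandatory TLVs only. SerializeTo (below)
    	// appends the End TLV itself.
    	lldp := &layers.LinkLayerDiscovery{
    		ChassisID: layers.LLDPChassisID{
    			Subtype: layers.LLDPChassisIDSubTypeMACAddr,
    			ID:      []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, // example MAC
    		},
    		PortID: layers.LLDPPortID{
    			Subtype: layers.LLDPPortIDSubtypeIfaceName,
    			ID:      []byte("eth0"), // example interface name
    		},
    		TTL: 120, // seconds the receiver should keep this info
    	}
    	buf := gopacket.NewSerializeBuffer()
    	if err := lldp.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
    		panic(err)
    	}
    	fmt.Printf("% x\n", buf.Bytes()) // chassis, port, TTL TLVs plus End TLV
    }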
+// SerializeTo serializes the LLDP mandatory TLVs to bytes and writes them into the SerializeBuffer.
+func (c *LinkLayerDiscovery) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	chassIDLen := c.ChassisID.serializedLen()
+	portIDLen := c.PortID.serializedLen()
+	vb, err := b.AppendBytes(chassIDLen + portIDLen + 4) // +4 for TTL
+	if err != nil {
+		return err
+	}
+	copy(vb[:chassIDLen], c.ChassisID.serialize())
+	copy(vb[chassIDLen:], c.PortID.serialize())
+	ttlIDLen := uint16(LLDPTLVTTL)<<9 | uint16(2)
+	binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen:], ttlIDLen)
+	binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen+2:], c.TTL)
+
+	vb, err = b.AppendBytes(2) // End TLV, 2 bytes
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(vb[len(vb)-2:], uint16(0)) // End TLV, 2 bytes, all zero
+	return nil
+}
+
+func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	var vals []LinkLayerDiscoveryValue
+	vData := data[0:]
+	for len(vData) > 0 {
+		if len(vData) < 2 {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		nbit := vData[0] & 0x01
+		t := LLDPTLVType(vData[0] >> 1)
+		val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])}
+		// Check the bounds before slicing the value out of vData.
+		if len(vData) < int(2+val.Length) {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		if val.Length > 0 {
+			val.Value = vData[2 : val.Length+2]
+		}
+		vals = append(vals, val)
+		if t == LLDPTLVEnd {
+			break
+		}
+		vData = vData[2+val.Length:]
+	}
+	if len(vals) < 4 {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c := &LinkLayerDiscovery{}
+	gotEnd := false
+	for _, v := range vals {
+		switch v.Type {
+		case LLDPTLVEnd:
+			gotEnd = true
+		case LLDPTLVChassisID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery ChassisID TLV")
+			}
+			c.ChassisID.Subtype = LLDPChassisIDSubType(v.Value[0])
+			c.ChassisID.ID = v.Value[1:]
+		case LLDPTLVPortID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery PortID TLV")
+			}
+			c.PortID.Subtype = LLDPPortIDSubType(v.Value[0])
+			c.PortID.ID = v.Value[1:]
+		case LLDPTLVTTL:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery TTL TLV")
+			}
+			c.TTL = binary.BigEndian.Uint16(v.Value[0:2])
+		default:
+			c.Values = append(c.Values, v)
+		}
+	}
+	if c.ChassisID.Subtype == 0 || c.PortID.Subtype == 0 || !gotEnd {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c.Contents = data
+	p.AddLayer(c)
+
+	info := &LinkLayerDiscoveryInfo{}
+	p.AddLayer(info)
+	for _, v := range c.Values {
+		switch v.Type {
+		case LLDPTLVPortDescription:
+			info.PortDescription = string(v.Value)
+		case LLDPTLVSysName:
+			info.SysName = string(v.Value)
+		case LLDPTLVSysDescription:
+			info.SysDescription = string(v.Value)
+		case LLDPTLVSysCapabilities:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.SysCapabilities.SystemCap = getCapabilities(binary.BigEndian.Uint16(v.Value[0:2]))
+			info.SysCapabilities.EnabledCap = getCapabilities(binary.BigEndian.Uint16(v.Value[2:4]))
+		case LLDPTLVMgmtAddress:
+			if err := checkLLDPTLVLen(v, 9); err != nil {
+				return err
+			}
+			mlen := v.Value[0]
+			if err := checkLLDPTLVLen(v, int(mlen+7)); err != nil {
+				return err
+			}
+			info.MgmtAddress.Subtype = IANAAddressFamily(v.Value[1])
+			info.MgmtAddress.Address = v.Value[2 : mlen+1]
+			info.MgmtAddress.InterfaceSubtype = LLDPInterfaceSubtype(v.Value[mlen+1])
+			info.MgmtAddress.InterfaceNumber = binary.BigEndian.Uint32(v.Value[mlen+2 : mlen+6])
+			olen := v.Value[mlen+6]
+			if err := checkLLDPTLVLen(v, int(mlen+6+olen)); err != nil {
+				return err
+			}
+			info.MgmtAddress.OID = string(v.Value[mlen+9 : mlen+9+olen])
+		case LLDPTLVOrgSpecific:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.OrgTLVs = append(info.OrgTLVs, LLDPOrgSpecificTLV{IEEEOUI(binary.BigEndian.Uint32(append([]byte{byte(0)}, v.Value[0:3]...))), uint8(v.Value[3]), v.Value[4:]})
+		}
+	}
+	return nil
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8021() (info LLDPInfo8021, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8021SubtypePortVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.PVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeProtocolVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPProtocolVLANIDCapability > 0)
+			en := (o.Info[0]&LLDPProtocolVLANIDStatus > 0)
+			id := binary.BigEndian.Uint16(o.Info[1:3])
+			info.PPVIDs = append(info.PPVIDs, PortProtocolVLANID{sup, en, id})
+		case LLDP8021SubtypeVLANName:
+			// need VLAN ID (2 bytes) plus the name-length byte before the name
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			id := binary.BigEndian.Uint16(o.Info[0:2])
+			info.VLANNames = append(info.VLANNames, VLANName{id, string(o.Info[3:])})
+		case LLDP8021SubtypeProtocolIdentity:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			l := int(o.Info[0])
+			if l > 0 {
+				info.ProtocolIdentities = append(info.ProtocolIdentities, o.Info[1:1+l])
+			}
+		case LLDP8021SubtypeVDIUsageDigest:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.VIDUsageDigest = binary.BigEndian.Uint32(o.Info[0:4])
+		case LLDP8021SubtypeManagementVID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.ManagementVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8023() (info LLDPInfo8023, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8023 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8023SubtypeMACPHY:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPMACPHYCapability > 0)
+			en := (o.Info[0]&LLDPMACPHYStatus > 0)
+			ca := binary.BigEndian.Uint16(o.Info[1:3])
+			mau := binary.BigEndian.Uint16(o.Info[3:5])
+			info.MACPHYConfigStatus = LLDPMACPHYConfigStatus{sup, en, ca, mau}
+		case LLDP8023SubtypeMDIPower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.PortClassPSE = (o.Info[0]&LLDPMDIPowerPortClass > 0)
+			info.PowerViaMDI.PSESupported = (o.Info[0]&LLDPMDIPowerCapability > 0)
+			info.PowerViaMDI.PSEEnabled = (o.Info[0]&LLDPMDIPowerStatus > 0)
+			info.PowerViaMDI.PSEPairsAbility = (o.Info[0]&LLDPMDIPowerPairsAbility > 0)
+			info.PowerViaMDI.PSEPowerPair = uint8(o.Info[1])
+			info.PowerViaMDI.PSEClass = uint8(o.Info[2])
+			// the 802.3at extension occupies bytes 3-7, so 8 bytes are needed
+			if len(o.Info) >= 8 {
+				info.PowerViaMDI.Type = LLDPPowerType((o.Info[3] & 0xc0) >> 6)
+				info.PowerViaMDI.Source = LLDPPowerSource((o.Info[3] & 0x30) >> 4)
+				if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+					info.PowerViaMDI.Source += 128 // For Stringify purposes
+				}
+				info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[3] & 0x0f)
+				info.PowerViaMDI.Requested = binary.BigEndian.Uint16(o.Info[4:6])
+				info.PowerViaMDI.Allocated = binary.BigEndian.Uint16(o.Info[6:8])
+			}
+		case
LLDP8023SubtypeLinkAggregation: + if err = checkLLDPOrgSpecificLen(o, 5); err != nil { + return + } + sup := (o.Info[0]&LLDPAggregationCapability > 0) + en := (o.Info[0]&LLDPAggregationStatus > 0) + info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])} + case LLDP8023SubtypeMTU: + if err = checkLLDPOrgSpecificLen(o, 2); err != nil { + return + } + info.MTU = binary.BigEndian.Uint16(o.Info[0:2]) + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) Decode8021Qbg() (info LLDPInfo8021Qbg, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUI8021Qbg { + continue + } + switch o.SubType { + case LLDP8021QbgEVB: + if err = checkLLDPOrgSpecificLen(o, 9); err != nil { + return + } + info.EVBSettings.Supported = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[0:2])) + info.EVBSettings.Enabled = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[2:4])) + info.EVBSettings.SupportedVSIs = binary.BigEndian.Uint16(o.Info[4:6]) + info.EVBSettings.ConfiguredVSIs = binary.BigEndian.Uint16(o.Info[6:8]) + info.EVBSettings.RTEExponent = uint8(o.Info[8]) + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) DecodeMedia() (info LLDPInfoMedia, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUIMedia { + continue + } + switch LLDPMediaSubtype(o.SubType) { + case LLDPMediaTypeCapabilities: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + b := binary.BigEndian.Uint16(o.Info[0:2]) + info.MediaCapabilities.Capabilities = (b & LLDPMediaCapsLLDP) > 0 + info.MediaCapabilities.NetworkPolicy = (b & LLDPMediaCapsNetwork) > 0 + info.MediaCapabilities.Location = (b & LLDPMediaCapsLocation) > 0 + info.MediaCapabilities.PowerPSE = (b & LLDPMediaCapsPowerPSE) > 0 + info.MediaCapabilities.PowerPD = (b & LLDPMediaCapsPowerPD) > 0 + info.MediaCapabilities.Inventory = (b & LLDPMediaCapsInventory) > 0 + info.MediaCapabilities.Class = LLDPMediaClass(o.Info[2]) + case LLDPMediaTypeNetwork: + if err = checkLLDPOrgSpecificLen(o, 4); err != nil { + return + } + info.NetworkPolicy.ApplicationType = LLDPApplicationType(o.Info[0]) + b := binary.BigEndian.Uint16(o.Info[1:3]) + info.NetworkPolicy.Defined = (b & 0x8000) == 0 + info.NetworkPolicy.Tagged = (b & 0x4000) > 0 + info.NetworkPolicy.VLANId = (b & 0x1ffe) >> 1 + b = binary.BigEndian.Uint16(o.Info[2:4]) + info.NetworkPolicy.L2Priority = (b & 0x01c0) >> 6 + info.NetworkPolicy.DSCPValue = uint8(o.Info[3] & 0x3f) + case LLDPMediaTypeLocation: + if err = checkLLDPOrgSpecificLen(o, 1); err != nil { + return + } + info.Location.Format = LLDPLocationFormat(o.Info[0]) + o.Info = o.Info[1:] + switch info.Location.Format { + case LLDPLocationFormatCoordinate: + if err = checkLLDPOrgSpecificLen(o, 16); err != nil { + return + } + info.Location.Coordinate.LatitudeResolution = uint8(o.Info[0]&0xfc) >> 2 + b := binary.BigEndian.Uint64(o.Info[0:8]) + info.Location.Coordinate.Latitude = (b & 0x03ffffffff000000) >> 24 + info.Location.Coordinate.LongitudeResolution = uint8(o.Info[5]&0xfc) >> 2 + b = binary.BigEndian.Uint64(o.Info[5:13]) + info.Location.Coordinate.Longitude = (b & 0x03ffffffff000000) >> 24 + info.Location.Coordinate.AltitudeType = uint8((o.Info[10] & 0x30) >> 4) + b1 := binary.BigEndian.Uint16(o.Info[10:12]) + info.Location.Coordinate.AltitudeResolution = (b1 & 0xfc0) >> 6 + b2 := binary.BigEndian.Uint32(o.Info[11:15]) + info.Location.Coordinate.Altitude = b2 & 0x3fffffff + info.Location.Coordinate.Datum = uint8(o.Info[15]) + case LLDPLocationFormatAddress: + if err = checkLLDPOrgSpecificLen(o, 3); 
err != nil { + return + } + //ll := uint8(o.Info[0]) + info.Location.Address.What = LLDPLocationAddressWhat(o.Info[1]) + info.Location.Address.CountryCode = string(o.Info[2:4]) + data := o.Info[4:] + for len(data) > 1 { + aType := LLDPLocationAddressType(data[0]) + aLen := int(data[1]) + if len(data) >= aLen+2 { + info.Location.Address.AddressLines = append(info.Location.Address.AddressLines, LLDPLocationAddressLine{aType, string(data[2 : aLen+2])}) + data = data[aLen+2:] + } else { + break + } + } + case LLDPLocationFormatECS: + info.Location.ECS.ELIN = string(o.Info) + } + case LLDPMediaTypePower: + if err = checkLLDPOrgSpecificLen(o, 3); err != nil { + return + } + info.PowerViaMDI.Type = LLDPPowerType((o.Info[0] & 0xc0) >> 6) + info.PowerViaMDI.Source = LLDPPowerSource((o.Info[0] & 0x30) >> 4) + if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 { + info.PowerViaMDI.Source += 128 // For Stringify purposes + } + info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[0] & 0x0f) + info.PowerViaMDI.Value = binary.BigEndian.Uint16(o.Info[1:3]) * 100 // 0 to 102.3 w, 0.1W increments + case LLDPMediaTypeHardware: + info.HardwareRevision = string(o.Info) + case LLDPMediaTypeFirmware: + info.FirmwareRevision = string(o.Info) + case LLDPMediaTypeSoftware: + info.SoftwareRevision = string(o.Info) + case LLDPMediaTypeSerial: + info.SerialNumber = string(o.Info) + case LLDPMediaTypeManufacturer: + info.Manufacturer = string(o.Info) + case LLDPMediaTypeModel: + info.Model = string(o.Info) + case LLDPMediaTypeAssetID: + info.AssetID = string(o.Info) + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) DecodeCisco2() (info LLDPInfoCisco2, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUICisco2 { + continue + } + switch LLDPCisco2Subtype(o.SubType) { + case LLDPCisco2PowerViaMDI: + if err = checkLLDPOrgSpecificLen(o, 1); err != nil { + return + } + info.PSEFourWirePoESupported = (o.Info[0] & LLDPCiscoPSESupport) > 0 + info.PDSparePairArchitectureShared = (o.Info[0] & LLDPCiscoArchShared) > 0 + info.PDRequestSparePairPoEOn = (o.Info[0] & LLDPCiscoPDSparePair) > 0 + info.PSESparePairPoEOn = (o.Info[0] & LLDPCiscoPSESparePair) > 0 + } + } + return +} + +func (l *LinkLayerDiscoveryInfo) DecodeProfinet() (info LLDPInfoProfinet, err error) { + for _, o := range l.OrgTLVs { + if o.OUI != IEEEOUIProfinet { + continue + } + switch LLDPProfinetSubtype(o.SubType) { + case LLDPProfinetPNIODelay: + if err = checkLLDPOrgSpecificLen(o, 20); err != nil { + return + } + info.PNIODelay.RXLocal = binary.BigEndian.Uint32(o.Info[0:4]) + info.PNIODelay.RXRemote = binary.BigEndian.Uint32(o.Info[4:8]) + info.PNIODelay.TXLocal = binary.BigEndian.Uint32(o.Info[8:12]) + info.PNIODelay.TXRemote = binary.BigEndian.Uint32(o.Info[12:16]) + info.PNIODelay.CableLocal = binary.BigEndian.Uint32(o.Info[16:20]) + case LLDPProfinetPNIOPortStatus: + if err = checkLLDPOrgSpecificLen(o, 4); err != nil { + return + } + info.PNIOPortStatus.Class2 = binary.BigEndian.Uint16(o.Info[0:2]) + info.PNIOPortStatus.Class3 = binary.BigEndian.Uint16(o.Info[2:4]) + case LLDPProfinetPNIOMRPPortStatus: + if err = checkLLDPOrgSpecificLen(o, 18); err != nil { + return + } + info.PNIOMRPPortStatus.UUID = o.Info[0:16] + info.PNIOMRPPortStatus.Status = binary.BigEndian.Uint16(o.Info[16:18]) + case LLDPProfinetPNIOChassisMAC: + if err = checkLLDPOrgSpecificLen(o, 6); err != nil { + return + } + info.ChassisMAC = o.Info[0:6] + case LLDPProfinetPNIOPTCPStatus: + if err = checkLLDPOrgSpecificLen(o, 54); err != nil { + return + } + 
info.PNIOPTCPStatus.MasterAddress = o.Info[0:6]
+			info.PNIOPTCPStatus.SubdomainUUID = o.Info[6:22]
+			info.PNIOPTCPStatus.IRDataUUID = o.Info[22:38]
+			b := binary.BigEndian.Uint32(o.Info[38:42])
+			info.PNIOPTCPStatus.PeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.PeriodLength = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[42:46])
+			info.PNIOPTCPStatus.RedPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.RedPeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[46:50])
+			info.PNIOPTCPStatus.OrangePeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.OrangePeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[50:54])
+			info.PNIOPTCPStatus.GreenPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.GreenPeriodBegin = b & 0x7fffffff
+		}
+	}
+	return
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscoveryInfo.
+func (c *LinkLayerDiscoveryInfo) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscoveryInfo
+}
+
+func getCapabilities(v uint16) (c LLDPCapabilities) {
+	c.Other = (v&LLDPCapsOther > 0)
+	c.Repeater = (v&LLDPCapsRepeater > 0)
+	c.Bridge = (v&LLDPCapsBridge > 0)
+	c.WLANAP = (v&LLDPCapsWLANAP > 0)
+	c.Router = (v&LLDPCapsRouter > 0)
+	c.Phone = (v&LLDPCapsPhone > 0)
+	c.DocSis = (v&LLDPCapsDocSis > 0)
+	c.StationOnly = (v&LLDPCapsStationOnly > 0)
+	c.CVLAN = (v&LLDPCapsCVLAN > 0)
+	c.SVLAN = (v&LLDPCapsSVLAN > 0)
+	c.TMPR = (v&LLDPCapsTmpr > 0)
+	return
+}
+
+func getEVBCapabilities(v uint16) (c LLDPEVBCapabilities) {
+	c.StandardBridging = (v & LLDPEVBCapsSTD) > 0
+	c.ReflectiveRelay = (v & LLDPEVBCapsRR) > 0
+	c.RetransmissionTimerExponent = (v & LLDPEVBCapsRTE) > 0
+	c.EdgeControlProtocol = (v & LLDPEVBCapsECP) > 0
+	c.VSIDiscoveryProtocol = (v & LLDPEVBCapsVDP) > 0
+	return
+}
+
+func (t LLDPTLVType) String() (s string) {
+	switch t {
+	case LLDPTLVEnd:
+		s = "TLV End"
+	case LLDPTLVChassisID:
+		s = "Chassis ID"
+	case LLDPTLVPortID:
+		s = "Port ID"
+	case LLDPTLVTTL:
+		s = "TTL"
+	case LLDPTLVPortDescription:
+		s = "Port Description"
+	case LLDPTLVSysName:
+		s = "System Name"
+	case LLDPTLVSysDescription:
+		s = "System Description"
+	case LLDPTLVSysCapabilities:
+		s = "System Capabilities"
+	case LLDPTLVMgmtAddress:
+		s = "Management Address"
+	case LLDPTLVOrgSpecific:
+		s = "Organisation Specific"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPChassisIDSubType) String() (s string) {
+	switch t {
+	case LLDPChassisIDSubTypeReserved:
+		s = "Reserved"
+	case LLDPChassisIDSubTypeChassisComp:
+		s = "Chassis Component"
+	case LLDPChassisIDSubtypeIfaceAlias:
+		s = "Interface Alias"
+	case LLDPChassisIDSubTypePortComp:
+		s = "Port Component"
+	case LLDPChassisIDSubTypeMACAddr:
+		s = "MAC Address"
+	case LLDPChassisIDSubTypeNetworkAddr:
+		s = "Network Address"
+	case LLDPChassisIDSubtypeIfaceName:
+		s = "Interface Name"
+	case LLDPChassisIDSubTypeLocal:
+		s = "Local"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPortIDSubType) String() (s string) {
+	switch t {
+	case LLDPPortIDSubtypeReserved:
+		s = "Reserved"
+	case LLDPPortIDSubtypeIfaceAlias:
+		s = "Interface Alias"
+	case LLDPPortIDSubtypePortComp:
+		s = "Port Component"
+	case LLDPPortIDSubtypeMACAddr:
+		s = "MAC Address"
+	case LLDPPortIDSubtypeNetworkAddr:
+		s = "Network Address"
+	case LLDPPortIDSubtypeIfaceName:
+		s = "Interface Name"
+	case LLDPPortIDSubtypeAgentCircuitID:
+		s = "Agent Circuit ID"
+	case LLDPPortIDSubtypeLocal:
+		s = "Local"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
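As a usage aside (an editor's illustration, not part of the vendored patch): once decodeLinkLayerDiscovery has run, the org-specific TLVs it collected stay raw until the Decode8021/Decode8023/DecodeMedia helpers above are called explicitly. The sketch below shows that pattern; frameData is an assumed captured Ethernet frame carrying an LLDPDU.

    package main

    import (
    	"fmt"

    	"github.com/google/gopacket"
    	"github.com/google/gopacket/layers"
    )

    func inspectLLDP(frameData []byte) {
    	pkt := gopacket.NewPacket(frameData, layers.LayerTypeEthernet, gopacket.Default)
    	l := pkt.Layer(layers.LayerTypeLinkLayerDiscoveryInfo)
    	if l == nil {
    		return // no decoded LLDP info layer in this frame
    	}
    	info := l.(*layers.LinkLayerDiscoveryInfo)
    	fmt.Println("system:", info.SysName, "port:", info.PortDescription)

    	// Org-specific TLVs are decoded on demand, per OUI.
    	if i8021, err := info.Decode8021(); err == nil {
    		fmt.Println("port VLAN ID:", i8021.PVID)
    	}
    	if i8023, err := info.Decode8023(); err == nil {
    		fmt.Println("MTU:", i8023.MTU)
    	}
    }

    func main() {
    	inspectLLDP(nil) // substitute a real captured frame here
    }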
+func (t IANAAddressFamily) String() (s string) { + switch t { + case IANAAddressFamilyReserved: + s = "Reserved" + case IANAAddressFamilyIPV4: + s = "IPv4" + case IANAAddressFamilyIPV6: + s = "IPv6" + case IANAAddressFamilyNSAP: + s = "NSAP" + case IANAAddressFamilyHDLC: + s = "HDLC" + case IANAAddressFamilyBBN1822: + s = "BBN 1822" + case IANAAddressFamily802: + s = "802 media plus Ethernet 'canonical format'" + case IANAAddressFamilyE163: + s = "E.163" + case IANAAddressFamilyE164: + s = "E.164 (SMDS, Frame Relay, ATM)" + case IANAAddressFamilyF69: + s = "F.69 (Telex)" + case IANAAddressFamilyX121: + s = "X.121, X.25, Frame Relay" + case IANAAddressFamilyIPX: + s = "IPX" + case IANAAddressFamilyAtalk: + s = "Appletalk" + case IANAAddressFamilyDecnet: + s = "Decnet IV" + case IANAAddressFamilyBanyan: + s = "Banyan Vines" + case IANAAddressFamilyE164NSAP: + s = "E.164 with NSAP format subaddress" + case IANAAddressFamilyDNS: + s = "DNS" + case IANAAddressFamilyDistname: + s = "Distinguished Name" + case IANAAddressFamilyASNumber: + s = "AS Number" + case IANAAddressFamilyXTPIPV4: + s = "XTP over IP version 4" + case IANAAddressFamilyXTPIPV6: + s = "XTP over IP version 6" + case IANAAddressFamilyXTP: + s = "XTP native mode XTP" + case IANAAddressFamilyFcWWPN: + s = "Fibre Channel World-Wide Port Name" + case IANAAddressFamilyFcWWNN: + s = "Fibre Channel World-Wide Node Name" + case IANAAddressFamilyGWID: + s = "GWID" + case IANAAddressFamilyL2VPN: + s = "AFI for Layer 2 VPN" + default: + s = "Unknown" + } + return +} + +func (t LLDPInterfaceSubtype) String() (s string) { + switch t { + case LLDPInterfaceSubtypeUnknown: + s = "Unknown" + case LLDPInterfaceSubtypeifIndex: + s = "IfIndex" + case LLDPInterfaceSubtypeSysPort: + s = "System Port Number" + default: + s = "Unknown" + } + return +} + +func (t LLDPPowerType) String() (s string) { + switch t { + case 0: + s = "Type 2 PSE Device" + case 1: + s = "Type 2 PD Device" + case 2: + s = "Type 1 PSE Device" + case 3: + s = "Type 1 PD Device" + default: + s = "Unknown" + } + return +} + +func (t LLDPPowerSource) String() (s string) { + switch t { + // PD Device + case 0: + s = "Unknown" + case 1: + s = "PSE" + case 2: + s = "Local" + case 3: + s = "PSE and Local" + // PSE Device (Actual value + 128) + case 128: + s = "Unknown" + case 129: + s = "Primary Power Source" + case 130: + s = "Backup Power Source" + default: + s = "Unknown" + } + return +} + +func (t LLDPPowerPriority) String() (s string) { + switch t { + case 0: + s = "Unknown" + case 1: + s = "Critical" + case 2: + s = "High" + case 3: + s = "Low" + default: + s = "Unknown" + } + return +} + +func (t LLDPMediaSubtype) String() (s string) { + switch t { + case LLDPMediaTypeCapabilities: + s = "Media Capabilities " + case LLDPMediaTypeNetwork: + s = "Network Policy" + case LLDPMediaTypeLocation: + s = "Location Identification" + case LLDPMediaTypePower: + s = "Extended Power-via-MDI" + case LLDPMediaTypeHardware: + s = "Hardware Revision" + case LLDPMediaTypeFirmware: + s = "Firmware Revision" + case LLDPMediaTypeSoftware: + s = "Software Revision" + case LLDPMediaTypeSerial: + s = "Serial Number" + case LLDPMediaTypeManufacturer: + s = "Manufacturer" + case LLDPMediaTypeModel: + s = "Model" + case LLDPMediaTypeAssetID: + s = "Asset ID" + default: + s = "Unknown" + } + return +} + +func (t LLDPMediaClass) String() (s string) { + switch t { + case LLDPMediaClassUndefined: + s = "Undefined" + case LLDPMediaClassEndpointI: + s = "Endpoint Class I" + case LLDPMediaClassEndpointII: + s = 
"Endpoint Class II" + case LLDPMediaClassEndpointIII: + s = "Endpoint Class III" + case LLDPMediaClassNetwork: + s = "Network connectivity " + default: + s = "Unknown" + } + return +} + +func (t LLDPApplicationType) String() (s string) { + switch t { + case LLDPAppTypeReserved: + s = "Reserved" + case LLDPAppTypeVoice: + s = "Voice" + case LLDPappTypeVoiceSignaling: + s = "Voice Signaling" + case LLDPappTypeGuestVoice: + s = "Guest Voice" + case LLDPappTypeGuestVoiceSignaling: + s = "Guest Voice Signaling" + case LLDPappTypeSoftphoneVoice: + s = "Softphone Voice" + case LLDPappTypeVideoConferencing: + s = "Video Conferencing" + case LLDPappTypeStreamingVideo: + s = "Streaming Video" + case LLDPappTypeVideoSignaling: + s = "Video Signaling" + default: + s = "Unknown" + } + return +} + +func (t LLDPLocationFormat) String() (s string) { + switch t { + case LLDPLocationFormatInvalid: + s = "Invalid" + case LLDPLocationFormatCoordinate: + s = "Coordinate-based LCI" + case LLDPLocationFormatAddress: + s = "Address-based LCO" + case LLDPLocationFormatECS: + s = "ECS ELIN" + default: + s = "Unknown" + } + return +} + +func (t LLDPLocationAddressType) String() (s string) { + switch t { + case LLDPLocationAddressTypeLanguage: + s = "Language" + case LLDPLocationAddressTypeNational: + s = "National subdivisions (province, state, etc)" + case LLDPLocationAddressTypeCounty: + s = "County, parish, district" + case LLDPLocationAddressTypeCity: + s = "City, township" + case LLDPLocationAddressTypeCityDivision: + s = "City division, borough, ward" + case LLDPLocationAddressTypeNeighborhood: + s = "Neighborhood, block" + case LLDPLocationAddressTypeStreet: + s = "Street" + case LLDPLocationAddressTypeLeadingStreet: + s = "Leading street direction" + case LLDPLocationAddressTypeTrailingStreet: + s = "Trailing street suffix" + case LLDPLocationAddressTypeStreetSuffix: + s = "Street suffix" + case LLDPLocationAddressTypeHouseNum: + s = "House number" + case LLDPLocationAddressTypeHouseSuffix: + s = "House number suffix" + case LLDPLocationAddressTypeLandmark: + s = "Landmark or vanity address" + case LLDPLocationAddressTypeAdditional: + s = "Additional location information" + case LLDPLocationAddressTypeName: + s = "Name" + case LLDPLocationAddressTypePostal: + s = "Postal/ZIP code" + case LLDPLocationAddressTypeBuilding: + s = "Building" + case LLDPLocationAddressTypeUnit: + s = "Unit" + case LLDPLocationAddressTypeFloor: + s = "Floor" + case LLDPLocationAddressTypeRoom: + s = "Room number" + case LLDPLocationAddressTypePlace: + s = "Place type" + case LLDPLocationAddressTypeScript: + s = "Script" + default: + s = "Unknown" + } + return +} + +func checkLLDPTLVLen(v LinkLayerDiscoveryValue, l int) (err error) { + if len(v.Value) < l { + err = fmt.Errorf("Invalid TLV %v length %d (wanted mimimum %v", v.Type, len(v.Value), l) + } + return +} + +func checkLLDPOrgSpecificLen(o LLDPOrgSpecificTLV, l int) (err error) { + if len(o.Info) < l { + err = fmt.Errorf("Invalid Org Specific TLV %v length %d (wanted minimum %v)", o.SubType, len(o.Info), l) + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/loopback.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/loopback.go new file mode 100644 index 00000000..839f7607 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/loopback.go @@ -0,0 +1,80 @@ +// Copyright 2012 Google, Inc. All rights reserved. 
+// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// Loopback contains the header for loopback encapsulation. This header is +// used by both BSD and OpenBSD style loopback decoding (pcap's DLT_NULL +// and DLT_LOOP, respectively). +type Loopback struct { + BaseLayer + Family ProtocolFamily +} + +// LayerType returns LayerTypeLoopback. +func (l *Loopback) LayerType() gopacket.LayerType { return LayerTypeLoopback } + +// DecodeFromBytes decodes the given bytes into this layer. +func (l *Loopback) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + return errors.New("Loopback packet too small") + } + + // The protocol could be either big-endian or little-endian, we're + // not sure. But we're PRETTY sure that the value is less than + // 256, so we can check the first two bytes. + var prot uint32 + if data[0] == 0 && data[1] == 0 { + prot = binary.BigEndian.Uint32(data[:4]) + } else { + prot = binary.LittleEndian.Uint32(data[:4]) + } + if prot > 0xFF { + return fmt.Errorf("Invalid loopback protocol %q", data[:4]) + } + + l.Family = ProtocolFamily(prot) + l.BaseLayer = BaseLayer{data[:4], data[4:]} + return nil +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (l *Loopback) CanDecode() gopacket.LayerClass { + return LayerTypeLoopback +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (l *Loopback) NextLayerType() gopacket.LayerType { + return l.Family.LayerType() +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +func (l *Loopback) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + binary.LittleEndian.PutUint32(bytes, uint32(l.Family)) + return nil +} + +func decodeLoopback(data []byte, p gopacket.PacketBuilder) error { + l := Loopback{} + if err := l.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil { + return err + } + p.AddLayer(&l) + return p.NextDecoder(l.Family) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mldv1.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mldv1.go new file mode 100644 index 00000000..e1bb1dc0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mldv1.go @@ -0,0 +1,182 @@ +// Copyright 2018 GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "math" + "net" + "time" + + "github.com/google/gopacket" +) + +// MLDv1Message represents the common structure of all MLDv1 messages +type MLDv1Message struct { + BaseLayer + // 3.4. Maximum Response Delay + MaximumResponseDelay time.Duration + // 3.6. Multicast Address + // Zero in general query + // Specific IPv6 multicast address otherwise + MulticastAddress net.IP +} + +// DecodeFromBytes decodes the given bytes into this layer. 
+func (m *MLDv1Message) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return errors.New("ICMP layer less than 20 bytes for Multicast Listener Query Message V1") + } + + m.MaximumResponseDelay = time.Duration(binary.BigEndian.Uint16(data[0:2])) * time.Millisecond + // data[2:4] is reserved and not used in mldv1 + m.MulticastAddress = data[4:20] + + return nil +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (*MLDv1Message) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (m *MLDv1Message) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + buf, err := b.PrependBytes(20) + if err != nil { + return err + } + + if m.MaximumResponseDelay < 0 { + return errors.New("maximum response delay must not be negative") + } + dms := m.MaximumResponseDelay / time.Millisecond + if dms > math.MaxUint16 { + return fmt.Errorf("maximum response delay %dms is more than the allowed 65535ms", dms) + } + binary.BigEndian.PutUint16(buf[0:2], uint16(dms)) + + copy(buf[2:4], []byte{0x0, 0x0}) + + ma16 := m.MulticastAddress.To16() + if ma16 == nil { + return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress) + } + copy(buf[4:20], ma16) + + return nil +} + +// Sums this layer up nicely formatted +func (m *MLDv1Message) String() string { + return fmt.Sprintf( + "Maximum Response Delay: %dms, Multicast Address: %s", + m.MaximumResponseDelay/time.Millisecond, + m.MulticastAddress) +} + +// MLDv1MulticastListenerQueryMessage are sent by the router to determine +// whether there are multicast listeners on the link. +// https://tools.ietf.org/html/rfc2710 Page 5 +type MLDv1MulticastListenerQueryMessage struct { + MLDv1Message +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (m *MLDv1MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + err := m.MLDv1Message.DecodeFromBytes(data, df) + if err != nil { + return err + } + + if len(data) > 20 { + m.Payload = data[20:] + } + + return nil +} + +// LayerType returns LayerTypeMLDv1MulticastListenerQuery. +func (*MLDv1MulticastListenerQueryMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv1MulticastListenerQuery +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (*MLDv1MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv1MulticastListenerQuery +} + +// IsGeneralQuery is true when this is a general query. +// In a Query message, the Multicast Address field is set to zero when +// sending a General Query. +// https://tools.ietf.org/html/rfc2710#section-3.6 +func (m *MLDv1MulticastListenerQueryMessage) IsGeneralQuery() bool { + return net.IPv6zero.Equal(m.MulticastAddress) +} + +// IsSpecificQuery is true when this is not a general query. +// In a Query message, the Multicast Address field is set to a specific +// IPv6 multicast address when sending a Multicast-Address-Specific Query. 
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsSpecificQuery() bool {
+	return !m.IsGeneralQuery()
+}
+
+// MLDv1MulticastListenerReportMessage is sent by a client listening on
+// a specific multicast address to indicate that it is (still) listening
+// on the specific multicast address.
+// https://tools.ietf.org/html/rfc2710 Page 6
+type MLDv1MulticastListenerReportMessage struct {
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerReport.
+func (*MLDv1MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// MLDv1MulticastListenerDoneMessage should be sent by a client when it ceases
+// to listen to a multicast address on an interface.
+// https://tools.ietf.org/html/rfc2710 Page 7
+type MLDv1MulticastListenerDoneMessage struct {
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerDone.
+func (*MLDv1MulticastListenerDoneMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerDoneMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+func decodeMLDv1MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerReportMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerQueryMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerDone(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerDoneMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mldv2.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mldv2.go
new file mode 100644
index 00000000..248cf749
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mldv2.go
@@ -0,0 +1,619 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// S Flag bit is 1
+	mldv2STrue uint8 = 0x8
+
+	// S Flag value mask
+	//   mldv2STrue & mldv2SMask == mldv2STrue // true
+	//          0x1 & mldv2SMask == mldv2STrue // false
+	//          0x0 & mldv2SMask == mldv2STrue // false
+	mldv2SMask uint8 = 0x8
+
+	// QRV value mask
+	mldv2QRVMask uint8 = 0x7
+)
+
+// MLDv2MulticastListenerQueryMessage are sent by multicast routers to query the
+// multicast listening state of neighboring interfaces.
+// https://tools.ietf.org/html/rfc3810#section-5.1
+//
+// Some information, like Maximum Response Code and Multicast Address are in the
+// previous layer LayerTypeMLDv1MulticastListenerQuery
+type MLDv2MulticastListenerQueryMessage struct {
+	BaseLayer
+	// 5.1.3. Maximum Response Code
+	MaximumResponseCode uint16
+	// 5.1.5.
Multicast Address + // Zero in general query + // Specific IPv6 multicast address otherwise + MulticastAddress net.IP + // 5.1.7. S Flag (Suppress Router-Side Processing) + SuppressRoutersideProcessing bool + // 5.1.8. QRV (Querier's Robustness Variable) + QueriersRobustnessVariable uint8 + // 5.1.9. QQIC (Querier's Query Interval Code) + QueriersQueryIntervalCode uint8 + // 5.1.10. Number of Sources (N) + NumberOfSources uint16 + // 5.1.11 Source Address [i] + SourceAddresses []net.IP +} + +// DecodeFromBytes decodes the given bytes into this layer. +func (m *MLDv2MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 24 { + df.SetTruncated() + return errors.New("ICMP layer less than 24 bytes for Multicast Listener Query Message V2") + } + + m.MaximumResponseCode = binary.BigEndian.Uint16(data[0:2]) + // ignore data[2:4] as per https://tools.ietf.org/html/rfc3810#section-5.1.4 + m.MulticastAddress = data[4:20] + m.SuppressRoutersideProcessing = (data[20] & mldv2SMask) == mldv2STrue + m.QueriersRobustnessVariable = data[20] & mldv2QRVMask + m.QueriersQueryIntervalCode = data[21] + + m.NumberOfSources = binary.BigEndian.Uint16(data[22:24]) + + var end int + for i := uint16(0); i < m.NumberOfSources; i++ { + begin := 24 + (int(i) * 16) + end = begin + 16 + + if end > len(data) { + df.SetTruncated() + return fmt.Errorf("ICMP layer less than %d bytes for Multicast Listener Query Message V2", end) + } + + m.SourceAddresses = append(m.SourceAddresses, data[begin:end]) + } + + return nil +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (*MLDv2MulticastListenerQueryMessage) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
+func (m *MLDv2MulticastListenerQueryMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if err := m.serializeSourceAddressesTo(b, opts); err != nil { + return err + } + + buf, err := b.PrependBytes(24) + if err != nil { + return err + } + + binary.BigEndian.PutUint16(buf[0:2], m.MaximumResponseCode) + copy(buf[2:4], []byte{0x00, 0x00}) // set reserved bytes to zero + + ma16 := m.MulticastAddress.To16() + if ma16 == nil { + return fmt.Errorf("invalid MulticastAddress '%s'", m.MulticastAddress) + } + copy(buf[4:20], ma16) + + byte20 := m.QueriersRobustnessVariable & mldv2QRVMask + if m.SuppressRoutersideProcessing { + byte20 |= mldv2STrue + } else { + byte20 &= ^mldv2STrue // the complement of mldv2STrue + } + byte20 &= 0x0F // set reserved bits to zero + buf[20] = byte20 + + binary.BigEndian.PutUint16(buf[22:24], m.NumberOfSources) + buf[21] = m.QueriersQueryIntervalCode + + return nil +} + +// writes each source address to the buffer preserving the order +func (m *MLDv2MulticastListenerQueryMessage) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + numberOfSourceAddresses := len(m.SourceAddresses) + if numberOfSourceAddresses > math.MaxUint16 { + return fmt.Errorf( + "there are more than %d source addresses, but 65535 is the maximum number of supported addresses", + numberOfSourceAddresses) + } + + if opts.FixLengths { + m.NumberOfSources = uint16(numberOfSourceAddresses) + } + + lastSAIdx := numberOfSourceAddresses - 1 + for k := range m.SourceAddresses { + i := lastSAIdx - k // reverse order + + buf, err := b.PrependBytes(16) + if err != nil { + return err + } + + sa16 := m.SourceAddresses[i].To16() + if sa16 == nil { + return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i]) + } + copy(buf[0:16], sa16) + } + + return nil +} + +// String sums this layer up nicely formatted +func (m *MLDv2MulticastListenerQueryMessage) String() string { + return fmt.Sprintf( + "Maximum Response Code: %#x (%dms), Multicast Address: %s, Suppress Routerside Processing: %t, QRV: %#x, QQIC: %#x (%ds), Number of Source Address: %d (actual: %d), Source Addresses: %s", + m.MaximumResponseCode, + m.MaximumResponseDelay(), + m.MulticastAddress, + m.SuppressRoutersideProcessing, + m.QueriersRobustnessVariable, + m.QueriersQueryIntervalCode, + m.QQI()/time.Second, + m.NumberOfSources, + len(m.SourceAddresses), + m.SourceAddresses) +} + +// LayerType returns LayerTypeMLDv2MulticastListenerQuery. +func (*MLDv2MulticastListenerQueryMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv2MulticastListenerQuery +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. 
+func (*MLDv2MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// QQI calculates the Querier's Query Interval based on the QQIC
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) QQI() time.Duration {
+	data := m.QueriersQueryIntervalCode
+	if data < 128 {
+		return time.Second * time.Duration(data)
+	}
+
+	exp := uint16(data) & 0x70 >> 4
+	mant := uint16(data) & 0x0F
+	// QQI = (mant | 0x10) << (exp + 3), per RFC 3810 section 5.1.9
+	return time.Second * (time.Duration(mant|0x10) << (exp + 3))
+}
+
+// SetQQI calculates and updates the Querier's Query Interval Code (QQIC)
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) SetQQI(d time.Duration) error {
+	if d < 0 {
+		m.QueriersQueryIntervalCode = 0
+		return errors.New("QQI duration is negative")
+	}
+
+	if d == 0 {
+		m.QueriersQueryIntervalCode = 0
+		return nil
+	}
+
+	dms := d / time.Second
+	if dms < 128 {
+		m.QueriersQueryIntervalCode = uint8(dms)
+		return nil // the exact value fits in the code field; no exponent form needed
+	}
+
+	if dms > 31744 { // mant=0xF, exp=0x7
+		m.QueriersQueryIntervalCode = 0xFF
+		return fmt.Errorf("QQI duration %ds exceeds the maximum allowed 31744s", dms)
+	}
+
+	value := uint16(dms) // ok, because 31744 < math.MaxUint16
+	exp := uint8(7)
+	for mask := uint16(0x4000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint8(0x000F & (value >> (exp + 3)))
+	sig := uint8(0x10)
+	m.QueriersQueryIntervalCode = sig | exp<<4 | mant
+
+	return nil
+}
+
+// MaximumResponseDelay returns the Maximum Response Delay based on the
+// Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) MaximumResponseDelay() time.Duration {
+	if m.MaximumResponseCode < 0x8000 {
+		// the code is the delay in milliseconds
+		return time.Millisecond * time.Duration(m.MaximumResponseCode)
+	}
+
+	exp := m.MaximumResponseCode & 0x7000 >> 12
+	mant := m.MaximumResponseCode & 0x0FFF
+
+	// MRD = (mant | 0x1000) << (exp + 3), per RFC 3810 section 5.1.3
+	return time.Millisecond * (time.Duration(mant|0x1000) << (exp + 3))
+}
+
+// SetMLDv2MaximumResponseDelay updates the Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) SetMLDv2MaximumResponseDelay(d time.Duration) error {
+	if d == 0 {
+		m.MaximumResponseCode = 0
+		return nil
+	}
+
+	if d < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+
+	dms := d / time.Millisecond
+
+	if dms < 32768 {
+		m.MaximumResponseCode = uint16(dms)
+		return nil // the exact value fits in the code field; no exponent form needed
+	}
+
+	if dms > 4193280 { // mant=0xFFF, exp=0x7
+		return fmt.Errorf("maximum response delay %dms is bigger than the maximum of 4193280ms", dms)
+	}
+
+	value := uint32(dms) // ok, because 4193280 < math.MaxUint32
+	exp := uint8(7)
+	for mask := uint32(0x40000000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint16(0x00000FFF & (value >> (exp + 3)))
+	sig := uint16(0x1000)
+	m.MaximumResponseCode = sig | uint16(exp)<<12 | mant
+	return nil
+}
+
+// MLDv2MulticastListenerReportMessage is sent by an IP node to report the
+// current multicast listening state, or changes therein.
+// https://tools.ietf.org/html/rfc3810#section-5.2
+type MLDv2MulticastListenerReportMessage struct {
+	BaseLayer
+	// 5.2.3. Nr of Mcast Address Records
+	NumberOfMulticastAddressRecords uint16
+	// 5.2.4. Multicast Address Record [i]
+	MulticastAddressRecords []MLDv2MulticastAddressRecord
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerReportMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 4 { + df.SetTruncated() + return errors.New("ICMP layer less than 4 bytes for Multicast Listener Report Message V2") + } + + // ignore data[0:2] as per RFC + // https://tools.ietf.org/html/rfc3810#section-5.2.1 + m.NumberOfMulticastAddressRecords = binary.BigEndian.Uint16(data[2:4]) + + begin := 4 + for i := uint16(0); i < m.NumberOfMulticastAddressRecords; i++ { + mar := MLDv2MulticastAddressRecord{} + read, err := mar.decode(data[begin:], df) + if err != nil { + return err + } + + m.MulticastAddressRecords = append(m.MulticastAddressRecords, mar) + + begin += read + } + + return nil +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (m *MLDv2MulticastListenerReportMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + lastItemIdx := len(m.MulticastAddressRecords) - 1 + for k := range m.MulticastAddressRecords { + i := lastItemIdx - k // reverse order + + err := m.MulticastAddressRecords[i].serializeTo(b, opts) + if err != nil { + return err + } + } + + if opts.FixLengths { + numberOfMAR := len(m.MulticastAddressRecords) + if numberOfMAR > math.MaxUint16 { + return fmt.Errorf( + "%d multicast address records added, but the maximum is 65535", + numberOfMAR) + } + + m.NumberOfMulticastAddressRecords = uint16(numberOfMAR) + } + + buf, err := b.PrependBytes(4) + if err != nil { + return err + } + + copy(buf[0:2], []byte{0x0, 0x0}) + binary.BigEndian.PutUint16(buf[2:4], m.NumberOfMulticastAddressRecords) + return nil +} + +// Sums this layer up nicely formatted +func (m *MLDv2MulticastListenerReportMessage) String() string { + return fmt.Sprintf( + "Number of Mcast Addr Records: %d (actual %d), Multicast Address Records: %+v", + m.NumberOfMulticastAddressRecords, + len(m.MulticastAddressRecords), + m.MulticastAddressRecords) +} + +// LayerType returns LayerTypeMLDv2MulticastListenerQuery. +func (*MLDv2MulticastListenerReportMessage) LayerType() gopacket.LayerType { + return LayerTypeMLDv2MulticastListenerReport +} + +// CanDecode returns the set of layer types that this DecodingLayer can decode. +func (*MLDv2MulticastListenerReportMessage) CanDecode() gopacket.LayerClass { + return LayerTypeMLDv2MulticastListenerReport +} + +// NextLayerType returns the layer type contained by this DecodingLayer. +func (*MLDv2MulticastListenerReportMessage) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// MLDv2MulticastAddressRecordType holds the type of a +// Multicast Address Record, according to +// https://tools.ietf.org/html/rfc3810#section-5.2.5 and +// https://tools.ietf.org/html/rfc3810#section-5.2.12 +type MLDv2MulticastAddressRecordType uint8 + +const ( + // MLDv2MulticastAddressRecordTypeModeIsIncluded stands for + // MODE_IS_INCLUDE - indicates that the interface has a filter + // mode of INCLUDE for the specified multicast address. + MLDv2MulticastAddressRecordTypeModeIsIncluded MLDv2MulticastAddressRecordType = 1 + // MLDv2MulticastAddressRecordTypeModeIsExcluded stands for + // MODE_IS_EXCLUDE - indicates that the interface has a filter + // mode of EXCLUDE for the specified multicast address. 
+	MLDv2MulticastAddressRecordTypeModeIsExcluded MLDv2MulticastAddressRecordType = 2
+	// MLDv2MulticastAddressRecordTypeChangeToIncludeMode stands for
+	// CHANGE_TO_INCLUDE_MODE - indicates that the interface has
+	// changed to INCLUDE filter mode for the specified multicast
+	// address.
+	MLDv2MulticastAddressRecordTypeChangeToIncludeMode MLDv2MulticastAddressRecordType = 3
+	// MLDv2MulticastAddressRecordTypeChangeToExcludeMode stands for
+	// CHANGE_TO_EXCLUDE_MODE - indicates that the interface has
+	// changed to EXCLUDE filter mode for the specified multicast
+	// address.
+	MLDv2MulticastAddressRecordTypeChangeToExcludeMode MLDv2MulticastAddressRecordType = 4
+	// MLDv2MulticastAddressRecordTypeAllowNewSources stands for
+	// ALLOW_NEW_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the additional sources that the node wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeAllowNewSources MLDv2MulticastAddressRecordType = 5
+	// MLDv2MulticastAddressRecordTypeBlockOldSources stands for
+	// BLOCK_OLD_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the sources that the node no longer wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeBlockOldSources MLDv2MulticastAddressRecordType = 6
+)
+
+// String returns the human readable record type.
+// Naming follows https://tools.ietf.org/html/rfc3810#section-5.2.12
+func (m MLDv2MulticastAddressRecordType) String() string {
+	switch m {
+	case MLDv2MulticastAddressRecordTypeModeIsIncluded:
+		return "MODE_IS_INCLUDE"
+	case MLDv2MulticastAddressRecordTypeModeIsExcluded:
+		return "MODE_IS_EXCLUDE"
+	case MLDv2MulticastAddressRecordTypeChangeToIncludeMode:
+		return "CHANGE_TO_INCLUDE_MODE"
+	case MLDv2MulticastAddressRecordTypeChangeToExcludeMode:
+		return "CHANGE_TO_EXCLUDE_MODE"
+	case MLDv2MulticastAddressRecordTypeAllowNewSources:
+		return "ALLOW_NEW_SOURCES"
+	case MLDv2MulticastAddressRecordTypeBlockOldSources:
+		return "BLOCK_OLD_SOURCES"
+	default:
+		return fmt.Sprintf("UNKNOWN(%d)", m)
+	}
+}
+
+// MLDv2MulticastAddressRecord contains information on the sender listening to a
+// single multicast address on the interface from which the report is sent.
+// https://tools.ietf.org/html/rfc3810#section-5.2.4
+type MLDv2MulticastAddressRecord struct {
+	// 5.2.5. Record Type
+	RecordType MLDv2MulticastAddressRecordType
+	// 5.2.6. Auxiliary Data Length (number of 32-bit words)
+	AuxDataLen uint8
+	// 5.2.7. Number Of Sources (N)
+	N uint16
+	// 5.2.8. Multicast Address
+	MulticastAddress net.IP
+	// 5.2.9 Source Address [i]
+	SourceAddresses []net.IP
+	// 5.2.10 Auxiliary Data
+	AuxiliaryData []byte
+}
+
+// decodes a multicast address record from bytes
+func (m *MLDv2MulticastAddressRecord) decode(data []byte, df gopacket.DecodeFeedback) (int, error) {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return 0, errors.New(
+			"Multicast Listener Report Message V2 layer less than 4 bytes for Multicast Address Record")
+	}
+
+	m.RecordType = MLDv2MulticastAddressRecordType(data[0])
+	m.AuxDataLen = data[1]
+	m.N = binary.BigEndian.Uint16(data[2:4])
+	m.MulticastAddress = data[4:20]
+
+	for i := uint16(0); i < m.N; i++ {
+		begin := 20 + (int(i) * 16)
+		end := begin + 16
+
+		if len(data) < end {
+			df.SetTruncated()
+			return begin, fmt.Errorf(
+				"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	expectedLengthWithoutAuxData := 20 + (int(m.N) * 16)
+	expectedTotalLength := (int(m.AuxDataLen) * 4) + expectedLengthWithoutAuxData // *4 because AuxDataLen are 32-bit words
+	if len(data) < expectedTotalLength {
+		return expectedLengthWithoutAuxData, fmt.Errorf(
+			"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record",
+			expectedTotalLength)
+	}
+
+	m.AuxiliaryData = data[expectedLengthWithoutAuxData:expectedTotalLength]
+
+	return expectedTotalLength, nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastAddressRecord) String() string {
+	return fmt.Sprintf(
+		"RecordType: %d (%s), AuxDataLen: %d [32-bit words], N: %d, Multicast Address: %s, SourceAddresses: %s, Auxiliary Data: %#x",
+		m.RecordType,
+		m.RecordType.String(),
+		m.AuxDataLen,
+		m.N,
+		m.MulticastAddress.To16(),
+		m.SourceAddresses,
+		m.AuxiliaryData)
+}
+
+// serializes a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeAuxiliaryDataTo(b, opts); err != nil {
+		return err
+	}
+
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = uint8(m.RecordType)
+	buf[1] = m.AuxDataLen
+	binary.BigEndian.PutUint16(buf[2:4], m.N)
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	return nil
+}
+
+// serializes the auxiliary data of a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeAuxiliaryDataTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if remainder := len(m.AuxiliaryData) % 4; remainder != 0 {
+		// Pad the auxiliary data up to the next 32-bit word boundary.
+		zeroWord := []byte{0x0, 0x0, 0x0, 0x0}
+		m.AuxiliaryData = append(m.AuxiliaryData, zeroWord[:4-remainder]...)
+	}
+
+	if opts.FixLengths {
+		auxDataLen := len(m.AuxiliaryData) / 4
+
+		if auxDataLen > math.MaxUint8 {
+			return fmt.Errorf("auxiliary data is %d 32-bit words, but the maximum is 255 32-bit words", auxDataLen)
+		}
+
+		m.AuxDataLen = uint8(auxDataLen)
+	}
+
+	buf, err := b.PrependBytes(len(m.AuxiliaryData))
+	if err != nil {
+		return err
+	}
+
+	copy(buf, m.AuxiliaryData)
+	return nil
+}
+
+// serializes the source addresses of a multicast address record preserving the order
+func (m *MLDv2MulticastAddressRecord) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if opts.FixLengths {
+		numberOfSourceAddresses := len(m.SourceAddresses)
+
+		if numberOfSourceAddresses > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d source addresses added, but the maximum is 65535",
+				numberOfSourceAddresses)
+		}
+
+		m.N = uint16(numberOfSourceAddresses)
+	}
+
+	lastItemIdx := len(m.SourceAddresses) - 1
+	for k := range m.SourceAddresses {
+		i := lastItemIdx - k // reverse order
+
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		sa16 := m.SourceAddresses[i].To16()
+		if sa16 == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf, sa16)
+	}
+
+	return nil
+}
+
+func decodeMLDv2MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv2MulticastListenerReportMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv2MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv2MulticastListenerQueryMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/modbustcp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/modbustcp.go
new file mode 100644
index 00000000..bafbd743
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/modbustcp.go
@@ -0,0 +1,150 @@
+// Copyright 2018, The GoPacket Authors, All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// ModbusTCP Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for ModbusTCP.
+//
+//******************************************************************************
+
+const mbapRecordSizeInBytes int = 7
+const modbusPDUMinimumRecordSizeInBytes int = 2
+const modbusPDUMaximumRecordSizeInBytes int = 253
+
+// ModbusProtocol type
+type ModbusProtocol uint16
+
+// ModbusProtocol known values.
+const (
+	ModbusProtocolModbus ModbusProtocol = 0
+)
+
+func (mp ModbusProtocol) String() string {
+	switch mp {
+	default:
+		return "Unknown"
+	case ModbusProtocolModbus:
+		return "Modbus"
+	}
+}
+
+//******************************************************************************
+
+// ModbusTCP Type
+// --------
+// Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object
+// represents in a structured form the MODBUS Application Protocol header (MBAP) record present as the TCP
+// payload of a ModbusTCP packet.
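+//
+// A minimal decoding sketch (illustrative only; `data` is assumed to hold the
+// raw bytes of one Modbus/TCP segment payload):
+//
+//	var mbap ModbusTCP
+//	if err := mbap.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+//		// handle a truncated or malformed MBAP header
+//	}
+//	pdu := mbap.Payload() // the Modbus PDU: function code followed by data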
+//
+type ModbusTCP struct {
+	BaseLayer // Stores the packet bytes and payload (Modbus PDU) bytes.
+
+	TransactionIdentifier uint16         // Identification of a MODBUS Request/Response transaction
+	ProtocolIdentifier    ModbusProtocol // It is used for intra-system multiplexing
+	Length                uint16         // Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length)
+	UnitIdentifier        uint8          // Identification of a remote slave connected on a serial line or on other buses
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.
+func (d *ModbusTCP) LayerType() gopacket.LayerType {
+	return LayerTypeModbusTCP
+}
+
+//******************************************************************************
+
+// decodeModbusTCP analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the ModbusTCP layer.
+func decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &ModbusTCP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return p.NextDecoder(d.NextLayerType())
+
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// On success, it loads the ModbusTCP object with information about the packet
+// and returns nil.
+// On failure, it returns an error (non nil).
+func (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be an MBAP record, then return an error.
+	if len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too short")
+	}
+
+	if len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too long")
+	}
+
+	// ModbusTCP type embeds type BaseLayer which contains two fields:
+	// Contents is supposed to contain the bytes of the data at this level (MBAP).
+	// Payload is supposed to contain the payload of this level (PDU).
+	d.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:]}
+
+	// Extract the fields from the block of bytes.
+	// The fields can just be copied in big endian order.
+	d.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])
+	d.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4]))
+	d.Length = binary.BigEndian.Uint16(data[4:6])
+
+	// Length should have the size of the payload plus one byte (size of UnitIdentifier)
+	if d.Length != uint16(len(d.BaseLayer.Payload)+1) {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet with wrong field value (Length)")
+	}
+	d.UnitIdentifier = uint8(data[6])
+
+	return nil
+}
+
+//******************************************************************************
+
+// NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.
+func (d *ModbusTCP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+//******************************************************************************
+
+// Payload returns the Modbus Protocol Data Unit (PDU) composed of the Function Code and Data,
+// which is carried within the ModbusTCP packet.
+func (d *ModbusTCP) Payload() []byte {
+	return d.BaseLayer.Payload
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mpls.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mpls.go
new file mode 100644
index 00000000..83079a09
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/mpls.go
@@ -0,0 +1,87 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// MPLS is the MPLS packet header.
+type MPLS struct {
+	BaseLayer
+	Label        uint32
+	TrafficClass uint8
+	StackBottom  bool
+	TTL          uint8
+}
+
+// LayerType returns gopacket.LayerTypeMPLS.
+func (m *MPLS) LayerType() gopacket.LayerType { return LayerTypeMPLS }
+
+// ProtocolGuessingDecoder attempts to guess the protocol of the bytes it's
+// given, then decode the packet accordingly. Its algorithm for guessing is:
+// If the packet starts with byte 0x45-0x4F: IPv4
+// If the packet starts with byte 0x60-0x6F: IPv6
+// Otherwise: Error
+// See draft-hsmit-isis-aal5mux-00.txt for more detail on this approach.
+type ProtocolGuessingDecoder struct{}
+
+func (ProtocolGuessingDecoder) Decode(data []byte, p gopacket.PacketBuilder) error {
+	switch data[0] {
+	// 0x40 | header_len, where header_len is at least 5.
+	case 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f:
+		return decodeIPv4(data, p)
+	// IPv6 can start with any byte whose first 4 bits are 0x6.
+	case 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f:
+		return decodeIPv6(data, p)
+	}
+	return errors.New("Unable to guess protocol of packet data")
+}
+
+// MPLSPayloadDecoder is the decoder used to decode data encapsulated by each MPLS
+// layer. MPLS contains no type information, so we have to explicitly decide
+// which decoder to use. This is initially set to ProtocolGuessingDecoder, our
+// simple attempt at guessing protocols based on the first few bytes of data
+// available to us. However, if you know that in your environment MPLS always
+// encapsulates a specific protocol, you may reset this.
+var MPLSPayloadDecoder gopacket.Decoder = ProtocolGuessingDecoder{}
+
+func decodeMPLS(data []byte, p gopacket.PacketBuilder) error {
+	// Guard against short input before reading the 4-byte label stack entry.
+	if len(data) < 4 {
+		p.SetTruncated()
+		return errors.New("not enough bytes to decode MPLS")
+	}
+	decoded := binary.BigEndian.Uint32(data[:4])
+	mpls := &MPLS{
+		Label:        decoded >> 12,
+		TrafficClass: uint8(decoded>>9) & 0x7,
+		StackBottom:  decoded&0x100 != 0,
+		TTL:          uint8(decoded),
+		BaseLayer:    BaseLayer{data[:4], data[4:]},
+	}
+	p.AddLayer(mpls)
+	if mpls.StackBottom {
+		return p.NextDecoder(MPLSPayloadDecoder)
+	}
+	return p.NextDecoder(gopacket.DecodeFunc(decodeMPLS))
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
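+//
+// For reference, the 4-byte label stack entry written here is packed from the
+// most significant bit down as Label (20 bits), TrafficClass (3 bits),
+// StackBottom (1 bit) and TTL (8 bits), mirroring decodeMPLS above.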
+func (m *MPLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + encoded := m.Label << 12 + encoded |= uint32(m.TrafficClass) << 9 + encoded |= uint32(m.TTL) + if m.StackBottom { + encoded |= 0x100 + } + binary.BigEndian.PutUint32(bytes, encoded) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ndp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ndp.go new file mode 100644 index 00000000..f7ca1b26 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ndp.go @@ -0,0 +1,611 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// Enum types courtesy of... +// http://anonsvn.wireshark.org/wireshark/trunk/epan/dissectors/packet-ndp.c + +package layers + +import ( + "fmt" + "github.com/google/gopacket" + "net" +) + +type NDPChassisType uint8 + +// Nortel Chassis Types +const ( + NDPChassisother NDPChassisType = 1 + NDPChassis3000 NDPChassisType = 2 + NDPChassis3030 NDPChassisType = 3 + NDPChassis2310 NDPChassisType = 4 + NDPChassis2810 NDPChassisType = 5 + NDPChassis2912 NDPChassisType = 6 + NDPChassis2914 NDPChassisType = 7 + NDPChassis271x NDPChassisType = 8 + NDPChassis2813 NDPChassisType = 9 + NDPChassis2814 NDPChassisType = 10 + NDPChassis2915 NDPChassisType = 11 + NDPChassis5000 NDPChassisType = 12 + NDPChassis2813SA NDPChassisType = 13 + NDPChassis2814SA NDPChassisType = 14 + NDPChassis810M NDPChassisType = 15 + NDPChassisEthercell NDPChassisType = 16 + NDPChassis5005 NDPChassisType = 17 + NDPChassisAlcatelEWC NDPChassisType = 18 + NDPChassis2715SA NDPChassisType = 20 + NDPChassis2486 NDPChassisType = 21 + NDPChassis28000series NDPChassisType = 22 + NDPChassis23000series NDPChassisType = 23 + NDPChassis5DN00xseries NDPChassisType = 24 + NDPChassisBayStackEthernet NDPChassisType = 25 + NDPChassis23100series NDPChassisType = 26 + NDPChassis100BaseTHub NDPChassisType = 27 + NDPChassis3000FastEthernet NDPChassisType = 28 + NDPChassisOrionSwitch NDPChassisType = 29 + NDPChassisDDS NDPChassisType = 31 + NDPChassisCentillion6slot NDPChassisType = 32 + NDPChassisCentillion12slot NDPChassisType = 33 + NDPChassisCentillion1slot NDPChassisType = 34 + NDPChassisBayStack301 NDPChassisType = 35 + NDPChassisBayStackTokenRingHub NDPChassisType = 36 + NDPChassisFVCMultimediaSwitch NDPChassisType = 37 + NDPChassisSwitchNode NDPChassisType = 38 + NDPChassisBayStack302Switch NDPChassisType = 39 + NDPChassisBayStack350Switch NDPChassisType = 40 + NDPChassisBayStack150EthernetHub NDPChassisType = 41 + NDPChassisCentillion50NSwitch NDPChassisType = 42 + NDPChassisCentillion50TSwitch NDPChassisType = 43 + NDPChassisBayStack303304Switches NDPChassisType = 44 + NDPChassisBayStack200EthernetHub NDPChassisType = 45 + NDPChassisBayStack25010100EthernetHub NDPChassisType = 46 + NDPChassisBayStack450101001000Switches NDPChassisType = 48 + NDPChassisBayStack41010100Switches NDPChassisType = 49 + NDPChassisPassport1200L3Switch NDPChassisType = 50 + NDPChassisPassport1250L3Switch NDPChassisType = 51 + NDPChassisPassport1100L3Switch NDPChassisType = 52 + NDPChassisPassport1150L3Switch NDPChassisType = 53 + NDPChassisPassport1050L3Switch NDPChassisType = 54 + NDPChassisPassport1051L3Switch NDPChassisType = 55 + NDPChassisPassport8610L3Switch NDPChassisType 
= 56 + NDPChassisPassport8606L3Switch NDPChassisType = 57 + NDPChassisPassport8010 NDPChassisType = 58 + NDPChassisPassport8006 NDPChassisType = 59 + NDPChassisBayStack670wirelessaccesspoint NDPChassisType = 60 + NDPChassisPassport740 NDPChassisType = 61 + NDPChassisPassport750 NDPChassisType = 62 + NDPChassisPassport790 NDPChassisType = 63 + NDPChassisBusinessPolicySwitch200010100Switches NDPChassisType = 64 + NDPChassisPassport8110L2Switch NDPChassisType = 65 + NDPChassisPassport8106L2Switch NDPChassisType = 66 + NDPChassisBayStack3580GigSwitch NDPChassisType = 67 + NDPChassisBayStack10PowerSupplyUnit NDPChassisType = 68 + NDPChassisBayStack42010100Switch NDPChassisType = 69 + NDPChassisOPTeraMetro1200EthernetServiceModule NDPChassisType = 70 + NDPChassisOPTera8010co NDPChassisType = 71 + NDPChassisOPTera8610coL3Switch NDPChassisType = 72 + NDPChassisOPTera8110coL2Switch NDPChassisType = 73 + NDPChassisOPTera8003 NDPChassisType = 74 + NDPChassisOPTera8603L3Switch NDPChassisType = 75 + NDPChassisOPTera8103L2Switch NDPChassisType = 76 + NDPChassisBayStack380101001000Switch NDPChassisType = 77 + NDPChassisEthernetSwitch47048T NDPChassisType = 78 + NDPChassisOPTeraMetro1450EthernetServiceModule NDPChassisType = 79 + NDPChassisOPTeraMetro1400EthernetServiceModule NDPChassisType = 80 + NDPChassisAlteonSwitchFamily NDPChassisType = 81 + NDPChassisEthernetSwitch46024TPWR NDPChassisType = 82 + NDPChassisOPTeraMetro8010OPML2Switch NDPChassisType = 83 + NDPChassisOPTeraMetro8010coOPML2Switch NDPChassisType = 84 + NDPChassisOPTeraMetro8006OPML2Switch NDPChassisType = 85 + NDPChassisOPTeraMetro8003OPML2Switch NDPChassisType = 86 + NDPChassisAlteon180e NDPChassisType = 87 + NDPChassisAlteonAD3 NDPChassisType = 88 + NDPChassisAlteon184 NDPChassisType = 89 + NDPChassisAlteonAD4 NDPChassisType = 90 + NDPChassisPassport1424L3Switch NDPChassisType = 91 + NDPChassisPassport1648L3Switch NDPChassisType = 92 + NDPChassisPassport1612L3Switch NDPChassisType = 93 + NDPChassisPassport1624L3Switch NDPChassisType = 94 + NDPChassisBayStack38024FFiber1000Switch NDPChassisType = 95 + NDPChassisEthernetRoutingSwitch551024T NDPChassisType = 96 + NDPChassisEthernetRoutingSwitch551048T NDPChassisType = 97 + NDPChassisEthernetSwitch47024T NDPChassisType = 98 + NDPChassisNortelNetworksWirelessLANAccessPoint2220 NDPChassisType = 99 + NDPChassisPassportRBS2402L3Switch NDPChassisType = 100 + NDPChassisAlteonApplicationSwitch2424 NDPChassisType = 101 + NDPChassisAlteonApplicationSwitch2224 NDPChassisType = 102 + NDPChassisAlteonApplicationSwitch2208 NDPChassisType = 103 + NDPChassisAlteonApplicationSwitch2216 NDPChassisType = 104 + NDPChassisAlteonApplicationSwitch3408 NDPChassisType = 105 + NDPChassisAlteonApplicationSwitch3416 NDPChassisType = 106 + NDPChassisNortelNetworksWirelessLANSecuritySwitch2250 NDPChassisType = 107 + NDPChassisEthernetSwitch42548T NDPChassisType = 108 + NDPChassisEthernetSwitch42524T NDPChassisType = 109 + NDPChassisNortelNetworksWirelessLANAccessPoint2221 NDPChassisType = 110 + NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch NDPChassisType = 111 + NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch NDPChassisType = 112 + NDPChassisPassport830010slotchassis NDPChassisType = 113 + NDPChassisPassport83006slotchassis NDPChassisType = 114 + NDPChassisEthernetRoutingSwitch552024TPWR NDPChassisType = 115 + NDPChassisEthernetRoutingSwitch552048TPWR NDPChassisType = 116 + NDPChassisNortelNetworksVPNGateway3050 NDPChassisType = 117 + NDPChassisAlteonSSL31010100 NDPChassisType = 118 + 
NDPChassisAlteonSSL31010100Fiber NDPChassisType = 119 + NDPChassisAlteonSSL31010100FIPS NDPChassisType = 120 + NDPChassisAlteonSSL410101001000 NDPChassisType = 121 + NDPChassisAlteonSSL410101001000Fiber NDPChassisType = 122 + NDPChassisAlteonApplicationSwitch2424SSL NDPChassisType = 123 + NDPChassisEthernetSwitch32524T NDPChassisType = 124 + NDPChassisEthernetSwitch32524G NDPChassisType = 125 + NDPChassisNortelNetworksWirelessLANAccessPoint2225 NDPChassisType = 126 + NDPChassisNortelNetworksWirelessLANSecuritySwitch2270 NDPChassisType = 127 + NDPChassis24portEthernetSwitch47024TPWR NDPChassisType = 128 + NDPChassis48portEthernetSwitch47048TPWR NDPChassisType = 129 + NDPChassisEthernetRoutingSwitch553024TFD NDPChassisType = 130 + NDPChassisEthernetSwitch351024T NDPChassisType = 131 + NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch NDPChassisType = 132 + NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch NDPChassisType = 133 + NDPChassisNortelSecureAccessSwitch NDPChassisType = 134 + NDPChassisNortelNetworksVPNGateway3070 NDPChassisType = 135 + NDPChassisOPTeraMetro3500 NDPChassisType = 136 + NDPChassisSMBBES101024T NDPChassisType = 137 + NDPChassisSMBBES101048T NDPChassisType = 138 + NDPChassisSMBBES102024TPWR NDPChassisType = 139 + NDPChassisSMBBES102048TPWR NDPChassisType = 140 + NDPChassisSMBBES201024T NDPChassisType = 141 + NDPChassisSMBBES201048T NDPChassisType = 142 + NDPChassisSMBBES202024TPWR NDPChassisType = 143 + NDPChassisSMBBES202048TPWR NDPChassisType = 144 + NDPChassisSMBBES11024T NDPChassisType = 145 + NDPChassisSMBBES11048T NDPChassisType = 146 + NDPChassisSMBBES12024TPWR NDPChassisType = 147 + NDPChassisSMBBES12048TPWR NDPChassisType = 148 + NDPChassisSMBBES21024T NDPChassisType = 149 + NDPChassisSMBBES21048T NDPChassisType = 150 + NDPChassisSMBBES22024TPWR NDPChassisType = 151 + NDPChassisSMBBES22048TPWR NDPChassisType = 152 + NDPChassisOME6500 NDPChassisType = 153 + NDPChassisEthernetRoutingSwitch4548GT NDPChassisType = 154 + NDPChassisEthernetRoutingSwitch4548GTPWR NDPChassisType = 155 + NDPChassisEthernetRoutingSwitch4550T NDPChassisType = 156 + NDPChassisEthernetRoutingSwitch4550TPWR NDPChassisType = 157 + NDPChassisEthernetRoutingSwitch4526FX NDPChassisType = 158 + NDPChassisEthernetRoutingSwitch250026T NDPChassisType = 159 + NDPChassisEthernetRoutingSwitch250026TPWR NDPChassisType = 160 + NDPChassisEthernetRoutingSwitch250050T NDPChassisType = 161 + NDPChassisEthernetRoutingSwitch250050TPWR NDPChassisType = 162 +) + +type NDPBackplaneType uint8 + +// Nortel Backplane Types +const ( + NDPBackplaneOther NDPBackplaneType = 1 + NDPBackplaneEthernet NDPBackplaneType = 2 + NDPBackplaneEthernetTokenring NDPBackplaneType = 3 + NDPBackplaneEthernetFDDI NDPBackplaneType = 4 + NDPBackplaneEthernetTokenringFDDI NDPBackplaneType = 5 + NDPBackplaneEthernetTokenringRedundantPower NDPBackplaneType = 6 + NDPBackplaneEthernetTokenringFDDIRedundantPower NDPBackplaneType = 7 + NDPBackplaneTokenRing NDPBackplaneType = 8 + NDPBackplaneEthernetTokenringFastEthernet NDPBackplaneType = 9 + NDPBackplaneEthernetFastEthernet NDPBackplaneType = 10 + NDPBackplaneEthernetTokenringFastEthernetRedundantPower NDPBackplaneType = 11 + NDPBackplaneEthernetFastEthernetGigabitEthernet NDPBackplaneType = 12 +) + +type NDPState uint8 + +// Device State +const ( + NDPStateTopology NDPState = 1 + NDPStateHeartbeat NDPState = 2 + NDPStateNew NDPState = 3 +) + +// NortelDiscovery is a packet layer containing the Nortel Discovery Protocol. 
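+//
+// The wire format decoded below is a fixed 11 bytes: IPAddress (4 bytes),
+// SegmentID (3 bytes), then Chassis, Backplane, State and NumLinks as one
+// byte each.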
+type NortelDiscovery struct { + BaseLayer + IPAddress net.IP + SegmentID []byte + Chassis NDPChassisType + Backplane NDPBackplaneType + State NDPState + NumLinks uint8 +} + +// LayerType returns gopacket.LayerTypeNortelDiscovery. +func (c *NortelDiscovery) LayerType() gopacket.LayerType { + return LayerTypeNortelDiscovery +} + +func decodeNortelDiscovery(data []byte, p gopacket.PacketBuilder) error { + c := &NortelDiscovery{} + if len(data) < 11 { + return fmt.Errorf("Invalid NortelDiscovery packet length %d", len(data)) + } + c.IPAddress = data[0:4] + c.SegmentID = data[4:7] + c.Chassis = NDPChassisType(data[7]) + c.Backplane = NDPBackplaneType(data[8]) + c.State = NDPState(data[9]) + c.NumLinks = uint8(data[10]) + p.AddLayer(c) + return nil +} + +func (t NDPChassisType) String() (s string) { + switch t { + case NDPChassisother: + s = "other" + case NDPChassis3000: + s = "3000" + case NDPChassis3030: + s = "3030" + case NDPChassis2310: + s = "2310" + case NDPChassis2810: + s = "2810" + case NDPChassis2912: + s = "2912" + case NDPChassis2914: + s = "2914" + case NDPChassis271x: + s = "271x" + case NDPChassis2813: + s = "2813" + case NDPChassis2814: + s = "2814" + case NDPChassis2915: + s = "2915" + case NDPChassis5000: + s = "5000" + case NDPChassis2813SA: + s = "2813SA" + case NDPChassis2814SA: + s = "2814SA" + case NDPChassis810M: + s = "810M" + case NDPChassisEthercell: + s = "Ethercell" + case NDPChassis5005: + s = "5005" + case NDPChassisAlcatelEWC: + s = "Alcatel Ethernet workgroup conc." + case NDPChassis2715SA: + s = "2715SA" + case NDPChassis2486: + s = "2486" + case NDPChassis28000series: + s = "28000 series" + case NDPChassis23000series: + s = "23000 series" + case NDPChassis5DN00xseries: + s = "5DN00x series" + case NDPChassisBayStackEthernet: + s = "BayStack Ethernet" + case NDPChassis23100series: + s = "23100 series" + case NDPChassis100BaseTHub: + s = "100Base-T Hub" + case NDPChassis3000FastEthernet: + s = "3000 Fast Ethernet" + case NDPChassisOrionSwitch: + s = "Orion switch" + case NDPChassisDDS: + s = "DDS" + case NDPChassisCentillion6slot: + s = "Centillion (6 slot)" + case NDPChassisCentillion12slot: + s = "Centillion (12 slot)" + case NDPChassisCentillion1slot: + s = "Centillion (1 slot)" + case NDPChassisBayStack301: + s = "BayStack 301" + case NDPChassisBayStackTokenRingHub: + s = "BayStack TokenRing Hub" + case NDPChassisFVCMultimediaSwitch: + s = "FVC Multimedia Switch" + case NDPChassisSwitchNode: + s = "Switch Node" + case NDPChassisBayStack302Switch: + s = "BayStack 302 Switch" + case NDPChassisBayStack350Switch: + s = "BayStack 350 Switch" + case NDPChassisBayStack150EthernetHub: + s = "BayStack 150 Ethernet Hub" + case NDPChassisCentillion50NSwitch: + s = "Centillion 50N switch" + case NDPChassisCentillion50TSwitch: + s = "Centillion 50T switch" + case NDPChassisBayStack303304Switches: + s = "BayStack 303 and 304 Switches" + case NDPChassisBayStack200EthernetHub: + s = "BayStack 200 Ethernet Hub" + case NDPChassisBayStack25010100EthernetHub: + s = "BayStack 250 10/100 Ethernet Hub" + case NDPChassisBayStack450101001000Switches: + s = "BayStack 450 10/100/1000 Switches" + case NDPChassisBayStack41010100Switches: + s = "BayStack 410 10/100 Switches" + case NDPChassisPassport1200L3Switch: + s = "Passport 1200 L3 Switch" + case NDPChassisPassport1250L3Switch: + s = "Passport 1250 L3 Switch" + case NDPChassisPassport1100L3Switch: + s = "Passport 1100 L3 Switch" + case NDPChassisPassport1150L3Switch: + s = "Passport 1150 L3 Switch" + case 
NDPChassisPassport1050L3Switch: + s = "Passport 1050 L3 Switch" + case NDPChassisPassport1051L3Switch: + s = "Passport 1051 L3 Switch" + case NDPChassisPassport8610L3Switch: + s = "Passport 8610 L3 Switch" + case NDPChassisPassport8606L3Switch: + s = "Passport 8606 L3 Switch" + case NDPChassisPassport8010: + s = "Passport 8010" + case NDPChassisPassport8006: + s = "Passport 8006" + case NDPChassisBayStack670wirelessaccesspoint: + s = "BayStack 670 wireless access point" + case NDPChassisPassport740: + s = "Passport 740" + case NDPChassisPassport750: + s = "Passport 750" + case NDPChassisPassport790: + s = "Passport 790" + case NDPChassisBusinessPolicySwitch200010100Switches: + s = "Business Policy Switch 2000 10/100 Switches" + case NDPChassisPassport8110L2Switch: + s = "Passport 8110 L2 Switch" + case NDPChassisPassport8106L2Switch: + s = "Passport 8106 L2 Switch" + case NDPChassisBayStack3580GigSwitch: + s = "BayStack 3580 Gig Switch" + case NDPChassisBayStack10PowerSupplyUnit: + s = "BayStack 10 Power Supply Unit" + case NDPChassisBayStack42010100Switch: + s = "BayStack 420 10/100 Switch" + case NDPChassisOPTeraMetro1200EthernetServiceModule: + s = "OPTera Metro 1200 Ethernet Service Module" + case NDPChassisOPTera8010co: + s = "OPTera 8010co" + case NDPChassisOPTera8610coL3Switch: + s = "OPTera 8610co L3 switch" + case NDPChassisOPTera8110coL2Switch: + s = "OPTera 8110co L2 switch" + case NDPChassisOPTera8003: + s = "OPTera 8003" + case NDPChassisOPTera8603L3Switch: + s = "OPTera 8603 L3 switch" + case NDPChassisOPTera8103L2Switch: + s = "OPTera 8103 L2 switch" + case NDPChassisBayStack380101001000Switch: + s = "BayStack 380 10/100/1000 Switch" + case NDPChassisEthernetSwitch47048T: + s = "Ethernet Switch 470-48T" + case NDPChassisOPTeraMetro1450EthernetServiceModule: + s = "OPTera Metro 1450 Ethernet Service Module" + case NDPChassisOPTeraMetro1400EthernetServiceModule: + s = "OPTera Metro 1400 Ethernet Service Module" + case NDPChassisAlteonSwitchFamily: + s = "Alteon Switch Family" + case NDPChassisEthernetSwitch46024TPWR: + s = "Ethernet Switch 460-24T-PWR" + case NDPChassisOPTeraMetro8010OPML2Switch: + s = "OPTera Metro 8010 OPM L2 Switch" + case NDPChassisOPTeraMetro8010coOPML2Switch: + s = "OPTera Metro 8010co OPM L2 Switch" + case NDPChassisOPTeraMetro8006OPML2Switch: + s = "OPTera Metro 8006 OPM L2 Switch" + case NDPChassisOPTeraMetro8003OPML2Switch: + s = "OPTera Metro 8003 OPM L2 Switch" + case NDPChassisAlteon180e: + s = "Alteon 180e" + case NDPChassisAlteonAD3: + s = "Alteon AD3" + case NDPChassisAlteon184: + s = "Alteon 184" + case NDPChassisAlteonAD4: + s = "Alteon AD4" + case NDPChassisPassport1424L3Switch: + s = "Passport 1424 L3 switch" + case NDPChassisPassport1648L3Switch: + s = "Passport 1648 L3 switch" + case NDPChassisPassport1612L3Switch: + s = "Passport 1612 L3 switch" + case NDPChassisPassport1624L3Switch: + s = "Passport 1624 L3 switch" + case NDPChassisBayStack38024FFiber1000Switch: + s = "BayStack 380-24F Fiber 1000 Switch" + case NDPChassisEthernetRoutingSwitch551024T: + s = "Ethernet Routing Switch 5510-24T" + case NDPChassisEthernetRoutingSwitch551048T: + s = "Ethernet Routing Switch 5510-48T" + case NDPChassisEthernetSwitch47024T: + s = "Ethernet Switch 470-24T" + case NDPChassisNortelNetworksWirelessLANAccessPoint2220: + s = "Nortel Networks Wireless LAN Access Point 2220" + case NDPChassisPassportRBS2402L3Switch: + s = "Passport RBS 2402 L3 switch" + case NDPChassisAlteonApplicationSwitch2424: + s = "Alteon Application Switch 2424" + case 
NDPChassisAlteonApplicationSwitch2224:
+		s = "Alteon Application Switch 2224"
+	case NDPChassisAlteonApplicationSwitch2208:
+		s = "Alteon Application Switch 2208"
+	case NDPChassisAlteonApplicationSwitch2216:
+		s = "Alteon Application Switch 2216"
+	case NDPChassisAlteonApplicationSwitch3408:
+		s = "Alteon Application Switch 3408"
+	case NDPChassisAlteonApplicationSwitch3416:
+		s = "Alteon Application Switch 3416"
+	case NDPChassisNortelNetworksWirelessLANSecuritySwitch2250:
+		s = "Nortel Networks Wireless LAN Security Switch 2250"
+	case NDPChassisEthernetSwitch42548T:
+		s = "Ethernet Switch 425-48T"
+	case NDPChassisEthernetSwitch42524T:
+		s = "Ethernet Switch 425-24T"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2221:
+		s = "Nortel Networks Wireless LAN Access Point 2221"
+	case NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch:
+		s = "Nortel Metro Ethernet Service Unit 24-T SPF switch"
+	case NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch:
+		s = "Nortel Metro Ethernet Service Unit 24-T LX DC switch"
+	case NDPChassisPassport830010slotchassis:
+		s = "Passport 8300 10-slot chassis"
+	case NDPChassisPassport83006slotchassis:
+		s = "Passport 8300 6-slot chassis"
+	case NDPChassisEthernetRoutingSwitch552024TPWR:
+		s = "Ethernet Routing Switch 5520-24T-PWR"
+	case NDPChassisEthernetRoutingSwitch552048TPWR:
+		s = "Ethernet Routing Switch 5520-48T-PWR"
+	case NDPChassisNortelNetworksVPNGateway3050:
+		s = "Nortel Networks VPN Gateway 3050"
+	case NDPChassisAlteonSSL31010100:
+		s = "Alteon SSL 310 10/100"
+	case NDPChassisAlteonSSL31010100Fiber:
+		s = "Alteon SSL 310 10/100 Fiber"
+	case NDPChassisAlteonSSL31010100FIPS:
+		s = "Alteon SSL 310 10/100 FIPS"
+	case NDPChassisAlteonSSL410101001000:
+		s = "Alteon SSL 410 10/100/1000"
+	case NDPChassisAlteonSSL410101001000Fiber:
+		s = "Alteon SSL 410 10/100/1000 Fiber"
+	case NDPChassisAlteonApplicationSwitch2424SSL:
+		s = "Alteon Application Switch 2424-SSL"
+	case NDPChassisEthernetSwitch32524T:
+		s = "Ethernet Switch 325-24T"
+	case NDPChassisEthernetSwitch32524G:
+		s = "Ethernet Switch 325-24G"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2225:
+		s = "Nortel Networks Wireless LAN Access Point 2225"
+	case NDPChassisNortelNetworksWirelessLANSecuritySwitch2270:
+		s = "Nortel Networks Wireless LAN Security Switch 2270"
+	case NDPChassis24portEthernetSwitch47024TPWR:
+		s = "24-port Ethernet Switch 470-24T-PWR"
+	case NDPChassis48portEthernetSwitch47048TPWR:
+		s = "48-port Ethernet Switch 470-48T-PWR"
+	case NDPChassisEthernetRoutingSwitch553024TFD:
+		s = "Ethernet Routing Switch 5530-24TFD"
+	case NDPChassisEthernetSwitch351024T:
+		s = "Ethernet Switch 3510-24T"
+	case NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch:
+		s = "Nortel Metro Ethernet Service Unit 12G AC L3 switch"
+	case NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch:
+		s = "Nortel Metro Ethernet Service Unit 12G DC L3 switch"
+	case NDPChassisNortelSecureAccessSwitch:
+		s = "Nortel Secure Access Switch"
+	case NDPChassisNortelNetworksVPNGateway3070:
+		s = "Nortel Networks VPN Gateway 3070"
+	case NDPChassisOPTeraMetro3500:
+		s = "OPTera Metro 3500"
+	case NDPChassisSMBBES101024T:
+		s = "SMB BES 1010 24T"
+	case NDPChassisSMBBES101048T:
+		s = "SMB BES 1010 48T"
+	case NDPChassisSMBBES102024TPWR:
+		s = "SMB BES 1020 24T PWR"
+	case NDPChassisSMBBES102048TPWR:
+		s = "SMB BES 1020 48T PWR"
+	case NDPChassisSMBBES201024T:
+		s = "SMB BES 2010 24T"
+	case NDPChassisSMBBES201048T:
+		s = "SMB BES 2010 48T"
+	case NDPChassisSMBBES202024TPWR:
+		s = "SMB BES 2020 24T PWR"
+	case NDPChassisSMBBES202048TPWR:
+		s = "SMB BES 2020 48T PWR"
+	case NDPChassisSMBBES11024T:
+		s = "SMB BES 110 24T"
+	case NDPChassisSMBBES11048T:
+		s = "SMB BES 110 48T"
+	case NDPChassisSMBBES12024TPWR:
+		s = "SMB BES 120 24T PWR"
+	case NDPChassisSMBBES12048TPWR:
+		s = "SMB BES 120 48T PWR"
+	case NDPChassisSMBBES21024T:
+		s = "SMB BES 210 24T"
+	case NDPChassisSMBBES21048T:
+		s = "SMB BES 210 48T"
+	case NDPChassisSMBBES22024TPWR:
+		s = "SMB BES 220 24T PWR"
+	case NDPChassisSMBBES22048TPWR:
+		s = "SMB BES 220 48T PWR"
+	case NDPChassisOME6500:
+		s = "OME 6500"
+	case NDPChassisEthernetRoutingSwitch4548GT:
+		s = "Ethernet Routing Switch 4548GT"
+	case NDPChassisEthernetRoutingSwitch4548GTPWR:
+		s = "Ethernet Routing Switch 4548GT-PWR"
+	case NDPChassisEthernetRoutingSwitch4550T:
+		s = "Ethernet Routing Switch 4550T"
+	case NDPChassisEthernetRoutingSwitch4550TPWR:
+		s = "Ethernet Routing Switch 4550T-PWR"
+	case NDPChassisEthernetRoutingSwitch4526FX:
+		s = "Ethernet Routing Switch 4526FX"
+	case NDPChassisEthernetRoutingSwitch250026T:
+		s = "Ethernet Routing Switch 2500-26T"
+	case NDPChassisEthernetRoutingSwitch250026TPWR:
+		s = "Ethernet Routing Switch 2500-26T-PWR"
+	case NDPChassisEthernetRoutingSwitch250050T:
+		s = "Ethernet Routing Switch 2500-50T"
+	case NDPChassisEthernetRoutingSwitch250050TPWR:
+		s = "Ethernet Routing Switch 2500-50T-PWR"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t NDPBackplaneType) String() (s string) {
+	switch t {
+	case NDPBackplaneOther:
+		s = "Other"
+	case NDPBackplaneEthernet:
+		s = "Ethernet"
+	case NDPBackplaneEthernetTokenring:
+		s = "Ethernet and Tokenring"
+	case NDPBackplaneEthernetFDDI:
+		s = "Ethernet and FDDI"
+	case NDPBackplaneEthernetTokenringFDDI:
+		s = "Ethernet, Tokenring and FDDI"
+	case NDPBackplaneEthernetTokenringRedundantPower:
+		s = "Ethernet and Tokenring with redundant power"
+	case NDPBackplaneEthernetTokenringFDDIRedundantPower:
+		s = "Ethernet, Tokenring, FDDI with redundant power"
+	case NDPBackplaneTokenRing:
+		s = "Token Ring"
+	case NDPBackplaneEthernetTokenringFastEthernet:
+		s = "Ethernet, Tokenring and Fast Ethernet"
+	case NDPBackplaneEthernetFastEthernet:
+		s = "Ethernet and Fast Ethernet"
+	case NDPBackplaneEthernetTokenringFastEthernetRedundantPower:
+		s = "Ethernet, Tokenring, Fast Ethernet with redundant power"
+	case NDPBackplaneEthernetFastEthernetGigabitEthernet:
+		s = "Ethernet, Fast Ethernet and Gigabit Ethernet"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t NDPState) String() (s string) {
+	switch t {
+	case NDPStateTopology:
+		s = "Topology Change"
+	case NDPStateHeartbeat:
+		s = "Heartbeat"
+	case NDPStateNew:
+		s = "New"
+	default:
+		s = "Unknown"
+	}
+	return
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ntp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ntp.go
new file mode 100644
index 00000000..33c15b3b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ntp.go
@@ -0,0 +1,416 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// Network Time Protocol (NTP) Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for NTP.
+//
+//******************************************************************************
+//
+// About The Network Time Protocol (NTP)
+// -------------------------------------
+// NTP is a protocol that enables computers on the internet to set their
+// clocks to the correct time (or to a time that is acceptably close to the
+// correct time). NTP runs on top of UDP.
+//
+// There have been a series of versions of the NTP protocol. The latest
+// version is V4 and is specified in RFC 5905:
+//     http://www.ietf.org/rfc/rfc5905.txt
+//
+//******************************************************************************
+//
+// References
+// ----------
+//
+// Wikipedia's NTP entry:
+//     https://en.wikipedia.org/wiki/Network_Time_Protocol
+//     This is the best place to get an overview of NTP.
+//
+// Network Time Protocol Home Website:
+//     http://www.ntp.org/
+//     This appears to be the official website of NTP.
+//
+// List of current NTP Protocol RFCs:
+//     http://www.ntp.org/rfc.html
+//
+// RFC 958: "Network Time Protocol (NTP)" (1985)
+//     https://tools.ietf.org/html/rfc958
+//     This is the original NTP specification.
+//
+// RFC 1305: "Network Time Protocol (Version 3) Specification, Implementation and Analysis" (1992)
+//     https://tools.ietf.org/html/rfc1305
+//     The protocol was updated in 1992 yielding NTP V3.
+//
+// RFC 5905: "Network Time Protocol Version 4: Protocol and Algorithms Specification" (2010)
+//     https://www.ietf.org/rfc/rfc5905.txt
+//     The protocol was updated in 2010 yielding NTP V4.
+//     V4 is backwards compatible with all previous versions of NTP.
+//
+// RFC 5906: "Network Time Protocol Version 4: Autokey Specification"
+//     https://tools.ietf.org/html/rfc5906
+//     This document addresses the security of the NTP protocol
+//     and is probably not relevant to this package.
+//
+// RFC 5907: "Definitions of Managed Objects for Network Time Protocol Version 4 (NTPv4)"
+//     https://tools.ietf.org/html/rfc5907
+//     This document addresses the management of NTP servers and
+//     is probably not relevant to this package.
+//
+// RFC 5908: "Network Time Protocol (NTP) Server Option for DHCPv6"
+//     https://tools.ietf.org/html/rfc5908
+//     This document addresses the use of NTP in DHCPv6 and is
+//     probably not relevant to this package.
+//
+// "Let's make a NTP Client in C"
+//     https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html
+//     This web page contains useful information about the details of NTP,
+//     including an NTP record structure in C, and C code.
+//
+// "NTP Packet Header (NTP Reference Implementation) (Computer Network Time Synchronization)"
+//     http://what-when-how.com/computer-network-time-synchronization/
+//     ntp-packet-header-ntp-reference-implementation-computer-network-time-synchronization/
+//     This web page contains useful information on the details of NTP.
+//
+// "Technical information - NTP Data Packet"
+//     https://www.meinbergglobal.com/english/info/ntp-packet.htm
+//     This page has a helpful diagram of an NTP V4 packet.
+//
+//******************************************************************************
+//
+// Obsolete References
+// -------------------
+//
+// RFC 1119: "Network Time Protocol (Version 2) Specification and Implementation" (1989)
+//     https://tools.ietf.org/html/rfc1119
+//     Version 2 was drafted in 1989.
+//     It is unclear whether V2 was ever implemented or whether the
+//     ideas ended up in V3 (which was implemented in 1992).
+//
+// RFC 1361: "Simple Network Time Protocol (SNTP)"
+//     https://tools.ietf.org/html/rfc1361
+//     This document is obsoleted by RFC 1769 and is included only for completeness.
+//
+// RFC 1769: "Simple Network Time Protocol (SNTP)"
+//     https://tools.ietf.org/html/rfc1769
+//     This document is obsoleted by RFC 2030 and RFC 4330 and is included only for completeness.
+//
+// RFC 2030: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+//     https://tools.ietf.org/html/rfc2030
+//     This document is obsoleted by RFC 4330 and is included only for completeness.
+//
+// RFC 4330: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+//     https://tools.ietf.org/html/rfc4330
+//     This document is obsoleted by RFC 5905 and is included only for completeness.
+//
+//******************************************************************************
+//
+// Endian And Bit Numbering Issues
+// -------------------------------
+//
+// Endian and bit numbering issues can be confusing. Here is some
+// clarification:
+//
+// ENDIAN: Values are sent big endian.
+//     https://en.wikipedia.org/wiki/Endianness
+//
+// BIT NUMBERING: Bits are numbered 0 upwards from the most significant
+// bit to the least significant bit. This means that if there is a 32-bit
+// value, the most significant bit is called bit 0 and the least
+// significant bit is called bit 31.
+//
+// See RFC 791 Appendix B for more discussion.
+//
+//******************************************************************************
+//
+// NTP V3 and V4 Packet Format
+// ---------------------------
+// NTP packets are UDP packets whose payload contains an NTP record.
+//
+// The NTP RFC defines the format of the NTP record.
+//
+// There have been four versions of the protocol:
+//
+//     V1 in 1985
+//     V2 in 1989
+//     V3 in 1992
+//     V4 in 2010
+//
+// It is clear that V1 and V2 are obsolete, and there is no need to
+// cater for these formats.
+//
+// V3 and V4 essentially use the same format, with V4 adding some optional
+// fields on the end. So this package supports the V3 and V4 formats.
+// +// The current version of NTP (NTP V4)'s RFC (V4 - RFC 5905) contains +// the following diagram for the NTP record format: + +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |LI | VN |Mode | Stratum | Poll | Precision | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Root Delay | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Root Dispersion | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Reference ID | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Reference Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Origin Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Receive Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// + Transmit Timestamp (64) + +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Extension Field 1 (variable) . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// . . +// . Extension Field 2 (variable) . +// . . +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | Key Identifier | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | | +// | dgst (128) | +// | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// From http://www.ietf.org/rfc/rfc5905.txt +// +// The fields "Extension Field 1 (variable)" and later are optional fields, +// and so we can set a minimum NTP record size of 48 bytes. +// +const ntpMinimumRecordSizeInBytes int = 48 + +//****************************************************************************** + +// NTP Type +// -------- +// Type NTP implements the DecodingLayer interface. Each NTP object +// represents in a structured form the NTP record present as the UDP +// payload in an NTP UDP packet. +// + +type NTPLeapIndicator uint8 +type NTPVersion uint8 +type NTPMode uint8 +type NTPStratum uint8 +type NTPLog2Seconds int8 +type NTPFixed16Seconds uint32 +type NTPReferenceID uint32 +type NTPTimestamp uint64 + +type NTP struct { + BaseLayer // Stores the packet bytes and payload bytes. + + LeapIndicator NTPLeapIndicator // [0,3]. Indicates whether leap second(s) is to be added. + Version NTPVersion // [0,7]. Version of the NTP protocol. + Mode NTPMode // [0,7]. Mode. + Stratum NTPStratum // [0,255]. Stratum of time server in the server tree. + Poll NTPLog2Seconds // [-128,127]. The maximum interval between successive messages, in log2 seconds. + Precision NTPLog2Seconds // [-128,127]. The precision of the system clock, in log2 seconds. + RootDelay NTPFixed16Seconds // [0,2^32-1]. Total round trip delay to the reference clock in seconds times 2^16. + RootDispersion NTPFixed16Seconds // [0,2^32-1]. Total dispersion to the reference clock, in seconds times 2^16. + ReferenceID NTPReferenceID // ID code of reference clock [0,2^32-1]. + ReferenceTimestamp NTPTimestamp // Most recent timestamp from the reference clock. + OriginTimestamp NTPTimestamp // Local time when request was sent from local host. + ReceiveTimestamp NTPTimestamp // Local time (on server) that request arrived at server host. + TransmitTimestamp NTPTimestamp // Local time (on server) that request departed server host. 
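+
+	// Note: each NTPTimestamp is a 64-bit fixed-point value whose upper
+	// 32 bits count seconds since the NTP epoch (1 January 1900) and whose
+	// lower 32 bits hold the fraction of a second.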
+
+	// FIX: This package should analyse the extension fields and represent the extension fields too.
+	ExtensionBytes []byte // Just put extensions in a byte slice.
+}
+
+//******************************************************************************
+
+// LayerType returns the layer type of the NTP object, which is LayerTypeNTP.
+func (d *NTP) LayerType() gopacket.LayerType {
+	return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// decodeNTP analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the NTP layer.
+func decodeNTP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &NTP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return nil
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// On success, it loads the NTP object with information about the packet
+// and returns nil.
+// On failure, it returns an error (non nil).
+func (d *NTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be an NTP record, then return an error.
+	if len(data) < ntpMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("NTP packet too short")
+	}
+
+	// RFC 5905 does not appear to define a maximum NTP record length.
+	// The protocol allows "extension fields" to be included in the record,
+	// and states about these fields:
+	//
+	//     "While the minimum field length containing required fields is
+	//      four words (16 octets), a maximum field length remains to be
+	//      established."
+	//
+	// For this reason, the packet length is not checked here for being too long.
+
+	// NTP type embeds type BaseLayer which contains two fields:
+	// Contents is supposed to contain the bytes of the data at this level.
+	// Payload is supposed to contain the payload of this level.
+	// Here we set the baselayer to be the bytes of the NTP record.
+	d.BaseLayer = BaseLayer{Contents: data}
+
+	// Extract the fields from the block of bytes.
+	// To make sense of this, refer to the packet diagram
+	// above and the section on endian conventions.
+
+	// The first few fields are all packed into the first 32 bits. Unpack them.
+	f := data[0]
+	d.LeapIndicator = NTPLeapIndicator((f & 0xC0) >> 6)
+	d.Version = NTPVersion((f & 0x38) >> 3)
+	d.Mode = NTPMode(f & 0x07)
+	d.Stratum = NTPStratum(data[1])
+	d.Poll = NTPLog2Seconds(data[2])
+	d.Precision = NTPLog2Seconds(data[3])
+
+	// The remaining fields can just be copied in big endian order.
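+	// For example, a RootDelay field holding 0x00018000 decodes to 1.5 seconds,
+	// since the stored value is seconds times 2^16 (0x18000 / 0x10000 = 1.5).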
+	d.RootDelay = NTPFixed16Seconds(binary.BigEndian.Uint32(data[4:8]))
+	d.RootDispersion = NTPFixed16Seconds(binary.BigEndian.Uint32(data[8:12]))
+	d.ReferenceID = NTPReferenceID(binary.BigEndian.Uint32(data[12:16]))
+	d.ReferenceTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[16:24]))
+	d.OriginTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[24:32]))
+	d.ReceiveTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[32:40]))
+	d.TransmitTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[40:48]))
+
+	// This layer does not attempt to analyse the extension bytes.
+	// But if there are any, we'd like the user to know. So we just
+	// place them all in an ExtensionBytes field.
+	d.ExtensionBytes = data[48:]
+
+	// Return no error.
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *NTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(ntpMinimumRecordSizeInBytes)
+	if err != nil {
+		return err
+	}
+
+	// Pack the first few fields into the first 32 bits.
+	h := uint8(0)
+	h |= (uint8(d.LeapIndicator) << 6) & 0xC0
+	h |= (uint8(d.Version) << 3) & 0x38
+	h |= (uint8(d.Mode)) & 0x07
+	data[0] = byte(h)
+	data[1] = byte(d.Stratum)
+	data[2] = byte(d.Poll)
+	data[3] = byte(d.Precision)
+
+	// The remaining fields can just be copied in big endian order.
+	binary.BigEndian.PutUint32(data[4:8], uint32(d.RootDelay))
+	binary.BigEndian.PutUint32(data[8:12], uint32(d.RootDispersion))
+	binary.BigEndian.PutUint32(data[12:16], uint32(d.ReferenceID))
+	binary.BigEndian.PutUint64(data[16:24], uint64(d.ReferenceTimestamp))
+	binary.BigEndian.PutUint64(data[24:32], uint64(d.OriginTimestamp))
+	binary.BigEndian.PutUint64(data[32:40], uint64(d.ReceiveTimestamp))
+	binary.BigEndian.PutUint64(data[40:48], uint64(d.TransmitTimestamp))
+
+	ex, err := b.AppendBytes(len(d.ExtensionBytes))
+	if err != nil {
+		return err
+	}
+	copy(ex, d.ExtensionBytes)
+
+	return nil
+}
+
+//******************************************************************************
+
+// CanDecode returns a set of layers that NTP objects can decode.
+// As NTP objects can only decode the NTP layer, we can return just that layer.
+// A single layer type implements LayerClass, so it can be returned directly.
+func (d *NTP) CanDecode() gopacket.LayerClass {
+	return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (NTP) layer. As NTP packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *NTP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+//******************************************************************************
+
+// Payload returns nil: NTP packets do not carry any data payload, so there is
+// no payload byte slice to return.
+// In Go, a nil slice is functionally identical to an empty slice, so we
+// return nil to avoid a heap allocation.
+func (d *NTP) Payload() []byte {
+	return nil
+}
+
+//******************************************************************************
+//*                            End Of NTP File                                 *
+//******************************************************************************
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ospf.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ospf.go
new file mode 100644
index 00000000..b8fbcb1b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ospf.go
@@ -0,0 +1,709 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// OSPFType denotes the OSPF packet type.
+type OSPFType uint8
+
+// Potential values for OSPF.Type.
+const (
+	OSPFHello                   OSPFType = 1
+	OSPFDatabaseDescription     OSPFType = 2
+	OSPFLinkStateRequest        OSPFType = 3
+	OSPFLinkStateUpdate         OSPFType = 4
+	OSPFLinkStateAcknowledgment OSPFType = 5
+)
+
+// LSA Function Codes for LSAheader.LSType
+const (
+	RouterLSAtypeV2         = 0x1
+	RouterLSAtype           = 0x2001
+	NetworkLSAtypeV2        = 0x2
+	NetworkLSAtype          = 0x2002
+	SummaryLSANetworktypeV2 = 0x3
+	InterAreaPrefixLSAtype  = 0x2003
+	SummaryLSAASBRtypeV2    = 0x4
+	InterAreaRouterLSAtype  = 0x2004
+	ASExternalLSAtypeV2     = 0x5
+	ASExternalLSAtype       = 0x4005
+	NSSALSAtype             = 0x2007
+	NSSALSAtypeV2           = 0x7
+	LinkLSAtype             = 0x0008
+	IntraAreaPrefixLSAtype  = 0x2009
+)
+
+// String conversions for OSPFType
+func (i OSPFType) String() string {
+	switch i {
+	case OSPFHello:
+		return "Hello"
+	case OSPFDatabaseDescription:
+		return "Database Description"
+	case OSPFLinkStateRequest:
+		return "Link State Request"
+	case OSPFLinkStateUpdate:
+		return "Link State Update"
+	case OSPFLinkStateAcknowledgment:
+		return "Link State Acknowledgment"
+	default:
+		return ""
+	}
+}
+
+// Prefix extends IntraAreaPrefixLSA
+type Prefix struct {
+	PrefixLength  uint8
+	PrefixOptions uint8
+	Metric        uint16
+	AddressPrefix []byte
+}
+
+// IntraAreaPrefixLSA is the struct from RFC 5340 A.4.10.
+type IntraAreaPrefixLSA struct {
+	NumOfPrefixes  uint16
+	RefLSType      uint16
+	RefLinkStateID uint32
+	RefAdvRouter   uint32
+	Prefixes       []Prefix
+}
+
+// LinkLSA is the struct from RFC 5340 A.4.9.
+type LinkLSA struct {
+	RtrPriority      uint8
+	Options          uint32
+	LinkLocalAddress []byte
+	NumOfPrefixes    uint32
+	Prefixes         []Prefix
+}
+
+// ASExternalLSAV2 is the struct from RFC 2328 A.4.5.
+type ASExternalLSAV2 struct {
+	NetworkMask       uint32
+	ExternalBit       uint8
+	Metric            uint32
+	ForwardingAddress uint32
+	ExternalRouteTag  uint32
+}
+
+// ASExternalLSA is the struct from RFC 5340 A.4.7.
+type ASExternalLSA struct {
+	Flags             uint8
+	Metric            uint32
+	PrefixLength      uint8
+	PrefixOptions     uint8
+	RefLSType         uint16
+	AddressPrefix     []byte
+	ForwardingAddress []byte
+	ExternalRouteTag  uint32
+	RefLinkStateID    uint32
+}
+
+// InterAreaRouterLSA is the struct from RFC 5340 A.4.6.
+type InterAreaRouterLSA struct {
+	Options             uint32
+	Metric              uint32
+	DestinationRouterID uint32
+}
+
+// InterAreaPrefixLSA is the struct from RFC 5340 A.4.5.
+type InterAreaPrefixLSA struct {
+	Metric        uint32
+	PrefixLength  uint8
+	PrefixOptions uint8
+	AddressPrefix []byte
+}
+
+// NetworkLSA is the struct from RFC 5340 A.4.4.
+type NetworkLSA struct { + Options uint32 + AttachedRouter []uint32 +} + +// NetworkLSAV2 is the struct from RFC 2328 A.4.3. +type NetworkLSAV2 struct { + NetworkMask uint32 + AttachedRouter []uint32 +} + +// RouterV2 extends RouterLSAV2 +type RouterV2 struct { + Type uint8 + LinkID uint32 + LinkData uint32 + Metric uint16 +} + +// RouterLSAV2 is the struct from RFC 2328 A.4.2. +type RouterLSAV2 struct { + Flags uint8 + Links uint16 + Routers []RouterV2 +} + +// Router extends RouterLSA +type Router struct { + Type uint8 + Metric uint16 + InterfaceID uint32 + NeighborInterfaceID uint32 + NeighborRouterID uint32 +} + +// RouterLSA is the struct from RFC 5340 A.4.3. +type RouterLSA struct { + Flags uint8 + Options uint32 + Routers []Router +} + +// LSAheader is the struct from RFC 5340 A.4.2 and RFC 2328 A.4.1. +type LSAheader struct { + LSAge uint16 + LSType uint16 + LinkStateID uint32 + AdvRouter uint32 + LSSeqNumber uint32 + LSChecksum uint16 + Length uint16 + LSOptions uint8 +} + +// LSA links LSAheader with the structs from RFC 5340 A.4. +type LSA struct { + LSAheader + Content interface{} +} + +// LSUpdate is the struct from RFC 5340 A.3.5. +type LSUpdate struct { + NumOfLSAs uint32 + LSAs []LSA +} + +// LSReq is the struct from RFC 5340 A.3.4. +type LSReq struct { + LSType uint16 + LSID uint32 + AdvRouter uint32 +} + +// DbDescPkg is the struct from RFC 5340 A.3.3. +type DbDescPkg struct { + Options uint32 + InterfaceMTU uint16 + Flags uint16 + DDSeqNumber uint32 + LSAinfo []LSAheader +} + +// HelloPkg is the struct from RFC 5340 A.3.2. +type HelloPkg struct { + InterfaceID uint32 + RtrPriority uint8 + Options uint32 + HelloInterval uint16 + RouterDeadInterval uint32 + DesignatedRouterID uint32 + BackupDesignatedRouterID uint32 + NeighborID []uint32 +} + +// HelloPkgV2 extends the HelloPkg struct with OSPFv2 information +type HelloPkgV2 struct { + HelloPkg + NetworkMask uint32 +} + +// OSPF is a basic OSPF packet header with common fields of Version 2 and Version 3. 
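+//
+// Content holds the type-specific body (for example HelloPkgV2, DbDescPkg,
+// LSUpdate, []LSReq or []LSAheader, depending on Type) and must be recovered
+// with a type assertion, e.g.:
+//
+//	if hello, ok := ospf.Content.(HelloPkgV2); ok {
+//		fmt.Println(hello.NetworkMask)
+//	}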
+type OSPF struct {
+	Version      uint8
+	Type         OSPFType
+	PacketLength uint16
+	RouterID     uint32
+	AreaID       uint32
+	Checksum     uint16
+	Content      interface{}
+}
+
+// OSPFv2 extends the OSPF header with version 2 specific fields.
+type OSPFv2 struct {
+	BaseLayer
+	OSPF
+	AuType         uint16
+	Authentication uint64
+}
+
+// OSPFv3 extends the OSPF header with version 3 specific fields.
+type OSPFv3 struct {
+	BaseLayer
+	OSPF
+	Instance uint8
+	Reserved uint8
+}
+
+// getLSAsv2 parses the LSA information from the packet for OSPFv2
+func getLSAsv2(num uint32, data []byte) ([]LSA, error) {
+	var lsas []LSA
+	var i uint32 = 0
+	var offset uint32 = 0
+	for ; i < num; i++ {
+		lstype := uint16(data[offset+3])
+		lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+		content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+		if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State type.")
+		}
+		lsa := LSA{
+			LSAheader: LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+				LSOptions:   data[offset+2],
+				LSType:      lstype,
+				LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+				Length:      lsalength,
+			},
+			Content: content,
+		}
+		lsas = append(lsas, lsa)
+		offset += uint32(lsalength)
+	}
+	return lsas, nil
+}
+
+// extractLSAInformation extracts all the LSA information
+func extractLSAInformation(lstype, lsalength uint16, data []byte) (interface{}, error) {
+	if lsalength < 20 {
+		return nil, fmt.Errorf("Link State header length %v too short, %v required", lsalength, 20)
+	}
+	if len(data) < int(lsalength) {
+		return nil, fmt.Errorf("Link State header length %v too short, %v required", len(data), lsalength)
+	}
+	var content interface{}
+	switch lstype {
+	case RouterLSAtypeV2:
+		var routers []RouterV2
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 12 {
+			router := RouterV2{
+				LinkID:   binary.BigEndian.Uint32(data[j : j+4]),
+				LinkData: binary.BigEndian.Uint32(data[j+4 : j+8]),
+				Type:     uint8(data[j+8]),
+				Metric:   binary.BigEndian.Uint16(data[j+10 : j+12]),
+			}
+			routers = append(routers, router)
+		}
+		links := binary.BigEndian.Uint16(data[22:24])
+		content = RouterLSAV2{
+			Flags:   data[20],
+			Links:   links,
+			Routers: routers,
+		}
+	case NSSALSAtypeV2:
+		fallthrough
+	case ASExternalLSAtypeV2:
+		content = ASExternalLSAV2{
+			NetworkMask:       binary.BigEndian.Uint32(data[20:24]),
+			ExternalBit:       data[24] & 0x80,
+			Metric:            binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+			ForwardingAddress: binary.BigEndian.Uint32(data[28:32]),
+			ExternalRouteTag:  binary.BigEndian.Uint32(data[32:36]),
+		}
+	case NetworkLSAtypeV2:
+		var routers []uint32
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 4 {
+			routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+		}
+		content = NetworkLSAV2{
+			NetworkMask:    binary.BigEndian.Uint32(data[20:24]),
+			AttachedRouter: routers,
+		}
+	case RouterLSAtype:
+		var routers []Router
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 16 {
+			router := Router{
+				Type:                uint8(data[j]),
+				Metric:              binary.BigEndian.Uint16(data[j+2 : j+4]),
+				InterfaceID:         binary.BigEndian.Uint32(data[j+4 : j+8]),
+				NeighborInterfaceID: binary.BigEndian.Uint32(data[j+8 : j+12]),
+				NeighborRouterID:    binary.BigEndian.Uint32(data[j+12 : j+16]),
+			}
+			routers = append(routers, router)
+		}
+		content = RouterLSA{
+			Flags: uint8(data[20]),
+			Options:
binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + Routers: routers, + } + case NetworkLSAtype: + var routers []uint32 + var j uint32 + for j = 24; j < uint32(lsalength); j += 4 { + routers = append(routers, binary.BigEndian.Uint32(data[j:j+4])) + } + content = NetworkLSA{ + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + AttachedRouter: routers, + } + case InterAreaPrefixLSAtype: + content = InterAreaPrefixLSA{ + Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + PrefixLength: uint8(data[24]), + PrefixOptions: uint8(data[25]), + AddressPrefix: data[28:uint32(lsalength)], + } + case InterAreaRouterLSAtype: + content = InterAreaRouterLSA{ + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + Metric: binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF, + DestinationRouterID: binary.BigEndian.Uint32(data[28:32]), + } + case ASExternalLSAtype: + fallthrough + case NSSALSAtype: + + flags := uint8(data[20]) + prefixLen := uint8(data[24]) / 8 + var forwardingAddress []byte + if (flags & 0x02) == 0x02 { + forwardingAddress = data[28+uint32(prefixLen) : 28+uint32(prefixLen)+16] + } + content = ASExternalLSA{ + Flags: flags, + Metric: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + PrefixLength: prefixLen, + PrefixOptions: uint8(data[25]), + RefLSType: binary.BigEndian.Uint16(data[26:28]), + AddressPrefix: data[28 : 28+uint32(prefixLen)], + ForwardingAddress: forwardingAddress, + } + case LinkLSAtype: + var prefixes []Prefix + var prefixOffset uint32 = 44 + var j uint32 + numOfPrefixes := binary.BigEndian.Uint32(data[40:44]) + for j = 0; j < numOfPrefixes; j++ { + prefixLen := uint8(data[prefixOffset]) + prefix := Prefix{ + PrefixLength: prefixLen, + PrefixOptions: uint8(data[prefixOffset+1]), + AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8], + } + prefixes = append(prefixes, prefix) + prefixOffset = prefixOffset + 4 + uint32(prefixLen)/8 + } + content = LinkLSA{ + RtrPriority: uint8(data[20]), + Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF, + LinkLocalAddress: data[24:40], + NumOfPrefixes: numOfPrefixes, + Prefixes: prefixes, + } + case IntraAreaPrefixLSAtype: + var prefixes []Prefix + var prefixOffset uint32 = 32 + var j uint16 + numOfPrefixes := binary.BigEndian.Uint16(data[20:22]) + for j = 0; j < numOfPrefixes; j++ { + prefixLen := uint8(data[prefixOffset]) + prefix := Prefix{ + PrefixLength: prefixLen, + PrefixOptions: uint8(data[prefixOffset+1]), + Metric: binary.BigEndian.Uint16(data[prefixOffset+2 : prefixOffset+4]), + AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8], + } + prefixes = append(prefixes, prefix) + prefixOffset = prefixOffset + 4 + uint32(prefixLen) + } + content = IntraAreaPrefixLSA{ + NumOfPrefixes: numOfPrefixes, + RefLSType: binary.BigEndian.Uint16(data[22:24]), + RefLinkStateID: binary.BigEndian.Uint32(data[24:28]), + RefAdvRouter: binary.BigEndian.Uint32(data[28:32]), + Prefixes: prefixes, + } + default: + return nil, fmt.Errorf("Unknown Link State type.") + } + return content, nil +} + +// getLSAs parses the LSA information from the packet for OSPFv3 +func getLSAs(num uint32, data []byte) ([]LSA, error) { + var lsas []LSA + var i uint32 = 0 + var offset uint32 = 0 + for ; i < num; i++ { + var content interface{} + lstype := binary.BigEndian.Uint16(data[offset+2 : offset+4]) + lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20]) + + content, err := extractLSAInformation(lstype, lsalength, data[offset:]) + if err != nil { + return nil, fmt.Errorf("Could not 
extract Link State type.") + } + lsa := LSA{ + LSAheader: LSAheader{ + LSAge: binary.BigEndian.Uint16(data[offset : offset+2]), + LSType: lstype, + LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]), + AdvRouter: binary.BigEndian.Uint32(data[offset+8 : offset+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]), + LSChecksum: binary.BigEndian.Uint16(data[offset+16 : offset+18]), + Length: lsalength, + }, + Content: content, + } + lsas = append(lsas, lsa) + offset += uint32(lsalength) + } + return lsas, nil +} + +// DecodeFromBytes decodes the given bytes into the OSPF layer. +func (ospf *OSPFv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 24 { + return fmt.Errorf("Packet too smal for OSPF Version 2") + } + + ospf.Version = uint8(data[0]) + ospf.Type = OSPFType(data[1]) + ospf.PacketLength = binary.BigEndian.Uint16(data[2:4]) + ospf.RouterID = binary.BigEndian.Uint32(data[4:8]) + ospf.AreaID = binary.BigEndian.Uint32(data[8:12]) + ospf.Checksum = binary.BigEndian.Uint16(data[12:14]) + ospf.AuType = binary.BigEndian.Uint16(data[14:16]) + ospf.Authentication = binary.BigEndian.Uint64(data[16:24]) + + switch ospf.Type { + case OSPFHello: + var neighbors []uint32 + for i := 44; uint16(i+4) <= ospf.PacketLength; i += 4 { + neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4])) + } + ospf.Content = HelloPkgV2{ + NetworkMask: binary.BigEndian.Uint32(data[24:28]), + HelloPkg: HelloPkg{ + HelloInterval: binary.BigEndian.Uint16(data[28:30]), + Options: uint32(data[30]), + RtrPriority: uint8(data[31]), + RouterDeadInterval: binary.BigEndian.Uint32(data[32:36]), + DesignatedRouterID: binary.BigEndian.Uint32(data[36:40]), + BackupDesignatedRouterID: binary.BigEndian.Uint32(data[40:44]), + NeighborID: neighbors, + }, + } + case OSPFDatabaseDescription: + var lsas []LSAheader + for i := 32; uint16(i+20) <= ospf.PacketLength; i += 20 { + lsa := LSAheader{ + LSAge: binary.BigEndian.Uint16(data[i : i+2]), + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]), + LSChecksum: binary.BigEndian.Uint16(data[i+16 : i+18]), + Length: binary.BigEndian.Uint16(data[i+18 : i+20]), + } + lsas = append(lsas, lsa) + } + ospf.Content = DbDescPkg{ + InterfaceMTU: binary.BigEndian.Uint16(data[24:26]), + Options: uint32(data[26]), + Flags: uint16(data[27]), + DDSeqNumber: binary.BigEndian.Uint32(data[28:32]), + LSAinfo: lsas, + } + case OSPFLinkStateRequest: + var lsrs []LSReq + for i := 24; uint16(i+12) <= ospf.PacketLength; i += 12 { + lsr := LSReq{ + LSType: binary.BigEndian.Uint16(data[i+2 : i+4]), + LSID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + } + lsrs = append(lsrs, lsr) + } + ospf.Content = lsrs + case OSPFLinkStateUpdate: + num := binary.BigEndian.Uint32(data[24:28]) + + lsas, err := getLSAsv2(num, data[28:]) + if err != nil { + return fmt.Errorf("Cannot parse Link State Update packet: %v", err) + } + ospf.Content = LSUpdate{ + NumOfLSAs: num, + LSAs: lsas, + } + case OSPFLinkStateAcknowledgment: + var lsas []LSAheader + for i := 24; uint16(i+20) <= ospf.PacketLength; i += 20 { + lsa := LSAheader{ + LSAge: binary.BigEndian.Uint16(data[i : i+2]), + LSOptions: data[i+2], + LSType: uint16(data[i+3]), + LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]), + AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]), + 
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = lsas
+	}
+	return nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	if len(data) < 16 {
+		return fmt.Errorf("Packet too small for OSPF Version 3")
+	}
+
+	ospf.Version = uint8(data[0])
+	ospf.Type = OSPFType(data[1])
+	ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+	ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+	ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+	ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+	ospf.Instance = uint8(data[14])
+	ospf.Reserved = uint8(data[15])
+
+	switch ospf.Type {
+	case OSPFHello:
+		var neighbors []uint32
+		for i := 36; uint16(i+4) <= ospf.PacketLength; i += 4 {
+			neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+		}
+		ospf.Content = HelloPkg{
+			InterfaceID:              binary.BigEndian.Uint32(data[16:20]),
+			RtrPriority:              uint8(data[20]),
+			Options:                  binary.BigEndian.Uint32(data[21:25]) >> 8,
+			HelloInterval:            binary.BigEndian.Uint16(data[24:26]),
+			RouterDeadInterval:       uint32(binary.BigEndian.Uint16(data[26:28])),
+			DesignatedRouterID:       binary.BigEndian.Uint32(data[28:32]),
+			BackupDesignatedRouterID: binary.BigEndian.Uint32(data[32:36]),
+			NeighborID:               neighbors,
+		}
+	case OSPFDatabaseDescription:
+		var lsas []LSAheader
+		for i := 28; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = DbDescPkg{
+			Options:      binary.BigEndian.Uint32(data[16:20]) & 0x00FFFFFF,
+			InterfaceMTU: binary.BigEndian.Uint16(data[20:22]),
+			Flags:        binary.BigEndian.Uint16(data[22:24]),
+			DDSeqNumber:  binary.BigEndian.Uint32(data[24:28]),
+			LSAinfo:      lsas,
+		}
+	case OSPFLinkStateRequest:
+		var lsrs []LSReq
+		for i := 16; uint16(i+12) <= ospf.PacketLength; i += 12 {
+			lsr := LSReq{
+				LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+			}
+			lsrs = append(lsrs, lsr)
+		}
+		ospf.Content = lsrs
+	case OSPFLinkStateUpdate:
+		num := binary.BigEndian.Uint32(data[16:20])
+		lsas, err := getLSAs(num, data[20:])
+		if err != nil {
+			return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+		}
+		ospf.Content = LSUpdate{
+			NumOfLSAs: num,
+			LSAs:      lsas,
+		}
+
+	case OSPFLinkStateAcknowledgment:
+		var lsas []LSAheader
+		for i := 16; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = lsas
+	default:
+	}
+
+	return nil
+}
+
+// LayerType returns LayerTypeOSPF
+func (ospf *OSPFv2) LayerType() gopacket.LayerType {
+	return LayerTypeOSPF
+}
+func (ospf *OSPFv3) LayerType() gopacket.LayerType {
+	return LayerTypeOSPF
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (ospf *OSPFv2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+func (ospf *OSPFv3) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ospf *OSPFv2) CanDecode() gopacket.LayerClass {
+	return LayerTypeOSPF
+}
+func (ospf *OSPFv3) CanDecode() gopacket.LayerClass {
+	return LayerTypeOSPF
+}
+
+func decodeOSPF(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 14 {
+		return fmt.Errorf("Packet too small for OSPF")
+	}
+
+	switch uint8(data[0]) {
+	case 2:
+		ospf := &OSPFv2{}
+		return decodingLayerDecoder(ospf, data, p)
+	case 3:
+		ospf := &OSPFv3{}
+		return decodingLayerDecoder(ospf, data, p)
+	default:
+	}
+
+	return fmt.Errorf("Unable to determine OSPF type.")
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/pflog.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/pflog.go
new file mode 100644
index 00000000..853882fd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/pflog.go
@@ -0,0 +1,76 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+type PFDirection uint8
+
+const (
+	PFDirectionInOut PFDirection = 0
+	PFDirectionIn    PFDirection = 1
+	PFDirectionOut   PFDirection = 2
+)
+
+// PFLog provides the layer for 'pf' packet-filter logging, as described at
+// http://www.freebsd.org/cgi/man.cgi?query=pflog&sektion=4
+type PFLog struct {
+	BaseLayer
+	Length              uint8
+	Family              ProtocolFamily
+	Action, Reason      uint8
+	IFName, Ruleset     []byte
+	RuleNum, SubruleNum uint32
+	UID                 uint32
+	PID                 int32
+	RuleUID             uint32
+	RulePID             int32
+	Direction           PFDirection
+	// The remainder is padding
+}
+
+func (pf *PFLog) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	pf.Length = data[0]
+	pf.Family = ProtocolFamily(data[1])
+	pf.Action = data[2]
+	pf.Reason = data[3]
+	pf.IFName = data[4:20]
+	pf.Ruleset = data[20:36]
+	pf.RuleNum = binary.BigEndian.Uint32(data[36:40])
+	pf.SubruleNum = binary.BigEndian.Uint32(data[40:44])
+	pf.UID = binary.BigEndian.Uint32(data[44:48])
+	pf.PID = int32(binary.BigEndian.Uint32(data[48:52]))
+	pf.RuleUID = binary.BigEndian.Uint32(data[52:56])
+	pf.RulePID = int32(binary.BigEndian.Uint32(data[56:60]))
+	pf.Direction = PFDirection(data[60])
+	if pf.Length%4 != 1 {
+		return errors.New("PFLog header length should be 3 less than multiple of 4")
+	}
+	actualLength := int(pf.Length) + 3
+	pf.Contents = data[:actualLength]
+	pf.Payload = data[actualLength:]
+	return nil
+}
+
+// LayerType returns layers.LayerTypePFLog
+func (pf *PFLog) LayerType() gopacket.LayerType { return LayerTypePFLog }
+
+func (pf *PFLog) CanDecode() gopacket.LayerClass { return LayerTypePFLog }
+
+func (pf *PFLog) NextLayerType() gopacket.LayerType {
+	return pf.Family.LayerType()
+}
+
+func decodePFLog(data []byte, p gopacket.PacketBuilder) error {
+	pf := &PFLog{}
+	return decodingLayerDecoder(pf, data, p)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ports.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ports.go
new file mode 100644
index 00000000..7ea5adaf
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ports.go
@@ -0,0 +1,155 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/google/gopacket"
+)
+
+// TCPPort is a port in a TCP layer.
+type TCPPort uint16
+
+// UDPPort is a port in a UDP layer.
+type UDPPort uint16
+
+// RUDPPort is a port in a RUDP layer.
+type RUDPPort uint8
+
+// SCTPPort is a port in a SCTP layer.
+type SCTPPort uint16
+
+// UDPLitePort is a port in a UDPLite layer.
+type UDPLitePort uint16
+
+// RUDPPortNames contains the string names for all RUDP ports.
+var RUDPPortNames = map[RUDPPort]string{}
+
+// UDPLitePortNames contains the string names for all UDPLite ports.
+var UDPLitePortNames = map[UDPLitePort]string{}
+
+// {TCP,UDP,SCTP}PortNames can be found in iana_ports.go
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// TCPPortNames.
+func (a TCPPort) String() string {
+	if name, ok := TCPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a TCPPort) LayerType() gopacket.LayerType {
+	lt := tcpPortLayerType[uint16(a)]
+	if lt != 0 {
+		return lt
+	}
+	return gopacket.LayerTypePayload
+}
+
+var tcpPortLayerType = [65536]gopacket.LayerType{
+	53:   LayerTypeDNS,
+	443:  LayerTypeTLS,       // https
+	502:  LayerTypeModbusTCP, // modbustcp
+	636:  LayerTypeTLS,       // ldaps
+	989:  LayerTypeTLS,       // ftps-data
+	990:  LayerTypeTLS,       // ftps
+	992:  LayerTypeTLS,       // telnets
+	993:  LayerTypeTLS,       // imaps
+	994:  LayerTypeTLS,       // ircs
+	995:  LayerTypeTLS,       // pop3s
+	5061: LayerTypeTLS,       // sips
+}
+
+// RegisterTCPPortLayerType creates a new mapping between a TCPPort
+// and an underlying LayerType.
+func RegisterTCPPortLayerType(port TCPPort, layerType gopacket.LayerType) {
+	tcpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// UDPPortNames.
+func (a UDPPort) String() string {
+	if name, ok := UDPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
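+//
+// For example, UDPPort(123).LayerType() is LayerTypeNTP, while an
+// unregistered port such as UDPPort(49152) falls back to
+// gopacket.LayerTypePayload.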
+func (a UDPPort) LayerType() gopacket.LayerType {
+	lt := udpPortLayerType[uint16(a)]
+	if lt != 0 {
+		return lt
+	}
+	return gopacket.LayerTypePayload
+}
+
+var udpPortLayerType = [65536]gopacket.LayerType{
+	53:   LayerTypeDNS,
+	123:  LayerTypeNTP,
+	4789: LayerTypeVXLAN,
+	67:   LayerTypeDHCPv4,
+	68:   LayerTypeDHCPv4,
+	546:  LayerTypeDHCPv6,
+	547:  LayerTypeDHCPv6,
+	5060: LayerTypeSIP,
+	6343: LayerTypeSFlow,
+	6081: LayerTypeGeneve,
+	3784: LayerTypeBFD,
+	2152: LayerTypeGTPv1U,
+	623:  LayerTypeRMCP,
+}
+
+// RegisterUDPPortLayerType creates a new mapping between a UDPPort
+// and an underlying LayerType.
+func RegisterUDPPortLayerType(port UDPPort, layerType gopacket.LayerType) {
+	udpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// RUDPPortNames.
+func (a RUDPPort) String() string {
+	if name, ok := RUDPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// SCTPPortNames.
+func (a SCTPPort) String() string {
+	if name, ok := SCTPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't. Well-known names are stored in
+// UDPLitePortNames.
+func (a UDPLitePort) String() string {
+	if name, ok := UDPLitePortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ppp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ppp.go
new file mode 100644
index 00000000..e534d698
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/ppp.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// PPP is the layer for PPP encapsulation headers.
+type PPP struct {
+	BaseLayer
+	PPPType       PPPType
+	HasPPTPHeader bool
+}
+
+// PPPEndpoint is a singleton endpoint for PPP. Since there is no actual
+// addressing for the two ends of a PPP connection, we use a singleton value
+// named 'point' for each endpoint.
+var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil)
+
+// PPPFlow is a singleton flow for PPP. Since there is no actual addressing for
+// the two ends of a PPP connection, we use a singleton value to represent the
+// flow for all PPP connections.
+var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil)
+
+// LayerType returns LayerTypePPP
+func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP }
+
+// LinkFlow returns PPPFlow.
+func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow } + +func decodePPP(data []byte, p gopacket.PacketBuilder) error { + ppp := &PPP{} + offset := 0 + if data[0] == 0xff && data[1] == 0x03 { + offset = 2 + ppp.HasPPTPHeader = true + } + if data[offset]&0x1 == 0 { + if data[offset+1]&0x1 == 0 { + return errors.New("PPP has invalid type") + } + ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2])) + ppp.Contents = data[offset : offset+2] + ppp.Payload = data[offset+2:] + } else { + ppp.PPPType = PPPType(data[offset]) + ppp.Contents = data[offset : offset+1] + ppp.Payload = data[offset+1:] + } + p.AddLayer(ppp) + p.SetLinkLayer(ppp) + return p.NextDecoder(ppp.PPPType) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + if p.PPPType&0x100 == 0 { + bytes, err := b.PrependBytes(2) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(p.PPPType)) + } else { + bytes, err := b.PrependBytes(1) + if err != nil { + return err + } + bytes[0] = uint8(p.PPPType) + } + if p.HasPPTPHeader { + bytes, err := b.PrependBytes(2) + if err != nil { + return err + } + bytes[0] = 0xff + bytes[1] = 0x03 + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/pppoe.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/pppoe.go new file mode 100644 index 00000000..14cd63a1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/pppoe.go @@ -0,0 +1,60 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// PPPoE is the layer for PPPoE encapsulation headers. +type PPPoE struct { + BaseLayer + Version uint8 + Type uint8 + Code PPPoECode + SessionId uint16 + Length uint16 +} + +// LayerType returns gopacket.LayerTypePPPoE. +func (p *PPPoE) LayerType() gopacket.LayerType { + return LayerTypePPPoE +} + +// decodePPPoE decodes the PPPoE header (see http://tools.ietf.org/html/rfc2516). +func decodePPPoE(data []byte, p gopacket.PacketBuilder) error { + pppoe := &PPPoE{ + Version: data[0] >> 4, + Type: data[0] & 0x0F, + Code: PPPoECode(data[1]), + SessionId: binary.BigEndian.Uint16(data[2:4]), + Length: binary.BigEndian.Uint16(data[4:6]), + } + pppoe.BaseLayer = BaseLayer{data[:6], data[6 : 6+pppoe.Length]} + p.AddLayer(pppoe) + return p.NextDecoder(pppoe.Code) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. 
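+//
+// A minimal sketch of serializing a PPPoE session header (the field values
+// here are illustrative only):
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true} // recompute Length from the payload
+//	err := gopacket.SerializeLayers(buf, opts,
+//		&PPPoE{Version: 1, Type: 1, Code: 0, SessionId: 0x1234},
+//		gopacket.Payload([]byte{0xc0, 0x21})) // e.g. a PPP protocol field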
+func (p *PPPoE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + payload := b.Bytes() + bytes, err := b.PrependBytes(6) + if err != nil { + return err + } + bytes[0] = (p.Version << 4) | p.Type + bytes[1] = byte(p.Code) + binary.BigEndian.PutUint16(bytes[2:], p.SessionId) + if opts.FixLengths { + p.Length = uint16(len(payload)) + } + binary.BigEndian.PutUint16(bytes[4:], p.Length) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/prism.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/prism.go new file mode 100644 index 00000000..e1711e7f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/prism.go @@ -0,0 +1,146 @@ +// Copyright 2015 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +// http://www.tcpdump.org/linktypes/LINKTYPE_IEEE802_11_PRISM.html + +package layers + +import ( + "encoding/binary" + "errors" + + "github.com/google/gopacket" +) + +func decodePrismValue(data []byte, pv *PrismValue) { + pv.DID = PrismDID(binary.LittleEndian.Uint32(data[0:4])) + pv.Status = binary.LittleEndian.Uint16(data[4:6]) + pv.Length = binary.LittleEndian.Uint16(data[6:8]) + pv.Data = data[8 : 8+pv.Length] +} + +type PrismDID uint32 + +const ( + PrismDIDType1HostTime PrismDID = 0x10044 + PrismDIDType2HostTime PrismDID = 0x01041 + PrismDIDType1MACTime PrismDID = 0x20044 + PrismDIDType2MACTime PrismDID = 0x02041 + PrismDIDType1Channel PrismDID = 0x30044 + PrismDIDType2Channel PrismDID = 0x03041 + PrismDIDType1RSSI PrismDID = 0x40044 + PrismDIDType2RSSI PrismDID = 0x04041 + PrismDIDType1SignalQuality PrismDID = 0x50044 + PrismDIDType2SignalQuality PrismDID = 0x05041 + PrismDIDType1Signal PrismDID = 0x60044 + PrismDIDType2Signal PrismDID = 0x06041 + PrismDIDType1Noise PrismDID = 0x70044 + PrismDIDType2Noise PrismDID = 0x07041 + PrismDIDType1Rate PrismDID = 0x80044 + PrismDIDType2Rate PrismDID = 0x08041 + PrismDIDType1TransmittedFrameIndicator PrismDID = 0x90044 + PrismDIDType2TransmittedFrameIndicator PrismDID = 0x09041 + PrismDIDType1FrameLength PrismDID = 0xA0044 + PrismDIDType2FrameLength PrismDID = 0x0A041 +) + +const ( + PrismType1MessageCode uint16 = 0x00000044 + PrismType2MessageCode uint16 = 0x00000041 +) + +func (p PrismDID) String() string { + dids := map[PrismDID]string{ + PrismDIDType1HostTime: "Host Time", + PrismDIDType2HostTime: "Host Time", + PrismDIDType1MACTime: "MAC Time", + PrismDIDType2MACTime: "MAC Time", + PrismDIDType1Channel: "Channel", + PrismDIDType2Channel: "Channel", + PrismDIDType1RSSI: "RSSI", + PrismDIDType2RSSI: "RSSI", + PrismDIDType1SignalQuality: "Signal Quality", + PrismDIDType2SignalQuality: "Signal Quality", + PrismDIDType1Signal: "Signal", + PrismDIDType2Signal: "Signal", + PrismDIDType1Noise: "Noise", + PrismDIDType2Noise: "Noise", + PrismDIDType1Rate: "Rate", + PrismDIDType2Rate: "Rate", + PrismDIDType1TransmittedFrameIndicator: "Transmitted Frame Indicator", + PrismDIDType2TransmittedFrameIndicator: "Transmitted Frame Indicator", + PrismDIDType1FrameLength: "Frame Length", + PrismDIDType2FrameLength: "Frame Length", + } + + if str, ok := dids[p]; ok { + return str + } + + return "Unknown DID" +} + +type PrismValue struct { + DID PrismDID + Status uint16 + Length uint16 + Data []byte +} + +func (pv *PrismValue) IsSupplied() bool { + return pv.Status == 1 +} + +var 
ErrPrismExpectedMoreData = errors.New("Expected more data.") +var ErrPrismInvalidCode = errors.New("Invalid header code.") + +func decodePrismHeader(data []byte, p gopacket.PacketBuilder) error { + d := &PrismHeader{} + return decodingLayerDecoder(d, data, p) +} + +type PrismHeader struct { + BaseLayer + Code uint16 + Length uint16 + DeviceName string + Values []PrismValue +} + +func (m *PrismHeader) LayerType() gopacket.LayerType { return LayerTypePrismHeader } + +func (m *PrismHeader) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Code = binary.LittleEndian.Uint16(data[0:4]) + m.Length = binary.LittleEndian.Uint16(data[4:8]) + m.DeviceName = string(data[8:24]) + m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: data[m.Length:len(data)]} + + switch m.Code { + case PrismType1MessageCode: + fallthrough + case PrismType2MessageCode: + // valid message code + default: + return ErrPrismInvalidCode + } + + offset := uint16(24) + + m.Values = make([]PrismValue, (m.Length-offset)/12) + for i := 0; i < len(m.Values); i++ { + decodePrismValue(data[offset:offset+12], &m.Values[i]) + offset += 12 + } + + if offset != m.Length { + return ErrPrismExpectedMoreData + } + + return nil +} + +func (m *PrismHeader) CanDecode() gopacket.LayerClass { return LayerTypePrismHeader } +func (m *PrismHeader) NextLayerType() gopacket.LayerType { return LayerTypeDot11 } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/radiotap.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/radiotap.go new file mode 100644 index 00000000..17c61335 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/radiotap.go @@ -0,0 +1,1069 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "strings" + + "github.com/google/gopacket" +) + +// align calculates the number of bytes needed to align with the width +// on the offset, returning the number of bytes we need to skip to +// align to the offset (width). 
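+//
+// For example, align(5, 4) == 3 and align(8, 4) == 0. The width must be a
+// power of two for the mask arithmetic below to work.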
+func align(offset uint16, width uint16) uint16 { + return ((((offset) + ((width) - 1)) & (^((width) - 1))) - offset) +} + +type RadioTapPresent uint32 + +const ( + RadioTapPresentTSFT RadioTapPresent = 1 << iota + RadioTapPresentFlags + RadioTapPresentRate + RadioTapPresentChannel + RadioTapPresentFHSS + RadioTapPresentDBMAntennaSignal + RadioTapPresentDBMAntennaNoise + RadioTapPresentLockQuality + RadioTapPresentTxAttenuation + RadioTapPresentDBTxAttenuation + RadioTapPresentDBMTxPower + RadioTapPresentAntenna + RadioTapPresentDBAntennaSignal + RadioTapPresentDBAntennaNoise + RadioTapPresentRxFlags + RadioTapPresentTxFlags + RadioTapPresentRtsRetries + RadioTapPresentDataRetries + _ + RadioTapPresentMCS + RadioTapPresentAMPDUStatus + RadioTapPresentVHT + RadioTapPresentEXT RadioTapPresent = 1 << 31 +) + +func (r RadioTapPresent) TSFT() bool { + return r&RadioTapPresentTSFT != 0 +} +func (r RadioTapPresent) Flags() bool { + return r&RadioTapPresentFlags != 0 +} +func (r RadioTapPresent) Rate() bool { + return r&RadioTapPresentRate != 0 +} +func (r RadioTapPresent) Channel() bool { + return r&RadioTapPresentChannel != 0 +} +func (r RadioTapPresent) FHSS() bool { + return r&RadioTapPresentFHSS != 0 +} +func (r RadioTapPresent) DBMAntennaSignal() bool { + return r&RadioTapPresentDBMAntennaSignal != 0 +} +func (r RadioTapPresent) DBMAntennaNoise() bool { + return r&RadioTapPresentDBMAntennaNoise != 0 +} +func (r RadioTapPresent) LockQuality() bool { + return r&RadioTapPresentLockQuality != 0 +} +func (r RadioTapPresent) TxAttenuation() bool { + return r&RadioTapPresentTxAttenuation != 0 +} +func (r RadioTapPresent) DBTxAttenuation() bool { + return r&RadioTapPresentDBTxAttenuation != 0 +} +func (r RadioTapPresent) DBMTxPower() bool { + return r&RadioTapPresentDBMTxPower != 0 +} +func (r RadioTapPresent) Antenna() bool { + return r&RadioTapPresentAntenna != 0 +} +func (r RadioTapPresent) DBAntennaSignal() bool { + return r&RadioTapPresentDBAntennaSignal != 0 +} +func (r RadioTapPresent) DBAntennaNoise() bool { + return r&RadioTapPresentDBAntennaNoise != 0 +} +func (r RadioTapPresent) RxFlags() bool { + return r&RadioTapPresentRxFlags != 0 +} +func (r RadioTapPresent) TxFlags() bool { + return r&RadioTapPresentTxFlags != 0 +} +func (r RadioTapPresent) RtsRetries() bool { + return r&RadioTapPresentRtsRetries != 0 +} +func (r RadioTapPresent) DataRetries() bool { + return r&RadioTapPresentDataRetries != 0 +} +func (r RadioTapPresent) MCS() bool { + return r&RadioTapPresentMCS != 0 +} +func (r RadioTapPresent) AMPDUStatus() bool { + return r&RadioTapPresentAMPDUStatus != 0 +} +func (r RadioTapPresent) VHT() bool { + return r&RadioTapPresentVHT != 0 +} +func (r RadioTapPresent) EXT() bool { + return r&RadioTapPresentEXT != 0 +} + +type RadioTapChannelFlags uint16 + +const ( + RadioTapChannelFlagsTurbo RadioTapChannelFlags = 0x0010 // Turbo channel + RadioTapChannelFlagsCCK RadioTapChannelFlags = 0x0020 // CCK channel + RadioTapChannelFlagsOFDM RadioTapChannelFlags = 0x0040 // OFDM channel + RadioTapChannelFlagsGhz2 RadioTapChannelFlags = 0x0080 // 2 GHz spectrum channel. 
+	RadioTapChannelFlagsGhz5    RadioTapChannelFlags = 0x0100 // 5 GHz spectrum channel
+	RadioTapChannelFlagsPassive RadioTapChannelFlags = 0x0200 // Only passive scan allowed
+	RadioTapChannelFlagsDynamic RadioTapChannelFlags = 0x0400 // Dynamic CCK-OFDM channel
+	RadioTapChannelFlagsGFSK    RadioTapChannelFlags = 0x0800 // GFSK channel (FHSS PHY)
+)
+
+func (r RadioTapChannelFlags) Turbo() bool {
+	return r&RadioTapChannelFlagsTurbo != 0
+}
+func (r RadioTapChannelFlags) CCK() bool {
+	return r&RadioTapChannelFlagsCCK != 0
+}
+func (r RadioTapChannelFlags) OFDM() bool {
+	return r&RadioTapChannelFlagsOFDM != 0
+}
+func (r RadioTapChannelFlags) Ghz2() bool {
+	return r&RadioTapChannelFlagsGhz2 != 0
+}
+func (r RadioTapChannelFlags) Ghz5() bool {
+	return r&RadioTapChannelFlagsGhz5 != 0
+}
+func (r RadioTapChannelFlags) Passive() bool {
+	return r&RadioTapChannelFlagsPassive != 0
+}
+func (r RadioTapChannelFlags) Dynamic() bool {
+	return r&RadioTapChannelFlagsDynamic != 0
+}
+func (r RadioTapChannelFlags) GFSK() bool {
+	return r&RadioTapChannelFlagsGFSK != 0
+}
+
+// String provides a human readable string for RadioTapChannelFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapChannelFlags value, not its string.
+func (a RadioTapChannelFlags) String() string {
+	var out bytes.Buffer
+	if a.Turbo() {
+		out.WriteString("Turbo,")
+	}
+	if a.CCK() {
+		out.WriteString("CCK,")
+	}
+	if a.OFDM() {
+		out.WriteString("OFDM,")
+	}
+	if a.Ghz2() {
+		out.WriteString("Ghz2,")
+	}
+	if a.Ghz5() {
+		out.WriteString("Ghz5,")
+	}
+	if a.Passive() {
+		out.WriteString("Passive,")
+	}
+	if a.Dynamic() {
+		out.WriteString("Dynamic,")
+	}
+	if a.GFSK() {
+		out.WriteString("GFSK,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+type RadioTapFlags uint8
+
+const (
+	RadioTapFlagsCFP           RadioTapFlags = 1 << iota // sent/received during CFP
+	RadioTapFlagsShortPreamble                           // sent/received with short preamble
+	RadioTapFlagsWEP                                     // sent/received with WEP encryption
+	RadioTapFlagsFrag                                    // sent/received with fragmentation
+	RadioTapFlagsFCS                                     // frame includes FCS
+	RadioTapFlagsDatapad                                 // frame has padding between 802.11 header and payload (to 32-bit boundary)
+	RadioTapFlagsBadFCS                                  // does not pass FCS check
+	RadioTapFlagsShortGI                                 // HT short GI
+)
+
+func (r RadioTapFlags) CFP() bool {
+	return r&RadioTapFlagsCFP != 0
+}
+func (r RadioTapFlags) ShortPreamble() bool {
+	return r&RadioTapFlagsShortPreamble != 0
+}
+func (r RadioTapFlags) WEP() bool {
+	return r&RadioTapFlagsWEP != 0
+}
+func (r RadioTapFlags) Frag() bool {
+	return r&RadioTapFlagsFrag != 0
+}
+func (r RadioTapFlags) FCS() bool {
+	return r&RadioTapFlagsFCS != 0
+}
+func (r RadioTapFlags) Datapad() bool {
+	return r&RadioTapFlagsDatapad != 0
+}
+func (r RadioTapFlags) BadFCS() bool {
+	return r&RadioTapFlagsBadFCS != 0
+}
+func (r RadioTapFlags) ShortGI() bool {
+	return r&RadioTapFlagsShortGI != 0
+}
+
+// String provides a human readable string for RadioTapFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapFlags value, not its string.
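+//
+// For example, (RadioTapFlagsFCS | RadioTapFlagsShortGI).String() returns
+// "FCS,SHORT-GI".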
+func (a RadioTapFlags) String() string { + var out bytes.Buffer + if a.CFP() { + out.WriteString("CFP,") + } + if a.ShortPreamble() { + out.WriteString("SHORT-PREAMBLE,") + } + if a.WEP() { + out.WriteString("WEP,") + } + if a.Frag() { + out.WriteString("FRAG,") + } + if a.FCS() { + out.WriteString("FCS,") + } + if a.Datapad() { + out.WriteString("DATAPAD,") + } + if a.ShortGI() { + out.WriteString("SHORT-GI,") + } + + if length := out.Len(); length > 0 { + return string(out.Bytes()[:length-1]) // strip final comma + } + return "" +} + +type RadioTapRate uint8 + +func (a RadioTapRate) String() string { + return fmt.Sprintf("%v Mb/s", 0.5*float32(a)) +} + +type RadioTapChannelFrequency uint16 + +func (a RadioTapChannelFrequency) String() string { + return fmt.Sprintf("%d MHz", a) +} + +type RadioTapRxFlags uint16 + +const ( + RadioTapRxFlagsBadPlcp RadioTapRxFlags = 0x0002 +) + +func (self RadioTapRxFlags) BadPlcp() bool { + return self&RadioTapRxFlagsBadPlcp != 0 +} + +func (self RadioTapRxFlags) String() string { + if self.BadPlcp() { + return "BADPLCP" + } + return "" +} + +type RadioTapTxFlags uint16 + +const ( + RadioTapTxFlagsFail RadioTapTxFlags = 1 << iota + RadioTapTxFlagsCTS + RadioTapTxFlagsRTS + RadioTapTxFlagsNoACK +) + +func (self RadioTapTxFlags) Fail() bool { return self&RadioTapTxFlagsFail != 0 } +func (self RadioTapTxFlags) CTS() bool { return self&RadioTapTxFlagsCTS != 0 } +func (self RadioTapTxFlags) RTS() bool { return self&RadioTapTxFlagsRTS != 0 } +func (self RadioTapTxFlags) NoACK() bool { return self&RadioTapTxFlagsNoACK != 0 } + +func (self RadioTapTxFlags) String() string { + var tokens []string + if self.Fail() { + tokens = append(tokens, "Fail") + } + if self.CTS() { + tokens = append(tokens, "CTS") + } + if self.RTS() { + tokens = append(tokens, "RTS") + } + if self.NoACK() { + tokens = append(tokens, "NoACK") + } + return strings.Join(tokens, ",") +} + +type RadioTapMCS struct { + Known RadioTapMCSKnown + Flags RadioTapMCSFlags + MCS uint8 +} + +func (self RadioTapMCS) String() string { + var tokens []string + if self.Known.Bandwidth() { + token := "?" 
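+		// Per the radiotap MCS spec, the two bandwidth bits mean:
+		// 0 = 20 MHz, 1 = 40 MHz, 2 = 40 MHz (20L), 3 = 40 MHz (20U).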
+ switch self.Flags.Bandwidth() { + case 0: + token = "20" + case 1: + token = "40" + case 2: + token = "40(20L)" + case 3: + token = "40(20U)" + } + tokens = append(tokens, token) + } + if self.Known.MCSIndex() { + tokens = append(tokens, fmt.Sprintf("MCSIndex#%d", self.MCS)) + } + if self.Known.GuardInterval() { + if self.Flags.ShortGI() { + tokens = append(tokens, fmt.Sprintf("shortGI")) + } else { + tokens = append(tokens, fmt.Sprintf("longGI")) + } + } + if self.Known.HTFormat() { + if self.Flags.Greenfield() { + tokens = append(tokens, fmt.Sprintf("HT-greenfield")) + } else { + tokens = append(tokens, fmt.Sprintf("HT-mixed")) + } + } + if self.Known.FECType() { + if self.Flags.FECLDPC() { + tokens = append(tokens, fmt.Sprintf("LDPC")) + } else { + tokens = append(tokens, fmt.Sprintf("BCC")) + } + } + if self.Known.STBC() { + tokens = append(tokens, fmt.Sprintf("STBC#%d", self.Flags.STBC())) + } + if self.Known.NESS() { + num := 0 + if self.Known.NESS1() { + num |= 0x02 + } + if self.Flags.NESS0() { + num |= 0x01 + } + tokens = append(tokens, fmt.Sprintf("num-of-ESS#%d", num)) + } + return strings.Join(tokens, ",") +} + +type RadioTapMCSKnown uint8 + +const ( + RadioTapMCSKnownBandwidth RadioTapMCSKnown = 1 << iota + RadioTapMCSKnownMCSIndex + RadioTapMCSKnownGuardInterval + RadioTapMCSKnownHTFormat + RadioTapMCSKnownFECType + RadioTapMCSKnownSTBC + RadioTapMCSKnownNESS + RadioTapMCSKnownNESS1 +) + +func (self RadioTapMCSKnown) Bandwidth() bool { return self&RadioTapMCSKnownBandwidth != 0 } +func (self RadioTapMCSKnown) MCSIndex() bool { return self&RadioTapMCSKnownMCSIndex != 0 } +func (self RadioTapMCSKnown) GuardInterval() bool { return self&RadioTapMCSKnownGuardInterval != 0 } +func (self RadioTapMCSKnown) HTFormat() bool { return self&RadioTapMCSKnownHTFormat != 0 } +func (self RadioTapMCSKnown) FECType() bool { return self&RadioTapMCSKnownFECType != 0 } +func (self RadioTapMCSKnown) STBC() bool { return self&RadioTapMCSKnownSTBC != 0 } +func (self RadioTapMCSKnown) NESS() bool { return self&RadioTapMCSKnownNESS != 0 } +func (self RadioTapMCSKnown) NESS1() bool { return self&RadioTapMCSKnownNESS1 != 0 } + +type RadioTapMCSFlags uint8 + +const ( + RadioTapMCSFlagsBandwidthMask RadioTapMCSFlags = 0x03 + RadioTapMCSFlagsShortGI = 0x04 + RadioTapMCSFlagsGreenfield = 0x08 + RadioTapMCSFlagsFECLDPC = 0x10 + RadioTapMCSFlagsSTBCMask = 0x60 + RadioTapMCSFlagsNESS0 = 0x80 +) + +func (self RadioTapMCSFlags) Bandwidth() int { + return int(self & RadioTapMCSFlagsBandwidthMask) +} +func (self RadioTapMCSFlags) ShortGI() bool { return self&RadioTapMCSFlagsShortGI != 0 } +func (self RadioTapMCSFlags) Greenfield() bool { return self&RadioTapMCSFlagsGreenfield != 0 } +func (self RadioTapMCSFlags) FECLDPC() bool { return self&RadioTapMCSFlagsFECLDPC != 0 } +func (self RadioTapMCSFlags) STBC() int { + return int(self&RadioTapMCSFlagsSTBCMask) >> 5 +} +func (self RadioTapMCSFlags) NESS0() bool { return self&RadioTapMCSFlagsNESS0 != 0 } + +type RadioTapAMPDUStatus struct { + Reference uint32 + Flags RadioTapAMPDUStatusFlags + CRC uint8 +} + +func (self RadioTapAMPDUStatus) String() string { + tokens := []string{ + fmt.Sprintf("ref#%x", self.Reference), + } + if self.Flags.ReportZerolen() && self.Flags.IsZerolen() { + tokens = append(tokens, fmt.Sprintf("zero-length")) + } + if self.Flags.LastKnown() && self.Flags.IsLast() { + tokens = append(tokens, "last") + } + if self.Flags.DelimCRCErr() { + tokens = append(tokens, "delimiter CRC error") + } + if self.Flags.DelimCRCKnown() { + tokens = 
append(tokens, fmt.Sprintf("delimiter-CRC=%02x", self.CRC)) + } + return strings.Join(tokens, ",") +} + +type RadioTapAMPDUStatusFlags uint16 + +const ( + RadioTapAMPDUStatusFlagsReportZerolen RadioTapAMPDUStatusFlags = 1 << iota + RadioTapAMPDUIsZerolen + RadioTapAMPDULastKnown + RadioTapAMPDUIsLast + RadioTapAMPDUDelimCRCErr + RadioTapAMPDUDelimCRCKnown +) + +func (self RadioTapAMPDUStatusFlags) ReportZerolen() bool { + return self&RadioTapAMPDUStatusFlagsReportZerolen != 0 +} +func (self RadioTapAMPDUStatusFlags) IsZerolen() bool { return self&RadioTapAMPDUIsZerolen != 0 } +func (self RadioTapAMPDUStatusFlags) LastKnown() bool { return self&RadioTapAMPDULastKnown != 0 } +func (self RadioTapAMPDUStatusFlags) IsLast() bool { return self&RadioTapAMPDUIsLast != 0 } +func (self RadioTapAMPDUStatusFlags) DelimCRCErr() bool { return self&RadioTapAMPDUDelimCRCErr != 0 } +func (self RadioTapAMPDUStatusFlags) DelimCRCKnown() bool { return self&RadioTapAMPDUDelimCRCKnown != 0 } + +type RadioTapVHT struct { + Known RadioTapVHTKnown + Flags RadioTapVHTFlags + Bandwidth uint8 + MCSNSS [4]RadioTapVHTMCSNSS + Coding uint8 + GroupId uint8 + PartialAID uint16 +} + +func (self RadioTapVHT) String() string { + var tokens []string + if self.Known.STBC() { + if self.Flags.STBC() { + tokens = append(tokens, "STBC") + } else { + tokens = append(tokens, "no STBC") + } + } + if self.Known.TXOPPSNotAllowed() { + if self.Flags.TXOPPSNotAllowed() { + tokens = append(tokens, "TXOP doze not allowed") + } else { + tokens = append(tokens, "TXOP doze allowed") + } + } + if self.Known.GI() { + if self.Flags.SGI() { + tokens = append(tokens, "short GI") + } else { + tokens = append(tokens, "long GI") + } + } + if self.Known.SGINSYMDisambiguation() { + if self.Flags.SGINSYMMod() { + tokens = append(tokens, "NSYM mod 10=9") + } else { + tokens = append(tokens, "NSYM mod 10!=9 or no short GI") + } + } + if self.Known.LDPCExtraOFDMSymbol() { + if self.Flags.LDPCExtraOFDMSymbol() { + tokens = append(tokens, "LDPC extra OFDM symbols") + } else { + tokens = append(tokens, "no LDPC extra OFDM symbols") + } + } + if self.Known.Beamformed() { + if self.Flags.Beamformed() { + tokens = append(tokens, "beamformed") + } else { + tokens = append(tokens, "no beamformed") + } + } + if self.Known.Bandwidth() { + token := "?" + switch self.Bandwidth & 0x1f { + case 0: + token = "20" + case 1: + token = "40" + case 2: + token = "40(20L)" + case 3: + token = "40(20U)" + case 4: + token = "80" + case 5: + token = "80(40L)" + case 6: + token = "80(40U)" + case 7: + token = "80(20LL)" + case 8: + token = "80(20LU)" + case 9: + token = "80(20UL)" + case 10: + token = "80(20UU)" + case 11: + token = "160" + case 12: + token = "160(80L)" + case 13: + token = "160(80U)" + case 14: + token = "160(40LL)" + case 15: + token = "160(40LU)" + case 16: + token = "160(40UL)" + case 17: + token = "160(40UU)" + case 18: + token = "160(20LLL)" + case 19: + token = "160(20LLU)" + case 20: + token = "160(20LUL)" + case 21: + token = "160(20LUU)" + case 22: + token = "160(20ULL)" + case 23: + token = "160(20ULU)" + case 24: + token = "160(20UUL)" + case 25: + token = "160(20UUU)" + } + tokens = append(tokens, token) + } + for i, MCSNSS := range self.MCSNSS { + if MCSNSS.Present() { + fec := "?" 
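+			// The radiotap VHT Coding field: bit i describes user i,
+			// 0 = BCC, 1 = LDPC.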
+ switch self.Coding & (1 << uint8(i)) { + case 0: + fec = "BCC" + case 1: + fec = "LDPC" + } + tokens = append(tokens, fmt.Sprintf("user%d(%s,%s)", i, MCSNSS.String(), fec)) + } + } + if self.Known.GroupId() { + tokens = append(tokens, + fmt.Sprintf("group=%d", self.GroupId)) + } + if self.Known.PartialAID() { + tokens = append(tokens, + fmt.Sprintf("partial-AID=%d", self.PartialAID)) + } + return strings.Join(tokens, ",") +} + +type RadioTapVHTKnown uint16 + +const ( + RadioTapVHTKnownSTBC RadioTapVHTKnown = 1 << iota + RadioTapVHTKnownTXOPPSNotAllowed + RadioTapVHTKnownGI + RadioTapVHTKnownSGINSYMDisambiguation + RadioTapVHTKnownLDPCExtraOFDMSymbol + RadioTapVHTKnownBeamformed + RadioTapVHTKnownBandwidth + RadioTapVHTKnownGroupId + RadioTapVHTKnownPartialAID +) + +func (self RadioTapVHTKnown) STBC() bool { return self&RadioTapVHTKnownSTBC != 0 } +func (self RadioTapVHTKnown) TXOPPSNotAllowed() bool { + return self&RadioTapVHTKnownTXOPPSNotAllowed != 0 +} +func (self RadioTapVHTKnown) GI() bool { return self&RadioTapVHTKnownGI != 0 } +func (self RadioTapVHTKnown) SGINSYMDisambiguation() bool { + return self&RadioTapVHTKnownSGINSYMDisambiguation != 0 +} +func (self RadioTapVHTKnown) LDPCExtraOFDMSymbol() bool { + return self&RadioTapVHTKnownLDPCExtraOFDMSymbol != 0 +} +func (self RadioTapVHTKnown) Beamformed() bool { return self&RadioTapVHTKnownBeamformed != 0 } +func (self RadioTapVHTKnown) Bandwidth() bool { return self&RadioTapVHTKnownBandwidth != 0 } +func (self RadioTapVHTKnown) GroupId() bool { return self&RadioTapVHTKnownGroupId != 0 } +func (self RadioTapVHTKnown) PartialAID() bool { return self&RadioTapVHTKnownPartialAID != 0 } + +type RadioTapVHTFlags uint8 + +const ( + RadioTapVHTFlagsSTBC RadioTapVHTFlags = 1 << iota + RadioTapVHTFlagsTXOPPSNotAllowed + RadioTapVHTFlagsSGI + RadioTapVHTFlagsSGINSYMMod + RadioTapVHTFlagsLDPCExtraOFDMSymbol + RadioTapVHTFlagsBeamformed +) + +func (self RadioTapVHTFlags) STBC() bool { return self&RadioTapVHTFlagsSTBC != 0 } +func (self RadioTapVHTFlags) TXOPPSNotAllowed() bool { + return self&RadioTapVHTFlagsTXOPPSNotAllowed != 0 +} +func (self RadioTapVHTFlags) SGI() bool { return self&RadioTapVHTFlagsSGI != 0 } +func (self RadioTapVHTFlags) SGINSYMMod() bool { return self&RadioTapVHTFlagsSGINSYMMod != 0 } +func (self RadioTapVHTFlags) LDPCExtraOFDMSymbol() bool { + return self&RadioTapVHTFlagsLDPCExtraOFDMSymbol != 0 +} +func (self RadioTapVHTFlags) Beamformed() bool { return self&RadioTapVHTFlagsBeamformed != 0 } + +type RadioTapVHTMCSNSS uint8 + +func (self RadioTapVHTMCSNSS) Present() bool { + return self&0x0F != 0 +} + +func (self RadioTapVHTMCSNSS) String() string { + return fmt.Sprintf("NSS#%dMCS#%d", uint32(self&0xf), uint32(self>>4)) +} + +func decodeRadioTap(data []byte, p gopacket.PacketBuilder) error { + d := &RadioTap{} + // TODO: Should we set LinkLayer here? And implement LinkFlow + return decodingLayerDecoder(d, data, p) +} + +type RadioTap struct { + BaseLayer + + // Version 0. Only increases for drastic changes, introduction of compatible new fields does not count. + Version uint8 + // Length of the whole header in bytes, including it_version, it_pad, it_len, and data fields. + Length uint16 + // Present is a bitmap telling which fields are present. Set bit 31 (0x80000000) to extend the bitmap by another 32 bits. Additional extensions are made by setting bit 31. 
+ Present RadioTapPresent + // TSFT: value in microseconds of the MAC's 64-bit 802.11 Time Synchronization Function timer when the first bit of the MPDU arrived at the MAC. For received frames, only. + TSFT uint64 + Flags RadioTapFlags + // Rate Tx/Rx data rate + Rate RadioTapRate + // ChannelFrequency Tx/Rx frequency in MHz, followed by flags + ChannelFrequency RadioTapChannelFrequency + ChannelFlags RadioTapChannelFlags + // FHSS For frequency-hopping radios, the hop set (first byte) and pattern (second byte). + FHSS uint16 + // DBMAntennaSignal RF signal power at the antenna, decibel difference from one milliwatt. + DBMAntennaSignal int8 + // DBMAntennaNoise RF noise power at the antenna, decibel difference from one milliwatt. + DBMAntennaNoise int8 + // LockQuality Quality of Barker code lock. Unitless. Monotonically nondecreasing with "better" lock strength. Called "Signal Quality" in datasheets. + LockQuality uint16 + // TxAttenuation Transmit power expressed as unitless distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels. + TxAttenuation uint16 + // DBTxAttenuation Transmit power expressed as decibel distance from max power set at factory calibration. 0 is max power. Monotonically nondecreasing with lower power levels. + DBTxAttenuation uint16 + // DBMTxPower Transmit power expressed as dBm (decibels from a 1 milliwatt reference). This is the absolute power level measured at the antenna port. + DBMTxPower int8 + // Antenna Unitless indication of the Rx/Tx antenna for this packet. The first antenna is antenna 0. + Antenna uint8 + // DBAntennaSignal RF signal power at the antenna, decibel difference from an arbitrary, fixed reference. + DBAntennaSignal uint8 + // DBAntennaNoise RF noise power at the antenna, decibel difference from an arbitrary, fixed reference point. + DBAntennaNoise uint8 + // + RxFlags RadioTapRxFlags + TxFlags RadioTapTxFlags + RtsRetries uint8 + DataRetries uint8 + MCS RadioTapMCS + AMPDUStatus RadioTapAMPDUStatus + VHT RadioTapVHT +} + +func (m *RadioTap) LayerType() gopacket.LayerType { return LayerTypeRadioTap } + +func (m *RadioTap) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Version = uint8(data[0]) + m.Length = binary.LittleEndian.Uint16(data[2:4]) + m.Present = RadioTapPresent(binary.LittleEndian.Uint32(data[4:8])) + + offset := uint16(4) + + for (binary.LittleEndian.Uint32(data[offset:offset+4]) & 0x80000000) != 0 { + // This parser only handles standard radiotap namespace, + // and expects all fields are packed in the first it_present. + // Extended bitmap will be just ignored. 
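+		// Each it_present word is 4 bytes; keep skipping while bit 31
+		// (the EXT bit) announces another bitmap word.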
+ offset += 4 + } + offset += 4 // skip the bitmap + + if m.Present.TSFT() { + offset += align(offset, 8) + m.TSFT = binary.LittleEndian.Uint64(data[offset : offset+8]) + offset += 8 + } + if m.Present.Flags() { + m.Flags = RadioTapFlags(data[offset]) + offset++ + } + if m.Present.Rate() { + m.Rate = RadioTapRate(data[offset]) + offset++ + } + if m.Present.Channel() { + offset += align(offset, 2) + m.ChannelFrequency = RadioTapChannelFrequency(binary.LittleEndian.Uint16(data[offset : offset+2])) + offset += 2 + m.ChannelFlags = RadioTapChannelFlags(binary.LittleEndian.Uint16(data[offset : offset+2])) + offset += 2 + } + if m.Present.FHSS() { + m.FHSS = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.DBMAntennaSignal() { + m.DBMAntennaSignal = int8(data[offset]) + offset++ + } + if m.Present.DBMAntennaNoise() { + m.DBMAntennaNoise = int8(data[offset]) + offset++ + } + if m.Present.LockQuality() { + offset += align(offset, 2) + m.LockQuality = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.TxAttenuation() { + offset += align(offset, 2) + m.TxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.DBTxAttenuation() { + offset += align(offset, 2) + m.DBTxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2]) + offset += 2 + } + if m.Present.DBMTxPower() { + m.DBMTxPower = int8(data[offset]) + offset++ + } + if m.Present.Antenna() { + m.Antenna = uint8(data[offset]) + offset++ + } + if m.Present.DBAntennaSignal() { + m.DBAntennaSignal = uint8(data[offset]) + offset++ + } + if m.Present.DBAntennaNoise() { + m.DBAntennaNoise = uint8(data[offset]) + offset++ + } + if m.Present.RxFlags() { + offset += align(offset, 2) + m.RxFlags = RadioTapRxFlags(binary.LittleEndian.Uint16(data[offset:])) + offset += 2 + } + if m.Present.TxFlags() { + offset += align(offset, 2) + m.TxFlags = RadioTapTxFlags(binary.LittleEndian.Uint16(data[offset:])) + offset += 2 + } + if m.Present.RtsRetries() { + m.RtsRetries = uint8(data[offset]) + offset++ + } + if m.Present.DataRetries() { + m.DataRetries = uint8(data[offset]) + offset++ + } + if m.Present.MCS() { + m.MCS = RadioTapMCS{ + RadioTapMCSKnown(data[offset]), + RadioTapMCSFlags(data[offset+1]), + uint8(data[offset+2]), + } + offset += 3 + } + if m.Present.AMPDUStatus() { + offset += align(offset, 4) + m.AMPDUStatus = RadioTapAMPDUStatus{ + Reference: binary.LittleEndian.Uint32(data[offset:]), + Flags: RadioTapAMPDUStatusFlags(binary.LittleEndian.Uint16(data[offset+4:])), + CRC: uint8(data[offset+6]), + } + offset += 8 + } + if m.Present.VHT() { + offset += align(offset, 2) + m.VHT = RadioTapVHT{ + Known: RadioTapVHTKnown(binary.LittleEndian.Uint16(data[offset:])), + Flags: RadioTapVHTFlags(data[offset+2]), + Bandwidth: uint8(data[offset+3]), + MCSNSS: [4]RadioTapVHTMCSNSS{ + RadioTapVHTMCSNSS(data[offset+4]), + RadioTapVHTMCSNSS(data[offset+5]), + RadioTapVHTMCSNSS(data[offset+6]), + RadioTapVHTMCSNSS(data[offset+7]), + }, + Coding: uint8(data[offset+8]), + GroupId: uint8(data[offset+9]), + PartialAID: binary.LittleEndian.Uint16(data[offset+10:]), + } + offset += 12 + } + + payload := data[m.Length:] + + // Remove non standard padding used by some Wi-Fi drivers + if m.Flags.Datapad() && + payload[0]&0xC == 0x8 { //&& // Data frame + headlen := 24 + if payload[0]&0x8C == 0x88 { // QoS + headlen += 2 + } + if payload[1]&0x3 == 0x3 { // 4 addresses + headlen += 2 + } + if headlen%4 == 2 { + payload = append(payload[:headlen], 
payload[headlen+2:len(payload)]...)
+ }
+ }
+
+ if !m.Flags.FCS() {
+ // Dot11.DecodeFromBytes() expects the FCS to be present and performs a hard chop on the checksum.
+ // If a user is handing in subslices or packets from a buffered stream, the capacity of the slice
+ // may extend beyond the len. Rather than expecting callers to enforce cap==len on every packet,
+ // we take the hit in this one case and do a reallocation. If the user DOES enforce cap==len,
+ // then the reallocation will happen anyway on the append. This is required because the append
+ // writes to the memory directly after the payload if there is sufficient capacity, which callers
+ // may not expect.
+ reallocPayload := make([]byte, len(payload)+4)
+ copy(reallocPayload[0:len(payload)], payload)
+ h := crc32.NewIEEE()
+ h.Write(payload)
+ binary.LittleEndian.PutUint32(reallocPayload[len(payload):], h.Sum32())
+ payload = reallocPayload
+ }
+ m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: payload}
+
+ return nil
+}
+
+func (m RadioTap) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ buf := make([]byte, 1024)
+
+ buf[0] = m.Version
+ buf[1] = 0
+
+ binary.LittleEndian.PutUint32(buf[4:8], uint32(m.Present))
+
+ offset := uint16(4)
+
+ for (binary.LittleEndian.Uint32(buf[offset:offset+4]) & 0x80000000) != 0 {
+ offset += 4
+ }
+
+ offset += 4
+
+ if m.Present.TSFT() {
+ offset += align(offset, 8)
+ binary.LittleEndian.PutUint64(buf[offset:offset+8], m.TSFT)
+ offset += 8
+ }
+
+ if m.Present.Flags() {
+ buf[offset] = uint8(m.Flags)
+ offset++
+ }
+
+ if m.Present.Rate() {
+ buf[offset] = uint8(m.Rate)
+ offset++
+ }
+
+ if m.Present.Channel() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFrequency))
+ offset += 2
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFlags))
+ offset += 2
+ }
+
+ if m.Present.FHSS() {
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.FHSS)
+ offset += 2
+ }
+
+ if m.Present.DBMAntennaSignal() {
+ buf[offset] = byte(m.DBMAntennaSignal)
+ offset++
+ }
+
+ if m.Present.DBMAntennaNoise() {
+ buf[offset] = byte(m.DBMAntennaNoise)
+ offset++
+ }
+
+ if m.Present.LockQuality() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.LockQuality)
+ offset += 2
+ }
+
+ if m.Present.TxAttenuation() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.TxAttenuation)
+ offset += 2
+ }
+
+ if m.Present.DBTxAttenuation() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], m.DBTxAttenuation)
+ offset += 2
+ }
+
+ if m.Present.DBMTxPower() {
+ buf[offset] = byte(m.DBMTxPower)
+ offset++
+ }
+
+ if m.Present.Antenna() {
+ buf[offset] = uint8(m.Antenna)
+ offset++
+ }
+
+ if m.Present.DBAntennaSignal() {
+ buf[offset] = uint8(m.DBAntennaSignal)
+ offset++
+ }
+
+ if m.Present.DBAntennaNoise() {
+ buf[offset] = uint8(m.DBAntennaNoise)
+ offset++
+ }
+
+ if m.Present.RxFlags() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.RxFlags))
+ offset += 2
+ }
+
+ if m.Present.TxFlags() {
+ offset += align(offset, 2)
+ binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.TxFlags))
+ offset += 2
+ }
+
+ if m.Present.RtsRetries() {
+ buf[offset] = m.RtsRetries
+ offset++
+ }
+
+ if m.Present.DataRetries() {
+ buf[offset] = m.DataRetries
+ offset++
+ }
+
+ if m.Present.MCS() {
+ buf[offset] = uint8(m.MCS.Known)
+ buf[offset+1] = uint8(m.MCS.Flags)
+ buf[offset+2]
= uint8(m.MCS.MCS) + + offset += 3 + } + + if m.Present.AMPDUStatus() { + offset += align(offset, 4) + + binary.LittleEndian.PutUint32(buf[offset:offset+4], m.AMPDUStatus.Reference) + binary.LittleEndian.PutUint16(buf[offset+4:offset+6], uint16(m.AMPDUStatus.Flags)) + + buf[offset+6] = m.AMPDUStatus.CRC + + offset += 8 + } + + if m.Present.VHT() { + offset += align(offset, 2) + + binary.LittleEndian.PutUint16(buf[offset:], uint16(m.VHT.Known)) + + buf[offset+2] = uint8(m.VHT.Flags) + buf[offset+3] = uint8(m.VHT.Bandwidth) + buf[offset+4] = uint8(m.VHT.MCSNSS[0]) + buf[offset+5] = uint8(m.VHT.MCSNSS[1]) + buf[offset+6] = uint8(m.VHT.MCSNSS[2]) + buf[offset+7] = uint8(m.VHT.MCSNSS[3]) + buf[offset+8] = uint8(m.VHT.Coding) + buf[offset+9] = uint8(m.VHT.GroupId) + + binary.LittleEndian.PutUint16(buf[offset+10:offset+12], m.VHT.PartialAID) + + offset += 12 + } + + packetBuf, err := b.PrependBytes(int(offset)) + + if err != nil { + return err + } + + if opts.FixLengths { + m.Length = offset + } + + binary.LittleEndian.PutUint16(buf[2:4], m.Length) + + copy(packetBuf, buf) + + return nil +} + +func (m *RadioTap) CanDecode() gopacket.LayerClass { return LayerTypeRadioTap } +func (m *RadioTap) NextLayerType() gopacket.LayerType { return LayerTypeDot11 } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/rmcp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/rmcp.go new file mode 100644 index 00000000..5474fee4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/rmcp.go @@ -0,0 +1,170 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file in the root of the source tree. + +package layers + +// This file implements the ASF-RMCP header specified in section 3.2.2.2 of +// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf + +import ( + "fmt" + + "github.com/google/gopacket" +) + +// RMCPClass is the class of a RMCP layer's payload, e.g. ASF or IPMI. This is a +// 4-bit unsigned int on the wire; all but 6 (ASF), 7 (IPMI) and 8 (OEM-defined) +// are currently reserved. +type RMCPClass uint8 + +// LayerType returns the payload layer type corresponding to a RMCP class. +func (c RMCPClass) LayerType() gopacket.LayerType { + if lt := rmcpClassLayerTypes[uint8(c)]; lt != 0 { + return lt + } + return gopacket.LayerTypePayload +} + +func (c RMCPClass) String() string { + return fmt.Sprintf("%v(%v)", uint8(c), c.LayerType()) +} + +const ( + // RMCPVersion1 identifies RMCP v1.0 in the Version header field. Lower + // values are considered legacy, while higher values are reserved by the + // specification. + RMCPVersion1 uint8 = 0x06 + + // RMCPNormal indicates a "normal" message, i.e. not an acknowledgement. + RMCPNormal uint8 = 0 + + // RMCPAck indicates a message is acknowledging a received normal message. + RMCPAck uint8 = 1 << 7 + + // RMCPClassASF identifies an RMCP message as containing an ASF-RMCP + // payload. + RMCPClassASF RMCPClass = 0x06 + + // RMCPClassIPMI identifies an RMCP message as containing an IPMI payload. + RMCPClassIPMI RMCPClass = 0x07 + + // RMCPClassOEM identifies an RMCP message as containing an OEM-defined + // payload. 
+ RMCPClassOEM RMCPClass = 0x08
+)
+
+var (
+ rmcpClassLayerTypes = [16]gopacket.LayerType{
+ RMCPClassASF: LayerTypeASF,
+ // RMCPClassIPMI is yet to be implemented; RMCPClassOEM is deliberately not
+ // implemented, so we return LayerTypePayload
+ }
+)
+
+// RegisterRMCPLayerType allows specifying that the payload of an RMCP packet of
+// a certain class should be processed by the provided layer type. This overrides
+// any existing registrations, including defaults.
+func RegisterRMCPLayerType(c RMCPClass, l gopacket.LayerType) {
+ rmcpClassLayerTypes[c] = l
+}
+
+// RMCP describes the format of an RMCP header, which forms a UDP payload. See
+// section 3.2.2.2.
+type RMCP struct {
+ BaseLayer
+
+ // Version identifies the version of the RMCP header. 0x06 indicates RMCP
+ // v1.0; lower values are legacy, higher values are reserved.
+ Version uint8
+
+ // Sequence is the sequence number associated with the message. Note that
+ // this rolls over to 0 after 254, not 255. Seq num 255 indicates the
+ // receiver must not send an ACK.
+ Sequence uint8
+
+ // Ack indicates whether this packet is an acknowledgement. If it is, the
+ // payload will be empty.
+ Ack bool
+
+ // Class indicates the structure of the payload. There are only 2^4 valid
+ // values, however there is no uint4 data type. N.B. the Ack bit has been
+ // split off into another field. The most significant 4 bits of this field
+ // will always be 0.
+ Class RMCPClass
+}
+
+// LayerType returns LayerTypeRMCP. It partially satisfies Layer and
+// SerializableLayer.
+func (*RMCP) LayerType() gopacket.LayerType {
+ return LayerTypeRMCP
+}
+
+// CanDecode returns LayerTypeRMCP. It partially satisfies DecodingLayer.
+func (r *RMCP) CanDecode() gopacket.LayerClass {
+ return r.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (r *RMCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+ if len(data) < 4 {
+ df.SetTruncated()
+ return fmt.Errorf("invalid RMCP header, length %v less than 4",
+ len(data))
+ }
+
+ r.BaseLayer.Contents = data[:4]
+ r.BaseLayer.Payload = data[4:]
+
+ r.Version = uint8(data[0])
+ // 1 byte reserved
+ r.Sequence = uint8(data[2])
+ r.Ack = data[3]&RMCPAck != 0
+ r.Class = RMCPClass(data[3] & 0xF)
+ return nil
+}
+
+// NextLayerType returns the data layer of this RMCP layer. This partially
+// satisfies DecodingLayer.
+func (r *RMCP) NextLayerType() gopacket.LayerType {
+ return r.Class.LayerType()
+}
+
+// Payload returns the data layer. It partially satisfies ApplicationLayer.
+func (r *RMCP) Payload() []byte {
+ return r.BaseLayer.Payload
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (r *RMCP) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error {
+ // The IPMI v1.5 spec contains a pad byte for frame sizes of certain lengths
+ // to work around issues in LAN chips. This is no longer necessary as of
+ // IPMI v2.0 (renamed to "legacy pad") so we do not attempt to add it.
The + // same approach is taken by FreeIPMI: + // http://git.savannah.gnu.org/cgit/freeipmi.git/tree/libfreeipmi/interface/ipmi-lan-interface.c?id=b5ffcd38317daf42074458879f4c55ba6804a595#n836 + bytes, err := b.PrependBytes(4) + if err != nil { + return err + } + bytes[0] = r.Version + bytes[1] = 0x00 + bytes[2] = r.Sequence + bytes[3] = bool2uint8(r.Ack)<<7 | uint8(r.Class) // thanks, BFD layer + return nil +} + +// decodeRMCP decodes the byte slice into an RMCP type, and sets the application +// layer to it. +func decodeRMCP(data []byte, p gopacket.PacketBuilder) error { + rmcp := &RMCP{} + err := rmcp.DecodeFromBytes(data, p) + p.AddLayer(rmcp) + p.SetApplicationLayer(rmcp) + if err != nil { + return err + } + return p.NextDecoder(rmcp.NextLayerType()) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/rudp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/rudp.go new file mode 100644 index 00000000..8435129b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/rudp.go @@ -0,0 +1,93 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +type RUDP struct { + BaseLayer + SYN, ACK, EACK, RST, NUL bool + Version uint8 + HeaderLength uint8 + SrcPort, DstPort RUDPPort + DataLength uint16 + Seq, Ack, Checksum uint32 + VariableHeaderArea []byte + // RUDPHeaderSyn contains SYN information for the RUDP packet, + // if the SYN flag is set + *RUDPHeaderSYN + // RUDPHeaderEack contains EACK information for the RUDP packet, + // if the EACK flag is set. + *RUDPHeaderEACK +} + +type RUDPHeaderSYN struct { + MaxOutstandingSegments, MaxSegmentSize, OptionFlags uint16 +} + +type RUDPHeaderEACK struct { + SeqsReceivedOK []uint32 +} + +// LayerType returns gopacket.LayerTypeRUDP. 
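[Editor's note: decodeRUDP below unpacks byte 0 of the RUDP header, which carries five single-bit flags and a 2-bit version number. The sketch shows an illustrative inverse of that unpacking; packRUDPByte0 is the editor's name, not part of the package.]

    // packRUDPByte0 rebuilds the first RUDP header byte from its flags and
    // 2-bit version, mirroring the masks decodeRUDP uses to take it apart.
    func packRUDPByte0(syn, ack, eack, rst, nul bool, version uint8) byte {
        var b byte
        if syn {
            b |= 0x80
        }
        if ack {
            b |= 0x40
        }
        if eack {
            b |= 0x20
        }
        if rst {
            b |= 0x10
        }
        if nul {
            b |= 0x08
        }
        return b | version&0x03
    }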
+func (r *RUDP) LayerType() gopacket.LayerType { return LayerTypeRUDP } + +func decodeRUDP(data []byte, p gopacket.PacketBuilder) error { + r := &RUDP{ + SYN: data[0]&0x80 != 0, + ACK: data[0]&0x40 != 0, + EACK: data[0]&0x20 != 0, + RST: data[0]&0x10 != 0, + NUL: data[0]&0x08 != 0, + Version: data[0] & 0x3, + HeaderLength: data[1], + SrcPort: RUDPPort(data[2]), + DstPort: RUDPPort(data[3]), + DataLength: binary.BigEndian.Uint16(data[4:6]), + Seq: binary.BigEndian.Uint32(data[6:10]), + Ack: binary.BigEndian.Uint32(data[10:14]), + Checksum: binary.BigEndian.Uint32(data[14:18]), + } + if r.HeaderLength < 9 { + return fmt.Errorf("RUDP packet with too-short header length %d", r.HeaderLength) + } + hlen := int(r.HeaderLength) * 2 + r.Contents = data[:hlen] + r.Payload = data[hlen : hlen+int(r.DataLength)] + r.VariableHeaderArea = data[18:hlen] + headerData := r.VariableHeaderArea + switch { + case r.SYN: + if len(headerData) != 6 { + return fmt.Errorf("RUDP packet invalid SYN header length: %d", len(headerData)) + } + r.RUDPHeaderSYN = &RUDPHeaderSYN{ + MaxOutstandingSegments: binary.BigEndian.Uint16(headerData[:2]), + MaxSegmentSize: binary.BigEndian.Uint16(headerData[2:4]), + OptionFlags: binary.BigEndian.Uint16(headerData[4:6]), + } + case r.EACK: + if len(headerData)%4 != 0 { + return fmt.Errorf("RUDP packet invalid EACK header length: %d", len(headerData)) + } + r.RUDPHeaderEACK = &RUDPHeaderEACK{make([]uint32, len(headerData)/4)} + for i := 0; i < len(headerData); i += 4 { + r.SeqsReceivedOK[i/4] = binary.BigEndian.Uint32(headerData[i : i+4]) + } + } + p.AddLayer(r) + p.SetTransportLayer(r) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +func (r *RUDP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointRUDPPort, []byte{byte(r.SrcPort)}, []byte{byte(r.DstPort)}) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sctp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sctp.go new file mode 100644 index 00000000..511176e5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sctp.go @@ -0,0 +1,746 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "fmt" + "hash/crc32" + + "github.com/google/gopacket" +) + +// SCTP contains information on the top level of an SCTP packet. +type SCTP struct { + BaseLayer + SrcPort, DstPort SCTPPort + VerificationTag uint32 + Checksum uint32 + sPort, dPort []byte +} + +// LayerType returns gopacket.LayerTypeSCTP +func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP } + +func decodeSCTP(data []byte, p gopacket.PacketBuilder) error { + sctp := &SCTP{} + err := sctp.DecodeFromBytes(data, p) + p.AddLayer(sctp) + p.SetTransportLayer(sctp) + if err != nil { + return err + } + return p.NextDecoder(sctpChunkTypePrefixDecoder) +} + +var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix) + +// TransportFlow returns a flow based on the source and destination SCTP port. +func (s *SCTP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort) +} + +func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error { + chunkType := SCTPChunkType(data[0]) + return chunkType.Decode(data, p) +} + +// SerializeTo is for gopacket.SerializableLayer. 
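[Editor's note: unlike the big-endian port and tag fields, SCTP's checksum is a CRC32c (Castagnoli) over the entire packet, stored little-endian, which is why the SerializeTo below mixes byte orders. A minimal sketch of the same computation, assuming the checksum bytes in the buffer are still zero as RFC 4960 requires during calculation; sctpChecksum is the editor's name, and hash/crc32 is already imported by this file.]

    // sctpChecksum computes CRC32c over a fully serialized SCTP packet
    // whose checksum field (bytes 8..11) is zeroed.
    func sctpChecksum(packet []byte) uint32 {
        return crc32.Checksum(packet, crc32.MakeTable(crc32.Castagnoli))
    }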
+func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(12) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort)) + binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag) + if opts.ComputeChecksums { + // Note: MakeTable(Castagnoli) actually only creates the table once, then + // passes back a singleton on every other call, so this shouldn't cause + // excessive memory allocation. + binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli))) + } + return nil +} + +func (sctp *SCTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 12 { + return errors.New("Invalid SCTP common header length") + } + sctp.SrcPort = SCTPPort(binary.BigEndian.Uint16(data[:2])) + sctp.sPort = data[:2] + sctp.DstPort = SCTPPort(binary.BigEndian.Uint16(data[2:4])) + sctp.dPort = data[2:4] + sctp.VerificationTag = binary.BigEndian.Uint32(data[4:8]) + sctp.Checksum = binary.BigEndian.Uint32(data[8:12]) + sctp.BaseLayer = BaseLayer{data[:12], data[12:]} + + return nil +} + +func (t *SCTP) CanDecode() gopacket.LayerClass { + return LayerTypeSCTP +} + +func (t *SCTP) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +// SCTPChunk contains the common fields in all SCTP chunks. +type SCTPChunk struct { + BaseLayer + Type SCTPChunkType + Flags uint8 + Length uint16 + // ActualLength is the total length of an SCTP chunk, including padding. + // SCTP chunks start and end on 4-byte boundaries. So if a chunk has a length + // of 18, it means that it has data up to and including byte 18, then padding + // up to the next 4-byte boundary, 20. In this case, Length would be 18, and + // ActualLength would be 20. + ActualLength int +} + +func roundUpToNearest4(i int) int { + if i%4 == 0 { + return i + } + return i + 4 - (i % 4) +} + +func decodeSCTPChunk(data []byte) (SCTPChunk, error) { + length := binary.BigEndian.Uint16(data[2:4]) + if length < 4 { + return SCTPChunk{}, errors.New("invalid SCTP chunk length") + } + actual := roundUpToNearest4(int(length)) + ct := SCTPChunkType(data[0]) + + // For SCTP Data, use a separate layer for the payload + delta := 0 + if ct == SCTPChunkTypeData { + delta = int(actual) - int(length) + actual = 16 + } + + return SCTPChunk{ + Type: ct, + Flags: data[1], + Length: length, + ActualLength: actual, + BaseLayer: BaseLayer{data[:actual], data[actual : len(data)-delta]}, + }, nil +} + +// SCTPParameter is a TLV parameter inside a SCTPChunk. +type SCTPParameter struct { + Type uint16 + Length uint16 + ActualLength int + Value []byte +} + +func decodeSCTPParameter(data []byte) SCTPParameter { + length := binary.BigEndian.Uint16(data[2:4]) + return SCTPParameter{ + Type: binary.BigEndian.Uint16(data[0:2]), + Length: length, + Value: data[4:length], + ActualLength: roundUpToNearest4(int(length)), + } +} + +func (p SCTPParameter) Bytes() []byte { + length := 4 + len(p.Value) + data := make([]byte, roundUpToNearest4(length)) + binary.BigEndian.PutUint16(data[0:2], p.Type) + binary.BigEndian.PutUint16(data[2:4], uint16(length)) + copy(data[4:], p.Value) + return data +} + +// SCTPUnknownChunkType is the layer type returned when we don't recognize the +// chunk type. Since there's a length in a known location, we can skip over +// it even if we don't know what it is, and continue parsing the rest of the +// chunks. 
This chunk is stored as an ErrorLayer in the packet.
+type SCTPUnknownChunkType struct {
+ SCTPChunk
+ bytes []byte
+}
+
+func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPUnknownChunkType{SCTPChunk: chunk}
+ sc.bytes = data[:sc.ActualLength]
+ p.AddLayer(sc)
+ p.SetErrorLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(s.ActualLength)
+ if err != nil {
+ return err
+ }
+ copy(bytes, s.bytes)
+ return nil
+}
+
+// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType.
+func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType }
+
+// Payload returns all bytes in this header, including the decoded Type, Length,
+// and Flags.
+func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes }
+
+// Error implements ErrorLayer.
+func (s *SCTPUnknownChunkType) Error() error {
+ return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type)
+}
+
+// SCTPData is the SCTP Data chunk layer.
+type SCTPData struct {
+ SCTPChunk
+ Unordered, BeginFragment, EndFragment bool
+ TSN uint32
+ StreamId uint16
+ StreamSequence uint16
+ PayloadProtocol SCTPPayloadProtocol
+}
+
+// LayerType returns gopacket.LayerTypeSCTPData.
+func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData }
+
+// SCTPPayloadProtocol represents a payload protocol
+type SCTPPayloadProtocol uint32
+
+// SCTPPayloadProtocol constants from http://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml
+const (
+ SCTPProtocolReserved SCTPPayloadProtocol = 0
+ SCTPPayloadUIA = 1
+ SCTPPayloadM2UA = 2
+ SCTPPayloadM3UA = 3
+ SCTPPayloadSUA = 4
+ SCTPPayloadM2PA = 5
+ SCTPPayloadV5UA = 6
+ SCTPPayloadH248 = 7
+ SCTPPayloadBICC = 8
+ SCTPPayloadTALI = 9
+ SCTPPayloadDUA = 10
+ SCTPPayloadASAP = 11
+ SCTPPayloadENRP = 12
+ SCTPPayloadH323 = 13
+ SCTPPayloadQIPC = 14
+ SCTPPayloadSIMCO = 15
+ SCTPPayloadDDPSegment = 16
+ SCTPPayloadDDPStream = 17
+ SCTPPayloadS1AP = 18
+)
+
+func (p SCTPPayloadProtocol) String() string {
+ switch p {
+ case SCTPProtocolReserved:
+ return "Reserved"
+ case SCTPPayloadUIA:
+ return "UIA"
+ case SCTPPayloadM2UA:
+ return "M2UA"
+ case SCTPPayloadM3UA:
+ return "M3UA"
+ case SCTPPayloadSUA:
+ return "SUA"
+ case SCTPPayloadM2PA:
+ return "M2PA"
+ case SCTPPayloadV5UA:
+ return "V5UA"
+ case SCTPPayloadH248:
+ return "H.248"
+ case SCTPPayloadBICC:
+ return "BICC"
+ case SCTPPayloadTALI:
+ return "TALI"
+ case SCTPPayloadDUA:
+ return "DUA"
+ case SCTPPayloadASAP:
+ return "ASAP"
+ case SCTPPayloadENRP:
+ return "ENRP"
+ case SCTPPayloadH323:
+ return "H.323"
+ case SCTPPayloadQIPC:
+ return "QIPC"
+ case SCTPPayloadSIMCO:
+ return "SIMCO"
+ case SCTPPayloadDDPSegment:
+ return "DDPSegment"
+ case SCTPPayloadDDPStream:
+ return "DDPStream"
+ case SCTPPayloadS1AP:
+ return "S1AP"
+ }
+ return fmt.Sprintf("Unknown(%d)", p)
+}
+
+func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPData{
+ SCTPChunk: chunk,
+ Unordered: data[1]&0x4 != 0,
+ BeginFragment: data[1]&0x2 != 0,
+ EndFragment: data[1]&0x1 != 0,
+ TSN: binary.BigEndian.Uint32(data[4:8]),
+ StreamId:
binary.BigEndian.Uint16(data[8:10]), + StreamSequence: binary.BigEndian.Uint16(data[10:12]), + PayloadProtocol: SCTPPayloadProtocol(binary.BigEndian.Uint32(data[12:16])), + } + // Length is the length in bytes of the data, INCLUDING the 16-byte header. + p.AddLayer(sc) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + payload := b.Bytes() + // Pad the payload to a 32 bit boundary + if rem := len(payload) % 4; rem != 0 { + b.AppendBytes(4 - rem) + } + length := 16 + bytes, err := b.PrependBytes(length) + if err != nil { + return err + } + bytes[0] = uint8(sc.Type) + flags := uint8(0) + if sc.Unordered { + flags |= 0x4 + } + if sc.BeginFragment { + flags |= 0x2 + } + if sc.EndFragment { + flags |= 0x1 + } + bytes[1] = flags + binary.BigEndian.PutUint16(bytes[2:4], uint16(length+len(payload))) + binary.BigEndian.PutUint32(bytes[4:8], sc.TSN) + binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId) + binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence) + binary.BigEndian.PutUint32(bytes[12:16], uint32(sc.PayloadProtocol)) + return nil +} + +// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet. +type SCTPInitParameter SCTPParameter + +// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck +// messages. +type SCTPInit struct { + SCTPChunk + InitiateTag uint32 + AdvertisedReceiverWindowCredit uint32 + OutboundStreams, InboundStreams uint16 + InitialTSN uint32 + Parameters []SCTPInitParameter +} + +// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck. +func (sc *SCTPInit) LayerType() gopacket.LayerType { + if sc.Type == SCTPChunkTypeInitAck { + return LayerTypeSCTPInitAck + } + // sc.Type == SCTPChunkTypeInit + return LayerTypeSCTPInit +} + +func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error { + chunk, err := decodeSCTPChunk(data) + if err != nil { + return err + } + sc := &SCTPInit{ + SCTPChunk: chunk, + InitiateTag: binary.BigEndian.Uint32(data[4:8]), + AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]), + OutboundStreams: binary.BigEndian.Uint16(data[12:14]), + InboundStreams: binary.BigEndian.Uint16(data[14:16]), + InitialTSN: binary.BigEndian.Uint32(data[16:20]), + } + paramData := data[20:sc.ActualLength] + for len(paramData) > 0 { + p := SCTPInitParameter(decodeSCTPParameter(paramData)) + paramData = paramData[p.ActualLength:] + sc.Parameters = append(sc.Parameters, p) + } + p.AddLayer(sc) + return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)) +} + +// SerializeTo is for gopacket.SerializableLayer. +func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var payload []byte + for _, param := range sc.Parameters { + payload = append(payload, SCTPParameter(param).Bytes()...) 
+ }
+ length := 20 + len(payload)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag)
+ binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+ binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams)
+ binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams)
+ binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN)
+ copy(bytes[20:], payload)
+ return nil
+}
+
+// SCTPSack is the SCTP Selective ACK chunk layer.
+type SCTPSack struct {
+ SCTPChunk
+ CumulativeTSNAck uint32
+ AdvertisedReceiverWindowCredit uint32
+ NumGapACKs, NumDuplicateTSNs uint16
+ GapACKs []uint16
+ DuplicateTSNs []uint32
+}
+
+// LayerType returns LayerTypeSCTPSack.
+func (sc *SCTPSack) LayerType() gopacket.LayerType {
+ return LayerTypeSCTPSack
+}
+
+func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPSack{
+ SCTPChunk: chunk,
+ CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+ AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+ NumGapACKs: binary.BigEndian.Uint16(data[12:14]),
+ NumDuplicateTSNs: binary.BigEndian.Uint16(data[14:16]),
+ }
+ // We cap gapAcks and dupTSNs here so we're not allocating tons
+ // of memory based on a user-controllable field. Our maximums are not exact,
+ // but should give us sane defaults... we'll still hit slice boundaries and
+ // fail if the user-supplied values are too high (in the for loops below), but
+ // the amount of memory we'll have allocated because of that should be small
+ // (< sc.ActualLength)
+ gapAcks := sc.SCTPChunk.ActualLength / 2
+ dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4
+ if gapAcks > int(sc.NumGapACKs) {
+ gapAcks = int(sc.NumGapACKs)
+ }
+ if dupTSNs > int(sc.NumDuplicateTSNs) {
+ dupTSNs = int(sc.NumDuplicateTSNs)
+ }
+ sc.GapACKs = make([]uint16, 0, gapAcks)
+ sc.DuplicateTSNs = make([]uint32, 0, dupTSNs)
+ bytesRemaining := data[16:]
+ for i := 0; i < int(sc.NumGapACKs); i++ {
+ sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2]))
+ bytesRemaining = bytesRemaining[2:]
+ }
+ for i := 0; i < int(sc.NumDuplicateTSNs); i++ {
+ sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4]))
+ bytesRemaining = bytesRemaining[4:]
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
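[Editor's note: the gapAcks/dupTSNs clamping in decodeSCTPSack above is a defensive-decoding pattern: never size an allocation from an attacker-controlled count, only from what could physically fit in the chunk. The same idea in isolation; boundedCap is the editor's name, not part of the package.]

    // boundedCap caps a count claimed by the packet at the number of
    // elements that could actually fit in the bytes that remain.
    func boundedCap(claimed, bytesRemaining, elemSize int) int {
        if fit := bytesRemaining / elemSize; claimed > fit {
            return fit
        }
        return claimed
    }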
+func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+ binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+ binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs)))
+ binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs)))
+ for i, v := range sc.GapACKs {
+ binary.BigEndian.PutUint16(bytes[16+i*2:], v)
+ }
+ offset := 16 + 2*len(sc.GapACKs)
+ for i, v := range sc.DuplicateTSNs {
+ binary.BigEndian.PutUint32(bytes[offset+i*4:], v)
+ }
+ return nil
+}
+
+// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and
+// heartbeat ack layers.
+type SCTPHeartbeatParameter SCTPParameter
+
+// SCTPHeartbeat is the SCTP heartbeat layer, also used for heartbeat ack.
+type SCTPHeartbeat struct {
+ SCTPChunk
+ Parameters []SCTPHeartbeatParameter
+}
+
+// LayerType returns gopacket.LayerTypeSCTPHeartbeat or
+// gopacket.LayerTypeSCTPHeartbeatAck.
+func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeHeartbeatAck {
+ return LayerTypeSCTPHeartbeatAck
+ }
+ // sc.Type == SCTPChunkTypeHeartbeat
+ return LayerTypeSCTPHeartbeat
+}
+
+func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPHeartbeat{
+ SCTPChunk: chunk,
+ }
+ paramData := data[4:sc.Length]
+ for len(paramData) > 0 {
+ p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 4 + len(payload)
+
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], payload)
+ return nil
+}
+
+// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers.
+type SCTPErrorParameter SCTPParameter
+
+// SCTPError is the SCTP error layer, also used for SCTP aborts.
+type SCTPError struct {
+ SCTPChunk
+ Parameters []SCTPErrorParameter
+}
+
+// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError.
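[Editor's note: every chunk serializer in this file follows the same TLV rule explained at SCTPChunk.ActualLength above: the wire Length field records the unpadded size, while the buffer is rounded up to the next 4-byte boundary. A worked sketch; chunkSizes is the editor's name, not part of the package.]

    // chunkSizes returns the advertised Length and the padded on-the-wire
    // size of a chunk carrying valueLen bytes after the 4-byte header.
    func chunkSizes(valueLen int) (length, actual int) {
        length = 4 + valueLen
        return length, roundUpToNearest4(length)
    }

[So a chunk with a 14-byte value is written as Length=18 inside a 20-byte, zero-padded region.]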
+func (sc *SCTPError) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeAbort {
+ return LayerTypeSCTPAbort
+ }
+ // sc.Type == SCTPChunkTypeError
+ return LayerTypeSCTPError
+}
+
+func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error {
+ // remarkably similar to decodeSCTPHeartbeat ;)
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPError{
+ SCTPChunk: chunk,
+ }
+ paramData := data[4:sc.Length]
+ for len(paramData) > 0 {
+ p := SCTPErrorParameter(decodeSCTPParameter(paramData))
+ paramData = paramData[p.ActualLength:]
+ sc.Parameters = append(sc.Parameters, p)
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ var payload []byte
+ for _, param := range sc.Parameters {
+ payload = append(payload, SCTPParameter(param).Bytes()...)
+ }
+ length := 4 + len(payload)
+
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], payload)
+ return nil
+}
+
+// SCTPShutdown is the SCTP shutdown layer.
+type SCTPShutdown struct {
+ SCTPChunk
+ CumulativeTSNAck uint32
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdown.
+func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown }
+
+func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPShutdown{
+ SCTPChunk: chunk,
+ CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(8)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 8)
+ binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+ return nil
+}
+
+// SCTPShutdownAck is the SCTP shutdown ack layer.
+type SCTPShutdownAck struct {
+ SCTPChunk
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdownAck.
+func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck }
+
+func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPShutdownAck{
+ SCTPChunk: chunk,
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 4)
+ return nil
+}
+
+// SCTPCookieEcho is the SCTP Cookie Echo layer.
+type SCTPCookieEcho struct {
+ SCTPChunk
+ Cookie []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTPCookieEcho.
+func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho }
+
+func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPCookieEcho{
+ SCTPChunk: chunk,
+ }
+ sc.Cookie = data[4:sc.Length]
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ length := 4 + len(sc.Cookie)
+ bytes, err := b.PrependBytes(roundUpToNearest4(length))
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+ copy(bytes[4:], sc.Cookie)
+ return nil
+}
+
+// SCTPEmptyLayer is used by all empty SCTP chunks (currently CookieAck and
+// ShutdownComplete).
+type SCTPEmptyLayer struct {
+ SCTPChunk
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or
+// LayerTypeSCTPCookieAck.
+func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType {
+ if sc.Type == SCTPChunkTypeShutdownComplete {
+ return LayerTypeSCTPShutdownComplete
+ }
+ // sc.Type == SCTPChunkTypeCookieAck
+ return LayerTypeSCTPCookieAck
+}
+
+func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error {
+ chunk, err := decodeSCTPChunk(data)
+ if err != nil {
+ return err
+ }
+ sc := &SCTPEmptyLayer{
+ SCTPChunk: chunk,
+ }
+ p.AddLayer(sc)
+ return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+ bytes, err := b.PrependBytes(4)
+ if err != nil {
+ return err
+ }
+ bytes[0] = uint8(sc.Type)
+ bytes[1] = sc.Flags
+ binary.BigEndian.PutUint16(bytes[2:4], 4)
+ return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sflow.go
new file mode 100644
index 00000000..c56fe89d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sflow.go
@@ -0,0 +1,2480 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+This layer decodes SFlow version 5 datagrams.
+
+The specification can be found here: http://sflow.org/sflow_version_5.txt
+
+Additional developer information about sflow can be found at:
+http://sflow.org/developers/specifications.php
+
+And SFlow in general:
+http://sflow.org/index.php
+
+Two forms of sample data are defined: compact and expanded. The
+Specification has this to say:
+
+ Compact and expand forms of counter and flow samples are defined.
+ An agent must not mix compact/expanded encodings. If an agent
+ will never use ifIndex numbers >= 2^24 then it must use compact
+ encodings for all interfaces. Otherwise the expanded formats must
+ be used for all interfaces.
+
+This decoder only supports the compact form, because that is the only
+one for which data was available.
+
+The datagram is composed of one or more samples of type flow or counter,
+and each sample is composed of one or more records describing the sample.
+A sample is a single instance of sampled information, and each record in
+the sample gives additional / supplementary information about the sample.
+
+The following sample record types are supported:
+
+ Raw Packet Header
+ opaque = flow_data; enterprise = 0; format = 1
+
+ Extended Switch Data
+ opaque = flow_data; enterprise = 0; format = 1001
+
+ Extended Router Data
+ opaque = flow_data; enterprise = 0; format = 1002
+
+ Extended Gateway Data
+ opaque = flow_data; enterprise = 0; format = 1003
+
+ Extended User Data
+ opaque = flow_data; enterprise = 0; format = 1004
+
+ Extended URL Data
+ opaque = flow_data; enterprise = 0; format = 1005
+
+The following types of counter records are supported:
+
+ Generic Interface Counters - see RFC 2233
+ opaque = counter_data; enterprise = 0; format = 1
+
+ Ethernet Interface Counters - see RFC 2358
+ opaque = counter_data; enterprise = 0; format = 2
+
+SFlow is encoded using XDR (RFC4506). There are a few places
+where the standard 4-byte fields are partitioned into two
+bitfields of different lengths. I'm not sure why the designers
+chose to pack together two values like this in some places, and
+in others they use the entire 4-byte value to store a number that
+will never be more than a few bits. In any case, there are a couple
+of types defined to handle the decoding of these bitfields, and
+that's why they're there. */
+
+package layers
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+
+ "github.com/google/gopacket"
+)
+
+// SFlowRecord holds both flow sample records and counter sample records.
+// A Record is the structure that actually holds the sampled data
+// and / or counters.
+type SFlowRecord interface {
+}
+
+// SFlowDataSource encodes a 2-bit SFlowSourceFormat in its most significant
+// 2 bits, and an SFlowSourceValue in its least significant 30 bits.
+// These types and values define the meaning of the interface information
+// presented in the sample metadata.
+type SFlowDataSource int32
+
+func (sdc SFlowDataSource) decode() (SFlowSourceFormat, SFlowSourceValue) {
+ leftField := sdc >> 30
+ rightField := uint32(0x3FFFFFFF) & uint32(sdc)
+ return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
+
+type SFlowDataSourceExpanded struct {
+ SourceIDClass SFlowSourceFormat
+ SourceIDIndex SFlowSourceValue
+}
+
+func (sdce SFlowDataSourceExpanded) decode() (SFlowSourceFormat, SFlowSourceValue) {
+ leftField := sdce.SourceIDClass >> 30
+ rightField := uint32(0x3FFFFFFF) & uint32(sdce.SourceIDIndex)
+ return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
+
+type SFlowSourceFormat uint32
+
+type SFlowSourceValue uint32
+
+const (
+ SFlowTypeSingleInterface SFlowSourceFormat = 0
+ SFlowTypePacketDiscarded SFlowSourceFormat = 1
+ SFlowTypeMultipleDestinations SFlowSourceFormat = 2
+)
+
+func (sdf SFlowSourceFormat) String() string {
+ switch sdf {
+ case SFlowTypeSingleInterface:
+ return "Single Interface"
+ case SFlowTypePacketDiscarded:
+ return "Packet Discarded"
+ case SFlowTypeMultipleDestinations:
+ return "Multiple Destinations"
+ default:
+ return "UNKNOWN"
+ }
+}
+
+func decodeSFlow(data []byte, p gopacket.PacketBuilder) error {
+ s := &SFlowDatagram{}
+ err := s.DecodeFromBytes(data, p)
+ if err != nil {
+ return err
+ }
+ p.AddLayer(s)
+ p.SetApplicationLayer(s)
+ return nil
+}
+
+// SFlowDatagram is the outermost container which holds some basic information
+// about the reporting agent, and holds at least one sample record
+type SFlowDatagram struct {
+ BaseLayer
+
+ DatagramVersion uint32
+ AgentAddress net.IP
+ SubAgentID uint32
+ SequenceNumber uint32
+ AgentUptime uint32
+ SampleCount uint32
+ FlowSamples []SFlowFlowSample
+ CounterSamples []SFlowCounterSample
+}
+
+// An SFlow datagram's outer container has the following
+// structure:
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sFlow version (2|4|5) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int IP version of the Agent (1=v4|2=v6) |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / Agent IP address (v4=4byte|v6=16byte) /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sub agent id |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int datagram sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int switch uptime in ms |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int n samples in datagram |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / n samples /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowDataFormat encodes the EnterpriseID in the most
+// significant 20 bits, and the SampleType in the least significant
+// 12 bits.
+type SFlowDataFormat uint32
+
+func (sdf SFlowDataFormat) decode() (SFlowEnterpriseID, SFlowSampleType) {
+ leftField := sdf >> 12
+ rightField := uint32(0xFFF) & uint32(sdf)
+ return SFlowEnterpriseID(leftField), SFlowSampleType(rightField)
+}
+
+// SFlowEnterpriseID is used to differentiate between the
+// official SFlow standard, and other, vendor-specific
+// types of flow data. (Similar to SNMP's enterprise MIB
+// OIDs.) Only the official SFlow Enterprise ID is decoded
+// here.
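[Editor's note: sFlow packs several value pairs into single XDR 4-byte words; SFlowDataFormat above keeps the enterprise ID in the top 20 bits and the sample type in the bottom 12, and SFlowDataSource splits 2/30 the same way. An illustrative inverse of SFlowDataFormat.decode; packDataFormat is the editor's name, not part of the package.]

    // packDataFormat is the inverse of SFlowDataFormat.decode: enterprise
    // ID in the upper 20 bits, sample type in the lower 12.
    func packDataFormat(e SFlowEnterpriseID, t SFlowSampleType) SFlowDataFormat {
        return SFlowDataFormat(uint32(e)<<12 | uint32(t)&0xFFF)
    }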
+type SFlowEnterpriseID uint32 + +const ( + SFlowStandard SFlowEnterpriseID = 0 +) + +func (eid SFlowEnterpriseID) String() string { + switch eid { + case SFlowStandard: + return "Standard SFlow" + default: + return "" + } +} + +func (eid SFlowEnterpriseID) GetType() SFlowEnterpriseID { + return SFlowStandard +} + +// SFlowSampleType specifies the type of sample. Only flow samples +// and counter samples are supported +type SFlowSampleType uint32 + +const ( + SFlowTypeFlowSample SFlowSampleType = 1 + SFlowTypeCounterSample SFlowSampleType = 2 + SFlowTypeExpandedFlowSample SFlowSampleType = 3 + SFlowTypeExpandedCounterSample SFlowSampleType = 4 +) + +func (st SFlowSampleType) GetType() SFlowSampleType { + switch st { + case SFlowTypeFlowSample: + return SFlowTypeFlowSample + case SFlowTypeCounterSample: + return SFlowTypeCounterSample + case SFlowTypeExpandedFlowSample: + return SFlowTypeExpandedFlowSample + case SFlowTypeExpandedCounterSample: + return SFlowTypeExpandedCounterSample + default: + panic("Invalid Sample Type") + } +} + +func (st SFlowSampleType) String() string { + switch st { + case SFlowTypeFlowSample: + return "Flow Sample" + case SFlowTypeCounterSample: + return "Counter Sample" + case SFlowTypeExpandedFlowSample: + return "Expanded Flow Sample" + case SFlowTypeExpandedCounterSample: + return "Expanded Counter Sample" + default: + return "" + } +} + +func (s *SFlowDatagram) LayerType() gopacket.LayerType { return LayerTypeSFlow } + +func (d *SFlowDatagram) Payload() []byte { return nil } + +func (d *SFlowDatagram) CanDecode() gopacket.LayerClass { return LayerTypeSFlow } + +func (d *SFlowDatagram) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload } + +// SFlowIPType determines what form the IP address being decoded will +// take. 
This is an XDR union type allowing for both IPv4 and IPv6 +type SFlowIPType uint32 + +const ( + SFlowIPv4 SFlowIPType = 1 + SFlowIPv6 SFlowIPType = 2 +) + +func (s SFlowIPType) String() string { + switch s { + case SFlowIPv4: + return "IPv4" + case SFlowIPv6: + return "IPv6" + default: + return "" + } +} + +func (s SFlowIPType) Length() int { + switch s { + case SFlowIPv4: + return 4 + case SFlowIPv6: + return 16 + default: + return 0 + } +} + +func (s *SFlowDatagram) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + var agentAddressType SFlowIPType + + data, s.DatagramVersion = data[4:], binary.BigEndian.Uint32(data[:4]) + data, agentAddressType = data[4:], SFlowIPType(binary.BigEndian.Uint32(data[:4])) + data, s.AgentAddress = data[agentAddressType.Length():], data[:agentAddressType.Length()] + data, s.SubAgentID = data[4:], binary.BigEndian.Uint32(data[:4]) + data, s.SequenceNumber = data[4:], binary.BigEndian.Uint32(data[:4]) + data, s.AgentUptime = data[4:], binary.BigEndian.Uint32(data[:4]) + data, s.SampleCount = data[4:], binary.BigEndian.Uint32(data[:4]) + + if s.SampleCount < 1 { + return fmt.Errorf("SFlow Datagram has invalid sample length: %d", s.SampleCount) + } + for i := uint32(0); i < s.SampleCount; i++ { + sdf := SFlowDataFormat(binary.BigEndian.Uint32(data[:4])) + _, sampleType := sdf.decode() + switch sampleType { + case SFlowTypeFlowSample: + if flowSample, err := decodeFlowSample(&data, false); err == nil { + s.FlowSamples = append(s.FlowSamples, flowSample) + } else { + return err + } + case SFlowTypeCounterSample: + if counterSample, err := decodeCounterSample(&data, false); err == nil { + s.CounterSamples = append(s.CounterSamples, counterSample) + } else { + return err + } + case SFlowTypeExpandedFlowSample: + if flowSample, err := decodeFlowSample(&data, true); err == nil { + s.FlowSamples = append(s.FlowSamples, flowSample) + } else { + return err + } + case SFlowTypeExpandedCounterSample: + if counterSample, err := decodeCounterSample(&data, true); err == nil { + s.CounterSamples = append(s.CounterSamples, counterSample) + } else { + return err + } + + default: + return fmt.Errorf("Unsupported SFlow sample type %d", sampleType) + } + } + return nil +} + +// SFlowFlowSample represents a sampled packet and contains +// one or more records describing the packet +type SFlowFlowSample struct { + EnterpriseID SFlowEnterpriseID + Format SFlowSampleType + SampleLength uint32 + SequenceNumber uint32 + SourceIDClass SFlowSourceFormat + SourceIDIndex SFlowSourceValue + SamplingRate uint32 + SamplePool uint32 + Dropped uint32 + InputInterfaceFormat uint32 + InputInterface uint32 + OutputInterfaceFormat uint32 + OutputInterface uint32 + RecordCount uint32 + Records []SFlowRecord +} + +// Flow samples have the following structure. 
Note
+// the bit fields to encode the Enterprise ID and the
+// Flow record format: type 1
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | sample length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// |id type | src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sampling rate |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample pool |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int drops |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input ifIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output ifIndex |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / flow records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Flow samples have the following structure.
+// Flow record format: type 3
+
+// 0 15 31
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | 20 bit Enterprise (0) |12 bit format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | sample length |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample sequence number |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int src id type |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int src id index value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sampling rate |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int sample pool |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int drops |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input interface format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int input interface value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output interface format |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int output interface value |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// | int number of records |
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+// / flow records /
+// / /
+// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowFlowDataFormat uint32
+
+func (fdf SFlowFlowDataFormat) decode() (SFlowEnterpriseID, SFlowFlowRecordType) {
+ leftField := fdf >> 12
+ rightField := uint32(0xFFF) & uint32(fdf)
+ return SFlowEnterpriseID(leftField), SFlowFlowRecordType(rightField)
+}
+
+func (fs SFlowFlowSample) GetRecords() []SFlowRecord {
+ return fs.Records
+}
+
+func (fs SFlowFlowSample) GetType() SFlowSampleType {
+ return SFlowTypeFlowSample
+}
+
+func skipRecord(data *[]byte) {
+ recordLength := int(binary.BigEndian.Uint32((*data)[4:]))
+ *data = (*data)[(recordLength+((4-recordLength)%4))+8:]
+}
+
+func decodeFlowSample(data *[]byte, expanded bool) (SFlowFlowSample, error) {
+ s := SFlowFlowSample{}
+ var sdf SFlowDataFormat
+ *data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ var sdc SFlowDataSource
+
+ s.EnterpriseID, s.Format = sdf.decode()
+ *data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ if expanded {
+ *data, s.SourceIDClass = (*data)[4:], SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4]))
+ *data,
s.SourceIDIndex = (*data)[4:], SFlowSourceValue(binary.BigEndian.Uint32((*data)[:4]))
+ } else {
+ *data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+ s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+ }
+ *data, s.SamplingRate = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.SamplePool = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.Dropped = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ if expanded {
+ *data, s.InputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.OutputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ } else {
+ *data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ *data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+ }
+ *data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+ for i := uint32(0); i < s.RecordCount; i++ {
+ rdf := SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+ enterpriseID, flowRecordType := rdf.decode()
+
+ // Try to decode only when the EnterpriseID is 0, signaling that the
+ // default sFlow structs are used according to the specification.
+ // Unexpected behavior has been observed otherwise, e.g. with pmacct.
+ if enterpriseID == 0 {
+ switch flowRecordType {
+ case SFlowTypeRawPacketFlow:
+ if record, err := decodeRawPacketFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedUserFlow:
+ if record, err := decodeExtendedUserFlow(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedUrlFlow:
+ if record, err := decodeExtendedURLRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedSwitchFlow:
+ if record, err := decodeExtendedSwitchFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedRouterFlow:
+ if record, err := decodeExtendedRouterFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedGatewayFlow:
+ if record, err := decodeExtendedGatewayFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeEthernetFrameFlow:
+ if record, err := decodeEthernetFrameFlowRecord(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeIpv4Flow:
+ if record, err := decodeSFlowIpv4Record(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeIpv6Flow:
+ if record, err := decodeSFlowIpv6Record(data); err == nil {
+ s.Records = append(s.Records, record)
+ } else {
+ return s, err
+ }
+ case SFlowTypeExtendedMlpsFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsFlow")
+ case SFlowTypeExtendedNatFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedNatFlow")
+ case SFlowTypeExtendedMlpsTunnelFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsTunnelFlow")
+ case SFlowTypeExtendedMlpsVcFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping TypeExtendedMlpsVcFlow")
+ case SFlowTypeExtendedMlpsFecFlow:
+ // TODO
+ skipRecord(data)
+ return s, errors.New("skipping
TypeExtendedMlpsFecFlow") + case SFlowTypeExtendedMlpsLvpFecFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedMlpsLvpFecFlow") + case SFlowTypeExtendedVlanFlow: + // TODO + skipRecord(data) + return s, errors.New("skipping TypeExtendedVlanFlow") + case SFlowTypeExtendedIpv4TunnelEgressFlow: + if record, err := decodeExtendedIpv4TunnelEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedIpv4TunnelIngressFlow: + if record, err := decodeExtendedIpv4TunnelIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedIpv6TunnelEgressFlow: + if record, err := decodeExtendedIpv6TunnelEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedIpv6TunnelIngressFlow: + if record, err := decodeExtendedIpv6TunnelIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedDecapsulateEgressFlow: + if record, err := decodeExtendedDecapsulateEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedDecapsulateIngressFlow: + if record, err := decodeExtendedDecapsulateIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedVniEgressFlow: + if record, err := decodeExtendedVniEgress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeExtendedVniIngressFlow: + if record, err := decodeExtendedVniIngress(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + default: + return s, fmt.Errorf("Unsupported flow record type: %d", flowRecordType) + } + } else { + skipRecord(data) + } + } + return s, nil +} + +// Counter samples report information about various counter +// objects. Typically these are items like IfInOctets, or +// CPU / Memory stats, etc. SFlow will report these at regular +// intervals as configured on the agent. If one were sufficiently +// industrious, this could be used to replace the typical +// SNMP polling used for such things. +type SFlowCounterSample struct { + EnterpriseID SFlowEnterpriseID + Format SFlowSampleType + SampleLength uint32 + SequenceNumber uint32 + SourceIDClass SFlowSourceFormat + SourceIDIndex SFlowSourceValue + RecordCount uint32 + Records []SFlowRecord +} + +// Counter samples have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int sample sequence number | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// |id type | src id index value | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | int number of records | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / counter records / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowCounterDataFormat uint32 + +func (cdf SFlowCounterDataFormat) decode() (SFlowEnterpriseID, SFlowCounterRecordType) { + leftField := cdf >> 12 + rightField := uint32(0xFFF) & uint32(cdf) + return SFlowEnterpriseID(leftField), SFlowCounterRecordType(rightField) +} + +// GetRecords will return a slice of interface types +// representing records. A type switch can be used to +// get at the underlying SFlowCounterRecordType. +func (cs SFlowCounterSample) GetRecords() []SFlowRecord { + return cs.Records +} + +// GetType will report the type of sample. 
Only the +// compact form of counter samples is supported +func (cs SFlowCounterSample) GetType() SFlowSampleType { + return SFlowTypeCounterSample +} + +type SFlowCounterRecordType uint32 + +const ( + SFlowTypeGenericInterfaceCounters SFlowCounterRecordType = 1 + SFlowTypeEthernetInterfaceCounters SFlowCounterRecordType = 2 + SFlowTypeTokenRingInterfaceCounters SFlowCounterRecordType = 3 + SFlowType100BaseVGInterfaceCounters SFlowCounterRecordType = 4 + SFlowTypeVLANCounters SFlowCounterRecordType = 5 + SFlowTypeLACPCounters SFlowCounterRecordType = 7 + SFlowTypeProcessorCounters SFlowCounterRecordType = 1001 + SFlowTypeOpenflowPortCounters SFlowCounterRecordType = 1004 + SFlowTypePORTNAMECounters SFlowCounterRecordType = 1005 + SFLowTypeAPPRESOURCESCounters SFlowCounterRecordType = 2203 + SFlowTypeOVSDPCounters SFlowCounterRecordType = 2207 +) + +func (cr SFlowCounterRecordType) String() string { + switch cr { + case SFlowTypeGenericInterfaceCounters: + return "Generic Interface Counters" + case SFlowTypeEthernetInterfaceCounters: + return "Ethernet Interface Counters" + case SFlowTypeTokenRingInterfaceCounters: + return "Token Ring Interface Counters" + case SFlowType100BaseVGInterfaceCounters: + return "100BaseVG Interface Counters" + case SFlowTypeVLANCounters: + return "VLAN Counters" + case SFlowTypeLACPCounters: + return "LACP Counters" + case SFlowTypeProcessorCounters: + return "Processor Counters" + case SFlowTypeOpenflowPortCounters: + return "Openflow Port Counters" + case SFlowTypePORTNAMECounters: + return "PORT NAME Counters" + case SFLowTypeAPPRESOURCESCounters: + return "App Resources Counters" + case SFlowTypeOVSDPCounters: + return "OVSDP Counters" + default: + return "" + + } +} + +func decodeCounterSample(data *[]byte, expanded bool) (SFlowCounterSample, error) { + s := SFlowCounterSample{} + var sdc SFlowDataSource + var sdce SFlowDataSourceExpanded + var sdf SFlowDataFormat + + *data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + s.EnterpriseID, s.Format = sdf.decode() + *data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + if expanded { + *data, sdce = (*data)[8:], SFlowDataSourceExpanded{SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])), SFlowSourceValue(binary.BigEndian.Uint32((*data)[4:8]))} + s.SourceIDClass, s.SourceIDIndex = sdce.decode() + } else { + *data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4])) + s.SourceIDClass, s.SourceIDIndex = sdc.decode() + } + *data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + for i := uint32(0); i < s.RecordCount; i++ { + cdf := SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + _, counterRecordType := cdf.decode() + switch counterRecordType { + case SFlowTypeGenericInterfaceCounters: + if record, err := decodeGenericInterfaceCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeEthernetInterfaceCounters: + if record, err := decodeEthernetCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeTokenRingInterfaceCounters: + skipRecord(data) + return s, errors.New("skipping TypeTokenRingInterfaceCounters") + case SFlowType100BaseVGInterfaceCounters: + skipRecord(data) + return s, errors.New("skipping Type100BaseVGInterfaceCounters") + case SFlowTypeVLANCounters: + if record, err := decodeVLANCounters(data); err 
== nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeLACPCounters: + if record, err := decodeLACPCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeProcessorCounters: + if record, err := decodeProcessorCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeOpenflowPortCounters: + if record, err := decodeOpenflowportCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypePORTNAMECounters: + if record, err := decodePortnameCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFLowTypeAPPRESOURCESCounters: + if record, err := decodeAppresourcesCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + case SFlowTypeOVSDPCounters: + if record, err := decodeOVSDPCounters(data); err == nil { + s.Records = append(s.Records, record) + } else { + return s, err + } + default: + return s, fmt.Errorf("Invalid counter record type: %d", counterRecordType) + } + } + return s, nil +} + +// SFlowBaseFlowRecord holds the fields common to all records +// of type SFlowFlowRecordType +type SFlowBaseFlowRecord struct { + EnterpriseID SFlowEnterpriseID + Format SFlowFlowRecordType + FlowDataLength uint32 +} + +func (bfr SFlowBaseFlowRecord) GetType() SFlowFlowRecordType { + return bfr.Format +} + +// SFlowFlowRecordType denotes what kind of Flow Record is +// represented. See RFC 3176 +type SFlowFlowRecordType uint32 + +const ( + SFlowTypeRawPacketFlow SFlowFlowRecordType = 1 + SFlowTypeEthernetFrameFlow SFlowFlowRecordType = 2 + SFlowTypeIpv4Flow SFlowFlowRecordType = 3 + SFlowTypeIpv6Flow SFlowFlowRecordType = 4 + SFlowTypeExtendedSwitchFlow SFlowFlowRecordType = 1001 + SFlowTypeExtendedRouterFlow SFlowFlowRecordType = 1002 + SFlowTypeExtendedGatewayFlow SFlowFlowRecordType = 1003 + SFlowTypeExtendedUserFlow SFlowFlowRecordType = 1004 + SFlowTypeExtendedUrlFlow SFlowFlowRecordType = 1005 + SFlowTypeExtendedMlpsFlow SFlowFlowRecordType = 1006 + SFlowTypeExtendedNatFlow SFlowFlowRecordType = 1007 + SFlowTypeExtendedMlpsTunnelFlow SFlowFlowRecordType = 1008 + SFlowTypeExtendedMlpsVcFlow SFlowFlowRecordType = 1009 + SFlowTypeExtendedMlpsFecFlow SFlowFlowRecordType = 1010 + SFlowTypeExtendedMlpsLvpFecFlow SFlowFlowRecordType = 1011 + SFlowTypeExtendedVlanFlow SFlowFlowRecordType = 1012 + SFlowTypeExtendedIpv4TunnelEgressFlow SFlowFlowRecordType = 1023 + SFlowTypeExtendedIpv4TunnelIngressFlow SFlowFlowRecordType = 1024 + SFlowTypeExtendedIpv6TunnelEgressFlow SFlowFlowRecordType = 1025 + SFlowTypeExtendedIpv6TunnelIngressFlow SFlowFlowRecordType = 1026 + SFlowTypeExtendedDecapsulateEgressFlow SFlowFlowRecordType = 1027 + SFlowTypeExtendedDecapsulateIngressFlow SFlowFlowRecordType = 1028 + SFlowTypeExtendedVniEgressFlow SFlowFlowRecordType = 1029 + SFlowTypeExtendedVniIngressFlow SFlowFlowRecordType = 1030 +) + +func (rt SFlowFlowRecordType) String() string { + switch rt { + case SFlowTypeRawPacketFlow: + return "Raw Packet Flow Record" + case SFlowTypeEthernetFrameFlow: + return "Ethernet Frame Flow Record" + case SFlowTypeIpv4Flow: + return "IPv4 Flow Record" + case SFlowTypeIpv6Flow: + return "IPv6 Flow Record" + case SFlowTypeExtendedSwitchFlow: + return "Extended Switch Flow Record" + case SFlowTypeExtendedRouterFlow: + return "Extended Router Flow Record" + case SFlowTypeExtendedGatewayFlow: 
+ return "Extended Gateway Flow Record" + case SFlowTypeExtendedUserFlow: + return "Extended User Flow Record" + case SFlowTypeExtendedUrlFlow: + return "Extended URL Flow Record" + case SFlowTypeExtendedMlpsFlow: + return "Extended MPLS Flow Record" + case SFlowTypeExtendedNatFlow: + return "Extended NAT Flow Record" + case SFlowTypeExtendedMlpsTunnelFlow: + return "Extended MPLS Tunnel Flow Record" + case SFlowTypeExtendedMlpsVcFlow: + return "Extended MPLS VC Flow Record" + case SFlowTypeExtendedMlpsFecFlow: + return "Extended MPLS FEC Flow Record" + case SFlowTypeExtendedMlpsLvpFecFlow: + return "Extended MPLS LVP FEC Flow Record" + case SFlowTypeExtendedVlanFlow: + return "Extended VLAN Flow Record" + case SFlowTypeExtendedIpv4TunnelEgressFlow: + return "Extended IPv4 Tunnel Egress Record" + case SFlowTypeExtendedIpv4TunnelIngressFlow: + return "Extended IPv4 Tunnel Ingress Record" + case SFlowTypeExtendedIpv6TunnelEgressFlow: + return "Extended IPv6 Tunnel Egress Record" + case SFlowTypeExtendedIpv6TunnelIngressFlow: + return "Extended IPv6 Tunnel Ingress Record" + case SFlowTypeExtendedDecapsulateEgressFlow: + return "Extended Decapsulate Egress Record" + case SFlowTypeExtendedDecapsulateIngressFlow: + return "Extended Decapsulate Ingress Record" + case SFlowTypeExtendedVniEgressFlow: + return "Extended VNI Ingress Record" + case SFlowTypeExtendedVniIngressFlow: + return "Extended VNI Ingress Record" + default: + return "" + } +} + +// SFlowRawPacketFlowRecords hold information about a sampled +// packet grabbed as it transited the agent. This is +// perhaps the most useful and interesting record type, +// as it holds the headers of the sampled packet and +// can be used to build up a complete picture of the +// traffic patterns on a network. +// +// The raw packet header is sent back into gopacket for +// decoding, and the resulting gopackt.Packet is stored +// in the Header member +type SFlowRawPacketFlowRecord struct { + SFlowBaseFlowRecord + HeaderProtocol SFlowRawHeaderProtocol + FrameLength uint32 + PayloadRemoved uint32 + HeaderLength uint32 + Header gopacket.Packet +} + +// Raw packet record types have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Header Protocol | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Frame Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Payload Removed | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Header Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// \ Header \ +// \ \ +// \ \ +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowRawHeaderProtocol uint32 + +const ( + SFlowProtoEthernet SFlowRawHeaderProtocol = 1 + SFlowProtoISO88024 SFlowRawHeaderProtocol = 2 + SFlowProtoISO88025 SFlowRawHeaderProtocol = 3 + SFlowProtoFDDI SFlowRawHeaderProtocol = 4 + SFlowProtoFrameRelay SFlowRawHeaderProtocol = 5 + SFlowProtoX25 SFlowRawHeaderProtocol = 6 + SFlowProtoPPP SFlowRawHeaderProtocol = 7 + SFlowProtoSMDS SFlowRawHeaderProtocol = 8 + SFlowProtoAAL5 SFlowRawHeaderProtocol = 9 + SFlowProtoAAL5_IP SFlowRawHeaderProtocol = 10 /* e.g. 
Cisco AAL5 mux */ + SFlowProtoIPv4 SFlowRawHeaderProtocol = 11 + SFlowProtoIPv6 SFlowRawHeaderProtocol = 12 + SFlowProtoMPLS SFlowRawHeaderProtocol = 13 + SFlowProtoPOS SFlowRawHeaderProtocol = 14 /* RFC 1662, 2615 */ +) + +func (sfhp SFlowRawHeaderProtocol) String() string { + switch sfhp { + case SFlowProtoEthernet: + return "ETHERNET-ISO88023" + case SFlowProtoISO88024: + return "ISO88024-TOKENBUS" + case SFlowProtoISO88025: + return "ISO88025-TOKENRING" + case SFlowProtoFDDI: + return "FDDI" + case SFlowProtoFrameRelay: + return "FRAME-RELAY" + case SFlowProtoX25: + return "X25" + case SFlowProtoPPP: + return "PPP" + case SFlowProtoSMDS: + return "SMDS" + case SFlowProtoAAL5: + return "AAL5" + case SFlowProtoAAL5_IP: + return "AAL5-IP" + case SFlowProtoIPv4: + return "IPv4" + case SFlowProtoIPv6: + return "IPv6" + case SFlowProtoMPLS: + return "MPLS" + case SFlowProtoPOS: + return "POS" + } + return "UNKNOWN" +} + +func decodeRawPacketFlowRecord(data *[]byte) (SFlowRawPacketFlowRecord, error) { + rec := SFlowRawPacketFlowRecord{} + header := []byte{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.HeaderProtocol = (*data)[4:], SFlowRawHeaderProtocol(binary.BigEndian.Uint32((*data)[:4])) + *data, rec.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.PayloadRemoved = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.HeaderLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + headerLenWithPadding := int(rec.HeaderLength + ((4 - rec.HeaderLength) % 4)) + *data, header = (*data)[headerLenWithPadding:], (*data)[:headerLenWithPadding] + rec.Header = gopacket.NewPacket(header, LayerTypeEthernet, gopacket.Default) + return rec, nil +} + +// SFlowExtendedSwitchFlowRecord give additional information +// about the sampled packet if it's available. It's mainly +// useful for getting at the incoming and outgoing VLANs +// An agent may or may not provide this information. 
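+//
+// A minimal consumer sketch (illustrative only, not part of this
+// file's API; it assumes a decoded flow sample `fs` that exposes
+// GetRecords() []SFlowRecord, as SFlowCounterSample.GetRecords does):
+//
+//	for _, r := range fs.GetRecords() {
+//		if sw, ok := r.(SFlowExtendedSwitchFlowRecord); ok {
+//			fmt.Printf("VLAN %d -> VLAN %d\n", sw.IncomingVLAN, sw.OutgoingVLAN)
+//		}
+//	}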
+type SFlowExtendedSwitchFlowRecord struct { + SFlowBaseFlowRecord + IncomingVLAN uint32 + IncomingVLANPriority uint32 + OutgoingVLAN uint32 + OutgoingVLANPriority uint32 +} + +// Extended switch records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Incoming VLAN | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Incoming VLAN Priority | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Outgoing VLAN | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Outgoing VLAN Priority | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +func decodeExtendedSwitchFlowRecord(data *[]byte) (SFlowExtendedSwitchFlowRecord, error) { + es := SFlowExtendedSwitchFlowRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + es.EnterpriseID, es.Format = fdf.decode() + *data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.IncomingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.IncomingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.OutgoingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, es.OutgoingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return es, nil +} + +// SFlowExtendedRouterFlowRecord gives additional information +// about the layer 3 routing information used to forward +// the packet +type SFlowExtendedRouterFlowRecord struct { + SFlowBaseFlowRecord + NextHop net.IP + NextHopSourceMask uint32 + NextHopDestinationMask uint32 +} + +// Extended router records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IP version of next hop router (1=v4|2=v6) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Next Hop address (v4=4byte|v6=16byte) / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Next Hop Source Mask | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Next Hop Destination Mask | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +func decodeExtendedRouterFlowRecord(data *[]byte) (SFlowExtendedRouterFlowRecord, error) { + er := SFlowExtendedRouterFlowRecord{} + var fdf SFlowFlowDataFormat + var extendedRouterAddressType SFlowIPType + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + er.EnterpriseID, er.Format = fdf.decode() + *data, er.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, extendedRouterAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4])) + *data, er.NextHop = (*data)[extendedRouterAddressType.Length():], (*data)[:extendedRouterAddressType.Length()] + *data, er.NextHopSourceMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, er.NextHopDestinationMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return er, nil +} + +// SFlowExtendedGatewayFlowRecord describes information treasured by +// nework engineers everywhere: AS path information listing which +// BGP peer sent the packet, and various other BGP related info. 
+// This information is vital because it gives a picture of how much +// traffic is being sent from / received by various BGP peers. + +// Extended gateway records have the following structure: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IP version of next hop router (1=v4|2=v6) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Next Hop address (v4=4byte|v6=16byte) / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | AS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source AS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Peer AS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | AS Path Count | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / AS Path / Sequence / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Communities / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Local Pref | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// AS Path / Sequence: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | AS Source Type (Path=1 / Sequence=2) | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Path / Sequence length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Path / Sequence Members / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +// Communities: + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | communitiy length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / communitiy Members / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowExtendedGatewayFlowRecord struct { + SFlowBaseFlowRecord + NextHop net.IP + AS uint32 + SourceAS uint32 + PeerAS uint32 + ASPathCount uint32 + ASPath []SFlowASDestination + Communities []uint32 + LocalPref uint32 +} + +type SFlowASPathType uint32 + +const ( + SFlowASSet SFlowASPathType = 1 + SFlowASSequence SFlowASPathType = 2 +) + +func (apt SFlowASPathType) String() string { + switch apt { + case SFlowASSet: + return "AS Set" + case SFlowASSequence: + return "AS Sequence" + default: + return "" + } +} + +type SFlowASDestination struct { + Type SFlowASPathType + Count uint32 + Members []uint32 +} + +func (asd SFlowASDestination) String() string { + switch asd.Type { + case SFlowASSet: + return fmt.Sprint("AS Set:", asd.Members) + case SFlowASSequence: + return fmt.Sprint("AS Sequence:", asd.Members) + default: + return "" + } +} + +func (ad *SFlowASDestination) decodePath(data *[]byte) { + *data, ad.Type = (*data)[4:], SFlowASPathType(binary.BigEndian.Uint32((*data)[:4])) + *data, ad.Count = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + ad.Members = make([]uint32, ad.Count) + for i := uint32(0); i < ad.Count; i++ { + var member uint32 + *data, member = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + ad.Members[i] = member + } +} + +func decodeExtendedGatewayFlowRecord(data *[]byte) (SFlowExtendedGatewayFlowRecord, error) { + eg := SFlowExtendedGatewayFlowRecord{} + var fdf SFlowFlowDataFormat + var extendedGatewayAddressType SFlowIPType + var communitiesLength uint32 + var community uint32 + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + eg.EnterpriseID, eg.Format = fdf.decode() + *data, eg.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) 
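+	// Note on the decode idiom used throughout this file: in
+	// `*data, x = (*data)[n:], decode((*data)[:n])` Go evaluates the
+	// entire right-hand side before assigning, so each statement reads
+	// a value from the head of the buffer and advances the buffer past
+	// it in a single step. A self-contained sketch of the same idiom:
+	//
+	//	buf := []byte{0x00, 0x00, 0x00, 0x2a}
+	//	data := &buf
+	//	var v uint32
+	//	*data, v = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	//	// v == 42 and *data is now empty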
+ *data, extendedGatewayAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4])) + *data, eg.NextHop = (*data)[extendedGatewayAddressType.Length():], (*data)[:extendedGatewayAddressType.Length()] + *data, eg.AS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eg.SourceAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eg.PeerAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eg.ASPathCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + for i := uint32(0); i < eg.ASPathCount; i++ { + asPath := SFlowASDestination{} + asPath.decodePath(data) + eg.ASPath = append(eg.ASPath, asPath) + } + *data, communitiesLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + eg.Communities = make([]uint32, communitiesLength) + for j := uint32(0); j < communitiesLength; j++ { + *data, community = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + eg.Communities[j] = community + } + *data, eg.LocalPref = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return eg, nil +} + +// ************************************************** +// Extended URL Flow Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | direction | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | URL | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Host | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowURLDirection uint32 + +const ( + SFlowURLsrc SFlowURLDirection = 1 + SFlowURLdst SFlowURLDirection = 2 +) + +func (urld SFlowURLDirection) String() string { + switch urld { + case SFlowURLsrc: + return "Source address is the server" + case SFlowURLdst: + return "Destination address is the server" + default: + return "" + } +} + +type SFlowExtendedURLRecord struct { + SFlowBaseFlowRecord + Direction SFlowURLDirection + URL string + Host string +} + +func decodeExtendedURLRecord(data *[]byte) (SFlowExtendedURLRecord, error) { + eur := SFlowExtendedURLRecord{} + var fdf SFlowFlowDataFormat + var urlLen uint32 + var urlLenWithPad int + var hostLen uint32 + var hostLenWithPad int + var urlBytes []byte + var hostBytes []byte + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + eur.EnterpriseID, eur.Format = fdf.decode() + *data, eur.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eur.Direction = (*data)[4:], SFlowURLDirection(binary.BigEndian.Uint32((*data)[:4])) + *data, urlLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + urlLenWithPad = int(urlLen + ((4 - urlLen) % 4)) + *data, urlBytes = (*data)[urlLenWithPad:], (*data)[:urlLenWithPad] + eur.URL = string(urlBytes[:urlLen]) + *data, hostLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + hostLenWithPad = int(hostLen + ((4 - hostLen) % 4)) + *data, hostBytes = (*data)[hostLenWithPad:], (*data)[:hostLenWithPad] + eur.Host = string(hostBytes[:hostLen]) + return eur, nil +} + +// ************************************************** +// Extended User Flow Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 
Source Character Set | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source User Id | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination Character Set | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination User ID | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowExtendedUserFlow struct { + SFlowBaseFlowRecord + SourceCharSet SFlowCharSet + SourceUserID string + DestinationCharSet SFlowCharSet + DestinationUserID string +} + +type SFlowCharSet uint32 + +const ( + SFlowCSunknown SFlowCharSet = 2 + SFlowCSASCII SFlowCharSet = 3 + SFlowCSISOLatin1 SFlowCharSet = 4 + SFlowCSISOLatin2 SFlowCharSet = 5 + SFlowCSISOLatin3 SFlowCharSet = 6 + SFlowCSISOLatin4 SFlowCharSet = 7 + SFlowCSISOLatinCyrillic SFlowCharSet = 8 + SFlowCSISOLatinArabic SFlowCharSet = 9 + SFlowCSISOLatinGreek SFlowCharSet = 10 + SFlowCSISOLatinHebrew SFlowCharSet = 11 + SFlowCSISOLatin5 SFlowCharSet = 12 + SFlowCSISOLatin6 SFlowCharSet = 13 + SFlowCSISOTextComm SFlowCharSet = 14 + SFlowCSHalfWidthKatakana SFlowCharSet = 15 + SFlowCSJISEncoding SFlowCharSet = 16 + SFlowCSShiftJIS SFlowCharSet = 17 + SFlowCSEUCPkdFmtJapanese SFlowCharSet = 18 + SFlowCSEUCFixWidJapanese SFlowCharSet = 19 + SFlowCSISO4UnitedKingdom SFlowCharSet = 20 + SFlowCSISO11SwedishForNames SFlowCharSet = 21 + SFlowCSISO15Italian SFlowCharSet = 22 + SFlowCSISO17Spanish SFlowCharSet = 23 + SFlowCSISO21German SFlowCharSet = 24 + SFlowCSISO60DanishNorwegian SFlowCharSet = 25 + SFlowCSISO69French SFlowCharSet = 26 + SFlowCSISO10646UTF1 SFlowCharSet = 27 + SFlowCSISO646basic1983 SFlowCharSet = 28 + SFlowCSINVARIANT SFlowCharSet = 29 + SFlowCSISO2IntlRefVersion SFlowCharSet = 30 + SFlowCSNATSSEFI SFlowCharSet = 31 + SFlowCSNATSSEFIADD SFlowCharSet = 32 + SFlowCSNATSDANO SFlowCharSet = 33 + SFlowCSNATSDANOADD SFlowCharSet = 34 + SFlowCSISO10Swedish SFlowCharSet = 35 + SFlowCSKSC56011987 SFlowCharSet = 36 + SFlowCSISO2022KR SFlowCharSet = 37 + SFlowCSEUCKR SFlowCharSet = 38 + SFlowCSISO2022JP SFlowCharSet = 39 + SFlowCSISO2022JP2 SFlowCharSet = 40 + SFlowCSISO13JISC6220jp SFlowCharSet = 41 + SFlowCSISO14JISC6220ro SFlowCharSet = 42 + SFlowCSISO16Portuguese SFlowCharSet = 43 + SFlowCSISO18Greek7Old SFlowCharSet = 44 + SFlowCSISO19LatinGreek SFlowCharSet = 45 + SFlowCSISO25French SFlowCharSet = 46 + SFlowCSISO27LatinGreek1 SFlowCharSet = 47 + SFlowCSISO5427Cyrillic SFlowCharSet = 48 + SFlowCSISO42JISC62261978 SFlowCharSet = 49 + SFlowCSISO47BSViewdata SFlowCharSet = 50 + SFlowCSISO49INIS SFlowCharSet = 51 + SFlowCSISO50INIS8 SFlowCharSet = 52 + SFlowCSISO51INISCyrillic SFlowCharSet = 53 + SFlowCSISO54271981 SFlowCharSet = 54 + SFlowCSISO5428Greek SFlowCharSet = 55 + SFlowCSISO57GB1988 SFlowCharSet = 56 + SFlowCSISO58GB231280 SFlowCharSet = 57 + SFlowCSISO61Norwegian2 SFlowCharSet = 58 + SFlowCSISO70VideotexSupp1 SFlowCharSet = 59 + SFlowCSISO84Portuguese2 SFlowCharSet = 60 + SFlowCSISO85Spanish2 SFlowCharSet = 61 + SFlowCSISO86Hungarian SFlowCharSet = 62 + SFlowCSISO87JISX0208 SFlowCharSet = 63 + SFlowCSISO88Greek7 SFlowCharSet = 64 + SFlowCSISO89ASMO449 SFlowCharSet = 65 + SFlowCSISO90 SFlowCharSet = 66 + SFlowCSISO91JISC62291984a SFlowCharSet = 67 + SFlowCSISO92JISC62991984b SFlowCharSet = 68 + SFlowCSISO93JIS62291984badd SFlowCharSet = 69 + SFlowCSISO94JIS62291984hand SFlowCharSet = 70 + SFlowCSISO95JIS62291984handadd SFlowCharSet = 71 + SFlowCSISO96JISC62291984kana SFlowCharSet = 72 + SFlowCSISO2033 SFlowCharSet = 73 + SFlowCSISO99NAPLPS SFlowCharSet = 74 + SFlowCSISO102T617bit SFlowCharSet = 
75 + SFlowCSISO103T618bit SFlowCharSet = 76 + SFlowCSISO111ECMACyrillic SFlowCharSet = 77 + SFlowCSa71 SFlowCharSet = 78 + SFlowCSa72 SFlowCharSet = 79 + SFlowCSISO123CSAZ24341985gr SFlowCharSet = 80 + SFlowCSISO88596E SFlowCharSet = 81 + SFlowCSISO88596I SFlowCharSet = 82 + SFlowCSISO128T101G2 SFlowCharSet = 83 + SFlowCSISO88598E SFlowCharSet = 84 + SFlowCSISO88598I SFlowCharSet = 85 + SFlowCSISO139CSN369103 SFlowCharSet = 86 + SFlowCSISO141JUSIB1002 SFlowCharSet = 87 + SFlowCSISO143IECP271 SFlowCharSet = 88 + SFlowCSISO146Serbian SFlowCharSet = 89 + SFlowCSISO147Macedonian SFlowCharSet = 90 + SFlowCSISO150 SFlowCharSet = 91 + SFlowCSISO151Cuba SFlowCharSet = 92 + SFlowCSISO6937Add SFlowCharSet = 93 + SFlowCSISO153GOST1976874 SFlowCharSet = 94 + SFlowCSISO8859Supp SFlowCharSet = 95 + SFlowCSISO10367Box SFlowCharSet = 96 + SFlowCSISO158Lap SFlowCharSet = 97 + SFlowCSISO159JISX02121990 SFlowCharSet = 98 + SFlowCSISO646Danish SFlowCharSet = 99 + SFlowCSUSDK SFlowCharSet = 100 + SFlowCSDKUS SFlowCharSet = 101 + SFlowCSKSC5636 SFlowCharSet = 102 + SFlowCSUnicode11UTF7 SFlowCharSet = 103 + SFlowCSISO2022CN SFlowCharSet = 104 + SFlowCSISO2022CNEXT SFlowCharSet = 105 + SFlowCSUTF8 SFlowCharSet = 106 + SFlowCSISO885913 SFlowCharSet = 109 + SFlowCSISO885914 SFlowCharSet = 110 + SFlowCSISO885915 SFlowCharSet = 111 + SFlowCSISO885916 SFlowCharSet = 112 + SFlowCSGBK SFlowCharSet = 113 + SFlowCSGB18030 SFlowCharSet = 114 + SFlowCSOSDEBCDICDF0415 SFlowCharSet = 115 + SFlowCSOSDEBCDICDF03IRV SFlowCharSet = 116 + SFlowCSOSDEBCDICDF041 SFlowCharSet = 117 + SFlowCSISO115481 SFlowCharSet = 118 + SFlowCSKZ1048 SFlowCharSet = 119 + SFlowCSUnicode SFlowCharSet = 1000 + SFlowCSUCS4 SFlowCharSet = 1001 + SFlowCSUnicodeASCII SFlowCharSet = 1002 + SFlowCSUnicodeLatin1 SFlowCharSet = 1003 + SFlowCSUnicodeJapanese SFlowCharSet = 1004 + SFlowCSUnicodeIBM1261 SFlowCharSet = 1005 + SFlowCSUnicodeIBM1268 SFlowCharSet = 1006 + SFlowCSUnicodeIBM1276 SFlowCharSet = 1007 + SFlowCSUnicodeIBM1264 SFlowCharSet = 1008 + SFlowCSUnicodeIBM1265 SFlowCharSet = 1009 + SFlowCSUnicode11 SFlowCharSet = 1010 + SFlowCSSCSU SFlowCharSet = 1011 + SFlowCSUTF7 SFlowCharSet = 1012 + SFlowCSUTF16BE SFlowCharSet = 1013 + SFlowCSUTF16LE SFlowCharSet = 1014 + SFlowCSUTF16 SFlowCharSet = 1015 + SFlowCSCESU8 SFlowCharSet = 1016 + SFlowCSUTF32 SFlowCharSet = 1017 + SFlowCSUTF32BE SFlowCharSet = 1018 + SFlowCSUTF32LE SFlowCharSet = 1019 + SFlowCSBOCU1 SFlowCharSet = 1020 + SFlowCSWindows30Latin1 SFlowCharSet = 2000 + SFlowCSWindows31Latin1 SFlowCharSet = 2001 + SFlowCSWindows31Latin2 SFlowCharSet = 2002 + SFlowCSWindows31Latin5 SFlowCharSet = 2003 + SFlowCSHPRoman8 SFlowCharSet = 2004 + SFlowCSAdobeStandardEncoding SFlowCharSet = 2005 + SFlowCSVenturaUS SFlowCharSet = 2006 + SFlowCSVenturaInternational SFlowCharSet = 2007 + SFlowCSDECMCS SFlowCharSet = 2008 + SFlowCSPC850Multilingual SFlowCharSet = 2009 + SFlowCSPCp852 SFlowCharSet = 2010 + SFlowCSPC8CodePage437 SFlowCharSet = 2011 + SFlowCSPC8DanishNorwegian SFlowCharSet = 2012 + SFlowCSPC862LatinHebrew SFlowCharSet = 2013 + SFlowCSPC8Turkish SFlowCharSet = 2014 + SFlowCSIBMSymbols SFlowCharSet = 2015 + SFlowCSIBMThai SFlowCharSet = 2016 + SFlowCSHPLegal SFlowCharSet = 2017 + SFlowCSHPPiFont SFlowCharSet = 2018 + SFlowCSHPMath8 SFlowCharSet = 2019 + SFlowCSHPPSMath SFlowCharSet = 2020 + SFlowCSHPDesktop SFlowCharSet = 2021 + SFlowCSVenturaMath SFlowCharSet = 2022 + SFlowCSMicrosoftPublishing SFlowCharSet = 2023 + SFlowCSWindows31J SFlowCharSet = 2024 + SFlowCSGB2312 SFlowCharSet = 2025 + SFlowCSBig5 
SFlowCharSet = 2026 + SFlowCSMacintosh SFlowCharSet = 2027 + SFlowCSIBM037 SFlowCharSet = 2028 + SFlowCSIBM038 SFlowCharSet = 2029 + SFlowCSIBM273 SFlowCharSet = 2030 + SFlowCSIBM274 SFlowCharSet = 2031 + SFlowCSIBM275 SFlowCharSet = 2032 + SFlowCSIBM277 SFlowCharSet = 2033 + SFlowCSIBM278 SFlowCharSet = 2034 + SFlowCSIBM280 SFlowCharSet = 2035 + SFlowCSIBM281 SFlowCharSet = 2036 + SFlowCSIBM284 SFlowCharSet = 2037 + SFlowCSIBM285 SFlowCharSet = 2038 + SFlowCSIBM290 SFlowCharSet = 2039 + SFlowCSIBM297 SFlowCharSet = 2040 + SFlowCSIBM420 SFlowCharSet = 2041 + SFlowCSIBM423 SFlowCharSet = 2042 + SFlowCSIBM424 SFlowCharSet = 2043 + SFlowCSIBM500 SFlowCharSet = 2044 + SFlowCSIBM851 SFlowCharSet = 2045 + SFlowCSIBM855 SFlowCharSet = 2046 + SFlowCSIBM857 SFlowCharSet = 2047 + SFlowCSIBM860 SFlowCharSet = 2048 + SFlowCSIBM861 SFlowCharSet = 2049 + SFlowCSIBM863 SFlowCharSet = 2050 + SFlowCSIBM864 SFlowCharSet = 2051 + SFlowCSIBM865 SFlowCharSet = 2052 + SFlowCSIBM868 SFlowCharSet = 2053 + SFlowCSIBM869 SFlowCharSet = 2054 + SFlowCSIBM870 SFlowCharSet = 2055 + SFlowCSIBM871 SFlowCharSet = 2056 + SFlowCSIBM880 SFlowCharSet = 2057 + SFlowCSIBM891 SFlowCharSet = 2058 + SFlowCSIBM903 SFlowCharSet = 2059 + SFlowCSIBBM904 SFlowCharSet = 2060 + SFlowCSIBM905 SFlowCharSet = 2061 + SFlowCSIBM918 SFlowCharSet = 2062 + SFlowCSIBM1026 SFlowCharSet = 2063 + SFlowCSIBMEBCDICATDE SFlowCharSet = 2064 + SFlowCSEBCDICATDEA SFlowCharSet = 2065 + SFlowCSEBCDICCAFR SFlowCharSet = 2066 + SFlowCSEBCDICDKNO SFlowCharSet = 2067 + SFlowCSEBCDICDKNOA SFlowCharSet = 2068 + SFlowCSEBCDICFISE SFlowCharSet = 2069 + SFlowCSEBCDICFISEA SFlowCharSet = 2070 + SFlowCSEBCDICFR SFlowCharSet = 2071 + SFlowCSEBCDICIT SFlowCharSet = 2072 + SFlowCSEBCDICPT SFlowCharSet = 2073 + SFlowCSEBCDICES SFlowCharSet = 2074 + SFlowCSEBCDICESA SFlowCharSet = 2075 + SFlowCSEBCDICESS SFlowCharSet = 2076 + SFlowCSEBCDICUK SFlowCharSet = 2077 + SFlowCSEBCDICUS SFlowCharSet = 2078 + SFlowCSUnknown8BiT SFlowCharSet = 2079 + SFlowCSMnemonic SFlowCharSet = 2080 + SFlowCSMnem SFlowCharSet = 2081 + SFlowCSVISCII SFlowCharSet = 2082 + SFlowCSVIQR SFlowCharSet = 2083 + SFlowCSKOI8R SFlowCharSet = 2084 + SFlowCSHZGB2312 SFlowCharSet = 2085 + SFlowCSIBM866 SFlowCharSet = 2086 + SFlowCSPC775Baltic SFlowCharSet = 2087 + SFlowCSKOI8U SFlowCharSet = 2088 + SFlowCSIBM00858 SFlowCharSet = 2089 + SFlowCSIBM00924 SFlowCharSet = 2090 + SFlowCSIBM01140 SFlowCharSet = 2091 + SFlowCSIBM01141 SFlowCharSet = 2092 + SFlowCSIBM01142 SFlowCharSet = 2093 + SFlowCSIBM01143 SFlowCharSet = 2094 + SFlowCSIBM01144 SFlowCharSet = 2095 + SFlowCSIBM01145 SFlowCharSet = 2096 + SFlowCSIBM01146 SFlowCharSet = 2097 + SFlowCSIBM01147 SFlowCharSet = 2098 + SFlowCSIBM01148 SFlowCharSet = 2099 + SFlowCSIBM01149 SFlowCharSet = 2100 + SFlowCSBig5HKSCS SFlowCharSet = 2101 + SFlowCSIBM1047 SFlowCharSet = 2102 + SFlowCSPTCP154 SFlowCharSet = 2103 + SFlowCSAmiga1251 SFlowCharSet = 2104 + SFlowCSKOI7switched SFlowCharSet = 2105 + SFlowCSBRF SFlowCharSet = 2106 + SFlowCSTSCII SFlowCharSet = 2107 + SFlowCSCP51932 SFlowCharSet = 2108 + SFlowCSWindows874 SFlowCharSet = 2109 + SFlowCSWindows1250 SFlowCharSet = 2250 + SFlowCSWindows1251 SFlowCharSet = 2251 + SFlowCSWindows1252 SFlowCharSet = 2252 + SFlowCSWindows1253 SFlowCharSet = 2253 + SFlowCSWindows1254 SFlowCharSet = 2254 + SFlowCSWindows1255 SFlowCharSet = 2255 + SFlowCSWindows1256 SFlowCharSet = 2256 + SFlowCSWindows1257 SFlowCharSet = 2257 + SFlowCSWindows1258 SFlowCharSet = 2258 + SFlowCSTIS620 SFlowCharSet = 2259 + SFlowCS50220 SFlowCharSet = 2260 + 
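+	// The numeric values above appear to mirror the IANA character-set
+	// registry's MIBenum numbers (e.g. 106 is UTF-8 and 2252 is
+	// windows-1252), which is what the extended user flow record
+	// carries on the wire.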
SFlowCSreserved SFlowCharSet = 3000 +) + +func decodeExtendedUserFlow(data *[]byte) (SFlowExtendedUserFlow, error) { + eu := SFlowExtendedUserFlow{} + var fdf SFlowFlowDataFormat + var srcUserLen uint32 + var srcUserLenWithPad int + var srcUserBytes []byte + var dstUserLen uint32 + var dstUserLenWithPad int + var dstUserBytes []byte + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + eu.EnterpriseID, eu.Format = fdf.decode() + *data, eu.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, eu.SourceCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4])) + *data, srcUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + srcUserLenWithPad = int(srcUserLen + ((4 - srcUserLen) % 4)) + *data, srcUserBytes = (*data)[srcUserLenWithPad:], (*data)[:srcUserLenWithPad] + eu.SourceUserID = string(srcUserBytes[:srcUserLen]) + *data, eu.DestinationCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4])) + *data, dstUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + dstUserLenWithPad = int(dstUserLen + ((4 - dstUserLen) % 4)) + *data, dstUserBytes = (*data)[dstUserLenWithPad:], (*data)[:dstUserLenWithPad] + eu.DestinationUserID = string(dstUserBytes[:dstUserLen]) + return eu, nil +} + +// ************************************************** +// Packet IP version 4 Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Protocol | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source IPv4 | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destination IPv4 | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Source Port | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Destionation Port | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TCP Flags | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | TOS | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowIpv4Record struct { + // The length of the IP packet excluding ower layer encapsulations + Length uint32 + // IP Protocol type (for example, TCP = 6, UDP = 17) + Protocol uint32 + // Source IP Address + IPSrc net.IP + // Destination IP Address + IPDst net.IP + // TCP/UDP source port number or equivalent + PortSrc uint32 + // TCP/UDP destination port number or equivalent + PortDst uint32 + // TCP flags + TCPFlags uint32 + // IP type of service + TOS uint32 +} + +func decodeSFlowIpv4Record(data *[]byte) (SFlowIpv4Record, error) { + si := SFlowIpv4Record{} + + *data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.IPSrc = (*data)[4:], net.IP((*data)[:4]) + *data, si.IPDst = (*data)[4:], net.IP((*data)[:4]) + *data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, si.TOS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return si, nil +} + +// ************************************************** +// Packet IP version 6 Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Protocol | +// 
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Source IPv6                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Destination IPv6              |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Source Port                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Destination Port              |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    TCP Flags                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Priority                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv6Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+	Length uint32
+	// IP Protocol type (for example, TCP = 6, UDP = 17)
+	Protocol uint32
+	// Source IP Address
+	IPSrc net.IP
+	// Destination IP Address
+	IPDst net.IP
+	// TCP/UDP source port number or equivalent
+	PortSrc uint32
+	// TCP/UDP destination port number or equivalent
+	PortDst uint32
+	// TCP flags
+	TCPFlags uint32
+	// IP priority
+	Priority uint32
+}
+
+func decodeSFlowIpv6Record(data *[]byte) (SFlowIpv6Record, error) {
+	si := SFlowIpv6Record{}
+
+	*data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.IPSrc = (*data)[16:], net.IP((*data)[:16])
+	*data, si.IPDst = (*data)[16:], net.IP((*data)[:16])
+	*data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Priority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return si, nil
+}
+
+// **************************************************
+//  Extended IPv4 Tunnel Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 4 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelEgressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelEgress(data *[]byte) (SFlowExtendedIpv4TunnelEgressRecord, error) {
+	rec := SFlowExtendedIpv4TunnelEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv4 Tunnel Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 4 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelIngressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelIngress(data *[]byte) (SFlowExtendedIpv4TunnelIngressRecord, error) {
+	rec := SFlowExtendedIpv4TunnelIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], 
SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data) + + return rec, nil +} + +// ************************************************** +// Extended IPv6 Tunnel Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Packet IP version 6 Record / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedIpv6TunnelEgressRecord struct { + SFlowBaseFlowRecord + SFlowIpv6Record +} + +func decodeExtendedIpv6TunnelEgress(data *[]byte) (SFlowExtendedIpv6TunnelEgressRecord, error) { + rec := SFlowExtendedIpv6TunnelEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data) + + return rec, nil +} + +// ************************************************** +// Extended IPv6 Tunnel Ingress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / Packet IP version 6 Record / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedIpv6TunnelIngressRecord struct { + SFlowBaseFlowRecord + SFlowIpv6Record +} + +func decodeExtendedIpv6TunnelIngress(data *[]byte) (SFlowExtendedIpv6TunnelIngressRecord, error) { + rec := SFlowExtendedIpv6TunnelIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data) + + return rec, nil +} + +// ************************************************** +// Extended Decapsulate Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Inner Header Offset | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedDecapsulateEgressRecord struct { + SFlowBaseFlowRecord + InnerHeaderOffset uint32 +} + +func decodeExtendedDecapsulateEgress(data *[]byte) (SFlowExtendedDecapsulateEgressRecord, error) { + rec := SFlowExtendedDecapsulateEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Extended Decapsulate Ingress +// 
************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | Inner Header Offset | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedDecapsulateIngressRecord struct { + SFlowBaseFlowRecord + InnerHeaderOffset uint32 +} + +func decodeExtendedDecapsulateIngress(data *[]byte) (SFlowExtendedDecapsulateIngressRecord, error) { + rec := SFlowExtendedDecapsulateIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Extended VNI Egress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | VNI | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedVniEgressRecord struct { + SFlowBaseFlowRecord + VNI uint32 +} + +func decodeExtendedVniEgress(data *[]byte) (SFlowExtendedVniEgressRecord, error) { + rec := SFlowExtendedVniEgressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Extended VNI Ingress +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | record length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | VNI | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +type SFlowExtendedVniIngressRecord struct { + SFlowBaseFlowRecord + VNI uint32 +} + +func decodeExtendedVniIngress(data *[]byte) (SFlowExtendedVniIngressRecord, error) { + rec := SFlowExtendedVniIngressRecord{} + var fdf SFlowFlowDataFormat + + *data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4])) + rec.EnterpriseID, rec.Format = fdf.decode() + *data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return rec, nil +} + +// ************************************************** +// Counter Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / counter data / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowBaseCounterRecord struct { + EnterpriseID SFlowEnterpriseID + Format SFlowCounterRecordType + FlowDataLength uint32 
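+	// FlowDataLength is, despite the name, the length of the counter
+	// data ("counter length" in the diagram above).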
+} + +func (bcr SFlowBaseCounterRecord) GetType() SFlowCounterRecordType { + switch bcr.Format { + case SFlowTypeGenericInterfaceCounters: + return SFlowTypeGenericInterfaceCounters + case SFlowTypeEthernetInterfaceCounters: + return SFlowTypeEthernetInterfaceCounters + case SFlowTypeTokenRingInterfaceCounters: + return SFlowTypeTokenRingInterfaceCounters + case SFlowType100BaseVGInterfaceCounters: + return SFlowType100BaseVGInterfaceCounters + case SFlowTypeVLANCounters: + return SFlowTypeVLANCounters + case SFlowTypeLACPCounters: + return SFlowTypeLACPCounters + case SFlowTypeProcessorCounters: + return SFlowTypeProcessorCounters + case SFlowTypeOpenflowPortCounters: + return SFlowTypeOpenflowPortCounters + case SFlowTypePORTNAMECounters: + return SFlowTypePORTNAMECounters + case SFLowTypeAPPRESOURCESCounters: + return SFLowTypeAPPRESOURCESCounters + case SFlowTypeOVSDPCounters: + return SFlowTypeOVSDPCounters + } + unrecognized := fmt.Sprint("Unrecognized counter record type:", bcr.Format) + panic(unrecognized) +} + +// ************************************************** +// Counter Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfIndex | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfType | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfSpeed | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfDirection | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfStatus | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IFInOctets | +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInUcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInMulticastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInBroadcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInDiscards | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | InInErrors | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfInUnknownProtos | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutOctets | +// | | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutUcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutMulticastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutBroadcastPkts | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOutDiscards | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfOUtErrors | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | IfPromiscouousMode | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowGenericInterfaceCounters struct { + SFlowBaseCounterRecord + IfIndex uint32 + IfType uint32 + IfSpeed uint64 + IfDirection uint32 + IfStatus uint32 + IfInOctets uint64 + IfInUcastPkts uint32 + IfInMulticastPkts uint32 + IfInBroadcastPkts uint32 + IfInDiscards uint32 + IfInErrors uint32 + IfInUnknownProtos uint32 + IfOutOctets uint64 + IfOutUcastPkts uint32 + IfOutMulticastPkts uint32 + IfOutBroadcastPkts uint32 + IfOutDiscards uint32 + IfOutErrors uint32 + IfPromiscuousMode uint32 +} + +func decodeGenericInterfaceCounters(data *[]byte) (SFlowGenericInterfaceCounters, error) { + gic := SFlowGenericInterfaceCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = 
(*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + gic.EnterpriseID, gic.Format = cdf.decode() + *data, gic.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfIndex = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfType = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfSpeed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, gic.IfDirection = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfStatus = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, gic.IfInUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfInUnknownProtos = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, gic.IfOutUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfOutErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, gic.IfPromiscuousMode = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return gic, nil +} + +// ************************************************** +// Counter Record +// ************************************************** + +// 0 15 31 +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | 20 bit Interprise (0) |12 bit format | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// | counter length | +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ +// / counter data / +// / / +// +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ + +type SFlowEthernetCounters struct { + SFlowBaseCounterRecord + AlignmentErrors uint32 + FCSErrors uint32 + SingleCollisionFrames uint32 + MultipleCollisionFrames uint32 + SQETestErrors uint32 + DeferredTransmissions uint32 + LateCollisions uint32 + ExcessiveCollisions uint32 + InternalMacTransmitErrors uint32 + CarrierSenseErrors uint32 + FrameTooLongs uint32 + InternalMacReceiveErrors uint32 + SymbolErrors uint32 +} + +func decodeEthernetCounters(data *[]byte) (SFlowEthernetCounters, error) { + ec := SFlowEthernetCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + ec.EnterpriseID, ec.Format = cdf.decode() + *data, ec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.AlignmentErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.FCSErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.SingleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.MultipleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.SQETestErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.DeferredTransmissions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.LateCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.ExcessiveCollisions 
= (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.InternalMacTransmitErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.CarrierSenseErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.FrameTooLongs = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.InternalMacReceiveErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, ec.SymbolErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return ec, nil +} + +// VLAN Counter + +type SFlowVLANCounters struct { + SFlowBaseCounterRecord + VlanID uint32 + Octets uint64 + UcastPkts uint32 + MulticastPkts uint32 + BroadcastPkts uint32 + Discards uint32 +} + +func decodeVLANCounters(data *[]byte) (SFlowVLANCounters, error) { + vc := SFlowVLANCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + vc.EnterpriseID, vc.Format = cdf.decode() + vc.EnterpriseID, vc.Format = cdf.decode() + *data, vc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.VlanID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.Octets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8]) + *data, vc.UcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.MulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.BroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, vc.Discards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + return vc, nil +} + +//SFLLACPportState : SFlow LACP Port State (All(4) - 32 bit) +type SFLLACPPortState struct { + PortStateAll uint32 +} + +//LACPcounters : LACP SFlow Counters ( 64 Bytes ) +type SFlowLACPCounters struct { + SFlowBaseCounterRecord + ActorSystemID net.HardwareAddr + PartnerSystemID net.HardwareAddr + AttachedAggID uint32 + LacpPortState SFLLACPPortState + LACPDUsRx uint32 + MarkerPDUsRx uint32 + MarkerResponsePDUsRx uint32 + UnknownRx uint32 + IllegalRx uint32 + LACPDUsTx uint32 + MarkerPDUsTx uint32 + MarkerResponsePDUsTx uint32 +} + +func decodeLACPCounters(data *[]byte) (SFlowLACPCounters, error) { + la := SFlowLACPCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + la.EnterpriseID, la.Format = cdf.decode() + *data, la.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.ActorSystemID = (*data)[6:], (*data)[:6] + *data = (*data)[2:] // remove padding + *data, la.PartnerSystemID = (*data)[6:], (*data)[:6] + *data = (*data)[2:] //remove padding + *data, la.AttachedAggID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.LacpPortState.PortStateAll = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.LACPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerResponsePDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.UnknownRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.IllegalRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.LACPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, la.MarkerResponsePDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return la, nil + +} + +// ************************************************** +// Processor Counter Record +// 
+// **************************************************
+//  Processor Counter Record
+// **************************************************
+
+//  0                      15                      31
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |     20 bit Enterprise (0)      |12 bit format |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                counter length                 |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                  FiveSecCpu                   |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                   OneMinCpu                   |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                  FiveMinCpu                   |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                  TotalMemory                  |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                  FreeMemory                   |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowProcessorCounters struct {
+	SFlowBaseCounterRecord
+	FiveSecCpu  uint32 // 5 second average CPU utilization
+	OneMinCpu   uint32 // 1 minute average CPU utilization
+	FiveMinCpu  uint32 // 5 minute average CPU utilization
+	TotalMemory uint64 // total memory (in bytes)
+	FreeMemory  uint64 // free memory (in bytes)
+}
+
+func decodeProcessorCounters(data *[]byte) (SFlowProcessorCounters, error) {
+	pc := SFlowProcessorCounters{}
+	var cdf SFlowCounterDataFormat
+	var high32, low32 uint32
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	pc.EnterpriseID, pc.Format = cdf.decode()
+	*data, pc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	*data, pc.FiveSecCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, pc.OneMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, pc.FiveMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.TotalMemory = (uint64(high32) << 32) + uint64(low32)
+	*data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.FreeMemory = (uint64(high32) << 32) + uint64(low32)
+
+	return pc, nil
+}
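TotalMemory and FreeMemory travel as two 32-bit words each, high word first, so the decoder must shift the high word before adding the low one. A standalone sketch of that reassembly:

```go
package main

import "fmt"

func main() {
	// Two big-endian 32-bit halves of a 64-bit counter, high word first.
	high32, low32 := uint32(0x00000001), uint32(0x00000002)

	total := (uint64(high32) << 32) + uint64(low32)
	fmt.Printf("0x%016x\n", total) // 0x0000000100000002
}
```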
+
+// SFlowEthernetFrameFlowRecord gives additional information
+// about the sampled packet if it's available.
+// An agent may or may not provide this information.
+type SFlowEthernetFrameFlowRecord struct {
+	SFlowBaseFlowRecord
+	FrameLength uint32
+	SrcMac      net.HardwareAddr
+	DstMac      net.HardwareAddr
+	Type        uint32
+}
+
+// Ethernet frame flow records have the following structure:
+
+//  0                      15                      31
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |     20 bit Enterprise (0)      |12 bit format |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |                 record length                 |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |              Source Mac Address               |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |            Destination Mac Address            |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//   |             Ethernet Packet Type              |
+//   +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeEthernetFrameFlowRecord(data *[]byte) (SFlowEthernetFrameFlowRecord, error) {
+	es := SFlowEthernetFrameFlowRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	es.EnterpriseID, es.Format = fdf.decode()
+	*data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	*data, es.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.SrcMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+	*data, es.DstMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+	*data, es.Type = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return es, nil
+}
+
+// SFlowOpenflowPortCounters : OVS-Sflow OpenFlow Port Counter ( 20 Bytes )
+type SFlowOpenflowPortCounters struct {
+	SFlowBaseCounterRecord
+	DatapathID uint64
+	PortNo     uint32
+}
+
+func decodeOpenflowportCounters(data *[]byte) (SFlowOpenflowPortCounters, error) {
+	ofp := SFlowOpenflowPortCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	ofp.EnterpriseID, ofp.Format = cdf.decode()
+	*data, ofp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ofp.DatapathID = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, ofp.PortNo = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return ofp, nil
+}
+
+// SFlowAppresourcesCounters : OVS-Sflow App Resources Counter ( 48 Bytes )
+type SFlowAppresourcesCounters struct {
+	SFlowBaseCounterRecord
+	UserTime   uint32
+	SystemTime uint32
+	MemUsed    uint64
+	MemMax     uint64
+	FdOpen     uint32
+	FdMax      uint32
+	ConnOpen   uint32
+	ConnMax    uint32
+}
+
+func decodeAppresourcesCounters(data *[]byte) (SFlowAppresourcesCounters, error) {
+	app := SFlowAppresourcesCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	app.EnterpriseID, app.Format = cdf.decode()
+	*data, app.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.UserTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.SystemTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.MemUsed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, app.MemMax = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, app.FdOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.FdMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.ConnOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.ConnMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return app, nil
+}
+
+// SFlowOVSDPCounters : OVS-Sflow DataPath Counter ( 32 Bytes )
+type SFlowOVSDPCounters struct {
+	SFlowBaseCounterRecord
+	NHit    uint32
+	NMissed uint32
+	NLost   uint32
+	NMaskHit
uint32 + NFlows uint32 + NMasks uint32 +} + +func decodeOVSDPCounters(data *[]byte) (SFlowOVSDPCounters, error) { + dp := SFlowOVSDPCounters{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + dp.EnterpriseID, dp.Format = cdf.decode() + *data, dp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NMissed = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NLost = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NMaskHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NFlows = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + *data, dp.NMasks = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + + return dp, nil +} + +//SFlowPORTNAME : OVS-Sflow PORTNAME Counter Sampletype ( 20 Bytes ) +type SFlowPORTNAME struct { + SFlowBaseCounterRecord + Len uint32 + Str string +} + +func decodeString(data *[]byte) (len uint32, str string) { + *data, len = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + str = string((*data)[:len]) + if (len % 4) != 0 { + len += 4 - len%4 + } + *data = (*data)[len:] + return +} + +func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) { + pn := SFlowPORTNAME{} + var cdf SFlowCounterDataFormat + + *data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4])) + pn.EnterpriseID, pn.Format = cdf.decode() + *data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4]) + pn.Len, pn.Str = decodeString(data) + + return pn, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sip.go new file mode 100644 index 00000000..43ea61d0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/sip.go @@ -0,0 +1,543 @@ +// Copyright 2017 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+
+package layers
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// SIPVersion defines the different versions of the SIP Protocol
+type SIPVersion uint8
+
+// Represents all the versions of SIP protocol
+const (
+	SIPVersion1 SIPVersion = 1
+	SIPVersion2 SIPVersion = 2
+)
+
+func (sv SIPVersion) String() string {
+	switch sv {
+	default:
+		// Defaulting to SIP/2.0
+		return "SIP/2.0"
+	case SIPVersion1:
+		return "SIP/1.0"
+	case SIPVersion2:
+		return "SIP/2.0"
+	}
+}
+
+// GetSIPVersion is used to get the SIP version constant
+func GetSIPVersion(version string) (SIPVersion, error) {
+	switch strings.ToUpper(version) {
+	case "SIP/1.0":
+		return SIPVersion1, nil
+	case "SIP/2.0":
+		return SIPVersion2, nil
+	default:
+		return 0, fmt.Errorf("Unknown SIP version: '%s'", version)
+	}
+}
+
+// SIPMethod defines the different methods of the SIP Protocol
+// defined in the different RFCs
+type SIPMethod uint16
+
+// Here are all the SIP methods
+const (
+	SIPMethodInvite    SIPMethod = 1  // INVITE    [RFC3261]
+	SIPMethodAck       SIPMethod = 2  // ACK       [RFC3261]
+	SIPMethodBye       SIPMethod = 3  // BYE       [RFC3261]
+	SIPMethodCancel    SIPMethod = 4  // CANCEL    [RFC3261]
+	SIPMethodOptions   SIPMethod = 5  // OPTIONS   [RFC3261]
+	SIPMethodRegister  SIPMethod = 6  // REGISTER  [RFC3261]
+	SIPMethodPrack     SIPMethod = 7  // PRACK     [RFC3262]
+	SIPMethodSubscribe SIPMethod = 8  // SUBSCRIBE [RFC6665]
+	SIPMethodNotify    SIPMethod = 9  // NOTIFY    [RFC6665]
+	SIPMethodPublish   SIPMethod = 10 // PUBLISH   [RFC3903]
+	SIPMethodInfo      SIPMethod = 11 // INFO      [RFC6086]
+	SIPMethodRefer     SIPMethod = 12 // REFER     [RFC3515]
+	SIPMethodMessage   SIPMethod = 13 // MESSAGE   [RFC3428]
+	SIPMethodUpdate    SIPMethod = 14 // UPDATE    [RFC3311]
+	SIPMethodPing      SIPMethod = 15 // PING      [https://tools.ietf.org/html/draft-fwmiller-ping-03]
+)
+
+func (sm SIPMethod) String() string {
+	switch sm {
+	default:
+		return "Unknown method"
+	case SIPMethodInvite:
+		return "INVITE"
+	case SIPMethodAck:
+		return "ACK"
+	case SIPMethodBye:
+		return "BYE"
+	case SIPMethodCancel:
+		return "CANCEL"
+	case SIPMethodOptions:
+		return "OPTIONS"
+	case SIPMethodRegister:
+		return "REGISTER"
+	case SIPMethodPrack:
+		return "PRACK"
+	case SIPMethodSubscribe:
+		return "SUBSCRIBE"
+	case SIPMethodNotify:
+		return "NOTIFY"
+	case SIPMethodPublish:
+		return "PUBLISH"
+	case SIPMethodInfo:
+		return "INFO"
+	case SIPMethodRefer:
+		return "REFER"
+	case SIPMethodMessage:
+		return "MESSAGE"
+	case SIPMethodUpdate:
+		return "UPDATE"
+	case SIPMethodPing:
+		return "PING"
+	}
+}
+
+// GetSIPMethod returns the constant of a SIP method
+// from its string
+func GetSIPMethod(method string) (SIPMethod, error) {
+	switch strings.ToUpper(method) {
+	case "INVITE":
+		return SIPMethodInvite, nil
+	case "ACK":
+		return SIPMethodAck, nil
+	case "BYE":
+		return SIPMethodBye, nil
+	case "CANCEL":
+		return SIPMethodCancel, nil
+	case "OPTIONS":
+		return SIPMethodOptions, nil
+	case "REGISTER":
+		return SIPMethodRegister, nil
+	case "PRACK":
+		return SIPMethodPrack, nil
+	case "SUBSCRIBE":
+		return SIPMethodSubscribe, nil
+	case "NOTIFY":
+		return SIPMethodNotify, nil
+	case "PUBLISH":
+		return SIPMethodPublish, nil
+	case "INFO":
+		return SIPMethodInfo, nil
+	case "REFER":
+		return SIPMethodRefer, nil
+	case "MESSAGE":
+		return SIPMethodMessage, nil
+	case "UPDATE":
+		return SIPMethodUpdate, nil
+	case "PING":
+		return SIPMethodPing, nil
+	default:
+		return 0, fmt.Errorf("Unknown SIP method: '%s'", method)
+	}
+}
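A quick standalone sketch of the two lookup helpers above; both are case-insensitive and return an error for unknown values:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	m, err := layers.GetSIPMethod("invite")
	fmt.Println(m, err) // INVITE <nil>

	v, err := layers.GetSIPVersion("sip/2.0")
	fmt.Println(v, err) // SIP/2.0 <nil>
}
```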
+
+// Here is a correspondence between long and short header names,
+// as defined in RFC 3261 section 20
+var compactSipHeadersCorrespondance = map[string]string{
+	"accept-contact":      "a",
+	"allow-events":        "u",
+	"call-id":             "i",
+	"contact":             "m",
+	"content-encoding":    "e",
+	"content-length":      "l",
+	"content-type":        "c",
+	"event":               "o",
+	"from":                "f",
+	"identity":            "y",
+	"refer-to":            "r",
+	"referred-by":         "b",
+	"reject-contact":      "j",
+	"request-disposition": "d",
+	"session-expires":     "x",
+	"subject":             "s",
+	"supported":           "k",
+	"to":                  "t",
+	"via":                 "v",
+}
+
+// The SIP object contains information about a decoded SIP packet:
+// -> The SIP Version
+// -> The SIP Headers (in a map[string][]string because of multiple headers with the same name)
+// -> The SIP Method
+// -> The SIP Response code (if it's a response)
+// -> The SIP Status line (if it's a response)
+// You can easily know the type of the packet with the IsResponse boolean
+//
+type SIP struct {
+	BaseLayer
+
+	// Base information
+	Version SIPVersion
+	Method  SIPMethod
+	Headers map[string][]string
+
+	// Request
+	RequestURI string
+
+	// Response
+	IsResponse     bool
+	ResponseCode   int
+	ResponseStatus string
+
+	// Private fields
+	cseq             int64
+	contentLength    int64
+	lastHeaderParsed string
+}
+
+// decodeSIP decodes the byte slice into a SIP type. It also
+// sets up the application layer in PacketBuilder.
+func decodeSIP(data []byte, p gopacket.PacketBuilder) error {
+	s := NewSIP()
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	p.SetApplicationLayer(s)
+	return nil
+}
+
+// NewSIP instantiates a new empty SIP object
+func NewSIP() *SIP {
+	s := new(SIP)
+	s.Headers = make(map[string][]string)
+	return s
+}
+
+// LayerType returns gopacket.LayerTypeSIP.
+func (s *SIP) LayerType() gopacket.LayerType {
+	return LayerTypeSIP
+}
+
+// Payload returns the base layer payload
+func (s *SIP) Payload() []byte {
+	return s.BaseLayer.Payload
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode
+func (s *SIP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer
+func (s *SIP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the slice into the SIP struct.
+func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	// Init some vars for the parsing that follows
+	var countLines int
+	var line []byte
+	var err error
+	var offset int
+
+	// Iterate over all lines of the SIP headers
+	// and stop when we reach the SDP (i.e. when the new line
+	// is at index 0 of the remaining packet)
+	buffer := bytes.NewBuffer(data)
+
+	for {
+		// Read next line
+		line, err = buffer.ReadBytes(byte('\n'))
+		if err != nil {
+			if err == io.EOF {
+				break
+			} else {
+				return err
+			}
+		}
+		offset += len(line)
+
+		// Trim the new line delimiters
+		line = bytes.Trim(line, "\r\n")
+
+		// Empty line, we hit the body
+		if len(line) == 0 {
+			break
+		}
+
+		// The first line is the SIP request/response line;
+		// the other lines are headers
+		if countLines == 0 {
+			err = s.ParseFirstLine(line)
+			if err != nil {
+				return err
+			}
+		} else {
+			err = s.ParseHeader(line)
+			if err != nil {
+				return err
+			}
+		}
+
+		countLines++
+	}
+	s.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]}
+
+	return nil
+}
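A minimal end-to-end sketch of DecodeFromBytes on a raw response (gopacket.NilDecodeFeedback is the library's no-op DecodeFeedback; the message bytes are made up):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	raw := []byte("SIP/2.0 200 OK\r\n" +
		"Via: SIP/2.0/UDP there.com:5060\r\n" +
		"CSeq: 1 REGISTER\r\n" +
		"Content-Length: 0\r\n" +
		"\r\n")

	s := layers.NewSIP()
	if err := s.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// The status line and headers are now parsed; the method of a response
	// comes from its CSeq header.
	fmt.Println(s.IsResponse, s.ResponseCode, s.Method, s.GetCSeq())
	// true 200 REGISTER 1
}
```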
+// +// Examples of first line of SIP Prococol : +// +// Request : INVITE bob@example.com SIP/2.0 +// Response : SIP/2.0 200 OK +// Response : SIP/2.0 501 Not Implemented +// +func (s *SIP) ParseFirstLine(firstLine []byte) error { + + var err error + + // Splits line by space + splits := strings.SplitN(string(firstLine), " ", 3) + + // We must have at least 3 parts + if len(splits) < 3 { + return fmt.Errorf("invalid first SIP line: '%s'", string(firstLine)) + } + + // Determine the SIP packet type + if strings.HasPrefix(splits[0], "SIP") { + + // --> Response + s.IsResponse = true + + // Validate SIP Version + s.Version, err = GetSIPVersion(splits[0]) + if err != nil { + return err + } + + // Compute code + s.ResponseCode, err = strconv.Atoi(splits[1]) + if err != nil { + return err + } + + // Compute status line + s.ResponseStatus = splits[2] + + } else { + + // --> Request + + // Validate method + s.Method, err = GetSIPMethod(splits[0]) + if err != nil { + return err + } + + s.RequestURI = splits[1] + + // Validate SIP Version + s.Version, err = GetSIPVersion(splits[2]) + if err != nil { + return err + } + } + + return nil +} + +// ParseHeader will parse a SIP Header +// SIP Headers are quite simple, there are colon separated name and value +// Headers can be spread over multiple lines +// +// Examples of header : +// +// CSeq: 1 REGISTER +// Via: SIP/2.0/UDP there.com:5060 +// Authorization:Digest username="UserB", +// realm="MCI WorldCom SIP", +// nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="", +// uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824" +// +func (s *SIP) ParseHeader(header []byte) (err error) { + + // Ignore empty headers + if len(header) == 0 { + return + } + + // Check if this is the following of last header + // RFC 3261 - 7.3.1 - Header Field Format specify that following lines of + // multiline headers must begin by SP or TAB + if header[0] == '\t' || header[0] == ' ' { + + header = bytes.TrimSpace(header) + s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header)) + return + } + + // Find the ':' to separate header name and value + index := bytes.Index(header, []byte(":")) + if index >= 0 { + + headerName := strings.ToLower(string(bytes.Trim(header[:index], " "))) + headerValue := string(bytes.Trim(header[index+1:], " ")) + + // Add header to object + s.Headers[headerName] = append(s.Headers[headerName], headerValue) + s.lastHeaderParsed = headerName + + // Compute specific headers + err = s.ParseSpecificHeaders(headerName, headerValue) + if err != nil { + return err + } + } + + return nil +} + +// ParseSpecificHeaders will parse some specific key values from +// specific headers like CSeq or Content-Length integer values +func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) { + + switch headerName { + case "cseq": + + // CSeq header value is formatted like that : + // CSeq: 123 INVITE + // We split the value to parse Cseq integer value, and method + splits := strings.Split(headerValue, " ") + if len(splits) > 1 { + + // Parse Cseq + s.cseq, err = strconv.ParseInt(splits[0], 10, 64) + if err != nil { + return err + } + + // Validate method + if s.IsResponse { + s.Method, err = GetSIPMethod(splits[1]) + if err != nil { + return err + } + } + } + + case "content-length": + + // Parse Content-Length + s.contentLength, err = strconv.ParseInt(headerValue, 10, 64) + if err != nil { + return err + } + } + + return nil +} + +// GetAllHeaders will return the full headers of 
+
+// GetAllHeaders will return the full headers of the
+// current SIP packet in a map[string][]string
+func (s *SIP) GetAllHeaders() map[string][]string {
+	return s.Headers
+}
+
+// GetHeader will return all the headers with
+// the specified name.
+func (s *SIP) GetHeader(headerName string) []string {
+	headerName = strings.ToLower(headerName)
+	h := make([]string, 0)
+	if _, ok := s.Headers[headerName]; ok {
+		if len(s.Headers[headerName]) > 0 {
+			return s.Headers[headerName]
+		} else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+			return s.Headers[compactSipHeadersCorrespondance[headerName]]
+		}
+	}
+	return h
+}
+
+// GetFirstHeader will return the first header with
+// the specified name. If the current SIP packet has multiple
+// headers with the same name, it returns the first.
+func (s *SIP) GetFirstHeader(headerName string) string {
+	headerName = strings.ToLower(headerName)
+	if _, ok := s.Headers[headerName]; ok {
+		if len(s.Headers[headerName]) > 0 {
+			return s.Headers[headerName][0]
+		} else if len(s.Headers[compactSipHeadersCorrespondance[headerName]]) > 0 {
+			return s.Headers[compactSipHeadersCorrespondance[headerName]][0]
+		}
+	}
+	return ""
+}
+
+//
+// Some handy getters for the most used SIP headers
+//
+
+// GetAuthorization will return the Authorization
+// header of the current SIP packet
+func (s *SIP) GetAuthorization() string {
+	return s.GetFirstHeader("Authorization")
+}
+
+// GetFrom will return the From
+// header of the current SIP packet
+func (s *SIP) GetFrom() string {
+	return s.GetFirstHeader("From")
+}
+
+// GetTo will return the To
+// header of the current SIP packet
+func (s *SIP) GetTo() string {
+	return s.GetFirstHeader("To")
+}
+
+// GetContact will return the Contact
+// header of the current SIP packet
+func (s *SIP) GetContact() string {
+	return s.GetFirstHeader("Contact")
+}
+
+// GetCallID will return the Call-ID
+// header of the current SIP packet
+func (s *SIP) GetCallID() string {
+	return s.GetFirstHeader("Call-ID")
+}
+
+// GetUserAgent will return the User-Agent
+// header of the current SIP packet
+func (s *SIP) GetUserAgent() string {
+	return s.GetFirstHeader("User-Agent")
+}
+
+// GetContentLength will return the parsed integer
+// Content-Length header of the current SIP packet
+func (s *SIP) GetContentLength() int64 {
+	return s.contentLength
+}
+
+// GetCSeq will return the parsed integer value of the CSeq
+// header of the current SIP packet
+func (s *SIP) GetCSeq() int64 {
+	return s.cseq
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/stp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/stp.go
new file mode 100644
index 00000000..bde7d7c8
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/stp.go
@@ -0,0 +1,27 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// STP decodes Spanning Tree Protocol packets, which transport BPDU (bridge protocol data unit) messages.
+type STP struct {
+	BaseLayer
+}
+
+// LayerType returns gopacket.LayerTypeSTP.
+func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP }
+
+func decodeSTP(data []byte, p gopacket.PacketBuilder) error {
+	stp := &STP{}
+	stp.Contents = data[:]
+	// TODO: parse the STP protocol into actual subfields.
+ p.AddLayer(stp) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tcp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tcp.go new file mode 100644 index 00000000..5d6216c6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tcp.go @@ -0,0 +1,338 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// TCP is the layer for TCP headers. +type TCP struct { + BaseLayer + SrcPort, DstPort TCPPort + Seq uint32 + Ack uint32 + DataOffset uint8 + FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool + Window uint16 + Checksum uint16 + Urgent uint16 + sPort, dPort []byte + Options []TCPOption + Padding []byte + opts [4]TCPOption + tcpipchecksum +} + +// TCPOptionKind represents a TCP option code. +type TCPOptionKind uint8 + +const ( + TCPOptionKindEndList = 0 + TCPOptionKindNop = 1 + TCPOptionKindMSS = 2 // len = 4 + TCPOptionKindWindowScale = 3 // len = 3 + TCPOptionKindSACKPermitted = 4 // len = 2 + TCPOptionKindSACK = 5 // len = n + TCPOptionKindEcho = 6 // len = 6, obsolete + TCPOptionKindEchoReply = 7 // len = 6, obsolete + TCPOptionKindTimestamps = 8 // len = 10 + TCPOptionKindPartialOrderConnectionPermitted = 9 // len = 2, obsolete + TCPOptionKindPartialOrderServiceProfile = 10 // len = 3, obsolete + TCPOptionKindCC = 11 // obsolete + TCPOptionKindCCNew = 12 // obsolete + TCPOptionKindCCEcho = 13 // obsolete + TCPOptionKindAltChecksum = 14 // len = 3, obsolete + TCPOptionKindAltChecksumData = 15 // len = n, obsolete +) + +func (k TCPOptionKind) String() string { + switch k { + case TCPOptionKindEndList: + return "EndList" + case TCPOptionKindNop: + return "NOP" + case TCPOptionKindMSS: + return "MSS" + case TCPOptionKindWindowScale: + return "WindowScale" + case TCPOptionKindSACKPermitted: + return "SACKPermitted" + case TCPOptionKindSACK: + return "SACK" + case TCPOptionKindEcho: + return "Echo" + case TCPOptionKindEchoReply: + return "EchoReply" + case TCPOptionKindTimestamps: + return "Timestamps" + case TCPOptionKindPartialOrderConnectionPermitted: + return "PartialOrderConnectionPermitted" + case TCPOptionKindPartialOrderServiceProfile: + return "PartialOrderServiceProfile" + case TCPOptionKindCC: + return "CC" + case TCPOptionKindCCNew: + return "CCNew" + case TCPOptionKindCCEcho: + return "CCEcho" + case TCPOptionKindAltChecksum: + return "AltChecksum" + case TCPOptionKindAltChecksumData: + return "AltChecksumData" + default: + return fmt.Sprintf("Unknown(%d)", k) + } +} + +type TCPOption struct { + OptionType TCPOptionKind + OptionLength uint8 + OptionData []byte +} + +func (t TCPOption) String() string { + hd := hex.EncodeToString(t.OptionData) + if len(hd) > 0 { + hd = " 0x" + hd + } + switch t.OptionType { + case TCPOptionKindMSS: + return fmt.Sprintf("TCPOption(%s:%v%s)", + t.OptionType, + binary.BigEndian.Uint16(t.OptionData), + hd) + + case TCPOptionKindTimestamps: + if len(t.OptionData) == 8 { + return fmt.Sprintf("TCPOption(%s:%v/%v%s)", + t.OptionType, + binary.BigEndian.Uint32(t.OptionData[:4]), + binary.BigEndian.Uint32(t.OptionData[4:8]), + hd) + } + } + return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd) +} + +// 
LayerType returns gopacket.LayerTypeTCP +func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP } + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + var optionLength int + for _, o := range t.Options { + switch o.OptionType { + case 0, 1: + optionLength += 1 + default: + optionLength += 2 + len(o.OptionData) + } + } + if opts.FixLengths { + if rem := optionLength % 4; rem != 0 { + t.Padding = lotsOfZeros[:4-rem] + } + t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4) + } + bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding)) + if err != nil { + return err + } + binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort)) + binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort)) + binary.BigEndian.PutUint32(bytes[4:], t.Seq) + binary.BigEndian.PutUint32(bytes[8:], t.Ack) + binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset()) + binary.BigEndian.PutUint16(bytes[14:], t.Window) + binary.BigEndian.PutUint16(bytes[18:], t.Urgent) + start := 20 + for _, o := range t.Options { + bytes[start] = byte(o.OptionType) + switch o.OptionType { + case 0, 1: + start++ + default: + if opts.FixLengths { + o.OptionLength = uint8(len(o.OptionData) + 2) + } + bytes[start+1] = o.OptionLength + copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData) + start += len(o.OptionData) + 2 + } + } + copy(bytes[start:], t.Padding) + if opts.ComputeChecksums { + // zero out checksum bytes in current serialization. + bytes[16] = 0 + bytes[17] = 0 + csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP) + if err != nil { + return err + } + t.Checksum = csum + } + binary.BigEndian.PutUint16(bytes[16:], t.Checksum) + return nil +} + +func (t *TCP) ComputeChecksum() (uint16, error) { + return t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP) +} + +func (t *TCP) flagsAndOffset() uint16 { + f := uint16(t.DataOffset) << 12 + if t.FIN { + f |= 0x0001 + } + if t.SYN { + f |= 0x0002 + } + if t.RST { + f |= 0x0004 + } + if t.PSH { + f |= 0x0008 + } + if t.ACK { + f |= 0x0010 + } + if t.URG { + f |= 0x0020 + } + if t.ECE { + f |= 0x0040 + } + if t.CWR { + f |= 0x0080 + } + if t.NS { + f |= 0x0100 + } + return f +} + +func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 20 { + df.SetTruncated() + return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data)) + } + tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2])) + tcp.sPort = data[0:2] + tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4])) + tcp.dPort = data[2:4] + tcp.Seq = binary.BigEndian.Uint32(data[4:8]) + tcp.Ack = binary.BigEndian.Uint32(data[8:12]) + tcp.DataOffset = data[12] >> 4 + tcp.FIN = data[13]&0x01 != 0 + tcp.SYN = data[13]&0x02 != 0 + tcp.RST = data[13]&0x04 != 0 + tcp.PSH = data[13]&0x08 != 0 + tcp.ACK = data[13]&0x10 != 0 + tcp.URG = data[13]&0x20 != 0 + tcp.ECE = data[13]&0x40 != 0 + tcp.CWR = data[13]&0x80 != 0 + tcp.NS = data[12]&0x01 != 0 + tcp.Window = binary.BigEndian.Uint16(data[14:16]) + tcp.Checksum = binary.BigEndian.Uint16(data[16:18]) + tcp.Urgent = binary.BigEndian.Uint16(data[18:20]) + if tcp.Options == nil { + // Pre-allocate to avoid allocating a slice. 
+ tcp.Options = tcp.opts[:0] + } else { + tcp.Options = tcp.Options[:0] + } + if tcp.DataOffset < 5 { + return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset) + } + dataStart := int(tcp.DataOffset) * 4 + if dataStart > len(data) { + df.SetTruncated() + tcp.Payload = nil + tcp.Contents = data + return errors.New("TCP data offset greater than packet length") + } + tcp.Contents = data[:dataStart] + tcp.Payload = data[dataStart:] + // From here on, data points just to the header options. + data = data[20:dataStart] +OPTIONS: + for len(data) > 0 { + tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])}) + opt := &tcp.Options[len(tcp.Options)-1] + switch opt.OptionType { + case TCPOptionKindEndList: // End of options + opt.OptionLength = 1 + tcp.Padding = data[1:] + break OPTIONS + case TCPOptionKindNop: // 1 byte padding + opt.OptionLength = 1 + default: + if len(data) < 2 { + df.SetTruncated() + return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data)) + } + opt.OptionLength = data[1] + if opt.OptionLength < 2 { + return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength) + } else if int(opt.OptionLength) > len(data) { + df.SetTruncated() + return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data)) + } + opt.OptionData = data[2:opt.OptionLength] + } + data = data[opt.OptionLength:] + } + return nil +} + +func (t *TCP) CanDecode() gopacket.LayerClass { + return LayerTypeTCP +} + +func (t *TCP) NextLayerType() gopacket.LayerType { + lt := t.DstPort.LayerType() + if lt == gopacket.LayerTypePayload { + lt = t.SrcPort.LayerType() + } + return lt +} + +func decodeTCP(data []byte, p gopacket.PacketBuilder) error { + tcp := &TCP{} + err := tcp.DecodeFromBytes(data, p) + p.AddLayer(tcp) + p.SetTransportLayer(tcp) + if err != nil { + return err + } + if p.DecodeOptions().DecodeStreamsAsDatagrams { + return p.NextDecoder(tcp.NextLayerType()) + } else { + return p.NextDecoder(gopacket.LayerTypePayload) + } +} + +func (t *TCP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort) +} + +// For testing only +func (t *TCP) SetInternalPortsForTesting() { + t.sPort = make([]byte, 2) + t.dPort = make([]byte, 2) + binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort)) + binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tcpip.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tcpip.go new file mode 100644 index 00000000..64ba51cc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tcpip.go @@ -0,0 +1,104 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "errors" + "fmt" + + "github.com/google/gopacket" +) + +// Checksum computation for TCP/UDP. 
+type tcpipchecksum struct {
+	pseudoheader tcpipPseudoHeader
+}
+
+type tcpipPseudoHeader interface {
+	pseudoheaderChecksum() (uint32, error)
+}
+
+func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
+	if err := ip.AddressTo4(); err != nil {
+		return 0, err
+	}
+	csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
+	csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
+	csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
+	csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
+	return csum, nil
+}
+
+func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
+	if err := ip.AddressTo16(); err != nil {
+		return 0, err
+	}
+	for i := 0; i < 16; i += 2 {
+		csum += uint32(ip.SrcIP[i]) << 8
+		csum += uint32(ip.SrcIP[i+1])
+		csum += uint32(ip.DstIP[i]) << 8
+		csum += uint32(ip.DstIP[i+1])
+	}
+	return csum, nil
+}
+
+// Calculate the TCP/IP checksum defined in rfc1071. The passed-in csum is any
+// initial checksum data that's already been computed.
+func tcpipChecksum(data []byte, csum uint32) uint16 {
+	// To handle odd lengths, we loop to length - 1, incrementing by 2, then
+	// handle the last byte specifically by checking against the original
+	// length.
+	length := len(data) - 1
+	for i := 0; i < length; i += 2 {
+		// For our test packet, doing this manually is about 25% faster
+		// (740 ns vs. 1000 ns) than doing it by calling binary.BigEndian.Uint16.
+		csum += uint32(data[i]) << 8
+		csum += uint32(data[i+1])
+	}
+	if len(data)%2 == 1 {
+		csum += uint32(data[length]) << 8
+	}
+	for csum > 0xffff {
+		csum = (csum >> 16) + (csum & 0xffff)
+	}
+	return ^uint16(csum)
+}
+
+// computeChecksum computes a TCP or UDP checksum. headerAndPayload is the
+// serialized TCP or UDP header plus its payload, with the checksum zeroed
+// out. headerProtocol is the IP protocol number of the upper-layer header.
+func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
+	if c.pseudoheader == nil {
+		return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
+	}
+	length := uint32(len(headerAndPayload))
+	csum, err := c.pseudoheader.pseudoheaderChecksum()
+	if err != nil {
+		return 0, err
+	}
+	csum += uint32(headerProtocol)
+	csum += length & 0xffff
+	csum += length >> 16
+	return tcpipChecksum(headerAndPayload, csum), nil
+}
+
+// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
+// This is needed for computing the checksum when serializing, since TCP/IP transport
+// layer checksums depend on fields in the IPv4 or IPv6 layer that contains it.
+// The passed-in layer must be an *IPv4 or *IPv6.
+func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
+	switch v := l.(type) {
+	case *IPv4:
+		i.pseudoheader = v
+	case *IPv6:
+		i.pseudoheader = v
+	default:
+		return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/test_creator.py b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/test_creator.py
new file mode 100755
index 00000000..c92d2765
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/test_creator.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2012 Google, Inc. All rights reserved.
+ +"""TestCreator creates test templates from pcap files.""" + +import argparse +import base64 +import glob +import re +import string +import subprocess +import sys + + +class Packet(object): + """Helper class encapsulating packet from a pcap file.""" + + def __init__(self, packet_lines): + self.packet_lines = packet_lines + self.data = self._DecodeText(packet_lines) + + @classmethod + def _DecodeText(cls, packet_lines): + packet_bytes = [] + # First line is timestamp and stuff, skip it. + # Format: 0x0010: 0000 0020 3aff 3ffe 0000 0000 0000 0000 ....:.?......... + + for line in packet_lines[1:]: + m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE) + if m is None: continue + for hexpart in m.group(1).split(): + packet_bytes.append(base64.b16decode(hexpart.upper())) + return ''.join(packet_bytes) + + def Test(self, name, link_type): + """Yields a test using this packet, as a set of lines.""" + yield '// testPacket%s is the packet:' % name + for line in self.packet_lines: + yield '// ' + line + yield 'var testPacket%s = []byte{' % name + data = list(self.data) + while data: + linebytes, data = data[:16], data[16:] + yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes]) + yield '}' + yield 'func TestPacket%s(t *testing.T) {' % name + yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type) + yield '\tif p.ErrorLayer() != nil {' + yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())' + yield '\t}' + yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type + yield '}' + yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name + yield '\tfor i := 0; i < b.N; i++ {' + yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type) + yield '\t}' + yield '}' + + + +def GetTcpdumpOutput(filename): + """Runs tcpdump on the given file, returning output as string.""" + return subprocess.check_output( + ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename]) + + +def TcpdumpOutputToPackets(output): + """Reads a pcap file with TCPDump, yielding Packet objects.""" + pdata = [] + for line in output.splitlines(): + if line[0] not in string.whitespace and pdata: + yield Packet(pdata) + pdata = [] + pdata.append(line) + if pdata: + yield Packet(pdata) + + +def main(): + class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter): + def _format_usage(self, usage, actions, groups, prefix=None): + header =('TestCreator creates gopacket tests using a pcap file.\n\n' + 'Tests are written to standard out... 
they can then be \n'
+                     'copied into the file of your choice and modified as \n'
+                     'you see fit.\n\n')
+            return header + argparse.ArgumentDefaultsHelpFormatter._format_usage(
+                self, usage, actions, groups, prefix)
+
+    parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter)
+    parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)')
+    parser.add_argument('--name', default='Packet%d', help='the layer type, must have "%d" inside it')
+    parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process')
+
+    args = parser.parse_args()
+
+    for arg in args.files:
+        for path in glob.glob(arg):
+            for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))):
+                print '\n'.join(packet.Test(
+                    args.name % i, args.link_type))
+
+if __name__ == '__main__':
+    main()
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls.go
new file mode 100644
index 00000000..ddb6ff9d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls.go
@@ -0,0 +1,208 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSType defines the type of data after the TLS Record
+type TLSType uint8
+
+// TLSType known values.
+const (
+	TLSChangeCipherSpec TLSType = 20
+	TLSAlert            TLSType = 21
+	TLSHandshake        TLSType = 22
+	TLSApplicationData  TLSType = 23
+	TLSUnknown          TLSType = 255
+)
+
+// String shows the register type nicely formatted
+func (tt TLSType) String() string {
+	switch tt {
+	default:
+		return "Unknown"
+	case TLSChangeCipherSpec:
+		return "Change Cipher Spec"
+	case TLSAlert:
+		return "Alert"
+	case TLSHandshake:
+		return "Handshake"
+	case TLSApplicationData:
+		return "Application Data"
+	}
+}
+
+// TLSVersion represents the TLS version in numeric format
+type TLSVersion uint16
+
+// String shows the TLS version nicely formatted
+func (tv TLSVersion) String() string {
+	switch tv {
+	default:
+		return "Unknown"
+	case 0x0200:
+		return "SSL 2.0"
+	case 0x0300:
+		return "SSL 3.0"
+	case 0x0301:
+		return "TLS 1.0"
+	case 0x0302:
+		return "TLS 1.1"
+	case 0x0303:
+		return "TLS 1.2"
+	case 0x0304:
+		return "TLS 1.3"
+	}
+}
+
+// TLS is specified in RFC 5246
+//
+//  TLS Record Protocol
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |     Content Type      |
+//  +--+--+--+--+--+--+--+--+
+//  |    Version (major)    |
+//  +--+--+--+--+--+--+--+--+
+//  |    Version (minor)    |
+//  +--+--+--+--+--+--+--+--+
+//  |        Length         |
+//  +--+--+--+--+--+--+--+--+
+//  |        Length         |
+//  +--+--+--+--+--+--+--+--+
+
+// TLS is actually a slice of TLSRecord structures
+type TLS struct {
+	BaseLayer
+
+	// TLS Records
+	ChangeCipherSpec []TLSChangeCipherSpecRecord
+	Handshake        []TLSHandshakeRecord
+	AppData          []TLSAppDataRecord
+	Alert            []TLSAlertRecord
+}
+
+// TLSRecordHeader contains all the information that each TLS Record type should have
+type TLSRecordHeader struct {
+	ContentType TLSType
+	Version     TLSVersion
+	Length      uint16
+}
+
+// LayerType returns gopacket.LayerTypeTLS.
+func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS }
+
+// decodeTLS decodes the byte slice into a TLS type. It also
+// sets up the application layer in PacketBuilder.
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error { + t := &TLS{} + err := t.DecodeFromBytes(data, p) + if err != nil { + return err + } + p.AddLayer(t) + p.SetApplicationLayer(t) + return nil +} + +// DecodeFromBytes decodes the slice into the TLS struct. +func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + t.BaseLayer.Contents = data + t.BaseLayer.Payload = nil + + t.ChangeCipherSpec = t.ChangeCipherSpec[:0] + t.Handshake = t.Handshake[:0] + t.AppData = t.AppData[:0] + t.Alert = t.Alert[:0] + + return t.decodeTLSRecords(data, df) +} + +func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error { + if len(data) < 5 { + df.SetTruncated() + return errors.New("TLS record too short") + } + + // since there are no further layers, the baselayer's content is + // pointing to this layer + t.BaseLayer = BaseLayer{Contents: data[:len(data)]} + + var h TLSRecordHeader + h.ContentType = TLSType(data[0]) + h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3])) + h.Length = binary.BigEndian.Uint16(data[3:5]) + + if h.ContentType.String() == "Unknown" { + return errors.New("Unknown TLS record type") + } + + hl := 5 // header length + tl := hl + int(h.Length) + if len(data) < tl { + df.SetTruncated() + return errors.New("TLS packet length mismatch") + } + + switch h.ContentType { + default: + return errors.New("Unknown TLS record type") + case TLSChangeCipherSpec: + var r TLSChangeCipherSpecRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.ChangeCipherSpec = append(t.ChangeCipherSpec, r) + case TLSAlert: + var r TLSAlertRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.Alert = append(t.Alert, r) + case TLSHandshake: + var r TLSHandshakeRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.Handshake = append(t.Handshake, r) + case TLSApplicationData: + var r TLSAppDataRecord + e := r.decodeFromBytes(h, data[hl:tl], df) + if e != nil { + return e + } + t.AppData = append(t.AppData, r) + } + + if len(data) == tl { + return nil + } + return t.decodeTLSRecords(data[tl:len(data)], df) +} + +// CanDecode implements gopacket.DecodingLayer. +func (t *TLS) CanDecode() gopacket.LayerClass { + return LayerTypeTLS +} + +// NextLayerType implements gopacket.DecodingLayer. +func (t *TLS) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord +func (t *TLS) Payload() []byte { + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_alert.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_alert.go new file mode 100644 index 00000000..0c5aee02 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_alert.go @@ -0,0 +1,165 @@ +// Copyright 2018 The GoPacket Authors. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+
+package layers
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// TLSAlertLevel defines the alert level data type
+type TLSAlertLevel uint8
+
+// TLSAlertDescr defines the alert description data type
+type TLSAlertDescr uint8
+
+const (
+	TLSAlertWarning      TLSAlertLevel = 1
+	TLSAlertFatal        TLSAlertLevel = 2
+	TLSAlertUnknownLevel TLSAlertLevel = 255
+
+	TLSAlertCloseNotify               TLSAlertDescr = 0
+	TLSAlertUnexpectedMessage         TLSAlertDescr = 10
+	TLSAlertBadRecordMac              TLSAlertDescr = 20
+	TLSAlertDecryptionFailedRESERVED  TLSAlertDescr = 21
+	TLSAlertRecordOverflow            TLSAlertDescr = 22
+	TLSAlertDecompressionFailure      TLSAlertDescr = 30
+	TLSAlertHandshakeFailure          TLSAlertDescr = 40
+	TLSAlertNoCertificateRESERVED     TLSAlertDescr = 41
+	TLSAlertBadCertificate            TLSAlertDescr = 42
+	TLSAlertUnsupportedCertificate    TLSAlertDescr = 43
+	TLSAlertCertificateRevoked        TLSAlertDescr = 44
+	TLSAlertCertificateExpired        TLSAlertDescr = 45
+	TLSAlertCertificateUnknown        TLSAlertDescr = 46
+	TLSAlertIllegalParameter          TLSAlertDescr = 47
+	TLSAlertUnknownCa                 TLSAlertDescr = 48
+	TLSAlertAccessDenied              TLSAlertDescr = 49
+	TLSAlertDecodeError               TLSAlertDescr = 50
+	TLSAlertDecryptError              TLSAlertDescr = 51
+	TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60
+	TLSAlertProtocolVersion           TLSAlertDescr = 70
+	TLSAlertInsufficientSecurity      TLSAlertDescr = 71
+	TLSAlertInternalError             TLSAlertDescr = 80
+	TLSAlertUserCanceled              TLSAlertDescr = 90
+	TLSAlertNoRenegotiation           TLSAlertDescr = 100
+	TLSAlertUnsupportedExtension      TLSAlertDescr = 110
+	TLSAlertUnknownDescription        TLSAlertDescr = 255
+)
+
+//  TLS Alert
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |         Level         |
+//  +--+--+--+--+--+--+--+--+
+//  |      Description      |
+//  +--+--+--+--+--+--+--+--+
+
+// TLSAlertRecord contains all the information that each Alert Record type should have
+type TLSAlertRecord struct {
+	TLSRecordHeader
+
+	Level       TLSAlertLevel
+	Description TLSAlertDescr
+
+	EncryptedMsg []byte
+}
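As the diagram shows, an unencrypted alert body is just two bytes: level, then description. A standalone sketch mapping them through the exported types above (the byte values are from the constant table, not from a real capture):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket/layers"
)

func main() {
	// Two-byte alert body: level then description.
	body := []byte{2, 40} // fatal, handshake_failure

	level := layers.TLSAlertLevel(body[0])
	desc := layers.TLSAlertDescr(body[1])
	fmt.Println(level, desc) // Fatal handshake_failure
}
```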
+
+// decodeFromBytes decodes the slice into the TLSAlertRecord struct.
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) < 2 {
+		df.SetTruncated()
+		return errors.New("TLS Alert packet too short")
+	}
+
+	if t.Length == 2 {
+		t.Level = TLSAlertLevel(data[0])
+		t.Description = TLSAlertDescr(data[1])
+	} else {
+		t.Level = TLSAlertUnknownLevel
+		t.Description = TLSAlertUnknownDescription
+		t.EncryptedMsg = data
+	}
+
+	return nil
+}
+
+// String shows the TLS alert level nicely formatted
+func (al TLSAlertLevel) String() string {
+	switch al {
+	default:
+		return fmt.Sprintf("Unknown(%d)", al)
+	case TLSAlertWarning:
+		return "Warning"
+	case TLSAlertFatal:
+		return "Fatal"
+	}
+}
+
+// String shows the TLS alert description nicely formatted
+func (ad TLSAlertDescr) String() string {
+	switch ad {
+	default:
+		return "Unknown"
+	case TLSAlertCloseNotify:
+		return "close_notify"
+	case TLSAlertUnexpectedMessage:
+		return "unexpected_message"
+	case TLSAlertBadRecordMac:
+		return "bad_record_mac"
+	case TLSAlertDecryptionFailedRESERVED:
+		return "decryption_failed_RESERVED"
+	case TLSAlertRecordOverflow:
+		return "record_overflow"
+	case TLSAlertDecompressionFailure:
+		return "decompression_failure"
+	case TLSAlertHandshakeFailure:
+		return "handshake_failure"
+	case TLSAlertNoCertificateRESERVED:
+		return "no_certificate_RESERVED"
+	case TLSAlertBadCertificate:
+		return "bad_certificate"
+	case TLSAlertUnsupportedCertificate:
+		return "unsupported_certificate"
+	case TLSAlertCertificateRevoked:
+		return "certificate_revoked"
+	case TLSAlertCertificateExpired:
+		return "certificate_expired"
+	case TLSAlertCertificateUnknown:
+		return "certificate_unknown"
+	case TLSAlertIllegalParameter:
+		return "illegal_parameter"
+	case TLSAlertUnknownCa:
+		return "unknown_ca"
+	case TLSAlertAccessDenied:
+		return "access_denied"
+	case TLSAlertDecodeError:
+		return "decode_error"
+	case TLSAlertDecryptError:
+		return "decrypt_error"
+	case TLSAlertExportRestrictionRESERVED:
+		return "export_restriction_RESERVED"
+	case TLSAlertProtocolVersion:
+		return "protocol_version"
+	case TLSAlertInsufficientSecurity:
+		return "insufficient_security"
+	case TLSAlertInternalError:
+		return "internal_error"
+	case TLSAlertUserCanceled:
+		return "user_canceled"
+	case TLSAlertNoRenegotiation:
+		return "no_renegotiation"
+	case TLSAlertUnsupportedExtension:
+		return "unsupported_extension"
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_appdata.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_appdata.go
new file mode 100644
index 00000000..dedd1d58
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_appdata.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSAppDataRecord contains all the information that each AppData Record type should have
+type TLSAppDataRecord struct {
+	TLSRecordHeader
+	Payload []byte
+}
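A minimal sketch of decoding a single Application Data record through the TLS layer defined earlier in this diff (the record bytes are made up; the payload is treated as opaque, since it is normally encrypted):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// One TLS record: type 23 (Application Data), version 0x0303 (TLS 1.2),
	// length 5, followed by 5 payload bytes.
	raw := []byte{0x17, 0x03, 0x03, 0x00, 0x05, 'h', 'e', 'l', 'l', 'o'}

	var tls layers.TLS
	if err := tls.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	r := tls.AppData[0]
	fmt.Println(r.ContentType, r.Version, r.Length) // Application Data TLS 1.2 5
}
```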
+
+// decodeFromBytes decodes the slice into the TLSAppDataRecord struct.
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) != int(t.Length) {
+		return errors.New("TLS Application Data length mismatch")
+	}
+
+	t.Payload = data
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_cipherspec.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
new file mode 100644
index 00000000..8f3dc62b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record
+type TLSchangeCipherSpec uint8
+
+const (
+	TLSChangecipherspecMessage TLSchangeCipherSpec = 1
+	TLSChangecipherspecUnknown TLSchangeCipherSpec = 255
+)
+
+//  TLS Change Cipher Spec
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |        Message        |
+//  +--+--+--+--+--+--+--+--+
+
+// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record
+type TLSChangeCipherSpecRecord struct {
+	TLSRecordHeader
+
+	Message TLSchangeCipherSpec
+}
+
+// decodeFromBytes decodes the slice into the TLSChangeCipherSpecRecord struct.
+func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) != 1 {
+		df.SetTruncated()
+		return errors.New("TLS Change Cipher Spec record incorrect length")
+	}
+
+	t.Message = TLSchangeCipherSpec(data[0])
+	if t.Message != TLSChangecipherspecMessage {
+		t.Message = TLSChangecipherspecUnknown
+	}
+
+	return nil
+}
+
+// String shows the message value nicely formatted
+func (ccs TLSchangeCipherSpec) String() string {
+	switch ccs {
+	default:
+		return "Unknown"
+	case TLSChangecipherspecMessage:
+		return "Change Cipher Spec Message"
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_handshake.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_handshake.go
new file mode 100644
index 00000000..e45e2c7c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/tls_handshake.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// TLSHandshakeRecord defines the structure of a Handshake Record
+type TLSHandshakeRecord struct {
+	TLSRecordHeader
+}
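And the same approach for a Change Cipher Spec record, whose one-byte body is handled by the cipherspec file above (record bytes made up):

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Type 20 (Change Cipher Spec), TLS 1.2, length 1, message value 1.
	raw := []byte{0x14, 0x03, 0x03, 0x00, 0x01, 0x01}

	var tls layers.TLS
	if err := tls.DecodeFromBytes(raw, gopacket.NilDecodeFeedback); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(tls.ChangeCipherSpec[0].Message) // Change Cipher Spec Message
}
```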
+
+// decodeFromBytes decodes the slice into the TLSHandshakeRecord struct.
+func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	// TODO
+
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/udp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/udp.go
new file mode 100644
index 00000000..97e81c69
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/udp.go
@@ -0,0 +1,133 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// UDP is the layer for UDP headers.
+type UDP struct {
+	BaseLayer
+	SrcPort, DstPort UDPPort
+	Length           uint16
+	Checksum         uint16
+	sPort, dPort     []byte
+	tcpipchecksum
+}
+
+// LayerType returns gopacket.LayerTypeUDP
+func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP }
+
+func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data))
+	}
+	udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2]))
+	udp.sPort = data[0:2]
+	udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4]))
+	udp.dPort = data[2:4]
+	udp.Length = binary.BigEndian.Uint16(data[4:6])
+	udp.Checksum = binary.BigEndian.Uint16(data[6:8])
+	udp.BaseLayer = BaseLayer{Contents: data[:8]}
+	switch {
+	case udp.Length >= 8:
+		hlen := int(udp.Length)
+		if hlen > len(data) {
+			df.SetTruncated()
+			hlen = len(data)
+		}
+		udp.Payload = data[8:hlen]
+	case udp.Length == 0: // Jumbogram, use entire rest of data
+		udp.Payload = data[8:]
+	default:
+		return fmt.Errorf("UDP packet too small: %d bytes", udp.Length)
+	}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var jumbo bool
+
+	payload := b.Bytes()
+	if _, ok := u.pseudoheader.(*IPv6); ok {
+		if len(payload)+8 > 65535 {
+			jumbo = true
+		}
+	}
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort))
+	if opts.FixLengths {
+		if jumbo {
+			u.Length = 0
+		} else {
+			u.Length = uint16(len(payload)) + 8
+		}
+	}
+	binary.BigEndian.PutUint16(bytes[4:], u.Length)
+	if opts.ComputeChecksums {
+		// zero out checksum bytes
+		bytes[6] = 0
+		bytes[7] = 0
+		csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP)
+		if err != nil {
+			return err
+		}
+		u.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[6:], u.Checksum)
+	return nil
+}
+
+func (u *UDP) CanDecode() gopacket.LayerClass {
+	return LayerTypeUDP
+}
+
+// NextLayerType uses the destination port to select the
+// right next decoder. It tries first to decode via the
+// destination port, then the source port.
+func (u *UDP) NextLayerType() gopacket.LayerType { + if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload { + return lt + } + return u.SrcPort.LayerType() +} + +func decodeUDP(data []byte, p gopacket.PacketBuilder) error { + udp := &UDP{} + err := udp.DecodeFromBytes(data, p) + p.AddLayer(udp) + p.SetTransportLayer(udp) + if err != nil { + return err + } + return p.NextDecoder(udp.NextLayerType()) +} + +func (u *UDP) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort) +} + +// For testing only +func (u *UDP) SetInternalPortsForTesting() { + u.sPort = make([]byte, 2) + u.dPort = make([]byte, 2) + binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort)) + binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/udplite.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/udplite.go new file mode 100644 index 00000000..7d84c514 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/udplite.go @@ -0,0 +1,44 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// Copyright 2009-2011 Andreas Krennmair. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +// UDPLite is the layer for UDP-Lite headers (rfc 3828). +type UDPLite struct { + BaseLayer + SrcPort, DstPort UDPLitePort + ChecksumCoverage uint16 + Checksum uint16 + sPort, dPort []byte +} + +// LayerType returns gopacket.LayerTypeUDPLite +func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite } + +func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error { + udp := &UDPLite{ + SrcPort: UDPLitePort(binary.BigEndian.Uint16(data[0:2])), + sPort: data[0:2], + DstPort: UDPLitePort(binary.BigEndian.Uint16(data[2:4])), + dPort: data[2:4], + ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]), + Checksum: binary.BigEndian.Uint16(data[6:8]), + BaseLayer: BaseLayer{data[:8], data[8:]}, + } + p.AddLayer(udp) + p.SetTransportLayer(udp) + return p.NextDecoder(gopacket.LayerTypePayload) +} + +func (u *UDPLite) TransportFlow() gopacket.Flow { + return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/usb.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/usb.go new file mode 100644 index 00000000..0b4d4af0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/usb.go @@ -0,0 +1,287 @@ +// Copyright 2014 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. 
+ +package layers + +import ( + "encoding/binary" + "github.com/google/gopacket" +) + +type USBEventType uint8 + +const ( + USBEventTypeSubmit USBEventType = 'S' + USBEventTypeComplete USBEventType = 'C' + USBEventTypeError USBEventType = 'E' +) + +func (a USBEventType) String() string { + switch a { + case USBEventTypeSubmit: + return "SUBMIT" + case USBEventTypeComplete: + return "COMPLETE" + case USBEventTypeError: + return "ERROR" + default: + return "Unknown event type" + } +} + +type USBRequestBlockSetupRequest uint8 + +const ( + USBRequestBlockSetupRequestGetStatus USBRequestBlockSetupRequest = 0x00 + USBRequestBlockSetupRequestClearFeature USBRequestBlockSetupRequest = 0x01 + USBRequestBlockSetupRequestSetFeature USBRequestBlockSetupRequest = 0x03 + USBRequestBlockSetupRequestSetAddress USBRequestBlockSetupRequest = 0x05 + USBRequestBlockSetupRequestGetDescriptor USBRequestBlockSetupRequest = 0x06 + USBRequestBlockSetupRequestSetDescriptor USBRequestBlockSetupRequest = 0x07 + USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08 + USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09 + USBRequestBlockSetupRequestSetIdle USBRequestBlockSetupRequest = 0x0a +) + +func (a USBRequestBlockSetupRequest) String() string { + switch a { + case USBRequestBlockSetupRequestGetStatus: + return "GET_STATUS" + case USBRequestBlockSetupRequestClearFeature: + return "CLEAR_FEATURE" + case USBRequestBlockSetupRequestSetFeature: + return "SET_FEATURE" + case USBRequestBlockSetupRequestSetAddress: + return "SET_ADDRESS" + case USBRequestBlockSetupRequestGetDescriptor: + return "GET_DESCRIPTOR" + case USBRequestBlockSetupRequestSetDescriptor: + return "SET_DESCRIPTOR" + case USBRequestBlockSetupRequestGetConfiguration: + return "GET_CONFIGURATION" + case USBRequestBlockSetupRequestSetConfiguration: + return "SET_CONFIGURATION" + case USBRequestBlockSetupRequestSetIdle: + return "SET_IDLE" + default: + return "UNKNOWN" + } +} + +type USBTransportType uint8 + +const ( + USBTransportTypeTransferIn USBTransportType = 0x80 // Indicates send or receive + USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream. + USBTransportTypeInterrupt USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards. + USBTransportTypeControl USBTransportType = 0x02 // Control transfers are typically used for command and status operations. + USBTransportTypeBulk USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers. +) + +type USBDirectionType uint8 + +const ( + USBDirectionTypeUnknown USBDirectionType = iota + USBDirectionTypeIn + USBDirectionTypeOut +) + +func (a USBDirectionType) String() string { + switch a { + case USBDirectionTypeIn: + return "In" + case USBDirectionTypeOut: + return "Out" + default: + return "Unknown direction type" + } +} + +// The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol. 
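+//
+// A minimal inspection sketch, assuming pkt is a gopacket.Packet decoded
+// from a Linux usbmon capture (pkt is illustrative):
+//  if l := pkt.Layer(LayerTypeUSB); l != nil {
+//      usb := l.(*USB)
+//      fmt.Println(usb.EventType, usb.Direction, "endpoint", usb.EndpointNumber)
+//  }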
+type USB struct { + BaseLayer + ID uint64 + EventType USBEventType + TransferType USBTransportType + Direction USBDirectionType + EndpointNumber uint8 + DeviceAddress uint8 + BusID uint16 + TimestampSec int64 + TimestampUsec int32 + Setup bool + Data bool + Status int32 + UrbLength uint32 + UrbDataLength uint32 + + UrbInterval uint32 + UrbStartFrame uint32 + UrbCopyOfTransferFlags uint32 + IsoNumDesc uint32 +} + +func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB } + +func (m *USB) NextLayerType() gopacket.LayerType { + if m.Setup { + return LayerTypeUSBRequestBlockSetup + } else if m.Data { + } + + return m.TransferType.LayerType() +} + +func decodeUSB(data []byte, p gopacket.PacketBuilder) error { + d := &USB{} + + return decodingLayerDecoder(d, data, p) +} + +func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.ID = binary.LittleEndian.Uint64(data[0:8]) + m.EventType = USBEventType(data[8]) + m.TransferType = USBTransportType(data[9]) + + m.EndpointNumber = data[10] & 0x7f + if data[10]&uint8(USBTransportTypeTransferIn) > 0 { + m.Direction = USBDirectionTypeIn + } else { + m.Direction = USBDirectionTypeOut + } + + m.DeviceAddress = data[11] + m.BusID = binary.LittleEndian.Uint16(data[12:14]) + + if uint(data[14]) == 0 { + m.Setup = true + } + + if uint(data[15]) == 0 { + m.Data = true + } + + m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24])) + m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28])) + m.Status = int32(binary.LittleEndian.Uint32(data[28:32])) + m.UrbLength = binary.LittleEndian.Uint32(data[32:36]) + m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40]) + + m.Contents = data[:40] + m.Payload = data[40:] + + if m.Setup { + m.Payload = data[40:] + } else if m.Data { + m.Payload = data[uint32(len(data))-m.UrbDataLength:] + } + + // if 64 bit, dissect_linux_usb_pseudo_header_ext + if false { + m.UrbInterval = binary.LittleEndian.Uint32(data[40:44]) + m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48]) + m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52]) + m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56]) + m.Contents = data[:56] + m.Payload = data[56:] + } + + // crc5 or crc16 + // eop (end of packet) + + return nil +} + +type USBRequestBlockSetup struct { + BaseLayer + RequestType uint8 + Request USBRequestBlockSetupRequest + Value uint16 + Index uint16 + Length uint16 +} + +func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup } + +func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.RequestType = data[0] + m.Request = USBRequestBlockSetupRequest(data[1]) + m.Value = binary.LittleEndian.Uint16(data[2:4]) + m.Index = binary.LittleEndian.Uint16(data[4:6]) + m.Length = binary.LittleEndian.Uint16(data[6:8]) + m.Contents = data[:8] + m.Payload = data[8:] + return nil +} + +func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error { + d := &USBRequestBlockSetup{} + return decodingLayerDecoder(d, data, p) +} + +type USBControl struct { + BaseLayer +} + +func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl } + +func (m *USBControl) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func 
decodeUSBControl(data []byte, p gopacket.PacketBuilder) error { + d := &USBControl{} + return decodingLayerDecoder(d, data, p) +} + +type USBInterrupt struct { + BaseLayer +} + +func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt } + +func (m *USBInterrupt) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error { + d := &USBInterrupt{} + return decodingLayerDecoder(d, data, p) +} + +type USBBulk struct { + BaseLayer +} + +func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk } + +func (m *USBBulk) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypePayload +} + +func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + m.Contents = data + return nil +} + +func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error { + d := &USBBulk{} + return decodingLayerDecoder(d, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/vrrp.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/vrrp.go new file mode 100644 index 00000000..ffaafe6a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/vrrp.go @@ -0,0 +1,156 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "errors" + "net" + + "github.com/google/gopacket" +) + +/* + This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2. + https://tools.ietf.org/html/rfc3768#section-5 + 0 1 2 3 + 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + |Version| Type | Virtual Rtr ID| Priority | Count IP Addrs| + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Auth Type | Adver Int | Checksum | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | IP Address (1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | . | + | . | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | IP Address (n) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Authentication Data (1) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + | Authentication Data (2) | + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +*/ + +type VRRPv2Type uint8 +type VRRPv2AuthType uint8 + +const ( + VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement +) + +// String conversions for VRRP message types +func (v VRRPv2Type) String() string { + switch v { + case VRRPv2Advertisement: + return "VRRPv2 Advertisement" + default: + return "" + } +} + +const ( + VRRPv2AuthNoAuth VRRPv2AuthType = 0x00 // No Authentication + VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1 + VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2 +) + +func (v VRRPv2AuthType) String() string { + switch v { + case VRRPv2AuthNoAuth: + return "No Authentication" + case VRRPv2AuthReserved1: + return "Reserved" + case VRRPv2AuthReserved2: + return "Reserved" + default: + return "" + } +} + +// VRRPv2 represents an VRRP v2 message. 
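+//
+// A minimal field-access sketch, assuming pkt is a gopacket.Packet carrying
+// a VRRP advertisement (IP protocol 112; pkt is illustrative):
+//  if l := pkt.Layer(LayerTypeVRRP); l != nil {
+//      v := l.(*VRRPv2)
+//      fmt.Println("VRID", v.VirtualRtrID, "prio", v.Priority, "addrs", v.IPAddress)
+//  }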
+type VRRPv2 struct { + BaseLayer + Version uint8 // The version field specifies the VRRP protocol version of this packet (v2) + Type VRRPv2Type // The type field specifies the type of this VRRP packet. The only type defined in v2 is ADVERTISEMENT + VirtualRtrID uint8 // identifies the virtual router this packet is reporting status for + Priority uint8 // specifies the sending VRRP router's priority for the virtual router (100 = default) + CountIPAddr uint8 // The number of IP addresses contained in this VRRP advertisement. + AuthType VRRPv2AuthType // identifies the authentication method being utilized + AdverInt uint8 // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS. The default is 1 second + Checksum uint16 // used to detect data corruption in the VRRP message. + IPAddress []net.IP // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field. +} + +// LayerType returns LayerTypeVRRP for VRRP v2 message. +func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP } + +func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error { + + v.BaseLayer = BaseLayer{Contents: data[:len(data)]} + v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2 + + v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement) + if v.Type != 1 { + // rfc3768: A packet with unknown type MUST be discarded. + return errors.New("Unrecognized VRRPv2 type field.") + } + + v.VirtualRtrID = data[1] + v.Priority = data[2] + + v.CountIPAddr = data[3] + if v.CountIPAddr < 1 { + return errors.New("VRRPv2 number of IP addresses is not valid.") + } + + v.AuthType = VRRPv2AuthType(data[4]) + v.AdverInt = uint8(data[5]) + v.Checksum = binary.BigEndian.Uint16(data[6:8]) + + // populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field + // offset references the starting byte containing the list of ip addresses + offset := 8 + for i := uint8(0); i < v.CountIPAddr; i++ { + v.IPAddress = append(v.IPAddress, data[offset:offset+4]) + offset += 4 + } + + // any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC + // + // 5.3.10. Authentication Data + // + // The authentication string is currently only used to maintain + // backwards compatibility with RFC 2338. It SHOULD be set to zero on + // transmission and ignored on reception. + return nil +} + +// CanDecode specifies the layer type in which we are attempting to unwrap. +func (v *VRRPv2) CanDecode() gopacket.LayerClass { + return LayerTypeVRRP +} + +// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we set to 0 +func (v *VRRPv2) NextLayerType() gopacket.LayerType { + return gopacket.LayerTypeZero +} + +// The VRRP packet does not include payload data. Setting byte slice to nil +func (v *VRRPv2) Payload() []byte { + return nil +} + +// decodeVRRP will parse VRRP v2 +func decodeVRRP(data []byte, p gopacket.PacketBuilder) error { + if len(data) < 8 { + return errors.New("Not a valid VRRP packet. 
Packet length is too small.") + } + v := &VRRPv2{} + return decodingLayerDecoder(v, data, p) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/vxlan.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/vxlan.go new file mode 100644 index 00000000..4f79ea4e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers/vxlan.go @@ -0,0 +1,98 @@ +// Copyright 2016 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package layers + +import ( + "encoding/binary" + "fmt" + "github.com/google/gopacket" +) + +// VXLAN is specifed in RFC 7348 https://tools.ietf.org/html/rfc7348 +// G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 +// 0 1 2 3 +// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +// 0 8 16 24 32 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R| Group Policy ID | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// | 24 bit VXLAN Network Identifier | Reserved | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// VXLAN is a VXLAN packet header +type VXLAN struct { + BaseLayer + ValidIDFlag bool // 'I' bit per RFC 7348 + VNI uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348 + GBPExtension bool // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 + GBPDontLearn bool // 'D' bit per Group Policy + GBPApplied bool // 'A' bit per Group Policy + GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy +} + +// LayerType returns LayerTypeVXLAN +func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN } + +func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error { + vx := &VXLAN{} + + // VNI is a 24bit number, Uint32 requires 32 bits + var buf [4]byte + copy(buf[1:], data[4:7]) + + // RFC 7348 https://tools.ietf.org/html/rfc7348 + vx.ValidIDFlag = data[0]&0x08 > 0 // 'I' bit per RFC7348 + vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348 + + // Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 + vx.GBPExtension = data[0]&0x80 > 0 // 'G' bit per the group policy draft + vx.GBPDontLearn = data[1]&0x40 > 0 // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame. + vx.GBPApplied = data[1]&0x80 > 0 // 'A' bit - indicates that the group policy has already been applied to this packet. + vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft + + // Layer information + const vxlanLength = 8 + vx.Contents = data[:vxlanLength] + vx.Payload = data[vxlanLength:] + + p.AddLayer(vx) + return p.NextDecoder(LinkTypeEthernet) +} + +// SerializeTo writes the serialized form of this layer into the +// SerializationBuffer, implementing gopacket.SerializableLayer. +// See the docs for gopacket.SerializableLayer for more info. +func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error { + bytes, err := b.PrependBytes(8) + if err != nil { + return err + } + + // PrependBytes does not guarantee that bytes are zeroed. 
Setting flags via OR requires that they start off at zero + bytes[0] = 0 + bytes[1] = 0 + + if vx.ValidIDFlag { + bytes[0] |= 0x08 + } + if vx.GBPExtension { + bytes[0] |= 0x80 + } + if vx.GBPDontLearn { + bytes[1] |= 0x40 + } + if vx.GBPApplied { + bytes[1] |= 0x80 + } + + binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID) + if vx.VNI >= 1<<24 { + return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI) + } + binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers_decoder.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers_decoder.go new file mode 100644 index 00000000..8c1f108c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layers_decoder.go @@ -0,0 +1,101 @@ +// Copyright 2019 The GoPacket Authors. All rights reserved. + +package gopacket + +// Created by gen.go, don't edit manually +// Generated at 2019-06-18 11:37:31.308731293 +0600 +06 m=+0.000842599 + +// LayersDecoder returns DecodingLayerFunc for specified +// DecodingLayerContainer, LayerType value to start decoding with and +// some DecodeFeedback. +func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc { + firstDec, ok := dl.Decoder(first) + if !ok { + return func([]byte, *[]LayerType) (LayerType, error) { + return first, nil + } + } + if dlc, ok := dl.(DecodingLayerSparse); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + if dlc, ok := dl.(DecodingLayerArray); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + if dlc, ok := dl.(DecodingLayerMap); ok { + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. + typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } + } + dlc := dl + return func(data []byte, decoded *[]LayerType) (LayerType, error) { + *decoded = (*decoded)[:0] // Truncated decoded layers. 
+ typ := first + decoder := firstDec + for { + if err := decoder.DecodeFromBytes(data, df); err != nil { + return LayerTypeZero, err + } + *decoded = append(*decoded, typ) + typ = decoder.NextLayerType() + if data = decoder.LayerPayload(); len(data) == 0 { + break + } + if decoder, ok = dlc.Decoder(typ); !ok { + return typ, nil + } + } + return LayerTypeZero, nil + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layertype.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layertype.go new file mode 100644 index 00000000..3abfee1e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/layertype.go @@ -0,0 +1,111 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" + "strconv" +) + +// LayerType is a unique identifier for each type of layer. This enumeration +// does not match with any externally available numbering scheme... it's solely +// usable/useful within this library as a means for requesting layer types +// (see Packet.Layer) and determining which types of layers have been decoded. +// +// New LayerTypes may be created by calling gopacket.RegisterLayerType. +type LayerType int64 + +// LayerTypeMetadata contains metadata associated with each LayerType. +type LayerTypeMetadata struct { + // Name is the string returned by each layer type's String method. + Name string + // Decoder is the decoder to use when the layer type is passed in as a + // Decoder. + Decoder Decoder +} + +type layerTypeMetadata struct { + inUse bool + LayerTypeMetadata +} + +// DecodersByLayerName maps layer names to decoders for those layers. +// This allows users to specify decoders by name to a program and have that +// program pick the correct decoder accordingly. +var DecodersByLayerName = map[string]Decoder{} + +const maxLayerType = 2000 + +var ltMeta [maxLayerType]layerTypeMetadata +var ltMetaMap = map[LayerType]layerTypeMetadata{} + +// RegisterLayerType creates a new layer type and registers it globally. +// The number passed in must be unique, or a runtime panic will occur. Numbers +// 0-999 are reserved for the gopacket library. Numbers 1000-1999 should be +// used for common application-specific types, and are very fast. Any other +// number (negative or >= 2000) may be used for uncommon application-specific +// types, and are somewhat slower (they require a map lookup over an array +// index). +func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType { + if 0 <= num && num < maxLayerType { + if ltMeta[num].inUse { + panic("Layer type already exists") + } + } else { + if ltMetaMap[LayerType(num)].inUse { + panic("Layer type already exists") + } + } + return OverrideLayerType(num, meta) +} + +// OverrideLayerType acts like RegisterLayerType, except that if the layer type +// has already been registered, it overrides the metadata with the passed-in +// metadata intead of panicing. 
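+//
+// For example, to (re)register an application-specific layer type (the
+// name, number, and decodeMyProto helper here are illustrative, assuming
+// decodeMyProto has the Decoder signature):
+//  var LayerTypeMyProto = OverrideLayerType(1001, LayerTypeMetadata{
+//      Name:    "MyProto",
+//      Decoder: DecodeFunc(decodeMyProto),
+//  })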
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType { + if 0 <= num && num < maxLayerType { + ltMeta[num] = layerTypeMetadata{ + inUse: true, + LayerTypeMetadata: meta, + } + } else { + ltMetaMap[LayerType(num)] = layerTypeMetadata{ + inUse: true, + LayerTypeMetadata: meta, + } + } + DecodersByLayerName[meta.Name] = meta.Decoder + return LayerType(num) +} + +// Decode decodes the given data using the decoder registered with the layer +// type. +func (t LayerType) Decode(data []byte, c PacketBuilder) error { + var d Decoder + if 0 <= int(t) && int(t) < maxLayerType { + d = ltMeta[int(t)].Decoder + } else { + d = ltMetaMap[t].Decoder + } + if d != nil { + return d.Decode(data, c) + } + return fmt.Errorf("Layer type %v has no associated decoder", t) +} + +// String returns the string associated with this layer type. +func (t LayerType) String() (s string) { + if 0 <= int(t) && int(t) < maxLayerType { + s = ltMeta[int(t)].Name + } else { + s = ltMetaMap[t].Name + } + if s == "" { + s = strconv.Itoa(int(t)) + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/packet.go new file mode 100644 index 00000000..3a7c4b3d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/packet.go @@ -0,0 +1,864 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "io" + "net" + "os" + "reflect" + "runtime/debug" + "strings" + "syscall" + "time" +) + +// CaptureInfo provides standardized information about a packet captured off +// the wire or read from a file. +type CaptureInfo struct { + // Timestamp is the time the packet was captured, if that is known. + Timestamp time.Time + // CaptureLength is the total number of bytes read off of the wire. + CaptureLength int + // Length is the size of the original packet. Should always be >= + // CaptureLength. + Length int + // InterfaceIndex + InterfaceIndex int + // The packet source can place ancillary data of various types here. + // For example, the afpacket source can report the VLAN of captured + // packets this way. + AncillaryData []interface{} +} + +// PacketMetadata contains metadata for a packet. +type PacketMetadata struct { + CaptureInfo + // Truncated is true if packet decoding logic detects that there are fewer + // bytes in the packet than are detailed in various headers (for example, if + // the number of bytes in the IPv4 contents/payload is less than IPv4.Length). + // This is also set automatically for packets captured off the wire if + // CaptureInfo.CaptureLength < CaptureInfo.Length. + Truncated bool +} + +// Packet is the primary object used by gopacket. Packets are created by a +// Decoder's Decode call. A packet is made up of a set of Data, which +// is broken into a number of Layers as it is decoded. +type Packet interface { + //// Functions for outputting the packet as a human-readable string: + //// ------------------------------------------------------------------ + // String returns a human-readable string representation of the packet. + // It uses LayerString on each layer to output the layer. + String() string + // Dump returns a verbose human-readable string representation of the packet, + // including a hex dump of all layers. 
It uses LayerDump on each layer to + // output the layer. + Dump() string + + //// Functions for accessing arbitrary packet layers: + //// ------------------------------------------------------------------ + // Layers returns all layers in this packet, computing them as necessary + Layers() []Layer + // Layer returns the first layer in this packet of the given type, or nil + Layer(LayerType) Layer + // LayerClass returns the first layer in this packet of the given class, + // or nil. + LayerClass(LayerClass) Layer + + //// Functions for accessing specific types of packet layers. These functions + //// return the first layer of each type found within the packet. + //// ------------------------------------------------------------------ + // LinkLayer returns the first link layer in the packet + LinkLayer() LinkLayer + // NetworkLayer returns the first network layer in the packet + NetworkLayer() NetworkLayer + // TransportLayer returns the first transport layer in the packet + TransportLayer() TransportLayer + // ApplicationLayer returns the first application layer in the packet + ApplicationLayer() ApplicationLayer + // ErrorLayer is particularly useful, since it returns nil if the packet + // was fully decoded successfully, and non-nil if an error was encountered + // in decoding and the packet was only partially decoded. Thus, its output + // can be used to determine if the entire packet was able to be decoded. + ErrorLayer() ErrorLayer + + //// Functions for accessing data specific to the packet: + //// ------------------------------------------------------------------ + // Data returns the set of bytes that make up this entire packet. + Data() []byte + // Metadata returns packet metadata associated with this packet. + Metadata() *PacketMetadata +} + +// packet contains all the information we need to fulfill the Packet interface, +// and its two "subclasses" (yes, no such thing in Go, bear with me), +// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the +// various functions needed to access this information. +type packet struct { + // data contains the entire packet data for a packet + data []byte + // initialLayers is space for an initial set of layers already created inside + // the packet. 
+ initialLayers [6]Layer + // layers contains each layer we've already decoded + layers []Layer + // last is the last layer added to the packet + last Layer + // metadata is the PacketMetadata for this packet + metadata PacketMetadata + + decodeOptions DecodeOptions + + // Pointers to the various important layers + link LinkLayer + network NetworkLayer + transport TransportLayer + application ApplicationLayer + failure ErrorLayer +} + +func (p *packet) SetTruncated() { + p.metadata.Truncated = true +} + +func (p *packet) SetLinkLayer(l LinkLayer) { + if p.link == nil { + p.link = l + } +} + +func (p *packet) SetNetworkLayer(l NetworkLayer) { + if p.network == nil { + p.network = l + } +} + +func (p *packet) SetTransportLayer(l TransportLayer) { + if p.transport == nil { + p.transport = l + } +} + +func (p *packet) SetApplicationLayer(l ApplicationLayer) { + if p.application == nil { + p.application = l + } +} + +func (p *packet) SetErrorLayer(l ErrorLayer) { + if p.failure == nil { + p.failure = l + } +} + +func (p *packet) AddLayer(l Layer) { + p.layers = append(p.layers, l) + p.last = l +} + +func (p *packet) DumpPacketData() { + fmt.Fprint(os.Stderr, p.packetDump()) + os.Stderr.Sync() +} + +func (p *packet) Metadata() *PacketMetadata { + return &p.metadata +} + +func (p *packet) Data() []byte { + return p.data +} + +func (p *packet) DecodeOptions() *DecodeOptions { + return &p.decodeOptions +} + +func (p *packet) addFinalDecodeError(err error, stack []byte) { + fail := &DecodeFailure{err: err, stack: stack} + if p.last == nil { + fail.data = p.data + } else { + fail.data = p.last.LayerPayload() + } + p.AddLayer(fail) + p.SetErrorLayer(fail) +} + +func (p *packet) recoverDecodeError() { + if !p.decodeOptions.SkipDecodeRecovery { + if r := recover(); r != nil { + p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack()) + } + } +} + +// LayerString outputs an individual layer as a string. The layer is output +// in a single line, with no trailing newline. This function is specifically +// designed to do the right thing for most layers... it follows the following +// rules: +// * If the Layer has a String function, just output that. +// * Otherwise, output all exported fields in the layer, recursing into +// exported slices and structs. +// NOTE: This is NOT THE SAME AS fmt's "%#v". %#v will output both exported +// and unexported fields... many times packet layers contain unexported stuff +// that would just mess up the output of the layer, see for example the +// Payload layer and it's internal 'data' field, which contains a large byte +// array that would really mess up formatting. +func LayerString(l Layer) string { + return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false)) +} + +// Dumper dumps verbose information on a value. If a layer type implements +// Dumper, then its LayerDump() string will include the results in its output. +type Dumper interface { + Dump() string +} + +// LayerDump outputs a very verbose string representation of a layer. Its +// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()). +// It contains newlines and ends with a newline. 
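+//
+// A debug-printing sketch, assuming pkt is a gopacket.Packet (pkt is
+// illustrative):
+//  for _, l := range pkt.Layers() {
+//      fmt.Print(LayerDump(l))
+//  }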
+func LayerDump(l Layer) string { + var b bytes.Buffer + b.WriteString(LayerString(l)) + b.WriteByte('\n') + if d, ok := l.(Dumper); ok { + dump := d.Dump() + if dump != "" { + b.WriteString(dump) + if dump[len(dump)-1] != '\n' { + b.WriteByte('\n') + } + } + } + b.WriteString(hex.Dump(l.LayerContents())) + return b.String() +} + +// layerString outputs, recursively, a layer in a "smart" way. See docs for +// LayerString for more details. +// +// Params: +// i - value to write out +// anonymous: if we're currently recursing an anonymous member of a struct +// writeSpace: if we've already written a value in a struct, and need to +// write a space before writing more. This happens when we write various +// anonymous values, and need to keep writing more. +func layerString(v reflect.Value, anonymous bool, writeSpace bool) string { + // Let String() functions take precedence. + if v.CanInterface() { + if s, ok := v.Interface().(fmt.Stringer); ok { + return s.String() + } + } + // Reflect, and spit out all the exported fields as key=value. + switch v.Type().Kind() { + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return "nil" + } + r := v.Elem() + return layerString(r, anonymous, writeSpace) + case reflect.Struct: + var b bytes.Buffer + typ := v.Type() + if !anonymous { + b.WriteByte('{') + } + for i := 0; i < v.NumField(); i++ { + // Check if this is upper-case. + ftype := typ.Field(i) + f := v.Field(i) + if ftype.Anonymous { + anonStr := layerString(f, true, writeSpace) + writeSpace = writeSpace || anonStr != "" + b.WriteString(anonStr) + } else if ftype.PkgPath == "" { // exported + if writeSpace { + b.WriteByte(' ') + } + writeSpace = true + fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace)) + } + } + if !anonymous { + b.WriteByte('}') + } + return b.String() + case reflect.Slice: + var b bytes.Buffer + b.WriteByte('[') + if v.Len() > 4 { + fmt.Fprintf(&b, "..%d..", v.Len()) + } else { + for j := 0; j < v.Len(); j++ { + if j != 0 { + b.WriteString(", ") + } + b.WriteString(layerString(v.Index(j), false, false)) + } + } + b.WriteByte(']') + return b.String() + } + return fmt.Sprintf("%v", v.Interface()) +} + +const ( + longBytesLength = 128 +) + +// LongBytesGoString returns a string representation of the byte slice shortened +// using the format '{ ... ( bytes)}' if it +// exceeds a predetermined length. Can be used to avoid filling the display with +// very long byte strings. +func LongBytesGoString(buf []byte) string { + if len(buf) < longBytesLength { + return fmt.Sprintf("%#v", buf) + } + s := fmt.Sprintf("%#v", buf[:longBytesLength-1]) + s = strings.TrimSuffix(s, "}") + return fmt.Sprintf("%s ... 
(%d bytes)}", s, len(buf)) +} + +func baseLayerString(value reflect.Value) string { + t := value.Type() + content := value.Field(0) + c := make([]byte, content.Len()) + for i := range c { + c[i] = byte(content.Index(i).Uint()) + } + payload := value.Field(1) + p := make([]byte, payload.Len()) + for i := range p { + p[i] = byte(payload.Index(i).Uint()) + } + return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(), + LongBytesGoString(c), + LongBytesGoString(p)) +} + +func layerGoString(i interface{}, b *bytes.Buffer) { + if s, ok := i.(fmt.GoStringer); ok { + b.WriteString(s.GoString()) + return + } + + var v reflect.Value + var ok bool + if v, ok = i.(reflect.Value); !ok { + v = reflect.ValueOf(i) + } + switch v.Kind() { + case reflect.Ptr, reflect.Interface: + if v.Kind() == reflect.Ptr { + b.WriteByte('&') + } + layerGoString(v.Elem().Interface(), b) + case reflect.Struct: + t := v.Type() + b.WriteString(t.String()) + b.WriteByte('{') + for i := 0; i < v.NumField(); i++ { + if i > 0 { + b.WriteString(", ") + } + if t.Field(i).Name == "BaseLayer" { + fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i))) + } else if v.Field(i).Kind() == reflect.Struct { + fmt.Fprintf(b, "%s:", t.Field(i).Name) + layerGoString(v.Field(i), b) + } else if v.Field(i).Kind() == reflect.Ptr { + b.WriteByte('&') + layerGoString(v.Field(i), b) + } else { + fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i)) + } + } + b.WriteByte('}') + default: + fmt.Fprintf(b, "%#v", i) + } +} + +// LayerGoString returns a representation of the layer in Go syntax, +// taking care to shorten "very long" BaseLayer byte slices +func LayerGoString(l Layer) string { + b := new(bytes.Buffer) + layerGoString(l, b) + return b.String() +} + +func (p *packet) packetString() string { + var b bytes.Buffer + fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data())) + if p.metadata.Truncated { + b.WriteString(", truncated") + } + if p.metadata.Length > 0 { + fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength) + } + if !p.metadata.Timestamp.IsZero() { + fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp) + } + b.WriteByte('\n') + for i, l := range p.layers { + fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l)) + } + return b.String() +} + +func (p *packet) packetDump() string { + var b bytes.Buffer + fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data)) + for i, l := range p.layers { + fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l)) + } + return b.String() +} + +// eagerPacket is a packet implementation that does eager decoding. Upon +// initial construction, it decodes all the layers it can from packet data. +// eagerPacket implements Packet and PacketBuilder. +type eagerPacket struct { + packet +} + +var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type") + +func (p *eagerPacket) NextDecoder(next Decoder) error { + if next == nil { + return errNilDecoder + } + if p.last == nil { + return errors.New("NextDecoder called, but no layers added yet") + } + d := p.last.LayerPayload() + if len(d) == 0 { + return nil + } + // Since we're eager, immediately call the next decoder. 
+ return next.Decode(d, p) +} +func (p *eagerPacket) initialDecode(dec Decoder) { + defer p.recoverDecodeError() + err := dec.Decode(p.data, p) + if err != nil { + p.addFinalDecodeError(err, nil) + } +} +func (p *eagerPacket) LinkLayer() LinkLayer { + return p.link +} +func (p *eagerPacket) NetworkLayer() NetworkLayer { + return p.network +} +func (p *eagerPacket) TransportLayer() TransportLayer { + return p.transport +} +func (p *eagerPacket) ApplicationLayer() ApplicationLayer { + return p.application +} +func (p *eagerPacket) ErrorLayer() ErrorLayer { + return p.failure +} +func (p *eagerPacket) Layers() []Layer { + return p.layers +} +func (p *eagerPacket) Layer(t LayerType) Layer { + for _, l := range p.layers { + if l.LayerType() == t { + return l + } + } + return nil +} +func (p *eagerPacket) LayerClass(lc LayerClass) Layer { + for _, l := range p.layers { + if lc.Contains(l.LayerType()) { + return l + } + } + return nil +} +func (p *eagerPacket) String() string { return p.packetString() } +func (p *eagerPacket) Dump() string { return p.packetDump() } + +// lazyPacket does lazy decoding on its packet data. On construction it does +// no initial decoding. For each function call, it decodes only as many layers +// as are necessary to compute the return value for that function. +// lazyPacket implements Packet and PacketBuilder. +type lazyPacket struct { + packet + next Decoder +} + +func (p *lazyPacket) NextDecoder(next Decoder) error { + if next == nil { + return errNilDecoder + } + p.next = next + return nil +} +func (p *lazyPacket) decodeNextLayer() { + if p.next == nil { + return + } + d := p.data + if p.last != nil { + d = p.last.LayerPayload() + } + next := p.next + p.next = nil + // We've just set p.next to nil, so if we see we have no data, this should be + // the final call we get to decodeNextLayer if we return here. 
+ if len(d) == 0 { + return + } + defer p.recoverDecodeError() + err := next.Decode(d, p) + if err != nil { + p.addFinalDecodeError(err, nil) + } +} +func (p *lazyPacket) LinkLayer() LinkLayer { + for p.link == nil && p.next != nil { + p.decodeNextLayer() + } + return p.link +} +func (p *lazyPacket) NetworkLayer() NetworkLayer { + for p.network == nil && p.next != nil { + p.decodeNextLayer() + } + return p.network +} +func (p *lazyPacket) TransportLayer() TransportLayer { + for p.transport == nil && p.next != nil { + p.decodeNextLayer() + } + return p.transport +} +func (p *lazyPacket) ApplicationLayer() ApplicationLayer { + for p.application == nil && p.next != nil { + p.decodeNextLayer() + } + return p.application +} +func (p *lazyPacket) ErrorLayer() ErrorLayer { + for p.failure == nil && p.next != nil { + p.decodeNextLayer() + } + return p.failure +} +func (p *lazyPacket) Layers() []Layer { + for p.next != nil { + p.decodeNextLayer() + } + return p.layers +} +func (p *lazyPacket) Layer(t LayerType) Layer { + for _, l := range p.layers { + if l.LayerType() == t { + return l + } + } + numLayers := len(p.layers) + for p.next != nil { + p.decodeNextLayer() + for _, l := range p.layers[numLayers:] { + if l.LayerType() == t { + return l + } + } + numLayers = len(p.layers) + } + return nil +} +func (p *lazyPacket) LayerClass(lc LayerClass) Layer { + for _, l := range p.layers { + if lc.Contains(l.LayerType()) { + return l + } + } + numLayers := len(p.layers) + for p.next != nil { + p.decodeNextLayer() + for _, l := range p.layers[numLayers:] { + if lc.Contains(l.LayerType()) { + return l + } + } + numLayers = len(p.layers) + } + return nil +} +func (p *lazyPacket) String() string { p.Layers(); return p.packetString() } +func (p *lazyPacket) Dump() string { p.Layers(); return p.packetDump() } + +// DecodeOptions tells gopacket how to decode a packet. +type DecodeOptions struct { + // Lazy decoding decodes the minimum number of layers needed to return data + // for a packet at each function call. Be careful using this with concurrent + // packet processors, as each call to packet.* could mutate the packet, and + // two concurrent function calls could interact poorly. + Lazy bool + // NoCopy decoding doesn't copy its input buffer into storage that's owned by + // the packet. If you can guarantee that the bytes underlying the slice + // passed into NewPacket aren't going to be modified, this can be faster. If + // there's any chance that those bytes WILL be changed, this will invalidate + // your packets. + NoCopy bool + // SkipDecodeRecovery skips over panic recovery during packet decoding. + // Normally, when packets decode, if a panic occurs, that panic is captured + // by a recover(), and a DecodeFailure layer is added to the packet detailing + // the issue. If this flag is set, panics are instead allowed to continue up + // the stack. + SkipDecodeRecovery bool + // DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP + // decoder. If true, we should try to decode layers after TCP in single packets. + // This is disabled by default because the reassembly package drives the decoding + // of TCP payload data after reassembly. + DecodeStreamsAsDatagrams bool +} + +// Default decoding provides the safest (but slowest) method for decoding +// packets. It eagerly processes all layers (so it's concurrency-safe) and it +// copies its input buffer upon creation of the packet (so the packet remains +// valid if the underlying slice is modified. 
Both of these take time, +// though, so beware. If you can guarantee that the packet will only be used +// by one goroutine at a time, set Lazy decoding. If you can guarantee that +// the underlying slice won't change, set NoCopy decoding. +var Default = DecodeOptions{} + +// Lazy is a DecodeOptions with just Lazy set. +var Lazy = DecodeOptions{Lazy: true} + +// NoCopy is a DecodeOptions with just NoCopy set. +var NoCopy = DecodeOptions{NoCopy: true} + +// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set. +var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true} + +// NewPacket creates a new Packet object from a set of bytes. The +// firstLayerDecoder tells it how to interpret the first layer from the bytes, +// future layers will be generated from that first layer automatically. +func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet { + if !options.NoCopy { + dataCopy := make([]byte, len(data)) + copy(dataCopy, data) + data = dataCopy + } + if options.Lazy { + p := &lazyPacket{ + packet: packet{data: data, decodeOptions: options}, + next: firstLayerDecoder, + } + p.layers = p.initialLayers[:0] + // Crazy craziness: + // If the following return statemet is REMOVED, and Lazy is FALSE, then + // eager packet processing becomes 17% FASTER. No, there is no logical + // explanation for this. However, it's such a hacky micro-optimization that + // we really can't rely on it. It appears to have to do with the size the + // compiler guesses for this function's stack space, since one symptom is + // that with the return statement in place, we more than double calls to + // runtime.morestack/runtime.lessstack. We'll hope the compiler gets better + // over time and we get this optimization for free. Until then, we'll have + // to live with slower packet processing. + return p + } + p := &eagerPacket{ + packet: packet{data: data, decodeOptions: options}, + } + p.layers = p.initialLayers[:0] + p.initialDecode(firstLayerDecoder) + return p +} + +// PacketDataSource is an interface for some source of packet data. Users may +// create their own implementations, or use the existing implementations in +// gopacket/pcap (libpcap, allows reading from live interfaces or from +// pcap files) or gopacket/pfring (PF_RING, allows reading from live +// interfaces). +type PacketDataSource interface { + // ReadPacketData returns the next packet available from this data source. + // It returns: + // data: The bytes of an individual packet. + // ci: Metadata about the capture + // err: An error encountered while reading packet data. If err != nil, + // then data/ci will be ignored. + ReadPacketData() (data []byte, ci CaptureInfo, err error) +} + +// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set +// of internal PacketDataSources, each of which will stop with io.EOF after +// reading a finite number of packets. The returned PacketDataSource will +// return all packets from the first finite source, followed by all packets from +// the second, etc. Once all finite sources have returned io.EOF, the returned +// source will as well. 
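+//
+// A minimal sketch, assuming h1 and h2 are finite sources such as handles
+// returned by pcap.OpenOffline (h1 and h2 are illustrative):
+//  src := ConcatFinitePacketDataSources(h1, h2)
+//  for {
+//      data, ci, err := src.ReadPacketData()
+//      if err == io.EOF {
+//          break
+//      } else if err != nil {
+//          log.Println("Error:", err)
+//          continue
+//      }
+//      _, _ = data, ci // process one packet here
+//  }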
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource { + c := concat(pds) + return &c +} + +type concat []PacketDataSource + +func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) { + for len(*c) > 0 { + data, ci, err = (*c)[0].ReadPacketData() + if err == io.EOF { + *c = (*c)[1:] + continue + } + return + } + return nil, CaptureInfo{}, io.EOF +} + +// ZeroCopyPacketDataSource is an interface to pull packet data from sources +// that allow data to be returned without copying to a user-controlled buffer. +// It's very similar to PacketDataSource, except that the caller must be more +// careful in how the returned buffer is handled. +type ZeroCopyPacketDataSource interface { + // ZeroCopyReadPacketData returns the next packet available from this data source. + // It returns: + // data: The bytes of an individual packet. Unlike with + // PacketDataSource's ReadPacketData, the slice returned here points + // to a buffer owned by the data source. In particular, the bytes in + // this buffer may be changed by future calls to + // ZeroCopyReadPacketData. Do not use the returned buffer after + // subsequent ZeroCopyReadPacketData calls. + // ci: Metadata about the capture + // err: An error encountered while reading packet data. If err != nil, + // then data/ci will be ignored. + ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error) +} + +// PacketSource reads in packets from a PacketDataSource, decodes them, and +// returns them. +// +// There are currently two different methods for reading packets in through +// a PacketSource: +// +// Reading With Packets Function +// +// This method is the most convenient and easiest to code, but lacks +// flexibility. Packets returns a 'chan Packet', then asynchronously writes +// packets into that channel. Packets uses a blocking channel, and closes +// it if an io.EOF is returned by the underlying PacketDataSource. All other +// PacketDataSource errors are ignored and discarded. +// for packet := range packetSource.Packets() { +// ... +// } +// +// Reading With NextPacket Function +// +// This method is the most flexible, and exposes errors that may be +// encountered by the underlying PacketDataSource. It's also the fastest +// in a tight loop, since it doesn't have the overhead of a channel +// read/write. However, it requires the user to handle errors, most +// importantly the io.EOF error in cases where packets are being read from +// a file. +// for { +// packet, err := packetSource.NextPacket() +// if err == io.EOF { +// break +// } else if err != nil { +// log.Println("Error:", err) +// continue +// } +// handlePacket(packet) // Do something with each packet. +// } +type PacketSource struct { + source PacketDataSource + decoder Decoder + // DecodeOptions is the set of options to use for decoding each piece + // of packet data. This can/should be changed by the user to reflect the + // way packets should be decoded. + DecodeOptions + c chan Packet +} + +// NewPacketSource creates a packet data source. +func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource { + return &PacketSource{ + source: source, + decoder: decoder, + } +} + +// NextPacket returns the next decoded packet from the PacketSource. On error, +// it returns a nil packet and a non-nil error. 
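+// The returned packet's Metadata is populated from the source's CaptureInfo,
+// and Truncated is set when CaptureLength < Length.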
+func (p *PacketSource) NextPacket() (Packet, error) { + data, ci, err := p.source.ReadPacketData() + if err != nil { + return nil, err + } + packet := NewPacket(data, p.decoder, p.DecodeOptions) + m := packet.Metadata() + m.CaptureInfo = ci + m.Truncated = m.Truncated || ci.CaptureLength < ci.Length + return packet, nil +} + +// packetsToChannel reads in all packets from the packet source and sends them +// to the given channel. This routine terminates when a non-temporary error +// is returned by NextPacket(). +func (p *PacketSource) packetsToChannel() { + defer close(p.c) + for { + packet, err := p.NextPacket() + if err == nil { + p.c <- packet + continue + } + + // Immediately retry for temporary network errors + if nerr, ok := err.(net.Error); ok && nerr.Temporary() { + continue + } + + // Immediately retry for EAGAIN + if err == syscall.EAGAIN { + continue + } + + // Immediately break for known unrecoverable errors + if err == io.EOF || err == io.ErrUnexpectedEOF || + err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer || + err == syscall.EBADF || + strings.Contains(err.Error(), "use of closed file") { + break + } + + // Sleep briefly and try again + time.Sleep(time.Millisecond * time.Duration(5)) + } +} + +// Packets returns a channel of packets, allowing easy iterating over +// packets. Packets will be asynchronously read in from the underlying +// PacketDataSource and written to the returned channel. If the underlying +// PacketDataSource returns an io.EOF error, the channel will be closed. +// If any other error is encountered, it is ignored. +// +// for packet := range packetSource.Packets() { +// handlePacket(packet) // Do something with each packet. +// } +// +// If called more than once, returns the same channel. +func (p *PacketSource) Packets() chan Packet { + if p.c == nil { + p.c = make(chan Packet, 1000) + go p.packetsToChannel() + } + return p.c +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/parser.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/parser.go new file mode 100644 index 00000000..4a4676f1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/parser.go @@ -0,0 +1,350 @@ +// Copyright 2012 Google, Inc. All rights reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the LICENSE file in the root of the source +// tree. + +package gopacket + +import ( + "fmt" +) + +// A container for single LayerType->DecodingLayer mapping. +type decodingLayerElem struct { + typ LayerType + dec DecodingLayer +} + +// DecodingLayer is an interface for packet layers that can decode themselves. +// +// The important part of DecodingLayer is that they decode themselves in-place. +// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to +// the new state defined by the data passed in. A returned error leaves the +// DecodingLayer in an unknown intermediate state, thus its fields should not be +// trusted. +// +// Because the DecodingLayer is resetting its own fields, a call to +// DecodeFromBytes should normally not require any memory allocation. +type DecodingLayer interface { + // DecodeFromBytes resets the internal state of this layer to the state + // defined by the passed-in bytes. Slices in the DecodingLayer may + // reference the passed-in data, so care should be taken to copy it + // first should later modification of data be required before the + // DecodingLayer is discarded. 
+ DecodeFromBytes(data []byte, df DecodeFeedback) error + // CanDecode returns the set of LayerTypes this DecodingLayer can + // decode. For Layers that are also DecodingLayers, this will most + // often be that Layer's LayerType(). + CanDecode() LayerClass + // NextLayerType returns the LayerType which should be used to decode + // the LayerPayload. + NextLayerType() LayerType + // LayerPayload is the set of bytes remaining to decode after a call to + // DecodeFromBytes. + LayerPayload() []byte +} + +// DecodingLayerFunc decodes given packet and stores decoded LayerType +// values into specified slice. Returns either first encountered +// unsupported LayerType value or decoding error. In case of success, +// returns (LayerTypeZero, nil). +type DecodingLayerFunc func([]byte, *[]LayerType) (LayerType, error) + +// DecodingLayerContainer stores all DecodingLayer-s and serves as a +// searching tool for DecodingLayerParser. +type DecodingLayerContainer interface { + // Put adds new DecodingLayer to container. The new instance of + // the same DecodingLayerContainer is returned so it may be + // implemented as a value receiver. + Put(DecodingLayer) DecodingLayerContainer + // Decoder returns DecodingLayer to decode given LayerType and + // true if it was found. If no decoder found, return false. + Decoder(LayerType) (DecodingLayer, bool) + // LayersDecoder returns DecodingLayerFunc which decodes given + // packet, starting with specified LayerType and DecodeFeedback. + LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc +} + +// DecodingLayerSparse is a sparse array-based implementation of +// DecodingLayerContainer. Each DecodingLayer is addressed in an +// allocated slice by LayerType value itself. Though this is the +// fastest container it may be memory-consuming if used with big +// LayerType values. +type DecodingLayerSparse []DecodingLayer + +// Put implements DecodingLayerContainer interface. +func (dl DecodingLayerSparse) Put(d DecodingLayer) DecodingLayerContainer { + maxLayerType := LayerType(len(dl) - 1) + for _, typ := range d.CanDecode().LayerTypes() { + if typ > maxLayerType { + maxLayerType = typ + } + } + + if extra := maxLayerType - LayerType(len(dl)) + 1; extra > 0 { + dl = append(dl, make([]DecodingLayer, extra)...) + } + + for _, typ := range d.CanDecode().LayerTypes() { + dl[typ] = d + } + return dl +} + +// LayersDecoder implements DecodingLayerContainer interface. +func (dl DecodingLayerSparse) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc { + return LayersDecoder(dl, first, df) +} + +// Decoder implements DecodingLayerContainer interface. +func (dl DecodingLayerSparse) Decoder(typ LayerType) (DecodingLayer, bool) { + if int64(typ) < int64(len(dl)) { + decoder := dl[typ] + return decoder, decoder != nil + } + return nil, false +} + +// DecodingLayerArray is an array-based implementation of +// DecodingLayerContainer. Each DecodingLayer is searched linearly in +// an allocated slice in one-by-one fashion. +type DecodingLayerArray []decodingLayerElem + +// Put implements DecodingLayerContainer interface. +func (dl DecodingLayerArray) Put(d DecodingLayer) DecodingLayerContainer { +TYPES: + for _, typ := range d.CanDecode().LayerTypes() { + for i := range dl { + if dl[i].typ == typ { + dl[i].dec = d + continue TYPES + } + } + dl = append(dl, decodingLayerElem{typ, d}) + } + return dl +} + +// Decoder implements DecodingLayerContainer interface. 
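+// Lookup is a linear scan over the array; (nil, false) is returned when no
+// decoder is registered for typ.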
+func (dl DecodingLayerArray) Decoder(typ LayerType) (DecodingLayer, bool) {
+	for i := range dl {
+		if dl[i].typ == typ {
+			return dl[i].dec, true
+		}
+	}
+	return nil, false
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// DecodingLayerMap is a map-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched in a map
+// hashed by LayerType value.
+type DecodingLayerMap map[LayerType]DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Put(d DecodingLayer) DecodingLayerContainer {
+	for _, typ := range d.CanDecode().LayerTypes() {
+		if dl == nil {
+			dl = make(map[LayerType]DecodingLayer)
+		}
+		dl[typ] = d
+	}
+	return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Decoder(typ LayerType) (DecodingLayer, bool) {
+	d, ok := dl[typ]
+	return d, ok
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// Static code check.
+var (
+	_ = []DecodingLayerContainer{
+		DecodingLayerSparse(nil),
+		DecodingLayerMap(nil),
+		DecodingLayerArray(nil),
+	}
+)
+
+// DecodingLayerParser parses a given set of layer types. See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+	// DecodingLayerParserOptions is the set of options available to the
+	// user to define the parser's behavior.
+	DecodingLayerParserOptions
+	dlc   DecodingLayerContainer
+	first LayerType
+	df    DecodeFeedback
+
+	decodeFunc DecodingLayerFunc
+
+	// Truncated is set when a decode layer detects that the packet has been
+	// truncated.
+	Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser. This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+	l.SetDecodingLayerContainer(l.dlc.Put(d))
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser. Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+	l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder. Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+//
+// NewDecodingLayerParser uses the DecodingLayerMap container by
+// default.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+	dlp := &DecodingLayerParser{first: first}
+	dlp.df = dlp // Cast this once to the interface
+	// default container
+	dlc := DecodingLayerContainer(DecodingLayerMap(make(map[LayerType]DecodingLayer)))
+	for _, d := range decoders {
+		dlc = dlc.Put(d)
+	}
+
+	dlp.SetDecodingLayerContainer(dlc)
+	return dlp
+}
+
+// SetDecodingLayerContainer specifies the container with decoders. This
+// call replaces all decoders already registered in the given instance of
+// DecodingLayerParser.
+func (l *DecodingLayerParser) SetDecodingLayerContainer(dlc DecodingLayerContainer) {
+	l.dlc = dlc
+	l.decodeFunc = l.dlc.LayersDecoder(l.first, l.df)
+}
+
+// DecodeLayers decodes as many layers as possible from the given data. It
+// initially treats the data as the 'first' layer type given to
+// NewDecodingLayerParser, then uses NextLayerType on each subsequent decoded
+// layer until it gets to a layer type it doesn't know how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice. DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated. This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+//  func main() {
+//    var eth layers.Ethernet
+//    var ip4 layers.IPv4
+//    var ip6 layers.IPv6
+//    var tcp layers.TCP
+//    var udp layers.UDP
+//    var payload gopacket.Payload
+//    parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload)
+//    var source gopacket.PacketDataSource = getMyDataSource()
+//    decodedLayers := make([]gopacket.LayerType, 0, 10)
+//    for {
+//      data, _, err := source.ReadPacketData()
+//      if err != nil {
+//        fmt.Println("Error reading packet data: ", err)
+//        continue
+//      }
+//      fmt.Println("Decoding packet")
+//      err = parser.DecodeLayers(data, &decodedLayers)
+//      for _, typ := range decodedLayers {
+//        fmt.Println("  Successfully decoded layer type", typ)
+//        switch typ {
+//          case layers.LayerTypeEthernet:
+//            fmt.Println("    Eth ", eth.SrcMAC, eth.DstMAC)
+//          case layers.LayerTypeIPv4:
+//            fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+//          case layers.LayerTypeIPv6:
+//            fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+//          case layers.LayerTypeTCP:
+//            fmt.Println("    TCP ", tcp.SrcPort, tcp.DstPort)
+//          case layers.LayerTypeUDP:
+//            fmt.Println("    UDP ", udp.SrcPort, udp.DstPort)
+//        }
+//      }
+//      if parser.Truncated {
+//        fmt.Println("  Packet has been truncated")
+//      }
+//      if err != nil {
+//        fmt.Println("  Error encountered:", err)
+//      }
+//    }
+//  }
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+	l.Truncated = false
+	if !l.IgnorePanic {
+		defer panicToError(&err)
+	}
+	typ, err := l.decodeFunc(data, decoded)
+	if typ != LayerTypeZero {
+		// no decoder
+		if l.IgnoreUnsupported {
+			return nil
+		}
+		return UnsupportedLayerType(typ)
+	}
+	return err
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+	return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+	if r := recover(); r != nil {
+		*e = fmt.Errorf("panic: %v", r)
+	}
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a
+// given DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+	// IgnorePanic determines whether a DecodingLayerParser should stop
+	// panics on its own (by returning them as an error from DecodeLayers)
+	// or should allow them to propagate up the stack. Handling errors does
+	// add latency to the process of decoding layers, but is much safer for
+	// callers. IgnorePanic defaults to false; thus, if the caller does
+	// nothing, decode panics will be returned as errors.
+	IgnorePanic bool
+	// IgnoreUnsupported will stop parsing and return a nil error when it
+	// encounters a layer it doesn't have a parser for, instead of returning an
+	// UnsupportedLayerType error. If this is true, it's up to the caller to make
+	// sure that all expected layers have been parsed (by checking the decoded
+	// slice).
+	IgnoreUnsupported bool
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/time.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/time.go
new file mode 100644
index 00000000..6d116cdf
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
+type TimestampResolution struct {
+	Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+	return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
+func (t TimestampResolution) ToDuration() time.Duration {
+	if t.Base == 0 {
+		return 0
+	}
+	if t.Exponent == 0 {
+		return time.Second
+	}
+	switch t.Base {
+	case 10:
+		return time.Duration(math.Pow10(t.Exponent + 9))
+	case 2:
+		if t.Exponent < 0 {
+			return time.Second >> uint(-t.Exponent)
+		}
+		return time.Second << uint(t.Exponent)
+	default:
+		// this might lose precision
+		return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+	}
+}
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have TimestampResolutionNanosecond due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
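+//
+// For example (illustrative only; the values follow from ToDuration above):
+//   gopacket.TimestampResolutionMicrosecond.ToDuration() // 1µs
+//   gopacket.TimestampResolutionNanosecond.ToDuration()  // 1ns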
+type PacketSourceResolution interface {
+	// Resolution returns the timestamp resolution of acquired timestamps
+	// before scaling to TimestampResolutionNanosecond.
+	Resolution() TimestampResolution
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/writer.go b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/writer.go
new file mode 100644
index 00000000..5d303dc4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/google/gopacket/writer.go
@@ -0,0 +1,232 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using a SerializeBuffer.
+type SerializableLayer interface {
+	// SerializeTo writes this layer to a slice, growing that slice if necessary
+	// to make it fit the layer's data.
+	// Args:
+	//  b:  SerializeBuffer to write this layer on to.  When called, b.Bytes()
+	//      is the payload this layer should wrap, if any.  Note that this
+	//      layer can either prepend itself (common), append itself
+	//      (uncommon), or both (sometimes padding or footers are required at
+	//      the end of packet data).  It's also possible (though probably very
+	//      rarely needed) to overwrite any bytes in the current payload.
+	//      After this call, b.Bytes() should return the byte encoding of
+	//      this layer wrapping the original b.Bytes() payload.
+	//  opts:  options to use while writing out data.
+	// Returns:
+	//  error if a problem was encountered during encoding.  If an error is
+	//  returned, the bytes in data should be considered invalidated, and
+	//  not used.
+	//
+	// SerializeTo calls SHOULD entirely ignore LayerContents and
+	// LayerPayload.  It just serializes based on struct fields, neither
+	// modifying nor using contents/payload.
+	SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+	// LayerType returns the type of the layer that is being serialized to the buffer
+	LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+	// FixLengths determines whether, during serialization, layers should fix
+	// the values for any length field that depends on the payload.
+	FixLengths bool
+	// ComputeChecksums determines whether, during serialization, layers
+	// should recompute checksums based on their payloads.
+	ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte. Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zeroed out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work out, meaning that we often need to prepend bytes. This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
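+//
+// A minimal sketch of the prepend-then-append pattern this enables
+// (illustrative only):
+//   buf := gopacket.NewSerializeBuffer()
+//   payload, _ := buf.AppendBytes(4)  // reserve space for the payload
+//   copy(payload, "data")
+//   hdr, _ := buf.PrependBytes(2)     // header bytes land in front
+//   hdr[0], hdr[1] = 0xca, 0xfe
+//   fmt.Printf("% x\n", buf.Bytes())  // ca fe 64 61 74 61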
+//
+// It can be reused via Clear. Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+// 1) Reusing a write buffer is generally much faster than creating a new one,
+// and with the default implementation it avoids additional memory allocations.
+// 2) If a byte slice from a previous Bytes() call will continue to be used,
+// it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer. I.e., if you make a set of
+// Prepend/Append calls, then clear, then make the same calls with the same
+// sizes, the second round (and all future similar rounds) shouldn't allocate
+// any new memory.
+type SerializeBuffer interface {
+	// Bytes returns the contiguous set of bytes collected so far by Prepend/Append
+	// calls. The slice returned by Bytes will be modified by future Clear calls,
+	// so if you're planning on clearing this SerializeBuffer, you may want to copy
+	// Bytes somewhere safe first.
+	Bytes() []byte
+	// PrependBytes returns a set of bytes which prepends the current bytes in this
+	// buffer. These bytes start in an indeterminate state, so they should be
+	// overwritten by the caller. The caller must only call PrependBytes if they
+	// know they're going to immediately overwrite all bytes returned.
+	PrependBytes(num int) ([]byte, error)
+	// AppendBytes returns a set of bytes which appends the current bytes in this
+	// buffer. These bytes start in an indeterminate state, so they should be
+	// overwritten by the caller. The caller must only call AppendBytes if they
+	// know they're going to immediately overwrite all bytes returned.
+	AppendBytes(num int) ([]byte, error)
+	// Clear resets the SerializeBuffer to a new, empty buffer. After a call to
+	// Clear, the byte slice returned by any previous call to Bytes() for this
+	// buffer should be considered invalidated.
+	Clear() error
+	// Layers returns all the Layers that have been successfully serialized into this buffer
+	// already.
+	Layers() []LayerType
+	// PushLayer adds the current Layer to the list of Layers that have been serialized
+	// into this buffer.
+	PushLayer(LayerType)
+}
+
+type serializeBuffer struct {
+	data                []byte
+	start               int
+	prepended, appended int
+	layers              []LayerType
+}
+
+// NewSerializeBuffer creates a new instance of the default implementation of
+// the SerializeBuffer interface.
+func NewSerializeBuffer() SerializeBuffer {
+	return &serializeBuffer{}
+}
+
+// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
+// expected number of bytes prepended/appended. This tends to decrease the
+// number of memory allocations made by the buffer during writes.
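+// For instance, a buffer expected to prepend Ethernet+IPv4+TCP headers
+// (14+20+20 bytes) ahead of a small payload might be sized as follows
+// (illustrative sizing, not a rule):
+//   buf := gopacket.NewSerializeBufferExpectedSize(54, 128)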
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
+	return &serializeBuffer{
+		data:      make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
+		start:     expectedPrependLength,
+		prepended: expectedPrependLength,
+		appended:  expectedAppendLength,
+	}
+}
+
+func (w *serializeBuffer) Bytes() []byte {
+	return w.data[w.start:]
+}
+
+func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
+	if num < 0 {
+		panic("num < 0")
+	}
+	if w.start < num {
+		toPrepend := w.prepended
+		if toPrepend < num {
+			toPrepend = num
+		}
+		w.prepended += toPrepend
+		length := cap(w.data) + toPrepend
+		newData := make([]byte, length)
+		newStart := w.start + toPrepend
+		copy(newData[newStart:], w.data[w.start:])
+		w.start = newStart
+		w.data = newData[:toPrepend+len(w.data)]
+	}
+	w.start -= num
+	return w.data[w.start : w.start+num], nil
+}
+
+func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
+	if num < 0 {
+		panic("num < 0")
+	}
+	initialLength := len(w.data)
+	if cap(w.data)-initialLength < num {
+		toAppend := w.appended
+		if toAppend < num {
+			toAppend = num
+		}
+		w.appended += toAppend
+		newData := make([]byte, cap(w.data)+toAppend)
+		copy(newData[w.start:], w.data[w.start:])
+		w.data = newData[:initialLength]
+	}
+	// Grow the buffer.  We know it'll be under capacity given above.
+	w.data = w.data[:initialLength+num]
+	return w.data[initialLength:], nil
+}
+
+func (w *serializeBuffer) Clear() error {
+	w.start = w.prepended
+	w.data = w.data[:w.start]
+	w.layers = w.layers[:0]
+	return nil
+}
+
+func (w *serializeBuffer) Layers() []LayerType {
+	return w.layers
+}
+
+func (w *serializeBuffer) PushLayer(l LayerType) {
+	w.layers = append(w.layers, l)
+}
+
+// SerializeLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other. Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes().
+//
+// Example:
+//   buf := gopacket.NewSerializeBuffer()
+//   opts := gopacket.SerializeOptions{}
+//   gopacket.SerializeLayers(buf, opts, a, b, c)
+//   firstPayload := buf.Bytes()  // contains byte representation of a(b(c))
+//   gopacket.SerializeLayers(buf, opts, d, e, f)
+//   secondPayload := buf.Bytes()  // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call clears buf.
+func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
+	w.Clear()
+	for i := len(layers) - 1; i >= 0; i-- {
+		layer := layers[i]
+		err := layer.SerializeTo(w, opts)
+		if err != nil {
+			return err
+		}
+		w.PushLayer(layer.LayerType())
+	}
+	return nil
+}
+
+// SerializePacket is a convenience function that calls SerializeLayers
+// on packet's Layers().
+// It returns an error if one of the packet layers is not a SerializableLayer.
+func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
+	sls := []SerializableLayer{}
+	for _, layer := range packet.Layers() {
+		sl, ok := layer.(SerializableLayer)
+		if !ok {
+			return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
+		}
+		sls = append(sls, sl)
+	}
+	return SerializeLayers(buf, opts, sls...)
+} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md new file mode 100644 index 00000000..b351def9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/CHANGELOG.md @@ -0,0 +1,25 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases/tag/v1.2.0) - 2018-06-04 + +### Added + +* Require go 1.9 or later and test against these versions in CI. +* Provide metrics object as `prometheus.Collector`, for conventional metric registration. +* Support non-default/global Prometheus registry. +* Allow configuring counters with `prometheus.CounterOpts`. + +### Changed + +* Remove usage of deprecated `grpc.Code()`. +* Remove usage of deprecated `grpc.Errorf` and replace with `status.Errorf`. + +--- + +This changelog was started with version `v1.2.0`, for earlier versions refer to the respective [GitHub releases](https://github.com/grpc-ecosystem/go-grpc-prometheus/releases). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE new file mode 100644 index 00000000..b2b06503 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
\ No newline at end of file
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
new file mode 100644
index 00000000..b101dffc
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/README.md
@@ -0,0 +1,250 @@
+# Go gRPC Interceptors for Prometheus monitoring
+
+[![Travis Build](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus.svg)](https://travis-ci.org/grpc-ecosystem/go-grpc-prometheus)
+[![Go Report Card](https://goreportcard.com/badge/github.com/grpc-ecosystem/go-grpc-prometheus)](http://goreportcard.com/report/grpc-ecosystem/go-grpc-prometheus)
+[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/github.com/grpc-ecosystem/go-grpc-prometheus)
+[![SourceGraph](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/-/badge.svg)](https://sourcegraph.com/github.com/grpc-ecosystem/go-grpc-prometheus/?badge)
+[![codecov](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus/branch/master/graph/badge.svg)](https://codecov.io/gh/grpc-ecosystem/go-grpc-prometheus)
+[![Slack](https://img.shields.io/badge/join%20slack-%23go--grpc--prometheus-brightgreen.svg)](https://join.slack.com/t/improbable-eng/shared_invite/enQtMzQ1ODcyMzQ5MjM4LWY5ZWZmNGM2ODc5MmViNmQ3ZTA3ZTY3NzQwOTBlMTkzZmIxZTIxODk0OWU3YjZhNWVlNDU3MDlkZGViZjhkMjc)
+[![Apache 2.0 License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](LICENSE)
+
+[Prometheus](https://prometheus.io/) monitoring for your [gRPC Go](https://github.com/grpc/grpc-go) servers and clients.
+
+A sister implementation for [gRPC Java](https://github.com/grpc/grpc-java) (same metrics, same semantics) is in [grpc-ecosystem/java-grpc-prometheus](https://github.com/grpc-ecosystem/java-grpc-prometheus).
+
+## Interceptors
+
+[gRPC Go](https://github.com/grpc/grpc-go) recently acquired support for Interceptors, i.e. middleware that is executed
+by a gRPC Server before the request is passed onto the user's application logic. It is a perfect way to implement
+common patterns: auth, logging and... monitoring.
+
+To use Interceptors in chains, please see [`go-grpc-middleware`](https://github.com/mwitkow/go-grpc-middleware).
+
+This library requires Go 1.9 or later.
+
+## Usage
+
+There are two types of interceptors: client-side and server-side. This package provides monitoring Interceptors for both.
+
+### Server-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+    // Initialize your gRPC server's interceptor.
+    myServer := grpc.NewServer(
+        grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+        grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+    )
+    // Register your gRPC service implementations.
+    myservice.RegisterMyServiceServer(myServer, &myServiceImpl{})
+    // After all your registrations, make sure all of the Prometheus metrics are initialized.
+    grpc_prometheus.Register(myServer)
+    // Register Prometheus metrics handler.
+    http.Handle("/metrics", promhttp.Handler())
+...
+```
+
+### Client-side
+
+```go
+import "github.com/grpc-ecosystem/go-grpc-prometheus"
+...
+	clientConn, err = grpc.Dial(
+		address,
+		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+	)
+	client = pb_testproto.NewTestServiceClient(clientConn)
+	resp, err := client.PingEmpty(s.ctx, &myservice.Request{Msg: "hello"})
+...
+```
+
+# Metrics
+
+## Labels
+
+All server-side metrics start with `grpc_server` as Prometheus subsystem name. All client-side metrics start with `grpc_client`. The two mirror each other conceptually. Similarly, all methods
+carry the same rich labels:
+
+  * `grpc_service` - the [gRPC service](http://www.grpc.io/docs/#defining-a-service) name, which is the combination of protobuf `package` and
+    the `service` section name. E.g. for `package = mwitkow.testproto` and
+    `service TestService` the label will be `grpc_service="mwitkow.testproto.TestService"`
+  * `grpc_method` - the name of the method called on the gRPC service. E.g.
+    `grpc_method="Ping"`
+  * `grpc_type` - the gRPC [type of request](http://www.grpc.io/docs/guides/concepts.html#rpc-life-cycle).
+    Differentiating between these is important especially for latency measurements.
+
+      - `unary` is a single request, single response RPC
+      - `client_stream` is a multi-request, single response RPC
+      - `server_stream` is a single request, multi-response RPC
+      - `bidi_stream` is a multi-request, multi-response RPC
+
+
+Additionally for completed RPCs, the following labels are used:
+
+  * `grpc_code` - the human-readable [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go).
+    The list of all statuses is too long, but here are some common ones:
+
+      - `OK` - means the RPC was successful
+      - `InvalidArgument` - RPC contained bad values
+      - `Internal` - server-side error not disclosed to the clients
+
+## Counters
+
+The counters and their up-to-date documentation are in [server_reporter.go](server_reporter.go) and [client_reporter.go](client_reporter.go);
+their values are exposed via the respective Prometheus handler (usually `/metrics`).
+
+For the purpose of this documentation we will only discuss `grpc_server` metrics. The `grpc_client` ones mirror them.
+
+For simplicity, let's assume we're tracking a single server-side RPC call of [`mwitkow.testproto.TestService`](examples/testproto/test.proto),
+calling the method `PingList`. The call succeeds and returns 20 messages in the stream.
+
+First, immediately after the server receives the call, it will increment the
+`grpc_server_started_total` counter and start the handling time clock (if histograms are enabled).
+
+```jsoniq
+grpc_server_started_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+Then the user logic gets invoked. It receives one message from the client containing the request
+(it's a `server_stream`):
+
+```jsoniq
+grpc_server_msg_received_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+The user logic may return an error, or send multiple messages back to the client. In this case, on
+each of the 20 messages sent back, a counter will be incremented:
+
+```jsoniq
+grpc_server_msg_sent_total{grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 20
+```
+
+After the call completes, its status (`OK` or other [gRPC status code](https://github.com/grpc/grpc-go/blob/master/codes/codes.go))
+and the relevant call labels increment the `grpc_server_handled_total` counter.
+
+```jsoniq
+grpc_server_handled_total{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+## Histograms
+
+[Prometheus histograms](https://prometheus.io/docs/concepts/metric_types/#histogram) are a great way
+to measure latency distributions of your RPCs. However, since it is bad practice to have metrics
+of [high cardinality](https://prometheus.io/docs/practices/instrumentation/#do-not-overuse-labels),
+the latency monitoring metrics are disabled by default. To enable them, please call the following
+in your server initialization code:
+
+```go
+grpc_prometheus.EnableHandlingTimeHistogram()
+```
+
+After the call completes, its handling time will be recorded in a [Prometheus histogram](https://prometheus.io/docs/concepts/metric_types/#histogram)
+variable `grpc_server_handling_seconds`. The histogram variable contains three sub-metrics:
+
+ * `grpc_server_handling_seconds_count` - the count of all completed RPCs by status and method
+ * `grpc_server_handling_seconds_sum` - cumulative time of RPCs by status and method, useful for
+   calculating average handling times
+ * `grpc_server_handling_seconds_bucket` - contains the counts of RPCs by status and method in respective
+   handling-time buckets. These buckets can be used by Prometheus to estimate SLAs (see [here](https://prometheus.io/docs/practices/histograms/))
+
+The counter values will look as follows:
+
+```jsoniq
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.005"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.01"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.025"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.05"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.25"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="0.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="1"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="2.5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="5"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="10"} 1
+grpc_server_handling_seconds_bucket{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream",le="+Inf"} 1
+grpc_server_handling_seconds_sum{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 0.0003866430000000001
+grpc_server_handling_seconds_count{grpc_code="OK",grpc_method="PingList",grpc_service="mwitkow.testproto.TestService",grpc_type="server_stream"} 1
+```
+
+
+## Useful query examples
+
+The Prometheus philosophy is to provide raw metrics to the monitoring system, and
+let the aggregations be handled there. The verbosity of the above metrics makes that
+flexibility possible. Here are a couple of useful monitoring queries:
+
+
+### request inbound rate
+```jsoniq
+sum(rate(grpc_server_started_total{job="foo"}[1m])) by (grpc_service)
+```
+For `job="foo"` (common label to differentiate between Prometheus monitoring targets), calculate the
+rate of requests per second (1 minute window) for each gRPC `grpc_service` that the job has. Please note
+how the `grpc_method` is being omitted here: all methods of a given gRPC service will be summed together.
+
+### unary request error rate
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+```
+For `job="foo"`, calculate the per-`grpc_service` rate of `unary` (1:1) RPCs that failed, i.e. the
+ones that didn't finish with `OK` code.
+
+### unary request error percentage
+```jsoniq
+sum(rate(grpc_server_handled_total{job="foo",grpc_type="unary",grpc_code!="OK"}[1m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="unary"}[1m])) by (grpc_service)
+ * 100.0
+```
+For `job="foo"`, calculate the percentage of failed requests by service. It's easy to notice that
+this is a combination of the two above examples. This is an example of a query you would like to
+[alert on](https://prometheus.io/docs/alerting/rules/) in your system for SLA violations, e.g.
+"no more than 1% requests should fail".
+
+### average response stream size
+```jsoniq
+sum(rate(grpc_server_msg_sent_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+ /
+sum(rate(grpc_server_started_total{job="foo",grpc_type="server_stream"}[10m])) by (grpc_service)
+```
+For `job="foo"`, calculate the `grpc_service`-wide `10m` average of messages returned for all
+`server_stream` RPCs. This allows you to track the stream sizes returned by your system, e.g. it allows
+you to track when clients started to send "wide" queries that return many messages.
+Note that the divisor is the number of started RPCs, in order to account for in-flight requests.
+
+### 99%-tile latency of unary requests
+```jsoniq
+histogram_quantile(0.99,
+  sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary"}[5m])) by (grpc_service,le)
+)
+```
+For `job="foo"`, returns a 99th-percentile [quantile estimation](https://prometheus.io/docs/practices/histograms/#quantiles)
+of the handling time of RPCs per service. Please note the `5m` rate: this means that the quantile
+estimation will take samples in a rolling `5m` window. When combined with other quantiles
+(e.g. 50%, 90%), this query gives you tremendous insight into the responsiveness of your system
+(e.g. impact of caching).
+
+### percentage of slow unary queries (>250ms)
+```jsoniq
+100.0 - (
+sum(rate(grpc_server_handling_seconds_bucket{job="foo",grpc_type="unary",le="0.25"}[5m])) by (grpc_service)
+ /
+sum(rate(grpc_server_handling_seconds_count{job="foo",grpc_type="unary"}[5m])) by (grpc_service)
+) * 100.0
+```
+For `job="foo"`, calculate the by-`grpc_service` percentage of slow requests that took longer than `0.25`
+seconds. This query is relatively complex, since the Prometheus aggregations use `le` (less or equal)
+buckets, meaning that counting the fraction of "fast" requests is easier. However, simple maths helps.
+This is an example of a query you would like to alert on in your system for SLA violations,
+e.g. "less than 1% of requests are slower than 250ms".
+
+
+## Status
+
+This code has been used since August 2015 as the basis for monitoring of *production* gRPC microservices at [Improbable](https://improbable.io).
+
+## License
+
+`go-grpc-prometheus` is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
new file mode 100644
index 00000000..5b861b72
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client.go
@@ -0,0 +1,57 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for client-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+)
+
+var (
+	// DefaultClientMetrics is the default instance of ClientMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultClientMetrics = NewClientMetrics()
+
+	// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
+
+	// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
+	prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
+	prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
+}
+
+// EnableClientHandlingTimeHistogram turns on recording of handling time of
+// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
+// query. This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
+	prom.Register(DefaultClientMetrics.clientHandledHistogram)
+}
+
+// EnableClientStreamReceiveTimeHistogram turns on recording of
+// single message receive time of streaming RPCs.
+// This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientStreamReceiveTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientStreamReceiveTimeHistogram(opts...)
+	prom.Register(DefaultClientMetrics.clientStreamRecvHistogram)
+}
+
+// EnableClientStreamSendTimeHistogram turns on recording of
+// single message send time of streaming RPCs.
+// This function acts on the DefaultClientMetrics variable and the
+// default Prometheus metrics registry.
+func EnableClientStreamSendTimeHistogram(opts ...HistogramOption) {
+	DefaultClientMetrics.EnableClientStreamSendTimeHistogram(opts...)
+ prom.Register(DefaultClientMetrics.clientStreamSendHistogram) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go new file mode 100644 index 00000000..2d29cde1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_metrics.go @@ -0,0 +1,244 @@ +package grpc_prometheus + +import ( + "context" + "io" + + prom "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ClientMetrics represents a collection of metrics to be registered on a +// Prometheus metrics registry for a gRPC client. +type ClientMetrics struct { + clientStartedCounter *prom.CounterVec + clientHandledCounter *prom.CounterVec + clientStreamMsgReceived *prom.CounterVec + clientStreamMsgSent *prom.CounterVec + + clientHandledHistogramEnabled bool + clientHandledHistogramOpts prom.HistogramOpts + clientHandledHistogram *prom.HistogramVec + + clientStreamRecvHistogramEnabled bool + clientStreamRecvHistogramOpts prom.HistogramOpts + clientStreamRecvHistogram *prom.HistogramVec + + clientStreamSendHistogramEnabled bool + clientStreamSendHistogramOpts prom.HistogramOpts + clientStreamSendHistogram *prom.HistogramVec +} + +// NewClientMetrics returns a ClientMetrics object. Use a new instance of +// ClientMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. +func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics { + opts := counterOptions(counterOpts) + return &ClientMetrics{ + clientStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_started_total", + Help: "Total number of RPCs started on the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_handled_total", + Help: "Total number of RPCs completed by the client, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + + clientStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_received_total", + Help: "Total number of RPC stream messages received by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_client_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the client.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + + clientHandledHistogramEnabled: false, + clientHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_client_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.", + Buckets: prom.DefBuckets, + }, + clientHandledHistogram: nil, + clientStreamRecvHistogramEnabled: false, + clientStreamRecvHistogramOpts: prom.HistogramOpts{ + Name: "grpc_client_msg_recv_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC single message receive.", + Buckets: prom.DefBuckets, + }, + clientStreamRecvHistogram: nil, + clientStreamSendHistogramEnabled: false, + clientStreamSendHistogramOpts: prom.HistogramOpts{ + Name: 
"grpc_client_msg_send_handling_seconds", + Help: "Histogram of response latency (seconds) of the gRPC single message send.", + Buckets: prom.DefBuckets, + }, + clientStreamSendHistogram: nil, + } +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) { + m.clientStartedCounter.Describe(ch) + m.clientHandledCounter.Describe(ch) + m.clientStreamMsgReceived.Describe(ch) + m.clientStreamMsgSent.Describe(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Describe(ch) + } + if m.clientStreamRecvHistogramEnabled { + m.clientStreamRecvHistogram.Describe(ch) + } + if m.clientStreamSendHistogramEnabled { + m.clientStreamSendHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. +func (m *ClientMetrics) Collect(ch chan<- prom.Metric) { + m.clientStartedCounter.Collect(ch) + m.clientHandledCounter.Collect(ch) + m.clientStreamMsgReceived.Collect(ch) + m.clientStreamMsgSent.Collect(ch) + if m.clientHandledHistogramEnabled { + m.clientHandledHistogram.Collect(ch) + } + if m.clientStreamRecvHistogramEnabled { + m.clientStreamRecvHistogram.Collect(ch) + } + if m.clientStreamSendHistogramEnabled { + m.clientStreamSendHistogram.Collect(ch) + } +} + +// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.clientHandledHistogramOpts) + } + if !m.clientHandledHistogramEnabled { + m.clientHandledHistogram = prom.NewHistogramVec( + m.clientHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.clientHandledHistogramEnabled = true +} + +// EnableClientStreamReceiveTimeHistogram turns on recording of single message receive time of streaming RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func (m *ClientMetrics) EnableClientStreamReceiveTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.clientStreamRecvHistogramOpts) + } + + if !m.clientStreamRecvHistogramEnabled { + m.clientStreamRecvHistogram = prom.NewHistogramVec( + m.clientStreamRecvHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + + m.clientStreamRecvHistogramEnabled = true +} + +// EnableClientStreamSendTimeHistogram turns on recording of single message send time of streaming RPCs. +// Histogram metrics can be very expensive for Prometheus to retain and query. +func (m *ClientMetrics) EnableClientStreamSendTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.clientStreamSendHistogramOpts) + } + + if !m.clientStreamSendHistogramEnabled { + m.clientStreamSendHistogram = prom.NewHistogramVec( + m.clientStreamSendHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + + m.clientStreamSendHistogramEnabled = true +} + +// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs. 
+func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+		monitor := newClientReporter(m, Unary, method)
+		monitor.SentMessage()
+		err := invoker(ctx, method, req, reply, cc, opts...)
+		if err == nil {
+			// A reply is only received if the invocation succeeded.
+			monitor.ReceivedMessage()
+		}
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+		monitor := newClientReporter(m, clientStreamType(desc), method)
+		clientStream, err := streamer(ctx, desc, cc, method, opts...)
+		if err != nil {
+			st, _ := status.FromError(err)
+			monitor.Handled(st.Code())
+			return nil, err
+		}
+		return &monitoredClientStream{clientStream, monitor}, nil
+	}
+}
+
+func clientStreamType(desc *grpc.StreamDesc) grpcType {
+	if desc.ClientStreams && !desc.ServerStreams {
+		return ClientStream
+	} else if !desc.ClientStreams && desc.ServerStreams {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredClientStream wraps grpc.ClientStream, allowing each Sent/Recv of message to increment counters.
+type monitoredClientStream struct {
+	grpc.ClientStream
+	monitor *clientReporter
+}
+
+func (s *monitoredClientStream) SendMsg(m interface{}) error {
+	timer := s.monitor.SendMessageTimer()
+	err := s.ClientStream.SendMsg(m)
+	timer.ObserveDuration()
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredClientStream) RecvMsg(m interface{}) error {
+	timer := s.monitor.ReceiveMessageTimer()
+	err := s.ClientStream.RecvMsg(m)
+	timer.ObserveDuration()
+
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	} else if err == io.EOF {
+		s.monitor.Handled(codes.OK)
+	} else {
+		st, _ := status.FromError(err)
+		s.monitor.Handled(st.Code())
+	}
+	return err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
new file mode 100644
index 00000000..286d6571
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/client_reporter.go
@@ -0,0 +1,79 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+ +package grpc_prometheus + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + "google.golang.org/grpc/codes" +) + +type clientReporter struct { + metrics *ClientMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter { + r := &clientReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.clientHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +// timer is a helper interface to time functions. +type timer interface { + ObserveDuration() time.Duration +} + +type noOpTimer struct { +} + +func (noOpTimer) ObserveDuration() time.Duration { + return 0 +} + +var emptyTimer = noOpTimer{} + +func (r *clientReporter) ReceiveMessageTimer() timer { + if r.metrics.clientStreamRecvHistogramEnabled { + hist := r.metrics.clientStreamRecvHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName) + return prometheus.NewTimer(hist) + } + + return emptyTimer +} + +func (r *clientReporter) ReceivedMessage() { + r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) SendMessageTimer() timer { + if r.metrics.clientStreamSendHistogramEnabled { + hist := r.metrics.clientStreamSendHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName) + return prometheus.NewTimer(hist) + } + + return emptyTimer +} + +func (r *clientReporter) SentMessage() { + r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *clientReporter) Handled(code codes.Code) { + r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.clientHandledHistogramEnabled { + r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/go.mod new file mode 100644 index 00000000..c49d1108 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/go.mod @@ -0,0 +1,10 @@ +module github.com/grpc-ecosystem/go-grpc-prometheus + +require ( + github.com/golang/protobuf v1.2.0 + github.com/prometheus/client_golang v0.9.2 + github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 + github.com/stretchr/testify v1.3.0 + golang.org/x/net v0.0.0-20190213061140-3a22650c66bd + google.golang.org/grpc v1.18.0 +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/go.sum new file mode 100644 index 00000000..485e90ad --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/go.sum @@ -0,0 +1,48 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod 
h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd h1:HuTn7WObtcDo9uEEU7rEqL0jYthdXAmZ6PP+meazmaU= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522 h1:Ve1ORMCxvRmSXBwJK+t3Oy+V2vRW2OetUQBq4rJIkZE= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.18.0 h1:IZl7mfBGfbhYx2p2rKRtYgDFw6SBz+kclmxYrCksPPA= +google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile new file mode 100644 index 00000000..74c08422 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/makefile @@ -0,0 +1,16 @@ +SHELL="/bin/bash" + +GOFILES_NOVENDOR = $(shell go list ./... | grep -v /vendor/) + +all: vet fmt test + +fmt: + go fmt $(GOFILES_NOVENDOR) + +vet: + go vet $(GOFILES_NOVENDOR) + +test: vet + ./scripts/test_all.sh + +.PHONY: all vet test diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go new file mode 100644 index 00000000..9d51aec9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/metric_options.go @@ -0,0 +1,41 @@ +package grpc_prometheus + +import ( + prom "github.com/prometheus/client_golang/prometheus" +) + +// A CounterOption lets you add options to Counter metrics using With* funcs. +type CounterOption func(*prom.CounterOpts) + +type counterOptions []CounterOption + +func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts { + for _, f := range co { + f(&o) + } + return o +} + +// WithConstLabels allows you to add ConstLabels to Counter metrics. +func WithConstLabels(labels prom.Labels) CounterOption { + return func(o *prom.CounterOpts) { + o.ConstLabels = labels + } +} + +// A HistogramOption lets you add options to Histogram metrics using With* +// funcs. +type HistogramOption func(*prom.HistogramOpts) + +// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on. +func WithHistogramBuckets(buckets []float64) HistogramOption { + return func(o *prom.HistogramOpts) { o.Buckets = buckets } +} + +// WithHistogramConstLabels allows you to add custom ConstLabels to +// histograms metrics. 
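+//
+// For example (a sketch; the label and bucket values are assumptions):
+//
+//	EnableHandlingTimeHistogram(
+//		WithHistogramConstLabels(prom.Labels{"region": "eu-west-1"}),
+//		WithHistogramBuckets([]float64{0.005, 0.05, 0.5, 5}),
+//	)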
+func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
+	return func(o *prom.HistogramOpts) {
+		o.ConstLabels = labels
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
new file mode 100644
index 00000000..322f9904
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Michal Witkowski. All Rights Reserved.
+// See LICENSE for licensing terms.
+
+// gRPC Prometheus monitoring interceptors for server-side gRPC.
+
+package grpc_prometheus
+
+import (
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+)
+
+var (
+	// DefaultServerMetrics is the default instance of ServerMetrics. It is
+	// intended to be used in conjunction with the default Prometheus metrics
+	// registry.
+	DefaultServerMetrics = NewServerMetrics()
+
+	// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+	UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
+
+	// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+	StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
+)
+
+func init() {
+	prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
+	prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
+	prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
+}
+
+// Register takes a gRPC server and pre-initializes all counters to 0. This
+// allows for easier monitoring in Prometheus (no missing metrics), and should
+// be called *after* all services have been registered with the server. This
+// function acts on the DefaultServerMetrics variable.
+func Register(server *grpc.Server) {
+	DefaultServerMetrics.InitializeMetrics(server)
+}
+
+// EnableHandlingTimeHistogram turns on recording of handling time
+// of RPCs. Histogram metrics can be very expensive for Prometheus
+// to retain and query. This function acts on the DefaultServerMetrics
+// variable and the default Prometheus metrics registry.
+func EnableHandlingTimeHistogram(opts ...HistogramOption) {
+	DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
+	prom.Register(DefaultServerMetrics.serverHandledHistogram)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
new file mode 100644
index 00000000..cf2e045d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_metrics.go
@@ -0,0 +1,186 @@
+package grpc_prometheus
+
+import (
+	"context"
+
+	prom "github.com/prometheus/client_golang/prometheus"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/status"
+)
+
+// ServerMetrics represents a collection of metrics to be registered on a
+// Prometheus metrics registry for a gRPC server.
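+//
+// Typical wiring of the default instance (a sketch; the echo service and its
+// registration function are hypothetical, not part of this package):
+//
+//	srv := grpc.NewServer(
+//		grpc.UnaryInterceptor(UnaryServerInterceptor),
+//		grpc.StreamInterceptor(StreamServerInterceptor),
+//	)
+//	pb.RegisterEchoServer(srv, &echoServer{}) // hypothetical service
+//	Register(srv) // pre-initializes the per-method label values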
+type ServerMetrics struct { + serverStartedCounter *prom.CounterVec + serverHandledCounter *prom.CounterVec + serverStreamMsgReceived *prom.CounterVec + serverStreamMsgSent *prom.CounterVec + serverHandledHistogramEnabled bool + serverHandledHistogramOpts prom.HistogramOpts + serverHandledHistogram *prom.HistogramVec +} + +// NewServerMetrics returns a ServerMetrics object. Use a new instance of +// ServerMetrics when not using the default Prometheus metrics registry, for +// example when wanting to control which metrics are added to a registry as +// opposed to automatically adding metrics via init functions. +func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics { + opts := counterOptions(counterOpts) + return &ServerMetrics{ + serverStartedCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_started_total", + Help: "Total number of RPCs started on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledCounter: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_handled_total", + Help: "Total number of RPCs completed on the server, regardless of success or failure.", + }), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}), + serverStreamMsgReceived: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_received_total", + Help: "Total number of RPC stream messages received on the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverStreamMsgSent: prom.NewCounterVec( + opts.apply(prom.CounterOpts{ + Name: "grpc_server_msg_sent_total", + Help: "Total number of gRPC stream messages sent by the server.", + }), []string{"grpc_type", "grpc_service", "grpc_method"}), + serverHandledHistogramEnabled: false, + serverHandledHistogramOpts: prom.HistogramOpts{ + Name: "grpc_server_handling_seconds", + Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.", + Buckets: prom.DefBuckets, + }, + serverHandledHistogram: nil, + } +} + +// EnableHandlingTimeHistogram enables histograms being registered when +// registering the ServerMetrics on a Prometheus registry. Histograms can be +// expensive on Prometheus servers. It takes options to configure histogram +// options such as the defined buckets. +func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) { + for _, o := range opts { + o(&m.serverHandledHistogramOpts) + } + if !m.serverHandledHistogramEnabled { + m.serverHandledHistogram = prom.NewHistogramVec( + m.serverHandledHistogramOpts, + []string{"grpc_type", "grpc_service", "grpc_method"}, + ) + } + m.serverHandledHistogramEnabled = true +} + +// Describe sends the super-set of all possible descriptors of metrics +// collected by this Collector to the provided channel and returns once +// the last descriptor has been sent. +func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) { + m.serverStartedCounter.Describe(ch) + m.serverHandledCounter.Describe(ch) + m.serverStreamMsgReceived.Describe(ch) + m.serverStreamMsgSent.Describe(ch) + if m.serverHandledHistogramEnabled { + m.serverHandledHistogram.Describe(ch) + } +} + +// Collect is called by the Prometheus registry when collecting +// metrics. The implementation sends each collected metric via the +// provided channel and returns once the last metric has been sent. 
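+//
+// Because ServerMetrics implements prom.Collector, a dedicated registry can
+// adopt it wholesale (a sketch; the variable names are assumptions):
+//
+//	reg := prom.NewRegistry()
+//	serverMetrics := NewServerMetrics()
+//	reg.MustRegister(serverMetrics)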
+func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
+	m.serverStartedCounter.Collect(ch)
+	m.serverHandledCounter.Collect(ch)
+	m.serverStreamMsgReceived.Collect(ch)
+	m.serverStreamMsgSent.Collect(ch)
+	if m.serverHandledHistogramEnabled {
+		m.serverHandledHistogram.Collect(ch)
+	}
+}
+
+// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
+func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+		monitor := newServerReporter(m, Unary, info.FullMethod)
+		monitor.ReceivedMessage()
+		resp, err := handler(ctx, req)
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		if err == nil {
+			monitor.SentMessage()
+		}
+		return resp, err
+	}
+}
+
+// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
+func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+		monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
+		err := handler(srv, &monitoredServerStream{ss, monitor})
+		st, _ := status.FromError(err)
+		monitor.Handled(st.Code())
+		return err
+	}
+}
+
+// InitializeMetrics initializes all metrics, with their appropriate null
+// value, for all gRPC methods registered on a gRPC server. This is useful to
+// ensure that all metrics exist when collecting and querying.
+func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
+	serviceInfo := server.GetServiceInfo()
+	for serviceName, info := range serviceInfo {
+		for _, mInfo := range info.Methods {
+			preRegisterMethod(m, serviceName, &mInfo)
+		}
+	}
+}
+
+func streamRPCType(info *grpc.StreamServerInfo) grpcType {
+	if info.IsClientStream && !info.IsServerStream {
+		return ClientStream
+	} else if !info.IsClientStream && info.IsServerStream {
+		return ServerStream
+	}
+	return BidiStream
+}
+
+// monitoredServerStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
+type monitoredServerStream struct {
+	grpc.ServerStream
+	monitor *serverReporter
+}
+
+func (s *monitoredServerStream) SendMsg(m interface{}) error {
+	err := s.ServerStream.SendMsg(m)
+	if err == nil {
+		s.monitor.SentMessage()
+	}
+	return err
+}
+
+func (s *monitoredServerStream) RecvMsg(m interface{}) error {
+	err := s.ServerStream.RecvMsg(m)
+	if err == nil {
+		s.monitor.ReceivedMessage()
+	}
+	return err
+}
+
+// preRegisterMethod is invoked on Register of a Server, allowing all gRPC service labels to be pre-populated.
+func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
+	methodName := mInfo.Name
+	methodType := string(typeFromMethodInfo(mInfo))
+	// These are just references (no increments), as just referencing will create the labels but not set values.
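+	// GetMetricWithLabelValues lazily creates each child series, so every
+	// method below (and every status code for the handled counter) is
+	// exported with an initial value of 0 instead of being absent until
+	// first use.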
+ metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName) + metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName) + if metrics.serverHandledHistogramEnabled { + metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName) + } + for _, code := range allCodes { + metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String()) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go new file mode 100644 index 00000000..aa9db540 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/server_reporter.go @@ -0,0 +1,46 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. + +package grpc_prometheus + +import ( + "time" + + "google.golang.org/grpc/codes" +) + +type serverReporter struct { + metrics *ServerMetrics + rpcType grpcType + serviceName string + methodName string + startTime time.Time +} + +func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter { + r := &serverReporter{ + metrics: m, + rpcType: rpcType, + } + if r.metrics.serverHandledHistogramEnabled { + r.startTime = time.Now() + } + r.serviceName, r.methodName = splitMethodName(fullMethod) + r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() + return r +} + +func (r *serverReporter) ReceivedMessage() { + r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) SentMessage() { + r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc() +} + +func (r *serverReporter) Handled(code codes.Code) { + r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc() + if r.metrics.serverHandledHistogramEnabled { + r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds()) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go new file mode 100644 index 00000000..7987de35 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/go-grpc-prometheus/util.go @@ -0,0 +1,50 @@ +// Copyright 2016 Michal Witkowski. All Rights Reserved. +// See LICENSE for licensing terms. 
+ +package grpc_prometheus + +import ( + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" +) + +type grpcType string + +const ( + Unary grpcType = "unary" + ClientStream grpcType = "client_stream" + ServerStream grpcType = "server_stream" + BidiStream grpcType = "bidi_stream" +) + +var ( + allCodes = []codes.Code{ + codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound, + codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted, + codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal, + codes.Unavailable, codes.DataLoss, + } +) + +func splitMethodName(fullMethodName string) (string, string) { + fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash + if i := strings.Index(fullMethodName, "/"); i >= 0 { + return fullMethodName[:i], fullMethodName[i+1:] + } + return "unknown", "unknown" +} + +func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType { + if !mInfo.IsClientStream && !mInfo.IsServerStream { + return Unary + } + if mInfo.IsClientStream && !mInfo.IsServerStream { + return ClientStream + } + if !mInfo.IsClientStream && mInfo.IsServerStream { + return ServerStream + } + return BidiStream +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt new file mode 100644 index 00000000..36451625 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
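The metric labels exported by both reporters come from the helpers in util.go above. A self-contained sketch of how a fully qualified method name is split into the `grpc_service` and `grpc_method` label values (the Greeter service name is the stock gRPC example, used here purely for illustration):

```go
package main

import (
	"fmt"
	"strings"
)

// splitMethodName mirrors the helper in util.go above: a full gRPC method
// name of the form "/package.Service/Method" is split into service and
// method label values, with "unknown"/"unknown" as the fallback.
func splitMethodName(fullMethodName string) (string, string) {
	fullMethodName = strings.TrimPrefix(fullMethodName, "/")
	if i := strings.Index(fullMethodName, "/"); i >= 0 {
		return fullMethodName[:i], fullMethodName[i+1:]
	}
	return "unknown", "unknown"
}

func main() {
	svc, method := splitMethodName("/helloworld.Greeter/SayHello")
	fmt.Println(svc, method) // prints: helloworld.Greeter SayHello
}
```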
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel new file mode 100644 index 00000000..76cafe6e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel @@ -0,0 +1,22 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["stream_chunk.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go new file mode 100644 index 00000000..8858f069 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go @@ -0,0 +1,118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/stream_chunk.proto + +package internal + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
+type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0} +} +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (dst *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(dst, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { + proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7) +} + +var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{ + // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30, + 0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23, + 0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78, + 0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce, + 0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2, + 0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a, + 0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f, + 0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54, + 0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7, + 0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5, + 0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9, + 0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 
0x7a, 0x59, 0xac, + 0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18, + 0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto new file mode 100644 index 00000000..55f42ce6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package grpc.gateway.runtime; +option go_package = "internal"; + +import "google/protobuf/any.proto"; + +// StreamError is a response type which is returned when +// streaming rpc returns an error. +message StreamError { + int32 grpc_code = 1; + int32 http_code = 2; + string message = 3; + string http_status = 4; + repeated google.protobuf.Any details = 5; +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel new file mode 100644 index 00000000..20862228 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -0,0 +1,84 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//examples/proto/examplepb:go_default_library", + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + 
"@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go new file mode 100644 index 00000000..896057e1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -0,0 +1,210 @@ +package runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + for _, val := range vals { + key = textproto.CanonicalMIMEHeaderKey(key) + // For backwards-compatibility, pass through 'authorization' header with no prefix. 
+ if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. + if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + if fwd := req.Header.Get(xForwardedFor); fwd == "" { + pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) + } else { + pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) + } + } else { + grpclog.Infof("invalid remote addr: %s", addr) + } + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return metadata.NewOutgoingContext(ctx, md), nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +func timeoutDecode(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("timeout string is too short: %q", s) + } + d, ok := timeoutUnitToDuration(s[size-1]) + if !ok { + return 0, fmt.Errorf("timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) { + switch u { + case 'H': + return time.Hour, true + case 'M': + return time.Minute, true + case 'S': + return time.Second, true + case 'm': + return time.Millisecond, true + case 'u': + return time.Microsecond, true + case 'n': + return time.Nanosecond, true + default: + } + return +} + +// isPermanentHTTPHeader checks whether hdr belongs to the list of +// permenant request headers maintained by IANA. 
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 00000000..a5b3bd6a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,312 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It is just for compatibility with other types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A bytes sequence is encoded in URL-safe base64 without padding.
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
+// base64 without padding, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	if err := jsonpb.UnmarshalString(val, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	if err := jsonpb.UnmarshalString(val, &r); err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be type cast into the
+// correct enum proto type.
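+//
+// For example, with an assumed enumValMap of {"RED": 0, "GREEN": 1}, both
+// Enum("GREEN", m) and Enum("1", m) return 1, while Enum("2", m) fails
+// because 2 is not a declared enum value.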
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+/*
+	Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrappers.StringValue, error) {
+	return &wrappers.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrappers.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return &wrappers.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrappers.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return &wrappers.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrappers.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return &wrappers.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrappers.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return &wrappers.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrappers.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return &wrappers.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrappers.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return &wrappers.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrappers.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return &wrappers.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around []byte type
+func BytesValue(val string) (*wrappers.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return &wrappers.BytesValue{Value: parsedVal}, err
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 00000000..b6e5ddf7
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/ +package runtime diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go new file mode 100644 index 00000000..ad945788 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go @@ -0,0 +1,146 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status. +// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto +func HTTPStatusFromCode(code codes.Code) int { + switch code { + case codes.OK: + return http.StatusOK + case codes.Canceled: + return http.StatusRequestTimeout + case codes.Unknown: + return http.StatusInternalServerError + case codes.InvalidArgument: + return http.StatusBadRequest + case codes.DeadlineExceeded: + return http.StatusGatewayTimeout + case codes.NotFound: + return http.StatusNotFound + case codes.AlreadyExists: + return http.StatusConflict + case codes.PermissionDenied: + return http.StatusForbidden + case codes.Unauthenticated: + return http.StatusUnauthorized + case codes.ResourceExhausted: + return http.StatusTooManyRequests + case codes.FailedPrecondition: + // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status. + return http.StatusBadRequest + case codes.Aborted: + return http.StatusConflict + case codes.OutOfRange: + return http.StatusBadRequest + case codes.Unimplemented: + return http.StatusNotImplemented + case codes.Internal: + return http.StatusInternalServerError + case codes.Unavailable: + return http.StatusServiceUnavailable + case codes.DataLoss: + return http.StatusInternalServerError + } + + grpclog.Infof("Unknown gRPC error code: %v", code) + return http.StatusInternalServerError +} + +var ( + // HTTPError replies to the request with the error. + // You can set a custom function to this variable to customize error format. + HTTPError = DefaultHTTPError + // OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest + OtherErrorHandler = DefaultOtherErrorHandler +) + +type errorBody struct { + Error string `protobuf:"bytes,1,name=error" json:"error"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + Message string `protobuf:"bytes,1,name=message" json:"message"` + Code int32 `protobuf:"varint,2,name=code" json:"code"` + Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` +} + +// Make this also conform to proto.Message for builtin JSONPb Marshaler +func (e *errorBody) Reset() { *e = errorBody{} } +func (e *errorBody) String() string { return proto.CompactTextString(e) } +func (*errorBody) ProtoMessage() {} + +// DefaultHTTPError is the default implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. 
+//
+// The response body returned by this function is a JSON object,
+// which contains a member whose key is "error" and whose value is err.Error().
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	const fallback = `{"error": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshal interface to be able to remove this check
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	body := &errorBody{
+		Error:   s.Message(),
+		Message: s.Message(),
+		Code:    int32(s.Code()),
+		Details: s.Proto().GetDetails(),
+	}
+
+	buf, merr := marshaler.Marshal(body)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
+// It simply writes a string representation of the given error into "w".
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+	http.Error(w, msg, code)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
new file mode 100644
index 00000000..e1cf7a91
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
@@ -0,0 +1,70 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+	"google.golang.org/genproto/protobuf/field_mask"
+)
+
+// FieldMaskFromRequestBody creates a FieldMask containing all complete paths from the JSON body.
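+//
+// For example, an assumed request body of {"name": "x", "meta": {"tags": ["a"]}}
+// yields the paths "Name" and "Meta.Tags" (map iteration makes the order
+// unspecified).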
+func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) { + fm := &field_mask.FieldMask{} + var root interface{} + if err := json.NewDecoder(r).Decode(&root); err != nil { + if err == io.EOF { + return fm, nil + } + return nil, err + } + + queue := []fieldMaskPathItem{{node: root}} + for len(queue) > 0 { + // dequeue an item + item := queue[0] + queue = queue[1:] + + if m, ok := item.node.(map[string]interface{}); ok { + // if the item is an object, then enqueue all of its children + for k, v := range m { + queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v}) + } + } else if len(item.path) > 0 { + // otherwise, it's a leaf node so print its path + fm.Paths = append(fm.Paths, strings.Join(item.path, ".")) + } + } + + return fm, nil +} + +// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask +type fieldMaskPathItem struct { + // the list of prior fields leading up to node + path []string + + // a generic decoded json object the current item to inspect for further path extraction + node interface{} +} + +// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic +// that's used for naming protobuf fields in Go. +func CamelCaseFieldMask(mask *field_mask.FieldMask) { + if mask == nil || mask.Paths == nil { + return + } + + var newPaths []string + for _, path := range mask.Paths { + lowerCasedParts := strings.Split(path, ".") + var camelCasedParts []string + for _, part := range lowerCasedParts { + camelCasedParts = append(camelCasedParts, generator.CamelCase(part)) + } + newPaths = append(newPaths, strings.Join(camelCasedParts, ".")) + } + + mask.Paths = newPaths +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go new file mode 100644 index 00000000..2af90065 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go @@ -0,0 +1,209 @@ +package runtime + +import ( + "errors" + "fmt" + "io" + "net/http" + "net/textproto" + + "context" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/grpclog" +) + +var errEmptyResponse = errors.New("empty response") + +// ForwardResponseStream forwards the stream from gRPC server to REST client. 
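+//
+// Each message received from recv is written as one chunk of the form
+// {"result": <message>} ({"error": <status>} on failure), followed by the
+// marshaler's delimiter ("\n" unless the marshaler implements Delimited).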
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + f, ok := w.(http.Flusher) + if !ok { + grpclog.Infof("Flush not supported in %T", w) + http.Error(w, "unexpected type of web server", http.StatusInternalServerError) + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + http.Error(w, "unexpected error", http.StatusInternalServerError) + return + } + handleForwardResponseServerMetadata(w, mux, md) + + w.Header().Set("Transfer-Encoding", "chunked") + w.Header().Set("Content-Type", marshaler.ContentType()) + if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + var delimiter []byte + if d, ok := marshaler.(Delimited); ok { + delimiter = d.Delimiter() + } else { + delimiter = []byte("\n") + } + + var wroteHeader bool + for { + resp, err := recv() + if err == io.EOF { + return + } + if err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + + buf, err := marshaler.Marshal(streamChunk(ctx, resp, mux.streamErrorHandler)) + if err != nil { + grpclog.Infof("Failed to marshal response chunk: %v", err) + handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err) + return + } + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to send response chunk: %v", err) + return + } + wroteHeader = true + if _, err = w.Write(delimiter); err != nil { + grpclog.Infof("Failed to send delimiter chunk: %v", err) + return + } + f.Flush() + } +} + +func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) { + for k, vs := range md.HeaderMD { + if h, ok := mux.outgoingHeaderMatcher(k); ok { + for _, v := range vs { + w.Header().Add(h, v) + } + } + } +} + +func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { + for k := range md.TrailerMD { + tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) + w.Header().Add("Trailer", tKey) + } +} + +func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { + for k, vs := range md.TrailerMD { + tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) + for _, v := range vs { + w.Header().Add(tKey, v) + } + } +} + +// responseBody interface contains method for getting field for marshaling to the response body +// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule` +type responseBody interface { + XXX_ResponseBody() interface{} +} + +// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client. 
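+//
+// Editorial sketch (identifiers hypothetical): gateway-generated unary
+// handlers typically end with a call of the form
+//
+//    runtime.ForwardResponseMessage(ctx, mux, outboundMarshaler, w, req, resp,
+//        mux.GetForwardResponseOptions()...)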
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) { + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + contentType = httpBodyMarshaler.ContentTypeFromMessage(resp) + } + w.Header().Set("Content-Type", contentType) + + if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil { + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + var buf []byte + var err error + if rb, ok := resp.(responseBody); ok { + buf, err = marshaler.Marshal(rb.XXX_ResponseBody()) + } else { + buf, err = marshaler.Marshal(resp) + } + if err != nil { + grpclog.Infof("Marshal error: %v", err) + HTTPError(ctx, mux, marshaler, w, req, err) + return + } + + if _, err = w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error { + if len(opts) == 0 { + return nil + } + for _, opt := range opts { + if err := opt(ctx, w, resp); err != nil { + grpclog.Infof("Error handling ForwardResponseOptions: %v", err) + return err + } + } + return nil +} + +func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) { + serr := streamError(ctx, mux.streamErrorHandler, err) + if !wroteHeader { + w.WriteHeader(int(serr.HttpCode)) + } + buf, merr := marshaler.Marshal(errorChunk(serr)) + if merr != nil { + grpclog.Infof("Failed to marshal an error: %v", merr) + return + } + if _, werr := w.Write(buf); werr != nil { + grpclog.Infof("Failed to notify error to client: %v", werr) + return + } +} + +// streamChunk returns a chunk in a response stream for the given result. The +// given errHandler is used to render an error chunk if result is nil. +func streamChunk(ctx context.Context, result proto.Message, errHandler StreamErrorHandlerFunc) map[string]proto.Message { + if result == nil { + return errorChunk(streamError(ctx, errHandler, errEmptyResponse)) + } + return map[string]proto.Message{"result": result} +} + +// streamError returns the payload for the final message in a response stream +// that represents the given err. +func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError { + serr := errHandler(ctx, err) + if serr != nil { + return serr + } + // TODO: log about misbehaving stream error handler? 
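+ // A nil *StreamError from the custom handler means "no opinion", so fall
+ // back to the default conversion instead of forwarding an empty error.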
+ return DefaultHTTPStreamErrorHandler(ctx, err) +} + +func errorChunk(err *StreamError) map[string]proto.Message { + return map[string]proto.Message{"error": (*internal.StreamError)(err)} +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go new file mode 100644 index 00000000..f55285b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go @@ -0,0 +1,43 @@ +package runtime + +import ( + "google.golang.org/genproto/googleapis/api/httpbody" +) + +// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler +func SetHTTPBodyMarshaler(serveMux *ServeMux) { + serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{ + Marshaler: &JSONPb{OrigName: true}, + } +} + +// HTTPBodyMarshaler is a Marshaler which supports marshaling of a +// google.api.HttpBody message as the full response body if it is +// the actual message used as the response. If not, then this will +// simply fallback to the Marshaler specified as its default Marshaler. +type HTTPBodyMarshaler struct { + Marshaler +} + +// ContentType implementation to keep backwards compatability with marshal interface +func (h *HTTPBodyMarshaler) ContentType() string { + return h.ContentTypeFromMessage(nil) +} + +// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns +// its specified content type otherwise fall back to the default Marshaler. +func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.GetContentType() + } + return h.Marshaler.ContentType() +} + +// Marshal marshals "v" by returning the body bytes if v is a +// google.api.HttpBody message, otherwise it falls back to the default Marshaler. +func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { + if httpBody, ok := v.(*httpbody.HttpBody); ok { + return httpBody.Data, nil + } + return h.Marshaler.Marshal(v) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go new file mode 100644 index 00000000..f9d3a585 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go @@ -0,0 +1,45 @@ +package runtime + +import ( + "encoding/json" + "io" +) + +// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON +// with the standard "encoding/json" package of Golang. +// Although it is generally faster for simple proto messages than JSONPb, +// it does not support advanced features of protobuf, e.g. map, oneof, .... +// +// The NewEncoder and NewDecoder types return *json.Encoder and +// *json.Decoder respectively. +type JSONBuiltin struct{} + +// ContentType always Returns "application/json". +func (*JSONBuiltin) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON +func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal unmarshals JSON data into "v". +func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". 
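+//
+// Editorial sketch: the returned value is a plain *json.Decoder, so
+//
+//    var m map[string]interface{}
+//    err := new(JSONBuiltin).NewDecoder(strings.NewReader(`{"a": 1}`)).Decode(&m)
+//
+// decodes into m exactly as encoding/json would.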
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder { + return json.NewDecoder(r) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". +func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder { + return json.NewEncoder(w) +} + +// Delimiter for newline encoded JSON streams. +func (j *JSONBuiltin) Delimiter() []byte { + return []byte("\n") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go new file mode 100644 index 00000000..f0de351b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" +) + +// JSONPb is a Marshaler which marshals/unmarshals into/from JSON +// with the "github.com/golang/protobuf/jsonpb". +// It supports fully functionality of protobuf unlike JSONBuiltin. +// +// The NewDecoder method returns a DecoderWrapper, so the underlying +// *json.Decoder methods can be used. +type JSONPb jsonpb.Marshaler + +// ContentType always returns "application/json". +func (*JSONPb) ContentType() string { + return "application/json" +} + +// Marshal marshals "v" into JSON. +func (j *JSONPb) Marshal(v interface{}) ([]byte, error) { + if _, ok := v.(proto.Message); !ok { + return j.marshalNonProtoField(v) + } + + var buf bytes.Buffer + if err := j.marshalTo(&buf, v); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + buf, err := j.marshalNonProtoField(v) + if err != nil { + return err + } + _, err = w.Write(buf) + return err + } + return (*jsonpb.Marshaler)(j).Marshal(w, p) +} + +var ( + // protoMessageType is stored to prevent constant lookup of the same type at runtime. + protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem() +) + +// marshalNonProto marshals a non-message field of a protobuf message. +// This function does not correctly marshals arbitrary data structure into JSON, +// but it is only capable of marshaling non-message field values of protobuf, +// i.e. primitive types, enums; pointers to primitives or enums; maps from +// integer/string types to primitives/enums/pointers to messages. 
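+// Values outside that set, such as arbitrary structs, fall through to
+// encoding/json and may not follow jsonpb conventions.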
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) { + if v == nil { + return []byte("null"), nil + } + rv := reflect.ValueOf(v) + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return []byte("null"), nil + } + rv = rv.Elem() + } + + if rv.Kind() == reflect.Slice { + if rv.IsNil() { + if j.EmitDefaults { + return []byte("[]"), nil + } + return []byte("null"), nil + } + + if rv.Type().Elem().Implements(protoMessageType) { + var buf bytes.Buffer + err := buf.WriteByte('[') + if err != nil { + return nil, err + } + for i := 0; i < rv.Len(); i++ { + if i != 0 { + err = buf.WriteByte(',') + if err != nil { + return nil, err + } + } + if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil { + return nil, err + } + } + err = buf.WriteByte(']') + if err != nil { + return nil, err + } + + return buf.Bytes(), nil + } + } + + if rv.Kind() == reflect.Map { + m := make(map[string]*json.RawMessage) + for _, k := range rv.MapKeys() { + buf, err := j.Marshal(rv.MapIndex(k).Interface()) + if err != nil { + return nil, err + } + m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf) + } + if j.Indent != "" { + return json.MarshalIndent(m, "", j.Indent) + } + return json.Marshal(m) + } + if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts { + return json.Marshal(enum.String()) + } + return json.Marshal(rv.Interface()) +} + +// Unmarshal unmarshals JSON "data" into "v" +func (j *JSONPb) Unmarshal(data []byte, v interface{}) error { + return unmarshalJSONPb(data, v) +} + +// NewDecoder returns a Decoder which reads JSON stream from "r". +func (j *JSONPb) NewDecoder(r io.Reader) Decoder { + d := json.NewDecoder(r) + return DecoderWrapper{Decoder: d} +} + +// DecoderWrapper is a wrapper around a *json.Decoder that adds +// support for protos to the Decode method. +type DecoderWrapper struct { + *json.Decoder +} + +// Decode wraps the embedded decoder's Decode method to support +// protos using a jsonpb.Unmarshaler. +func (d DecoderWrapper) Decode(v interface{}) error { + return decodeJSONPb(d.Decoder, v) +} + +// NewEncoder returns an Encoder which writes JSON stream into "w". 
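+//
+// Editorial sketch: each Encode call writes one jsonpb-marshaled value
+// followed by Delimiter(), mirroring json.Encoder's trailing newline:
+//
+//    enc := (&JSONPb{OrigName: true}).NewEncoder(&buf) // buf is a bytes.Buffer
+//    err := enc.Encode(msg)                            // writes {...} plus "\n"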
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder { + return EncoderFunc(func(v interface{}) error { + if err := j.marshalTo(w, v); err != nil { + return err + } + // mimic json.Encoder by adding a newline (makes output + // easier to read when it contains multiple encoded items) + _, err := w.Write(j.Delimiter()) + return err + }) +} + +func unmarshalJSONPb(data []byte, v interface{}) error { + d := json.NewDecoder(bytes.NewReader(data)) + return decodeJSONPb(d, v) +} + +func decodeJSONPb(d *json.Decoder, v interface{}) error { + p, ok := v.(proto.Message) + if !ok { + return decodeNonProtoField(d, v) + } + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, p) +} + +func decodeNonProtoField(d *json.Decoder, v interface{}) error { + rv := reflect.ValueOf(v) + if rv.Kind() != reflect.Ptr { + return fmt.Errorf("%T is not a pointer", v) + } + for rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(rv.Type().Elem())) + } + if rv.Type().ConvertibleTo(typeProtoMessage) { + unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields} + return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message)) + } + rv = rv.Elem() + } + if rv.Kind() == reflect.Map { + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + conv, ok := convFromType[rv.Type().Key().Kind()] + if !ok { + return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key()) + } + + m := make(map[string]*json.RawMessage) + if err := d.Decode(&m); err != nil { + return err + } + for k, v := range m { + result := conv.Call([]reflect.Value{reflect.ValueOf(k)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + bk := result[0] + bv := reflect.New(rv.Type().Elem()) + if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil { + return err + } + rv.SetMapIndex(bk, bv.Elem()) + } + return nil + } + if _, ok := rv.Interface().(protoEnum); ok { + var repr interface{} + if err := d.Decode(&repr); err != nil { + return err + } + switch repr.(type) { + case string: + // TODO(yugui) Should use proto.StructProperties? + return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface()) + case float64: + rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type())) + return nil + default: + return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface()) + } + } + return d.Decode(v) +} + +type protoEnum interface { + fmt.Stringer + EnumDescriptor() ([]byte, []int) +} + +var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem() + +// Delimiter for newline encoded JSON streams. +func (j *JSONPb) Delimiter() []byte { + return []byte("\n") +} + +// allowUnknownFields helps not to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +var allowUnknownFields = true + +// DisallowUnknownFields enables option in decoder (unmarshaller) to +// return an error when it finds an unknown field. This function must be +// called before using the JSON marshaller. 
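+//
+// Editorial note: allowUnknownFields is package-global state, so this switch
+// affects every JSONPb decoder in the process; call it once at startup, e.g.
+//
+//    func init() { runtime.DisallowUnknownFields() }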
+func DisallowUnknownFields() { + allowUnknownFields = false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go new file mode 100644 index 00000000..f65d1a26 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go @@ -0,0 +1,62 @@ +package runtime + +import ( + "io" + + "errors" + "github.com/golang/protobuf/proto" + "io/ioutil" +) + +// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes +type ProtoMarshaller struct{} + +// ContentType always returns "application/octet-stream". +func (*ProtoMarshaller) ContentType() string { + return "application/octet-stream" +} + +// Marshal marshals "value" into Proto +func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) { + message, ok := value.(proto.Message) + if !ok { + return nil, errors.New("unable to marshal non proto field") + } + return proto.Marshal(message) +} + +// Unmarshal unmarshals proto "data" into "value" +func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error { + message, ok := value.(proto.Message) + if !ok { + return errors.New("unable to unmarshal non proto field") + } + return proto.Unmarshal(data, message) +} + +// NewDecoder returns a Decoder which reads proto stream from "reader". +func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder { + return DecoderFunc(func(value interface{}) error { + buffer, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + return marshaller.Unmarshal(buffer, value) + }) +} + +// NewEncoder returns an Encoder which writes proto stream into "writer". +func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder { + return EncoderFunc(func(value interface{}) error { + buffer, err := marshaller.Marshal(value) + if err != nil { + return err + } + _, err = writer.Write(buffer) + if err != nil { + return err + } + + return nil + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go new file mode 100644 index 00000000..98fe6e88 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go @@ -0,0 +1,48 @@ +package runtime + +import ( + "io" +) + +// Marshaler defines a conversion between byte sequence and gRPC payloads / fields. +type Marshaler interface { + // Marshal marshals "v" into byte sequence. + Marshal(v interface{}) ([]byte, error) + // Unmarshal unmarshals "data" into "v". + // "v" must be a pointer value. + Unmarshal(data []byte, v interface{}) error + // NewDecoder returns a Decoder which reads byte sequence from "r". + NewDecoder(r io.Reader) Decoder + // NewEncoder returns an Encoder which writes bytes sequence into "w". + NewEncoder(w io.Writer) Encoder + // ContentType returns the Content-Type which this marshaler is responsible for. + ContentType() string +} + +// Decoder decodes a byte sequence +type Decoder interface { + Decode(v interface{}) error +} + +// Encoder encodes gRPC payloads / fields into byte sequence. +type Encoder interface { + Encode(v interface{}) error +} + +// DecoderFunc adapts an decoder function into Decoder. 
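+//
+// Editorial sketch, with raw being some captured []byte:
+//
+//    d := DecoderFunc(func(v interface{}) error { return json.Unmarshal(raw, v) })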
+type DecoderFunc func(v interface{}) error + +// Decode delegates invocations to the underlying function itself. +func (f DecoderFunc) Decode(v interface{}) error { return f(v) } + +// EncoderFunc adapts an encoder function into Encoder +type EncoderFunc func(v interface{}) error + +// Encode delegates invocations to the underlying function itself. +func (f EncoderFunc) Encode(v interface{}) error { return f(v) } + +// Delimited defines the streaming delimiter. +type Delimited interface { + // Delimiter returns the record seperator for the stream. + Delimiter() []byte +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go new file mode 100644 index 00000000..5cc53ae4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go @@ -0,0 +1,91 @@ +package runtime + +import ( + "errors" + "net/http" +) + +// MIMEWildcard is the fallback MIME type used for requests which do not match +// a registered MIME type. +const MIMEWildcard = "*" + +var ( + acceptHeader = http.CanonicalHeaderKey("Accept") + contentTypeHeader = http.CanonicalHeaderKey("Content-Type") + + defaultMarshaler = &JSONPb{OrigName: true} +) + +// MarshalerForRequest returns the inbound/outbound marshalers for this request. +// It checks the registry on the ServeMux for the MIME type set by the Content-Type header. +// If it isn't set (or the request Content-Type is empty), checks for "*". +// If there are multiple Content-Type headers set, choose the first one that it can +// exactly match in the registry. +// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler. +func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) { + for _, acceptVal := range r.Header[acceptHeader] { + if m, ok := mux.marshalers.mimeMap[acceptVal]; ok { + outbound = m + break + } + } + + for _, contentTypeVal := range r.Header[contentTypeHeader] { + if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok { + inbound = m + break + } + } + + if inbound == nil { + inbound = mux.marshalers.mimeMap[MIMEWildcard] + } + if outbound == nil { + outbound = inbound + } + + return inbound, outbound +} + +// marshalerRegistry is a mapping from MIME types to Marshalers. +type marshalerRegistry struct { + mimeMap map[string]Marshaler +} + +// add adds a marshaler for a case-sensitive MIME type string ("*" to match any +// MIME type). +func (m marshalerRegistry) add(mime string, marshaler Marshaler) error { + if len(mime) == 0 { + return errors.New("empty MIME type") + } + + m.mimeMap[mime] = marshaler + + return nil +} + +// makeMarshalerMIMERegistry returns a new registry of marshalers. +// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces. +// +// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler +// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler +// with a "application/json" Content-Type. +// "*" can be used to match any Content-Type. +// This can be attached to a ServerMux with the marshaler option. 
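+//
+// Editorial sketch of the mapping described above, via WithMarshalerOption:
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithMarshalerOption("application/jsonpb", &runtime.JSONPb{OrigName: true}),
+//        runtime.WithMarshalerOption("application/json", &runtime.JSONBuiltin{}),
+//    )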
+func makeMarshalerMIMERegistry() marshalerRegistry { + return marshalerRegistry{ + mimeMap: map[string]Marshaler{ + MIMEWildcard: defaultMarshaler, + }, + } +} + +// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound +// Marshalers to a MIME type in mux. +func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption { + return func(mux *ServeMux) { + if err := mux.marshalers.add(mime, marshaler); err != nil { + panic(err) + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go new file mode 100644 index 00000000..1da3a588 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go @@ -0,0 +1,303 @@ +package runtime + +import ( + "context" + "fmt" + "net/http" + "net/textproto" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// A HandlerFunc handles a specific pair of path pattern and HTTP method. +type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) + +// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when +// a request is received with a URI path that does not match any registered +// service method. +// +// Since gRPC servers return an "Unimplemented" code for requests with an +// unrecognized URI path, this error also has a gRPC "Unimplemented" code. +var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented)) + +// ServeMux is a request multiplexer for grpc-gateway. +// It matches http requests to patterns and invokes the corresponding handler. +type ServeMux struct { + // handlers maps HTTP method to a list of handlers. + handlers map[string][]handler + forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error + marshalers marshalerRegistry + incomingHeaderMatcher HeaderMatcherFunc + outgoingHeaderMatcher HeaderMatcherFunc + metadataAnnotators []func(context.Context, *http.Request) metadata.MD + streamErrorHandler StreamErrorHandlerFunc + protoErrorHandler ProtoErrorHandlerFunc + disablePathLengthFallback bool + lastMatchWins bool +} + +// ServeMuxOption is an option that can be given to a ServeMux on construction. +type ServeMuxOption func(*ServeMux) + +// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption. +// +// forwardResponseOption is an option that will be called on the relevant context.Context, +// http.ResponseWriter, and proto.Message before every forwarded response. +// +// The message may be nil in the case where just a header is being sent. +func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption) + } +} + +// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context. +type HeaderMatcherFunc func(string) (string, bool) + +// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header +// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. 
HTTP headers that start with +// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'. +func DefaultHeaderMatcher(key string) (string, bool) { + key = textproto.CanonicalMIMEHeaderKey(key) + if isPermanentHTTPHeader(key) { + return MetadataPrefix + key, true + } else if strings.HasPrefix(key, MetadataHeaderPrefix) { + return key[len(MetadataHeaderPrefix):], true + } + return "", false +} + +// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. +// +// This matcher will be called with each header in http.Request. If matcher returns true, that header will be +// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. +func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.incomingHeaderMatcher = fn + } +} + +// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway. +// +// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be +// passed to http response returned from gateway. To transform the header before passing to response, +// matcher should return modified header. +func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { + return func(mux *ServeMux) { + mux.outgoingHeaderMatcher = fn + } +} + +// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used by services that need to read from http.Request and modify gRPC context. A common use case +// is reading token from cookie and adding it in gRPC context. +func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator) + } +} + +// WithProtoErrorHandler returns a ServeMuxOption for passing metadata to a gRPC context. +// +// This can be used to handle an error as general proto message defined by gRPC. +// The response including body and status is not backward compatible with the default error handler. +// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization. +func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.protoErrorHandler = fn + } +} + +// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback. +func WithDisablePathLengthFallback() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.disablePathLengthFallback = true + } +} + +// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream +// error handler, which allows for customizing the error trailer for server-streaming +// calls. +// +// For stream errors that occur before any response has been written, the mux's +// ProtoErrorHandler will be invoked. However, once data has been written, the errors must +// be handled differently: they must be included in the response body. The response body's +// final message will include the error details returned by the stream error handler. 
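+//
+// Editorial sketch of a handler that hides internal details from clients:
+//
+//    mux := runtime.NewServeMux(runtime.WithStreamErrorHandler(
+//        func(ctx context.Context, err error) *runtime.StreamError {
+//            serr := runtime.DefaultHTTPStreamErrorHandler(ctx, err)
+//            serr.Message = "stream failed"
+//            return serr
+//        }))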
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.streamErrorHandler = fn + } +} + +// WithLastMatchWins returns a ServeMuxOption that will enable "last +// match wins" behavior, where if multiple path patterns match a +// request path, the last one defined in the .proto file will be used. +func WithLastMatchWins() ServeMuxOption { + return func(serveMux *ServeMux) { + serveMux.lastMatchWins = true + } +} + +// NewServeMux returns a new ServeMux whose internal mapping is empty. +func NewServeMux(opts ...ServeMuxOption) *ServeMux { + serveMux := &ServeMux{ + handlers: make(map[string][]handler), + forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0), + marshalers: makeMarshalerMIMERegistry(), + streamErrorHandler: DefaultHTTPStreamErrorHandler, + } + + for _, opt := range opts { + opt(serveMux) + } + + if serveMux.protoErrorHandler != nil { + HTTPError = serveMux.protoErrorHandler + // OtherErrorHandler is no longer used when protoErrorHandler is set. + // Overwritten by a special error handler to return Unknown. + OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) { + ctx := context.Background() + _, outboundMarshaler := MarshalerForRequest(serveMux, r) + sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler") + serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr) + } + } + + if serveMux.incomingHeaderMatcher == nil { + serveMux.incomingHeaderMatcher = DefaultHeaderMatcher + } + + if serveMux.outgoingHeaderMatcher == nil { + serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { + return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true + } + } + + return serveMux +} + +// Handle associates "h" to the pair of HTTP method and path pattern. +func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) { + if s.lastMatchWins { + s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...) + } else { + s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h}) + } +} + +// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path. 
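+//
+// Editorial note: handlers are tried in registration order (reverse
+// registration order when WithLastMatchWins is set), and a ":verb" suffix on
+// the final path component is split off before pattern matching.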
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + path := r.URL.Path + if !strings.HasPrefix(path, "/") { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest)) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest) + } + return + } + + components := strings.Split(path[1:], "/") + l := len(components) + var verb string + if idx := strings.LastIndex(components[l-1], ":"); idx == 0 { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } + return + } else if idx > 0 { + c := components[l-1] + components[l-1], verb = c[:idx], c[idx+1:] + } + + if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { + r.Method = strings.ToUpper(override) + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + } + for _, h := range s.handlers[r.Method] { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + h.h(w, r, pathParams) + return + } + + // lookup other methods to handle fallback from GET to POST and + // to determine if it is MethodNotAllowed or NotFound. + for m, handlers := range s.handlers { + if m == r.Method { + continue + } + for _, h := range handlers { + pathParams, err := h.pat.Match(components, verb) + if err != nil { + continue + } + // X-HTTP-Method-Override is optional. Always allow fallback to POST. + if s.isPathLengthFallback(r) { + if err := r.ParseForm(); err != nil { + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + sterr := status.Error(codes.InvalidArgument, err.Error()) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr) + } else { + OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest) + } + return + } + h.h(w, r, pathParams) + return + } + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed) + } + return + } + } + + if s.protoErrorHandler != nil { + _, outboundMarshaler := MarshalerForRequest(s, r) + s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI) + } else { + OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound) + } +} + +// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux. 
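+//
+// Gateway-generated handlers typically thread these options into
+// ForwardResponseMessage and ForwardResponseStream on every response.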
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go new file mode 100644 index 00000000..09053695 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string + // assumeColonVerb indicates whether a path suffix after a final + // colon may only be interpreted as a verb. + assumeColonVerb bool +} + +type patternOptions struct { + assumeColonVerb bool +} + +// PatternOpt is an option for creating Patterns. +type PatternOpt func(*patternOptions) + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
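+//
+// Editorial sketch: a template such as "/v1/{name}" compiles to roughly
+//
+//    runtime.NewPattern(1,
+//        []int{int(utilities.OpLitPush), 0, int(utilities.OpPush), 0,
+//            int(utilities.OpConcatN), 1, int(utilities.OpCapture), 1},
+//        []string{"v1", "name"}, "")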
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { + options := patternOptions{ + assumeColonVerb: true, + } + for _, o := range opts { + o(&options) + } + + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + assumeColonVerb: options.assumeColonVerb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + if p.assumeColonVerb || p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
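+ // Copy the slice before mutating it so the caller's components are left
+ // untouched, then re-attach the verb for literal matching.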
+ components[len(components)-1] += ":" + verb + } + verb = "" + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +// AssumeColonVerbOpt indicates whether a path suffix after a final +// colon may only be interpreted as a verb. +func AssumeColonVerbOpt(val bool) PatternOpt { + return PatternOpt(func(o *patternOptions) { + o.assumeColonVerb = val + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go new file mode 100644 index 00000000..a3151e2a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. 
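+// For example, Float64P("1.5") returns a pointer to the float64 value 1.5.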
+func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. +func Float32P(val string) (*float32, error) { + f, err := Float32(val) + if err != nil { + return nil, err + } + return proto.Float32(f), nil +} + +// Int64P parses the given string representation of an integer +// and returns a pointer to a int64 whose value is same as the parsed integer. +func Int64P(val string) (*int64, error) { + i, err := Int64(val) + if err != nil { + return nil, err + } + return proto.Int64(i), nil +} + +// Int32P parses the given string representation of an integer +// and returns a pointer to a int32 whose value is same as the parsed integer. +func Int32P(val string) (*int32, error) { + i, err := Int32(val) + if err != nil { + return nil, err + } + return proto.Int32(i), err +} + +// Uint64P parses the given string representation of an integer +// and returns a pointer to a uint64 whose value is same as the parsed integer. +func Uint64P(val string) (*uint64, error) { + i, err := Uint64(val) + if err != nil { + return nil, err + } + return proto.Uint64(i), err +} + +// Uint32P parses the given string representation of an integer +// and returns a pointer to a uint32 whose value is same as the parsed integer. +func Uint32P(val string) (*uint32, error) { + i, err := Uint32(val) + if err != nil { + return nil, err + } + return proto.Uint32(i), err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go new file mode 100644 index 00000000..ca76324e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go @@ -0,0 +1,106 @@ +package runtime + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/ptypes/any" + "github.com/grpc-ecosystem/grpc-gateway/internal" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/status" +) + +// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a +// a proto struct used to represent error at the end of a stream. +type StreamErrorHandlerFunc func(context.Context, error) *StreamError + +// StreamError is the payload for the final message in a server stream in the event that the server returns an +// error after a response message has already been sent. +type StreamError internal.StreamError + +// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request. +type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error) + +var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler + +// DefaultHTTPProtoErrorHandler is an implementation of HTTPError. +// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode. +// If otherwise, it replies with http.StatusInternalServerError. +// +// The response body returned by this function is a Status message marshaled by a Marshaler. +// +// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead. 
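+//
+// Editorial sketch of wiring it in explicitly:
+//
+//    mux := runtime.NewServeMux(
+//        runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler))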
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatability + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok { + pb := s.Proto() + contentType = httpBodyMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via +// default logic. +// +// It extracts the gRPC status from err if possible. The fields of the status are +// used to populate the returned StreamError, and the HTTP status code is derived +// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a +// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. +func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return &StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go new file mode 100644 index 00000000..5fbba5e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -0,0 +1,391 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +// PopulateQueryParameters populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". 
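+//
+// Editorial sketch: for a query string such as ?a.b=1&a.c=x, the field paths
+// "a.b" and "a.c" are walked into msg, instantiating the intermediate message
+// "a" as needed; a filter containing the prefix "a" suppresses both.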
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + re, err := regexp.Compile("^(.*)\\[(.*)\\]$") + if err != nil { + return err + } + match := re.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +// It instantiates missing protobuf fields as it goes. +func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg, fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { + m := reflect.ValueOf(msg) + if m.Kind() != reflect.Ptr { + return fmt.Errorf("unexpected type %T: %v", msg, msg) + } + var props *proto.Properties + m = m.Elem() + for i, fieldName := range fieldPath { + isLast := i == len(fieldPath)-1 + if !isLast && m.Kind() != reflect.Struct { + return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) + } + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { + grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) + return nil + } + + switch f.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + m = f + case reflect.Slice: + if !isLast { + return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) + } + // Handle []byte + if f.Type().Elem().Kind() == reflect.Uint8 { + m = f + break + } + return populateRepeatedField(f, values, props) + case reflect.Ptr: + if f.IsNil() { + m = reflect.New(f.Type().Elem()) + f.Set(m.Convert(f.Type())) + } + m = f.Elem() + continue + case reflect.Struct: + m = f + continue + case reflect.Map: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + return populateMapField(f, values, props) + default: + return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) + } + } + switch len(values) { + case 0: + return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) + case 1: + default: + grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) + } + return populateField(m, values[0], props) +} + +// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". +// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
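+//
+// Editorial note: the lookup accepts either the proto field name (OrigName)
+// or the JSON name, and oneof wrapper fields are instantiated on first use.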
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
+	props := proto.GetProperties(m.Type())
+
+	// look up field name in oneof map
+	if op, ok := props.OneofTypes[name]; ok {
+		v := reflect.New(op.Type.Elem())
+		field := m.Field(op.Field)
+		if !field.IsNil() {
+			return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
+		}
+		field.Set(v)
+		return v.Elem().Field(0), op.Prop, nil
+	}
+
+	for _, p := range props.Prop {
+		if p.OrigName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+		if p.JSONName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+	}
+	return reflect.Value{}, nil, nil
+}
+
+func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
+	if len(values) != 2 {
+		return fmt.Errorf("expected a key and a value for map %s, got %d values", props.Name, len(values))
+	}
+
+	key, value := values[0], values[1]
+	keyType := f.Type().Key()
+	valueType := f.Type().Elem()
+	if f.IsNil() {
+		f.Set(reflect.MakeMap(f.Type()))
+	}
+
+	keyConv, ok := convFromType[keyType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
+	}
+	valueConv, ok := convFromType[valueType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
+	}
+
+	keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
+	if err := keyV[1].Interface(); err != nil {
+		return err.(error)
+	}
+	valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := valueV[1].Interface(); err != nil {
+		return err.(error)
+	}
+
+	f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
+
+	return nil
+}
+
+func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
+	elemType := f.Type().Elem()
+
+	// is the destination field a slice of an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnumRepeated(f, values, enumValMap)
+	}
+
+	conv, ok := convFromType[elemType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported field type %s", elemType)
+	}
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
+		if err := result[1].Interface(); err != nil {
+			return err.(error)
+		}
+		f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
+	}
+	return nil
+}
+
+func populateField(f reflect.Value, value string, props *proto.Properties) error {
+	i := f.Addr().Interface()
+
+	// Handle protobuf well known types
+	var name string
+	switch m := i.(type) {
+	case interface{ XXX_WellKnownType() string }:
+		name = m.XXX_WellKnownType()
+	case proto.Message:
+		const wktPrefix = "google.protobuf."
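+		// e.g. "google.protobuf.Timestamp" is reduced to "Timestamp" below.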
+		if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
+			name = fullName[len(wktPrefix):]
+		}
+	}
+	switch name {
+	case "Timestamp":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+
+		t, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		f.FieldByName("Seconds").SetInt(int64(t.Unix()))
+		f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
+		return nil
+	case "Duration":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+
+		ns := d.Nanoseconds()
+		s := ns / 1e9
+		ns %= 1e9
+		f.FieldByName("Seconds").SetInt(s)
+		f.FieldByName("Nanos").SetInt(ns)
+		return nil
+	case "DoubleValue":
+		fallthrough
+	case "FloatValue":
+		float64Val, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return fmt.Errorf("bad DoubleValue/FloatValue: %s", value)
+		}
+		f.FieldByName("Value").SetFloat(float64Val)
+		return nil
+	case "Int64Value":
+		fallthrough
+	case "Int32Value":
+		int64Val, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad Int64Value/Int32Value: %s", value)
+		}
+		f.FieldByName("Value").SetInt(int64Val)
+		return nil
+	case "UInt64Value":
+		fallthrough
+	case "UInt32Value":
+		uint64Val, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad UInt64Value/UInt32Value: %s", value)
+		}
+		f.FieldByName("Value").SetUint(uint64Val)
+		return nil
+	case "BoolValue":
+		if value == "true" {
+			f.FieldByName("Value").SetBool(true)
+		} else if value == "false" {
+			f.FieldByName("Value").SetBool(false)
+		} else {
+			return fmt.Errorf("bad BoolValue: %s", value)
+		}
+		return nil
+	case "StringValue":
+		f.FieldByName("Value").SetString(value)
+		return nil
+	case "BytesValue":
+		bytesVal, err := base64.StdEncoding.DecodeString(value)
+		if err != nil {
+			return fmt.Errorf("bad BytesValue: %s", value)
+		}
+		f.FieldByName("Value").SetBytes(bytesVal)
+		return nil
+	case "FieldMask":
+		p := f.FieldByName("Paths")
+		for _, v := range strings.Split(value, ",") {
+			if v != "" {
+				p.Set(reflect.Append(p, reflect.ValueOf(v)))
+			}
+		}
+		return nil
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
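+	// Both the symbolic enum name (e.g. "ACTIVE") and its numeric value are
+	// accepted; convertEnum below handles either form.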
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel new file mode 100644 index 00000000..7109d793 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go new file mode 100644 index 00000000..cf79a4d5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. 
+package utilities
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 00000000..dfe7de48
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to the stack
+	OpPush
+	// OpLitPush pushes a component to the stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes them to the stack
+	OpPushM
+	// OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 00000000..6dd38546
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 00000000..c2b7b30d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a double-array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
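+//
+// For illustration (hypothetical input): NewDoubleArray([][]string{{"foo", "bar"}})
+// stores the single sequence ["foo", "bar"], so HasCommonPrefix([]string{"foo",
+// "bar", "baz"}) reports true while HasCommonPrefix([]string{"foo"}) reports false.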
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		var encoded []int
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := int(-1)
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		if lastVal == seqs[i][n.col+1] {
+			continue
+		}
+		lastVal = seqs[i][n.col+1]
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	if k < len(sj) {
+		return true
+	}
+	return false
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
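+// The walk below follows Base/Check transitions token by token; reaching the
+// terminator code (len(da.Encoding)) from the current state means a stored
+// sequence ends there, i.e. it is a prefix of the given sequence.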
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 00000000..a967ae45 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,9 @@ +# UNRELEASED + +FEATURES + +* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)] + +# 1.0.0 (August 30th, 2018) + +* go mod adopted diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. 
"Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. 
Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. 
Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. 
Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/README.md new file mode 100644 index 00000000..aca15a64 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/README.md @@ -0,0 +1,66 @@ +go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master) +========= + +Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree). 
+The package only provides a single `Tree` implementation, optimized for sparse nodes.
+
+As a radix tree, it provides the following:
+ * O(k) operations. In many cases, this can be faster than a hash table since
+   the hash function is an O(k) operation, and hash tables have very poor cache locality.
+ * Minimum / Maximum value lookups
+ * Ordered iteration
+
+A tree supports using a transaction to batch multiple updates (insert, delete)
+in a more efficient manner than performing each operation one at a time.
+
+For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
+
+Example
+=======
+
+Below is a simple example of usage:
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("foo"), 1)
+r, _, _ = r.Insert([]byte("bar"), 2)
+r, _, _ = r.Insert([]byte("foobar"), 2)
+
+// Find the longest prefix match
+m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
+if string(m) != "foo" {
+    panic("should be foo")
+}
+```
+
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+    if string(key) >= "050" {
+        break
+    }
+    fmt.Println(string(key))
+}
+// Output:
+//  005
+//  010
+```
+
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/edges.go
new file mode 100644
index 00000000..a6367477
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/edges.go
@@ -0,0 +1,21 @@
+package iradix
+
+import "sort"
+
+type edges []edge
+
+func (e edges) Len() int {
+	return len(e)
+}
+
+func (e edges) Less(i, j int) bool {
+	return e[i].label < e[j].label
+}
+
+func (e edges) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+func (e edges) Sort() {
+	sort.Sort(e)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/go.mod
new file mode 100644
index 00000000..27e7b7c9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/go.mod
@@ -0,0 +1,6 @@
+module github.com/hashicorp/go-immutable-radix
+
+require (
+	github.com/hashicorp/go-uuid v1.0.0
+	github.com/hashicorp/golang-lru v0.5.0
+)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/go.sum
new file mode 100644
index 00000000..7de5dfc5
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/go.sum
@@ -0,0 +1,4 @@
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/iradix.go new file mode 100644 index 00000000..e5e6e57f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/iradix.go @@ -0,0 +1,662 @@ +package iradix + +import ( + "bytes" + "strings" + + "github.com/hashicorp/golang-lru/simplelru" +) + +const ( + // defaultModifiedCache is the default size of the modified node + // cache used per transaction. This is used to cache the updates + // to the nodes near the root, while the leaves do not need to be + // cached. This is important for very large transactions to prevent + // the modified cache from growing to be enormous. This is also used + // to set the max size of the mutation notify maps since those should + // also be bounded in a similar way. + defaultModifiedCache = 8192 +) + +// Tree implements an immutable radix tree. This can be treated as a +// Dictionary abstract data type. The main advantage over a standard +// hash map is prefix-based lookups and ordered iteration. The immutability +// means that it is safe to concurrently read from a Tree without any +// coordination. +type Tree struct { + root *Node + size int +} + +// New returns an empty Tree +func New() *Tree { + t := &Tree{ + root: &Node{ + mutateCh: make(chan struct{}), + }, + } + return t +} + +// Len is used to return the number of elements in the tree +func (t *Tree) Len() int { + return t.size +} + +// Txn is a transaction on the tree. This transaction is applied +// atomically and returns a new tree when committed. A transaction +// is not thread safe, and should only be used by a single goroutine. +type Txn struct { + // root is the modified root for the transaction. + root *Node + + // snap is a snapshot of the root node for use if we have to run the + // slow notify algorithm. + snap *Node + + // size tracks the size of the tree as it is modified during the + // transaction. + size int + + // writable is a cache of writable nodes that have been created during + // the course of the transaction. This allows us to re-use the same + // nodes for further writes and avoid unnecessary copies of nodes that + // have never been exposed outside the transaction. This will only hold + // up to defaultModifiedCache number of entries. + writable *simplelru.LRU + + // trackChannels is used to hold channels that need to be notified to + // signal mutation of the tree. This will only hold up to + // defaultModifiedCache number of entries, after which we will set the + // trackOverflow flag, which will cause us to use a more expensive + // algorithm to perform the notifications. Mutation tracking is only + // performed if trackMutate is true. + trackChannels map[chan struct{}]struct{} + trackOverflow bool + trackMutate bool +} + +// Txn starts a new transaction that can be used to mutate the tree +func (t *Tree) Txn() *Txn { + txn := &Txn{ + root: t.root, + snap: t.root, + size: t.size, + } + return txn +} + +// TrackMutate can be used to toggle if mutations are tracked. If this is enabled +// then notifications will be issued for affected internal nodes and leaves when +// the transaction is committed. +func (t *Txn) TrackMutate(track bool) { + t.trackMutate = track +} + +// trackChannel safely attempts to track the given mutation channel, setting the +// overflow flag if we can no longer track any more. 
This limits the amount of
+// state that will accumulate during a transaction and we have a slower algorithm
+// to switch to if we overflow.
+func (t *Txn) trackChannel(ch chan struct{}) {
+	// In overflow, make sure we don't store any more objects.
+	if t.trackOverflow {
+		return
+	}
+
+	// If this would overflow the state we reject it and set the flag (since
+	// we aren't tracking everything that's required any longer).
+	if len(t.trackChannels) >= defaultModifiedCache {
+		// Mark that we are in the overflow state
+		t.trackOverflow = true
+
+		// Clear the map so that the channels can be garbage collected. It is
+		// safe to do this since we have already overflowed and will be using
+		// the slow notify algorithm.
+		t.trackChannels = nil
+		return
+	}
+
+	// Create the map on the fly when we need it.
+	if t.trackChannels == nil {
+		t.trackChannels = make(map[chan struct{}]struct{})
+	}
+
+	// Otherwise we are good to track it.
+	t.trackChannels[ch] = struct{}{}
+}
+
+// writeNode returns a node to be modified; if the current node has already been
+// modified during the course of the transaction, it is used in-place. Set
+// forLeafUpdate to true if you are getting a write node to update the leaf,
+// which will set leaf mutation tracking appropriately as well.
+func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
+	// Ensure the writable set exists.
+	if t.writable == nil {
+		lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
+		if err != nil {
+			panic(err)
+		}
+		t.writable = lru
+	}
+
+	// If this node has already been modified, we can continue to use it
+	// during this transaction. We know that we don't need to track it for
+	// a node update since the node is writable, but if this is for a leaf
+	// update we track it, in case the initial write to this node didn't
+	// update the leaf.
+	if _, ok := t.writable.Get(n); ok {
+		if t.trackMutate && forLeafUpdate && n.leaf != nil {
+			t.trackChannel(n.leaf.mutateCh)
+		}
+		return n
+	}
+
+	// Mark this node as being mutated.
+	if t.trackMutate {
+		t.trackChannel(n.mutateCh)
+	}
+
+	// Mark its leaf as being mutated, if appropriate.
+	if t.trackMutate && forLeafUpdate && n.leaf != nil {
+		t.trackChannel(n.leaf.mutateCh)
+	}
+
+	// Copy the existing node. If you have set forLeafUpdate it will be
+	// safe to replace this leaf with another after you get your node for
+	// writing. You MUST replace it, because the channel associated with
+	// this leaf will be closed when this transaction is committed.
+	nc := &Node{
+		mutateCh: make(chan struct{}),
+		leaf:     n.leaf,
+	}
+	if n.prefix != nil {
+		nc.prefix = make([]byte, len(n.prefix))
+		copy(nc.prefix, n.prefix)
+	}
+	if len(n.edges) != 0 {
+		nc.edges = make([]edge, len(n.edges))
+		copy(nc.edges, n.edges)
+	}
+
+	// Mark this node as writable.
+	t.writable.Add(nc, nil)
+	return nc
+}
+
+// Visit all the nodes in the tree under n, and add their mutateChannels to the
+// transaction. Returns the size of the subtree visited.
+func (t *Txn) trackChannelsAndCount(n *Node) int {
+	// Count only leaf nodes
+	leaves := 0
+	if n.leaf != nil {
+		leaves = 1
+	}
+	// Mark this node as being mutated.
+	if t.trackMutate {
+		t.trackChannel(n.mutateCh)
+	}
+
+	// Mark its leaf as being mutated, if appropriate.
+	if t.trackMutate && n.leaf != nil {
+		t.trackChannel(n.leaf.mutateCh)
+	}
+
+	// Recurse on the children
+	for _, e := range n.edges {
+		leaves += t.trackChannelsAndCount(e.node)
+	}
+	return leaves
+}
+
+// mergeChild is called to collapse the given node with its child.
This is only
+// called when the given node is not a leaf and has a single edge.
+func (t *Txn) mergeChild(n *Node) {
+	// Mark the child node as being mutated since we are about to abandon
+	// it. We don't need to mark the leaf since we are retaining it if it
+	// is there.
+	e := n.edges[0]
+	child := e.node
+	if t.trackMutate {
+		t.trackChannel(child.mutateCh)
+	}
+
+	// Merge the nodes.
+	n.prefix = concat(n.prefix, child.prefix)
+	n.leaf = child.leaf
+	if len(child.edges) != 0 {
+		n.edges = make([]edge, len(child.edges))
+		copy(n.edges, child.edges)
+	} else {
+		n.edges = nil
+	}
+}
+
+// insert does a recursive insertion
+func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
+	// Handle key exhaustion
+	if len(search) == 0 {
+		var oldVal interface{}
+		didUpdate := false
+		if n.isLeaf() {
+			oldVal = n.leaf.val
+			didUpdate = true
+		}
+
+		nc := t.writeNode(n, true)
+		nc.leaf = &leafNode{
+			mutateCh: make(chan struct{}),
+			key:      k,
+			val:      v,
+		}
+		return nc, oldVal, didUpdate
+	}
+
+	// Look for the edge
+	idx, child := n.getEdge(search[0])
+
+	// No edge, create one
+	if child == nil {
+		e := edge{
+			label: search[0],
+			node: &Node{
+				mutateCh: make(chan struct{}),
+				leaf: &leafNode{
+					mutateCh: make(chan struct{}),
+					key:      k,
+					val:      v,
+				},
+				prefix: search,
+			},
+		}
+		nc := t.writeNode(n, false)
+		nc.addEdge(e)
+		return nc, nil, false
+	}
+
+	// Determine longest prefix of the search key on match
+	commonPrefix := longestPrefix(search, child.prefix)
+	if commonPrefix == len(child.prefix) {
+		search = search[commonPrefix:]
+		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
+		if newChild != nil {
+			nc := t.writeNode(n, false)
+			nc.edges[idx].node = newChild
+			return nc, oldVal, didUpdate
+		}
+		return nil, oldVal, didUpdate
+	}
+
+	// Split the node
+	nc := t.writeNode(n, false)
+	splitNode := &Node{
+		mutateCh: make(chan struct{}),
+		prefix:   search[:commonPrefix],
+	}
+	nc.replaceEdge(edge{
+		label: search[0],
+		node:  splitNode,
+	})
+
+	// Restore the existing child node
+	modChild := t.writeNode(child, false)
+	splitNode.addEdge(edge{
+		label: modChild.prefix[commonPrefix],
+		node:  modChild,
+	})
+	modChild.prefix = modChild.prefix[commonPrefix:]
+
+	// Create a new leaf node
+	leaf := &leafNode{
+		mutateCh: make(chan struct{}),
+		key:      k,
+		val:      v,
+	}
+
+	// If the new key is a subset, add it to this node
+	search = search[commonPrefix:]
+	if len(search) == 0 {
+		splitNode.leaf = leaf
+		return nc, nil, false
+	}
+
+	// Create a new edge for the node
+	splitNode.addEdge(edge{
+		label: search[0],
+		node: &Node{
+			mutateCh: make(chan struct{}),
+			leaf:     leaf,
+			prefix:   search,
+		},
+	})
+	return nc, nil, false
+}
+
+// delete does a recursive deletion
+func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		if !n.isLeaf() {
+			return nil, nil
+		}
+		// Copy the pointer in case we are in a transaction that already
+		// modified this node since the node will be reused. Any changes
+		// made to the node will not affect returning the original leaf
+		// value.
+		oldLeaf := n.leaf
+
+		// Remove the leaf node
+		nc := t.writeNode(n, true)
+		nc.leaf = nil
+
+		// Check if this node should be merged
+		if n != t.root && len(nc.edges) == 1 {
+			t.mergeChild(nc)
+		}
+		return nc, oldLeaf
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	if child == nil || !bytes.HasPrefix(search, child.prefix) {
+		return nil, nil
+	}
+
+	// Consume the search prefix
+	search = search[len(child.prefix):]
+	newChild, leaf := t.delete(n, child, search)
+	if newChild == nil {
+		return nil, nil
+	}
+
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, leaf
+}
+
+// deletePrefix does a recursive deletion of all keys under a prefix
+func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
+	// Check for key exhaustion
+	if len(search) == 0 {
+		nc := t.writeNode(n, true)
+		if n.isLeaf() {
+			nc.leaf = nil
+		}
+		nc.edges = nil
+		return nc, t.trackChannelsAndCount(n)
+	}
+
+	// Look for an edge
+	label := search[0]
+	idx, child := n.getEdge(label)
+	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
+	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
+	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
+		return nil, 0
+	}
+
+	// Consume the search prefix
+	if len(child.prefix) > len(search) {
+		search = []byte("")
+	} else {
+		search = search[len(child.prefix):]
+	}
+	newChild, numDeletions := t.deletePrefix(n, child, search)
+	if newChild == nil {
+		return nil, 0
+	}
+	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
+	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
+	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
+	// so be careful if you change any of the logic here.
+	nc := t.writeNode(n, false)
+
+	// Delete the edge if the node has no edges
+	if newChild.leaf == nil && len(newChild.edges) == 0 {
+		nc.delEdge(label)
+		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
+			t.mergeChild(nc)
+		}
+	} else {
+		nc.edges[idx].node = newChild
+	}
+	return nc, numDeletions
+}
+
+// Insert is used to add or update a given key. The return provides
+// the previous value and a bool indicating if any was set.
+func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
+	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if !didUpdate {
+		t.size++
+	}
+	return oldVal, didUpdate
+}
+
+// Delete is used to delete a given key. Returns the old value if any,
+// and a bool indicating if the key was set.
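+//
+// A minimal transaction sketch (keys and values illustrative):
+//
+//	txn := tree.Txn()
+//	txn.Insert([]byte("a"), 1)
+//	oldVal, existed := txn.Delete([]byte("b"))
+//	tree = txn.Commit()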
+func (t *Txn) Delete(k []byte) (interface{}, bool) {
+	newRoot, leaf := t.delete(nil, t.root, k)
+	if newRoot != nil {
+		t.root = newRoot
+	}
+	if leaf != nil {
+		t.size--
+		return leaf.val, true
+	}
+	return nil, false
+}
+
+// DeletePrefix is used to delete an entire subtree that matches the prefix.
+// This will delete all nodes under that prefix.
+func (t *Txn) DeletePrefix(prefix []byte) bool {
+	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
+	if newRoot != nil {
+		t.root = newRoot
+		t.size = t.size - numDeletions
+		return true
+	}
+	return false
+}
+
+// Root returns the current root of the radix tree within this
+// transaction. The root is not safe across insert and delete operations,
+// but can be used to read the current state during a transaction.
+func (t *Txn) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Txn) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// GetWatch is used to lookup a specific key, returning
+// the watch channel, value and if it was found
+func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
+	return t.root.GetWatch(k)
+}
+
+// Commit is used to finalize the transaction and return a new tree. If mutation
+// tracking is turned on then notifications will also be issued.
+func (t *Txn) Commit() *Tree {
+	nt := t.CommitOnly()
+	if t.trackMutate {
+		t.Notify()
+	}
+	return nt
+}
+
+// CommitOnly is used to finalize the transaction and return a new tree, but
+// does not issue any notifications until Notify is called.
+func (t *Txn) CommitOnly() *Tree {
+	nt := &Tree{t.root, t.size}
+	t.writable = nil
+	return nt
+}
+
+// slowNotify does a complete comparison of the before and after trees in order
+// to trigger notifications. This doesn't require any additional state but it
+// is very expensive to compute.
+func (t *Txn) slowNotify() {
+	snapIter := t.snap.rawIterator()
+	rootIter := t.root.rawIterator()
+	for snapIter.Front() != nil || rootIter.Front() != nil {
+		// If we've exhausted the nodes in the old snapshot, we know
+		// there's nothing remaining to notify.
+		if snapIter.Front() == nil {
+			return
+		}
+		snapElem := snapIter.Front()
+
+		// If we've exhausted the nodes in the new root, we know we need
+		// to invalidate everything that remains in the old snapshot. We
+		// know from the loop condition there's something in the old
+		// snapshot.
+		if rootIter.Front() == nil {
+			close(snapElem.mutateCh)
+			if snapElem.isLeaf() {
+				close(snapElem.leaf.mutateCh)
+			}
+			snapIter.Next()
+			continue
+		}
+
+		// Do one string compare so we can check the various conditions
+		// below without repeating the compare.
+		cmp := strings.Compare(snapIter.Path(), rootIter.Path())
+
+		// If the snapshot is behind the root, then we must have deleted
+		// this node during the transaction.
+		if cmp < 0 {
+			close(snapElem.mutateCh)
+			if snapElem.isLeaf() {
+				close(snapElem.leaf.mutateCh)
+			}
+			snapIter.Next()
+			continue
+		}
+
+		// If the snapshot is ahead of the root, then we must have added
+		// this node during the transaction.
+		if cmp > 0 {
+			rootIter.Next()
+			continue
+		}
+
+		// If we have the same path, then we need to see if we mutated a
+		// node and possibly the leaf.
+ rootElem := rootIter.Front() + if snapElem != rootElem { + close(snapElem.mutateCh) + if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) { + close(snapElem.leaf.mutateCh) + } + } + snapIter.Next() + rootIter.Next() + } +} + +// Notify is used along with TrackMutate to trigger notifications. This must +// only be done once a transaction is committed via CommitOnly, and it is called +// automatically by Commit. +func (t *Txn) Notify() { + if !t.trackMutate { + return + } + + // If we've overflowed the tracking state we can't use it in any way and + // need to do a full tree compare. + if t.trackOverflow { + t.slowNotify() + } else { + for ch := range t.trackChannels { + close(ch) + } + } + + // Clean up the tracking state so that a re-notify is safe (will trigger + // the else clause above which will be a no-op). + t.trackChannels = nil + t.trackOverflow = false +} + +// Insert is used to add or update a given key. The return provides +// the new tree, previous value and a bool indicating if any was set. +func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Insert(k, v) + return txn.Commit(), old, ok +} + +// Delete is used to delete a given key. Returns the new tree, +// old value if any, and a bool indicating if the key was set. +func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) { + txn := t.Txn() + old, ok := txn.Delete(k) + return txn.Commit(), old, ok +} + +// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree, +// and a bool indicating if the prefix matched any nodes +func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) { + txn := t.Txn() + ok := txn.DeletePrefix(k) + return txn.Commit(), ok +} + +// Root returns the root node of the tree which can be used for richer +// query operations. 
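+//
+// For example (illustrative keys), r.Root().LongestPrefix([]byte("foozip"))
+// returns the entry stored under "foo" when "foo" is the longest stored
+// prefix of the lookup key, and r.Root().Iterator() supports ordered and
+// lower-bound scans over the keys.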
+func (t *Tree) Root() *Node {
+	return t.root
+}
+
+// Get is used to lookup a specific key, returning
+// the value and if it was found
+func (t *Tree) Get(k []byte) (interface{}, bool) {
+	return t.root.Get(k)
+}
+
+// longestPrefix finds the length of the shared prefix
+// of two strings
+func longestPrefix(k1, k2 []byte) int {
+	max := len(k1)
+	if l := len(k2); l < max {
+		max = l
+	}
+	var i int
+	for i = 0; i < max; i++ {
+		if k1[i] != k2[i] {
+			break
+		}
+	}
+	return i
+}
+
+// concat two byte slices, returning a third new copy
+func concat(a, b []byte) []byte {
+	c := make([]byte, len(a)+len(b))
+	copy(c, a)
+	copy(c[len(a):], b)
+	return c
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/iter.go
new file mode 100644
index 00000000..1ecaf831
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -0,0 +1,188 @@
+package iradix
+
+import (
+	"bytes"
+)
+
+// Iterator is used to iterate over a set of nodes
+// in pre-order
+type Iterator struct {
+	node  *Node
+	stack []edges
+}
+
+// SeekPrefixWatch is used to seek the iterator to a given prefix
+// and returns the watch channel of the finest granularity
+func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
+	// Wipe the stack
+	i.stack = nil
+	n := i.node
+	watch = n.mutateCh
+	search := prefix
+	for {
+		// Check for key exhaustion
+		if len(search) == 0 {
+			i.node = n
+			return
+		}
+
+		// Look for an edge
+		_, n = n.getEdge(search[0])
+		if n == nil {
+			i.node = nil
+			return
+		}
+
+		// Update to the finest granularity as the search makes progress
+		watch = n.mutateCh
+
+		// Consume the search prefix
+		if bytes.HasPrefix(search, n.prefix) {
+			search = search[len(n.prefix):]
+
+		} else if bytes.HasPrefix(n.prefix, search) {
+			i.node = n
+			return
+		} else {
+			i.node = nil
+			return
+		}
+	}
+}
+
+// SeekPrefix is used to seek the iterator to a given prefix
+func (i *Iterator) SeekPrefix(prefix []byte) {
+	i.SeekPrefixWatch(prefix)
+}
+
+func (i *Iterator) recurseMin(n *Node) *Node {
+	// Traverse to the minimum child
+	if n.leaf != nil {
+		return n
+	}
+	if len(n.edges) > 0 {
+		// Add all the other edges to the stack (the min node will be added as
+		// we recurse)
+		i.stack = append(i.stack, n.edges[1:])
+		return i.recurseMin(n.edges[0].node)
+	}
+	// Shouldn't be possible
+	return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater than or equal to the given key. There is no watch variant as it's
+// hard to predict based on the radix structure which node(s) changes might
+// affect the result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound.
+	i.stack = []edges{}
+	n := i.node
+	search := key
+
+	found := func(n *Node) {
+		i.node = n
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+ var prefixCmp int
+ if len(n.prefix) < len(search) {
+ prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+ } else {
+ prefixCmp = bytes.Compare(n.prefix, search)
+ }
+
+ if prefixCmp > 0 {
+ // Prefix is larger, which means the lower bound is greater than the search
+ // and from now on we need to follow the minimum path to the smallest
+ // leaf under this subtree.
+ n = i.recurseMin(n)
+ if n != nil {
+ found(n)
+ }
+ return
+ }
+
+ if prefixCmp < 0 {
+ // Prefix is smaller than the search prefix, which means there is no lower
+ // bound
+ i.node = nil
+ return
+ }
+
+ // Prefix is equal, so we are still heading for an exact match. If this is a
+ // leaf we're done.
+ if n.leaf != nil {
+ if bytes.Compare(n.leaf.key, key) < 0 {
+ i.node = nil
+ return
+ }
+ found(n)
+ return
+ }
+
+ // Consume the search prefix
+ if len(n.prefix) > len(search) {
+ search = []byte{}
+ } else {
+ search = search[len(n.prefix):]
+ }
+
+ // Otherwise, take the lower bound next edge.
+ idx, lbNode := n.getLowerBoundEdge(search[0])
+ if lbNode == nil {
+ i.node = nil
+ return
+ }
+
+ // Create stack edges for all the strictly higher edges in this node.
+ if idx+1 < len(n.edges) {
+ i.stack = append(i.stack, n.edges[idx+1:])
+ }
+
+ i.node = lbNode
+ // Recurse
+ n = lbNode
+ }
+}
+
+// Next returns the next node in order
+func (i *Iterator) Next() ([]byte, interface{}, bool) {
+ // Initialize our stack if needed
+ if i.stack == nil && i.node != nil {
+ i.stack = []edges{
+ edges{
+ edge{node: i.node},
+ },
+ }
+ }
+
+ for len(i.stack) > 0 {
+ // Inspect the last element of the stack
+ n := len(i.stack)
+ last := i.stack[n-1]
+ elem := last[0].node
+
+ // Update the stack
+ if len(last) > 1 {
+ i.stack[n-1] = last[1:]
+ } else {
+ i.stack = i.stack[:n-1]
+ }
+
+ // Push the edges onto the frontier
+ if len(elem.edges) > 0 {
+ i.stack = append(i.stack, elem.edges)
+ }
+
+ // Return the leaf values if any
+ if elem.leaf != nil {
+ return elem.leaf.key, elem.leaf.val, true
+ }
+ }
+ return nil, nil, false
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/node.go
new file mode 100644
index 00000000..3ab904ed
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -0,0 +1,304 @@
+package iradix
+
+import (
+ "bytes"
+ "sort"
+)
+
+// WalkFn is used when walking the tree. Takes a
+// key and value, returning whether iteration should
+// be terminated.
+type WalkFn func(k []byte, v interface{}) bool
+
+// leafNode is used to represent a value
+type leafNode struct {
+ mutateCh chan struct{}
+ key []byte
+ val interface{}
+}
+
+// edge is used to represent an edge node
+type edge struct {
+ label byte
+ node *Node
+}
+
+// Node is an immutable node in the radix tree
+type Node struct {
+ // mutateCh is closed if this node is modified
+ mutateCh chan struct{}
+
+ // leaf is used to store possible leaf
+ leaf *leafNode
+
+ // prefix is the common prefix we ignore
+ prefix []byte
+
+ // Edges should be stored in-order for iteration.
+ // We avoid a fully materialized slice to save memory, + // since in most cases we expect to be sparse + edges edges +} + +func (n *Node) isLeaf() bool { + return n.leaf != nil +} + +func (n *Node) addEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + n.edges = append(n.edges, e) + if idx != num { + copy(n.edges[idx+1:], n.edges[idx:num]) + n.edges[idx] = e + } +} + +func (n *Node) replaceEdge(e edge) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= e.label + }) + if idx < num && n.edges[idx].label == e.label { + n.edges[idx].node = e.node + return + } + panic("replacing missing edge") +} + +func (n *Node) getEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) getLowerBoundEdge(label byte) (int, *Node) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + // we want lower bound behavior so return even if it's not an exact match + if idx < num { + return idx, n.edges[idx].node + } + return -1, nil +} + +func (n *Node) delEdge(label byte) { + num := len(n.edges) + idx := sort.Search(num, func(i int) bool { + return n.edges[i].label >= label + }) + if idx < num && n.edges[idx].label == label { + copy(n.edges[idx:], n.edges[idx+1:]) + n.edges[len(n.edges)-1] = edge{} + n.edges = n.edges[:len(n.edges)-1] + } +} + +func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) { + search := k + watch := n.mutateCh + for { + // Check for key exhaustion + if len(search) == 0 { + if n.isLeaf() { + return n.leaf.mutateCh, n.leaf.val, true + } + break + } + + // Look for an edge + _, n = n.getEdge(search[0]) + if n == nil { + break + } + + // Update to the finest granularity as the search makes progress + watch = n.mutateCh + + // Consume the search prefix + if bytes.HasPrefix(search, n.prefix) { + search = search[len(n.prefix):] + } else { + break + } + } + return watch, nil, false +} + +func (n *Node) Get(k []byte) (interface{}, bool) { + _, val, ok := n.GetWatch(k) + return val, ok +} + +// LongestPrefix is like Get, but instead of an +// exact match, it will return the longest prefix match. 
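+//
+// For example (a sketch): with keys "foo" and "foobar" in the tree,
+// LongestPrefix([]byte("foobaz")) returns the entry for "foo".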
+func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
+ var last *leafNode
+ search := k
+ for {
+ // Look for a leaf node
+ if n.isLeaf() {
+ last = n.leaf
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ break
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+ if last != nil {
+ return last.key, last.val, true
+ }
+ return nil, nil, false
+}
+
+// Minimum is used to return the minimum value in the tree
+func (n *Node) Minimum() ([]byte, interface{}, bool) {
+ for {
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ }
+ if len(n.edges) > 0 {
+ n = n.edges[0].node
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Maximum is used to return the maximum value in the tree
+func (n *Node) Maximum() ([]byte, interface{}, bool) {
+ for {
+ if num := len(n.edges); num > 0 {
+ n = n.edges[num-1].node
+ continue
+ }
+ if n.isLeaf() {
+ return n.leaf.key, n.leaf.val, true
+ } else {
+ break
+ }
+ }
+ return nil, nil, false
+}
+
+// Iterator is used to return an iterator at
+// the given node to walk the tree
+func (n *Node) Iterator() *Iterator {
+ return &Iterator{node: n}
+}
+
+// rawIterator is used to return a raw iterator at the given node to walk the
+// tree.
+func (n *Node) rawIterator() *rawIterator {
+ iter := &rawIterator{node: n}
+ iter.Next()
+ return iter
+}
+
+// Walk is used to walk the tree
+func (n *Node) Walk(fn WalkFn) {
+ recursiveWalk(n, fn)
+}
+
+// WalkPrefix is used to walk the tree under a prefix
+func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
+ search := prefix
+ for {
+ // Check for key exhaustion
+ if len(search) == 0 {
+ recursiveWalk(n, fn)
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ break
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+
+ } else if bytes.HasPrefix(n.prefix, search) {
+ // Child may be under our search prefix
+ recursiveWalk(n, fn)
+ return
+ } else {
+ break
+ }
+ }
+}
+
+// WalkPath is used to walk the tree, but only visiting nodes
+// from the root down to a given leaf. Where WalkPrefix walks
+// all the entries *under* the given prefix, this walks the
+// entries *above* the given prefix.
+func (n *Node) WalkPath(path []byte, fn WalkFn) {
+ search := path
+ for {
+ // Visit the leaf values if any
+ if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
+ return
+ }
+
+ // Check for key exhaustion
+ if len(search) == 0 {
+ return
+ }
+
+ // Look for an edge
+ _, n = n.getEdge(search[0])
+ if n == nil {
+ return
+ }
+
+ // Consume the search prefix
+ if bytes.HasPrefix(search, n.prefix) {
+ search = search[len(n.prefix):]
+ } else {
+ break
+ }
+ }
+}
+
+// recursiveWalk is used to do a pre-order walk of a node
+// recursively.
Returns true if the walk should be aborted +func recursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children + for _, e := range n.edges { + if recursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go new file mode 100644 index 00000000..04814c13 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -0,0 +1,78 @@ +package iradix + +// rawIterator visits each of the nodes in the tree, even the ones that are not +// leaves. It keeps track of the effective path (what a leaf at a given node +// would be called), which is useful for comparing trees. +type rawIterator struct { + // node is the starting node in the tree for the iterator. + node *Node + + // stack keeps track of edges in the frontier. + stack []rawStackEntry + + // pos is the current position of the iterator. + pos *Node + + // path is the effective path of the current iterator position, + // regardless of whether the current node is a leaf. + path string +} + +// rawStackEntry is used to keep track of the cumulative common path as well as +// its associated edges in the frontier. +type rawStackEntry struct { + path string + edges edges +} + +// Front returns the current node that has been iterated to. +func (i *rawIterator) Front() *Node { + return i.pos +} + +// Path returns the effective path of the current node, even if it's not actually +// a leaf. +func (i *rawIterator) Path() string { + return i.path +} + +// Next advances the iterator to the next node. +func (i *rawIterator) Next() { + // Initialize our stack if needed. + if i.stack == nil && i.node != nil { + i.stack = []rawStackEntry{ + rawStackEntry{ + edges: edges{ + edge{node: i.node}, + }, + }, + } + } + + for len(i.stack) > 0 { + // Inspect the last element of the stack. + n := len(i.stack) + last := i.stack[n-1] + elem := last.edges[0].node + + // Update the stack. + if len(last.edges) > 1 { + i.stack[n-1].edges = last.edges[1:] + } else { + i.stack = i.stack[:n-1] + } + + // Push the edges onto the frontier. + if len(elem.edges) > 0 { + path := last.path + string(elem.prefix) + i.stack = append(i.stack, rawStackEntry{path, elem.edges}) + } + + i.pos = elem + i.path = last.path + string(elem.prefix) + return + } + + i.pos = nil + i.path = "" +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/LICENSE new file mode 100644 index 00000000..e87a115e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. 
Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 
+ +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. 
Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/README.md b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/README.md new file mode 100644 index 00000000..f445a756 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/README.md @@ -0,0 +1,146 @@ +# go-memdb [![CircleCI](https://circleci.com/gh/hashicorp/go-memdb/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-memdb/tree/master) + +Provides the `memdb` package that implements a simple in-memory database +built on immutable radix trees. The database provides Atomicity, Consistency +and Isolation from ACID. Being that it is in-memory, it does not provide durability. +The database is instantiated with a schema that specifies the tables and indices +that exist and allows transactions to be executed. + +The database provides the following: + +* Multi-Version Concurrency Control (MVCC) - By leveraging immutable radix trees + the database is able to support any number of concurrent readers without locking, + and allows a writer to make progress. + +* Transaction Support - The database allows for rich transactions, in which multiple + objects are inserted, updated or deleted. The transactions can span multiple tables, + and are applied atomically. The database provides atomicity and isolation in ACID + terminology, such that until commit the updates are not visible. + +* Rich Indexing - Tables can support any number of indexes, which can be simple like + a single field index, or more advanced compound field indexes. Certain types like + UUID can be efficiently compressed from strings into byte indexes for reduced + storage requirements. + +* Watches - Callers can populate a watch set as part of a query, which can be used to + detect when a modification has been made to the database which affects the query + results. This lets callers easily watch for changes in the database in a very general + way. + +For the underlying immutable radix trees, see [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix). 
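+
+As a sketch of the watch facility (using only the `WatchCh` method of
+`ResultIterator` shown in this package, and the `person` table from the
+example below), a caller can block until a write that affects a query
+commits:
+
+```go
+txn := db.Txn(false)
+it, err := txn.Get("person", "id")
+if err != nil {
+ panic(err)
+}
+// The channel is closed when a committed write touches the query's results.
+<-it.WatchCh()
+```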
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-memdb).
+
+Example
+=======
+
+Below is a [simple example](https://play.golang.org/p/gCGE9FA4og1) of usage:
+
+```go
+// Create a sample struct
+type Person struct {
+ Email string
+ Name string
+ Age int
+}
+
+// Create the DB schema
+schema := &memdb.DBSchema{
+ Tables: map[string]*memdb.TableSchema{
+ "person": &memdb.TableSchema{
+ Name: "person",
+ Indexes: map[string]*memdb.IndexSchema{
+ "id": &memdb.IndexSchema{
+ Name: "id",
+ Unique: true,
+ Indexer: &memdb.StringFieldIndex{Field: "Email"},
+ },
+ "age": &memdb.IndexSchema{
+ Name: "age",
+ Unique: false,
+ Indexer: &memdb.IntFieldIndex{Field: "Age"},
+ },
+ },
+ },
+ },
+}
+
+// Create a new database
+db, err := memdb.NewMemDB(schema)
+if err != nil {
+ panic(err)
+}
+
+// Create a write transaction
+txn := db.Txn(true)
+
+// Insert some people
+people := []*Person{
+ &Person{"joe@aol.com", "Joe", 30},
+ &Person{"lucy@aol.com", "Lucy", 35},
+ &Person{"tariq@aol.com", "Tariq", 21},
+ &Person{"dorothy@aol.com", "Dorothy", 53},
+}
+for _, p := range people {
+ if err := txn.Insert("person", p); err != nil {
+ panic(err)
+ }
+}
+
+// Commit the transaction
+txn.Commit()
+
+// Create a read-only transaction
+txn = db.Txn(false)
+defer txn.Abort()
+
+// Lookup by email
+raw, err := txn.First("person", "id", "joe@aol.com")
+if err != nil {
+ panic(err)
+}
+
+// Say hi!
+fmt.Printf("Hello %s!\n", raw.(*Person).Name)
+
+// List all the people
+it, err := txn.Get("person", "id")
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("All the people:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ fmt.Printf(" %s\n", p.Name)
+}
+
+// Range scan over people with ages between 25 and 35 inclusive
+it, err = txn.LowerBound("person", "age", 25)
+if err != nil {
+ panic(err)
+}
+
+fmt.Println("People aged 25 - 35:")
+for obj := it.Next(); obj != nil; obj = it.Next() {
+ p := obj.(*Person)
+ if p.Age > 35 {
+ break
+ }
+ fmt.Printf(" %s is aged %d\n", p.Name, p.Age)
+}
+// Output:
+// Hello Joe!
+// All the people:
+// Dorothy
+// Joe
+// Lucy
+// Tariq
+// People aged 25 - 35:
+// Joe is aged 30
+// Lucy is aged 35
+```
+
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/filter.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/filter.go
new file mode 100644
index 00000000..2e3a9b3f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/filter.go
@@ -0,0 +1,33 @@
+package memdb
+
+// FilterFunc is a function that takes the results of an iterator and returns
+// whether the result should be filtered out.
+type FilterFunc func(interface{}) bool
+
+// FilterIterator is used to wrap a ResultIterator and apply a filter over it.
+type FilterIterator struct {
+ // filter is the filter function applied over the base iterator.
+ filter FilterFunc
+
+ // iter is the iterator that is being wrapped.
+ iter ResultIterator
+}
+
+func NewFilterIterator(wrap ResultIterator, filter FilterFunc) *FilterIterator {
+ return &FilterIterator{
+ filter: filter,
+ iter: wrap,
+ }
+}
+
+// WatchCh returns the watch channel of the wrapped iterator.
+func (f *FilterIterator) WatchCh() <-chan struct{} { return f.iter.WatchCh() }
+
+// Next returns the next non-filtered result from the wrapped iterator
+func (f *FilterIterator) Next() interface{} {
+ for {
+ if value := f.iter.Next(); value == nil || !f.filter(value) {
+ return value
+ }
+ }
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/go.mod
new file mode 100644
index 00000000..34bfd824
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/go.mod
@@ -0,0 +1,5 @@
+module github.com/hashicorp/go-memdb
+
+go 1.12
+
+require github.com/hashicorp/go-immutable-radix v1.1.0
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/go.sum
new file mode 100644
index 00000000..1a21d603
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/go.sum
@@ -0,0 +1,6 @@
+github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
+github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
+github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/index.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/index.go
new file mode 100644
index 00000000..e368319e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/index.go
@@ -0,0 +1,848 @@
+package memdb
+
+import (
+ "encoding/binary"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// Indexer is an interface used for defining indexes. Indexes are used
+// for efficient lookup of objects in a MemDB table. An Indexer must also
+// implement one of SingleIndexer or MultiIndexer.
+//
+// Indexers are primarily responsible for returning the lookup key as
+// a byte slice. The byte slice is the key data in the underlying data storage.
+type Indexer interface {
+ // FromArgs is called to build the exact index key from a list of arguments.
+ FromArgs(args ...interface{}) ([]byte, error)
+}
+
+// SingleIndexer is an interface used for defining indexes that generate a
+// single value per object
+type SingleIndexer interface {
+ // FromObject extracts the index value from an object. The return values
+ // are whether the index value was found, the index value, and any error
+ // while extracting the index value, respectively.
+ FromObject(raw interface{}) (bool, []byte, error)
+}
+
+// MultiIndexer is an interface used for defining indexes that generate
+// multiple values per object. Each value is stored as a separate index
+// pointing to the same object.
+//
+// For example, an index that extracts the first and last name of a person
+// and allows lookup based on either would be a MultiIndexer. The FromObject
+// of this example would split the first and last name and return both as
+// values.
+type MultiIndexer interface {
+ // FromObject extracts index values from an object.
The return values + // are the same as a SingleIndexer except there can be multiple index + // values. + FromObject(raw interface{}) (bool, [][]byte, error) +} + +// PrefixIndexer is an optional interface on top of an Indexer that allows +// indexes to support prefix-based iteration. +type PrefixIndexer interface { + // PrefixFromArgs is the same as FromArgs for an Indexer except that + // the index value returned should return all prefix-matched values. + PrefixFromArgs(args ...interface{}) ([]byte, error) +} + +// StringFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field. +type StringFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + isPtr := fv.Kind() == reflect.Ptr + fv = reflect.Indirect(fv) + if !isPtr && !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid %v ", s.Field, obj, isPtr) + } + + if isPtr && !fv.IsValid() { + val := "" + return true, []byte(val), nil + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + return true, []byte(val), nil +} + +func (s *StringFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringSliceFieldIndex builds an index from a field on an object that is a +// string slice ([]string). Each value within the string slice can be used for +// lookup. 
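+//
+// For example (sketch): &StringSliceFieldIndex{Field: "Tags"} indexes an
+// object once under each entry of its Tags field.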
+type StringSliceFieldIndex struct { + Field string + Lowercase bool +} + +func (s *StringSliceFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != reflect.Slice || fv.Type().Elem().Kind() != reflect.String { + return false, nil, fmt.Errorf("field '%s' is not a string slice", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for i := 0; i < fv.Len(); i++ { + val := fv.Index(i).String() + if val == "" { + continue + } + + if s.Lowercase { + val = strings.ToLower(val) + } + + // Add the null character as a terminator + val += "\x00" + vals = append(vals, []byte(val)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +func (s *StringSliceFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + arg, ok := args[0].(string) + if !ok { + return nil, fmt.Errorf("argument must be a string: %#v", args[0]) + } + if s.Lowercase { + arg = strings.ToLower(arg) + } + // Add the null character as a terminator + arg += "\x00" + return []byte(arg), nil +} + +func (s *StringSliceFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + val, err := s.FromArgs(args...) + if err != nil { + return nil, err + } + + // Strip the null terminator, the rest is a prefix + n := len(val) + if n > 0 { + return val[:n-1], nil + } + return val, nil +} + +// StringMapFieldIndex is used to extract a field of type map[string]string +// from an object using reflection and builds an index on that field. +// +// Note that although FromArgs in theory supports using either one or +// two arguments, there is a bug: FromObject only creates an index +// using key/value, and does not also create an index using key. This +// means a lookup using one argument will never actually work. +// +// It is currently left as-is to prevent backwards compatibility +// issues. +// +// TODO: Fix this in the next major bump. +type StringMapFieldIndex struct { + Field string + Lowercase bool +} + +var MapType = reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf("")).Kind() + +func (s *StringMapFieldIndex) FromObject(obj interface{}) (bool, [][]byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(s.Field) + if !fv.IsValid() { + return false, nil, fmt.Errorf("field '%s' for %#v is invalid", s.Field, obj) + } + + if fv.Kind() != MapType { + return false, nil, fmt.Errorf("field '%s' is not a map[string]string", s.Field) + } + + length := fv.Len() + vals := make([][]byte, 0, length) + for _, key := range fv.MapKeys() { + k := key.String() + if k == "" { + continue + } + val := fv.MapIndex(key).String() + + if s.Lowercase { + k = strings.ToLower(k) + val = strings.ToLower(val) + } + + // Add the null character as a terminator + k += "\x00" + val + "\x00" + + vals = append(vals, []byte(k)) + } + if len(vals) == 0 { + return false, nil, nil + } + return true, vals, nil +} + +// WARNING: Because of a bug in FromObject, this function will never return +// a value when using the single-argument version. 
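+//
+// Two-argument lookups do work, e.g. (with a hypothetical table and
+// index name): txn.First("people", "meta", "env", "prod").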
+func (s *StringMapFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > 2 || len(args) == 0 {
+ return nil, fmt.Errorf("must provide one or two arguments")
+ }
+ key, ok := args[0].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[0])
+ }
+ if s.Lowercase {
+ key = strings.ToLower(key)
+ }
+ // Add the null character as a terminator
+ key += "\x00"
+
+ if len(args) == 2 {
+ val, ok := args[1].(string)
+ if !ok {
+ return nil, fmt.Errorf("argument must be a string: %#v", args[1])
+ }
+ if s.Lowercase {
+ val = strings.ToLower(val)
+ }
+ // Add the null character as a terminator
+ key += val + "\x00"
+ }
+
+ return []byte(key), nil
+}
+
+// IntFieldIndex is used to extract an int field from an object using
+// reflection and builds an index on that field.
+type IntFieldIndex struct {
+ Field string
+}
+
+func (i *IntFieldIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(i.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", i.Field, obj)
+ }
+
+ // Check the type
+ k := fv.Kind()
+ size, ok := IsIntType(k)
+ if !ok {
+ return false, nil, fmt.Errorf("field %q is of type %v; want an int", i.Field, k)
+ }
+
+ // Get the value and encode it
+ val := fv.Int()
+ buf := make([]byte, size)
+ binary.PutVarint(buf, val)
+
+ return true, buf, nil
+}
+
+func (i *IntFieldIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ v := reflect.ValueOf(args[0])
+ if !v.IsValid() {
+ return nil, fmt.Errorf("%#v is invalid", args[0])
+ }
+
+ k := v.Kind()
+ size, ok := IsIntType(k)
+ if !ok {
+ return nil, fmt.Errorf("arg is of type %v; want an int", k)
+ }
+
+ val := v.Int()
+ buf := make([]byte, size)
+ binary.PutVarint(buf, val)
+
+ return buf, nil
+}
+
+// IsIntType returns whether the passed type is a type of int and the number
+// of bytes needed to encode the type.
+func IsIntType(k reflect.Kind) (size int, okay bool) {
+ switch k {
+ case reflect.Int:
+ return binary.MaxVarintLen64, true
+ case reflect.Int8:
+ return 2, true
+ case reflect.Int16:
+ return binary.MaxVarintLen16, true
+ case reflect.Int32:
+ return binary.MaxVarintLen32, true
+ case reflect.Int64:
+ return binary.MaxVarintLen64, true
+ default:
+ return 0, false
+ }
+}
+
+// UintFieldIndex is used to extract a uint field from an object using
+// reflection and builds an index on that field.
+type UintFieldIndex struct { + Field string +} + +func (u *UintFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + // Check the type + k := fv.Kind() + size, ok := IsUintType(k) + if !ok { + return false, nil, fmt.Errorf("field %q is of type %v; want a uint", u.Field, k) + } + + // Get the value and encode it + val := fv.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return true, buf, nil +} + +func (u *UintFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + + v := reflect.ValueOf(args[0]) + if !v.IsValid() { + return nil, fmt.Errorf("%#v is invalid", args[0]) + } + + k := v.Kind() + size, ok := IsUintType(k) + if !ok { + return nil, fmt.Errorf("arg is of type %v; want a uint", k) + } + + val := v.Uint() + buf := make([]byte, size) + binary.PutUvarint(buf, val) + + return buf, nil +} + +// IsUintType returns whether the passed type is a type of uint and the number +// of bytes needed to encode the type. +func IsUintType(k reflect.Kind) (size int, okay bool) { + switch k { + case reflect.Uint: + return binary.MaxVarintLen64, true + case reflect.Uint8: + return 2, true + case reflect.Uint16: + return binary.MaxVarintLen16, true + case reflect.Uint32: + return binary.MaxVarintLen32, true + case reflect.Uint64: + return binary.MaxVarintLen64, true + default: + return 0, false + } +} + +// UUIDFieldIndex is used to extract a field from an object +// using reflection and builds an index on that field by treating +// it as a UUID. This is an optimization to using a StringFieldIndex +// as the UUID can be more compactly represented in byte form. +type UUIDFieldIndex struct { + Field string +} + +func (u *UUIDFieldIndex) FromObject(obj interface{}) (bool, []byte, error) { + v := reflect.ValueOf(obj) + v = reflect.Indirect(v) // Dereference the pointer if any + + fv := v.FieldByName(u.Field) + if !fv.IsValid() { + return false, nil, + fmt.Errorf("field '%s' for %#v is invalid", u.Field, obj) + } + + val := fv.String() + if val == "" { + return false, nil, nil + } + + buf, err := u.parseString(val, true) + return true, buf, err +} + +func (u *UUIDFieldIndex) FromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, true) + case []byte: + if len(arg) != 16 { + return nil, fmt.Errorf("byte slice must be 16 characters") + } + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +func (u *UUIDFieldIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) { + if len(args) != 1 { + return nil, fmt.Errorf("must provide only a single argument") + } + switch arg := args[0].(type) { + case string: + return u.parseString(arg, false) + case []byte: + return arg, nil + default: + return nil, + fmt.Errorf("argument must be a string or byte slice: %#v", args[0]) + } +} + +// parseString parses a UUID from the string. If enforceLength is false, it will +// parse a partial UUID. An error is returned if the input, stripped of hyphens, +// is not even length. 
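+//
+// For example (sketch): parseString("0123", false) yields the two bytes
+// 0x01, 0x23, while parseString("012", false) fails the even-length check.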
+func (u *UUIDFieldIndex) parseString(s string, enforceLength bool) ([]byte, error) {
+ // Verify the length
+ l := len(s)
+ if enforceLength && l != 36 {
+ return nil, fmt.Errorf("UUID must be 36 characters")
+ } else if l > 36 {
+ return nil, fmt.Errorf("Invalid UUID length. UUIDs have 36 characters; got %d", l)
+ }
+
+ hyphens := strings.Count(s, "-")
+ if hyphens > 4 {
+ return nil, fmt.Errorf(`UUID should have maximum of 4 "-"; got %d`, hyphens)
+ }
+
+ // The sanitized length is the length of the original string without the "-".
+ sanitized := strings.Replace(s, "-", "", -1)
+ sanitizedLength := len(sanitized)
+ if sanitizedLength%2 != 0 {
+ return nil, fmt.Errorf("Input (without hyphens) must be even length")
+ }
+
+ dec, err := hex.DecodeString(sanitized)
+ if err != nil {
+ return nil, fmt.Errorf("Invalid UUID: %v", err)
+ }
+
+ return dec, nil
+}
+
+// FieldSetIndex is used to extract a field from an object using reflection and
+// builds an index on whether the field is set by comparing it against its
+// type's nil value.
+type FieldSetIndex struct {
+ Field string
+}
+
+func (f *FieldSetIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ v := reflect.ValueOf(obj)
+ v = reflect.Indirect(v) // Dereference the pointer if any
+
+ fv := v.FieldByName(f.Field)
+ if !fv.IsValid() {
+ return false, nil,
+ fmt.Errorf("field '%s' for %#v is invalid", f.Field, obj)
+ }
+
+ if fv.Interface() == reflect.Zero(fv.Type()).Interface() {
+ return true, []byte{0}, nil
+ }
+
+ return true, []byte{1}, nil
+}
+
+func (f *FieldSetIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// ConditionalIndex builds an index based on a condition specified by a passed
+// user function. This function may examine the passed object and return a
+// boolean to encapsulate an arbitrarily complex conditional.
+type ConditionalIndex struct {
+ Conditional ConditionalIndexFunc
+}
+
+// ConditionalIndexFunc is the required function interface for a
+// ConditionalIndex.
+type ConditionalIndexFunc func(obj interface{}) (bool, error)
+
+func (c *ConditionalIndex) FromObject(obj interface{}) (bool, []byte, error) {
+ // Call the user's function
+ res, err := c.Conditional(obj)
+ if err != nil {
+ return false, nil, fmt.Errorf("ConditionalIndexFunc(%#v) failed: %v", obj, err)
+ }
+
+ if res {
+ return true, []byte{1}, nil
+ }
+
+ return true, []byte{0}, nil
+}
+
+func (c *ConditionalIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ return fromBoolArgs(args)
+}
+
+// fromBoolArgs is a helper that expects only a single boolean argument and
+// returns a one-byte array containing either a one or a zero depending
+// on whether the passed input is true or false respectively.
+func fromBoolArgs(args []interface{}) ([]byte, error) {
+ if len(args) != 1 {
+ return nil, fmt.Errorf("must provide only a single argument")
+ }
+
+ if val, ok := args[0].(bool); !ok {
+ return nil, fmt.Errorf("argument must be a boolean type: %#v", args[0])
+ } else if val {
+ return []byte{1}, nil
+ }
+
+ return []byte{0}, nil
+}
+
+// CompoundIndex is used to build an index using multiple sub-indexes.
+// Prefix-based iteration is supported as long as the appropriate prefix
+// of indexers supports it. All sub-indexers are only assumed to expect
+// a single argument.
+type CompoundIndex struct {
+ Indexes []Indexer
+
+ // AllowMissing results in an index based on only the indexers
+ // that return data.
If true, you may end up with 2/3 columns
+ // indexed which might be useful for an index scan. Otherwise,
+ // the CompoundIndex requires all indexers to be satisfied.
+ AllowMissing bool
+}
+
+func (c *CompoundIndex) FromObject(raw interface{}) (bool, []byte, error) {
+ var out []byte
+ for i, idxRaw := range c.Indexes {
+ idx, ok := idxRaw.(SingleIndexer)
+ if !ok {
+ return false, nil, fmt.Errorf("sub-index %d error: %s", i, "sub-index must be a SingleIndexer")
+ }
+ ok, val, err := idx.FromObject(raw)
+ if err != nil {
+ return false, nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ if !ok {
+ if c.AllowMissing {
+ break
+ } else {
+ return false, nil, nil
+ }
+ }
+ out = append(out, val...)
+ }
+ return true, out, nil
+}
+
+func (c *CompoundIndex) FromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) != len(c.Indexes) {
+ return nil, fmt.Errorf("non-equivalent argument count and index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ return out, nil
+}
+
+func (c *CompoundIndex) PrefixFromArgs(args ...interface{}) ([]byte, error) {
+ if len(args) > len(c.Indexes) {
+ return nil, fmt.Errorf("more arguments than index fields")
+ }
+ var out []byte
+ for i, arg := range args {
+ if i+1 < len(args) {
+ val, err := c.Indexes[i].FromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ } else {
+ prefixIndexer, ok := c.Indexes[i].(PrefixIndexer)
+ if !ok {
+ return nil, fmt.Errorf("sub-index %d does not support prefix scanning", i)
+ }
+ val, err := prefixIndexer.PrefixFromArgs(arg)
+ if err != nil {
+ return nil, fmt.Errorf("sub-index %d error: %v", i, err)
+ }
+ out = append(out, val...)
+ }
+ }
+ return out, nil
+}
+
+// CompoundMultiIndex is used to build an index using multiple
+// sub-indexes.
+//
+// Unlike CompoundIndex, CompoundMultiIndex can have both
+// SingleIndexer and MultiIndexer sub-indexers. However, each
+// MultiIndexer adds considerable overhead/complexity in terms of
+// the number of indexes created under-the-hood. It is not recommended
+// to use more than one or two, if possible.
+//
+// Another change from CompoundIndex is that if AllowMissing is
+// set, not only is it valid to have empty index fields, but it will
+// still create index values up to the first empty index. This means
+// that if you have a value with an empty field, rather than using a
+// prefix for lookup, you can simply pass in fewer arguments. As an
+// example, if {Foo, Bar} is indexed but Bar is missing for a value
+// and AllowMissing is set, an index will still be created for {Foo}
+// and it is valid to do a lookup passing in only Foo as an argument.
+// Note that the ordering isn't guaranteed -- it's last-insert wins,
+// but this is also true if you have two objects with the same
+// indexes and are not using AllowMissing anyway.
+//
+// Because a StringMapFieldIndex can take a varying number of args,
+// it is currently a requirement that whenever one is used, two
+// arguments must _always_ be provided for it. In theory we only
+// need one, except a bug in that indexer means the single-argument
+// version will never work. You can leave the second argument nil,
+// but it will never produce a value. We support this for whenever
+// that bug is fixed, likely in the next major version bump.
+//
+// Prefix-based indexing is not currently supported.
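+//
+// Sketch: for Indexes {Name (a SingleIndexer), Tags (a MultiIndexer)},
+// FromObject emits one index value per tag, each one prefixed with the
+// encoded Name value.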
+type CompoundMultiIndex struct { + Indexes []Indexer + + // AllowMissing results in an index based on only the indexers + // that return data. If true, you may end up with 2/3 columns + // indexed which might be useful for an index scan. Otherwise, + // CompoundMultiIndex requires all indexers to be satisfied. + AllowMissing bool +} + +func (c *CompoundMultiIndex) FromObject(raw interface{}) (bool, [][]byte, error) { + // At each entry, builder is storing the results from the next index + builder := make([][][]byte, 0, len(c.Indexes)) + // Start with something higher to avoid resizing if possible + out := make([][]byte, 0, len(c.Indexes)^3) + +forloop: + // This loop goes through each indexer and adds the value(s) provided to the next + // entry in the slice. We can then later walk it like a tree to construct the indices. + for i, idxRaw := range c.Indexes { + switch idx := idxRaw.(type) { + case SingleIndexer: + ok, val, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("single sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + builder = append(builder, [][]byte{val}) + + case MultiIndexer: + ok, vals, err := idx.FromObject(raw) + if err != nil { + return false, nil, fmt.Errorf("multi sub-index %d error: %v", i, err) + } + if !ok { + if c.AllowMissing { + break forloop + } else { + return false, nil, nil + } + } + + // Add each of the new values to each of the old values + builder = append(builder, vals) + + default: + return false, nil, fmt.Errorf("sub-index %d does not satisfy either SingleIndexer or MultiIndexer", i) + } + } + + // We are walking through the builder slice essentially in a depth-first fashion, + // building the prefix and leaves as we go. If AllowMissing is false, we only insert + // these full paths to leaves. Otherwise, we also insert each prefix along the way. + // This allows for lookup in FromArgs when AllowMissing is true that does not contain + // the full set of arguments. e.g. for {Foo, Bar} where an object has only the Foo + // field specified as "abc", it is valid to call FromArgs with just "abc". + var walkVals func([]byte, int) + walkVals = func(currPrefix []byte, depth int) { + if depth == len(builder)-1 { + // These are the "leaves", so append directly + for _, v := range builder[depth] { + out = append(out, append(currPrefix, v...)) + } + return + } + for _, v := range builder[depth] { + nextPrefix := append(currPrefix, v...) 
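+ // With AllowMissing, every intermediate prefix is emitted as an
+ // index value of its own; this is what makes lookups with fewer
+ // arguments possible.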
+ if c.AllowMissing { + out = append(out, nextPrefix) + } + walkVals(nextPrefix, depth+1) + } + } + + walkVals(nil, 0) + + return true, out, nil +} + +func (c *CompoundMultiIndex) FromArgs(args ...interface{}) ([]byte, error) { + var stringMapCount int + var argCount int + for _, index := range c.Indexes { + if argCount >= len(args) { + break + } + if _, ok := index.(*StringMapFieldIndex); ok { + // We require pairs for StringMapFieldIndex, but only got one + if argCount+1 >= len(args) { + return nil, errors.New("invalid number of arguments") + } + stringMapCount++ + argCount += 2 + } else { + argCount++ + } + } + argCount = 0 + + switch c.AllowMissing { + case true: + if len(args) > len(c.Indexes)+stringMapCount { + return nil, errors.New("too many arguments") + } + + default: + if len(args) != len(c.Indexes)+stringMapCount { + return nil, errors.New("number of arguments does not equal number of indexers") + } + } + + var out []byte + var val []byte + var err error + for i, idx := range c.Indexes { + if argCount >= len(args) { + // We're done; should only hit this if AllowMissing + break + } + if _, ok := idx.(*StringMapFieldIndex); ok { + if args[argCount+1] == nil { + val, err = idx.FromArgs(args[argCount]) + } else { + val, err = idx.FromArgs(args[argCount : argCount+2]...) + } + argCount += 2 + } else { + val, err = idx.FromArgs(args[argCount]) + argCount++ + } + if err != nil { + return nil, fmt.Errorf("sub-index %d error: %v", i, err) + } + out = append(out, val...) + } + return out, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/memdb.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/memdb.go new file mode 100644 index 00000000..65c92073 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/memdb.go @@ -0,0 +1,97 @@ +// Package memdb provides an in-memory database that supports transactions +// and MVCC. +package memdb + +import ( + "sync" + "sync/atomic" + "unsafe" + + "github.com/hashicorp/go-immutable-radix" +) + +// MemDB is an in-memory database. +// +// MemDB provides a table abstraction to store objects (rows) with multiple +// indexes based on inserted values. The database makes use of immutable radix +// trees to provide transactions and MVCC. +type MemDB struct { + schema *DBSchema + root unsafe.Pointer // *iradix.Tree underneath + primary bool + + // There can only be a single writer at once + writer sync.Mutex +} + +// NewMemDB creates a new MemDB with the given schema +func NewMemDB(schema *DBSchema) (*MemDB, error) { + // Validate the schema + if err := schema.Validate(); err != nil { + return nil, err + } + + // Create the MemDB + db := &MemDB{ + schema: schema, + root: unsafe.Pointer(iradix.New()), + primary: true, + } + if err := db.initialize(); err != nil { + return nil, err + } + + return db, nil +} + +// getRoot is used to do an atomic load of the root pointer +func (db *MemDB) getRoot() *iradix.Tree { + root := (*iradix.Tree)(atomic.LoadPointer(&db.root)) + return root +} + +// Txn is used to start a new transaction, in either read or write mode. +// There can only be a single concurrent writer, but any number of readers. +func (db *MemDB) Txn(write bool) *Txn { + if write { + db.writer.Lock() + } + txn := &Txn{ + db: db, + write: write, + rootTxn: db.getRoot().Txn(), + } + return txn +} + +// Snapshot is used to capture a point-in-time snapshot +// of the database that will not be affected by any write +// operations to the existing DB. 
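A small end-to-end sketch of the API above, assuming a hypothetical `Event` table; `NewMemDB`, `Txn`, `Snapshot`, and `First` are all part of this vendored package, while the table and row type are illustrative only.

```go
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

// Event is a hypothetical row type keyed by ID.
type Event struct {
	ID string
}

func main() {
	schema := &memdb.DBSchema{
		Tables: map[string]*memdb.TableSchema{
			"event": {
				Name: "event",
				Indexes: map[string]*memdb.IndexSchema{
					"id": {
						Name:    "id",
						Unique:  true,
						Indexer: &memdb.StringFieldIndex{Field: "ID"},
					},
				},
			},
		},
	}

	db, err := memdb.NewMemDB(schema) // validates the schema internally
	if err != nil {
		panic(err)
	}

	// Single writer at a time; readers never block.
	write := db.Txn(true)
	if err := write.Insert("event", &Event{ID: "boot"}); err != nil {
		write.Abort()
		panic(err)
	}
	write.Commit()

	// Snapshot sees the committed state and is unaffected by later writes.
	snap := db.Snapshot()
	read := snap.Txn(false)
	raw, _ := read.First("event", "id", "boot")
	fmt.Println(raw.(*Event).ID) // "boot"
}
```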
+func (db *MemDB) Snapshot() *MemDB { + clone := &MemDB{ + schema: db.schema, + root: unsafe.Pointer(db.getRoot()), + primary: false, + } + return clone +} + +// initialize is used to setup the DB for use after creation. This should +// be called only once after allocating a MemDB. +func (db *MemDB) initialize() error { + root := db.getRoot() + for tName, tableSchema := range db.schema.Tables { + for iName := range tableSchema.Indexes { + index := iradix.New() + path := indexPath(tName, iName) + root, _, _ = root.Insert(path, index) + } + } + db.root = unsafe.Pointer(root) + return nil +} + +// indexPath returns the path from the root to the given table index +func indexPath(table, index string) []byte { + return []byte(table + "." + index) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/schema.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/schema.go new file mode 100644 index 00000000..e6a9b526 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/schema.go @@ -0,0 +1,114 @@ +package memdb + +import "fmt" + +// DBSchema is the schema to use for the full database with a MemDB instance. +// +// MemDB will require a valid schema. Schema validation can be tested using +// the Validate function. Calling this function is recommended in unit tests. +type DBSchema struct { + // Tables is the set of tables within this database. The key is the + // table name and must match the Name in TableSchema. + Tables map[string]*TableSchema +} + +// Validate validates the schema. +func (s *DBSchema) Validate() error { + if s == nil { + return fmt.Errorf("schema is nil") + } + + if len(s.Tables) == 0 { + return fmt.Errorf("schema has no tables defined") + } + + for name, table := range s.Tables { + if name != table.Name { + return fmt.Errorf("table name mis-match for '%s'", name) + } + + if err := table.Validate(); err != nil { + return fmt.Errorf("table %q: %s", name, err) + } + } + + return nil +} + +// TableSchema is the schema for a single table. +type TableSchema struct { + // Name of the table. This must match the key in the Tables map in DBSchema. + Name string + + // Indexes is the set of indexes for querying this table. The key + // is a unique name for the index and must match the Name in the + // IndexSchema. + Indexes map[string]*IndexSchema +} + +// Validate is used to validate the table schema +func (s *TableSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing table name") + } + + if len(s.Indexes) == 0 { + return fmt.Errorf("missing table indexes for '%s'", s.Name) + } + + if _, ok := s.Indexes["id"]; !ok { + return fmt.Errorf("must have id index") + } + + if !s.Indexes["id"].Unique { + return fmt.Errorf("id index must be unique") + } + + if _, ok := s.Indexes["id"].Indexer.(SingleIndexer); !ok { + return fmt.Errorf("id index must be a SingleIndexer") + } + + for name, index := range s.Indexes { + if name != index.Name { + return fmt.Errorf("index name mis-match for '%s'", name) + } + + if err := index.Validate(); err != nil { + return fmt.Errorf("index %q: %s", name, err) + } + } + + return nil +} + +// IndexSchema is the schema for an index. An index defines how a table is +// queried. +type IndexSchema struct { + // Name of the index. This must be unique among a tables set of indexes. + // This must match the key in the map of Indexes for a TableSchema. + Name string + + // AllowMissing if true ignores this index if it doesn't produce a + // value. 
For example, an index that extracts a field that doesn't + // exist from a structure. + AllowMissing bool + + Unique bool + Indexer Indexer +} + +func (s *IndexSchema) Validate() error { + if s.Name == "" { + return fmt.Errorf("missing index name") + } + if s.Indexer == nil { + return fmt.Errorf("missing index function for '%s'", s.Name) + } + switch s.Indexer.(type) { + case SingleIndexer: + case MultiIndexer: + default: + return fmt.Errorf("indexer for '%s' must be a SingleIndexer or MultiIndexer", s.Name) + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/txn.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/txn.go new file mode 100644 index 00000000..762a81b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/txn.go @@ -0,0 +1,674 @@ +package memdb + +import ( + "bytes" + "fmt" + "strings" + "sync/atomic" + "unsafe" + + iradix "github.com/hashicorp/go-immutable-radix" +) + +const ( + id = "id" +) + +var ( + // ErrNotFound is returned when the requested item is not found + ErrNotFound = fmt.Errorf("not found") +) + +// tableIndex is a tuple of (Table, Index) used for lookups +type tableIndex struct { + Table string + Index string +} + +// Txn is a transaction against a MemDB. +// This can be a read or write transaction. +type Txn struct { + db *MemDB + write bool + rootTxn *iradix.Txn + after []func() + + modified map[tableIndex]*iradix.Txn +} + +// readableIndex returns a transaction usable for reading the given +// index in a table. If a write transaction is in progress, we may need +// to use an existing modified txn. +func (txn *Txn) readableIndex(table, index string) *iradix.Txn { + // Look for existing transaction + if txn.write && txn.modified != nil { + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + } + + // Create a read transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + return indexTxn +} + +// writableIndex returns a transaction usable for modifying the +// given index in a table. +func (txn *Txn) writableIndex(table, index string) *iradix.Txn { + if txn.modified == nil { + txn.modified = make(map[tableIndex]*iradix.Txn) + } + + // Look for existing transaction + key := tableIndex{table, index} + exist, ok := txn.modified[key] + if ok { + return exist + } + + // Start a new transaction + path := indexPath(table, index) + raw, _ := txn.rootTxn.Get(path) + indexTxn := raw.(*iradix.Tree).Txn() + + // If we are the primary DB, enable mutation tracking. Snapshots should + // not notify, otherwise we will trigger watches on the primary DB when + // the writes will not be visible. + indexTxn.TrackMutate(txn.db.primary) + + // Keep this open for the duration of the txn + txn.modified[key] = indexTxn + return indexTxn +} + +// Abort is used to cancel this transaction. +// This is a noop for read transactions. +func (txn *Txn) Abort() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() +} + +// Commit is used to finalize this transaction. +// This is a noop for read transactions. 
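A sketch of the usual write-transaction discipline against the Txn API shown here. Since Commit clears rootTxn and Abort returns early when it is nil, calling Abort after Commit is a no-op, so a deferred Abort only takes effect on early-return paths. The `db` handle and `Person` type are the hypothetical ones from the earlier sketches.

```go
package main

import (
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type Person struct{ First, Last string } // as in the earlier sketch

// updatePerson inserts (or updates) a row in the hypothetical "person" table.
func updatePerson(db *memdb.MemDB, p *Person) error {
	txn := db.Txn(true)
	defer txn.Abort() // no-op once Commit has run; unlocks the writer on error paths

	if err := txn.Insert("person", p); err != nil {
		return err
	}

	// Deferred functions run in LIFO order after a successful Commit.
	txn.Defer(func() { fmt.Println("committed", p.First) })

	txn.Commit()
	return nil
}
```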
+func (txn *Txn) Commit() { + // Noop for a read transaction + if !txn.write { + return + } + + // Check if already aborted or committed + if txn.rootTxn == nil { + return + } + + // Commit each sub-transaction scoped to (table, index) + for key, subTxn := range txn.modified { + path := indexPath(key.Table, key.Index) + final := subTxn.CommitOnly() + txn.rootTxn.Insert(path, final) + } + + // Update the root of the DB + newRoot := txn.rootTxn.CommitOnly() + atomic.StorePointer(&txn.db.root, unsafe.Pointer(newRoot)) + + // Now issue all of the mutation updates (this is safe to call + // even if mutation tracking isn't enabled); we do this after + // the root pointer is swapped so that waking responders will + // see the new state. + for _, subTxn := range txn.modified { + subTxn.Notify() + } + txn.rootTxn.Notify() + + // Clear the txn + txn.rootTxn = nil + txn.modified = nil + + // Release the writer lock since this is invalid + txn.db.writer.Unlock() + + // Run the deferred functions, if any + for i := len(txn.after); i > 0; i-- { + fn := txn.after[i-1] + fn() + } +} + +// Insert is used to add or update an object into the given table +func (txn *Txn) Insert(table string, obj interface{}) error { + if !txn.write { + return fmt.Errorf("cannot insert in read-only transaction") + } + + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return fmt.Errorf("invalid table '%s'", table) + } + + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(obj) + if err != nil { + return fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return fmt.Errorf("object missing primary index") + } + + // Lookup the object by ID first, to see if this is an update + idTxn := txn.writableIndex(table, id) + existing, update := idTxn.Get(idVal) + + // On an update, there is an existing object with the given + // primary ID. We do the update by deleting the current object + // and inserting the new object. + for name, indexSchema := range tableSchema.Indexes { + indexTxn := txn.writableIndex(table, name) + + // Determine the new index value + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(obj) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(obj) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if ok && !indexSchema.Unique { + for i := range vals { + vals[i] = append(vals[i], idVal...) + } + } + + // Handle the update by deleting from the index first + if update { + var ( + okExist bool + valsExist [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var valExist []byte + okExist, valExist, err = indexer.FromObject(existing) + valsExist = [][]byte{valExist} + case MultiIndexer: + okExist, valsExist, err = indexer.FromObject(existing) + } + if err != nil { + return fmt.Errorf("failed to build index '%s': %v", name, err) + } + if okExist { + for i, valExist := range valsExist { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + if !indexSchema.Unique { + valExist = append(valExist, idVal...) 
+					}
+
+					// If we are writing to the same index with the same value,
+					// we can avoid the delete as the insert will overwrite the
+					// value anyways.
+					if i >= len(vals) || !bytes.Equal(valExist, vals[i]) {
+						indexTxn.Delete(valExist)
+					}
+				}
+			}
+		}
+
+		// If there is no index value, either this is an error or an expected
+		// case and we can skip updating
+		if !ok {
+			if indexSchema.AllowMissing {
+				continue
+			} else {
+				return fmt.Errorf("missing value for index '%s'", name)
+			}
+		}
+
+		// Update the value of the index
+		for _, val := range vals {
+			indexTxn.Insert(val, obj)
+		}
+	}
+	return nil
+}
+
+// Delete is used to delete a single object from the given table.
+// This object must already exist in the table.
+func (txn *Txn) Delete(table string, obj interface{}) error {
+	if !txn.write {
+		return fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	// Get the table schema
+	tableSchema, ok := txn.db.schema.Tables[table]
+	if !ok {
+		return fmt.Errorf("invalid table '%s'", table)
+	}
+
+	// Get the primary ID of the object
+	idSchema := tableSchema.Indexes[id]
+	idIndexer := idSchema.Indexer.(SingleIndexer)
+	ok, idVal, err := idIndexer.FromObject(obj)
+	if err != nil {
+		return fmt.Errorf("failed to build primary index: %v", err)
+	}
+	if !ok {
+		return fmt.Errorf("object missing primary index")
+	}
+
+	// Lookup the object by ID first, check if we should continue
+	idTxn := txn.writableIndex(table, id)
+	existing, ok := idTxn.Get(idVal)
+	if !ok {
+		return ErrNotFound
+	}
+
+	// Remove the object from all the indexes
+	for name, indexSchema := range tableSchema.Indexes {
+		indexTxn := txn.writableIndex(table, name)
+
+		// Handle the update by deleting from the index first
+		var (
+			ok   bool
+			vals [][]byte
+			err  error
+		)
+		switch indexer := indexSchema.Indexer.(type) {
+		case SingleIndexer:
+			var val []byte
+			ok, val, err = indexer.FromObject(existing)
+			vals = [][]byte{val}
+		case MultiIndexer:
+			ok, vals, err = indexer.FromObject(existing)
+		}
+		if err != nil {
+			return fmt.Errorf("failed to build index '%s': %v", name, err)
+		}
+		if ok {
+			// Handle non-unique index by computing a unique index.
+			// This is done by appending the primary key which must
+			// be unique anyways.
+			for _, val := range vals {
+				if !indexSchema.Unique {
+					val = append(val, idVal...)
+				}
+				indexTxn.Delete(val)
+			}
+		}
+	}
+	return nil
+}
+
+// DeletePrefix is used to delete an entire subtree based on a prefix.
+// The given index must be a prefix index, and will be used to perform
+// a scan and enumerate the set of objects to delete. These will be
+// removed from all other indexes, and then a special prefix operation
+// will delete the objects from the given index in an efficient subtree
+// delete operation. This is useful when you have a very large number
+// of objects indexed by the given index, along with a much smaller
+// number of entries in the other indexes for those objects.
+func (txn *Txn) DeletePrefix(table string, prefix_index string, prefix string) (bool, error) {
+	if !txn.write {
+		return false, fmt.Errorf("cannot delete in read-only transaction")
+	}
+
+	if !strings.HasSuffix(prefix_index, "_prefix") {
+		return false, fmt.Errorf("Index name for DeletePrefix must be a prefix index, Got %v ", prefix_index)
+	}
+
+	deletePrefixIndex := strings.TrimSuffix(prefix_index, "_prefix")
+
+	// Get an iterator over all of the keys with the given prefix.
+ entries, err := txn.Get(table, prefix_index, prefix) + if err != nil { + return false, fmt.Errorf("failed kvs lookup: %s", err) + } + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return false, fmt.Errorf("invalid table '%s'", table) + } + + foundAny := false + for entry := entries.Next(); entry != nil; entry = entries.Next() { + if !foundAny { + foundAny = true + } + // Get the primary ID of the object + idSchema := tableSchema.Indexes[id] + idIndexer := idSchema.Indexer.(SingleIndexer) + ok, idVal, err := idIndexer.FromObject(entry) + if err != nil { + return false, fmt.Errorf("failed to build primary index: %v", err) + } + if !ok { + return false, fmt.Errorf("object missing primary index") + } + // Remove the object from all the indexes except the given prefix index + for name, indexSchema := range tableSchema.Indexes { + if name == deletePrefixIndex { + continue + } + indexTxn := txn.writableIndex(table, name) + + // Handle the update by deleting from the index first + var ( + ok bool + vals [][]byte + err error + ) + switch indexer := indexSchema.Indexer.(type) { + case SingleIndexer: + var val []byte + ok, val, err = indexer.FromObject(entry) + vals = [][]byte{val} + case MultiIndexer: + ok, vals, err = indexer.FromObject(entry) + } + if err != nil { + return false, fmt.Errorf("failed to build index '%s': %v", name, err) + } + + if ok { + // Handle non-unique index by computing a unique index. + // This is done by appending the primary key which must + // be unique anyways. + for _, val := range vals { + if !indexSchema.Unique { + val = append(val, idVal...) + } + indexTxn.Delete(val) + } + } + } + } + if foundAny { + indexTxn := txn.writableIndex(table, deletePrefixIndex) + ok = indexTxn.DeletePrefix([]byte(prefix)) + if !ok { + panic(fmt.Errorf("prefix %v matched some entries but DeletePrefix did not delete any ", prefix)) + } + return true, nil + } + return false, nil +} + +// DeleteAll is used to delete all the objects in a given table +// matching the constraints on the index +func (txn *Txn) DeleteAll(table, index string, args ...interface{}) (int, error) { + if !txn.write { + return 0, fmt.Errorf("cannot delete in read-only transaction") + } + + // Get all the objects + iter, err := txn.Get(table, index, args...) + if err != nil { + return 0, err + } + + // Put them into a slice so there are no safety concerns while actually + // performing the deletes + var objs []interface{} + for { + obj := iter.Next() + if obj == nil { + break + } + + objs = append(objs, obj) + } + + // Do the deletes + num := 0 + for _, obj := range objs { + if err := txn.Delete(table, obj); err != nil { + return num, err + } + num++ + } + return num, nil +} + +// FirstWatch is used to return the first matching object for +// the given constraints on the index along with the watch channel +func (txn *Txn) FirstWatch(table, index string, args ...interface{}) (<-chan struct{}, interface{}, error) { + // Get the index value + indexSchema, val, err := txn.getIndexValue(table, index, args...) 
+ if err != nil { + return nil, nil, err + } + + // Get the index itself + indexTxn := txn.readableIndex(table, indexSchema.Name) + + // Do an exact lookup + if indexSchema.Unique && val != nil && indexSchema.Name == index { + watch, obj, ok := indexTxn.GetWatch(val) + if !ok { + return watch, nil, nil + } + return watch, obj, nil + } + + // Handle non-unique index by using an iterator and getting the first value + iter := indexTxn.Root().Iterator() + watch := iter.SeekPrefixWatch(val) + _, value, _ := iter.Next() + return watch, value, nil +} + +// First is used to return the first matching object for +// the given constraints on the index +func (txn *Txn) First(table, index string, args ...interface{}) (interface{}, error) { + _, val, err := txn.FirstWatch(table, index, args...) + return val, err +} + +// LongestPrefix is used to fetch the longest prefix match for the given +// constraints on the index. Note that this will not work with the memdb +// StringFieldIndex because it adds null terminators which prevent the +// algorithm from correctly finding a match (it will get to right before the +// null and fail to find a leaf node). This should only be used where the prefix +// given is capable of matching indexed entries directly, which typically only +// applies to a custom indexer. See the unit test for an example. +func (txn *Txn) LongestPrefix(table, index string, args ...interface{}) (interface{}, error) { + // Enforce that this only works on prefix indexes. + if !strings.HasSuffix(index, "_prefix") { + return nil, fmt.Errorf("must use '%s_prefix' on index", index) + } + + // Get the index value. + indexSchema, val, err := txn.getIndexValue(table, index, args...) + if err != nil { + return nil, err + } + + // This algorithm only makes sense against a unique index, otherwise the + // index keys will have the IDs appended to them. + if !indexSchema.Unique { + return nil, fmt.Errorf("index '%s' is not unique", index) + } + + // Find the longest prefix match with the given index. + indexTxn := txn.readableIndex(table, indexSchema.Name) + if _, value, ok := indexTxn.Root().LongestPrefix(val); ok { + return value, nil + } + return nil, nil +} + +// getIndexValue is used to get the IndexSchema and the value +// used to scan the index given the parameters. This handles prefix based +// scans when the index has the "_prefix" suffix. The index must support +// prefix iteration. +func (txn *Txn) getIndexValue(table, index string, args ...interface{}) (*IndexSchema, []byte, error) { + // Get the table schema + tableSchema, ok := txn.db.schema.Tables[table] + if !ok { + return nil, nil, fmt.Errorf("invalid table '%s'", table) + } + + // Check for a prefix scan + prefixScan := false + if strings.HasSuffix(index, "_prefix") { + index = strings.TrimSuffix(index, "_prefix") + prefixScan = true + } + + // Get the index schema + indexSchema, ok := tableSchema.Indexes[index] + if !ok { + return nil, nil, fmt.Errorf("invalid index '%s'", index) + } + + // Hot-path for when there are no arguments + if len(args) == 0 { + return indexSchema, nil, nil + } + + // Special case the prefix scanning + if prefixScan { + prefixIndexer, ok := indexSchema.Indexer.(PrefixIndexer) + if !ok { + return indexSchema, nil, + fmt.Errorf("index '%s' does not support prefix scanning", index) + } + + val, err := prefixIndexer.PrefixFromArgs(args...) 
+		if err != nil {
+			return indexSchema, nil, fmt.Errorf("index error: %v", err)
+		}
+		return indexSchema, val, err
+	}
+
+	// Get the exact match index
+	val, err := indexSchema.Indexer.FromArgs(args...)
+	if err != nil {
+		return indexSchema, nil, fmt.Errorf("index error: %v", err)
+	}
+	return indexSchema, val, err
+}
+
+// ResultIterator is used to iterate over a list of results
+// from a Get query on a table.
+type ResultIterator interface {
+	WatchCh() <-chan struct{}
+	Next() interface{}
+}
+
+// Get is used to construct a ResultIterator over all the
+// rows that match the given constraints of an index.
+func (txn *Txn) Get(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	watchCh := indexIter.SeekPrefixWatch(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter:    indexIter,
+		watchCh: watchCh,
+	}
+	return iter, nil
+}
+
+// LowerBound is used to construct a ResultIterator over the range of
+// rows that have an index value greater than or equal to the provided
+// args. Calling this then iterating until the rows are larger than
+// required allows range scans within an index. It is not possible to
+// watch the resulting iterator since the radix tree doesn't efficiently
+// allow watching on lower bound changes. The WatchCh returned will be
+// nil and so will block forever.
+func (txn *Txn) LowerBound(table, index string, args ...interface{}) (ResultIterator, error) {
+	indexIter, val, err := txn.getIndexIterator(table, index, args...)
+	if err != nil {
+		return nil, err
+	}
+
+	// Seek the iterator to the appropriate sub-set
+	indexIter.SeekLowerBound(val)
+
+	// Create an iterator
+	iter := &radixIterator{
+		iter: indexIter,
+	}
+	return iter, nil
+}
+
+func (txn *Txn) getIndexIterator(table, index string, args ...interface{}) (*iradix.Iterator, []byte, error) {
+	// Get the index value to scan
+	indexSchema, val, err := txn.getIndexValue(table, index, args...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Get the index itself
+	indexTxn := txn.readableIndex(table, indexSchema.Name)
+	indexRoot := indexTxn.Root()
+
+	// Get an iterator over the index
+	indexIter := indexRoot.Iterator()
+	return indexIter, val, nil
+}
+
+// Defer is used to push a new arbitrary function onto a stack which
+// gets called when a transaction is committed and finished. Deferred
+// functions are called in LIFO order, and only invoked at the end of
+// write transactions.
+func (txn *Txn) Defer(fn func()) {
+	txn.after = append(txn.after, fn)
+}
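A hedged sketch of consuming a ResultIterator from Txn.Get together with the WatchSet defined in watch.go further below; the "person" table and `Person` type are again the hypothetical schema from the earlier sketches.

```go
package main

import (
	"context"
	"fmt"

	memdb "github.com/hashicorp/go-memdb"
)

type Person struct{ First, Last string } // as in the earlier sketches

// watchPeople walks every row in the hypothetical "person" table, then
// blocks until the index changes or ctx is cancelled.
func watchPeople(ctx context.Context, db *memdb.MemDB) error {
	txn := db.Txn(false) // read-only transactions never take the writer lock

	it, err := txn.Get("person", "id") // no args: iterate the whole index
	if err != nil {
		return err
	}

	ws := memdb.NewWatchSet()
	ws.Add(it.WatchCh())

	for obj := it.Next(); obj != nil; obj = it.Next() {
		fmt.Println(obj.(*Person).First)
	}

	// Returns nil once something under the watched prefix changes,
	// or ctx.Err() on cancellation.
	return ws.WatchCtx(ctx)
}
```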
+// radixIterator is used to wrap an underlying iradix iterator.
+// This is much more efficient than a sliceIterator as we are not
+// materializing the entire view.
+type radixIterator struct {
+	iter    *iradix.Iterator
+	watchCh <-chan struct{}
+}
+
+func (r *radixIterator) WatchCh() <-chan struct{} {
+	return r.watchCh
+}
+
+func (r *radixIterator) Next() interface{} {
+	_, value, ok := r.iter.Next()
+	if !ok {
+		return nil
+	}
+	return value
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/watch.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/watch.go
new file mode 100644
index 00000000..a6f01213
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/watch.go
@@ -0,0 +1,129 @@
+package memdb
+
+import (
+	"context"
+	"time"
+)
+
+// WatchSet is a collection of watch channels.
+type WatchSet map[<-chan struct{}]struct{}
+
+// NewWatchSet constructs a new watch set.
+func NewWatchSet() WatchSet {
+	return make(map[<-chan struct{}]struct{})
+}
+
+// Add appends a watchCh to the WatchSet if non-nil.
+func (w WatchSet) Add(watchCh <-chan struct{}) {
+	if w == nil {
+		return
+	}
+
+	if _, ok := w[watchCh]; !ok {
+		w[watchCh] = struct{}{}
+	}
+}
+
+// AddWithLimit appends a watchCh to the WatchSet if non-nil, and if the given
+// softLimit hasn't been exceeded. Otherwise, it will watch the given alternate
+// channel. It's expected that the altCh will be the same on many calls to this
+// function, so you will exceed the soft limit a little bit if you hit this, but
+// not by much.
+//
+// This is useful if you want to track individual items up to some limit, after
+// which you watch a higher-level channel (usually a channel from the start of
+// an iterator higher up in the radix tree) that will watch a superset of items.
+func (w WatchSet) AddWithLimit(softLimit int, watchCh <-chan struct{}, altCh <-chan struct{}) {
+	// This is safe for a nil WatchSet so we don't need to check that here.
+	if len(w) < softLimit {
+		w.Add(watchCh)
+	} else {
+		w.Add(altCh)
+	}
+}
+
+// Watch is used to wait for either the watch set to trigger or a timeout.
+// Returns true on timeout.
+func (w WatchSet) Watch(timeoutCh <-chan time.Time) bool {
+	if w == nil {
+		return false
+	}
+
+	// Create a context that gets cancelled when the timeout is triggered
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	go func() {
+		select {
+		case <-timeoutCh:
+			cancel()
+		case <-ctx.Done():
+		}
+	}()
+
+	return w.WatchCtx(ctx) == context.Canceled
+}
+
+// WatchCtx is used to wait for either the watch set to trigger or for the
+// context to be cancelled. Watch with a timeout channel can be mimicked by
+// creating a context with a deadline. WatchCtx should be preferred over Watch.
+func (w WatchSet) WatchCtx(ctx context.Context) error {
+	if w == nil {
+		return nil
+	}
+
+	if n := len(w); n <= aFew {
+		idx := 0
+		chunk := make([]<-chan struct{}, aFew)
+		for watchCh := range w {
+			chunk[idx] = watchCh
+			idx++
+		}
+		return watchFew(ctx, chunk)
+	}
+
+	return w.watchMany(ctx)
+}
+
+// watchMany is used if there are many watchers.
+func (w WatchSet) watchMany(ctx context.Context) error {
+	// Set up a goroutine for each watcher.
+	triggerCh := make(chan struct{}, 1)
+	watcher := func(chunk []<-chan struct{}) {
+		if err := watchFew(ctx, chunk); err == nil {
+			select {
+			case triggerCh <- struct{}{}:
+			default:
+			}
+		}
+	}
+
+	// Apportion the watch channels into chunks we can feed into the
+	// watchFew helper.
+ idx := 0 + chunk := make([]<-chan struct{}, aFew) + for watchCh := range w { + subIdx := idx % aFew + chunk[subIdx] = watchCh + idx++ + + // Fire off this chunk and start a fresh one. + if idx%aFew == 0 { + go watcher(chunk) + chunk = make([]<-chan struct{}, aFew) + } + } + + // Make sure to watch any residual channels in the last chunk. + if idx%aFew != 0 { + go watcher(chunk) + } + + // Wait for a channel to trigger or timeout. + select { + case <-triggerCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/watch_few.go b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/watch_few.go new file mode 100644 index 00000000..b211eeea --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/hashicorp/go-memdb/watch_few.go @@ -0,0 +1,117 @@ +package memdb + +//go:generate sh -c "go run watch-gen/main.go >watch_few.go" + +import ( + "context" +) + +// aFew gives how many watchers this function is wired to support. You must +// always pass a full slice of this length, but unused channels can be nil. +const aFew = 32 + +// watchFew is used if there are only a few watchers as a performance +// optimization. +func watchFew(ctx context.Context, ch []<-chan struct{}) error { + select { + + case <-ch[0]: + return nil + + case <-ch[1]: + return nil + + case <-ch[2]: + return nil + + case <-ch[3]: + return nil + + case <-ch[4]: + return nil + + case <-ch[5]: + return nil + + case <-ch[6]: + return nil + + case <-ch[7]: + return nil + + case <-ch[8]: + return nil + + case <-ch[9]: + return nil + + case <-ch[10]: + return nil + + case <-ch[11]: + return nil + + case <-ch[12]: + return nil + + case <-ch[13]: + return nil + + case <-ch[14]: + return nil + + case <-ch[15]: + return nil + + case <-ch[16]: + return nil + + case <-ch[17]: + return nil + + case <-ch[18]: + return nil + + case <-ch[19]: + return nil + + case <-ch[20]: + return nil + + case <-ch[21]: + return nil + + case <-ch[22]: + return nil + + case <-ch[23]: + return nil + + case <-ch[24]: + return nil + + case <-ch[25]: + return nil + + case <-ch[26]: + return nil + + case <-ch[27]: + return nil + + case <-ch[28]: + return nil + + case <-ch[29]: + return nil + + case <-ch[30]: + return nil + + case <-ch[31]: + return nil + + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/GO_LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/GO_LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/GO_LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
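Before the vendored sctp sources below, a hedged sketch of the package's intended use, distilled from the README that follows. `ListenSCTP`, `AcceptSCTP`, and `Close` live in the platform-specific files of this package rather than in the hunks vendored here, so treat their exact signatures as assumptions; `ResolveSCTPAddr` and `SCTPConn.Read` appear later in this diff.

```go
package main

import (
	"log"

	sctp "github.com/ishidawataru/sctp"
)

func main() {
	// Multi-homing: candidate addresses are separated by '/',
	// with the port on the last element (see ResolveSCTPAddr below).
	addr, err := sctp.ResolveSCTPAddr("sctp", "10.10.0.1/10.20.0.1:1000")
	if err != nil {
		log.Fatal(err)
	}

	ln, err := sctp.ListenSCTP("sctp", addr) // assumed API, per the README's server example
	if err != nil {
		log.Fatal(err)
	}
	defer ln.Close() // assumed API

	conn, err := ln.AcceptSCTP() // assumed API
	if err != nil {
		log.Fatal(err)
	}
	buf := make([]byte, 1024)
	n, _ := conn.Read(buf) // Read is defined on SCTPConn later in this diff
	log.Printf("read %d bytes", n)
}
```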
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/README.md b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/README.md
new file mode 100644
index 00000000..574ececa
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/README.md
@@ -0,0 +1,18 @@
+Stream Control Transmission Protocol (SCTP)
+----
+
+[![Build Status](https://travis-ci.org/ishidawataru/sctp.svg?branch=master)](https://travis-ci.org/ishidawataru/sctp/builds)
+
+Examples
+----
+
+See `example/sctp.go`
+
+```sh
+$ cd example
+$ go build
+$ # run example SCTP server
+$ ./example -server -port 1000 -ip 10.10.0.1,10.20.0.1
+$ # run example SCTP client
+$ ./example -port 1000 -ip 10.10.0.1,10.20.0.1
+```
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/ipsock_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/ipsock_linux.go
new file mode 100644
index 00000000..f5632b72
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/ipsock_linux.go
@@ -0,0 +1,218 @@
+package sctp
+
+import (
+	"net"
+	"os"
+	"sync"
+	"syscall"
+)
+
+//from https://github.com/golang/go
+// Boolean to int.
+func boolint(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+//from https://github.com/golang/go
+func ipToSockaddr(family int, ip net.IP, port int, zone string) (syscall.Sockaddr, error) {
+	switch family {
+	case syscall.AF_INET:
+		if len(ip) == 0 {
+			ip = net.IPv4zero
+		}
+		ip4 := ip.To4()
+		if ip4 == nil {
+			return nil, &net.AddrError{Err: "non-IPv4 address", Addr: ip.String()}
+		}
+		sa := &syscall.SockaddrInet4{Port: port}
+		copy(sa.Addr[:], ip4)
+		return sa, nil
+	case syscall.AF_INET6:
+		// In general, an IP wildcard address, which is either
+		// "0.0.0.0" or "::", means the entire IP addressing
+		// space. For some historical reason, it is used to
+		// specify "any available address" on some operations
+		// of IP node.
+		//
+		// When the IP node supports IPv4-mapped IPv6 address,
+		// we allow a listener to listen to the wildcard
+		// address of both IP addressing spaces by specifying
+		// IPv6 wildcard address.
+		if len(ip) == 0 || ip.Equal(net.IPv4zero) {
+			ip = net.IPv6zero
+		}
+		// We accept any IPv6 address including IPv4-mapped
+		// IPv6 address.
+		ip6 := ip.To16()
+		if ip6 == nil {
+			return nil, &net.AddrError{Err: "non-IPv6 address", Addr: ip.String()}
+		}
+		//we set ZoneId to 0, as currently we use this function only to probe the IP capabilities of the host
+		//if real Zone handling is required, the zone cache implementation in golang/net should be pulled here
+		sa := &syscall.SockaddrInet6{Port: port, ZoneId: 0}
+		copy(sa.Addr[:], ip6)
+		return sa, nil
+	}
+	return nil, &net.AddrError{Err: "invalid address family", Addr: ip.String()}
+}
+
+//from https://github.com/golang/go
+func sockaddr(a *net.TCPAddr, family int) (syscall.Sockaddr, error) {
+	if a == nil {
+		return nil, nil
+	}
+	return ipToSockaddr(family, a.IP, a.Port, a.Zone)
+}
+
+//from https://github.com/golang/go
+type ipStackCapabilities struct {
+	sync.Once // guards following
+	ipv4Enabled bool
+	ipv6Enabled bool
+	ipv4MappedIPv6Enabled bool
+}
+
+//from https://github.com/golang/go
+var ipStackCaps ipStackCapabilities
+
+//from https://github.com/golang/go
+// supportsIPv4 reports whether the platform supports IPv4 networking
+// functionality.
+func supportsIPv4() bool {
+	ipStackCaps.Once.Do(ipStackCaps.probe)
+	return ipStackCaps.ipv4Enabled
+}
+
+//from https://github.com/golang/go
+// supportsIPv6 reports whether the platform supports IPv6 networking
+// functionality.
+func supportsIPv6() bool {
+	ipStackCaps.Once.Do(ipStackCaps.probe)
+	return ipStackCaps.ipv6Enabled
+}
+
+//from https://github.com/golang/go
+// supportsIPv4map reports whether the platform supports mapping an
+// IPv4 address inside an IPv6 address at transport layer
+// protocols. See RFC 4291, RFC 4038 and RFC 3493.
+func supportsIPv4map() bool {
+	ipStackCaps.Once.Do(ipStackCaps.probe)
+	return ipStackCaps.ipv4MappedIPv6Enabled
+}
+
+//from https://github.com/golang/go
+// Probe probes IPv4, IPv6 and IPv4-mapped IPv6 communication
+// capabilities which are controlled by the IPV6_V6ONLY socket option
+// and kernel configuration.
+//
+// Should we try to use the IPv4 socket interface if we're only
+// dealing with IPv4 sockets? As long as the host system understands
+// IPv4-mapped IPv6, it's okay to pass IPv4-mapped IPv6 addresses to
+// the IPv6 interface. That simplifies our code and is most
+// general. Unfortunately, we need to run on kernels built without
+// IPv6 support too. So probe the kernel to figure it out.
+func (p *ipStackCapabilities) probe() {
+	s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
+	switch err {
+	case syscall.EAFNOSUPPORT, syscall.EPROTONOSUPPORT:
+	case nil:
+		syscall.Close(s)
+		p.ipv4Enabled = true
+	}
+	var probes = []struct {
+		laddr net.TCPAddr
+		value int
+	}{
+		// IPv6 communication capability
+		{laddr: net.TCPAddr{IP: net.IPv6loopback}, value: 1},
+		// IPv4-mapped IPv6 address communication capability
+		{laddr: net.TCPAddr{IP: net.IPv4(127, 0, 0, 1)}, value: 0},
+	}
+
+	for i := range probes {
+		s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_STREAM, syscall.IPPROTO_TCP)
+		if err != nil {
+			continue
+		}
+		defer syscall.Close(s)
+		syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, probes[i].value)
+		sa, err := sockaddr(&(probes[i].laddr), syscall.AF_INET6)
+		if err != nil {
+			continue
+		}
+		if err := syscall.Bind(s, sa); err != nil {
+			continue
+		}
+		if i == 0 {
+			p.ipv6Enabled = true
+		} else {
+			p.ipv4MappedIPv6Enabled = true
+		}
+	}
+}
+
+//from https://github.com/golang/go
+//Change: we check the first IP address in the list of candidate SCTP IP addresses
+func (a *SCTPAddr) isWildcard() bool {
+	if a == nil {
+		return true
+	}
+	if 0 == len(a.IPAddrs) {
+		return true
+	}
+
+	return a.IPAddrs[0].IP.IsUnspecified()
+}
+
+func (a *SCTPAddr) family() int {
+	if a != nil {
+		for _, ip := range a.IPAddrs {
+			if ip.IP.To4() == nil {
+				return syscall.AF_INET6
+			}
+		}
+	}
+	return syscall.AF_INET
+}
+
+//from https://github.com/golang/go
+func favoriteAddrFamily(network string, laddr *SCTPAddr, raddr *SCTPAddr, mode string) (family int, ipv6only bool) {
+	switch network[len(network)-1] {
+	case '4':
+		return syscall.AF_INET, false
+	case '6':
+		return syscall.AF_INET6, true
+	}
+
+	if mode == "listen" && (laddr == nil || laddr.isWildcard()) {
+		if supportsIPv4map() || !supportsIPv4() {
+			return syscall.AF_INET6, false
+		}
+		if laddr == nil {
+			return syscall.AF_INET, false
+		}
+		return laddr.family(), false
+	}
+
+	if (laddr == nil || laddr.family() == syscall.AF_INET) &&
+		(raddr == nil || raddr.family() == syscall.AF_INET) {
+		return syscall.AF_INET, false
+	}
+	return syscall.AF_INET6, false
+}
+
+//from https://github.com/golang/go
+//Changes: it is for
SCTP only +func setDefaultSockopts(s int, family int, ipv6only bool) error { + if family == syscall.AF_INET6 { + // Allow both IP versions even if the OS default + // is otherwise. Note that some operating systems + // never admit this option. + syscall.SetsockoptInt(s, syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, boolint(ipv6only)) + } + // Allow broadcast. + return os.NewSyscallError("setsockopt", syscall.SetsockoptInt(s, syscall.SOL_SOCKET, syscall.SO_BROADCAST, 1)) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp.go b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp.go new file mode 100644 index 00000000..34ea7ca4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp.go @@ -0,0 +1,696 @@ +package sctp + +import ( + "bytes" + "encoding/binary" + "fmt" + "net" + "strconv" + "strings" + "sync" + "sync/atomic" + "syscall" + "time" + "unsafe" +) + +const ( + SOL_SCTP = 132 + + SCTP_BINDX_ADD_ADDR = 0x01 + SCTP_BINDX_REM_ADDR = 0x02 + + MSG_NOTIFICATION = 0x8000 +) + +const ( + SCTP_RTOINFO = iota + SCTP_ASSOCINFO + SCTP_INITMSG + SCTP_NODELAY + SCTP_AUTOCLOSE + SCTP_SET_PEER_PRIMARY_ADDR + SCTP_PRIMARY_ADDR + SCTP_ADAPTATION_LAYER + SCTP_DISABLE_FRAGMENTS + SCTP_PEER_ADDR_PARAMS + SCTP_DEFAULT_SENT_PARAM + SCTP_EVENTS + SCTP_I_WANT_MAPPED_V4_ADDR + SCTP_MAXSEG + SCTP_STATUS + SCTP_GET_PEER_ADDR_INFO + SCTP_DELAYED_ACK_TIME + SCTP_DELAYED_ACK = SCTP_DELAYED_ACK_TIME + SCTP_DELAYED_SACK = SCTP_DELAYED_ACK_TIME + + SCTP_SOCKOPT_BINDX_ADD = 100 + SCTP_SOCKOPT_BINDX_REM = 101 + SCTP_SOCKOPT_PEELOFF = 102 + SCTP_GET_PEER_ADDRS = 108 + SCTP_GET_LOCAL_ADDRS = 109 + SCTP_SOCKOPT_CONNECTX = 110 + SCTP_SOCKOPT_CONNECTX3 = 111 +) + +const ( + SCTP_EVENT_DATA_IO = 1 << iota + SCTP_EVENT_ASSOCIATION + SCTP_EVENT_ADDRESS + SCTP_EVENT_SEND_FAILURE + SCTP_EVENT_PEER_ERROR + SCTP_EVENT_SHUTDOWN + SCTP_EVENT_PARTIAL_DELIVERY + SCTP_EVENT_ADAPTATION_LAYER + SCTP_EVENT_AUTHENTICATION + SCTP_EVENT_SENDER_DRY + + SCTP_EVENT_ALL = SCTP_EVENT_DATA_IO | SCTP_EVENT_ASSOCIATION | SCTP_EVENT_ADDRESS | SCTP_EVENT_SEND_FAILURE | SCTP_EVENT_PEER_ERROR | SCTP_EVENT_SHUTDOWN | SCTP_EVENT_PARTIAL_DELIVERY | SCTP_EVENT_ADAPTATION_LAYER | SCTP_EVENT_AUTHENTICATION | SCTP_EVENT_SENDER_DRY +) + +type SCTPNotificationType int + +const ( + SCTP_SN_TYPE_BASE = SCTPNotificationType(iota + (1 << 15)) + SCTP_ASSOC_CHANGE + SCTP_PEER_ADDR_CHANGE + SCTP_SEND_FAILED + SCTP_REMOTE_ERROR + SCTP_SHUTDOWN_EVENT + SCTP_PARTIAL_DELIVERY_EVENT + SCTP_ADAPTATION_INDICATION + SCTP_AUTHENTICATION_INDICATION + SCTP_SENDER_DRY_EVENT +) + +type NotificationHandler func([]byte) error + +type EventSubscribe struct { + DataIO uint8 + Association uint8 + Address uint8 + SendFailure uint8 + PeerError uint8 + Shutdown uint8 + PartialDelivery uint8 + AdaptationLayer uint8 + Authentication uint8 + SenderDry uint8 +} + +const ( + SCTP_CMSG_INIT = iota + SCTP_CMSG_SNDRCV + SCTP_CMSG_SNDINFO + SCTP_CMSG_RCVINFO + SCTP_CMSG_NXTINFO +) + +const ( + SCTP_UNORDERED = 1 << iota + SCTP_ADDR_OVER + SCTP_ABORT + SCTP_SACK_IMMEDIATELY + SCTP_EOF +) + +const ( + SCTP_MAX_STREAM = 0xffff +) + +type InitMsg struct { + NumOstreams uint16 + MaxInstreams uint16 + MaxAttempts uint16 + MaxInitTimeout uint16 +} + +type SndRcvInfo struct { + Stream uint16 + SSN uint16 + Flags uint16 + _ uint16 + PPID uint32 + Context uint32 + TTL uint32 + TSN uint32 + CumTSN uint32 + AssocID int32 +} + +type SndInfo struct { + SID uint16 + Flags uint16 + PPID uint32 + Context uint32 + AssocID 
int32 +} + +type GetAddrsOld struct { + AssocID int32 + AddrNum int32 + Addrs uintptr +} + +type NotificationHeader struct { + Type uint16 + Flags uint16 + Length uint32 +} + +type SCTPState uint16 + +const ( + SCTP_COMM_UP = SCTPState(iota) + SCTP_COMM_LOST + SCTP_RESTART + SCTP_SHUTDOWN_COMP + SCTP_CANT_STR_ASSOC +) + +var nativeEndian binary.ByteOrder +var sndRcvInfoSize uintptr + +func init() { + i := uint16(1) + if *(*byte)(unsafe.Pointer(&i)) == 0 { + nativeEndian = binary.BigEndian + } else { + nativeEndian = binary.LittleEndian + } + info := SndRcvInfo{} + sndRcvInfoSize = unsafe.Sizeof(info) +} + +func toBuf(v interface{}) []byte { + var buf bytes.Buffer + binary.Write(&buf, nativeEndian, v) + return buf.Bytes() +} + +func htons(h uint16) uint16 { + if nativeEndian == binary.LittleEndian { + return (h << 8 & 0xff00) | (h >> 8 & 0xff) + } + return h +} + +var ntohs = htons + +// setInitOpts sets options for an SCTP association initialization +// see https://tools.ietf.org/html/rfc4960#page-25 +func setInitOpts(fd int, options InitMsg) error { + optlen := unsafe.Sizeof(options) + _, _, err := setsockopt(fd, SCTP_INITMSG, uintptr(unsafe.Pointer(&options)), uintptr(optlen)) + return err +} + +func setNumOstreams(fd, num int) error { + return setInitOpts(fd, InitMsg{NumOstreams: uint16(num)}) +} + +type SCTPAddr struct { + IPAddrs []net.IPAddr + Port int +} + +func (a *SCTPAddr) ToRawSockAddrBuf() []byte { + p := htons(uint16(a.Port)) + if len(a.IPAddrs) == 0 { // if a.IPAddrs list is empty - fall back to IPv4 zero addr + s := syscall.RawSockaddrInet4{ + Family: syscall.AF_INET, + Port: p, + } + copy(s.Addr[:], net.IPv4zero) + return toBuf(s) + } + buf := []byte{} + for _, ip := range a.IPAddrs { + ipBytes := ip.IP + if len(ipBytes) == 0 { + ipBytes = net.IPv4zero + } + if ip4 := ipBytes.To4(); ip4 != nil { + s := syscall.RawSockaddrInet4{ + Family: syscall.AF_INET, + Port: p, + } + copy(s.Addr[:], ip4) + buf = append(buf, toBuf(s)...) + } else { + var scopeid uint32 + ifi, err := net.InterfaceByName(ip.Zone) + if err == nil { + scopeid = uint32(ifi.Index) + } + s := syscall.RawSockaddrInet6{ + Family: syscall.AF_INET6, + Port: p, + Scope_id: scopeid, + } + copy(s.Addr[:], ipBytes) + buf = append(buf, toBuf(s)...) 
+		}
+	}
+	return buf
+}
+
+func (a *SCTPAddr) String() string {
+	var b bytes.Buffer
+
+	for n, i := range a.IPAddrs {
+		if i.IP.To4() != nil {
+			b.WriteString(i.String())
+		} else if i.IP.To16() != nil {
+			b.WriteRune('[')
+			b.WriteString(i.String())
+			b.WriteRune(']')
+		}
+		if n < len(a.IPAddrs)-1 {
+			b.WriteRune('/')
+		}
+	}
+	b.WriteRune(':')
+	b.WriteString(strconv.Itoa(a.Port))
+	return b.String()
+}
+
+func (a *SCTPAddr) Network() string { return "sctp" }
+
+func ResolveSCTPAddr(network, addrs string) (*SCTPAddr, error) {
+	tcpnet := ""
+	switch network {
+	case "", "sctp":
+		tcpnet = "tcp"
+	case "sctp4":
+		tcpnet = "tcp4"
+	case "sctp6":
+		tcpnet = "tcp6"
+	default:
+		return nil, fmt.Errorf("invalid net: %s", network)
+	}
+	elems := strings.Split(addrs, "/")
+	if len(elems) == 0 {
+		return nil, fmt.Errorf("invalid input: %s", addrs)
+	}
+	ipaddrs := make([]net.IPAddr, 0, len(elems))
+	for _, e := range elems[:len(elems)-1] {
+		tcpa, err := net.ResolveTCPAddr(tcpnet, e+":")
+		if err != nil {
+			return nil, err
+		}
+		ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone})
+	}
+	tcpa, err := net.ResolveTCPAddr(tcpnet, elems[len(elems)-1])
+	if err != nil {
+		return nil, err
+	}
+	if tcpa.IP != nil {
+		ipaddrs = append(ipaddrs, net.IPAddr{IP: tcpa.IP, Zone: tcpa.Zone})
+	} else {
+		ipaddrs = nil
+	}
+	return &SCTPAddr{
+		IPAddrs: ipaddrs,
+		Port:    tcpa.Port,
+	}, nil
+}
+
+func SCTPConnect(fd int, addr *SCTPAddr) (int, error) {
+	buf := addr.ToRawSockAddrBuf()
+	param := GetAddrsOld{
+		AddrNum: int32(len(buf)),
+		Addrs:   uintptr(uintptr(unsafe.Pointer(&buf[0]))),
+	}
+	optlen := unsafe.Sizeof(param)
+	_, _, err := getsockopt(fd, SCTP_SOCKOPT_CONNECTX3, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+	if err == nil {
+		return int(param.AssocID), nil
+	} else if err != syscall.ENOPROTOOPT {
+		return 0, err
+	}
+	r0, _, err := setsockopt(fd, SCTP_SOCKOPT_CONNECTX, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+	return int(r0), err
+}
+
+func SCTPBind(fd int, addr *SCTPAddr, flags int) error {
+	var option uintptr
+	switch flags {
+	case SCTP_BINDX_ADD_ADDR:
+		option = SCTP_SOCKOPT_BINDX_ADD
+	case SCTP_BINDX_REM_ADDR:
+		option = SCTP_SOCKOPT_BINDX_REM
+	default:
+		return syscall.EINVAL
+	}
+
+	buf := addr.ToRawSockAddrBuf()
+	_, _, err := setsockopt(fd, option, uintptr(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
+	return err
+}
+
+type SCTPConn struct {
+	_fd                 int32
+	notificationHandler NotificationHandler
+}
+
+func (c *SCTPConn) fd() int {
+	return int(atomic.LoadInt32(&c._fd))
+}
+
+func NewSCTPConn(fd int, handler NotificationHandler) *SCTPConn {
+	conn := &SCTPConn{
+		_fd:                 int32(fd),
+		notificationHandler: handler,
+	}
+	return conn
+}
+
+func (c *SCTPConn) Write(b []byte) (int, error) {
+	return c.SCTPWrite(b, nil)
+}
+
+func (c *SCTPConn) Read(b []byte) (int, error) {
+	n, _, err := c.SCTPRead(b)
+	if n < 0 {
+		n = 0
+	}
+	return n, err
+}
+
+func (c *SCTPConn) SetInitMsg(numOstreams, maxInstreams, maxAttempts, maxInitTimeout int) error {
+	return setInitOpts(c.fd(), InitMsg{
+		NumOstreams:    uint16(numOstreams),
+		MaxInstreams:   uint16(maxInstreams),
+		MaxAttempts:    uint16(maxAttempts),
+		MaxInitTimeout: uint16(maxInitTimeout),
+	})
+}
+
+func (c *SCTPConn) SubscribeEvents(flags int) error {
+	var d, a, ad, sf, p, sh, pa, ada, au, se uint8
+	if flags&SCTP_EVENT_DATA_IO > 0 {
+		d = 1
+	}
+	if flags&SCTP_EVENT_ASSOCIATION > 0 {
+		a = 1
+	}
+	if flags&SCTP_EVENT_ADDRESS > 0 {
+		ad = 1
+	}
+	if flags&SCTP_EVENT_SEND_FAILURE > 0 {
+		sf = 1
+	}
+	if flags&SCTP_EVENT_PEER_ERROR > 0 {
+		p = 1
+	}
+	if flags&SCTP_EVENT_SHUTDOWN > 0 {
+		sh = 1
+	}
+	if flags&SCTP_EVENT_PARTIAL_DELIVERY > 0 {
+		pa = 1
+	}
+	if flags&SCTP_EVENT_ADAPTATION_LAYER > 0 {
+		ada = 1
+	}
+	if flags&SCTP_EVENT_AUTHENTICATION > 0 {
+		au = 1
+	}
+	if flags&SCTP_EVENT_SENDER_DRY > 0 {
+		se = 1
+	}
+	param := EventSubscribe{
+		DataIO:          d,
+		Association:     a,
+		Address:         ad,
+		SendFailure:     sf,
+		PeerError:       p,
+		Shutdown:        sh,
+		PartialDelivery: pa,
+		AdaptationLayer: ada,
+		Authentication:  au,
+		SenderDry:       se,
+	}
+	optlen := unsafe.Sizeof(param)
+	_, _, err := setsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(&param)), uintptr(optlen))
+	return err
+}
+
+func (c *SCTPConn) SubscribedEvents() (int, error) {
+	param := EventSubscribe{}
+	optlen := unsafe.Sizeof(param)
+	_, _, err := getsockopt(c.fd(), SCTP_EVENTS, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+	if err != nil {
+		return 0, err
+	}
+	var flags int
+	if param.DataIO > 0 {
+		flags |= SCTP_EVENT_DATA_IO
+	}
+	if param.Association > 0 {
+		flags |= SCTP_EVENT_ASSOCIATION
+	}
+	if param.Address > 0 {
+		flags |= SCTP_EVENT_ADDRESS
+	}
+	if param.SendFailure > 0 {
+		flags |= SCTP_EVENT_SEND_FAILURE
+	}
+	if param.PeerError > 0 {
+		flags |= SCTP_EVENT_PEER_ERROR
+	}
+	if param.Shutdown > 0 {
+		flags |= SCTP_EVENT_SHUTDOWN
+	}
+	if param.PartialDelivery > 0 {
+		flags |= SCTP_EVENT_PARTIAL_DELIVERY
+	}
+	if param.AdaptationLayer > 0 {
+		flags |= SCTP_EVENT_ADAPTATION_LAYER
+	}
+	if param.Authentication > 0 {
+		flags |= SCTP_EVENT_AUTHENTICATION
+	}
+	if param.SenderDry > 0 {
+		flags |= SCTP_EVENT_SENDER_DRY
+	}
+	return flags, nil
+}
+
+func (c *SCTPConn) SetDefaultSentParam(info *SndRcvInfo) error {
+	optlen := unsafe.Sizeof(*info)
+	_, _, err := setsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(optlen))
+	return err
+}
+
+func (c *SCTPConn) GetDefaultSentParam() (*SndRcvInfo, error) {
+	info := &SndRcvInfo{}
+	optlen := unsafe.Sizeof(*info)
+	_, _, err := getsockopt(c.fd(), SCTP_DEFAULT_SENT_PARAM, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(&optlen)))
+	return info, err
+}
+
+func resolveFromRawAddr(ptr unsafe.Pointer, n int) (*SCTPAddr, error) {
+	addr := &SCTPAddr{
+		IPAddrs: make([]net.IPAddr, n),
+	}
+
+	switch family := (*(*syscall.RawSockaddrAny)(ptr)).Addr.Family; family {
+	case syscall.AF_INET:
+		addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port)))
+		tmp := syscall.RawSockaddrInet4{}
+		size := unsafe.Sizeof(tmp)
+		for i := 0; i < n; i++ {
+			a := *(*syscall.RawSockaddrInet4)(unsafe.Pointer(
+				uintptr(ptr) + size*uintptr(i)))
+			addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:]}
+		}
+	case syscall.AF_INET6:
+		addr.Port = int(ntohs(uint16((*(*syscall.RawSockaddrInet4)(ptr)).Port)))
+		tmp := syscall.RawSockaddrInet6{}
+		size := unsafe.Sizeof(tmp)
+		for i := 0; i < n; i++ {
+			a := *(*syscall.RawSockaddrInet6)(unsafe.Pointer(
+				uintptr(ptr) + size*uintptr(i)))
+			var zone string
+			ifi, err := net.InterfaceByIndex(int(a.Scope_id))
+			if err == nil {
+				zone = ifi.Name
+			}
+			addr.IPAddrs[i] = net.IPAddr{IP: a.Addr[:], Zone: zone}
+		}
+	default:
+		return nil, fmt.Errorf("unknown address family: %d", family)
+	}
+	return addr, nil
+}
+
+func sctpGetAddrs(fd, id, optname int) (*SCTPAddr, error) {
+
+	type getaddrs struct {
+		assocId int32
+		addrNum uint32
+		addrs   [4096]byte
+	}
+	param := getaddrs{
+		assocId: int32(id),
+	}
+	optlen := unsafe.Sizeof(param)
+	_, _, err := getsockopt(fd, uintptr(optname), uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+	if err != nil {
+		return nil, err
+	}
+	return resolveFromRawAddr(unsafe.Pointer(&param.addrs), int(param.addrNum))
+}
+
+func (c *SCTPConn) SCTPGetPrimaryPeerAddr() (*SCTPAddr, error) {
+
+	type sctpGetSetPrim struct {
+		assocId int32
+		addrs   [128]byte
+	}
+	param := sctpGetSetPrim{
+		assocId: int32(0),
+	}
+	optlen := unsafe.Sizeof(param)
+	_, _, err := getsockopt(c.fd(), SCTP_PRIMARY_ADDR, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+	if err != nil {
+		return nil, err
+	}
+	return resolveFromRawAddr(unsafe.Pointer(&param.addrs), 1)
+}
+
+func (c *SCTPConn) SCTPLocalAddr(id int) (*SCTPAddr, error) {
+	return sctpGetAddrs(c.fd(), id, SCTP_GET_LOCAL_ADDRS)
+}
+
+func (c *SCTPConn) SCTPRemoteAddr(id int) (*SCTPAddr, error) {
+	return sctpGetAddrs(c.fd(), id, SCTP_GET_PEER_ADDRS)
+}
+
+func (c *SCTPConn) LocalAddr() net.Addr {
+	addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_LOCAL_ADDRS)
+	if err != nil {
+		return nil
+	}
+	return addr
+}
+
+func (c *SCTPConn) RemoteAddr() net.Addr {
+	addr, err := sctpGetAddrs(c.fd(), 0, SCTP_GET_PEER_ADDRS)
+	if err != nil {
+		return nil
+	}
+	return addr
+}
+
+func (c *SCTPConn) PeelOff(id int) (*SCTPConn, error) {
+	type peeloffArg struct {
+		assocId int32
+		sd      int
+	}
+	param := peeloffArg{
+		assocId: int32(id),
+	}
+	optlen := unsafe.Sizeof(param)
+	_, _, err := getsockopt(c.fd(), SCTP_SOCKOPT_PEELOFF, uintptr(unsafe.Pointer(&param)), uintptr(unsafe.Pointer(&optlen)))
+	if err != nil {
+		return nil, err
+	}
+	return &SCTPConn{_fd: int32(param.sd)}, nil
+}
+
+func (c *SCTPConn) SetDeadline(t time.Time) error {
+	return syscall.EOPNOTSUPP
+}
+
+func (c *SCTPConn) SetReadDeadline(t time.Time) error {
+	return syscall.EOPNOTSUPP
+}
+
+func (c *SCTPConn) SetWriteDeadline(t time.Time) error {
+	return syscall.EOPNOTSUPP
+}
+
+type SCTPListener struct {
+	fd int
+	m  sync.Mutex
+}
+
+func (ln *SCTPListener) Addr() net.Addr {
+	laddr, err := sctpGetAddrs(ln.fd, 0, SCTP_GET_LOCAL_ADDRS)
+	if err != nil {
+		return nil
+	}
+	return laddr
+}
+
+type SCTPSndRcvInfoWrappedConn struct {
+	conn *SCTPConn
+}
+
+func NewSCTPSndRcvInfoWrappedConn(conn *SCTPConn) *SCTPSndRcvInfoWrappedConn {
+	conn.SubscribeEvents(SCTP_EVENT_DATA_IO)
+	return &SCTPSndRcvInfoWrappedConn{conn}
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) Write(b []byte) (int, error) {
+	if len(b) < int(sndRcvInfoSize) {
+		return 0, syscall.EINVAL
+	}
+	info := (*SndRcvInfo)(unsafe.Pointer(&b[0]))
+	n, err := c.conn.SCTPWrite(b[sndRcvInfoSize:], info)
+	return n + int(sndRcvInfoSize), err
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) Read(b []byte) (int, error) {
+	if len(b) < int(sndRcvInfoSize) {
+		return 0, syscall.EINVAL
+	}
+	n, info, err := c.conn.SCTPRead(b[sndRcvInfoSize:])
+	if err != nil {
+		return n, err
+	}
+	copy(b, toBuf(info))
+	return n + int(sndRcvInfoSize), err
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) Close() error {
+	return c.conn.Close()
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) LocalAddr() net.Addr {
+	return c.conn.LocalAddr()
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) RemoteAddr() net.Addr {
+	return c.conn.RemoteAddr()
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetDeadline(t time.Time) error {
+	return c.conn.SetDeadline(t)
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetReadDeadline(t time.Time) error {
+	return c.conn.SetReadDeadline(t)
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetWriteDeadline(t time.Time) error {
+	return c.conn.SetWriteDeadline(t)
+}
+
+func (c *SCTPSndRcvInfoWrappedConn) SetWriteBuffer(bytes int) error {
+	return 
c.conn.SetWriteBuffer(bytes) +} + +func (c *SCTPSndRcvInfoWrappedConn) GetWriteBuffer() (int, error) { + return c.conn.GetWriteBuffer() +} + +func (c *SCTPSndRcvInfoWrappedConn) SetReadBuffer(bytes int) error { + return c.conn.SetReadBuffer(bytes) +} + +func (c *SCTPSndRcvInfoWrappedConn) GetReadBuffer() (int, error) { + return c.conn.GetReadBuffer() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp_linux.go b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp_linux.go new file mode 100644 index 00000000..2e1976cc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp_linux.go @@ -0,0 +1,252 @@ +// +build linux,!386 + +package sctp + +import ( + "io" + "net" + "sync/atomic" + "syscall" + "unsafe" +) + +func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + // FIXME: syscall.SYS_SETSOCKOPT is undefined on 386 + r0, r1, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, + uintptr(fd), + SOL_SCTP, + optname, + optval, + optlen, + 0) + if errno != 0 { + return r0, r1, errno + } + return r0, r1, nil +} + +func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) { + // FIXME: syscall.SYS_GETSOCKOPT is undefined on 386 + r0, r1, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, + uintptr(fd), + SOL_SCTP, + optname, + optval, + optlen, + 0) + if errno != 0 { + return r0, r1, errno + } + return r0, r1, nil +} + +func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) { + var cbuf []byte + if info != nil { + cmsgBuf := toBuf(info) + hdr := &syscall.Cmsghdr{ + Level: syscall.IPPROTO_SCTP, + Type: SCTP_CMSG_SNDRCV, + } + + // bitwidth of hdr.Len is platform-specific, + // so we use hdr.SetLen() rather than directly setting hdr.Len + hdr.SetLen(syscall.CmsgSpace(len(cmsgBuf))) + cbuf = append(toBuf(hdr), cmsgBuf...) 
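+		// cbuf now holds a complete control message: a cmsghdr with level
+		// IPPROTO_SCTP and type SCTP_CMSG_SNDRCV followed by the serialized
+		// SndRcvInfo. SendmsgN below passes it as ancillary data, letting the
+		// kernel apply the per-message stream number, flags and PPID.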
+ } + return syscall.SendmsgN(c.fd(), b, cbuf, nil, 0) +} + +func parseSndRcvInfo(b []byte) (*SndRcvInfo, error) { + msgs, err := syscall.ParseSocketControlMessage(b) + if err != nil { + return nil, err + } + for _, m := range msgs { + if m.Header.Level == syscall.IPPROTO_SCTP { + switch m.Header.Type { + case SCTP_CMSG_SNDRCV: + return (*SndRcvInfo)(unsafe.Pointer(&m.Data[0])), nil + } + } + } + return nil, nil +} + +func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) { + oob := make([]byte, 254) + for { + n, oobn, recvflags, _, err := syscall.Recvmsg(c.fd(), b, oob, 0) + if err != nil { + return n, nil, err + } + + if n == 0 && oobn == 0 { + return 0, nil, io.EOF + } + + if recvflags&MSG_NOTIFICATION > 0 && c.notificationHandler != nil { + if err := c.notificationHandler(b[:n]); err != nil { + return 0, nil, err + } + } else { + var info *SndRcvInfo + if oobn > 0 { + info, err = parseSndRcvInfo(oob[:oobn]) + } + return n, info, err + } + } +} + +func (c *SCTPConn) Close() error { + if c != nil { + fd := atomic.SwapInt32(&c._fd, -1) + if fd > 0 { + info := &SndRcvInfo{ + Flags: SCTP_EOF, + } + c.SCTPWrite(nil, info) + syscall.Shutdown(int(fd), syscall.SHUT_RDWR) + return syscall.Close(int(fd)) + } + } + return syscall.EBADF +} + +func (c *SCTPConn) SetWriteBuffer(bytes int) error { + return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF, bytes) +} + +func (c *SCTPConn) GetWriteBuffer() (int, error) { + return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_SNDBUF) +} + +func (c *SCTPConn) SetReadBuffer(bytes int) error { + return syscall.SetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF, bytes) +} + +func (c *SCTPConn) GetReadBuffer() (int, error) { + return syscall.GetsockoptInt(c.fd(), syscall.SOL_SOCKET, syscall.SO_RCVBUF) +} + +// ListenSCTP - start listener on specified address/port +func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) { + return ListenSCTPExt(net, laddr, InitMsg{NumOstreams: SCTP_MAX_STREAM}) +} + +// ListenSCTPExt - start listener on specified address/port with given SCTP options +func ListenSCTPExt(network string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) { + af, ipv6only := favoriteAddrFamily(network, laddr, nil, "listen") + sock, err := syscall.Socket( + af, + syscall.SOCK_STREAM, + syscall.IPPROTO_SCTP, + ) + if err != nil { + return nil, err + } + + // close socket on error + defer func() { + if err != nil { + syscall.Close(sock) + } + }() + if err = setDefaultSockopts(sock, af, ipv6only); err != nil { + return nil, err + } + err = setInitOpts(sock, options) + if err != nil { + return nil, err + } + + if laddr != nil { + // If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address + if len(laddr.IPAddrs) == 0 { + if af == syscall.AF_INET { + laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero}) + } else if af == syscall.AF_INET6 { + laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero}) + } + } + err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR) + if err != nil { + return nil, err + } + } + err = syscall.Listen(sock, syscall.SOMAXCONN) + if err != nil { + return nil, err + } + return &SCTPListener{ + fd: sock, + }, nil +} + +// AcceptSCTP waits for and returns the next SCTP connection to the listener. 
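+//
+// A minimal accept-loop sketch (the address and the serve helper are
+// illustrative, not part of this package):
+//
+//	laddr, _ := ResolveSCTPAddr("sctp", "127.0.0.1:9999")
+//	ln, err := ListenSCTP("sctp", laddr)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for {
+//		conn, err := ln.AcceptSCTP()
+//		if err != nil {
+//			continue
+//		}
+//		go serve(conn)
+//	}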
+func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) {
+	fd, _, err := syscall.Accept4(ln.fd, 0)
+	return NewSCTPConn(fd, nil), err
+}
+
+// Accept waits for and returns the next connection to the listener.
+func (ln *SCTPListener) Accept() (net.Conn, error) {
+	return ln.AcceptSCTP()
+}
+
+func (ln *SCTPListener) Close() error {
+	syscall.Shutdown(ln.fd, syscall.SHUT_RDWR)
+	return syscall.Close(ln.fd)
+}
+
+// DialSCTP - bind socket to laddr (if given) and connect to raddr
+func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) {
+	return DialSCTPExt(net, laddr, raddr, InitMsg{NumOstreams: SCTP_MAX_STREAM})
+}
+
+// DialSCTPExt - same as DialSCTP but with given SCTP options
+func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) {
+	af, ipv6only := favoriteAddrFamily(network, laddr, raddr, "dial")
+	sock, err := syscall.Socket(
+		af,
+		syscall.SOCK_STREAM,
+		syscall.IPPROTO_SCTP,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// close socket on error
+	defer func() {
+		if err != nil {
+			syscall.Close(sock)
+		}
+	}()
+	if err = setDefaultSockopts(sock, af, ipv6only); err != nil {
+		return nil, err
+	}
+	err = setInitOpts(sock, options)
+	if err != nil {
+		return nil, err
+	}
+	if laddr != nil {
+		// If IP address and/or port was not provided so far, let's use the unspecified IPv4 or IPv6 address
+		if len(laddr.IPAddrs) == 0 {
+			if af == syscall.AF_INET {
+				laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv4zero})
+			} else if af == syscall.AF_INET6 {
+				laddr.IPAddrs = append(laddr.IPAddrs, net.IPAddr{IP: net.IPv6zero})
+			}
+		}
+		err := SCTPBind(sock, laddr, SCTP_BINDX_ADD_ADDR)
+		if err != nil {
+			return nil, err
+		}
+	}
+	_, err = SCTPConnect(sock, raddr)
+	if err != nil {
+		return nil, err
+	}
+	return NewSCTPConn(sock, nil), nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
new file mode 100644
index 00000000..ed12f46f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/ishidawataru/sctp/sctp_unsupported.go
@@ -0,0 +1,75 @@
+// +build !linux linux,386
+
+package sctp
+
+import (
+	"errors"
+	"net"
+	"runtime"
+)
+
+var ErrUnsupported = errors.New("SCTP is unsupported on " + runtime.GOOS + "/" + runtime.GOARCH)
+
+func setsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+	return 0, 0, ErrUnsupported
+}
+
+func getsockopt(fd int, optname, optval, optlen uintptr) (uintptr, uintptr, error) {
+	return 0, 0, ErrUnsupported
+}
+
+func (c *SCTPConn) SCTPWrite(b []byte, info *SndRcvInfo) (int, error) {
+	return 0, ErrUnsupported
+}
+
+func (c *SCTPConn) SCTPRead(b []byte) (int, *SndRcvInfo, error) {
+	return 0, nil, ErrUnsupported
+}
+
+func (c *SCTPConn) Close() error {
+	return ErrUnsupported
+}
+
+func (c *SCTPConn) SetWriteBuffer(bytes int) error {
+	return ErrUnsupported
+}
+
+func (c *SCTPConn) GetWriteBuffer() (int, error) {
+	return 0, ErrUnsupported
+}
+
+func (c *SCTPConn) SetReadBuffer(bytes int) error {
+	return ErrUnsupported
+}
+
+func (c *SCTPConn) GetReadBuffer() (int, error) {
+	return 0, ErrUnsupported
+}
+
+func ListenSCTP(net string, laddr *SCTPAddr) (*SCTPListener, error) {
+	return nil, ErrUnsupported
+}
+
+func ListenSCTPExt(net string, laddr *SCTPAddr, options InitMsg) (*SCTPListener, error) {
+	return nil, ErrUnsupported
+}
+
+func (ln *SCTPListener) Accept() (net.Conn, error) {
+	return nil, 
ErrUnsupported +} + +func (ln *SCTPListener) AcceptSCTP() (*SCTPConn, error) { + return nil, ErrUnsupported +} + +func (ln *SCTPListener) Close() error { + return ErrUnsupported +} + +func DialSCTP(net string, laddr, raddr *SCTPAddr) (*SCTPConn, error) { + return nil, ErrUnsupported +} + +func DialSCTPExt(network string, laddr, raddr *SCTPAddr, options InitMsg) (*SCTPConn, error) { + return nil, ErrUnsupported +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 00000000..5d8cb5b7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile new file mode 100644 index 00000000..81be2143 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile @@ -0,0 +1,7 @@ +all: + +cover: + go test -cover -v -coverprofile=cover.dat ./... + go tool cover -func cover.dat + +.PHONY: cover diff --git a/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 00000000..258c0636 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. 
This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 00000000..c318385c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 00000000..8fb59ad2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. +func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 00000000..d70706d5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. 
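+
+For example, a minimal sketch of both calls:
+
+```go
+package main
+
+import (
+	"fmt"
+	"log"
+
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+func main() {
+	home, err := homedir.Dir() // e.g. "/home/alice"
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	cfg, err := homedir.Expand("~/.config/app") // "/home/alice/.config/app"
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(home, cfg)
+}
+```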
+ +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/go.mod new file mode 100644 index 00000000..7efa09a0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 00000000..25378537 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . 
-read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. + if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 00000000..dd878a30 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. 
+ +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 00000000..44986bff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/build_info.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/build_info.go new file mode 100644 index 00000000..288f0e85 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/build_info.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build go1.12 + +package prometheus + +import "runtime/debug" + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go 1.12+. +func readBuildInfo() (path, version, sum string) { + path, version, sum = "unknown", "unknown", "unknown" + if bi, ok := debug.ReadBuildInfo(); ok { + path = bi.Main.Path + version = bi.Main.Version + sum = bi.Main.Sum + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go new file mode 100644 index 00000000..6609e287 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/build_info_pre_1.12.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !go1.12 + +package prometheus + +// readBuildInfo is a wrapper around debug.ReadBuildInfo for Go versions before +// 1.12. Remove this whole file once the minimum supported Go version is 1.12. 
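To see what the go1.12 wrapper above actually reads, here is a minimal standalone sketch using runtime/debug directly (illustrative only, not part of the vendored sources):

package main

import (
	"fmt"
	"runtime/debug"
)

func main() {
	// Only binaries built with module support carry build info; otherwise ok
	// is false, which is why the wrapper above falls back to "unknown".
	if bi, ok := debug.ReadBuildInfo(); ok {
		fmt.Println(bi.Main.Path, bi.Main.Version, bi.Main.Sum)
	}
}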
+func readBuildInfo() (path, version, sum string) { + return "unknown", "unknown", "unknown" +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 00000000..1e839650 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,120 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. + // + // It is valid if one and the same Collector sends duplicate + // descriptors. Those duplicates are simply ignored. However, two + // different Collectors must not send duplicate descriptors. + // + // Sending no descriptor at all marks the Collector as “unchecked”, + // i.e. no checks will be performed at registration time, and the + // Collector may yield any Metric it sees fit in its Collect method. + // + // This method idempotently sends the same descriptors throughout the + // lifetime of the Collector. It may be called concurrently and + // therefore must be implemented in a concurrency safe way. + // + // If a Collector encounters an error while executing this method, it + // must send an invalid descriptor (created with NewInvalidDesc) to + // signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by Describe + // (unless the Collector is unchecked, see above). Returned metrics that + // share the same descriptor must differ in their variable label + // values. 
+ // + // This method may be called concurrently and must therefore be + // implemented in a concurrency safe way. Blocking occurs at the expense + // of total performance of rendering all registered metrics. Ideally, + // Collector implementations support concurrent readers. + Collect(chan<- Metric) +} + +// DescribeByCollect is a helper to implement the Describe method of a custom +// Collector. It collects the metrics from the provided Collector and sends +// their descriptors to the provided channel. +// +// If a Collector collects the same metrics throughout its lifetime, its +// Describe method can simply be implemented as: +// +// func (c customCollector) Describe(ch chan<- *Desc) { +// DescribeByCollect(c, ch) +// } +// +// However, this will not work if the metrics collected change dynamically over +// the lifetime of the Collector in a way that their combined set of descriptors +// changes as well. The shortcut implementation will then violate the contract +// of the Describe method. If a Collector sometimes collects no metrics at all +// (for example vectors like CounterVec, GaugeVec, etc., which only collect +// metrics after a metric with a fully specified label set has been accessed), +// it might even get registered as an unchecked Collector (cf. the Register +// method of the Registerer interface). Hence, only use this shortcut +// implementation of Describe if you are certain to fulfill the contract. +// +// The Collector example demonstrates a use of DescribeByCollect. +func DescribeByCollect(c Collector, descs chan<- *Desc) { + metrics := make(chan Metric) + go func() { + c.Collect(metrics) + close(metrics) + }() + for m := range metrics { + descs <- m.Desc() + } +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. +func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 00000000..d463e36d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,277 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
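The Collector contract and the DescribeByCollect helper above can be illustrated with a short sketch (not part of the vendored sources; the queue_depth metric and the depth callback are invented for the example):

package queue

import "github.com/prometheus/client_golang/prometheus"

// queueCollector reports a value obtained from an application callback at
// scrape time, so no state has to be kept in a Metric between scrapes.
type queueCollector struct {
	depth func() float64 // hypothetical callback into the application
	desc  *prometheus.Desc
}

func newQueueCollector(depth func() float64) *queueCollector {
	return &queueCollector{
		depth: depth,
		desc: prometheus.NewDesc(
			"queue_depth",
			"Current depth of the work queue.",
			nil, nil,
		),
	}
}

// Describe delegates to DescribeByCollect, which is valid here because the
// set of descriptors never changes over the Collector's lifetime.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	prometheus.DescribeByCollect(c, ch)
}

// Collect builds a throw-away constant metric from the callback's value.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, c.depth())
}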
+ +package prometheus + +import ( + "errors" + "math" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Inc increments the counter by 1. Use Add to increment it by arbitrary + // non-negative values. + Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +// +// The returned implementation tracks the counter value in two separate +// variables, a float64 and a uint64. The latter is used to track calls of the +// Inc method and calls of the Add method with a value that can be represented +// as a uint64. This allows atomic increments of the counter with optimal +// performance. (It is common to have an Inc call in very hot execution paths.) +// Both internal tracking values are added up in the Write method. This has to +// be taken into account when it comes to precision and overflow behavior. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + // valBits contains the bits of the represented float64 value, while + // valInt stores values that are exact integers. Both have to go first + // in the struct to guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + valInt uint64 + + selfCollector + desc *Desc + + labelPairs []*dto.LabelPair +} + +func (c *counter) Desc() *Desc { + return c.desc +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + ival := uint64(v) + if float64(ival) == v { + atomic.AddUint64(&c.valInt, ival) + return + } + + for { + oldBits := atomic.LoadUint64(&c.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&c.valBits, oldBits, newBits) { + return + } + } +} + +func (c *counter) Inc() { + atomic.AddUint64(&c.valInt, 1) +} + +func (c *counter) Write(out *dto.Metric) error { + fval := math.Float64frombits(atomic.LoadUint64(&c.valBits)) + ival := atomic.LoadUint64(&c.valInt) + val := fval + float64(ival) + + return populateMetric(CounterValue, val, c.labelPairs, out) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +type CounterVec struct { + *metricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. 
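Before the vector variant below, a brief usage sketch for the Counter implementation just described (illustrative only; the metric name is invented). The comments note which of the two internal representations each call touches:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	reqs := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total number of handled HTTP requests.",
	})
	prometheus.MustRegister(reqs)

	reqs.Inc()    // hot path: one atomic add on the integer word (valInt)
	reqs.Add(3)   // 3 is an exact integer, so this also updates valInt
	reqs.Add(0.5) // fractional values take the CAS loop on the float bits (valBits)
}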
+func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &counter{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. + return result + }), + } +} + +// GetMetricWithLabelValues returns the Counter for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Counter is created. +// +// It is possible to call this method without using the returned Counter to only +// create the new Counter but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Counter for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Counter from the CounterVec. In that case, +// the Counter will still exist, but it will not be exported anymore, even if a +// Counter with the same label values is created later. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith returns the Counter for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Counter is created. Implications of +// creating a Counter without using it and keeping the Counter for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *CounterVec) WithLabelValues(lvs ...string) Counter { + c, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return c +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *CounterVec) With(labels Labels) Counter { + c, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return c +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the CounterVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *CounterVec) CurryWith(labels Labels) (*CounterVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &CounterVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *CounterVec) MustCurryWith(labels Labels) *CounterVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 00000000..1d034f87 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" + + dto "github.com/prometheus/client_model/go" +) + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. + constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occurred during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Collector example for a usage pattern. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if !model.IsValidMetricName(model.LabelValue(fqName)) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... 
+ for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Validate the const label values. They can't have a wrong cardinality, so + // use in len(labelValues) as expectedNumberOfValues. + if err := validateLabelValues(labelValues, len(labelValues)); err != nil { + d.err = err + return d + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name for metric %q", labelName, fqName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(labelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 00000000..01977de6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,200 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus is the core instrumentation package. It provides metrics +// primitives to instrument code for monitoring. It also offers a registry for +// metrics. Sub-packages allow to expose the registered metrics via HTTP +// (package promhttp) or push them to a Pushgateway (package push). There is +// also a sub-package promauto, which provides metrics constructors with +// automatic registration. +// +// All exported functions and methods are safe to be used concurrently unless +// specified otherwise. +// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "log" +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// log.Fatal(http.ListenAndServe(":8080", nil)) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. However, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. Furthermore, if you are not concerned with +// fine-grained control of when and how to register metrics with the registry, +// have a look at the promauto package, which will effectively allow you to +// ignore registration altogether in simple cases. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. 
The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or +// UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created later. +// NewDesc comes in handy to create those Desc instances. Alternatively, you +// could return no Desc at all, which will mark the Collector “unchecked”. No +// checks are performed at registration time, but metric consistency will still +// be ensured at scrape time, i.e. any inconsistencies will lead to scrape +// errors. Thus, with unchecked Collectors, the responsibility to not collect +// metrics that lead to inconsistencies in the total scrape result lies with the +// implementer of the Collector. While this is not a desirable state, it is +// sometimes necessary. The typical use case is a situation where the exact +// metrics to be returned by a Collector cannot be predicted at registration +// time, but the implementer has sufficient knowledge of the whole system to +// guarantee metric consistency. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might cause. +// As suggested by the name, MustRegister panics if an error occurs. With the +// Register function, the error is returned and can be handled. 
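A hedged sketch of that error-handling path, using the AlreadyRegisteredError type the library returns for duplicate registrations (the metric name is invented):

package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "jobs_total",
		Help: "Jobs observed.",
	})
	if err := prometheus.Register(c); err != nil {
		if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
			// Another Collector with the same Desc got there first; reuse it.
			c = are.ExistingCollector.(prometheus.Counter)
		} else {
			log.Fatal(err)
		}
	}
	c.Inc()
}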
+// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data model. +// Inconsistencies are ideally detected at registration time, not at collect +// time. The former will usually be detected at start-up time of a program, +// while the latter will only happen at scrape time, possibly not even on the +// first scrape if the inconsistency only becomes relevant later. That is the +// main reason why a Collector and a Metric have to describe themselves to the +// registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegisterer variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in the +// same way on a custom registry as the global functions Register and Unregister +// on the default registry. +// +// There are a number of uses for custom registries: You can use registries with +// special properties, see NewPedanticRegistry. You can avoid global state, as +// it is imposed by the DefaultRegisterer. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegisterer comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp sub-package. +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Graphite Bridge +// +// Functions and examples to push metrics from a Gatherer to Graphite can be +// found in the graphite sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added by following the approaches +// of the existing implementations. +package prometheus diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 00000000..18a99d5f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
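The custom-registry and HTTP-exposition advice from the package documentation above, condensed into a runnable sketch (illustrative only):

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A dedicated registry avoids the global DefaultRegisterer state.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector()) // opt in to runtime metrics

	// Registry implements Gatherer, so it can be served directly.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":8080", nil))
}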
+ +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototyping, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)
+ } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 00000000..3d383a73 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 00000000..71d406bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,286 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "sync/atomic" + "time" + + dto "github.com/prometheus/client_model/go" +) + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. Use Add to increment it by arbitrary + // values. + Inc() + // Dec decrements the Gauge by 1. 
Use Sub to decrement it by arbitrary + // values. + Dec() + // Add adds the given value to the Gauge. (The value can be negative, + // resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) + + // SetToCurrentTime sets the Gauge to the current Unix time in seconds. + SetToCurrentTime() +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. +// +// The returned implementation is optimized for a fast Set method. If you have a +// choice for managing the value of a Gauge via Set vs. Inc/Dec/Add/Sub, pick +// the former. For example, the Inc method of the returned Gauge is slower than +// the Inc method of a Counter returned by NewCounter. This matches the typical +// scenarios for Gauges and Counters, where the former tends to be Set-heavy and +// the latter Inc-heavy. +func NewGauge(opts GaugeOpts) Gauge { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &gauge{desc: desc, labelPairs: desc.constLabelPairs} + result.init(result) // Init self-collection. + return result +} + +type gauge struct { + // valBits contains the bits of the represented float64 value. It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + labelPairs []*dto.LabelPair +} + +func (g *gauge) Desc() *Desc { + return g.desc +} + +func (g *gauge) Set(val float64) { + atomic.StoreUint64(&g.valBits, math.Float64bits(val)) +} + +func (g *gauge) SetToCurrentTime() { + g.Set(float64(time.Now().UnixNano()) / 1e9) +} + +func (g *gauge) Inc() { + g.Add(1) +} + +func (g *gauge) Dec() { + g.Add(-1) +} + +func (g *gauge) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&g.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&g.valBits, oldBits, newBits) { + return + } + } +} + +func (g *gauge) Sub(val float64) { + g.Add(val * -1) +} + +func (g *gauge) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&g.valBits)) + return populateMetric(GaugeValue, val, g.labelPairs, out) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *metricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + if len(lvs) != len(desc.variableLabels) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, lvs)) + } + result := &gauge{desc: desc, labelPairs: makeLabelPairs(desc, lvs)} + result.init(result) // Init self-collection. 
+ return result + }), + } +} + +// GetMetricWithLabelValues returns the Gauge for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Gauge is created. +// +// It is possible to call this method without using the returned Gauge to only +// create the new Gauge but leave it at its starting value 0. See also the +// SummaryVec example. +// +// Keeping the Gauge for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Gauge from the GaugeVec. In that case, the +// Gauge will still exist, but it will not be exported anymore, even if a +// Gauge with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +func (v *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith returns the Gauge for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Gauge is created. Implications of +// creating a Gauge without using it and keeping the Gauge for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (v *GaugeVec) WithLabelValues(lvs ...string) Gauge { + g, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return g +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Add(42) +func (v *GaugeVec) With(labels Labels) Gauge { + g, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return g +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. 
The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the GaugeVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *GaugeVec) CurryWith(labels Labels) (*GaugeVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &GaugeVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *GaugeVec) MustCurryWith(labels Labels) *GaugeVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. +func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 00000000..dc9247fe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,396 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "runtime" + "runtime/debug" + "sync" + "time" +) + +type goCollector struct { + goroutinesDesc *Desc + threadsDesc *Desc + gcDesc *Desc + goInfoDesc *Desc + + // ms... are memstats related. + msLast *runtime.MemStats // Previously collected memstats. + msLastTimestamp time.Time + msMtx sync.Mutex // Protects msLast and msLastTimestamp. + msMetrics memStatsMetrics + msRead func(*runtime.MemStats) // For mocking in tests. + msMaxWait time.Duration // Wait time for fresh memstats. + msMaxAge time.Duration // Maximum allowed age of old memstats. 
+} + +// NewGoCollector returns a collector that exports metrics about the current Go +// process. This includes memory stats. To collect those, runtime.ReadMemStats +// is called. This requires to “stop the world”, which usually only happens for +// garbage collection (GC). Take the following implications into account when +// deciding whether to use the Go collector: +// +// 1. The performance impact of stopping the world is the more relevant the more +// frequently metrics are collected. However, with Go1.9 or later the +// stop-the-world time per metrics collection is very short (~25µs) so that the +// performance impact will only matter in rare cases. However, with older Go +// versions, the stop-the-world duration depends on the heap size and can be +// quite significant (~1.7 ms/GiB as per +// https://go-review.googlesource.com/c/go/+/34937). +// +// 2. During an ongoing GC, nothing else can stop the world. Therefore, if the +// metrics collection happens to coincide with GC, it will only complete after +// GC has finished. Usually, GC is fast enough to not cause problems. However, +// with a very large heap, GC might take multiple seconds, which is enough to +// cause scrape timeouts in common setups. To avoid this problem, the Go +// collector will use the memstats from a previous collection if +// runtime.ReadMemStats takes more than 1s. However, if there are no previously +// collected memstats, or their collection is more than 5m ago, the collection +// will block until runtime.ReadMemStats succeeds. (The problem might be solved +// in Go1.13, see https://github.com/golang/go/issues/19812 for the related Go +// issue.) +func NewGoCollector() Collector { + return &goCollector{ + goroutinesDesc: NewDesc( + "go_goroutines", + "Number of goroutines that currently exist.", + nil, nil), + threadsDesc: NewDesc( + "go_threads", + "Number of OS threads created.", + nil, nil), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + goInfoDesc: NewDesc( + "go_info", + "Information about the Go environment.", + nil, Labels{"version": runtime.Version()}), + msLast: &runtime.MemStats{}, + msRead: runtime.ReadMemStats, + msMaxWait: time.Second, + msMaxAge: 5 * time.Minute, + msMetrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return 
float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes"), + "Number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: 
GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_cpu_fraction"), + "The fraction of this program's available CPU time used by the GC since the program started.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return "go_memstats_" + s +} + +// Describe returns all descriptions of the collector. +func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutinesDesc + ch <- c.threadsDesc + ch <- c.gcDesc + ch <- c.goInfoDesc + for _, i := range c.msMetrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + var ( + ms = &runtime.MemStats{} + done = make(chan struct{}) + ) + // Start reading memstats first as it might take a while. + go func() { + c.msRead(ms) + c.msMtx.Lock() + c.msLast = ms + c.msLastTimestamp = time.Now() + c.msMtx.Unlock() + close(done) + }() + + ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine())) + n, _ := runtime.ThreadCreateProfile(nil) + ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n)) + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles) + + ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1) + + timer := time.NewTimer(c.msMaxWait) + select { + case <-done: // Our own ReadMemStats succeeded in time. Use it. + timer.Stop() // Important for high collection frequencies to not pile up timers. + c.msCollect(ch, ms) + return + case <-timer.C: // Time out, use last memstats if possible. Continue below. + } + c.msMtx.Lock() + if time.Since(c.msLastTimestamp) < c.msMaxAge { + // Last memstats are recent enough. Collect from them under the lock. + c.msCollect(ch, c.msLast) + c.msMtx.Unlock() + return + } + // If we are here, the last memstats are too old or don't exist. We have + // to wait until our own ReadMemStats finally completes. For that to + // happen, we have to release the lock. + c.msMtx.Unlock() + <-done + c.msCollect(ch, ms) +} + +func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) { + for _, i := range c.msMetrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. +type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} + +// NewBuildInfoCollector returns a collector collecting a single metric +// "go_build_info" with the constant value 1 and three labels "path", "version", +// and "checksum". 
Their label values contain the main module path, version, and +// checksum, respectively. The labels will only have meaningful values if the +// binary is built with Go module support and from source code retrieved from +// the source repository (rather than the local file system). This is usually +// accomplished by building from outside of GOPATH, specifying the full address +// of the main package, e.g. "GO111MODULE=on go run +// github.com/prometheus/client_golang/examples/random". If built without Go +// module support, all label values will be "unknown". If built with Go module +// support but using the source code from the local file system, the "path" will +// be set appropriately, but "checksum" will be empty and "version" will be +// "(devel)". +// +// This collector uses only the build information for the main module. See +// https://github.com/povilasv/prommod for an example of a collector for the +// module dependencies. +func NewBuildInfoCollector() Collector { + path, version, sum := readBuildInfo() + c := &selfCollector{MustNewConstMetric( + NewDesc( + "go_build_info", + "Build information about the main Go module.", + nil, Labels{"path": path, "version": version, "checksum": sum}, + ), + GaugeValue, 1)} + c.init(c.self) + return c +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 00000000..d7ea67bd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,586 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). 
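+// For example, a histogram named http_request_duration_seconds exposes a
+// counter per bucket as http_request_duration_seconds_bucket{le="0.25"}
+// in the Prometheus exposition format (editor's note; the metric name here
+// is illustrative).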
+const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. +var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name to a non-empty string. All other fields are optional +// and can safely be left at their zero value, although it is strongly +// encouraged to set a Help string. +type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. 
Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. + Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues)) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + counts: [2]*histogramCounts{&histogramCounts{}, &histogramCounts{}}, + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make buckets + // for both counts: + h.counts[0].buckets = make([]uint64, len(h.upperBounds)) + h.counts[1].buckets = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogramCounts struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + buckets []uint64 +} + +type histogram struct { + // countAndHotIdx enables lock-free writes with use of atomic updates. + // The most significant bit is the hot index [0 or 1] of the count field + // below. Observe calls update the hot one. All remaining bits count the + // number of Observe calls. Observe starts by incrementing this counter, + // and finish by incrementing the count field in the respective + // histogramCounts, as a marker for completion. + // + // Calls of the Write method (which are non-mutating reads from the + // perspective of the histogram) swap the hot–cold under the writeMtx + // lock. A cooldown is awaited (while locked) by comparing the number of + // observations with the initiation count. Once they match, then the + // last observation on the now cool one has completed. All cool fields must + // be merged into the new hot before releasing writeMtx. + // + // Fields with atomic access first! See alignment constraint: + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + countAndHotIdx uint64 + + selfCollector + desc *Desc + writeMtx sync.Mutex // Only used in the Write method. 
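+	// (Editor's note: concretely, after n := atomic.AddUint64(&h.countAndHotIdx, 1)
+	// in Observe, n>>63 is the index of the hot counts and n&((1<<63)-1) is
+	// the number of observations initiated so far.)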
+ + // Two counts, one is "hot" for lock-free observations, the other is + // "cold" for writing out a dto.Metric. It has to be an array of + // pointers to guarantee 64bit alignment of the histogramCounts, see + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG. + counts [2]*histogramCounts + + upperBounds []float64 + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + + // We increment h.countAndHotIdx so that the counter in the lower + // 63 bits gets incremented. At the same time, we get the new value + // back, which we can use to find the currently-hot counts. + n := atomic.AddUint64(&h.countAndHotIdx, 1) + hotCounts := h.counts[n>>63] + + if i < len(h.upperBounds) { + atomic.AddUint64(&hotCounts.buckets[i], 1) + } + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + break + } + } + // Increment count last as we take it as a signal that the observation + // is complete. + atomic.AddUint64(&hotCounts.count, 1) +} + +func (h *histogram) Write(out *dto.Metric) error { + // For simplicity, we protect this whole method by a mutex. It is not in + // the hot path, i.e. Observe is called much more often than Write. The + // complication of making Write lock-free isn't worth it, if possible at + // all. + h.writeMtx.Lock() + defer h.writeMtx.Unlock() + + // Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0) + // without touching the count bits. See the struct comments for a full + // description of the algorithm. + n := atomic.AddUint64(&h.countAndHotIdx, 1<<63) + // count is contained unchanged in the lower 63 bits. + count := n & ((1 << 63) - 1) + // The most significant bit tells us which counts is hot. The complement + // is thus the cold one. + hotCounts := h.counts[n>>63] + coldCounts := h.counts[(^n)>>63] + + // Await cooldown. + for count != atomic.LoadUint64(&coldCounts.count) { + runtime.Gosched() // Let observations get work done. + } + + his := &dto.Histogram{ + Bucket: make([]*dto.Bucket, len(h.upperBounds)), + SampleCount: proto.Uint64(count), + SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))), + } + var cumCount uint64 + for i, upperBound := range h.upperBounds { + cumCount += atomic.LoadUint64(&coldCounts.buckets[i]) + his.Bucket[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(cumCount), + UpperBound: proto.Float64(upperBound), + } + } + + out.Histogram = his + out.Label = h.labelPairs + + // Finally add all the cold counts to the new hot counts and reset the cold counts. 
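+	// (Editor's note on the three steps below: count is a plain atomic add;
+	// the sum needs a compare-and-swap loop because a float64 is stored as
+	// raw bits in a uint64; each cold field is reset to zero right after it
+	// is merged, so the next hot/cold swap starts from a clean slate.)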
+ atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + his.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + for i := range h.upperBounds { + atomic.AddUint64(&hotCounts.buckets[i], atomic.LoadUint64(&coldCounts.buckets[i])) + atomic.StoreUint64(&coldCounts.buckets[i], 0) + } + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. +type HistogramVec struct { + *metricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Histogram for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Histogram is created. +// +// It is possible to call this method without using the returned Histogram to only +// create the new Histogram but leave it at its starting value, a Histogram without +// any observations. +// +// Keeping the Histogram for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Histogram from the HistogramVec. In that case, the +// Histogram will still exist, but it will not be exported anymore, even if a +// Histogram with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// GetMetricWith returns the Histogram for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Histogram is created. Implications of +// creating a Histogram without using it and keeping the Histogram for later use +// are the same as for GetMetricWithLabelValues. 
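+//
+// An illustrative sketch (editor's note; the metric and label names are
+// hypothetical):
+//
+//	requestDur := NewHistogramVec(HistogramOpts{
+//		Name:    "http_request_duration_seconds",
+//		Help:    "Request latency distribution.",
+//		Buckets: ExponentialBuckets(0.001, 2, 12),
+//	}, []string{"code", "method"})
+//	obs, err := requestDur.GetMetricWith(Labels{"code": "404", "method": "GET"})
+//	if err == nil {
+//		obs.Observe(0.042)
+//	}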
+// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc (minus any curried labels). +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (v *HistogramVec) GetMetricWith(labels Labels) (Observer, error) { + metric, err := v.metricVec.getMetricWith(labels) + if metric != nil { + return metric.(Observer), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. Not returning an +// error allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (v *HistogramVec) WithLabelValues(lvs ...string) Observer { + h, err := v.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return h +} + +// With works as GetMetricWith but panics where GetMetricWithLabels would have +// returned an error. Not returning an error allows shortcuts like +// myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (v *HistogramVec) With(labels Labels) Observer { + h, err := v.GetMetricWith(labels) + if err != nil { + panic(err) + } + return h +} + +// CurryWith returns a vector curried with the provided labels, i.e. the +// returned vector has those labels pre-set for all labeled operations performed +// on it. The cardinality of the curried vector is reduced accordingly. The +// order of the remaining labels stays the same (just with the curried labels +// taken out of the sequence – which is relevant for the +// (GetMetric)WithLabelValues methods). It is possible to curry a curried +// vector, but only with labels not yet used for currying before. +// +// The metrics contained in the HistogramVec are shared between the curried and +// uncurried vectors. They are just accessed differently. Curried and uncurried +// vectors behave identically in terms of collection. Only one must be +// registered with a given registry (usually the uncurried version). The Reset +// method deletes all metrics, even if called on a curried vector. +func (v *HistogramVec) CurryWith(labels Labels) (ObserverVec, error) { + vec, err := v.curryWith(labels) + if vec != nil { + return &HistogramVec{vec}, err + } + return nil, err +} + +// MustCurryWith works as CurryWith but panics where CurryWith would have +// returned an error. +func (v *HistogramVec) MustCurryWith(labels Labels) ObserverVec { + vec, err := v.CurryWith(labels) + if err != nil { + panic(err) + } + return vec +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. 
As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc or if Desc is invalid. +func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go new file mode 100644 index 00000000..351c26e1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/internal/metric.go @@ -0,0 +1,85 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "sort" + + dto "github.com/prometheus/client_model/go" +) + +// metricSorter is a sortable slice of *dto.Metric. +type metricSorter []*dto.Metric + +func (s metricSorter) Len() int { + return len(s) +} + +func (s metricSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s metricSorter) Less(i, j int) bool { + if len(s[i].Label) != len(s[j].Label) { + // This should not happen. The metrics are + // inconsistent. However, we have to deal with the fact, as + // people might use custom collectors or metric family injection + // to create inconsistent metrics. So let's simply compare the + // number of labels in this case. That will still yield + // reproducible sorting. 
+ return len(s[i].Label) < len(s[j].Label) + } + for n, lp := range s[i].Label { + vi := lp.GetValue() + vj := s[j].Label[n].GetValue() + if vi != vj { + return vi < vj + } + } + + // We should never arrive here. Multiple metrics with the same + // label set in the same scrape will lead to undefined ingestion + // behavior. However, as above, we have to provide stable sorting + // here, even for inconsistent metrics. So sort equal metrics + // by their timestamp, with missing timestamps (implying "now") + // coming last. + if s[i].TimestampMs == nil { + return false + } + if s[j].TimestampMs == nil { + return true + } + return s[i].GetTimestampMs() < s[j].GetTimestampMs() +} + +// NormalizeMetricFamilies returns a MetricFamily slice with empty +// MetricFamilies pruned and the remaining MetricFamilies sorted by name within +// the slice, with the contained Metrics sorted within each MetricFamily. +func NormalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/labels.go new file mode 100644 index 00000000..2744443a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/labels.go @@ -0,0 +1,87 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "strings" + "unicode/utf8" + + "github.com/prometheus/common/model" +) + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. 
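+// For example, "__name__" is such a reserved label, used by Prometheus itself
+// to carry the metric name (editor's note).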
+const reservedLabelPrefix = "__" + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +func makeInconsistentCardinalityError(fqName string, labels, labelValues []string) error { + return fmt.Errorf( + "%s: %q has %d variable labels named %q but %d values %q were provided", + errInconsistentCardinality, fqName, + len(labels), labels, + len(labelValues), labelValues, + ) +} + +func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error { + if len(labels) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(labels), labels, + ) + } + + for name, val := range labels { + if !utf8.ValidString(val) { + return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val) + } + } + + return nil +} + +func validateLabelValues(vals []string, expectedNumberOfValues int) error { + if len(vals) != expectedNumberOfValues { + return fmt.Errorf( + "%s: expected %d label values but got %d in %#v", + errInconsistentCardinality, expectedNumberOfValues, + len(vals), vals, + ) + } + + for _, val := range vals { + if !utf8.ValidString(val) { + return fmt.Errorf("label value %q is not valid UTF-8", val) + } + } + + return nil +} + +func checkLabelName(l string) bool { + return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 00000000..55e6d86d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,174 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + "time" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. 
+	//
+	// While populating dto.Metric, it is the responsibility of the
+	// implementation to ensure validity of the Metric protobuf (like valid
+	// UTF-8 strings or syntactically valid metric and label names). It is
+	// recommended to sort labels lexicographically. Callers of Write should
+	// still ensure sorting if they depend on it.
+	Write(*dto.Metric) error
+	// TODO(beorn7): The original rationale of passing in a pre-allocated
+	// dto.Metric protobuf to save allocations has disappeared. The
+	// signature of this method should be changed to "Write() (*dto.Metric,
+	// error)".
+}
+
+// Opts bundles the options for creating most Metric types. Each metric
+// implementation XXX has its own XXXOpts type, but in most cases, it is just
+// an alias of this type (which might change when the requirement arises).
+//
+// It is mandatory to set Name to a non-empty string. All other fields are
+// optional and can safely be left at their zero value, although it is strongly
+// encouraged to set a Help string.
+type Opts struct {
+	// Namespace, Subsystem, and Name are components of the fully-qualified
+	// name of the Metric (created by joining these components with
+	// "_"). Only Name is mandatory, the others merely help structuring the
+	// name. Note that the fully-qualified name of the metric must be a
+	// valid Prometheus metric name.
+	Namespace string
+	Subsystem string
+	Name      string
+
+	// Help provides information about this metric.
+	//
+	// Metrics with the same fully-qualified name must have the same Help
+	// string.
+	Help string
+
+	// ConstLabels are used to attach fixed labels to this metric. Metrics
+	// with the same fully-qualified name must have the same label names in
+	// their ConstLabels.
+	//
+	// ConstLabels are only used rarely. In particular, do not use them to
+	// attach the same labels to all your metrics. Those use cases are
+	// better covered by target labels set by the scraping Prometheus
+	// server, or by one specific metric (e.g. a build_info or a
+	// machine_role metric). See also
+	// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels
+	ConstLabels Labels
+}
+
+// BuildFQName joins the given three name components by "_". Empty name
+// components are ignored. If the name parameter itself is empty, an empty
+// string is returned, no matter what. For example,
+// BuildFQName("client", "http", "requests_total") yields
+// "client_http_requests_total". Metric implementations included in this
+// library use this function internally to generate the fully-qualified metric
+// name from the name component in their Opts. Users of the library will only
+// need this function if they implement their own Metric or instantiate a Desc
+// (with NewDesc) directly.
+func BuildFQName(namespace, subsystem, name string) string {
+	if name == "" {
+		return ""
+	}
+	switch {
+	case namespace != "" && subsystem != "":
+		return strings.Join([]string{namespace, subsystem, name}, "_")
+	case namespace != "":
+		return strings.Join([]string{namespace, name}, "_")
+	case subsystem != "":
+		return strings.Join([]string{subsystem, name}, "_")
+	}
+	return name
+}
+
+// labelPairSorter implements sort.Interface. It is used to sort a slice of
+// dto.LabelPair pointers.
+type labelPairSorter []*dto.LabelPair + +func (s labelPairSorter) Len() int { + return len(s) +} + +func (s labelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s labelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } + +type timestampedMetric struct { + Metric + t time.Time +} + +func (m timestampedMetric) Write(pb *dto.Metric) error { + e := m.Metric.Write(pb) + pb.TimestampMs = proto.Int64(m.t.Unix()*1000 + int64(m.t.Nanosecond()/1000000)) + return e +} + +// NewMetricWithTimestamp returns a new Metric wrapping the provided Metric in a +// way that it has an explicit timestamp set to the provided Time. This is only +// useful in rare cases as the timestamp of a Prometheus metric should usually +// be set by the Prometheus server during scraping. Exceptions include mirroring +// metrics with given timestamps from other metric +// sources. +// +// NewMetricWithTimestamp works best with MustNewConstMetric, +// MustNewConstHistogram, and MustNewConstSummary, see example. +// +// Currently, the exposition formats used by Prometheus are limited to +// millisecond resolution. Thus, the provided time will be rounded down to the +// next full millisecond value. +func NewMetricWithTimestamp(t time.Time, m Metric) Metric { + return timestampedMetric{Metric: m, t: t} +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/observer.go new file mode 100644 index 00000000..5806cd09 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/observer.go @@ -0,0 +1,52 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Observer is the interface that wraps the Observe method, which is used by +// Histogram and Summary to add observations. +type Observer interface { + Observe(float64) +} + +// The ObserverFunc type is an adapter to allow the use of ordinary +// functions as Observers. If f is a function with the appropriate +// signature, ObserverFunc(f) is an Observer that calls f. +// +// This adapter is usually used in connection with the Timer type, and there are +// two general use cases: +// +// The most common one is to use a Gauge as the Observer for a Timer. +// See the "Gauge" Timer example. 
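+//
+// A sketch of that common case (editor's note; assumes this package's Timer
+// helper and a hypothetical gauge name):
+//
+//	gauge := NewGauge(GaugeOpts{Name: "last_backup_duration_seconds"})
+//	timer := NewTimer(ObserverFunc(gauge.Set))
+//	defer timer.ObserveDuration()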
+// +// The more advanced use case is to create a function that dynamically decides +// which Observer to use for observing the duration. See the "Complex" Timer +// example. +type ObserverFunc func(float64) + +// Observe calls f(value). It implements Observer. +func (f ObserverFunc) Observe(value float64) { + f(value) +} + +// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`. +type ObserverVec interface { + GetMetricWith(Labels) (Observer, error) + GetMetricWithLabelValues(lvs ...string) (Observer, error) + With(Labels) Observer + WithLabelValues(...string) Observer + CurryWith(Labels) (ObserverVec, error) + MustCurryWith(Labels) ObserverVec + + Collector +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 00000000..9b809794 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,151 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "os" +) + +type processCollector struct { + collectFn func(chan<- Metric) + pidFn func() (int, error) + reportErrors bool + cpuTotal *Desc + openFDs, maxFDs *Desc + vsize, maxVsize *Desc + rss *Desc + startTime *Desc +} + +// ProcessCollectorOpts defines the behavior of a process metrics collector +// created with NewProcessCollector. +type ProcessCollectorOpts struct { + // PidFn returns the PID of the process the collector collects metrics + // for. It is called upon each collection. By default, the PID of the + // current process is used, as determined on construction time by + // calling os.Getpid(). + PidFn func() (int, error) + // If non-empty, each of the collected metrics is prefixed by the + // provided string and an underscore ("_"). + Namespace string + // If true, any error encountered during collection is reported as an + // invalid metric (see NewInvalidMetric). Otherwise, errors are ignored + // and the collected metrics will be incomplete. (Possibly, no metrics + // will be collected at all.) While that's usually not desired, it is + // appropriate for the common "mix-in" of process metrics, where process + // metrics are nice to have, but failing to collect them should not + // disrupt the collection of the remaining metrics. + ReportErrors bool +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including CPU, memory and file descriptor usage as well as +// the process start time. The detailed behavior is defined by the provided +// ProcessCollectorOpts. The zero value of ProcessCollectorOpts creates a +// collector for the current process with an empty namespace string and no error +// reporting. 
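+//
+// For instance, the typical "mix-in" registration looks like this (editor's
+// illustrative sketch):
+//
+//	reg := NewRegistry()
+//	reg.MustRegister(
+//		NewProcessCollector(ProcessCollectorOpts{}),
+//		NewGoCollector(),
+//		NewBuildInfoCollector(),
+//	)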
+// +// The collector only works on operating systems with a Linux-style proc +// filesystem and on Microsoft Windows. On other operating systems, it will not +// collect any metrics. +func NewProcessCollector(opts ProcessCollectorOpts) Collector { + ns := "" + if len(opts.Namespace) > 0 { + ns = opts.Namespace + "_" + } + + c := &processCollector{ + reportErrors: opts.ReportErrors, + cpuTotal: NewDesc( + ns+"process_cpu_seconds_total", + "Total user and system CPU time spent in seconds.", + nil, nil, + ), + openFDs: NewDesc( + ns+"process_open_fds", + "Number of open file descriptors.", + nil, nil, + ), + maxFDs: NewDesc( + ns+"process_max_fds", + "Maximum number of open file descriptors.", + nil, nil, + ), + vsize: NewDesc( + ns+"process_virtual_memory_bytes", + "Virtual memory size in bytes.", + nil, nil, + ), + maxVsize: NewDesc( + ns+"process_virtual_memory_max_bytes", + "Maximum amount of virtual memory available in bytes.", + nil, nil, + ), + rss: NewDesc( + ns+"process_resident_memory_bytes", + "Resident memory size in bytes.", + nil, nil, + ), + startTime: NewDesc( + ns+"process_start_time_seconds", + "Start time of the process since unix epoch in seconds.", + nil, nil, + ), + } + + if opts.PidFn == nil { + pid := os.Getpid() + c.pidFn = func() (int, error) { return pid, nil } + } else { + c.pidFn = opts.PidFn + } + + // Set up process metric collection if supported by the runtime. + if canCollectProcess() { + c.collectFn = c.processCollect + } else { + c.collectFn = func(ch chan<- Metric) { + c.reportError(ch, nil, errors.New("process metrics not supported on this platform")) + } + } + + return c +} + +// Describe returns all descriptions of the collector. +func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal + ch <- c.openFDs + ch <- c.maxFDs + ch <- c.vsize + ch <- c.maxVsize + ch <- c.rss + ch <- c.startTime +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) { + if !c.reportErrors { + return + } + if desc == nil { + desc = NewInvalidDesc(err) + } + ch <- NewInvalidMetric(desc, err) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go new file mode 100644 index 00000000..3117461c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go @@ -0,0 +1,65 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// +build !windows + +package prometheus + +import ( + "github.com/prometheus/procfs" +) + +func canCollectProcess() bool { + _, err := procfs.NewDefaultFS() + return err == nil +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + c.reportError(ch, nil, err) + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + c.reportError(ch, nil, err) + return + } + + if stat, err := p.Stat(); err == nil { + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime()) + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory())) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory())) + if startTime, err := stat.StartTime(); err == nil { + ch <- MustNewConstMetric(c.startTime, GaugeValue, startTime) + } else { + c.reportError(ch, c.startTime, err) + } + } else { + c.reportError(ch, nil, err) + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds)) + } else { + c.reportError(ch, c.openFDs, err) + } + + if limits, err := p.Limits(); err == nil { + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles)) + ch <- MustNewConstMetric(c.maxVsize, GaugeValue, float64(limits.AddressSpace)) + } else { + c.reportError(ch, nil, err) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go new file mode 100644 index 00000000..e0b935d1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/process_collector_windows.go @@ -0,0 +1,112 @@ +// Copyright 2019 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
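+
+// (Editor's note: this file carries no build tag; it is compiled only on
+// Windows because of its _windows filename suffix, mirroring the
+// "+build !windows" constraint in process_collector_other.go.)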
+ +package prometheus + +import ( + "syscall" + "unsafe" + + "golang.org/x/sys/windows" +) + +func canCollectProcess() bool { + return true +} + +var ( + modpsapi = syscall.NewLazyDLL("psapi.dll") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetProcessMemoryInfo = modpsapi.NewProc("GetProcessMemoryInfo") + procGetProcessHandleCount = modkernel32.NewProc("GetProcessHandleCount") +) + +type processMemoryCounters struct { + // https://docs.microsoft.com/en-us/windows/desktop/api/psapi/ns-psapi-_process_memory_counters_ex + _ uint32 + PageFaultCount uint32 + PeakWorkingSetSize uint64 + WorkingSetSize uint64 + QuotaPeakPagedPoolUsage uint64 + QuotaPagedPoolUsage uint64 + QuotaPeakNonPagedPoolUsage uint64 + QuotaNonPagedPoolUsage uint64 + PagefileUsage uint64 + PeakPagefileUsage uint64 + PrivateUsage uint64 +} + +func getProcessMemoryInfo(handle windows.Handle) (processMemoryCounters, error) { + mem := processMemoryCounters{} + r1, _, err := procGetProcessMemoryInfo.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&mem)), + uintptr(unsafe.Sizeof(mem)), + ) + if r1 != 1 { + return mem, err + } else { + return mem, nil + } +} + +func getProcessHandleCount(handle windows.Handle) (uint32, error) { + var count uint32 + r1, _, err := procGetProcessHandleCount.Call( + uintptr(handle), + uintptr(unsafe.Pointer(&count)), + ) + if r1 != 1 { + return 0, err + } else { + return count, nil + } +} + +func (c *processCollector) processCollect(ch chan<- Metric) { + h, err := windows.GetCurrentProcess() + if err != nil { + c.reportError(ch, nil, err) + return + } + + var startTime, exitTime, kernelTime, userTime windows.Filetime + err = windows.GetProcessTimes(h, &startTime, &exitTime, &kernelTime, &userTime) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.startTime, GaugeValue, float64(startTime.Nanoseconds()/1e9)) + ch <- MustNewConstMetric(c.cpuTotal, CounterValue, fileTimeToSeconds(kernelTime)+fileTimeToSeconds(userTime)) + + mem, err := getProcessMemoryInfo(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.vsize, GaugeValue, float64(mem.PrivateUsage)) + ch <- MustNewConstMetric(c.rss, GaugeValue, float64(mem.WorkingSetSize)) + + handles, err := getProcessHandleCount(h) + if err != nil { + c.reportError(ch, nil, err) + return + } + ch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(handles)) + ch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(16*1024*1024)) // Windows has a hard-coded max limit, not per-process. +} + +func fileTimeToSeconds(ft windows.Filetime) float64 { + return float64(uint64(ft.HighDateTime)<<32+uint64(ft.LowDateTime)) / 1e7 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go new file mode 100644 index 00000000..d1354b10 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go @@ -0,0 +1,366 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package promhttp + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + closeNotifier = 1 << iota + flusher + hijacker + readerFrom + pusher +) + +type delegator interface { + http.ResponseWriter + + Status() int + Written() int64 +} + +type responseWriterDelegator struct { + http.ResponseWriter + + status int + written int64 + wroteHeader bool + observeWriteHeader func(int) +} + +func (r *responseWriterDelegator) Status() int { + return r.status +} + +func (r *responseWriterDelegator) Written() int64 { + return r.written +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) + if r.observeWriteHeader != nil { + r.observeWriteHeader(code) + } +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type closeNotifierDelegator struct{ *responseWriterDelegator } +type flusherDelegator struct{ *responseWriterDelegator } +type hijackerDelegator struct{ *responseWriterDelegator } +type readerFromDelegator struct{ *responseWriterDelegator } +type pusherDelegator struct{ *responseWriterDelegator } + +func (d closeNotifierDelegator) CloseNotify() <-chan bool { + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + return d.ResponseWriter.(http.CloseNotifier).CloseNotify() +} +func (d flusherDelegator) Flush() { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + d.ResponseWriter.(http.Flusher).Flush() +} +func (d hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return d.ResponseWriter.(http.Hijacker).Hijack() +} +func (d readerFromDelegator) ReadFrom(re io.Reader) (int64, error) { + // If applicable, call WriteHeader here so that observeWriteHeader is + // handled appropriately. + if !d.wroteHeader { + d.WriteHeader(http.StatusOK) + } + n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re) + d.written += n + return n, err +} +func (d pusherDelegator) Push(target string, opts *http.PushOptions) error { + return d.ResponseWriter.(http.Pusher).Push(target, opts) +} + +var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32) + +func init() { + // TODO(beorn7): Code generation would help here. 
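+	// (Editor's note: the slice index is a bitmask of the closeNotifier,
+	// flusher, hijacker, readerFrom, and pusher bits defined above, so e.g.
+	// entry 5 = hijacker|closeNotifier wraps a ResponseWriter that also
+	// implements exactly those two optional interfaces.)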
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0 + return d + } + pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1 + return closeNotifierDelegator{d} + } + pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2 + return flusherDelegator{d} + } + pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3 + return struct { + *responseWriterDelegator + http.Flusher + http.CloseNotifier + }{d, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4 + return hijackerDelegator{d} + } + pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5 + return struct { + *responseWriterDelegator + http.Hijacker + http.CloseNotifier + }{d, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + }{d, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7 + return struct { + *responseWriterDelegator + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8 + return readerFromDelegator{d} + } + pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.CloseNotifier + }{d, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + }{d, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + }{d, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15 + return struct { + *responseWriterDelegator + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16 + return pusherDelegator{d} + } + pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17 
+ return struct { + *responseWriterDelegator + http.Pusher + http.CloseNotifier + }{d, pusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + }{d, pusherDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19 + return struct { + *responseWriterDelegator + http.Pusher + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + }{d, pusherDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23 + return struct { + *responseWriterDelegator + http.Pusher + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + }{d, pusherDelegator{d}, readerFromDelegator{d}} + } + pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, closeNotifierDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + 
http.Hijacker + http.Flusher + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}} + } + pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31 + return struct { + *responseWriterDelegator + http.Pusher + io.ReaderFrom + http.Hijacker + http.Flusher + http.CloseNotifier + }{d, pusherDelegator{d}, readerFromDelegator{d}, hijackerDelegator{d}, flusherDelegator{d}, closeNotifierDelegator{d}} + } +} + +func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator { + d := &responseWriterDelegator{ + ResponseWriter: w, + observeWriteHeader: observeWriteHeaderFunc, + } + + id := 0 + //lint:ignore SA1019 http.CloseNotifier is deprecated but we don't want to + //remove support from client_golang yet. + if _, ok := w.(http.CloseNotifier); ok { + id += closeNotifier + } + if _, ok := w.(http.Flusher); ok { + id += flusher + } + if _, ok := w.(http.Hijacker); ok { + id += hijacker + } + if _, ok := w.(io.ReaderFrom); ok { + id += readerFrom + } + if _, ok := w.(http.Pusher); ok { + id += pusher + } + + return pickDelegator[id](d) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go new file mode 100644 index 00000000..cea5a90f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -0,0 +1,349 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package promhttp provides tooling around HTTP servers and clients. +// +// First, the package allows the creation of http.Handler instances to expose +// Prometheus metrics via HTTP. promhttp.Handler acts on the +// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a +// custom registry or anything that implements the Gatherer interface. It also +// allows the creation of handlers that act differently on errors or allow to +// log errors. +// +// Second, the package provides tooling to instrument instances of http.Handler +// via middleware. Middleware wrappers follow the naming scheme +// InstrumentHandlerX, where X describes the intended use of the middleware. +// See each function's doc comment for specific details. +// +// Finally, the package allows for an http.RoundTripper to be instrumented via +// middleware. Middleware wrappers follow the naming scheme +// InstrumentRoundTripperX, where X describes the intended use of the +// middleware. See each function's doc comment for specific details. 
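+//
+// As a minimal illustration of the first use case (a sketch; the registry and
+// server wiring are assumptions, not part of this package):
+//
+//	reg := prometheus.NewRegistry()
+//	reg.MustRegister(prometheus.NewGoCollector())
+//	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
+//	log.Fatal(http.ListenAndServe(":8080", nil))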
+package promhttp
+
+import (
+	"compress/gzip"
+	"fmt"
+	"io"
+	"net/http"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/prometheus/common/expfmt"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+const (
+	contentTypeHeader     = "Content-Type"
+	contentEncodingHeader = "Content-Encoding"
+	acceptEncodingHeader  = "Accept-Encoding"
+)
+
+var gzipPool = sync.Pool{
+	New: func() interface{} {
+		return gzip.NewWriter(nil)
+	},
+}
+
+// Handler returns an http.Handler for the prometheus.DefaultGatherer, using
+// default HandlerOpts, i.e. it reports the first error as an HTTP error, it has
+// no error logging, and it applies compression if requested by the client.
+//
+// The returned http.Handler is already instrumented using the
+// InstrumentMetricHandler function and the prometheus.DefaultRegisterer. If you
+// create multiple http.Handlers by separate calls of the Handler function, the
+// metrics used for instrumentation will be shared between them, providing
+// global scrape counts.
+//
+// This function is meant to cover the bulk of basic use cases. If you are doing
+// anything that requires more customization (including using a non-default
+// Gatherer, different instrumentation, and non-default HandlerOpts), use the
+// HandlerFor function. See there for details.
+func Handler() http.Handler {
+	return InstrumentMetricHandler(
+		prometheus.DefaultRegisterer, HandlerFor(prometheus.DefaultGatherer, HandlerOpts{}),
+	)
+}
+
+// HandlerFor returns an uninstrumented http.Handler for the provided
+// Gatherer. The behavior of the Handler is defined by the provided
+// HandlerOpts. Thus, HandlerFor is useful to create http.Handlers for custom
+// Gatherers, with non-default HandlerOpts, and/or with custom (or no)
+// instrumentation. Use the InstrumentMetricHandler function to apply the same
+// kind of instrumentation as it is used by the Handler function.
+func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
+	var (
+		inFlightSem chan struct{}
+		errCnt      = prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Name: "promhttp_metric_handler_errors_total",
+				Help: "Total number of internal errors encountered by the promhttp metric handler.",
+			},
+			[]string{"cause"},
+		)
+	)
+
+	if opts.MaxRequestsInFlight > 0 {
+		inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight)
+	}
+	if opts.Registry != nil {
+		// Initialize all possibilities that can occur below.
+		errCnt.WithLabelValues("gathering")
+		errCnt.WithLabelValues("encoding")
+		if err := opts.Registry.Register(errCnt); err != nil {
+			if are, ok := err.(prometheus.AlreadyRegisteredError); ok {
+				errCnt = are.ExistingCollector.(*prometheus.CounterVec)
+			} else {
+				panic(err)
+			}
+		}
+	}
+
+	h := http.HandlerFunc(func(rsp http.ResponseWriter, req *http.Request) {
+		if inFlightSem != nil {
+			select {
+			case inFlightSem <- struct{}{}: // All good, carry on.
+				defer func() { <-inFlightSem }()
+			default:
+				http.Error(rsp, fmt.Sprintf(
+					"Limit of concurrent requests reached (%d), try again later.", opts.MaxRequestsInFlight,
+				), http.StatusServiceUnavailable)
+				return
+			}
+		}
+		mfs, err := reg.Gather()
+		if err != nil {
+			if opts.ErrorLog != nil {
+				opts.ErrorLog.Println("error gathering metrics:", err)
+			}
+			errCnt.WithLabelValues("gathering").Inc()
+			switch opts.ErrorHandling {
+			case PanicOnError:
+				panic(err)
+			case ContinueOnError:
+				if len(mfs) == 0 {
+					// Still report the error if no metrics have been gathered.
+ httpError(rsp, err) + return + } + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + + contentType := expfmt.Negotiate(req.Header) + header := rsp.Header() + header.Set(contentTypeHeader, string(contentType)) + + w := io.Writer(rsp) + if !opts.DisableCompression && gzipAccepted(req.Header) { + header.Set(contentEncodingHeader, "gzip") + gz := gzipPool.Get().(*gzip.Writer) + defer gzipPool.Put(gz) + + gz.Reset(w) + defer gz.Close() + + w = gz + } + + enc := expfmt.NewEncoder(w, contentType) + + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + if opts.ErrorLog != nil { + opts.ErrorLog.Println("error encoding and sending metric family:", err) + } + errCnt.WithLabelValues("encoding").Inc() + switch opts.ErrorHandling { + case PanicOnError: + panic(err) + case ContinueOnError: + // Handled later. + case HTTPErrorOnError: + httpError(rsp, err) + return + } + } + } + + if lastErr != nil { + httpError(rsp, lastErr) + } + }) + + if opts.Timeout <= 0 { + return h + } + return http.TimeoutHandler(h, opts.Timeout, fmt.Sprintf( + "Exceeded configured timeout of %v.\n", + opts.Timeout, + )) +} + +// InstrumentMetricHandler is usually used with an http.Handler returned by the +// HandlerFor function. It instruments the provided http.Handler with two +// metrics: A counter vector "promhttp_metric_handler_requests_total" to count +// scrapes partitioned by HTTP status code, and a gauge +// "promhttp_metric_handler_requests_in_flight" to track the number of +// simultaneous scrapes. This function idempotently registers collectors for +// both metrics with the provided Registerer. It panics if the registration +// fails. The provided metrics are useful to see how many scrapes hit the +// monitored target (which could be from different Prometheus servers or other +// scrapers), and how often they overlap (which would result in more than one +// scrape in flight at the same time). Note that the scrapes-in-flight gauge +// will contain the scrape by which it is exposed, while the scrape counter will +// only get incremented after the scrape is complete (as only then the status +// code is known). For tracking scrape durations, use the +// "scrape_duration_seconds" gauge created by the Prometheus server upon each +// scrape. +func InstrumentMetricHandler(reg prometheus.Registerer, handler http.Handler) http.Handler { + cnt := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "promhttp_metric_handler_requests_total", + Help: "Total number of scrapes by HTTP status code.", + }, + []string{"code"}, + ) + // Initialize the most likely HTTP status codes. + cnt.WithLabelValues("200") + cnt.WithLabelValues("500") + cnt.WithLabelValues("503") + if err := reg.Register(cnt); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + cnt = are.ExistingCollector.(*prometheus.CounterVec) + } else { + panic(err) + } + } + + gge := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: "promhttp_metric_handler_requests_in_flight", + Help: "Current number of scrapes being served.", + }) + if err := reg.Register(gge); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + gge = are.ExistingCollector.(prometheus.Gauge) + } else { + panic(err) + } + } + + return InstrumentHandlerCounter(cnt, InstrumentHandlerInFlight(gge, handler)) +} + +// HandlerErrorHandling defines how a Handler serving metrics will handle +// errors. 
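+//
+// For example, to serve whatever could be gathered instead of failing the
+// whole scrape (a sketch; reg is an assumed, already populated registry):
+//
+//	h := HandlerFor(reg, HandlerOpts{ErrorHandling: ContinueOnError})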
+type HandlerErrorHandling int + +// These constants cause handlers serving metrics to behave as described if +// errors are encountered. +const ( + // Serve an HTTP status code 500 upon the first error + // encountered. Report the error message in the body. + HTTPErrorOnError HandlerErrorHandling = iota + // Ignore errors and try to serve as many metrics as possible. However, + // if no metrics can be served, serve an HTTP status code 500 and the + // last error message in the body. Only use this in deliberate "best + // effort" metrics collection scenarios. In this case, it is highly + // recommended to provide other means of detecting errors: By setting an + // ErrorLog in HandlerOpts, the errors are logged. By providing a + // Registry in HandlerOpts, the exposed metrics include an error counter + // "promhttp_metric_handler_errors_total", which can be used for + // alerts. + ContinueOnError + // Panic upon the first error encountered (useful for "crash only" apps). + PanicOnError +) + +// Logger is the minimal interface HandlerOpts needs for logging. Note that +// log.Logger from the standard library implements this interface, and it is +// easy to implement by custom loggers, if they don't do so already anyway. +type Logger interface { + Println(v ...interface{}) +} + +// HandlerOpts specifies options how to serve metrics via an http.Handler. The +// zero value of HandlerOpts is a reasonable default. +type HandlerOpts struct { + // ErrorLog specifies an optional logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. + ErrorLog Logger + // ErrorHandling defines how errors are handled. Note that errors are + // logged regardless of the configured ErrorHandling provided ErrorLog + // is not nil. + ErrorHandling HandlerErrorHandling + // If Registry is not nil, it is used to register a metric + // "promhttp_metric_handler_errors_total", partitioned by "cause". A + // failed registration causes a panic. Note that this error counter is + // different from the instrumentation you get from the various + // InstrumentHandler... helpers. It counts errors that don't necessarily + // result in a non-2xx HTTP status code. There are two typical cases: + // (1) Encoding errors that only happen after streaming of the HTTP body + // has already started (and the status code 200 has been sent). This + // should only happen with custom collectors. (2) Collection errors with + // no effect on the HTTP status code because ErrorHandling is set to + // ContinueOnError. + Registry prometheus.Registerer + // If DisableCompression is true, the handler will never compress the + // response, even if requested by the client. + DisableCompression bool + // The number of concurrent HTTP requests is limited to + // MaxRequestsInFlight. Additional requests are responded to with 503 + // Service Unavailable and a suitable message in the body. If + // MaxRequestsInFlight is 0 or negative, no limit is applied. + MaxRequestsInFlight int + // If handling a request takes longer than Timeout, it is responded to + // with 503 ServiceUnavailable and a suitable Message. No timeout is + // applied if Timeout is 0 or negative. Note that with the current + // implementation, reaching the timeout simply ends the HTTP requests as + // described above (and even that only if sending of the body hasn't + // started yet), while the bulk work of gathering all the metrics keeps + // running in the background (with the eventual result to be thrown + // away). 
Until the implementation is improved, it is recommended to
+	// implement a separate timeout in potentially slow Collectors.
+	Timeout time.Duration
+}
+
+// gzipAccepted returns whether the client will accept gzip-encoded content.
+func gzipAccepted(header http.Header) bool {
+	a := header.Get(acceptEncodingHeader)
+	parts := strings.Split(a, ",")
+	for _, part := range parts {
+		part = strings.TrimSpace(part)
+		if part == "gzip" || strings.HasPrefix(part, "gzip;") {
+			return true
+		}
+	}
+	return false
+}
+
+// httpError removes any content-encoding header and then calls http.Error with
+// the provided error and http.StatusInternalServerError. Error contents are
+// supposed to be uncompressed plain text. However, same as with a plain
+// http.Error, any header settings will be void if the header has already been
+// sent. The error message will still be written to the writer, but it will
+// probably be of limited use.
+func httpError(rsp http.ResponseWriter, err error) {
+	rsp.Header().Del(contentEncodingHeader)
+	http.Error(
+		rsp,
+		"An error has occurred while serving metrics:\n\n"+err.Error(),
+		http.StatusInternalServerError,
+	)
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 00000000..83c49b66
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,219 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/http/httptrace"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+	return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		gauge.Inc()
+		defer gauge.Dec()
+		return next.RoundTrip(r)
+	})
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two non-const non-curried labels. For
+// those, the only allowed label names are "code" and "method". The function
+// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// and/or HTTP method if the respective instance label names are present in the
+// CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for ExampleInstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(counter)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+		}
+		return resp, err
+	})
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided
+// ObserverVec. The ObserverVec must have zero, one, or two non-const
+// non-curried labels. For those, the only allowed label names are "code" and
+// "method". The function panics otherwise. The Observe method of the Observer
+// in the ObserverVec is called with the request duration in
+// seconds. Partitioning happens by HTTP status code and/or HTTP method if the
+// respective instance label names are present in the ObserverVec. For
+// unpartitioned observations, use an ObserverVec with zero labels. Note that
+// partitioning of Histograms is expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+	code, method := checkLabels(obs)
+
+	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+		start := time.Now()
+		resp, err := next.RoundTrip(r)
+		if err == nil {
+			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+		}
+		return resp, err
+	})
+}
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately bucketed Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+	GotConn              func(float64)
+	PutIdleConn          func(float64)
+	GotFirstResponseByte func(float64)
+	Got100Continue       func(float64)
+	DNSStart             func(float64)
+	DNSDone              func(float64)
+	ConnectStart         func(float64)
+	ConnectDone          func(float64)
+	TLSHandshakeStart    func(float64)
+	TLSHandshakeDone     func(float64)
+	WroteHeaders         func(float64)
+	Wait100Continue      func(float64)
+	WroteRequest         func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative.
(Earlier Go versions are not using a +// monotonic clock.) Note that partitioning of Histograms is expensive and +// should be used judiciously. +// +// For hook functions that receive an error as an argument, no observations are +// made in the event of a non-nil error value. +// +// See the example for ExampleInstrumentRoundTripperDuration for example usage. +func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc { + return RoundTripperFunc(func(r *http.Request) (*http.Response, error) { + start := time.Now() + + trace := &httptrace.ClientTrace{ + GotConn: func(_ httptrace.GotConnInfo) { + if it.GotConn != nil { + it.GotConn(time.Since(start).Seconds()) + } + }, + PutIdleConn: func(err error) { + if err != nil { + return + } + if it.PutIdleConn != nil { + it.PutIdleConn(time.Since(start).Seconds()) + } + }, + DNSStart: func(_ httptrace.DNSStartInfo) { + if it.DNSStart != nil { + it.DNSStart(time.Since(start).Seconds()) + } + }, + DNSDone: func(_ httptrace.DNSDoneInfo) { + if it.DNSDone != nil { + it.DNSDone(time.Since(start).Seconds()) + } + }, + ConnectStart: func(_, _ string) { + if it.ConnectStart != nil { + it.ConnectStart(time.Since(start).Seconds()) + } + }, + ConnectDone: func(_, _ string, err error) { + if err != nil { + return + } + if it.ConnectDone != nil { + it.ConnectDone(time.Since(start).Seconds()) + } + }, + GotFirstResponseByte: func() { + if it.GotFirstResponseByte != nil { + it.GotFirstResponseByte(time.Since(start).Seconds()) + } + }, + Got100Continue: func() { + if it.Got100Continue != nil { + it.Got100Continue(time.Since(start).Seconds()) + } + }, + TLSHandshakeStart: func() { + if it.TLSHandshakeStart != nil { + it.TLSHandshakeStart(time.Since(start).Seconds()) + } + }, + TLSHandshakeDone: func(_ tls.ConnectionState, err error) { + if err != nil { + return + } + if it.TLSHandshakeDone != nil { + it.TLSHandshakeDone(time.Since(start).Seconds()) + } + }, + WroteHeaders: func() { + if it.WroteHeaders != nil { + it.WroteHeaders(time.Since(start).Seconds()) + } + }, + Wait100Continue: func() { + if it.Wait100Continue != nil { + it.Wait100Continue(time.Since(start).Seconds()) + } + }, + WroteRequest: func(_ httptrace.WroteRequestInfo) { + if it.WroteRequest != nil { + it.WroteRequest(time.Since(start).Seconds()) + } + }, + } + r = r.WithContext(httptrace.WithClientTrace(r.Context(), trace)) + + return next.RoundTrip(r) + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go new file mode 100644 index 00000000..9db24380 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -0,0 +1,447 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
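+
+// A minimal sketch of composing the client-side middlewares defined in
+// instrument_client.go above (illustrative only; the metric names and the
+// wrapped transport are assumptions, not part of the vendored code):
+//
+//	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
+//		Name: "client_in_flight_requests",
+//		Help: "Outgoing HTTP requests currently in flight.",
+//	})
+//	reqs := prometheus.NewCounterVec(prometheus.CounterOpts{
+//		Name: "client_api_requests_total",
+//		Help: "Outgoing HTTP requests by status code and method.",
+//	}, []string{"code", "method"})
+//	prometheus.MustRegister(inFlight, reqs)
+//	client := &http.Client{
+//		Transport: InstrumentRoundTripperInFlight(inFlight,
+//			InstrumentRoundTripperCounter(reqs, http.DefaultTransport)),
+//	}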
+ +package promhttp + +import ( + "errors" + "net/http" + "strconv" + "strings" + "time" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +// magicString is used for the hacky label test in checkLabels. Remove once fixed. +const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa" + +// InstrumentHandlerInFlight is a middleware that wraps the provided +// http.Handler. It sets the provided prometheus.Gauge to the number of +// requests currently handled by the wrapped http.Handler. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + g.Inc() + defer g.Dec() + next.ServeHTTP(w, r) + }) +} + +// InstrumentHandlerDuration is a middleware that wraps the provided +// http.Handler to observe the request duration with the provided ObserverVec. +// The ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request duration in seconds. Partitioning happens by HTTP +// status code and/or HTTP method if the respective instance label names are +// present in the ObserverVec. For unpartitioned observations, use an +// ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + + obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds()) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + next.ServeHTTP(w, r) + obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds()) + }) +} + +// InstrumentHandlerCounter is a middleware that wraps the provided http.Handler +// to observe the request result with the provided CounterVec. The CounterVec +// must have zero, one, or two non-const non-curried labels. For those, the only +// allowed label names are "code" and "method". The function panics +// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or +// HTTP method if the respective instance label names are present in the +// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, the Counter is not incremented. +// +// See the example for InstrumentHandlerDuration for example usage. 
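+//
+// A concrete wiring might look like this (a sketch; the metric name and
+// apiHandler are invented for illustration):
+//
+//	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
+//		Name: "api_requests_total",
+//		Help: "HTTP requests by status code and method.",
+//	}, []string{"code", "method"})
+//	http.Handle("/api", InstrumentHandlerCounter(counter, apiHandler))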
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(counter) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + counter.With(labels(code, method, r.Method, d.Status())).Inc() + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + counter.With(labels(code, method, r.Method, 0)).Inc() + }) +} + +// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided +// http.Handler to observe with the provided ObserverVec the request duration +// until the response headers are written. The ObserverVec must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request duration in +// seconds. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. +// +// If the wrapped Handler panics before calling WriteHeader, no value is +// reported. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + d := newDelegator(w, func(status int) { + obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds()) + }) + next.ServeHTTP(d, r) + }) +} + +// InstrumentHandlerRequestSize is a middleware that wraps the provided +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the request size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. 
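+//
+// For instance (a sketch; the buckets and the wrapped handler are assumptions
+// made for illustration):
+//
+//	reqSize := prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//		Name:    "api_request_size_bytes",
+//		Help:    "Approximate size of incoming requests.",
+//		Buckets: prometheus.ExponentialBuckets(128, 4, 6),
+//	}, []string{"method"})
+//	http.Handle("/upload", InstrumentHandlerRequestSize(reqSize, uploadHandler))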
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc { + code, method := checkLabels(obs) + + if code { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size)) + }) + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + next.ServeHTTP(w, r) + size := computeApproximateRequestSize(r) + obs.With(labels(code, method, r.Method, 0)).Observe(float64(size)) + }) +} + +// InstrumentHandlerResponseSize is a middleware that wraps the provided +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have zero, one, or two non-const non-curried labels. For +// those, the only allowed label names are "code" and "method". The function +// panics otherwise. The Observe method of the Observer in the ObserverVec is +// called with the response size in bytes. Partitioning happens by HTTP status +// code and/or HTTP method if the respective instance label names are present in +// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero +// labels. Note that partitioning of Histograms is expensive and should be used +// judiciously. +// +// If the wrapped Handler does not set a status code, a status code of 200 is assumed. +// +// If the wrapped Handler panics, no values are reported. +// +// See the example for InstrumentHandlerDuration for example usage. +func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler { + code, method := checkLabels(obs) + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + d := newDelegator(w, nil) + next.ServeHTTP(d, r) + obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written())) + }) +} + +func checkLabels(c prometheus.Collector) (code bool, method bool) { + // TODO(beorn7): Remove this hacky way to check for instance labels + // once Descriptors can have their dimensionality queried. + var ( + desc *prometheus.Desc + m prometheus.Metric + pm dto.Metric + lvs []string + ) + + // Get the Desc from the Collector. + descc := make(chan *prometheus.Desc, 1) + c.Describe(descc) + + select { + case desc = <-descc: + default: + panic("no description provided by collector") + } + select { + case <-descc: + panic("more than one description provided by collector") + default: + } + + close(descc) + + // Create a ConstMetric with the Desc. Since we don't know how many + // variable labels there are, try for as long as it needs. + for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { + m, err = prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, lvs...) + } + + // Write out the metric into a proto message and look at the labels. + // If the value is not the magicString, it is a constLabel, which doesn't interest us. + // If the label is curried, it doesn't interest us. + // In all other cases, only "code" or "method" is allowed. 
+	if err := m.Write(&pm); err != nil {
+		panic("error checking metric for labels")
+	}
+	for _, label := range pm.Label {
+		name, value := label.GetName(), label.GetValue()
+		if value != magicString || isLabelCurried(c, name) {
+			continue
+		}
+		switch name {
+		case "code":
+			code = true
+		case "method":
+			method = true
+		default:
+			panic("metric partitioned with non-supported labels")
+		}
+	}
+	return
+}
+
+func isLabelCurried(c prometheus.Collector, label string) bool {
+	// This is even hackier than the label test above.
+	// We essentially try to curry again and see if it works.
+	// But for that, we need to type-convert to the two
+	// types we use here, ObserverVec or *CounterVec.
+	switch v := c.(type) {
+	case *prometheus.CounterVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	case prometheus.ObserverVec:
+		if _, err := v.CurryWith(prometheus.Labels{label: "dummy"}); err == nil {
+			return false
+		}
+	default:
+		panic("unsupported metric vec type")
+	}
+	return true
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+	if !(code || method) {
+		return emptyLabels
+	}
+	labels := prometheus.Labels{}
+
+	if code {
+		labels["code"] = sanitizeCode(status)
+	}
+	if method {
+		labels["method"] = sanitizeMethod(reqMethod)
+	}
+
+	return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+	s := 0
+	if r.URL != nil {
+		s += len(r.URL.String())
+	}
+
+	s += len(r.Method)
+	s += len(r.Proto)
+	for name, values := range r.Header {
+		s += len(name)
+		for _, value := range values {
+			s += len(value)
+		}
+	}
+	s += len(r.Host)
+
+	// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+	if r.ContentLength != -1 {
+		s += int(r.ContentLength)
+	}
+	return s
+}
+
+func sanitizeMethod(m string) string {
+	switch m {
+	case "GET", "get":
+		return "get"
+	case "PUT", "put":
+		return "put"
+	case "HEAD", "head":
+		return "head"
+	case "POST", "post":
+		return "post"
+	case "DELETE", "delete":
+		return "delete"
+	case "CONNECT", "connect":
+		return "connect"
+	case "OPTIONS", "options":
+		return "options"
+	case "NOTIFY", "notify":
+		return "notify"
+	default:
+		return strings.ToLower(m)
+	}
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
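+//
+// For example, sanitizeCode(0) and sanitizeCode(200) both return "200", while
+// an uncommon code such as 599 falls through to strconv.Itoa.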
+func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200, 0: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 00000000..f4e9a99b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,945 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "sort" + "strings" + "sync" + "unicode/utf8" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus/internal" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (currently on Linux only, see NewProcessCollector) +// and a Go collector (see NewGoCollector, in particular the note about +// stop-the-world implication with Go versions older than 1.9) already +// registered. This approach to keep default instances as global state mirrors +// the approach of other packages in the Go standard library. Note that there +// are caveats. Change the variables with caution and only if you understand the +// consequences. 
Users who want to avoid global state altogether should not use +// the convenience functions and act on custom instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(ProcessCollectorOpts{})) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. +func NewRegistry() *Registry { + return &Registry{ + collectorsByID: map[uint64]Collector{}, + descIDs: map[uint64]struct{}{}, + dimHashesByName: map[string]uint64{}, + } +} + +// NewPedanticRegistry returns a registry that checks during collection if each +// collected Metric is consistent with its reported Desc, and if the Desc has +// actually been registered with the registry. Unchecked Collectors (those whose +// Describe method does not yield any descriptors) are excluded from the check. +// +// Usually, a Registry will be happy as long as the union of all collected +// Metrics is consistent and valid even if some metrics are not consistent with +// their own Desc or a Desc provided by their registered Collector. Well-behaved +// Collectors and Metrics will only provide consistent Descs. This Registry is +// useful to test the implementation of Collectors and Metrics. +func NewPedanticRegistry() *Registry { + r := NewRegistry() + r.pedanticChecksEnabled = true + return r +} + +// Registerer is the interface for the part of a registry in charge of +// registering and unregistering. Users of custom registries should use +// Registerer as type for registration purposes (rather than the Registry type +// directly). In that way, they are free to use custom Registerer implementation +// (e.g. for testing purposes). +type Registerer interface { + // Register registers a new Collector to be included in metrics + // collection. It returns an error if the descriptors provided by the + // Collector are invalid or if they — in combination with descriptors of + // already registered Collectors — do not fulfill the consistency and + // uniqueness criteria described in the documentation of metric.Desc. + // + // If the provided Collector is equal to a Collector already registered + // (which includes the case of re-registering the same Collector), the + // returned error is an instance of AlreadyRegisteredError, which + // contains the previously registered Collector. + // + // A Collector whose Describe method does not yield any Desc is treated + // as unchecked. Registration will always succeed. No check for + // re-registering (see previous paragraph) is performed. Thus, the + // caller is responsible for not double-registering the same unchecked + // Collector, and for providing a Collector that will not cause + // inconsistent metrics on collection. (This would lead to scrape + // errors.) + Register(Collector) error + // MustRegister works like Register but registers any number of + // Collectors and panics upon the first registration that causes an + // error. + MustRegister(...Collector) + // Unregister unregisters the Collector that equals the Collector passed + // in as an argument. (Two Collectors are considered equal if their + // Describe method yields the same set of descriptors.) The function + // returns whether a Collector was unregistered. Note that an unchecked + // Collector cannot be unregistered (as its Describe method does not + // yield any descriptor). 
+ // + // Note that even after unregistering, it will not be possible to + // register a new Collector that is inconsistent with the unregistered + // Collector, e.g. a Collector collecting metrics with the same name but + // a different help string. The rationale here is that the same registry + // instance must only collect consistent metrics throughout its + // lifetime. + Unregister(Collector) bool +} + +// Gatherer is the interface for the part of a registry in charge of gathering +// the collected metrics into a number of MetricFamilies. The Gatherer interface +// comes with the same general implication as described for the Registerer +// interface. +type Gatherer interface { + // Gather calls the Collect method of the registered Collectors and then + // gathers the collected metrics into a lexicographically sorted slice + // of uniquely named MetricFamily protobufs. Gather ensures that the + // returned slice is valid and self-consistent so that it can be used + // for valid exposition. As an exception to the strict consistency + // requirements described for metric.Desc, Gather will tolerate + // different sets of label names for metrics of the same metric family. + // + // Even if an error occurs, Gather attempts to gather as many metrics as + // possible. Hence, if a non-nil error is returned, the returned + // MetricFamily slice could be nil (in case of a fatal error that + // prevented any meaningful metric collection) or contain a number of + // MetricFamily protobufs, some of which might be incomplete, and some + // might be missing altogether. The returned error (which might be a + // MultiError) explains the details. Note that this is mostly useful for + // debugging purposes. If the gathered protobufs are to be used for + // exposition in actual monitoring, it is almost always better to not + // expose an incomplete result and instead disregard the returned + // MetricFamily protobufs in case the returned error is non-nil. + Gather() ([]*dto.MetricFamily, error) +} + +// Register registers the provided Collector with the DefaultRegisterer. +// +// Register is a shortcut for DefaultRegisterer.Register(c). See there for more +// details. +func Register(c Collector) error { + return DefaultRegisterer.Register(c) +} + +// MustRegister registers the provided Collectors with the DefaultRegisterer and +// panics if any error occurs. +// +// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See +// there for more details. +func MustRegister(cs ...Collector) { + DefaultRegisterer.MustRegister(cs...) +} + +// Unregister removes the registration of the provided Collector from the +// DefaultRegisterer. +// +// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for +// more details. +func Unregister(c Collector) bool { + return DefaultRegisterer.Unregister(c) +} + +// GathererFunc turns a function into a Gatherer. +type GathererFunc func() ([]*dto.MetricFamily, error) + +// Gather implements Gatherer. +func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) { + return gf() +} + +// AlreadyRegisteredError is returned by the Register method if the Collector to +// be registered has already been registered before, or a different Collector +// that collects the same metrics has been registered before. Registration fails +// in that case, but you can detect from the kind of error what has +// happened. The error contains fields for the existing Collector and the +// (rejected) new Collector that equals the existing one. 
This can be used to
+// find out if an equal Collector has been registered before and switch over to
+// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct {
+	ExistingCollector, NewCollector Collector
+}
+
+func (err AlreadyRegisteredError) Error() string {
+	return "duplicate metrics collector registration attempted"
+}
+
+// MultiError is a slice of errors implementing the error interface. It is used
+// by a Gatherer to report multiple errors during MetricFamily gathering.
+type MultiError []error
+
+func (errs MultiError) Error() string {
+	if len(errs) == 0 {
+		return ""
+	}
+	buf := &bytes.Buffer{}
+	fmt.Fprintf(buf, "%d error(s) occurred:", len(errs))
+	for _, err := range errs {
+		fmt.Fprintf(buf, "\n* %s", err)
+	}
+	return buf.String()
+}
+
+// Append appends the provided error if it is not nil.
+func (errs *MultiError) Append(err error) {
+	if err != nil {
+		*errs = append(*errs, err)
+	}
+}
+
+// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only
+// contained error as error if len(errs) is 1. In all other cases, it returns
+// the MultiError directly. This is helpful for returning a MultiError in a way
+// that only uses the MultiError if needed.
+func (errs MultiError) MaybeUnwrap() error {
+	switch len(errs) {
+	case 0:
+		return nil
+	case 1:
+		return errs[0]
+	default:
+		return errs
+	}
+}
+
+// Registry registers Prometheus collectors, collects their metrics, and gathers
+// them into MetricFamilies for exposition. It implements both Registerer and
+// Gatherer. The zero value is not usable. Create instances with NewRegistry or
+// NewPedanticRegistry.
+type Registry struct {
+	mtx                   sync.RWMutex
+	collectorsByID        map[uint64]Collector // ID is a hash of the descIDs.
+	descIDs               map[uint64]struct{}
+	dimHashesByName       map[string]uint64
+	uncheckedCollectors   []Collector
+	pedanticChecksEnabled bool
+}
+
+// Register implements Registerer.
+func (r *Registry) Register(c Collector) error {
+	var (
+		descChan           = make(chan *Desc, capDescChan)
+		newDescIDs         = map[uint64]struct{}{}
+		newDimHashesByName = map[string]uint64{}
+		collectorID        uint64 // Just a sum of all desc IDs.
+		duplicateDescErr   error
+	)
+	go func() {
+		c.Describe(descChan)
+		close(descChan)
+	}()
+	r.mtx.Lock()
+	defer func() {
+		// Drain channel in case of premature return to not leak a goroutine.
+		for range descChan {
+		}
+		r.mtx.Unlock()
+	}()
+	// Conduct various tests...
+	for desc := range descChan {
+
+		// Is the descriptor valid at all?
+		if desc.err != nil {
+			return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err)
+		}
+
+		// Is the descID unique?
+		// (In other words: Is the fqName + constLabel combination unique?)
+		if _, exists := r.descIDs[desc.id]; exists {
+			duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc)
+		}
+		// If it is not a duplicate desc in this collector, add it to
+		// the collectorID. (We allow duplicate descs within the same
+		// collector, but their existence must be a no-op.)
+		if _, exists := newDescIDs[desc.id]; !exists {
+			newDescIDs[desc.id] = struct{}{}
+			collectorID += desc.id
+		}
+
+		// Are all the label names and the help string consistent with
+		// previous descriptors of the same name?
+		// First check existing descriptors...
+ if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // A Collector yielding no Desc at all is considered unchecked. + if len(newDescIDs) == 0 { + r.uncheckedCollectors = append(r.uncheckedCollectors, c) + return nil + } + if existing, exists := r.collectorsByID[collectorID]; exists { + switch e := existing.(type) { + case *wrappingCollector: + return AlreadyRegisteredError{ + ExistingCollector: e.unwrapRecursively(), + NewCollector: c, + } + default: + return AlreadyRegisteredError{ + ExistingCollector: e, + NewCollector: c, + } + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. + if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + checkedMetricChan = make(chan Metric, capMetricChan) + uncheckedMetricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. 
+		registeredDescIDs map[uint64]struct{} // Only used for pedantic checks
+	)
+
+	r.mtx.RLock()
+	goroutineBudget := len(r.collectorsByID) + len(r.uncheckedCollectors)
+	metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))
+	checkedCollectors := make(chan Collector, len(r.collectorsByID))
+	uncheckedCollectors := make(chan Collector, len(r.uncheckedCollectors))
+	for _, collector := range r.collectorsByID {
+		checkedCollectors <- collector
+	}
+	for _, collector := range r.uncheckedCollectors {
+		uncheckedCollectors <- collector
+	}
+	// In case pedantic checks are enabled, we have to copy the map before
+	// giving up the RLock.
+	if r.pedanticChecksEnabled {
+		registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs))
+		for id := range r.descIDs {
+			registeredDescIDs[id] = struct{}{}
+		}
+	}
+	r.mtx.RUnlock()
+
+	wg.Add(goroutineBudget)
+
+	collectWorker := func() {
+		for {
+			select {
+			case collector := <-checkedCollectors:
+				collector.Collect(checkedMetricChan)
+			case collector := <-uncheckedCollectors:
+				collector.Collect(uncheckedMetricChan)
+			default:
+				return
+			}
+			wg.Done()
+		}
+	}
+
+	// Start the first worker now to make sure at least one is running.
+	go collectWorker()
+	goroutineBudget--
+
+	// Close checkedMetricChan and uncheckedMetricChan once all collectors
+	// are collected.
+	go func() {
+		wg.Wait()
+		close(checkedMetricChan)
+		close(uncheckedMetricChan)
+	}()
+
+	// Drain checkedMetricChan and uncheckedMetricChan in case of premature return.
+	defer func() {
+		if checkedMetricChan != nil {
+			for range checkedMetricChan {
+			}
+		}
+		if uncheckedMetricChan != nil {
+			for range uncheckedMetricChan {
+			}
+		}
+	}()
+
+	// Copy the channel references so we can nil them out later to remove
+	// them from the select statements below.
+	cmc := checkedMetricChan
+	umc := uncheckedMetricChan
+
+	for {
+		select {
+		case metric, ok := <-cmc:
+			if !ok {
+				cmc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				registeredDescIDs,
+			))
+		case metric, ok := <-umc:
+			if !ok {
+				umc = nil
+				break
+			}
+			errs.Append(processMetric(
+				metric, metricFamiliesByName,
+				metricHashes,
+				nil,
+			))
+		default:
+			if goroutineBudget <= 0 || len(checkedCollectors)+len(uncheckedCollectors) == 0 {
+				// All collectors are already being worked on, or
+				// we have already started as many goroutines as
+				// there are collectors. Do the same as above,
+				// just without the default.
+				select {
+				case metric, ok := <-cmc:
+					if !ok {
+						cmc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						registeredDescIDs,
+					))
+				case metric, ok := <-umc:
+					if !ok {
+						umc = nil
+						break
+					}
+					errs.Append(processMetric(
+						metric, metricFamiliesByName,
+						metricHashes,
+						nil,
+					))
+				}
+				break
+			}
+			// Start more workers.
+			go collectWorker()
+			goroutineBudget--
+			runtime.Gosched()
+		}
+		// Once both checkedMetricChan and uncheckedMetricChan are closed
+		// and drained, the contraption above will nil out cmc and umc,
+		// and then we can leave the collect loop here.
+		if cmc == nil && umc == nil {
+			break
+		}
+	}
+	return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
+}
+
+// WriteToTextfile calls Gather on the provided Gatherer, encodes the result in the
+// Prometheus text format, and writes it to a temporary file. Upon success, the
+// temporary file is renamed to the provided filename.
+//
+// This is intended for use with the textfile collector of the node exporter.
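+// The write-then-rename sequence means a scraping collector never picks up a
+// half-written file (on POSIX systems the final rename is atomic).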
+// Note that the node exporter expects the filename to be suffixed with ".prom". +func WriteToTextfile(filename string, g Gatherer) error { + tmp, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)) + if err != nil { + return err + } + defer os.Remove(tmp.Name()) + + mfs, err := g.Gather() + if err != nil { + return err + } + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(tmp, mf); err != nil { + return err + } + } + if err := tmp.Close(); err != nil { + return err + } + + if err := os.Chmod(tmp.Name(), 0644); err != nil { + return err + } + return os.Rename(tmp.Name(), filename) +} + +// processMetric is an internal helper method only used by the Gather method. +func processMetric( + metric Metric, + metricFamiliesByName map[string]*dto.MetricFamily, + metricHashes map[uint64]struct{}, + registeredDescIDs map[uint64]struct{}, +) error { + desc := metric.Desc() + // Wrapped metrics collected by an unchecked Collector can have an + // invalid Desc. + if desc.err != nil { + return desc.err + } + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + return fmt.Errorf("error collecting metric %v: %s", desc, err) + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { // Existing name. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + ) + } + // TODO(beorn7): Simplify switch once Desc has type. + switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + return fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + return fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + return fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + ) + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + return fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + ) + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { // New name. + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. + switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + return fmt.Errorf("empty metric collected: %s", dtoMetric) + } + if err := checkSuffixCollisions(metricFamily, metricFamiliesByName); err != nil { + return err + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes); err != nil { + return err + } + if registeredDescIDs != nil { + // Is the desc registered at all? 
+ if _, exist := registeredDescIDs[desc.id]; !exist { + return fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + return err + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + return nil +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calls are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. It also provides a way to directly inject existing MetricFamily +// protobufs into the gathering by creating a custom Gatherer with a Gather +// method that simply returns the existing MetricFamily protobufs. Note that no +// registration is involved (in contrast to Collector registration), so +// obviously registration-time checks cannot happen. Any inconsistencies between +// the gathered MetricFamilies are reported as errors by the Gather method, and +// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies +// (e.g. syntactically invalid metric or label names) will go undetected. +type Gatherers []Gatherer + +// Gather implements Gatherer. +func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) { + var ( + metricFamiliesByName = map[string]*dto.MetricFamily{} + metricHashes = map[uint64]struct{}{} + errs MultiError // The collected errors to return in the end. + ) + + for i, g := range gs { + mfs, err := g.Gather() + if err != nil { + if multiErr, ok := err.(MultiError); ok { + for _, err := range multiErr { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } else { + errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err)) + } + } + for _, mf := range mfs { + existingMF, exists := metricFamiliesByName[mf.GetName()] + if exists { + if existingMF.GetHelp() != mf.GetHelp() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has help %q but should have %q", + mf.GetName(), mf.GetHelp(), existingMF.GetHelp(), + )) + continue + } + if existingMF.GetType() != mf.GetType() { + errs = append(errs, fmt.Errorf( + "gathered metric family %s has type %s but should have %s", + mf.GetName(), mf.GetType(), existingMF.GetType(), + )) + continue + } + } else { + existingMF = &dto.MetricFamily{} + existingMF.Name = mf.Name + existingMF.Help = mf.Help + existingMF.Type = mf.Type + if err := checkSuffixCollisions(existingMF, metricFamiliesByName); err != nil { + errs = append(errs, err) + continue + } + metricFamiliesByName[mf.GetName()] = existingMF + } + for _, m := range mf.Metric { + if err := checkMetricConsistency(existingMF, m, metricHashes); err != nil { + errs = append(errs, err) + continue + } + existingMF.Metric = append(existingMF.Metric, m) + } + } + } + return internal.NormalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// checkSuffixCollisions checks for collisions with the “magic” suffixes the +// Prometheus text format and the internal metric representation of the +// Prometheus server add while flattening Summaries and Histograms. 
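+// For example, a metric named "foo_count" collides with a previously collected
+// histogram or summary named "foo", and a new histogram named "foo" collides
+// with previously collected metrics named "foo_count", "foo_sum", or
+// "foo_bucket".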
+func checkSuffixCollisions(mf *dto.MetricFamily, mfs map[string]*dto.MetricFamily) error { + var ( + newName = mf.GetName() + newType = mf.GetType() + newNameWithoutSuffix = "" + ) + switch { + case strings.HasSuffix(newName, "_count"): + newNameWithoutSuffix = newName[:len(newName)-6] + case strings.HasSuffix(newName, "_sum"): + newNameWithoutSuffix = newName[:len(newName)-4] + case strings.HasSuffix(newName, "_bucket"): + newNameWithoutSuffix = newName[:len(newName)-7] + } + if newNameWithoutSuffix != "" { + if existingMF, ok := mfs[newNameWithoutSuffix]; ok { + switch existingMF.GetType() { + case dto.MetricType_SUMMARY: + if !strings.HasSuffix(newName, "_bucket") { + return fmt.Errorf( + "collected metric named %q collides with previously collected summary named %q", + newName, newNameWithoutSuffix, + ) + } + case dto.MetricType_HISTOGRAM: + return fmt.Errorf( + "collected metric named %q collides with previously collected histogram named %q", + newName, newNameWithoutSuffix, + ) + } + } + } + if newType == dto.MetricType_SUMMARY || newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_count"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_count", + ) + } + if _, ok := mfs[newName+"_sum"]; ok { + return fmt.Errorf( + "collected histogram or summary named %q collides with previously collected metric named %q", + newName, newName+"_sum", + ) + } + } + if newType == dto.MetricType_HISTOGRAM { + if _, ok := mfs[newName+"_bucket"]; ok { + return fmt.Errorf( + "collected histogram named %q collides with previously collected metric named %q", + newName, newName+"_bucket", + ) + } + } + return nil +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashes the Metric labels and the MetricFamily +// name. If the resulting hash is already in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, +) error { + name := metricFamily.GetName() + + // Type consistency with metric family. 
+ if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %q { %s} is not a %s", + name, dtoMetric, metricFamily.GetType(), + ) + } + + previousLabelName := "" + for _, labelPair := range dtoMetric.GetLabel() { + labelName := labelPair.GetName() + if labelName == previousLabelName { + return fmt.Errorf( + "collected metric %q { %s} has two or more labels with the same name: %s", + name, dtoMetric, labelName, + ) + } + if !checkLabelName(labelName) { + return fmt.Errorf( + "collected metric %q { %s} has a label with an invalid name: %s", + name, dtoMetric, labelName, + ) + } + if dtoMetric.Summary != nil && labelName == quantileLabel { + return fmt.Errorf( + "collected metric %q { %s} must not have an explicit %q label", + name, dtoMetric, quantileLabel, + ) + } + if !utf8.ValidString(labelPair.GetValue()) { + return fmt.Errorf( + "collected metric %q { %s} has a label named %q whose value is not utf8: %#v", + name, dtoMetric, labelName, labelPair.GetValue()) + } + previousLabelName = labelName + } + + // Is the metric unique (i.e. no other metric with the same name and the same labels)? + h := hashNew() + h = hashAdd(h, name) + h = hashAddByte(h, separatorByte) + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + if !sort.IsSorted(labelPairSorter(dtoMetric.Label)) { + // We cannot sort dtoMetric.Label in place as it is immutable by contract. + copiedLabels := make([]*dto.LabelPair, len(dtoMetric.Label)) + copy(copiedLabels, dtoMetric.Label) + sort.Sort(labelPairSorter(copiedLabels)) + dtoMetric.Label = copiedLabels + } + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetName()) + h = hashAddByte(h, separatorByte) + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %q { %s} was collected before with the same name and label values", + name, dtoMetric, + ) + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? 
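+	// (Rebuild the label pairs the Desc implies: the const label pairs plus
+	// the variable label names without values, then compare them pairwise
+	// against the metric's own label pairs after sorting.)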
+ lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label)) + copy(lpsFromDesc, desc.constLabelPairs) + for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(labelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 00000000..c970fdee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,736 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "runtime" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. However, the default behavior will change in the +// upcoming v1.0.0 of the library. There will be no rank estimations at all by +// default. For a sane transition, it is recommended to set the desired rank +// estimations explicitly. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. 
+ Observe(float64) +} + +var errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name to a non-empty string. While all other fields are +// optional and can safely be left at their zero value, it is recommended to set +// a help string and to explicitly set the Objectives field to the desired value +// as the default value will change in the upcoming v1.0.0 of the library. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Due to the way a Summary is represented in the Prometheus text format + // and how it is handled by the Prometheus server internally, “quantile” + // is an illegal label name. Construction of a Summary or SummaryVec + // will panic if this label name is used in ConstLabels. + // + // ConstLabels are only used rarely. In particular, do not use them to + // attach the same labels to all your metrics. Those use cases are + // better covered by target labels set by the scraping Prometheus + // server, or by one specific metric (e.g. a build_info or a + // machine_role metric). See also + // https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels,-not-static-scraped-labels + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported for q + // will be the φ-quantile value for some φ between q-e and q+e. The + // default value is an empty map, resulting in a summary without + // quantiles. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. 
If there is a need
+	// to increase the value, a multiple of 500 is recommended (because that
+	// is the internal buffer size of the underlying package
+	// "github.com/beorn7/perks/quantile").
+	BufCap uint32
+}
+
+// Problem with the sliding-window decay algorithm... The Merge method of
+// perks/quantile is actually not working as advertised - and it might be
+// unfixable, as the underlying algorithm is apparently not capable of merging
+// summaries in the first place. To avoid using Merge, we are currently adding
+// observations to _each_ age bucket, i.e. the effort to add a sample is
+// essentially multiplied by the number of age buckets. When rotating age
+// buckets, we empty the previous head stream. At scrape time, we simply take
+// the quantiles from the head stream (no merging required). Result: More effort
+// at observation time, less effort at scrape time, which is exactly the
+// opposite of what we try to accomplish, but at least the results are correct.
+//
+// The quite elegant previous contraption to merge the age buckets efficiently
+// at scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)
+// can't be used anymore.
+
+// NewSummary creates a new Summary based on the provided SummaryOpts.
+func NewSummary(opts SummaryOpts) Summary {
+	return newSummary(
+		NewDesc(
+			BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
+			opts.Help,
+			nil,
+			opts.ConstLabels,
+		),
+		opts,
+	)
+}
+
+func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
+	if len(desc.variableLabels) != len(labelValues) {
+		panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels, labelValues))
+	}
+
+	for _, n := range desc.variableLabels {
+		if n == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+	for _, lp := range desc.constLabelPairs {
+		if lp.GetName() == quantileLabel {
+			panic(errQuantileLabelNotAllowed)
+		}
+	}
+
+	if opts.Objectives == nil {
+		opts.Objectives = map[float64]float64{}
+	}
+
+	if opts.MaxAge < 0 {
+		panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge))
+	}
+	if opts.MaxAge == 0 {
+		opts.MaxAge = DefMaxAge
+	}
+
+	if opts.AgeBuckets == 0 {
+		opts.AgeBuckets = DefAgeBuckets
+	}
+
+	if opts.BufCap == 0 {
+		opts.BufCap = DefBufCap
+	}
+
+	if len(opts.Objectives) == 0 {
+		// Use the lock-free implementation of a Summary without objectives.
+		s := &noObjectivesSummary{
+			desc:       desc,
+			labelPairs: makeLabelPairs(desc, labelValues),
+			counts:     [2]*summaryCounts{&summaryCounts{}, &summaryCounts{}},
+		}
+		s.init(s) // Init self-collection.
+		return s
+	}
+
+	s := &summary{
+		desc: desc,
+
+		objectives:       opts.Objectives,
+		sortedObjectives: make([]float64, 0, len(opts.Objectives)),
+
+		labelPairs: makeLabelPairs(desc, labelValues),
+
+		hotBuf:         make([]float64, 0, opts.BufCap),
+		coldBuf:        make([]float64, 0, opts.BufCap),
+		streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
+	}
+	s.headStreamExpTime = time.Now().Add(s.streamDuration)
+	s.hotBufExpTime = s.headStreamExpTime
+
+	for i := uint32(0); i < opts.AgeBuckets; i++ {
+		s.streams = append(s.streams, s.newStream())
+	}
+	s.headStream = s.streams[0]
+
+	for qu := range s.objectives {
+		s.sortedObjectives = append(s.sortedObjectives, qu)
+	}
+	sort.Float64s(s.sortedObjectives)
+
+	s.init(s) // Init self-collection.
+	return s
+}
+
+type summary struct {
+	selfCollector
+
+	bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.
+	mtx    sync.Mutex // Protects every other moving part.
+	// Lock bufMtx before mtx if both are needed.
+
+	desc *Desc
+
+	objectives       map[float64]float64
+	sortedObjectives []float64
+
+	labelPairs []*dto.LabelPair
+
+	sum float64
+	cnt uint64
+
+	hotBuf, coldBuf []float64
+
+	streams                          []*quantile.Stream
+	streamDuration                   time.Duration
+	headStream                       *quantile.Stream
+	headStreamIdx                    int
+	headStreamExpTime, hotBufExpTime time.Time
+}
+
+func (s *summary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *summary) Observe(v float64) {
+	s.bufMtx.Lock()
+	defer s.bufMtx.Unlock()
+
+	now := time.Now()
+	if now.After(s.hotBufExpTime) {
+		s.asyncFlush(now)
+	}
+	s.hotBuf = append(s.hotBuf, v)
+	if len(s.hotBuf) == cap(s.hotBuf) {
+		s.asyncFlush(now)
+	}
+}
+
+func (s *summary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.objectives))
+
+	s.bufMtx.Lock()
+	s.mtx.Lock()
+	// Swap bufs even if hotBuf is empty to set new hotBufExpTime.
+	s.swapBufs(time.Now())
+	s.bufMtx.Unlock()
+
+	s.flushColdBuf()
+	sum.SampleCount = proto.Uint64(s.cnt)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for _, rank := range s.sortedObjectives {
+		var q float64
+		if s.headStream.Count() == 0 {
+			q = math.NaN()
+		} else {
+			q = s.headStream.Query(rank)
+		}
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	s.mtx.Unlock()
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+	return nil
+}
+
+func (s *summary) newStream() *quantile.Stream {
+	return quantile.NewTargeted(s.objectives)
+}
+
+// asyncFlush needs bufMtx locked.
+func (s *summary) asyncFlush(now time.Time) {
+	s.mtx.Lock()
+	s.swapBufs(now)
+
+	// Unblock the original goroutine that was responsible for the mutation
+	// that triggered the compaction. But hold onto the global non-buffer
+	// state mutex until the operation finishes.
+	go func() {
+		s.flushColdBuf()
+		s.mtx.Unlock()
+	}()
+}
+
+// maybeRotateStreams needs mtx AND bufMtx locked.
+func (s *summary) maybeRotateStreams() {
+	for !s.hotBufExpTime.Equal(s.headStreamExpTime) {
+		s.headStream.Reset()
+		s.headStreamIdx++
+		if s.headStreamIdx >= len(s.streams) {
+			s.headStreamIdx = 0
+		}
+		s.headStream = s.streams[s.headStreamIdx]
+		s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)
+	}
+}
+
+// flushColdBuf needs mtx locked.
+func (s *summary) flushColdBuf() {
+	for _, v := range s.coldBuf {
+		for _, stream := range s.streams {
+			stream.Insert(v)
+		}
+		s.cnt++
+		s.sum += v
+	}
+	s.coldBuf = s.coldBuf[0:0]
+	s.maybeRotateStreams()
+}
+
+// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.
+func (s *summary) swapBufs(now time.Time) {
+	if len(s.coldBuf) != 0 {
+		panic("coldBuf is not empty")
+	}
+	s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf
+	// hotBuf is now empty and gets new expiration set.
+	for now.After(s.hotBufExpTime) {
+		s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)
+	}
+}
+
+type summaryCounts struct {
+	// sumBits contains the bits of the float64 representing the sum of all
+	// observations. sumBits and count have to go first in the struct to
+	// guarantee alignment for atomic operations.
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	sumBits uint64
+	count   uint64
+}
+
+type noObjectivesSummary struct {
+	// countAndHotIdx enables lock-free writes with use of atomic updates.
+	// The most significant bit is the hot index [0 or 1] of the count field
+	// below. Observe calls update the hot one. All remaining bits count the
+	// number of Observe calls. 
Observe starts by incrementing this counter,
+	// and finishes by incrementing the count field in the respective
+	// summaryCounts, as a marker for completion.
+	//
+	// Calls of the Write method (which are non-mutating reads from the
+	// perspective of the summary) swap the hot and cold counts under the
+	// writeMtx lock. A cooldown is awaited (while locked) by comparing the
+	// number of observations with the initiation count. Once they match, the
+	// last observation on the now cool one has completed. All cool fields must
+	// be merged into the new hot before releasing writeMtx.
+
+	// Fields with atomic access first! See alignment constraint:
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG
+	countAndHotIdx uint64
+
+	selfCollector
+	desc     *Desc
+	writeMtx sync.Mutex // Only used in the Write method.
+
+	// Two counts, one is "hot" for lock-free observations, the other is
+	// "cold" for writing out a dto.Metric. It has to be an array of
+	// pointers to guarantee 64bit alignment of the summaryCounts, see
+	// http://golang.org/pkg/sync/atomic/#pkg-note-BUG.
+	counts [2]*summaryCounts
+
+	labelPairs []*dto.LabelPair
+}
+
+func (s *noObjectivesSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *noObjectivesSummary) Observe(v float64) {
+	// We increment s.countAndHotIdx so that the counter in the lower
+	// 63 bits gets incremented. At the same time, we get the new value
+	// back, which we can use to find the currently-hot counts.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1)
+	hotCounts := s.counts[n>>63]
+
+	for {
+		oldBits := atomic.LoadUint64(&hotCounts.sumBits)
+		newBits := math.Float64bits(math.Float64frombits(oldBits) + v)
+		if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) {
+			break
+		}
+	}
+	// Increment count last as we take it as a signal that the observation
+	// is complete.
+	atomic.AddUint64(&hotCounts.count, 1)
+}
+
+func (s *noObjectivesSummary) Write(out *dto.Metric) error {
+	// For simplicity, we protect this whole method by a mutex. It is not in
+	// the hot path, i.e. Observe is called much more often than Write. The
+	// complication of making Write lock-free isn't worth it, if possible at
+	// all.
+	s.writeMtx.Lock()
+	defer s.writeMtx.Unlock()
+
+	// Adding 1<<63 switches the hot index (from 0 to 1 or from 1 to 0)
+	// without touching the count bits. See the struct comments for a full
+	// description of the algorithm.
+	n := atomic.AddUint64(&s.countAndHotIdx, 1<<63)
+	// count is contained unchanged in the lower 63 bits.
+	count := n & ((1 << 63) - 1)
+	// The most significant bit tells us which of the two counts is hot. The
+	// complement is thus the cold one.
+	hotCounts := s.counts[n>>63]
+	coldCounts := s.counts[(^n)>>63]
+
+	// Await cooldown.
+	for count != atomic.LoadUint64(&coldCounts.count) {
+		runtime.Gosched() // Let observations get work done.
+	}
+
+	sum := &dto.Summary{
+		SampleCount: proto.Uint64(count),
+		SampleSum:   proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
+	}
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	// Finally add all the cold counts to the new hot counts and reset the cold counts.
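+	// (The CAS loop below folds the cold sum into the hot sum without losing
+	// concurrent Observe updates; the cold sum is only zeroed after its value
+	// has been transferred.)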
+ atomic.AddUint64(&hotCounts.count, count) + atomic.StoreUint64(&coldCounts.count, 0) + for { + oldBits := atomic.LoadUint64(&hotCounts.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + sum.GetSampleSum()) + if atomic.CompareAndSwapUint64(&hotCounts.sumBits, oldBits, newBits) { + atomic.StoreUint64(&coldCounts.sumBits, 0) + break + } + } + return nil +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *metricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. +// +// Due to the way a Summary is represented in the Prometheus text format and how +// it is handled by the Prometheus server internally, “quantile” is an illegal +// label name. NewSummaryVec will panic if this label name is used. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + for _, ln := range labelNames { + if ln == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + metricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues returns the Summary for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Summary is created. +// +// It is possible to call this method without using the returned Summary to only +// create the new Summary but leave it at its starting value, a Summary without +// any observations. +// +// Keeping the Summary for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Summary from the SummaryVec. In that case, +// the Summary will still exist, but it will not be exported anymore, even if a +// Summary with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc (minus any curried labels). +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (v *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) { + metric, err := v.metricVec.getMetricWithLabelValues(lvs...) 
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc (minus any curried labels).
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (v *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+	metric, err := v.metricVec.getMetricWith(labels)
+	if metric != nil {
+		return metric.(Observer), err
+	}
+	return nil, err
+}
+
+// WithLabelValues works as GetMetricWithLabelValues, but panics where
+// GetMetricWithLabelValues would have returned an error. Not returning an
+// error allows shortcuts like
+//	myVec.WithLabelValues("404", "GET").Observe(42.21)
+func (v *SummaryVec) WithLabelValues(lvs ...string) Observer {
+	s, err := v.GetMetricWithLabelValues(lvs...)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// With works as GetMetricWith, but panics where GetMetricWith would have
+// returned an error. Not returning an error allows shortcuts like
+//	myVec.With(prometheus.Labels{"code": "404", "method": "GET"}).Observe(42.21)
+func (v *SummaryVec) With(labels Labels) Observer {
+	s, err := v.GetMetricWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return s
+}
+
+// CurryWith returns a vector curried with the provided labels, i.e. the
+// returned vector has those labels pre-set for all labeled operations performed
+// on it. The cardinality of the curried vector is reduced accordingly. The
+// order of the remaining labels stays the same (just with the curried labels
+// taken out of the sequence – which is relevant for the
+// (GetMetric)WithLabelValues methods). It is possible to curry a curried
+// vector, but only with labels not yet used for currying before.
+//
+// The metrics contained in the SummaryVec are shared between the curried and
+// uncurried vectors. They are just accessed differently. Curried and uncurried
+// vectors behave identically in terms of collection. Only one must be
+// registered with a given registry (usually the uncurried version). The Reset
+// method deletes all metrics, even if called on a curried vector.
+func (v *SummaryVec) CurryWith(labels Labels) (ObserverVec, error) {
+	vec, err := v.curryWith(labels)
+	if vec != nil {
+		return &SummaryVec{vec}, err
+	}
+	return nil, err
+}
+
+// MustCurryWith works as CurryWith but panics where CurryWith would have
+// returned an error.
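+//
+// A hedged usage sketch (the vector, label names, and values below are
+// illustrative assumptions, not part of this API):
+//	latency := NewSummaryVec(SummaryOpts{Name: "request_latency_seconds"},
+//		[]string{"handler", "method"})
+//	api := latency.MustCurryWith(Labels{"handler": "api"})
+//	api.WithLabelValues("GET").Observe(0.42)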
+func (v *SummaryVec) MustCurryWith(labels Labels) ObserverVec {
+	vec, err := v.CurryWith(labels)
+	if err != nil {
+		panic(err)
+	}
+	return vec
+}
+
+type constSummary struct {
+	desc       *Desc
+	count      uint64
+	sum        float64
+	quantiles  map[float64]float64
+	labelPairs []*dto.LabelPair
+}
+
+func (s *constSummary) Desc() *Desc {
+	return s.desc
+}
+
+func (s *constSummary) Write(out *dto.Metric) error {
+	sum := &dto.Summary{}
+	qs := make([]*dto.Quantile, 0, len(s.quantiles))
+
+	sum.SampleCount = proto.Uint64(s.count)
+	sum.SampleSum = proto.Float64(s.sum)
+
+	for rank, q := range s.quantiles {
+		qs = append(qs, &dto.Quantile{
+			Quantile: proto.Float64(rank),
+			Value:    proto.Float64(q),
+		})
+	}
+
+	if len(qs) > 0 {
+		sort.Sort(quantSort(qs))
+	}
+	sum.Quantile = qs
+
+	out.Summary = sum
+	out.Label = s.labelPairs
+
+	return nil
+}
+
+// NewConstSummary returns a metric representing a Prometheus summary with fixed
+// values for the count, sum, and quantiles. As those parameters cannot be
+// changed, the returned value does not implement the Summary interface (but
+// only the Metric interface). Users of this package will not have much use for
+// it in regular operations. However, when implementing custom Collectors, it is
+// useful as a throw-away metric that is generated on the fly to send it to
+// Prometheus in the Collect method.
+//
+// quantiles maps ranks to quantile values. For example, a median latency of
+// 0.23s and a 99th percentile latency of 0.56s would be expressed as:
+//	map[float64]float64{0.5: 0.23, 0.99: 0.56}
+//
+// NewConstSummary returns an error if the length of labelValues is not
+// consistent with the variable labels in Desc or if Desc is invalid.
+func NewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) (Metric, error) {
+	if desc.err != nil {
+		return nil, desc.err
+	}
+	if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+		return nil, err
+	}
+	return &constSummary{
+		desc:       desc,
+		count:      count,
+		sum:        sum,
+		quantiles:  quantiles,
+		labelPairs: makeLabelPairs(desc, labelValues),
+	}, nil
+}
+
+// MustNewConstSummary is a version of NewConstSummary that panics where
+// NewConstSummary would have returned an error.
+func MustNewConstSummary(
+	desc *Desc,
+	count uint64,
+	sum float64,
+	quantiles map[float64]float64,
+	labelValues ...string,
+) Metric {
+	m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/timer.go
new file mode 100644
index 00000000..8d5f1052
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package prometheus + +import "time" + +// Timer is a helper type to time functions. Use NewTimer to create new +// instances. +type Timer struct { + begin time.Time + observer Observer +} + +// NewTimer creates a new Timer. The provided Observer is used to observe a +// duration in seconds. Timer is usually used to time a function call in the +// following way: +// func TimeMe() { +// timer := NewTimer(myHistogram) +// defer timer.ObserveDuration() +// // Do actual work. +// } +func NewTimer(o Observer) *Timer { + return &Timer{ + begin: time.Now(), + observer: o, + } +} + +// ObserveDuration records the duration passed since the Timer was created with +// NewTimer. It calls the Observe method of the Observer provided during +// construction with the duration in seconds as an argument. The observed +// duration is also returned. ObserveDuration is usually called with a defer +// statement. +// +// Note that this method is only guaranteed to never observe negative durations +// if used with Go1.9+. +func (t *Timer) ObserveDuration() time.Duration { + d := time.Since(t.begin) + if t.observer != nil { + t.observer.Observe(d.Seconds()) + } + return d +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 00000000..0f9ce63f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,42 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// UntypedOpts is an alias for Opts. See there for doc comments. +type UntypedOpts Opts + +// UntypedFunc works like GaugeFunc but the collected metric is of type +// "Untyped". UntypedFunc is useful to mirror an external metric of unknown +// type. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
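+//
+// A minimal sketch (the metric name and readExternalValue are illustrative
+// assumptions):
+//	uf := NewUntypedFunc(UntypedOpts{
+//		Name: "external_subsystem_value",
+//		Help: "Mirrors a value of unknown type from an external system.",
+//	}, func() float64 {
+//		return readExternalValue()
+//	})
+//	MustRegister(uf)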
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 00000000..eb248f10 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,162 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. +type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc or if Desc is +// invalid. 
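+//
+// A hedged sketch of typical use inside a custom Collector (myCollector and its
+// desc field are illustrative assumptions):
+//	func (c *myCollector) Collect(ch chan<- Metric) {
+//		ch <- MustNewConstMetric(c.desc, CounterValue, 42, "labelValue")
+//	}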
+func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if desc.err != nil { + return nil, desc.err + } + if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil { + return nil, err + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. +func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. + return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + labelPairs = append(labelPairs, desc.constLabelPairs...) + sort.Sort(labelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 00000000..14ed9e85 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,472 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// metricVec is a Collector to bundle metrics of the same name that differ in +// their label values. metricVec is not used directly (and therefore +// unexported). It is used as a building block for implementations of vectors of +// a given metric type, like GaugeVec, CounterVec, SummaryVec, and HistogramVec. +// It also handles label currying. 
It uses metricMap internally.
+type metricVec struct {
+	*metricMap
+
+	curry []curriedLabelValue
+
+	// hashAdd and hashAddByte can be replaced for testing collision handling.
+	hashAdd     func(h uint64, s string) uint64
+	hashAddByte func(h uint64, b byte) uint64
+}
+
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+	return &metricVec{
+		metricMap: &metricMap{
+			metrics:   map[uint64][]metricWithLabelValues{},
+			desc:      desc,
+			newMetric: newMetric,
+		},
+		hashAdd:     hashAdd,
+		hashAddByte: hashAddByte,
+	}
+}
+
+// DeleteLabelValues removes the metric where the variable labels are the same
+// as those passed in as labels (same order as the VariableLabels in Desc). It
+// returns true if a metric was deleted.
+//
+// It is not an error if the number of label values is not the same as the
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
+// case.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider Delete(Labels) as an
+// alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the CounterVec example.
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
+	h, err := m.hashLabelValues(lvs)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabelValues(h, lvs, m.curry)
+}
+
+// Delete deletes the metric where the variable labels are the same as those
+// passed in as labels. It returns true if a metric was deleted.
+//
+// It is not an error if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
+//
+// This method is used for the same purpose as DeleteLabelValues(...string). See
+// there for pros and cons of the two methods.
+func (m *metricVec) Delete(labels Labels) bool {
+	h, err := m.hashLabels(labels)
+	if err != nil {
+		return false
+	}
+
+	return m.metricMap.deleteByHashWithLabels(h, labels, m.curry)
+}
+
+func (m *metricVec) curryWith(labels Labels) (*metricVec, error) {
+	var (
+		newCurry []curriedLabelValue
+		oldCurry = m.curry
+		iCurry   int
+	)
+	for i, label := range m.desc.variableLabels {
+		val, ok := labels[label]
+		if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
+			if ok {
+				return nil, fmt.Errorf("label name %q is already curried", label)
+			}
+			newCurry = append(newCurry, oldCurry[iCurry])
+			iCurry++
+		} else {
+			if !ok {
+				continue // Label stays uncurried.
+ } + newCurry = append(newCurry, curriedLabelValue{i, val}) + } + } + if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 { + return nil, fmt.Errorf("%d unknown label(s) found during currying", l) + } + + return &metricVec{ + metricMap: m.metricMap, + curry: newCurry, + hashAdd: m.hashAdd, + hashAddByte: m.hashAddByte, + }, nil +} + +func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabelValues(h, lvs, m.curry), nil +} + +func (m *metricVec) getMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.metricMap.getOrCreateMetricWithLabels(h, labels, m.curry), nil +} + +func (m *metricVec) hashLabelValues(vals []string) (uint64, error) { + if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iVals, iCurry int + ) + for i := 0; i < len(m.desc.variableLabels); i++ { + if iCurry < len(curry) && curry[iCurry].index == i { + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + h = m.hashAdd(h, vals[iVals]) + iVals++ + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *metricVec) hashLabels(labels Labels) (uint64, error) { + if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil { + return 0, err + } + + var ( + h = hashNew() + curry = m.curry + iCurry int + ) + for i, label := range m.desc.variableLabels { + val, ok := labels[label] + if iCurry < len(curry) && curry[iCurry].index == i { + if ok { + return 0, fmt.Errorf("label name %q is already curried", label) + } + h = m.hashAdd(h, curry[iCurry].value) + iCurry++ + } else { + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + } + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// curriedLabelValue sets the curried value for a label at the given index. +type curriedLabelValue struct { + index int + value string +} + +// metricMap is a helper for metricVec and shared between differently curried +// metricVecs. +type metricMap struct { + mtx sync.RWMutex // Protects metrics. + metrics map[uint64][]metricWithLabelValues + desc *Desc + newMetric func(labelValues ...string) Metric +} + +// Describe implements Collector. It will send exactly one Desc to the provided +// channel. +func (m *metricMap) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *metricMap) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.metrics { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// Reset deletes all metrics in this vector. +func (m *metricMap) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.metrics { + delete(m.metrics, h) + } +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. 
+func (m *metricMap) deleteByHashWithLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + + i := findMetricWithLabelValues(metrics, lvs, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *metricMap) deleteByHashWithLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + metrics, ok := m.metrics[h] + if !ok { + return false + } + i := findMetricWithLabels(m.desc, metrics, labels, curry) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.metrics[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.metrics, h) + } + return true +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabelValues( + hash uint64, lvs []string, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs, curry) + if !ok { + inlinedLVs := inlineLabelValues(lvs, curry) + metric = m.newMetric(inlinedLVs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: inlinedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *metricMap) getOrCreateMetricWithLabels( + hash uint64, labels Labels, curry []curriedLabelValue, +) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithHashAndLabels(hash, labels, curry) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithHashAndLabels(hash, labels, curry) + if !ok { + lvs := extractLabelValues(m.desc, labels, curry) + metric = m.newMetric(lvs...) + m.metrics[hash] = append(m.metrics[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithHashAndLabelValues gets a metric while handling possible +// collisions in the hash space. Must be called while holding the read mutex. +func (m *metricMap) getMetricWithHashAndLabelValues( + h uint64, lvs []string, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabelValues(metrics, lvs, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithHashAndLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. 
+func (m *metricMap) getMetricWithHashAndLabels( + h uint64, labels Labels, curry []curriedLabelValue, +) (Metric, bool) { + metrics, ok := m.metrics[h] + if ok { + if i := findMetricWithLabels(m.desc, metrics, labels, curry); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func findMetricWithLabelValues( + metrics []metricWithLabelValues, lvs []string, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabelValues(metric.values, lvs, curry) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. +func findMetricWithLabels( + desc *Desc, metrics []metricWithLabelValues, labels Labels, curry []curriedLabelValue, +) int { + for i, metric := range metrics { + if matchLabels(desc, metric.values, labels, curry) { + return i + } + } + return len(metrics) +} + +func matchLabelValues(values []string, lvs []string, curry []curriedLabelValue) bool { + if len(values) != len(lvs)+len(curry) { + return false + } + var iLVs, iCurry int + for i, v := range values { + if iCurry < len(curry) && curry[iCurry].index == i { + if v != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if v != lvs[iLVs] { + return false + } + iLVs++ + } + return true +} + +func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool { + if len(values) != len(labels)+len(curry) { + return false + } + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + if values[i] != curry[iCurry].value { + return false + } + iCurry++ + continue + } + if values[i] != labels[k] { + return false + } + } + return true +} + +func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string { + labelValues := make([]string, len(labels)+len(curry)) + iCurry := 0 + for i, k := range desc.variableLabels { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = labels[k] + } + return labelValues +} + +func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string { + labelValues := make([]string, len(lvs)+len(curry)) + var iCurry, iLVs int + for i := range labelValues { + if iCurry < len(curry) && curry[iCurry].index == i { + labelValues[i] = curry[iCurry].value + iCurry++ + continue + } + labelValues[i] = lvs[iLVs] + iLVs++ + } + return labelValues +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/wrap.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/wrap.go new file mode 100644 index 00000000..e303eef6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_golang/prometheus/wrap.go @@ -0,0 +1,200 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sort" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// WrapRegistererWith returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided Labels to all Metrics it collects (as +// ConstLabels). The Metrics collected by the unmodified Collector must not +// duplicate any of those labels. +// +// WrapRegistererWith provides a way to add fixed labels to a subset of +// Collectors. It should not be used to add fixed labels to all metrics exposed. +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. +// +// The Collector example demonstrates a use of WrapRegistererWith. +func WrapRegistererWith(labels Labels, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + labels: labels, + } +} + +// WrapRegistererWithPrefix returns a Registerer wrapping the provided +// Registerer. Collectors registered with the returned Registerer will be +// registered with the wrapped Registerer in a modified way. The modified +// Collector adds the provided prefix to the name of all Metrics it collects. +// +// WrapRegistererWithPrefix is useful to have one place to prefix all metrics of +// a sub-system. To make this work, register metrics of the sub-system with the +// wrapping Registerer returned by WrapRegistererWithPrefix. It is rarely useful +// to use the same prefix for all metrics exposed. In particular, do not prefix +// metric names that are standardized across applications, as that would break +// horizontal monitoring, for example the metrics provided by the Go collector +// (see NewGoCollector) and the process collector (see NewProcessCollector). (In +// fact, those metrics are already prefixed with “go_” or “process_”, +// respectively.) +// +// Conflicts between Collectors registered through the original Registerer with +// Collectors registered through the wrapping Registerer will still be +// detected. Any AlreadyRegisteredError returned by the Register method of +// either Registerer will contain the ExistingCollector in the form it was +// provided to the respective registry. 
+func WrapRegistererWithPrefix(prefix string, reg Registerer) Registerer { + return &wrappingRegisterer{ + wrappedRegisterer: reg, + prefix: prefix, + } +} + +type wrappingRegisterer struct { + wrappedRegisterer Registerer + prefix string + labels Labels +} + +func (r *wrappingRegisterer) Register(c Collector) error { + return r.wrappedRegisterer.Register(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +func (r *wrappingRegisterer) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +func (r *wrappingRegisterer) Unregister(c Collector) bool { + return r.wrappedRegisterer.Unregister(&wrappingCollector{ + wrappedCollector: c, + prefix: r.prefix, + labels: r.labels, + }) +} + +type wrappingCollector struct { + wrappedCollector Collector + prefix string + labels Labels +} + +func (c *wrappingCollector) Collect(ch chan<- Metric) { + wrappedCh := make(chan Metric) + go func() { + c.wrappedCollector.Collect(wrappedCh) + close(wrappedCh) + }() + for m := range wrappedCh { + ch <- &wrappingMetric{ + wrappedMetric: m, + prefix: c.prefix, + labels: c.labels, + } + } +} + +func (c *wrappingCollector) Describe(ch chan<- *Desc) { + wrappedCh := make(chan *Desc) + go func() { + c.wrappedCollector.Describe(wrappedCh) + close(wrappedCh) + }() + for desc := range wrappedCh { + ch <- wrapDesc(desc, c.prefix, c.labels) + } +} + +func (c *wrappingCollector) unwrapRecursively() Collector { + switch wc := c.wrappedCollector.(type) { + case *wrappingCollector: + return wc.unwrapRecursively() + default: + return wc + } +} + +type wrappingMetric struct { + wrappedMetric Metric + prefix string + labels Labels +} + +func (m *wrappingMetric) Desc() *Desc { + return wrapDesc(m.wrappedMetric.Desc(), m.prefix, m.labels) +} + +func (m *wrappingMetric) Write(out *dto.Metric) error { + if err := m.wrappedMetric.Write(out); err != nil { + return err + } + if len(m.labels) == 0 { + // No wrapping labels. + return nil + } + for ln, lv := range m.labels { + out.Label = append(out.Label, &dto.LabelPair{ + Name: proto.String(ln), + Value: proto.String(lv), + }) + } + sort.Sort(labelPairSorter(out.Label)) + return nil +} + +func wrapDesc(desc *Desc, prefix string, labels Labels) *Desc { + constLabels := Labels{} + for _, lp := range desc.constLabelPairs { + constLabels[*lp.Name] = *lp.Value + } + for ln, lv := range labels { + if _, alreadyUsed := constLabels[ln]; alreadyUsed { + return &Desc{ + fqName: desc.fqName, + help: desc.help, + variableLabels: desc.variableLabels, + constLabelPairs: desc.constLabelPairs, + err: fmt.Errorf("attempted wrapping with already existing label name %q", ln), + } + } + constLabels[ln] = lv + } + // NewDesc will do remaining validations. + newDesc := NewDesc(prefix+desc.fqName, desc.help, desc.variableLabels, constLabels) + // Propagate errors if there was any. This will override any errer + // created by NewDesc above, i.e. earlier errors get precedence. 
+ if desc.err != nil { + newDesc.err = desc.err + } + return newDesc +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 00000000..20110e41 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 00000000..9805432c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,629 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: metrics.proto + +package io_prometheus_client // import "github.com/prometheus/client_model/go" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} +func (MetricType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} +func (*LabelPair) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{0} +} +func (m *LabelPair) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LabelPair.Unmarshal(m, b) +} +func (m *LabelPair) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LabelPair.Marshal(b, m, deterministic) +} +func (dst *LabelPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelPair.Merge(dst, src) +} +func (m *LabelPair) XXX_Size() int { + return xxx_messageInfo_LabelPair.Size(m) +} +func (m *LabelPair) XXX_DiscardUnknown() { + xxx_messageInfo_LabelPair.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelPair proto.InternalMessageInfo + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} +func (*Gauge) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{1} +} +func (m *Gauge) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Gauge.Unmarshal(m, b) +} +func (m *Gauge) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Gauge.Marshal(b, m, deterministic) +} +func (dst *Gauge) XXX_Merge(src proto.Message) { + xxx_messageInfo_Gauge.Merge(dst, src) +} +func (m *Gauge) XXX_Size() int { + return xxx_messageInfo_Gauge.Size(m) +} +func (m *Gauge) XXX_DiscardUnknown() { + xxx_messageInfo_Gauge.DiscardUnknown(m) +} + +var xxx_messageInfo_Gauge proto.InternalMessageInfo + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + 
} + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} +func (*Counter) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{2} +} +func (m *Counter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Counter.Unmarshal(m, b) +} +func (m *Counter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Counter.Marshal(b, m, deterministic) +} +func (dst *Counter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Counter.Merge(dst, src) +} +func (m *Counter) XXX_Size() int { + return xxx_messageInfo_Counter.Size(m) +} +func (m *Counter) XXX_DiscardUnknown() { + xxx_messageInfo_Counter.DiscardUnknown(m) +} + +var xxx_messageInfo_Counter proto.InternalMessageInfo + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} +func (*Quantile) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{3} +} +func (m *Quantile) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantile.Unmarshal(m, b) +} +func (m *Quantile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantile.Marshal(b, m, deterministic) +} +func (dst *Quantile) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantile.Merge(dst, src) +} +func (m *Quantile) XXX_Size() int { + return xxx_messageInfo_Quantile.Size(m) +} +func (m *Quantile) XXX_DiscardUnknown() { + xxx_messageInfo_Quantile.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantile proto.InternalMessageInfo + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} +func (*Summary) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{4} +} +func (m *Summary) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Summary.Unmarshal(m, b) +} +func (m *Summary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Summary.Marshal(b, m, deterministic) +} +func (dst 
*Summary) XXX_Merge(src proto.Message) { + xxx_messageInfo_Summary.Merge(dst, src) +} +func (m *Summary) XXX_Size() int { + return xxx_messageInfo_Summary.Size(m) +} +func (m *Summary) XXX_DiscardUnknown() { + xxx_messageInfo_Summary.DiscardUnknown(m) +} + +var xxx_messageInfo_Summary proto.InternalMessageInfo + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} +func (*Untyped) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{5} +} +func (m *Untyped) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Untyped.Unmarshal(m, b) +} +func (m *Untyped) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Untyped.Marshal(b, m, deterministic) +} +func (dst *Untyped) XXX_Merge(src proto.Message) { + xxx_messageInfo_Untyped.Merge(dst, src) +} +func (m *Untyped) XXX_Size() int { + return xxx_messageInfo_Untyped.Size(m) +} +func (m *Untyped) XXX_DiscardUnknown() { + xxx_messageInfo_Untyped.DiscardUnknown(m) +} + +var xxx_messageInfo_Untyped proto.InternalMessageInfo + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} +func (*Histogram) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{6} +} +func (m *Histogram) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Histogram.Unmarshal(m, b) +} +func (m *Histogram) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Histogram.Marshal(b, m, deterministic) +} +func (dst *Histogram) XXX_Merge(src proto.Message) { + xxx_messageInfo_Histogram.Merge(dst, src) +} +func (m *Histogram) XXX_Size() int { + return xxx_messageInfo_Histogram.Size(m) +} +func (m *Histogram) XXX_DiscardUnknown() { + xxx_messageInfo_Histogram.DiscardUnknown(m) +} + +var xxx_messageInfo_Histogram proto.InternalMessageInfo + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount 
*uint64 `protobuf:"varint,1,opt,name=cumulative_count,json=cumulativeCount" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound,json=upperBound" json:"upper_bound,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} +func (*Bucket) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{7} +} +func (m *Bucket) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Bucket.Unmarshal(m, b) +} +func (m *Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Bucket.Marshal(b, m, deterministic) +} +func (dst *Bucket) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bucket.Merge(dst, src) +} +func (m *Bucket) XXX_Size() int { + return xxx_messageInfo_Bucket.Size(m) +} +func (m *Bucket) XXX_DiscardUnknown() { + xxx_messageInfo_Bucket.DiscardUnknown(m) +} + +var xxx_messageInfo_Bucket proto.InternalMessageInfo + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return *m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms,json=timestampMs" json:"timestamp_ms,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} +func (*Metric) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{8} +} +func (m *Metric) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Metric.Unmarshal(m, b) +} +func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Metric.Marshal(b, m, deterministic) +} +func (dst *Metric) XXX_Merge(src proto.Message) { + xxx_messageInfo_Metric.Merge(dst, src) +} +func (m *Metric) XXX_Size() int { + return xxx_messageInfo_Metric.Size(m) +} +func (m *Metric) XXX_DiscardUnknown() { + xxx_messageInfo_Metric.DiscardUnknown(m) +} + +var xxx_messageInfo_Metric proto.InternalMessageInfo + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return 
m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} +func (*MetricFamily) Descriptor() ([]byte, []int) { + return fileDescriptor_metrics_c97c9a2b9560cb8f, []int{9} +} +func (m *MetricFamily) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MetricFamily.Unmarshal(m, b) +} +func (m *MetricFamily) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MetricFamily.Marshal(b, m, deterministic) +} +func (dst *MetricFamily) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricFamily.Merge(dst, src) +} +func (m *MetricFamily) XXX_Size() int { + return xxx_messageInfo_MetricFamily.Size(m) +} +func (m *MetricFamily) XXX_DiscardUnknown() { + xxx_messageInfo_MetricFamily.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricFamily proto.InternalMessageInfo + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterType((*LabelPair)(nil), "io.prometheus.client.LabelPair") + proto.RegisterType((*Gauge)(nil), "io.prometheus.client.Gauge") + proto.RegisterType((*Counter)(nil), "io.prometheus.client.Counter") + proto.RegisterType((*Quantile)(nil), "io.prometheus.client.Quantile") + proto.RegisterType((*Summary)(nil), "io.prometheus.client.Summary") + proto.RegisterType((*Untyped)(nil), "io.prometheus.client.Untyped") + proto.RegisterType((*Histogram)(nil), "io.prometheus.client.Histogram") + proto.RegisterType((*Bucket)(nil), "io.prometheus.client.Bucket") + proto.RegisterType((*Metric)(nil), "io.prometheus.client.Metric") + proto.RegisterType((*MetricFamily)(nil), "io.prometheus.client.MetricFamily") + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} + +func init() { proto.RegisterFile("metrics.proto", fileDescriptor_metrics_c97c9a2b9560cb8f) } + +var fileDescriptor_metrics_c97c9a2b9560cb8f = []byte{ + // 591 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x4f, 0x4f, 0xdb, 0x4e, + 0x14, 0xfc, 0x99, 0xd8, 0x09, 0x7e, 0x86, 0x5f, 0xad, 0x15, 0x07, 0xab, 0x2d, 0x25, 0xcd, 0x89, + 0xf6, 0x10, 0x54, 0x04, 0xaa, 0x44, 0xdb, 0x03, 0x50, 0x1a, 0x2a, 0xd5, 0x40, 0x37, 0xc9, 0x81, + 0x5e, 0xac, 0x8d, 0x59, 0x25, 0x56, 0xbd, 0xb6, 0x6b, 0xef, 0x22, 0xe5, 0xdc, 0x43, 0xbf, 0x47, + 0xbf, 0x68, 0xab, 0xfd, 0xe3, 0x18, 0x24, 0xc3, 0xa9, 0xb7, 0xb7, 0xf3, 0x66, 0xde, 0x8e, 0x77, + 0xc7, 0x0b, 
0x9b, 0x8c, 0xf2, 0x32, 0x89, 0xab, 0x61, 0x51, 0xe6, 0x3c, 0x47, 0x5b, 0x49, 0x2e, + 0x2b, 0x46, 0xf9, 0x82, 0x8a, 0x6a, 0x18, 0xa7, 0x09, 0xcd, 0xf8, 0xe0, 0x10, 0xdc, 0x2f, 0x64, + 0x46, 0xd3, 0x2b, 0x92, 0x94, 0x08, 0x81, 0x9d, 0x11, 0x46, 0x03, 0xab, 0x6f, 0xed, 0xba, 0x58, + 0xd5, 0x68, 0x0b, 0x9c, 0x5b, 0x92, 0x0a, 0x1a, 0xac, 0x29, 0x50, 0x2f, 0x06, 0xdb, 0xe0, 0x8c, + 0x88, 0x98, 0xdf, 0x69, 0x4b, 0x8d, 0x55, 0xb7, 0x77, 0xa0, 0x77, 0x9a, 0x8b, 0x8c, 0xd3, 0xf2, + 0x01, 0xc2, 0x7b, 0x58, 0xff, 0x2a, 0x48, 0xc6, 0x93, 0x94, 0xa2, 0xa7, 0xb0, 0xfe, 0xc3, 0xd4, + 0x86, 0xb4, 0x5a, 0xdf, 0xdf, 0x7d, 0xa5, 0xfe, 0x65, 0x41, 0x6f, 0x2c, 0x18, 0x23, 0xe5, 0x12, + 0xbd, 0x84, 0x8d, 0x8a, 0xb0, 0x22, 0xa5, 0x51, 0x2c, 0x77, 0x54, 0x13, 0x6c, 0xec, 0x69, 0x4c, + 0x99, 0x40, 0xdb, 0x00, 0x86, 0x52, 0x09, 0x66, 0x26, 0xb9, 0x1a, 0x19, 0x0b, 0x86, 0x8e, 0xee, + 0xec, 0xdf, 0xe9, 0x77, 0x76, 0xbd, 0xfd, 0x17, 0xc3, 0xb6, 0xb3, 0x1a, 0xd6, 0x8e, 0x1b, 0x7f, + 0xf2, 0x43, 0xa7, 0x19, 0x5f, 0x16, 0xf4, 0xe6, 0x81, 0x0f, 0xfd, 0x69, 0x81, 0x7b, 0x9e, 0x54, + 0x3c, 0x9f, 0x97, 0x84, 0xfd, 0x03, 0xb3, 0x07, 0xd0, 0x9d, 0x89, 0xf8, 0x3b, 0xe5, 0xc6, 0xea, + 0xf3, 0x76, 0xab, 0x27, 0x8a, 0x83, 0x0d, 0x77, 0x30, 0x81, 0xae, 0x46, 0xd0, 0x2b, 0xf0, 0x63, + 0xc1, 0x44, 0x4a, 0x78, 0x72, 0x7b, 0xdf, 0xc5, 0x93, 0x06, 0xd7, 0x4e, 0x76, 0xc0, 0x13, 0x45, + 0x41, 0xcb, 0x68, 0x96, 0x8b, 0xec, 0xc6, 0x58, 0x01, 0x05, 0x9d, 0x48, 0x64, 0xf0, 0x67, 0x0d, + 0xba, 0xa1, 0xca, 0x18, 0x3a, 0x04, 0x27, 0x95, 0x31, 0x0a, 0x2c, 0xe5, 0x6a, 0xa7, 0xdd, 0xd5, + 0x2a, 0x69, 0x58, 0xb3, 0xd1, 0x1b, 0x70, 0xe6, 0x32, 0x46, 0x6a, 0xb8, 0xb7, 0xff, 0xac, 0x5d, + 0xa6, 0x92, 0x86, 0x35, 0x13, 0xbd, 0x85, 0x5e, 0xac, 0xa3, 0x15, 0x74, 0x94, 0x68, 0xbb, 0x5d, + 0x64, 0xf2, 0x87, 0x6b, 0xb6, 0x14, 0x56, 0x3a, 0x33, 0x81, 0xfd, 0x98, 0xd0, 0x04, 0x0b, 0xd7, + 0x6c, 0x29, 0x14, 0xfa, 0x8e, 0x03, 0xe7, 0x31, 0xa1, 0x09, 0x02, 0xae, 0xd9, 0xe8, 0x03, 0xb8, + 0x8b, 0xfa, 0xea, 0x83, 0x9e, 0x92, 0x3e, 0x70, 0x30, 0xab, 0x84, 0xe0, 0x46, 0x21, 0xc3, 0xc2, + 0x13, 0x46, 0x2b, 0x4e, 0x58, 0x11, 0xb1, 0x2a, 0xe8, 0xf6, 0xad, 0xdd, 0x0e, 0xf6, 0x56, 0x58, + 0x58, 0x0d, 0x7e, 0x5b, 0xb0, 0xa1, 0x6f, 0xe0, 0x13, 0x61, 0x49, 0xba, 0x6c, 0xfd, 0x83, 0x11, + 0xd8, 0x0b, 0x9a, 0x16, 0xe6, 0x07, 0x56, 0x35, 0x3a, 0x00, 0x5b, 0x7a, 0x54, 0x47, 0xf8, 0xff, + 0x7e, 0xbf, 0xdd, 0x95, 0x9e, 0x3c, 0x59, 0x16, 0x14, 0x2b, 0xb6, 0x0c, 0x9f, 0x7e, 0x53, 0x02, + 0xfb, 0xb1, 0xf0, 0x69, 0x1d, 0x36, 0xdc, 0xd7, 0x21, 0x40, 0x33, 0x09, 0x79, 0xd0, 0x3b, 0xbd, + 0x9c, 0x5e, 0x4c, 0xce, 0xb0, 0xff, 0x1f, 0x72, 0xc1, 0x19, 0x1d, 0x4f, 0x47, 0x67, 0xbe, 0x25, + 0xf1, 0xf1, 0x34, 0x0c, 0x8f, 0xf1, 0xb5, 0xbf, 0x26, 0x17, 0xd3, 0x8b, 0xc9, 0xf5, 0xd5, 0xd9, + 0x47, 0xbf, 0x83, 0x36, 0xc1, 0x3d, 0xff, 0x3c, 0x9e, 0x5c, 0x8e, 0xf0, 0x71, 0xe8, 0xdb, 0x27, + 0x18, 0x5a, 0x5f, 0xb2, 0x6f, 0x47, 0xf3, 0x84, 0x2f, 0xc4, 0x6c, 0x18, 0xe7, 0x6c, 0xaf, 0xe9, + 0xee, 0xe9, 0x6e, 0xc4, 0xf2, 0x1b, 0x9a, 0xee, 0xcd, 0xf3, 0x77, 0x49, 0x1e, 0x35, 0xdd, 0x48, + 0x77, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x45, 0x21, 0x7f, 0x64, 0x2b, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 00000000..261eeb9e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR 
USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
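
The client_golang files added above combine naturally: value.go supplies NewConstMetric/MustNewConstMetric for building read-only metrics on the fly, vec.go provides the label-vector and currying plumbing behind GaugeVec and friends, and wrap.go lets a whole Collector be prefixed or relabeled at registration time. As a minimal sketch of how these pieces fit together — the queueCollector type, metric name, and values below are illustrative only, not part of this diff, and assume the standard client_golang import path rather than the vendored one:

```go
// Hypothetical example: const metrics built inside a custom Collector,
// registered through a prefixing wrapper Registerer.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// queueCollector exports the depth of some work queues. The metrics are
// rebuilt from scratch on every scrape via MustNewConstMetric, so the
// collector itself stays stateless.
type queueCollector struct {
	depth *prometheus.Desc
}

func newQueueCollector() *queueCollector {
	return &queueCollector{
		depth: prometheus.NewDesc(
			"queue_depth",
			"Number of items waiting in a queue.",
			[]string{"queue"}, // variable label, supplied per metric below
			nil,               // no const labels; the wrapper adds the prefix
		),
	}
}

// Describe implements prometheus.Collector.
func (c *queueCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.depth
}

// Collect implements prometheus.Collector. MustNewConstMetric panics on a
// Desc/label-count mismatch, which is acceptable here because the label
// count is fixed at compile time.
func (c *queueCollector) Collect(ch chan<- prometheus.Metric) {
	for queue, depth := range map[string]float64{"ingest": 42, "publish": 7} {
		ch <- prometheus.MustNewConstMetric(c.depth, prometheus.GaugeValue, depth, queue)
	}
}

func main() {
	reg := prometheus.NewRegistry()

	// WrapRegistererWithPrefix (added in wrap.go above) renames every metric
	// collected through it, so this ends up exported as "myapp_queue_depth".
	prometheus.WrapRegistererWithPrefix("myapp_", reg).MustRegister(newQueueCollector())

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		for _, m := range mf.GetMetric() {
			fmt.Printf("%s %v = %v\n", mf.GetName(), m.GetLabel(), m.GetGauge().GetValue())
		}
	}
}
```

Gather returns the dto.MetricFamily protobuf types from the client_model package vendored above, which is also the type the expfmt decoder in the following file fills in via Decode(*dto.MetricFamily).
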
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/NOTICE
new file mode 100644
index 00000000..636a2c1a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/NOTICE
@@ -0,0 +1,5 @@
+Common libraries shared by Prometheus Go components.
+Copyright 2015 The Prometheus Authors
+
+This product includes software developed at
+SoundCloud Ltd. (http://soundcloud.com/).
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/decode.go
new file mode 100644
index 00000000..c092723e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/decode.go
@@ -0,0 +1,429 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "fmt"
+ "io"
+ "math"
+ "mime"
+ "net/http"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/matttproud/golang_protobuf_extensions/pbutil"
+ "github.com/prometheus/common/model"
+)
+
+// Decoder types decode an input stream into metric families.
+type Decoder interface {
+ Decode(*dto.MetricFamily) error
+}
+
+// DecodeOptions contains options used by the Decoder and in sample extraction.
+type DecodeOptions struct {
+ // Timestamp is added to each value from the stream that has no explicit timestamp set.
+ Timestamp model.Time
+}
+
+// ResponseFormat extracts the correct format from an HTTP response header.
+// If no matching format can be found, FmtUnknown is returned.
+func ResponseFormat(h http.Header) Format {
+ ct := h.Get(hdrContentType)
+
+ mediatype, params, err := mime.ParseMediaType(ct)
+ if err != nil {
+ return FmtUnknown
+ }
+
+ const textType = "text/plain"
+
+ switch mediatype {
+ case ProtoType:
+ if p, ok := params["proto"]; ok && p != ProtoProtocol {
+ return FmtUnknown
+ }
+ if e, ok := params["encoding"]; ok && e != "delimited" {
+ return FmtUnknown
+ }
+ return FmtProtoDelim
+
+ case textType:
+ if v, ok := params["version"]; ok && v != TextVersion {
+ return FmtUnknown
+ }
+ return FmtText
+ }
+
+ return FmtUnknown
+}
+
+// NewDecoder returns a new decoder based on the given input format.
+// If the input format does not imply otherwise, a text format decoder is returned.
+func NewDecoder(r io.Reader, format Format) Decoder {
+ switch format {
+ case FmtProtoDelim:
+ return &protoDecoder{r: r}
+ }
+ return &textDecoder{r: r}
+}
+
+// protoDecoder implements the Decoder interface for protocol buffers.
+type protoDecoder struct {
+ r io.Reader
+}
+
+// Decode implements the Decoder interface.
+func (d *protoDecoder) Decode(v *dto.MetricFamily) error {
+ _, err := pbutil.ReadDelimited(d.r, v)
+ if err != nil {
+ return err
+ }
+ if !model.IsValidMetricName(model.LabelValue(v.GetName())) {
+ return fmt.Errorf("invalid metric name %q", v.GetName())
+ }
+ for _, m := range v.GetMetric() {
+ if m == nil {
+ continue
+ }
+ for _, l := range m.GetLabel() {
+ if l == nil {
+ continue
+ }
+ if !model.LabelValue(l.GetValue()).IsValid() {
+ return fmt.Errorf("invalid label value %q", l.GetValue())
+ }
+ if !model.LabelName(l.GetName()).IsValid() {
+ return fmt.Errorf("invalid label name %q", l.GetName())
+ }
+ }
+ }
+ return nil
+}
+
+// textDecoder implements the Decoder interface for the text protocol.
+type textDecoder struct {
+ r io.Reader
+ p TextParser
+ fams []*dto.MetricFamily
+}
+
+// Decode implements the Decoder interface.
+func (d *textDecoder) Decode(v *dto.MetricFamily) error {
+ // TODO(fabxc): Wrap this as a line reader to make streaming safer.
+ if len(d.fams) == 0 {
+ // No cached metric families, read everything and parse metrics.
+ fams, err := d.p.TextToMetricFamilies(d.r)
+ if err != nil {
+ return err
+ }
+ if len(fams) == 0 {
+ return io.EOF
+ }
+ d.fams = make([]*dto.MetricFamily, 0, len(fams))
+ for _, f := range fams {
+ d.fams = append(d.fams, f)
+ }
+ }
+
+ *v = *d.fams[0]
+ d.fams = d.fams[1:]
+
+ return nil
+}
+
+// SampleDecoder wraps a Decoder to extract samples from the metric families
+// decoded by the wrapped Decoder.
+type SampleDecoder struct {
+ Dec Decoder
+ Opts *DecodeOptions
+
+ f dto.MetricFamily
+}
+
+// Decode calls the Decode method of the wrapped Decoder and then extracts the
+// samples from the decoded MetricFamily into the provided model.Vector.
+func (sd *SampleDecoder) Decode(s *model.Vector) error {
+ err := sd.Dec.Decode(&sd.f)
+ if err != nil {
+ return err
+ }
+ *s, err = extractSamples(&sd.f, sd.Opts)
+ return err
+}
+
+// ExtractSamples builds a slice of samples from the provided metric
+// families. If an error occurs during sample extraction, it continues to
+// extract from the remaining metric families. The returned error is the last
+// error that has occurred.
+func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
+ var (
+ all model.Vector
+ lastErr error
+ )
+ for _, f := range fams {
+ some, err := extractSamples(f, o)
+ if err != nil {
+ lastErr = err
+ continue
+ }
+ all = append(all, some...)
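+ // Illustrative usage sketch for the decoding API above, not from the
+ // upstream file; the reader r and the model.Now() default timestamp are
+ // assumptions for illustration only:
+ //
+ //   dec := &expfmt.SampleDecoder{
+ //       Dec:  expfmt.NewDecoder(r, expfmt.FmtText),
+ //       Opts: &expfmt.DecodeOptions{Timestamp: model.Now()},
+ //   }
+ //   var all model.Vector
+ //   for {
+ //       var smpls model.Vector
+ //       if err := dec.Decode(&smpls); err == io.EOF {
+ //           break
+ //       } else if err != nil {
+ //           return err
+ //       }
+ //       all = append(all, smpls...)
+ //   }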
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. 
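+ // Illustrative note (metric name assumed): a histogram family
+ // "rpc_seconds" whose exposition carries only le="0.1" and le="0.5"
+ // buckets yields rpc_seconds_sum, rpc_seconds_count, one sample per
+ // bucket, and a synthesized rpc_seconds_bucket{le="+Inf"} equal to the
+ // sample count, so the bucket series is always complete.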
+ lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 00000000..11839ed6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. + ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. 
+func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 00000000..c71bcb98 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. +type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 00000000..dc2eedee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
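+
+// Illustrative sketch, not from the upstream files: serving metrics with the
+// negotiation and encoding helpers above; w, req, and families are
+// assumptions for illustration only.
+//
+//   format := expfmt.Negotiate(req.Header)
+//   w.Header().Set("Content-Type", string(format))
+//   enc := expfmt.NewEncoder(w, format)
+//   for _, mf := range families {
+//       if err := enc.Encode(mf); err != nil {
+//           http.Error(w, err.Error(), http.StatusInternalServerError)
+//           return
+//       }
+//   }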
+
+// Build only when actually fuzzing
+// +build gofuzz
+
+package expfmt
+
+import "bytes"
+
+// Fuzz text metric parser with github.com/dvyukov/go-fuzz:
+//
+// go-fuzz-build github.com/prometheus/common/expfmt
+// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz
+//
+// Further input samples should go in the folder fuzz/corpus.
+func Fuzz(in []byte) int {
+ parser := TextParser{}
+ _, err := parser.TextToMetricFamilies(bytes.NewReader(in))
+
+ if err != nil {
+ return 0
+ }
+
+ return 1
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/text_create.go
new file mode 100644
index 00000000..8e473d0f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/text_create.go
@@ -0,0 +1,468 @@
+// Copyright 2014 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package expfmt
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "math"
+ "strconv"
+ "strings"
+ "sync"
+
+ "github.com/prometheus/common/model"
+
+ dto "github.com/prometheus/client_model/go"
+)
+
+// enhancedWriter has all the enhanced write functions needed here. bytes.Buffer
+// implements it.
+type enhancedWriter interface {
+ io.Writer
+ WriteRune(r rune) (n int, err error)
+ WriteString(s string) (n int, err error)
+ WriteByte(c byte) error
+}
+
+const (
+ initialBufSize = 512
+ initialNumBufSize = 24
+)
+
+var (
+ bufPool = sync.Pool{
+ New: func() interface{} {
+ return bytes.NewBuffer(make([]byte, 0, initialBufSize))
+ },
+ }
+ numBufPool = sync.Pool{
+ New: func() interface{} {
+ b := make([]byte, 0, initialNumBufSize)
+ return &b
+ },
+ }
+)
+
+// MetricFamilyToText converts a MetricFamily proto message into text format and
+// writes the resulting lines to 'out'. It returns the number of bytes written
+// and any error encountered. The output will have the same order as the input;
+// no further sorting is performed. Furthermore, this function assumes the input
+// is already sanitized and does not perform any sanity checks. If the input
+// contains duplicate metrics or invalid metric or label names, the conversion
+// will result in invalid text format output.
+//
+// This method fulfills the type 'prometheus.encoder'.
+func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (written int, err error) {
+ // Fail-fast checks.
+ if len(in.Metric) == 0 {
+ return 0, fmt.Errorf("MetricFamily has no metrics: %s", in)
+ }
+ name := in.GetName()
+ if name == "" {
+ return 0, fmt.Errorf("MetricFamily has no name: %s", in)
+ }
+
+ // Try the interface upgrade. If it doesn't work, we'll use a
+ // bytes.Buffer from the sync.Pool and write out its content to out in a
+ // single go in the end.
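+ // Illustrative sketch of a typical call of this function, not from the
+ // upstream file; mf is an assumed *dto.MetricFamily:
+ //
+ //   var buf bytes.Buffer
+ //   if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   fmt.Print(buf.String())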
+ w, ok := out.(enhancedWriter) + if !ok { + b := bufPool.Get().(*bytes.Buffer) + b.Reset() + w = b + defer func() { + bWritten, bErr := out.Write(b.Bytes()) + written = bWritten + if err == nil { + err = bErr + } + bufPool.Put(b) + }() + } + + var n int + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err = w.WriteString("# HELP ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Help, false) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + n, err = w.WriteString("# TYPE ") + written += n + if err != nil { + return + } + n, err = w.WriteString(name) + written += n + if err != nil { + return + } + metricType := in.GetType() + switch metricType { + case dto.MetricType_COUNTER: + n, err = w.WriteString(" counter\n") + case dto.MetricType_GAUGE: + n, err = w.WriteString(" gauge\n") + case dto.MetricType_SUMMARY: + n, err = w.WriteString(" summary\n") + case dto.MetricType_UNTYPED: + n, err = w.WriteString(" untyped\n") + case dto.MetricType_HISTOGRAM: + n, err = w.WriteString(" histogram\n") + default: + return written, fmt.Errorf("unknown metric type %s", metricType.String()) + } + written += n + if err != nil { + return + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Counter.GetValue(), + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Gauge.GetValue(), + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + w, name, "", metric, "", 0, + metric.Untyped.GetValue(), + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + w, name, "", metric, + model.QuantileLabel, q.GetQuantile(), + q.GetValue(), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + metric.Summary.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Summary.GetSampleCount()), + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, b := range metric.Histogram.Bucket { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, b.GetUpperBound(), + float64(b.GetCumulativeCount()), + ) + written += n + if err != nil { + return + } + if math.IsInf(b.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + w, name, "_bucket", metric, + model.BucketLabel, math.Inf(+1), + float64(metric.Histogram.GetSampleCount()), + ) + written += n + if err != nil { + return + } + } + n, err = writeSample( + w, name, "_sum", metric, "", 0, + 
metric.Histogram.GetSampleSum(), + ) + written += n + if err != nil { + return + } + n, err = writeSample( + w, name, "_count", metric, "", 0, + float64(metric.Histogram.GetSampleCount()), + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return + } + } + return +} + +// writeSample writes a single sample in text format to w, given the metric +// name, the metric proto message itself, optionally an additional label name +// with a float64 value (use empty string as label name if not required), and +// the value. The function returns the number of bytes written and any error +// encountered. +func writeSample( + w enhancedWriter, + name, suffix string, + metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + value float64, +) (int, error) { + var written int + n, err := w.WriteString(name) + written += n + if err != nil { + return written, err + } + if suffix != "" { + n, err = w.WriteString(suffix) + written += n + if err != nil { + return written, err + } + } + n, err = writeLabelPairs( + w, metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeFloat(w, value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + n, err = writeInt(w, *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeLabelPairs converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'w'. An empty slice in combination with an empty +// string 'additionalLabelName' results in nothing being written. Otherwise, the +// label pairs are written, escaped as required by the text format, and enclosed +// in '{...}'. The function returns the number of bytes written and any error +// encountered. 
+func writeLabelPairs( + w enhancedWriter, + in []*dto.LabelPair, + additionalLabelName string, additionalLabelValue float64, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var ( + written int + separator byte = '{' + ) + for _, lp := range in { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(lp.GetName()) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeEscapedString(w, lp.GetValue(), true) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + err := w.WriteByte(separator) + written++ + if err != nil { + return written, err + } + n, err := w.WriteString(additionalLabelName) + written += n + if err != nil { + return written, err + } + n, err = w.WriteString(`="`) + written += n + if err != nil { + return written, err + } + n, err = writeFloat(w, additionalLabelValue) + written += n + if err != nil { + return written, err + } + err = w.WriteByte('"') + written++ + if err != nil { + return written, err + } + } + err := w.WriteByte('}') + written++ + if err != nil { + return written, err + } + return written, nil +} + +// writeEscapedString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +var ( + escaper = strings.NewReplacer("\\", `\\`, "\n", `\n`) + quotedEscaper = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +func writeEscapedString(w enhancedWriter, v string, includeDoubleQuote bool) (int, error) { + if includeDoubleQuote { + return quotedEscaper.WriteString(w, v) + } else { + return escaper.WriteString(w, v) + } +} + +// writeFloat is equivalent to fmt.Fprint with a float64 argument but hardcodes +// a few common cases for increased efficiency. For non-hardcoded cases, it uses +// strconv.AppendFloat to avoid allocations, similar to writeInt. +func writeFloat(w enhancedWriter, f float64) (int, error) { + switch { + case f == 1: + return 1, w.WriteByte('1') + case f == 0: + return 1, w.WriteByte('0') + case f == -1: + return w.WriteString("-1") + case math.IsNaN(f): + return w.WriteString("NaN") + case math.IsInf(f, +1): + return w.WriteString("+Inf") + case math.IsInf(f, -1): + return w.WriteString("-Inf") + default: + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendFloat((*bp)[:0], f, 'g', -1, 64) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err + } +} + +// writeInt is equivalent to fmt.Fprint with an int64 argument but uses +// strconv.AppendInt with a byte slice taken from a sync.Pool to avoid +// allocations. 
+func writeInt(w enhancedWriter, i int64) (int, error) { + bp := numBufPool.Get().(*[]byte) + *bp = strconv.AppendInt((*bp)[:0], i, 10) + written, err := w.Write(*bp) + numBufPool.Put(bp) + return written, err +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 00000000..ec3d86ba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. 
It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. +func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). +func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. 
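+ // Illustrative usage sketch for the public entry point above, not from
+ // the upstream file; the payload literal is an assumption:
+ //
+ //   var parser expfmt.TextParser
+ //   families, err := parser.TextToMetricFamilies(strings.NewReader(
+ //       "# TYPE requests_total counter\nrequests_total{code=\"200\"} 42\n"))
+ //   if err != nil {
+ //       log.Fatal(err)
+ //   }
+ //   fmt.Println(families["requests_total"].GetMetric()[0].GetCounter().GetValue()) // 42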
+ } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. +func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. + // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ }
+ if p.readTokenUntilWhitespace(); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)
+ if err != nil {
+ // Create a more helpful error message.
+ p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMetric.TimestampMs = proto.Int64(timestamp)
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ if p.currentToken.Len() > 0 {
+ p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String()))
+ return nil
+ }
+ return p.startOfLine
+}
+
+// readingHelp represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the docstring after 'HELP'.
+func (p *TextParser) readingHelp() stateFn {
+ if p.currentMF.Help != nil {
+ p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the docstring.
+ if p.readTokenUntilNewline(true); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ p.currentMF.Help = proto.String(p.currentToken.String())
+ return p.startOfLine
+}
+
+// readingType represents the state where the last byte read (now in
+// p.currentByte) is the first byte of the type hint after 'TYPE'.
+func (p *TextParser) readingType() stateFn {
+ if p.currentMF.Type != nil {
+ p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
+ return nil
+ }
+ // Rest of line is the type.
+ if p.readTokenUntilNewline(false); p.err != nil {
+ return nil // Unexpected end of input.
+ }
+ metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
+ if !ok {
+ p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
+ return nil
+ }
+ p.currentMF.Type = dto.MetricType(metricType).Enum()
+ return p.startOfLine
+}
+
+// parseError sets p.err to a ParseError at the current line with the given
+// message.
+func (p *TextParser) parseError(msg string) {
+ p.err = ParseError{
+ Line: p.lineCount,
+ Msg: msg,
+ }
+}
+
+// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
+// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
+func (p *TextParser) skipBlankTab() {
+ for {
+ if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
+ return
+ }
+ }
+}
+
+// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
+// anything if p.currentByte is neither ' ' nor '\t'.
+func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
+ if isBlankOrTab(p.currentByte) {
+ p.skipBlankTab()
+ }
+}
+
+// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
+// first byte considered is the byte already read (now in p.currentByte). The
+// first whitespace byte encountered is still copied into p.currentByte, but not
+// into p.currentToken.
+func (p *TextParser) readTokenUntilWhitespace() {
+ p.currentToken.Reset()
+ for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
+ p.currentToken.WriteByte(p.currentByte)
+ p.currentByte, p.err = p.buf.ReadByte()
+ }
+}
+
+// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
+// byte considered is the byte already read (now in p.currentByte). The first
+// newline byte encountered is still copied into p.currentByte, but not into
+// p.currentToken.
If recognizeEscapeSequence is true, two escape sequences are +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. +func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { + p.currentToken.Reset() + escaped := false + for p.err == nil { + if recognizeEscapeSequence && escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '\n': + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } + p.currentByte, p.err = p.buf.ReadByte() + } +} + +// readTokenAsMetricName copies a metric name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a metric name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. +func (p *TextParser) readTokenAsLabelValue() { + p.currentToken.Reset() + escaped := false + for { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return + } + if escaped { + switch p.currentByte { + case '"', '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + continue + } + switch p.currentByte { + case '"': + return + case '\n': + p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } +} + +func (p *TextParser) setOrCreateCurrentMF() { + p.currentIsSummaryCount = false + p.currentIsSummarySum = false + p.currentIsHistogramCount = false + p.currentIsHistogramSum = false + name := p.currentToken.String() + if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil { + return + } + // Try out if this is a _sum or _count for a summary/histogram. 
+ summaryName := summaryMetricName(name) + if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if isCount(name) { + p.currentIsSummaryCount = true + } + if isSum(name) { + p.currentIsSummarySum = true + } + return + } + } + histogramName := histogramMetricName(name) + if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil { + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if isCount(name) { + p.currentIsHistogramCount = true + } + if isSum(name) { + p.currentIsHistogramSum = true + } + return + } + } + p.currentMF = &dto.MetricFamily{Name: proto.String(name)} + p.metricFamiliesByName[name] = p.currentMF +} + +func isValidLabelNameStart(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' +} + +func isValidLabelNameContinuation(b byte) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +} + +func isValidMetricNameStart(b byte) bool { + return isValidLabelNameStart(b) || b == ':' +} + +func isValidMetricNameContinuation(b byte) bool { + return isValidLabelNameContinuation(b) || b == ':' +} + +func isBlankOrTab(b byte) bool { + return b == ' ' || b == '\t' +} + +func isCount(name string) bool { + return len(name) > 6 && name[len(name)-6:] == "_count" +} + +func isSum(name string) bool { + return len(name) > 4 && name[len(name)-4:] == "_sum" +} + +func isBucket(name string) bool { + return len(name) > 7 && name[len(name)-7:] == "_bucket" +} + +func summaryMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + default: + return name + } +} + +func histogramMetricName(name string) string { + switch { + case isCount(name): + return name[:len(name)-6] + case isSum(name): + return name[:len(name)-4] + case isBucket(name): + return name[:len(name)-7] + default: + return name + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 00000000..7723656d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 00000000..26e92288 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. +func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 00000000..35e739c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
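Before leaving the goautoneg package added above, a minimal usage sketch of Negotiate; the Accept header and alternatives are invented, and the import path shown is the canonical upstream one rather than the vendored internal path:

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	header := "text/html;q=0.9, application/json, */*;q=0.1"

	// Clauses are sorted by quality factor and specificity, so the first
	// alternative matching the best clause wins.
	ct := goautoneg.Negotiate(header, []string{"text/plain", "application/json"})
	fmt.Println(ct) // application/json: its implicit q=1.0 beats text/html's q=0.9
}
```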
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"time"
+)
+
+type AlertStatus string
+
+const (
+	AlertFiring   AlertStatus = "firing"
+	AlertResolved AlertStatus = "resolved"
+)
+
+// Alert is a generic representation of an alert in the Prometheus eco-system.
+type Alert struct {
+	// Label value pairs for purpose of aggregation, matching, and disposition
+	// dispatching. This must minimally include an "alertname" label.
+	Labels LabelSet `json:"labels"`
+
+	// Extra key/value information which does not define alert identity.
+	Annotations LabelSet `json:"annotations"`
+
+	// The known time range for this alert. Both ends are optional.
+	StartsAt     time.Time `json:"startsAt,omitempty"`
+	EndsAt       time.Time `json:"endsAt,omitempty"`
+	GeneratorURL string    `json:"generatorURL"`
+}
+
+// Name returns the name of the alert. It is equivalent to the "alertname" label.
+func (a *Alert) Name() string {
+	return string(a.Labels[AlertNameLabel])
+}
+
+// Fingerprint returns a unique hash for the alert. It is equivalent to
+// the fingerprint of the alert's label set.
+func (a *Alert) Fingerprint() Fingerprint {
+	return a.Labels.Fingerprint()
+}
+
+func (a *Alert) String() string {
+	s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7])
+	if a.Resolved() {
+		return s + "[resolved]"
+	}
+	return s + "[active]"
+}
+
+// Resolved returns true iff the activity interval ended in the past.
+func (a *Alert) Resolved() bool {
+	return a.ResolvedAt(time.Now())
+}
+
+// ResolvedAt returns true iff the activity interval ended before
+// the given timestamp.
+func (a *Alert) ResolvedAt(ts time.Time) bool {
+	if a.EndsAt.IsZero() {
+		return false
+	}
+	return !a.EndsAt.After(ts)
+}
+
+// Status returns the status of the alert.
+func (a *Alert) Status() AlertStatus {
+	if a.Resolved() {
+		return AlertResolved
+	}
+	return AlertFiring
+}
+
+// Validate checks whether the alert data is consistent.
+func (a *Alert) Validate() error {
+	if a.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if err := a.Labels.Validate(); err != nil {
+		return fmt.Errorf("invalid label set: %s", err)
+	}
+	if len(a.Labels) == 0 {
+		return fmt.Errorf("at least one label pair required")
+	}
+	if err := a.Annotations.Validate(); err != nil {
+		return fmt.Errorf("invalid annotations: %s", err)
+	}
+	return nil
+}
+
+// Alerts is a list of alerts that can be sorted in chronological order.
+type Alerts []*Alert
+
+func (as Alerts) Len() int      { return len(as) }
+func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }
+
+func (as Alerts) Less(i, j int) bool {
+	if as[i].StartsAt.Before(as[j].StartsAt) {
+		return true
+	}
+	if as[i].EndsAt.Before(as[j].EndsAt) {
+		return true
+	}
+	return as[i].Fingerprint() < as[j].Fingerprint()
+}
+
+// HasFiring returns true iff one of the alerts is not resolved.
+func (as Alerts) HasFiring() bool {
+	for _, a := range as {
+		if !a.Resolved() {
+			return true
+		}
+	}
+	return false
+}
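As a quick illustration of the alert lifecycle encoded above, a hypothetical sketch (alert names and timestamps are invented; the canonical upstream import path is assumed): an alert with a zero EndsAt is still firing, and an EndsAt in the past resolves it.

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	a := &model.Alert{
		Labels:   model.LabelSet{"alertname": "HighLatency", "job": "api"},
		StartsAt: time.Now().Add(-10 * time.Minute),
	}
	fmt.Println(a.Status()) // firing: EndsAt is zero, so the alert is unresolved

	a.EndsAt = time.Now().Add(-time.Minute)
	fmt.Println(a.Status()) // resolved: the activity interval ended in the past

	as := model.Alerts{a}
	fmt.Println(as.Status()) // resolved: no alert in the list is still firing
}
```

+
+// Status returns AlertFiring iff at least one of the alerts is firing.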
+func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 00000000..fc4de410 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. +func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. 
+func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {
+	myLength, otherLength := len(s), len(o)
+	if myLength == 0 || otherLength == 0 {
+		return FingerprintSet{}
+	}
+
+	subSet := s
+	superSet := o
+
+	if otherLength < myLength {
+		subSet = o
+		superSet = s
+	}
+
+	out := FingerprintSet{}
+
+	for k := range subSet {
+		if _, ok := superSet[k]; ok {
+			out[k] = struct{}{}
+		}
+	}
+
+	return out
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/fnv.go
new file mode 100644
index 00000000..038fc1c9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/fnv.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+// Inline and byte-free variant of hash/fnv's fnv64a.
+
+const (
+	offset64 = 14695981039346656037
+	prime64  = 1099511628211
+)
+
+// hashNew initializes a new fnv64a hash value.
+func hashNew() uint64 {
+	return offset64
+}
+
+// hashAdd adds a string to a fnv64a hash value, returning the updated hash.
+func hashAdd(h uint64, s string) uint64 {
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= prime64
+	}
+	return h
+}
+
+// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.
+func hashAddByte(h uint64, b byte) uint64 {
+	h ^= uint64(b)
+	h *= prime64
+	return h
+}
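The fnv.go helpers above are unexported; as a rough standalone sketch of the same FNV-1a folding (constants copied from the file, `hash` is an invented name, not part of the package API):

```go
package main

import "fmt"

const (
	offset64 = 14695981039346656037 // FNV-1a 64-bit offset basis
	prime64  = 1099511628211        // FNV-1a 64-bit prime
)

// hash mirrors hashNew followed by hashAdd above.
func hash(s string) uint64 {
	h := uint64(offset64)
	for i := 0; i < len(s); i++ {
		h ^= uint64(s[i])
		h *= prime64
	}
	return h
}

func main() {
	// Identical input always folds to the same 64-bit value.
	fmt.Printf("%016x\n", hash("alertname"))
}
```

diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/labels.go
new file mode 100644
index 00000000..41051a01
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/labels.go
@@ -0,0 +1,210 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode/utf8"
+)
+
+const (
+	// AlertNameLabel is the name of the label containing an alert's name.
+	AlertNameLabel = "alertname"
+
+	// ExportedLabelPrefix is the prefix to prepend to the label names present in
+	// exported metrics if a label of the same name is added by the server.
+	ExportedLabelPrefix = "exported_"
+
+	// MetricNameLabel is the label name indicating the metric name of a
+	// timeseries.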
+	MetricNameLabel = "__name__"
+
+	// SchemeLabel is the name of the label that holds the scheme on which to
+	// scrape a target.
+	SchemeLabel = "__scheme__"
+
+	// AddressLabel is the name of the label that holds the address of
+	// a scrape target.
+	AddressLabel = "__address__"
+
+	// MetricsPathLabel is the name of the label that holds the path on which to
+	// scrape a target.
+	MetricsPathLabel = "__metrics_path__"
+
+	// ReservedLabelPrefix is a prefix which is not legal in user-supplied
+	// label names.
+	ReservedLabelPrefix = "__"
+
+	// MetaLabelPrefix is a prefix for labels that provide meta information.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series.
+	MetaLabelPrefix = "__meta_"
+
+	// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.
+	// Labels with this prefix are used for intermediate label processing and
+	// will not be attached to time series. This is reserved for use in
+	// Prometheus configuration files by users.
+	TmpLabelPrefix = "__tmp_"
+
+	// ParamLabelPrefix is a prefix for labels that provide URL parameters
+	// used to scrape a target.
+	ParamLabelPrefix = "__param_"
+
+	// JobLabel is the label name indicating the job from which a timeseries
+	// was scraped.
+	JobLabel = "job"
+
+	// InstanceLabel is the label name used for the instance label.
+	InstanceLabel = "instance"
+
+	// BucketLabel is used for the label that defines the upper bound of a
+	// bucket of a histogram ("le" -> "less or equal").
+	BucketLabel = "le"
+
+	// QuantileLabel is used for the label that defines the quantile in a
+	// summary.
+	QuantileLabel = "quantile"
+)
+
+// LabelNameRE is a regular expression matching valid label names. Note that the
+// IsValid method of LabelName performs the same check but faster than a match
+// with this regular expression.
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+// A LabelName is a key for a LabelSet or Metric. It has a value associated
+// therewith.
+type LabelName string
+
+// IsValid is true iff the label name matches the pattern of LabelNameRE. This
+// method, however, does not use LabelNameRE for the check but a much faster
+// hardcoded implementation.
+func (ln LabelName) IsValid() bool {
+	if len(ln) == 0 {
+		return false
+	}
+	for i, b := range ln {
+		if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {
+			return false
+		}
+	}
+	return true
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (ln *LabelName) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+	if !LabelName(s).IsValid() {
+		return fmt.Errorf("%q is not a valid label name", s)
+	}
+	*ln = LabelName(s)
+	return nil
+}
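A small sanity check of the label-name rules above; the hand-rolled IsValid fast path and the exported LabelNameRE should always agree (a throwaway sketch, not part of the package; canonical import path assumed):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	for _, s := range []string{"job", "_hidden", "0bad", "label-name", "le"} {
		ln := model.LabelName(s)
		// The hardcoded check and the regexp must give the same answer.
		fmt.Printf("%-14q IsValid=%-5v regexp=%v\n",
			s, ln.IsValid(), model.LabelNameRE.MatchString(s))
	}
}
```

+
+// LabelNames is a sortable LabelName slice. It implements sort.Interface.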
+type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. +func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 00000000..6eda08a7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. 
+func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. +func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 00000000..00804b7f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,102 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
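A short sketch of the LabelSet operations defined above (label values invented; canonical import path assumed):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	base := model.LabelSet{"job": "api"}
	extra := model.LabelSet{"instance": "host:9090", "job": "worker"}

	// Merge is non-destructive; on conflict the argument's value wins.
	merged := base.Merge(extra)
	fmt.Println(merged) // {instance="host:9090", job="worker"}
	fmt.Println(base)   // {job="api"}, unchanged

	// Before defines a total order; fewer labels sort first.
	fmt.Println(base.Before(merged)) // true
}
```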
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. +func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 00000000..a7b96917 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
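To round off metric.go above, a brief sketch of Metric's String formatting and the fast metric-name check (sample values invented; canonical import path assumed):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	m := model.Metric{
		"__name__": "http_requests_total",
		"job":      "api",
		"code":     "200",
	}
	// The metric name is printed bare; the remaining labels are sorted.
	fmt.Println(m) // http_requests_total{code="200", job="api"}

	fmt.Println(model.IsValidMetricName("http_requests_total")) // true
	fmt.Println(model.IsValidMetricName("2xx_rate"))            // false: leading digit
}
```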
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 00000000..8762b13c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) +func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. 
Therefore, collision detection should be applied.
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint {
+	if len(ls) == 0 {
+		return Fingerprint(emptyLabelSignature)
+	}
+
+	var result uint64
+	for labelName, labelValue := range ls {
+		sum := hashNew()
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(labelValue))
+		result ^= sum
+	}
+	return Fingerprint(result)
+}
+
+// SignatureForLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and only includes the labels with the
+// specified LabelNames into the signature calculation. The labels passed in
+// will be sorted by this function.
+func SignatureForLabels(m Metric, labels ...LabelName) uint64 {
+	if len(labels) == 0 {
+		return emptyLabelSignature
+	}
+
+	sort.Sort(LabelNames(labels))
+
+	sum := hashNew()
+	for _, label := range labels {
+		sum = hashAdd(sum, string(label))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[label]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
+
+// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as
+// parameter (rather than a label map) and excludes the labels with any of the
+// specified LabelNames from the signature calculation.
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {
+	if len(m) == 0 {
+		return emptyLabelSignature
+	}
+
+	labelNames := make(LabelNames, 0, len(m))
+	for labelName := range m {
+		if _, exclude := labels[labelName]; !exclude {
+			labelNames = append(labelNames, labelName)
+		}
+	}
+	if len(labelNames) == 0 {
+		return emptyLabelSignature
+	}
+	sort.Sort(labelNames)
+
+	sum := hashNew()
+	for _, labelName := range labelNames {
+		sum = hashAdd(sum, string(labelName))
+		sum = hashAddByte(sum, SeparatorByte)
+		sum = hashAdd(sum, string(m[labelName]))
+		sum = hashAddByte(sum, SeparatorByte)
+	}
+	return sum
+}
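To make the signature machinery above concrete, a hypothetical usage sketch of the two exported entry points (labels invented; canonical import path assumed):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	labels := map[string]string{"job": "api", "instance": "host:9090"}

	// Label names are sorted before hashing, so the result is
	// independent of map iteration order.
	fmt.Println(model.LabelsToSignature(labels))

	// Restrict the signature to a chosen subset of label names.
	m := model.Metric{"job": "api", "instance": "host:9090", "le": "0.5"}
	fmt.Println(model.SignatureForLabels(m, "job", "instance"))
}
```

diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/silence.go
new file mode 100644
index 00000000..bb99889d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/silence.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"time"
+)
+
+// Matcher describes a matcher that matches the value of a given label.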
+type Matcher struct {
+	Name    LabelName `json:"name"`
+	Value   string    `json:"value"`
+	IsRegex bool      `json:"isRegex"`
+}
+
+func (m *Matcher) UnmarshalJSON(b []byte) error {
+	type plain Matcher
+	if err := json.Unmarshal(b, (*plain)(m)); err != nil {
+		return err
+	}
+
+	if len(m.Name) == 0 {
+		return fmt.Errorf("label name in matcher must not be empty")
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Validate returns nil iff all fields of the matcher have valid values.
+func (m *Matcher) Validate() error {
+	if !m.Name.IsValid() {
+		return fmt.Errorf("invalid name %q", m.Name)
+	}
+	if m.IsRegex {
+		if _, err := regexp.Compile(m.Value); err != nil {
+			return fmt.Errorf("invalid regular expression %q", m.Value)
+		}
+	} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {
+		return fmt.Errorf("invalid value %q", m.Value)
+	}
+	return nil
+}
+
+// Silence defines the representation of a silence definition in the Prometheus
+// eco-system.
+type Silence struct {
+	ID uint64 `json:"id,omitempty"`
+
+	Matchers []*Matcher `json:"matchers"`
+
+	StartsAt time.Time `json:"startsAt"`
+	EndsAt   time.Time `json:"endsAt"`
+
+	CreatedAt time.Time `json:"createdAt,omitempty"`
+	CreatedBy string    `json:"createdBy"`
+	Comment   string    `json:"comment,omitempty"`
+}
+
+// Validate returns nil iff all fields of the silence have valid values.
+func (s *Silence) Validate() error {
+	if len(s.Matchers) == 0 {
+		return fmt.Errorf("at least one matcher required")
+	}
+	for _, m := range s.Matchers {
+		if err := m.Validate(); err != nil {
+			return fmt.Errorf("invalid matcher: %s", err)
+		}
+	}
+	if s.StartsAt.IsZero() {
+		return fmt.Errorf("start time missing")
+	}
+	if s.EndsAt.IsZero() {
+		return fmt.Errorf("end time missing")
+	}
+	if s.EndsAt.Before(s.StartsAt) {
+		return fmt.Errorf("start time must be before end time")
+	}
+	if s.CreatedBy == "" {
+		return fmt.Errorf("creator information missing")
+	}
+	if s.Comment == "" {
+		return fmt.Errorf("comment missing")
+	}
+	if s.CreatedAt.IsZero() {
+		return fmt.Errorf("creation timestamp missing")
+	}
+	return nil
+}
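A quick sketch of the Matcher/Silence validation above (field values invented; canonical import path assumed):

```go
package main

import (
	"fmt"
	"time"

	"github.com/prometheus/common/model"
)

func main() {
	s := &model.Silence{
		Matchers:  []*model.Matcher{{Name: "alertname", Value: "HighLatency"}},
		StartsAt:  time.Now(),
		EndsAt:    time.Now().Add(2 * time.Hour),
		CreatedAt: time.Now(),
		CreatedBy: "ops@example.com",
		Comment:   "planned maintenance",
	}
	fmt.Println(s.Validate()) // <nil>: every required field is set

	s.Comment = ""
	fmt.Println(s.Validate()) // comment missing
}
```

diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/time.go
new file mode 100644
index 00000000..7b0064fd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/time.go
@@ -0,0 +1,270 @@
+// Copyright 2013 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package model
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+const (
+	// MinimumTick is the minimum supported time resolution. This has to be
+	// at most time.Second in order for the code below to work.
+	minimumTick = time.Millisecond
+	// second is the Time duration equivalent to one second.
+	second = int64(time.Second / minimumTick)
+	// The number of nanoseconds per minimum tick.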
+ nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. + Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes an interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. +var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + // If the value was something like -0.1 the negative is lost in the + // parsing because of the leading zero, this ensures that we capture it. + if len(p[0]) > 0 && p[0][0] == '-' && v+va > 0 { + *t = Time(v+va) * -1 + } else { + *t = Time(v + va) + } + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. 
+// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. +func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 00000000..c9d8fb1a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
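The custom duration format above deserves a concrete illustration: ParseDuration accepts exactly one integer plus one unit, and String picks the largest unit that divides the value evenly (a hypothetical check; canonical import path assumed):

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/model"
)

func main() {
	d, _ := model.ParseDuration("90m")
	fmt.Println(d) // 90m: not a whole number of hours

	d2, _ := model.ParseDuration("120m")
	fmt.Println(d2) // 2h: normalized to the largest evenly dividing unit

	// Compound strings are rejected by the single-unit regexp.
	if _, err := model.ParseDuration("1h30m"); err != nil {
		fmt.Println(err) // not a valid duration string: "1h30m"
	}
}
```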
+
+package model
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+var (
+	// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a
+	// non-existing sample pair. It is a SamplePair with timestamp Earliest and
+	// value 0.0. Note that the natural zero value of SamplePair has a timestamp
+	// of 0, which can appear in a real SamplePair and is thus not suitable
+	// for signaling a non-existing SamplePair.
+	ZeroSamplePair = SamplePair{Timestamp: Earliest}
+
+	// ZeroSample is the pseudo zero-value of Sample used to signal a
+	// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,
+	// and metric nil. Note that the natural zero value of Sample has a timestamp
+	// of 0, which can appear in a real Sample and is thus not suitable
+	// for signaling a non-existing Sample.
+	ZeroSample = Sample{Timestamp: Earliest}
+)
+
+// A SampleValue is a representation of a value for a given sample at a given
+// time.
+type SampleValue float64
+
+// MarshalJSON implements json.Marshaler.
+func (v SampleValue) MarshalJSON() ([]byte, error) {
+	return json.Marshal(v.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (v *SampleValue) UnmarshalJSON(b []byte) error {
+	if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' {
+		return fmt.Errorf("sample value must be a quoted string")
+	}
+	f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)
+	if err != nil {
+		return err
+	}
+	*v = SampleValue(f)
+	return nil
+}
+
+// Equal returns true if the value of v and o is equal or if both are NaN. Note
+// that v==o is false if both are NaN. If you want the conventional float
+// behavior, use == to compare two SampleValues.
+func (v SampleValue) Equal(o SampleValue) bool {
+	if v == o {
+		return true
+	}
+	return math.IsNaN(float64(v)) && math.IsNaN(float64(o))
+}
+
+func (v SampleValue) String() string {
+	return strconv.FormatFloat(float64(v), 'f', -1, 64)
+}
+
+// SamplePair pairs a SampleValue with a Timestamp.
+type SamplePair struct {
+	Timestamp Time
+	Value     SampleValue
+}
+
+// MarshalJSON implements json.Marshaler.
+func (s SamplePair) MarshalJSON() ([]byte, error) {
+	t, err := json.Marshal(s.Timestamp)
+	if err != nil {
+		return nil, err
+	}
+	v, err := json.Marshal(s.Value)
+	if err != nil {
+		return nil, err
+	}
+	return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (s *SamplePair) UnmarshalJSON(b []byte) error {
+	v := [...]json.Unmarshaler{&s.Timestamp, &s.Value}
+	return json.Unmarshal(b, &v)
+}
+
+// Equal returns true if this SamplePair and o have equal Values and equal
+// Timestamps. The semantics of Value equality is defined by SampleValue.Equal.
+func (s *SamplePair) Equal(o *SamplePair) bool {
+	return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))
+}
+
+func (s SamplePair) String() string {
+	return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp)
+}
+
+// Sample is a sample pair associated with a metric.
+type Sample struct {
+	Metric    Metric      `json:"metric"`
+	Value     SampleValue `json:"value"`
+	Timestamp Time        `json:"timestamp"`
+}
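One subtlety in SampleValue above is worth spelling out: Equal treats two NaNs as equal, unlike the == operator (a tiny demonstration; canonical import path assumed):

```go
package main

import (
	"fmt"
	"math"

	"github.com/prometheus/common/model"
)

func main() {
	nan := model.SampleValue(math.NaN())

	fmt.Println(nan == nan)     // false: NaN is never == NaN
	fmt.Println(nan.Equal(nan)) // true: Equal special-cases NaN

	fmt.Println(model.SampleValue(1).Equal(1)) // true: ordinary values use ==
}
```

+
+// Equal compares first the metrics, then the timestamp, then the value. The
+// semantics of value equality is defined by SampleValue.Equal.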
+func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. +func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. 
+func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. +type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. +func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. 
+type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/elastic/beats/vendor/github.com/stretchr/objx/Taskfile.yml deleted file mode 100644 index a3c52236..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/stretchr/objx/Taskfile.yml +++ /dev/null @@ -1,30 +0,0 @@ -default: - deps: [test] - -update-deps: - desc: Updates dependencies - cmds: - - dep ensure - - dep ensure -update - -lint: - desc: Checks code style - cmds: - - gofmt -d -s *.go - - go vet . - silent: true - -lint-fix: - desc: Fixes code style - cmds: - - gofmt -w -s *.go - -test: - desc: Runs go tests - cmds: - - go test -race . - -test-coverage: - desc: Runs go tests and calucates test coverage - cmds: - - go test -coverprofile=c.out . diff --git a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/appveyor.yml b/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/appveyor.yml deleted file mode 100644 index 2b2d603f..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/theckman/go-flock/appveyor.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: '{build}' - -build: false -deploy: false - -clone_folder: 'c:\gopath\src\github.com\theckman\go-flock' - -environment: - GOPATH: 'c:\gopath' - GOVERSION: '1.9.2' - -init: - - git config --global core.autocrlf input - -install: - - rmdir c:\go /s /q - - appveyor DownloadFile https://storage.googleapis.com/golang/go%GOVERSION%.windows-amd64.msi - - msiexec /i go%GOVERSION%.windows-amd64.msi /q - - set Path=c:\go\bin;c:\gopath\bin;%Path% - - go version - - go env - -test_script: - - go get -t ./... - - go test -v ./... diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.yml b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.yml deleted file mode 100644 index 416b225d..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin.yml +++ /dev/null @@ -1,53 +0,0 @@ -import: - - 'types.yml' - -main: | - package bin - - import "encoding/binary" - - {{ range $enc, $long := data.endianess }} - {{ range $signdness, $long := data.signdness }} - {{ range $k, $bits := data.bits }} - {{ invoke "makeBinType" "bits" $bits "name" $signdness "enc" $enc }} - {{ end }} - {{ end }} - {{ end }} - -# makeBinType(bits, name, enc) -templates.makeBinType: | - {{ $bits := .bits }} - {{ $len := div $bits 8 }} - {{ $name := .name }} - {{ $enc := .enc }} - {{ $endianess := index data.endianess $enc }} - {{ $inst := capitalize $endianess | printf "%vEndian" }} - {{ $signdness := index data.signdness $name }} - {{ $gotype := printf "%v%v" (index data.baseType $name) $bits }} - {{ $accessor := printf "Uint%v" $bits }} - - {{ $type := printf "%v%v%v" (capitalize $name) $bits $enc }} - - // {{ $type }} wraps a byte array into a {{ $endianess }} endian encoded {{ $bits }}bit {{ $signdness }} integer. - type {{ $type }} [{{ $len }}]byte - - // Len returns the number of bytes required to store the value. - func (b *{{ $type }}) Len() int { return {{ $len }} } - - // Get returns the decoded value. 
- func (b *{{ $type }}) Get() {{ $gotype }} { - {{- if (eq $bits 8) }} - return {{ $gotype }}(b[0]) - {{ else }} - return {{ $gotype }}(binary.{{ $inst }}.{{ $accessor }}(b[:])) - {{ end -}} - } - - // Set encodes a new value into the backing buffer: - func (b *{{ $type }}) Set(v {{ $gotype }}) { - {{- if (eq $bits 8) }} - b[0] = byte(v) - {{ else }} - binary.{{ $inst }}.Put{{ $accessor }}(b[:], uint{{ $bits }}(v)) - {{ end -}} - } diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin_test.yml b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin_test.yml deleted file mode 100644 index 4da772af..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/bin_test.yml +++ /dev/null @@ -1,53 +0,0 @@ -import: - - 'types.yml' - -main: | - package bin - - func TestPrimitives(t *testing.T) { - {{ range $enc, $long := data.endianess }} - {{ range $signdness, $long := data.signdness }} - {{ range $k, $bits := data.bits }} - {{ invoke "makeBinType" "bits" $bits "name" $signdness "enc" $enc }} - {{ end }} - {{ end }} - {{ end }} - } - -templates.makeBinType: | - {{ $bits := .bits }} - {{ $len := div $bits 8 }} - {{ $name := .name }} - {{ $enc := .enc }} - {{ $endianess := index data.endianess $enc }} - {{ $inst := capitalize $endianess | printf "%vEndian" }} - {{ $signdness := index data.signdness $name }} - {{ $gotype := printf "%v%v" (index data.baseType $name) $bits }} - {{ $accessor := printf "Uint%v" $bits }} - - {{ $type := printf "%v%v%v" (capitalize $name) $bits $enc }} - - t.Run("{{ $gotype }} {{ $endianess }} endian", func(t *testing.T) { - var v {{ $type }} - err := quick.Check(func(in {{ $gotype }}) bool { - v.Set(in) - - // check raw contents correct encoding - tmp := make([]byte, v.Len()) - {{ if (eq $bits 8) }} - tmp[0] = byte(in) - {{ else }} - binary.{{ $inst }}.Put{{ $accessor }}(tmp, uint{{ $bits }}(in)) - {{ end }} - if !bytes.Equal(v[:], tmp) { - t.Error("encoding mismatch") - return false - } - - // check extracted value matches original value - return v.Get() == in - }, nil) - if err != nil { - t.Error(err) - } - }) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/types.yml b/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/types.yml deleted file mode 100644 index 0ef2ce20..00000000 --- a/vendor/github.com/elastic/beats/vendor/github.com/urso/go-bin/types.yml +++ /dev/null @@ -1,6 +0,0 @@ -data: - bits: [8, 16, 32, 64] - endianess: {le: little, be: big} - signdness: {i: signed, u: unsigned} - baseType: {i: int, u: uint} - diff --git a/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt b/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt new file mode 100644 index 00000000..c72edb19 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016-2018 Simone Carletti + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the 
Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go b/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go
new file mode 100644
index 00000000..16c30ae0
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/publicsuffix/publicsuffix.go
@@ -0,0 +1,541 @@
+//go:generate go run ../cmd/gen/gen.go
+
+// Package publicsuffix provides a domain name parser
+// based on data from the public suffix list http://publicsuffix.org/.
+// A public suffix is one under which Internet users can directly register names.
+package publicsuffix
+
+import (
+    "bufio"
+    "fmt"
+    "io"
+    "net/http/cookiejar"
+    "os"
+    "strings"
+
+    "golang.org/x/net/idna"
+)
+
+const (
+    // Version identifies the current library version.
+    // This is a pro forma convention given that Go dependencies
+    // tend to be fetched directly from the repo.
+    Version = "0.10.0"
+
+    NormalType    = 1
+    WildcardType  = 2
+    ExceptionType = 3
+
+    listTokenPrivateDomains = "===BEGIN PRIVATE DOMAINS==="
+    listTokenComment        = "//"
+)
+
+// DefaultList is the default List and it is used by Parse and Domain.
+var DefaultList = NewList()
+
+// DefaultRule is the default Rule that represents "*".
+var DefaultRule = MustNewRule("*")
+
+// DefaultParserOptions are the default options used to parse a Public Suffix list.
+var DefaultParserOptions = &ParserOption{PrivateDomains: true, ASCIIEncoded: false}
+
+// DefaultFindOptions are the default options used to perform the lookup of rules in the list.
+var DefaultFindOptions = &FindOptions{IgnorePrivate: false, DefaultRule: DefaultRule}
+
+// Rule represents a single rule in a Public Suffix List.
+type Rule struct {
+    Type    int
+    Value   string
+    Length  int
+    Private bool
+}
+
+// ParserOption are the options you can use to customize the way a List
+// is parsed from a file or a string.
+type ParserOption struct {
+    // Set to false to skip the private domains when parsing.
+    // Defaults to true, which means the private domains are included.
+    PrivateDomains bool
+
+    // Set to false if the input is encoded in U-labels (Unicode)
+    // as opposed to A-labels.
+    // Defaults to false, which means the list contains Unicode domains.
+    // This is the default because the original PSL currently contains Unicode.
+    ASCIIEncoded bool
+}
+
+// FindOptions are the options you can use to customize the way a Rule
+// is searched within the list.
+type FindOptions struct {
+    // Set to true to ignore the rules within the "Private" section of the Public Suffix List.
+    IgnorePrivate bool
+
+    // The default rule to use when no rule matches the input.
+    // The formal Public Suffix algorithm states that the rule "*" should be used when no other rule matches,
+    // but some consumers may have different needs.
+    DefaultRule *Rule
+}
+
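+// NOTE (editorial): the commented sketch below is illustrative only and is
+// not part of the upstream publicsuffix-go sources. It shows how ParserOption
+// and FindOptions (defined above) combine when loading and querying a custom
+// list; the one-rule source string "com" is a hypothetical example.
+//
+//    list, err := NewListFromString("com", &ParserOption{PrivateDomains: false})
+//    if err != nil {
+//        // handle the malformed-source error
+//    }
+//    rule := list.Find("example.com", &FindOptions{IgnorePrivate: true, DefaultRule: DefaultRule})
+//    // rule.Value == "com", rule.Type == NormalType
+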
+// List represents a Public Suffix List.
+type List struct {
+    // rules is kept private because you should not access rules directly
+    rules map[string]*Rule
+}
+
+// NewList creates a new empty list.
+func NewList() *List {
+    return &List{
+        rules: map[string]*Rule{},
+    }
+}
+
+// NewListFromString parses a string that represents a Public Suffix source
+// and returns a List initialized with the rules in the source.
+func NewListFromString(src string, options *ParserOption) (*List, error) {
+    l := NewList()
+    _, err := l.LoadString(src, options)
+    return l, err
+}
+
+// NewListFromFile parses a file that contains a Public Suffix source
+// and returns a List initialized with the rules in the source.
+func NewListFromFile(path string, options *ParserOption) (*List, error) {
+    l := NewList()
+    _, err := l.LoadFile(path, options)
+    return l, err
+}
+
+// Load parses and loads a set of rules from an io.Reader into the current list.
+func (l *List) Load(r io.Reader, options *ParserOption) ([]Rule, error) {
+    return l.parse(r, options)
+}
+
+// LoadString parses and loads a set of rules from a string into the current list.
+func (l *List) LoadString(src string, options *ParserOption) ([]Rule, error) {
+    r := strings.NewReader(src)
+    return l.parse(r, options)
+}
+
+// LoadFile parses and loads a set of rules from a file into the current list.
+func (l *List) LoadFile(path string, options *ParserOption) ([]Rule, error) {
+    f, err := os.Open(path)
+    if err != nil {
+        return nil, err
+    }
+    defer f.Close()
+    return l.parse(f, options)
+}
+
+// AddRule adds a new rule to the list.
+//
+// The exact position of the rule in the list is unpredictable.
+// The list may be optimized internally for lookups, therefore the algorithm
+// will decide the best position for the new rule.
+func (l *List) AddRule(r *Rule) error {
+    l.rules[r.Value] = r
+    return nil
+}
+
+// Size returns the size of the list, which is the number of rules.
+func (l *List) Size() int {
+    return len(l.rules)
+}
+
+func (l *List) parse(r io.Reader, options *ParserOption) ([]Rule, error) {
+    if options == nil {
+        options = DefaultParserOptions
+    }
+    var rules []Rule
+
+    scanner := bufio.NewScanner(r)
+    var section int // 1 == ICANN, 2 == PRIVATE
+
+Scanning:
+    for scanner.Scan() {
+        line := strings.TrimSpace(scanner.Text())
+        switch {
+
+        // skip blank lines
+        case line == "":
+            break
+
+        // include private domains or stop scanner
+        case strings.Contains(line, listTokenPrivateDomains):
+            if !options.PrivateDomains {
+                break Scanning
+            }
+            section = 2
+
+        // skip comments
+        case strings.HasPrefix(line, listTokenComment):
+            break
+
+        default:
+            var rule *Rule
+            var err error
+
+            if options.ASCIIEncoded {
+                rule, err = NewRule(line)
+            } else {
+                rule, err = NewRuleUnicode(line)
+            }
+            if err != nil {
+                return []Rule{}, err
+            }
+
+            rule.Private = (section == 2)
+            l.AddRule(rule)
+            rules = append(rules, *rule)
+        }
+
+    }
+
+    return rules, scanner.Err()
+}
+
+// Find returns the most appropriate rule for the domain name.
+func (l *List) Find(name string, options *FindOptions) *Rule {
+    if options == nil {
+        options = DefaultFindOptions
+    }
+
+    part := name
+    for {
+        rule, ok := l.rules[part]
+
+        if ok && rule.Match(name) && !(options.IgnorePrivate && rule.Private) {
+            return rule
+        }
+
+        i := strings.IndexRune(part, '.')
+        if i < 0 {
+            return options.DefaultRule
+        }
+
+        part = part[i+1:]
+    }
+
+}
+
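+// NOTE (editorial): an illustrative trace, not upstream code. Given the
+// hypothetical input "www.example.co.uk", Find probes the rules map with
+// progressively shorter dot-suffixes until a rule matches, falling back to
+// options.DefaultRule when the last label also misses:
+//
+//    "www.example.co.uk" -> no rule
+//    "example.co.uk"     -> no rule
+//    "co.uk"             -> rule found, Rule.Match succeeds, rule returned
+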
+// NewRule parses the rule content, creates and returns a Rule.
+//
+// The content of the rule MUST be encoded in ASCII (A-labels).
+func NewRule(content string) (*Rule, error) {
+    var rule *Rule
+    var value string
+
+    switch content[0:1] {
+    case "*": // wildcard
+        if content == "*" {
+            value = ""
+        } else {
+            value = content[2:]
+        }
+        rule = &Rule{Type: WildcardType, Value: value, Length: len(Labels(value)) + 1}
+    case "!": // exception
+        value = content[1:]
+        rule = &Rule{Type: ExceptionType, Value: value, Length: len(Labels(value))}
+    default: // normal
+        value = content
+        rule = &Rule{Type: NormalType, Value: value, Length: len(Labels(value))}
+    }
+
+    return rule, nil
+}
+
+// NewRuleUnicode is like NewRule, but expects the content to be encoded in Unicode (U-labels).
+func NewRuleUnicode(content string) (*Rule, error) {
+    var err error
+
+    content, err = ToASCII(content)
+    if err != nil {
+        return nil, err
+    }
+
+    return NewRule(content)
+}
+
+// MustNewRule is like NewRule, but panics if the content cannot be parsed.
+func MustNewRule(content string) *Rule {
+    rule, err := NewRule(content)
+    if err != nil {
+        panic(err)
+    }
+    return rule
+}
+
+// Match checks if the rule matches the name.
+//
+// A domain name is said to match a rule if and only if all of the following conditions are met:
+// - When the domain and rule are split into corresponding labels,
+//   the domain contains as many or more labels than the rule.
+// - Beginning with the right-most labels of both the domain and the rule,
+//   and continuing for all labels in the rule, one finds that for every pair,
+//   either they are identical, or that the label from the rule is "*".
+//
+// See https://publicsuffix.org/list/
+func (r *Rule) Match(name string) bool {
+    left := strings.TrimSuffix(name, r.Value)
+
+    // the name contains as many labels as the rule
+    // this is a match, unless it's a wildcard
+    // because the wildcard requires one more label
+    if left == "" {
+        return r.Type != WildcardType
+    }
+
+    // if there is one more label, the rule matches
+    // because either the rule is shorter than the domain
+    // or the rule is a wildcard and there is one more label
+    return left[len(left)-1:] == "."
+}
+
+// Decompose takes a name as input and decomposes it into a tuple of <TRD+SLD, TLD>,
+// according to the rule definition and type.
+func (r *Rule) Decompose(name string) (result [2]string) {
+    if r == DefaultRule {
+        i := strings.LastIndex(name, ".")
+        if i < 0 {
+            return
+        }
+        result[0], result[1] = name[:i], name[i+1:]
+        return
+    }
+    switch r.Type {
+    case NormalType:
+        name = strings.TrimSuffix(name, r.Value)
+        if len(name) == 0 {
+            return
+        }
+        result[0], result[1] = name[:len(name)-1], r.Value
+    case WildcardType:
+        name := strings.TrimSuffix(name, r.Value)
+        if len(name) == 0 {
+            return
+        }
+        name = name[:len(name)-1]
+        i := strings.LastIndex(name, ".")
+        if i < 0 {
+            return
+        }
+        result[0], result[1] = name[:i], name[i+1:]+"."+r.Value
+    case ExceptionType:
+        i := strings.IndexRune(r.Value, '.')
+        if i < 0 {
+            return
+        }
+        suffix := r.Value[i+1:]
+        name = strings.TrimSuffix(name, suffix)
+        if len(name) == 0 {
+            return
+        }
+        result[0], result[1] = name[:len(name)-1], suffix
+    }
+    return
+}
+
+// Labels decomposes given domain name into labels,
+// corresponding to the dot-separated tokens.
+func Labels(name string) []string {
+    return strings.Split(name, ".")
+}
+
+// DomainName represents a domain name.
+type DomainName struct {
+    TLD  string
+    SLD  string
+    TRD  string
+    Rule *Rule
+}
+
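+// NOTE (editorial): a small illustrative sketch, not upstream code, of how
+// the rule types above behave; "*.ck" and "!www.ck" are real Public Suffix
+// List entries (they also appear in the generated rules.go below) used here
+// as examples.
+//
+//    w := MustNewRule("*.ck")
+//    w.Match("ck")                  // false: a wildcard needs one extra label
+//    w.Match("www.ck")              // true
+//    w.Decompose("www.example.ck")  // [2]string{"www", "example.ck"}
+//
+//    e := MustNewRule("!www.ck")
+//    e.Match("www.ck")              // true: exception rules match like normal rules
+//    e.Decompose("www.ck")          // [2]string{"www", "ck"}
+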
+// String joins the components of the domain name into a single string.
+// Empty labels are skipped.
+//
+// Examples:
+//
+// DomainName{"com", "example"}.String()
+// // example.com
+// DomainName{"com", "example", "www"}.String()
+// // www.example.com
+//
+func (d *DomainName) String() string {
+    switch {
+    case d.TLD == "":
+        return ""
+    case d.SLD == "":
+        return d.TLD
+    case d.TRD == "":
+        return d.SLD + "." + d.TLD
+    default:
+        return d.TRD + "." + d.SLD + "." + d.TLD
+    }
+}
+
+// Domain extracts and returns the domain name from the input
+// using the default (Public Suffix) List.
+//
+// Examples:
+//
+// publicsuffix.Domain("example.com")
+// // example.com
+// publicsuffix.Domain("www.example.com")
+// // example.com
+// publicsuffix.Domain("www.example.co.uk")
+// // example.co.uk
+//
+func Domain(name string) (string, error) {
+    return DomainFromListWithOptions(DefaultList, name, DefaultFindOptions)
+}
+
+// Parse decomposes the name into TLD, SLD, TRD
+// using the default (Public Suffix) List,
+// and returns the result as a DomainName.
+//
+// Examples:
+//
+// publicsuffix.Parse("example.com")
+// // &DomainName{"com", "example"}
+// publicsuffix.Parse("www.example.com")
+// // &DomainName{"com", "example", "www"}
+// publicsuffix.Parse("www.example.co.uk")
+// // &DomainName{"co.uk", "example", "www"}
+//
+func Parse(name string) (*DomainName, error) {
+    return ParseFromListWithOptions(DefaultList, name, DefaultFindOptions)
+}
+
+// DomainFromListWithOptions extracts and returns the domain name from the input
+// using the (Public Suffix) list passed as argument.
+//
+// Examples:
+//
+// list := NewList()
+//
+// publicsuffix.DomainFromListWithOptions(list, "example.com", nil)
+// // example.com
+// publicsuffix.DomainFromListWithOptions(list, "www.example.com", nil)
+// // example.com
+// publicsuffix.DomainFromListWithOptions(list, "www.example.co.uk", nil)
+// // example.co.uk
+//
+func DomainFromListWithOptions(l *List, name string, options *FindOptions) (string, error) {
+    dn, err := ParseFromListWithOptions(l, name, options)
+    if err != nil {
+        return "", err
+    }
+    return dn.SLD + "." + dn.TLD, nil
+}
+
+// ParseFromListWithOptions decomposes the name into TLD, SLD, TRD
+// using the (Public Suffix) list passed as argument,
+// and returns the result as a DomainName.
+//
+// Examples:
+//
+// list := NewList()
+//
+// publicsuffix.ParseFromListWithOptions(list, "example.com", nil)
+// // &DomainName{"com", "example"}
+// publicsuffix.ParseFromListWithOptions(list, "www.example.com", nil)
+// // &DomainName{"com", "example", "www"}
+// publicsuffix.ParseFromListWithOptions(list, "www.example.co.uk", nil)
+// // &DomainName{"co.uk", "example", "www"}
+//
+func ParseFromListWithOptions(l *List, name string, options *FindOptions) (*DomainName, error) {
+    n, err := normalize(name)
+    if err != nil {
+        return nil, err
+    }
+
+    r := l.Find(n, options)
+    if r == nil {
+        return nil, fmt.Errorf("no rule matching name %s", name)
+    }
+
+    parts := r.Decompose(n)
+    left, tld := parts[0], parts[1]
+    if tld == "" {
+        return nil, fmt.Errorf("%s is a suffix", n)
+    }
+
+    dn := &DomainName{
+        Rule: r,
+        TLD:  tld,
+    }
+    if i := strings.LastIndex(left, "."); i < 0 {
+        dn.SLD = left
+    } else {
+        dn.TRD = left[:i]
+        dn.SLD = left[i+1:]
+    }
+    return dn, nil
+}
+
+func normalize(name string) (string, error) {
+    ret := strings.ToLower(name)
+
+    if ret == "" {
+        return "", fmt.Errorf("name is blank")
+    }
+    if ret[0] == '.' {
+        return "", fmt.Errorf("name %s starts with a dot", ret)
+    }
+
+    return ret, nil
+}
+
+// ToASCII is a wrapper for idna.ToASCII.
+//
+// This wrapper exists because idna.ToASCII backward-compatibility was broken twice in a few months
+// and I can't call this package directly anymore. The wrapper performs some terrible-but-necessary
+// before-after replacements to make sure an already ASCII input always results in the same output
+// even if passed through ToASCII.
+//
+// See golang/net@67957fd0b1, golang/net@f2499483f9, golang/net@78ebe5c8b6,
+// and weppos/publicsuffix-go#66.
+func ToASCII(s string) (string, error) {
+    // .example.com should be .example.com
+    // ..example.com should be ..example.com
+    if strings.HasPrefix(s, ".") {
+        dotIndex := 0
+        for i := 0; i < len(s); i++ {
+            if s[i] == '.' {
+                dotIndex = i
+            } else {
+                break
+            }
+        }
+        out, err := idna.ToASCII(s[dotIndex+1:])
+        out = s[:dotIndex+1] + out
+        return out, err
+    }
+
+    return idna.ToASCII(s)
+}
+
+// ToUnicode is a wrapper for idna.ToUnicode.
+//
+// See ToASCII for more details about why this wrapper exists.
+func ToUnicode(s string) (string, error) {
+    return idna.ToUnicode(s)
+}
+
+// CookieJarList implements the cookiejar.PublicSuffixList interface.
+var CookieJarList cookiejar.PublicSuffixList = cookiejarList{DefaultList}
+
+type cookiejarList struct {
+    List *List
+}
+
+// PublicSuffix implements cookiejar.PublicSuffixList.
+func (l cookiejarList) PublicSuffix(domain string) string {
+    rule := l.List.Find(domain, nil)
+    return rule.Decompose(domain)[1]
+}
+
+// String returns the version of the bundled Public Suffix List.
+func (cookiejarList) String() string {
+    return defaultListVersion
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go b/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go
new file mode 100644
index 00000000..8ca11c63
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/weppos/publicsuffix-go/publicsuffix/rules.go
@@ -0,0 +1,8806 @@
+// This file is automatically generated
+// Run "go run cmd/gen/gen.go" to update the list.
+ +package publicsuffix + +const defaultListVersion = "PSL version 5f6654 (Fri Aug 23 16:59:24 2019)" + +func DefaultRules() [8787]Rule { + return r +} + +var r = [8787]Rule{ + {1, "ac", 1, false}, + {1, "com.ac", 2, false}, + {1, "edu.ac", 2, false}, + {1, "gov.ac", 2, false}, + {1, "net.ac", 2, false}, + {1, "mil.ac", 2, false}, + {1, "org.ac", 2, false}, + {1, "ad", 1, false}, + {1, "nom.ad", 2, false}, + {1, "ae", 1, false}, + {1, "co.ae", 2, false}, + {1, "net.ae", 2, false}, + {1, "org.ae", 2, false}, + {1, "sch.ae", 2, false}, + {1, "ac.ae", 2, false}, + {1, "gov.ae", 2, false}, + {1, "mil.ae", 2, false}, + {1, "aero", 1, false}, + {1, "accident-investigation.aero", 2, false}, + {1, "accident-prevention.aero", 2, false}, + {1, "aerobatic.aero", 2, false}, + {1, "aeroclub.aero", 2, false}, + {1, "aerodrome.aero", 2, false}, + {1, "agents.aero", 2, false}, + {1, "aircraft.aero", 2, false}, + {1, "airline.aero", 2, false}, + {1, "airport.aero", 2, false}, + {1, "air-surveillance.aero", 2, false}, + {1, "airtraffic.aero", 2, false}, + {1, "air-traffic-control.aero", 2, false}, + {1, "ambulance.aero", 2, false}, + {1, "amusement.aero", 2, false}, + {1, "association.aero", 2, false}, + {1, "author.aero", 2, false}, + {1, "ballooning.aero", 2, false}, + {1, "broker.aero", 2, false}, + {1, "caa.aero", 2, false}, + {1, "cargo.aero", 2, false}, + {1, "catering.aero", 2, false}, + {1, "certification.aero", 2, false}, + {1, "championship.aero", 2, false}, + {1, "charter.aero", 2, false}, + {1, "civilaviation.aero", 2, false}, + {1, "club.aero", 2, false}, + {1, "conference.aero", 2, false}, + {1, "consultant.aero", 2, false}, + {1, "consulting.aero", 2, false}, + {1, "control.aero", 2, false}, + {1, "council.aero", 2, false}, + {1, "crew.aero", 2, false}, + {1, "design.aero", 2, false}, + {1, "dgca.aero", 2, false}, + {1, "educator.aero", 2, false}, + {1, "emergency.aero", 2, false}, + {1, "engine.aero", 2, false}, + {1, "engineer.aero", 2, false}, + {1, "entertainment.aero", 2, false}, + {1, "equipment.aero", 2, false}, + {1, "exchange.aero", 2, false}, + {1, "express.aero", 2, false}, + {1, "federation.aero", 2, false}, + {1, "flight.aero", 2, false}, + {1, "freight.aero", 2, false}, + {1, "fuel.aero", 2, false}, + {1, "gliding.aero", 2, false}, + {1, "government.aero", 2, false}, + {1, "groundhandling.aero", 2, false}, + {1, "group.aero", 2, false}, + {1, "hanggliding.aero", 2, false}, + {1, "homebuilt.aero", 2, false}, + {1, "insurance.aero", 2, false}, + {1, "journal.aero", 2, false}, + {1, "journalist.aero", 2, false}, + {1, "leasing.aero", 2, false}, + {1, "logistics.aero", 2, false}, + {1, "magazine.aero", 2, false}, + {1, "maintenance.aero", 2, false}, + {1, "media.aero", 2, false}, + {1, "microlight.aero", 2, false}, + {1, "modelling.aero", 2, false}, + {1, "navigation.aero", 2, false}, + {1, "parachuting.aero", 2, false}, + {1, "paragliding.aero", 2, false}, + {1, "passenger-association.aero", 2, false}, + {1, "pilot.aero", 2, false}, + {1, "press.aero", 2, false}, + {1, "production.aero", 2, false}, + {1, "recreation.aero", 2, false}, + {1, "repbody.aero", 2, false}, + {1, "res.aero", 2, false}, + {1, "research.aero", 2, false}, + {1, "rotorcraft.aero", 2, false}, + {1, "safety.aero", 2, false}, + {1, "scientist.aero", 2, false}, + {1, "services.aero", 2, false}, + {1, "show.aero", 2, false}, + {1, "skydiving.aero", 2, false}, + {1, "software.aero", 2, false}, + {1, "student.aero", 2, false}, + {1, "trader.aero", 2, false}, + {1, "trading.aero", 2, false}, + {1, "trainer.aero", 2, 
false}, + {1, "union.aero", 2, false}, + {1, "workinggroup.aero", 2, false}, + {1, "works.aero", 2, false}, + {1, "af", 1, false}, + {1, "gov.af", 2, false}, + {1, "com.af", 2, false}, + {1, "org.af", 2, false}, + {1, "net.af", 2, false}, + {1, "edu.af", 2, false}, + {1, "ag", 1, false}, + {1, "com.ag", 2, false}, + {1, "org.ag", 2, false}, + {1, "net.ag", 2, false}, + {1, "co.ag", 2, false}, + {1, "nom.ag", 2, false}, + {1, "ai", 1, false}, + {1, "off.ai", 2, false}, + {1, "com.ai", 2, false}, + {1, "net.ai", 2, false}, + {1, "org.ai", 2, false}, + {1, "al", 1, false}, + {1, "com.al", 2, false}, + {1, "edu.al", 2, false}, + {1, "gov.al", 2, false}, + {1, "mil.al", 2, false}, + {1, "net.al", 2, false}, + {1, "org.al", 2, false}, + {1, "am", 1, false}, + {1, "co.am", 2, false}, + {1, "com.am", 2, false}, + {1, "commune.am", 2, false}, + {1, "net.am", 2, false}, + {1, "org.am", 2, false}, + {1, "ao", 1, false}, + {1, "ed.ao", 2, false}, + {1, "gv.ao", 2, false}, + {1, "og.ao", 2, false}, + {1, "co.ao", 2, false}, + {1, "pb.ao", 2, false}, + {1, "it.ao", 2, false}, + {1, "aq", 1, false}, + {1, "ar", 1, false}, + {1, "com.ar", 2, false}, + {1, "edu.ar", 2, false}, + {1, "gob.ar", 2, false}, + {1, "gov.ar", 2, false}, + {1, "int.ar", 2, false}, + {1, "mil.ar", 2, false}, + {1, "musica.ar", 2, false}, + {1, "net.ar", 2, false}, + {1, "org.ar", 2, false}, + {1, "tur.ar", 2, false}, + {1, "arpa", 1, false}, + {1, "e164.arpa", 2, false}, + {1, "in-addr.arpa", 2, false}, + {1, "ip6.arpa", 2, false}, + {1, "iris.arpa", 2, false}, + {1, "uri.arpa", 2, false}, + {1, "urn.arpa", 2, false}, + {1, "as", 1, false}, + {1, "gov.as", 2, false}, + {1, "asia", 1, false}, + {1, "at", 1, false}, + {1, "ac.at", 2, false}, + {1, "co.at", 2, false}, + {1, "gv.at", 2, false}, + {1, "or.at", 2, false}, + {1, "au", 1, false}, + {1, "com.au", 2, false}, + {1, "net.au", 2, false}, + {1, "org.au", 2, false}, + {1, "edu.au", 2, false}, + {1, "gov.au", 2, false}, + {1, "asn.au", 2, false}, + {1, "id.au", 2, false}, + {1, "info.au", 2, false}, + {1, "conf.au", 2, false}, + {1, "oz.au", 2, false}, + {1, "act.au", 2, false}, + {1, "nsw.au", 2, false}, + {1, "nt.au", 2, false}, + {1, "qld.au", 2, false}, + {1, "sa.au", 2, false}, + {1, "tas.au", 2, false}, + {1, "vic.au", 2, false}, + {1, "wa.au", 2, false}, + {1, "act.edu.au", 3, false}, + {1, "catholic.edu.au", 3, false}, + {1, "eq.edu.au", 3, false}, + {1, "nsw.edu.au", 3, false}, + {1, "nt.edu.au", 3, false}, + {1, "qld.edu.au", 3, false}, + {1, "sa.edu.au", 3, false}, + {1, "tas.edu.au", 3, false}, + {1, "vic.edu.au", 3, false}, + {1, "wa.edu.au", 3, false}, + {1, "qld.gov.au", 3, false}, + {1, "sa.gov.au", 3, false}, + {1, "tas.gov.au", 3, false}, + {1, "vic.gov.au", 3, false}, + {1, "wa.gov.au", 3, false}, + {1, "education.tas.edu.au", 4, false}, + {1, "schools.nsw.edu.au", 4, false}, + {1, "aw", 1, false}, + {1, "com.aw", 2, false}, + {1, "ax", 1, false}, + {1, "az", 1, false}, + {1, "com.az", 2, false}, + {1, "net.az", 2, false}, + {1, "int.az", 2, false}, + {1, "gov.az", 2, false}, + {1, "org.az", 2, false}, + {1, "edu.az", 2, false}, + {1, "info.az", 2, false}, + {1, "pp.az", 2, false}, + {1, "mil.az", 2, false}, + {1, "name.az", 2, false}, + {1, "pro.az", 2, false}, + {1, "biz.az", 2, false}, + {1, "ba", 1, false}, + {1, "com.ba", 2, false}, + {1, "edu.ba", 2, false}, + {1, "gov.ba", 2, false}, + {1, "mil.ba", 2, false}, + {1, "net.ba", 2, false}, + {1, "org.ba", 2, false}, + {1, "bb", 1, false}, + {1, "biz.bb", 2, false}, + {1, "co.bb", 2, false}, + {1, "com.bb", 
2, false}, + {1, "edu.bb", 2, false}, + {1, "gov.bb", 2, false}, + {1, "info.bb", 2, false}, + {1, "net.bb", 2, false}, + {1, "org.bb", 2, false}, + {1, "store.bb", 2, false}, + {1, "tv.bb", 2, false}, + {2, "bd", 2, false}, + {1, "be", 1, false}, + {1, "ac.be", 2, false}, + {1, "bf", 1, false}, + {1, "gov.bf", 2, false}, + {1, "bg", 1, false}, + {1, "a.bg", 2, false}, + {1, "b.bg", 2, false}, + {1, "c.bg", 2, false}, + {1, "d.bg", 2, false}, + {1, "e.bg", 2, false}, + {1, "f.bg", 2, false}, + {1, "g.bg", 2, false}, + {1, "h.bg", 2, false}, + {1, "i.bg", 2, false}, + {1, "j.bg", 2, false}, + {1, "k.bg", 2, false}, + {1, "l.bg", 2, false}, + {1, "m.bg", 2, false}, + {1, "n.bg", 2, false}, + {1, "o.bg", 2, false}, + {1, "p.bg", 2, false}, + {1, "q.bg", 2, false}, + {1, "r.bg", 2, false}, + {1, "s.bg", 2, false}, + {1, "t.bg", 2, false}, + {1, "u.bg", 2, false}, + {1, "v.bg", 2, false}, + {1, "w.bg", 2, false}, + {1, "x.bg", 2, false}, + {1, "y.bg", 2, false}, + {1, "z.bg", 2, false}, + {1, "0.bg", 2, false}, + {1, "1.bg", 2, false}, + {1, "2.bg", 2, false}, + {1, "3.bg", 2, false}, + {1, "4.bg", 2, false}, + {1, "5.bg", 2, false}, + {1, "6.bg", 2, false}, + {1, "7.bg", 2, false}, + {1, "8.bg", 2, false}, + {1, "9.bg", 2, false}, + {1, "bh", 1, false}, + {1, "com.bh", 2, false}, + {1, "edu.bh", 2, false}, + {1, "net.bh", 2, false}, + {1, "org.bh", 2, false}, + {1, "gov.bh", 2, false}, + {1, "bi", 1, false}, + {1, "co.bi", 2, false}, + {1, "com.bi", 2, false}, + {1, "edu.bi", 2, false}, + {1, "or.bi", 2, false}, + {1, "org.bi", 2, false}, + {1, "biz", 1, false}, + {1, "bj", 1, false}, + {1, "asso.bj", 2, false}, + {1, "barreau.bj", 2, false}, + {1, "gouv.bj", 2, false}, + {1, "bm", 1, false}, + {1, "com.bm", 2, false}, + {1, "edu.bm", 2, false}, + {1, "gov.bm", 2, false}, + {1, "net.bm", 2, false}, + {1, "org.bm", 2, false}, + {1, "bn", 1, false}, + {1, "com.bn", 2, false}, + {1, "edu.bn", 2, false}, + {1, "gov.bn", 2, false}, + {1, "net.bn", 2, false}, + {1, "org.bn", 2, false}, + {1, "bo", 1, false}, + {1, "com.bo", 2, false}, + {1, "edu.bo", 2, false}, + {1, "gob.bo", 2, false}, + {1, "int.bo", 2, false}, + {1, "org.bo", 2, false}, + {1, "net.bo", 2, false}, + {1, "mil.bo", 2, false}, + {1, "tv.bo", 2, false}, + {1, "web.bo", 2, false}, + {1, "academia.bo", 2, false}, + {1, "agro.bo", 2, false}, + {1, "arte.bo", 2, false}, + {1, "blog.bo", 2, false}, + {1, "bolivia.bo", 2, false}, + {1, "ciencia.bo", 2, false}, + {1, "cooperativa.bo", 2, false}, + {1, "democracia.bo", 2, false}, + {1, "deporte.bo", 2, false}, + {1, "ecologia.bo", 2, false}, + {1, "economia.bo", 2, false}, + {1, "empresa.bo", 2, false}, + {1, "indigena.bo", 2, false}, + {1, "industria.bo", 2, false}, + {1, "info.bo", 2, false}, + {1, "medicina.bo", 2, false}, + {1, "movimiento.bo", 2, false}, + {1, "musica.bo", 2, false}, + {1, "natural.bo", 2, false}, + {1, "nombre.bo", 2, false}, + {1, "noticias.bo", 2, false}, + {1, "patria.bo", 2, false}, + {1, "politica.bo", 2, false}, + {1, "profesional.bo", 2, false}, + {1, "plurinacional.bo", 2, false}, + {1, "pueblo.bo", 2, false}, + {1, "revista.bo", 2, false}, + {1, "salud.bo", 2, false}, + {1, "tecnologia.bo", 2, false}, + {1, "tksat.bo", 2, false}, + {1, "transporte.bo", 2, false}, + {1, "wiki.bo", 2, false}, + {1, "br", 1, false}, + {1, "9guacu.br", 2, false}, + {1, "abc.br", 2, false}, + {1, "adm.br", 2, false}, + {1, "adv.br", 2, false}, + {1, "agr.br", 2, false}, + {1, "aju.br", 2, false}, + {1, "am.br", 2, false}, + {1, "anani.br", 2, false}, + {1, "aparecida.br", 2, 
false}, + {1, "arq.br", 2, false}, + {1, "art.br", 2, false}, + {1, "ato.br", 2, false}, + {1, "b.br", 2, false}, + {1, "barueri.br", 2, false}, + {1, "belem.br", 2, false}, + {1, "bhz.br", 2, false}, + {1, "bio.br", 2, false}, + {1, "blog.br", 2, false}, + {1, "bmd.br", 2, false}, + {1, "boavista.br", 2, false}, + {1, "bsb.br", 2, false}, + {1, "campinagrande.br", 2, false}, + {1, "campinas.br", 2, false}, + {1, "caxias.br", 2, false}, + {1, "cim.br", 2, false}, + {1, "cng.br", 2, false}, + {1, "cnt.br", 2, false}, + {1, "com.br", 2, false}, + {1, "contagem.br", 2, false}, + {1, "coop.br", 2, false}, + {1, "cri.br", 2, false}, + {1, "cuiaba.br", 2, false}, + {1, "curitiba.br", 2, false}, + {1, "def.br", 2, false}, + {1, "ecn.br", 2, false}, + {1, "eco.br", 2, false}, + {1, "edu.br", 2, false}, + {1, "emp.br", 2, false}, + {1, "eng.br", 2, false}, + {1, "esp.br", 2, false}, + {1, "etc.br", 2, false}, + {1, "eti.br", 2, false}, + {1, "far.br", 2, false}, + {1, "feira.br", 2, false}, + {1, "flog.br", 2, false}, + {1, "floripa.br", 2, false}, + {1, "fm.br", 2, false}, + {1, "fnd.br", 2, false}, + {1, "fortal.br", 2, false}, + {1, "fot.br", 2, false}, + {1, "foz.br", 2, false}, + {1, "fst.br", 2, false}, + {1, "g12.br", 2, false}, + {1, "ggf.br", 2, false}, + {1, "goiania.br", 2, false}, + {1, "gov.br", 2, false}, + {1, "ac.gov.br", 3, false}, + {1, "al.gov.br", 3, false}, + {1, "am.gov.br", 3, false}, + {1, "ap.gov.br", 3, false}, + {1, "ba.gov.br", 3, false}, + {1, "ce.gov.br", 3, false}, + {1, "df.gov.br", 3, false}, + {1, "es.gov.br", 3, false}, + {1, "go.gov.br", 3, false}, + {1, "ma.gov.br", 3, false}, + {1, "mg.gov.br", 3, false}, + {1, "ms.gov.br", 3, false}, + {1, "mt.gov.br", 3, false}, + {1, "pa.gov.br", 3, false}, + {1, "pb.gov.br", 3, false}, + {1, "pe.gov.br", 3, false}, + {1, "pi.gov.br", 3, false}, + {1, "pr.gov.br", 3, false}, + {1, "rj.gov.br", 3, false}, + {1, "rn.gov.br", 3, false}, + {1, "ro.gov.br", 3, false}, + {1, "rr.gov.br", 3, false}, + {1, "rs.gov.br", 3, false}, + {1, "sc.gov.br", 3, false}, + {1, "se.gov.br", 3, false}, + {1, "sp.gov.br", 3, false}, + {1, "to.gov.br", 3, false}, + {1, "gru.br", 2, false}, + {1, "imb.br", 2, false}, + {1, "ind.br", 2, false}, + {1, "inf.br", 2, false}, + {1, "jab.br", 2, false}, + {1, "jampa.br", 2, false}, + {1, "jdf.br", 2, false}, + {1, "joinville.br", 2, false}, + {1, "jor.br", 2, false}, + {1, "jus.br", 2, false}, + {1, "leg.br", 2, false}, + {1, "lel.br", 2, false}, + {1, "londrina.br", 2, false}, + {1, "macapa.br", 2, false}, + {1, "maceio.br", 2, false}, + {1, "manaus.br", 2, false}, + {1, "maringa.br", 2, false}, + {1, "mat.br", 2, false}, + {1, "med.br", 2, false}, + {1, "mil.br", 2, false}, + {1, "morena.br", 2, false}, + {1, "mp.br", 2, false}, + {1, "mus.br", 2, false}, + {1, "natal.br", 2, false}, + {1, "net.br", 2, false}, + {1, "niteroi.br", 2, false}, + {2, "nom.br", 3, false}, + {1, "not.br", 2, false}, + {1, "ntr.br", 2, false}, + {1, "odo.br", 2, false}, + {1, "ong.br", 2, false}, + {1, "org.br", 2, false}, + {1, "osasco.br", 2, false}, + {1, "palmas.br", 2, false}, + {1, "poa.br", 2, false}, + {1, "ppg.br", 2, false}, + {1, "pro.br", 2, false}, + {1, "psc.br", 2, false}, + {1, "psi.br", 2, false}, + {1, "pvh.br", 2, false}, + {1, "qsl.br", 2, false}, + {1, "radio.br", 2, false}, + {1, "rec.br", 2, false}, + {1, "recife.br", 2, false}, + {1, "ribeirao.br", 2, false}, + {1, "rio.br", 2, false}, + {1, "riobranco.br", 2, false}, + {1, "riopreto.br", 2, false}, + {1, "salvador.br", 2, false}, + {1, "sampa.br", 2, 
false}, + {1, "santamaria.br", 2, false}, + {1, "santoandre.br", 2, false}, + {1, "saobernardo.br", 2, false}, + {1, "saogonca.br", 2, false}, + {1, "sjc.br", 2, false}, + {1, "slg.br", 2, false}, + {1, "slz.br", 2, false}, + {1, "sorocaba.br", 2, false}, + {1, "srv.br", 2, false}, + {1, "taxi.br", 2, false}, + {1, "tc.br", 2, false}, + {1, "teo.br", 2, false}, + {1, "the.br", 2, false}, + {1, "tmp.br", 2, false}, + {1, "trd.br", 2, false}, + {1, "tur.br", 2, false}, + {1, "tv.br", 2, false}, + {1, "udi.br", 2, false}, + {1, "vet.br", 2, false}, + {1, "vix.br", 2, false}, + {1, "vlog.br", 2, false}, + {1, "wiki.br", 2, false}, + {1, "zlg.br", 2, false}, + {1, "bs", 1, false}, + {1, "com.bs", 2, false}, + {1, "net.bs", 2, false}, + {1, "org.bs", 2, false}, + {1, "edu.bs", 2, false}, + {1, "gov.bs", 2, false}, + {1, "bt", 1, false}, + {1, "com.bt", 2, false}, + {1, "edu.bt", 2, false}, + {1, "gov.bt", 2, false}, + {1, "net.bt", 2, false}, + {1, "org.bt", 2, false}, + {1, "bv", 1, false}, + {1, "bw", 1, false}, + {1, "co.bw", 2, false}, + {1, "org.bw", 2, false}, + {1, "by", 1, false}, + {1, "gov.by", 2, false}, + {1, "mil.by", 2, false}, + {1, "com.by", 2, false}, + {1, "of.by", 2, false}, + {1, "bz", 1, false}, + {1, "com.bz", 2, false}, + {1, "net.bz", 2, false}, + {1, "org.bz", 2, false}, + {1, "edu.bz", 2, false}, + {1, "gov.bz", 2, false}, + {1, "ca", 1, false}, + {1, "ab.ca", 2, false}, + {1, "bc.ca", 2, false}, + {1, "mb.ca", 2, false}, + {1, "nb.ca", 2, false}, + {1, "nf.ca", 2, false}, + {1, "nl.ca", 2, false}, + {1, "ns.ca", 2, false}, + {1, "nt.ca", 2, false}, + {1, "nu.ca", 2, false}, + {1, "on.ca", 2, false}, + {1, "pe.ca", 2, false}, + {1, "qc.ca", 2, false}, + {1, "sk.ca", 2, false}, + {1, "yk.ca", 2, false}, + {1, "gc.ca", 2, false}, + {1, "cat", 1, false}, + {1, "cc", 1, false}, + {1, "cd", 1, false}, + {1, "gov.cd", 2, false}, + {1, "cf", 1, false}, + {1, "cg", 1, false}, + {1, "ch", 1, false}, + {1, "ci", 1, false}, + {1, "org.ci", 2, false}, + {1, "or.ci", 2, false}, + {1, "com.ci", 2, false}, + {1, "co.ci", 2, false}, + {1, "edu.ci", 2, false}, + {1, "ed.ci", 2, false}, + {1, "ac.ci", 2, false}, + {1, "net.ci", 2, false}, + {1, "go.ci", 2, false}, + {1, "asso.ci", 2, false}, + {1, "xn--aroport-bya.ci", 2, false}, + {1, "int.ci", 2, false}, + {1, "presse.ci", 2, false}, + {1, "md.ci", 2, false}, + {1, "gouv.ci", 2, false}, + {2, "ck", 2, false}, + {3, "www.ck", 2, false}, + {1, "cl", 1, false}, + {1, "gov.cl", 2, false}, + {1, "gob.cl", 2, false}, + {1, "co.cl", 2, false}, + {1, "mil.cl", 2, false}, + {1, "cm", 1, false}, + {1, "co.cm", 2, false}, + {1, "com.cm", 2, false}, + {1, "gov.cm", 2, false}, + {1, "net.cm", 2, false}, + {1, "cn", 1, false}, + {1, "ac.cn", 2, false}, + {1, "com.cn", 2, false}, + {1, "edu.cn", 2, false}, + {1, "gov.cn", 2, false}, + {1, "net.cn", 2, false}, + {1, "org.cn", 2, false}, + {1, "mil.cn", 2, false}, + {1, "xn--55qx5d.cn", 2, false}, + {1, "xn--io0a7i.cn", 2, false}, + {1, "xn--od0alg.cn", 2, false}, + {1, "ah.cn", 2, false}, + {1, "bj.cn", 2, false}, + {1, "cq.cn", 2, false}, + {1, "fj.cn", 2, false}, + {1, "gd.cn", 2, false}, + {1, "gs.cn", 2, false}, + {1, "gz.cn", 2, false}, + {1, "gx.cn", 2, false}, + {1, "ha.cn", 2, false}, + {1, "hb.cn", 2, false}, + {1, "he.cn", 2, false}, + {1, "hi.cn", 2, false}, + {1, "hl.cn", 2, false}, + {1, "hn.cn", 2, false}, + {1, "jl.cn", 2, false}, + {1, "js.cn", 2, false}, + {1, "jx.cn", 2, false}, + {1, "ln.cn", 2, false}, + {1, "nm.cn", 2, false}, + {1, "nx.cn", 2, false}, + {1, "qh.cn", 2, false}, + 
{1, "sc.cn", 2, false}, + {1, "sd.cn", 2, false}, + {1, "sh.cn", 2, false}, + {1, "sn.cn", 2, false}, + {1, "sx.cn", 2, false}, + {1, "tj.cn", 2, false}, + {1, "xj.cn", 2, false}, + {1, "xz.cn", 2, false}, + {1, "yn.cn", 2, false}, + {1, "zj.cn", 2, false}, + {1, "hk.cn", 2, false}, + {1, "mo.cn", 2, false}, + {1, "tw.cn", 2, false}, + {1, "co", 1, false}, + {1, "arts.co", 2, false}, + {1, "com.co", 2, false}, + {1, "edu.co", 2, false}, + {1, "firm.co", 2, false}, + {1, "gov.co", 2, false}, + {1, "info.co", 2, false}, + {1, "int.co", 2, false}, + {1, "mil.co", 2, false}, + {1, "net.co", 2, false}, + {1, "nom.co", 2, false}, + {1, "org.co", 2, false}, + {1, "rec.co", 2, false}, + {1, "web.co", 2, false}, + {1, "com", 1, false}, + {1, "coop", 1, false}, + {1, "cr", 1, false}, + {1, "ac.cr", 2, false}, + {1, "co.cr", 2, false}, + {1, "ed.cr", 2, false}, + {1, "fi.cr", 2, false}, + {1, "go.cr", 2, false}, + {1, "or.cr", 2, false}, + {1, "sa.cr", 2, false}, + {1, "cu", 1, false}, + {1, "com.cu", 2, false}, + {1, "edu.cu", 2, false}, + {1, "org.cu", 2, false}, + {1, "net.cu", 2, false}, + {1, "gov.cu", 2, false}, + {1, "inf.cu", 2, false}, + {1, "cv", 1, false}, + {1, "cw", 1, false}, + {1, "com.cw", 2, false}, + {1, "edu.cw", 2, false}, + {1, "net.cw", 2, false}, + {1, "org.cw", 2, false}, + {1, "cx", 1, false}, + {1, "gov.cx", 2, false}, + {1, "cy", 1, false}, + {1, "ac.cy", 2, false}, + {1, "biz.cy", 2, false}, + {1, "com.cy", 2, false}, + {1, "ekloges.cy", 2, false}, + {1, "gov.cy", 2, false}, + {1, "ltd.cy", 2, false}, + {1, "name.cy", 2, false}, + {1, "net.cy", 2, false}, + {1, "org.cy", 2, false}, + {1, "parliament.cy", 2, false}, + {1, "press.cy", 2, false}, + {1, "pro.cy", 2, false}, + {1, "tm.cy", 2, false}, + {1, "cz", 1, false}, + {1, "de", 1, false}, + {1, "dj", 1, false}, + {1, "dk", 1, false}, + {1, "dm", 1, false}, + {1, "com.dm", 2, false}, + {1, "net.dm", 2, false}, + {1, "org.dm", 2, false}, + {1, "edu.dm", 2, false}, + {1, "gov.dm", 2, false}, + {1, "do", 1, false}, + {1, "art.do", 2, false}, + {1, "com.do", 2, false}, + {1, "edu.do", 2, false}, + {1, "gob.do", 2, false}, + {1, "gov.do", 2, false}, + {1, "mil.do", 2, false}, + {1, "net.do", 2, false}, + {1, "org.do", 2, false}, + {1, "sld.do", 2, false}, + {1, "web.do", 2, false}, + {1, "dz", 1, false}, + {1, "com.dz", 2, false}, + {1, "org.dz", 2, false}, + {1, "net.dz", 2, false}, + {1, "gov.dz", 2, false}, + {1, "edu.dz", 2, false}, + {1, "asso.dz", 2, false}, + {1, "pol.dz", 2, false}, + {1, "art.dz", 2, false}, + {1, "ec", 1, false}, + {1, "com.ec", 2, false}, + {1, "info.ec", 2, false}, + {1, "net.ec", 2, false}, + {1, "fin.ec", 2, false}, + {1, "k12.ec", 2, false}, + {1, "med.ec", 2, false}, + {1, "pro.ec", 2, false}, + {1, "org.ec", 2, false}, + {1, "edu.ec", 2, false}, + {1, "gov.ec", 2, false}, + {1, "gob.ec", 2, false}, + {1, "mil.ec", 2, false}, + {1, "edu", 1, false}, + {1, "ee", 1, false}, + {1, "edu.ee", 2, false}, + {1, "gov.ee", 2, false}, + {1, "riik.ee", 2, false}, + {1, "lib.ee", 2, false}, + {1, "med.ee", 2, false}, + {1, "com.ee", 2, false}, + {1, "pri.ee", 2, false}, + {1, "aip.ee", 2, false}, + {1, "org.ee", 2, false}, + {1, "fie.ee", 2, false}, + {1, "eg", 1, false}, + {1, "com.eg", 2, false}, + {1, "edu.eg", 2, false}, + {1, "eun.eg", 2, false}, + {1, "gov.eg", 2, false}, + {1, "mil.eg", 2, false}, + {1, "name.eg", 2, false}, + {1, "net.eg", 2, false}, + {1, "org.eg", 2, false}, + {1, "sci.eg", 2, false}, + {2, "er", 2, false}, + {1, "es", 1, false}, + {1, "com.es", 2, false}, + {1, "nom.es", 2, 
false}, + {1, "org.es", 2, false}, + {1, "gob.es", 2, false}, + {1, "edu.es", 2, false}, + {1, "et", 1, false}, + {1, "com.et", 2, false}, + {1, "gov.et", 2, false}, + {1, "org.et", 2, false}, + {1, "edu.et", 2, false}, + {1, "biz.et", 2, false}, + {1, "name.et", 2, false}, + {1, "info.et", 2, false}, + {1, "net.et", 2, false}, + {1, "eu", 1, false}, + {1, "fi", 1, false}, + {1, "aland.fi", 2, false}, + {2, "fj", 2, false}, + {2, "fk", 2, false}, + {1, "fm", 1, false}, + {1, "fo", 1, false}, + {1, "fr", 1, false}, + {1, "asso.fr", 2, false}, + {1, "com.fr", 2, false}, + {1, "gouv.fr", 2, false}, + {1, "nom.fr", 2, false}, + {1, "prd.fr", 2, false}, + {1, "tm.fr", 2, false}, + {1, "aeroport.fr", 2, false}, + {1, "avocat.fr", 2, false}, + {1, "avoues.fr", 2, false}, + {1, "cci.fr", 2, false}, + {1, "chambagri.fr", 2, false}, + {1, "chirurgiens-dentistes.fr", 2, false}, + {1, "experts-comptables.fr", 2, false}, + {1, "geometre-expert.fr", 2, false}, + {1, "greta.fr", 2, false}, + {1, "huissier-justice.fr", 2, false}, + {1, "medecin.fr", 2, false}, + {1, "notaires.fr", 2, false}, + {1, "pharmacien.fr", 2, false}, + {1, "port.fr", 2, false}, + {1, "veterinaire.fr", 2, false}, + {1, "ga", 1, false}, + {1, "gb", 1, false}, + {1, "gd", 1, false}, + {1, "ge", 1, false}, + {1, "com.ge", 2, false}, + {1, "edu.ge", 2, false}, + {1, "gov.ge", 2, false}, + {1, "org.ge", 2, false}, + {1, "mil.ge", 2, false}, + {1, "net.ge", 2, false}, + {1, "pvt.ge", 2, false}, + {1, "gf", 1, false}, + {1, "gg", 1, false}, + {1, "co.gg", 2, false}, + {1, "net.gg", 2, false}, + {1, "org.gg", 2, false}, + {1, "gh", 1, false}, + {1, "com.gh", 2, false}, + {1, "edu.gh", 2, false}, + {1, "gov.gh", 2, false}, + {1, "org.gh", 2, false}, + {1, "mil.gh", 2, false}, + {1, "gi", 1, false}, + {1, "com.gi", 2, false}, + {1, "ltd.gi", 2, false}, + {1, "gov.gi", 2, false}, + {1, "mod.gi", 2, false}, + {1, "edu.gi", 2, false}, + {1, "org.gi", 2, false}, + {1, "gl", 1, false}, + {1, "co.gl", 2, false}, + {1, "com.gl", 2, false}, + {1, "edu.gl", 2, false}, + {1, "net.gl", 2, false}, + {1, "org.gl", 2, false}, + {1, "gm", 1, false}, + {1, "gn", 1, false}, + {1, "ac.gn", 2, false}, + {1, "com.gn", 2, false}, + {1, "edu.gn", 2, false}, + {1, "gov.gn", 2, false}, + {1, "org.gn", 2, false}, + {1, "net.gn", 2, false}, + {1, "gov", 1, false}, + {1, "gp", 1, false}, + {1, "com.gp", 2, false}, + {1, "net.gp", 2, false}, + {1, "mobi.gp", 2, false}, + {1, "edu.gp", 2, false}, + {1, "org.gp", 2, false}, + {1, "asso.gp", 2, false}, + {1, "gq", 1, false}, + {1, "gr", 1, false}, + {1, "com.gr", 2, false}, + {1, "edu.gr", 2, false}, + {1, "net.gr", 2, false}, + {1, "org.gr", 2, false}, + {1, "gov.gr", 2, false}, + {1, "gs", 1, false}, + {1, "gt", 1, false}, + {1, "com.gt", 2, false}, + {1, "edu.gt", 2, false}, + {1, "gob.gt", 2, false}, + {1, "ind.gt", 2, false}, + {1, "mil.gt", 2, false}, + {1, "net.gt", 2, false}, + {1, "org.gt", 2, false}, + {1, "gu", 1, false}, + {1, "com.gu", 2, false}, + {1, "edu.gu", 2, false}, + {1, "gov.gu", 2, false}, + {1, "guam.gu", 2, false}, + {1, "info.gu", 2, false}, + {1, "net.gu", 2, false}, + {1, "org.gu", 2, false}, + {1, "web.gu", 2, false}, + {1, "gw", 1, false}, + {1, "gy", 1, false}, + {1, "co.gy", 2, false}, + {1, "com.gy", 2, false}, + {1, "edu.gy", 2, false}, + {1, "gov.gy", 2, false}, + {1, "net.gy", 2, false}, + {1, "org.gy", 2, false}, + {1, "hk", 1, false}, + {1, "com.hk", 2, false}, + {1, "edu.hk", 2, false}, + {1, "gov.hk", 2, false}, + {1, "idv.hk", 2, false}, + {1, "net.hk", 2, false}, + {1, "org.hk", 
2, false}, + {1, "xn--55qx5d.hk", 2, false}, + {1, "xn--wcvs22d.hk", 2, false}, + {1, "xn--lcvr32d.hk", 2, false}, + {1, "xn--mxtq1m.hk", 2, false}, + {1, "xn--gmqw5a.hk", 2, false}, + {1, "xn--ciqpn.hk", 2, false}, + {1, "xn--gmq050i.hk", 2, false}, + {1, "xn--zf0avx.hk", 2, false}, + {1, "xn--io0a7i.hk", 2, false}, + {1, "xn--mk0axi.hk", 2, false}, + {1, "xn--od0alg.hk", 2, false}, + {1, "xn--od0aq3b.hk", 2, false}, + {1, "xn--tn0ag.hk", 2, false}, + {1, "xn--uc0atv.hk", 2, false}, + {1, "xn--uc0ay4a.hk", 2, false}, + {1, "hm", 1, false}, + {1, "hn", 1, false}, + {1, "com.hn", 2, false}, + {1, "edu.hn", 2, false}, + {1, "org.hn", 2, false}, + {1, "net.hn", 2, false}, + {1, "mil.hn", 2, false}, + {1, "gob.hn", 2, false}, + {1, "hr", 1, false}, + {1, "iz.hr", 2, false}, + {1, "from.hr", 2, false}, + {1, "name.hr", 2, false}, + {1, "com.hr", 2, false}, + {1, "ht", 1, false}, + {1, "com.ht", 2, false}, + {1, "shop.ht", 2, false}, + {1, "firm.ht", 2, false}, + {1, "info.ht", 2, false}, + {1, "adult.ht", 2, false}, + {1, "net.ht", 2, false}, + {1, "pro.ht", 2, false}, + {1, "org.ht", 2, false}, + {1, "med.ht", 2, false}, + {1, "art.ht", 2, false}, + {1, "coop.ht", 2, false}, + {1, "pol.ht", 2, false}, + {1, "asso.ht", 2, false}, + {1, "edu.ht", 2, false}, + {1, "rel.ht", 2, false}, + {1, "gouv.ht", 2, false}, + {1, "perso.ht", 2, false}, + {1, "hu", 1, false}, + {1, "co.hu", 2, false}, + {1, "info.hu", 2, false}, + {1, "org.hu", 2, false}, + {1, "priv.hu", 2, false}, + {1, "sport.hu", 2, false}, + {1, "tm.hu", 2, false}, + {1, "2000.hu", 2, false}, + {1, "agrar.hu", 2, false}, + {1, "bolt.hu", 2, false}, + {1, "casino.hu", 2, false}, + {1, "city.hu", 2, false}, + {1, "erotica.hu", 2, false}, + {1, "erotika.hu", 2, false}, + {1, "film.hu", 2, false}, + {1, "forum.hu", 2, false}, + {1, "games.hu", 2, false}, + {1, "hotel.hu", 2, false}, + {1, "ingatlan.hu", 2, false}, + {1, "jogasz.hu", 2, false}, + {1, "konyvelo.hu", 2, false}, + {1, "lakas.hu", 2, false}, + {1, "media.hu", 2, false}, + {1, "news.hu", 2, false}, + {1, "reklam.hu", 2, false}, + {1, "sex.hu", 2, false}, + {1, "shop.hu", 2, false}, + {1, "suli.hu", 2, false}, + {1, "szex.hu", 2, false}, + {1, "tozsde.hu", 2, false}, + {1, "utazas.hu", 2, false}, + {1, "video.hu", 2, false}, + {1, "id", 1, false}, + {1, "ac.id", 2, false}, + {1, "biz.id", 2, false}, + {1, "co.id", 2, false}, + {1, "desa.id", 2, false}, + {1, "go.id", 2, false}, + {1, "mil.id", 2, false}, + {1, "my.id", 2, false}, + {1, "net.id", 2, false}, + {1, "or.id", 2, false}, + {1, "ponpes.id", 2, false}, + {1, "sch.id", 2, false}, + {1, "web.id", 2, false}, + {1, "ie", 1, false}, + {1, "gov.ie", 2, false}, + {1, "il", 1, false}, + {1, "ac.il", 2, false}, + {1, "co.il", 2, false}, + {1, "gov.il", 2, false}, + {1, "idf.il", 2, false}, + {1, "k12.il", 2, false}, + {1, "muni.il", 2, false}, + {1, "net.il", 2, false}, + {1, "org.il", 2, false}, + {1, "im", 1, false}, + {1, "ac.im", 2, false}, + {1, "co.im", 2, false}, + {1, "com.im", 2, false}, + {1, "ltd.co.im", 3, false}, + {1, "net.im", 2, false}, + {1, "org.im", 2, false}, + {1, "plc.co.im", 3, false}, + {1, "tt.im", 2, false}, + {1, "tv.im", 2, false}, + {1, "in", 1, false}, + {1, "co.in", 2, false}, + {1, "firm.in", 2, false}, + {1, "net.in", 2, false}, + {1, "org.in", 2, false}, + {1, "gen.in", 2, false}, + {1, "ind.in", 2, false}, + {1, "nic.in", 2, false}, + {1, "ac.in", 2, false}, + {1, "edu.in", 2, false}, + {1, "res.in", 2, false}, + {1, "gov.in", 2, false}, + {1, "mil.in", 2, false}, + {1, "info", 1, false}, + {1, 
"int", 1, false}, + {1, "eu.int", 2, false}, + {1, "io", 1, false}, + {1, "com.io", 2, false}, + {1, "iq", 1, false}, + {1, "gov.iq", 2, false}, + {1, "edu.iq", 2, false}, + {1, "mil.iq", 2, false}, + {1, "com.iq", 2, false}, + {1, "org.iq", 2, false}, + {1, "net.iq", 2, false}, + {1, "ir", 1, false}, + {1, "ac.ir", 2, false}, + {1, "co.ir", 2, false}, + {1, "gov.ir", 2, false}, + {1, "id.ir", 2, false}, + {1, "net.ir", 2, false}, + {1, "org.ir", 2, false}, + {1, "sch.ir", 2, false}, + {1, "xn--mgba3a4f16a.ir", 2, false}, + {1, "xn--mgba3a4fra.ir", 2, false}, + {1, "is", 1, false}, + {1, "net.is", 2, false}, + {1, "com.is", 2, false}, + {1, "edu.is", 2, false}, + {1, "gov.is", 2, false}, + {1, "org.is", 2, false}, + {1, "int.is", 2, false}, + {1, "it", 1, false}, + {1, "gov.it", 2, false}, + {1, "edu.it", 2, false}, + {1, "abr.it", 2, false}, + {1, "abruzzo.it", 2, false}, + {1, "aosta-valley.it", 2, false}, + {1, "aostavalley.it", 2, false}, + {1, "bas.it", 2, false}, + {1, "basilicata.it", 2, false}, + {1, "cal.it", 2, false}, + {1, "calabria.it", 2, false}, + {1, "cam.it", 2, false}, + {1, "campania.it", 2, false}, + {1, "emilia-romagna.it", 2, false}, + {1, "emiliaromagna.it", 2, false}, + {1, "emr.it", 2, false}, + {1, "friuli-v-giulia.it", 2, false}, + {1, "friuli-ve-giulia.it", 2, false}, + {1, "friuli-vegiulia.it", 2, false}, + {1, "friuli-venezia-giulia.it", 2, false}, + {1, "friuli-veneziagiulia.it", 2, false}, + {1, "friuli-vgiulia.it", 2, false}, + {1, "friuliv-giulia.it", 2, false}, + {1, "friulive-giulia.it", 2, false}, + {1, "friulivegiulia.it", 2, false}, + {1, "friulivenezia-giulia.it", 2, false}, + {1, "friuliveneziagiulia.it", 2, false}, + {1, "friulivgiulia.it", 2, false}, + {1, "fvg.it", 2, false}, + {1, "laz.it", 2, false}, + {1, "lazio.it", 2, false}, + {1, "lig.it", 2, false}, + {1, "liguria.it", 2, false}, + {1, "lom.it", 2, false}, + {1, "lombardia.it", 2, false}, + {1, "lombardy.it", 2, false}, + {1, "lucania.it", 2, false}, + {1, "mar.it", 2, false}, + {1, "marche.it", 2, false}, + {1, "mol.it", 2, false}, + {1, "molise.it", 2, false}, + {1, "piedmont.it", 2, false}, + {1, "piemonte.it", 2, false}, + {1, "pmn.it", 2, false}, + {1, "pug.it", 2, false}, + {1, "puglia.it", 2, false}, + {1, "sar.it", 2, false}, + {1, "sardegna.it", 2, false}, + {1, "sardinia.it", 2, false}, + {1, "sic.it", 2, false}, + {1, "sicilia.it", 2, false}, + {1, "sicily.it", 2, false}, + {1, "taa.it", 2, false}, + {1, "tos.it", 2, false}, + {1, "toscana.it", 2, false}, + {1, "trentin-sud-tirol.it", 2, false}, + {1, "xn--trentin-sd-tirol-rzb.it", 2, false}, + {1, "trentin-sudtirol.it", 2, false}, + {1, "xn--trentin-sdtirol-7vb.it", 2, false}, + {1, "trentin-sued-tirol.it", 2, false}, + {1, "trentin-suedtirol.it", 2, false}, + {1, "trentino-a-adige.it", 2, false}, + {1, "trentino-aadige.it", 2, false}, + {1, "trentino-alto-adige.it", 2, false}, + {1, "trentino-altoadige.it", 2, false}, + {1, "trentino-s-tirol.it", 2, false}, + {1, "trentino-stirol.it", 2, false}, + {1, "trentino-sud-tirol.it", 2, false}, + {1, "xn--trentino-sd-tirol-c3b.it", 2, false}, + {1, "trentino-sudtirol.it", 2, false}, + {1, "xn--trentino-sdtirol-szb.it", 2, false}, + {1, "trentino-sued-tirol.it", 2, false}, + {1, "trentino-suedtirol.it", 2, false}, + {1, "trentino.it", 2, false}, + {1, "trentinoa-adige.it", 2, false}, + {1, "trentinoaadige.it", 2, false}, + {1, "trentinoalto-adige.it", 2, false}, + {1, "trentinoaltoadige.it", 2, false}, + {1, "trentinos-tirol.it", 2, false}, + {1, "trentinostirol.it", 2, false}, + {1, 
"trentinosud-tirol.it", 2, false}, + {1, "xn--trentinosd-tirol-rzb.it", 2, false}, + {1, "trentinosudtirol.it", 2, false}, + {1, "xn--trentinosdtirol-7vb.it", 2, false}, + {1, "trentinosued-tirol.it", 2, false}, + {1, "trentinosuedtirol.it", 2, false}, + {1, "trentinsud-tirol.it", 2, false}, + {1, "xn--trentinsd-tirol-6vb.it", 2, false}, + {1, "trentinsudtirol.it", 2, false}, + {1, "xn--trentinsdtirol-nsb.it", 2, false}, + {1, "trentinsued-tirol.it", 2, false}, + {1, "trentinsuedtirol.it", 2, false}, + {1, "tuscany.it", 2, false}, + {1, "umb.it", 2, false}, + {1, "umbria.it", 2, false}, + {1, "val-d-aosta.it", 2, false}, + {1, "val-daosta.it", 2, false}, + {1, "vald-aosta.it", 2, false}, + {1, "valdaosta.it", 2, false}, + {1, "valle-aosta.it", 2, false}, + {1, "valle-d-aosta.it", 2, false}, + {1, "valle-daosta.it", 2, false}, + {1, "valleaosta.it", 2, false}, + {1, "valled-aosta.it", 2, false}, + {1, "valledaosta.it", 2, false}, + {1, "vallee-aoste.it", 2, false}, + {1, "xn--valle-aoste-ebb.it", 2, false}, + {1, "vallee-d-aoste.it", 2, false}, + {1, "xn--valle-d-aoste-ehb.it", 2, false}, + {1, "valleeaoste.it", 2, false}, + {1, "xn--valleaoste-e7a.it", 2, false}, + {1, "valleedaoste.it", 2, false}, + {1, "xn--valledaoste-ebb.it", 2, false}, + {1, "vao.it", 2, false}, + {1, "vda.it", 2, false}, + {1, "ven.it", 2, false}, + {1, "veneto.it", 2, false}, + {1, "ag.it", 2, false}, + {1, "agrigento.it", 2, false}, + {1, "al.it", 2, false}, + {1, "alessandria.it", 2, false}, + {1, "alto-adige.it", 2, false}, + {1, "altoadige.it", 2, false}, + {1, "an.it", 2, false}, + {1, "ancona.it", 2, false}, + {1, "andria-barletta-trani.it", 2, false}, + {1, "andria-trani-barletta.it", 2, false}, + {1, "andriabarlettatrani.it", 2, false}, + {1, "andriatranibarletta.it", 2, false}, + {1, "ao.it", 2, false}, + {1, "aosta.it", 2, false}, + {1, "aoste.it", 2, false}, + {1, "ap.it", 2, false}, + {1, "aq.it", 2, false}, + {1, "aquila.it", 2, false}, + {1, "ar.it", 2, false}, + {1, "arezzo.it", 2, false}, + {1, "ascoli-piceno.it", 2, false}, + {1, "ascolipiceno.it", 2, false}, + {1, "asti.it", 2, false}, + {1, "at.it", 2, false}, + {1, "av.it", 2, false}, + {1, "avellino.it", 2, false}, + {1, "ba.it", 2, false}, + {1, "balsan-sudtirol.it", 2, false}, + {1, "xn--balsan-sdtirol-nsb.it", 2, false}, + {1, "balsan-suedtirol.it", 2, false}, + {1, "balsan.it", 2, false}, + {1, "bari.it", 2, false}, + {1, "barletta-trani-andria.it", 2, false}, + {1, "barlettatraniandria.it", 2, false}, + {1, "belluno.it", 2, false}, + {1, "benevento.it", 2, false}, + {1, "bergamo.it", 2, false}, + {1, "bg.it", 2, false}, + {1, "bi.it", 2, false}, + {1, "biella.it", 2, false}, + {1, "bl.it", 2, false}, + {1, "bn.it", 2, false}, + {1, "bo.it", 2, false}, + {1, "bologna.it", 2, false}, + {1, "bolzano-altoadige.it", 2, false}, + {1, "bolzano.it", 2, false}, + {1, "bozen-sudtirol.it", 2, false}, + {1, "xn--bozen-sdtirol-2ob.it", 2, false}, + {1, "bozen-suedtirol.it", 2, false}, + {1, "bozen.it", 2, false}, + {1, "br.it", 2, false}, + {1, "brescia.it", 2, false}, + {1, "brindisi.it", 2, false}, + {1, "bs.it", 2, false}, + {1, "bt.it", 2, false}, + {1, "bulsan-sudtirol.it", 2, false}, + {1, "xn--bulsan-sdtirol-nsb.it", 2, false}, + {1, "bulsan-suedtirol.it", 2, false}, + {1, "bulsan.it", 2, false}, + {1, "bz.it", 2, false}, + {1, "ca.it", 2, false}, + {1, "cagliari.it", 2, false}, + {1, "caltanissetta.it", 2, false}, + {1, "campidano-medio.it", 2, false}, + {1, "campidanomedio.it", 2, false}, + {1, "campobasso.it", 2, false}, + {1, 
"carbonia-iglesias.it", 2, false}, + {1, "carboniaiglesias.it", 2, false}, + {1, "carrara-massa.it", 2, false}, + {1, "carraramassa.it", 2, false}, + {1, "caserta.it", 2, false}, + {1, "catania.it", 2, false}, + {1, "catanzaro.it", 2, false}, + {1, "cb.it", 2, false}, + {1, "ce.it", 2, false}, + {1, "cesena-forli.it", 2, false}, + {1, "xn--cesena-forl-mcb.it", 2, false}, + {1, "cesenaforli.it", 2, false}, + {1, "xn--cesenaforl-i8a.it", 2, false}, + {1, "ch.it", 2, false}, + {1, "chieti.it", 2, false}, + {1, "ci.it", 2, false}, + {1, "cl.it", 2, false}, + {1, "cn.it", 2, false}, + {1, "co.it", 2, false}, + {1, "como.it", 2, false}, + {1, "cosenza.it", 2, false}, + {1, "cr.it", 2, false}, + {1, "cremona.it", 2, false}, + {1, "crotone.it", 2, false}, + {1, "cs.it", 2, false}, + {1, "ct.it", 2, false}, + {1, "cuneo.it", 2, false}, + {1, "cz.it", 2, false}, + {1, "dell-ogliastra.it", 2, false}, + {1, "dellogliastra.it", 2, false}, + {1, "en.it", 2, false}, + {1, "enna.it", 2, false}, + {1, "fc.it", 2, false}, + {1, "fe.it", 2, false}, + {1, "fermo.it", 2, false}, + {1, "ferrara.it", 2, false}, + {1, "fg.it", 2, false}, + {1, "fi.it", 2, false}, + {1, "firenze.it", 2, false}, + {1, "florence.it", 2, false}, + {1, "fm.it", 2, false}, + {1, "foggia.it", 2, false}, + {1, "forli-cesena.it", 2, false}, + {1, "xn--forl-cesena-fcb.it", 2, false}, + {1, "forlicesena.it", 2, false}, + {1, "xn--forlcesena-c8a.it", 2, false}, + {1, "fr.it", 2, false}, + {1, "frosinone.it", 2, false}, + {1, "ge.it", 2, false}, + {1, "genoa.it", 2, false}, + {1, "genova.it", 2, false}, + {1, "go.it", 2, false}, + {1, "gorizia.it", 2, false}, + {1, "gr.it", 2, false}, + {1, "grosseto.it", 2, false}, + {1, "iglesias-carbonia.it", 2, false}, + {1, "iglesiascarbonia.it", 2, false}, + {1, "im.it", 2, false}, + {1, "imperia.it", 2, false}, + {1, "is.it", 2, false}, + {1, "isernia.it", 2, false}, + {1, "kr.it", 2, false}, + {1, "la-spezia.it", 2, false}, + {1, "laquila.it", 2, false}, + {1, "laspezia.it", 2, false}, + {1, "latina.it", 2, false}, + {1, "lc.it", 2, false}, + {1, "le.it", 2, false}, + {1, "lecce.it", 2, false}, + {1, "lecco.it", 2, false}, + {1, "li.it", 2, false}, + {1, "livorno.it", 2, false}, + {1, "lo.it", 2, false}, + {1, "lodi.it", 2, false}, + {1, "lt.it", 2, false}, + {1, "lu.it", 2, false}, + {1, "lucca.it", 2, false}, + {1, "macerata.it", 2, false}, + {1, "mantova.it", 2, false}, + {1, "massa-carrara.it", 2, false}, + {1, "massacarrara.it", 2, false}, + {1, "matera.it", 2, false}, + {1, "mb.it", 2, false}, + {1, "mc.it", 2, false}, + {1, "me.it", 2, false}, + {1, "medio-campidano.it", 2, false}, + {1, "mediocampidano.it", 2, false}, + {1, "messina.it", 2, false}, + {1, "mi.it", 2, false}, + {1, "milan.it", 2, false}, + {1, "milano.it", 2, false}, + {1, "mn.it", 2, false}, + {1, "mo.it", 2, false}, + {1, "modena.it", 2, false}, + {1, "monza-brianza.it", 2, false}, + {1, "monza-e-della-brianza.it", 2, false}, + {1, "monza.it", 2, false}, + {1, "monzabrianza.it", 2, false}, + {1, "monzaebrianza.it", 2, false}, + {1, "monzaedellabrianza.it", 2, false}, + {1, "ms.it", 2, false}, + {1, "mt.it", 2, false}, + {1, "na.it", 2, false}, + {1, "naples.it", 2, false}, + {1, "napoli.it", 2, false}, + {1, "no.it", 2, false}, + {1, "novara.it", 2, false}, + {1, "nu.it", 2, false}, + {1, "nuoro.it", 2, false}, + {1, "og.it", 2, false}, + {1, "ogliastra.it", 2, false}, + {1, "olbia-tempio.it", 2, false}, + {1, "olbiatempio.it", 2, false}, + {1, "or.it", 2, false}, + {1, "oristano.it", 2, false}, + {1, "ot.it", 2, false}, + {1, 
"pa.it", 2, false}, + {1, "padova.it", 2, false}, + {1, "padua.it", 2, false}, + {1, "palermo.it", 2, false}, + {1, "parma.it", 2, false}, + {1, "pavia.it", 2, false}, + {1, "pc.it", 2, false}, + {1, "pd.it", 2, false}, + {1, "pe.it", 2, false}, + {1, "perugia.it", 2, false}, + {1, "pesaro-urbino.it", 2, false}, + {1, "pesarourbino.it", 2, false}, + {1, "pescara.it", 2, false}, + {1, "pg.it", 2, false}, + {1, "pi.it", 2, false}, + {1, "piacenza.it", 2, false}, + {1, "pisa.it", 2, false}, + {1, "pistoia.it", 2, false}, + {1, "pn.it", 2, false}, + {1, "po.it", 2, false}, + {1, "pordenone.it", 2, false}, + {1, "potenza.it", 2, false}, + {1, "pr.it", 2, false}, + {1, "prato.it", 2, false}, + {1, "pt.it", 2, false}, + {1, "pu.it", 2, false}, + {1, "pv.it", 2, false}, + {1, "pz.it", 2, false}, + {1, "ra.it", 2, false}, + {1, "ragusa.it", 2, false}, + {1, "ravenna.it", 2, false}, + {1, "rc.it", 2, false}, + {1, "re.it", 2, false}, + {1, "reggio-calabria.it", 2, false}, + {1, "reggio-emilia.it", 2, false}, + {1, "reggiocalabria.it", 2, false}, + {1, "reggioemilia.it", 2, false}, + {1, "rg.it", 2, false}, + {1, "ri.it", 2, false}, + {1, "rieti.it", 2, false}, + {1, "rimini.it", 2, false}, + {1, "rm.it", 2, false}, + {1, "rn.it", 2, false}, + {1, "ro.it", 2, false}, + {1, "roma.it", 2, false}, + {1, "rome.it", 2, false}, + {1, "rovigo.it", 2, false}, + {1, "sa.it", 2, false}, + {1, "salerno.it", 2, false}, + {1, "sassari.it", 2, false}, + {1, "savona.it", 2, false}, + {1, "si.it", 2, false}, + {1, "siena.it", 2, false}, + {1, "siracusa.it", 2, false}, + {1, "so.it", 2, false}, + {1, "sondrio.it", 2, false}, + {1, "sp.it", 2, false}, + {1, "sr.it", 2, false}, + {1, "ss.it", 2, false}, + {1, "suedtirol.it", 2, false}, + {1, "xn--sdtirol-n2a.it", 2, false}, + {1, "sv.it", 2, false}, + {1, "ta.it", 2, false}, + {1, "taranto.it", 2, false}, + {1, "te.it", 2, false}, + {1, "tempio-olbia.it", 2, false}, + {1, "tempioolbia.it", 2, false}, + {1, "teramo.it", 2, false}, + {1, "terni.it", 2, false}, + {1, "tn.it", 2, false}, + {1, "to.it", 2, false}, + {1, "torino.it", 2, false}, + {1, "tp.it", 2, false}, + {1, "tr.it", 2, false}, + {1, "trani-andria-barletta.it", 2, false}, + {1, "trani-barletta-andria.it", 2, false}, + {1, "traniandriabarletta.it", 2, false}, + {1, "tranibarlettaandria.it", 2, false}, + {1, "trapani.it", 2, false}, + {1, "trento.it", 2, false}, + {1, "treviso.it", 2, false}, + {1, "trieste.it", 2, false}, + {1, "ts.it", 2, false}, + {1, "turin.it", 2, false}, + {1, "tv.it", 2, false}, + {1, "ud.it", 2, false}, + {1, "udine.it", 2, false}, + {1, "urbino-pesaro.it", 2, false}, + {1, "urbinopesaro.it", 2, false}, + {1, "va.it", 2, false}, + {1, "varese.it", 2, false}, + {1, "vb.it", 2, false}, + {1, "vc.it", 2, false}, + {1, "ve.it", 2, false}, + {1, "venezia.it", 2, false}, + {1, "venice.it", 2, false}, + {1, "verbania.it", 2, false}, + {1, "vercelli.it", 2, false}, + {1, "verona.it", 2, false}, + {1, "vi.it", 2, false}, + {1, "vibo-valentia.it", 2, false}, + {1, "vibovalentia.it", 2, false}, + {1, "vicenza.it", 2, false}, + {1, "viterbo.it", 2, false}, + {1, "vr.it", 2, false}, + {1, "vs.it", 2, false}, + {1, "vt.it", 2, false}, + {1, "vv.it", 2, false}, + {1, "je", 1, false}, + {1, "co.je", 2, false}, + {1, "net.je", 2, false}, + {1, "org.je", 2, false}, + {2, "jm", 2, false}, + {1, "jo", 1, false}, + {1, "com.jo", 2, false}, + {1, "org.jo", 2, false}, + {1, "net.jo", 2, false}, + {1, "edu.jo", 2, false}, + {1, "sch.jo", 2, false}, + {1, "gov.jo", 2, false}, + {1, "mil.jo", 2, false}, + {1, 
"name.jo", 2, false}, + {1, "jobs", 1, false}, + {1, "jp", 1, false}, + {1, "ac.jp", 2, false}, + {1, "ad.jp", 2, false}, + {1, "co.jp", 2, false}, + {1, "ed.jp", 2, false}, + {1, "go.jp", 2, false}, + {1, "gr.jp", 2, false}, + {1, "lg.jp", 2, false}, + {1, "ne.jp", 2, false}, + {1, "or.jp", 2, false}, + {1, "aichi.jp", 2, false}, + {1, "akita.jp", 2, false}, + {1, "aomori.jp", 2, false}, + {1, "chiba.jp", 2, false}, + {1, "ehime.jp", 2, false}, + {1, "fukui.jp", 2, false}, + {1, "fukuoka.jp", 2, false}, + {1, "fukushima.jp", 2, false}, + {1, "gifu.jp", 2, false}, + {1, "gunma.jp", 2, false}, + {1, "hiroshima.jp", 2, false}, + {1, "hokkaido.jp", 2, false}, + {1, "hyogo.jp", 2, false}, + {1, "ibaraki.jp", 2, false}, + {1, "ishikawa.jp", 2, false}, + {1, "iwate.jp", 2, false}, + {1, "kagawa.jp", 2, false}, + {1, "kagoshima.jp", 2, false}, + {1, "kanagawa.jp", 2, false}, + {1, "kochi.jp", 2, false}, + {1, "kumamoto.jp", 2, false}, + {1, "kyoto.jp", 2, false}, + {1, "mie.jp", 2, false}, + {1, "miyagi.jp", 2, false}, + {1, "miyazaki.jp", 2, false}, + {1, "nagano.jp", 2, false}, + {1, "nagasaki.jp", 2, false}, + {1, "nara.jp", 2, false}, + {1, "niigata.jp", 2, false}, + {1, "oita.jp", 2, false}, + {1, "okayama.jp", 2, false}, + {1, "okinawa.jp", 2, false}, + {1, "osaka.jp", 2, false}, + {1, "saga.jp", 2, false}, + {1, "saitama.jp", 2, false}, + {1, "shiga.jp", 2, false}, + {1, "shimane.jp", 2, false}, + {1, "shizuoka.jp", 2, false}, + {1, "tochigi.jp", 2, false}, + {1, "tokushima.jp", 2, false}, + {1, "tokyo.jp", 2, false}, + {1, "tottori.jp", 2, false}, + {1, "toyama.jp", 2, false}, + {1, "wakayama.jp", 2, false}, + {1, "yamagata.jp", 2, false}, + {1, "yamaguchi.jp", 2, false}, + {1, "yamanashi.jp", 2, false}, + {1, "xn--4pvxs.jp", 2, false}, + {1, "xn--vgu402c.jp", 2, false}, + {1, "xn--c3s14m.jp", 2, false}, + {1, "xn--f6qx53a.jp", 2, false}, + {1, "xn--8pvr4u.jp", 2, false}, + {1, "xn--uist22h.jp", 2, false}, + {1, "xn--djrs72d6uy.jp", 2, false}, + {1, "xn--mkru45i.jp", 2, false}, + {1, "xn--0trq7p7nn.jp", 2, false}, + {1, "xn--8ltr62k.jp", 2, false}, + {1, "xn--2m4a15e.jp", 2, false}, + {1, "xn--efvn9s.jp", 2, false}, + {1, "xn--32vp30h.jp", 2, false}, + {1, "xn--4it797k.jp", 2, false}, + {1, "xn--1lqs71d.jp", 2, false}, + {1, "xn--5rtp49c.jp", 2, false}, + {1, "xn--5js045d.jp", 2, false}, + {1, "xn--ehqz56n.jp", 2, false}, + {1, "xn--1lqs03n.jp", 2, false}, + {1, "xn--qqqt11m.jp", 2, false}, + {1, "xn--kbrq7o.jp", 2, false}, + {1, "xn--pssu33l.jp", 2, false}, + {1, "xn--ntsq17g.jp", 2, false}, + {1, "xn--uisz3g.jp", 2, false}, + {1, "xn--6btw5a.jp", 2, false}, + {1, "xn--1ctwo.jp", 2, false}, + {1, "xn--6orx2r.jp", 2, false}, + {1, "xn--rht61e.jp", 2, false}, + {1, "xn--rht27z.jp", 2, false}, + {1, "xn--djty4k.jp", 2, false}, + {1, "xn--nit225k.jp", 2, false}, + {1, "xn--rht3d.jp", 2, false}, + {1, "xn--klty5x.jp", 2, false}, + {1, "xn--kltx9a.jp", 2, false}, + {1, "xn--kltp7d.jp", 2, false}, + {1, "xn--uuwu58a.jp", 2, false}, + {1, "xn--zbx025d.jp", 2, false}, + {1, "xn--ntso0iqx3a.jp", 2, false}, + {1, "xn--elqq16h.jp", 2, false}, + {1, "xn--4it168d.jp", 2, false}, + {1, "xn--klt787d.jp", 2, false}, + {1, "xn--rny31h.jp", 2, false}, + {1, "xn--7t0a264c.jp", 2, false}, + {1, "xn--5rtq34k.jp", 2, false}, + {1, "xn--k7yn95e.jp", 2, false}, + {1, "xn--tor131o.jp", 2, false}, + {1, "xn--d5qv7z876c.jp", 2, false}, + {2, "kawasaki.jp", 3, false}, + {2, "kitakyushu.jp", 3, false}, + {2, "kobe.jp", 3, false}, + {2, "nagoya.jp", 3, false}, + {2, "sapporo.jp", 3, false}, + {2, "sendai.jp", 3, 
false}, + {2, "yokohama.jp", 3, false}, + {3, "city.kawasaki.jp", 3, false}, + {3, "city.kitakyushu.jp", 3, false}, + {3, "city.kobe.jp", 3, false}, + {3, "city.nagoya.jp", 3, false}, + {3, "city.sapporo.jp", 3, false}, + {3, "city.sendai.jp", 3, false}, + {3, "city.yokohama.jp", 3, false}, + {1, "aisai.aichi.jp", 3, false}, + {1, "ama.aichi.jp", 3, false}, + {1, "anjo.aichi.jp", 3, false}, + {1, "asuke.aichi.jp", 3, false}, + {1, "chiryu.aichi.jp", 3, false}, + {1, "chita.aichi.jp", 3, false}, + {1, "fuso.aichi.jp", 3, false}, + {1, "gamagori.aichi.jp", 3, false}, + {1, "handa.aichi.jp", 3, false}, + {1, "hazu.aichi.jp", 3, false}, + {1, "hekinan.aichi.jp", 3, false}, + {1, "higashiura.aichi.jp", 3, false}, + {1, "ichinomiya.aichi.jp", 3, false}, + {1, "inazawa.aichi.jp", 3, false}, + {1, "inuyama.aichi.jp", 3, false}, + {1, "isshiki.aichi.jp", 3, false}, + {1, "iwakura.aichi.jp", 3, false}, + {1, "kanie.aichi.jp", 3, false}, + {1, "kariya.aichi.jp", 3, false}, + {1, "kasugai.aichi.jp", 3, false}, + {1, "kira.aichi.jp", 3, false}, + {1, "kiyosu.aichi.jp", 3, false}, + {1, "komaki.aichi.jp", 3, false}, + {1, "konan.aichi.jp", 3, false}, + {1, "kota.aichi.jp", 3, false}, + {1, "mihama.aichi.jp", 3, false}, + {1, "miyoshi.aichi.jp", 3, false}, + {1, "nishio.aichi.jp", 3, false}, + {1, "nisshin.aichi.jp", 3, false}, + {1, "obu.aichi.jp", 3, false}, + {1, "oguchi.aichi.jp", 3, false}, + {1, "oharu.aichi.jp", 3, false}, + {1, "okazaki.aichi.jp", 3, false}, + {1, "owariasahi.aichi.jp", 3, false}, + {1, "seto.aichi.jp", 3, false}, + {1, "shikatsu.aichi.jp", 3, false}, + {1, "shinshiro.aichi.jp", 3, false}, + {1, "shitara.aichi.jp", 3, false}, + {1, "tahara.aichi.jp", 3, false}, + {1, "takahama.aichi.jp", 3, false}, + {1, "tobishima.aichi.jp", 3, false}, + {1, "toei.aichi.jp", 3, false}, + {1, "togo.aichi.jp", 3, false}, + {1, "tokai.aichi.jp", 3, false}, + {1, "tokoname.aichi.jp", 3, false}, + {1, "toyoake.aichi.jp", 3, false}, + {1, "toyohashi.aichi.jp", 3, false}, + {1, "toyokawa.aichi.jp", 3, false}, + {1, "toyone.aichi.jp", 3, false}, + {1, "toyota.aichi.jp", 3, false}, + {1, "tsushima.aichi.jp", 3, false}, + {1, "yatomi.aichi.jp", 3, false}, + {1, "akita.akita.jp", 3, false}, + {1, "daisen.akita.jp", 3, false}, + {1, "fujisato.akita.jp", 3, false}, + {1, "gojome.akita.jp", 3, false}, + {1, "hachirogata.akita.jp", 3, false}, + {1, "happou.akita.jp", 3, false}, + {1, "higashinaruse.akita.jp", 3, false}, + {1, "honjo.akita.jp", 3, false}, + {1, "honjyo.akita.jp", 3, false}, + {1, "ikawa.akita.jp", 3, false}, + {1, "kamikoani.akita.jp", 3, false}, + {1, "kamioka.akita.jp", 3, false}, + {1, "katagami.akita.jp", 3, false}, + {1, "kazuno.akita.jp", 3, false}, + {1, "kitaakita.akita.jp", 3, false}, + {1, "kosaka.akita.jp", 3, false}, + {1, "kyowa.akita.jp", 3, false}, + {1, "misato.akita.jp", 3, false}, + {1, "mitane.akita.jp", 3, false}, + {1, "moriyoshi.akita.jp", 3, false}, + {1, "nikaho.akita.jp", 3, false}, + {1, "noshiro.akita.jp", 3, false}, + {1, "odate.akita.jp", 3, false}, + {1, "oga.akita.jp", 3, false}, + {1, "ogata.akita.jp", 3, false}, + {1, "semboku.akita.jp", 3, false}, + {1, "yokote.akita.jp", 3, false}, + {1, "yurihonjo.akita.jp", 3, false}, + {1, "aomori.aomori.jp", 3, false}, + {1, "gonohe.aomori.jp", 3, false}, + {1, "hachinohe.aomori.jp", 3, false}, + {1, "hashikami.aomori.jp", 3, false}, + {1, "hiranai.aomori.jp", 3, false}, + {1, "hirosaki.aomori.jp", 3, false}, + {1, "itayanagi.aomori.jp", 3, false}, + {1, "kuroishi.aomori.jp", 3, false}, + {1, "misawa.aomori.jp", 3, false}, 
+ {1, "mutsu.aomori.jp", 3, false}, + {1, "nakadomari.aomori.jp", 3, false}, + {1, "noheji.aomori.jp", 3, false}, + {1, "oirase.aomori.jp", 3, false}, + {1, "owani.aomori.jp", 3, false}, + {1, "rokunohe.aomori.jp", 3, false}, + {1, "sannohe.aomori.jp", 3, false}, + {1, "shichinohe.aomori.jp", 3, false}, + {1, "shingo.aomori.jp", 3, false}, + {1, "takko.aomori.jp", 3, false}, + {1, "towada.aomori.jp", 3, false}, + {1, "tsugaru.aomori.jp", 3, false}, + {1, "tsuruta.aomori.jp", 3, false}, + {1, "abiko.chiba.jp", 3, false}, + {1, "asahi.chiba.jp", 3, false}, + {1, "chonan.chiba.jp", 3, false}, + {1, "chosei.chiba.jp", 3, false}, + {1, "choshi.chiba.jp", 3, false}, + {1, "chuo.chiba.jp", 3, false}, + {1, "funabashi.chiba.jp", 3, false}, + {1, "futtsu.chiba.jp", 3, false}, + {1, "hanamigawa.chiba.jp", 3, false}, + {1, "ichihara.chiba.jp", 3, false}, + {1, "ichikawa.chiba.jp", 3, false}, + {1, "ichinomiya.chiba.jp", 3, false}, + {1, "inzai.chiba.jp", 3, false}, + {1, "isumi.chiba.jp", 3, false}, + {1, "kamagaya.chiba.jp", 3, false}, + {1, "kamogawa.chiba.jp", 3, false}, + {1, "kashiwa.chiba.jp", 3, false}, + {1, "katori.chiba.jp", 3, false}, + {1, "katsuura.chiba.jp", 3, false}, + {1, "kimitsu.chiba.jp", 3, false}, + {1, "kisarazu.chiba.jp", 3, false}, + {1, "kozaki.chiba.jp", 3, false}, + {1, "kujukuri.chiba.jp", 3, false}, + {1, "kyonan.chiba.jp", 3, false}, + {1, "matsudo.chiba.jp", 3, false}, + {1, "midori.chiba.jp", 3, false}, + {1, "mihama.chiba.jp", 3, false}, + {1, "minamiboso.chiba.jp", 3, false}, + {1, "mobara.chiba.jp", 3, false}, + {1, "mutsuzawa.chiba.jp", 3, false}, + {1, "nagara.chiba.jp", 3, false}, + {1, "nagareyama.chiba.jp", 3, false}, + {1, "narashino.chiba.jp", 3, false}, + {1, "narita.chiba.jp", 3, false}, + {1, "noda.chiba.jp", 3, false}, + {1, "oamishirasato.chiba.jp", 3, false}, + {1, "omigawa.chiba.jp", 3, false}, + {1, "onjuku.chiba.jp", 3, false}, + {1, "otaki.chiba.jp", 3, false}, + {1, "sakae.chiba.jp", 3, false}, + {1, "sakura.chiba.jp", 3, false}, + {1, "shimofusa.chiba.jp", 3, false}, + {1, "shirako.chiba.jp", 3, false}, + {1, "shiroi.chiba.jp", 3, false}, + {1, "shisui.chiba.jp", 3, false}, + {1, "sodegaura.chiba.jp", 3, false}, + {1, "sosa.chiba.jp", 3, false}, + {1, "tako.chiba.jp", 3, false}, + {1, "tateyama.chiba.jp", 3, false}, + {1, "togane.chiba.jp", 3, false}, + {1, "tohnosho.chiba.jp", 3, false}, + {1, "tomisato.chiba.jp", 3, false}, + {1, "urayasu.chiba.jp", 3, false}, + {1, "yachimata.chiba.jp", 3, false}, + {1, "yachiyo.chiba.jp", 3, false}, + {1, "yokaichiba.chiba.jp", 3, false}, + {1, "yokoshibahikari.chiba.jp", 3, false}, + {1, "yotsukaido.chiba.jp", 3, false}, + {1, "ainan.ehime.jp", 3, false}, + {1, "honai.ehime.jp", 3, false}, + {1, "ikata.ehime.jp", 3, false}, + {1, "imabari.ehime.jp", 3, false}, + {1, "iyo.ehime.jp", 3, false}, + {1, "kamijima.ehime.jp", 3, false}, + {1, "kihoku.ehime.jp", 3, false}, + {1, "kumakogen.ehime.jp", 3, false}, + {1, "masaki.ehime.jp", 3, false}, + {1, "matsuno.ehime.jp", 3, false}, + {1, "matsuyama.ehime.jp", 3, false}, + {1, "namikata.ehime.jp", 3, false}, + {1, "niihama.ehime.jp", 3, false}, + {1, "ozu.ehime.jp", 3, false}, + {1, "saijo.ehime.jp", 3, false}, + {1, "seiyo.ehime.jp", 3, false}, + {1, "shikokuchuo.ehime.jp", 3, false}, + {1, "tobe.ehime.jp", 3, false}, + {1, "toon.ehime.jp", 3, false}, + {1, "uchiko.ehime.jp", 3, false}, + {1, "uwajima.ehime.jp", 3, false}, + {1, "yawatahama.ehime.jp", 3, false}, + {1, "echizen.fukui.jp", 3, false}, + {1, "eiheiji.fukui.jp", 3, false}, + {1, "fukui.fukui.jp", 3, 
false}, + {1, "ikeda.fukui.jp", 3, false}, + {1, "katsuyama.fukui.jp", 3, false}, + {1, "mihama.fukui.jp", 3, false}, + {1, "minamiechizen.fukui.jp", 3, false}, + {1, "obama.fukui.jp", 3, false}, + {1, "ohi.fukui.jp", 3, false}, + {1, "ono.fukui.jp", 3, false}, + {1, "sabae.fukui.jp", 3, false}, + {1, "sakai.fukui.jp", 3, false}, + {1, "takahama.fukui.jp", 3, false}, + {1, "tsuruga.fukui.jp", 3, false}, + {1, "wakasa.fukui.jp", 3, false}, + {1, "ashiya.fukuoka.jp", 3, false}, + {1, "buzen.fukuoka.jp", 3, false}, + {1, "chikugo.fukuoka.jp", 3, false}, + {1, "chikuho.fukuoka.jp", 3, false}, + {1, "chikujo.fukuoka.jp", 3, false}, + {1, "chikushino.fukuoka.jp", 3, false}, + {1, "chikuzen.fukuoka.jp", 3, false}, + {1, "chuo.fukuoka.jp", 3, false}, + {1, "dazaifu.fukuoka.jp", 3, false}, + {1, "fukuchi.fukuoka.jp", 3, false}, + {1, "hakata.fukuoka.jp", 3, false}, + {1, "higashi.fukuoka.jp", 3, false}, + {1, "hirokawa.fukuoka.jp", 3, false}, + {1, "hisayama.fukuoka.jp", 3, false}, + {1, "iizuka.fukuoka.jp", 3, false}, + {1, "inatsuki.fukuoka.jp", 3, false}, + {1, "kaho.fukuoka.jp", 3, false}, + {1, "kasuga.fukuoka.jp", 3, false}, + {1, "kasuya.fukuoka.jp", 3, false}, + {1, "kawara.fukuoka.jp", 3, false}, + {1, "keisen.fukuoka.jp", 3, false}, + {1, "koga.fukuoka.jp", 3, false}, + {1, "kurate.fukuoka.jp", 3, false}, + {1, "kurogi.fukuoka.jp", 3, false}, + {1, "kurume.fukuoka.jp", 3, false}, + {1, "minami.fukuoka.jp", 3, false}, + {1, "miyako.fukuoka.jp", 3, false}, + {1, "miyama.fukuoka.jp", 3, false}, + {1, "miyawaka.fukuoka.jp", 3, false}, + {1, "mizumaki.fukuoka.jp", 3, false}, + {1, "munakata.fukuoka.jp", 3, false}, + {1, "nakagawa.fukuoka.jp", 3, false}, + {1, "nakama.fukuoka.jp", 3, false}, + {1, "nishi.fukuoka.jp", 3, false}, + {1, "nogata.fukuoka.jp", 3, false}, + {1, "ogori.fukuoka.jp", 3, false}, + {1, "okagaki.fukuoka.jp", 3, false}, + {1, "okawa.fukuoka.jp", 3, false}, + {1, "oki.fukuoka.jp", 3, false}, + {1, "omuta.fukuoka.jp", 3, false}, + {1, "onga.fukuoka.jp", 3, false}, + {1, "onojo.fukuoka.jp", 3, false}, + {1, "oto.fukuoka.jp", 3, false}, + {1, "saigawa.fukuoka.jp", 3, false}, + {1, "sasaguri.fukuoka.jp", 3, false}, + {1, "shingu.fukuoka.jp", 3, false}, + {1, "shinyoshitomi.fukuoka.jp", 3, false}, + {1, "shonai.fukuoka.jp", 3, false}, + {1, "soeda.fukuoka.jp", 3, false}, + {1, "sue.fukuoka.jp", 3, false}, + {1, "tachiarai.fukuoka.jp", 3, false}, + {1, "tagawa.fukuoka.jp", 3, false}, + {1, "takata.fukuoka.jp", 3, false}, + {1, "toho.fukuoka.jp", 3, false}, + {1, "toyotsu.fukuoka.jp", 3, false}, + {1, "tsuiki.fukuoka.jp", 3, false}, + {1, "ukiha.fukuoka.jp", 3, false}, + {1, "umi.fukuoka.jp", 3, false}, + {1, "usui.fukuoka.jp", 3, false}, + {1, "yamada.fukuoka.jp", 3, false}, + {1, "yame.fukuoka.jp", 3, false}, + {1, "yanagawa.fukuoka.jp", 3, false}, + {1, "yukuhashi.fukuoka.jp", 3, false}, + {1, "aizubange.fukushima.jp", 3, false}, + {1, "aizumisato.fukushima.jp", 3, false}, + {1, "aizuwakamatsu.fukushima.jp", 3, false}, + {1, "asakawa.fukushima.jp", 3, false}, + {1, "bandai.fukushima.jp", 3, false}, + {1, "date.fukushima.jp", 3, false}, + {1, "fukushima.fukushima.jp", 3, false}, + {1, "furudono.fukushima.jp", 3, false}, + {1, "futaba.fukushima.jp", 3, false}, + {1, "hanawa.fukushima.jp", 3, false}, + {1, "higashi.fukushima.jp", 3, false}, + {1, "hirata.fukushima.jp", 3, false}, + {1, "hirono.fukushima.jp", 3, false}, + {1, "iitate.fukushima.jp", 3, false}, + {1, "inawashiro.fukushima.jp", 3, false}, + {1, "ishikawa.fukushima.jp", 3, false}, + {1, "iwaki.fukushima.jp", 3, false}, + 
{1, "izumizaki.fukushima.jp", 3, false}, + {1, "kagamiishi.fukushima.jp", 3, false}, + {1, "kaneyama.fukushima.jp", 3, false}, + {1, "kawamata.fukushima.jp", 3, false}, + {1, "kitakata.fukushima.jp", 3, false}, + {1, "kitashiobara.fukushima.jp", 3, false}, + {1, "koori.fukushima.jp", 3, false}, + {1, "koriyama.fukushima.jp", 3, false}, + {1, "kunimi.fukushima.jp", 3, false}, + {1, "miharu.fukushima.jp", 3, false}, + {1, "mishima.fukushima.jp", 3, false}, + {1, "namie.fukushima.jp", 3, false}, + {1, "nango.fukushima.jp", 3, false}, + {1, "nishiaizu.fukushima.jp", 3, false}, + {1, "nishigo.fukushima.jp", 3, false}, + {1, "okuma.fukushima.jp", 3, false}, + {1, "omotego.fukushima.jp", 3, false}, + {1, "ono.fukushima.jp", 3, false}, + {1, "otama.fukushima.jp", 3, false}, + {1, "samegawa.fukushima.jp", 3, false}, + {1, "shimogo.fukushima.jp", 3, false}, + {1, "shirakawa.fukushima.jp", 3, false}, + {1, "showa.fukushima.jp", 3, false}, + {1, "soma.fukushima.jp", 3, false}, + {1, "sukagawa.fukushima.jp", 3, false}, + {1, "taishin.fukushima.jp", 3, false}, + {1, "tamakawa.fukushima.jp", 3, false}, + {1, "tanagura.fukushima.jp", 3, false}, + {1, "tenei.fukushima.jp", 3, false}, + {1, "yabuki.fukushima.jp", 3, false}, + {1, "yamato.fukushima.jp", 3, false}, + {1, "yamatsuri.fukushima.jp", 3, false}, + {1, "yanaizu.fukushima.jp", 3, false}, + {1, "yugawa.fukushima.jp", 3, false}, + {1, "anpachi.gifu.jp", 3, false}, + {1, "ena.gifu.jp", 3, false}, + {1, "gifu.gifu.jp", 3, false}, + {1, "ginan.gifu.jp", 3, false}, + {1, "godo.gifu.jp", 3, false}, + {1, "gujo.gifu.jp", 3, false}, + {1, "hashima.gifu.jp", 3, false}, + {1, "hichiso.gifu.jp", 3, false}, + {1, "hida.gifu.jp", 3, false}, + {1, "higashishirakawa.gifu.jp", 3, false}, + {1, "ibigawa.gifu.jp", 3, false}, + {1, "ikeda.gifu.jp", 3, false}, + {1, "kakamigahara.gifu.jp", 3, false}, + {1, "kani.gifu.jp", 3, false}, + {1, "kasahara.gifu.jp", 3, false}, + {1, "kasamatsu.gifu.jp", 3, false}, + {1, "kawaue.gifu.jp", 3, false}, + {1, "kitagata.gifu.jp", 3, false}, + {1, "mino.gifu.jp", 3, false}, + {1, "minokamo.gifu.jp", 3, false}, + {1, "mitake.gifu.jp", 3, false}, + {1, "mizunami.gifu.jp", 3, false}, + {1, "motosu.gifu.jp", 3, false}, + {1, "nakatsugawa.gifu.jp", 3, false}, + {1, "ogaki.gifu.jp", 3, false}, + {1, "sakahogi.gifu.jp", 3, false}, + {1, "seki.gifu.jp", 3, false}, + {1, "sekigahara.gifu.jp", 3, false}, + {1, "shirakawa.gifu.jp", 3, false}, + {1, "tajimi.gifu.jp", 3, false}, + {1, "takayama.gifu.jp", 3, false}, + {1, "tarui.gifu.jp", 3, false}, + {1, "toki.gifu.jp", 3, false}, + {1, "tomika.gifu.jp", 3, false}, + {1, "wanouchi.gifu.jp", 3, false}, + {1, "yamagata.gifu.jp", 3, false}, + {1, "yaotsu.gifu.jp", 3, false}, + {1, "yoro.gifu.jp", 3, false}, + {1, "annaka.gunma.jp", 3, false}, + {1, "chiyoda.gunma.jp", 3, false}, + {1, "fujioka.gunma.jp", 3, false}, + {1, "higashiagatsuma.gunma.jp", 3, false}, + {1, "isesaki.gunma.jp", 3, false}, + {1, "itakura.gunma.jp", 3, false}, + {1, "kanna.gunma.jp", 3, false}, + {1, "kanra.gunma.jp", 3, false}, + {1, "katashina.gunma.jp", 3, false}, + {1, "kawaba.gunma.jp", 3, false}, + {1, "kiryu.gunma.jp", 3, false}, + {1, "kusatsu.gunma.jp", 3, false}, + {1, "maebashi.gunma.jp", 3, false}, + {1, "meiwa.gunma.jp", 3, false}, + {1, "midori.gunma.jp", 3, false}, + {1, "minakami.gunma.jp", 3, false}, + {1, "naganohara.gunma.jp", 3, false}, + {1, "nakanojo.gunma.jp", 3, false}, + {1, "nanmoku.gunma.jp", 3, false}, + {1, "numata.gunma.jp", 3, false}, + {1, "oizumi.gunma.jp", 3, false}, + {1, "ora.gunma.jp", 3, 
false}, + {1, "ota.gunma.jp", 3, false}, + {1, "shibukawa.gunma.jp", 3, false}, + {1, "shimonita.gunma.jp", 3, false}, + {1, "shinto.gunma.jp", 3, false}, + {1, "showa.gunma.jp", 3, false}, + {1, "takasaki.gunma.jp", 3, false}, + {1, "takayama.gunma.jp", 3, false}, + {1, "tamamura.gunma.jp", 3, false}, + {1, "tatebayashi.gunma.jp", 3, false}, + {1, "tomioka.gunma.jp", 3, false}, + {1, "tsukiyono.gunma.jp", 3, false}, + {1, "tsumagoi.gunma.jp", 3, false}, + {1, "ueno.gunma.jp", 3, false}, + {1, "yoshioka.gunma.jp", 3, false}, + {1, "asaminami.hiroshima.jp", 3, false}, + {1, "daiwa.hiroshima.jp", 3, false}, + {1, "etajima.hiroshima.jp", 3, false}, + {1, "fuchu.hiroshima.jp", 3, false}, + {1, "fukuyama.hiroshima.jp", 3, false}, + {1, "hatsukaichi.hiroshima.jp", 3, false}, + {1, "higashihiroshima.hiroshima.jp", 3, false}, + {1, "hongo.hiroshima.jp", 3, false}, + {1, "jinsekikogen.hiroshima.jp", 3, false}, + {1, "kaita.hiroshima.jp", 3, false}, + {1, "kui.hiroshima.jp", 3, false}, + {1, "kumano.hiroshima.jp", 3, false}, + {1, "kure.hiroshima.jp", 3, false}, + {1, "mihara.hiroshima.jp", 3, false}, + {1, "miyoshi.hiroshima.jp", 3, false}, + {1, "naka.hiroshima.jp", 3, false}, + {1, "onomichi.hiroshima.jp", 3, false}, + {1, "osakikamijima.hiroshima.jp", 3, false}, + {1, "otake.hiroshima.jp", 3, false}, + {1, "saka.hiroshima.jp", 3, false}, + {1, "sera.hiroshima.jp", 3, false}, + {1, "seranishi.hiroshima.jp", 3, false}, + {1, "shinichi.hiroshima.jp", 3, false}, + {1, "shobara.hiroshima.jp", 3, false}, + {1, "takehara.hiroshima.jp", 3, false}, + {1, "abashiri.hokkaido.jp", 3, false}, + {1, "abira.hokkaido.jp", 3, false}, + {1, "aibetsu.hokkaido.jp", 3, false}, + {1, "akabira.hokkaido.jp", 3, false}, + {1, "akkeshi.hokkaido.jp", 3, false}, + {1, "asahikawa.hokkaido.jp", 3, false}, + {1, "ashibetsu.hokkaido.jp", 3, false}, + {1, "ashoro.hokkaido.jp", 3, false}, + {1, "assabu.hokkaido.jp", 3, false}, + {1, "atsuma.hokkaido.jp", 3, false}, + {1, "bibai.hokkaido.jp", 3, false}, + {1, "biei.hokkaido.jp", 3, false}, + {1, "bifuka.hokkaido.jp", 3, false}, + {1, "bihoro.hokkaido.jp", 3, false}, + {1, "biratori.hokkaido.jp", 3, false}, + {1, "chippubetsu.hokkaido.jp", 3, false}, + {1, "chitose.hokkaido.jp", 3, false}, + {1, "date.hokkaido.jp", 3, false}, + {1, "ebetsu.hokkaido.jp", 3, false}, + {1, "embetsu.hokkaido.jp", 3, false}, + {1, "eniwa.hokkaido.jp", 3, false}, + {1, "erimo.hokkaido.jp", 3, false}, + {1, "esan.hokkaido.jp", 3, false}, + {1, "esashi.hokkaido.jp", 3, false}, + {1, "fukagawa.hokkaido.jp", 3, false}, + {1, "fukushima.hokkaido.jp", 3, false}, + {1, "furano.hokkaido.jp", 3, false}, + {1, "furubira.hokkaido.jp", 3, false}, + {1, "haboro.hokkaido.jp", 3, false}, + {1, "hakodate.hokkaido.jp", 3, false}, + {1, "hamatonbetsu.hokkaido.jp", 3, false}, + {1, "hidaka.hokkaido.jp", 3, false}, + {1, "higashikagura.hokkaido.jp", 3, false}, + {1, "higashikawa.hokkaido.jp", 3, false}, + {1, "hiroo.hokkaido.jp", 3, false}, + {1, "hokuryu.hokkaido.jp", 3, false}, + {1, "hokuto.hokkaido.jp", 3, false}, + {1, "honbetsu.hokkaido.jp", 3, false}, + {1, "horokanai.hokkaido.jp", 3, false}, + {1, "horonobe.hokkaido.jp", 3, false}, + {1, "ikeda.hokkaido.jp", 3, false}, + {1, "imakane.hokkaido.jp", 3, false}, + {1, "ishikari.hokkaido.jp", 3, false}, + {1, "iwamizawa.hokkaido.jp", 3, false}, + {1, "iwanai.hokkaido.jp", 3, false}, + {1, "kamifurano.hokkaido.jp", 3, false}, + {1, "kamikawa.hokkaido.jp", 3, false}, + {1, "kamishihoro.hokkaido.jp", 3, false}, + {1, "kamisunagawa.hokkaido.jp", 3, false}, + {1, 
"kamoenai.hokkaido.jp", 3, false}, + {1, "kayabe.hokkaido.jp", 3, false}, + {1, "kembuchi.hokkaido.jp", 3, false}, + {1, "kikonai.hokkaido.jp", 3, false}, + {1, "kimobetsu.hokkaido.jp", 3, false}, + {1, "kitahiroshima.hokkaido.jp", 3, false}, + {1, "kitami.hokkaido.jp", 3, false}, + {1, "kiyosato.hokkaido.jp", 3, false}, + {1, "koshimizu.hokkaido.jp", 3, false}, + {1, "kunneppu.hokkaido.jp", 3, false}, + {1, "kuriyama.hokkaido.jp", 3, false}, + {1, "kuromatsunai.hokkaido.jp", 3, false}, + {1, "kushiro.hokkaido.jp", 3, false}, + {1, "kutchan.hokkaido.jp", 3, false}, + {1, "kyowa.hokkaido.jp", 3, false}, + {1, "mashike.hokkaido.jp", 3, false}, + {1, "matsumae.hokkaido.jp", 3, false}, + {1, "mikasa.hokkaido.jp", 3, false}, + {1, "minamifurano.hokkaido.jp", 3, false}, + {1, "mombetsu.hokkaido.jp", 3, false}, + {1, "moseushi.hokkaido.jp", 3, false}, + {1, "mukawa.hokkaido.jp", 3, false}, + {1, "muroran.hokkaido.jp", 3, false}, + {1, "naie.hokkaido.jp", 3, false}, + {1, "nakagawa.hokkaido.jp", 3, false}, + {1, "nakasatsunai.hokkaido.jp", 3, false}, + {1, "nakatombetsu.hokkaido.jp", 3, false}, + {1, "nanae.hokkaido.jp", 3, false}, + {1, "nanporo.hokkaido.jp", 3, false}, + {1, "nayoro.hokkaido.jp", 3, false}, + {1, "nemuro.hokkaido.jp", 3, false}, + {1, "niikappu.hokkaido.jp", 3, false}, + {1, "niki.hokkaido.jp", 3, false}, + {1, "nishiokoppe.hokkaido.jp", 3, false}, + {1, "noboribetsu.hokkaido.jp", 3, false}, + {1, "numata.hokkaido.jp", 3, false}, + {1, "obihiro.hokkaido.jp", 3, false}, + {1, "obira.hokkaido.jp", 3, false}, + {1, "oketo.hokkaido.jp", 3, false}, + {1, "okoppe.hokkaido.jp", 3, false}, + {1, "otaru.hokkaido.jp", 3, false}, + {1, "otobe.hokkaido.jp", 3, false}, + {1, "otofuke.hokkaido.jp", 3, false}, + {1, "otoineppu.hokkaido.jp", 3, false}, + {1, "oumu.hokkaido.jp", 3, false}, + {1, "ozora.hokkaido.jp", 3, false}, + {1, "pippu.hokkaido.jp", 3, false}, + {1, "rankoshi.hokkaido.jp", 3, false}, + {1, "rebun.hokkaido.jp", 3, false}, + {1, "rikubetsu.hokkaido.jp", 3, false}, + {1, "rishiri.hokkaido.jp", 3, false}, + {1, "rishirifuji.hokkaido.jp", 3, false}, + {1, "saroma.hokkaido.jp", 3, false}, + {1, "sarufutsu.hokkaido.jp", 3, false}, + {1, "shakotan.hokkaido.jp", 3, false}, + {1, "shari.hokkaido.jp", 3, false}, + {1, "shibecha.hokkaido.jp", 3, false}, + {1, "shibetsu.hokkaido.jp", 3, false}, + {1, "shikabe.hokkaido.jp", 3, false}, + {1, "shikaoi.hokkaido.jp", 3, false}, + {1, "shimamaki.hokkaido.jp", 3, false}, + {1, "shimizu.hokkaido.jp", 3, false}, + {1, "shimokawa.hokkaido.jp", 3, false}, + {1, "shinshinotsu.hokkaido.jp", 3, false}, + {1, "shintoku.hokkaido.jp", 3, false}, + {1, "shiranuka.hokkaido.jp", 3, false}, + {1, "shiraoi.hokkaido.jp", 3, false}, + {1, "shiriuchi.hokkaido.jp", 3, false}, + {1, "sobetsu.hokkaido.jp", 3, false}, + {1, "sunagawa.hokkaido.jp", 3, false}, + {1, "taiki.hokkaido.jp", 3, false}, + {1, "takasu.hokkaido.jp", 3, false}, + {1, "takikawa.hokkaido.jp", 3, false}, + {1, "takinoue.hokkaido.jp", 3, false}, + {1, "teshikaga.hokkaido.jp", 3, false}, + {1, "tobetsu.hokkaido.jp", 3, false}, + {1, "tohma.hokkaido.jp", 3, false}, + {1, "tomakomai.hokkaido.jp", 3, false}, + {1, "tomari.hokkaido.jp", 3, false}, + {1, "toya.hokkaido.jp", 3, false}, + {1, "toyako.hokkaido.jp", 3, false}, + {1, "toyotomi.hokkaido.jp", 3, false}, + {1, "toyoura.hokkaido.jp", 3, false}, + {1, "tsubetsu.hokkaido.jp", 3, false}, + {1, "tsukigata.hokkaido.jp", 3, false}, + {1, "urakawa.hokkaido.jp", 3, false}, + {1, "urausu.hokkaido.jp", 3, false}, + {1, "uryu.hokkaido.jp", 3, false}, + {1, 
"utashinai.hokkaido.jp", 3, false}, + {1, "wakkanai.hokkaido.jp", 3, false}, + {1, "wassamu.hokkaido.jp", 3, false}, + {1, "yakumo.hokkaido.jp", 3, false}, + {1, "yoichi.hokkaido.jp", 3, false}, + {1, "aioi.hyogo.jp", 3, false}, + {1, "akashi.hyogo.jp", 3, false}, + {1, "ako.hyogo.jp", 3, false}, + {1, "amagasaki.hyogo.jp", 3, false}, + {1, "aogaki.hyogo.jp", 3, false}, + {1, "asago.hyogo.jp", 3, false}, + {1, "ashiya.hyogo.jp", 3, false}, + {1, "awaji.hyogo.jp", 3, false}, + {1, "fukusaki.hyogo.jp", 3, false}, + {1, "goshiki.hyogo.jp", 3, false}, + {1, "harima.hyogo.jp", 3, false}, + {1, "himeji.hyogo.jp", 3, false}, + {1, "ichikawa.hyogo.jp", 3, false}, + {1, "inagawa.hyogo.jp", 3, false}, + {1, "itami.hyogo.jp", 3, false}, + {1, "kakogawa.hyogo.jp", 3, false}, + {1, "kamigori.hyogo.jp", 3, false}, + {1, "kamikawa.hyogo.jp", 3, false}, + {1, "kasai.hyogo.jp", 3, false}, + {1, "kasuga.hyogo.jp", 3, false}, + {1, "kawanishi.hyogo.jp", 3, false}, + {1, "miki.hyogo.jp", 3, false}, + {1, "minamiawaji.hyogo.jp", 3, false}, + {1, "nishinomiya.hyogo.jp", 3, false}, + {1, "nishiwaki.hyogo.jp", 3, false}, + {1, "ono.hyogo.jp", 3, false}, + {1, "sanda.hyogo.jp", 3, false}, + {1, "sannan.hyogo.jp", 3, false}, + {1, "sasayama.hyogo.jp", 3, false}, + {1, "sayo.hyogo.jp", 3, false}, + {1, "shingu.hyogo.jp", 3, false}, + {1, "shinonsen.hyogo.jp", 3, false}, + {1, "shiso.hyogo.jp", 3, false}, + {1, "sumoto.hyogo.jp", 3, false}, + {1, "taishi.hyogo.jp", 3, false}, + {1, "taka.hyogo.jp", 3, false}, + {1, "takarazuka.hyogo.jp", 3, false}, + {1, "takasago.hyogo.jp", 3, false}, + {1, "takino.hyogo.jp", 3, false}, + {1, "tamba.hyogo.jp", 3, false}, + {1, "tatsuno.hyogo.jp", 3, false}, + {1, "toyooka.hyogo.jp", 3, false}, + {1, "yabu.hyogo.jp", 3, false}, + {1, "yashiro.hyogo.jp", 3, false}, + {1, "yoka.hyogo.jp", 3, false}, + {1, "yokawa.hyogo.jp", 3, false}, + {1, "ami.ibaraki.jp", 3, false}, + {1, "asahi.ibaraki.jp", 3, false}, + {1, "bando.ibaraki.jp", 3, false}, + {1, "chikusei.ibaraki.jp", 3, false}, + {1, "daigo.ibaraki.jp", 3, false}, + {1, "fujishiro.ibaraki.jp", 3, false}, + {1, "hitachi.ibaraki.jp", 3, false}, + {1, "hitachinaka.ibaraki.jp", 3, false}, + {1, "hitachiomiya.ibaraki.jp", 3, false}, + {1, "hitachiota.ibaraki.jp", 3, false}, + {1, "ibaraki.ibaraki.jp", 3, false}, + {1, "ina.ibaraki.jp", 3, false}, + {1, "inashiki.ibaraki.jp", 3, false}, + {1, "itako.ibaraki.jp", 3, false}, + {1, "iwama.ibaraki.jp", 3, false}, + {1, "joso.ibaraki.jp", 3, false}, + {1, "kamisu.ibaraki.jp", 3, false}, + {1, "kasama.ibaraki.jp", 3, false}, + {1, "kashima.ibaraki.jp", 3, false}, + {1, "kasumigaura.ibaraki.jp", 3, false}, + {1, "koga.ibaraki.jp", 3, false}, + {1, "miho.ibaraki.jp", 3, false}, + {1, "mito.ibaraki.jp", 3, false}, + {1, "moriya.ibaraki.jp", 3, false}, + {1, "naka.ibaraki.jp", 3, false}, + {1, "namegata.ibaraki.jp", 3, false}, + {1, "oarai.ibaraki.jp", 3, false}, + {1, "ogawa.ibaraki.jp", 3, false}, + {1, "omitama.ibaraki.jp", 3, false}, + {1, "ryugasaki.ibaraki.jp", 3, false}, + {1, "sakai.ibaraki.jp", 3, false}, + {1, "sakuragawa.ibaraki.jp", 3, false}, + {1, "shimodate.ibaraki.jp", 3, false}, + {1, "shimotsuma.ibaraki.jp", 3, false}, + {1, "shirosato.ibaraki.jp", 3, false}, + {1, "sowa.ibaraki.jp", 3, false}, + {1, "suifu.ibaraki.jp", 3, false}, + {1, "takahagi.ibaraki.jp", 3, false}, + {1, "tamatsukuri.ibaraki.jp", 3, false}, + {1, "tokai.ibaraki.jp", 3, false}, + {1, "tomobe.ibaraki.jp", 3, false}, + {1, "tone.ibaraki.jp", 3, false}, + {1, "toride.ibaraki.jp", 3, false}, + {1, 
"tsuchiura.ibaraki.jp", 3, false}, + {1, "tsukuba.ibaraki.jp", 3, false}, + {1, "uchihara.ibaraki.jp", 3, false}, + {1, "ushiku.ibaraki.jp", 3, false}, + {1, "yachiyo.ibaraki.jp", 3, false}, + {1, "yamagata.ibaraki.jp", 3, false}, + {1, "yawara.ibaraki.jp", 3, false}, + {1, "yuki.ibaraki.jp", 3, false}, + {1, "anamizu.ishikawa.jp", 3, false}, + {1, "hakui.ishikawa.jp", 3, false}, + {1, "hakusan.ishikawa.jp", 3, false}, + {1, "kaga.ishikawa.jp", 3, false}, + {1, "kahoku.ishikawa.jp", 3, false}, + {1, "kanazawa.ishikawa.jp", 3, false}, + {1, "kawakita.ishikawa.jp", 3, false}, + {1, "komatsu.ishikawa.jp", 3, false}, + {1, "nakanoto.ishikawa.jp", 3, false}, + {1, "nanao.ishikawa.jp", 3, false}, + {1, "nomi.ishikawa.jp", 3, false}, + {1, "nonoichi.ishikawa.jp", 3, false}, + {1, "noto.ishikawa.jp", 3, false}, + {1, "shika.ishikawa.jp", 3, false}, + {1, "suzu.ishikawa.jp", 3, false}, + {1, "tsubata.ishikawa.jp", 3, false}, + {1, "tsurugi.ishikawa.jp", 3, false}, + {1, "uchinada.ishikawa.jp", 3, false}, + {1, "wajima.ishikawa.jp", 3, false}, + {1, "fudai.iwate.jp", 3, false}, + {1, "fujisawa.iwate.jp", 3, false}, + {1, "hanamaki.iwate.jp", 3, false}, + {1, "hiraizumi.iwate.jp", 3, false}, + {1, "hirono.iwate.jp", 3, false}, + {1, "ichinohe.iwate.jp", 3, false}, + {1, "ichinoseki.iwate.jp", 3, false}, + {1, "iwaizumi.iwate.jp", 3, false}, + {1, "iwate.iwate.jp", 3, false}, + {1, "joboji.iwate.jp", 3, false}, + {1, "kamaishi.iwate.jp", 3, false}, + {1, "kanegasaki.iwate.jp", 3, false}, + {1, "karumai.iwate.jp", 3, false}, + {1, "kawai.iwate.jp", 3, false}, + {1, "kitakami.iwate.jp", 3, false}, + {1, "kuji.iwate.jp", 3, false}, + {1, "kunohe.iwate.jp", 3, false}, + {1, "kuzumaki.iwate.jp", 3, false}, + {1, "miyako.iwate.jp", 3, false}, + {1, "mizusawa.iwate.jp", 3, false}, + {1, "morioka.iwate.jp", 3, false}, + {1, "ninohe.iwate.jp", 3, false}, + {1, "noda.iwate.jp", 3, false}, + {1, "ofunato.iwate.jp", 3, false}, + {1, "oshu.iwate.jp", 3, false}, + {1, "otsuchi.iwate.jp", 3, false}, + {1, "rikuzentakata.iwate.jp", 3, false}, + {1, "shiwa.iwate.jp", 3, false}, + {1, "shizukuishi.iwate.jp", 3, false}, + {1, "sumita.iwate.jp", 3, false}, + {1, "tanohata.iwate.jp", 3, false}, + {1, "tono.iwate.jp", 3, false}, + {1, "yahaba.iwate.jp", 3, false}, + {1, "yamada.iwate.jp", 3, false}, + {1, "ayagawa.kagawa.jp", 3, false}, + {1, "higashikagawa.kagawa.jp", 3, false}, + {1, "kanonji.kagawa.jp", 3, false}, + {1, "kotohira.kagawa.jp", 3, false}, + {1, "manno.kagawa.jp", 3, false}, + {1, "marugame.kagawa.jp", 3, false}, + {1, "mitoyo.kagawa.jp", 3, false}, + {1, "naoshima.kagawa.jp", 3, false}, + {1, "sanuki.kagawa.jp", 3, false}, + {1, "tadotsu.kagawa.jp", 3, false}, + {1, "takamatsu.kagawa.jp", 3, false}, + {1, "tonosho.kagawa.jp", 3, false}, + {1, "uchinomi.kagawa.jp", 3, false}, + {1, "utazu.kagawa.jp", 3, false}, + {1, "zentsuji.kagawa.jp", 3, false}, + {1, "akune.kagoshima.jp", 3, false}, + {1, "amami.kagoshima.jp", 3, false}, + {1, "hioki.kagoshima.jp", 3, false}, + {1, "isa.kagoshima.jp", 3, false}, + {1, "isen.kagoshima.jp", 3, false}, + {1, "izumi.kagoshima.jp", 3, false}, + {1, "kagoshima.kagoshima.jp", 3, false}, + {1, "kanoya.kagoshima.jp", 3, false}, + {1, "kawanabe.kagoshima.jp", 3, false}, + {1, "kinko.kagoshima.jp", 3, false}, + {1, "kouyama.kagoshima.jp", 3, false}, + {1, "makurazaki.kagoshima.jp", 3, false}, + {1, "matsumoto.kagoshima.jp", 3, false}, + {1, "minamitane.kagoshima.jp", 3, false}, + {1, "nakatane.kagoshima.jp", 3, false}, + {1, "nishinoomote.kagoshima.jp", 3, false}, + {1, 
"satsumasendai.kagoshima.jp", 3, false}, + {1, "soo.kagoshima.jp", 3, false}, + {1, "tarumizu.kagoshima.jp", 3, false}, + {1, "yusui.kagoshima.jp", 3, false}, + {1, "aikawa.kanagawa.jp", 3, false}, + {1, "atsugi.kanagawa.jp", 3, false}, + {1, "ayase.kanagawa.jp", 3, false}, + {1, "chigasaki.kanagawa.jp", 3, false}, + {1, "ebina.kanagawa.jp", 3, false}, + {1, "fujisawa.kanagawa.jp", 3, false}, + {1, "hadano.kanagawa.jp", 3, false}, + {1, "hakone.kanagawa.jp", 3, false}, + {1, "hiratsuka.kanagawa.jp", 3, false}, + {1, "isehara.kanagawa.jp", 3, false}, + {1, "kaisei.kanagawa.jp", 3, false}, + {1, "kamakura.kanagawa.jp", 3, false}, + {1, "kiyokawa.kanagawa.jp", 3, false}, + {1, "matsuda.kanagawa.jp", 3, false}, + {1, "minamiashigara.kanagawa.jp", 3, false}, + {1, "miura.kanagawa.jp", 3, false}, + {1, "nakai.kanagawa.jp", 3, false}, + {1, "ninomiya.kanagawa.jp", 3, false}, + {1, "odawara.kanagawa.jp", 3, false}, + {1, "oi.kanagawa.jp", 3, false}, + {1, "oiso.kanagawa.jp", 3, false}, + {1, "sagamihara.kanagawa.jp", 3, false}, + {1, "samukawa.kanagawa.jp", 3, false}, + {1, "tsukui.kanagawa.jp", 3, false}, + {1, "yamakita.kanagawa.jp", 3, false}, + {1, "yamato.kanagawa.jp", 3, false}, + {1, "yokosuka.kanagawa.jp", 3, false}, + {1, "yugawara.kanagawa.jp", 3, false}, + {1, "zama.kanagawa.jp", 3, false}, + {1, "zushi.kanagawa.jp", 3, false}, + {1, "aki.kochi.jp", 3, false}, + {1, "geisei.kochi.jp", 3, false}, + {1, "hidaka.kochi.jp", 3, false}, + {1, "higashitsuno.kochi.jp", 3, false}, + {1, "ino.kochi.jp", 3, false}, + {1, "kagami.kochi.jp", 3, false}, + {1, "kami.kochi.jp", 3, false}, + {1, "kitagawa.kochi.jp", 3, false}, + {1, "kochi.kochi.jp", 3, false}, + {1, "mihara.kochi.jp", 3, false}, + {1, "motoyama.kochi.jp", 3, false}, + {1, "muroto.kochi.jp", 3, false}, + {1, "nahari.kochi.jp", 3, false}, + {1, "nakamura.kochi.jp", 3, false}, + {1, "nankoku.kochi.jp", 3, false}, + {1, "nishitosa.kochi.jp", 3, false}, + {1, "niyodogawa.kochi.jp", 3, false}, + {1, "ochi.kochi.jp", 3, false}, + {1, "okawa.kochi.jp", 3, false}, + {1, "otoyo.kochi.jp", 3, false}, + {1, "otsuki.kochi.jp", 3, false}, + {1, "sakawa.kochi.jp", 3, false}, + {1, "sukumo.kochi.jp", 3, false}, + {1, "susaki.kochi.jp", 3, false}, + {1, "tosa.kochi.jp", 3, false}, + {1, "tosashimizu.kochi.jp", 3, false}, + {1, "toyo.kochi.jp", 3, false}, + {1, "tsuno.kochi.jp", 3, false}, + {1, "umaji.kochi.jp", 3, false}, + {1, "yasuda.kochi.jp", 3, false}, + {1, "yusuhara.kochi.jp", 3, false}, + {1, "amakusa.kumamoto.jp", 3, false}, + {1, "arao.kumamoto.jp", 3, false}, + {1, "aso.kumamoto.jp", 3, false}, + {1, "choyo.kumamoto.jp", 3, false}, + {1, "gyokuto.kumamoto.jp", 3, false}, + {1, "kamiamakusa.kumamoto.jp", 3, false}, + {1, "kikuchi.kumamoto.jp", 3, false}, + {1, "kumamoto.kumamoto.jp", 3, false}, + {1, "mashiki.kumamoto.jp", 3, false}, + {1, "mifune.kumamoto.jp", 3, false}, + {1, "minamata.kumamoto.jp", 3, false}, + {1, "minamioguni.kumamoto.jp", 3, false}, + {1, "nagasu.kumamoto.jp", 3, false}, + {1, "nishihara.kumamoto.jp", 3, false}, + {1, "oguni.kumamoto.jp", 3, false}, + {1, "ozu.kumamoto.jp", 3, false}, + {1, "sumoto.kumamoto.jp", 3, false}, + {1, "takamori.kumamoto.jp", 3, false}, + {1, "uki.kumamoto.jp", 3, false}, + {1, "uto.kumamoto.jp", 3, false}, + {1, "yamaga.kumamoto.jp", 3, false}, + {1, "yamato.kumamoto.jp", 3, false}, + {1, "yatsushiro.kumamoto.jp", 3, false}, + {1, "ayabe.kyoto.jp", 3, false}, + {1, "fukuchiyama.kyoto.jp", 3, false}, + {1, "higashiyama.kyoto.jp", 3, false}, + {1, "ide.kyoto.jp", 3, false}, + {1, 
"ine.kyoto.jp", 3, false}, + {1, "joyo.kyoto.jp", 3, false}, + {1, "kameoka.kyoto.jp", 3, false}, + {1, "kamo.kyoto.jp", 3, false}, + {1, "kita.kyoto.jp", 3, false}, + {1, "kizu.kyoto.jp", 3, false}, + {1, "kumiyama.kyoto.jp", 3, false}, + {1, "kyotamba.kyoto.jp", 3, false}, + {1, "kyotanabe.kyoto.jp", 3, false}, + {1, "kyotango.kyoto.jp", 3, false}, + {1, "maizuru.kyoto.jp", 3, false}, + {1, "minami.kyoto.jp", 3, false}, + {1, "minamiyamashiro.kyoto.jp", 3, false}, + {1, "miyazu.kyoto.jp", 3, false}, + {1, "muko.kyoto.jp", 3, false}, + {1, "nagaokakyo.kyoto.jp", 3, false}, + {1, "nakagyo.kyoto.jp", 3, false}, + {1, "nantan.kyoto.jp", 3, false}, + {1, "oyamazaki.kyoto.jp", 3, false}, + {1, "sakyo.kyoto.jp", 3, false}, + {1, "seika.kyoto.jp", 3, false}, + {1, "tanabe.kyoto.jp", 3, false}, + {1, "uji.kyoto.jp", 3, false}, + {1, "ujitawara.kyoto.jp", 3, false}, + {1, "wazuka.kyoto.jp", 3, false}, + {1, "yamashina.kyoto.jp", 3, false}, + {1, "yawata.kyoto.jp", 3, false}, + {1, "asahi.mie.jp", 3, false}, + {1, "inabe.mie.jp", 3, false}, + {1, "ise.mie.jp", 3, false}, + {1, "kameyama.mie.jp", 3, false}, + {1, "kawagoe.mie.jp", 3, false}, + {1, "kiho.mie.jp", 3, false}, + {1, "kisosaki.mie.jp", 3, false}, + {1, "kiwa.mie.jp", 3, false}, + {1, "komono.mie.jp", 3, false}, + {1, "kumano.mie.jp", 3, false}, + {1, "kuwana.mie.jp", 3, false}, + {1, "matsusaka.mie.jp", 3, false}, + {1, "meiwa.mie.jp", 3, false}, + {1, "mihama.mie.jp", 3, false}, + {1, "minamiise.mie.jp", 3, false}, + {1, "misugi.mie.jp", 3, false}, + {1, "miyama.mie.jp", 3, false}, + {1, "nabari.mie.jp", 3, false}, + {1, "shima.mie.jp", 3, false}, + {1, "suzuka.mie.jp", 3, false}, + {1, "tado.mie.jp", 3, false}, + {1, "taiki.mie.jp", 3, false}, + {1, "taki.mie.jp", 3, false}, + {1, "tamaki.mie.jp", 3, false}, + {1, "toba.mie.jp", 3, false}, + {1, "tsu.mie.jp", 3, false}, + {1, "udono.mie.jp", 3, false}, + {1, "ureshino.mie.jp", 3, false}, + {1, "watarai.mie.jp", 3, false}, + {1, "yokkaichi.mie.jp", 3, false}, + {1, "furukawa.miyagi.jp", 3, false}, + {1, "higashimatsushima.miyagi.jp", 3, false}, + {1, "ishinomaki.miyagi.jp", 3, false}, + {1, "iwanuma.miyagi.jp", 3, false}, + {1, "kakuda.miyagi.jp", 3, false}, + {1, "kami.miyagi.jp", 3, false}, + {1, "kawasaki.miyagi.jp", 3, false}, + {1, "marumori.miyagi.jp", 3, false}, + {1, "matsushima.miyagi.jp", 3, false}, + {1, "minamisanriku.miyagi.jp", 3, false}, + {1, "misato.miyagi.jp", 3, false}, + {1, "murata.miyagi.jp", 3, false}, + {1, "natori.miyagi.jp", 3, false}, + {1, "ogawara.miyagi.jp", 3, false}, + {1, "ohira.miyagi.jp", 3, false}, + {1, "onagawa.miyagi.jp", 3, false}, + {1, "osaki.miyagi.jp", 3, false}, + {1, "rifu.miyagi.jp", 3, false}, + {1, "semine.miyagi.jp", 3, false}, + {1, "shibata.miyagi.jp", 3, false}, + {1, "shichikashuku.miyagi.jp", 3, false}, + {1, "shikama.miyagi.jp", 3, false}, + {1, "shiogama.miyagi.jp", 3, false}, + {1, "shiroishi.miyagi.jp", 3, false}, + {1, "tagajo.miyagi.jp", 3, false}, + {1, "taiwa.miyagi.jp", 3, false}, + {1, "tome.miyagi.jp", 3, false}, + {1, "tomiya.miyagi.jp", 3, false}, + {1, "wakuya.miyagi.jp", 3, false}, + {1, "watari.miyagi.jp", 3, false}, + {1, "yamamoto.miyagi.jp", 3, false}, + {1, "zao.miyagi.jp", 3, false}, + {1, "aya.miyazaki.jp", 3, false}, + {1, "ebino.miyazaki.jp", 3, false}, + {1, "gokase.miyazaki.jp", 3, false}, + {1, "hyuga.miyazaki.jp", 3, false}, + {1, "kadogawa.miyazaki.jp", 3, false}, + {1, "kawaminami.miyazaki.jp", 3, false}, + {1, "kijo.miyazaki.jp", 3, false}, + {1, "kitagawa.miyazaki.jp", 3, false}, + {1, 
"kitakata.miyazaki.jp", 3, false}, + {1, "kitaura.miyazaki.jp", 3, false}, + {1, "kobayashi.miyazaki.jp", 3, false}, + {1, "kunitomi.miyazaki.jp", 3, false}, + {1, "kushima.miyazaki.jp", 3, false}, + {1, "mimata.miyazaki.jp", 3, false}, + {1, "miyakonojo.miyazaki.jp", 3, false}, + {1, "miyazaki.miyazaki.jp", 3, false}, + {1, "morotsuka.miyazaki.jp", 3, false}, + {1, "nichinan.miyazaki.jp", 3, false}, + {1, "nishimera.miyazaki.jp", 3, false}, + {1, "nobeoka.miyazaki.jp", 3, false}, + {1, "saito.miyazaki.jp", 3, false}, + {1, "shiiba.miyazaki.jp", 3, false}, + {1, "shintomi.miyazaki.jp", 3, false}, + {1, "takaharu.miyazaki.jp", 3, false}, + {1, "takanabe.miyazaki.jp", 3, false}, + {1, "takazaki.miyazaki.jp", 3, false}, + {1, "tsuno.miyazaki.jp", 3, false}, + {1, "achi.nagano.jp", 3, false}, + {1, "agematsu.nagano.jp", 3, false}, + {1, "anan.nagano.jp", 3, false}, + {1, "aoki.nagano.jp", 3, false}, + {1, "asahi.nagano.jp", 3, false}, + {1, "azumino.nagano.jp", 3, false}, + {1, "chikuhoku.nagano.jp", 3, false}, + {1, "chikuma.nagano.jp", 3, false}, + {1, "chino.nagano.jp", 3, false}, + {1, "fujimi.nagano.jp", 3, false}, + {1, "hakuba.nagano.jp", 3, false}, + {1, "hara.nagano.jp", 3, false}, + {1, "hiraya.nagano.jp", 3, false}, + {1, "iida.nagano.jp", 3, false}, + {1, "iijima.nagano.jp", 3, false}, + {1, "iiyama.nagano.jp", 3, false}, + {1, "iizuna.nagano.jp", 3, false}, + {1, "ikeda.nagano.jp", 3, false}, + {1, "ikusaka.nagano.jp", 3, false}, + {1, "ina.nagano.jp", 3, false}, + {1, "karuizawa.nagano.jp", 3, false}, + {1, "kawakami.nagano.jp", 3, false}, + {1, "kiso.nagano.jp", 3, false}, + {1, "kisofukushima.nagano.jp", 3, false}, + {1, "kitaaiki.nagano.jp", 3, false}, + {1, "komagane.nagano.jp", 3, false}, + {1, "komoro.nagano.jp", 3, false}, + {1, "matsukawa.nagano.jp", 3, false}, + {1, "matsumoto.nagano.jp", 3, false}, + {1, "miasa.nagano.jp", 3, false}, + {1, "minamiaiki.nagano.jp", 3, false}, + {1, "minamimaki.nagano.jp", 3, false}, + {1, "minamiminowa.nagano.jp", 3, false}, + {1, "minowa.nagano.jp", 3, false}, + {1, "miyada.nagano.jp", 3, false}, + {1, "miyota.nagano.jp", 3, false}, + {1, "mochizuki.nagano.jp", 3, false}, + {1, "nagano.nagano.jp", 3, false}, + {1, "nagawa.nagano.jp", 3, false}, + {1, "nagiso.nagano.jp", 3, false}, + {1, "nakagawa.nagano.jp", 3, false}, + {1, "nakano.nagano.jp", 3, false}, + {1, "nozawaonsen.nagano.jp", 3, false}, + {1, "obuse.nagano.jp", 3, false}, + {1, "ogawa.nagano.jp", 3, false}, + {1, "okaya.nagano.jp", 3, false}, + {1, "omachi.nagano.jp", 3, false}, + {1, "omi.nagano.jp", 3, false}, + {1, "ookuwa.nagano.jp", 3, false}, + {1, "ooshika.nagano.jp", 3, false}, + {1, "otaki.nagano.jp", 3, false}, + {1, "otari.nagano.jp", 3, false}, + {1, "sakae.nagano.jp", 3, false}, + {1, "sakaki.nagano.jp", 3, false}, + {1, "saku.nagano.jp", 3, false}, + {1, "sakuho.nagano.jp", 3, false}, + {1, "shimosuwa.nagano.jp", 3, false}, + {1, "shinanomachi.nagano.jp", 3, false}, + {1, "shiojiri.nagano.jp", 3, false}, + {1, "suwa.nagano.jp", 3, false}, + {1, "suzaka.nagano.jp", 3, false}, + {1, "takagi.nagano.jp", 3, false}, + {1, "takamori.nagano.jp", 3, false}, + {1, "takayama.nagano.jp", 3, false}, + {1, "tateshina.nagano.jp", 3, false}, + {1, "tatsuno.nagano.jp", 3, false}, + {1, "togakushi.nagano.jp", 3, false}, + {1, "togura.nagano.jp", 3, false}, + {1, "tomi.nagano.jp", 3, false}, + {1, "ueda.nagano.jp", 3, false}, + {1, "wada.nagano.jp", 3, false}, + {1, "yamagata.nagano.jp", 3, false}, + {1, "yamanouchi.nagano.jp", 3, false}, + {1, "yasaka.nagano.jp", 3, false}, + {1, 
"yasuoka.nagano.jp", 3, false}, + {1, "chijiwa.nagasaki.jp", 3, false}, + {1, "futsu.nagasaki.jp", 3, false}, + {1, "goto.nagasaki.jp", 3, false}, + {1, "hasami.nagasaki.jp", 3, false}, + {1, "hirado.nagasaki.jp", 3, false}, + {1, "iki.nagasaki.jp", 3, false}, + {1, "isahaya.nagasaki.jp", 3, false}, + {1, "kawatana.nagasaki.jp", 3, false}, + {1, "kuchinotsu.nagasaki.jp", 3, false}, + {1, "matsuura.nagasaki.jp", 3, false}, + {1, "nagasaki.nagasaki.jp", 3, false}, + {1, "obama.nagasaki.jp", 3, false}, + {1, "omura.nagasaki.jp", 3, false}, + {1, "oseto.nagasaki.jp", 3, false}, + {1, "saikai.nagasaki.jp", 3, false}, + {1, "sasebo.nagasaki.jp", 3, false}, + {1, "seihi.nagasaki.jp", 3, false}, + {1, "shimabara.nagasaki.jp", 3, false}, + {1, "shinkamigoto.nagasaki.jp", 3, false}, + {1, "togitsu.nagasaki.jp", 3, false}, + {1, "tsushima.nagasaki.jp", 3, false}, + {1, "unzen.nagasaki.jp", 3, false}, + {1, "ando.nara.jp", 3, false}, + {1, "gose.nara.jp", 3, false}, + {1, "heguri.nara.jp", 3, false}, + {1, "higashiyoshino.nara.jp", 3, false}, + {1, "ikaruga.nara.jp", 3, false}, + {1, "ikoma.nara.jp", 3, false}, + {1, "kamikitayama.nara.jp", 3, false}, + {1, "kanmaki.nara.jp", 3, false}, + {1, "kashiba.nara.jp", 3, false}, + {1, "kashihara.nara.jp", 3, false}, + {1, "katsuragi.nara.jp", 3, false}, + {1, "kawai.nara.jp", 3, false}, + {1, "kawakami.nara.jp", 3, false}, + {1, "kawanishi.nara.jp", 3, false}, + {1, "koryo.nara.jp", 3, false}, + {1, "kurotaki.nara.jp", 3, false}, + {1, "mitsue.nara.jp", 3, false}, + {1, "miyake.nara.jp", 3, false}, + {1, "nara.nara.jp", 3, false}, + {1, "nosegawa.nara.jp", 3, false}, + {1, "oji.nara.jp", 3, false}, + {1, "ouda.nara.jp", 3, false}, + {1, "oyodo.nara.jp", 3, false}, + {1, "sakurai.nara.jp", 3, false}, + {1, "sango.nara.jp", 3, false}, + {1, "shimoichi.nara.jp", 3, false}, + {1, "shimokitayama.nara.jp", 3, false}, + {1, "shinjo.nara.jp", 3, false}, + {1, "soni.nara.jp", 3, false}, + {1, "takatori.nara.jp", 3, false}, + {1, "tawaramoto.nara.jp", 3, false}, + {1, "tenkawa.nara.jp", 3, false}, + {1, "tenri.nara.jp", 3, false}, + {1, "uda.nara.jp", 3, false}, + {1, "yamatokoriyama.nara.jp", 3, false}, + {1, "yamatotakada.nara.jp", 3, false}, + {1, "yamazoe.nara.jp", 3, false}, + {1, "yoshino.nara.jp", 3, false}, + {1, "aga.niigata.jp", 3, false}, + {1, "agano.niigata.jp", 3, false}, + {1, "gosen.niigata.jp", 3, false}, + {1, "itoigawa.niigata.jp", 3, false}, + {1, "izumozaki.niigata.jp", 3, false}, + {1, "joetsu.niigata.jp", 3, false}, + {1, "kamo.niigata.jp", 3, false}, + {1, "kariwa.niigata.jp", 3, false}, + {1, "kashiwazaki.niigata.jp", 3, false}, + {1, "minamiuonuma.niigata.jp", 3, false}, + {1, "mitsuke.niigata.jp", 3, false}, + {1, "muika.niigata.jp", 3, false}, + {1, "murakami.niigata.jp", 3, false}, + {1, "myoko.niigata.jp", 3, false}, + {1, "nagaoka.niigata.jp", 3, false}, + {1, "niigata.niigata.jp", 3, false}, + {1, "ojiya.niigata.jp", 3, false}, + {1, "omi.niigata.jp", 3, false}, + {1, "sado.niigata.jp", 3, false}, + {1, "sanjo.niigata.jp", 3, false}, + {1, "seiro.niigata.jp", 3, false}, + {1, "seirou.niigata.jp", 3, false}, + {1, "sekikawa.niigata.jp", 3, false}, + {1, "shibata.niigata.jp", 3, false}, + {1, "tagami.niigata.jp", 3, false}, + {1, "tainai.niigata.jp", 3, false}, + {1, "tochio.niigata.jp", 3, false}, + {1, "tokamachi.niigata.jp", 3, false}, + {1, "tsubame.niigata.jp", 3, false}, + {1, "tsunan.niigata.jp", 3, false}, + {1, "uonuma.niigata.jp", 3, false}, + {1, "yahiko.niigata.jp", 3, false}, + {1, "yoita.niigata.jp", 3, false}, + {1, 
"yuzawa.niigata.jp", 3, false}, + {1, "beppu.oita.jp", 3, false}, + {1, "bungoono.oita.jp", 3, false}, + {1, "bungotakada.oita.jp", 3, false}, + {1, "hasama.oita.jp", 3, false}, + {1, "hiji.oita.jp", 3, false}, + {1, "himeshima.oita.jp", 3, false}, + {1, "hita.oita.jp", 3, false}, + {1, "kamitsue.oita.jp", 3, false}, + {1, "kokonoe.oita.jp", 3, false}, + {1, "kuju.oita.jp", 3, false}, + {1, "kunisaki.oita.jp", 3, false}, + {1, "kusu.oita.jp", 3, false}, + {1, "oita.oita.jp", 3, false}, + {1, "saiki.oita.jp", 3, false}, + {1, "taketa.oita.jp", 3, false}, + {1, "tsukumi.oita.jp", 3, false}, + {1, "usa.oita.jp", 3, false}, + {1, "usuki.oita.jp", 3, false}, + {1, "yufu.oita.jp", 3, false}, + {1, "akaiwa.okayama.jp", 3, false}, + {1, "asakuchi.okayama.jp", 3, false}, + {1, "bizen.okayama.jp", 3, false}, + {1, "hayashima.okayama.jp", 3, false}, + {1, "ibara.okayama.jp", 3, false}, + {1, "kagamino.okayama.jp", 3, false}, + {1, "kasaoka.okayama.jp", 3, false}, + {1, "kibichuo.okayama.jp", 3, false}, + {1, "kumenan.okayama.jp", 3, false}, + {1, "kurashiki.okayama.jp", 3, false}, + {1, "maniwa.okayama.jp", 3, false}, + {1, "misaki.okayama.jp", 3, false}, + {1, "nagi.okayama.jp", 3, false}, + {1, "niimi.okayama.jp", 3, false}, + {1, "nishiawakura.okayama.jp", 3, false}, + {1, "okayama.okayama.jp", 3, false}, + {1, "satosho.okayama.jp", 3, false}, + {1, "setouchi.okayama.jp", 3, false}, + {1, "shinjo.okayama.jp", 3, false}, + {1, "shoo.okayama.jp", 3, false}, + {1, "soja.okayama.jp", 3, false}, + {1, "takahashi.okayama.jp", 3, false}, + {1, "tamano.okayama.jp", 3, false}, + {1, "tsuyama.okayama.jp", 3, false}, + {1, "wake.okayama.jp", 3, false}, + {1, "yakage.okayama.jp", 3, false}, + {1, "aguni.okinawa.jp", 3, false}, + {1, "ginowan.okinawa.jp", 3, false}, + {1, "ginoza.okinawa.jp", 3, false}, + {1, "gushikami.okinawa.jp", 3, false}, + {1, "haebaru.okinawa.jp", 3, false}, + {1, "higashi.okinawa.jp", 3, false}, + {1, "hirara.okinawa.jp", 3, false}, + {1, "iheya.okinawa.jp", 3, false}, + {1, "ishigaki.okinawa.jp", 3, false}, + {1, "ishikawa.okinawa.jp", 3, false}, + {1, "itoman.okinawa.jp", 3, false}, + {1, "izena.okinawa.jp", 3, false}, + {1, "kadena.okinawa.jp", 3, false}, + {1, "kin.okinawa.jp", 3, false}, + {1, "kitadaito.okinawa.jp", 3, false}, + {1, "kitanakagusuku.okinawa.jp", 3, false}, + {1, "kumejima.okinawa.jp", 3, false}, + {1, "kunigami.okinawa.jp", 3, false}, + {1, "minamidaito.okinawa.jp", 3, false}, + {1, "motobu.okinawa.jp", 3, false}, + {1, "nago.okinawa.jp", 3, false}, + {1, "naha.okinawa.jp", 3, false}, + {1, "nakagusuku.okinawa.jp", 3, false}, + {1, "nakijin.okinawa.jp", 3, false}, + {1, "nanjo.okinawa.jp", 3, false}, + {1, "nishihara.okinawa.jp", 3, false}, + {1, "ogimi.okinawa.jp", 3, false}, + {1, "okinawa.okinawa.jp", 3, false}, + {1, "onna.okinawa.jp", 3, false}, + {1, "shimoji.okinawa.jp", 3, false}, + {1, "taketomi.okinawa.jp", 3, false}, + {1, "tarama.okinawa.jp", 3, false}, + {1, "tokashiki.okinawa.jp", 3, false}, + {1, "tomigusuku.okinawa.jp", 3, false}, + {1, "tonaki.okinawa.jp", 3, false}, + {1, "urasoe.okinawa.jp", 3, false}, + {1, "uruma.okinawa.jp", 3, false}, + {1, "yaese.okinawa.jp", 3, false}, + {1, "yomitan.okinawa.jp", 3, false}, + {1, "yonabaru.okinawa.jp", 3, false}, + {1, "yonaguni.okinawa.jp", 3, false}, + {1, "zamami.okinawa.jp", 3, false}, + {1, "abeno.osaka.jp", 3, false}, + {1, "chihayaakasaka.osaka.jp", 3, false}, + {1, "chuo.osaka.jp", 3, false}, + {1, "daito.osaka.jp", 3, false}, + {1, "fujiidera.osaka.jp", 3, false}, + {1, "habikino.osaka.jp", 3, 
false}, + {1, "hannan.osaka.jp", 3, false}, + {1, "higashiosaka.osaka.jp", 3, false}, + {1, "higashisumiyoshi.osaka.jp", 3, false}, + {1, "higashiyodogawa.osaka.jp", 3, false}, + {1, "hirakata.osaka.jp", 3, false}, + {1, "ibaraki.osaka.jp", 3, false}, + {1, "ikeda.osaka.jp", 3, false}, + {1, "izumi.osaka.jp", 3, false}, + {1, "izumiotsu.osaka.jp", 3, false}, + {1, "izumisano.osaka.jp", 3, false}, + {1, "kadoma.osaka.jp", 3, false}, + {1, "kaizuka.osaka.jp", 3, false}, + {1, "kanan.osaka.jp", 3, false}, + {1, "kashiwara.osaka.jp", 3, false}, + {1, "katano.osaka.jp", 3, false}, + {1, "kawachinagano.osaka.jp", 3, false}, + {1, "kishiwada.osaka.jp", 3, false}, + {1, "kita.osaka.jp", 3, false}, + {1, "kumatori.osaka.jp", 3, false}, + {1, "matsubara.osaka.jp", 3, false}, + {1, "minato.osaka.jp", 3, false}, + {1, "minoh.osaka.jp", 3, false}, + {1, "misaki.osaka.jp", 3, false}, + {1, "moriguchi.osaka.jp", 3, false}, + {1, "neyagawa.osaka.jp", 3, false}, + {1, "nishi.osaka.jp", 3, false}, + {1, "nose.osaka.jp", 3, false}, + {1, "osakasayama.osaka.jp", 3, false}, + {1, "sakai.osaka.jp", 3, false}, + {1, "sayama.osaka.jp", 3, false}, + {1, "sennan.osaka.jp", 3, false}, + {1, "settsu.osaka.jp", 3, false}, + {1, "shijonawate.osaka.jp", 3, false}, + {1, "shimamoto.osaka.jp", 3, false}, + {1, "suita.osaka.jp", 3, false}, + {1, "tadaoka.osaka.jp", 3, false}, + {1, "taishi.osaka.jp", 3, false}, + {1, "tajiri.osaka.jp", 3, false}, + {1, "takaishi.osaka.jp", 3, false}, + {1, "takatsuki.osaka.jp", 3, false}, + {1, "tondabayashi.osaka.jp", 3, false}, + {1, "toyonaka.osaka.jp", 3, false}, + {1, "toyono.osaka.jp", 3, false}, + {1, "yao.osaka.jp", 3, false}, + {1, "ariake.saga.jp", 3, false}, + {1, "arita.saga.jp", 3, false}, + {1, "fukudomi.saga.jp", 3, false}, + {1, "genkai.saga.jp", 3, false}, + {1, "hamatama.saga.jp", 3, false}, + {1, "hizen.saga.jp", 3, false}, + {1, "imari.saga.jp", 3, false}, + {1, "kamimine.saga.jp", 3, false}, + {1, "kanzaki.saga.jp", 3, false}, + {1, "karatsu.saga.jp", 3, false}, + {1, "kashima.saga.jp", 3, false}, + {1, "kitagata.saga.jp", 3, false}, + {1, "kitahata.saga.jp", 3, false}, + {1, "kiyama.saga.jp", 3, false}, + {1, "kouhoku.saga.jp", 3, false}, + {1, "kyuragi.saga.jp", 3, false}, + {1, "nishiarita.saga.jp", 3, false}, + {1, "ogi.saga.jp", 3, false}, + {1, "omachi.saga.jp", 3, false}, + {1, "ouchi.saga.jp", 3, false}, + {1, "saga.saga.jp", 3, false}, + {1, "shiroishi.saga.jp", 3, false}, + {1, "taku.saga.jp", 3, false}, + {1, "tara.saga.jp", 3, false}, + {1, "tosu.saga.jp", 3, false}, + {1, "yoshinogari.saga.jp", 3, false}, + {1, "arakawa.saitama.jp", 3, false}, + {1, "asaka.saitama.jp", 3, false}, + {1, "chichibu.saitama.jp", 3, false}, + {1, "fujimi.saitama.jp", 3, false}, + {1, "fujimino.saitama.jp", 3, false}, + {1, "fukaya.saitama.jp", 3, false}, + {1, "hanno.saitama.jp", 3, false}, + {1, "hanyu.saitama.jp", 3, false}, + {1, "hasuda.saitama.jp", 3, false}, + {1, "hatogaya.saitama.jp", 3, false}, + {1, "hatoyama.saitama.jp", 3, false}, + {1, "hidaka.saitama.jp", 3, false}, + {1, "higashichichibu.saitama.jp", 3, false}, + {1, "higashimatsuyama.saitama.jp", 3, false}, + {1, "honjo.saitama.jp", 3, false}, + {1, "ina.saitama.jp", 3, false}, + {1, "iruma.saitama.jp", 3, false}, + {1, "iwatsuki.saitama.jp", 3, false}, + {1, "kamiizumi.saitama.jp", 3, false}, + {1, "kamikawa.saitama.jp", 3, false}, + {1, "kamisato.saitama.jp", 3, false}, + {1, "kasukabe.saitama.jp", 3, false}, + {1, "kawagoe.saitama.jp", 3, false}, + {1, "kawaguchi.saitama.jp", 3, false}, + {1, 
"kawajima.saitama.jp", 3, false}, + {1, "kazo.saitama.jp", 3, false}, + {1, "kitamoto.saitama.jp", 3, false}, + {1, "koshigaya.saitama.jp", 3, false}, + {1, "kounosu.saitama.jp", 3, false}, + {1, "kuki.saitama.jp", 3, false}, + {1, "kumagaya.saitama.jp", 3, false}, + {1, "matsubushi.saitama.jp", 3, false}, + {1, "minano.saitama.jp", 3, false}, + {1, "misato.saitama.jp", 3, false}, + {1, "miyashiro.saitama.jp", 3, false}, + {1, "miyoshi.saitama.jp", 3, false}, + {1, "moroyama.saitama.jp", 3, false}, + {1, "nagatoro.saitama.jp", 3, false}, + {1, "namegawa.saitama.jp", 3, false}, + {1, "niiza.saitama.jp", 3, false}, + {1, "ogano.saitama.jp", 3, false}, + {1, "ogawa.saitama.jp", 3, false}, + {1, "ogose.saitama.jp", 3, false}, + {1, "okegawa.saitama.jp", 3, false}, + {1, "omiya.saitama.jp", 3, false}, + {1, "otaki.saitama.jp", 3, false}, + {1, "ranzan.saitama.jp", 3, false}, + {1, "ryokami.saitama.jp", 3, false}, + {1, "saitama.saitama.jp", 3, false}, + {1, "sakado.saitama.jp", 3, false}, + {1, "satte.saitama.jp", 3, false}, + {1, "sayama.saitama.jp", 3, false}, + {1, "shiki.saitama.jp", 3, false}, + {1, "shiraoka.saitama.jp", 3, false}, + {1, "soka.saitama.jp", 3, false}, + {1, "sugito.saitama.jp", 3, false}, + {1, "toda.saitama.jp", 3, false}, + {1, "tokigawa.saitama.jp", 3, false}, + {1, "tokorozawa.saitama.jp", 3, false}, + {1, "tsurugashima.saitama.jp", 3, false}, + {1, "urawa.saitama.jp", 3, false}, + {1, "warabi.saitama.jp", 3, false}, + {1, "yashio.saitama.jp", 3, false}, + {1, "yokoze.saitama.jp", 3, false}, + {1, "yono.saitama.jp", 3, false}, + {1, "yorii.saitama.jp", 3, false}, + {1, "yoshida.saitama.jp", 3, false}, + {1, "yoshikawa.saitama.jp", 3, false}, + {1, "yoshimi.saitama.jp", 3, false}, + {1, "aisho.shiga.jp", 3, false}, + {1, "gamo.shiga.jp", 3, false}, + {1, "higashiomi.shiga.jp", 3, false}, + {1, "hikone.shiga.jp", 3, false}, + {1, "koka.shiga.jp", 3, false}, + {1, "konan.shiga.jp", 3, false}, + {1, "kosei.shiga.jp", 3, false}, + {1, "koto.shiga.jp", 3, false}, + {1, "kusatsu.shiga.jp", 3, false}, + {1, "maibara.shiga.jp", 3, false}, + {1, "moriyama.shiga.jp", 3, false}, + {1, "nagahama.shiga.jp", 3, false}, + {1, "nishiazai.shiga.jp", 3, false}, + {1, "notogawa.shiga.jp", 3, false}, + {1, "omihachiman.shiga.jp", 3, false}, + {1, "otsu.shiga.jp", 3, false}, + {1, "ritto.shiga.jp", 3, false}, + {1, "ryuoh.shiga.jp", 3, false}, + {1, "takashima.shiga.jp", 3, false}, + {1, "takatsuki.shiga.jp", 3, false}, + {1, "torahime.shiga.jp", 3, false}, + {1, "toyosato.shiga.jp", 3, false}, + {1, "yasu.shiga.jp", 3, false}, + {1, "akagi.shimane.jp", 3, false}, + {1, "ama.shimane.jp", 3, false}, + {1, "gotsu.shimane.jp", 3, false}, + {1, "hamada.shimane.jp", 3, false}, + {1, "higashiizumo.shimane.jp", 3, false}, + {1, "hikawa.shimane.jp", 3, false}, + {1, "hikimi.shimane.jp", 3, false}, + {1, "izumo.shimane.jp", 3, false}, + {1, "kakinoki.shimane.jp", 3, false}, + {1, "masuda.shimane.jp", 3, false}, + {1, "matsue.shimane.jp", 3, false}, + {1, "misato.shimane.jp", 3, false}, + {1, "nishinoshima.shimane.jp", 3, false}, + {1, "ohda.shimane.jp", 3, false}, + {1, "okinoshima.shimane.jp", 3, false}, + {1, "okuizumo.shimane.jp", 3, false}, + {1, "shimane.shimane.jp", 3, false}, + {1, "tamayu.shimane.jp", 3, false}, + {1, "tsuwano.shimane.jp", 3, false}, + {1, "unnan.shimane.jp", 3, false}, + {1, "yakumo.shimane.jp", 3, false}, + {1, "yasugi.shimane.jp", 3, false}, + {1, "yatsuka.shimane.jp", 3, false}, + {1, "arai.shizuoka.jp", 3, false}, + {1, "atami.shizuoka.jp", 3, false}, + {1, 
"fuji.shizuoka.jp", 3, false}, + {1, "fujieda.shizuoka.jp", 3, false}, + {1, "fujikawa.shizuoka.jp", 3, false}, + {1, "fujinomiya.shizuoka.jp", 3, false}, + {1, "fukuroi.shizuoka.jp", 3, false}, + {1, "gotemba.shizuoka.jp", 3, false}, + {1, "haibara.shizuoka.jp", 3, false}, + {1, "hamamatsu.shizuoka.jp", 3, false}, + {1, "higashiizu.shizuoka.jp", 3, false}, + {1, "ito.shizuoka.jp", 3, false}, + {1, "iwata.shizuoka.jp", 3, false}, + {1, "izu.shizuoka.jp", 3, false}, + {1, "izunokuni.shizuoka.jp", 3, false}, + {1, "kakegawa.shizuoka.jp", 3, false}, + {1, "kannami.shizuoka.jp", 3, false}, + {1, "kawanehon.shizuoka.jp", 3, false}, + {1, "kawazu.shizuoka.jp", 3, false}, + {1, "kikugawa.shizuoka.jp", 3, false}, + {1, "kosai.shizuoka.jp", 3, false}, + {1, "makinohara.shizuoka.jp", 3, false}, + {1, "matsuzaki.shizuoka.jp", 3, false}, + {1, "minamiizu.shizuoka.jp", 3, false}, + {1, "mishima.shizuoka.jp", 3, false}, + {1, "morimachi.shizuoka.jp", 3, false}, + {1, "nishiizu.shizuoka.jp", 3, false}, + {1, "numazu.shizuoka.jp", 3, false}, + {1, "omaezaki.shizuoka.jp", 3, false}, + {1, "shimada.shizuoka.jp", 3, false}, + {1, "shimizu.shizuoka.jp", 3, false}, + {1, "shimoda.shizuoka.jp", 3, false}, + {1, "shizuoka.shizuoka.jp", 3, false}, + {1, "susono.shizuoka.jp", 3, false}, + {1, "yaizu.shizuoka.jp", 3, false}, + {1, "yoshida.shizuoka.jp", 3, false}, + {1, "ashikaga.tochigi.jp", 3, false}, + {1, "bato.tochigi.jp", 3, false}, + {1, "haga.tochigi.jp", 3, false}, + {1, "ichikai.tochigi.jp", 3, false}, + {1, "iwafune.tochigi.jp", 3, false}, + {1, "kaminokawa.tochigi.jp", 3, false}, + {1, "kanuma.tochigi.jp", 3, false}, + {1, "karasuyama.tochigi.jp", 3, false}, + {1, "kuroiso.tochigi.jp", 3, false}, + {1, "mashiko.tochigi.jp", 3, false}, + {1, "mibu.tochigi.jp", 3, false}, + {1, "moka.tochigi.jp", 3, false}, + {1, "motegi.tochigi.jp", 3, false}, + {1, "nasu.tochigi.jp", 3, false}, + {1, "nasushiobara.tochigi.jp", 3, false}, + {1, "nikko.tochigi.jp", 3, false}, + {1, "nishikata.tochigi.jp", 3, false}, + {1, "nogi.tochigi.jp", 3, false}, + {1, "ohira.tochigi.jp", 3, false}, + {1, "ohtawara.tochigi.jp", 3, false}, + {1, "oyama.tochigi.jp", 3, false}, + {1, "sakura.tochigi.jp", 3, false}, + {1, "sano.tochigi.jp", 3, false}, + {1, "shimotsuke.tochigi.jp", 3, false}, + {1, "shioya.tochigi.jp", 3, false}, + {1, "takanezawa.tochigi.jp", 3, false}, + {1, "tochigi.tochigi.jp", 3, false}, + {1, "tsuga.tochigi.jp", 3, false}, + {1, "ujiie.tochigi.jp", 3, false}, + {1, "utsunomiya.tochigi.jp", 3, false}, + {1, "yaita.tochigi.jp", 3, false}, + {1, "aizumi.tokushima.jp", 3, false}, + {1, "anan.tokushima.jp", 3, false}, + {1, "ichiba.tokushima.jp", 3, false}, + {1, "itano.tokushima.jp", 3, false}, + {1, "kainan.tokushima.jp", 3, false}, + {1, "komatsushima.tokushima.jp", 3, false}, + {1, "matsushige.tokushima.jp", 3, false}, + {1, "mima.tokushima.jp", 3, false}, + {1, "minami.tokushima.jp", 3, false}, + {1, "miyoshi.tokushima.jp", 3, false}, + {1, "mugi.tokushima.jp", 3, false}, + {1, "nakagawa.tokushima.jp", 3, false}, + {1, "naruto.tokushima.jp", 3, false}, + {1, "sanagochi.tokushima.jp", 3, false}, + {1, "shishikui.tokushima.jp", 3, false}, + {1, "tokushima.tokushima.jp", 3, false}, + {1, "wajiki.tokushima.jp", 3, false}, + {1, "adachi.tokyo.jp", 3, false}, + {1, "akiruno.tokyo.jp", 3, false}, + {1, "akishima.tokyo.jp", 3, false}, + {1, "aogashima.tokyo.jp", 3, false}, + {1, "arakawa.tokyo.jp", 3, false}, + {1, "bunkyo.tokyo.jp", 3, false}, + {1, "chiyoda.tokyo.jp", 3, false}, + {1, "chofu.tokyo.jp", 3, false}, + {1, 
"chuo.tokyo.jp", 3, false}, + {1, "edogawa.tokyo.jp", 3, false}, + {1, "fuchu.tokyo.jp", 3, false}, + {1, "fussa.tokyo.jp", 3, false}, + {1, "hachijo.tokyo.jp", 3, false}, + {1, "hachioji.tokyo.jp", 3, false}, + {1, "hamura.tokyo.jp", 3, false}, + {1, "higashikurume.tokyo.jp", 3, false}, + {1, "higashimurayama.tokyo.jp", 3, false}, + {1, "higashiyamato.tokyo.jp", 3, false}, + {1, "hino.tokyo.jp", 3, false}, + {1, "hinode.tokyo.jp", 3, false}, + {1, "hinohara.tokyo.jp", 3, false}, + {1, "inagi.tokyo.jp", 3, false}, + {1, "itabashi.tokyo.jp", 3, false}, + {1, "katsushika.tokyo.jp", 3, false}, + {1, "kita.tokyo.jp", 3, false}, + {1, "kiyose.tokyo.jp", 3, false}, + {1, "kodaira.tokyo.jp", 3, false}, + {1, "koganei.tokyo.jp", 3, false}, + {1, "kokubunji.tokyo.jp", 3, false}, + {1, "komae.tokyo.jp", 3, false}, + {1, "koto.tokyo.jp", 3, false}, + {1, "kouzushima.tokyo.jp", 3, false}, + {1, "kunitachi.tokyo.jp", 3, false}, + {1, "machida.tokyo.jp", 3, false}, + {1, "meguro.tokyo.jp", 3, false}, + {1, "minato.tokyo.jp", 3, false}, + {1, "mitaka.tokyo.jp", 3, false}, + {1, "mizuho.tokyo.jp", 3, false}, + {1, "musashimurayama.tokyo.jp", 3, false}, + {1, "musashino.tokyo.jp", 3, false}, + {1, "nakano.tokyo.jp", 3, false}, + {1, "nerima.tokyo.jp", 3, false}, + {1, "ogasawara.tokyo.jp", 3, false}, + {1, "okutama.tokyo.jp", 3, false}, + {1, "ome.tokyo.jp", 3, false}, + {1, "oshima.tokyo.jp", 3, false}, + {1, "ota.tokyo.jp", 3, false}, + {1, "setagaya.tokyo.jp", 3, false}, + {1, "shibuya.tokyo.jp", 3, false}, + {1, "shinagawa.tokyo.jp", 3, false}, + {1, "shinjuku.tokyo.jp", 3, false}, + {1, "suginami.tokyo.jp", 3, false}, + {1, "sumida.tokyo.jp", 3, false}, + {1, "tachikawa.tokyo.jp", 3, false}, + {1, "taito.tokyo.jp", 3, false}, + {1, "tama.tokyo.jp", 3, false}, + {1, "toshima.tokyo.jp", 3, false}, + {1, "chizu.tottori.jp", 3, false}, + {1, "hino.tottori.jp", 3, false}, + {1, "kawahara.tottori.jp", 3, false}, + {1, "koge.tottori.jp", 3, false}, + {1, "kotoura.tottori.jp", 3, false}, + {1, "misasa.tottori.jp", 3, false}, + {1, "nanbu.tottori.jp", 3, false}, + {1, "nichinan.tottori.jp", 3, false}, + {1, "sakaiminato.tottori.jp", 3, false}, + {1, "tottori.tottori.jp", 3, false}, + {1, "wakasa.tottori.jp", 3, false}, + {1, "yazu.tottori.jp", 3, false}, + {1, "yonago.tottori.jp", 3, false}, + {1, "asahi.toyama.jp", 3, false}, + {1, "fuchu.toyama.jp", 3, false}, + {1, "fukumitsu.toyama.jp", 3, false}, + {1, "funahashi.toyama.jp", 3, false}, + {1, "himi.toyama.jp", 3, false}, + {1, "imizu.toyama.jp", 3, false}, + {1, "inami.toyama.jp", 3, false}, + {1, "johana.toyama.jp", 3, false}, + {1, "kamiichi.toyama.jp", 3, false}, + {1, "kurobe.toyama.jp", 3, false}, + {1, "nakaniikawa.toyama.jp", 3, false}, + {1, "namerikawa.toyama.jp", 3, false}, + {1, "nanto.toyama.jp", 3, false}, + {1, "nyuzen.toyama.jp", 3, false}, + {1, "oyabe.toyama.jp", 3, false}, + {1, "taira.toyama.jp", 3, false}, + {1, "takaoka.toyama.jp", 3, false}, + {1, "tateyama.toyama.jp", 3, false}, + {1, "toga.toyama.jp", 3, false}, + {1, "tonami.toyama.jp", 3, false}, + {1, "toyama.toyama.jp", 3, false}, + {1, "unazuki.toyama.jp", 3, false}, + {1, "uozu.toyama.jp", 3, false}, + {1, "yamada.toyama.jp", 3, false}, + {1, "arida.wakayama.jp", 3, false}, + {1, "aridagawa.wakayama.jp", 3, false}, + {1, "gobo.wakayama.jp", 3, false}, + {1, "hashimoto.wakayama.jp", 3, false}, + {1, "hidaka.wakayama.jp", 3, false}, + {1, "hirogawa.wakayama.jp", 3, false}, + {1, "inami.wakayama.jp", 3, false}, + {1, "iwade.wakayama.jp", 3, false}, + {1, "kainan.wakayama.jp", 3, 
false}, + {1, "kamitonda.wakayama.jp", 3, false}, + {1, "katsuragi.wakayama.jp", 3, false}, + {1, "kimino.wakayama.jp", 3, false}, + {1, "kinokawa.wakayama.jp", 3, false}, + {1, "kitayama.wakayama.jp", 3, false}, + {1, "koya.wakayama.jp", 3, false}, + {1, "koza.wakayama.jp", 3, false}, + {1, "kozagawa.wakayama.jp", 3, false}, + {1, "kudoyama.wakayama.jp", 3, false}, + {1, "kushimoto.wakayama.jp", 3, false}, + {1, "mihama.wakayama.jp", 3, false}, + {1, "misato.wakayama.jp", 3, false}, + {1, "nachikatsuura.wakayama.jp", 3, false}, + {1, "shingu.wakayama.jp", 3, false}, + {1, "shirahama.wakayama.jp", 3, false}, + {1, "taiji.wakayama.jp", 3, false}, + {1, "tanabe.wakayama.jp", 3, false}, + {1, "wakayama.wakayama.jp", 3, false}, + {1, "yuasa.wakayama.jp", 3, false}, + {1, "yura.wakayama.jp", 3, false}, + {1, "asahi.yamagata.jp", 3, false}, + {1, "funagata.yamagata.jp", 3, false}, + {1, "higashine.yamagata.jp", 3, false}, + {1, "iide.yamagata.jp", 3, false}, + {1, "kahoku.yamagata.jp", 3, false}, + {1, "kaminoyama.yamagata.jp", 3, false}, + {1, "kaneyama.yamagata.jp", 3, false}, + {1, "kawanishi.yamagata.jp", 3, false}, + {1, "mamurogawa.yamagata.jp", 3, false}, + {1, "mikawa.yamagata.jp", 3, false}, + {1, "murayama.yamagata.jp", 3, false}, + {1, "nagai.yamagata.jp", 3, false}, + {1, "nakayama.yamagata.jp", 3, false}, + {1, "nanyo.yamagata.jp", 3, false}, + {1, "nishikawa.yamagata.jp", 3, false}, + {1, "obanazawa.yamagata.jp", 3, false}, + {1, "oe.yamagata.jp", 3, false}, + {1, "oguni.yamagata.jp", 3, false}, + {1, "ohkura.yamagata.jp", 3, false}, + {1, "oishida.yamagata.jp", 3, false}, + {1, "sagae.yamagata.jp", 3, false}, + {1, "sakata.yamagata.jp", 3, false}, + {1, "sakegawa.yamagata.jp", 3, false}, + {1, "shinjo.yamagata.jp", 3, false}, + {1, "shirataka.yamagata.jp", 3, false}, + {1, "shonai.yamagata.jp", 3, false}, + {1, "takahata.yamagata.jp", 3, false}, + {1, "tendo.yamagata.jp", 3, false}, + {1, "tozawa.yamagata.jp", 3, false}, + {1, "tsuruoka.yamagata.jp", 3, false}, + {1, "yamagata.yamagata.jp", 3, false}, + {1, "yamanobe.yamagata.jp", 3, false}, + {1, "yonezawa.yamagata.jp", 3, false}, + {1, "yuza.yamagata.jp", 3, false}, + {1, "abu.yamaguchi.jp", 3, false}, + {1, "hagi.yamaguchi.jp", 3, false}, + {1, "hikari.yamaguchi.jp", 3, false}, + {1, "hofu.yamaguchi.jp", 3, false}, + {1, "iwakuni.yamaguchi.jp", 3, false}, + {1, "kudamatsu.yamaguchi.jp", 3, false}, + {1, "mitou.yamaguchi.jp", 3, false}, + {1, "nagato.yamaguchi.jp", 3, false}, + {1, "oshima.yamaguchi.jp", 3, false}, + {1, "shimonoseki.yamaguchi.jp", 3, false}, + {1, "shunan.yamaguchi.jp", 3, false}, + {1, "tabuse.yamaguchi.jp", 3, false}, + {1, "tokuyama.yamaguchi.jp", 3, false}, + {1, "toyota.yamaguchi.jp", 3, false}, + {1, "ube.yamaguchi.jp", 3, false}, + {1, "yuu.yamaguchi.jp", 3, false}, + {1, "chuo.yamanashi.jp", 3, false}, + {1, "doshi.yamanashi.jp", 3, false}, + {1, "fuefuki.yamanashi.jp", 3, false}, + {1, "fujikawa.yamanashi.jp", 3, false}, + {1, "fujikawaguchiko.yamanashi.jp", 3, false}, + {1, "fujiyoshida.yamanashi.jp", 3, false}, + {1, "hayakawa.yamanashi.jp", 3, false}, + {1, "hokuto.yamanashi.jp", 3, false}, + {1, "ichikawamisato.yamanashi.jp", 3, false}, + {1, "kai.yamanashi.jp", 3, false}, + {1, "kofu.yamanashi.jp", 3, false}, + {1, "koshu.yamanashi.jp", 3, false}, + {1, "kosuge.yamanashi.jp", 3, false}, + {1, "minami-alps.yamanashi.jp", 3, false}, + {1, "minobu.yamanashi.jp", 3, false}, + {1, "nakamichi.yamanashi.jp", 3, false}, + {1, "nanbu.yamanashi.jp", 3, false}, + {1, "narusawa.yamanashi.jp", 3, false}, + {1, 
"nirasaki.yamanashi.jp", 3, false}, + {1, "nishikatsura.yamanashi.jp", 3, false}, + {1, "oshino.yamanashi.jp", 3, false}, + {1, "otsuki.yamanashi.jp", 3, false}, + {1, "showa.yamanashi.jp", 3, false}, + {1, "tabayama.yamanashi.jp", 3, false}, + {1, "tsuru.yamanashi.jp", 3, false}, + {1, "uenohara.yamanashi.jp", 3, false}, + {1, "yamanakako.yamanashi.jp", 3, false}, + {1, "yamanashi.yamanashi.jp", 3, false}, + {1, "ke", 1, false}, + {1, "ac.ke", 2, false}, + {1, "co.ke", 2, false}, + {1, "go.ke", 2, false}, + {1, "info.ke", 2, false}, + {1, "me.ke", 2, false}, + {1, "mobi.ke", 2, false}, + {1, "ne.ke", 2, false}, + {1, "or.ke", 2, false}, + {1, "sc.ke", 2, false}, + {1, "kg", 1, false}, + {1, "org.kg", 2, false}, + {1, "net.kg", 2, false}, + {1, "com.kg", 2, false}, + {1, "edu.kg", 2, false}, + {1, "gov.kg", 2, false}, + {1, "mil.kg", 2, false}, + {2, "kh", 2, false}, + {1, "ki", 1, false}, + {1, "edu.ki", 2, false}, + {1, "biz.ki", 2, false}, + {1, "net.ki", 2, false}, + {1, "org.ki", 2, false}, + {1, "gov.ki", 2, false}, + {1, "info.ki", 2, false}, + {1, "com.ki", 2, false}, + {1, "km", 1, false}, + {1, "org.km", 2, false}, + {1, "nom.km", 2, false}, + {1, "gov.km", 2, false}, + {1, "prd.km", 2, false}, + {1, "tm.km", 2, false}, + {1, "edu.km", 2, false}, + {1, "mil.km", 2, false}, + {1, "ass.km", 2, false}, + {1, "com.km", 2, false}, + {1, "coop.km", 2, false}, + {1, "asso.km", 2, false}, + {1, "presse.km", 2, false}, + {1, "medecin.km", 2, false}, + {1, "notaires.km", 2, false}, + {1, "pharmaciens.km", 2, false}, + {1, "veterinaire.km", 2, false}, + {1, "gouv.km", 2, false}, + {1, "kn", 1, false}, + {1, "net.kn", 2, false}, + {1, "org.kn", 2, false}, + {1, "edu.kn", 2, false}, + {1, "gov.kn", 2, false}, + {1, "kp", 1, false}, + {1, "com.kp", 2, false}, + {1, "edu.kp", 2, false}, + {1, "gov.kp", 2, false}, + {1, "org.kp", 2, false}, + {1, "rep.kp", 2, false}, + {1, "tra.kp", 2, false}, + {1, "kr", 1, false}, + {1, "ac.kr", 2, false}, + {1, "co.kr", 2, false}, + {1, "es.kr", 2, false}, + {1, "go.kr", 2, false}, + {1, "hs.kr", 2, false}, + {1, "kg.kr", 2, false}, + {1, "mil.kr", 2, false}, + {1, "ms.kr", 2, false}, + {1, "ne.kr", 2, false}, + {1, "or.kr", 2, false}, + {1, "pe.kr", 2, false}, + {1, "re.kr", 2, false}, + {1, "sc.kr", 2, false}, + {1, "busan.kr", 2, false}, + {1, "chungbuk.kr", 2, false}, + {1, "chungnam.kr", 2, false}, + {1, "daegu.kr", 2, false}, + {1, "daejeon.kr", 2, false}, + {1, "gangwon.kr", 2, false}, + {1, "gwangju.kr", 2, false}, + {1, "gyeongbuk.kr", 2, false}, + {1, "gyeonggi.kr", 2, false}, + {1, "gyeongnam.kr", 2, false}, + {1, "incheon.kr", 2, false}, + {1, "jeju.kr", 2, false}, + {1, "jeonbuk.kr", 2, false}, + {1, "jeonnam.kr", 2, false}, + {1, "seoul.kr", 2, false}, + {1, "ulsan.kr", 2, false}, + {1, "kw", 1, false}, + {1, "com.kw", 2, false}, + {1, "edu.kw", 2, false}, + {1, "emb.kw", 2, false}, + {1, "gov.kw", 2, false}, + {1, "ind.kw", 2, false}, + {1, "net.kw", 2, false}, + {1, "org.kw", 2, false}, + {1, "ky", 1, false}, + {1, "edu.ky", 2, false}, + {1, "gov.ky", 2, false}, + {1, "com.ky", 2, false}, + {1, "org.ky", 2, false}, + {1, "net.ky", 2, false}, + {1, "kz", 1, false}, + {1, "org.kz", 2, false}, + {1, "edu.kz", 2, false}, + {1, "net.kz", 2, false}, + {1, "gov.kz", 2, false}, + {1, "mil.kz", 2, false}, + {1, "com.kz", 2, false}, + {1, "la", 1, false}, + {1, "int.la", 2, false}, + {1, "net.la", 2, false}, + {1, "info.la", 2, false}, + {1, "edu.la", 2, false}, + {1, "gov.la", 2, false}, + {1, "per.la", 2, false}, + {1, "com.la", 2, false}, + {1, 
"org.la", 2, false}, + {1, "lb", 1, false}, + {1, "com.lb", 2, false}, + {1, "edu.lb", 2, false}, + {1, "gov.lb", 2, false}, + {1, "net.lb", 2, false}, + {1, "org.lb", 2, false}, + {1, "lc", 1, false}, + {1, "com.lc", 2, false}, + {1, "net.lc", 2, false}, + {1, "co.lc", 2, false}, + {1, "org.lc", 2, false}, + {1, "edu.lc", 2, false}, + {1, "gov.lc", 2, false}, + {1, "li", 1, false}, + {1, "lk", 1, false}, + {1, "gov.lk", 2, false}, + {1, "sch.lk", 2, false}, + {1, "net.lk", 2, false}, + {1, "int.lk", 2, false}, + {1, "com.lk", 2, false}, + {1, "org.lk", 2, false}, + {1, "edu.lk", 2, false}, + {1, "ngo.lk", 2, false}, + {1, "soc.lk", 2, false}, + {1, "web.lk", 2, false}, + {1, "ltd.lk", 2, false}, + {1, "assn.lk", 2, false}, + {1, "grp.lk", 2, false}, + {1, "hotel.lk", 2, false}, + {1, "ac.lk", 2, false}, + {1, "lr", 1, false}, + {1, "com.lr", 2, false}, + {1, "edu.lr", 2, false}, + {1, "gov.lr", 2, false}, + {1, "org.lr", 2, false}, + {1, "net.lr", 2, false}, + {1, "ls", 1, false}, + {1, "ac.ls", 2, false}, + {1, "biz.ls", 2, false}, + {1, "co.ls", 2, false}, + {1, "edu.ls", 2, false}, + {1, "gov.ls", 2, false}, + {1, "info.ls", 2, false}, + {1, "net.ls", 2, false}, + {1, "org.ls", 2, false}, + {1, "sc.ls", 2, false}, + {1, "lt", 1, false}, + {1, "gov.lt", 2, false}, + {1, "lu", 1, false}, + {1, "lv", 1, false}, + {1, "com.lv", 2, false}, + {1, "edu.lv", 2, false}, + {1, "gov.lv", 2, false}, + {1, "org.lv", 2, false}, + {1, "mil.lv", 2, false}, + {1, "id.lv", 2, false}, + {1, "net.lv", 2, false}, + {1, "asn.lv", 2, false}, + {1, "conf.lv", 2, false}, + {1, "ly", 1, false}, + {1, "com.ly", 2, false}, + {1, "net.ly", 2, false}, + {1, "gov.ly", 2, false}, + {1, "plc.ly", 2, false}, + {1, "edu.ly", 2, false}, + {1, "sch.ly", 2, false}, + {1, "med.ly", 2, false}, + {1, "org.ly", 2, false}, + {1, "id.ly", 2, false}, + {1, "ma", 1, false}, + {1, "co.ma", 2, false}, + {1, "net.ma", 2, false}, + {1, "gov.ma", 2, false}, + {1, "org.ma", 2, false}, + {1, "ac.ma", 2, false}, + {1, "press.ma", 2, false}, + {1, "mc", 1, false}, + {1, "tm.mc", 2, false}, + {1, "asso.mc", 2, false}, + {1, "md", 1, false}, + {1, "me", 1, false}, + {1, "co.me", 2, false}, + {1, "net.me", 2, false}, + {1, "org.me", 2, false}, + {1, "edu.me", 2, false}, + {1, "ac.me", 2, false}, + {1, "gov.me", 2, false}, + {1, "its.me", 2, false}, + {1, "priv.me", 2, false}, + {1, "mg", 1, false}, + {1, "org.mg", 2, false}, + {1, "nom.mg", 2, false}, + {1, "gov.mg", 2, false}, + {1, "prd.mg", 2, false}, + {1, "tm.mg", 2, false}, + {1, "edu.mg", 2, false}, + {1, "mil.mg", 2, false}, + {1, "com.mg", 2, false}, + {1, "co.mg", 2, false}, + {1, "mh", 1, false}, + {1, "mil", 1, false}, + {1, "mk", 1, false}, + {1, "com.mk", 2, false}, + {1, "org.mk", 2, false}, + {1, "net.mk", 2, false}, + {1, "edu.mk", 2, false}, + {1, "gov.mk", 2, false}, + {1, "inf.mk", 2, false}, + {1, "name.mk", 2, false}, + {1, "ml", 1, false}, + {1, "com.ml", 2, false}, + {1, "edu.ml", 2, false}, + {1, "gouv.ml", 2, false}, + {1, "gov.ml", 2, false}, + {1, "net.ml", 2, false}, + {1, "org.ml", 2, false}, + {1, "presse.ml", 2, false}, + {2, "mm", 2, false}, + {1, "mn", 1, false}, + {1, "gov.mn", 2, false}, + {1, "edu.mn", 2, false}, + {1, "org.mn", 2, false}, + {1, "mo", 1, false}, + {1, "com.mo", 2, false}, + {1, "net.mo", 2, false}, + {1, "org.mo", 2, false}, + {1, "edu.mo", 2, false}, + {1, "gov.mo", 2, false}, + {1, "mobi", 1, false}, + {1, "mp", 1, false}, + {1, "mq", 1, false}, + {1, "mr", 1, false}, + {1, "gov.mr", 2, false}, + {1, "ms", 1, false}, + {1, "com.ms", 2, 
false}, + {1, "edu.ms", 2, false}, + {1, "gov.ms", 2, false}, + {1, "net.ms", 2, false}, + {1, "org.ms", 2, false}, + {1, "mt", 1, false}, + {1, "com.mt", 2, false}, + {1, "edu.mt", 2, false}, + {1, "net.mt", 2, false}, + {1, "org.mt", 2, false}, + {1, "mu", 1, false}, + {1, "com.mu", 2, false}, + {1, "net.mu", 2, false}, + {1, "org.mu", 2, false}, + {1, "gov.mu", 2, false}, + {1, "ac.mu", 2, false}, + {1, "co.mu", 2, false}, + {1, "or.mu", 2, false}, + {1, "museum", 1, false}, + {1, "academy.museum", 2, false}, + {1, "agriculture.museum", 2, false}, + {1, "air.museum", 2, false}, + {1, "airguard.museum", 2, false}, + {1, "alabama.museum", 2, false}, + {1, "alaska.museum", 2, false}, + {1, "amber.museum", 2, false}, + {1, "ambulance.museum", 2, false}, + {1, "american.museum", 2, false}, + {1, "americana.museum", 2, false}, + {1, "americanantiques.museum", 2, false}, + {1, "americanart.museum", 2, false}, + {1, "amsterdam.museum", 2, false}, + {1, "and.museum", 2, false}, + {1, "annefrank.museum", 2, false}, + {1, "anthro.museum", 2, false}, + {1, "anthropology.museum", 2, false}, + {1, "antiques.museum", 2, false}, + {1, "aquarium.museum", 2, false}, + {1, "arboretum.museum", 2, false}, + {1, "archaeological.museum", 2, false}, + {1, "archaeology.museum", 2, false}, + {1, "architecture.museum", 2, false}, + {1, "art.museum", 2, false}, + {1, "artanddesign.museum", 2, false}, + {1, "artcenter.museum", 2, false}, + {1, "artdeco.museum", 2, false}, + {1, "arteducation.museum", 2, false}, + {1, "artgallery.museum", 2, false}, + {1, "arts.museum", 2, false}, + {1, "artsandcrafts.museum", 2, false}, + {1, "asmatart.museum", 2, false}, + {1, "assassination.museum", 2, false}, + {1, "assisi.museum", 2, false}, + {1, "association.museum", 2, false}, + {1, "astronomy.museum", 2, false}, + {1, "atlanta.museum", 2, false}, + {1, "austin.museum", 2, false}, + {1, "australia.museum", 2, false}, + {1, "automotive.museum", 2, false}, + {1, "aviation.museum", 2, false}, + {1, "axis.museum", 2, false}, + {1, "badajoz.museum", 2, false}, + {1, "baghdad.museum", 2, false}, + {1, "bahn.museum", 2, false}, + {1, "bale.museum", 2, false}, + {1, "baltimore.museum", 2, false}, + {1, "barcelona.museum", 2, false}, + {1, "baseball.museum", 2, false}, + {1, "basel.museum", 2, false}, + {1, "baths.museum", 2, false}, + {1, "bauern.museum", 2, false}, + {1, "beauxarts.museum", 2, false}, + {1, "beeldengeluid.museum", 2, false}, + {1, "bellevue.museum", 2, false}, + {1, "bergbau.museum", 2, false}, + {1, "berkeley.museum", 2, false}, + {1, "berlin.museum", 2, false}, + {1, "bern.museum", 2, false}, + {1, "bible.museum", 2, false}, + {1, "bilbao.museum", 2, false}, + {1, "bill.museum", 2, false}, + {1, "birdart.museum", 2, false}, + {1, "birthplace.museum", 2, false}, + {1, "bonn.museum", 2, false}, + {1, "boston.museum", 2, false}, + {1, "botanical.museum", 2, false}, + {1, "botanicalgarden.museum", 2, false}, + {1, "botanicgarden.museum", 2, false}, + {1, "botany.museum", 2, false}, + {1, "brandywinevalley.museum", 2, false}, + {1, "brasil.museum", 2, false}, + {1, "bristol.museum", 2, false}, + {1, "british.museum", 2, false}, + {1, "britishcolumbia.museum", 2, false}, + {1, "broadcast.museum", 2, false}, + {1, "brunel.museum", 2, false}, + {1, "brussel.museum", 2, false}, + {1, "brussels.museum", 2, false}, + {1, "bruxelles.museum", 2, false}, + {1, "building.museum", 2, false}, + {1, "burghof.museum", 2, false}, + {1, "bus.museum", 2, false}, + {1, "bushey.museum", 2, false}, + {1, "cadaques.museum", 2, false}, + 
{1, "california.museum", 2, false}, + {1, "cambridge.museum", 2, false}, + {1, "can.museum", 2, false}, + {1, "canada.museum", 2, false}, + {1, "capebreton.museum", 2, false}, + {1, "carrier.museum", 2, false}, + {1, "cartoonart.museum", 2, false}, + {1, "casadelamoneda.museum", 2, false}, + {1, "castle.museum", 2, false}, + {1, "castres.museum", 2, false}, + {1, "celtic.museum", 2, false}, + {1, "center.museum", 2, false}, + {1, "chattanooga.museum", 2, false}, + {1, "cheltenham.museum", 2, false}, + {1, "chesapeakebay.museum", 2, false}, + {1, "chicago.museum", 2, false}, + {1, "children.museum", 2, false}, + {1, "childrens.museum", 2, false}, + {1, "childrensgarden.museum", 2, false}, + {1, "chiropractic.museum", 2, false}, + {1, "chocolate.museum", 2, false}, + {1, "christiansburg.museum", 2, false}, + {1, "cincinnati.museum", 2, false}, + {1, "cinema.museum", 2, false}, + {1, "circus.museum", 2, false}, + {1, "civilisation.museum", 2, false}, + {1, "civilization.museum", 2, false}, + {1, "civilwar.museum", 2, false}, + {1, "clinton.museum", 2, false}, + {1, "clock.museum", 2, false}, + {1, "coal.museum", 2, false}, + {1, "coastaldefence.museum", 2, false}, + {1, "cody.museum", 2, false}, + {1, "coldwar.museum", 2, false}, + {1, "collection.museum", 2, false}, + {1, "colonialwilliamsburg.museum", 2, false}, + {1, "coloradoplateau.museum", 2, false}, + {1, "columbia.museum", 2, false}, + {1, "columbus.museum", 2, false}, + {1, "communication.museum", 2, false}, + {1, "communications.museum", 2, false}, + {1, "community.museum", 2, false}, + {1, "computer.museum", 2, false}, + {1, "computerhistory.museum", 2, false}, + {1, "xn--comunicaes-v6a2o.museum", 2, false}, + {1, "contemporary.museum", 2, false}, + {1, "contemporaryart.museum", 2, false}, + {1, "convent.museum", 2, false}, + {1, "copenhagen.museum", 2, false}, + {1, "corporation.museum", 2, false}, + {1, "xn--correios-e-telecomunicaes-ghc29a.museum", 2, false}, + {1, "corvette.museum", 2, false}, + {1, "costume.museum", 2, false}, + {1, "countryestate.museum", 2, false}, + {1, "county.museum", 2, false}, + {1, "crafts.museum", 2, false}, + {1, "cranbrook.museum", 2, false}, + {1, "creation.museum", 2, false}, + {1, "cultural.museum", 2, false}, + {1, "culturalcenter.museum", 2, false}, + {1, "culture.museum", 2, false}, + {1, "cyber.museum", 2, false}, + {1, "cymru.museum", 2, false}, + {1, "dali.museum", 2, false}, + {1, "dallas.museum", 2, false}, + {1, "database.museum", 2, false}, + {1, "ddr.museum", 2, false}, + {1, "decorativearts.museum", 2, false}, + {1, "delaware.museum", 2, false}, + {1, "delmenhorst.museum", 2, false}, + {1, "denmark.museum", 2, false}, + {1, "depot.museum", 2, false}, + {1, "design.museum", 2, false}, + {1, "detroit.museum", 2, false}, + {1, "dinosaur.museum", 2, false}, + {1, "discovery.museum", 2, false}, + {1, "dolls.museum", 2, false}, + {1, "donostia.museum", 2, false}, + {1, "durham.museum", 2, false}, + {1, "eastafrica.museum", 2, false}, + {1, "eastcoast.museum", 2, false}, + {1, "education.museum", 2, false}, + {1, "educational.museum", 2, false}, + {1, "egyptian.museum", 2, false}, + {1, "eisenbahn.museum", 2, false}, + {1, "elburg.museum", 2, false}, + {1, "elvendrell.museum", 2, false}, + {1, "embroidery.museum", 2, false}, + {1, "encyclopedic.museum", 2, false}, + {1, "england.museum", 2, false}, + {1, "entomology.museum", 2, false}, + {1, "environment.museum", 2, false}, + {1, "environmentalconservation.museum", 2, false}, + {1, "epilepsy.museum", 2, false}, + {1, "essex.museum", 2, 
false}, + {1, "estate.museum", 2, false}, + {1, "ethnology.museum", 2, false}, + {1, "exeter.museum", 2, false}, + {1, "exhibition.museum", 2, false}, + {1, "family.museum", 2, false}, + {1, "farm.museum", 2, false}, + {1, "farmequipment.museum", 2, false}, + {1, "farmers.museum", 2, false}, + {1, "farmstead.museum", 2, false}, + {1, "field.museum", 2, false}, + {1, "figueres.museum", 2, false}, + {1, "filatelia.museum", 2, false}, + {1, "film.museum", 2, false}, + {1, "fineart.museum", 2, false}, + {1, "finearts.museum", 2, false}, + {1, "finland.museum", 2, false}, + {1, "flanders.museum", 2, false}, + {1, "florida.museum", 2, false}, + {1, "force.museum", 2, false}, + {1, "fortmissoula.museum", 2, false}, + {1, "fortworth.museum", 2, false}, + {1, "foundation.museum", 2, false}, + {1, "francaise.museum", 2, false}, + {1, "frankfurt.museum", 2, false}, + {1, "franziskaner.museum", 2, false}, + {1, "freemasonry.museum", 2, false}, + {1, "freiburg.museum", 2, false}, + {1, "fribourg.museum", 2, false}, + {1, "frog.museum", 2, false}, + {1, "fundacio.museum", 2, false}, + {1, "furniture.museum", 2, false}, + {1, "gallery.museum", 2, false}, + {1, "garden.museum", 2, false}, + {1, "gateway.museum", 2, false}, + {1, "geelvinck.museum", 2, false}, + {1, "gemological.museum", 2, false}, + {1, "geology.museum", 2, false}, + {1, "georgia.museum", 2, false}, + {1, "giessen.museum", 2, false}, + {1, "glas.museum", 2, false}, + {1, "glass.museum", 2, false}, + {1, "gorge.museum", 2, false}, + {1, "grandrapids.museum", 2, false}, + {1, "graz.museum", 2, false}, + {1, "guernsey.museum", 2, false}, + {1, "halloffame.museum", 2, false}, + {1, "hamburg.museum", 2, false}, + {1, "handson.museum", 2, false}, + {1, "harvestcelebration.museum", 2, false}, + {1, "hawaii.museum", 2, false}, + {1, "health.museum", 2, false}, + {1, "heimatunduhren.museum", 2, false}, + {1, "hellas.museum", 2, false}, + {1, "helsinki.museum", 2, false}, + {1, "hembygdsforbund.museum", 2, false}, + {1, "heritage.museum", 2, false}, + {1, "histoire.museum", 2, false}, + {1, "historical.museum", 2, false}, + {1, "historicalsociety.museum", 2, false}, + {1, "historichouses.museum", 2, false}, + {1, "historisch.museum", 2, false}, + {1, "historisches.museum", 2, false}, + {1, "history.museum", 2, false}, + {1, "historyofscience.museum", 2, false}, + {1, "horology.museum", 2, false}, + {1, "house.museum", 2, false}, + {1, "humanities.museum", 2, false}, + {1, "illustration.museum", 2, false}, + {1, "imageandsound.museum", 2, false}, + {1, "indian.museum", 2, false}, + {1, "indiana.museum", 2, false}, + {1, "indianapolis.museum", 2, false}, + {1, "indianmarket.museum", 2, false}, + {1, "intelligence.museum", 2, false}, + {1, "interactive.museum", 2, false}, + {1, "iraq.museum", 2, false}, + {1, "iron.museum", 2, false}, + {1, "isleofman.museum", 2, false}, + {1, "jamison.museum", 2, false}, + {1, "jefferson.museum", 2, false}, + {1, "jerusalem.museum", 2, false}, + {1, "jewelry.museum", 2, false}, + {1, "jewish.museum", 2, false}, + {1, "jewishart.museum", 2, false}, + {1, "jfk.museum", 2, false}, + {1, "journalism.museum", 2, false}, + {1, "judaica.museum", 2, false}, + {1, "judygarland.museum", 2, false}, + {1, "juedisches.museum", 2, false}, + {1, "juif.museum", 2, false}, + {1, "karate.museum", 2, false}, + {1, "karikatur.museum", 2, false}, + {1, "kids.museum", 2, false}, + {1, "koebenhavn.museum", 2, false}, + {1, "koeln.museum", 2, false}, + {1, "kunst.museum", 2, false}, + {1, "kunstsammlung.museum", 2, false}, + {1, 
"kunstunddesign.museum", 2, false}, + {1, "labor.museum", 2, false}, + {1, "labour.museum", 2, false}, + {1, "lajolla.museum", 2, false}, + {1, "lancashire.museum", 2, false}, + {1, "landes.museum", 2, false}, + {1, "lans.museum", 2, false}, + {1, "xn--lns-qla.museum", 2, false}, + {1, "larsson.museum", 2, false}, + {1, "lewismiller.museum", 2, false}, + {1, "lincoln.museum", 2, false}, + {1, "linz.museum", 2, false}, + {1, "living.museum", 2, false}, + {1, "livinghistory.museum", 2, false}, + {1, "localhistory.museum", 2, false}, + {1, "london.museum", 2, false}, + {1, "losangeles.museum", 2, false}, + {1, "louvre.museum", 2, false}, + {1, "loyalist.museum", 2, false}, + {1, "lucerne.museum", 2, false}, + {1, "luxembourg.museum", 2, false}, + {1, "luzern.museum", 2, false}, + {1, "mad.museum", 2, false}, + {1, "madrid.museum", 2, false}, + {1, "mallorca.museum", 2, false}, + {1, "manchester.museum", 2, false}, + {1, "mansion.museum", 2, false}, + {1, "mansions.museum", 2, false}, + {1, "manx.museum", 2, false}, + {1, "marburg.museum", 2, false}, + {1, "maritime.museum", 2, false}, + {1, "maritimo.museum", 2, false}, + {1, "maryland.museum", 2, false}, + {1, "marylhurst.museum", 2, false}, + {1, "media.museum", 2, false}, + {1, "medical.museum", 2, false}, + {1, "medizinhistorisches.museum", 2, false}, + {1, "meeres.museum", 2, false}, + {1, "memorial.museum", 2, false}, + {1, "mesaverde.museum", 2, false}, + {1, "michigan.museum", 2, false}, + {1, "midatlantic.museum", 2, false}, + {1, "military.museum", 2, false}, + {1, "mill.museum", 2, false}, + {1, "miners.museum", 2, false}, + {1, "mining.museum", 2, false}, + {1, "minnesota.museum", 2, false}, + {1, "missile.museum", 2, false}, + {1, "missoula.museum", 2, false}, + {1, "modern.museum", 2, false}, + {1, "moma.museum", 2, false}, + {1, "money.museum", 2, false}, + {1, "monmouth.museum", 2, false}, + {1, "monticello.museum", 2, false}, + {1, "montreal.museum", 2, false}, + {1, "moscow.museum", 2, false}, + {1, "motorcycle.museum", 2, false}, + {1, "muenchen.museum", 2, false}, + {1, "muenster.museum", 2, false}, + {1, "mulhouse.museum", 2, false}, + {1, "muncie.museum", 2, false}, + {1, "museet.museum", 2, false}, + {1, "museumcenter.museum", 2, false}, + {1, "museumvereniging.museum", 2, false}, + {1, "music.museum", 2, false}, + {1, "national.museum", 2, false}, + {1, "nationalfirearms.museum", 2, false}, + {1, "nationalheritage.museum", 2, false}, + {1, "nativeamerican.museum", 2, false}, + {1, "naturalhistory.museum", 2, false}, + {1, "naturalhistorymuseum.museum", 2, false}, + {1, "naturalsciences.museum", 2, false}, + {1, "nature.museum", 2, false}, + {1, "naturhistorisches.museum", 2, false}, + {1, "natuurwetenschappen.museum", 2, false}, + {1, "naumburg.museum", 2, false}, + {1, "naval.museum", 2, false}, + {1, "nebraska.museum", 2, false}, + {1, "neues.museum", 2, false}, + {1, "newhampshire.museum", 2, false}, + {1, "newjersey.museum", 2, false}, + {1, "newmexico.museum", 2, false}, + {1, "newport.museum", 2, false}, + {1, "newspaper.museum", 2, false}, + {1, "newyork.museum", 2, false}, + {1, "niepce.museum", 2, false}, + {1, "norfolk.museum", 2, false}, + {1, "north.museum", 2, false}, + {1, "nrw.museum", 2, false}, + {1, "nyc.museum", 2, false}, + {1, "nyny.museum", 2, false}, + {1, "oceanographic.museum", 2, false}, + {1, "oceanographique.museum", 2, false}, + {1, "omaha.museum", 2, false}, + {1, "online.museum", 2, false}, + {1, "ontario.museum", 2, false}, + {1, "openair.museum", 2, false}, + {1, "oregon.museum", 2, 
false}, + {1, "oregontrail.museum", 2, false}, + {1, "otago.museum", 2, false}, + {1, "oxford.museum", 2, false}, + {1, "pacific.museum", 2, false}, + {1, "paderborn.museum", 2, false}, + {1, "palace.museum", 2, false}, + {1, "paleo.museum", 2, false}, + {1, "palmsprings.museum", 2, false}, + {1, "panama.museum", 2, false}, + {1, "paris.museum", 2, false}, + {1, "pasadena.museum", 2, false}, + {1, "pharmacy.museum", 2, false}, + {1, "philadelphia.museum", 2, false}, + {1, "philadelphiaarea.museum", 2, false}, + {1, "philately.museum", 2, false}, + {1, "phoenix.museum", 2, false}, + {1, "photography.museum", 2, false}, + {1, "pilots.museum", 2, false}, + {1, "pittsburgh.museum", 2, false}, + {1, "planetarium.museum", 2, false}, + {1, "plantation.museum", 2, false}, + {1, "plants.museum", 2, false}, + {1, "plaza.museum", 2, false}, + {1, "portal.museum", 2, false}, + {1, "portland.museum", 2, false}, + {1, "portlligat.museum", 2, false}, + {1, "posts-and-telecommunications.museum", 2, false}, + {1, "preservation.museum", 2, false}, + {1, "presidio.museum", 2, false}, + {1, "press.museum", 2, false}, + {1, "project.museum", 2, false}, + {1, "public.museum", 2, false}, + {1, "pubol.museum", 2, false}, + {1, "quebec.museum", 2, false}, + {1, "railroad.museum", 2, false}, + {1, "railway.museum", 2, false}, + {1, "research.museum", 2, false}, + {1, "resistance.museum", 2, false}, + {1, "riodejaneiro.museum", 2, false}, + {1, "rochester.museum", 2, false}, + {1, "rockart.museum", 2, false}, + {1, "roma.museum", 2, false}, + {1, "russia.museum", 2, false}, + {1, "saintlouis.museum", 2, false}, + {1, "salem.museum", 2, false}, + {1, "salvadordali.museum", 2, false}, + {1, "salzburg.museum", 2, false}, + {1, "sandiego.museum", 2, false}, + {1, "sanfrancisco.museum", 2, false}, + {1, "santabarbara.museum", 2, false}, + {1, "santacruz.museum", 2, false}, + {1, "santafe.museum", 2, false}, + {1, "saskatchewan.museum", 2, false}, + {1, "satx.museum", 2, false}, + {1, "savannahga.museum", 2, false}, + {1, "schlesisches.museum", 2, false}, + {1, "schoenbrunn.museum", 2, false}, + {1, "schokoladen.museum", 2, false}, + {1, "school.museum", 2, false}, + {1, "schweiz.museum", 2, false}, + {1, "science.museum", 2, false}, + {1, "scienceandhistory.museum", 2, false}, + {1, "scienceandindustry.museum", 2, false}, + {1, "sciencecenter.museum", 2, false}, + {1, "sciencecenters.museum", 2, false}, + {1, "science-fiction.museum", 2, false}, + {1, "sciencehistory.museum", 2, false}, + {1, "sciences.museum", 2, false}, + {1, "sciencesnaturelles.museum", 2, false}, + {1, "scotland.museum", 2, false}, + {1, "seaport.museum", 2, false}, + {1, "settlement.museum", 2, false}, + {1, "settlers.museum", 2, false}, + {1, "shell.museum", 2, false}, + {1, "sherbrooke.museum", 2, false}, + {1, "sibenik.museum", 2, false}, + {1, "silk.museum", 2, false}, + {1, "ski.museum", 2, false}, + {1, "skole.museum", 2, false}, + {1, "society.museum", 2, false}, + {1, "sologne.museum", 2, false}, + {1, "soundandvision.museum", 2, false}, + {1, "southcarolina.museum", 2, false}, + {1, "southwest.museum", 2, false}, + {1, "space.museum", 2, false}, + {1, "spy.museum", 2, false}, + {1, "square.museum", 2, false}, + {1, "stadt.museum", 2, false}, + {1, "stalbans.museum", 2, false}, + {1, "starnberg.museum", 2, false}, + {1, "state.museum", 2, false}, + {1, "stateofdelaware.museum", 2, false}, + {1, "station.museum", 2, false}, + {1, "steam.museum", 2, false}, + {1, "steiermark.museum", 2, false}, + {1, "stjohn.museum", 2, false}, + {1, 
"stockholm.museum", 2, false}, + {1, "stpetersburg.museum", 2, false}, + {1, "stuttgart.museum", 2, false}, + {1, "suisse.museum", 2, false}, + {1, "surgeonshall.museum", 2, false}, + {1, "surrey.museum", 2, false}, + {1, "svizzera.museum", 2, false}, + {1, "sweden.museum", 2, false}, + {1, "sydney.museum", 2, false}, + {1, "tank.museum", 2, false}, + {1, "tcm.museum", 2, false}, + {1, "technology.museum", 2, false}, + {1, "telekommunikation.museum", 2, false}, + {1, "television.museum", 2, false}, + {1, "texas.museum", 2, false}, + {1, "textile.museum", 2, false}, + {1, "theater.museum", 2, false}, + {1, "time.museum", 2, false}, + {1, "timekeeping.museum", 2, false}, + {1, "topology.museum", 2, false}, + {1, "torino.museum", 2, false}, + {1, "touch.museum", 2, false}, + {1, "town.museum", 2, false}, + {1, "transport.museum", 2, false}, + {1, "tree.museum", 2, false}, + {1, "trolley.museum", 2, false}, + {1, "trust.museum", 2, false}, + {1, "trustee.museum", 2, false}, + {1, "uhren.museum", 2, false}, + {1, "ulm.museum", 2, false}, + {1, "undersea.museum", 2, false}, + {1, "university.museum", 2, false}, + {1, "usa.museum", 2, false}, + {1, "usantiques.museum", 2, false}, + {1, "usarts.museum", 2, false}, + {1, "uscountryestate.museum", 2, false}, + {1, "usculture.museum", 2, false}, + {1, "usdecorativearts.museum", 2, false}, + {1, "usgarden.museum", 2, false}, + {1, "ushistory.museum", 2, false}, + {1, "ushuaia.museum", 2, false}, + {1, "uslivinghistory.museum", 2, false}, + {1, "utah.museum", 2, false}, + {1, "uvic.museum", 2, false}, + {1, "valley.museum", 2, false}, + {1, "vantaa.museum", 2, false}, + {1, "versailles.museum", 2, false}, + {1, "viking.museum", 2, false}, + {1, "village.museum", 2, false}, + {1, "virginia.museum", 2, false}, + {1, "virtual.museum", 2, false}, + {1, "virtuel.museum", 2, false}, + {1, "vlaanderen.museum", 2, false}, + {1, "volkenkunde.museum", 2, false}, + {1, "wales.museum", 2, false}, + {1, "wallonie.museum", 2, false}, + {1, "war.museum", 2, false}, + {1, "washingtondc.museum", 2, false}, + {1, "watchandclock.museum", 2, false}, + {1, "watch-and-clock.museum", 2, false}, + {1, "western.museum", 2, false}, + {1, "westfalen.museum", 2, false}, + {1, "whaling.museum", 2, false}, + {1, "wildlife.museum", 2, false}, + {1, "williamsburg.museum", 2, false}, + {1, "windmill.museum", 2, false}, + {1, "workshop.museum", 2, false}, + {1, "york.museum", 2, false}, + {1, "yorkshire.museum", 2, false}, + {1, "yosemite.museum", 2, false}, + {1, "youth.museum", 2, false}, + {1, "zoological.museum", 2, false}, + {1, "zoology.museum", 2, false}, + {1, "xn--9dbhblg6di.museum", 2, false}, + {1, "xn--h1aegh.museum", 2, false}, + {1, "mv", 1, false}, + {1, "aero.mv", 2, false}, + {1, "biz.mv", 2, false}, + {1, "com.mv", 2, false}, + {1, "coop.mv", 2, false}, + {1, "edu.mv", 2, false}, + {1, "gov.mv", 2, false}, + {1, "info.mv", 2, false}, + {1, "int.mv", 2, false}, + {1, "mil.mv", 2, false}, + {1, "museum.mv", 2, false}, + {1, "name.mv", 2, false}, + {1, "net.mv", 2, false}, + {1, "org.mv", 2, false}, + {1, "pro.mv", 2, false}, + {1, "mw", 1, false}, + {1, "ac.mw", 2, false}, + {1, "biz.mw", 2, false}, + {1, "co.mw", 2, false}, + {1, "com.mw", 2, false}, + {1, "coop.mw", 2, false}, + {1, "edu.mw", 2, false}, + {1, "gov.mw", 2, false}, + {1, "int.mw", 2, false}, + {1, "museum.mw", 2, false}, + {1, "net.mw", 2, false}, + {1, "org.mw", 2, false}, + {1, "mx", 1, false}, + {1, "com.mx", 2, false}, + {1, "org.mx", 2, false}, + {1, "gob.mx", 2, false}, + {1, "edu.mx", 2, false}, 
+ {1, "net.mx", 2, false}, + {1, "my", 1, false}, + {1, "com.my", 2, false}, + {1, "net.my", 2, false}, + {1, "org.my", 2, false}, + {1, "gov.my", 2, false}, + {1, "edu.my", 2, false}, + {1, "mil.my", 2, false}, + {1, "name.my", 2, false}, + {1, "mz", 1, false}, + {1, "ac.mz", 2, false}, + {1, "adv.mz", 2, false}, + {1, "co.mz", 2, false}, + {1, "edu.mz", 2, false}, + {1, "gov.mz", 2, false}, + {1, "mil.mz", 2, false}, + {1, "net.mz", 2, false}, + {1, "org.mz", 2, false}, + {1, "na", 1, false}, + {1, "info.na", 2, false}, + {1, "pro.na", 2, false}, + {1, "name.na", 2, false}, + {1, "school.na", 2, false}, + {1, "or.na", 2, false}, + {1, "dr.na", 2, false}, + {1, "us.na", 2, false}, + {1, "mx.na", 2, false}, + {1, "ca.na", 2, false}, + {1, "in.na", 2, false}, + {1, "cc.na", 2, false}, + {1, "tv.na", 2, false}, + {1, "ws.na", 2, false}, + {1, "mobi.na", 2, false}, + {1, "co.na", 2, false}, + {1, "com.na", 2, false}, + {1, "org.na", 2, false}, + {1, "name", 1, false}, + {1, "nc", 1, false}, + {1, "asso.nc", 2, false}, + {1, "nom.nc", 2, false}, + {1, "ne", 1, false}, + {1, "net", 1, false}, + {1, "nf", 1, false}, + {1, "com.nf", 2, false}, + {1, "net.nf", 2, false}, + {1, "per.nf", 2, false}, + {1, "rec.nf", 2, false}, + {1, "web.nf", 2, false}, + {1, "arts.nf", 2, false}, + {1, "firm.nf", 2, false}, + {1, "info.nf", 2, false}, + {1, "other.nf", 2, false}, + {1, "store.nf", 2, false}, + {1, "ng", 1, false}, + {1, "com.ng", 2, false}, + {1, "edu.ng", 2, false}, + {1, "gov.ng", 2, false}, + {1, "i.ng", 2, false}, + {1, "mil.ng", 2, false}, + {1, "mobi.ng", 2, false}, + {1, "name.ng", 2, false}, + {1, "net.ng", 2, false}, + {1, "org.ng", 2, false}, + {1, "sch.ng", 2, false}, + {1, "ni", 1, false}, + {1, "ac.ni", 2, false}, + {1, "biz.ni", 2, false}, + {1, "co.ni", 2, false}, + {1, "com.ni", 2, false}, + {1, "edu.ni", 2, false}, + {1, "gob.ni", 2, false}, + {1, "in.ni", 2, false}, + {1, "info.ni", 2, false}, + {1, "int.ni", 2, false}, + {1, "mil.ni", 2, false}, + {1, "net.ni", 2, false}, + {1, "nom.ni", 2, false}, + {1, "org.ni", 2, false}, + {1, "web.ni", 2, false}, + {1, "nl", 1, false}, + {1, "no", 1, false}, + {1, "fhs.no", 2, false}, + {1, "vgs.no", 2, false}, + {1, "fylkesbibl.no", 2, false}, + {1, "folkebibl.no", 2, false}, + {1, "museum.no", 2, false}, + {1, "idrett.no", 2, false}, + {1, "priv.no", 2, false}, + {1, "mil.no", 2, false}, + {1, "stat.no", 2, false}, + {1, "dep.no", 2, false}, + {1, "kommune.no", 2, false}, + {1, "herad.no", 2, false}, + {1, "aa.no", 2, false}, + {1, "ah.no", 2, false}, + {1, "bu.no", 2, false}, + {1, "fm.no", 2, false}, + {1, "hl.no", 2, false}, + {1, "hm.no", 2, false}, + {1, "jan-mayen.no", 2, false}, + {1, "mr.no", 2, false}, + {1, "nl.no", 2, false}, + {1, "nt.no", 2, false}, + {1, "of.no", 2, false}, + {1, "ol.no", 2, false}, + {1, "oslo.no", 2, false}, + {1, "rl.no", 2, false}, + {1, "sf.no", 2, false}, + {1, "st.no", 2, false}, + {1, "svalbard.no", 2, false}, + {1, "tm.no", 2, false}, + {1, "tr.no", 2, false}, + {1, "va.no", 2, false}, + {1, "vf.no", 2, false}, + {1, "gs.aa.no", 3, false}, + {1, "gs.ah.no", 3, false}, + {1, "gs.bu.no", 3, false}, + {1, "gs.fm.no", 3, false}, + {1, "gs.hl.no", 3, false}, + {1, "gs.hm.no", 3, false}, + {1, "gs.jan-mayen.no", 3, false}, + {1, "gs.mr.no", 3, false}, + {1, "gs.nl.no", 3, false}, + {1, "gs.nt.no", 3, false}, + {1, "gs.of.no", 3, false}, + {1, "gs.ol.no", 3, false}, + {1, "gs.oslo.no", 3, false}, + {1, "gs.rl.no", 3, false}, + {1, "gs.sf.no", 3, false}, + {1, "gs.st.no", 3, false}, + {1, "gs.svalbard.no", 3, 
false}, + {1, "gs.tm.no", 3, false}, + {1, "gs.tr.no", 3, false}, + {1, "gs.va.no", 3, false}, + {1, "gs.vf.no", 3, false}, + {1, "akrehamn.no", 2, false}, + {1, "xn--krehamn-dxa.no", 2, false}, + {1, "algard.no", 2, false}, + {1, "xn--lgrd-poac.no", 2, false}, + {1, "arna.no", 2, false}, + {1, "brumunddal.no", 2, false}, + {1, "bryne.no", 2, false}, + {1, "bronnoysund.no", 2, false}, + {1, "xn--brnnysund-m8ac.no", 2, false}, + {1, "drobak.no", 2, false}, + {1, "xn--drbak-wua.no", 2, false}, + {1, "egersund.no", 2, false}, + {1, "fetsund.no", 2, false}, + {1, "floro.no", 2, false}, + {1, "xn--flor-jra.no", 2, false}, + {1, "fredrikstad.no", 2, false}, + {1, "hokksund.no", 2, false}, + {1, "honefoss.no", 2, false}, + {1, "xn--hnefoss-q1a.no", 2, false}, + {1, "jessheim.no", 2, false}, + {1, "jorpeland.no", 2, false}, + {1, "xn--jrpeland-54a.no", 2, false}, + {1, "kirkenes.no", 2, false}, + {1, "kopervik.no", 2, false}, + {1, "krokstadelva.no", 2, false}, + {1, "langevag.no", 2, false}, + {1, "xn--langevg-jxa.no", 2, false}, + {1, "leirvik.no", 2, false}, + {1, "mjondalen.no", 2, false}, + {1, "xn--mjndalen-64a.no", 2, false}, + {1, "mo-i-rana.no", 2, false}, + {1, "mosjoen.no", 2, false}, + {1, "xn--mosjen-eya.no", 2, false}, + {1, "nesoddtangen.no", 2, false}, + {1, "orkanger.no", 2, false}, + {1, "osoyro.no", 2, false}, + {1, "xn--osyro-wua.no", 2, false}, + {1, "raholt.no", 2, false}, + {1, "xn--rholt-mra.no", 2, false}, + {1, "sandnessjoen.no", 2, false}, + {1, "xn--sandnessjen-ogb.no", 2, false}, + {1, "skedsmokorset.no", 2, false}, + {1, "slattum.no", 2, false}, + {1, "spjelkavik.no", 2, false}, + {1, "stathelle.no", 2, false}, + {1, "stavern.no", 2, false}, + {1, "stjordalshalsen.no", 2, false}, + {1, "xn--stjrdalshalsen-sqb.no", 2, false}, + {1, "tananger.no", 2, false}, + {1, "tranby.no", 2, false}, + {1, "vossevangen.no", 2, false}, + {1, "afjord.no", 2, false}, + {1, "xn--fjord-lra.no", 2, false}, + {1, "agdenes.no", 2, false}, + {1, "al.no", 2, false}, + {1, "xn--l-1fa.no", 2, false}, + {1, "alesund.no", 2, false}, + {1, "xn--lesund-hua.no", 2, false}, + {1, "alstahaug.no", 2, false}, + {1, "alta.no", 2, false}, + {1, "xn--lt-liac.no", 2, false}, + {1, "alaheadju.no", 2, false}, + {1, "xn--laheadju-7ya.no", 2, false}, + {1, "alvdal.no", 2, false}, + {1, "amli.no", 2, false}, + {1, "xn--mli-tla.no", 2, false}, + {1, "amot.no", 2, false}, + {1, "xn--mot-tla.no", 2, false}, + {1, "andebu.no", 2, false}, + {1, "andoy.no", 2, false}, + {1, "xn--andy-ira.no", 2, false}, + {1, "andasuolo.no", 2, false}, + {1, "ardal.no", 2, false}, + {1, "xn--rdal-poa.no", 2, false}, + {1, "aremark.no", 2, false}, + {1, "arendal.no", 2, false}, + {1, "xn--s-1fa.no", 2, false}, + {1, "aseral.no", 2, false}, + {1, "xn--seral-lra.no", 2, false}, + {1, "asker.no", 2, false}, + {1, "askim.no", 2, false}, + {1, "askvoll.no", 2, false}, + {1, "askoy.no", 2, false}, + {1, "xn--asky-ira.no", 2, false}, + {1, "asnes.no", 2, false}, + {1, "xn--snes-poa.no", 2, false}, + {1, "audnedaln.no", 2, false}, + {1, "aukra.no", 2, false}, + {1, "aure.no", 2, false}, + {1, "aurland.no", 2, false}, + {1, "aurskog-holand.no", 2, false}, + {1, "xn--aurskog-hland-jnb.no", 2, false}, + {1, "austevoll.no", 2, false}, + {1, "austrheim.no", 2, false}, + {1, "averoy.no", 2, false}, + {1, "xn--avery-yua.no", 2, false}, + {1, "balestrand.no", 2, false}, + {1, "ballangen.no", 2, false}, + {1, "balat.no", 2, false}, + {1, "xn--blt-elab.no", 2, false}, + {1, "balsfjord.no", 2, false}, + {1, "bahccavuotna.no", 2, false}, + {1, 
"xn--bhccavuotna-k7a.no", 2, false}, + {1, "bamble.no", 2, false}, + {1, "bardu.no", 2, false}, + {1, "beardu.no", 2, false}, + {1, "beiarn.no", 2, false}, + {1, "bajddar.no", 2, false}, + {1, "xn--bjddar-pta.no", 2, false}, + {1, "baidar.no", 2, false}, + {1, "xn--bidr-5nac.no", 2, false}, + {1, "berg.no", 2, false}, + {1, "bergen.no", 2, false}, + {1, "berlevag.no", 2, false}, + {1, "xn--berlevg-jxa.no", 2, false}, + {1, "bearalvahki.no", 2, false}, + {1, "xn--bearalvhki-y4a.no", 2, false}, + {1, "bindal.no", 2, false}, + {1, "birkenes.no", 2, false}, + {1, "bjarkoy.no", 2, false}, + {1, "xn--bjarky-fya.no", 2, false}, + {1, "bjerkreim.no", 2, false}, + {1, "bjugn.no", 2, false}, + {1, "bodo.no", 2, false}, + {1, "xn--bod-2na.no", 2, false}, + {1, "badaddja.no", 2, false}, + {1, "xn--bdddj-mrabd.no", 2, false}, + {1, "budejju.no", 2, false}, + {1, "bokn.no", 2, false}, + {1, "bremanger.no", 2, false}, + {1, "bronnoy.no", 2, false}, + {1, "xn--brnny-wuac.no", 2, false}, + {1, "bygland.no", 2, false}, + {1, "bykle.no", 2, false}, + {1, "barum.no", 2, false}, + {1, "xn--brum-voa.no", 2, false}, + {1, "bo.telemark.no", 3, false}, + {1, "xn--b-5ga.telemark.no", 3, false}, + {1, "bo.nordland.no", 3, false}, + {1, "xn--b-5ga.nordland.no", 3, false}, + {1, "bievat.no", 2, false}, + {1, "xn--bievt-0qa.no", 2, false}, + {1, "bomlo.no", 2, false}, + {1, "xn--bmlo-gra.no", 2, false}, + {1, "batsfjord.no", 2, false}, + {1, "xn--btsfjord-9za.no", 2, false}, + {1, "bahcavuotna.no", 2, false}, + {1, "xn--bhcavuotna-s4a.no", 2, false}, + {1, "dovre.no", 2, false}, + {1, "drammen.no", 2, false}, + {1, "drangedal.no", 2, false}, + {1, "dyroy.no", 2, false}, + {1, "xn--dyry-ira.no", 2, false}, + {1, "donna.no", 2, false}, + {1, "xn--dnna-gra.no", 2, false}, + {1, "eid.no", 2, false}, + {1, "eidfjord.no", 2, false}, + {1, "eidsberg.no", 2, false}, + {1, "eidskog.no", 2, false}, + {1, "eidsvoll.no", 2, false}, + {1, "eigersund.no", 2, false}, + {1, "elverum.no", 2, false}, + {1, "enebakk.no", 2, false}, + {1, "engerdal.no", 2, false}, + {1, "etne.no", 2, false}, + {1, "etnedal.no", 2, false}, + {1, "evenes.no", 2, false}, + {1, "evenassi.no", 2, false}, + {1, "xn--eveni-0qa01ga.no", 2, false}, + {1, "evje-og-hornnes.no", 2, false}, + {1, "farsund.no", 2, false}, + {1, "fauske.no", 2, false}, + {1, "fuossko.no", 2, false}, + {1, "fuoisku.no", 2, false}, + {1, "fedje.no", 2, false}, + {1, "fet.no", 2, false}, + {1, "finnoy.no", 2, false}, + {1, "xn--finny-yua.no", 2, false}, + {1, "fitjar.no", 2, false}, + {1, "fjaler.no", 2, false}, + {1, "fjell.no", 2, false}, + {1, "flakstad.no", 2, false}, + {1, "flatanger.no", 2, false}, + {1, "flekkefjord.no", 2, false}, + {1, "flesberg.no", 2, false}, + {1, "flora.no", 2, false}, + {1, "fla.no", 2, false}, + {1, "xn--fl-zia.no", 2, false}, + {1, "folldal.no", 2, false}, + {1, "forsand.no", 2, false}, + {1, "fosnes.no", 2, false}, + {1, "frei.no", 2, false}, + {1, "frogn.no", 2, false}, + {1, "froland.no", 2, false}, + {1, "frosta.no", 2, false}, + {1, "frana.no", 2, false}, + {1, "xn--frna-woa.no", 2, false}, + {1, "froya.no", 2, false}, + {1, "xn--frya-hra.no", 2, false}, + {1, "fusa.no", 2, false}, + {1, "fyresdal.no", 2, false}, + {1, "forde.no", 2, false}, + {1, "xn--frde-gra.no", 2, false}, + {1, "gamvik.no", 2, false}, + {1, "gangaviika.no", 2, false}, + {1, "xn--ggaviika-8ya47h.no", 2, false}, + {1, "gaular.no", 2, false}, + {1, "gausdal.no", 2, false}, + {1, "gildeskal.no", 2, false}, + {1, "xn--gildeskl-g0a.no", 2, false}, + {1, "giske.no", 2, false}, + {1, 
"gjemnes.no", 2, false}, + {1, "gjerdrum.no", 2, false}, + {1, "gjerstad.no", 2, false}, + {1, "gjesdal.no", 2, false}, + {1, "gjovik.no", 2, false}, + {1, "xn--gjvik-wua.no", 2, false}, + {1, "gloppen.no", 2, false}, + {1, "gol.no", 2, false}, + {1, "gran.no", 2, false}, + {1, "grane.no", 2, false}, + {1, "granvin.no", 2, false}, + {1, "gratangen.no", 2, false}, + {1, "grimstad.no", 2, false}, + {1, "grong.no", 2, false}, + {1, "kraanghke.no", 2, false}, + {1, "xn--kranghke-b0a.no", 2, false}, + {1, "grue.no", 2, false}, + {1, "gulen.no", 2, false}, + {1, "hadsel.no", 2, false}, + {1, "halden.no", 2, false}, + {1, "halsa.no", 2, false}, + {1, "hamar.no", 2, false}, + {1, "hamaroy.no", 2, false}, + {1, "habmer.no", 2, false}, + {1, "xn--hbmer-xqa.no", 2, false}, + {1, "hapmir.no", 2, false}, + {1, "xn--hpmir-xqa.no", 2, false}, + {1, "hammerfest.no", 2, false}, + {1, "hammarfeasta.no", 2, false}, + {1, "xn--hmmrfeasta-s4ac.no", 2, false}, + {1, "haram.no", 2, false}, + {1, "hareid.no", 2, false}, + {1, "harstad.no", 2, false}, + {1, "hasvik.no", 2, false}, + {1, "aknoluokta.no", 2, false}, + {1, "xn--koluokta-7ya57h.no", 2, false}, + {1, "hattfjelldal.no", 2, false}, + {1, "aarborte.no", 2, false}, + {1, "haugesund.no", 2, false}, + {1, "hemne.no", 2, false}, + {1, "hemnes.no", 2, false}, + {1, "hemsedal.no", 2, false}, + {1, "heroy.more-og-romsdal.no", 3, false}, + {1, "xn--hery-ira.xn--mre-og-romsdal-qqb.no", 3, false}, + {1, "heroy.nordland.no", 3, false}, + {1, "xn--hery-ira.nordland.no", 3, false}, + {1, "hitra.no", 2, false}, + {1, "hjartdal.no", 2, false}, + {1, "hjelmeland.no", 2, false}, + {1, "hobol.no", 2, false}, + {1, "xn--hobl-ira.no", 2, false}, + {1, "hof.no", 2, false}, + {1, "hol.no", 2, false}, + {1, "hole.no", 2, false}, + {1, "holmestrand.no", 2, false}, + {1, "holtalen.no", 2, false}, + {1, "xn--holtlen-hxa.no", 2, false}, + {1, "hornindal.no", 2, false}, + {1, "horten.no", 2, false}, + {1, "hurdal.no", 2, false}, + {1, "hurum.no", 2, false}, + {1, "hvaler.no", 2, false}, + {1, "hyllestad.no", 2, false}, + {1, "hagebostad.no", 2, false}, + {1, "xn--hgebostad-g3a.no", 2, false}, + {1, "hoyanger.no", 2, false}, + {1, "xn--hyanger-q1a.no", 2, false}, + {1, "hoylandet.no", 2, false}, + {1, "xn--hylandet-54a.no", 2, false}, + {1, "ha.no", 2, false}, + {1, "xn--h-2fa.no", 2, false}, + {1, "ibestad.no", 2, false}, + {1, "inderoy.no", 2, false}, + {1, "xn--indery-fya.no", 2, false}, + {1, "iveland.no", 2, false}, + {1, "jevnaker.no", 2, false}, + {1, "jondal.no", 2, false}, + {1, "jolster.no", 2, false}, + {1, "xn--jlster-bya.no", 2, false}, + {1, "karasjok.no", 2, false}, + {1, "karasjohka.no", 2, false}, + {1, "xn--krjohka-hwab49j.no", 2, false}, + {1, "karlsoy.no", 2, false}, + {1, "galsa.no", 2, false}, + {1, "xn--gls-elac.no", 2, false}, + {1, "karmoy.no", 2, false}, + {1, "xn--karmy-yua.no", 2, false}, + {1, "kautokeino.no", 2, false}, + {1, "guovdageaidnu.no", 2, false}, + {1, "klepp.no", 2, false}, + {1, "klabu.no", 2, false}, + {1, "xn--klbu-woa.no", 2, false}, + {1, "kongsberg.no", 2, false}, + {1, "kongsvinger.no", 2, false}, + {1, "kragero.no", 2, false}, + {1, "xn--krager-gya.no", 2, false}, + {1, "kristiansand.no", 2, false}, + {1, "kristiansund.no", 2, false}, + {1, "krodsherad.no", 2, false}, + {1, "xn--krdsherad-m8a.no", 2, false}, + {1, "kvalsund.no", 2, false}, + {1, "rahkkeravju.no", 2, false}, + {1, "xn--rhkkervju-01af.no", 2, false}, + {1, "kvam.no", 2, false}, + {1, "kvinesdal.no", 2, false}, + {1, "kvinnherad.no", 2, false}, + {1, "kviteseid.no", 2, 
false}, + {1, "kvitsoy.no", 2, false}, + {1, "xn--kvitsy-fya.no", 2, false}, + {1, "kvafjord.no", 2, false}, + {1, "xn--kvfjord-nxa.no", 2, false}, + {1, "giehtavuoatna.no", 2, false}, + {1, "kvanangen.no", 2, false}, + {1, "xn--kvnangen-k0a.no", 2, false}, + {1, "navuotna.no", 2, false}, + {1, "xn--nvuotna-hwa.no", 2, false}, + {1, "kafjord.no", 2, false}, + {1, "xn--kfjord-iua.no", 2, false}, + {1, "gaivuotna.no", 2, false}, + {1, "xn--givuotna-8ya.no", 2, false}, + {1, "larvik.no", 2, false}, + {1, "lavangen.no", 2, false}, + {1, "lavagis.no", 2, false}, + {1, "loabat.no", 2, false}, + {1, "xn--loabt-0qa.no", 2, false}, + {1, "lebesby.no", 2, false}, + {1, "davvesiida.no", 2, false}, + {1, "leikanger.no", 2, false}, + {1, "leirfjord.no", 2, false}, + {1, "leka.no", 2, false}, + {1, "leksvik.no", 2, false}, + {1, "lenvik.no", 2, false}, + {1, "leangaviika.no", 2, false}, + {1, "xn--leagaviika-52b.no", 2, false}, + {1, "lesja.no", 2, false}, + {1, "levanger.no", 2, false}, + {1, "lier.no", 2, false}, + {1, "lierne.no", 2, false}, + {1, "lillehammer.no", 2, false}, + {1, "lillesand.no", 2, false}, + {1, "lindesnes.no", 2, false}, + {1, "lindas.no", 2, false}, + {1, "xn--linds-pra.no", 2, false}, + {1, "lom.no", 2, false}, + {1, "loppa.no", 2, false}, + {1, "lahppi.no", 2, false}, + {1, "xn--lhppi-xqa.no", 2, false}, + {1, "lund.no", 2, false}, + {1, "lunner.no", 2, false}, + {1, "luroy.no", 2, false}, + {1, "xn--lury-ira.no", 2, false}, + {1, "luster.no", 2, false}, + {1, "lyngdal.no", 2, false}, + {1, "lyngen.no", 2, false}, + {1, "ivgu.no", 2, false}, + {1, "lardal.no", 2, false}, + {1, "lerdal.no", 2, false}, + {1, "xn--lrdal-sra.no", 2, false}, + {1, "lodingen.no", 2, false}, + {1, "xn--ldingen-q1a.no", 2, false}, + {1, "lorenskog.no", 2, false}, + {1, "xn--lrenskog-54a.no", 2, false}, + {1, "loten.no", 2, false}, + {1, "xn--lten-gra.no", 2, false}, + {1, "malvik.no", 2, false}, + {1, "masoy.no", 2, false}, + {1, "xn--msy-ula0h.no", 2, false}, + {1, "muosat.no", 2, false}, + {1, "xn--muost-0qa.no", 2, false}, + {1, "mandal.no", 2, false}, + {1, "marker.no", 2, false}, + {1, "marnardal.no", 2, false}, + {1, "masfjorden.no", 2, false}, + {1, "meland.no", 2, false}, + {1, "meldal.no", 2, false}, + {1, "melhus.no", 2, false}, + {1, "meloy.no", 2, false}, + {1, "xn--mely-ira.no", 2, false}, + {1, "meraker.no", 2, false}, + {1, "xn--merker-kua.no", 2, false}, + {1, "moareke.no", 2, false}, + {1, "xn--moreke-jua.no", 2, false}, + {1, "midsund.no", 2, false}, + {1, "midtre-gauldal.no", 2, false}, + {1, "modalen.no", 2, false}, + {1, "modum.no", 2, false}, + {1, "molde.no", 2, false}, + {1, "moskenes.no", 2, false}, + {1, "moss.no", 2, false}, + {1, "mosvik.no", 2, false}, + {1, "malselv.no", 2, false}, + {1, "xn--mlselv-iua.no", 2, false}, + {1, "malatvuopmi.no", 2, false}, + {1, "xn--mlatvuopmi-s4a.no", 2, false}, + {1, "namdalseid.no", 2, false}, + {1, "aejrie.no", 2, false}, + {1, "namsos.no", 2, false}, + {1, "namsskogan.no", 2, false}, + {1, "naamesjevuemie.no", 2, false}, + {1, "xn--nmesjevuemie-tcba.no", 2, false}, + {1, "laakesvuemie.no", 2, false}, + {1, "nannestad.no", 2, false}, + {1, "narvik.no", 2, false}, + {1, "narviika.no", 2, false}, + {1, "naustdal.no", 2, false}, + {1, "nedre-eiker.no", 2, false}, + {1, "nes.akershus.no", 3, false}, + {1, "nes.buskerud.no", 3, false}, + {1, "nesna.no", 2, false}, + {1, "nesodden.no", 2, false}, + {1, "nesseby.no", 2, false}, + {1, "unjarga.no", 2, false}, + {1, "xn--unjrga-rta.no", 2, false}, + {1, "nesset.no", 2, false}, + {1, 
"nissedal.no", 2, false}, + {1, "nittedal.no", 2, false}, + {1, "nord-aurdal.no", 2, false}, + {1, "nord-fron.no", 2, false}, + {1, "nord-odal.no", 2, false}, + {1, "norddal.no", 2, false}, + {1, "nordkapp.no", 2, false}, + {1, "davvenjarga.no", 2, false}, + {1, "xn--davvenjrga-y4a.no", 2, false}, + {1, "nordre-land.no", 2, false}, + {1, "nordreisa.no", 2, false}, + {1, "raisa.no", 2, false}, + {1, "xn--risa-5na.no", 2, false}, + {1, "nore-og-uvdal.no", 2, false}, + {1, "notodden.no", 2, false}, + {1, "naroy.no", 2, false}, + {1, "xn--nry-yla5g.no", 2, false}, + {1, "notteroy.no", 2, false}, + {1, "xn--nttery-byae.no", 2, false}, + {1, "odda.no", 2, false}, + {1, "oksnes.no", 2, false}, + {1, "xn--ksnes-uua.no", 2, false}, + {1, "oppdal.no", 2, false}, + {1, "oppegard.no", 2, false}, + {1, "xn--oppegrd-ixa.no", 2, false}, + {1, "orkdal.no", 2, false}, + {1, "orland.no", 2, false}, + {1, "xn--rland-uua.no", 2, false}, + {1, "orskog.no", 2, false}, + {1, "xn--rskog-uua.no", 2, false}, + {1, "orsta.no", 2, false}, + {1, "xn--rsta-fra.no", 2, false}, + {1, "os.hedmark.no", 3, false}, + {1, "os.hordaland.no", 3, false}, + {1, "osen.no", 2, false}, + {1, "osteroy.no", 2, false}, + {1, "xn--ostery-fya.no", 2, false}, + {1, "ostre-toten.no", 2, false}, + {1, "xn--stre-toten-zcb.no", 2, false}, + {1, "overhalla.no", 2, false}, + {1, "ovre-eiker.no", 2, false}, + {1, "xn--vre-eiker-k8a.no", 2, false}, + {1, "oyer.no", 2, false}, + {1, "xn--yer-zna.no", 2, false}, + {1, "oygarden.no", 2, false}, + {1, "xn--ygarden-p1a.no", 2, false}, + {1, "oystre-slidre.no", 2, false}, + {1, "xn--ystre-slidre-ujb.no", 2, false}, + {1, "porsanger.no", 2, false}, + {1, "porsangu.no", 2, false}, + {1, "xn--porsgu-sta26f.no", 2, false}, + {1, "porsgrunn.no", 2, false}, + {1, "radoy.no", 2, false}, + {1, "xn--rady-ira.no", 2, false}, + {1, "rakkestad.no", 2, false}, + {1, "rana.no", 2, false}, + {1, "ruovat.no", 2, false}, + {1, "randaberg.no", 2, false}, + {1, "rauma.no", 2, false}, + {1, "rendalen.no", 2, false}, + {1, "rennebu.no", 2, false}, + {1, "rennesoy.no", 2, false}, + {1, "xn--rennesy-v1a.no", 2, false}, + {1, "rindal.no", 2, false}, + {1, "ringebu.no", 2, false}, + {1, "ringerike.no", 2, false}, + {1, "ringsaker.no", 2, false}, + {1, "rissa.no", 2, false}, + {1, "risor.no", 2, false}, + {1, "xn--risr-ira.no", 2, false}, + {1, "roan.no", 2, false}, + {1, "rollag.no", 2, false}, + {1, "rygge.no", 2, false}, + {1, "ralingen.no", 2, false}, + {1, "xn--rlingen-mxa.no", 2, false}, + {1, "rodoy.no", 2, false}, + {1, "xn--rdy-0nab.no", 2, false}, + {1, "romskog.no", 2, false}, + {1, "xn--rmskog-bya.no", 2, false}, + {1, "roros.no", 2, false}, + {1, "xn--rros-gra.no", 2, false}, + {1, "rost.no", 2, false}, + {1, "xn--rst-0na.no", 2, false}, + {1, "royken.no", 2, false}, + {1, "xn--ryken-vua.no", 2, false}, + {1, "royrvik.no", 2, false}, + {1, "xn--ryrvik-bya.no", 2, false}, + {1, "rade.no", 2, false}, + {1, "xn--rde-ula.no", 2, false}, + {1, "salangen.no", 2, false}, + {1, "siellak.no", 2, false}, + {1, "saltdal.no", 2, false}, + {1, "salat.no", 2, false}, + {1, "xn--slt-elab.no", 2, false}, + {1, "xn--slat-5na.no", 2, false}, + {1, "samnanger.no", 2, false}, + {1, "sande.more-og-romsdal.no", 3, false}, + {1, "sande.xn--mre-og-romsdal-qqb.no", 3, false}, + {1, "sande.vestfold.no", 3, false}, + {1, "sandefjord.no", 2, false}, + {1, "sandnes.no", 2, false}, + {1, "sandoy.no", 2, false}, + {1, "xn--sandy-yua.no", 2, false}, + {1, "sarpsborg.no", 2, false}, + {1, "sauda.no", 2, false}, + {1, "sauherad.no", 2, false}, + 
{1, "sel.no", 2, false}, + {1, "selbu.no", 2, false}, + {1, "selje.no", 2, false}, + {1, "seljord.no", 2, false}, + {1, "sigdal.no", 2, false}, + {1, "siljan.no", 2, false}, + {1, "sirdal.no", 2, false}, + {1, "skaun.no", 2, false}, + {1, "skedsmo.no", 2, false}, + {1, "ski.no", 2, false}, + {1, "skien.no", 2, false}, + {1, "skiptvet.no", 2, false}, + {1, "skjervoy.no", 2, false}, + {1, "xn--skjervy-v1a.no", 2, false}, + {1, "skierva.no", 2, false}, + {1, "xn--skierv-uta.no", 2, false}, + {1, "skjak.no", 2, false}, + {1, "xn--skjk-soa.no", 2, false}, + {1, "skodje.no", 2, false}, + {1, "skanland.no", 2, false}, + {1, "xn--sknland-fxa.no", 2, false}, + {1, "skanit.no", 2, false}, + {1, "xn--sknit-yqa.no", 2, false}, + {1, "smola.no", 2, false}, + {1, "xn--smla-hra.no", 2, false}, + {1, "snillfjord.no", 2, false}, + {1, "snasa.no", 2, false}, + {1, "xn--snsa-roa.no", 2, false}, + {1, "snoasa.no", 2, false}, + {1, "snaase.no", 2, false}, + {1, "xn--snase-nra.no", 2, false}, + {1, "sogndal.no", 2, false}, + {1, "sokndal.no", 2, false}, + {1, "sola.no", 2, false}, + {1, "solund.no", 2, false}, + {1, "songdalen.no", 2, false}, + {1, "sortland.no", 2, false}, + {1, "spydeberg.no", 2, false}, + {1, "stange.no", 2, false}, + {1, "stavanger.no", 2, false}, + {1, "steigen.no", 2, false}, + {1, "steinkjer.no", 2, false}, + {1, "stjordal.no", 2, false}, + {1, "xn--stjrdal-s1a.no", 2, false}, + {1, "stokke.no", 2, false}, + {1, "stor-elvdal.no", 2, false}, + {1, "stord.no", 2, false}, + {1, "stordal.no", 2, false}, + {1, "storfjord.no", 2, false}, + {1, "omasvuotna.no", 2, false}, + {1, "strand.no", 2, false}, + {1, "stranda.no", 2, false}, + {1, "stryn.no", 2, false}, + {1, "sula.no", 2, false}, + {1, "suldal.no", 2, false}, + {1, "sund.no", 2, false}, + {1, "sunndal.no", 2, false}, + {1, "surnadal.no", 2, false}, + {1, "sveio.no", 2, false}, + {1, "svelvik.no", 2, false}, + {1, "sykkylven.no", 2, false}, + {1, "sogne.no", 2, false}, + {1, "xn--sgne-gra.no", 2, false}, + {1, "somna.no", 2, false}, + {1, "xn--smna-gra.no", 2, false}, + {1, "sondre-land.no", 2, false}, + {1, "xn--sndre-land-0cb.no", 2, false}, + {1, "sor-aurdal.no", 2, false}, + {1, "xn--sr-aurdal-l8a.no", 2, false}, + {1, "sor-fron.no", 2, false}, + {1, "xn--sr-fron-q1a.no", 2, false}, + {1, "sor-odal.no", 2, false}, + {1, "xn--sr-odal-q1a.no", 2, false}, + {1, "sor-varanger.no", 2, false}, + {1, "xn--sr-varanger-ggb.no", 2, false}, + {1, "matta-varjjat.no", 2, false}, + {1, "xn--mtta-vrjjat-k7af.no", 2, false}, + {1, "sorfold.no", 2, false}, + {1, "xn--srfold-bya.no", 2, false}, + {1, "sorreisa.no", 2, false}, + {1, "xn--srreisa-q1a.no", 2, false}, + {1, "sorum.no", 2, false}, + {1, "xn--srum-gra.no", 2, false}, + {1, "tana.no", 2, false}, + {1, "deatnu.no", 2, false}, + {1, "time.no", 2, false}, + {1, "tingvoll.no", 2, false}, + {1, "tinn.no", 2, false}, + {1, "tjeldsund.no", 2, false}, + {1, "dielddanuorri.no", 2, false}, + {1, "tjome.no", 2, false}, + {1, "xn--tjme-hra.no", 2, false}, + {1, "tokke.no", 2, false}, + {1, "tolga.no", 2, false}, + {1, "torsken.no", 2, false}, + {1, "tranoy.no", 2, false}, + {1, "xn--trany-yua.no", 2, false}, + {1, "tromso.no", 2, false}, + {1, "xn--troms-zua.no", 2, false}, + {1, "tromsa.no", 2, false}, + {1, "romsa.no", 2, false}, + {1, "trondheim.no", 2, false}, + {1, "troandin.no", 2, false}, + {1, "trysil.no", 2, false}, + {1, "trana.no", 2, false}, + {1, "xn--trna-woa.no", 2, false}, + {1, "trogstad.no", 2, false}, + {1, "xn--trgstad-r1a.no", 2, false}, + {1, "tvedestrand.no", 2, false}, + {1, 
"tydal.no", 2, false}, + {1, "tynset.no", 2, false}, + {1, "tysfjord.no", 2, false}, + {1, "divtasvuodna.no", 2, false}, + {1, "divttasvuotna.no", 2, false}, + {1, "tysnes.no", 2, false}, + {1, "tysvar.no", 2, false}, + {1, "xn--tysvr-vra.no", 2, false}, + {1, "tonsberg.no", 2, false}, + {1, "xn--tnsberg-q1a.no", 2, false}, + {1, "ullensaker.no", 2, false}, + {1, "ullensvang.no", 2, false}, + {1, "ulvik.no", 2, false}, + {1, "utsira.no", 2, false}, + {1, "vadso.no", 2, false}, + {1, "xn--vads-jra.no", 2, false}, + {1, "cahcesuolo.no", 2, false}, + {1, "xn--hcesuolo-7ya35b.no", 2, false}, + {1, "vaksdal.no", 2, false}, + {1, "valle.no", 2, false}, + {1, "vang.no", 2, false}, + {1, "vanylven.no", 2, false}, + {1, "vardo.no", 2, false}, + {1, "xn--vard-jra.no", 2, false}, + {1, "varggat.no", 2, false}, + {1, "xn--vrggt-xqad.no", 2, false}, + {1, "vefsn.no", 2, false}, + {1, "vaapste.no", 2, false}, + {1, "vega.no", 2, false}, + {1, "vegarshei.no", 2, false}, + {1, "xn--vegrshei-c0a.no", 2, false}, + {1, "vennesla.no", 2, false}, + {1, "verdal.no", 2, false}, + {1, "verran.no", 2, false}, + {1, "vestby.no", 2, false}, + {1, "vestnes.no", 2, false}, + {1, "vestre-slidre.no", 2, false}, + {1, "vestre-toten.no", 2, false}, + {1, "vestvagoy.no", 2, false}, + {1, "xn--vestvgy-ixa6o.no", 2, false}, + {1, "vevelstad.no", 2, false}, + {1, "vik.no", 2, false}, + {1, "vikna.no", 2, false}, + {1, "vindafjord.no", 2, false}, + {1, "volda.no", 2, false}, + {1, "voss.no", 2, false}, + {1, "varoy.no", 2, false}, + {1, "xn--vry-yla5g.no", 2, false}, + {1, "vagan.no", 2, false}, + {1, "xn--vgan-qoa.no", 2, false}, + {1, "voagat.no", 2, false}, + {1, "vagsoy.no", 2, false}, + {1, "xn--vgsy-qoa0j.no", 2, false}, + {1, "vaga.no", 2, false}, + {1, "xn--vg-yiab.no", 2, false}, + {1, "valer.ostfold.no", 3, false}, + {1, "xn--vler-qoa.xn--stfold-9xa.no", 3, false}, + {1, "valer.hedmark.no", 3, false}, + {1, "xn--vler-qoa.hedmark.no", 3, false}, + {2, "np", 2, false}, + {1, "nr", 1, false}, + {1, "biz.nr", 2, false}, + {1, "info.nr", 2, false}, + {1, "gov.nr", 2, false}, + {1, "edu.nr", 2, false}, + {1, "org.nr", 2, false}, + {1, "net.nr", 2, false}, + {1, "com.nr", 2, false}, + {1, "nu", 1, false}, + {1, "nz", 1, false}, + {1, "ac.nz", 2, false}, + {1, "co.nz", 2, false}, + {1, "cri.nz", 2, false}, + {1, "geek.nz", 2, false}, + {1, "gen.nz", 2, false}, + {1, "govt.nz", 2, false}, + {1, "health.nz", 2, false}, + {1, "iwi.nz", 2, false}, + {1, "kiwi.nz", 2, false}, + {1, "maori.nz", 2, false}, + {1, "mil.nz", 2, false}, + {1, "xn--mori-qsa.nz", 2, false}, + {1, "net.nz", 2, false}, + {1, "org.nz", 2, false}, + {1, "parliament.nz", 2, false}, + {1, "school.nz", 2, false}, + {1, "om", 1, false}, + {1, "co.om", 2, false}, + {1, "com.om", 2, false}, + {1, "edu.om", 2, false}, + {1, "gov.om", 2, false}, + {1, "med.om", 2, false}, + {1, "museum.om", 2, false}, + {1, "net.om", 2, false}, + {1, "org.om", 2, false}, + {1, "pro.om", 2, false}, + {1, "onion", 1, false}, + {1, "org", 1, false}, + {1, "pa", 1, false}, + {1, "ac.pa", 2, false}, + {1, "gob.pa", 2, false}, + {1, "com.pa", 2, false}, + {1, "org.pa", 2, false}, + {1, "sld.pa", 2, false}, + {1, "edu.pa", 2, false}, + {1, "net.pa", 2, false}, + {1, "ing.pa", 2, false}, + {1, "abo.pa", 2, false}, + {1, "med.pa", 2, false}, + {1, "nom.pa", 2, false}, + {1, "pe", 1, false}, + {1, "edu.pe", 2, false}, + {1, "gob.pe", 2, false}, + {1, "nom.pe", 2, false}, + {1, "mil.pe", 2, false}, + {1, "org.pe", 2, false}, + {1, "com.pe", 2, false}, + {1, "net.pe", 2, false}, + {1, "pf", 1, 
false}, + {1, "com.pf", 2, false}, + {1, "org.pf", 2, false}, + {1, "edu.pf", 2, false}, + {2, "pg", 2, false}, + {1, "ph", 1, false}, + {1, "com.ph", 2, false}, + {1, "net.ph", 2, false}, + {1, "org.ph", 2, false}, + {1, "gov.ph", 2, false}, + {1, "edu.ph", 2, false}, + {1, "ngo.ph", 2, false}, + {1, "mil.ph", 2, false}, + {1, "i.ph", 2, false}, + {1, "pk", 1, false}, + {1, "com.pk", 2, false}, + {1, "net.pk", 2, false}, + {1, "edu.pk", 2, false}, + {1, "org.pk", 2, false}, + {1, "fam.pk", 2, false}, + {1, "biz.pk", 2, false}, + {1, "web.pk", 2, false}, + {1, "gov.pk", 2, false}, + {1, "gob.pk", 2, false}, + {1, "gok.pk", 2, false}, + {1, "gon.pk", 2, false}, + {1, "gop.pk", 2, false}, + {1, "gos.pk", 2, false}, + {1, "info.pk", 2, false}, + {1, "pl", 1, false}, + {1, "com.pl", 2, false}, + {1, "net.pl", 2, false}, + {1, "org.pl", 2, false}, + {1, "aid.pl", 2, false}, + {1, "agro.pl", 2, false}, + {1, "atm.pl", 2, false}, + {1, "auto.pl", 2, false}, + {1, "biz.pl", 2, false}, + {1, "edu.pl", 2, false}, + {1, "gmina.pl", 2, false}, + {1, "gsm.pl", 2, false}, + {1, "info.pl", 2, false}, + {1, "mail.pl", 2, false}, + {1, "miasta.pl", 2, false}, + {1, "media.pl", 2, false}, + {1, "mil.pl", 2, false}, + {1, "nieruchomosci.pl", 2, false}, + {1, "nom.pl", 2, false}, + {1, "pc.pl", 2, false}, + {1, "powiat.pl", 2, false}, + {1, "priv.pl", 2, false}, + {1, "realestate.pl", 2, false}, + {1, "rel.pl", 2, false}, + {1, "sex.pl", 2, false}, + {1, "shop.pl", 2, false}, + {1, "sklep.pl", 2, false}, + {1, "sos.pl", 2, false}, + {1, "szkola.pl", 2, false}, + {1, "targi.pl", 2, false}, + {1, "tm.pl", 2, false}, + {1, "tourism.pl", 2, false}, + {1, "travel.pl", 2, false}, + {1, "turystyka.pl", 2, false}, + {1, "gov.pl", 2, false}, + {1, "ap.gov.pl", 3, false}, + {1, "ic.gov.pl", 3, false}, + {1, "is.gov.pl", 3, false}, + {1, "us.gov.pl", 3, false}, + {1, "kmpsp.gov.pl", 3, false}, + {1, "kppsp.gov.pl", 3, false}, + {1, "kwpsp.gov.pl", 3, false}, + {1, "psp.gov.pl", 3, false}, + {1, "wskr.gov.pl", 3, false}, + {1, "kwp.gov.pl", 3, false}, + {1, "mw.gov.pl", 3, false}, + {1, "ug.gov.pl", 3, false}, + {1, "um.gov.pl", 3, false}, + {1, "umig.gov.pl", 3, false}, + {1, "ugim.gov.pl", 3, false}, + {1, "upow.gov.pl", 3, false}, + {1, "uw.gov.pl", 3, false}, + {1, "starostwo.gov.pl", 3, false}, + {1, "pa.gov.pl", 3, false}, + {1, "po.gov.pl", 3, false}, + {1, "psse.gov.pl", 3, false}, + {1, "pup.gov.pl", 3, false}, + {1, "rzgw.gov.pl", 3, false}, + {1, "sa.gov.pl", 3, false}, + {1, "so.gov.pl", 3, false}, + {1, "sr.gov.pl", 3, false}, + {1, "wsa.gov.pl", 3, false}, + {1, "sko.gov.pl", 3, false}, + {1, "uzs.gov.pl", 3, false}, + {1, "wiih.gov.pl", 3, false}, + {1, "winb.gov.pl", 3, false}, + {1, "pinb.gov.pl", 3, false}, + {1, "wios.gov.pl", 3, false}, + {1, "witd.gov.pl", 3, false}, + {1, "wzmiuw.gov.pl", 3, false}, + {1, "piw.gov.pl", 3, false}, + {1, "wiw.gov.pl", 3, false}, + {1, "griw.gov.pl", 3, false}, + {1, "wif.gov.pl", 3, false}, + {1, "oum.gov.pl", 3, false}, + {1, "sdn.gov.pl", 3, false}, + {1, "zp.gov.pl", 3, false}, + {1, "uppo.gov.pl", 3, false}, + {1, "mup.gov.pl", 3, false}, + {1, "wuoz.gov.pl", 3, false}, + {1, "konsulat.gov.pl", 3, false}, + {1, "oirm.gov.pl", 3, false}, + {1, "augustow.pl", 2, false}, + {1, "babia-gora.pl", 2, false}, + {1, "bedzin.pl", 2, false}, + {1, "beskidy.pl", 2, false}, + {1, "bialowieza.pl", 2, false}, + {1, "bialystok.pl", 2, false}, + {1, "bielawa.pl", 2, false}, + {1, "bieszczady.pl", 2, false}, + {1, "boleslawiec.pl", 2, false}, + {1, "bydgoszcz.pl", 2, false}, + {1, 
"bytom.pl", 2, false}, + {1, "cieszyn.pl", 2, false}, + {1, "czeladz.pl", 2, false}, + {1, "czest.pl", 2, false}, + {1, "dlugoleka.pl", 2, false}, + {1, "elblag.pl", 2, false}, + {1, "elk.pl", 2, false}, + {1, "glogow.pl", 2, false}, + {1, "gniezno.pl", 2, false}, + {1, "gorlice.pl", 2, false}, + {1, "grajewo.pl", 2, false}, + {1, "ilawa.pl", 2, false}, + {1, "jaworzno.pl", 2, false}, + {1, "jelenia-gora.pl", 2, false}, + {1, "jgora.pl", 2, false}, + {1, "kalisz.pl", 2, false}, + {1, "kazimierz-dolny.pl", 2, false}, + {1, "karpacz.pl", 2, false}, + {1, "kartuzy.pl", 2, false}, + {1, "kaszuby.pl", 2, false}, + {1, "katowice.pl", 2, false}, + {1, "kepno.pl", 2, false}, + {1, "ketrzyn.pl", 2, false}, + {1, "klodzko.pl", 2, false}, + {1, "kobierzyce.pl", 2, false}, + {1, "kolobrzeg.pl", 2, false}, + {1, "konin.pl", 2, false}, + {1, "konskowola.pl", 2, false}, + {1, "kutno.pl", 2, false}, + {1, "lapy.pl", 2, false}, + {1, "lebork.pl", 2, false}, + {1, "legnica.pl", 2, false}, + {1, "lezajsk.pl", 2, false}, + {1, "limanowa.pl", 2, false}, + {1, "lomza.pl", 2, false}, + {1, "lowicz.pl", 2, false}, + {1, "lubin.pl", 2, false}, + {1, "lukow.pl", 2, false}, + {1, "malbork.pl", 2, false}, + {1, "malopolska.pl", 2, false}, + {1, "mazowsze.pl", 2, false}, + {1, "mazury.pl", 2, false}, + {1, "mielec.pl", 2, false}, + {1, "mielno.pl", 2, false}, + {1, "mragowo.pl", 2, false}, + {1, "naklo.pl", 2, false}, + {1, "nowaruda.pl", 2, false}, + {1, "nysa.pl", 2, false}, + {1, "olawa.pl", 2, false}, + {1, "olecko.pl", 2, false}, + {1, "olkusz.pl", 2, false}, + {1, "olsztyn.pl", 2, false}, + {1, "opoczno.pl", 2, false}, + {1, "opole.pl", 2, false}, + {1, "ostroda.pl", 2, false}, + {1, "ostroleka.pl", 2, false}, + {1, "ostrowiec.pl", 2, false}, + {1, "ostrowwlkp.pl", 2, false}, + {1, "pila.pl", 2, false}, + {1, "pisz.pl", 2, false}, + {1, "podhale.pl", 2, false}, + {1, "podlasie.pl", 2, false}, + {1, "polkowice.pl", 2, false}, + {1, "pomorze.pl", 2, false}, + {1, "pomorskie.pl", 2, false}, + {1, "prochowice.pl", 2, false}, + {1, "pruszkow.pl", 2, false}, + {1, "przeworsk.pl", 2, false}, + {1, "pulawy.pl", 2, false}, + {1, "radom.pl", 2, false}, + {1, "rawa-maz.pl", 2, false}, + {1, "rybnik.pl", 2, false}, + {1, "rzeszow.pl", 2, false}, + {1, "sanok.pl", 2, false}, + {1, "sejny.pl", 2, false}, + {1, "slask.pl", 2, false}, + {1, "slupsk.pl", 2, false}, + {1, "sosnowiec.pl", 2, false}, + {1, "stalowa-wola.pl", 2, false}, + {1, "skoczow.pl", 2, false}, + {1, "starachowice.pl", 2, false}, + {1, "stargard.pl", 2, false}, + {1, "suwalki.pl", 2, false}, + {1, "swidnica.pl", 2, false}, + {1, "swiebodzin.pl", 2, false}, + {1, "swinoujscie.pl", 2, false}, + {1, "szczecin.pl", 2, false}, + {1, "szczytno.pl", 2, false}, + {1, "tarnobrzeg.pl", 2, false}, + {1, "tgory.pl", 2, false}, + {1, "turek.pl", 2, false}, + {1, "tychy.pl", 2, false}, + {1, "ustka.pl", 2, false}, + {1, "walbrzych.pl", 2, false}, + {1, "warmia.pl", 2, false}, + {1, "warszawa.pl", 2, false}, + {1, "waw.pl", 2, false}, + {1, "wegrow.pl", 2, false}, + {1, "wielun.pl", 2, false}, + {1, "wlocl.pl", 2, false}, + {1, "wloclawek.pl", 2, false}, + {1, "wodzislaw.pl", 2, false}, + {1, "wolomin.pl", 2, false}, + {1, "wroclaw.pl", 2, false}, + {1, "zachpomor.pl", 2, false}, + {1, "zagan.pl", 2, false}, + {1, "zarow.pl", 2, false}, + {1, "zgora.pl", 2, false}, + {1, "zgorzelec.pl", 2, false}, + {1, "pm", 1, false}, + {1, "pn", 1, false}, + {1, "gov.pn", 2, false}, + {1, "co.pn", 2, false}, + {1, "org.pn", 2, false}, + {1, "edu.pn", 2, false}, + {1, "net.pn", 2, false}, 
+ {1, "post", 1, false}, + {1, "pr", 1, false}, + {1, "com.pr", 2, false}, + {1, "net.pr", 2, false}, + {1, "org.pr", 2, false}, + {1, "gov.pr", 2, false}, + {1, "edu.pr", 2, false}, + {1, "isla.pr", 2, false}, + {1, "pro.pr", 2, false}, + {1, "biz.pr", 2, false}, + {1, "info.pr", 2, false}, + {1, "name.pr", 2, false}, + {1, "est.pr", 2, false}, + {1, "prof.pr", 2, false}, + {1, "ac.pr", 2, false}, + {1, "pro", 1, false}, + {1, "aaa.pro", 2, false}, + {1, "aca.pro", 2, false}, + {1, "acct.pro", 2, false}, + {1, "avocat.pro", 2, false}, + {1, "bar.pro", 2, false}, + {1, "cpa.pro", 2, false}, + {1, "eng.pro", 2, false}, + {1, "jur.pro", 2, false}, + {1, "law.pro", 2, false}, + {1, "med.pro", 2, false}, + {1, "recht.pro", 2, false}, + {1, "ps", 1, false}, + {1, "edu.ps", 2, false}, + {1, "gov.ps", 2, false}, + {1, "sec.ps", 2, false}, + {1, "plo.ps", 2, false}, + {1, "com.ps", 2, false}, + {1, "org.ps", 2, false}, + {1, "net.ps", 2, false}, + {1, "pt", 1, false}, + {1, "net.pt", 2, false}, + {1, "gov.pt", 2, false}, + {1, "org.pt", 2, false}, + {1, "edu.pt", 2, false}, + {1, "int.pt", 2, false}, + {1, "publ.pt", 2, false}, + {1, "com.pt", 2, false}, + {1, "nome.pt", 2, false}, + {1, "pw", 1, false}, + {1, "co.pw", 2, false}, + {1, "ne.pw", 2, false}, + {1, "or.pw", 2, false}, + {1, "ed.pw", 2, false}, + {1, "go.pw", 2, false}, + {1, "belau.pw", 2, false}, + {1, "py", 1, false}, + {1, "com.py", 2, false}, + {1, "coop.py", 2, false}, + {1, "edu.py", 2, false}, + {1, "gov.py", 2, false}, + {1, "mil.py", 2, false}, + {1, "net.py", 2, false}, + {1, "org.py", 2, false}, + {1, "qa", 1, false}, + {1, "com.qa", 2, false}, + {1, "edu.qa", 2, false}, + {1, "gov.qa", 2, false}, + {1, "mil.qa", 2, false}, + {1, "name.qa", 2, false}, + {1, "net.qa", 2, false}, + {1, "org.qa", 2, false}, + {1, "sch.qa", 2, false}, + {1, "re", 1, false}, + {1, "asso.re", 2, false}, + {1, "com.re", 2, false}, + {1, "nom.re", 2, false}, + {1, "ro", 1, false}, + {1, "arts.ro", 2, false}, + {1, "com.ro", 2, false}, + {1, "firm.ro", 2, false}, + {1, "info.ro", 2, false}, + {1, "nom.ro", 2, false}, + {1, "nt.ro", 2, false}, + {1, "org.ro", 2, false}, + {1, "rec.ro", 2, false}, + {1, "store.ro", 2, false}, + {1, "tm.ro", 2, false}, + {1, "www.ro", 2, false}, + {1, "rs", 1, false}, + {1, "ac.rs", 2, false}, + {1, "co.rs", 2, false}, + {1, "edu.rs", 2, false}, + {1, "gov.rs", 2, false}, + {1, "in.rs", 2, false}, + {1, "org.rs", 2, false}, + {1, "ru", 1, false}, + {1, "ac.ru", 2, false}, + {1, "edu.ru", 2, false}, + {1, "gov.ru", 2, false}, + {1, "int.ru", 2, false}, + {1, "mil.ru", 2, false}, + {1, "test.ru", 2, false}, + {1, "rw", 1, false}, + {1, "ac.rw", 2, false}, + {1, "co.rw", 2, false}, + {1, "coop.rw", 2, false}, + {1, "gov.rw", 2, false}, + {1, "mil.rw", 2, false}, + {1, "net.rw", 2, false}, + {1, "org.rw", 2, false}, + {1, "sa", 1, false}, + {1, "com.sa", 2, false}, + {1, "net.sa", 2, false}, + {1, "org.sa", 2, false}, + {1, "gov.sa", 2, false}, + {1, "med.sa", 2, false}, + {1, "pub.sa", 2, false}, + {1, "edu.sa", 2, false}, + {1, "sch.sa", 2, false}, + {1, "sb", 1, false}, + {1, "com.sb", 2, false}, + {1, "edu.sb", 2, false}, + {1, "gov.sb", 2, false}, + {1, "net.sb", 2, false}, + {1, "org.sb", 2, false}, + {1, "sc", 1, false}, + {1, "com.sc", 2, false}, + {1, "gov.sc", 2, false}, + {1, "net.sc", 2, false}, + {1, "org.sc", 2, false}, + {1, "edu.sc", 2, false}, + {1, "sd", 1, false}, + {1, "com.sd", 2, false}, + {1, "net.sd", 2, false}, + {1, "org.sd", 2, false}, + {1, "edu.sd", 2, false}, + {1, "med.sd", 2, false}, + {1, 
"tv.sd", 2, false}, + {1, "gov.sd", 2, false}, + {1, "info.sd", 2, false}, + {1, "se", 1, false}, + {1, "a.se", 2, false}, + {1, "ac.se", 2, false}, + {1, "b.se", 2, false}, + {1, "bd.se", 2, false}, + {1, "brand.se", 2, false}, + {1, "c.se", 2, false}, + {1, "d.se", 2, false}, + {1, "e.se", 2, false}, + {1, "f.se", 2, false}, + {1, "fh.se", 2, false}, + {1, "fhsk.se", 2, false}, + {1, "fhv.se", 2, false}, + {1, "g.se", 2, false}, + {1, "h.se", 2, false}, + {1, "i.se", 2, false}, + {1, "k.se", 2, false}, + {1, "komforb.se", 2, false}, + {1, "kommunalforbund.se", 2, false}, + {1, "komvux.se", 2, false}, + {1, "l.se", 2, false}, + {1, "lanbib.se", 2, false}, + {1, "m.se", 2, false}, + {1, "n.se", 2, false}, + {1, "naturbruksgymn.se", 2, false}, + {1, "o.se", 2, false}, + {1, "org.se", 2, false}, + {1, "p.se", 2, false}, + {1, "parti.se", 2, false}, + {1, "pp.se", 2, false}, + {1, "press.se", 2, false}, + {1, "r.se", 2, false}, + {1, "s.se", 2, false}, + {1, "t.se", 2, false}, + {1, "tm.se", 2, false}, + {1, "u.se", 2, false}, + {1, "w.se", 2, false}, + {1, "x.se", 2, false}, + {1, "y.se", 2, false}, + {1, "z.se", 2, false}, + {1, "sg", 1, false}, + {1, "com.sg", 2, false}, + {1, "net.sg", 2, false}, + {1, "org.sg", 2, false}, + {1, "gov.sg", 2, false}, + {1, "edu.sg", 2, false}, + {1, "per.sg", 2, false}, + {1, "sh", 1, false}, + {1, "com.sh", 2, false}, + {1, "net.sh", 2, false}, + {1, "gov.sh", 2, false}, + {1, "org.sh", 2, false}, + {1, "mil.sh", 2, false}, + {1, "si", 1, false}, + {1, "sj", 1, false}, + {1, "sk", 1, false}, + {1, "sl", 1, false}, + {1, "com.sl", 2, false}, + {1, "net.sl", 2, false}, + {1, "edu.sl", 2, false}, + {1, "gov.sl", 2, false}, + {1, "org.sl", 2, false}, + {1, "sm", 1, false}, + {1, "sn", 1, false}, + {1, "art.sn", 2, false}, + {1, "com.sn", 2, false}, + {1, "edu.sn", 2, false}, + {1, "gouv.sn", 2, false}, + {1, "org.sn", 2, false}, + {1, "perso.sn", 2, false}, + {1, "univ.sn", 2, false}, + {1, "so", 1, false}, + {1, "com.so", 2, false}, + {1, "net.so", 2, false}, + {1, "org.so", 2, false}, + {1, "sr", 1, false}, + {1, "st", 1, false}, + {1, "co.st", 2, false}, + {1, "com.st", 2, false}, + {1, "consulado.st", 2, false}, + {1, "edu.st", 2, false}, + {1, "embaixada.st", 2, false}, + {1, "gov.st", 2, false}, + {1, "mil.st", 2, false}, + {1, "net.st", 2, false}, + {1, "org.st", 2, false}, + {1, "principe.st", 2, false}, + {1, "saotome.st", 2, false}, + {1, "store.st", 2, false}, + {1, "su", 1, false}, + {1, "sv", 1, false}, + {1, "com.sv", 2, false}, + {1, "edu.sv", 2, false}, + {1, "gob.sv", 2, false}, + {1, "org.sv", 2, false}, + {1, "red.sv", 2, false}, + {1, "sx", 1, false}, + {1, "gov.sx", 2, false}, + {1, "sy", 1, false}, + {1, "edu.sy", 2, false}, + {1, "gov.sy", 2, false}, + {1, "net.sy", 2, false}, + {1, "mil.sy", 2, false}, + {1, "com.sy", 2, false}, + {1, "org.sy", 2, false}, + {1, "sz", 1, false}, + {1, "co.sz", 2, false}, + {1, "ac.sz", 2, false}, + {1, "org.sz", 2, false}, + {1, "tc", 1, false}, + {1, "td", 1, false}, + {1, "tel", 1, false}, + {1, "tf", 1, false}, + {1, "tg", 1, false}, + {1, "th", 1, false}, + {1, "ac.th", 2, false}, + {1, "co.th", 2, false}, + {1, "go.th", 2, false}, + {1, "in.th", 2, false}, + {1, "mi.th", 2, false}, + {1, "net.th", 2, false}, + {1, "or.th", 2, false}, + {1, "tj", 1, false}, + {1, "ac.tj", 2, false}, + {1, "biz.tj", 2, false}, + {1, "co.tj", 2, false}, + {1, "com.tj", 2, false}, + {1, "edu.tj", 2, false}, + {1, "go.tj", 2, false}, + {1, "gov.tj", 2, false}, + {1, "int.tj", 2, false}, + {1, "mil.tj", 2, false}, + {1, 
"name.tj", 2, false}, + {1, "net.tj", 2, false}, + {1, "nic.tj", 2, false}, + {1, "org.tj", 2, false}, + {1, "test.tj", 2, false}, + {1, "web.tj", 2, false}, + {1, "tk", 1, false}, + {1, "tl", 1, false}, + {1, "gov.tl", 2, false}, + {1, "tm", 1, false}, + {1, "com.tm", 2, false}, + {1, "co.tm", 2, false}, + {1, "org.tm", 2, false}, + {1, "net.tm", 2, false}, + {1, "nom.tm", 2, false}, + {1, "gov.tm", 2, false}, + {1, "mil.tm", 2, false}, + {1, "edu.tm", 2, false}, + {1, "tn", 1, false}, + {1, "com.tn", 2, false}, + {1, "ens.tn", 2, false}, + {1, "fin.tn", 2, false}, + {1, "gov.tn", 2, false}, + {1, "ind.tn", 2, false}, + {1, "intl.tn", 2, false}, + {1, "nat.tn", 2, false}, + {1, "net.tn", 2, false}, + {1, "org.tn", 2, false}, + {1, "info.tn", 2, false}, + {1, "perso.tn", 2, false}, + {1, "tourism.tn", 2, false}, + {1, "edunet.tn", 2, false}, + {1, "rnrt.tn", 2, false}, + {1, "rns.tn", 2, false}, + {1, "rnu.tn", 2, false}, + {1, "mincom.tn", 2, false}, + {1, "agrinet.tn", 2, false}, + {1, "defense.tn", 2, false}, + {1, "turen.tn", 2, false}, + {1, "to", 1, false}, + {1, "com.to", 2, false}, + {1, "gov.to", 2, false}, + {1, "net.to", 2, false}, + {1, "org.to", 2, false}, + {1, "edu.to", 2, false}, + {1, "mil.to", 2, false}, + {1, "tr", 1, false}, + {1, "av.tr", 2, false}, + {1, "bbs.tr", 2, false}, + {1, "bel.tr", 2, false}, + {1, "biz.tr", 2, false}, + {1, "com.tr", 2, false}, + {1, "dr.tr", 2, false}, + {1, "edu.tr", 2, false}, + {1, "gen.tr", 2, false}, + {1, "gov.tr", 2, false}, + {1, "info.tr", 2, false}, + {1, "mil.tr", 2, false}, + {1, "k12.tr", 2, false}, + {1, "kep.tr", 2, false}, + {1, "name.tr", 2, false}, + {1, "net.tr", 2, false}, + {1, "org.tr", 2, false}, + {1, "pol.tr", 2, false}, + {1, "tel.tr", 2, false}, + {1, "tsk.tr", 2, false}, + {1, "tv.tr", 2, false}, + {1, "web.tr", 2, false}, + {1, "nc.tr", 2, false}, + {1, "gov.nc.tr", 3, false}, + {1, "tt", 1, false}, + {1, "co.tt", 2, false}, + {1, "com.tt", 2, false}, + {1, "org.tt", 2, false}, + {1, "net.tt", 2, false}, + {1, "biz.tt", 2, false}, + {1, "info.tt", 2, false}, + {1, "pro.tt", 2, false}, + {1, "int.tt", 2, false}, + {1, "coop.tt", 2, false}, + {1, "jobs.tt", 2, false}, + {1, "mobi.tt", 2, false}, + {1, "travel.tt", 2, false}, + {1, "museum.tt", 2, false}, + {1, "aero.tt", 2, false}, + {1, "name.tt", 2, false}, + {1, "gov.tt", 2, false}, + {1, "edu.tt", 2, false}, + {1, "tv", 1, false}, + {1, "tw", 1, false}, + {1, "edu.tw", 2, false}, + {1, "gov.tw", 2, false}, + {1, "mil.tw", 2, false}, + {1, "com.tw", 2, false}, + {1, "net.tw", 2, false}, + {1, "org.tw", 2, false}, + {1, "idv.tw", 2, false}, + {1, "game.tw", 2, false}, + {1, "ebiz.tw", 2, false}, + {1, "club.tw", 2, false}, + {1, "xn--zf0ao64a.tw", 2, false}, + {1, "xn--uc0atv.tw", 2, false}, + {1, "xn--czrw28b.tw", 2, false}, + {1, "tz", 1, false}, + {1, "ac.tz", 2, false}, + {1, "co.tz", 2, false}, + {1, "go.tz", 2, false}, + {1, "hotel.tz", 2, false}, + {1, "info.tz", 2, false}, + {1, "me.tz", 2, false}, + {1, "mil.tz", 2, false}, + {1, "mobi.tz", 2, false}, + {1, "ne.tz", 2, false}, + {1, "or.tz", 2, false}, + {1, "sc.tz", 2, false}, + {1, "tv.tz", 2, false}, + {1, "ua", 1, false}, + {1, "com.ua", 2, false}, + {1, "edu.ua", 2, false}, + {1, "gov.ua", 2, false}, + {1, "in.ua", 2, false}, + {1, "net.ua", 2, false}, + {1, "org.ua", 2, false}, + {1, "cherkassy.ua", 2, false}, + {1, "cherkasy.ua", 2, false}, + {1, "chernigov.ua", 2, false}, + {1, "chernihiv.ua", 2, false}, + {1, "chernivtsi.ua", 2, false}, + {1, "chernovtsy.ua", 2, false}, + {1, "ck.ua", 2, 
false}, + {1, "cn.ua", 2, false}, + {1, "cr.ua", 2, false}, + {1, "crimea.ua", 2, false}, + {1, "cv.ua", 2, false}, + {1, "dn.ua", 2, false}, + {1, "dnepropetrovsk.ua", 2, false}, + {1, "dnipropetrovsk.ua", 2, false}, + {1, "dominic.ua", 2, false}, + {1, "donetsk.ua", 2, false}, + {1, "dp.ua", 2, false}, + {1, "if.ua", 2, false}, + {1, "ivano-frankivsk.ua", 2, false}, + {1, "kh.ua", 2, false}, + {1, "kharkiv.ua", 2, false}, + {1, "kharkov.ua", 2, false}, + {1, "kherson.ua", 2, false}, + {1, "khmelnitskiy.ua", 2, false}, + {1, "khmelnytskyi.ua", 2, false}, + {1, "kiev.ua", 2, false}, + {1, "kirovograd.ua", 2, false}, + {1, "km.ua", 2, false}, + {1, "kr.ua", 2, false}, + {1, "krym.ua", 2, false}, + {1, "ks.ua", 2, false}, + {1, "kv.ua", 2, false}, + {1, "kyiv.ua", 2, false}, + {1, "lg.ua", 2, false}, + {1, "lt.ua", 2, false}, + {1, "lugansk.ua", 2, false}, + {1, "lutsk.ua", 2, false}, + {1, "lv.ua", 2, false}, + {1, "lviv.ua", 2, false}, + {1, "mk.ua", 2, false}, + {1, "mykolaiv.ua", 2, false}, + {1, "nikolaev.ua", 2, false}, + {1, "od.ua", 2, false}, + {1, "odesa.ua", 2, false}, + {1, "odessa.ua", 2, false}, + {1, "pl.ua", 2, false}, + {1, "poltava.ua", 2, false}, + {1, "rivne.ua", 2, false}, + {1, "rovno.ua", 2, false}, + {1, "rv.ua", 2, false}, + {1, "sb.ua", 2, false}, + {1, "sebastopol.ua", 2, false}, + {1, "sevastopol.ua", 2, false}, + {1, "sm.ua", 2, false}, + {1, "sumy.ua", 2, false}, + {1, "te.ua", 2, false}, + {1, "ternopil.ua", 2, false}, + {1, "uz.ua", 2, false}, + {1, "uzhgorod.ua", 2, false}, + {1, "vinnica.ua", 2, false}, + {1, "vinnytsia.ua", 2, false}, + {1, "vn.ua", 2, false}, + {1, "volyn.ua", 2, false}, + {1, "yalta.ua", 2, false}, + {1, "zaporizhzhe.ua", 2, false}, + {1, "zaporizhzhia.ua", 2, false}, + {1, "zhitomir.ua", 2, false}, + {1, "zhytomyr.ua", 2, false}, + {1, "zp.ua", 2, false}, + {1, "zt.ua", 2, false}, + {1, "ug", 1, false}, + {1, "co.ug", 2, false}, + {1, "or.ug", 2, false}, + {1, "ac.ug", 2, false}, + {1, "sc.ug", 2, false}, + {1, "go.ug", 2, false}, + {1, "ne.ug", 2, false}, + {1, "com.ug", 2, false}, + {1, "org.ug", 2, false}, + {1, "uk", 1, false}, + {1, "ac.uk", 2, false}, + {1, "co.uk", 2, false}, + {1, "gov.uk", 2, false}, + {1, "ltd.uk", 2, false}, + {1, "me.uk", 2, false}, + {1, "net.uk", 2, false}, + {1, "nhs.uk", 2, false}, + {1, "org.uk", 2, false}, + {1, "plc.uk", 2, false}, + {1, "police.uk", 2, false}, + {2, "sch.uk", 3, false}, + {1, "us", 1, false}, + {1, "dni.us", 2, false}, + {1, "fed.us", 2, false}, + {1, "isa.us", 2, false}, + {1, "kids.us", 2, false}, + {1, "nsn.us", 2, false}, + {1, "ak.us", 2, false}, + {1, "al.us", 2, false}, + {1, "ar.us", 2, false}, + {1, "as.us", 2, false}, + {1, "az.us", 2, false}, + {1, "ca.us", 2, false}, + {1, "co.us", 2, false}, + {1, "ct.us", 2, false}, + {1, "dc.us", 2, false}, + {1, "de.us", 2, false}, + {1, "fl.us", 2, false}, + {1, "ga.us", 2, false}, + {1, "gu.us", 2, false}, + {1, "hi.us", 2, false}, + {1, "ia.us", 2, false}, + {1, "id.us", 2, false}, + {1, "il.us", 2, false}, + {1, "in.us", 2, false}, + {1, "ks.us", 2, false}, + {1, "ky.us", 2, false}, + {1, "la.us", 2, false}, + {1, "ma.us", 2, false}, + {1, "md.us", 2, false}, + {1, "me.us", 2, false}, + {1, "mi.us", 2, false}, + {1, "mn.us", 2, false}, + {1, "mo.us", 2, false}, + {1, "ms.us", 2, false}, + {1, "mt.us", 2, false}, + {1, "nc.us", 2, false}, + {1, "nd.us", 2, false}, + {1, "ne.us", 2, false}, + {1, "nh.us", 2, false}, + {1, "nj.us", 2, false}, + {1, "nm.us", 2, false}, + {1, "nv.us", 2, false}, + {1, "ny.us", 2, false}, + {1, "oh.us", 
2, false}, + {1, "ok.us", 2, false}, + {1, "or.us", 2, false}, + {1, "pa.us", 2, false}, + {1, "pr.us", 2, false}, + {1, "ri.us", 2, false}, + {1, "sc.us", 2, false}, + {1, "sd.us", 2, false}, + {1, "tn.us", 2, false}, + {1, "tx.us", 2, false}, + {1, "ut.us", 2, false}, + {1, "vi.us", 2, false}, + {1, "vt.us", 2, false}, + {1, "va.us", 2, false}, + {1, "wa.us", 2, false}, + {1, "wi.us", 2, false}, + {1, "wv.us", 2, false}, + {1, "wy.us", 2, false}, + {1, "k12.ak.us", 3, false}, + {1, "k12.al.us", 3, false}, + {1, "k12.ar.us", 3, false}, + {1, "k12.as.us", 3, false}, + {1, "k12.az.us", 3, false}, + {1, "k12.ca.us", 3, false}, + {1, "k12.co.us", 3, false}, + {1, "k12.ct.us", 3, false}, + {1, "k12.dc.us", 3, false}, + {1, "k12.de.us", 3, false}, + {1, "k12.fl.us", 3, false}, + {1, "k12.ga.us", 3, false}, + {1, "k12.gu.us", 3, false}, + {1, "k12.ia.us", 3, false}, + {1, "k12.id.us", 3, false}, + {1, "k12.il.us", 3, false}, + {1, "k12.in.us", 3, false}, + {1, "k12.ks.us", 3, false}, + {1, "k12.ky.us", 3, false}, + {1, "k12.la.us", 3, false}, + {1, "k12.ma.us", 3, false}, + {1, "k12.md.us", 3, false}, + {1, "k12.me.us", 3, false}, + {1, "k12.mi.us", 3, false}, + {1, "k12.mn.us", 3, false}, + {1, "k12.mo.us", 3, false}, + {1, "k12.ms.us", 3, false}, + {1, "k12.mt.us", 3, false}, + {1, "k12.nc.us", 3, false}, + {1, "k12.ne.us", 3, false}, + {1, "k12.nh.us", 3, false}, + {1, "k12.nj.us", 3, false}, + {1, "k12.nm.us", 3, false}, + {1, "k12.nv.us", 3, false}, + {1, "k12.ny.us", 3, false}, + {1, "k12.oh.us", 3, false}, + {1, "k12.ok.us", 3, false}, + {1, "k12.or.us", 3, false}, + {1, "k12.pa.us", 3, false}, + {1, "k12.pr.us", 3, false}, + {1, "k12.ri.us", 3, false}, + {1, "k12.sc.us", 3, false}, + {1, "k12.tn.us", 3, false}, + {1, "k12.tx.us", 3, false}, + {1, "k12.ut.us", 3, false}, + {1, "k12.vi.us", 3, false}, + {1, "k12.vt.us", 3, false}, + {1, "k12.va.us", 3, false}, + {1, "k12.wa.us", 3, false}, + {1, "k12.wi.us", 3, false}, + {1, "k12.wy.us", 3, false}, + {1, "cc.ak.us", 3, false}, + {1, "cc.al.us", 3, false}, + {1, "cc.ar.us", 3, false}, + {1, "cc.as.us", 3, false}, + {1, "cc.az.us", 3, false}, + {1, "cc.ca.us", 3, false}, + {1, "cc.co.us", 3, false}, + {1, "cc.ct.us", 3, false}, + {1, "cc.dc.us", 3, false}, + {1, "cc.de.us", 3, false}, + {1, "cc.fl.us", 3, false}, + {1, "cc.ga.us", 3, false}, + {1, "cc.gu.us", 3, false}, + {1, "cc.hi.us", 3, false}, + {1, "cc.ia.us", 3, false}, + {1, "cc.id.us", 3, false}, + {1, "cc.il.us", 3, false}, + {1, "cc.in.us", 3, false}, + {1, "cc.ks.us", 3, false}, + {1, "cc.ky.us", 3, false}, + {1, "cc.la.us", 3, false}, + {1, "cc.ma.us", 3, false}, + {1, "cc.md.us", 3, false}, + {1, "cc.me.us", 3, false}, + {1, "cc.mi.us", 3, false}, + {1, "cc.mn.us", 3, false}, + {1, "cc.mo.us", 3, false}, + {1, "cc.ms.us", 3, false}, + {1, "cc.mt.us", 3, false}, + {1, "cc.nc.us", 3, false}, + {1, "cc.nd.us", 3, false}, + {1, "cc.ne.us", 3, false}, + {1, "cc.nh.us", 3, false}, + {1, "cc.nj.us", 3, false}, + {1, "cc.nm.us", 3, false}, + {1, "cc.nv.us", 3, false}, + {1, "cc.ny.us", 3, false}, + {1, "cc.oh.us", 3, false}, + {1, "cc.ok.us", 3, false}, + {1, "cc.or.us", 3, false}, + {1, "cc.pa.us", 3, false}, + {1, "cc.pr.us", 3, false}, + {1, "cc.ri.us", 3, false}, + {1, "cc.sc.us", 3, false}, + {1, "cc.sd.us", 3, false}, + {1, "cc.tn.us", 3, false}, + {1, "cc.tx.us", 3, false}, + {1, "cc.ut.us", 3, false}, + {1, "cc.vi.us", 3, false}, + {1, "cc.vt.us", 3, false}, + {1, "cc.va.us", 3, false}, + {1, "cc.wa.us", 3, false}, + {1, "cc.wi.us", 3, false}, + {1, "cc.wv.us", 3, false}, + {1, 
"cc.wy.us", 3, false}, + {1, "lib.ak.us", 3, false}, + {1, "lib.al.us", 3, false}, + {1, "lib.ar.us", 3, false}, + {1, "lib.as.us", 3, false}, + {1, "lib.az.us", 3, false}, + {1, "lib.ca.us", 3, false}, + {1, "lib.co.us", 3, false}, + {1, "lib.ct.us", 3, false}, + {1, "lib.dc.us", 3, false}, + {1, "lib.fl.us", 3, false}, + {1, "lib.ga.us", 3, false}, + {1, "lib.gu.us", 3, false}, + {1, "lib.hi.us", 3, false}, + {1, "lib.ia.us", 3, false}, + {1, "lib.id.us", 3, false}, + {1, "lib.il.us", 3, false}, + {1, "lib.in.us", 3, false}, + {1, "lib.ks.us", 3, false}, + {1, "lib.ky.us", 3, false}, + {1, "lib.la.us", 3, false}, + {1, "lib.ma.us", 3, false}, + {1, "lib.md.us", 3, false}, + {1, "lib.me.us", 3, false}, + {1, "lib.mi.us", 3, false}, + {1, "lib.mn.us", 3, false}, + {1, "lib.mo.us", 3, false}, + {1, "lib.ms.us", 3, false}, + {1, "lib.mt.us", 3, false}, + {1, "lib.nc.us", 3, false}, + {1, "lib.nd.us", 3, false}, + {1, "lib.ne.us", 3, false}, + {1, "lib.nh.us", 3, false}, + {1, "lib.nj.us", 3, false}, + {1, "lib.nm.us", 3, false}, + {1, "lib.nv.us", 3, false}, + {1, "lib.ny.us", 3, false}, + {1, "lib.oh.us", 3, false}, + {1, "lib.ok.us", 3, false}, + {1, "lib.or.us", 3, false}, + {1, "lib.pa.us", 3, false}, + {1, "lib.pr.us", 3, false}, + {1, "lib.ri.us", 3, false}, + {1, "lib.sc.us", 3, false}, + {1, "lib.sd.us", 3, false}, + {1, "lib.tn.us", 3, false}, + {1, "lib.tx.us", 3, false}, + {1, "lib.ut.us", 3, false}, + {1, "lib.vi.us", 3, false}, + {1, "lib.vt.us", 3, false}, + {1, "lib.va.us", 3, false}, + {1, "lib.wa.us", 3, false}, + {1, "lib.wi.us", 3, false}, + {1, "lib.wy.us", 3, false}, + {1, "pvt.k12.ma.us", 4, false}, + {1, "chtr.k12.ma.us", 4, false}, + {1, "paroch.k12.ma.us", 4, false}, + {1, "ann-arbor.mi.us", 3, false}, + {1, "cog.mi.us", 3, false}, + {1, "dst.mi.us", 3, false}, + {1, "eaton.mi.us", 3, false}, + {1, "gen.mi.us", 3, false}, + {1, "mus.mi.us", 3, false}, + {1, "tec.mi.us", 3, false}, + {1, "washtenaw.mi.us", 3, false}, + {1, "uy", 1, false}, + {1, "com.uy", 2, false}, + {1, "edu.uy", 2, false}, + {1, "gub.uy", 2, false}, + {1, "mil.uy", 2, false}, + {1, "net.uy", 2, false}, + {1, "org.uy", 2, false}, + {1, "uz", 1, false}, + {1, "co.uz", 2, false}, + {1, "com.uz", 2, false}, + {1, "net.uz", 2, false}, + {1, "org.uz", 2, false}, + {1, "va", 1, false}, + {1, "vc", 1, false}, + {1, "com.vc", 2, false}, + {1, "net.vc", 2, false}, + {1, "org.vc", 2, false}, + {1, "gov.vc", 2, false}, + {1, "mil.vc", 2, false}, + {1, "edu.vc", 2, false}, + {1, "ve", 1, false}, + {1, "arts.ve", 2, false}, + {1, "co.ve", 2, false}, + {1, "com.ve", 2, false}, + {1, "e12.ve", 2, false}, + {1, "edu.ve", 2, false}, + {1, "firm.ve", 2, false}, + {1, "gob.ve", 2, false}, + {1, "gov.ve", 2, false}, + {1, "info.ve", 2, false}, + {1, "int.ve", 2, false}, + {1, "mil.ve", 2, false}, + {1, "net.ve", 2, false}, + {1, "org.ve", 2, false}, + {1, "rec.ve", 2, false}, + {1, "store.ve", 2, false}, + {1, "tec.ve", 2, false}, + {1, "web.ve", 2, false}, + {1, "vg", 1, false}, + {1, "vi", 1, false}, + {1, "co.vi", 2, false}, + {1, "com.vi", 2, false}, + {1, "k12.vi", 2, false}, + {1, "net.vi", 2, false}, + {1, "org.vi", 2, false}, + {1, "vn", 1, false}, + {1, "com.vn", 2, false}, + {1, "net.vn", 2, false}, + {1, "org.vn", 2, false}, + {1, "edu.vn", 2, false}, + {1, "gov.vn", 2, false}, + {1, "int.vn", 2, false}, + {1, "ac.vn", 2, false}, + {1, "biz.vn", 2, false}, + {1, "info.vn", 2, false}, + {1, "name.vn", 2, false}, + {1, "pro.vn", 2, false}, + {1, "health.vn", 2, false}, + {1, "vu", 1, false}, + {1, "com.vu", 2, 
false}, + {1, "edu.vu", 2, false}, + {1, "net.vu", 2, false}, + {1, "org.vu", 2, false}, + {1, "wf", 1, false}, + {1, "ws", 1, false}, + {1, "com.ws", 2, false}, + {1, "net.ws", 2, false}, + {1, "org.ws", 2, false}, + {1, "gov.ws", 2, false}, + {1, "edu.ws", 2, false}, + {1, "yt", 1, false}, + {1, "xn--mgbaam7a8h", 1, false}, + {1, "xn--y9a3aq", 1, false}, + {1, "xn--54b7fta0cc", 1, false}, + {1, "xn--90ae", 1, false}, + {1, "xn--90ais", 1, false}, + {1, "xn--fiqs8s", 1, false}, + {1, "xn--fiqz9s", 1, false}, + {1, "xn--lgbbat1ad8j", 1, false}, + {1, "xn--wgbh1c", 1, false}, + {1, "xn--e1a4c", 1, false}, + {1, "xn--node", 1, false}, + {1, "xn--qxam", 1, false}, + {1, "xn--j6w193g", 1, false}, + {1, "xn--55qx5d.xn--j6w193g", 2, false}, + {1, "xn--wcvs22d.xn--j6w193g", 2, false}, + {1, "xn--mxtq1m.xn--j6w193g", 2, false}, + {1, "xn--gmqw5a.xn--j6w193g", 2, false}, + {1, "xn--od0alg.xn--j6w193g", 2, false}, + {1, "xn--uc0atv.xn--j6w193g", 2, false}, + {1, "xn--2scrj9c", 1, false}, + {1, "xn--3hcrj9c", 1, false}, + {1, "xn--45br5cyl", 1, false}, + {1, "xn--h2breg3eve", 1, false}, + {1, "xn--h2brj9c8c", 1, false}, + {1, "xn--mgbgu82a", 1, false}, + {1, "xn--rvc1e0am3e", 1, false}, + {1, "xn--h2brj9c", 1, false}, + {1, "xn--mgbbh1a", 1, false}, + {1, "xn--mgbbh1a71e", 1, false}, + {1, "xn--fpcrj9c3d", 1, false}, + {1, "xn--gecrj9c", 1, false}, + {1, "xn--s9brj9c", 1, false}, + {1, "xn--45brj9c", 1, false}, + {1, "xn--xkc2dl3a5ee0h", 1, false}, + {1, "xn--mgba3a4f16a", 1, false}, + {1, "xn--mgba3a4fra", 1, false}, + {1, "xn--mgbtx2b", 1, false}, + {1, "xn--mgbayh7gpa", 1, false}, + {1, "xn--3e0b707e", 1, false}, + {1, "xn--80ao21a", 1, false}, + {1, "xn--fzc2c9e2c", 1, false}, + {1, "xn--xkc2al3hye2a", 1, false}, + {1, "xn--mgbc0a9azcg", 1, false}, + {1, "xn--d1alf", 1, false}, + {1, "xn--l1acc", 1, false}, + {1, "xn--mix891f", 1, false}, + {1, "xn--mix082f", 1, false}, + {1, "xn--mgbx4cd0ab", 1, false}, + {1, "xn--mgb9awbf", 1, false}, + {1, "xn--mgbai9azgqp6j", 1, false}, + {1, "xn--mgbai9a5eva00b", 1, false}, + {1, "xn--ygbi2ammx", 1, false}, + {1, "xn--90a3ac", 1, false}, + {1, "xn--o1ac.xn--90a3ac", 2, false}, + {1, "xn--c1avg.xn--90a3ac", 2, false}, + {1, "xn--90azh.xn--90a3ac", 2, false}, + {1, "xn--d1at.xn--90a3ac", 2, false}, + {1, "xn--o1ach.xn--90a3ac", 2, false}, + {1, "xn--80au.xn--90a3ac", 2, false}, + {1, "xn--p1ai", 1, false}, + {1, "xn--wgbl6a", 1, false}, + {1, "xn--mgberp4a5d4ar", 1, false}, + {1, "xn--mgberp4a5d4a87g", 1, false}, + {1, "xn--mgbqly7c0a67fbc", 1, false}, + {1, "xn--mgbqly7cvafr", 1, false}, + {1, "xn--mgbpl2fh", 1, false}, + {1, "xn--yfro4i67o", 1, false}, + {1, "xn--clchc0ea0b2g2a9gcd", 1, false}, + {1, "xn--ogbpf8fl", 1, false}, + {1, "xn--mgbtf8fl", 1, false}, + {1, "xn--o3cw4h", 1, false}, + {1, "xn--12c1fe0br.xn--o3cw4h", 2, false}, + {1, "xn--12co0c3b4eva.xn--o3cw4h", 2, false}, + {1, "xn--h3cuzk1di.xn--o3cw4h", 2, false}, + {1, "xn--o3cyx2a.xn--o3cw4h", 2, false}, + {1, "xn--m3ch0j3a.xn--o3cw4h", 2, false}, + {1, "xn--12cfi8ixb8l.xn--o3cw4h", 2, false}, + {1, "xn--pgbs0dh", 1, false}, + {1, "xn--kpry57d", 1, false}, + {1, "xn--kprw13d", 1, false}, + {1, "xn--nnx388a", 1, false}, + {1, "xn--j1amh", 1, false}, + {1, "xn--mgb2ddes", 1, false}, + {1, "xxx", 1, false}, + {2, "ye", 2, false}, + {1, "ac.za", 2, false}, + {1, "agric.za", 2, false}, + {1, "alt.za", 2, false}, + {1, "co.za", 2, false}, + {1, "edu.za", 2, false}, + {1, "gov.za", 2, false}, + {1, "grondar.za", 2, false}, + {1, "law.za", 2, false}, + {1, "mil.za", 2, false}, + {1, "net.za", 2, false}, 
+ {1, "ngo.za", 2, false}, + {1, "nic.za", 2, false}, + {1, "nis.za", 2, false}, + {1, "nom.za", 2, false}, + {1, "org.za", 2, false}, + {1, "school.za", 2, false}, + {1, "tm.za", 2, false}, + {1, "web.za", 2, false}, + {1, "zm", 1, false}, + {1, "ac.zm", 2, false}, + {1, "biz.zm", 2, false}, + {1, "co.zm", 2, false}, + {1, "com.zm", 2, false}, + {1, "edu.zm", 2, false}, + {1, "gov.zm", 2, false}, + {1, "info.zm", 2, false}, + {1, "mil.zm", 2, false}, + {1, "net.zm", 2, false}, + {1, "org.zm", 2, false}, + {1, "sch.zm", 2, false}, + {1, "zw", 1, false}, + {1, "ac.zw", 2, false}, + {1, "co.zw", 2, false}, + {1, "gov.zw", 2, false}, + {1, "mil.zw", 2, false}, + {1, "org.zw", 2, false}, + {1, "aaa", 1, false}, + {1, "aarp", 1, false}, + {1, "abarth", 1, false}, + {1, "abb", 1, false}, + {1, "abbott", 1, false}, + {1, "abbvie", 1, false}, + {1, "abc", 1, false}, + {1, "able", 1, false}, + {1, "abogado", 1, false}, + {1, "abudhabi", 1, false}, + {1, "academy", 1, false}, + {1, "accenture", 1, false}, + {1, "accountant", 1, false}, + {1, "accountants", 1, false}, + {1, "aco", 1, false}, + {1, "actor", 1, false}, + {1, "adac", 1, false}, + {1, "ads", 1, false}, + {1, "adult", 1, false}, + {1, "aeg", 1, false}, + {1, "aetna", 1, false}, + {1, "afamilycompany", 1, false}, + {1, "afl", 1, false}, + {1, "africa", 1, false}, + {1, "agakhan", 1, false}, + {1, "agency", 1, false}, + {1, "aig", 1, false}, + {1, "aigo", 1, false}, + {1, "airbus", 1, false}, + {1, "airforce", 1, false}, + {1, "airtel", 1, false}, + {1, "akdn", 1, false}, + {1, "alfaromeo", 1, false}, + {1, "alibaba", 1, false}, + {1, "alipay", 1, false}, + {1, "allfinanz", 1, false}, + {1, "allstate", 1, false}, + {1, "ally", 1, false}, + {1, "alsace", 1, false}, + {1, "alstom", 1, false}, + {1, "americanexpress", 1, false}, + {1, "americanfamily", 1, false}, + {1, "amex", 1, false}, + {1, "amfam", 1, false}, + {1, "amica", 1, false}, + {1, "amsterdam", 1, false}, + {1, "analytics", 1, false}, + {1, "android", 1, false}, + {1, "anquan", 1, false}, + {1, "anz", 1, false}, + {1, "aol", 1, false}, + {1, "apartments", 1, false}, + {1, "app", 1, false}, + {1, "apple", 1, false}, + {1, "aquarelle", 1, false}, + {1, "arab", 1, false}, + {1, "aramco", 1, false}, + {1, "archi", 1, false}, + {1, "army", 1, false}, + {1, "art", 1, false}, + {1, "arte", 1, false}, + {1, "asda", 1, false}, + {1, "associates", 1, false}, + {1, "athleta", 1, false}, + {1, "attorney", 1, false}, + {1, "auction", 1, false}, + {1, "audi", 1, false}, + {1, "audible", 1, false}, + {1, "audio", 1, false}, + {1, "auspost", 1, false}, + {1, "author", 1, false}, + {1, "auto", 1, false}, + {1, "autos", 1, false}, + {1, "avianca", 1, false}, + {1, "aws", 1, false}, + {1, "axa", 1, false}, + {1, "azure", 1, false}, + {1, "baby", 1, false}, + {1, "baidu", 1, false}, + {1, "banamex", 1, false}, + {1, "bananarepublic", 1, false}, + {1, "band", 1, false}, + {1, "bank", 1, false}, + {1, "bar", 1, false}, + {1, "barcelona", 1, false}, + {1, "barclaycard", 1, false}, + {1, "barclays", 1, false}, + {1, "barefoot", 1, false}, + {1, "bargains", 1, false}, + {1, "baseball", 1, false}, + {1, "basketball", 1, false}, + {1, "bauhaus", 1, false}, + {1, "bayern", 1, false}, + {1, "bbc", 1, false}, + {1, "bbt", 1, false}, + {1, "bbva", 1, false}, + {1, "bcg", 1, false}, + {1, "bcn", 1, false}, + {1, "beats", 1, false}, + {1, "beauty", 1, false}, + {1, "beer", 1, false}, + {1, "bentley", 1, false}, + {1, "berlin", 1, false}, + {1, "best", 1, false}, + {1, "bestbuy", 1, false}, + {1, "bet", 1, false}, 
+ {1, "bharti", 1, false}, + {1, "bible", 1, false}, + {1, "bid", 1, false}, + {1, "bike", 1, false}, + {1, "bing", 1, false}, + {1, "bingo", 1, false}, + {1, "bio", 1, false}, + {1, "black", 1, false}, + {1, "blackfriday", 1, false}, + {1, "blockbuster", 1, false}, + {1, "blog", 1, false}, + {1, "bloomberg", 1, false}, + {1, "blue", 1, false}, + {1, "bms", 1, false}, + {1, "bmw", 1, false}, + {1, "bnpparibas", 1, false}, + {1, "boats", 1, false}, + {1, "boehringer", 1, false}, + {1, "bofa", 1, false}, + {1, "bom", 1, false}, + {1, "bond", 1, false}, + {1, "boo", 1, false}, + {1, "book", 1, false}, + {1, "booking", 1, false}, + {1, "bosch", 1, false}, + {1, "bostik", 1, false}, + {1, "boston", 1, false}, + {1, "bot", 1, false}, + {1, "boutique", 1, false}, + {1, "box", 1, false}, + {1, "bradesco", 1, false}, + {1, "bridgestone", 1, false}, + {1, "broadway", 1, false}, + {1, "broker", 1, false}, + {1, "brother", 1, false}, + {1, "brussels", 1, false}, + {1, "budapest", 1, false}, + {1, "bugatti", 1, false}, + {1, "build", 1, false}, + {1, "builders", 1, false}, + {1, "business", 1, false}, + {1, "buy", 1, false}, + {1, "buzz", 1, false}, + {1, "bzh", 1, false}, + {1, "cab", 1, false}, + {1, "cafe", 1, false}, + {1, "cal", 1, false}, + {1, "call", 1, false}, + {1, "calvinklein", 1, false}, + {1, "cam", 1, false}, + {1, "camera", 1, false}, + {1, "camp", 1, false}, + {1, "cancerresearch", 1, false}, + {1, "canon", 1, false}, + {1, "capetown", 1, false}, + {1, "capital", 1, false}, + {1, "capitalone", 1, false}, + {1, "car", 1, false}, + {1, "caravan", 1, false}, + {1, "cards", 1, false}, + {1, "care", 1, false}, + {1, "career", 1, false}, + {1, "careers", 1, false}, + {1, "cars", 1, false}, + {1, "cartier", 1, false}, + {1, "casa", 1, false}, + {1, "case", 1, false}, + {1, "caseih", 1, false}, + {1, "cash", 1, false}, + {1, "casino", 1, false}, + {1, "catering", 1, false}, + {1, "catholic", 1, false}, + {1, "cba", 1, false}, + {1, "cbn", 1, false}, + {1, "cbre", 1, false}, + {1, "cbs", 1, false}, + {1, "ceb", 1, false}, + {1, "center", 1, false}, + {1, "ceo", 1, false}, + {1, "cern", 1, false}, + {1, "cfa", 1, false}, + {1, "cfd", 1, false}, + {1, "chanel", 1, false}, + {1, "channel", 1, false}, + {1, "charity", 1, false}, + {1, "chase", 1, false}, + {1, "chat", 1, false}, + {1, "cheap", 1, false}, + {1, "chintai", 1, false}, + {1, "christmas", 1, false}, + {1, "chrome", 1, false}, + {1, "chrysler", 1, false}, + {1, "church", 1, false}, + {1, "cipriani", 1, false}, + {1, "circle", 1, false}, + {1, "cisco", 1, false}, + {1, "citadel", 1, false}, + {1, "citi", 1, false}, + {1, "citic", 1, false}, + {1, "city", 1, false}, + {1, "cityeats", 1, false}, + {1, "claims", 1, false}, + {1, "cleaning", 1, false}, + {1, "click", 1, false}, + {1, "clinic", 1, false}, + {1, "clinique", 1, false}, + {1, "clothing", 1, false}, + {1, "cloud", 1, false}, + {1, "club", 1, false}, + {1, "clubmed", 1, false}, + {1, "coach", 1, false}, + {1, "codes", 1, false}, + {1, "coffee", 1, false}, + {1, "college", 1, false}, + {1, "cologne", 1, false}, + {1, "comcast", 1, false}, + {1, "commbank", 1, false}, + {1, "community", 1, false}, + {1, "company", 1, false}, + {1, "compare", 1, false}, + {1, "computer", 1, false}, + {1, "comsec", 1, false}, + {1, "condos", 1, false}, + {1, "construction", 1, false}, + {1, "consulting", 1, false}, + {1, "contact", 1, false}, + {1, "contractors", 1, false}, + {1, "cooking", 1, false}, + {1, "cookingchannel", 1, false}, + {1, "cool", 1, false}, + {1, "corsica", 1, false}, + {1, 
"country", 1, false}, + {1, "coupon", 1, false}, + {1, "coupons", 1, false}, + {1, "courses", 1, false}, + {1, "cpa", 1, false}, + {1, "credit", 1, false}, + {1, "creditcard", 1, false}, + {1, "creditunion", 1, false}, + {1, "cricket", 1, false}, + {1, "crown", 1, false}, + {1, "crs", 1, false}, + {1, "cruise", 1, false}, + {1, "cruises", 1, false}, + {1, "csc", 1, false}, + {1, "cuisinella", 1, false}, + {1, "cymru", 1, false}, + {1, "cyou", 1, false}, + {1, "dabur", 1, false}, + {1, "dad", 1, false}, + {1, "dance", 1, false}, + {1, "data", 1, false}, + {1, "date", 1, false}, + {1, "dating", 1, false}, + {1, "datsun", 1, false}, + {1, "day", 1, false}, + {1, "dclk", 1, false}, + {1, "dds", 1, false}, + {1, "deal", 1, false}, + {1, "dealer", 1, false}, + {1, "deals", 1, false}, + {1, "degree", 1, false}, + {1, "delivery", 1, false}, + {1, "dell", 1, false}, + {1, "deloitte", 1, false}, + {1, "delta", 1, false}, + {1, "democrat", 1, false}, + {1, "dental", 1, false}, + {1, "dentist", 1, false}, + {1, "desi", 1, false}, + {1, "design", 1, false}, + {1, "dev", 1, false}, + {1, "dhl", 1, false}, + {1, "diamonds", 1, false}, + {1, "diet", 1, false}, + {1, "digital", 1, false}, + {1, "direct", 1, false}, + {1, "directory", 1, false}, + {1, "discount", 1, false}, + {1, "discover", 1, false}, + {1, "dish", 1, false}, + {1, "diy", 1, false}, + {1, "dnp", 1, false}, + {1, "docs", 1, false}, + {1, "doctor", 1, false}, + {1, "dodge", 1, false}, + {1, "dog", 1, false}, + {1, "domains", 1, false}, + {1, "dot", 1, false}, + {1, "download", 1, false}, + {1, "drive", 1, false}, + {1, "dtv", 1, false}, + {1, "dubai", 1, false}, + {1, "duck", 1, false}, + {1, "dunlop", 1, false}, + {1, "duns", 1, false}, + {1, "dupont", 1, false}, + {1, "durban", 1, false}, + {1, "dvag", 1, false}, + {1, "dvr", 1, false}, + {1, "earth", 1, false}, + {1, "eat", 1, false}, + {1, "eco", 1, false}, + {1, "edeka", 1, false}, + {1, "education", 1, false}, + {1, "email", 1, false}, + {1, "emerck", 1, false}, + {1, "energy", 1, false}, + {1, "engineer", 1, false}, + {1, "engineering", 1, false}, + {1, "enterprises", 1, false}, + {1, "epson", 1, false}, + {1, "equipment", 1, false}, + {1, "ericsson", 1, false}, + {1, "erni", 1, false}, + {1, "esq", 1, false}, + {1, "estate", 1, false}, + {1, "esurance", 1, false}, + {1, "etisalat", 1, false}, + {1, "eurovision", 1, false}, + {1, "eus", 1, false}, + {1, "events", 1, false}, + {1, "everbank", 1, false}, + {1, "exchange", 1, false}, + {1, "expert", 1, false}, + {1, "exposed", 1, false}, + {1, "express", 1, false}, + {1, "extraspace", 1, false}, + {1, "fage", 1, false}, + {1, "fail", 1, false}, + {1, "fairwinds", 1, false}, + {1, "faith", 1, false}, + {1, "family", 1, false}, + {1, "fan", 1, false}, + {1, "fans", 1, false}, + {1, "farm", 1, false}, + {1, "farmers", 1, false}, + {1, "fashion", 1, false}, + {1, "fast", 1, false}, + {1, "fedex", 1, false}, + {1, "feedback", 1, false}, + {1, "ferrari", 1, false}, + {1, "ferrero", 1, false}, + {1, "fiat", 1, false}, + {1, "fidelity", 1, false}, + {1, "fido", 1, false}, + {1, "film", 1, false}, + {1, "final", 1, false}, + {1, "finance", 1, false}, + {1, "financial", 1, false}, + {1, "fire", 1, false}, + {1, "firestone", 1, false}, + {1, "firmdale", 1, false}, + {1, "fish", 1, false}, + {1, "fishing", 1, false}, + {1, "fit", 1, false}, + {1, "fitness", 1, false}, + {1, "flickr", 1, false}, + {1, "flights", 1, false}, + {1, "flir", 1, false}, + {1, "florist", 1, false}, + {1, "flowers", 1, false}, + {1, "fly", 1, false}, + {1, "foo", 1, false}, 
+ {1, "food", 1, false}, + {1, "foodnetwork", 1, false}, + {1, "football", 1, false}, + {1, "ford", 1, false}, + {1, "forex", 1, false}, + {1, "forsale", 1, false}, + {1, "forum", 1, false}, + {1, "foundation", 1, false}, + {1, "fox", 1, false}, + {1, "free", 1, false}, + {1, "fresenius", 1, false}, + {1, "frl", 1, false}, + {1, "frogans", 1, false}, + {1, "frontdoor", 1, false}, + {1, "frontier", 1, false}, + {1, "ftr", 1, false}, + {1, "fujitsu", 1, false}, + {1, "fujixerox", 1, false}, + {1, "fun", 1, false}, + {1, "fund", 1, false}, + {1, "furniture", 1, false}, + {1, "futbol", 1, false}, + {1, "fyi", 1, false}, + {1, "gal", 1, false}, + {1, "gallery", 1, false}, + {1, "gallo", 1, false}, + {1, "gallup", 1, false}, + {1, "game", 1, false}, + {1, "games", 1, false}, + {1, "gap", 1, false}, + {1, "garden", 1, false}, + {1, "gay", 1, false}, + {1, "gbiz", 1, false}, + {1, "gdn", 1, false}, + {1, "gea", 1, false}, + {1, "gent", 1, false}, + {1, "genting", 1, false}, + {1, "george", 1, false}, + {1, "ggee", 1, false}, + {1, "gift", 1, false}, + {1, "gifts", 1, false}, + {1, "gives", 1, false}, + {1, "giving", 1, false}, + {1, "glade", 1, false}, + {1, "glass", 1, false}, + {1, "gle", 1, false}, + {1, "global", 1, false}, + {1, "globo", 1, false}, + {1, "gmail", 1, false}, + {1, "gmbh", 1, false}, + {1, "gmo", 1, false}, + {1, "gmx", 1, false}, + {1, "godaddy", 1, false}, + {1, "gold", 1, false}, + {1, "goldpoint", 1, false}, + {1, "golf", 1, false}, + {1, "goo", 1, false}, + {1, "goodyear", 1, false}, + {1, "goog", 1, false}, + {1, "google", 1, false}, + {1, "gop", 1, false}, + {1, "got", 1, false}, + {1, "grainger", 1, false}, + {1, "graphics", 1, false}, + {1, "gratis", 1, false}, + {1, "green", 1, false}, + {1, "gripe", 1, false}, + {1, "grocery", 1, false}, + {1, "group", 1, false}, + {1, "guardian", 1, false}, + {1, "gucci", 1, false}, + {1, "guge", 1, false}, + {1, "guide", 1, false}, + {1, "guitars", 1, false}, + {1, "guru", 1, false}, + {1, "hair", 1, false}, + {1, "hamburg", 1, false}, + {1, "hangout", 1, false}, + {1, "haus", 1, false}, + {1, "hbo", 1, false}, + {1, "hdfc", 1, false}, + {1, "hdfcbank", 1, false}, + {1, "health", 1, false}, + {1, "healthcare", 1, false}, + {1, "help", 1, false}, + {1, "helsinki", 1, false}, + {1, "here", 1, false}, + {1, "hermes", 1, false}, + {1, "hgtv", 1, false}, + {1, "hiphop", 1, false}, + {1, "hisamitsu", 1, false}, + {1, "hitachi", 1, false}, + {1, "hiv", 1, false}, + {1, "hkt", 1, false}, + {1, "hockey", 1, false}, + {1, "holdings", 1, false}, + {1, "holiday", 1, false}, + {1, "homedepot", 1, false}, + {1, "homegoods", 1, false}, + {1, "homes", 1, false}, + {1, "homesense", 1, false}, + {1, "honda", 1, false}, + {1, "horse", 1, false}, + {1, "hospital", 1, false}, + {1, "host", 1, false}, + {1, "hosting", 1, false}, + {1, "hot", 1, false}, + {1, "hoteles", 1, false}, + {1, "hotels", 1, false}, + {1, "hotmail", 1, false}, + {1, "house", 1, false}, + {1, "how", 1, false}, + {1, "hsbc", 1, false}, + {1, "hughes", 1, false}, + {1, "hyatt", 1, false}, + {1, "hyundai", 1, false}, + {1, "ibm", 1, false}, + {1, "icbc", 1, false}, + {1, "ice", 1, false}, + {1, "icu", 1, false}, + {1, "ieee", 1, false}, + {1, "ifm", 1, false}, + {1, "ikano", 1, false}, + {1, "imamat", 1, false}, + {1, "imdb", 1, false}, + {1, "immo", 1, false}, + {1, "immobilien", 1, false}, + {1, "inc", 1, false}, + {1, "industries", 1, false}, + {1, "infiniti", 1, false}, + {1, "ing", 1, false}, + {1, "ink", 1, false}, + {1, "institute", 1, false}, + {1, "insurance", 1, false}, + 
{1, "insure", 1, false}, + {1, "intel", 1, false}, + {1, "international", 1, false}, + {1, "intuit", 1, false}, + {1, "investments", 1, false}, + {1, "ipiranga", 1, false}, + {1, "irish", 1, false}, + {1, "ismaili", 1, false}, + {1, "ist", 1, false}, + {1, "istanbul", 1, false}, + {1, "itau", 1, false}, + {1, "itv", 1, false}, + {1, "iveco", 1, false}, + {1, "jaguar", 1, false}, + {1, "java", 1, false}, + {1, "jcb", 1, false}, + {1, "jcp", 1, false}, + {1, "jeep", 1, false}, + {1, "jetzt", 1, false}, + {1, "jewelry", 1, false}, + {1, "jio", 1, false}, + {1, "jll", 1, false}, + {1, "jmp", 1, false}, + {1, "jnj", 1, false}, + {1, "joburg", 1, false}, + {1, "jot", 1, false}, + {1, "joy", 1, false}, + {1, "jpmorgan", 1, false}, + {1, "jprs", 1, false}, + {1, "juegos", 1, false}, + {1, "juniper", 1, false}, + {1, "kaufen", 1, false}, + {1, "kddi", 1, false}, + {1, "kerryhotels", 1, false}, + {1, "kerrylogistics", 1, false}, + {1, "kerryproperties", 1, false}, + {1, "kfh", 1, false}, + {1, "kia", 1, false}, + {1, "kim", 1, false}, + {1, "kinder", 1, false}, + {1, "kindle", 1, false}, + {1, "kitchen", 1, false}, + {1, "kiwi", 1, false}, + {1, "koeln", 1, false}, + {1, "komatsu", 1, false}, + {1, "kosher", 1, false}, + {1, "kpmg", 1, false}, + {1, "kpn", 1, false}, + {1, "krd", 1, false}, + {1, "kred", 1, false}, + {1, "kuokgroup", 1, false}, + {1, "kyoto", 1, false}, + {1, "lacaixa", 1, false}, + {1, "ladbrokes", 1, false}, + {1, "lamborghini", 1, false}, + {1, "lamer", 1, false}, + {1, "lancaster", 1, false}, + {1, "lancia", 1, false}, + {1, "lancome", 1, false}, + {1, "land", 1, false}, + {1, "landrover", 1, false}, + {1, "lanxess", 1, false}, + {1, "lasalle", 1, false}, + {1, "lat", 1, false}, + {1, "latino", 1, false}, + {1, "latrobe", 1, false}, + {1, "law", 1, false}, + {1, "lawyer", 1, false}, + {1, "lds", 1, false}, + {1, "lease", 1, false}, + {1, "leclerc", 1, false}, + {1, "lefrak", 1, false}, + {1, "legal", 1, false}, + {1, "lego", 1, false}, + {1, "lexus", 1, false}, + {1, "lgbt", 1, false}, + {1, "liaison", 1, false}, + {1, "lidl", 1, false}, + {1, "life", 1, false}, + {1, "lifeinsurance", 1, false}, + {1, "lifestyle", 1, false}, + {1, "lighting", 1, false}, + {1, "like", 1, false}, + {1, "lilly", 1, false}, + {1, "limited", 1, false}, + {1, "limo", 1, false}, + {1, "lincoln", 1, false}, + {1, "linde", 1, false}, + {1, "link", 1, false}, + {1, "lipsy", 1, false}, + {1, "live", 1, false}, + {1, "living", 1, false}, + {1, "lixil", 1, false}, + {1, "llc", 1, false}, + {1, "loan", 1, false}, + {1, "loans", 1, false}, + {1, "locker", 1, false}, + {1, "locus", 1, false}, + {1, "loft", 1, false}, + {1, "lol", 1, false}, + {1, "london", 1, false}, + {1, "lotte", 1, false}, + {1, "lotto", 1, false}, + {1, "love", 1, false}, + {1, "lpl", 1, false}, + {1, "lplfinancial", 1, false}, + {1, "ltd", 1, false}, + {1, "ltda", 1, false}, + {1, "lundbeck", 1, false}, + {1, "lupin", 1, false}, + {1, "luxe", 1, false}, + {1, "luxury", 1, false}, + {1, "macys", 1, false}, + {1, "madrid", 1, false}, + {1, "maif", 1, false}, + {1, "maison", 1, false}, + {1, "makeup", 1, false}, + {1, "man", 1, false}, + {1, "management", 1, false}, + {1, "mango", 1, false}, + {1, "map", 1, false}, + {1, "market", 1, false}, + {1, "marketing", 1, false}, + {1, "markets", 1, false}, + {1, "marriott", 1, false}, + {1, "marshalls", 1, false}, + {1, "maserati", 1, false}, + {1, "mattel", 1, false}, + {1, "mba", 1, false}, + {1, "mckinsey", 1, false}, + {1, "med", 1, false}, + {1, "media", 1, false}, + {1, "meet", 1, false}, + {1, 
"melbourne", 1, false}, + {1, "meme", 1, false}, + {1, "memorial", 1, false}, + {1, "men", 1, false}, + {1, "menu", 1, false}, + {1, "merckmsd", 1, false}, + {1, "metlife", 1, false}, + {1, "miami", 1, false}, + {1, "microsoft", 1, false}, + {1, "mini", 1, false}, + {1, "mint", 1, false}, + {1, "mit", 1, false}, + {1, "mitsubishi", 1, false}, + {1, "mlb", 1, false}, + {1, "mls", 1, false}, + {1, "mma", 1, false}, + {1, "mobile", 1, false}, + {1, "mobily", 1, false}, + {1, "moda", 1, false}, + {1, "moe", 1, false}, + {1, "moi", 1, false}, + {1, "mom", 1, false}, + {1, "monash", 1, false}, + {1, "money", 1, false}, + {1, "monster", 1, false}, + {1, "mopar", 1, false}, + {1, "mormon", 1, false}, + {1, "mortgage", 1, false}, + {1, "moscow", 1, false}, + {1, "moto", 1, false}, + {1, "motorcycles", 1, false}, + {1, "mov", 1, false}, + {1, "movie", 1, false}, + {1, "movistar", 1, false}, + {1, "msd", 1, false}, + {1, "mtn", 1, false}, + {1, "mtr", 1, false}, + {1, "mutual", 1, false}, + {1, "nab", 1, false}, + {1, "nadex", 1, false}, + {1, "nagoya", 1, false}, + {1, "nationwide", 1, false}, + {1, "natura", 1, false}, + {1, "navy", 1, false}, + {1, "nba", 1, false}, + {1, "nec", 1, false}, + {1, "netbank", 1, false}, + {1, "netflix", 1, false}, + {1, "network", 1, false}, + {1, "neustar", 1, false}, + {1, "new", 1, false}, + {1, "newholland", 1, false}, + {1, "news", 1, false}, + {1, "next", 1, false}, + {1, "nextdirect", 1, false}, + {1, "nexus", 1, false}, + {1, "nfl", 1, false}, + {1, "ngo", 1, false}, + {1, "nhk", 1, false}, + {1, "nico", 1, false}, + {1, "nike", 1, false}, + {1, "nikon", 1, false}, + {1, "ninja", 1, false}, + {1, "nissan", 1, false}, + {1, "nissay", 1, false}, + {1, "nokia", 1, false}, + {1, "northwesternmutual", 1, false}, + {1, "norton", 1, false}, + {1, "now", 1, false}, + {1, "nowruz", 1, false}, + {1, "nowtv", 1, false}, + {1, "nra", 1, false}, + {1, "nrw", 1, false}, + {1, "ntt", 1, false}, + {1, "nyc", 1, false}, + {1, "obi", 1, false}, + {1, "observer", 1, false}, + {1, "off", 1, false}, + {1, "office", 1, false}, + {1, "okinawa", 1, false}, + {1, "olayan", 1, false}, + {1, "olayangroup", 1, false}, + {1, "oldnavy", 1, false}, + {1, "ollo", 1, false}, + {1, "omega", 1, false}, + {1, "one", 1, false}, + {1, "ong", 1, false}, + {1, "onl", 1, false}, + {1, "online", 1, false}, + {1, "onyourside", 1, false}, + {1, "ooo", 1, false}, + {1, "open", 1, false}, + {1, "oracle", 1, false}, + {1, "orange", 1, false}, + {1, "organic", 1, false}, + {1, "origins", 1, false}, + {1, "osaka", 1, false}, + {1, "otsuka", 1, false}, + {1, "ott", 1, false}, + {1, "ovh", 1, false}, + {1, "page", 1, false}, + {1, "panasonic", 1, false}, + {1, "paris", 1, false}, + {1, "pars", 1, false}, + {1, "partners", 1, false}, + {1, "parts", 1, false}, + {1, "party", 1, false}, + {1, "passagens", 1, false}, + {1, "pay", 1, false}, + {1, "pccw", 1, false}, + {1, "pet", 1, false}, + {1, "pfizer", 1, false}, + {1, "pharmacy", 1, false}, + {1, "phd", 1, false}, + {1, "philips", 1, false}, + {1, "phone", 1, false}, + {1, "photo", 1, false}, + {1, "photography", 1, false}, + {1, "photos", 1, false}, + {1, "physio", 1, false}, + {1, "piaget", 1, false}, + {1, "pics", 1, false}, + {1, "pictet", 1, false}, + {1, "pictures", 1, false}, + {1, "pid", 1, false}, + {1, "pin", 1, false}, + {1, "ping", 1, false}, + {1, "pink", 1, false}, + {1, "pioneer", 1, false}, + {1, "pizza", 1, false}, + {1, "place", 1, false}, + {1, "play", 1, false}, + {1, "playstation", 1, false}, + {1, "plumbing", 1, false}, + {1, "plus", 1, 
false}, + {1, "pnc", 1, false}, + {1, "pohl", 1, false}, + {1, "poker", 1, false}, + {1, "politie", 1, false}, + {1, "porn", 1, false}, + {1, "pramerica", 1, false}, + {1, "praxi", 1, false}, + {1, "press", 1, false}, + {1, "prime", 1, false}, + {1, "prod", 1, false}, + {1, "productions", 1, false}, + {1, "prof", 1, false}, + {1, "progressive", 1, false}, + {1, "promo", 1, false}, + {1, "properties", 1, false}, + {1, "property", 1, false}, + {1, "protection", 1, false}, + {1, "pru", 1, false}, + {1, "prudential", 1, false}, + {1, "pub", 1, false}, + {1, "pwc", 1, false}, + {1, "qpon", 1, false}, + {1, "quebec", 1, false}, + {1, "quest", 1, false}, + {1, "qvc", 1, false}, + {1, "racing", 1, false}, + {1, "radio", 1, false}, + {1, "raid", 1, false}, + {1, "read", 1, false}, + {1, "realestate", 1, false}, + {1, "realtor", 1, false}, + {1, "realty", 1, false}, + {1, "recipes", 1, false}, + {1, "red", 1, false}, + {1, "redstone", 1, false}, + {1, "redumbrella", 1, false}, + {1, "rehab", 1, false}, + {1, "reise", 1, false}, + {1, "reisen", 1, false}, + {1, "reit", 1, false}, + {1, "reliance", 1, false}, + {1, "ren", 1, false}, + {1, "rent", 1, false}, + {1, "rentals", 1, false}, + {1, "repair", 1, false}, + {1, "report", 1, false}, + {1, "republican", 1, false}, + {1, "rest", 1, false}, + {1, "restaurant", 1, false}, + {1, "review", 1, false}, + {1, "reviews", 1, false}, + {1, "rexroth", 1, false}, + {1, "rich", 1, false}, + {1, "richardli", 1, false}, + {1, "ricoh", 1, false}, + {1, "rightathome", 1, false}, + {1, "ril", 1, false}, + {1, "rio", 1, false}, + {1, "rip", 1, false}, + {1, "rmit", 1, false}, + {1, "rocher", 1, false}, + {1, "rocks", 1, false}, + {1, "rodeo", 1, false}, + {1, "rogers", 1, false}, + {1, "room", 1, false}, + {1, "rsvp", 1, false}, + {1, "rugby", 1, false}, + {1, "ruhr", 1, false}, + {1, "run", 1, false}, + {1, "rwe", 1, false}, + {1, "ryukyu", 1, false}, + {1, "saarland", 1, false}, + {1, "safe", 1, false}, + {1, "safety", 1, false}, + {1, "sakura", 1, false}, + {1, "sale", 1, false}, + {1, "salon", 1, false}, + {1, "samsclub", 1, false}, + {1, "samsung", 1, false}, + {1, "sandvik", 1, false}, + {1, "sandvikcoromant", 1, false}, + {1, "sanofi", 1, false}, + {1, "sap", 1, false}, + {1, "sarl", 1, false}, + {1, "sas", 1, false}, + {1, "save", 1, false}, + {1, "saxo", 1, false}, + {1, "sbi", 1, false}, + {1, "sbs", 1, false}, + {1, "sca", 1, false}, + {1, "scb", 1, false}, + {1, "schaeffler", 1, false}, + {1, "schmidt", 1, false}, + {1, "scholarships", 1, false}, + {1, "school", 1, false}, + {1, "schule", 1, false}, + {1, "schwarz", 1, false}, + {1, "science", 1, false}, + {1, "scjohnson", 1, false}, + {1, "scor", 1, false}, + {1, "scot", 1, false}, + {1, "search", 1, false}, + {1, "seat", 1, false}, + {1, "secure", 1, false}, + {1, "security", 1, false}, + {1, "seek", 1, false}, + {1, "select", 1, false}, + {1, "sener", 1, false}, + {1, "services", 1, false}, + {1, "ses", 1, false}, + {1, "seven", 1, false}, + {1, "sew", 1, false}, + {1, "sex", 1, false}, + {1, "sexy", 1, false}, + {1, "sfr", 1, false}, + {1, "shangrila", 1, false}, + {1, "sharp", 1, false}, + {1, "shaw", 1, false}, + {1, "shell", 1, false}, + {1, "shia", 1, false}, + {1, "shiksha", 1, false}, + {1, "shoes", 1, false}, + {1, "shop", 1, false}, + {1, "shopping", 1, false}, + {1, "shouji", 1, false}, + {1, "show", 1, false}, + {1, "showtime", 1, false}, + {1, "shriram", 1, false}, + {1, "silk", 1, false}, + {1, "sina", 1, false}, + {1, "singles", 1, false}, + {1, "site", 1, false}, + {1, "ski", 1, false}, 
+ {1, "skin", 1, false}, + {1, "sky", 1, false}, + {1, "skype", 1, false}, + {1, "sling", 1, false}, + {1, "smart", 1, false}, + {1, "smile", 1, false}, + {1, "sncf", 1, false}, + {1, "soccer", 1, false}, + {1, "social", 1, false}, + {1, "softbank", 1, false}, + {1, "software", 1, false}, + {1, "sohu", 1, false}, + {1, "solar", 1, false}, + {1, "solutions", 1, false}, + {1, "song", 1, false}, + {1, "sony", 1, false}, + {1, "soy", 1, false}, + {1, "space", 1, false}, + {1, "sport", 1, false}, + {1, "spot", 1, false}, + {1, "spreadbetting", 1, false}, + {1, "srl", 1, false}, + {1, "srt", 1, false}, + {1, "stada", 1, false}, + {1, "staples", 1, false}, + {1, "star", 1, false}, + {1, "statebank", 1, false}, + {1, "statefarm", 1, false}, + {1, "stc", 1, false}, + {1, "stcgroup", 1, false}, + {1, "stockholm", 1, false}, + {1, "storage", 1, false}, + {1, "store", 1, false}, + {1, "stream", 1, false}, + {1, "studio", 1, false}, + {1, "study", 1, false}, + {1, "style", 1, false}, + {1, "sucks", 1, false}, + {1, "supplies", 1, false}, + {1, "supply", 1, false}, + {1, "support", 1, false}, + {1, "surf", 1, false}, + {1, "surgery", 1, false}, + {1, "suzuki", 1, false}, + {1, "swatch", 1, false}, + {1, "swiftcover", 1, false}, + {1, "swiss", 1, false}, + {1, "sydney", 1, false}, + {1, "symantec", 1, false}, + {1, "systems", 1, false}, + {1, "tab", 1, false}, + {1, "taipei", 1, false}, + {1, "talk", 1, false}, + {1, "taobao", 1, false}, + {1, "target", 1, false}, + {1, "tatamotors", 1, false}, + {1, "tatar", 1, false}, + {1, "tattoo", 1, false}, + {1, "tax", 1, false}, + {1, "taxi", 1, false}, + {1, "tci", 1, false}, + {1, "tdk", 1, false}, + {1, "team", 1, false}, + {1, "tech", 1, false}, + {1, "technology", 1, false}, + {1, "telefonica", 1, false}, + {1, "temasek", 1, false}, + {1, "tennis", 1, false}, + {1, "teva", 1, false}, + {1, "thd", 1, false}, + {1, "theater", 1, false}, + {1, "theatre", 1, false}, + {1, "tiaa", 1, false}, + {1, "tickets", 1, false}, + {1, "tienda", 1, false}, + {1, "tiffany", 1, false}, + {1, "tips", 1, false}, + {1, "tires", 1, false}, + {1, "tirol", 1, false}, + {1, "tjmaxx", 1, false}, + {1, "tjx", 1, false}, + {1, "tkmaxx", 1, false}, + {1, "tmall", 1, false}, + {1, "today", 1, false}, + {1, "tokyo", 1, false}, + {1, "tools", 1, false}, + {1, "top", 1, false}, + {1, "toray", 1, false}, + {1, "toshiba", 1, false}, + {1, "total", 1, false}, + {1, "tours", 1, false}, + {1, "town", 1, false}, + {1, "toyota", 1, false}, + {1, "toys", 1, false}, + {1, "trade", 1, false}, + {1, "trading", 1, false}, + {1, "training", 1, false}, + {1, "travel", 1, false}, + {1, "travelchannel", 1, false}, + {1, "travelers", 1, false}, + {1, "travelersinsurance", 1, false}, + {1, "trust", 1, false}, + {1, "trv", 1, false}, + {1, "tube", 1, false}, + {1, "tui", 1, false}, + {1, "tunes", 1, false}, + {1, "tushu", 1, false}, + {1, "tvs", 1, false}, + {1, "ubank", 1, false}, + {1, "ubs", 1, false}, + {1, "uconnect", 1, false}, + {1, "unicom", 1, false}, + {1, "university", 1, false}, + {1, "uno", 1, false}, + {1, "uol", 1, false}, + {1, "ups", 1, false}, + {1, "vacations", 1, false}, + {1, "vana", 1, false}, + {1, "vanguard", 1, false}, + {1, "vegas", 1, false}, + {1, "ventures", 1, false}, + {1, "verisign", 1, false}, + {1, "versicherung", 1, false}, + {1, "vet", 1, false}, + {1, "viajes", 1, false}, + {1, "video", 1, false}, + {1, "vig", 1, false}, + {1, "viking", 1, false}, + {1, "villas", 1, false}, + {1, "vin", 1, false}, + {1, "vip", 1, false}, + {1, "virgin", 1, false}, + {1, "visa", 1, false}, 
+ {1, "vision", 1, false}, + {1, "vistaprint", 1, false}, + {1, "viva", 1, false}, + {1, "vivo", 1, false}, + {1, "vlaanderen", 1, false}, + {1, "vodka", 1, false}, + {1, "volkswagen", 1, false}, + {1, "volvo", 1, false}, + {1, "vote", 1, false}, + {1, "voting", 1, false}, + {1, "voto", 1, false}, + {1, "voyage", 1, false}, + {1, "vuelos", 1, false}, + {1, "wales", 1, false}, + {1, "walmart", 1, false}, + {1, "walter", 1, false}, + {1, "wang", 1, false}, + {1, "wanggou", 1, false}, + {1, "warman", 1, false}, + {1, "watch", 1, false}, + {1, "watches", 1, false}, + {1, "weather", 1, false}, + {1, "weatherchannel", 1, false}, + {1, "webcam", 1, false}, + {1, "weber", 1, false}, + {1, "website", 1, false}, + {1, "wed", 1, false}, + {1, "wedding", 1, false}, + {1, "weibo", 1, false}, + {1, "weir", 1, false}, + {1, "whoswho", 1, false}, + {1, "wien", 1, false}, + {1, "wiki", 1, false}, + {1, "williamhill", 1, false}, + {1, "win", 1, false}, + {1, "windows", 1, false}, + {1, "wine", 1, false}, + {1, "winners", 1, false}, + {1, "wme", 1, false}, + {1, "wolterskluwer", 1, false}, + {1, "woodside", 1, false}, + {1, "work", 1, false}, + {1, "works", 1, false}, + {1, "world", 1, false}, + {1, "wow", 1, false}, + {1, "wtc", 1, false}, + {1, "wtf", 1, false}, + {1, "xbox", 1, false}, + {1, "xerox", 1, false}, + {1, "xfinity", 1, false}, + {1, "xihuan", 1, false}, + {1, "xin", 1, false}, + {1, "xn--11b4c3d", 1, false}, + {1, "xn--1ck2e1b", 1, false}, + {1, "xn--1qqw23a", 1, false}, + {1, "xn--30rr7y", 1, false}, + {1, "xn--3bst00m", 1, false}, + {1, "xn--3ds443g", 1, false}, + {1, "xn--3oq18vl8pn36a", 1, false}, + {1, "xn--3pxu8k", 1, false}, + {1, "xn--42c2d9a", 1, false}, + {1, "xn--45q11c", 1, false}, + {1, "xn--4gbrim", 1, false}, + {1, "xn--55qw42g", 1, false}, + {1, "xn--55qx5d", 1, false}, + {1, "xn--5su34j936bgsg", 1, false}, + {1, "xn--5tzm5g", 1, false}, + {1, "xn--6frz82g", 1, false}, + {1, "xn--6qq986b3xl", 1, false}, + {1, "xn--80adxhks", 1, false}, + {1, "xn--80aqecdr1a", 1, false}, + {1, "xn--80asehdb", 1, false}, + {1, "xn--80aswg", 1, false}, + {1, "xn--8y0a063a", 1, false}, + {1, "xn--9dbq2a", 1, false}, + {1, "xn--9et52u", 1, false}, + {1, "xn--9krt00a", 1, false}, + {1, "xn--b4w605ferd", 1, false}, + {1, "xn--bck1b9a5dre4c", 1, false}, + {1, "xn--c1avg", 1, false}, + {1, "xn--c2br7g", 1, false}, + {1, "xn--cck2b3b", 1, false}, + {1, "xn--cg4bki", 1, false}, + {1, "xn--czr694b", 1, false}, + {1, "xn--czrs0t", 1, false}, + {1, "xn--czru2d", 1, false}, + {1, "xn--d1acj3b", 1, false}, + {1, "xn--eckvdtc9d", 1, false}, + {1, "xn--efvy88h", 1, false}, + {1, "xn--estv75g", 1, false}, + {1, "xn--fct429k", 1, false}, + {1, "xn--fhbei", 1, false}, + {1, "xn--fiq228c5hs", 1, false}, + {1, "xn--fiq64b", 1, false}, + {1, "xn--fjq720a", 1, false}, + {1, "xn--flw351e", 1, false}, + {1, "xn--fzys8d69uvgm", 1, false}, + {1, "xn--g2xx48c", 1, false}, + {1, "xn--gckr3f0f", 1, false}, + {1, "xn--gk3at1e", 1, false}, + {1, "xn--hxt814e", 1, false}, + {1, "xn--i1b6b1a6a2e", 1, false}, + {1, "xn--imr513n", 1, false}, + {1, "xn--io0a7i", 1, false}, + {1, "xn--j1aef", 1, false}, + {1, "xn--jlq61u9w7b", 1, false}, + {1, "xn--jvr189m", 1, false}, + {1, "xn--kcrx77d1x4a", 1, false}, + {1, "xn--kpu716f", 1, false}, + {1, "xn--kput3i", 1, false}, + {1, "xn--mgba3a3ejt", 1, false}, + {1, "xn--mgba7c0bbn0a", 1, false}, + {1, "xn--mgbaakc7dvf", 1, false}, + {1, "xn--mgbab2bd", 1, false}, + {1, "xn--mgbb9fbpob", 1, false}, + {1, "xn--mgbca7dzdo", 1, false}, + {1, "xn--mgbi4ecexp", 1, false}, + {1, "xn--mgbt3dhd", 1, 
false}, + {1, "xn--mk1bu44c", 1, false}, + {1, "xn--mxtq1m", 1, false}, + {1, "xn--ngbc5azd", 1, false}, + {1, "xn--ngbe9e0a", 1, false}, + {1, "xn--ngbrx", 1, false}, + {1, "xn--nqv7f", 1, false}, + {1, "xn--nqv7fs00ema", 1, false}, + {1, "xn--nyqy26a", 1, false}, + {1, "xn--otu796d", 1, false}, + {1, "xn--p1acf", 1, false}, + {1, "xn--pbt977c", 1, false}, + {1, "xn--pssy2u", 1, false}, + {1, "xn--q9jyb4c", 1, false}, + {1, "xn--qcka1pmc", 1, false}, + {1, "xn--rhqv96g", 1, false}, + {1, "xn--rovu88b", 1, false}, + {1, "xn--ses554g", 1, false}, + {1, "xn--t60b56a", 1, false}, + {1, "xn--tckwe", 1, false}, + {1, "xn--tiq49xqyj", 1, false}, + {1, "xn--unup4y", 1, false}, + {1, "xn--vermgensberater-ctb", 1, false}, + {1, "xn--vermgensberatung-pwb", 1, false}, + {1, "xn--vhquv", 1, false}, + {1, "xn--vuq861b", 1, false}, + {1, "xn--w4r85el8fhu5dnra", 1, false}, + {1, "xn--w4rs40l", 1, false}, + {1, "xn--xhq521b", 1, false}, + {1, "xn--zfr164b", 1, false}, + {1, "xyz", 1, false}, + {1, "yachts", 1, false}, + {1, "yahoo", 1, false}, + {1, "yamaxun", 1, false}, + {1, "yandex", 1, false}, + {1, "yodobashi", 1, false}, + {1, "yoga", 1, false}, + {1, "yokohama", 1, false}, + {1, "you", 1, false}, + {1, "youtube", 1, false}, + {1, "yun", 1, false}, + {1, "zappos", 1, false}, + {1, "zara", 1, false}, + {1, "zero", 1, false}, + {1, "zip", 1, false}, + {1, "zone", 1, false}, + {1, "zuerich", 1, false}, + {1, "cc.ua", 2, true}, + {1, "inf.ua", 2, true}, + {1, "ltd.ua", 2, true}, + {1, "beep.pl", 2, true}, + {1, "barsy.ca", 2, true}, + {2, "compute.estate", 3, true}, + {2, "alces.network", 3, true}, + {1, "alwaysdata.net", 2, true}, + {1, "cloudfront.net", 2, true}, + {2, "compute.amazonaws.com", 4, true}, + {2, "compute-1.amazonaws.com", 4, true}, + {2, "compute.amazonaws.com.cn", 5, true}, + {1, "us-east-1.amazonaws.com", 3, true}, + {1, "cn-north-1.eb.amazonaws.com.cn", 5, true}, + {1, "cn-northwest-1.eb.amazonaws.com.cn", 5, true}, + {1, "elasticbeanstalk.com", 2, true}, + {1, "ap-northeast-1.elasticbeanstalk.com", 3, true}, + {1, "ap-northeast-2.elasticbeanstalk.com", 3, true}, + {1, "ap-northeast-3.elasticbeanstalk.com", 3, true}, + {1, "ap-south-1.elasticbeanstalk.com", 3, true}, + {1, "ap-southeast-1.elasticbeanstalk.com", 3, true}, + {1, "ap-southeast-2.elasticbeanstalk.com", 3, true}, + {1, "ca-central-1.elasticbeanstalk.com", 3, true}, + {1, "eu-central-1.elasticbeanstalk.com", 3, true}, + {1, "eu-west-1.elasticbeanstalk.com", 3, true}, + {1, "eu-west-2.elasticbeanstalk.com", 3, true}, + {1, "eu-west-3.elasticbeanstalk.com", 3, true}, + {1, "sa-east-1.elasticbeanstalk.com", 3, true}, + {1, "us-east-1.elasticbeanstalk.com", 3, true}, + {1, "us-east-2.elasticbeanstalk.com", 3, true}, + {1, "us-gov-west-1.elasticbeanstalk.com", 3, true}, + {1, "us-west-1.elasticbeanstalk.com", 3, true}, + {1, "us-west-2.elasticbeanstalk.com", 3, true}, + {2, "elb.amazonaws.com", 4, true}, + {2, "elb.amazonaws.com.cn", 5, true}, + {1, "s3.amazonaws.com", 3, true}, + {1, "s3-ap-northeast-1.amazonaws.com", 3, true}, + {1, "s3-ap-northeast-2.amazonaws.com", 3, true}, + {1, "s3-ap-south-1.amazonaws.com", 3, true}, + {1, "s3-ap-southeast-1.amazonaws.com", 3, true}, + {1, "s3-ap-southeast-2.amazonaws.com", 3, true}, + {1, "s3-ca-central-1.amazonaws.com", 3, true}, + {1, "s3-eu-central-1.amazonaws.com", 3, true}, + {1, "s3-eu-west-1.amazonaws.com", 3, true}, + {1, "s3-eu-west-2.amazonaws.com", 3, true}, + {1, "s3-eu-west-3.amazonaws.com", 3, true}, + {1, "s3-external-1.amazonaws.com", 3, true}, + {1, 
"s3-fips-us-gov-west-1.amazonaws.com", 3, true}, + {1, "s3-sa-east-1.amazonaws.com", 3, true}, + {1, "s3-us-gov-west-1.amazonaws.com", 3, true}, + {1, "s3-us-east-2.amazonaws.com", 3, true}, + {1, "s3-us-west-1.amazonaws.com", 3, true}, + {1, "s3-us-west-2.amazonaws.com", 3, true}, + {1, "s3.ap-northeast-2.amazonaws.com", 4, true}, + {1, "s3.ap-south-1.amazonaws.com", 4, true}, + {1, "s3.cn-north-1.amazonaws.com.cn", 5, true}, + {1, "s3.ca-central-1.amazonaws.com", 4, true}, + {1, "s3.eu-central-1.amazonaws.com", 4, true}, + {1, "s3.eu-west-2.amazonaws.com", 4, true}, + {1, "s3.eu-west-3.amazonaws.com", 4, true}, + {1, "s3.us-east-2.amazonaws.com", 4, true}, + {1, "s3.dualstack.ap-northeast-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-northeast-2.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-south-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-southeast-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.ap-southeast-2.amazonaws.com", 5, true}, + {1, "s3.dualstack.ca-central-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-central-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-west-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-west-2.amazonaws.com", 5, true}, + {1, "s3.dualstack.eu-west-3.amazonaws.com", 5, true}, + {1, "s3.dualstack.sa-east-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.us-east-1.amazonaws.com", 5, true}, + {1, "s3.dualstack.us-east-2.amazonaws.com", 5, true}, + {1, "s3-website-us-east-1.amazonaws.com", 3, true}, + {1, "s3-website-us-west-1.amazonaws.com", 3, true}, + {1, "s3-website-us-west-2.amazonaws.com", 3, true}, + {1, "s3-website-ap-northeast-1.amazonaws.com", 3, true}, + {1, "s3-website-ap-southeast-1.amazonaws.com", 3, true}, + {1, "s3-website-ap-southeast-2.amazonaws.com", 3, true}, + {1, "s3-website-eu-west-1.amazonaws.com", 3, true}, + {1, "s3-website-sa-east-1.amazonaws.com", 3, true}, + {1, "s3-website.ap-northeast-2.amazonaws.com", 4, true}, + {1, "s3-website.ap-south-1.amazonaws.com", 4, true}, + {1, "s3-website.ca-central-1.amazonaws.com", 4, true}, + {1, "s3-website.eu-central-1.amazonaws.com", 4, true}, + {1, "s3-website.eu-west-2.amazonaws.com", 4, true}, + {1, "s3-website.eu-west-3.amazonaws.com", 4, true}, + {1, "s3-website.us-east-2.amazonaws.com", 4, true}, + {1, "t3l3p0rt.net", 2, true}, + {1, "tele.amune.org", 3, true}, + {1, "apigee.io", 2, true}, + {1, "on-aptible.com", 2, true}, + {1, "user.aseinet.ne.jp", 4, true}, + {1, "gv.vc", 2, true}, + {1, "d.gv.vc", 3, true}, + {1, "user.party.eus", 3, true}, + {1, "pimienta.org", 2, true}, + {1, "poivron.org", 2, true}, + {1, "potager.org", 2, true}, + {1, "sweetpepper.org", 2, true}, + {1, "myasustor.com", 2, true}, + {1, "go-vip.co", 2, true}, + {1, "go-vip.net", 2, true}, + {1, "wpcomstaging.com", 2, true}, + {1, "myfritz.net", 2, true}, + {2, "awdev.ca", 3, true}, + {2, "advisor.ws", 3, true}, + {1, "b-data.io", 2, true}, + {1, "backplaneapp.io", 2, true}, + {1, "balena-devices.com", 2, true}, + {1, "app.banzaicloud.io", 3, true}, + {1, "betainabox.com", 2, true}, + {1, "bnr.la", 2, true}, + {1, "blackbaudcdn.net", 2, true}, + {1, "boomla.net", 2, true}, + {1, "boxfuse.io", 2, true}, + {1, "square7.ch", 2, true}, + {1, "bplaced.com", 2, true}, + {1, "bplaced.de", 2, true}, + {1, "square7.de", 2, true}, + {1, "bplaced.net", 2, true}, + {1, "square7.net", 2, true}, + {1, "browsersafetymark.io", 2, true}, + {1, "uk0.bigv.io", 3, true}, + {1, "dh.bytemark.co.uk", 4, true}, + {1, "vm.bytemark.co.uk", 4, true}, + {1, "mycd.eu", 2, true}, + {1, "carrd.co", 2, true}, + {1, "crd.co", 
2, true}, + {1, "uwu.ai", 2, true}, + {1, "ae.org", 2, true}, + {1, "ar.com", 2, true}, + {1, "br.com", 2, true}, + {1, "cn.com", 2, true}, + {1, "com.de", 2, true}, + {1, "com.se", 2, true}, + {1, "de.com", 2, true}, + {1, "eu.com", 2, true}, + {1, "gb.com", 2, true}, + {1, "gb.net", 2, true}, + {1, "hu.com", 2, true}, + {1, "hu.net", 2, true}, + {1, "jp.net", 2, true}, + {1, "jpn.com", 2, true}, + {1, "kr.com", 2, true}, + {1, "mex.com", 2, true}, + {1, "no.com", 2, true}, + {1, "qc.com", 2, true}, + {1, "ru.com", 2, true}, + {1, "sa.com", 2, true}, + {1, "se.net", 2, true}, + {1, "uk.com", 2, true}, + {1, "uk.net", 2, true}, + {1, "us.com", 2, true}, + {1, "uy.com", 2, true}, + {1, "za.bz", 2, true}, + {1, "za.com", 2, true}, + {1, "africa.com", 2, true}, + {1, "gr.com", 2, true}, + {1, "in.net", 2, true}, + {1, "us.org", 2, true}, + {1, "co.com", 2, true}, + {1, "c.la", 2, true}, + {1, "certmgr.org", 2, true}, + {1, "xenapponazure.com", 2, true}, + {1, "discourse.group", 2, true}, + {1, "virtueeldomein.nl", 2, true}, + {1, "cleverapps.io", 2, true}, + {2, "lcl.dev", 3, true}, + {2, "stg.dev", 3, true}, + {1, "c66.me", 2, true}, + {1, "cloud66.ws", 2, true}, + {1, "cloud66.zone", 2, true}, + {1, "jdevcloud.com", 2, true}, + {1, "wpdevcloud.com", 2, true}, + {1, "cloudaccess.host", 2, true}, + {1, "freesite.host", 2, true}, + {1, "cloudaccess.net", 2, true}, + {1, "cloudcontrolled.com", 2, true}, + {1, "cloudcontrolapp.com", 2, true}, + {1, "cloudera.site", 2, true}, + {1, "trycloudflare.com", 2, true}, + {1, "workers.dev", 2, true}, + {1, "wnext.app", 2, true}, + {1, "co.ca", 2, true}, + {2, "otap.co", 3, true}, + {1, "co.cz", 2, true}, + {1, "c.cdn77.org", 3, true}, + {1, "cdn77-ssl.net", 2, true}, + {1, "r.cdn77.net", 3, true}, + {1, "rsc.cdn77.org", 3, true}, + {1, "ssl.origin.cdn77-secure.org", 4, true}, + {1, "cloudns.asia", 2, true}, + {1, "cloudns.biz", 2, true}, + {1, "cloudns.club", 2, true}, + {1, "cloudns.cc", 2, true}, + {1, "cloudns.eu", 2, true}, + {1, "cloudns.in", 2, true}, + {1, "cloudns.info", 2, true}, + {1, "cloudns.org", 2, true}, + {1, "cloudns.pro", 2, true}, + {1, "cloudns.pw", 2, true}, + {1, "cloudns.us", 2, true}, + {1, "cloudeity.net", 2, true}, + {1, "cnpy.gdn", 2, true}, + {1, "co.nl", 2, true}, + {1, "co.no", 2, true}, + {1, "webhosting.be", 2, true}, + {1, "hosting-cluster.nl", 2, true}, + {1, "dyn.cosidns.de", 3, true}, + {1, "dynamisches-dns.de", 2, true}, + {1, "dnsupdater.de", 2, true}, + {1, "internet-dns.de", 2, true}, + {1, "l-o-g-i-n.de", 2, true}, + {1, "dynamic-dns.info", 2, true}, + {1, "feste-ip.net", 2, true}, + {1, "knx-server.net", 2, true}, + {1, "static-access.net", 2, true}, + {1, "realm.cz", 2, true}, + {2, "cryptonomic.net", 3, true}, + {1, "cupcake.is", 2, true}, + {1, "cyon.link", 2, true}, + {1, "cyon.site", 2, true}, + {1, "daplie.me", 2, true}, + {1, "localhost.daplie.me", 3, true}, + {1, "dattolocal.com", 2, true}, + {1, "dattorelay.com", 2, true}, + {1, "dattoweb.com", 2, true}, + {1, "mydatto.com", 2, true}, + {1, "dattolocal.net", 2, true}, + {1, "mydatto.net", 2, true}, + {1, "biz.dk", 2, true}, + {1, "co.dk", 2, true}, + {1, "firm.dk", 2, true}, + {1, "reg.dk", 2, true}, + {1, "store.dk", 2, true}, + {2, "dapps.earth", 3, true}, + {2, "bzz.dapps.earth", 4, true}, + {1, "debian.net", 2, true}, + {1, "dedyn.io", 2, true}, + {1, "dnshome.de", 2, true}, + {1, "online.th", 2, true}, + {1, "shop.th", 2, true}, + {1, "drayddns.com", 2, true}, + {1, "dreamhosters.com", 2, true}, + {1, "mydrobo.com", 2, true}, + {1, "drud.io", 2, 
true}, + {1, "drud.us", 2, true}, + {1, "duckdns.org", 2, true}, + {1, "dy.fi", 2, true}, + {1, "tunk.org", 2, true}, + {1, "dyndns-at-home.com", 2, true}, + {1, "dyndns-at-work.com", 2, true}, + {1, "dyndns-blog.com", 2, true}, + {1, "dyndns-free.com", 2, true}, + {1, "dyndns-home.com", 2, true}, + {1, "dyndns-ip.com", 2, true}, + {1, "dyndns-mail.com", 2, true}, + {1, "dyndns-office.com", 2, true}, + {1, "dyndns-pics.com", 2, true}, + {1, "dyndns-remote.com", 2, true}, + {1, "dyndns-server.com", 2, true}, + {1, "dyndns-web.com", 2, true}, + {1, "dyndns-wiki.com", 2, true}, + {1, "dyndns-work.com", 2, true}, + {1, "dyndns.biz", 2, true}, + {1, "dyndns.info", 2, true}, + {1, "dyndns.org", 2, true}, + {1, "dyndns.tv", 2, true}, + {1, "at-band-camp.net", 2, true}, + {1, "ath.cx", 2, true}, + {1, "barrel-of-knowledge.info", 2, true}, + {1, "barrell-of-knowledge.info", 2, true}, + {1, "better-than.tv", 2, true}, + {1, "blogdns.com", 2, true}, + {1, "blogdns.net", 2, true}, + {1, "blogdns.org", 2, true}, + {1, "blogsite.org", 2, true}, + {1, "boldlygoingnowhere.org", 2, true}, + {1, "broke-it.net", 2, true}, + {1, "buyshouses.net", 2, true}, + {1, "cechire.com", 2, true}, + {1, "dnsalias.com", 2, true}, + {1, "dnsalias.net", 2, true}, + {1, "dnsalias.org", 2, true}, + {1, "dnsdojo.com", 2, true}, + {1, "dnsdojo.net", 2, true}, + {1, "dnsdojo.org", 2, true}, + {1, "does-it.net", 2, true}, + {1, "doesntexist.com", 2, true}, + {1, "doesntexist.org", 2, true}, + {1, "dontexist.com", 2, true}, + {1, "dontexist.net", 2, true}, + {1, "dontexist.org", 2, true}, + {1, "doomdns.com", 2, true}, + {1, "doomdns.org", 2, true}, + {1, "dvrdns.org", 2, true}, + {1, "dyn-o-saur.com", 2, true}, + {1, "dynalias.com", 2, true}, + {1, "dynalias.net", 2, true}, + {1, "dynalias.org", 2, true}, + {1, "dynathome.net", 2, true}, + {1, "dyndns.ws", 2, true}, + {1, "endofinternet.net", 2, true}, + {1, "endofinternet.org", 2, true}, + {1, "endoftheinternet.org", 2, true}, + {1, "est-a-la-maison.com", 2, true}, + {1, "est-a-la-masion.com", 2, true}, + {1, "est-le-patron.com", 2, true}, + {1, "est-mon-blogueur.com", 2, true}, + {1, "for-better.biz", 2, true}, + {1, "for-more.biz", 2, true}, + {1, "for-our.info", 2, true}, + {1, "for-some.biz", 2, true}, + {1, "for-the.biz", 2, true}, + {1, "forgot.her.name", 3, true}, + {1, "forgot.his.name", 3, true}, + {1, "from-ak.com", 2, true}, + {1, "from-al.com", 2, true}, + {1, "from-ar.com", 2, true}, + {1, "from-az.net", 2, true}, + {1, "from-ca.com", 2, true}, + {1, "from-co.net", 2, true}, + {1, "from-ct.com", 2, true}, + {1, "from-dc.com", 2, true}, + {1, "from-de.com", 2, true}, + {1, "from-fl.com", 2, true}, + {1, "from-ga.com", 2, true}, + {1, "from-hi.com", 2, true}, + {1, "from-ia.com", 2, true}, + {1, "from-id.com", 2, true}, + {1, "from-il.com", 2, true}, + {1, "from-in.com", 2, true}, + {1, "from-ks.com", 2, true}, + {1, "from-ky.com", 2, true}, + {1, "from-la.net", 2, true}, + {1, "from-ma.com", 2, true}, + {1, "from-md.com", 2, true}, + {1, "from-me.org", 2, true}, + {1, "from-mi.com", 2, true}, + {1, "from-mn.com", 2, true}, + {1, "from-mo.com", 2, true}, + {1, "from-ms.com", 2, true}, + {1, "from-mt.com", 2, true}, + {1, "from-nc.com", 2, true}, + {1, "from-nd.com", 2, true}, + {1, "from-ne.com", 2, true}, + {1, "from-nh.com", 2, true}, + {1, "from-nj.com", 2, true}, + {1, "from-nm.com", 2, true}, + {1, "from-nv.com", 2, true}, + {1, "from-ny.net", 2, true}, + {1, "from-oh.com", 2, true}, + {1, "from-ok.com", 2, true}, + {1, "from-or.com", 2, true}, + {1, 
"from-pa.com", 2, true}, + {1, "from-pr.com", 2, true}, + {1, "from-ri.com", 2, true}, + {1, "from-sc.com", 2, true}, + {1, "from-sd.com", 2, true}, + {1, "from-tn.com", 2, true}, + {1, "from-tx.com", 2, true}, + {1, "from-ut.com", 2, true}, + {1, "from-va.com", 2, true}, + {1, "from-vt.com", 2, true}, + {1, "from-wa.com", 2, true}, + {1, "from-wi.com", 2, true}, + {1, "from-wv.com", 2, true}, + {1, "from-wy.com", 2, true}, + {1, "ftpaccess.cc", 2, true}, + {1, "fuettertdasnetz.de", 2, true}, + {1, "game-host.org", 2, true}, + {1, "game-server.cc", 2, true}, + {1, "getmyip.com", 2, true}, + {1, "gets-it.net", 2, true}, + {1, "go.dyndns.org", 3, true}, + {1, "gotdns.com", 2, true}, + {1, "gotdns.org", 2, true}, + {1, "groks-the.info", 2, true}, + {1, "groks-this.info", 2, true}, + {1, "ham-radio-op.net", 2, true}, + {1, "here-for-more.info", 2, true}, + {1, "hobby-site.com", 2, true}, + {1, "hobby-site.org", 2, true}, + {1, "home.dyndns.org", 3, true}, + {1, "homedns.org", 2, true}, + {1, "homeftp.net", 2, true}, + {1, "homeftp.org", 2, true}, + {1, "homeip.net", 2, true}, + {1, "homelinux.com", 2, true}, + {1, "homelinux.net", 2, true}, + {1, "homelinux.org", 2, true}, + {1, "homeunix.com", 2, true}, + {1, "homeunix.net", 2, true}, + {1, "homeunix.org", 2, true}, + {1, "iamallama.com", 2, true}, + {1, "in-the-band.net", 2, true}, + {1, "is-a-anarchist.com", 2, true}, + {1, "is-a-blogger.com", 2, true}, + {1, "is-a-bookkeeper.com", 2, true}, + {1, "is-a-bruinsfan.org", 2, true}, + {1, "is-a-bulls-fan.com", 2, true}, + {1, "is-a-candidate.org", 2, true}, + {1, "is-a-caterer.com", 2, true}, + {1, "is-a-celticsfan.org", 2, true}, + {1, "is-a-chef.com", 2, true}, + {1, "is-a-chef.net", 2, true}, + {1, "is-a-chef.org", 2, true}, + {1, "is-a-conservative.com", 2, true}, + {1, "is-a-cpa.com", 2, true}, + {1, "is-a-cubicle-slave.com", 2, true}, + {1, "is-a-democrat.com", 2, true}, + {1, "is-a-designer.com", 2, true}, + {1, "is-a-doctor.com", 2, true}, + {1, "is-a-financialadvisor.com", 2, true}, + {1, "is-a-geek.com", 2, true}, + {1, "is-a-geek.net", 2, true}, + {1, "is-a-geek.org", 2, true}, + {1, "is-a-green.com", 2, true}, + {1, "is-a-guru.com", 2, true}, + {1, "is-a-hard-worker.com", 2, true}, + {1, "is-a-hunter.com", 2, true}, + {1, "is-a-knight.org", 2, true}, + {1, "is-a-landscaper.com", 2, true}, + {1, "is-a-lawyer.com", 2, true}, + {1, "is-a-liberal.com", 2, true}, + {1, "is-a-libertarian.com", 2, true}, + {1, "is-a-linux-user.org", 2, true}, + {1, "is-a-llama.com", 2, true}, + {1, "is-a-musician.com", 2, true}, + {1, "is-a-nascarfan.com", 2, true}, + {1, "is-a-nurse.com", 2, true}, + {1, "is-a-painter.com", 2, true}, + {1, "is-a-patsfan.org", 2, true}, + {1, "is-a-personaltrainer.com", 2, true}, + {1, "is-a-photographer.com", 2, true}, + {1, "is-a-player.com", 2, true}, + {1, "is-a-republican.com", 2, true}, + {1, "is-a-rockstar.com", 2, true}, + {1, "is-a-socialist.com", 2, true}, + {1, "is-a-soxfan.org", 2, true}, + {1, "is-a-student.com", 2, true}, + {1, "is-a-teacher.com", 2, true}, + {1, "is-a-techie.com", 2, true}, + {1, "is-a-therapist.com", 2, true}, + {1, "is-an-accountant.com", 2, true}, + {1, "is-an-actor.com", 2, true}, + {1, "is-an-actress.com", 2, true}, + {1, "is-an-anarchist.com", 2, true}, + {1, "is-an-artist.com", 2, true}, + {1, "is-an-engineer.com", 2, true}, + {1, "is-an-entertainer.com", 2, true}, + {1, "is-by.us", 2, true}, + {1, "is-certified.com", 2, true}, + {1, "is-found.org", 2, true}, + {1, "is-gone.com", 2, true}, + {1, "is-into-anime.com", 2, true}, + {1, 
"is-into-cars.com", 2, true}, + {1, "is-into-cartoons.com", 2, true}, + {1, "is-into-games.com", 2, true}, + {1, "is-leet.com", 2, true}, + {1, "is-lost.org", 2, true}, + {1, "is-not-certified.com", 2, true}, + {1, "is-saved.org", 2, true}, + {1, "is-slick.com", 2, true}, + {1, "is-uberleet.com", 2, true}, + {1, "is-very-bad.org", 2, true}, + {1, "is-very-evil.org", 2, true}, + {1, "is-very-good.org", 2, true}, + {1, "is-very-nice.org", 2, true}, + {1, "is-very-sweet.org", 2, true}, + {1, "is-with-theband.com", 2, true}, + {1, "isa-geek.com", 2, true}, + {1, "isa-geek.net", 2, true}, + {1, "isa-geek.org", 2, true}, + {1, "isa-hockeynut.com", 2, true}, + {1, "issmarterthanyou.com", 2, true}, + {1, "isteingeek.de", 2, true}, + {1, "istmein.de", 2, true}, + {1, "kicks-ass.net", 2, true}, + {1, "kicks-ass.org", 2, true}, + {1, "knowsitall.info", 2, true}, + {1, "land-4-sale.us", 2, true}, + {1, "lebtimnetz.de", 2, true}, + {1, "leitungsen.de", 2, true}, + {1, "likes-pie.com", 2, true}, + {1, "likescandy.com", 2, true}, + {1, "merseine.nu", 2, true}, + {1, "mine.nu", 2, true}, + {1, "misconfused.org", 2, true}, + {1, "mypets.ws", 2, true}, + {1, "myphotos.cc", 2, true}, + {1, "neat-url.com", 2, true}, + {1, "office-on-the.net", 2, true}, + {1, "on-the-web.tv", 2, true}, + {1, "podzone.net", 2, true}, + {1, "podzone.org", 2, true}, + {1, "readmyblog.org", 2, true}, + {1, "saves-the-whales.com", 2, true}, + {1, "scrapper-site.net", 2, true}, + {1, "scrapping.cc", 2, true}, + {1, "selfip.biz", 2, true}, + {1, "selfip.com", 2, true}, + {1, "selfip.info", 2, true}, + {1, "selfip.net", 2, true}, + {1, "selfip.org", 2, true}, + {1, "sells-for-less.com", 2, true}, + {1, "sells-for-u.com", 2, true}, + {1, "sells-it.net", 2, true}, + {1, "sellsyourhome.org", 2, true}, + {1, "servebbs.com", 2, true}, + {1, "servebbs.net", 2, true}, + {1, "servebbs.org", 2, true}, + {1, "serveftp.net", 2, true}, + {1, "serveftp.org", 2, true}, + {1, "servegame.org", 2, true}, + {1, "shacknet.nu", 2, true}, + {1, "simple-url.com", 2, true}, + {1, "space-to-rent.com", 2, true}, + {1, "stuff-4-sale.org", 2, true}, + {1, "stuff-4-sale.us", 2, true}, + {1, "teaches-yoga.com", 2, true}, + {1, "thruhere.net", 2, true}, + {1, "traeumtgerade.de", 2, true}, + {1, "webhop.biz", 2, true}, + {1, "webhop.info", 2, true}, + {1, "webhop.net", 2, true}, + {1, "webhop.org", 2, true}, + {1, "worse-than.tv", 2, true}, + {1, "writesthisblog.com", 2, true}, + {1, "ddnss.de", 2, true}, + {1, "dyn.ddnss.de", 3, true}, + {1, "dyndns.ddnss.de", 3, true}, + {1, "dyndns1.de", 2, true}, + {1, "dyn-ip24.de", 2, true}, + {1, "home-webserver.de", 2, true}, + {1, "dyn.home-webserver.de", 3, true}, + {1, "myhome-server.de", 2, true}, + {1, "ddnss.org", 2, true}, + {1, "definima.net", 2, true}, + {1, "definima.io", 2, true}, + {1, "bci.dnstrace.pro", 3, true}, + {1, "ddnsfree.com", 2, true}, + {1, "ddnsgeek.com", 2, true}, + {1, "giize.com", 2, true}, + {1, "gleeze.com", 2, true}, + {1, "kozow.com", 2, true}, + {1, "loseyourip.com", 2, true}, + {1, "ooguy.com", 2, true}, + {1, "theworkpc.com", 2, true}, + {1, "casacam.net", 2, true}, + {1, "dynu.net", 2, true}, + {1, "accesscam.org", 2, true}, + {1, "camdvr.org", 2, true}, + {1, "freeddns.org", 2, true}, + {1, "mywire.org", 2, true}, + {1, "webredirect.org", 2, true}, + {1, "myddns.rocks", 2, true}, + {1, "blogsite.xyz", 2, true}, + {1, "dynv6.net", 2, true}, + {1, "e4.cz", 2, true}, + {1, "mytuleap.com", 2, true}, + {1, "onred.one", 2, true}, + {1, "staging.onred.one", 3, true}, + {1, "enonic.io", 2, 
true}, + {1, "customer.enonic.io", 3, true}, + {1, "eu.org", 2, true}, + {1, "al.eu.org", 3, true}, + {1, "asso.eu.org", 3, true}, + {1, "at.eu.org", 3, true}, + {1, "au.eu.org", 3, true}, + {1, "be.eu.org", 3, true}, + {1, "bg.eu.org", 3, true}, + {1, "ca.eu.org", 3, true}, + {1, "cd.eu.org", 3, true}, + {1, "ch.eu.org", 3, true}, + {1, "cn.eu.org", 3, true}, + {1, "cy.eu.org", 3, true}, + {1, "cz.eu.org", 3, true}, + {1, "de.eu.org", 3, true}, + {1, "dk.eu.org", 3, true}, + {1, "edu.eu.org", 3, true}, + {1, "ee.eu.org", 3, true}, + {1, "es.eu.org", 3, true}, + {1, "fi.eu.org", 3, true}, + {1, "fr.eu.org", 3, true}, + {1, "gr.eu.org", 3, true}, + {1, "hr.eu.org", 3, true}, + {1, "hu.eu.org", 3, true}, + {1, "ie.eu.org", 3, true}, + {1, "il.eu.org", 3, true}, + {1, "in.eu.org", 3, true}, + {1, "int.eu.org", 3, true}, + {1, "is.eu.org", 3, true}, + {1, "it.eu.org", 3, true}, + {1, "jp.eu.org", 3, true}, + {1, "kr.eu.org", 3, true}, + {1, "lt.eu.org", 3, true}, + {1, "lu.eu.org", 3, true}, + {1, "lv.eu.org", 3, true}, + {1, "mc.eu.org", 3, true}, + {1, "me.eu.org", 3, true}, + {1, "mk.eu.org", 3, true}, + {1, "mt.eu.org", 3, true}, + {1, "my.eu.org", 3, true}, + {1, "net.eu.org", 3, true}, + {1, "ng.eu.org", 3, true}, + {1, "nl.eu.org", 3, true}, + {1, "no.eu.org", 3, true}, + {1, "nz.eu.org", 3, true}, + {1, "paris.eu.org", 3, true}, + {1, "pl.eu.org", 3, true}, + {1, "pt.eu.org", 3, true}, + {1, "q-a.eu.org", 3, true}, + {1, "ro.eu.org", 3, true}, + {1, "ru.eu.org", 3, true}, + {1, "se.eu.org", 3, true}, + {1, "si.eu.org", 3, true}, + {1, "sk.eu.org", 3, true}, + {1, "tr.eu.org", 3, true}, + {1, "uk.eu.org", 3, true}, + {1, "us.eu.org", 3, true}, + {1, "eu-1.evennode.com", 3, true}, + {1, "eu-2.evennode.com", 3, true}, + {1, "eu-3.evennode.com", 3, true}, + {1, "eu-4.evennode.com", 3, true}, + {1, "us-1.evennode.com", 3, true}, + {1, "us-2.evennode.com", 3, true}, + {1, "us-3.evennode.com", 3, true}, + {1, "us-4.evennode.com", 3, true}, + {1, "twmail.cc", 2, true}, + {1, "twmail.net", 2, true}, + {1, "twmail.org", 2, true}, + {1, "mymailer.com.tw", 3, true}, + {1, "url.tw", 2, true}, + {1, "apps.fbsbx.com", 3, true}, + {1, "ru.net", 2, true}, + {1, "adygeya.ru", 2, true}, + {1, "bashkiria.ru", 2, true}, + {1, "bir.ru", 2, true}, + {1, "cbg.ru", 2, true}, + {1, "com.ru", 2, true}, + {1, "dagestan.ru", 2, true}, + {1, "grozny.ru", 2, true}, + {1, "kalmykia.ru", 2, true}, + {1, "kustanai.ru", 2, true}, + {1, "marine.ru", 2, true}, + {1, "mordovia.ru", 2, true}, + {1, "msk.ru", 2, true}, + {1, "mytis.ru", 2, true}, + {1, "nalchik.ru", 2, true}, + {1, "nov.ru", 2, true}, + {1, "pyatigorsk.ru", 2, true}, + {1, "spb.ru", 2, true}, + {1, "vladikavkaz.ru", 2, true}, + {1, "vladimir.ru", 2, true}, + {1, "abkhazia.su", 2, true}, + {1, "adygeya.su", 2, true}, + {1, "aktyubinsk.su", 2, true}, + {1, "arkhangelsk.su", 2, true}, + {1, "armenia.su", 2, true}, + {1, "ashgabad.su", 2, true}, + {1, "azerbaijan.su", 2, true}, + {1, "balashov.su", 2, true}, + {1, "bashkiria.su", 2, true}, + {1, "bryansk.su", 2, true}, + {1, "bukhara.su", 2, true}, + {1, "chimkent.su", 2, true}, + {1, "dagestan.su", 2, true}, + {1, "east-kazakhstan.su", 2, true}, + {1, "exnet.su", 2, true}, + {1, "georgia.su", 2, true}, + {1, "grozny.su", 2, true}, + {1, "ivanovo.su", 2, true}, + {1, "jambyl.su", 2, true}, + {1, "kalmykia.su", 2, true}, + {1, "kaluga.su", 2, true}, + {1, "karacol.su", 2, true}, + {1, "karaganda.su", 2, true}, + {1, "karelia.su", 2, true}, + {1, "khakassia.su", 2, true}, + {1, "krasnodar.su", 2, true}, + {1, 
"kurgan.su", 2, true}, + {1, "kustanai.su", 2, true}, + {1, "lenug.su", 2, true}, + {1, "mangyshlak.su", 2, true}, + {1, "mordovia.su", 2, true}, + {1, "msk.su", 2, true}, + {1, "murmansk.su", 2, true}, + {1, "nalchik.su", 2, true}, + {1, "navoi.su", 2, true}, + {1, "north-kazakhstan.su", 2, true}, + {1, "nov.su", 2, true}, + {1, "obninsk.su", 2, true}, + {1, "penza.su", 2, true}, + {1, "pokrovsk.su", 2, true}, + {1, "sochi.su", 2, true}, + {1, "spb.su", 2, true}, + {1, "tashkent.su", 2, true}, + {1, "termez.su", 2, true}, + {1, "togliatti.su", 2, true}, + {1, "troitsk.su", 2, true}, + {1, "tselinograd.su", 2, true}, + {1, "tula.su", 2, true}, + {1, "tuva.su", 2, true}, + {1, "vladikavkaz.su", 2, true}, + {1, "vladimir.su", 2, true}, + {1, "vologda.su", 2, true}, + {1, "channelsdvr.net", 2, true}, + {1, "fastly-terrarium.com", 2, true}, + {1, "fastlylb.net", 2, true}, + {1, "map.fastlylb.net", 3, true}, + {1, "freetls.fastly.net", 3, true}, + {1, "map.fastly.net", 3, true}, + {1, "a.prod.fastly.net", 4, true}, + {1, "global.prod.fastly.net", 4, true}, + {1, "a.ssl.fastly.net", 4, true}, + {1, "b.ssl.fastly.net", 4, true}, + {1, "global.ssl.fastly.net", 4, true}, + {1, "fastpanel.direct", 2, true}, + {1, "fastvps-server.com", 2, true}, + {1, "fhapp.xyz", 2, true}, + {1, "fedorainfracloud.org", 2, true}, + {1, "fedorapeople.org", 2, true}, + {1, "cloud.fedoraproject.org", 3, true}, + {1, "app.os.fedoraproject.org", 4, true}, + {1, "app.os.stg.fedoraproject.org", 5, true}, + {1, "mydobiss.com", 2, true}, + {1, "filegear.me", 2, true}, + {1, "filegear-au.me", 2, true}, + {1, "filegear-de.me", 2, true}, + {1, "filegear-gb.me", 2, true}, + {1, "filegear-ie.me", 2, true}, + {1, "filegear-jp.me", 2, true}, + {1, "filegear-sg.me", 2, true}, + {1, "firebaseapp.com", 2, true}, + {1, "flynnhub.com", 2, true}, + {1, "flynnhosting.net", 2, true}, + {1, "freebox-os.com", 2, true}, + {1, "freeboxos.com", 2, true}, + {1, "fbx-os.fr", 2, true}, + {1, "fbxos.fr", 2, true}, + {1, "freebox-os.fr", 2, true}, + {1, "freeboxos.fr", 2, true}, + {1, "freedesktop.org", 2, true}, + {2, "futurecms.at", 3, true}, + {2, "ex.futurecms.at", 4, true}, + {2, "in.futurecms.at", 4, true}, + {1, "futurehosting.at", 2, true}, + {1, "futuremailing.at", 2, true}, + {2, "ex.ortsinfo.at", 4, true}, + {2, "kunden.ortsinfo.at", 4, true}, + {2, "statics.cloud", 3, true}, + {1, "service.gov.uk", 3, true}, + {1, "gehirn.ne.jp", 3, true}, + {1, "usercontent.jp", 2, true}, + {1, "lab.ms", 2, true}, + {1, "github.io", 2, true}, + {1, "githubusercontent.com", 2, true}, + {1, "gitlab.io", 2, true}, + {1, "glitch.me", 2, true}, + {1, "cloudapps.digital", 2, true}, + {1, "london.cloudapps.digital", 3, true}, + {1, "homeoffice.gov.uk", 3, true}, + {1, "ro.im", 2, true}, + {1, "shop.ro", 2, true}, + {1, "goip.de", 2, true}, + {1, "run.app", 2, true}, + {1, "a.run.app", 3, true}, + {1, "web.app", 2, true}, + {2, "0emm.com", 3, true}, + {1, "appspot.com", 2, true}, + {1, "blogspot.ae", 2, true}, + {1, "blogspot.al", 2, true}, + {1, "blogspot.am", 2, true}, + {1, "blogspot.ba", 2, true}, + {1, "blogspot.be", 2, true}, + {1, "blogspot.bg", 2, true}, + {1, "blogspot.bj", 2, true}, + {1, "blogspot.ca", 2, true}, + {1, "blogspot.cf", 2, true}, + {1, "blogspot.ch", 2, true}, + {1, "blogspot.cl", 2, true}, + {1, "blogspot.co.at", 3, true}, + {1, "blogspot.co.id", 3, true}, + {1, "blogspot.co.il", 3, true}, + {1, "blogspot.co.ke", 3, true}, + {1, "blogspot.co.nz", 3, true}, + {1, "blogspot.co.uk", 3, true}, + {1, "blogspot.co.za", 3, true}, + {1, 
"blogspot.com", 2, true}, + {1, "blogspot.com.ar", 3, true}, + {1, "blogspot.com.au", 3, true}, + {1, "blogspot.com.br", 3, true}, + {1, "blogspot.com.by", 3, true}, + {1, "blogspot.com.co", 3, true}, + {1, "blogspot.com.cy", 3, true}, + {1, "blogspot.com.ee", 3, true}, + {1, "blogspot.com.eg", 3, true}, + {1, "blogspot.com.es", 3, true}, + {1, "blogspot.com.mt", 3, true}, + {1, "blogspot.com.ng", 3, true}, + {1, "blogspot.com.tr", 3, true}, + {1, "blogspot.com.uy", 3, true}, + {1, "blogspot.cv", 2, true}, + {1, "blogspot.cz", 2, true}, + {1, "blogspot.de", 2, true}, + {1, "blogspot.dk", 2, true}, + {1, "blogspot.fi", 2, true}, + {1, "blogspot.fr", 2, true}, + {1, "blogspot.gr", 2, true}, + {1, "blogspot.hk", 2, true}, + {1, "blogspot.hr", 2, true}, + {1, "blogspot.hu", 2, true}, + {1, "blogspot.ie", 2, true}, + {1, "blogspot.in", 2, true}, + {1, "blogspot.is", 2, true}, + {1, "blogspot.it", 2, true}, + {1, "blogspot.jp", 2, true}, + {1, "blogspot.kr", 2, true}, + {1, "blogspot.li", 2, true}, + {1, "blogspot.lt", 2, true}, + {1, "blogspot.lu", 2, true}, + {1, "blogspot.md", 2, true}, + {1, "blogspot.mk", 2, true}, + {1, "blogspot.mr", 2, true}, + {1, "blogspot.mx", 2, true}, + {1, "blogspot.my", 2, true}, + {1, "blogspot.nl", 2, true}, + {1, "blogspot.no", 2, true}, + {1, "blogspot.pe", 2, true}, + {1, "blogspot.pt", 2, true}, + {1, "blogspot.qa", 2, true}, + {1, "blogspot.re", 2, true}, + {1, "blogspot.ro", 2, true}, + {1, "blogspot.rs", 2, true}, + {1, "blogspot.ru", 2, true}, + {1, "blogspot.se", 2, true}, + {1, "blogspot.sg", 2, true}, + {1, "blogspot.si", 2, true}, + {1, "blogspot.sk", 2, true}, + {1, "blogspot.sn", 2, true}, + {1, "blogspot.td", 2, true}, + {1, "blogspot.tw", 2, true}, + {1, "blogspot.ug", 2, true}, + {1, "blogspot.vn", 2, true}, + {1, "cloudfunctions.net", 2, true}, + {1, "cloud.goog", 2, true}, + {1, "codespot.com", 2, true}, + {1, "googleapis.com", 2, true}, + {1, "googlecode.com", 2, true}, + {1, "pagespeedmobilizer.com", 2, true}, + {1, "publishproxy.com", 2, true}, + {1, "withgoogle.com", 2, true}, + {1, "withyoutube.com", 2, true}, + {1, "fin.ci", 2, true}, + {1, "free.hr", 2, true}, + {1, "caa.li", 2, true}, + {1, "ua.rs", 2, true}, + {1, "conf.se", 2, true}, + {1, "hs.zone", 2, true}, + {1, "hs.run", 2, true}, + {1, "hashbang.sh", 2, true}, + {1, "hasura.app", 2, true}, + {1, "hasura-app.io", 2, true}, + {1, "hepforge.org", 2, true}, + {1, "herokuapp.com", 2, true}, + {1, "herokussl.com", 2, true}, + {1, "myravendb.com", 2, true}, + {1, "ravendb.community", 2, true}, + {1, "ravendb.me", 2, true}, + {1, "development.run", 2, true}, + {1, "ravendb.run", 2, true}, + {1, "bpl.biz", 2, true}, + {1, "orx.biz", 2, true}, + {1, "ng.city", 2, true}, + {1, "biz.gl", 2, true}, + {1, "ng.ink", 2, true}, + {1, "col.ng", 2, true}, + {1, "firm.ng", 2, true}, + {1, "gen.ng", 2, true}, + {1, "ltd.ng", 2, true}, + {1, "ng.school", 2, true}, + {1, "sch.so", 2, true}, + {1, "xn--hkkinen-5wa.fi", 2, true}, + {2, "moonscale.io", 3, true}, + {1, "moonscale.net", 2, true}, + {1, "iki.fi", 2, true}, + {1, "dyn-berlin.de", 2, true}, + {1, "in-berlin.de", 2, true}, + {1, "in-brb.de", 2, true}, + {1, "in-butter.de", 2, true}, + {1, "in-dsl.de", 2, true}, + {1, "in-dsl.net", 2, true}, + {1, "in-dsl.org", 2, true}, + {1, "in-vpn.de", 2, true}, + {1, "in-vpn.net", 2, true}, + {1, "in-vpn.org", 2, true}, + {1, "biz.at", 2, true}, + {1, "info.at", 2, true}, + {1, "info.cx", 2, true}, + {1, "ac.leg.br", 3, true}, + {1, "al.leg.br", 3, true}, + {1, "am.leg.br", 3, true}, + {1, "ap.leg.br", 3, 
true}, + {1, "ba.leg.br", 3, true}, + {1, "ce.leg.br", 3, true}, + {1, "df.leg.br", 3, true}, + {1, "es.leg.br", 3, true}, + {1, "go.leg.br", 3, true}, + {1, "ma.leg.br", 3, true}, + {1, "mg.leg.br", 3, true}, + {1, "ms.leg.br", 3, true}, + {1, "mt.leg.br", 3, true}, + {1, "pa.leg.br", 3, true}, + {1, "pb.leg.br", 3, true}, + {1, "pe.leg.br", 3, true}, + {1, "pi.leg.br", 3, true}, + {1, "pr.leg.br", 3, true}, + {1, "rj.leg.br", 3, true}, + {1, "rn.leg.br", 3, true}, + {1, "ro.leg.br", 3, true}, + {1, "rr.leg.br", 3, true}, + {1, "rs.leg.br", 3, true}, + {1, "sc.leg.br", 3, true}, + {1, "se.leg.br", 3, true}, + {1, "sp.leg.br", 3, true}, + {1, "to.leg.br", 3, true}, + {1, "pixolino.com", 2, true}, + {1, "ipifony.net", 2, true}, + {1, "mein-iserv.de", 2, true}, + {1, "test-iserv.de", 2, true}, + {1, "iserv.dev", 2, true}, + {1, "iobb.net", 2, true}, + {1, "myjino.ru", 2, true}, + {2, "hosting.myjino.ru", 4, true}, + {2, "landing.myjino.ru", 4, true}, + {2, "spectrum.myjino.ru", 4, true}, + {2, "vps.myjino.ru", 4, true}, + {2, "triton.zone", 3, true}, + {2, "cns.joyent.com", 4, true}, + {1, "js.org", 2, true}, + {1, "kaas.gg", 2, true}, + {1, "khplay.nl", 2, true}, + {1, "keymachine.de", 2, true}, + {1, "kinghost.net", 2, true}, + {1, "uni5.net", 2, true}, + {1, "knightpoint.systems", 2, true}, + {1, "co.krd", 2, true}, + {1, "edu.krd", 2, true}, + {1, "git-repos.de", 2, true}, + {1, "lcube-server.de", 2, true}, + {1, "svn-repos.de", 2, true}, + {1, "leadpages.co", 2, true}, + {1, "lpages.co", 2, true}, + {1, "lpusercontent.com", 2, true}, + {1, "lelux.site", 2, true}, + {1, "co.business", 2, true}, + {1, "co.education", 2, true}, + {1, "co.events", 2, true}, + {1, "co.financial", 2, true}, + {1, "co.network", 2, true}, + {1, "co.place", 2, true}, + {1, "co.technology", 2, true}, + {1, "app.lmpm.com", 3, true}, + {1, "linkitools.space", 2, true}, + {1, "linkyard.cloud", 2, true}, + {1, "linkyard-cloud.ch", 2, true}, + {1, "members.linode.com", 3, true}, + {1, "nodebalancer.linode.com", 3, true}, + {1, "we.bs", 2, true}, + {1, "loginline.app", 2, true}, + {1, "loginline.dev", 2, true}, + {1, "loginline.io", 2, true}, + {1, "loginline.services", 2, true}, + {1, "loginline.site", 2, true}, + {1, "krasnik.pl", 2, true}, + {1, "leczna.pl", 2, true}, + {1, "lubartow.pl", 2, true}, + {1, "lublin.pl", 2, true}, + {1, "poniatowa.pl", 2, true}, + {1, "swidnik.pl", 2, true}, + {1, "uklugs.org", 2, true}, + {1, "glug.org.uk", 3, true}, + {1, "lug.org.uk", 3, true}, + {1, "lugs.org.uk", 3, true}, + {1, "barsy.bg", 2, true}, + {1, "barsy.co.uk", 3, true}, + {1, "barsyonline.co.uk", 3, true}, + {1, "barsycenter.com", 2, true}, + {1, "barsyonline.com", 2, true}, + {1, "barsy.club", 2, true}, + {1, "barsy.de", 2, true}, + {1, "barsy.eu", 2, true}, + {1, "barsy.in", 2, true}, + {1, "barsy.info", 2, true}, + {1, "barsy.io", 2, true}, + {1, "barsy.me", 2, true}, + {1, "barsy.menu", 2, true}, + {1, "barsy.mobi", 2, true}, + {1, "barsy.net", 2, true}, + {1, "barsy.online", 2, true}, + {1, "barsy.org", 2, true}, + {1, "barsy.pro", 2, true}, + {1, "barsy.pub", 2, true}, + {1, "barsy.shop", 2, true}, + {1, "barsy.site", 2, true}, + {1, "barsy.support", 2, true}, + {1, "barsy.uk", 2, true}, + {2, "magentosite.cloud", 3, true}, + {1, "mayfirst.info", 2, true}, + {1, "mayfirst.org", 2, true}, + {1, "hb.cldmail.ru", 3, true}, + {1, "miniserver.com", 2, true}, + {1, "memset.net", 2, true}, + {1, "cloud.metacentrum.cz", 3, true}, + {1, "custom.metacentrum.cz", 3, true}, + {1, "flt.cloud.muni.cz", 4, true}, + {1, 
"usr.cloud.muni.cz", 4, true}, + {1, "meteorapp.com", 2, true}, + {1, "eu.meteorapp.com", 3, true}, + {1, "co.pl", 2, true}, + {1, "azurecontainer.io", 2, true}, + {1, "azurewebsites.net", 2, true}, + {1, "azure-mobile.net", 2, true}, + {1, "cloudapp.net", 2, true}, + {1, "mozilla-iot.org", 2, true}, + {1, "bmoattachments.org", 2, true}, + {1, "net.ru", 2, true}, + {1, "org.ru", 2, true}, + {1, "pp.ru", 2, true}, + {1, "ui.nabu.casa", 3, true}, + {1, "pony.club", 2, true}, + {1, "of.fashion", 2, true}, + {1, "on.fashion", 2, true}, + {1, "of.football", 2, true}, + {1, "in.london", 2, true}, + {1, "of.london", 2, true}, + {1, "for.men", 2, true}, + {1, "and.mom", 2, true}, + {1, "for.mom", 2, true}, + {1, "for.one", 2, true}, + {1, "for.sale", 2, true}, + {1, "of.work", 2, true}, + {1, "to.work", 2, true}, + {1, "nctu.me", 2, true}, + {1, "bitballoon.com", 2, true}, + {1, "netlify.com", 2, true}, + {1, "4u.com", 2, true}, + {1, "ngrok.io", 2, true}, + {1, "nh-serv.co.uk", 3, true}, + {1, "nfshost.com", 2, true}, + {1, "dnsking.ch", 2, true}, + {1, "mypi.co", 2, true}, + {1, "n4t.co", 2, true}, + {1, "001www.com", 2, true}, + {1, "ddnslive.com", 2, true}, + {1, "myiphost.com", 2, true}, + {1, "forumz.info", 2, true}, + {1, "16-b.it", 2, true}, + {1, "32-b.it", 2, true}, + {1, "64-b.it", 2, true}, + {1, "soundcast.me", 2, true}, + {1, "tcp4.me", 2, true}, + {1, "dnsup.net", 2, true}, + {1, "hicam.net", 2, true}, + {1, "now-dns.net", 2, true}, + {1, "ownip.net", 2, true}, + {1, "vpndns.net", 2, true}, + {1, "dynserv.org", 2, true}, + {1, "now-dns.org", 2, true}, + {1, "x443.pw", 2, true}, + {1, "now-dns.top", 2, true}, + {1, "ntdll.top", 2, true}, + {1, "freeddns.us", 2, true}, + {1, "crafting.xyz", 2, true}, + {1, "zapto.xyz", 2, true}, + {1, "nsupdate.info", 2, true}, + {1, "nerdpol.ovh", 2, true}, + {1, "blogsyte.com", 2, true}, + {1, "brasilia.me", 2, true}, + {1, "cable-modem.org", 2, true}, + {1, "ciscofreak.com", 2, true}, + {1, "collegefan.org", 2, true}, + {1, "couchpotatofries.org", 2, true}, + {1, "damnserver.com", 2, true}, + {1, "ddns.me", 2, true}, + {1, "ditchyourip.com", 2, true}, + {1, "dnsfor.me", 2, true}, + {1, "dnsiskinky.com", 2, true}, + {1, "dvrcam.info", 2, true}, + {1, "dynns.com", 2, true}, + {1, "eating-organic.net", 2, true}, + {1, "fantasyleague.cc", 2, true}, + {1, "geekgalaxy.com", 2, true}, + {1, "golffan.us", 2, true}, + {1, "health-carereform.com", 2, true}, + {1, "homesecuritymac.com", 2, true}, + {1, "homesecuritypc.com", 2, true}, + {1, "hopto.me", 2, true}, + {1, "ilovecollege.info", 2, true}, + {1, "loginto.me", 2, true}, + {1, "mlbfan.org", 2, true}, + {1, "mmafan.biz", 2, true}, + {1, "myactivedirectory.com", 2, true}, + {1, "mydissent.net", 2, true}, + {1, "myeffect.net", 2, true}, + {1, "mymediapc.net", 2, true}, + {1, "mypsx.net", 2, true}, + {1, "mysecuritycamera.com", 2, true}, + {1, "mysecuritycamera.net", 2, true}, + {1, "mysecuritycamera.org", 2, true}, + {1, "net-freaks.com", 2, true}, + {1, "nflfan.org", 2, true}, + {1, "nhlfan.net", 2, true}, + {1, "no-ip.ca", 2, true}, + {1, "no-ip.co.uk", 3, true}, + {1, "no-ip.net", 2, true}, + {1, "noip.us", 2, true}, + {1, "onthewifi.com", 2, true}, + {1, "pgafan.net", 2, true}, + {1, "point2this.com", 2, true}, + {1, "pointto.us", 2, true}, + {1, "privatizehealthinsurance.net", 2, true}, + {1, "quicksytes.com", 2, true}, + {1, "read-books.org", 2, true}, + {1, "securitytactics.com", 2, true}, + {1, "serveexchange.com", 2, true}, + {1, "servehumour.com", 2, true}, + {1, "servep2p.com", 2, true}, + {1, 
"servesarcasm.com", 2, true}, + {1, "stufftoread.com", 2, true}, + {1, "ufcfan.org", 2, true}, + {1, "unusualperson.com", 2, true}, + {1, "workisboring.com", 2, true}, + {1, "3utilities.com", 2, true}, + {1, "bounceme.net", 2, true}, + {1, "ddns.net", 2, true}, + {1, "ddnsking.com", 2, true}, + {1, "gotdns.ch", 2, true}, + {1, "hopto.org", 2, true}, + {1, "myftp.biz", 2, true}, + {1, "myftp.org", 2, true}, + {1, "myvnc.com", 2, true}, + {1, "no-ip.biz", 2, true}, + {1, "no-ip.info", 2, true}, + {1, "no-ip.org", 2, true}, + {1, "noip.me", 2, true}, + {1, "redirectme.net", 2, true}, + {1, "servebeer.com", 2, true}, + {1, "serveblog.net", 2, true}, + {1, "servecounterstrike.com", 2, true}, + {1, "serveftp.com", 2, true}, + {1, "servegame.com", 2, true}, + {1, "servehalflife.com", 2, true}, + {1, "servehttp.com", 2, true}, + {1, "serveirc.com", 2, true}, + {1, "serveminecraft.net", 2, true}, + {1, "servemp3.com", 2, true}, + {1, "servepics.com", 2, true}, + {1, "servequake.com", 2, true}, + {1, "sytes.net", 2, true}, + {1, "webhop.me", 2, true}, + {1, "zapto.org", 2, true}, + {1, "stage.nodeart.io", 3, true}, + {1, "nodum.co", 2, true}, + {1, "nodum.io", 2, true}, + {1, "pcloud.host", 2, true}, + {1, "nyc.mn", 2, true}, + {1, "nom.ae", 2, true}, + {1, "nom.af", 2, true}, + {1, "nom.ai", 2, true}, + {1, "nom.al", 2, true}, + {1, "nym.by", 2, true}, + {1, "nym.bz", 2, true}, + {1, "nom.cl", 2, true}, + {1, "nym.ec", 2, true}, + {1, "nom.gd", 2, true}, + {1, "nom.ge", 2, true}, + {1, "nom.gl", 2, true}, + {1, "nym.gr", 2, true}, + {1, "nom.gt", 2, true}, + {1, "nym.gy", 2, true}, + {1, "nym.hk", 2, true}, + {1, "nom.hn", 2, true}, + {1, "nym.ie", 2, true}, + {1, "nom.im", 2, true}, + {1, "nom.ke", 2, true}, + {1, "nym.kz", 2, true}, + {1, "nym.la", 2, true}, + {1, "nym.lc", 2, true}, + {1, "nom.li", 2, true}, + {1, "nym.li", 2, true}, + {1, "nym.lt", 2, true}, + {1, "nym.lu", 2, true}, + {1, "nym.me", 2, true}, + {1, "nom.mk", 2, true}, + {1, "nym.mn", 2, true}, + {1, "nym.mx", 2, true}, + {1, "nom.nu", 2, true}, + {1, "nym.nz", 2, true}, + {1, "nym.pe", 2, true}, + {1, "nym.pt", 2, true}, + {1, "nom.pw", 2, true}, + {1, "nom.qa", 2, true}, + {1, "nym.ro", 2, true}, + {1, "nom.rs", 2, true}, + {1, "nom.si", 2, true}, + {1, "nym.sk", 2, true}, + {1, "nom.st", 2, true}, + {1, "nym.su", 2, true}, + {1, "nym.sx", 2, true}, + {1, "nom.tj", 2, true}, + {1, "nym.tw", 2, true}, + {1, "nom.ug", 2, true}, + {1, "nom.uy", 2, true}, + {1, "nom.vc", 2, true}, + {1, "nom.vg", 2, true}, + {1, "cya.gg", 2, true}, + {1, "cloudycluster.net", 2, true}, + {1, "nid.io", 2, true}, + {1, "opencraft.hosting", 2, true}, + {1, "operaunite.com", 2, true}, + {1, "outsystemscloud.com", 2, true}, + {1, "ownprovider.com", 2, true}, + {1, "own.pm", 2, true}, + {1, "ox.rs", 2, true}, + {1, "oy.lc", 2, true}, + {1, "pgfog.com", 2, true}, + {1, "pagefrontapp.com", 2, true}, + {1, "art.pl", 2, true}, + {1, "gliwice.pl", 2, true}, + {1, "krakow.pl", 2, true}, + {1, "poznan.pl", 2, true}, + {1, "wroc.pl", 2, true}, + {1, "zakopane.pl", 2, true}, + {1, "pantheonsite.io", 2, true}, + {1, "gotpantheon.com", 2, true}, + {1, "mypep.link", 2, true}, + {1, "on-web.fr", 2, true}, + {2, "platform.sh", 3, true}, + {2, "platformsh.site", 3, true}, + {1, "dyn53.io", 2, true}, + {1, "co.bn", 2, true}, + {1, "xen.prgmr.com", 3, true}, + {1, "priv.at", 2, true}, + {1, "prvcy.page", 2, true}, + {2, "dweb.link", 3, true}, + {1, "protonet.io", 2, true}, + {1, "chirurgiens-dentistes-en-france.fr", 2, true}, + {1, "byen.site", 2, true}, + {1, 
"pubtls.org", 2, true}, + {1, "qualifioapp.com", 2, true}, + {1, "instantcloud.cn", 2, true}, + {1, "ras.ru", 2, true}, + {1, "qa2.com", 2, true}, + {1, "dev-myqnapcloud.com", 2, true}, + {1, "alpha-myqnapcloud.com", 2, true}, + {1, "myqnapcloud.com", 2, true}, + {2, "quipelements.com", 3, true}, + {1, "vapor.cloud", 2, true}, + {1, "vaporcloud.io", 2, true}, + {1, "rackmaze.com", 2, true}, + {1, "rackmaze.net", 2, true}, + {2, "on-rancher.cloud", 3, true}, + {2, "on-rio.io", 3, true}, + {1, "readthedocs.io", 2, true}, + {1, "rhcloud.com", 2, true}, + {1, "app.render.com", 3, true}, + {1, "onrender.com", 2, true}, + {1, "repl.co", 2, true}, + {1, "repl.run", 2, true}, + {1, "resindevice.io", 2, true}, + {1, "devices.resinstaging.io", 3, true}, + {1, "hzc.io", 2, true}, + {1, "wellbeingzone.eu", 2, true}, + {1, "ptplus.fit", 2, true}, + {1, "wellbeingzone.co.uk", 3, true}, + {1, "git-pages.rit.edu", 3, true}, + {1, "sandcats.io", 2, true}, + {1, "logoip.de", 2, true}, + {1, "logoip.com", 2, true}, + {1, "schokokeks.net", 2, true}, + {1, "scrysec.com", 2, true}, + {1, "firewall-gateway.com", 2, true}, + {1, "firewall-gateway.de", 2, true}, + {1, "my-gateway.de", 2, true}, + {1, "my-router.de", 2, true}, + {1, "spdns.de", 2, true}, + {1, "spdns.eu", 2, true}, + {1, "firewall-gateway.net", 2, true}, + {1, "my-firewall.org", 2, true}, + {1, "myfirewall.org", 2, true}, + {1, "spdns.org", 2, true}, + {1, "biz.ua", 2, true}, + {1, "co.ua", 2, true}, + {1, "pp.ua", 2, true}, + {1, "shiftedit.io", 2, true}, + {1, "myshopblocks.com", 2, true}, + {1, "shopitsite.com", 2, true}, + {1, "mo-siemens.io", 2, true}, + {1, "1kapp.com", 2, true}, + {1, "appchizi.com", 2, true}, + {1, "applinzi.com", 2, true}, + {1, "sinaapp.com", 2, true}, + {1, "vipsinaapp.com", 2, true}, + {1, "siteleaf.net", 2, true}, + {1, "bounty-full.com", 2, true}, + {1, "alpha.bounty-full.com", 3, true}, + {1, "beta.bounty-full.com", 3, true}, + {1, "stackhero-network.com", 2, true}, + {1, "static.land", 2, true}, + {1, "dev.static.land", 3, true}, + {1, "sites.static.land", 3, true}, + {1, "apps.lair.io", 3, true}, + {2, "stolos.io", 3, true}, + {1, "spacekit.io", 2, true}, + {1, "customer.speedpartner.de", 3, true}, + {1, "api.stdlib.com", 3, true}, + {1, "storj.farm", 2, true}, + {1, "utwente.io", 2, true}, + {1, "soc.srcf.net", 3, true}, + {1, "user.srcf.net", 3, true}, + {1, "temp-dns.com", 2, true}, + {1, "applicationcloud.io", 2, true}, + {1, "scapp.io", 2, true}, + {2, "s5y.io", 3, true}, + {2, "sensiosite.cloud", 3, true}, + {1, "syncloud.it", 2, true}, + {1, "diskstation.me", 2, true}, + {1, "dscloud.biz", 2, true}, + {1, "dscloud.me", 2, true}, + {1, "dscloud.mobi", 2, true}, + {1, "dsmynas.com", 2, true}, + {1, "dsmynas.net", 2, true}, + {1, "dsmynas.org", 2, true}, + {1, "familyds.com", 2, true}, + {1, "familyds.net", 2, true}, + {1, "familyds.org", 2, true}, + {1, "i234.me", 2, true}, + {1, "myds.me", 2, true}, + {1, "synology.me", 2, true}, + {1, "vpnplus.to", 2, true}, + {1, "taifun-dns.de", 2, true}, + {1, "gda.pl", 2, true}, + {1, "gdansk.pl", 2, true}, + {1, "gdynia.pl", 2, true}, + {1, "med.pl", 2, true}, + {1, "sopot.pl", 2, true}, + {1, "edugit.org", 2, true}, + {1, "telebit.app", 2, true}, + {1, "telebit.io", 2, true}, + {2, "telebit.xyz", 3, true}, + {1, "gwiddle.co.uk", 3, true}, + {1, "thingdustdata.com", 2, true}, + {1, "cust.dev.thingdust.io", 4, true}, + {1, "cust.disrec.thingdust.io", 4, true}, + {1, "cust.prod.thingdust.io", 4, true}, + {1, "cust.testing.thingdust.io", 4, true}, + {1, "arvo.network", 2, 
true}, + {1, "azimuth.network", 2, true}, + {1, "bloxcms.com", 2, true}, + {1, "townnews-staging.com", 2, true}, + {1, "12hp.at", 2, true}, + {1, "2ix.at", 2, true}, + {1, "4lima.at", 2, true}, + {1, "lima-city.at", 2, true}, + {1, "12hp.ch", 2, true}, + {1, "2ix.ch", 2, true}, + {1, "4lima.ch", 2, true}, + {1, "lima-city.ch", 2, true}, + {1, "trafficplex.cloud", 2, true}, + {1, "de.cool", 2, true}, + {1, "12hp.de", 2, true}, + {1, "2ix.de", 2, true}, + {1, "4lima.de", 2, true}, + {1, "lima-city.de", 2, true}, + {1, "1337.pictures", 2, true}, + {1, "clan.rip", 2, true}, + {1, "lima-city.rocks", 2, true}, + {1, "webspace.rocks", 2, true}, + {1, "lima.zone", 2, true}, + {2, "transurl.be", 3, true}, + {2, "transurl.eu", 3, true}, + {2, "transurl.nl", 3, true}, + {1, "tuxfamily.org", 2, true}, + {1, "dd-dns.de", 2, true}, + {1, "diskstation.eu", 2, true}, + {1, "diskstation.org", 2, true}, + {1, "dray-dns.de", 2, true}, + {1, "draydns.de", 2, true}, + {1, "dyn-vpn.de", 2, true}, + {1, "dynvpn.de", 2, true}, + {1, "mein-vigor.de", 2, true}, + {1, "my-vigor.de", 2, true}, + {1, "my-wan.de", 2, true}, + {1, "syno-ds.de", 2, true}, + {1, "synology-diskstation.de", 2, true}, + {1, "synology-ds.de", 2, true}, + {1, "uber.space", 2, true}, + {2, "uberspace.de", 3, true}, + {1, "hk.com", 2, true}, + {1, "hk.org", 2, true}, + {1, "ltd.hk", 2, true}, + {1, "inc.hk", 2, true}, + {1, "virtualuser.de", 2, true}, + {1, "virtual-user.de", 2, true}, + {1, "lib.de.us", 3, true}, + {1, "2038.io", 2, true}, + {1, "router.management", 2, true}, + {1, "v-info.info", 2, true}, + {1, "voorloper.cloud", 2, true}, + {1, "wafflecell.com", 2, true}, + {2, "webhare.dev", 3, true}, + {1, "wedeploy.io", 2, true}, + {1, "wedeploy.me", 2, true}, + {1, "wedeploy.sh", 2, true}, + {1, "remotewd.com", 2, true}, + {1, "wmflabs.org", 2, true}, + {1, "half.host", 2, true}, + {1, "xnbay.com", 2, true}, + {1, "u2.xnbay.com", 3, true}, + {1, "u2-local.xnbay.com", 3, true}, + {1, "cistron.nl", 2, true}, + {1, "demon.nl", 2, true}, + {1, "xs4all.space", 2, true}, + {1, "yandexcloud.net", 2, true}, + {1, "storage.yandexcloud.net", 3, true}, + {1, "website.yandexcloud.net", 3, true}, + {1, "official.academy", 2, true}, + {1, "yolasite.com", 2, true}, + {1, "ybo.faith", 2, true}, + {1, "yombo.me", 2, true}, + {1, "homelink.one", 2, true}, + {1, "ybo.party", 2, true}, + {1, "ybo.review", 2, true}, + {1, "ybo.science", 2, true}, + {1, "ybo.trade", 2, true}, + {1, "nohost.me", 2, true}, + {1, "noho.st", 2, true}, + {1, "za.net", 2, true}, + {1, "za.org", 2, true}, + {1, "now.sh", 2, true}, + {1, "bss.design", 2, true}, + {1, "basicserver.io", 2, true}, + {1, "virtualserver.io", 2, true}, + {1, "site.builder.nu", 3, true}, + {1, "enterprisecloud.nu", 2, true}, + {1, "zone.id", 2, true}, +} + +func init() { + for i := range r { + DefaultList.AddRule(&r[i]) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/LICENSE new file mode 100644 index 00000000..830522ba --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/LICENSE @@ -0,0 +1,257 @@ +ZCrypto is an original work created at the University of Michigan, and is +licensed under the Apache 2.0 license. However, ZCrypto contains a fork of +several packages from Golang standard library, as well as code from the +BoringSSL test runner. 
Files that were created by Google, and new files in +forks of packages maintained by Google have a Google copyright and fall under +the ISC license. In addition ZCrypto includes a `util/isURL.go` file created by +Alex Saskevich and licensed under the MIT license. All other files are copyright +Regents of the University of Michigan, and fall under the Apache 2.0 license. +All three licenses are reproduced at the bottom of this file. + +-------- + +ISC License used for Google code + +/* Copyright (c) 2015, Google Inc. + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ + +-------- + +MIT License used for util/isURL.go adopted from https://github.com/asaskevich/govalidator + + The MIT License (MIT) + + Copyright (c) 2014 Alex Saskevich + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in all + copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +-------- + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + ZCrypto Copyright 2015 Regents of the University of Michigan + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
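The publicsuffix rule table vendored earlier in this diff registers every row into DefaultList at package init time; each row has the shape {Type, Value, Length, Private}, where Type 1 appears to be a normal rule, Type 2 a wildcard rule, and Private marks private-section suffixes. A minimal sketch of using the populated list, assuming the weppos/publicsuffix-go API this generated file belongs to (the Rule field names and the Domain helper are assumptions, not taken from this diff):

package main

import (
	"fmt"

	"github.com/weppos/publicsuffix-go/publicsuffix"
)

func main() {
	// Mirror what the generated init() does for every vendored row:
	// register one extra private-section rule at runtime (field names assumed).
	rule := publicsuffix.Rule{Type: 1, Value: "internal.example", Length: 2, Private: true}
	publicsuffix.DefaultList.AddRule(&rule)

	// With "internal.example" acting as a public suffix, the registrable
	// domain is one label below it (helper name assumed).
	domain, err := publicsuffix.Domain("app.team.internal.example")
	if err != nil {
		panic(err)
	}
	fmt.Println(domain) // team.internal.example
}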
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/dhe.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/dhe.go
new file mode 100644
index 00000000..0d4770bc
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/dhe.go
@@ -0,0 +1,130 @@
+/*
+ * ZGrab Copyright 2015 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package json
+
+import (
+	"encoding/json"
+	"math/big"
+)
+
+// DHParams can be used to store finite-field Diffie-Hellman parameters. At any
+// point in time, it is unlikely that both ServerPrivate and ClientPrivate will
+// be non-nil.
+type DHParams struct {
+	Prime         *big.Int
+	Generator     *big.Int
+	ServerPublic  *big.Int
+	ServerPrivate *big.Int
+	ClientPublic  *big.Int
+	ClientPrivate *big.Int
+	SessionKey    *big.Int
+}
+
+type auxDHParams struct {
+	Prime         *cryptoParameter `json:"prime"`
+	Generator     *cryptoParameter `json:"generator"`
+	ServerPublic  *cryptoParameter `json:"server_public,omitempty"`
+	ServerPrivate *cryptoParameter `json:"server_private,omitempty"`
+	ClientPublic  *cryptoParameter `json:"client_public,omitempty"`
+	ClientPrivate *cryptoParameter `json:"client_private,omitempty"`
+	SessionKey    *cryptoParameter `json:"session_key,omitempty"`
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (p *DHParams) MarshalJSON() ([]byte, error) {
+	aux := auxDHParams{
+		Prime:     &cryptoParameter{Int: p.Prime},
+		Generator: &cryptoParameter{Int: p.Generator},
+	}
+	if p.ServerPublic != nil {
+		aux.ServerPublic = &cryptoParameter{Int: p.ServerPublic}
+	}
+	if p.ServerPrivate != nil {
+		aux.ServerPrivate = &cryptoParameter{Int: p.ServerPrivate}
+	}
+	if p.ClientPublic != nil {
+		aux.ClientPublic = &cryptoParameter{Int: p.ClientPublic}
+	}
+	if p.ClientPrivate != nil {
+		aux.ClientPrivate = &cryptoParameter{Int: p.ClientPrivate}
+	}
+	if p.SessionKey != nil {
+		aux.SessionKey = &cryptoParameter{Int: p.SessionKey}
+	}
+	return json.Marshal(aux)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *DHParams) UnmarshalJSON(b []byte) error {
+	var aux auxDHParams
+	if err := json.Unmarshal(b, &aux); err != nil {
+		return err
+	}
+	if aux.Prime != nil {
+		p.Prime = aux.Prime.Int
+	}
+	if aux.Generator != nil {
+		p.Generator = aux.Generator.Int
+	}
+	if aux.ServerPublic != nil {
+		p.ServerPublic = aux.ServerPublic.Int
+	}
+	if aux.ServerPrivate != nil {
+		p.ServerPrivate = aux.ServerPrivate.Int
+	}
+	if aux.ClientPublic != nil {
+		p.ClientPublic = aux.ClientPublic.Int
+	}
+	if aux.ClientPrivate != nil {
+		p.ClientPrivate = aux.ClientPrivate.Int
+	}
+	if aux.SessionKey != nil {
+		p.SessionKey = aux.SessionKey.Int
+	}
+	return nil
+}
+
+// cryptoParameter represents a big.Int used as a parameter in some
+// cryptography. It serializes to JSON as a tuple of a base64-encoded number
+// and a length in bits.
+type cryptoParameter struct {
+	*big.Int
+}
+
+type auxCryptoParameter struct {
+	Raw    []byte `json:"value"`
+	Length int    `json:"length"`
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (p *cryptoParameter) MarshalJSON() ([]byte, error) {
+	var aux auxCryptoParameter
+	if p.Int != nil {
+		aux.Raw = p.Bytes()
+		aux.Length = 8 * len(aux.Raw)
+	}
+	return json.Marshal(&aux)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *cryptoParameter) UnmarshalJSON(b []byte) error {
+	var aux auxCryptoParameter
+	if err := json.Unmarshal(b, &aux); err != nil {
+		return err
+	}
+	p.Int = new(big.Int)
+	p.SetBytes(aux.Raw)
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/ecdhe.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/ecdhe.go
new file mode 100644
index 00000000..5d3d1917
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/ecdhe.go
@@ -0,0 +1,107 @@
+/*
+ * ZGrab Copyright 2015 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package json
+
+import (
+	"crypto/elliptic"
+	"encoding/json"
+	"math/big"
+)
+
+// TLSCurveID is the type of a TLS identifier for an elliptic curve. See
+// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8
+type TLSCurveID uint16
+
+// ECDHPrivateParams are the TLS key exchange parameters for ECDH keys.
+type ECDHPrivateParams struct {
+	Value  []byte `json:"value,omitempty"`
+	Length int    `json:"length,omitempty"`
+}
+
+// ECDHParams stores elliptic-curve Diffie-Hellman parameters. At any point in
+// time, it is unlikely that both ServerPrivate and ClientPrivate will be non-nil.
+type ECDHParams struct {
+	TLSCurveID    TLSCurveID         `json:"curve_id,omitempty"`
+	Curve         elliptic.Curve     `json:"-"`
+	ServerPublic  *ECPoint           `json:"server_public,omitempty"`
+	ServerPrivate *ECDHPrivateParams `json:"server_private,omitempty"`
+	ClientPublic  *ECPoint           `json:"client_public,omitempty"`
+	ClientPrivate *ECDHPrivateParams `json:"client_private,omitempty"`
+}
+
+// ECPoint represents an elliptic curve point and serializes nicely to JSON
+type ECPoint struct {
+	X *big.Int
+	Y *big.Int
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (p *ECPoint) MarshalJSON() ([]byte, error) {
+	aux := struct {
+		X *cryptoParameter `json:"x"`
+		Y *cryptoParameter `json:"y"`
+	}{
+		X: &cryptoParameter{Int: p.X},
+		Y: &cryptoParameter{Int: p.Y},
+	}
+	return json.Marshal(&aux)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *ECPoint) UnmarshalJSON(b []byte) error {
+	aux := struct {
+		X *cryptoParameter `json:"x"`
+		Y *cryptoParameter `json:"y"`
+	}{}
+	if err := json.Unmarshal(b, &aux); err != nil {
+		return err
+	}
+	p.X = aux.X.Int
+	p.Y = aux.Y.Int
+	return nil
+}
+
+// Description returns the description field for the given ID.
See +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 +func (c *TLSCurveID) Description() string { + if desc, ok := ecIDToName[*c]; ok { + return desc + } + return "unknown" +} + +// MarshalJSON implements the json.Marshaler interface +func (c *TLSCurveID) MarshalJSON() ([]byte, error) { + aux := struct { + Name string `json:"name"` + ID uint16 `json:"id"` + }{ + Name: c.Description(), + ID: uint16(*c), + } + return json.Marshal(&aux) +} + +//UnmarshalJSON implements the json.Unmarshaler interface +func (c *TLSCurveID) UnmarshalJSON(b []byte) error { + aux := struct { + ID uint16 `json:"id"` + }{} + if err := json.Unmarshal(b, &aux); err != nil { + return err + } + *c = TLSCurveID(aux.ID) + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/names.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/names.go new file mode 100644 index 00000000..38828248 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/names.go @@ -0,0 +1,113 @@ +/* + * ZGrab Copyright 2015 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package json + +// IANA-assigned curve ID values, see +// http://www.iana.org/assignments/tls-parameters/tls-parameters.xml#tls-parameters-8 +const ( + Sect163k1 TLSCurveID = 1 + Sect163r1 TLSCurveID = 2 + Sect163r2 TLSCurveID = 3 + Sect193r1 TLSCurveID = 4 + Sect193r2 TLSCurveID = 5 + Sect233k1 TLSCurveID = 6 + Sect233r1 TLSCurveID = 7 + Sect239k1 TLSCurveID = 8 + Sect283k1 TLSCurveID = 9 + Sect283r1 TLSCurveID = 10 + Sect409k1 TLSCurveID = 11 + Sect409r1 TLSCurveID = 12 + Sect571k1 TLSCurveID = 13 + Sect571r1 TLSCurveID = 14 + Secp160k1 TLSCurveID = 15 + Secp160r1 TLSCurveID = 16 + Secp160r2 TLSCurveID = 17 + Secp192k1 TLSCurveID = 18 + Secp192r1 TLSCurveID = 19 + Secp224k1 TLSCurveID = 20 + Secp224r1 TLSCurveID = 21 + Secp256k1 TLSCurveID = 22 + Secp256r1 TLSCurveID = 23 + Secp384r1 TLSCurveID = 24 + Secp521r1 TLSCurveID = 25 + BrainpoolP256r1 TLSCurveID = 26 + BrainpoolP384r1 TLSCurveID = 27 + BrainpoolP512r1 TLSCurveID = 28 +) + +var ecIDToName map[TLSCurveID]string +var ecNameToID map[string]TLSCurveID + +func init() { + ecIDToName = make(map[TLSCurveID]string, 64) + ecIDToName[Sect163k1] = "sect163k1" + ecIDToName[Sect163r1] = "sect163r1" + ecIDToName[Sect163r2] = "sect163r2" + ecIDToName[Sect193r1] = "sect193r1" + ecIDToName[Sect193r2] = "sect193r2" + ecIDToName[Sect233k1] = "sect233k1" + ecIDToName[Sect233r1] = "sect233r1" + ecIDToName[Sect239k1] = "sect239k1" + ecIDToName[Sect283k1] = "sect283k1" + ecIDToName[Sect283r1] = "sect283r1" + ecIDToName[Sect409k1] = "sect409k1" + ecIDToName[Sect409r1] = "sect409r1" + ecIDToName[Sect571k1] = "sect571k1" + ecIDToName[Sect571r1] = "sect571r1" + ecIDToName[Secp160k1] = "secp160k1" + ecIDToName[Secp160r1] = "secp160r1" + ecIDToName[Secp160r2] = "secp160r2" + ecIDToName[Secp192k1] = "secp192k1" + ecIDToName[Secp192r1] = "secp192r1" + ecIDToName[Secp224k1] = 
"secp224k1" + ecIDToName[Secp224r1] = "secp224r1" + ecIDToName[Secp256k1] = "secp256k1" + ecIDToName[Secp256r1] = "secp256r1" + ecIDToName[Secp384r1] = "secp384r1" + ecIDToName[Secp521r1] = "secp521r1" + ecIDToName[BrainpoolP256r1] = "brainpoolp256r1" + ecIDToName[BrainpoolP384r1] = "brainpoolp384r1" + ecIDToName[BrainpoolP512r1] = "brainpoolp512r1" + + ecNameToID = make(map[string]TLSCurveID, 64) + ecNameToID["sect163k1"] = Sect163k1 + ecNameToID["sect163r1"] = Sect163r1 + ecNameToID["sect163r2"] = Sect163r2 + ecNameToID["sect193r1"] = Sect193r1 + ecNameToID["sect193r2"] = Sect193r2 + ecNameToID["sect233k1"] = Sect233k1 + ecNameToID["sect233r1"] = Sect233r1 + ecNameToID["sect239k1"] = Sect239k1 + ecNameToID["sect283k1"] = Sect283k1 + ecNameToID["sect283r1"] = Sect283r1 + ecNameToID["sect409k1"] = Sect409k1 + ecNameToID["sect409r1"] = Sect409r1 + ecNameToID["sect571k1"] = Sect571k1 + ecNameToID["sect571r1"] = Sect571r1 + ecNameToID["secp160k1"] = Secp160k1 + ecNameToID["secp160r1"] = Secp160r1 + ecNameToID["secp160r2"] = Secp160r2 + ecNameToID["secp192k1"] = Secp192k1 + ecNameToID["secp192r1"] = Secp192r1 + ecNameToID["secp224k1"] = Secp224k1 + ecNameToID["secp224r1"] = Secp224r1 + ecNameToID["secp256k1"] = Secp256k1 + ecNameToID["secp256r1"] = Secp256r1 + ecNameToID["secp384r1"] = Secp384r1 + ecNameToID["secp521r1"] = Secp521r1 + ecNameToID["brainpoolp256r1"] = BrainpoolP256r1 + ecNameToID["brainpoolp384r1"] = BrainpoolP384r1 + ecNameToID["brainpoolp512r1"] = BrainpoolP512r1 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/rsa.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/rsa.go new file mode 100644 index 00000000..27025697 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/json/rsa.go @@ -0,0 +1,67 @@ +/* + * ZGrab Copyright 2015 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package json + +import ( + "crypto/rsa" + "encoding/json" + "fmt" + "math/big" +) + +// RSAPublicKey provides JSON methods for the standard rsa.PublicKey. +type RSAPublicKey struct { + *rsa.PublicKey +} + +type auxRSAPublicKey struct { + Exponent int `json:"exponent"` + Modulus []byte `json:"modulus"` + Length int `json:"length"` +} + +// RSAClientParams are the TLS key exchange parameters for RSA keys. 
+type RSAClientParams struct {
+	Length       uint16 `json:"length,omitempty"`
+	EncryptedPMS []byte `json:"encrypted_pre_master_secret,omitempty"`
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (rp *RSAPublicKey) MarshalJSON() ([]byte, error) {
+	var aux auxRSAPublicKey
+	if rp.PublicKey != nil {
+		aux.Exponent = rp.E
+		aux.Modulus = rp.N.Bytes()
+		aux.Length = len(aux.Modulus) * 8
+	}
+	return json.Marshal(&aux)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (rp *RSAPublicKey) UnmarshalJSON(b []byte) error {
+	var aux auxRSAPublicKey
+	if err := json.Unmarshal(b, &aux); err != nil {
+		return err
+	}
+	if rp.PublicKey == nil {
+		rp.PublicKey = new(rsa.PublicKey)
+	}
+	rp.E = aux.Exponent
+	rp.N = big.NewInt(0).SetBytes(aux.Modulus)
+	if len(aux.Modulus)*8 != aux.Length {
+		return fmt.Errorf("mismatched length (got %d bits, field specified %d)", len(aux.Modulus)*8, aux.Length)
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/util/isURL.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/util/isURL.go
new file mode 100644
index 00000000..6a09a471
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/util/isURL.go
@@ -0,0 +1,77 @@
+/*
+The MIT License (MIT)
+
+Copyright (c) 2014 Alex Saskevich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+*/ + +package util + +import ( + "net/url" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + maxURLRuneCount = 2083 + minURLRuneCount = 3 + + IP = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))` + URLSchema = `((ftp|tcp|udp|wss?|https?):\/\/)` + URLUsername = `(\S+(:\S*)?@)` + URLPath = `((\/|\?|#)[^\s]*)` + URLPort = `(:(\d{1,5}))` + URLIP = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))` + URLSubdomain = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))` +) + +var ( + URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$` + rxURL = regexp.MustCompile(URL) +) + +// IsURL check if the string is an URL. +// This function is (graciously) adopted from +// https://github.com/asaskevich/govalidator to avoid needing a full dependency on +// `govalidator` for the one `IsURL` function. +func IsURL(str string) bool { + if str == "" || utf8.RuneCountInString(str) >= maxURLRuneCount || len(str) <= minURLRuneCount || strings.HasPrefix(str, ".") { + return false + } + strTemp := str + if strings.Contains(str, ":") && !strings.Contains(str, "://") { + // support no indicated urlscheme but with colon for port number + // http:// is appended so url.Parse will succeed, strTemp used so it does not impact rxURL.MatchString + strTemp = "http://" + str + } + u, err := url.Parse(strTemp) + if err != nil { + return false + } + if strings.HasPrefix(u.Host, ".") { + return false + } + if u.Host == "" && (u.Path != "" && !strings.Contains(u.Path, ".")) { + return false + } + return rxURL.MatchString(str) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/README.md b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/README.md new file mode 100644 index 00000000..e2a28835 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/README.md @@ -0,0 +1,8 @@ +Originally based on the go/crypto/x509 standard library, +this package has now diverged enough that it is no longer +updated with direct correspondence to new go releases. + +Approximately supports all the features of +github.com/golang/go/crypto/x509 package at: +branch: release-branch.go1.10 +revision: dea961ebd9f871b39b3bdaab32f952037f28cd71 diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/cert_pool.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/cert_pool.go new file mode 100644 index 00000000..a6c6d2b0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/cert_pool.go @@ -0,0 +1,171 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "encoding/pem" +) + +// CertPool is a set of certificates. +type CertPool struct { + bySubjectKeyId map[string][]int + byName map[string][]int + bySHA256 map[string]int + certs []*Certificate +} + +// NewCertPool returns a new, empty CertPool. +func NewCertPool() *CertPool { + return &CertPool{ + bySubjectKeyId: make(map[string][]int), + byName: make(map[string][]int), + bySHA256: make(map[string]int), + } +} + +// findVerifiedParents attempts to find certificates in s which have signed the +// given certificate. If any candidates were rejected then errCert will be set +// to one of them, arbitrarily, and err will contain the reason that it was +// rejected. +func (s *CertPool) findVerifiedParents(cert *Certificate) (parents []int, errCert *Certificate, err error) { + if s == nil { + return + } + var candidates []int + + if len(cert.AuthorityKeyId) > 0 { + candidates, _ = s.bySubjectKeyId[string(cert.AuthorityKeyId)] + } + if len(candidates) == 0 { + candidates, _ = s.byName[string(cert.RawIssuer)] + } + + for _, c := range candidates { + if err = cert.CheckSignatureFrom(s.certs[c]); err == nil { + cert.validSignature = true + parents = append(parents, c) + } else { + errCert = s.certs[c] + } + } + + return +} + +// Contains returns true if c is in s. +func (s *CertPool) Contains(c *Certificate) bool { + if s == nil { + return false + } + _, ok := s.bySHA256[string(c.FingerprintSHA256)] + return ok +} + +// Covers returns true if all certs in pool are in s. +func (s *CertPool) Covers(pool *CertPool) bool { + if pool == nil { + return true + } + for _, c := range pool.certs { + if !s.Contains(c) { + return false + } + } + return true +} + +// Certificates returns a list of parsed certificates in the pool. +func (s *CertPool) Certificates() []*Certificate { + out := make([]*Certificate, 0, len(s.certs)) + out = append(out, s.certs...) + return out +} + +// Size returns the number of unique certificates in the CertPool. +func (s *CertPool) Size() int { + if s == nil { + return 0 + } + return len(s.certs) +} + +// Sum returns the union of two certificate pools as a new certificate pool. +func (s *CertPool) Sum(other *CertPool) (sum *CertPool) { + sum = NewCertPool() + if s != nil { + for _, c := range s.certs { + sum.AddCert(c) + } + } + if other != nil { + for _, c := range other.certs { + sum.AddCert(c) + } + } + return +} + +// AddCert adds a certificate to a pool. +func (s *CertPool) AddCert(cert *Certificate) { + if cert == nil { + panic("adding nil Certificate to CertPool") + } + + // Check that the certificate isn't being added twice. + sha256fp := string(cert.FingerprintSHA256) + if _, ok := s.bySHA256[sha256fp]; ok { + return + } + + n := len(s.certs) + s.certs = append(s.certs, cert) + + if len(cert.SubjectKeyId) > 0 { + keyId := string(cert.SubjectKeyId) + s.bySubjectKeyId[keyId] = append(s.bySubjectKeyId[keyId], n) + } + name := string(cert.RawSubject) + s.byName[name] = append(s.byName[name], n) + s.bySHA256[sha256fp] = n +} + +// AppendCertsFromPEM attempts to parse a series of PEM encoded certificates. +// It appends any certificates found to s and reports whether any certificates +// were successfully parsed. +// +// On many Linux systems, /etc/ssl/cert.pem will contain the system wide set +// of root CAs in a format suitable for this function. 
+func (s *CertPool) AppendCertsFromPEM(pemCerts []byte) (ok bool) {
+	for len(pemCerts) > 0 {
+		var block *pem.Block
+		block, pemCerts = pem.Decode(pemCerts)
+		if block == nil {
+			break
+		}
+		if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
+			continue
+		}
+
+		cert, err := ParseCertificate(block.Bytes)
+		if err != nil {
+			continue
+		}
+
+		s.AddCert(cert)
+		ok = true
+	}
+
+	return
+}
+
+// Subjects returns a list of the DER-encoded subjects of
+// all of the certificates in the pool.
+func (s *CertPool) Subjects() [][]byte {
+	res := make([][]byte, len(s.certs))
+	for i, c := range s.certs {
+		res[i] = c.RawSubject
+	}
+	return res
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/certificate_type.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/certificate_type.go
new file mode 100644
index 00000000..7bb7f324
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/certificate_type.go
@@ -0,0 +1,64 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import "encoding/json"
+
+// TODO: Automatically generate this file from a CSV
+
+// CertificateType represents whether a certificate is a root, intermediate, or
+// leaf.
+type CertificateType int
+
+// CertificateType constants. Values should not be considered significant aside
+// from the fact that CertificateTypeUnknown is the zero value.
+const (
+	CertificateTypeUnknown      CertificateType = 0
+	CertificateTypeLeaf         CertificateType = 1
+	CertificateTypeIntermediate CertificateType = 2
+	CertificateTypeRoot         CertificateType = 3
+)
+
+const (
+	certificateTypeStringLeaf         = "leaf"
+	certificateTypeStringIntermediate = "intermediate"
+	certificateTypeStringRoot         = "root"
+	certificateTypeStringUnknown      = "unknown"
+)
+
+// MarshalJSON implements the json.Marshaler interface. Any unknown integer
+// value is considered the same as CertificateTypeUnknown.
+func (t CertificateType) MarshalJSON() ([]byte, error) {
+	switch t {
+	case CertificateTypeLeaf:
+		return json.Marshal(certificateTypeStringLeaf)
+	case CertificateTypeIntermediate:
+		return json.Marshal(certificateTypeStringIntermediate)
+	case CertificateTypeRoot:
+		return json.Marshal(certificateTypeStringRoot)
+	default:
+		return json.Marshal(certificateTypeStringUnknown)
+	}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface. Any unknown string
+// is considered the same as CertificateTypeUnknown.
+func (t *CertificateType) UnmarshalJSON(b []byte) error {
+	var certificateTypeString string
+	if err := json.Unmarshal(b, &certificateTypeString); err != nil {
+		return err
+	}
+	switch certificateTypeString {
+	case certificateTypeStringLeaf:
+		*t = CertificateTypeLeaf
+	case certificateTypeStringIntermediate:
+		*t = CertificateTypeIntermediate
+	case certificateTypeStringRoot:
+		*t = CertificateTypeRoot
+	default:
+		*t = CertificateTypeUnknown
+	}
+	return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/chain.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/chain.go
new file mode 100644
index 00000000..bca0f278
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/chain.go
@@ -0,0 +1,70 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
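The CertPool above indexes certificates three ways (by subject key ID, by raw subject, and by SHA-256 fingerprint), which is what keeps findVerifiedParents and Contains cheap. A minimal sketch of loading a root bundle into the pool, using only methods shown in this diff; the PEM path is the one suggested in the AppendCertsFromPEM doc comment and may differ on your system:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/zmap/zcrypto/x509"
)

func main() {
	// Conventional system bundle location; adjust as needed.
	pemBytes, err := ioutil.ReadFile("/etc/ssl/cert.pem")
	if err != nil {
		panic(err)
	}

	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(pemBytes) {
		panic("no certificates could be parsed from the bundle")
	}

	// Size counts unique certificates: AddCert dedupes on FingerprintSHA256.
	fmt.Println("unique roots loaded:", roots.Size())
}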
+ +package x509 + +import ( + "bytes" + "strings" +) + +// CertificateChain is a slice of certificates. The 0'th element is the leaf, +// and the last element is a root. Successive elements have a child-parent +// relationship. +type CertificateChain []*Certificate + +// Range runs a function on each element of chain. It can modify each +// certificate in place. +func (chain CertificateChain) Range(f func(int, *Certificate)) { + for i, c := range chain { + f(i, c) + } +} + +// SubjectAndKeyInChain returns true if the given SubjectAndKey is found in any +// certificate in the chain. +func (chain CertificateChain) SubjectAndKeyInChain(sk *SubjectAndKey) bool { + for _, cert := range chain { + if bytes.Equal(sk.RawSubject, cert.RawSubject) && bytes.Equal(sk.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) { + return true + } + } + return false +} + +// CertificateSubjectAndKeyInChain returns true if the SubjectAndKey from c is +// found in any certificate in the chain. +func (chain CertificateChain) CertificateSubjectAndKeyInChain(c *Certificate) bool { + for _, cert := range chain { + if bytes.Equal(c.RawSubject, cert.RawSubject) && bytes.Equal(c.RawSubjectPublicKeyInfo, cert.RawSubjectPublicKeyInfo) { + return true + } + } + return false +} + +// CertificateInChain returns true if c is in the chain. +func (chain CertificateChain) CertificateInChain(c *Certificate) bool { + for _, cert := range chain { + if bytes.Equal(c.Raw, cert.Raw) { + return true + } + } + return false +} + +func (chain CertificateChain) AppendToFreshChain(c *Certificate) CertificateChain { + n := make([]*Certificate, len(chain)+1) + copy(n, chain) + n[len(chain)] = c + return n +} + +func (chain CertificateChain) chainID() string { + var parts []string + for _, c := range chain { + parts = append(parts, string(c.FingerprintSHA256)) + } + return strings.Join(parts, "") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/ct/serialization.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/ct/serialization.go new file mode 100644 index 00000000..aac17dc2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/ct/serialization.go @@ -0,0 +1,168 @@ +package ct + +// This file contains selectively chosen snippets of +// github.com/google/certificate-transparency-go@ 5cfe585726ad9d990d4db524d6ce2567b13e2f80 +// +// These snippets only perform deserialization for SCTs and are recreated here to prevent pulling in the whole of the ct +// which contains yet another version of x509,asn1 and tls + +import ( + "encoding/binary" + "errors" + "fmt" + "io" +) + +// Variable size structure prefix-header byte lengths +const ( + CertificateLengthBytes = 3 + PreCertificateLengthBytes = 3 + ExtensionsLengthBytes = 2 + CertificateChainLengthBytes = 3 + SignatureLengthBytes = 2 +) + +func writeUint(w io.Writer, value uint64, numBytes int) error { + buf := make([]uint8, numBytes) + for i := 0; i < numBytes; i++ { + buf[numBytes-i-1] = uint8(value & 0xff) + value >>= 8 + } + if value != 0 { + return errors.New("numBytes was insufficiently large to represent value") + } + if _, err := w.Write(buf); err != nil { + return err + } + return nil +} + +func writeVarBytes(w io.Writer, value []byte, numLenBytes int) error { + if err := writeUint(w, uint64(len(value)), numLenBytes); err != nil { + return err + } + if _, err := w.Write(value); err != nil { + return err + } + return nil +} + +func readUint(r io.Reader, numBytes int) (uint64, error) { + var l uint64 + 
for i := 0; i < numBytes; i++ { + l <<= 8 + var t uint8 + if err := binary.Read(r, binary.BigEndian, &t); err != nil { + return 0, err + } + l |= uint64(t) + } + return l, nil +} + +// Reads a variable length array of bytes from |r|. |numLenBytes| specifies the +// number of (BigEndian) prefix-bytes which contain the length of the actual +// array data bytes that follow. +// Allocates an array to hold the contents and returns a slice view into it if +// the read was successful, or an error otherwise. +func readVarBytes(r io.Reader, numLenBytes int) ([]byte, error) { + switch { + case numLenBytes > 8: + return nil, fmt.Errorf("numLenBytes too large (%d)", numLenBytes) + case numLenBytes == 0: + return nil, errors.New("numLenBytes should be > 0") + } + l, err := readUint(r, numLenBytes) + if err != nil { + return nil, err + } + data := make([]byte, l) + if n, err := io.ReadFull(r, data); err != nil { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, fmt.Errorf("short read: expected %d but got %d", l, n) + } + return nil, err + } + return data, nil +} + +// UnmarshalDigitallySigned reconstructs a DigitallySigned structure from a Reader +func UnmarshalDigitallySigned(r io.Reader) (*DigitallySigned, error) { + var h byte + if err := binary.Read(r, binary.BigEndian, &h); err != nil { + return nil, fmt.Errorf("failed to read HashAlgorithm: %v", err) + } + + var s byte + if err := binary.Read(r, binary.BigEndian, &s); err != nil { + return nil, fmt.Errorf("failed to read SignatureAlgorithm: %v", err) + } + + sig, err := readVarBytes(r, SignatureLengthBytes) + if err != nil { + return nil, fmt.Errorf("failed to read Signature bytes: %v", err) + } + + return &DigitallySigned{ + HashAlgorithm: HashAlgorithm(h), + SignatureAlgorithm: SignatureAlgorithm(s), + Signature: sig, + }, nil +} + +func marshalDigitallySignedHere(ds DigitallySigned, here []byte) ([]byte, error) { + sigLen := len(ds.Signature) + dsOutLen := 2 + SignatureLengthBytes + sigLen + if here == nil { + here = make([]byte, dsOutLen) + } + if len(here) < dsOutLen { + return nil, ErrNotEnoughBuffer + } + here = here[0:dsOutLen] + + here[0] = byte(ds.HashAlgorithm) + here[1] = byte(ds.SignatureAlgorithm) + binary.BigEndian.PutUint16(here[2:4], uint16(sigLen)) + copy(here[4:], ds.Signature) + + return here, nil +} + +// MarshalDigitallySigned marshalls a DigitallySigned structure into a byte array +func MarshalDigitallySigned(ds DigitallySigned) ([]byte, error) { + return marshalDigitallySignedHere(ds, nil) +} + +func deserializeSCTV1(r io.Reader, sct *SignedCertificateTimestamp) error { + if err := binary.Read(r, binary.BigEndian, &sct.LogID); err != nil { + return err + } + if err := binary.Read(r, binary.BigEndian, &sct.Timestamp); err != nil { + return err + } + ext, err := readVarBytes(r, ExtensionsLengthBytes) + if err != nil { + return err + } + sct.Extensions = ext + ds, err := UnmarshalDigitallySigned(r) + if err != nil { + return err + } + sct.Signature = *ds + return nil +} + +func DeserializeSCT(r io.Reader) (*SignedCertificateTimestamp, error) { + var sct SignedCertificateTimestamp + if err := binary.Read(r, binary.BigEndian, &sct.SCTVersion); err != nil { + return nil, err + } + switch sct.SCTVersion { + case V1: + return &sct, deserializeSCTV1(r, &sct) + default: + return nil, fmt.Errorf("unknown SCT version %d", sct.SCTVersion) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/ct/types.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/ct/types.go new 
file mode 100644
index 00000000..8d894d33
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/ct/types.go
@@ -0,0 +1,229 @@
+package ct
+
+// This file contains selectively chosen snippets of
+// github.com/google/certificate-transparency-go@ 5cfe585726ad9d990d4db524d6ce2567b13e2f80
+//
+// These snippets only perform deserialization for SCTs and are recreated here
+// to prevent pulling in the whole of the certificate-transparency-go library,
+// which contains yet another version of x509, asn1 and tls
+
+import (
+	"bytes"
+	"crypto/sha256"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+)
+
+// CTExtensions is a representation of the raw bytes of any CtExtension
+// structure (see section 3.2)
+type CTExtensions []byte
+
+// SHA256Hash represents the output from the SHA256 hash function.
+type SHA256Hash [sha256.Size]byte
+
+// FromBase64String populates the SHA256 struct with the contents of the base64 data passed in.
+func (s *SHA256Hash) FromBase64String(b64 string) error {
+	bs, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return fmt.Errorf("failed to unbase64 LogID: %v", err)
+	}
+	if len(bs) != sha256.Size {
+		return fmt.Errorf("invalid SHA256 length, expected 32 but got %d", len(bs))
+	}
+	copy(s[:], bs)
+	return nil
+}
+
+// Base64String returns the base64 representation of this SHA256Hash.
+func (s SHA256Hash) Base64String() string {
+	return base64.StdEncoding.EncodeToString(s[:])
+}
+
+// MarshalJSON implements the json.Marshaller interface for SHA256Hash.
+func (s SHA256Hash) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + s.Base64String() + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaller interface.
+func (s *SHA256Hash) UnmarshalJSON(b []byte) error {
+	var content string
+	if err := json.Unmarshal(b, &content); err != nil {
+		return fmt.Errorf("failed to unmarshal SHA256Hash: %v", err)
+	}
+	return s.FromBase64String(content)
+}
+
+// HashAlgorithm from the DigitallySigned struct
+type HashAlgorithm byte
+
+// HashAlgorithm constants
+const (
+	None   HashAlgorithm = 0
+	MD5    HashAlgorithm = 1
+	SHA1   HashAlgorithm = 2
+	SHA224 HashAlgorithm = 3
+	SHA256 HashAlgorithm = 4
+	SHA384 HashAlgorithm = 5
+	SHA512 HashAlgorithm = 6
+)
+
+func (h HashAlgorithm) String() string {
+	switch h {
+	case None:
+		return "None"
+	case MD5:
+		return "MD5"
+	case SHA1:
+		return "SHA1"
+	case SHA224:
+		return "SHA224"
+	case SHA256:
+		return "SHA256"
+	case SHA384:
+		return "SHA384"
+	case SHA512:
+		return "SHA512"
+	default:
+		return fmt.Sprintf("UNKNOWN(%d)", h)
+	}
+}
+
+// SignatureAlgorithm from the DigitallySigned struct
+type SignatureAlgorithm byte
+
+// SignatureAlgorithm constants
+const (
+	Anonymous SignatureAlgorithm = 0
+	RSA       SignatureAlgorithm = 1
+	DSA       SignatureAlgorithm = 2
+	ECDSA     SignatureAlgorithm = 3
+)
+
+func (s SignatureAlgorithm) String() string {
+	switch s {
+	case Anonymous:
+		return "Anonymous"
+	case RSA:
+		return "RSA"
+	case DSA:
+		return "DSA"
+	case ECDSA:
+		return "ECDSA"
+	default:
+		return fmt.Sprintf("UNKNOWN(%d)", s)
+	}
+}
+
+// DigitallySigned represents an RFC5246 DigitallySigned structure
+type DigitallySigned struct {
+	HashAlgorithm      HashAlgorithm
+	SignatureAlgorithm SignatureAlgorithm
+	Signature          []byte
+}
+
+// FromBase64String populates the DigitallySigned structure from the base64 data passed in.
+// Returns an error if the base64 data is invalid.
+func (d *DigitallySigned) FromBase64String(b64 string) error {
+	raw, err := base64.StdEncoding.DecodeString(b64)
+	if err != nil {
+		return fmt.Errorf("failed to unbase64 DigitallySigned: %v", err)
+	}
+	ds, err := UnmarshalDigitallySigned(bytes.NewReader(raw))
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+	}
+	*d = *ds
+	return nil
+}
+
+// Base64String returns the base64 representation of the DigitallySigned struct.
+func (d DigitallySigned) Base64String() (string, error) {
+	b, err := MarshalDigitallySigned(d)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// MarshalJSON implements the json.Marshaller interface.
+func (d DigitallySigned) MarshalJSON() ([]byte, error) {
+	b64, err := d.Base64String()
+	if err != nil {
+		return []byte{}, err
+	}
+	return []byte(`"` + b64 + `"`), nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (d *DigitallySigned) UnmarshalJSON(b []byte) error {
+	var content string
+	if err := json.Unmarshal(b, &content); err != nil {
+		return fmt.Errorf("failed to unmarshal DigitallySigned: %v", err)
+	}
+	return d.FromBase64String(content)
+}
+
+// Version represents the Version enum from section 3.2 of the RFC:
+// enum { v1(0), (255) } Version;
+type Version uint8
+
+func (v Version) String() string {
+	switch v {
+	case V1:
+		return "V1"
+	default:
+		return fmt.Sprintf("UnknownVersion(%d)", v)
+	}
+}
+
+// CT Version constants, see section 3.2 of the RFC.
+const (
+	V1 Version = 0
+)
+
+// SignedCertificateTimestamp represents the structure returned by the
+// add-chain and add-pre-chain methods after base64 decoding. (see RFC sections
+// 3.2, 4.1 and 4.2)
+type SignedCertificateTimestamp struct {
+	SCTVersion Version    `json:"version"` // The version of the protocol to which the SCT conforms
+	LogID      SHA256Hash `json:"log_id"`  // the SHA-256 hash of the log's public key, calculated over
+	// the DER encoding of the key represented as SubjectPublicKeyInfo.
+	Timestamp  uint64          `json:"timestamp,omitempty"`  // Timestamp (in ms since the unix epoch) at which the SCT was issued. NOTE: When this is serialized, the output is in seconds, not milliseconds.
+	Extensions CTExtensions    `json:"extensions,omitempty"` // For future extensions to the protocol
+	Signature  DigitallySigned `json:"signature"`            // The Log's signature for this SCT
+}
+
+// Copied from ct/types.go 2018/06/15 to deal with BQ timestamp overflow; output
+// is expected to be seconds, not milliseconds.
+type auxSignedCertificateTimestamp SignedCertificateTimestamp
+
+const kMaxTimestamp = 253402300799
+
+// MarshalJSON implements the json.Marshaller interface.
+func (sct *SignedCertificateTimestamp) MarshalJSON() ([]byte, error) {
+	aux := auxSignedCertificateTimestamp(*sct)
+	aux.Timestamp = sct.Timestamp / 1000 // convert ms to sec
+	if aux.Timestamp > kMaxTimestamp {
+		aux.Timestamp = 0
+	}
+	return json.Marshal(&aux)
+}
+
+type sctError int
+
+// Preallocate errors for performance
+var (
+	ErrInvalidVersion  error = sctError(1)
+	ErrNotEnoughBuffer error = sctError(2)
+)
+
+func (e sctError) Error() string {
+	switch e {
+	case ErrInvalidVersion:
+		return "invalid SCT version detected"
+	case ErrNotEnoughBuffer:
+		return "provided buffer was too small"
+	default:
+		return "unknown error"
+	}
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/example.json b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/example.json
new file mode 100644
index 00000000..dd225da7
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/example.json
@@ -0,0 +1,65 @@
+{
+  "domain": null,
+  "certificate": {
+    "version": 3,
+    "serial_number": 123893,
+    "signature_algorithm": {
+      "id": 123,
+      "name": "SHA1"
+    },
+    "issuer": {
+      "common_name": "Starfield CA",
+      "attributes": [
+        { "organization": "Starfield" },
+        { "location": "Scottsdale" },
+        { "state": "Arizona" },
+        { "country": "US" }
+      ]
+    },
+    "validity": {
+      "start": "20140102",
+      "end": "20150102",
+      "length": 8760
+    },
+    "subject": {
+      "common_name": "*.tools.ietf.org",
+      "attributes": [
+        { "organization_unit": "Domain Control Validated" }
+      ]
+    },
+    "subject_key_info": {
+      "algorithm": {
+        "id": 234,
+        "name": "RSA"
+      },
+      "key": {
+        "modulus": "base64encodedmodulus",
+        "exponent": 65537
+      }
+    },
+    "extensions": [
+      {
+        "id": 345,
+        "name": "Certificate Basic Constraints",
+        "is_ca": false
+      },
+      {
+        "id": 456,
+        "name": "Alt Names",
+        "alt_names": [
+          "*.tools.ietf.org",
+          "tools.ietf.org"
+        ]
+      }
+    ]
+  },
+  "signature_algorithm": {
+    "id": 123,
+    "name": "SHA1"
+  },
+  "signature": {
+    "value": "base64encodedsignature",
+    "is_valid": true,
+    "matches_domain": null
+  }
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extended_key_usage.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extended_key_usage.go
new file mode 100644
index 00000000..4b66fc52
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extended_key_usage.go
@@ -0,0 +1,679 @@
+// Created by extended_key_usage_gen; DO NOT EDIT
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
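DeserializeSCT and the ct types above are the only pieces of certificate-transparency-go that zcrypto carries, so decoding a raw SCT is a single call. A small sketch, assuming the raw bytes came from a TLS SignedCertificateTimestamp extension or an OCSP response (the helper name parseSCT is illustrative, not part of the package):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"github.com/zmap/zcrypto/x509/ct"
)

// parseSCT decodes one binary SCT with the DeserializeSCT helper above and
// prints the fields that identify the issuing log.
func parseSCT(raw []byte) (*ct.SignedCertificateTimestamp, error) {
	sct, err := ct.DeserializeSCT(bytes.NewReader(raw))
	if err != nil {
		return nil, err
	}
	fmt.Println("version:  ", sct.SCTVersion)
	fmt.Println("log id:   ", sct.LogID.Base64String())
	fmt.Println("timestamp:", sct.Timestamp) // milliseconds since the unix epoch

	// Note: MarshalJSON divides Timestamp by 1000, so the JSON form reports
	// seconds rather than the milliseconds held in the struct.
	b, _ := json.Marshal(sct)
	fmt.Println(string(b))
	return sct, nil
}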
+ +package x509 + +import ( + "encoding/asn1" +) + +const ( + OID_EKU_APPLE_CODE_SIGNING = "1.2.840.113635.100.4.1" + OID_EKU_APPLE_CODE_SIGNING_DEVELOPMENT = "1.2.840.113635.100.4.1.1" + OID_EKU_APPLE_SOFTWARE_UPDATE_SIGNING = "1.2.840.113635.100.4.1.2" + OID_EKU_APPLE_CODE_SIGNING_THIRD_PARTY = "1.2.840.113635.100.4.1.3" + OID_EKU_APPLE_RESOURCE_SIGNING = "1.2.840.113635.100.4.1.4" + OID_EKU_APPLE_ICHAT_SIGNING = "1.2.840.113635.100.4.2" + OID_EKU_APPLE_ICHAT_ENCRYPTION = "1.2.840.113635.100.4.3" + OID_EKU_APPLE_SYSTEM_IDENTITY = "1.2.840.113635.100.4.4" + OID_EKU_APPLE_CRYPTO_ENV = "1.2.840.113635.100.4.5" + OID_EKU_APPLE_CRYPTO_PRODUCTION_ENV = "1.2.840.113635.100.4.5.1" + OID_EKU_APPLE_CRYPTO_MAINTENANCE_ENV = "1.2.840.113635.100.4.5.2" + OID_EKU_APPLE_CRYPTO_TEST_ENV = "1.2.840.113635.100.4.5.3" + OID_EKU_APPLE_CRYPTO_DEVELOPMENT_ENV = "1.2.840.113635.100.4.5.4" + OID_EKU_APPLE_CRYPTO_QOS = "1.2.840.113635.100.4.6" + OID_EKU_APPLE_CRYPTO_TIER0_QOS = "1.2.840.113635.100.4.6.1" + OID_EKU_APPLE_CRYPTO_TIER1_QOS = "1.2.840.113635.100.4.6.2" + OID_EKU_APPLE_CRYPTO_TIER2_QOS = "1.2.840.113635.100.4.6.3" + OID_EKU_APPLE_CRYPTO_TIER3_QOS = "1.2.840.113635.100.4.6.4" + OID_EKU_MICROSOFT_CERT_TRUST_LIST_SIGNING = "1.3.6.1.4.1.311.10.3.1" + OID_EKU_MICROSOFT_QUALIFIED_SUBORDINATE = "1.3.6.1.4.1.311.10.3.10" + OID_EKU_MICROSOFT_KEY_RECOVERY_3 = "1.3.6.1.4.1.311.10.3.11" + OID_EKU_MICROSOFT_DOCUMENT_SIGNING = "1.3.6.1.4.1.311.10.3.12" + OID_EKU_MICROSOFT_LIFETIME_SIGNING = "1.3.6.1.4.1.311.10.3.13" + OID_EKU_MICROSOFT_MOBILE_DEVICE_SOFTWARE = "1.3.6.1.4.1.311.10.3.14" + OID_EKU_MICROSOFT_SMART_DISPLAY = "1.3.6.1.4.1.311.10.3.15" + OID_EKU_MICROSOFT_CSP_SIGNATURE = "1.3.6.1.4.1.311.10.3.16" + OID_EKU_MICROSOFT_TIMESTAMP_SIGNING = "1.3.6.1.4.1.311.10.3.2" + OID_EKU_MICROSOFT_SERVER_GATED_CRYPTO = "1.3.6.1.4.1.311.10.3.3" + OID_EKU_MICROSOFT_SGC_SERIALIZED = "1.3.6.1.4.1.311.10.3.3.1" + OID_EKU_MICROSOFT_ENCRYPTED_FILE_SYSTEM = "1.3.6.1.4.1.311.10.3.4" + OID_EKU_MICROSOFT_EFS_RECOVERY = "1.3.6.1.4.1.311.10.3.4.1" + OID_EKU_MICROSOFT_WHQL_CRYPTO = "1.3.6.1.4.1.311.10.3.5" + OID_EKU_MICROSOFT_NT5_CRYPTO = "1.3.6.1.4.1.311.10.3.6" + OID_EKU_MICROSOFT_OEM_WHQL_CRYPTO = "1.3.6.1.4.1.311.10.3.7" + OID_EKU_MICROSOFT_EMBEDDED_NT_CRYPTO = "1.3.6.1.4.1.311.10.3.8" + OID_EKU_MICROSOFT_ROOT_LIST_SIGNER = "1.3.6.1.4.1.311.10.3.9" + OID_EKU_MICROSOFT_DRM = "1.3.6.1.4.1.311.10.5.1" + OID_EKU_MICROSOFT_DRM_INDIVIDUALIZATION = "1.3.6.1.4.1.311.10.5.2" + OID_EKU_MICROSOFT_LICENSES = "1.3.6.1.4.1.311.10.5.3" + OID_EKU_MICROSOFT_LICENSE_SERVER = "1.3.6.1.4.1.311.10.5.4" + OID_EKU_MICROSOFT_ENROLLMENT_AGENT = "1.3.6.1.4.1.311.20.2.1" + OID_EKU_MICROSOFT_SMARTCARD_LOGON = "1.3.6.1.4.1.311.20.2.2" + OID_EKU_MICROSOFT_CA_EXCHANGE = "1.3.6.1.4.1.311.21.5" + OID_EKU_MICROSOFT_KEY_RECOVERY_21 = "1.3.6.1.4.1.311.21.6" + OID_EKU_MICROSOFT_SYSTEM_HEALTH = "1.3.6.1.4.1.311.47.1.1" + OID_EKU_MICROSOFT_SYSTEM_HEALTH_LOOPHOLE = "1.3.6.1.4.1.311.47.1.3" + OID_EKU_MICROSOFT_KERNEL_MODE_CODE_SIGNING = "1.3.6.1.4.1.311.61.1.1" + OID_EKU_SERVER_AUTH = "1.3.6.1.5.5.7.3.1" + OID_EKU_DVCS = "1.3.6.1.5.5.7.3.10" + OID_EKU_SBGP_CERT_AA_SERVICE_AUTH = "1.3.6.1.5.5.7.3.11" + OID_EKU_EAP_OVER_PPP = "1.3.6.1.5.5.7.3.13" + OID_EKU_EAP_OVER_LAN = "1.3.6.1.5.5.7.3.14" + OID_EKU_CLIENT_AUTH = "1.3.6.1.5.5.7.3.2" + OID_EKU_CODE_SIGNING = "1.3.6.1.5.5.7.3.3" + OID_EKU_EMAIL_PROTECTION = "1.3.6.1.5.5.7.3.4" + OID_EKU_IPSEC_END_SYSTEM = "1.3.6.1.5.5.7.3.5" + OID_EKU_IPSEC_TUNNEL = "1.3.6.1.5.5.7.3.6" + OID_EKU_IPSEC_USER = "1.3.6.1.5.5.7.3.7" + 
OID_EKU_TIME_STAMPING = "1.3.6.1.5.5.7.3.8" + OID_EKU_OCSP_SIGNING = "1.3.6.1.5.5.7.3.9" + OID_EKU_IPSEC_INTERMEDIATE_SYSTEM_USAGE = "1.3.6.1.5.5.8.2.2" + OID_EKU_NETSCAPE_SERVER_GATED_CRYPTO = "2.16.840.1.113730.4.1" + OID_EKU_ANY = "2.5.29.37.0" +) + +var ( + oidExtKeyUsageAppleCodeSigning = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 1} + oidExtKeyUsageAppleCodeSigningDevelopment = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 1, 1} + oidExtKeyUsageAppleSoftwareUpdateSigning = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 1, 2} + oidExtKeyUsageAppleCodeSigningThirdParty = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 1, 3} + oidExtKeyUsageAppleResourceSigning = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 1, 4} + oidExtKeyUsageAppleIchatSigning = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 2} + oidExtKeyUsageAppleIchatEncryption = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 3} + oidExtKeyUsageAppleSystemIdentity = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 4} + oidExtKeyUsageAppleCryptoEnv = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 5} + oidExtKeyUsageAppleCryptoProductionEnv = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 5, 1} + oidExtKeyUsageAppleCryptoMaintenanceEnv = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 5, 2} + oidExtKeyUsageAppleCryptoTestEnv = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 5, 3} + oidExtKeyUsageAppleCryptoDevelopmentEnv = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 5, 4} + oidExtKeyUsageAppleCryptoQos = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 6} + oidExtKeyUsageAppleCryptoTier0Qos = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 6, 1} + oidExtKeyUsageAppleCryptoTier1Qos = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 6, 2} + oidExtKeyUsageAppleCryptoTier2Qos = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 6, 3} + oidExtKeyUsageAppleCryptoTier3Qos = asn1.ObjectIdentifier{1, 2, 840, 113635, 100, 4, 6, 4} + oidExtKeyUsageMicrosoftCertTrustListSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 1} + oidExtKeyUsageMicrosoftQualifiedSubordinate = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 10} + oidExtKeyUsageMicrosoftKeyRecovery3 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 11} + oidExtKeyUsageMicrosoftDocumentSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 12} + oidExtKeyUsageMicrosoftLifetimeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 13} + oidExtKeyUsageMicrosoftMobileDeviceSoftware = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 14} + oidExtKeyUsageMicrosoftSmartDisplay = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 15} + oidExtKeyUsageMicrosoftCspSignature = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 16} + oidExtKeyUsageMicrosoftTimestampSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 2} + oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} + oidExtKeyUsageMicrosoftSgcSerialized = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3, 1} + oidExtKeyUsageMicrosoftEncryptedFileSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 4} + oidExtKeyUsageMicrosoftEfsRecovery = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 4, 1} + oidExtKeyUsageMicrosoftWhqlCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 5} + oidExtKeyUsageMicrosoftNt5Crypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 6} + oidExtKeyUsageMicrosoftOemWhqlCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 7} + 
oidExtKeyUsageMicrosoftEmbeddedNtCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 8} + oidExtKeyUsageMicrosoftRootListSigner = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 9} + oidExtKeyUsageMicrosoftDrm = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 5, 1} + oidExtKeyUsageMicrosoftDrmIndividualization = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 5, 2} + oidExtKeyUsageMicrosoftLicenses = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 5, 3} + oidExtKeyUsageMicrosoftLicenseServer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 5, 4} + oidExtKeyUsageMicrosoftEnrollmentAgent = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 20, 2, 1} + oidExtKeyUsageMicrosoftSmartcardLogon = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 20, 2, 2} + oidExtKeyUsageMicrosoftCaExchange = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 21, 5} + oidExtKeyUsageMicrosoftKeyRecovery21 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 21, 6} + oidExtKeyUsageMicrosoftSystemHealth = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 47, 1, 1} + oidExtKeyUsageMicrosoftSystemHealthLoophole = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 47, 1, 3} + oidExtKeyUsageMicrosoftKernelModeCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 61, 1, 1} + oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} + oidExtKeyUsageDvcs = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 10} + oidExtKeyUsageSbgpCertAaServiceAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 11} + oidExtKeyUsageEapOverPpp = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 13} + oidExtKeyUsageEapOverLan = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 14} + oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} + oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} + oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} + oidExtKeyUsageIpsecEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} + oidExtKeyUsageIpsecTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} + oidExtKeyUsageIpsecUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} + oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} + oidExtKeyUsageOcspSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} + oidExtKeyUsageIpsecIntermediateSystemUsage = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 8, 2, 2} + oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} + oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} +) + +const ( + ExtKeyUsageAppleCodeSigning ExtKeyUsage = iota + ExtKeyUsageAppleCodeSigningDevelopment + ExtKeyUsageAppleSoftwareUpdateSigning + ExtKeyUsageAppleCodeSigningThirdParty + ExtKeyUsageAppleResourceSigning + ExtKeyUsageAppleIchatSigning + ExtKeyUsageAppleIchatEncryption + ExtKeyUsageAppleSystemIdentity + ExtKeyUsageAppleCryptoEnv + ExtKeyUsageAppleCryptoProductionEnv + ExtKeyUsageAppleCryptoMaintenanceEnv + ExtKeyUsageAppleCryptoTestEnv + ExtKeyUsageAppleCryptoDevelopmentEnv + ExtKeyUsageAppleCryptoQos + ExtKeyUsageAppleCryptoTier0Qos + ExtKeyUsageAppleCryptoTier1Qos + ExtKeyUsageAppleCryptoTier2Qos + ExtKeyUsageAppleCryptoTier3Qos + ExtKeyUsageMicrosoftCertTrustListSigning + ExtKeyUsageMicrosoftQualifiedSubordinate + ExtKeyUsageMicrosoftKeyRecovery3 + ExtKeyUsageMicrosoftDocumentSigning + ExtKeyUsageMicrosoftLifetimeSigning + ExtKeyUsageMicrosoftMobileDeviceSoftware + ExtKeyUsageMicrosoftSmartDisplay + ExtKeyUsageMicrosoftCspSignature + ExtKeyUsageMicrosoftTimestampSigning + 
ExtKeyUsageMicrosoftServerGatedCrypto + ExtKeyUsageMicrosoftSgcSerialized + ExtKeyUsageMicrosoftEncryptedFileSystem + ExtKeyUsageMicrosoftEfsRecovery + ExtKeyUsageMicrosoftWhqlCrypto + ExtKeyUsageMicrosoftNt5Crypto + ExtKeyUsageMicrosoftOemWhqlCrypto + ExtKeyUsageMicrosoftEmbeddedNtCrypto + ExtKeyUsageMicrosoftRootListSigner + ExtKeyUsageMicrosoftDrm + ExtKeyUsageMicrosoftDrmIndividualization + ExtKeyUsageMicrosoftLicenses + ExtKeyUsageMicrosoftLicenseServer + ExtKeyUsageMicrosoftEnrollmentAgent + ExtKeyUsageMicrosoftSmartcardLogon + ExtKeyUsageMicrosoftCaExchange + ExtKeyUsageMicrosoftKeyRecovery21 + ExtKeyUsageMicrosoftSystemHealth + ExtKeyUsageMicrosoftSystemHealthLoophole + ExtKeyUsageMicrosoftKernelModeCodeSigning + ExtKeyUsageServerAuth + ExtKeyUsageDvcs + ExtKeyUsageSbgpCertAaServiceAuth + ExtKeyUsageEapOverPpp + ExtKeyUsageEapOverLan + ExtKeyUsageClientAuth + ExtKeyUsageCodeSigning + ExtKeyUsageEmailProtection + ExtKeyUsageIpsecEndSystem + ExtKeyUsageIpsecTunnel + ExtKeyUsageIpsecUser + ExtKeyUsageTimeStamping + ExtKeyUsageOcspSigning + ExtKeyUsageIpsecIntermediateSystemUsage + ExtKeyUsageNetscapeServerGatedCrypto + ExtKeyUsageAny +) + +type auxExtendedKeyUsage struct { + AppleCodeSigning bool `json:"apple_code_signing,omitempty" oid:"1.2.840.113635.100.4.1"` + AppleCodeSigningDevelopment bool `json:"apple_code_signing_development,omitempty" oid:"1.2.840.113635.100.4.1.1"` + AppleSoftwareUpdateSigning bool `json:"apple_software_update_signing,omitempty" oid:"1.2.840.113635.100.4.1.2"` + AppleCodeSigningThirdParty bool `json:"apple_code_signing_third_party,omitempty" oid:"1.2.840.113635.100.4.1.3"` + AppleResourceSigning bool `json:"apple_resource_signing,omitempty" oid:"1.2.840.113635.100.4.1.4"` + AppleIchatSigning bool `json:"apple_ichat_signing,omitempty" oid:"1.2.840.113635.100.4.2"` + AppleIchatEncryption bool `json:"apple_ichat_encryption,omitempty" oid:"1.2.840.113635.100.4.3"` + AppleSystemIdentity bool `json:"apple_system_identity,omitempty" oid:"1.2.840.113635.100.4.4"` + AppleCryptoEnv bool `json:"apple_crypto_env,omitempty" oid:"1.2.840.113635.100.4.5"` + AppleCryptoProductionEnv bool `json:"apple_crypto_production_env,omitempty" oid:"1.2.840.113635.100.4.5.1"` + AppleCryptoMaintenanceEnv bool `json:"apple_crypto_maintenance_env,omitempty" oid:"1.2.840.113635.100.4.5.2"` + AppleCryptoTestEnv bool `json:"apple_crypto_test_env,omitempty" oid:"1.2.840.113635.100.4.5.3"` + AppleCryptoDevelopmentEnv bool `json:"apple_crypto_development_env,omitempty" oid:"1.2.840.113635.100.4.5.4"` + AppleCryptoQos bool `json:"apple_crypto_qos,omitempty" oid:"1.2.840.113635.100.4.6"` + AppleCryptoTier0Qos bool `json:"apple_crypto_tier0_qos,omitempty" oid:"1.2.840.113635.100.4.6.1"` + AppleCryptoTier1Qos bool `json:"apple_crypto_tier1_qos,omitempty" oid:"1.2.840.113635.100.4.6.2"` + AppleCryptoTier2Qos bool `json:"apple_crypto_tier2_qos,omitempty" oid:"1.2.840.113635.100.4.6.3"` + AppleCryptoTier3Qos bool `json:"apple_crypto_tier3_qos,omitempty" oid:"1.2.840.113635.100.4.6.4"` + MicrosoftCertTrustListSigning bool `json:"microsoft_cert_trust_list_signing,omitempty" oid:"1.3.6.1.4.1.311.10.3.1"` + MicrosoftQualifiedSubordinate bool `json:"microsoft_qualified_subordinate,omitempty" oid:"1.3.6.1.4.1.311.10.3.10"` + MicrosoftKeyRecovery3 bool `json:"microsoft_key_recovery_3,omitempty" oid:"1.3.6.1.4.1.311.10.3.11"` + MicrosoftDocumentSigning bool `json:"microsoft_document_signing,omitempty" oid:"1.3.6.1.4.1.311.10.3.12"` + MicrosoftLifetimeSigning bool `json:"microsoft_lifetime_signing,omitempty" 
oid:"1.3.6.1.4.1.311.10.3.13"` + MicrosoftMobileDeviceSoftware bool `json:"microsoft_mobile_device_software,omitempty" oid:"1.3.6.1.4.1.311.10.3.14"` + MicrosoftSmartDisplay bool `json:"microsoft_smart_display,omitempty" oid:"1.3.6.1.4.1.311.10.3.15"` + MicrosoftCspSignature bool `json:"microsoft_csp_signature,omitempty" oid:"1.3.6.1.4.1.311.10.3.16"` + MicrosoftTimestampSigning bool `json:"microsoft_timestamp_signing,omitempty" oid:"1.3.6.1.4.1.311.10.3.2"` + MicrosoftServerGatedCrypto bool `json:"microsoft_server_gated_crypto,omitempty" oid:"1.3.6.1.4.1.311.10.3.3"` + MicrosoftSgcSerialized bool `json:"microsoft_sgc_serialized,omitempty" oid:"1.3.6.1.4.1.311.10.3.3.1"` + MicrosoftEncryptedFileSystem bool `json:"microsoft_encrypted_file_system,omitempty" oid:"1.3.6.1.4.1.311.10.3.4"` + MicrosoftEfsRecovery bool `json:"microsoft_efs_recovery,omitempty" oid:"1.3.6.1.4.1.311.10.3.4.1"` + MicrosoftWhqlCrypto bool `json:"microsoft_whql_crypto,omitempty" oid:"1.3.6.1.4.1.311.10.3.5"` + MicrosoftNt5Crypto bool `json:"microsoft_nt5_crypto,omitempty" oid:"1.3.6.1.4.1.311.10.3.6"` + MicrosoftOemWhqlCrypto bool `json:"microsoft_oem_whql_crypto,omitempty" oid:"1.3.6.1.4.1.311.10.3.7"` + MicrosoftEmbeddedNtCrypto bool `json:"microsoft_embedded_nt_crypto,omitempty" oid:"1.3.6.1.4.1.311.10.3.8"` + MicrosoftRootListSigner bool `json:"microsoft_root_list_signer,omitempty" oid:"1.3.6.1.4.1.311.10.3.9"` + MicrosoftDrm bool `json:"microsoft_drm,omitempty" oid:"1.3.6.1.4.1.311.10.5.1"` + MicrosoftDrmIndividualization bool `json:"microsoft_drm_individualization,omitempty" oid:"1.3.6.1.4.1.311.10.5.2"` + MicrosoftLicenses bool `json:"microsoft_licenses,omitempty" oid:"1.3.6.1.4.1.311.10.5.3"` + MicrosoftLicenseServer bool `json:"microsoft_license_server,omitempty" oid:"1.3.6.1.4.1.311.10.5.4"` + MicrosoftEnrollmentAgent bool `json:"microsoft_enrollment_agent,omitempty" oid:"1.3.6.1.4.1.311.20.2.1"` + MicrosoftSmartcardLogon bool `json:"microsoft_smartcard_logon,omitempty" oid:"1.3.6.1.4.1.311.20.2.2"` + MicrosoftCaExchange bool `json:"microsoft_ca_exchange,omitempty" oid:"1.3.6.1.4.1.311.21.5"` + MicrosoftKeyRecovery21 bool `json:"microsoft_key_recovery_21,omitempty" oid:"1.3.6.1.4.1.311.21.6"` + MicrosoftSystemHealth bool `json:"microsoft_system_health,omitempty" oid:"1.3.6.1.4.1.311.47.1.1"` + MicrosoftSystemHealthLoophole bool `json:"microsoft_system_health_loophole,omitempty" oid:"1.3.6.1.4.1.311.47.1.3"` + MicrosoftKernelModeCodeSigning bool `json:"microsoft_kernel_mode_code_signing,omitempty" oid:"1.3.6.1.4.1.311.61.1.1"` + ServerAuth bool `json:"server_auth,omitempty" oid:"1.3.6.1.5.5.7.3.1"` + Dvcs bool `json:"dvcs,omitempty" oid:"1.3.6.1.5.5.7.3.10"` + SbgpCertAaServiceAuth bool `json:"sbgp_cert_aa_service_auth,omitempty" oid:"1.3.6.1.5.5.7.3.11"` + EapOverPpp bool `json:"eap_over_ppp,omitempty" oid:"1.3.6.1.5.5.7.3.13"` + EapOverLan bool `json:"eap_over_lan,omitempty" oid:"1.3.6.1.5.5.7.3.14"` + ClientAuth bool `json:"client_auth,omitempty" oid:"1.3.6.1.5.5.7.3.2"` + CodeSigning bool `json:"code_signing,omitempty" oid:"1.3.6.1.5.5.7.3.3"` + EmailProtection bool `json:"email_protection,omitempty" oid:"1.3.6.1.5.5.7.3.4"` + IpsecEndSystem bool `json:"ipsec_end_system,omitempty" oid:"1.3.6.1.5.5.7.3.5"` + IpsecTunnel bool `json:"ipsec_tunnel,omitempty" oid:"1.3.6.1.5.5.7.3.6"` + IpsecUser bool `json:"ipsec_user,omitempty" oid:"1.3.6.1.5.5.7.3.7"` + TimeStamping bool `json:"time_stamping,omitempty" oid:"1.3.6.1.5.5.7.3.8"` + OcspSigning bool `json:"ocsp_signing,omitempty" oid:"1.3.6.1.5.5.7.3.9"` + 
IpsecIntermediateSystemUsage bool `json:"ipsec_intermediate_system_usage,omitempty" oid:"1.3.6.1.5.5.8.2.2"` + NetscapeServerGatedCrypto bool `json:"netscape_server_gated_crypto,omitempty" oid:"2.16.840.1.113730.4.1"` + Any bool `json:"any,omitempty" oid:"2.5.29.37.0"` + Unknown []string `json:"unknown,omitempty"` +} + +func (aux *auxExtendedKeyUsage) populateFromASN1(oid asn1.ObjectIdentifier) { + s := oid.String() + switch s { + case OID_EKU_APPLE_CODE_SIGNING: + aux.AppleCodeSigning = true + case OID_EKU_APPLE_CODE_SIGNING_DEVELOPMENT: + aux.AppleCodeSigningDevelopment = true + case OID_EKU_APPLE_SOFTWARE_UPDATE_SIGNING: + aux.AppleSoftwareUpdateSigning = true + case OID_EKU_APPLE_CODE_SIGNING_THIRD_PARTY: + aux.AppleCodeSigningThirdParty = true + case OID_EKU_APPLE_RESOURCE_SIGNING: + aux.AppleResourceSigning = true + case OID_EKU_APPLE_ICHAT_SIGNING: + aux.AppleIchatSigning = true + case OID_EKU_APPLE_ICHAT_ENCRYPTION: + aux.AppleIchatEncryption = true + case OID_EKU_APPLE_SYSTEM_IDENTITY: + aux.AppleSystemIdentity = true + case OID_EKU_APPLE_CRYPTO_ENV: + aux.AppleCryptoEnv = true + case OID_EKU_APPLE_CRYPTO_PRODUCTION_ENV: + aux.AppleCryptoProductionEnv = true + case OID_EKU_APPLE_CRYPTO_MAINTENANCE_ENV: + aux.AppleCryptoMaintenanceEnv = true + case OID_EKU_APPLE_CRYPTO_TEST_ENV: + aux.AppleCryptoTestEnv = true + case OID_EKU_APPLE_CRYPTO_DEVELOPMENT_ENV: + aux.AppleCryptoDevelopmentEnv = true + case OID_EKU_APPLE_CRYPTO_QOS: + aux.AppleCryptoQos = true + case OID_EKU_APPLE_CRYPTO_TIER0_QOS: + aux.AppleCryptoTier0Qos = true + case OID_EKU_APPLE_CRYPTO_TIER1_QOS: + aux.AppleCryptoTier1Qos = true + case OID_EKU_APPLE_CRYPTO_TIER2_QOS: + aux.AppleCryptoTier2Qos = true + case OID_EKU_APPLE_CRYPTO_TIER3_QOS: + aux.AppleCryptoTier3Qos = true + case OID_EKU_MICROSOFT_CERT_TRUST_LIST_SIGNING: + aux.MicrosoftCertTrustListSigning = true + case OID_EKU_MICROSOFT_QUALIFIED_SUBORDINATE: + aux.MicrosoftQualifiedSubordinate = true + case OID_EKU_MICROSOFT_KEY_RECOVERY_3: + aux.MicrosoftKeyRecovery3 = true + case OID_EKU_MICROSOFT_DOCUMENT_SIGNING: + aux.MicrosoftDocumentSigning = true + case OID_EKU_MICROSOFT_LIFETIME_SIGNING: + aux.MicrosoftLifetimeSigning = true + case OID_EKU_MICROSOFT_MOBILE_DEVICE_SOFTWARE: + aux.MicrosoftMobileDeviceSoftware = true + case OID_EKU_MICROSOFT_SMART_DISPLAY: + aux.MicrosoftSmartDisplay = true + case OID_EKU_MICROSOFT_CSP_SIGNATURE: + aux.MicrosoftCspSignature = true + case OID_EKU_MICROSOFT_TIMESTAMP_SIGNING: + aux.MicrosoftTimestampSigning = true + case OID_EKU_MICROSOFT_SERVER_GATED_CRYPTO: + aux.MicrosoftServerGatedCrypto = true + case OID_EKU_MICROSOFT_SGC_SERIALIZED: + aux.MicrosoftSgcSerialized = true + case OID_EKU_MICROSOFT_ENCRYPTED_FILE_SYSTEM: + aux.MicrosoftEncryptedFileSystem = true + case OID_EKU_MICROSOFT_EFS_RECOVERY: + aux.MicrosoftEfsRecovery = true + case OID_EKU_MICROSOFT_WHQL_CRYPTO: + aux.MicrosoftWhqlCrypto = true + case OID_EKU_MICROSOFT_NT5_CRYPTO: + aux.MicrosoftNt5Crypto = true + case OID_EKU_MICROSOFT_OEM_WHQL_CRYPTO: + aux.MicrosoftOemWhqlCrypto = true + case OID_EKU_MICROSOFT_EMBEDDED_NT_CRYPTO: + aux.MicrosoftEmbeddedNtCrypto = true + case OID_EKU_MICROSOFT_ROOT_LIST_SIGNER: + aux.MicrosoftRootListSigner = true + case OID_EKU_MICROSOFT_DRM: + aux.MicrosoftDrm = true + case OID_EKU_MICROSOFT_DRM_INDIVIDUALIZATION: + aux.MicrosoftDrmIndividualization = true + case OID_EKU_MICROSOFT_LICENSES: + aux.MicrosoftLicenses = true + case OID_EKU_MICROSOFT_LICENSE_SERVER: + aux.MicrosoftLicenseServer = true + case 
OID_EKU_MICROSOFT_ENROLLMENT_AGENT: + aux.MicrosoftEnrollmentAgent = true + case OID_EKU_MICROSOFT_SMARTCARD_LOGON: + aux.MicrosoftSmartcardLogon = true + case OID_EKU_MICROSOFT_CA_EXCHANGE: + aux.MicrosoftCaExchange = true + case OID_EKU_MICROSOFT_KEY_RECOVERY_21: + aux.MicrosoftKeyRecovery21 = true + case OID_EKU_MICROSOFT_SYSTEM_HEALTH: + aux.MicrosoftSystemHealth = true + case OID_EKU_MICROSOFT_SYSTEM_HEALTH_LOOPHOLE: + aux.MicrosoftSystemHealthLoophole = true + case OID_EKU_MICROSOFT_KERNEL_MODE_CODE_SIGNING: + aux.MicrosoftKernelModeCodeSigning = true + case OID_EKU_SERVER_AUTH: + aux.ServerAuth = true + case OID_EKU_DVCS: + aux.Dvcs = true + case OID_EKU_SBGP_CERT_AA_SERVICE_AUTH: + aux.SbgpCertAaServiceAuth = true + case OID_EKU_EAP_OVER_PPP: + aux.EapOverPpp = true + case OID_EKU_EAP_OVER_LAN: + aux.EapOverLan = true + case OID_EKU_CLIENT_AUTH: + aux.ClientAuth = true + case OID_EKU_CODE_SIGNING: + aux.CodeSigning = true + case OID_EKU_EMAIL_PROTECTION: + aux.EmailProtection = true + case OID_EKU_IPSEC_END_SYSTEM: + aux.IpsecEndSystem = true + case OID_EKU_IPSEC_TUNNEL: + aux.IpsecTunnel = true + case OID_EKU_IPSEC_USER: + aux.IpsecUser = true + case OID_EKU_TIME_STAMPING: + aux.TimeStamping = true + case OID_EKU_OCSP_SIGNING: + aux.OcspSigning = true + case OID_EKU_IPSEC_INTERMEDIATE_SYSTEM_USAGE: + aux.IpsecIntermediateSystemUsage = true + case OID_EKU_NETSCAPE_SERVER_GATED_CRYPTO: + aux.NetscapeServerGatedCrypto = true + case OID_EKU_ANY: + aux.Any = true + default: + } + return +} + +func (aux *auxExtendedKeyUsage) populateFromExtKeyUsage(eku ExtKeyUsage) { + switch eku { + case ExtKeyUsageAppleCodeSigning: + aux.AppleCodeSigning = true + case ExtKeyUsageAppleCodeSigningDevelopment: + aux.AppleCodeSigningDevelopment = true + case ExtKeyUsageAppleSoftwareUpdateSigning: + aux.AppleSoftwareUpdateSigning = true + case ExtKeyUsageAppleCodeSigningThirdParty: + aux.AppleCodeSigningThirdParty = true + case ExtKeyUsageAppleResourceSigning: + aux.AppleResourceSigning = true + case ExtKeyUsageAppleIchatSigning: + aux.AppleIchatSigning = true + case ExtKeyUsageAppleIchatEncryption: + aux.AppleIchatEncryption = true + case ExtKeyUsageAppleSystemIdentity: + aux.AppleSystemIdentity = true + case ExtKeyUsageAppleCryptoEnv: + aux.AppleCryptoEnv = true + case ExtKeyUsageAppleCryptoProductionEnv: + aux.AppleCryptoProductionEnv = true + case ExtKeyUsageAppleCryptoMaintenanceEnv: + aux.AppleCryptoMaintenanceEnv = true + case ExtKeyUsageAppleCryptoTestEnv: + aux.AppleCryptoTestEnv = true + case ExtKeyUsageAppleCryptoDevelopmentEnv: + aux.AppleCryptoDevelopmentEnv = true + case ExtKeyUsageAppleCryptoQos: + aux.AppleCryptoQos = true + case ExtKeyUsageAppleCryptoTier0Qos: + aux.AppleCryptoTier0Qos = true + case ExtKeyUsageAppleCryptoTier1Qos: + aux.AppleCryptoTier1Qos = true + case ExtKeyUsageAppleCryptoTier2Qos: + aux.AppleCryptoTier2Qos = true + case ExtKeyUsageAppleCryptoTier3Qos: + aux.AppleCryptoTier3Qos = true + case ExtKeyUsageMicrosoftCertTrustListSigning: + aux.MicrosoftCertTrustListSigning = true + case ExtKeyUsageMicrosoftQualifiedSubordinate: + aux.MicrosoftQualifiedSubordinate = true + case ExtKeyUsageMicrosoftKeyRecovery3: + aux.MicrosoftKeyRecovery3 = true + case ExtKeyUsageMicrosoftDocumentSigning: + aux.MicrosoftDocumentSigning = true + case ExtKeyUsageMicrosoftLifetimeSigning: + aux.MicrosoftLifetimeSigning = true + case ExtKeyUsageMicrosoftMobileDeviceSoftware: + aux.MicrosoftMobileDeviceSoftware = true + case ExtKeyUsageMicrosoftSmartDisplay: + aux.MicrosoftSmartDisplay = true + 
case ExtKeyUsageMicrosoftCspSignature: + aux.MicrosoftCspSignature = true + case ExtKeyUsageMicrosoftTimestampSigning: + aux.MicrosoftTimestampSigning = true + case ExtKeyUsageMicrosoftServerGatedCrypto: + aux.MicrosoftServerGatedCrypto = true + case ExtKeyUsageMicrosoftSgcSerialized: + aux.MicrosoftSgcSerialized = true + case ExtKeyUsageMicrosoftEncryptedFileSystem: + aux.MicrosoftEncryptedFileSystem = true + case ExtKeyUsageMicrosoftEfsRecovery: + aux.MicrosoftEfsRecovery = true + case ExtKeyUsageMicrosoftWhqlCrypto: + aux.MicrosoftWhqlCrypto = true + case ExtKeyUsageMicrosoftNt5Crypto: + aux.MicrosoftNt5Crypto = true + case ExtKeyUsageMicrosoftOemWhqlCrypto: + aux.MicrosoftOemWhqlCrypto = true + case ExtKeyUsageMicrosoftEmbeddedNtCrypto: + aux.MicrosoftEmbeddedNtCrypto = true + case ExtKeyUsageMicrosoftRootListSigner: + aux.MicrosoftRootListSigner = true + case ExtKeyUsageMicrosoftDrm: + aux.MicrosoftDrm = true + case ExtKeyUsageMicrosoftDrmIndividualization: + aux.MicrosoftDrmIndividualization = true + case ExtKeyUsageMicrosoftLicenses: + aux.MicrosoftLicenses = true + case ExtKeyUsageMicrosoftLicenseServer: + aux.MicrosoftLicenseServer = true + case ExtKeyUsageMicrosoftEnrollmentAgent: + aux.MicrosoftEnrollmentAgent = true + case ExtKeyUsageMicrosoftSmartcardLogon: + aux.MicrosoftSmartcardLogon = true + case ExtKeyUsageMicrosoftCaExchange: + aux.MicrosoftCaExchange = true + case ExtKeyUsageMicrosoftKeyRecovery21: + aux.MicrosoftKeyRecovery21 = true + case ExtKeyUsageMicrosoftSystemHealth: + aux.MicrosoftSystemHealth = true + case ExtKeyUsageMicrosoftSystemHealthLoophole: + aux.MicrosoftSystemHealthLoophole = true + case ExtKeyUsageMicrosoftKernelModeCodeSigning: + aux.MicrosoftKernelModeCodeSigning = true + case ExtKeyUsageServerAuth: + aux.ServerAuth = true + case ExtKeyUsageDvcs: + aux.Dvcs = true + case ExtKeyUsageSbgpCertAaServiceAuth: + aux.SbgpCertAaServiceAuth = true + case ExtKeyUsageEapOverPpp: + aux.EapOverPpp = true + case ExtKeyUsageEapOverLan: + aux.EapOverLan = true + case ExtKeyUsageClientAuth: + aux.ClientAuth = true + case ExtKeyUsageCodeSigning: + aux.CodeSigning = true + case ExtKeyUsageEmailProtection: + aux.EmailProtection = true + case ExtKeyUsageIpsecEndSystem: + aux.IpsecEndSystem = true + case ExtKeyUsageIpsecTunnel: + aux.IpsecTunnel = true + case ExtKeyUsageIpsecUser: + aux.IpsecUser = true + case ExtKeyUsageTimeStamping: + aux.TimeStamping = true + case ExtKeyUsageOcspSigning: + aux.OcspSigning = true + case ExtKeyUsageIpsecIntermediateSystemUsage: + aux.IpsecIntermediateSystemUsage = true + case ExtKeyUsageNetscapeServerGatedCrypto: + aux.NetscapeServerGatedCrypto = true + case ExtKeyUsageAny: + aux.Any = true + default: + } + return +} + +var ekuOIDs map[string]asn1.ObjectIdentifier + +var ekuConstants map[string]ExtKeyUsage + +func init() { + ekuOIDs = make(map[string]asn1.ObjectIdentifier) + ekuOIDs[OID_EKU_APPLE_CODE_SIGNING] = oidExtKeyUsageAppleCodeSigning + ekuOIDs[OID_EKU_APPLE_CODE_SIGNING_DEVELOPMENT] = oidExtKeyUsageAppleCodeSigningDevelopment + ekuOIDs[OID_EKU_APPLE_SOFTWARE_UPDATE_SIGNING] = oidExtKeyUsageAppleSoftwareUpdateSigning + ekuOIDs[OID_EKU_APPLE_CODE_SIGNING_THIRD_PARTY] = oidExtKeyUsageAppleCodeSigningThirdParty + ekuOIDs[OID_EKU_APPLE_RESOURCE_SIGNING] = oidExtKeyUsageAppleResourceSigning + ekuOIDs[OID_EKU_APPLE_ICHAT_SIGNING] = oidExtKeyUsageAppleIchatSigning + ekuOIDs[OID_EKU_APPLE_ICHAT_ENCRYPTION] = oidExtKeyUsageAppleIchatEncryption + ekuOIDs[OID_EKU_APPLE_SYSTEM_IDENTITY] = oidExtKeyUsageAppleSystemIdentity + 
ekuOIDs[OID_EKU_APPLE_CRYPTO_ENV] = oidExtKeyUsageAppleCryptoEnv + ekuOIDs[OID_EKU_APPLE_CRYPTO_PRODUCTION_ENV] = oidExtKeyUsageAppleCryptoProductionEnv + ekuOIDs[OID_EKU_APPLE_CRYPTO_MAINTENANCE_ENV] = oidExtKeyUsageAppleCryptoMaintenanceEnv + ekuOIDs[OID_EKU_APPLE_CRYPTO_TEST_ENV] = oidExtKeyUsageAppleCryptoTestEnv + ekuOIDs[OID_EKU_APPLE_CRYPTO_DEVELOPMENT_ENV] = oidExtKeyUsageAppleCryptoDevelopmentEnv + ekuOIDs[OID_EKU_APPLE_CRYPTO_QOS] = oidExtKeyUsageAppleCryptoQos + ekuOIDs[OID_EKU_APPLE_CRYPTO_TIER0_QOS] = oidExtKeyUsageAppleCryptoTier0Qos + ekuOIDs[OID_EKU_APPLE_CRYPTO_TIER1_QOS] = oidExtKeyUsageAppleCryptoTier1Qos + ekuOIDs[OID_EKU_APPLE_CRYPTO_TIER2_QOS] = oidExtKeyUsageAppleCryptoTier2Qos + ekuOIDs[OID_EKU_APPLE_CRYPTO_TIER3_QOS] = oidExtKeyUsageAppleCryptoTier3Qos + ekuOIDs[OID_EKU_MICROSOFT_CERT_TRUST_LIST_SIGNING] = oidExtKeyUsageMicrosoftCertTrustListSigning + ekuOIDs[OID_EKU_MICROSOFT_QUALIFIED_SUBORDINATE] = oidExtKeyUsageMicrosoftQualifiedSubordinate + ekuOIDs[OID_EKU_MICROSOFT_KEY_RECOVERY_3] = oidExtKeyUsageMicrosoftKeyRecovery3 + ekuOIDs[OID_EKU_MICROSOFT_DOCUMENT_SIGNING] = oidExtKeyUsageMicrosoftDocumentSigning + ekuOIDs[OID_EKU_MICROSOFT_LIFETIME_SIGNING] = oidExtKeyUsageMicrosoftLifetimeSigning + ekuOIDs[OID_EKU_MICROSOFT_MOBILE_DEVICE_SOFTWARE] = oidExtKeyUsageMicrosoftMobileDeviceSoftware + ekuOIDs[OID_EKU_MICROSOFT_SMART_DISPLAY] = oidExtKeyUsageMicrosoftSmartDisplay + ekuOIDs[OID_EKU_MICROSOFT_CSP_SIGNATURE] = oidExtKeyUsageMicrosoftCspSignature + ekuOIDs[OID_EKU_MICROSOFT_TIMESTAMP_SIGNING] = oidExtKeyUsageMicrosoftTimestampSigning + ekuOIDs[OID_EKU_MICROSOFT_SERVER_GATED_CRYPTO] = oidExtKeyUsageMicrosoftServerGatedCrypto + ekuOIDs[OID_EKU_MICROSOFT_SGC_SERIALIZED] = oidExtKeyUsageMicrosoftSgcSerialized + ekuOIDs[OID_EKU_MICROSOFT_ENCRYPTED_FILE_SYSTEM] = oidExtKeyUsageMicrosoftEncryptedFileSystem + ekuOIDs[OID_EKU_MICROSOFT_EFS_RECOVERY] = oidExtKeyUsageMicrosoftEfsRecovery + ekuOIDs[OID_EKU_MICROSOFT_WHQL_CRYPTO] = oidExtKeyUsageMicrosoftWhqlCrypto + ekuOIDs[OID_EKU_MICROSOFT_NT5_CRYPTO] = oidExtKeyUsageMicrosoftNt5Crypto + ekuOIDs[OID_EKU_MICROSOFT_OEM_WHQL_CRYPTO] = oidExtKeyUsageMicrosoftOemWhqlCrypto + ekuOIDs[OID_EKU_MICROSOFT_EMBEDDED_NT_CRYPTO] = oidExtKeyUsageMicrosoftEmbeddedNtCrypto + ekuOIDs[OID_EKU_MICROSOFT_ROOT_LIST_SIGNER] = oidExtKeyUsageMicrosoftRootListSigner + ekuOIDs[OID_EKU_MICROSOFT_DRM] = oidExtKeyUsageMicrosoftDrm + ekuOIDs[OID_EKU_MICROSOFT_DRM_INDIVIDUALIZATION] = oidExtKeyUsageMicrosoftDrmIndividualization + ekuOIDs[OID_EKU_MICROSOFT_LICENSES] = oidExtKeyUsageMicrosoftLicenses + ekuOIDs[OID_EKU_MICROSOFT_LICENSE_SERVER] = oidExtKeyUsageMicrosoftLicenseServer + ekuOIDs[OID_EKU_MICROSOFT_ENROLLMENT_AGENT] = oidExtKeyUsageMicrosoftEnrollmentAgent + ekuOIDs[OID_EKU_MICROSOFT_SMARTCARD_LOGON] = oidExtKeyUsageMicrosoftSmartcardLogon + ekuOIDs[OID_EKU_MICROSOFT_CA_EXCHANGE] = oidExtKeyUsageMicrosoftCaExchange + ekuOIDs[OID_EKU_MICROSOFT_KEY_RECOVERY_21] = oidExtKeyUsageMicrosoftKeyRecovery21 + ekuOIDs[OID_EKU_MICROSOFT_SYSTEM_HEALTH] = oidExtKeyUsageMicrosoftSystemHealth + ekuOIDs[OID_EKU_MICROSOFT_SYSTEM_HEALTH_LOOPHOLE] = oidExtKeyUsageMicrosoftSystemHealthLoophole + ekuOIDs[OID_EKU_MICROSOFT_KERNEL_MODE_CODE_SIGNING] = oidExtKeyUsageMicrosoftKernelModeCodeSigning + ekuOIDs[OID_EKU_SERVER_AUTH] = oidExtKeyUsageServerAuth + ekuOIDs[OID_EKU_DVCS] = oidExtKeyUsageDvcs + ekuOIDs[OID_EKU_SBGP_CERT_AA_SERVICE_AUTH] = oidExtKeyUsageSbgpCertAaServiceAuth + ekuOIDs[OID_EKU_EAP_OVER_PPP] = oidExtKeyUsageEapOverPpp + ekuOIDs[OID_EKU_EAP_OVER_LAN] 
= oidExtKeyUsageEapOverLan + ekuOIDs[OID_EKU_CLIENT_AUTH] = oidExtKeyUsageClientAuth + ekuOIDs[OID_EKU_CODE_SIGNING] = oidExtKeyUsageCodeSigning + ekuOIDs[OID_EKU_EMAIL_PROTECTION] = oidExtKeyUsageEmailProtection + ekuOIDs[OID_EKU_IPSEC_END_SYSTEM] = oidExtKeyUsageIpsecEndSystem + ekuOIDs[OID_EKU_IPSEC_TUNNEL] = oidExtKeyUsageIpsecTunnel + ekuOIDs[OID_EKU_IPSEC_USER] = oidExtKeyUsageIpsecUser + ekuOIDs[OID_EKU_TIME_STAMPING] = oidExtKeyUsageTimeStamping + ekuOIDs[OID_EKU_OCSP_SIGNING] = oidExtKeyUsageOcspSigning + ekuOIDs[OID_EKU_IPSEC_INTERMEDIATE_SYSTEM_USAGE] = oidExtKeyUsageIpsecIntermediateSystemUsage + ekuOIDs[OID_EKU_NETSCAPE_SERVER_GATED_CRYPTO] = oidExtKeyUsageNetscapeServerGatedCrypto + ekuOIDs[OID_EKU_ANY] = oidExtKeyUsageAny + + ekuConstants = make(map[string]ExtKeyUsage) + ekuConstants[OID_EKU_APPLE_CODE_SIGNING] = ExtKeyUsageAppleCodeSigning + ekuConstants[OID_EKU_APPLE_CODE_SIGNING_DEVELOPMENT] = ExtKeyUsageAppleCodeSigningDevelopment + ekuConstants[OID_EKU_APPLE_SOFTWARE_UPDATE_SIGNING] = ExtKeyUsageAppleSoftwareUpdateSigning + ekuConstants[OID_EKU_APPLE_CODE_SIGNING_THIRD_PARTY] = ExtKeyUsageAppleCodeSigningThirdParty + ekuConstants[OID_EKU_APPLE_RESOURCE_SIGNING] = ExtKeyUsageAppleResourceSigning + ekuConstants[OID_EKU_APPLE_ICHAT_SIGNING] = ExtKeyUsageAppleIchatSigning + ekuConstants[OID_EKU_APPLE_ICHAT_ENCRYPTION] = ExtKeyUsageAppleIchatEncryption + ekuConstants[OID_EKU_APPLE_SYSTEM_IDENTITY] = ExtKeyUsageAppleSystemIdentity + ekuConstants[OID_EKU_APPLE_CRYPTO_ENV] = ExtKeyUsageAppleCryptoEnv + ekuConstants[OID_EKU_APPLE_CRYPTO_PRODUCTION_ENV] = ExtKeyUsageAppleCryptoProductionEnv + ekuConstants[OID_EKU_APPLE_CRYPTO_MAINTENANCE_ENV] = ExtKeyUsageAppleCryptoMaintenanceEnv + ekuConstants[OID_EKU_APPLE_CRYPTO_TEST_ENV] = ExtKeyUsageAppleCryptoTestEnv + ekuConstants[OID_EKU_APPLE_CRYPTO_DEVELOPMENT_ENV] = ExtKeyUsageAppleCryptoDevelopmentEnv + ekuConstants[OID_EKU_APPLE_CRYPTO_QOS] = ExtKeyUsageAppleCryptoQos + ekuConstants[OID_EKU_APPLE_CRYPTO_TIER0_QOS] = ExtKeyUsageAppleCryptoTier0Qos + ekuConstants[OID_EKU_APPLE_CRYPTO_TIER1_QOS] = ExtKeyUsageAppleCryptoTier1Qos + ekuConstants[OID_EKU_APPLE_CRYPTO_TIER2_QOS] = ExtKeyUsageAppleCryptoTier2Qos + ekuConstants[OID_EKU_APPLE_CRYPTO_TIER3_QOS] = ExtKeyUsageAppleCryptoTier3Qos + ekuConstants[OID_EKU_MICROSOFT_CERT_TRUST_LIST_SIGNING] = ExtKeyUsageMicrosoftCertTrustListSigning + ekuConstants[OID_EKU_MICROSOFT_QUALIFIED_SUBORDINATE] = ExtKeyUsageMicrosoftQualifiedSubordinate + ekuConstants[OID_EKU_MICROSOFT_KEY_RECOVERY_3] = ExtKeyUsageMicrosoftKeyRecovery3 + ekuConstants[OID_EKU_MICROSOFT_DOCUMENT_SIGNING] = ExtKeyUsageMicrosoftDocumentSigning + ekuConstants[OID_EKU_MICROSOFT_LIFETIME_SIGNING] = ExtKeyUsageMicrosoftLifetimeSigning + ekuConstants[OID_EKU_MICROSOFT_MOBILE_DEVICE_SOFTWARE] = ExtKeyUsageMicrosoftMobileDeviceSoftware + ekuConstants[OID_EKU_MICROSOFT_SMART_DISPLAY] = ExtKeyUsageMicrosoftSmartDisplay + ekuConstants[OID_EKU_MICROSOFT_CSP_SIGNATURE] = ExtKeyUsageMicrosoftCspSignature + ekuConstants[OID_EKU_MICROSOFT_TIMESTAMP_SIGNING] = ExtKeyUsageMicrosoftTimestampSigning + ekuConstants[OID_EKU_MICROSOFT_SERVER_GATED_CRYPTO] = ExtKeyUsageMicrosoftServerGatedCrypto + ekuConstants[OID_EKU_MICROSOFT_SGC_SERIALIZED] = ExtKeyUsageMicrosoftSgcSerialized + ekuConstants[OID_EKU_MICROSOFT_ENCRYPTED_FILE_SYSTEM] = ExtKeyUsageMicrosoftEncryptedFileSystem + ekuConstants[OID_EKU_MICROSOFT_EFS_RECOVERY] = ExtKeyUsageMicrosoftEfsRecovery + ekuConstants[OID_EKU_MICROSOFT_WHQL_CRYPTO] = ExtKeyUsageMicrosoftWhqlCrypto + 
ekuConstants[OID_EKU_MICROSOFT_NT5_CRYPTO] = ExtKeyUsageMicrosoftNt5Crypto + ekuConstants[OID_EKU_MICROSOFT_OEM_WHQL_CRYPTO] = ExtKeyUsageMicrosoftOemWhqlCrypto + ekuConstants[OID_EKU_MICROSOFT_EMBEDDED_NT_CRYPTO] = ExtKeyUsageMicrosoftEmbeddedNtCrypto + ekuConstants[OID_EKU_MICROSOFT_ROOT_LIST_SIGNER] = ExtKeyUsageMicrosoftRootListSigner + ekuConstants[OID_EKU_MICROSOFT_DRM] = ExtKeyUsageMicrosoftDrm + ekuConstants[OID_EKU_MICROSOFT_DRM_INDIVIDUALIZATION] = ExtKeyUsageMicrosoftDrmIndividualization + ekuConstants[OID_EKU_MICROSOFT_LICENSES] = ExtKeyUsageMicrosoftLicenses + ekuConstants[OID_EKU_MICROSOFT_LICENSE_SERVER] = ExtKeyUsageMicrosoftLicenseServer + ekuConstants[OID_EKU_MICROSOFT_ENROLLMENT_AGENT] = ExtKeyUsageMicrosoftEnrollmentAgent + ekuConstants[OID_EKU_MICROSOFT_SMARTCARD_LOGON] = ExtKeyUsageMicrosoftSmartcardLogon + ekuConstants[OID_EKU_MICROSOFT_CA_EXCHANGE] = ExtKeyUsageMicrosoftCaExchange + ekuConstants[OID_EKU_MICROSOFT_KEY_RECOVERY_21] = ExtKeyUsageMicrosoftKeyRecovery21 + ekuConstants[OID_EKU_MICROSOFT_SYSTEM_HEALTH] = ExtKeyUsageMicrosoftSystemHealth + ekuConstants[OID_EKU_MICROSOFT_SYSTEM_HEALTH_LOOPHOLE] = ExtKeyUsageMicrosoftSystemHealthLoophole + ekuConstants[OID_EKU_MICROSOFT_KERNEL_MODE_CODE_SIGNING] = ExtKeyUsageMicrosoftKernelModeCodeSigning + ekuConstants[OID_EKU_SERVER_AUTH] = ExtKeyUsageServerAuth + ekuConstants[OID_EKU_DVCS] = ExtKeyUsageDvcs + ekuConstants[OID_EKU_SBGP_CERT_AA_SERVICE_AUTH] = ExtKeyUsageSbgpCertAaServiceAuth + ekuConstants[OID_EKU_EAP_OVER_PPP] = ExtKeyUsageEapOverPpp + ekuConstants[OID_EKU_EAP_OVER_LAN] = ExtKeyUsageEapOverLan + ekuConstants[OID_EKU_CLIENT_AUTH] = ExtKeyUsageClientAuth + ekuConstants[OID_EKU_CODE_SIGNING] = ExtKeyUsageCodeSigning + ekuConstants[OID_EKU_EMAIL_PROTECTION] = ExtKeyUsageEmailProtection + ekuConstants[OID_EKU_IPSEC_END_SYSTEM] = ExtKeyUsageIpsecEndSystem + ekuConstants[OID_EKU_IPSEC_TUNNEL] = ExtKeyUsageIpsecTunnel + ekuConstants[OID_EKU_IPSEC_USER] = ExtKeyUsageIpsecUser + ekuConstants[OID_EKU_TIME_STAMPING] = ExtKeyUsageTimeStamping + ekuConstants[OID_EKU_OCSP_SIGNING] = ExtKeyUsageOcspSigning + ekuConstants[OID_EKU_IPSEC_INTERMEDIATE_SYSTEM_USAGE] = ExtKeyUsageIpsecIntermediateSystemUsage + ekuConstants[OID_EKU_NETSCAPE_SERVER_GATED_CRYPTO] = ExtKeyUsageNetscapeServerGatedCrypto + ekuConstants[OID_EKU_ANY] = ExtKeyUsageAny +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extended_key_usage_schema.sh b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extended_key_usage_schema.sh new file mode 100755 index 00000000..b8811911 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extended_key_usage_schema.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -e + +# TODO: This should really be generated by Go code as a subrecord, but +# importing in Python is hard. This is quick and dirty. 
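+# Concretely: it scrapes the json struct tags out of extended_key_usage.go +# and prints a zschema-style SubRecord with one Boolean() per tag, mapping +# the "unknown" field to ListOf(OID()).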
+ +FIELDS=$(\ + cat extended_key_usage.go |\ + grep json |\ + cut -d ':' -f 2 |\ + sed 's|,omitempty||g' |\ + tr -d '`') +echo "extended_key_usage = SubRecord({" +for f in $FIELDS; do + if [ $f == "\"unknown\"" ]; then + echo " $f: ListOf(OID())" + else + echo " $f: Boolean()," + fi +done +echo "})" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extensions.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extensions.go new file mode 100644 index 00000000..91c21d4c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/extensions.go @@ -0,0 +1,760 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "encoding/asn1" + "encoding/hex" + "encoding/json" + "net" + "strconv" + "strings" + + "github.com/zmap/zcrypto/x509/ct" + "github.com/zmap/zcrypto/x509/pkix" +) + +var ( + oidExtKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 15} + oidExtBasicConstraints = asn1.ObjectIdentifier{2, 5, 29, 19} + oidExtSubjectAltName = asn1.ObjectIdentifier{2, 5, 29, 17} + oidExtIssuerAltName = asn1.ObjectIdentifier{2, 5, 29, 18} + oidExtNameConstraints = asn1.ObjectIdentifier{2, 5, 29, 30} + oidCRLDistributionPoints = asn1.ObjectIdentifier{2, 5, 29, 31} + oidExtAuthKeyId = asn1.ObjectIdentifier{2, 5, 29, 35} + oidExtSubjectKeyId = asn1.ObjectIdentifier{2, 5, 29, 14} + oidExtExtendedKeyUsage = asn1.ObjectIdentifier{2, 5, 29, 37} + oidExtCertificatePolicy = asn1.ObjectIdentifier{2, 5, 29, 32} + + oidExtAuthorityInfoAccess = oidExtensionAuthorityInfoAccess + oidExtensionCTPrecertificatePoison = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} + oidExtSignedCertificateTimestampList = oidExtensionSignedCertificateTimestampList +) + +type CertificateExtensions struct { + KeyUsage KeyUsage `json:"key_usage,omitempty"` + BasicConstraints *BasicConstraints `json:"basic_constraints,omitempty"` + SubjectAltName *GeneralNames `json:"subject_alt_name,omitempty"` + IssuerAltName *GeneralNames `json:"issuer_alt_name,omitempty"` + NameConstraints *NameConstraints `json:"name_constraints,omitempty"` + CRLDistributionPoints CRLDistributionPoints `json:"crl_distribution_points,omitempty"` + AuthKeyID SubjAuthKeyId `json:"authority_key_id,omitempty"` + SubjectKeyID SubjAuthKeyId `json:"subject_key_id,omitempty"` + ExtendedKeyUsage *ExtendedKeyUsageExtension `json:"extended_key_usage,omitempty"` + CertificatePolicies *CertificatePoliciesData `json:"certificate_policies,omitempty"` + AuthorityInfoAccess *AuthorityInfoAccess `json:"authority_info_access,omitempty"` + IsPrecert IsPrecert `json:"ct_poison,omitempty"` + SignedCertificateTimestampList []*ct.SignedCertificateTimestamp `json:"signed_certificate_timestamps,omitempty"` + TorServiceDescriptors []*TorServiceDescriptorHash `json:"tor_service_descriptors,omitempty"` +} + +type UnknownCertificateExtensions []pkix.Extension + +type IsPrecert bool + +type BasicConstraints struct { + IsCA bool `json:"is_ca"` + MaxPathLen *int `json:"max_path_len,omitempty"` +} + +type NoticeReference struct { + Organization string `json:"organization,omitempty"` + NoticeNumbers NoticeNumber `json:"notice_numbers,omitempty"` +} + +type UserNoticeData struct { + ExplicitText string `json:"explicit_text,omitempty"` + NoticeReference []NoticeReference `json:"notice_reference,omitempty"` +} + +type CertificatePoliciesJSON struct { + PolicyIdentifier string 
`json:"id,omitempty"` + CPSUri []string `json:"cps,omitempty"` + UserNotice []UserNoticeData `json:"user_notice,omitempty"` +} + +type CertificatePolicies []CertificatePoliciesJSON + +type CertificatePoliciesData struct { + PolicyIdentifiers []asn1.ObjectIdentifier + QualifierId [][]asn1.ObjectIdentifier + CPSUri [][]string + ExplicitTexts [][]string + NoticeRefOrganization [][]string + NoticeRefNumbers [][]NoticeNumber +} + +func (cp *CertificatePoliciesData) MarshalJSON() ([]byte, error) { + policies := CertificatePolicies{} + for idx, oid := range cp.PolicyIdentifiers { + cpsJSON := CertificatePoliciesJSON{} + cpsJSON.PolicyIdentifier = oid.String() + for _, uri := range cp.CPSUri[idx] { + cpsJSON.CPSUri = append(cpsJSON.CPSUri, uri) + } + + for idx2, explicit_text := range cp.ExplicitTexts[idx] { + uNoticeData := UserNoticeData{} + uNoticeData.ExplicitText = explicit_text + noticeRef := NoticeReference{} + if len(cp.NoticeRefOrganization[idx]) > 0 { + organization := cp.NoticeRefOrganization[idx][idx2] + noticeRef.Organization = organization + noticeRef.NoticeNumbers = cp.NoticeRefNumbers[idx][idx2] + uNoticeData.NoticeReference = append(uNoticeData.NoticeReference, noticeRef) + } + cpsJSON.UserNotice = append(cpsJSON.UserNotice, uNoticeData) + } + + policies = append(policies, cpsJSON) + } + return json.Marshal(policies) +} + +// GeneralNames corresponds to an X.509 GeneralName defined in +// Section 4.2.1.6 of RFC 5280. +// +// GeneralName ::= CHOICE { +// otherName [0] AnotherName, +// rfc822Name [1] IA5String, +// dNSName [2] IA5String, +// x400Address [3] ORAddress, +// directoryName [4] Name, +// ediPartyName [5] EDIPartyName, +// uniformResourceIdentifier [6] IA5String, +// iPAddress [7] OCTET STRING, +// registeredID [8] OBJECT IDENTIFIER } +type GeneralNames struct { + DirectoryNames []pkix.Name + DNSNames []string + EDIPartyNames []pkix.EDIPartyName + EmailAddresses []string + IPAddresses []net.IP + OtherNames []pkix.OtherName + RegisteredIDs []asn1.ObjectIdentifier + URIs []string +} + +type jsonGeneralNames struct { + DirectoryNames []pkix.Name `json:"directory_names,omitempty"` + DNSNames []string `json:"dns_names,omitempty"` + EDIPartyNames []pkix.EDIPartyName `json:"edi_party_names,omitempty"` + EmailAddresses []string `json:"email_addresses,omitempty"` + IPAddresses []net.IP `json:"ip_addresses,omitempty"` + OtherNames []pkix.OtherName `json:"other_names,omitempty"` + RegisteredIDs []string `json:"registered_ids,omitempty"` + URIs []string `json:"uniform_resource_identifiers,omitempty"` +} + +func (gn *GeneralNames) MarshalJSON() ([]byte, error) { + jsan := jsonGeneralNames{ + DirectoryNames: gn.DirectoryNames, + DNSNames: gn.DNSNames, + EDIPartyNames: gn.EDIPartyNames, + EmailAddresses: gn.EmailAddresses, + IPAddresses: gn.IPAddresses, + OtherNames: gn.OtherNames, + RegisteredIDs: make([]string, 0, len(gn.RegisteredIDs)), + URIs: gn.URIs, + } + for _, id := range gn.RegisteredIDs { + jsan.RegisteredIDs = append(jsan.RegisteredIDs, id.String()) + } + return json.Marshal(jsan) +} + +func (gn *GeneralNames) UnmarshalJSON(b []byte) error { + var jsan jsonGeneralNames + err := json.Unmarshal(b, &jsan) + if err != nil { + return err + } + + gn.DirectoryNames = jsan.DirectoryNames + gn.DNSNames = jsan.DNSNames + gn.EDIPartyNames = jsan.EDIPartyNames + gn.EmailAddresses = jsan.EmailAddresses + gn.IPAddresses = jsan.IPAddresses + gn.OtherNames = jsan.OtherNames + gn.RegisteredIDs = make([]asn1.ObjectIdentifier, len(jsan.RegisteredIDs)) + gn.URIs = jsan.URIs + + for i, rID := range 
jsan.RegisteredIDs { + arcs := strings.Split(rID, ".") + oid := make(asn1.ObjectIdentifier, len(arcs)) + + for j, s := range arcs { + tmp, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + oid[j] = int(tmp) + } + gn.RegisteredIDs[i] = oid + } + return nil +} + +// TODO: Handle excluded names + +type NameConstraints struct { + Critical bool `json:"critical"` + + PermittedDNSNames []GeneralSubtreeString + PermittedEmailAddresses []GeneralSubtreeString + PermittedIPAddresses []GeneralSubtreeIP + PermittedDirectoryNames []GeneralSubtreeName + PermittedEdiPartyNames []GeneralSubtreeEdi + PermittedRegisteredIDs []GeneralSubtreeOid + + ExcludedEmailAddresses []GeneralSubtreeString + ExcludedDNSNames []GeneralSubtreeString + ExcludedIPAddresses []GeneralSubtreeIP + ExcludedDirectoryNames []GeneralSubtreeName + ExcludedEdiPartyNames []GeneralSubtreeEdi + ExcludedRegisteredIDs []GeneralSubtreeOid +} + +type NameConstraintsJSON struct { + Critical bool `json:"critical"` + + PermittedDNSNames []string `json:"permitted_names,omitempty"` + PermittedEmailAddresses []string `json:"permitted_email_addresses,omitempty"` + PermittedIPAddresses []GeneralSubtreeIP `json:"permitted_ip_addresses,omitempty"` + PermittedDirectoryNames []pkix.Name `json:"permitted_directory_names,omitempty"` + PermittedEdiPartyNames []pkix.EDIPartyName `json:"permitted_edi_party_names,omitempty"` + PermittedRegisteredIDs []string `json:"permitted_registred_id,omitempty"` + + ExcludedDNSNames []string `json:"excluded_names,omitempty"` + ExcludedEmailAddresses []string `json:"excluded_email_addresses,omitempty"` + ExcludedIPAddresses []GeneralSubtreeIP `json:"excluded_ip_addresses,omitempty"` + ExcludedDirectoryNames []pkix.Name `json:"excluded_directory_names,omitempty"` + ExcludedEdiPartyNames []pkix.EDIPartyName `json:"excluded_edi_party_names,omitempty"` + ExcludedRegisteredIDs []string `json:"excluded_registred_id,omitempty"` +} + +func (nc *NameConstraints) UnmarshalJSON(b []byte) error { + var ncJson NameConstraintsJSON + err := json.Unmarshal(b, &ncJson) + if err != nil { + return err + } + for _, dns := range ncJson.PermittedDNSNames { + nc.PermittedDNSNames = append(nc.PermittedDNSNames, GeneralSubtreeString{Data: dns}) + } + for _, email := range ncJson.PermittedEmailAddresses { + nc.PermittedEmailAddresses = append(nc.PermittedEmailAddresses, GeneralSubtreeString{Data: email}) + } + for _, constraint := range ncJson.PermittedIPAddresses { + nc.PermittedIPAddresses = append(nc.PermittedIPAddresses, constraint) + } + for _, directory := range ncJson.PermittedDirectoryNames { + nc.PermittedDirectoryNames = append(nc.PermittedDirectoryNames, GeneralSubtreeName{Data: directory}) + } + for _, edi := range ncJson.PermittedEdiPartyNames { + nc.PermittedEdiPartyNames = append(nc.PermittedEdiPartyNames, GeneralSubtreeEdi{Data: edi}) + } + for _, id := range ncJson.PermittedRegisteredIDs { + arcs := strings.Split(id, ".") + oid := make(asn1.ObjectIdentifier, len(arcs)) + + for j, s := range arcs { + tmp, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + oid[j] = int(tmp) + } + nc.PermittedRegisteredIDs = append(nc.PermittedRegisteredIDs, GeneralSubtreeOid{Data: oid}) + } + + for _, dns := range ncJson.ExcludedDNSNames { + nc.ExcludedDNSNames = append(nc.ExcludedDNSNames, GeneralSubtreeString{Data: dns}) + } + for _, email := range ncJson.ExcludedEmailAddresses { + nc.ExcludedEmailAddresses = append(nc.ExcludedEmailAddresses, GeneralSubtreeString{Data: email}) + } + for _, constraint := 
range ncJson.ExcludedIPAddresses { + nc.ExcludedIPAddresses = append(nc.ExcludedIPAddresses, constraint) + } + for _, directory := range ncJson.ExcludedDirectoryNames { + nc.ExcludedDirectoryNames = append(nc.ExcludedDirectoryNames, GeneralSubtreeName{Data: directory}) + } + for _, edi := range ncJson.ExcludedEdiPartyNames { + nc.ExcludedEdiPartyNames = append(nc.ExcludedEdiPartyNames, GeneralSubtreeEdi{Data: edi}) + } + for _, id := range ncJson.ExcludedRegisteredIDs { + arcs := strings.Split(id, ".") + oid := make(asn1.ObjectIdentifier, len(arcs)) + + for j, s := range arcs { + tmp, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return err + } + oid[j] = int(tmp) + } + nc.ExcludedRegisteredIDs = append(nc.ExcludedRegisteredIDs, GeneralSubtreeOid{Data: oid}) + } + return nil +} + +func (nc NameConstraints) MarshalJSON() ([]byte, error) { + var out NameConstraintsJSON + for _, dns := range nc.PermittedDNSNames { + out.PermittedDNSNames = append(out.PermittedDNSNames, dns.Data) + } + for _, email := range nc.PermittedEmailAddresses { + out.PermittedEmailAddresses = append(out.PermittedEmailAddresses, email.Data) + } + out.PermittedIPAddresses = nc.PermittedIPAddresses + for _, directory := range nc.PermittedDirectoryNames { + out.PermittedDirectoryNames = append(out.PermittedDirectoryNames, directory.Data) + } + for _, edi := range nc.PermittedEdiPartyNames { + out.PermittedEdiPartyNames = append(out.PermittedEdiPartyNames, edi.Data) + } + for _, id := range nc.PermittedRegisteredIDs { + out.PermittedRegisteredIDs = append(out.PermittedRegisteredIDs, id.Data.String()) + } + + for _, dns := range nc.ExcludedDNSNames { + out.ExcludedDNSNames = append(out.ExcludedDNSNames, dns.Data) + } + for _, email := range nc.ExcludedEmailAddresses { + out.ExcludedEmailAddresses = append(out.ExcludedEmailAddresses, email.Data) + } + for _, ip := range nc.ExcludedIPAddresses { + out.ExcludedIPAddresses = append(out.ExcludedIPAddresses, ip) + } + for _, directory := range nc.ExcludedDirectoryNames { + out.ExcludedDirectoryNames = append(out.ExcludedDirectoryNames, directory.Data) + } + for _, edi := range nc.ExcludedEdiPartyNames { + out.ExcludedEdiPartyNames = append(out.ExcludedEdiPartyNames, edi.Data) + } + for _, id := range nc.ExcludedRegisteredIDs { + out.ExcludedRegisteredIDs = append(out.ExcludedRegisteredIDs, id.Data.String()) + } + return json.Marshal(out) +} + +type CRLDistributionPoints []string + +type SubjAuthKeyId []byte + +func (kid SubjAuthKeyId) MarshalJSON() ([]byte, error) { + enc := hex.EncodeToString(kid) + return json.Marshal(enc) +} + +type ExtendedKeyUsage []ExtKeyUsage + +type ExtendedKeyUsageExtension struct { + Known ExtendedKeyUsage + Unknown []asn1.ObjectIdentifier +} + +// MarshalJSON implements the json.Marshaler interface. The output is a struct of +// bools, with an additional `Unknown` field listing any unrecognized OIDs. +func (e *ExtendedKeyUsageExtension) MarshalJSON() ([]byte, error) { + aux := new(auxExtendedKeyUsage) + for _, e := range e.Known { + aux.populateFromExtKeyUsage(e) + } + for _, oid := range e.Unknown { + aux.Unknown = append(aux.Unknown, oid.String()) + } + return json.Marshal(aux) +} + +func (e *ExtendedKeyUsageExtension) UnmarshalJSON(b []byte) error { + aux := new(auxExtendedKeyUsage) + if err := json.Unmarshal(b, aux); err != nil { + return err + } + // TODO: Generate the reverse functions.
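+ // A full implementation would invert populateFromExtKeyUsage: set the + // matching ExtKeyUsage constants in e.Known from the boolean fields of + // aux, and parse aux.Unknown back into asn1.ObjectIdentifier values.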
+ return nil +} + +//go:generate go run extended_key_usage_gen.go + +// The string functions for CertValidationLevel are auto-generated by running +// `go generate` in the package directory +//go:generate stringer -type=CertValidationLevel -output=generated_certvalidationlevel_string.go +type CertValidationLevel int + +const ( + UnknownValidationLevel CertValidationLevel = 0 + DV CertValidationLevel = 1 + OV CertValidationLevel = 2 + EV CertValidationLevel = 3 +) + +func (c *CertValidationLevel) MarshalJSON() ([]byte, error) { + if *c == UnknownValidationLevel || *c < 0 || *c > EV { + return json.Marshal("unknown") + } + return json.Marshal(c.String()) +} + +// TODO: All of the validation-level maps should be auto-generated from +// https://github.com/zmap/constants. + +// ExtendedValidationOIDs contains the UNION of Chromium +// (https://chromium.googlesource.com/chromium/src/net/+/master/cert/ev_root_ca_metadata.cc) +// and Firefox +// (http://hg.mozilla.org/mozilla-central/file/tip/security/certverifier/ExtendedValidation.cpp) +// EV OID lists +var ExtendedValidationOIDs = map[string]interface{}{ + // CA/Browser Forum EV OID standard + // https://cabforum.org/object-registry/ + "2.23.140.1.1": nil, + // CA/Browser Forum EV Code Signing + "2.23.140.1.3": nil, + // CA/Browser Forum .onion EV Certs + "2.23.140.1.31": nil, + // AC Camerfirma S.A. Chambers of Commerce Root - 2008 + // https://www.camerfirma.com + // AC Camerfirma uses the last two arcs to track how the private key + // is managed - the effective verification policy is the same. + "1.3.6.1.4.1.17326.10.14.2.1.2": nil, + "1.3.6.1.4.1.17326.10.14.2.2.2": nil, + // AC Camerfirma S.A. Global Chambersign Root - 2008 + // https://server2.camerfirma.com:8082 + // AC Camerfirma uses the last two arcs to track how the private key + // is managed - the effective verification policy is the same.
+ "1.3.6.1.4.1.17326.10.8.12.1.2": nil, + "1.3.6.1.4.1.17326.10.8.12.2.2": nil, + // Actalis Authentication Root CA + // https://ssltest-a.actalis.it:8443 + "1.3.159.1.17.1": nil, + // AffirmTrust Commercial + // https://commercial.affirmtrust.com/ + "1.3.6.1.4.1.34697.2.1": nil, + // AffirmTrust Networking + // https://networking.affirmtrust.com:4431 + "1.3.6.1.4.1.34697.2.2": nil, + // AffirmTrust Premium + // https://premium.affirmtrust.com:4432/ + "1.3.6.1.4.1.34697.2.3": nil, + // AffirmTrust Premium ECC + // https://premiumecc.affirmtrust.com:4433/ + "1.3.6.1.4.1.34697.2.4": nil, + // Autoridad de Certificacion Firmaprofesional CIF A62634068 + // https://publifirma.firmaprofesional.com/ + "1.3.6.1.4.1.13177.10.1.3.10": nil, + // Buypass Class 3 CA 1 + // https://valid.evident.ca13.ssl.buypass.no/ + "2.16.578.1.26.1.3.3": nil, + // Certification Authority of WoSign + // CA 沃通根证书 + // https://root2evtest.wosign.com/ + "1.3.6.1.4.1.36305.2": nil, + // CertPlus Class 2 Primary CA (KEYNECTIS) + // https://www.keynectis.com/ + "1.3.6.1.4.1.22234.2.5.2.3.1": nil, + // Certum Trusted Network CA + // https://juice.certum.pl/ + "1.2.616.1.113527.2.5.1.1": nil, + // China Internet Network Information Center EV Certificates Root + // https://evdemo.cnnic.cn/ + "1.3.6.1.4.1.29836.1.10": nil, + // COMODO Certification Authority & USERTrust RSA Certification Authority & UTN-USERFirst-Hardware & AddTrust External CA Root + // https://secure.comodo.com/ + // https://usertrustrsacertificationauthority-ev.comodoca.com/ + // https://addtrustexternalcaroot-ev.comodoca.com + "1.3.6.1.4.1.6449.1.2.1.5.1": nil, + // Cybertrust Global Root & GTE CyberTrust Global Root & Baltimore CyberTrust Root + // https://evup.cybertrust.ne.jp/ctj-ev-upgrader/evseal.gif + // https://www.cybertrust.ne.jp/ + // https://secure.omniroot.com/repository/ + "1.3.6.1.4.1.6334.1.100.1": nil, + // DigiCert High Assurance EV Root CA + // https://www.digicert.com + "2.16.840.1.114412.2.1": nil, + // D-TRUST Root Class 3 CA 2 EV 2009 + // https://certdemo-ev-valid.ssl.d-trust.net/ + "1.3.6.1.4.1.4788.2.202.1": nil, + // Entrust.net Secure Server Certification Authority + // https://www.entrust.net/ + "2.16.840.1.114028.10.1.2": nil, + // E-Tugra Certification Authority + // https://sslev.e-tugra.com.tr + "2.16.792.3.0.4.1.1.4": nil, + // GeoTrust Primary Certification Authority + // https://www.geotrust.com/ + "1.3.6.1.4.1.14370.1.6": nil, + // GlobalSign Root CA - R2 + // https://www.globalsign.com/ + "1.3.6.1.4.1.4146.1.1": nil, + // Go Daddy Class 2 Certification Authority & Go Daddy Root Certificate Authority - G2 + // https://www.godaddy.com/ + // https://valid.gdig2.catest.godaddy.com/ + "2.16.840.1.114413.1.7.23.3": nil, + // Izenpe.com - SHA256 root + // The first OID is for businesses and the second for government entities. + // These are the test sites, respectively: + // https://servicios.izenpe.com + // https://servicios1.izenpe.com + // Windows XP finds this, SHA1, root instead. The policy OIDs are the same + // as for the SHA256 root, above. 
+ "1.3.6.1.4.1.14777.6.1.1": nil, + "1.3.6.1.4.1.14777.6.1.2": nil, + // Network Solutions Certificate Authority + // https://www.networksolutions.com/website-packages/index.jsp + "1.3.6.1.4.1.782.1.2.1.8.1": nil, + // QuoVadis Root CA 2 + // https://www.quovadis.bm/ + "1.3.6.1.4.1.8024.0.2.100.1.2": nil, + // SecureTrust CA, SecureTrust Corporation + // https://www.securetrust.com + // https://www.trustwave.com/ + "2.16.840.1.114404.1.1.2.4.1": nil, + // Security Communication RootCA1 + // https://www.secomtrust.net/contact/form.html + "1.2.392.200091.100.721.1": nil, + // Staat der Nederlanden EV Root CA + // https://pkioevssl-v.quovadisglobal.com/ + "2.16.528.1.1003.1.2.7": nil, + // StartCom Certification Authority + // https://www.startssl.com/ + "1.3.6.1.4.1.23223.1.1.1": nil, + // Starfield Class 2 Certification Authority + // https://www.starfieldtech.com/ + "2.16.840.1.114414.1.7.23.3": nil, + // Starfield Services Root Certificate Authority - G2 + // https://valid.sfsg2.catest.starfieldtech.com/ + "2.16.840.1.114414.1.7.24.3": nil, + // SwissSign Gold CA - G2 + // https://testevg2.swisssign.net/ + "2.16.756.1.89.1.2.1.1": nil, + // Swisscom Root EV CA 2 + // https://test-quarz-ev-ca-2.pre.swissdigicert.ch + "2.16.756.1.83.21.0": nil, + // thawte Primary Root CA + // https://www.thawte.com/ + "2.16.840.1.113733.1.7.48.1": nil, + // TWCA Global Root CA + // https://evssldemo3.twca.com.tw/index.html + "1.3.6.1.4.1.40869.1.1.22.3": nil, + // T-TeleSec GlobalRoot Class 3 + // http://www.telesec.de/ / https://root-class3.test.telesec.de/ + "1.3.6.1.4.1.7879.13.24.1": nil, + // VeriSign Class 3 Public Primary Certification Authority - G5 + // https://www.verisign.com/ + "2.16.840.1.113733.1.7.23.6": nil, + // Wells Fargo WellsSecure Public Root Certificate Authority + // https://nerys.wellsfargo.com/test.html + "2.16.840.1.114171.500.9": nil, + // CN=CFCA EV ROOT,O=China Financial Certification Authority,C=CN + // https://www.cfca.com.cn/ + "2.16.156.112554.3": nil, + // CN=OISTE WISeKey Global Root GB CA,OU=OISTE Foundation Endorsed,O=WISeKey,C=CH + // https://www.wisekey.com/repository/cacertificates/ + "2.16.756.5.14.7.4.8": nil, + // CN=TÜRKTRUST Elektronik Sertifika Hizmet Sağlayıcısı H6,O=TÜRKTRUST Bilgi İletişim ve Bilişim Güvenliği Hizmetleri A...,L=Ankara,C=TR + // https://www.turktrust.com.tr/ + "2.16.792.3.0.3.1.1.5": nil, +} + +// OrganizationValidationOIDs contains CA specific OV OIDs from +// https://cabforum.org/object-registry/ +var OrganizationValidationOIDs = map[string]interface{}{ + // CA/Browser Forum OV OID standard + // https://cabforum.org/object-registry/ + "2.23.140.1.2.2": nil, + // CA/Browser Forum individually validated + "2.23.140.1.2.3": nil, + // Digicert + "2.16.840.1.114412.1.1": nil, + // D-Trust + "1.3.6.1.4.1.4788.2.200.1": nil, + // GoDaddy + "2.16.840.1.114413.1.7.23.2": nil, + // Logius + "2.16.528.1.1003.1.2.5.6": nil, + // QuoVadis + "1.3.6.1.4.1.8024.0.2.100.1.1": nil, + // Starfield + "2.16.840.1.114414.1.7.23.2": nil, + // TurkTrust + "2.16.792.3.0.3.1.1.2": nil, +} + +// DomainValidationOIDs contain OIDs that identify DV certs. 
+var DomainValidationOIDs = map[string]interface{}{ + // Globalsign + "1.3.6.1.4.1.4146.1.10.10": nil, + // Let's Encrypt + "1.3.6.1.4.1.44947.1.1.1": nil, + // Comodo (eNom) + "1.3.6.1.4.1.6449.1.2.2.10": nil, + // Comodo (WoTrust) + "1.3.6.1.4.1.6449.1.2.2.15": nil, + // Comodo (RBC SOFT) + "1.3.6.1.4.1.6449.1.2.2.16": nil, + // Comodo (RegisterFly) + "1.3.6.1.4.1.6449.1.2.2.17": nil, + // Comodo (Central Security Patrols) + "1.3.6.1.4.1.6449.1.2.2.18": nil, + // Comodo (eBiz Networks) + "1.3.6.1.4.1.6449.1.2.2.19": nil, + // Comodo (OptimumSSL) + "1.3.6.1.4.1.6449.1.2.2.21": nil, + // Comodo (WoSign) + "1.3.6.1.4.1.6449.1.2.2.22": nil, + // Comodo (Register.com) + "1.3.6.1.4.1.6449.1.2.2.24": nil, + // Comodo (The Code Project) + "1.3.6.1.4.1.6449.1.2.2.25": nil, + // Comodo (Gandi) + "1.3.6.1.4.1.6449.1.2.2.26": nil, + // Comodo (GlobeSSL) + "1.3.6.1.4.1.6449.1.2.2.27": nil, + // Comodo (DreamHost) + "1.3.6.1.4.1.6449.1.2.2.28": nil, + // Comodo (TERENA) + "1.3.6.1.4.1.6449.1.2.2.29": nil, + // Comodo (GlobalSSL) + "1.3.6.1.4.1.6449.1.2.2.31": nil, + // Comodo (IceWarp) + "1.3.6.1.4.1.6449.1.2.2.35": nil, + // Comodo (Dotname Korea) + "1.3.6.1.4.1.6449.1.2.2.37": nil, + // Comodo (TrustSign) + "1.3.6.1.4.1.6449.1.2.2.38": nil, + // Comodo (Formidable) + "1.3.6.1.4.1.6449.1.2.2.39": nil, + // Comodo (SSL Blindado) + "1.3.6.1.4.1.6449.1.2.2.40": nil, + // Comodo (Dreamscape Networks) + "1.3.6.1.4.1.6449.1.2.2.41": nil, + // Comodo (K Software) + "1.3.6.1.4.1.6449.1.2.2.42": nil, + // Comodo (FBS) + "1.3.6.1.4.1.6449.1.2.2.44": nil, + // Comodo (ReliaSite) + "1.3.6.1.4.1.6449.1.2.2.45": nil, + // Comodo (CertAssure) + "1.3.6.1.4.1.6449.1.2.2.47": nil, + // Comodo (TrustAsia) + "1.3.6.1.4.1.6449.1.2.2.49": nil, + // Comodo (SecureCore) + "1.3.6.1.4.1.6449.1.2.2.50": nil, + // Comodo (Western Digital) + "1.3.6.1.4.1.6449.1.2.2.51": nil, + // Comodo (cPanel) + "1.3.6.1.4.1.6449.1.2.2.52": nil, + // Comodo (BlackCert) + "1.3.6.1.4.1.6449.1.2.2.53": nil, + // Comodo (KeyNet Systems) + "1.3.6.1.4.1.6449.1.2.2.54": nil, + // Comodo + "1.3.6.1.4.1.6449.1.2.2.7": nil, + // Comodo (CSC) + "1.3.6.1.4.1.6449.1.2.2.8": nil, + // Digicert + "2.16.840.1.114412.1.2": nil, + // GoDaddy + "2.16.840.1.114413.1.7.23.1": nil, + // Starfield + "2.16.840.1.114414.1.7.23.1": nil, + // CA/B Forum + "2.23.140.1.2.1": nil, +} + +// TODO pull out other types +type AuthorityInfoAccess struct { + OCSPServer []string `json:"ocsp_urls,omitempty"` + IssuingCertificateURL []string `json:"issuer_urls,omitempty"` +} + +func (c *Certificate) jsonifyExtensions() (*CertificateExtensions, UnknownCertificateExtensions) { + exts := new(CertificateExtensions) + unk := make([]pkix.Extension, 0, 2) + for _, e := range c.Extensions { + if e.Id.Equal(oidExtKeyUsage) { + exts.KeyUsage = c.KeyUsage + } else if e.Id.Equal(oidExtBasicConstraints) { + exts.BasicConstraints = new(BasicConstraints) + exts.BasicConstraints.IsCA = c.IsCA + if c.MaxPathLen > 0 || c.MaxPathLenZero { + exts.BasicConstraints.MaxPathLen = new(int) + *exts.BasicConstraints.MaxPathLen = c.MaxPathLen + } + } else if e.Id.Equal(oidExtSubjectAltName) { + exts.SubjectAltName = new(GeneralNames) + exts.SubjectAltName.DirectoryNames = c.DirectoryNames + exts.SubjectAltName.DNSNames = c.DNSNames + exts.SubjectAltName.EDIPartyNames = c.EDIPartyNames + exts.SubjectAltName.EmailAddresses = c.EmailAddresses + exts.SubjectAltName.IPAddresses = c.IPAddresses + exts.SubjectAltName.OtherNames = c.OtherNames + exts.SubjectAltName.RegisteredIDs = c.RegisteredIDs + 
exts.SubjectAltName.URIs = c.URIs + } else if e.Id.Equal(oidExtIssuerAltName) { + exts.IssuerAltName = new(GeneralNames) + exts.IssuerAltName.DirectoryNames = c.IANDirectoryNames + exts.IssuerAltName.DNSNames = c.IANDNSNames + exts.IssuerAltName.EDIPartyNames = c.IANEDIPartyNames + exts.IssuerAltName.EmailAddresses = c.IANEmailAddresses + exts.IssuerAltName.IPAddresses = c.IANIPAddresses + exts.IssuerAltName.OtherNames = c.IANOtherNames + exts.IssuerAltName.RegisteredIDs = c.IANRegisteredIDs + exts.IssuerAltName.URIs = c.IANURIs + } else if e.Id.Equal(oidExtNameConstraints) { + exts.NameConstraints = new(NameConstraints) + exts.NameConstraints.Critical = c.NameConstraintsCritical + + exts.NameConstraints.PermittedDNSNames = c.PermittedDNSNames + exts.NameConstraints.PermittedEmailAddresses = c.PermittedEmailAddresses + exts.NameConstraints.PermittedIPAddresses = c.PermittedIPAddresses + exts.NameConstraints.PermittedDirectoryNames = c.PermittedDirectoryNames + exts.NameConstraints.PermittedEdiPartyNames = c.PermittedEdiPartyNames + exts.NameConstraints.PermittedRegisteredIDs = c.PermittedRegisteredIDs + + exts.NameConstraints.ExcludedEmailAddresses = c.ExcludedEmailAddresses + exts.NameConstraints.ExcludedDNSNames = c.ExcludedDNSNames + exts.NameConstraints.ExcludedIPAddresses = c.ExcludedIPAddresses + exts.NameConstraints.ExcludedDirectoryNames = c.ExcludedDirectoryNames + exts.NameConstraints.ExcludedEdiPartyNames = c.ExcludedEdiPartyNames + exts.NameConstraints.ExcludedRegisteredIDs = c.ExcludedRegisteredIDs + } else if e.Id.Equal(oidCRLDistributionPoints) { + exts.CRLDistributionPoints = c.CRLDistributionPoints + } else if e.Id.Equal(oidExtAuthKeyId) { + exts.AuthKeyID = c.AuthorityKeyId + } else if e.Id.Equal(oidExtExtendedKeyUsage) { + exts.ExtendedKeyUsage = new(ExtendedKeyUsageExtension) + exts.ExtendedKeyUsage.Known = c.ExtKeyUsage + exts.ExtendedKeyUsage.Unknown = c.UnknownExtKeyUsage + } else if e.Id.Equal(oidExtCertificatePolicy) { + exts.CertificatePolicies = new(CertificatePoliciesData) + exts.CertificatePolicies.PolicyIdentifiers = c.PolicyIdentifiers + exts.CertificatePolicies.NoticeRefNumbers = c.NoticeRefNumbers + exts.CertificatePolicies.NoticeRefOrganization = c.ParsedNoticeRefOrganization + exts.CertificatePolicies.ExplicitTexts = c.ParsedExplicitTexts + exts.CertificatePolicies.QualifierId = c.QualifierId + exts.CertificatePolicies.CPSUri = c.CPSuri + + } else if e.Id.Equal(oidExtAuthorityInfoAccess) { + exts.AuthorityInfoAccess = new(AuthorityInfoAccess) + exts.AuthorityInfoAccess.OCSPServer = c.OCSPServer + exts.AuthorityInfoAccess.IssuingCertificateURL = c.IssuingCertificateURL + } else if e.Id.Equal(oidExtSubjectKeyId) { + exts.SubjectKeyID = c.SubjectKeyId + } else if e.Id.Equal(oidExtSignedCertificateTimestampList) { + exts.SignedCertificateTimestampList = c.SignedCertificateTimestampList + } else if e.Id.Equal(oidExtensionCTPrecertificatePoison) { + exts.IsPrecert = true + } else if e.Id.Equal(oidBRTorServiceDescriptor) { + exts.TorServiceDescriptors = c.TorServiceDescriptors + } else { + // Unknown extension + unk = append(unk, e) + } + } + return exts, unk +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/fingerprint.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/fingerprint.go new file mode 100644 index 00000000..e62a7015 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/fingerprint.go @@ -0,0 +1,61 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x509
+
+import (
+    "bytes"
+    "crypto/md5"
+    "crypto/sha1"
+    "crypto/sha256"
+    "crypto/sha512"
+    "encoding/hex"
+    "encoding/json"
+)
+
+// CertificateFingerprint represents a digest/fingerprint of some data. It can
+// easily be encoded to hex and JSON (as a hex string).
+type CertificateFingerprint []byte
+
+// MD5Fingerprint creates a fingerprint of data using the MD5 hash algorithm.
+func MD5Fingerprint(data []byte) CertificateFingerprint {
+    sum := md5.Sum(data)
+    return sum[:]
+}
+
+// SHA1Fingerprint creates a fingerprint of data using the SHA1 hash algorithm.
+func SHA1Fingerprint(data []byte) CertificateFingerprint {
+    sum := sha1.Sum(data)
+    return sum[:]
+}
+
+// SHA256Fingerprint creates a fingerprint of data using the SHA256 hash
+// algorithm.
+func SHA256Fingerprint(data []byte) CertificateFingerprint {
+    sum := sha256.Sum256(data)
+    return sum[:]
+}
+
+// SHA512Fingerprint creates a fingerprint of data using the SHA512 hash
+// algorithm.
+func SHA512Fingerprint(data []byte) CertificateFingerprint {
+    sum := sha512.Sum512(data)
+    return sum[:]
+}
+
+// Equal returns true if the fingerprints are bytewise-equal.
+func (f CertificateFingerprint) Equal(other CertificateFingerprint) bool {
+    return bytes.Equal(f, other)
+}
+
+// Hex returns the given fingerprint encoded as a hex string.
+func (f CertificateFingerprint) Hex() string {
+    return hex.EncodeToString(f)
+}
+
+// MarshalJSON implements the json.Marshaler interface, and marshals the
+// fingerprint as a hex string.
+func (f *CertificateFingerprint) MarshalJSON() ([]byte, error) {
+    return json.Marshal(f.Hex())
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/generated_certvalidationlevel_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/generated_certvalidationlevel_string.go
new file mode 100644
index 00000000..23cd32a1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/generated_certvalidationlevel_string.go
@@ -0,0 +1,16 @@
+// Code generated by "stringer -type=CertValidationLevel -output=generated_certvalidationlevel_string.go"; DO NOT EDIT.
+
+package x509
+
+import "strconv"
+
+const _CertValidationLevel_name = "UnknownValidationLevelDVOVEV"
+
+var _CertValidationLevel_index = [...]uint8{0, 22, 24, 26, 28}
+
+func (i CertValidationLevel) String() string {
+    if i < 0 || i >= CertValidationLevel(len(_CertValidationLevel_index)-1) {
+        return "CertValidationLevel(" + strconv.FormatInt(int64(i), 10) + ")"
+    }
+    return _CertValidationLevel_name[_CertValidationLevel_index[i]:_CertValidationLevel_index[i+1]]
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/json.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/json.go
new file mode 100644
index 00000000..44477995
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/json.go
@@ -0,0 +1,645 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
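For reference, the CertificateFingerprint helpers above are thin wrappers over the standard-library hash packages, and the pointer receiver on MarshalJSON is what makes a fingerprint serialize as a quoted hex string rather than base64. A minimal usage sketch (illustrative only, not part of the vendored diff; `certDER` is assumed to hold a DER-encoded certificate):

    fp := x509.SHA256Fingerprint(certDER)             // 32-byte SHA256 digest of the raw bytes
    fmt.Println(fp.Hex())                             // lowercase hex via hex.EncodeToString
    encoded, _ := json.Marshal(&fp)                   // pointer receiver: encodes as a quoted hex string
    same := fp.Equal(x509.SHA256Fingerprint(certDER)) // bytewise comparison; true here
    _, _ = encoded, same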
+
+package x509
+
+import (
+    "crypto/dsa"
+    "crypto/ecdsa"
+    "crypto/rsa"
+    "encoding/asn1"
+    "encoding/json"
+    "errors"
+    "net"
+    "sort"
+
+    "strings"
+    "time"
+
+    jsonKeys "github.com/zmap/zcrypto/json"
+    "github.com/zmap/zcrypto/util"
+    "github.com/zmap/zcrypto/x509/pkix"
+)
+
+var kMinTime, kMaxTime time.Time
+
+func init() {
+    var err error
+    kMinTime, err = time.Parse(time.RFC3339, "1970-01-01T00:00:00Z")
+    if err != nil {
+        panic(err)
+    }
+    kMaxTime, err = time.Parse(time.RFC3339, "9999-12-31T23:59:59Z")
+    if err != nil {
+        panic(err)
+    }
+}
+
+type auxKeyUsage struct {
+    DigitalSignature  bool   `json:"digital_signature,omitempty"`
+    ContentCommitment bool   `json:"content_commitment,omitempty"`
+    KeyEncipherment   bool   `json:"key_encipherment,omitempty"`
+    DataEncipherment  bool   `json:"data_encipherment,omitempty"`
+    KeyAgreement      bool   `json:"key_agreement,omitempty"`
+    CertificateSign   bool   `json:"certificate_sign,omitempty"`
+    CRLSign           bool   `json:"crl_sign,omitempty"`
+    EncipherOnly      bool   `json:"encipher_only,omitempty"`
+    DecipherOnly      bool   `json:"decipher_only,omitempty"`
+    Value             uint32 `json:"value"`
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (k KeyUsage) MarshalJSON() ([]byte, error) {
+    var enc auxKeyUsage
+    enc.Value = uint32(k)
+    if k&KeyUsageDigitalSignature > 0 {
+        enc.DigitalSignature = true
+    }
+    if k&KeyUsageContentCommitment > 0 {
+        enc.ContentCommitment = true
+    }
+    if k&KeyUsageKeyEncipherment > 0 {
+        enc.KeyEncipherment = true
+    }
+    if k&KeyUsageDataEncipherment > 0 {
+        enc.DataEncipherment = true
+    }
+    if k&KeyUsageKeyAgreement > 0 {
+        enc.KeyAgreement = true
+    }
+    if k&KeyUsageCertSign > 0 {
+        enc.CertificateSign = true
+    }
+    if k&KeyUsageCRLSign > 0 {
+        enc.CRLSign = true
+    }
+    if k&KeyUsageEncipherOnly > 0 {
+        enc.EncipherOnly = true
+    }
+    if k&KeyUsageDecipherOnly > 0 {
+        enc.DecipherOnly = true
+    }
+    return json.Marshal(&enc)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (k *KeyUsage) UnmarshalJSON(b []byte) error {
+    var aux auxKeyUsage
+    if err := json.Unmarshal(b, &aux); err != nil {
+        return err
+    }
+    // TODO: validate the flags match
+    v := int(aux.Value)
+    *k = KeyUsage(v)
+    return nil
+}
+
+// JSONSignatureAlgorithm is the intermediate type
+// used when marshaling a SignatureAlgorithm out to JSON.
+type JSONSignatureAlgorithm struct {
+    Name string      `json:"name,omitempty"`
+    OID  pkix.AuxOID `json:"oid"`
+}
+
+// MarshalJSON implements the json.Marshaler interface
+// MAY NOT PRESERVE ORIGINAL OID FROM CERTIFICATE -
+// CONSIDER USING jsonifySignatureAlgorithm INSTEAD!
+func (s *SignatureAlgorithm) MarshalJSON() ([]byte, error) {
+    aux := JSONSignatureAlgorithm{
+        Name: s.String(),
+    }
+    for _, val := range signatureAlgorithmDetails {
+        if val.algo == *s {
+            aux.OID = make([]int, len(val.oid))
+            for idx := range val.oid {
+                aux.OID[idx] = val.oid[idx]
+            }
+        }
+    }
+    return json.Marshal(&aux)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *SignatureAlgorithm) UnmarshalJSON(b []byte) error {
+    var aux JSONSignatureAlgorithm
+    if err := json.Unmarshal(b, &aux); err != nil {
+        return err
+    }
+    *s = UnknownSignatureAlgorithm
+    oid := asn1.ObjectIdentifier(aux.OID.AsSlice())
+    if oid.Equal(oidSignatureRSAPSS) {
+        pssAlgs := []SignatureAlgorithm{SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS}
+        for _, alg := range pssAlgs {
+            if strings.Compare(alg.String(), aux.Name) == 0 {
+                *s = alg
+                break
+            }
+        }
+    } else {
+        for _, val := range signatureAlgorithmDetails {
+            if val.oid.Equal(oid) {
+                *s = val.algo
+                break
+            }
+        }
+    }
+    return nil
+}
+
+// jsonifySignatureAlgorithm gathers the necessary fields in a Certificate
+// into a JSONSignatureAlgorithm, which can then use the default
+// JSON marshalers and unmarshalers. THIS FUNCTION IS PREFERRED OVER
+// THE CUSTOM JSON MARSHALER PRESENTED ABOVE FOR SIGNATUREALGORITHM
+// BECAUSE THIS METHOD PRESERVES THE OID ORIGINALLY IN THE CERTIFICATE!
+// This reason also explains why we need this function -
+// the OID is unfortunately stored outside the scope of a
+// SignatureAlgorithm struct and cannot be recovered without access to the
+// entire Certificate if we do not know the signature algorithm.
+func (c *Certificate) jsonifySignatureAlgorithm() JSONSignatureAlgorithm {
+    aux := JSONSignatureAlgorithm{}
+    if c.SignatureAlgorithm == 0 {
+        aux.Name = "unknown_algorithm"
+    } else {
+        aux.Name = c.SignatureAlgorithm.String()
+    }
+    aux.OID = make([]int, len(c.SignatureAlgorithmOID))
+    for idx := range c.SignatureAlgorithmOID {
+        aux.OID[idx] = c.SignatureAlgorithmOID[idx]
+    }
+    return aux
+}
+
+type auxPublicKeyAlgorithm struct {
+    Name string       `json:"name,omitempty"`
+    OID  *pkix.AuxOID `json:"oid,omitempty"`
+}
+
+var publicKeyNameToAlgorithm = map[string]PublicKeyAlgorithm{
+    "RSA":   RSA,
+    "DSA":   DSA,
+    "ECDSA": ECDSA,
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (p *PublicKeyAlgorithm) MarshalJSON() ([]byte, error) {
+    aux := auxPublicKeyAlgorithm{
+        Name: p.String(),
+    }
+    return json.Marshal(&aux)
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (p *PublicKeyAlgorithm) UnmarshalJSON(b []byte) error {
+    var aux auxPublicKeyAlgorithm
+    if err := json.Unmarshal(b, &aux); err != nil {
+        return err
+    }
+    *p = publicKeyNameToAlgorithm[aux.Name]
+    return nil
+}
+
+func clampTime(t time.Time) time.Time {
+    if t.Before(kMinTime) {
+        return kMinTime
+    }
+    if t.After(kMaxTime) {
+        return kMaxTime
+    }
+    return t
+}
+
+type auxValidity struct {
+    Start          string `json:"start"`
+    End            string `json:"end"`
+    ValidityPeriod int    `json:"length"`
+}
+
+func (v *validity) MarshalJSON() ([]byte, error) {
+    aux := auxValidity{
+        Start:          clampTime(v.NotBefore.UTC()).Format(time.RFC3339),
+        End:            clampTime(v.NotAfter.UTC()).Format(time.RFC3339),
+        ValidityPeriod: int(v.NotAfter.Sub(v.NotBefore).Seconds()),
+    }
+    return json.Marshal(&aux)
+}
+
+func (v *validity) UnmarshalJSON(b []byte) error {
+    var aux auxValidity
+    if err := json.Unmarshal(b, &aux); err != nil {
+        return err
+    }
+    var err error
+    if v.NotBefore, err = time.Parse(time.RFC3339, aux.Start); err != nil {
+
return err + } + if v.NotAfter, err = time.Parse(time.RFC3339, aux.End); err != nil { + return err + } + + return nil +} + +// ECDSAPublicKeyJSON - used to condense several fields from a +// ECDSA public key into one field for use in JSONCertificate. +// Uses default JSON marshal and unmarshal methods +type ECDSAPublicKeyJSON struct { + B []byte `json:"b"` + Curve string `json:"curve"` + Gx []byte `json:"gx"` + Gy []byte `json:"gy"` + Length int `json:"length"` + N []byte `json:"n"` + P []byte `json:"p"` + Pub []byte `json:"pub,omitempty"` + X []byte `json:"x"` + Y []byte `json:"y"` +} + +// DSAPublicKeyJSON - used to condense several fields from a +// DSA public key into one field for use in JSONCertificate. +// Uses default JSON marshal and unmarshal methods +type DSAPublicKeyJSON struct { + G []byte `json:"g"` + P []byte `json:"p"` + Q []byte `json:"q"` + Y []byte `json:"y"` +} + +// GetDSAPublicKeyJSON - get the DSAPublicKeyJSON for the given standard DSA PublicKey. +func GetDSAPublicKeyJSON(key *dsa.PublicKey) *DSAPublicKeyJSON { + return &DSAPublicKeyJSON{ + P: key.P.Bytes(), + Q: key.Q.Bytes(), + G: key.G.Bytes(), + Y: key.Y.Bytes(), + } +} + +// GetRSAPublicKeyJSON - get the jsonKeys.RSAPublicKey for the given standard RSA PublicKey. +func GetRSAPublicKeyJSON(key *rsa.PublicKey) *jsonKeys.RSAPublicKey { + rsaKey := new(jsonKeys.RSAPublicKey) + rsaKey.PublicKey = key + return rsaKey +} + +// GetECDSAPublicKeyJSON - get the GetECDSAPublicKeyJSON for the given standard ECDSA PublicKey. +func GetECDSAPublicKeyJSON(key *ecdsa.PublicKey) *ECDSAPublicKeyJSON { + params := key.Params() + return &ECDSAPublicKeyJSON{ + P: params.P.Bytes(), + N: params.N.Bytes(), + B: params.B.Bytes(), + Gx: params.Gx.Bytes(), + Gy: params.Gy.Bytes(), + X: key.X.Bytes(), + Y: key.Y.Bytes(), + Curve: key.Curve.Params().Name, + Length: key.Curve.Params().BitSize, + } +} + +// GetAugmentedECDSAPublicKeyJSON - get the GetECDSAPublicKeyJSON for the given "augmented" +// ECDSA PublicKey. 
+func GetAugmentedECDSAPublicKeyJSON(key *AugmentedECDSA) *ECDSAPublicKeyJSON {
+    params := key.Pub.Params()
+    return &ECDSAPublicKeyJSON{
+        P:      params.P.Bytes(),
+        N:      params.N.Bytes(),
+        B:      params.B.Bytes(),
+        Gx:     params.Gx.Bytes(),
+        Gy:     params.Gy.Bytes(),
+        X:      key.Pub.X.Bytes(),
+        Y:      key.Pub.Y.Bytes(),
+        Curve:  key.Pub.Curve.Params().Name,
+        Length: key.Pub.Curve.Params().BitSize,
+        Pub:    key.Raw.Bytes,
+    }
+}
+
+// jsonifySubjectKey - Convert public key data in a Certificate
+// into json output format for JSONCertificate
+func (c *Certificate) jsonifySubjectKey() JSONSubjectKeyInfo {
+    j := JSONSubjectKeyInfo{
+        KeyAlgorithm:    c.PublicKeyAlgorithm,
+        SPKIFingerprint: c.SPKIFingerprint,
+    }
+
+    switch key := c.PublicKey.(type) {
+    case *rsa.PublicKey:
+        rsaKey := new(jsonKeys.RSAPublicKey)
+        rsaKey.PublicKey = key
+        j.RSAPublicKey = rsaKey
+    case *dsa.PublicKey:
+        j.DSAPublicKey = &DSAPublicKeyJSON{
+            P: key.P.Bytes(),
+            Q: key.Q.Bytes(),
+            G: key.G.Bytes(),
+            Y: key.Y.Bytes(),
+        }
+    case *ecdsa.PublicKey:
+        params := key.Params()
+        j.ECDSAPublicKey = &ECDSAPublicKeyJSON{
+            P:      params.P.Bytes(),
+            N:      params.N.Bytes(),
+            B:      params.B.Bytes(),
+            Gx:     params.Gx.Bytes(),
+            Gy:     params.Gy.Bytes(),
+            X:      key.X.Bytes(),
+            Y:      key.Y.Bytes(),
+            Curve:  key.Curve.Params().Name,
+            Length: key.Curve.Params().BitSize,
+        }
+    case *AugmentedECDSA:
+        params := key.Pub.Params()
+        j.ECDSAPublicKey = &ECDSAPublicKeyJSON{
+            P:      params.P.Bytes(),
+            N:      params.N.Bytes(),
+            B:      params.B.Bytes(),
+            Gx:     params.Gx.Bytes(),
+            Gy:     params.Gy.Bytes(),
+            X:      key.Pub.X.Bytes(),
+            Y:      key.Pub.Y.Bytes(),
+            Curve:  key.Pub.Curve.Params().Name,
+            Length: key.Pub.Curve.Params().BitSize,
+            Pub:    key.Raw.Bytes,
+        }
+    }
+    return j
+}
+
+// JSONSubjectKeyInfo - used to condense several fields from x509.Certificate
+// related to the subject public key into one field within JSONCertificate
+// Unfortunately, this struct cannot have its own Marshal method since it
+// needs information from multiple fields in x509.Certificate
+type JSONSubjectKeyInfo struct {
+    KeyAlgorithm    PublicKeyAlgorithm     `json:"key_algorithm"`
+    RSAPublicKey    *jsonKeys.RSAPublicKey `json:"rsa_public_key,omitempty"`
+    DSAPublicKey    *DSAPublicKeyJSON      `json:"dsa_public_key,omitempty"`
+    ECDSAPublicKey  *ECDSAPublicKeyJSON    `json:"ecdsa_public_key,omitempty"`
+    SPKIFingerprint CertificateFingerprint `json:"fingerprint_sha256"`
+}
+
+// JSONSignature - used to condense several fields from x509.Certificate
+// related to the signature into one field within JSONCertificate
+// Unfortunately, this struct cannot have its own Marshal method since it
+// needs information from multiple fields in x509.Certificate
+type JSONSignature struct {
+    SignatureAlgorithm JSONSignatureAlgorithm `json:"signature_algorithm"`
+    Value              []byte                 `json:"value"`
+    Valid              bool                   `json:"valid"`
+    SelfSigned         bool                   `json:"self_signed"`
+}
+
+// JSONValidity - used to condense several fields related
+// to validity in x509.Certificate into one field within JSONCertificate
+// Unfortunately, this struct cannot have its own Marshal method since it
+// needs information from multiple fields in x509.Certificate
+type JSONValidity struct {
+    validity
+    ValidityPeriod int
+}
+
+// JSONCertificate - used to condense data from x509.Certificate when marshaling
+// into JSON. This struct has a distinct and independent layout from
+// x509.Certificate, mostly for condensing data across repetitive
+// fields and making it more presentable.
+type JSONCertificate struct {
+    Version                   int                          `json:"version"`
+    SerialNumber              string                       `json:"serial_number"`
+    SignatureAlgorithm        JSONSignatureAlgorithm       `json:"signature_algorithm"`
+    Issuer                    pkix.Name                    `json:"issuer"`
+    IssuerDN                  string                       `json:"issuer_dn,omitempty"`
+    Validity                  JSONValidity                 `json:"validity"`
+    Subject                   pkix.Name                    `json:"subject"`
+    SubjectDN                 string                       `json:"subject_dn,omitempty"`
+    SubjectKeyInfo            JSONSubjectKeyInfo           `json:"subject_key_info"`
+    Extensions                *CertificateExtensions       `json:"extensions,omitempty"`
+    UnknownExtensions         UnknownCertificateExtensions `json:"unknown_extensions,omitempty"`
+    Signature                 JSONSignature                `json:"signature"`
+    FingerprintMD5            CertificateFingerprint       `json:"fingerprint_md5"`
+    FingerprintSHA1           CertificateFingerprint       `json:"fingerprint_sha1"`
+    FingerprintSHA256         CertificateFingerprint       `json:"fingerprint_sha256"`
+    FingerprintNoCT           CertificateFingerprint       `json:"tbs_noct_fingerprint"`
+    SPKISubjectFingerprint    CertificateFingerprint       `json:"spki_subject_fingerprint"`
+    TBSCertificateFingerprint CertificateFingerprint       `json:"tbs_fingerprint"`
+    ValidationLevel           CertValidationLevel          `json:"validation_level"`
+    Names                     []string                     `json:"names,omitempty"`
+    Redacted                  bool                         `json:"redacted"`
+}
+
+func (c *Certificate) MarshalJSON() ([]byte, error) {
+    // Fill out the certificate
+    jc := new(JSONCertificate)
+    jc.Version = c.Version
+    jc.SerialNumber = c.SerialNumber.String()
+    jc.Issuer = c.Issuer
+    jc.IssuerDN = c.Issuer.String()
+
+    jc.Validity.NotBefore = c.NotBefore
+    jc.Validity.NotAfter = c.NotAfter
+    jc.Validity.ValidityPeriod = c.ValidityPeriod
+    jc.Subject = c.Subject
+    jc.SubjectDN = c.Subject.String()
+
+    if isValidName(c.Subject.CommonName) {
+        jc.Names = append(jc.Names, c.Subject.CommonName)
+    }
+
+    for _, name := range c.DNSNames {
+        if isValidName(name) {
+            jc.Names = append(jc.Names, name)
+        } else if !strings.Contains(name, ".") { //just a TLD
+            jc.Names = append(jc.Names, name)
+        }
+
+    }
+
+    for _, name := range c.URIs {
+        if util.IsURL(name) {
+            jc.Names = append(jc.Names, name)
+        }
+    }
+
+    for _, name := range c.IPAddresses {
+        str := name.String()
+        if util.IsURL(str) {
+            jc.Names = append(jc.Names, str)
+        }
+    }
+
+    jc.Names = purgeNameDuplicates(jc.Names)
+    jc.Redacted = false
+    for _, name := range jc.Names {
+        if strings.HasPrefix(name, "?") {
+            jc.Redacted = true
+        }
+    }
+
+    jc.SubjectKeyInfo = c.jsonifySubjectKey()
+    jc.Extensions, jc.UnknownExtensions = c.jsonifyExtensions()
+
+    // TODO: Handle the fact this might not match
+    jc.SignatureAlgorithm = c.jsonifySignatureAlgorithm()
+    jc.Signature.SignatureAlgorithm = jc.SignatureAlgorithm
+    jc.Signature.Value = c.Signature
+    jc.Signature.Valid = c.validSignature
+    jc.Signature.SelfSigned = c.SelfSigned
+    if c.SelfSigned {
+        jc.Signature.Valid = true
+    }
+    jc.FingerprintMD5 = c.FingerprintMD5
+    jc.FingerprintSHA1 = c.FingerprintSHA1
+    jc.FingerprintSHA256 = c.FingerprintSHA256
+    jc.FingerprintNoCT = c.FingerprintNoCT
+    jc.SPKISubjectFingerprint = c.SPKISubjectFingerprint
+    jc.TBSCertificateFingerprint = c.TBSCertificateFingerprint
+    jc.ValidationLevel = c.ValidationLevel
+
+    return json.Marshal(jc)
+}
+
+// UnmarshalJSON - intentionally implemented to always error,
+// as this method should not be used. The MarshalJSON method
+// on Certificate condenses data in a way that is not recoverable.
+// Use the x509.ParseCertificate function instead or
+// JSONCertificateWithRaw Marshal method
+func (jc *JSONCertificate) UnmarshalJSON(b []byte) error {
+    return errors.New("Do not unmarshal cert JSON directly, use JSONCertificateWithRaw or x509.ParseCertificate function")
+}
+
+// UnmarshalJSON - intentionally implemented to always error,
+// as this method should not be used. The MarshalJSON method
+// on Certificate condenses data in a way that is not recoverable.
+// Use the x509.ParseCertificate function instead or
+// JSONCertificateWithRaw Marshal method
+func (c *Certificate) UnmarshalJSON(b []byte) error {
+    return errors.New("Do not unmarshal cert JSON directly, use JSONCertificateWithRaw or x509.ParseCertificate function")
+}
+
+// JSONCertificateWithRaw - intermediate struct for unmarshaling json
+// of a certificate - the raw is required since the
+// MarshalJSON method on Certificate condenses data in a way that
+// makes extraction to the original in Unmarshal impossible.
+// The JSON output of Marshal is not even used to construct
+// a certificate, all we need is raw
+type JSONCertificateWithRaw struct {
+    Raw []byte `json:"raw,omitempty"`
+}
+
+// ParseRaw - for converting the intermediate object
+// JSONCertificateWithRaw into a parsed Certificate
+// see description of JSONCertificateWithRaw for
+// why this is used instead of UnmarshalJSON methods
+func (c *JSONCertificateWithRaw) ParseRaw() (*Certificate, error) {
+    return ParseCertificate(c.Raw)
+}
+
+func purgeNameDuplicates(names []string) (out []string) {
+    hashset := make(map[string]bool, len(names))
+    for _, name := range names {
+        if _, inc := hashset[name]; !inc {
+            hashset[name] = true
+        }
+    }
+
+    out = make([]string, 0, len(hashset))
+    for key := range hashset {
+        out = append(out, key)
+    }
+
+    sort.Strings(out) // must sort to ensure output is deterministic!
+    return
+}
+
+func isValidName(name string) (ret bool) {
+
+    // Check for wildcards and redacts, ignore malformed urls
+    if strings.HasPrefix(name, "?.") || strings.HasPrefix(name, "*.") {
+        ret = isValidName(name[2:])
+    } else {
+        ret = util.IsURL(name)
+    }
+    return
+}
+
+func orMask(ip net.IP, mask net.IPMask) net.IP {
+    if len(ip) == 0 || len(mask) == 0 {
+        return nil
+    }
+    if len(ip) != net.IPv4len && len(ip) != net.IPv6len {
+        return nil
+    }
+    if len(ip) != len(mask) {
+        return nil
+    }
+    out := make([]byte, len(ip))
+    for idx := range ip {
+        out[idx] = ip[idx] | mask[idx]
+    }
+    return out
+}
+
+func invertMask(mask net.IPMask) net.IPMask {
+    if mask == nil {
+        return nil
+    }
+    out := make([]byte, len(mask))
+    for idx := range mask {
+        out[idx] = ^mask[idx]
+    }
+    return out
+}
+
+type auxGeneralSubtreeIP struct {
+    CIDR  string `json:"cidr,omitempty"`
+    Begin string `json:"begin,omitempty"`
+    End   string `json:"end,omitempty"`
+    Mask  string `json:"mask,omitempty"`
+}
+
+func (g *GeneralSubtreeIP) MarshalJSON() ([]byte, error) {
+    aux := auxGeneralSubtreeIP{}
+    aux.CIDR = g.Data.String()
+    // Check to see if the subnet is valid. An invalid subnet will return 0,0
+    // from Size(). If the subnet is invalid, only output the CIDR.
+    ones, bits := g.Data.Mask.Size()
+    if ones == 0 && bits == 0 {
+        return json.Marshal(&aux)
+    }
+    // The first IP in the range should be `ip & mask`.
+    begin := g.Data.IP.Mask(g.Data.Mask)
+    if begin != nil {
+        aux.Begin = begin.String()
+    }
+    // The last IP (inclusive) is `ip | (^mask)`.
+ inverseMask := invertMask(g.Data.Mask) + end := orMask(g.Data.IP, inverseMask) + if end != nil { + aux.End = end.String() + } + // Output the mask as an IP, but enforce it can be formatted correctly. + // net.IP.String() only works on byte arrays of the correct length. + maskLen := len(g.Data.Mask) + if maskLen == net.IPv4len || maskLen == net.IPv6len { + maskAsIP := net.IP(g.Data.Mask) + aux.Mask = maskAsIP.String() + } + return json.Marshal(&aux) +} + +func (g *GeneralSubtreeIP) UnmarshalJSON(b []byte) error { + aux := auxGeneralSubtreeIP{} + if err := json.Unmarshal(b, &aux); err != nil { + return err + } + ip, ipNet, err := net.ParseCIDR(aux.CIDR) + if err != nil { + return err + } + g.Data.IP = ip + g.Data.Mask = ipNet.Mask + g.Min = 0 + g.Max = 0 + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/names.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/names.go new file mode 100644 index 00000000..012f9192 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/names.go @@ -0,0 +1,30 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +func (p PublicKeyAlgorithm) String() string { + if p >= total_key_algorithms || p < 0 { + p = UnknownPublicKeyAlgorithm + } + return keyAlgorithmNames[p] +} + +func (c *Certificate) SignatureAlgorithmName() string { + switch c.SignatureAlgorithm { + case UnknownSignatureAlgorithm: + return c.SignatureAlgorithmOID.String() + default: + return c.SignatureAlgorithm.String() + } +} + +func (c *Certificate) PublicKeyAlgorithmName() string { + switch c.PublicKeyAlgorithm { + case UnknownPublicKeyAlgorithm: + return c.PublicKeyAlgorithmOID.String() + default: + return c.PublicKeyAlgorithm.String() + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pem_decrypt.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pem_decrypt.go new file mode 100644 index 00000000..0388d63e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pem_decrypt.go @@ -0,0 +1,240 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +// RFC 1423 describes the encryption of PEM blocks. The algorithm used to +// generate a key from the password was derived by looking at the OpenSSL +// implementation. + +import ( + "crypto/aes" + "crypto/cipher" + "crypto/des" + "crypto/md5" + "encoding/hex" + "encoding/pem" + "errors" + "io" + "strings" +) + +type PEMCipher int + +// Possible values for the EncryptPEMBlock encryption algorithm. +const ( + _ PEMCipher = iota + PEMCipherDES + PEMCipher3DES + PEMCipherAES128 + PEMCipherAES192 + PEMCipherAES256 +) + +// rfc1423Algo holds a method for enciphering a PEM block. +type rfc1423Algo struct { + cipher PEMCipher + name string + cipherFunc func(key []byte) (cipher.Block, error) + keySize int + blockSize int +} + +// rfc1423Algos holds a slice of the possible ways to encrypt a PEM +// block. The ivSize numbers were taken from the OpenSSL source. 
+var rfc1423Algos = []rfc1423Algo{{ + cipher: PEMCipherDES, + name: "DES-CBC", + cipherFunc: des.NewCipher, + keySize: 8, + blockSize: des.BlockSize, +}, { + cipher: PEMCipher3DES, + name: "DES-EDE3-CBC", + cipherFunc: des.NewTripleDESCipher, + keySize: 24, + blockSize: des.BlockSize, +}, { + cipher: PEMCipherAES128, + name: "AES-128-CBC", + cipherFunc: aes.NewCipher, + keySize: 16, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES192, + name: "AES-192-CBC", + cipherFunc: aes.NewCipher, + keySize: 24, + blockSize: aes.BlockSize, +}, { + cipher: PEMCipherAES256, + name: "AES-256-CBC", + cipherFunc: aes.NewCipher, + keySize: 32, + blockSize: aes.BlockSize, +}, +} + +// deriveKey uses a key derivation function to stretch the password into a key +// with the number of bits our cipher requires. This algorithm was derived from +// the OpenSSL source. +func (c rfc1423Algo) deriveKey(password, salt []byte) []byte { + hash := md5.New() + out := make([]byte, c.keySize) + var digest []byte + + for i := 0; i < len(out); i += len(digest) { + hash.Reset() + hash.Write(digest) + hash.Write(password) + hash.Write(salt) + digest = hash.Sum(digest[:0]) + copy(out[i:], digest) + } + return out +} + +// IsEncryptedPEMBlock returns if the PEM block is password encrypted. +func IsEncryptedPEMBlock(b *pem.Block) bool { + _, ok := b.Headers["DEK-Info"] + return ok +} + +// IncorrectPasswordError is returned when an incorrect password is detected. +var IncorrectPasswordError = errors.New("x509: decryption password incorrect") + +// DecryptPEMBlock takes a password encrypted PEM block and the password used to +// encrypt it and returns a slice of decrypted DER encoded bytes. It inspects +// the DEK-Info header to determine the algorithm used for decryption. If no +// DEK-Info header is present, an error is returned. If an incorrect password +// is detected an IncorrectPasswordError is returned. Because of deficiencies +// in the encrypted-PEM format, it's not always possible to detect an incorrect +// password. In these cases no error will be returned but the decrypted DER +// bytes will be random noise. +func DecryptPEMBlock(b *pem.Block, password []byte) ([]byte, error) { + dek, ok := b.Headers["DEK-Info"] + if !ok { + return nil, errors.New("x509: no DEK-Info header in block") + } + + idx := strings.Index(dek, ",") + if idx == -1 { + return nil, errors.New("x509: malformed DEK-Info header") + } + + mode, hexIV := dek[:idx], dek[idx+1:] + ciph := cipherByName(mode) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv, err := hex.DecodeString(hexIV) + if err != nil { + return nil, err + } + if len(iv) != ciph.blockSize { + return nil, errors.New("x509: incorrect IV size") + } + + // Based on the OpenSSL implementation. The salt is the first 8 bytes + // of the initialization vector. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + + if len(b.Bytes)%block.BlockSize() != 0 { + return nil, errors.New("x509: encrypted PEM data is not a multiple of the block size") + } + + data := make([]byte, len(b.Bytes)) + dec := cipher.NewCBCDecrypter(block, iv) + dec.CryptBlocks(data, b.Bytes) + + // Blocks are padded using a scheme where the last n bytes of padding are all + // equal to n. It can pad from 1 to blocksize bytes inclusive. See RFC 1423. + // For example: + // [x y z 2 2] + // [x y 7 7 7 7 7 7 7] + // If we detect a bad padding, we assume it is an invalid password. 
+ dlen := len(data) + if dlen == 0 || dlen%ciph.blockSize != 0 { + return nil, errors.New("x509: invalid padding") + } + last := int(data[dlen-1]) + if dlen < last { + return nil, IncorrectPasswordError + } + if last == 0 || last > ciph.blockSize { + return nil, IncorrectPasswordError + } + for _, val := range data[dlen-last:] { + if int(val) != last { + return nil, IncorrectPasswordError + } + } + return data[:dlen-last], nil +} + +// EncryptPEMBlock returns a PEM block of the specified type holding the +// given DER-encoded data encrypted with the specified algorithm and +// password. +func EncryptPEMBlock(rand io.Reader, blockType string, data, password []byte, alg PEMCipher) (*pem.Block, error) { + ciph := cipherByKey(alg) + if ciph == nil { + return nil, errors.New("x509: unknown encryption mode") + } + iv := make([]byte, ciph.blockSize) + if _, err := io.ReadFull(rand, iv); err != nil { + return nil, errors.New("x509: cannot generate IV: " + err.Error()) + } + // The salt is the first 8 bytes of the initialization vector, + // matching the key derivation in DecryptPEMBlock. + key := ciph.deriveKey(password, iv[:8]) + block, err := ciph.cipherFunc(key) + if err != nil { + return nil, err + } + enc := cipher.NewCBCEncrypter(block, iv) + pad := ciph.blockSize - len(data)%ciph.blockSize + encrypted := make([]byte, len(data), len(data)+pad) + // We could save this copy by encrypting all the whole blocks in + // the data separately, but it doesn't seem worth the additional + // code. + copy(encrypted, data) + // See RFC 1423, section 1.1 + for i := 0; i < pad; i++ { + encrypted = append(encrypted, byte(pad)) + } + enc.CryptBlocks(encrypted, encrypted) + + return &pem.Block{ + Type: blockType, + Headers: map[string]string{ + "Proc-Type": "4,ENCRYPTED", + "DEK-Info": ciph.name + "," + hex.EncodeToString(iv), + }, + Bytes: encrypted, + }, nil +} + +func cipherByName(name string) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.name == name { + return alg + } + } + return nil +} + +func cipherByKey(key PEMCipher) *rfc1423Algo { + for i := range rfc1423Algos { + alg := &rfc1423Algos[i] + if alg.cipher == key { + return alg + } + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkcs1.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkcs1.go new file mode 100644 index 00000000..73bc7623 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkcs1.go @@ -0,0 +1,121 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/rsa" + "encoding/asn1" + "errors" + "math/big" +) + +// pkcs1PrivateKey is a structure which mirrors the PKCS#1 ASN.1 for an RSA private key. +type pkcs1PrivateKey struct { + Version int + N *big.Int + E int + D *big.Int + P *big.Int + Q *big.Int + // We ignore these values, if present, because rsa will calculate them. + Dp *big.Int `asn1:"optional"` + Dq *big.Int `asn1:"optional"` + Qinv *big.Int `asn1:"optional"` + + AdditionalPrimes []pkcs1AdditionalRSAPrime `asn1:"optional,omitempty"` +} + +type pkcs1AdditionalRSAPrime struct { + Prime *big.Int + + // We ignore these values because rsa will calculate them. + Exp *big.Int + Coeff *big.Int +} + +// pkcs1PublicKey reflects the ASN.1 structure of a PKCS#1 public key. 
+type pkcs1PublicKey struct { + N *big.Int + E int +} + +// ParsePKCS1PrivateKey returns an RSA private key from its ASN.1 PKCS#1 DER encoded form. +func ParsePKCS1PrivateKey(der []byte) (*rsa.PrivateKey, error) { + var priv pkcs1PrivateKey + rest, err := asn1.Unmarshal(der, &priv) + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + if err != nil { + return nil, err + } + + if priv.Version > 1 { + return nil, errors.New("x509: unsupported private key version") + } + + if priv.N.Sign() <= 0 || priv.D.Sign() <= 0 || priv.P.Sign() <= 0 || priv.Q.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative value") + } + + key := new(rsa.PrivateKey) + key.PublicKey = rsa.PublicKey{ + E: priv.E, + N: priv.N, + } + + key.D = priv.D + key.Primes = make([]*big.Int, 2+len(priv.AdditionalPrimes)) + key.Primes[0] = priv.P + key.Primes[1] = priv.Q + for i, a := range priv.AdditionalPrimes { + if a.Prime.Sign() <= 0 { + return nil, errors.New("x509: private key contains zero or negative prime") + } + key.Primes[i+2] = a.Prime + // We ignore the other two values because rsa will calculate + // them as needed. + } + + err = key.Validate() + if err != nil { + return nil, err + } + key.Precompute() + + return key, nil +} + +// MarshalPKCS1PrivateKey converts a private key to ASN.1 DER encoded form. +func MarshalPKCS1PrivateKey(key *rsa.PrivateKey) []byte { + key.Precompute() + + version := 0 + if len(key.Primes) > 2 { + version = 1 + } + + priv := pkcs1PrivateKey{ + Version: version, + N: key.N, + E: key.PublicKey.E, + D: key.D, + P: key.Primes[0], + Q: key.Primes[1], + Dp: key.Precomputed.Dp, + Dq: key.Precomputed.Dq, + Qinv: key.Precomputed.Qinv, + } + + priv.AdditionalPrimes = make([]pkcs1AdditionalRSAPrime, len(key.Precomputed.CRTValues)) + for i, values := range key.Precomputed.CRTValues { + priv.AdditionalPrimes[i].Prime = key.Primes[2+i] + priv.AdditionalPrimes[i].Exp = values.Exp + priv.AdditionalPrimes[i].Coeff = values.Coeff + } + + b, _ := asn1.Marshal(priv) + return b +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkcs8.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkcs8.go new file mode 100644 index 00000000..d69049fa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkcs8.go @@ -0,0 +1,54 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "encoding/asn1" + "errors" + "fmt" + "github.com/zmap/zcrypto/x509/pkix" +) + +// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey. See +// ftp://ftp.rsasecurity.com/pub/pkcs/pkcs-8/pkcs-8v1_2.asn +// and RFC 5208. +type pkcs8 struct { + Version int + Algo pkix.AlgorithmIdentifier + PrivateKey []byte + // optional attributes omitted. +} + +// ParsePKCS8PrivateKey parses an unencrypted, PKCS#8 private key. +// See RFC 5208. 
+func ParsePKCS8PrivateKey(der []byte) (key interface{}, err error) { + var privKey pkcs8 + if _, err := asn1.Unmarshal(der, &privKey); err != nil { + return nil, err + } + switch { + case privKey.Algo.Algorithm.Equal(oidPublicKeyRSA): + key, err = ParsePKCS1PrivateKey(privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse RSA private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + case privKey.Algo.Algorithm.Equal(oidPublicKeyECDSA): + bytes := privKey.Algo.Parameters.FullBytes + namedCurveOID := new(asn1.ObjectIdentifier) + if _, err := asn1.Unmarshal(bytes, namedCurveOID); err != nil { + namedCurveOID = nil + } + key, err = parseECPrivateKey(namedCurveOID, privKey.PrivateKey) + if err != nil { + return nil, errors.New("x509: failed to parse EC private key embedded in PKCS#8: " + err.Error()) + } + return key, nil + + default: + return nil, fmt.Errorf("x509: PKCS#8 wrapping contained private key with unknown algorithm: %v", privKey.Algo.Algorithm) + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/json.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/json.go new file mode 100644 index 00000000..1e484bac --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/json.go @@ -0,0 +1,274 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkix + +import ( + "encoding/asn1" + "encoding/json" + "errors" + "strconv" + "strings" +) + +type auxAttributeTypeAndValue struct { + Type string `json:"type,omitempty"` + Value string `json:"value,omitempty"` +} + +// MarshalJSON implements the json.Marshaler interface. +func (a *AttributeTypeAndValue) MarshalJSON() ([]byte, error) { + aux := auxAttributeTypeAndValue{} + aux.Type = a.Type.String() + if s, ok := a.Value.(string); ok { + aux.Value = s + } + return json.Marshal(&aux) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (a *AttributeTypeAndValue) UnmarshalJSON(b []byte) error { + aux := auxAttributeTypeAndValue{} + if err := json.Unmarshal(b, &aux); err != nil { + return err + } + a.Type = nil + if len(aux.Type) > 0 { + parts := strings.Split(aux.Type, ".") + for _, part := range parts { + i, err := strconv.Atoi(part) + if err != nil { + return err + } + a.Type = append(a.Type, i) + } + } + a.Value = aux.Value + return nil +} + +type auxOtherName struct { + ID string `json:"id,omitempty"` + Value []byte `json:"value,omitempty"` +} + +// MarshalJSON implements the json.Marshaler interface. +func (o *OtherName) MarshalJSON() ([]byte, error) { + aux := auxOtherName{ + ID: o.TypeID.String(), + Value: o.Value.Bytes, + } + return json.Marshal(&aux) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (o *OtherName) UnmarshalJSON(b []byte) (err error) { + aux := auxOtherName{} + if err = json.Unmarshal(b, &aux); err != nil { + return + } + + // Turn dot-notation back into an OID + if len(aux.ID) == 0 { + return errors.New("empty type ID") + } + parts := strings.Split(aux.ID, ".") + o.TypeID = nil + for _, part := range parts { + i, err := strconv.Atoi(part) + if err != nil { + return err + } + o.TypeID = append(o.TypeID, i) + } + + // Build the ASN.1 value + o.Value = asn1.RawValue{ + Tag: 0, + Class: asn1.ClassContextSpecific, + IsCompound: true, + Bytes: aux.Value, + } + o.Value.FullBytes, err = asn1.Marshal(o.Value) + return +} + +type auxExtension struct { + ID string `json:"id,omitempty"` + Critical bool `json:"critical"` + Value []byte `json:"value,omitempty"` +} + +// MarshalJSON implements the json.Marshaler interface. +func (ext *Extension) MarshalJSON() ([]byte, error) { + aux := auxExtension{ + ID: ext.Id.String(), + Critical: ext.Critical, + Value: ext.Value, + } + return json.Marshal(&aux) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ext *Extension) UnmarshalJSON(b []byte) (err error) { + aux := auxExtension{} + if err = json.Unmarshal(b, &aux); err != nil { + return + } + + parts := strings.Split(aux.ID, ".") + for _, part := range parts { + i, err := strconv.Atoi(part) + if err != nil { + return err + } + ext.Id = append(ext.Id, i) + } + ext.Critical = aux.Critical + ext.Value = aux.Value + return +} + +type auxName struct { + CommonName []string `json:"common_name,omitempty"` + SerialNumber []string `json:"serial_number,omitempty"` + Country []string `json:"country,omitempty"` + Locality []string `json:"locality,omitempty"` + Province []string `json:"province,omitempty"` + StreetAddress []string `json:"street_address,omitempty"` + Organization []string `json:"organization,omitempty"` + OrganizationalUnit []string `json:"organizational_unit,omitempty"` + PostalCode []string `json:"postal_code,omitempty"` + DomainComponent []string `json:"domain_component,omitempty"` + EmailAddress []string `json:"email_address,omitempty"` + GivenName []string `json:"given_name,omitempty"` + Surname []string `json:"surname,omitempty"` + // EV + JurisdictionCountry []string `json:"jurisdiction_country,omitempty"` + JurisdictionLocality []string `json:"jurisdiction_locality,omitempty"` + JurisdictionProvince []string `json:"jurisdiction_province,omitempty"` + + UnknownAttributes []AttributeTypeAndValue `json:"-"` +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (n *Name) MarshalJSON() ([]byte, error) { + aux := auxName{} + attrs := n.ToRDNSequence() + for _, attrSet := range attrs { + for _, a := range attrSet { + s, ok := a.Value.(string) + if !ok { + continue + } + if a.Type.Equal(oidCommonName) { + aux.CommonName = append(aux.CommonName, s) + } else if a.Type.Equal(oidSurname) { + aux.Surname = append(aux.Surname, s) + } else if a.Type.Equal(oidSerialNumber) { + aux.SerialNumber = append(aux.SerialNumber, s) + } else if a.Type.Equal(oidCountry) { + aux.Country = append(aux.Country, s) + } else if a.Type.Equal(oidLocality) { + aux.Locality = append(aux.Locality, s) + } else if a.Type.Equal(oidProvince) { + aux.Province = append(aux.Province, s) + } else if a.Type.Equal(oidStreetAddress) { + aux.StreetAddress = append(aux.StreetAddress, s) + } else if a.Type.Equal(oidOrganization) { + aux.Organization = append(aux.Organization, s) + } else if a.Type.Equal(oidGivenName) { + aux.GivenName = append(aux.GivenName, s) + } else if a.Type.Equal(oidOrganizationalUnit) { + aux.OrganizationalUnit = append(aux.OrganizationalUnit, s) + } else if a.Type.Equal(oidPostalCode) { + aux.PostalCode = append(aux.PostalCode, s) + } else if a.Type.Equal(oidDomainComponent) { + aux.DomainComponent = append(aux.DomainComponent, s) + } else if a.Type.Equal(oidDNEmailAddress) { + aux.EmailAddress = append(aux.EmailAddress, s) + // EV + } else if a.Type.Equal(oidJurisdictionCountry) { + aux.JurisdictionCountry = append(aux.JurisdictionCountry, s) + } else if a.Type.Equal(oidJurisdictionLocality) { + aux.JurisdictionLocality = append(aux.JurisdictionLocality, s) + } else if a.Type.Equal(oidJurisdictionProvince) { + aux.JurisdictionProvince = append(aux.JurisdictionProvince, s) + } else { + aux.UnknownAttributes = append(aux.UnknownAttributes, a) + } + } + } + return json.Marshal(&aux) +} + +func appendATV(names []AttributeTypeAndValue, fieldVals []string, asn1Id asn1.ObjectIdentifier) []AttributeTypeAndValue { + if len(fieldVals) == 0 { + return names + } + + for _, val := range fieldVals { + names = append(names, AttributeTypeAndValue{Type: asn1Id, Value: val}) + } + + return names +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (n *Name) UnmarshalJSON(b []byte) error { + aux := auxName{} + if err := json.Unmarshal(b, &aux); err != nil { + return err + } + + // Populate Names as []AttributeTypeAndValue + n.Names = appendATV(n.Names, aux.Country, oidCountry) + n.Names = appendATV(n.Names, aux.Organization, oidOrganization) + n.Names = appendATV(n.Names, aux.OrganizationalUnit, oidOrganizationalUnit) + n.Names = appendATV(n.Names, aux.Locality, oidLocality) + n.Names = appendATV(n.Names, aux.Province, oidProvince) + n.Names = appendATV(n.Names, aux.StreetAddress, oidStreetAddress) + n.Names = appendATV(n.Names, aux.PostalCode, oidPostalCode) + n.Names = appendATV(n.Names, aux.DomainComponent, oidDomainComponent) + n.Names = appendATV(n.Names, aux.EmailAddress, oidDNEmailAddress) + // EV + n.Names = appendATV(n.Names, aux.JurisdictionCountry, oidJurisdictionCountry) + n.Names = appendATV(n.Names, aux.JurisdictionLocality, oidJurisdictionLocality) + n.Names = appendATV(n.Names, aux.JurisdictionProvince, oidJurisdictionProvince) + + n.Names = appendATV(n.Names, aux.CommonName, oidCommonName) + n.Names = appendATV(n.Names, aux.SerialNumber, oidSerialNumber) + + // Populate specific fields as []string + n.Country = aux.Country + n.Organization = aux.Organization + n.OrganizationalUnit = aux.OrganizationalUnit + n.Locality = aux.Locality + n.Province = aux.Province + n.StreetAddress = aux.StreetAddress + n.PostalCode = aux.PostalCode + n.DomainComponent = aux.DomainComponent + // EV + n.JurisdictionCountry = aux.JurisdictionCountry + n.JurisdictionLocality = aux.JurisdictionLocality + n.JurisdictionProvince = aux.JurisdictionProvince + + // CommonName and SerialNumber are not arrays. + if len(aux.CommonName) > 0 { + n.CommonName = aux.CommonName[0] + } + if len(aux.SerialNumber) > 0 { + n.SerialNumber = aux.SerialNumber[0] + } + + // Add "extra" commonNames and serialNumbers to ExtraNames. + if len(aux.CommonName) > 1 { + n.ExtraNames = appendATV(n.ExtraNames, aux.CommonName[1:], oidCommonName) + } + if len(aux.SerialNumber) > 1 { + n.ExtraNames = appendATV(n.ExtraNames, aux.SerialNumber[1:], oidSerialNumber) + } + + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/oid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/oid.go new file mode 100644 index 00000000..314ab7b5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/oid.go @@ -0,0 +1,74 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkix + +import ( + "encoding/asn1" + "encoding/json" + "fmt" + "strconv" + "strings" +) + +// AuxOID behaves similar to asn1.ObjectIdentifier, except encodes to JSON as a +// string in dot notation. It is a type synonym for []int, and can be converted +// to an asn1.ObjectIdentifier by going through []int and back. 
+type AuxOID []int
+
+// AsSlice returns a slice over the inner representation
+func (aux *AuxOID) AsSlice() []int {
+    return *aux
+}
+
+// CopyAsSlice returns a copy of the inner representation as a slice
+func (aux *AuxOID) CopyAsSlice() []int {
+    out := make([]int, len(*aux))
+    copy(out, *aux)
+    return out
+}
+
+// Equal tests (deep) equality of two AuxOIDs
+func (aux *AuxOID) Equal(other *AuxOID) bool {
+    var a []int = *aux
+    var b []int = *other
+    if len(a) != len(b) {
+        return false
+    }
+    for idx := range a {
+        if a[idx] != b[idx] {
+            return false
+        }
+    }
+    return true
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (aux *AuxOID) MarshalJSON() ([]byte, error) {
+    var oid asn1.ObjectIdentifier
+    oid = []int(*aux)
+    return json.Marshal(oid.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (aux *AuxOID) UnmarshalJSON(b []byte) error {
+    var s string
+    if err := json.Unmarshal(b, &s); err != nil {
+        return err
+    }
+    parts := strings.Split(s, ".")
+    if len(parts) < 1 {
+        return fmt.Errorf("Invalid OID string %s", s)
+    }
+    slice := make([]int, len(parts))
+    for idx := range parts {
+        n, err := strconv.Atoi(parts[idx])
+        if err != nil || n < 0 {
+            return fmt.Errorf("Invalid OID integer %s", parts[idx])
+        }
+        slice[idx] = n
+    }
+    *aux = slice
+    return nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/oid_names.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/oid_names.go
new file mode 100644
index 00000000..c29aedba
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/oid_names.go
@@ -0,0 +1,1013 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
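For reference, the AuxOID type above round-trips OIDs through their dot notation instead of the JSON array encoding a plain []int would get. A minimal round-trip sketch (illustrative only, not part of the vendored diff; assumes this zcrypto pkix package is imported):

    var oid pkix.AuxOID
    _ = json.Unmarshal([]byte(`"2.5.4.3"`), &oid) // parses dot notation into []int{2, 5, 4, 3}
    encoded, _ := json.Marshal(&oid)              // re-encodes via asn1.ObjectIdentifier.String()
    fmt.Println(string(encoded))                  // prints "2.5.4.3" (quoted)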
+ +package pkix + +// OIDName stores the short and long version of the name of an IANA-assigned OID +type OIDName struct { + ShortName string `json:"short_name"` + LongName string `json:"long_name"` +} + +var oidDotNotationToNames map[string]OIDName + +func init() { + oidDotNotationToNames = make(map[string]OIDName, 1024) + + oidDotNotationToNames["0.0"] = OIDName{ShortName: "UNDEF", LongName: "undefined"} + oidDotNotationToNames["1.2.840.113549"] = OIDName{ShortName: "rsadsi", LongName: "RSA Data Security"} + oidDotNotationToNames["1.2.840.113549.1"] = OIDName{ShortName: "pkcs", LongName: "RSA Data Security"} + oidDotNotationToNames["1.2.840.113549.2.2"] = OIDName{ShortName: "MD2", LongName: "md2"} + oidDotNotationToNames["1.2.840.113549.2.5"] = OIDName{ShortName: "MD5", LongName: "md5"} + oidDotNotationToNames["1.2.840.113549.3.4"] = OIDName{ShortName: "RC4", LongName: "rc4"} + oidDotNotationToNames["1.2.840.113549.1.1.1"] = OIDName{ShortName: "rsaEncryption", LongName: "rsaEncryption"} + oidDotNotationToNames["1.2.840.113549.1.1.2"] = OIDName{ShortName: "RSA-MD2", LongName: "md2WithRSAEncryption"} + oidDotNotationToNames["1.2.840.113549.1.1.4"] = OIDName{ShortName: "RSA-MD5", LongName: "md5WithRSAEncryption"} + oidDotNotationToNames["1.2.840.113549.1.5.1"] = OIDName{ShortName: "PBE-MD2-DES", LongName: "pbeWithMD2AndDES-CBC"} + oidDotNotationToNames["1.2.840.113549.1.5.3"] = OIDName{ShortName: "PBE-MD5-DES", LongName: "pbeWithMD5AndDES-CBC"} + oidDotNotationToNames["2.5"] = OIDName{ShortName: "X500", LongName: "directory services (X.500)"} + oidDotNotationToNames["2.5.4"] = OIDName{ShortName: "X509", LongName: "X509"} + oidDotNotationToNames["2.5.4.3"] = OIDName{ShortName: "CN", LongName: "commonName"} + oidDotNotationToNames["2.5.4.6"] = OIDName{ShortName: "C", LongName: "countryName"} + oidDotNotationToNames["2.5.4.7"] = OIDName{ShortName: "L", LongName: "localityName"} + oidDotNotationToNames["2.5.4.8"] = OIDName{ShortName: "ST", LongName: "stateOrProvinceName"} + oidDotNotationToNames["2.5.4.10"] = OIDName{ShortName: "O", LongName: "organizationName"} + oidDotNotationToNames["2.5.4.11"] = OIDName{ShortName: "OU", LongName: "organizationalUnitName"} + oidDotNotationToNames["2.5.8.1.1"] = OIDName{ShortName: "RSA", LongName: "rsa"} + oidDotNotationToNames["1.2.840.113549.1.7"] = OIDName{ShortName: "pkcs7", LongName: "pkcs7"} + oidDotNotationToNames["1.2.840.113549.1.7.1"] = OIDName{ShortName: "pkcs7-data", LongName: "pkcs7-data"} + oidDotNotationToNames["1.2.840.113549.1.7.2"] = OIDName{ShortName: "pkcs7-signedData", LongName: "pkcs7-signedData"} + oidDotNotationToNames["1.2.840.113549.1.7.3"] = OIDName{ShortName: "pkcs7-envelopedData", LongName: "pkcs7-envelopedData"} + oidDotNotationToNames["1.2.840.113549.1.7.4"] = OIDName{ShortName: "pkcs7-signedAndEnvelopedData", LongName: "pkcs7-signedAndEnvelopedData"} + oidDotNotationToNames["1.2.840.113549.1.7.5"] = OIDName{ShortName: "pkcs7-digestData", LongName: "pkcs7-digestData"} + oidDotNotationToNames["1.2.840.113549.1.7.6"] = OIDName{ShortName: "pkcs7-encryptedData", LongName: "pkcs7-encryptedData"} + oidDotNotationToNames["1.2.840.113549.1.3"] = OIDName{ShortName: "pkcs3", LongName: "pkcs3"} + oidDotNotationToNames["1.2.840.113549.1.3.1"] = OIDName{ShortName: "dhKeyAgreement", LongName: "dhKeyAgreement"} + oidDotNotationToNames["1.3.14.3.2.6"] = OIDName{ShortName: "DES-ECB", LongName: "des-ecb"} + oidDotNotationToNames["1.3.14.3.2.9"] = OIDName{ShortName: "DES-CFB", LongName: "des-cfb"} + oidDotNotationToNames["1.3.14.3.2.7"] = 
OIDName{ShortName: "DES-CBC", LongName: "des-cbc"} + oidDotNotationToNames["1.3.14.3.2.17"] = OIDName{ShortName: "DES-EDE", LongName: "des-ede"} + oidDotNotationToNames["1.3.6.1.4.1.188.7.1.1.2"] = OIDName{ShortName: "IDEA-CBC", LongName: "idea-cbc"} + oidDotNotationToNames["1.2.840.113549.3.2"] = OIDName{ShortName: "RC2-CBC", LongName: "rc2-cbc"} + oidDotNotationToNames["1.3.14.3.2.18"] = OIDName{ShortName: "SHA", LongName: "sha"} + oidDotNotationToNames["1.3.14.3.2.15"] = OIDName{ShortName: "RSA-SHA", LongName: "shaWithRSAEncryption"} + oidDotNotationToNames["1.2.840.113549.3.7"] = OIDName{ShortName: "DES-EDE3-CBC", LongName: "des-ede3-cbc"} + oidDotNotationToNames["1.3.14.3.2.8"] = OIDName{ShortName: "DES-OFB", LongName: "des-ofb"} + oidDotNotationToNames["1.2.840.113549.1.9"] = OIDName{ShortName: "pkcs9", LongName: "pkcs9"} + oidDotNotationToNames["1.2.840.113549.1.9.1"] = OIDName{ShortName: "emailAddress", LongName: "emailAddress"} + oidDotNotationToNames["1.2.840.113549.1.9.2"] = OIDName{ShortName: "unstructuredName", LongName: "unstructuredName"} + oidDotNotationToNames["1.2.840.113549.1.9.3"] = OIDName{ShortName: "contentType", LongName: "contentType"} + oidDotNotationToNames["1.2.840.113549.1.9.4"] = OIDName{ShortName: "messageDigest", LongName: "messageDigest"} + oidDotNotationToNames["1.2.840.113549.1.9.5"] = OIDName{ShortName: "signingTime", LongName: "signingTime"} + oidDotNotationToNames["1.2.840.113549.1.9.6"] = OIDName{ShortName: "countersignature", LongName: "countersignature"} + oidDotNotationToNames["1.2.840.113549.1.9.7"] = OIDName{ShortName: "challengePassword", LongName: "challengePassword"} + oidDotNotationToNames["1.2.840.113549.1.9.8"] = OIDName{ShortName: "unstructuredAddress", LongName: "unstructuredAddress"} + oidDotNotationToNames["1.2.840.113549.1.9.9"] = OIDName{ShortName: "extendedCertificateAttributes", LongName: "extendedCertificateAttributes"} + oidDotNotationToNames["2.16.840.1.113730"] = OIDName{ShortName: "Netscape", LongName: "Netscape Communications Corp."} + oidDotNotationToNames["2.16.840.1.113730.1"] = OIDName{ShortName: "nsCertExt", LongName: "Netscape Certificate Extension"} + oidDotNotationToNames["2.16.840.1.113730.2"] = OIDName{ShortName: "nsDataType", LongName: "Netscape Data Type"} + oidDotNotationToNames["1.3.14.3.2.26"] = OIDName{ShortName: "SHA1", LongName: "sha1"} + oidDotNotationToNames["1.2.840.113549.1.1.5"] = OIDName{ShortName: "RSA-SHA1", LongName: "sha1WithRSAEncryption"} + oidDotNotationToNames["1.3.14.3.2.13"] = OIDName{ShortName: "DSA-SHA", LongName: "dsaWithSHA"} + oidDotNotationToNames["1.3.14.3.2.12"] = OIDName{ShortName: "DSA-old", LongName: "dsaEncryption-old"} + oidDotNotationToNames["1.2.840.113549.1.5.11"] = OIDName{ShortName: "PBE-SHA1-RC2-64", LongName: "pbeWithSHA1AndRC2-CBC"} + oidDotNotationToNames["1.2.840.113549.1.5.12"] = OIDName{ShortName: "PBKDF2", LongName: "PBKDF2"} + oidDotNotationToNames["1.3.14.3.2.27"] = OIDName{ShortName: "DSA-SHA1-old", LongName: "dsaWithSHA1-old"} + oidDotNotationToNames["2.16.840.1.113730.1.1"] = OIDName{ShortName: "nsCertType", LongName: "Netscape Cert Type"} + oidDotNotationToNames["2.16.840.1.113730.1.2"] = OIDName{ShortName: "nsBaseUrl", LongName: "Netscape Base Url"} + oidDotNotationToNames["2.16.840.1.113730.1.3"] = OIDName{ShortName: "nsRevocationUrl", LongName: "Netscape Revocation Url"} + oidDotNotationToNames["2.16.840.1.113730.1.4"] = OIDName{ShortName: "nsCaRevocationUrl", LongName: "Netscape CA Revocation Url"} + oidDotNotationToNames["2.16.840.1.113730.1.7"] = 
OIDName{ShortName: "nsRenewalUrl", LongName: "Netscape Renewal Url"} + oidDotNotationToNames["2.16.840.1.113730.1.8"] = OIDName{ShortName: "nsCaPolicyUrl", LongName: "Netscape CA Policy Url"} + oidDotNotationToNames["2.16.840.1.113730.1.12"] = OIDName{ShortName: "nsSslServerName", LongName: "Netscape SSL Server Name"} + oidDotNotationToNames["2.16.840.1.113730.1.13"] = OIDName{ShortName: "nsComment", LongName: "Netscape Comment"} + oidDotNotationToNames["2.16.840.1.113730.2.5"] = OIDName{ShortName: "nsCertSequence", LongName: "Netscape Certificate Sequence"} + oidDotNotationToNames["2.5.29"] = OIDName{ShortName: "id-ce", LongName: "id-ce"} + oidDotNotationToNames["2.5.29.14"] = OIDName{ShortName: "subjectKeyIdentifier", LongName: "X509v3 Subject Key Identifier"} + oidDotNotationToNames["2.5.29.15"] = OIDName{ShortName: "keyUsage", LongName: "X509v3 Key Usage"} + oidDotNotationToNames["2.5.29.16"] = OIDName{ShortName: "privateKeyUsagePeriod", LongName: "X509v3 Private Key Usage Period"} + oidDotNotationToNames["2.5.29.17"] = OIDName{ShortName: "subjectAltName", LongName: "X509v3 Subject Alternative Name"} + oidDotNotationToNames["2.5.29.18"] = OIDName{ShortName: "issuerAltName", LongName: "X509v3 Issuer Alternative Name"} + oidDotNotationToNames["2.5.29.19"] = OIDName{ShortName: "basicConstraints", LongName: "X509v3 Basic Constraints"} + oidDotNotationToNames["2.5.29.20"] = OIDName{ShortName: "crlNumber", LongName: "X509v3 CRL Number"} + oidDotNotationToNames["2.5.29.32"] = OIDName{ShortName: "certificatePolicies", LongName: "X509v3 Certificate Policies"} + oidDotNotationToNames["2.5.29.35"] = OIDName{ShortName: "authorityKeyIdentifier", LongName: "X509v3 Authority Key Identifier"} + oidDotNotationToNames["1.3.6.1.4.1.3029.1.2"] = OIDName{ShortName: "BF-CBC", LongName: "bf-cbc"} + oidDotNotationToNames["2.5.8.3.101"] = OIDName{ShortName: "MDC2", LongName: "mdc2"} + oidDotNotationToNames["2.5.8.3.100"] = OIDName{ShortName: "RSA-MDC2", LongName: "mdc2WithRSA"} + oidDotNotationToNames["2.5.4.42"] = OIDName{ShortName: "GN", LongName: "givenName"} + oidDotNotationToNames["2.5.4.4"] = OIDName{ShortName: "SN", LongName: "surname"} + oidDotNotationToNames["2.5.4.43"] = OIDName{ShortName: "initials", LongName: "initials"} + oidDotNotationToNames["2.5.29.31"] = OIDName{ShortName: "crlDistributionPoints", LongName: "X509v3 CRL Distribution Points"} + oidDotNotationToNames["1.3.14.3.2.3"] = OIDName{ShortName: "RSA-NP-MD5", LongName: "md5WithRSA"} + oidDotNotationToNames["2.5.4.5"] = OIDName{ShortName: "serialNumber", LongName: "serialNumber"} + oidDotNotationToNames["2.5.4.12"] = OIDName{ShortName: "title", LongName: "title"} + oidDotNotationToNames["2.5.4.13"] = OIDName{ShortName: "description", LongName: "description"} + oidDotNotationToNames["1.2.840.113533.7.66.10"] = OIDName{ShortName: "CAST5-CBC", LongName: "cast5-cbc"} + oidDotNotationToNames["1.2.840.113533.7.66.12"] = OIDName{ShortName: "pbeWithMD5AndCast5CBC", LongName: "pbeWithMD5AndCast5CBC"} + oidDotNotationToNames["1.2.840.10040.4.3"] = OIDName{ShortName: "DSA-SHA1", LongName: "dsaWithSHA1"} + oidDotNotationToNames["1.3.14.3.2.29"] = OIDName{ShortName: "RSA-SHA1-2", LongName: "sha1WithRSA"} + oidDotNotationToNames["1.2.840.10040.4.1"] = OIDName{ShortName: "DSA", LongName: "dsaEncryption"} + oidDotNotationToNames["1.3.36.3.2.1"] = OIDName{ShortName: "RIPEMD160", LongName: "ripemd160"} + oidDotNotationToNames["1.3.36.3.3.1.2"] = OIDName{ShortName: "RSA-RIPEMD160", LongName: "ripemd160WithRSA"} + oidDotNotationToNames["1.2.840.113549.3.8"] = 
OIDName{ShortName: "RC5-CBC", LongName: "rc5-cbc"} + oidDotNotationToNames["1.1.1.1.666.1"] = OIDName{ShortName: "RLE", LongName: "run length compression"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.8"] = OIDName{ShortName: "ZLIB", LongName: "zlib compression"} + oidDotNotationToNames["2.5.29.37"] = OIDName{ShortName: "extendedKeyUsage", LongName: "X509v3 Extended Key Usage"} + oidDotNotationToNames["1.3.6.1.5.5.7"] = OIDName{ShortName: "PKIX", LongName: "PKIX"} + oidDotNotationToNames["1.3.6.1.5.5.7.3"] = OIDName{ShortName: "id-kp", LongName: "id-kp"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.1"] = OIDName{ShortName: "serverAuth", LongName: "TLS Web Server Authentication"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.2"] = OIDName{ShortName: "clientAuth", LongName: "TLS Web Client Authentication"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.3"] = OIDName{ShortName: "codeSigning", LongName: "Code Signing"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.4"] = OIDName{ShortName: "emailProtection", LongName: "E-mail Protection"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.8"] = OIDName{ShortName: "timeStamping", LongName: "Time Stamping"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.21"] = OIDName{ShortName: "msCodeInd", LongName: "Microsoft Individual Code Signing"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.22"] = OIDName{ShortName: "msCodeCom", LongName: "Microsoft Commercial Code Signing"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.1"] = OIDName{ShortName: "msCTLSign", LongName: "Microsoft Trust List Signing"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.3"] = OIDName{ShortName: "msSGC", LongName: "Microsoft Server Gated Crypto"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.4"] = OIDName{ShortName: "msEFS", LongName: "Microsoft Encrypted File System"} + oidDotNotationToNames["2.16.840.1.113730.4.1"] = OIDName{ShortName: "nsSGC", LongName: "Netscape Server Gated Crypto"} + oidDotNotationToNames["2.5.29.27"] = OIDName{ShortName: "deltaCRL", LongName: "X509v3 Delta CRL Indicator"} + oidDotNotationToNames["2.5.29.21"] = OIDName{ShortName: "CRLReason", LongName: "X509v3 CRL Reason Code"} + oidDotNotationToNames["2.5.29.24"] = OIDName{ShortName: "invalidityDate", LongName: "Invalidity Date"} + oidDotNotationToNames["1.3.101.1.4.1"] = OIDName{ShortName: "SXNetID", LongName: "Strong Extranet ID"} + oidDotNotationToNames["1.2.840.113549.1.12.1.1"] = OIDName{ShortName: "PBE-SHA1-RC4-128", LongName: "pbeWithSHA1And128BitRC4"} + oidDotNotationToNames["1.2.840.113549.1.12.1.2"] = OIDName{ShortName: "PBE-SHA1-RC4-40", LongName: "pbeWithSHA1And40BitRC4"} + oidDotNotationToNames["1.2.840.113549.1.12.1.3"] = OIDName{ShortName: "PBE-SHA1-3DES", LongName: "pbeWithSHA1And3-KeyTripleDES-CBC"} + oidDotNotationToNames["1.2.840.113549.1.12.1.4"] = OIDName{ShortName: "PBE-SHA1-2DES", LongName: "pbeWithSHA1And2-KeyTripleDES-CBC"} + oidDotNotationToNames["1.2.840.113549.1.12.1.5"] = OIDName{ShortName: "PBE-SHA1-RC2-128", LongName: "pbeWithSHA1And128BitRC2-CBC"} + oidDotNotationToNames["1.2.840.113549.1.12.1.6"] = OIDName{ShortName: "PBE-SHA1-RC2-40", LongName: "pbeWithSHA1And40BitRC2-CBC"} + oidDotNotationToNames["1.2.840.113549.1.12.10.1.1"] = OIDName{ShortName: "keyBag", LongName: "keyBag"} + oidDotNotationToNames["1.2.840.113549.1.12.10.1.2"] = OIDName{ShortName: "pkcs8ShroudedKeyBag", LongName: "pkcs8ShroudedKeyBag"} + oidDotNotationToNames["1.2.840.113549.1.12.10.1.3"] = OIDName{ShortName: "certBag", LongName: "certBag"} + oidDotNotationToNames["1.2.840.113549.1.12.10.1.4"] = OIDName{ShortName: "crlBag", LongName: 
"crlBag"} + oidDotNotationToNames["1.2.840.113549.1.12.10.1.5"] = OIDName{ShortName: "secretBag", LongName: "secretBag"} + oidDotNotationToNames["1.2.840.113549.1.12.10.1.6"] = OIDName{ShortName: "safeContentsBag", LongName: "safeContentsBag"} + oidDotNotationToNames["1.2.840.113549.1.9.20"] = OIDName{ShortName: "friendlyName", LongName: "friendlyName"} + oidDotNotationToNames["1.2.840.113549.1.9.21"] = OIDName{ShortName: "localKeyID", LongName: "localKeyID"} + oidDotNotationToNames["1.2.840.113549.1.9.22.1"] = OIDName{ShortName: "x509Certificate", LongName: "x509Certificate"} + oidDotNotationToNames["1.2.840.113549.1.9.22.2"] = OIDName{ShortName: "sdsiCertificate", LongName: "sdsiCertificate"} + oidDotNotationToNames["1.2.840.113549.1.9.23.1"] = OIDName{ShortName: "x509Crl", LongName: "x509Crl"} + oidDotNotationToNames["1.2.840.113549.1.5.13"] = OIDName{ShortName: "PBES2", LongName: "PBES2"} + oidDotNotationToNames["1.2.840.113549.1.5.14"] = OIDName{ShortName: "PBMAC1", LongName: "PBMAC1"} + oidDotNotationToNames["1.2.840.113549.2.7"] = OIDName{ShortName: "hmacWithSHA1", LongName: "hmacWithSHA1"} + oidDotNotationToNames["1.3.6.1.5.5.7.2.1"] = OIDName{ShortName: "id-qt-cps", LongName: "Policy Qualifier CPS"} + oidDotNotationToNames["1.3.6.1.5.5.7.2.2"] = OIDName{ShortName: "id-qt-unotice", LongName: "Policy Qualifier User Notice"} + oidDotNotationToNames["1.2.840.113549.1.9.15"] = OIDName{ShortName: "SMIME-CAPS", LongName: "S/MIME Capabilities"} + oidDotNotationToNames["1.2.840.113549.1.5.4"] = OIDName{ShortName: "PBE-MD2-RC2-64", LongName: "pbeWithMD2AndRC2-CBC"} + oidDotNotationToNames["1.2.840.113549.1.5.6"] = OIDName{ShortName: "PBE-MD5-RC2-64", LongName: "pbeWithMD5AndRC2-CBC"} + oidDotNotationToNames["1.2.840.113549.1.5.10"] = OIDName{ShortName: "PBE-SHA1-DES", LongName: "pbeWithSHA1AndDES-CBC"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.14"] = OIDName{ShortName: "msExtReq", LongName: "Microsoft Extension Request"} + oidDotNotationToNames["1.2.840.113549.1.9.14"] = OIDName{ShortName: "extReq", LongName: "Extension Request"} + oidDotNotationToNames["2.5.4.41"] = OIDName{ShortName: "name", LongName: "name"} + oidDotNotationToNames["2.5.4.46"] = OIDName{ShortName: "dnQualifier", LongName: "dnQualifier"} + oidDotNotationToNames["1.3.6.1.5.5.7.1"] = OIDName{ShortName: "id-pe", LongName: "id-pe"} + oidDotNotationToNames["1.3.6.1.5.5.7.48"] = OIDName{ShortName: "id-ad", LongName: "id-ad"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.1"] = OIDName{ShortName: "authorityInfoAccess", LongName: "Authority Information Access"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1"] = OIDName{ShortName: "OCSP", LongName: "OCSP"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.2"] = OIDName{ShortName: "caIssuers", LongName: "CA Issuers"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.9"] = OIDName{ShortName: "OCSPSigning", LongName: "OCSP Signing"} + oidDotNotationToNames["1.0"] = OIDName{ShortName: "ISO", LongName: "iso"} + oidDotNotationToNames["1.2"] = OIDName{ShortName: "member-body", LongName: "ISO Member Body"} + oidDotNotationToNames["1.2.840"] = OIDName{ShortName: "ISO-US", LongName: "ISO US Member Body"} + oidDotNotationToNames["1.2.840.10040"] = OIDName{ShortName: "X9-57", LongName: "X9.57"} + oidDotNotationToNames["1.2.840.10040.4"] = OIDName{ShortName: "X9cm", LongName: "X9.57 CM ?"} + oidDotNotationToNames["1.2.840.113549.1.1"] = OIDName{ShortName: "pkcs1", LongName: "pkcs1"} + oidDotNotationToNames["1.2.840.113549.1.5"] = OIDName{ShortName: "pkcs5", LongName: "pkcs5"} + 
oidDotNotationToNames["1.2.840.113549.1.9.16"] = OIDName{ShortName: "SMIME", LongName: "S/MIME"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0"] = OIDName{ShortName: "id-smime-mod", LongName: "id-smime-mod"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1"] = OIDName{ShortName: "id-smime-ct", LongName: "id-smime-ct"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2"] = OIDName{ShortName: "id-smime-aa", LongName: "id-smime-aa"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3"] = OIDName{ShortName: "id-smime-alg", LongName: "id-smime-alg"} + oidDotNotationToNames["1.2.840.113549.1.9.16.4"] = OIDName{ShortName: "id-smime-cd", LongName: "id-smime-cd"} + oidDotNotationToNames["1.2.840.113549.1.9.16.5"] = OIDName{ShortName: "id-smime-spq", LongName: "id-smime-spq"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6"] = OIDName{ShortName: "id-smime-cti", LongName: "id-smime-cti"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.1"] = OIDName{ShortName: "id-smime-mod-cms", LongName: "id-smime-mod-cms"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.2"] = OIDName{ShortName: "id-smime-mod-ess", LongName: "id-smime-mod-ess"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.3"] = OIDName{ShortName: "id-smime-mod-oid", LongName: "id-smime-mod-oid"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.4"] = OIDName{ShortName: "id-smime-mod-msg-v3", LongName: "id-smime-mod-msg-v3"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.5"] = OIDName{ShortName: "id-smime-mod-ets-eSignature-88", LongName: "id-smime-mod-ets-eSignature-88"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.6"] = OIDName{ShortName: "id-smime-mod-ets-eSignature-97", LongName: "id-smime-mod-ets-eSignature-97"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.7"] = OIDName{ShortName: "id-smime-mod-ets-eSigPolicy-88", LongName: "id-smime-mod-ets-eSigPolicy-88"} + oidDotNotationToNames["1.2.840.113549.1.9.16.0.8"] = OIDName{ShortName: "id-smime-mod-ets-eSigPolicy-97", LongName: "id-smime-mod-ets-eSigPolicy-97"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.1"] = OIDName{ShortName: "id-smime-ct-receipt", LongName: "id-smime-ct-receipt"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.2"] = OIDName{ShortName: "id-smime-ct-authData", LongName: "id-smime-ct-authData"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.3"] = OIDName{ShortName: "id-smime-ct-publishCert", LongName: "id-smime-ct-publishCert"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.4"] = OIDName{ShortName: "id-smime-ct-TSTInfo", LongName: "id-smime-ct-TSTInfo"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.5"] = OIDName{ShortName: "id-smime-ct-TDTInfo", LongName: "id-smime-ct-TDTInfo"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.6"] = OIDName{ShortName: "id-smime-ct-contentInfo", LongName: "id-smime-ct-contentInfo"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.7"] = OIDName{ShortName: "id-smime-ct-DVCSRequestData", LongName: "id-smime-ct-DVCSRequestData"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.8"] = OIDName{ShortName: "id-smime-ct-DVCSResponseData", LongName: "id-smime-ct-DVCSResponseData"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.1"] = OIDName{ShortName: "id-smime-aa-receiptRequest", LongName: "id-smime-aa-receiptRequest"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.2"] = OIDName{ShortName: "id-smime-aa-securityLabel", LongName: "id-smime-aa-securityLabel"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.3"] = OIDName{ShortName: "id-smime-aa-mlExpandHistory", LongName: "id-smime-aa-mlExpandHistory"} + 
oidDotNotationToNames["1.2.840.113549.1.9.16.2.4"] = OIDName{ShortName: "id-smime-aa-contentHint", LongName: "id-smime-aa-contentHint"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.5"] = OIDName{ShortName: "id-smime-aa-msgSigDigest", LongName: "id-smime-aa-msgSigDigest"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.6"] = OIDName{ShortName: "id-smime-aa-encapContentType", LongName: "id-smime-aa-encapContentType"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.7"] = OIDName{ShortName: "id-smime-aa-contentIdentifier", LongName: "id-smime-aa-contentIdentifier"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.8"] = OIDName{ShortName: "id-smime-aa-macValue", LongName: "id-smime-aa-macValue"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.9"] = OIDName{ShortName: "id-smime-aa-equivalentLabels", LongName: "id-smime-aa-equivalentLabels"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.10"] = OIDName{ShortName: "id-smime-aa-contentReference", LongName: "id-smime-aa-contentReference"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.11"] = OIDName{ShortName: "id-smime-aa-encrypKeyPref", LongName: "id-smime-aa-encrypKeyPref"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.12"] = OIDName{ShortName: "id-smime-aa-signingCertificate", LongName: "id-smime-aa-signingCertificate"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.13"] = OIDName{ShortName: "id-smime-aa-smimeEncryptCerts", LongName: "id-smime-aa-smimeEncryptCerts"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.14"] = OIDName{ShortName: "id-smime-aa-timeStampToken", LongName: "id-smime-aa-timeStampToken"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.15"] = OIDName{ShortName: "id-smime-aa-ets-sigPolicyId", LongName: "id-smime-aa-ets-sigPolicyId"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.16"] = OIDName{ShortName: "id-smime-aa-ets-commitmentType", LongName: "id-smime-aa-ets-commitmentType"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.17"] = OIDName{ShortName: "id-smime-aa-ets-signerLocation", LongName: "id-smime-aa-ets-signerLocation"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.18"] = OIDName{ShortName: "id-smime-aa-ets-signerAttr", LongName: "id-smime-aa-ets-signerAttr"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.19"] = OIDName{ShortName: "id-smime-aa-ets-otherSigCert", LongName: "id-smime-aa-ets-otherSigCert"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.20"] = OIDName{ShortName: "id-smime-aa-ets-contentTimestamp", LongName: "id-smime-aa-ets-contentTimestamp"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.21"] = OIDName{ShortName: "id-smime-aa-ets-CertificateRefs", LongName: "id-smime-aa-ets-CertificateRefs"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.22"] = OIDName{ShortName: "id-smime-aa-ets-RevocationRefs", LongName: "id-smime-aa-ets-RevocationRefs"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.23"] = OIDName{ShortName: "id-smime-aa-ets-certValues", LongName: "id-smime-aa-ets-certValues"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.24"] = OIDName{ShortName: "id-smime-aa-ets-revocationValues", LongName: "id-smime-aa-ets-revocationValues"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.25"] = OIDName{ShortName: "id-smime-aa-ets-escTimeStamp", LongName: "id-smime-aa-ets-escTimeStamp"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.26"] = OIDName{ShortName: "id-smime-aa-ets-certCRLTimestamp", LongName: "id-smime-aa-ets-certCRLTimestamp"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.27"] = OIDName{ShortName: "id-smime-aa-ets-archiveTimeStamp", LongName: 
"id-smime-aa-ets-archiveTimeStamp"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.28"] = OIDName{ShortName: "id-smime-aa-signatureType", LongName: "id-smime-aa-signatureType"} + oidDotNotationToNames["1.2.840.113549.1.9.16.2.29"] = OIDName{ShortName: "id-smime-aa-dvcs-dvc", LongName: "id-smime-aa-dvcs-dvc"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.1"] = OIDName{ShortName: "id-smime-alg-ESDHwith3DES", LongName: "id-smime-alg-ESDHwith3DES"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.2"] = OIDName{ShortName: "id-smime-alg-ESDHwithRC2", LongName: "id-smime-alg-ESDHwithRC2"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.3"] = OIDName{ShortName: "id-smime-alg-3DESwrap", LongName: "id-smime-alg-3DESwrap"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.4"] = OIDName{ShortName: "id-smime-alg-RC2wrap", LongName: "id-smime-alg-RC2wrap"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.5"] = OIDName{ShortName: "id-smime-alg-ESDH", LongName: "id-smime-alg-ESDH"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.6"] = OIDName{ShortName: "id-smime-alg-CMS3DESwrap", LongName: "id-smime-alg-CMS3DESwrap"} + oidDotNotationToNames["1.2.840.113549.1.9.16.3.7"] = OIDName{ShortName: "id-smime-alg-CMSRC2wrap", LongName: "id-smime-alg-CMSRC2wrap"} + oidDotNotationToNames["1.2.840.113549.1.9.16.4.1"] = OIDName{ShortName: "id-smime-cd-ldap", LongName: "id-smime-cd-ldap"} + oidDotNotationToNames["1.2.840.113549.1.9.16.5.1"] = OIDName{ShortName: "id-smime-spq-ets-sqt-uri", LongName: "id-smime-spq-ets-sqt-uri"} + oidDotNotationToNames["1.2.840.113549.1.9.16.5.2"] = OIDName{ShortName: "id-smime-spq-ets-sqt-unotice", LongName: "id-smime-spq-ets-sqt-unotice"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6.1"] = OIDName{ShortName: "id-smime-cti-ets-proofOfOrigin", LongName: "id-smime-cti-ets-proofOfOrigin"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6.2"] = OIDName{ShortName: "id-smime-cti-ets-proofOfReceipt", LongName: "id-smime-cti-ets-proofOfReceipt"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6.3"] = OIDName{ShortName: "id-smime-cti-ets-proofOfDelivery", LongName: "id-smime-cti-ets-proofOfDelivery"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6.4"] = OIDName{ShortName: "id-smime-cti-ets-proofOfSender", LongName: "id-smime-cti-ets-proofOfSender"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6.5"] = OIDName{ShortName: "id-smime-cti-ets-proofOfApproval", LongName: "id-smime-cti-ets-proofOfApproval"} + oidDotNotationToNames["1.2.840.113549.1.9.16.6.6"] = OIDName{ShortName: "id-smime-cti-ets-proofOfCreation", LongName: "id-smime-cti-ets-proofOfCreation"} + oidDotNotationToNames["1.2.840.113549.2.4"] = OIDName{ShortName: "MD4", LongName: "md4"} + oidDotNotationToNames["1.3.6.1.5.5.7.0"] = OIDName{ShortName: "id-pkix-mod", LongName: "id-pkix-mod"} + oidDotNotationToNames["1.3.6.1.5.5.7.2"] = OIDName{ShortName: "id-qt", LongName: "id-qt"} + oidDotNotationToNames["1.3.6.1.5.5.7.4"] = OIDName{ShortName: "id-it", LongName: "id-it"} + oidDotNotationToNames["1.3.6.1.5.5.7.5"] = OIDName{ShortName: "id-pkip", LongName: "id-pkip"} + oidDotNotationToNames["1.3.6.1.5.5.7.6"] = OIDName{ShortName: "id-alg", LongName: "id-alg"} + oidDotNotationToNames["1.3.6.1.5.5.7.7"] = OIDName{ShortName: "id-cmc", LongName: "id-cmc"} + oidDotNotationToNames["1.3.6.1.5.5.7.8"] = OIDName{ShortName: "id-on", LongName: "id-on"} + oidDotNotationToNames["1.3.6.1.5.5.7.9"] = OIDName{ShortName: "id-pda", LongName: "id-pda"} + oidDotNotationToNames["1.3.6.1.5.5.7.10"] = OIDName{ShortName: "id-aca", LongName: "id-aca"} + 
oidDotNotationToNames["1.3.6.1.5.5.7.11"] = OIDName{ShortName: "id-qcs", LongName: "id-qcs"} + oidDotNotationToNames["1.3.6.1.5.5.7.12"] = OIDName{ShortName: "id-cct", LongName: "id-cct"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.1"] = OIDName{ShortName: "id-pkix1-explicit-88", LongName: "id-pkix1-explicit-88"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.2"] = OIDName{ShortName: "id-pkix1-implicit-88", LongName: "id-pkix1-implicit-88"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.3"] = OIDName{ShortName: "id-pkix1-explicit-93", LongName: "id-pkix1-explicit-93"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.4"] = OIDName{ShortName: "id-pkix1-implicit-93", LongName: "id-pkix1-implicit-93"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.5"] = OIDName{ShortName: "id-mod-crmf", LongName: "id-mod-crmf"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.6"] = OIDName{ShortName: "id-mod-cmc", LongName: "id-mod-cmc"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.7"] = OIDName{ShortName: "id-mod-kea-profile-88", LongName: "id-mod-kea-profile-88"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.8"] = OIDName{ShortName: "id-mod-kea-profile-93", LongName: "id-mod-kea-profile-93"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.9"] = OIDName{ShortName: "id-mod-cmp", LongName: "id-mod-cmp"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.10"] = OIDName{ShortName: "id-mod-qualified-cert-88", LongName: "id-mod-qualified-cert-88"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.11"] = OIDName{ShortName: "id-mod-qualified-cert-93", LongName: "id-mod-qualified-cert-93"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.12"] = OIDName{ShortName: "id-mod-attribute-cert", LongName: "id-mod-attribute-cert"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.13"] = OIDName{ShortName: "id-mod-timestamp-protocol", LongName: "id-mod-timestamp-protocol"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.14"] = OIDName{ShortName: "id-mod-ocsp", LongName: "id-mod-ocsp"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.15"] = OIDName{ShortName: "id-mod-dvcs", LongName: "id-mod-dvcs"} + oidDotNotationToNames["1.3.6.1.5.5.7.0.16"] = OIDName{ShortName: "id-mod-cmp2000", LongName: "id-mod-cmp2000"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.2"] = OIDName{ShortName: "biometricInfo", LongName: "Biometric Info"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.3"] = OIDName{ShortName: "qcStatements", LongName: "qcStatements"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.4"] = OIDName{ShortName: "ac-auditEntity", LongName: "ac-auditEntity"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.5"] = OIDName{ShortName: "ac-targeting", LongName: "ac-targeting"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.6"] = OIDName{ShortName: "aaControls", LongName: "aaControls"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.7"] = OIDName{ShortName: "sbgp-ipAddrBlock", LongName: "sbgp-ipAddrBlock"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.8"] = OIDName{ShortName: "sbgp-autonomousSysNum", LongName: "sbgp-autonomousSysNum"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.9"] = OIDName{ShortName: "sbgp-routerIdentifier", LongName: "sbgp-routerIdentifier"} + oidDotNotationToNames["1.3.6.1.5.5.7.2.3"] = OIDName{ShortName: "textNotice", LongName: "textNotice"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.5"] = OIDName{ShortName: "ipsecEndSystem", LongName: "IPSec End System"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.6"] = OIDName{ShortName: "ipsecTunnel", LongName: "IPSec Tunnel"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.7"] = OIDName{ShortName: "ipsecUser", LongName: "IPSec User"} + oidDotNotationToNames["1.3.6.1.5.5.7.3.10"] = OIDName{ShortName: "DVCS", LongName: "dvcs"} + 
oidDotNotationToNames["1.3.6.1.5.5.7.4.1"] = OIDName{ShortName: "id-it-caProtEncCert", LongName: "id-it-caProtEncCert"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.2"] = OIDName{ShortName: "id-it-signKeyPairTypes", LongName: "id-it-signKeyPairTypes"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.3"] = OIDName{ShortName: "id-it-encKeyPairTypes", LongName: "id-it-encKeyPairTypes"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.4"] = OIDName{ShortName: "id-it-preferredSymmAlg", LongName: "id-it-preferredSymmAlg"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.5"] = OIDName{ShortName: "id-it-caKeyUpdateInfo", LongName: "id-it-caKeyUpdateInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.6"] = OIDName{ShortName: "id-it-currentCRL", LongName: "id-it-currentCRL"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.7"] = OIDName{ShortName: "id-it-unsupportedOIDs", LongName: "id-it-unsupportedOIDs"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.8"] = OIDName{ShortName: "id-it-subscriptionRequest", LongName: "id-it-subscriptionRequest"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.9"] = OIDName{ShortName: "id-it-subscriptionResponse", LongName: "id-it-subscriptionResponse"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.10"] = OIDName{ShortName: "id-it-keyPairParamReq", LongName: "id-it-keyPairParamReq"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.11"] = OIDName{ShortName: "id-it-keyPairParamRep", LongName: "id-it-keyPairParamRep"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.12"] = OIDName{ShortName: "id-it-revPassphrase", LongName: "id-it-revPassphrase"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.13"] = OIDName{ShortName: "id-it-implicitConfirm", LongName: "id-it-implicitConfirm"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.14"] = OIDName{ShortName: "id-it-confirmWaitTime", LongName: "id-it-confirmWaitTime"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.15"] = OIDName{ShortName: "id-it-origPKIMessage", LongName: "id-it-origPKIMessage"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1"] = OIDName{ShortName: "id-regCtrl", LongName: "id-regCtrl"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.2"] = OIDName{ShortName: "id-regInfo", LongName: "id-regInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1.1"] = OIDName{ShortName: "id-regCtrl-regToken", LongName: "id-regCtrl-regToken"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1.2"] = OIDName{ShortName: "id-regCtrl-authenticator", LongName: "id-regCtrl-authenticator"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1.3"] = OIDName{ShortName: "id-regCtrl-pkiPublicationInfo", LongName: "id-regCtrl-pkiPublicationInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1.4"] = OIDName{ShortName: "id-regCtrl-pkiArchiveOptions", LongName: "id-regCtrl-pkiArchiveOptions"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1.5"] = OIDName{ShortName: "id-regCtrl-oldCertID", LongName: "id-regCtrl-oldCertID"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.1.6"] = OIDName{ShortName: "id-regCtrl-protocolEncrKey", LongName: "id-regCtrl-protocolEncrKey"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.2.1"] = OIDName{ShortName: "id-regInfo-utf8Pairs", LongName: "id-regInfo-utf8Pairs"} + oidDotNotationToNames["1.3.6.1.5.5.7.5.2.2"] = OIDName{ShortName: "id-regInfo-certReq", LongName: "id-regInfo-certReq"} + oidDotNotationToNames["1.3.6.1.5.5.7.6.1"] = OIDName{ShortName: "id-alg-des40", LongName: "id-alg-des40"} + oidDotNotationToNames["1.3.6.1.5.5.7.6.2"] = OIDName{ShortName: "id-alg-noSignature", LongName: "id-alg-noSignature"} + oidDotNotationToNames["1.3.6.1.5.5.7.6.3"] = OIDName{ShortName: "id-alg-dh-sig-hmac-sha1", LongName: "id-alg-dh-sig-hmac-sha1"} + 
oidDotNotationToNames["1.3.6.1.5.5.7.6.4"] = OIDName{ShortName: "id-alg-dh-pop", LongName: "id-alg-dh-pop"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.1"] = OIDName{ShortName: "id-cmc-statusInfo", LongName: "id-cmc-statusInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.2"] = OIDName{ShortName: "id-cmc-identification", LongName: "id-cmc-identification"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.3"] = OIDName{ShortName: "id-cmc-identityProof", LongName: "id-cmc-identityProof"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.4"] = OIDName{ShortName: "id-cmc-dataReturn", LongName: "id-cmc-dataReturn"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.5"] = OIDName{ShortName: "id-cmc-transactionId", LongName: "id-cmc-transactionId"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.6"] = OIDName{ShortName: "id-cmc-senderNonce", LongName: "id-cmc-senderNonce"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.7"] = OIDName{ShortName: "id-cmc-recipientNonce", LongName: "id-cmc-recipientNonce"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.8"] = OIDName{ShortName: "id-cmc-addExtensions", LongName: "id-cmc-addExtensions"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.9"] = OIDName{ShortName: "id-cmc-encryptedPOP", LongName: "id-cmc-encryptedPOP"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.10"] = OIDName{ShortName: "id-cmc-decryptedPOP", LongName: "id-cmc-decryptedPOP"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.11"] = OIDName{ShortName: "id-cmc-lraPOPWitness", LongName: "id-cmc-lraPOPWitness"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.15"] = OIDName{ShortName: "id-cmc-getCert", LongName: "id-cmc-getCert"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.16"] = OIDName{ShortName: "id-cmc-getCRL", LongName: "id-cmc-getCRL"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.17"] = OIDName{ShortName: "id-cmc-revokeRequest", LongName: "id-cmc-revokeRequest"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.18"] = OIDName{ShortName: "id-cmc-regInfo", LongName: "id-cmc-regInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.19"] = OIDName{ShortName: "id-cmc-responseInfo", LongName: "id-cmc-responseInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.21"] = OIDName{ShortName: "id-cmc-queryPending", LongName: "id-cmc-queryPending"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.22"] = OIDName{ShortName: "id-cmc-popLinkRandom", LongName: "id-cmc-popLinkRandom"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.23"] = OIDName{ShortName: "id-cmc-popLinkWitness", LongName: "id-cmc-popLinkWitness"} + oidDotNotationToNames["1.3.6.1.5.5.7.7.24"] = OIDName{ShortName: "id-cmc-confirmCertAcceptance", LongName: "id-cmc-confirmCertAcceptance"} + oidDotNotationToNames["1.3.6.1.5.5.7.8.1"] = OIDName{ShortName: "id-on-personalData", LongName: "id-on-personalData"} + oidDotNotationToNames["1.3.6.1.5.5.7.9.1"] = OIDName{ShortName: "id-pda-dateOfBirth", LongName: "id-pda-dateOfBirth"} + oidDotNotationToNames["1.3.6.1.5.5.7.9.2"] = OIDName{ShortName: "id-pda-placeOfBirth", LongName: "id-pda-placeOfBirth"} + oidDotNotationToNames["1.3.6.1.5.5.7.9.3"] = OIDName{ShortName: "id-pda-gender", LongName: "id-pda-gender"} + oidDotNotationToNames["1.3.6.1.5.5.7.9.4"] = OIDName{ShortName: "id-pda-countryOfCitizenship", LongName: "id-pda-countryOfCitizenship"} + oidDotNotationToNames["1.3.6.1.5.5.7.9.5"] = OIDName{ShortName: "id-pda-countryOfResidence", LongName: "id-pda-countryOfResidence"} + oidDotNotationToNames["1.3.6.1.5.5.7.10.1"] = OIDName{ShortName: "id-aca-authenticationInfo", LongName: "id-aca-authenticationInfo"} + oidDotNotationToNames["1.3.6.1.5.5.7.10.2"] = OIDName{ShortName: "id-aca-accessIdentity", LongName: 
"id-aca-accessIdentity"} + oidDotNotationToNames["1.3.6.1.5.5.7.10.3"] = OIDName{ShortName: "id-aca-chargingIdentity", LongName: "id-aca-chargingIdentity"} + oidDotNotationToNames["1.3.6.1.5.5.7.10.4"] = OIDName{ShortName: "id-aca-group", LongName: "id-aca-group"} + oidDotNotationToNames["1.3.6.1.5.5.7.10.5"] = OIDName{ShortName: "id-aca-role", LongName: "id-aca-role"} + oidDotNotationToNames["1.3.6.1.5.5.7.11.1"] = OIDName{ShortName: "id-qcs-pkixQCSyntax-v1", LongName: "id-qcs-pkixQCSyntax-v1"} + oidDotNotationToNames["1.3.6.1.5.5.7.12.1"] = OIDName{ShortName: "id-cct-crs", LongName: "id-cct-crs"} + oidDotNotationToNames["1.3.6.1.5.5.7.12.2"] = OIDName{ShortName: "id-cct-PKIData", LongName: "id-cct-PKIData"} + oidDotNotationToNames["1.3.6.1.5.5.7.12.3"] = OIDName{ShortName: "id-cct-PKIResponse", LongName: "id-cct-PKIResponse"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.3"] = OIDName{ShortName: "ad_timestamping", LongName: "AD Time Stamping"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.4"] = OIDName{ShortName: "AD_DVCS", LongName: "ad dvcs"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.1"] = OIDName{ShortName: "basicOCSPResponse", LongName: "Basic OCSP Response"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.2"] = OIDName{ShortName: "Nonce", LongName: "OCSP Nonce"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.3"] = OIDName{ShortName: "CrlID", LongName: "OCSP CRL ID"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.4"] = OIDName{ShortName: "acceptableResponses", LongName: "Acceptable OCSP Responses"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.5"] = OIDName{ShortName: "noCheck", LongName: "OCSP No Check"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.6"] = OIDName{ShortName: "archiveCutoff", LongName: "OCSP Archive Cutoff"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.7"] = OIDName{ShortName: "serviceLocator", LongName: "OCSP Service Locator"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.8"] = OIDName{ShortName: "extendedStatus", LongName: "Extended OCSP Status"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.9"] = OIDName{ShortName: "valid", LongName: "valid"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.10"] = OIDName{ShortName: "path", LongName: "path"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.1.11"] = OIDName{ShortName: "trustRoot", LongName: "Trust Root"} + oidDotNotationToNames["1.3.14.3.2"] = OIDName{ShortName: "algorithm", LongName: "algorithm"} + oidDotNotationToNames["1.3.14.3.2.11"] = OIDName{ShortName: "rsaSignature", LongName: "rsaSignature"} + oidDotNotationToNames["2.5.8"] = OIDName{ShortName: "X500algorithms", LongName: "directory services - algorithms"} + oidDotNotationToNames["1.3"] = OIDName{ShortName: "ORG", LongName: "org"} + oidDotNotationToNames["1.3.6"] = OIDName{ShortName: "DOD", LongName: "dod"} + oidDotNotationToNames["1.3.6.1"] = OIDName{ShortName: "IANA", LongName: "iana"} + oidDotNotationToNames["1.3.6.1.1"] = OIDName{ShortName: "directory", LongName: "Directory"} + oidDotNotationToNames["1.3.6.1.2"] = OIDName{ShortName: "mgmt", LongName: "Management"} + oidDotNotationToNames["1.3.6.1.3"] = OIDName{ShortName: "experimental", LongName: "Experimental"} + oidDotNotationToNames["1.3.6.1.4"] = OIDName{ShortName: "private", LongName: "Private"} + oidDotNotationToNames["1.3.6.1.5"] = OIDName{ShortName: "security", LongName: "Security"} + oidDotNotationToNames["1.3.6.1.6"] = OIDName{ShortName: "snmpv2", LongName: "SNMPv2"} + oidDotNotationToNames["1.3.6.1.7"] = OIDName{ShortName: "Mail", LongName: "Mail"} + oidDotNotationToNames["1.3.6.1.4.1"] = OIDName{ShortName: "enterprises", 
LongName: "Enterprises"} + oidDotNotationToNames["1.3.6.1.4.1.1466.344"] = OIDName{ShortName: "dcobject", LongName: "dcObject"} + oidDotNotationToNames["0.9.2342.19200300.100.1.25"] = OIDName{ShortName: "DC", LongName: "domainComponent"} + oidDotNotationToNames["0.9.2342.19200300.100.4.13"] = OIDName{ShortName: "domain", LongName: "Domain"} + oidDotNotationToNames["0.0"] = OIDName{ShortName: "NULL", LongName: "NULL"} + oidDotNotationToNames["2.5.1.5"] = OIDName{ShortName: "selected-attribute-types", LongName: "Selected Attribute Types"} + oidDotNotationToNames["2.5.1.5.55"] = OIDName{ShortName: "clearance", LongName: "clearance"} + oidDotNotationToNames["1.2.840.113549.1.1.3"] = OIDName{ShortName: "RSA-MD4", LongName: "md4WithRSAEncryption"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.10"] = OIDName{ShortName: "ac-proxying", LongName: "ac-proxying"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.11"] = OIDName{ShortName: "subjectInfoAccess", LongName: "Subject Information Access"} + oidDotNotationToNames["1.3.6.1.5.5.7.10.6"] = OIDName{ShortName: "id-aca-encAttrs", LongName: "id-aca-encAttrs"} + oidDotNotationToNames["2.5.4.72"] = OIDName{ShortName: "role", LongName: "role"} + oidDotNotationToNames["2.5.29.36"] = OIDName{ShortName: "policyConstraints", LongName: "X509v3 Policy Constraints"} + oidDotNotationToNames["2.5.29.55"] = OIDName{ShortName: "targetInformation", LongName: "X509v3 AC Targeting"} + oidDotNotationToNames["2.5.29.56"] = OIDName{ShortName: "noRevAvail", LongName: "X509v3 No Revocation Available"} + oidDotNotationToNames["0.0"] = OIDName{ShortName: "NULL", LongName: "NULL"} + oidDotNotationToNames["1.2.840.10045"] = OIDName{ShortName: "ansi-X9-62", LongName: "ANSI X9.62"} + oidDotNotationToNames["1.2.840.10045.1.1"] = OIDName{ShortName: "prime-field", LongName: "prime-field"} + oidDotNotationToNames["1.2.840.10045.1.2"] = OIDName{ShortName: "characteristic-two-field", LongName: "characteristic-two-field"} + oidDotNotationToNames["1.2.840.10045.2.1"] = OIDName{ShortName: "id-ecPublicKey", LongName: "id-ecPublicKey"} + oidDotNotationToNames["1.2.840.10045.3.1.1"] = OIDName{ShortName: "prime192v1", LongName: "prime192v1"} + oidDotNotationToNames["1.2.840.10045.3.1.2"] = OIDName{ShortName: "prime192v2", LongName: "prime192v2"} + oidDotNotationToNames["1.2.840.10045.3.1.3"] = OIDName{ShortName: "prime192v3", LongName: "prime192v3"} + oidDotNotationToNames["1.2.840.10045.3.1.4"] = OIDName{ShortName: "prime239v1", LongName: "prime239v1"} + oidDotNotationToNames["1.2.840.10045.3.1.5"] = OIDName{ShortName: "prime239v2", LongName: "prime239v2"} + oidDotNotationToNames["1.2.840.10045.3.1.6"] = OIDName{ShortName: "prime239v3", LongName: "prime239v3"} + oidDotNotationToNames["1.2.840.10045.3.1.7"] = OIDName{ShortName: "prime256v1", LongName: "prime256v1"} + oidDotNotationToNames["1.2.840.10045.4.1"] = OIDName{ShortName: "ecdsa-with-SHA1", LongName: "ecdsa-with-SHA1"} + oidDotNotationToNames["1.3.6.1.4.1.311.17.1"] = OIDName{ShortName: "CSPName", LongName: "Microsoft CSP Name"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.1"] = OIDName{ShortName: "AES-128-ECB", LongName: "aes-128-ecb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.2"] = OIDName{ShortName: "AES-128-CBC", LongName: "aes-128-cbc"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.3"] = OIDName{ShortName: "AES-128-OFB", LongName: "aes-128-ofb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.4"] = OIDName{ShortName: "AES-128-CFB", LongName: "aes-128-cfb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.21"] = OIDName{ShortName: 
"AES-192-ECB", LongName: "aes-192-ecb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.22"] = OIDName{ShortName: "AES-192-CBC", LongName: "aes-192-cbc"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.23"] = OIDName{ShortName: "AES-192-OFB", LongName: "aes-192-ofb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.24"] = OIDName{ShortName: "AES-192-CFB", LongName: "aes-192-cfb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.41"] = OIDName{ShortName: "AES-256-ECB", LongName: "aes-256-ecb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.42"] = OIDName{ShortName: "AES-256-CBC", LongName: "aes-256-cbc"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.43"] = OIDName{ShortName: "AES-256-OFB", LongName: "aes-256-ofb"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.44"] = OIDName{ShortName: "AES-256-CFB", LongName: "aes-256-cfb"} + oidDotNotationToNames["2.5.29.23"] = OIDName{ShortName: "holdInstructionCode", LongName: "Hold Instruction Code"} + oidDotNotationToNames["1.2.840.10040.2.1"] = OIDName{ShortName: "holdInstructionNone", LongName: "Hold Instruction None"} + oidDotNotationToNames["1.2.840.10040.2.2"] = OIDName{ShortName: "holdInstructionCallIssuer", LongName: "Hold Instruction Call Issuer"} + oidDotNotationToNames["1.2.840.10040.2.3"] = OIDName{ShortName: "holdInstructionReject", LongName: "Hold Instruction Reject"} + oidDotNotationToNames["0.9"] = OIDName{ShortName: "data", LongName: "data"} + oidDotNotationToNames["0.9.2342"] = OIDName{ShortName: "pss", LongName: "pss"} + oidDotNotationToNames["0.9.2342.19200300"] = OIDName{ShortName: "ucl", LongName: "ucl"} + oidDotNotationToNames["0.9.2342.19200300.100"] = OIDName{ShortName: "pilot", LongName: "pilot"} + oidDotNotationToNames["0.9.2342.19200300.100.1"] = OIDName{ShortName: "pilotAttributeType", LongName: "pilotAttributeType"} + oidDotNotationToNames["0.9.2342.19200300.100.3"] = OIDName{ShortName: "pilotAttributeSyntax", LongName: "pilotAttributeSyntax"} + oidDotNotationToNames["0.9.2342.19200300.100.4"] = OIDName{ShortName: "pilotObjectClass", LongName: "pilotObjectClass"} + oidDotNotationToNames["0.9.2342.19200300.100.10"] = OIDName{ShortName: "pilotGroups", LongName: "pilotGroups"} + oidDotNotationToNames["0.9.2342.19200300.100.3.4"] = OIDName{ShortName: "iA5StringSyntax", LongName: "iA5StringSyntax"} + oidDotNotationToNames["0.9.2342.19200300.100.3.5"] = OIDName{ShortName: "caseIgnoreIA5StringSyntax", LongName: "caseIgnoreIA5StringSyntax"} + oidDotNotationToNames["0.9.2342.19200300.100.4.3"] = OIDName{ShortName: "pilotObject", LongName: "pilotObject"} + oidDotNotationToNames["0.9.2342.19200300.100.4.4"] = OIDName{ShortName: "pilotPerson", LongName: "pilotPerson"} + oidDotNotationToNames["0.9.2342.19200300.100.4.5"] = OIDName{ShortName: "account", LongName: "account"} + oidDotNotationToNames["0.9.2342.19200300.100.4.6"] = OIDName{ShortName: "document", LongName: "document"} + oidDotNotationToNames["0.9.2342.19200300.100.4.7"] = OIDName{ShortName: "room", LongName: "room"} + oidDotNotationToNames["0.9.2342.19200300.100.4.9"] = OIDName{ShortName: "documentSeries", LongName: "documentSeries"} + oidDotNotationToNames["0.9.2342.19200300.100.4.14"] = OIDName{ShortName: "rFC822localPart", LongName: "rFC822localPart"} + oidDotNotationToNames["0.9.2342.19200300.100.4.15"] = OIDName{ShortName: "dNSDomain", LongName: "dNSDomain"} + oidDotNotationToNames["0.9.2342.19200300.100.4.17"] = OIDName{ShortName: "domainRelatedObject", LongName: "domainRelatedObject"} + oidDotNotationToNames["0.9.2342.19200300.100.4.18"] = OIDName{ShortName: "friendlyCountry", 
LongName: "friendlyCountry"} + oidDotNotationToNames["0.9.2342.19200300.100.4.19"] = OIDName{ShortName: "simpleSecurityObject", LongName: "simpleSecurityObject"} + oidDotNotationToNames["0.9.2342.19200300.100.4.20"] = OIDName{ShortName: "pilotOrganization", LongName: "pilotOrganization"} + oidDotNotationToNames["0.9.2342.19200300.100.4.21"] = OIDName{ShortName: "pilotDSA", LongName: "pilotDSA"} + oidDotNotationToNames["0.9.2342.19200300.100.4.22"] = OIDName{ShortName: "qualityLabelledData", LongName: "qualityLabelledData"} + oidDotNotationToNames["0.9.2342.19200300.100.1.1"] = OIDName{ShortName: "UID", LongName: "userId"} + oidDotNotationToNames["0.9.2342.19200300.100.1.2"] = OIDName{ShortName: "textEncodedORAddress", LongName: "textEncodedORAddress"} + oidDotNotationToNames["0.9.2342.19200300.100.1.3"] = OIDName{ShortName: "mail", LongName: "rfc822Mailbox"} + oidDotNotationToNames["0.9.2342.19200300.100.1.4"] = OIDName{ShortName: "info", LongName: "info"} + oidDotNotationToNames["0.9.2342.19200300.100.1.5"] = OIDName{ShortName: "favouriteDrink", LongName: "favouriteDrink"} + oidDotNotationToNames["0.9.2342.19200300.100.1.6"] = OIDName{ShortName: "roomNumber", LongName: "roomNumber"} + oidDotNotationToNames["0.9.2342.19200300.100.1.7"] = OIDName{ShortName: "photo", LongName: "photo"} + oidDotNotationToNames["0.9.2342.19200300.100.1.8"] = OIDName{ShortName: "userClass", LongName: "userClass"} + oidDotNotationToNames["0.9.2342.19200300.100.1.9"] = OIDName{ShortName: "host", LongName: "host"} + oidDotNotationToNames["0.9.2342.19200300.100.1.10"] = OIDName{ShortName: "manager", LongName: "manager"} + oidDotNotationToNames["0.9.2342.19200300.100.1.11"] = OIDName{ShortName: "documentIdentifier", LongName: "documentIdentifier"} + oidDotNotationToNames["0.9.2342.19200300.100.1.12"] = OIDName{ShortName: "documentTitle", LongName: "documentTitle"} + oidDotNotationToNames["0.9.2342.19200300.100.1.13"] = OIDName{ShortName: "documentVersion", LongName: "documentVersion"} + oidDotNotationToNames["0.9.2342.19200300.100.1.14"] = OIDName{ShortName: "documentAuthor", LongName: "documentAuthor"} + oidDotNotationToNames["0.9.2342.19200300.100.1.15"] = OIDName{ShortName: "documentLocation", LongName: "documentLocation"} + oidDotNotationToNames["0.9.2342.19200300.100.1.20"] = OIDName{ShortName: "homeTelephoneNumber", LongName: "homeTelephoneNumber"} + oidDotNotationToNames["0.9.2342.19200300.100.1.21"] = OIDName{ShortName: "secretary", LongName: "secretary"} + oidDotNotationToNames["0.9.2342.19200300.100.1.22"] = OIDName{ShortName: "otherMailbox", LongName: "otherMailbox"} + oidDotNotationToNames["0.9.2342.19200300.100.1.23"] = OIDName{ShortName: "lastModifiedTime", LongName: "lastModifiedTime"} + oidDotNotationToNames["0.9.2342.19200300.100.1.24"] = OIDName{ShortName: "lastModifiedBy", LongName: "lastModifiedBy"} + oidDotNotationToNames["0.9.2342.19200300.100.1.26"] = OIDName{ShortName: "aRecord", LongName: "aRecord"} + oidDotNotationToNames["0.9.2342.19200300.100.1.27"] = OIDName{ShortName: "pilotAttributeType27", LongName: "pilotAttributeType27"} + oidDotNotationToNames["0.9.2342.19200300.100.1.28"] = OIDName{ShortName: "mXRecord", LongName: "mXRecord"} + oidDotNotationToNames["0.9.2342.19200300.100.1.29"] = OIDName{ShortName: "nSRecord", LongName: "nSRecord"} + oidDotNotationToNames["0.9.2342.19200300.100.1.30"] = OIDName{ShortName: "sOARecord", LongName: "sOARecord"} + oidDotNotationToNames["0.9.2342.19200300.100.1.31"] = OIDName{ShortName: "cNAMERecord", LongName: "cNAMERecord"} + 
oidDotNotationToNames["0.9.2342.19200300.100.1.37"] = OIDName{ShortName: "associatedDomain", LongName: "associatedDomain"} + oidDotNotationToNames["0.9.2342.19200300.100.1.38"] = OIDName{ShortName: "associatedName", LongName: "associatedName"} + oidDotNotationToNames["0.9.2342.19200300.100.1.39"] = OIDName{ShortName: "homePostalAddress", LongName: "homePostalAddress"} + oidDotNotationToNames["0.9.2342.19200300.100.1.40"] = OIDName{ShortName: "personalTitle", LongName: "personalTitle"} + oidDotNotationToNames["0.9.2342.19200300.100.1.41"] = OIDName{ShortName: "mobileTelephoneNumber", LongName: "mobileTelephoneNumber"} + oidDotNotationToNames["0.9.2342.19200300.100.1.42"] = OIDName{ShortName: "pagerTelephoneNumber", LongName: "pagerTelephoneNumber"} + oidDotNotationToNames["0.9.2342.19200300.100.1.43"] = OIDName{ShortName: "friendlyCountryName", LongName: "friendlyCountryName"} + oidDotNotationToNames["0.9.2342.19200300.100.1.45"] = OIDName{ShortName: "organizationalStatus", LongName: "organizationalStatus"} + oidDotNotationToNames["0.9.2342.19200300.100.1.46"] = OIDName{ShortName: "janetMailbox", LongName: "janetMailbox"} + oidDotNotationToNames["0.9.2342.19200300.100.1.47"] = OIDName{ShortName: "mailPreferenceOption", LongName: "mailPreferenceOption"} + oidDotNotationToNames["0.9.2342.19200300.100.1.48"] = OIDName{ShortName: "buildingName", LongName: "buildingName"} + oidDotNotationToNames["0.9.2342.19200300.100.1.49"] = OIDName{ShortName: "dSAQuality", LongName: "dSAQuality"} + oidDotNotationToNames["0.9.2342.19200300.100.1.50"] = OIDName{ShortName: "singleLevelQuality", LongName: "singleLevelQuality"} + oidDotNotationToNames["0.9.2342.19200300.100.1.51"] = OIDName{ShortName: "subtreeMinimumQuality", LongName: "subtreeMinimumQuality"} + oidDotNotationToNames["0.9.2342.19200300.100.1.52"] = OIDName{ShortName: "subtreeMaximumQuality", LongName: "subtreeMaximumQuality"} + oidDotNotationToNames["0.9.2342.19200300.100.1.53"] = OIDName{ShortName: "personalSignature", LongName: "personalSignature"} + oidDotNotationToNames["0.9.2342.19200300.100.1.54"] = OIDName{ShortName: "dITRedirect", LongName: "dITRedirect"} + oidDotNotationToNames["0.9.2342.19200300.100.1.55"] = OIDName{ShortName: "audio", LongName: "audio"} + oidDotNotationToNames["0.9.2342.19200300.100.1.56"] = OIDName{ShortName: "documentPublisher", LongName: "documentPublisher"} + oidDotNotationToNames["2.5.4.45"] = OIDName{ShortName: "x500UniqueIdentifier", LongName: "x500UniqueIdentifier"} + oidDotNotationToNames["1.3.6.1.7.1"] = OIDName{ShortName: "mime-mhs", LongName: "MIME MHS"} + oidDotNotationToNames["1.3.6.1.7.1.1"] = OIDName{ShortName: "mime-mhs-headings", LongName: "mime-mhs-headings"} + oidDotNotationToNames["1.3.6.1.7.1.2"] = OIDName{ShortName: "mime-mhs-bodies", LongName: "mime-mhs-bodies"} + oidDotNotationToNames["1.3.6.1.7.1.1.1"] = OIDName{ShortName: "id-hex-partial-message", LongName: "id-hex-partial-message"} + oidDotNotationToNames["1.3.6.1.7.1.1.2"] = OIDName{ShortName: "id-hex-multipart-message", LongName: "id-hex-multipart-message"} + oidDotNotationToNames["2.5.4.44"] = OIDName{ShortName: "generationQualifier", LongName: "generationQualifier"} + oidDotNotationToNames["2.5.4.65"] = OIDName{ShortName: "pseudonym", LongName: "pseudonym"} + oidDotNotationToNames["2.23.42"] = OIDName{ShortName: "id-set", LongName: "Secure Electronic Transactions"} + oidDotNotationToNames["2.23.42.0"] = OIDName{ShortName: "set-ctype", LongName: "content types"} + oidDotNotationToNames["2.23.42.1"] = OIDName{ShortName: "set-msgExt", 
LongName: "message extensions"} + oidDotNotationToNames["2.23.42.3"] = OIDName{ShortName: "set-attr", LongName: "set-attr"} + oidDotNotationToNames["2.23.42.5"] = OIDName{ShortName: "set-policy", LongName: "set-policy"} + oidDotNotationToNames["2.23.42.7"] = OIDName{ShortName: "set-certExt", LongName: "certificate extensions"} + oidDotNotationToNames["2.23.42.8"] = OIDName{ShortName: "set-brand", LongName: "set-brand"} + oidDotNotationToNames["2.23.42.0.0"] = OIDName{ShortName: "setct-PANData", LongName: "setct-PANData"} + oidDotNotationToNames["2.23.42.0.1"] = OIDName{ShortName: "setct-PANToken", LongName: "setct-PANToken"} + oidDotNotationToNames["2.23.42.0.2"] = OIDName{ShortName: "setct-PANOnly", LongName: "setct-PANOnly"} + oidDotNotationToNames["2.23.42.0.3"] = OIDName{ShortName: "setct-OIData", LongName: "setct-OIData"} + oidDotNotationToNames["2.23.42.0.4"] = OIDName{ShortName: "setct-PI", LongName: "setct-PI"} + oidDotNotationToNames["2.23.42.0.5"] = OIDName{ShortName: "setct-PIData", LongName: "setct-PIData"} + oidDotNotationToNames["2.23.42.0.6"] = OIDName{ShortName: "setct-PIDataUnsigned", LongName: "setct-PIDataUnsigned"} + oidDotNotationToNames["2.23.42.0.7"] = OIDName{ShortName: "setct-HODInput", LongName: "setct-HODInput"} + oidDotNotationToNames["2.23.42.0.8"] = OIDName{ShortName: "setct-AuthResBaggage", LongName: "setct-AuthResBaggage"} + oidDotNotationToNames["2.23.42.0.9"] = OIDName{ShortName: "setct-AuthRevReqBaggage", LongName: "setct-AuthRevReqBaggage"} + oidDotNotationToNames["2.23.42.0.10"] = OIDName{ShortName: "setct-AuthRevResBaggage", LongName: "setct-AuthRevResBaggage"} + oidDotNotationToNames["2.23.42.0.11"] = OIDName{ShortName: "setct-CapTokenSeq", LongName: "setct-CapTokenSeq"} + oidDotNotationToNames["2.23.42.0.12"] = OIDName{ShortName: "setct-PInitResData", LongName: "setct-PInitResData"} + oidDotNotationToNames["2.23.42.0.13"] = OIDName{ShortName: "setct-PI-TBS", LongName: "setct-PI-TBS"} + oidDotNotationToNames["2.23.42.0.14"] = OIDName{ShortName: "setct-PResData", LongName: "setct-PResData"} + oidDotNotationToNames["2.23.42.0.16"] = OIDName{ShortName: "setct-AuthReqTBS", LongName: "setct-AuthReqTBS"} + oidDotNotationToNames["2.23.42.0.17"] = OIDName{ShortName: "setct-AuthResTBS", LongName: "setct-AuthResTBS"} + oidDotNotationToNames["2.23.42.0.18"] = OIDName{ShortName: "setct-AuthResTBSX", LongName: "setct-AuthResTBSX"} + oidDotNotationToNames["2.23.42.0.19"] = OIDName{ShortName: "setct-AuthTokenTBS", LongName: "setct-AuthTokenTBS"} + oidDotNotationToNames["2.23.42.0.20"] = OIDName{ShortName: "setct-CapTokenData", LongName: "setct-CapTokenData"} + oidDotNotationToNames["2.23.42.0.21"] = OIDName{ShortName: "setct-CapTokenTBS", LongName: "setct-CapTokenTBS"} + oidDotNotationToNames["2.23.42.0.22"] = OIDName{ShortName: "setct-AcqCardCodeMsg", LongName: "setct-AcqCardCodeMsg"} + oidDotNotationToNames["2.23.42.0.23"] = OIDName{ShortName: "setct-AuthRevReqTBS", LongName: "setct-AuthRevReqTBS"} + oidDotNotationToNames["2.23.42.0.24"] = OIDName{ShortName: "setct-AuthRevResData", LongName: "setct-AuthRevResData"} + oidDotNotationToNames["2.23.42.0.25"] = OIDName{ShortName: "setct-AuthRevResTBS", LongName: "setct-AuthRevResTBS"} + oidDotNotationToNames["2.23.42.0.26"] = OIDName{ShortName: "setct-CapReqTBS", LongName: "setct-CapReqTBS"} + oidDotNotationToNames["2.23.42.0.27"] = OIDName{ShortName: "setct-CapReqTBSX", LongName: "setct-CapReqTBSX"} + oidDotNotationToNames["2.23.42.0.28"] = OIDName{ShortName: "setct-CapResData", LongName: "setct-CapResData"} + 
oidDotNotationToNames["2.23.42.0.29"] = OIDName{ShortName: "setct-CapRevReqTBS", LongName: "setct-CapRevReqTBS"} + oidDotNotationToNames["2.23.42.0.30"] = OIDName{ShortName: "setct-CapRevReqTBSX", LongName: "setct-CapRevReqTBSX"} + oidDotNotationToNames["2.23.42.0.31"] = OIDName{ShortName: "setct-CapRevResData", LongName: "setct-CapRevResData"} + oidDotNotationToNames["2.23.42.0.32"] = OIDName{ShortName: "setct-CredReqTBS", LongName: "setct-CredReqTBS"} + oidDotNotationToNames["2.23.42.0.33"] = OIDName{ShortName: "setct-CredReqTBSX", LongName: "setct-CredReqTBSX"} + oidDotNotationToNames["2.23.42.0.34"] = OIDName{ShortName: "setct-CredResData", LongName: "setct-CredResData"} + oidDotNotationToNames["2.23.42.0.35"] = OIDName{ShortName: "setct-CredRevReqTBS", LongName: "setct-CredRevReqTBS"} + oidDotNotationToNames["2.23.42.0.36"] = OIDName{ShortName: "setct-CredRevReqTBSX", LongName: "setct-CredRevReqTBSX"} + oidDotNotationToNames["2.23.42.0.37"] = OIDName{ShortName: "setct-CredRevResData", LongName: "setct-CredRevResData"} + oidDotNotationToNames["2.23.42.0.38"] = OIDName{ShortName: "setct-PCertReqData", LongName: "setct-PCertReqData"} + oidDotNotationToNames["2.23.42.0.39"] = OIDName{ShortName: "setct-PCertResTBS", LongName: "setct-PCertResTBS"} + oidDotNotationToNames["2.23.42.0.40"] = OIDName{ShortName: "setct-BatchAdminReqData", LongName: "setct-BatchAdminReqData"} + oidDotNotationToNames["2.23.42.0.41"] = OIDName{ShortName: "setct-BatchAdminResData", LongName: "setct-BatchAdminResData"} + oidDotNotationToNames["2.23.42.0.42"] = OIDName{ShortName: "setct-CardCInitResTBS", LongName: "setct-CardCInitResTBS"} + oidDotNotationToNames["2.23.42.0.43"] = OIDName{ShortName: "setct-MeAqCInitResTBS", LongName: "setct-MeAqCInitResTBS"} + oidDotNotationToNames["2.23.42.0.44"] = OIDName{ShortName: "setct-RegFormResTBS", LongName: "setct-RegFormResTBS"} + oidDotNotationToNames["2.23.42.0.45"] = OIDName{ShortName: "setct-CertReqData", LongName: "setct-CertReqData"} + oidDotNotationToNames["2.23.42.0.46"] = OIDName{ShortName: "setct-CertReqTBS", LongName: "setct-CertReqTBS"} + oidDotNotationToNames["2.23.42.0.47"] = OIDName{ShortName: "setct-CertResData", LongName: "setct-CertResData"} + oidDotNotationToNames["2.23.42.0.48"] = OIDName{ShortName: "setct-CertInqReqTBS", LongName: "setct-CertInqReqTBS"} + oidDotNotationToNames["2.23.42.0.49"] = OIDName{ShortName: "setct-ErrorTBS", LongName: "setct-ErrorTBS"} + oidDotNotationToNames["2.23.42.0.50"] = OIDName{ShortName: "setct-PIDualSignedTBE", LongName: "setct-PIDualSignedTBE"} + oidDotNotationToNames["2.23.42.0.51"] = OIDName{ShortName: "setct-PIUnsignedTBE", LongName: "setct-PIUnsignedTBE"} + oidDotNotationToNames["2.23.42.0.52"] = OIDName{ShortName: "setct-AuthReqTBE", LongName: "setct-AuthReqTBE"} + oidDotNotationToNames["2.23.42.0.53"] = OIDName{ShortName: "setct-AuthResTBE", LongName: "setct-AuthResTBE"} + oidDotNotationToNames["2.23.42.0.54"] = OIDName{ShortName: "setct-AuthResTBEX", LongName: "setct-AuthResTBEX"} + oidDotNotationToNames["2.23.42.0.55"] = OIDName{ShortName: "setct-AuthTokenTBE", LongName: "setct-AuthTokenTBE"} + oidDotNotationToNames["2.23.42.0.56"] = OIDName{ShortName: "setct-CapTokenTBE", LongName: "setct-CapTokenTBE"} + oidDotNotationToNames["2.23.42.0.57"] = OIDName{ShortName: "setct-CapTokenTBEX", LongName: "setct-CapTokenTBEX"} + oidDotNotationToNames["2.23.42.0.58"] = OIDName{ShortName: "setct-AcqCardCodeMsgTBE", LongName: "setct-AcqCardCodeMsgTBE"} + oidDotNotationToNames["2.23.42.0.59"] = OIDName{ShortName: 
"setct-AuthRevReqTBE", LongName: "setct-AuthRevReqTBE"} + oidDotNotationToNames["2.23.42.0.60"] = OIDName{ShortName: "setct-AuthRevResTBE", LongName: "setct-AuthRevResTBE"} + oidDotNotationToNames["2.23.42.0.61"] = OIDName{ShortName: "setct-AuthRevResTBEB", LongName: "setct-AuthRevResTBEB"} + oidDotNotationToNames["2.23.42.0.62"] = OIDName{ShortName: "setct-CapReqTBE", LongName: "setct-CapReqTBE"} + oidDotNotationToNames["2.23.42.0.63"] = OIDName{ShortName: "setct-CapReqTBEX", LongName: "setct-CapReqTBEX"} + oidDotNotationToNames["2.23.42.0.64"] = OIDName{ShortName: "setct-CapResTBE", LongName: "setct-CapResTBE"} + oidDotNotationToNames["2.23.42.0.65"] = OIDName{ShortName: "setct-CapRevReqTBE", LongName: "setct-CapRevReqTBE"} + oidDotNotationToNames["2.23.42.0.66"] = OIDName{ShortName: "setct-CapRevReqTBEX", LongName: "setct-CapRevReqTBEX"} + oidDotNotationToNames["2.23.42.0.67"] = OIDName{ShortName: "setct-CapRevResTBE", LongName: "setct-CapRevResTBE"} + oidDotNotationToNames["2.23.42.0.68"] = OIDName{ShortName: "setct-CredReqTBE", LongName: "setct-CredReqTBE"} + oidDotNotationToNames["2.23.42.0.69"] = OIDName{ShortName: "setct-CredReqTBEX", LongName: "setct-CredReqTBEX"} + oidDotNotationToNames["2.23.42.0.70"] = OIDName{ShortName: "setct-CredResTBE", LongName: "setct-CredResTBE"} + oidDotNotationToNames["2.23.42.0.71"] = OIDName{ShortName: "setct-CredRevReqTBE", LongName: "setct-CredRevReqTBE"} + oidDotNotationToNames["2.23.42.0.72"] = OIDName{ShortName: "setct-CredRevReqTBEX", LongName: "setct-CredRevReqTBEX"} + oidDotNotationToNames["2.23.42.0.73"] = OIDName{ShortName: "setct-CredRevResTBE", LongName: "setct-CredRevResTBE"} + oidDotNotationToNames["2.23.42.0.74"] = OIDName{ShortName: "setct-BatchAdminReqTBE", LongName: "setct-BatchAdminReqTBE"} + oidDotNotationToNames["2.23.42.0.75"] = OIDName{ShortName: "setct-BatchAdminResTBE", LongName: "setct-BatchAdminResTBE"} + oidDotNotationToNames["2.23.42.0.76"] = OIDName{ShortName: "setct-RegFormReqTBE", LongName: "setct-RegFormReqTBE"} + oidDotNotationToNames["2.23.42.0.77"] = OIDName{ShortName: "setct-CertReqTBE", LongName: "setct-CertReqTBE"} + oidDotNotationToNames["2.23.42.0.78"] = OIDName{ShortName: "setct-CertReqTBEX", LongName: "setct-CertReqTBEX"} + oidDotNotationToNames["2.23.42.0.79"] = OIDName{ShortName: "setct-CertResTBE", LongName: "setct-CertResTBE"} + oidDotNotationToNames["2.23.42.0.80"] = OIDName{ShortName: "setct-CRLNotificationTBS", LongName: "setct-CRLNotificationTBS"} + oidDotNotationToNames["2.23.42.0.81"] = OIDName{ShortName: "setct-CRLNotificationResTBS", LongName: "setct-CRLNotificationResTBS"} + oidDotNotationToNames["2.23.42.0.82"] = OIDName{ShortName: "setct-BCIDistributionTBS", LongName: "setct-BCIDistributionTBS"} + oidDotNotationToNames["2.23.42.1.1"] = OIDName{ShortName: "setext-genCrypt", LongName: "generic cryptogram"} + oidDotNotationToNames["2.23.42.1.3"] = OIDName{ShortName: "setext-miAuth", LongName: "merchant initiated auth"} + oidDotNotationToNames["2.23.42.1.4"] = OIDName{ShortName: "setext-pinSecure", LongName: "setext-pinSecure"} + oidDotNotationToNames["2.23.42.1.5"] = OIDName{ShortName: "setext-pinAny", LongName: "setext-pinAny"} + oidDotNotationToNames["2.23.42.1.7"] = OIDName{ShortName: "setext-track2", LongName: "setext-track2"} + oidDotNotationToNames["2.23.42.1.8"] = OIDName{ShortName: "setext-cv", LongName: "additional verification"} + oidDotNotationToNames["2.23.42.5.0"] = OIDName{ShortName: "set-policy-root", LongName: "set-policy-root"} + oidDotNotationToNames["2.23.42.7.0"] = 
OIDName{ShortName: "setCext-hashedRoot", LongName: "setCext-hashedRoot"} + oidDotNotationToNames["2.23.42.7.1"] = OIDName{ShortName: "setCext-certType", LongName: "setCext-certType"} + oidDotNotationToNames["2.23.42.7.2"] = OIDName{ShortName: "setCext-merchData", LongName: "setCext-merchData"} + oidDotNotationToNames["2.23.42.7.3"] = OIDName{ShortName: "setCext-cCertRequired", LongName: "setCext-cCertRequired"} + oidDotNotationToNames["2.23.42.7.4"] = OIDName{ShortName: "setCext-tunneling", LongName: "setCext-tunneling"} + oidDotNotationToNames["2.23.42.7.5"] = OIDName{ShortName: "setCext-setExt", LongName: "setCext-setExt"} + oidDotNotationToNames["2.23.42.7.6"] = OIDName{ShortName: "setCext-setQualf", LongName: "setCext-setQualf"} + oidDotNotationToNames["2.23.42.7.7"] = OIDName{ShortName: "setCext-PGWYcapabilities", LongName: "setCext-PGWYcapabilities"} + oidDotNotationToNames["2.23.42.7.8"] = OIDName{ShortName: "setCext-TokenIdentifier", LongName: "setCext-TokenIdentifier"} + oidDotNotationToNames["2.23.42.7.9"] = OIDName{ShortName: "setCext-Track2Data", LongName: "setCext-Track2Data"} + oidDotNotationToNames["2.23.42.7.10"] = OIDName{ShortName: "setCext-TokenType", LongName: "setCext-TokenType"} + oidDotNotationToNames["2.23.42.7.11"] = OIDName{ShortName: "setCext-IssuerCapabilities", LongName: "setCext-IssuerCapabilities"} + oidDotNotationToNames["2.23.42.3.0"] = OIDName{ShortName: "setAttr-Cert", LongName: "setAttr-Cert"} + oidDotNotationToNames["2.23.42.3.1"] = OIDName{ShortName: "setAttr-PGWYcap", LongName: "payment gateway capabilities"} + oidDotNotationToNames["2.23.42.3.2"] = OIDName{ShortName: "setAttr-TokenType", LongName: "setAttr-TokenType"} + oidDotNotationToNames["2.23.42.3.3"] = OIDName{ShortName: "setAttr-IssCap", LongName: "issuer capabilities"} + oidDotNotationToNames["2.23.42.3.0.0"] = OIDName{ShortName: "set-rootKeyThumb", LongName: "set-rootKeyThumb"} + oidDotNotationToNames["2.23.42.3.0.1"] = OIDName{ShortName: "set-addPolicy", LongName: "set-addPolicy"} + oidDotNotationToNames["2.23.42.3.2.1"] = OIDName{ShortName: "setAttr-Token-EMV", LongName: "setAttr-Token-EMV"} + oidDotNotationToNames["2.23.42.3.2.2"] = OIDName{ShortName: "setAttr-Token-B0Prime", LongName: "setAttr-Token-B0Prime"} + oidDotNotationToNames["2.23.42.3.3.3"] = OIDName{ShortName: "setAttr-IssCap-CVM", LongName: "setAttr-IssCap-CVM"} + oidDotNotationToNames["2.23.42.3.3.4"] = OIDName{ShortName: "setAttr-IssCap-T2", LongName: "setAttr-IssCap-T2"} + oidDotNotationToNames["2.23.42.3.3.5"] = OIDName{ShortName: "setAttr-IssCap-Sig", LongName: "setAttr-IssCap-Sig"} + oidDotNotationToNames["2.23.42.3.3.3.1"] = OIDName{ShortName: "setAttr-GenCryptgrm", LongName: "generate cryptogram"} + oidDotNotationToNames["2.23.42.3.3.4.1"] = OIDName{ShortName: "setAttr-T2Enc", LongName: "encrypted track 2"} + oidDotNotationToNames["2.23.42.3.3.4.2"] = OIDName{ShortName: "setAttr-T2cleartxt", LongName: "cleartext track 2"} + oidDotNotationToNames["2.23.42.3.3.5.1"] = OIDName{ShortName: "setAttr-TokICCsig", LongName: "ICC or token signature"} + oidDotNotationToNames["2.23.42.3.3.5.2"] = OIDName{ShortName: "setAttr-SecDevSig", LongName: "secure device signature"} + oidDotNotationToNames["2.23.42.8.1"] = OIDName{ShortName: "set-brand-IATA-ATA", LongName: "set-brand-IATA-ATA"} + oidDotNotationToNames["2.23.42.8.30"] = OIDName{ShortName: "set-brand-Diners", LongName: "set-brand-Diners"} + oidDotNotationToNames["2.23.42.8.34"] = OIDName{ShortName: "set-brand-AmericanExpress", LongName: "set-brand-AmericanExpress"} + 
oidDotNotationToNames["2.23.42.8.35"] = OIDName{ShortName: "set-brand-JCB", LongName: "set-brand-JCB"} + oidDotNotationToNames["2.23.42.8.4"] = OIDName{ShortName: "set-brand-Visa", LongName: "set-brand-Visa"} + oidDotNotationToNames["2.23.42.8.5"] = OIDName{ShortName: "set-brand-MasterCard", LongName: "set-brand-MasterCard"} + oidDotNotationToNames["2.23.42.8.6011"] = OIDName{ShortName: "set-brand-Novus", LongName: "set-brand-Novus"} + oidDotNotationToNames["1.2.840.113549.3.10"] = OIDName{ShortName: "DES-CDMF", LongName: "des-cdmf"} + oidDotNotationToNames["1.2.840.113549.1.1.6"] = OIDName{ShortName: "rsaOAEPEncryptionSET", LongName: "rsaOAEPEncryptionSET"} + oidDotNotationToNames["0.0"] = OIDName{ShortName: "ITU-T", LongName: "itu-t"} + oidDotNotationToNames["2.0"] = OIDName{ShortName: "JOINT-ISO-ITU-T", LongName: "joint-iso-itu-t"} + oidDotNotationToNames["2.23"] = OIDName{ShortName: "international-organizations", LongName: "International Organizations"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.2.2"] = OIDName{ShortName: "msSmartcardLogin", LongName: "Microsoft Smartcardlogin"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.2.3"] = OIDName{ShortName: "msUPN", LongName: "Microsoft Universal Principal Name"} + oidDotNotationToNames["2.5.4.9"] = OIDName{ShortName: "street", LongName: "streetAddress"} + oidDotNotationToNames["2.5.4.17"] = OIDName{ShortName: "postalCode", LongName: "postalCode"} + oidDotNotationToNames["1.3.6.1.5.5.7.21"] = OIDName{ShortName: "id-ppl", LongName: "id-ppl"} + oidDotNotationToNames["1.3.6.1.5.5.7.1.14"] = OIDName{ShortName: "proxyCertInfo", LongName: "Proxy Certificate Information"} + oidDotNotationToNames["1.3.6.1.5.5.7.21.0"] = OIDName{ShortName: "id-ppl-anyLanguage", LongName: "Any language"} + oidDotNotationToNames["1.3.6.1.5.5.7.21.1"] = OIDName{ShortName: "id-ppl-inheritAll", LongName: "Inherit all"} + oidDotNotationToNames["2.5.29.30"] = OIDName{ShortName: "nameConstraints", LongName: "X509v3 Name Constraints"} + oidDotNotationToNames["1.3.6.1.5.5.7.21.2"] = OIDName{ShortName: "id-ppl-independent", LongName: "Independent"} + oidDotNotationToNames["1.2.840.113549.1.1.11"] = OIDName{ShortName: "RSA-SHA256", LongName: "sha256WithRSAEncryption"} + oidDotNotationToNames["1.2.840.113549.1.1.12"] = OIDName{ShortName: "RSA-SHA384", LongName: "sha384WithRSAEncryption"} + oidDotNotationToNames["1.2.840.113549.1.1.13"] = OIDName{ShortName: "RSA-SHA512", LongName: "sha512WithRSAEncryption"} + oidDotNotationToNames["1.2.840.113549.1.1.14"] = OIDName{ShortName: "RSA-SHA224", LongName: "sha224WithRSAEncryption"} + oidDotNotationToNames["2.16.840.1.101.3.4.2.1"] = OIDName{ShortName: "SHA256", LongName: "sha256"} + oidDotNotationToNames["2.16.840.1.101.3.4.2.2"] = OIDName{ShortName: "SHA384", LongName: "sha384"} + oidDotNotationToNames["2.16.840.1.101.3.4.2.3"] = OIDName{ShortName: "SHA512", LongName: "sha512"} + oidDotNotationToNames["2.16.840.1.101.3.4.2.4"] = OIDName{ShortName: "SHA224", LongName: "sha224"} + oidDotNotationToNames["1.3"] = OIDName{ShortName: "identified-organization", LongName: "identified-organization"} + oidDotNotationToNames["1.3.132"] = OIDName{ShortName: "certicom-arc", LongName: "certicom-arc"} + oidDotNotationToNames["2.23.43"] = OIDName{ShortName: "wap", LongName: "wap"} + oidDotNotationToNames["2.23.43.1"] = OIDName{ShortName: "wap-wsg", LongName: "wap-wsg"} + oidDotNotationToNames["1.2.840.10045.1.2.3"] = OIDName{ShortName: "id-characteristic-two-basis", LongName: "id-characteristic-two-basis"} + 
oidDotNotationToNames["1.2.840.10045.1.2.3.1"] = OIDName{ShortName: "onBasis", LongName: "onBasis"} + oidDotNotationToNames["1.2.840.10045.1.2.3.2"] = OIDName{ShortName: "tpBasis", LongName: "tpBasis"} + oidDotNotationToNames["1.2.840.10045.1.2.3.3"] = OIDName{ShortName: "ppBasis", LongName: "ppBasis"} + oidDotNotationToNames["1.2.840.10045.3.0.1"] = OIDName{ShortName: "c2pnb163v1", LongName: "c2pnb163v1"} + oidDotNotationToNames["1.2.840.10045.3.0.2"] = OIDName{ShortName: "c2pnb163v2", LongName: "c2pnb163v2"} + oidDotNotationToNames["1.2.840.10045.3.0.3"] = OIDName{ShortName: "c2pnb163v3", LongName: "c2pnb163v3"} + oidDotNotationToNames["1.2.840.10045.3.0.4"] = OIDName{ShortName: "c2pnb176v1", LongName: "c2pnb176v1"} + oidDotNotationToNames["1.2.840.10045.3.0.5"] = OIDName{ShortName: "c2tnb191v1", LongName: "c2tnb191v1"} + oidDotNotationToNames["1.2.840.10045.3.0.6"] = OIDName{ShortName: "c2tnb191v2", LongName: "c2tnb191v2"} + oidDotNotationToNames["1.2.840.10045.3.0.7"] = OIDName{ShortName: "c2tnb191v3", LongName: "c2tnb191v3"} + oidDotNotationToNames["1.2.840.10045.3.0.8"] = OIDName{ShortName: "c2onb191v4", LongName: "c2onb191v4"} + oidDotNotationToNames["1.2.840.10045.3.0.9"] = OIDName{ShortName: "c2onb191v5", LongName: "c2onb191v5"} + oidDotNotationToNames["1.2.840.10045.3.0.10"] = OIDName{ShortName: "c2pnb208w1", LongName: "c2pnb208w1"} + oidDotNotationToNames["1.2.840.10045.3.0.11"] = OIDName{ShortName: "c2tnb239v1", LongName: "c2tnb239v1"} + oidDotNotationToNames["1.2.840.10045.3.0.12"] = OIDName{ShortName: "c2tnb239v2", LongName: "c2tnb239v2"} + oidDotNotationToNames["1.2.840.10045.3.0.13"] = OIDName{ShortName: "c2tnb239v3", LongName: "c2tnb239v3"} + oidDotNotationToNames["1.2.840.10045.3.0.14"] = OIDName{ShortName: "c2onb239v4", LongName: "c2onb239v4"} + oidDotNotationToNames["1.2.840.10045.3.0.15"] = OIDName{ShortName: "c2onb239v5", LongName: "c2onb239v5"} + oidDotNotationToNames["1.2.840.10045.3.0.16"] = OIDName{ShortName: "c2pnb272w1", LongName: "c2pnb272w1"} + oidDotNotationToNames["1.2.840.10045.3.0.17"] = OIDName{ShortName: "c2pnb304w1", LongName: "c2pnb304w1"} + oidDotNotationToNames["1.2.840.10045.3.0.18"] = OIDName{ShortName: "c2tnb359v1", LongName: "c2tnb359v1"} + oidDotNotationToNames["1.2.840.10045.3.0.19"] = OIDName{ShortName: "c2pnb368w1", LongName: "c2pnb368w1"} + oidDotNotationToNames["1.2.840.10045.3.0.20"] = OIDName{ShortName: "c2tnb431r1", LongName: "c2tnb431r1"} + oidDotNotationToNames["1.3.132.0.6"] = OIDName{ShortName: "secp112r1", LongName: "secp112r1"} + oidDotNotationToNames["1.3.132.0.7"] = OIDName{ShortName: "secp112r2", LongName: "secp112r2"} + oidDotNotationToNames["1.3.132.0.28"] = OIDName{ShortName: "secp128r1", LongName: "secp128r1"} + oidDotNotationToNames["1.3.132.0.29"] = OIDName{ShortName: "secp128r2", LongName: "secp128r2"} + oidDotNotationToNames["1.3.132.0.9"] = OIDName{ShortName: "secp160k1", LongName: "secp160k1"} + oidDotNotationToNames["1.3.132.0.8"] = OIDName{ShortName: "secp160r1", LongName: "secp160r1"} + oidDotNotationToNames["1.3.132.0.30"] = OIDName{ShortName: "secp160r2", LongName: "secp160r2"} + oidDotNotationToNames["1.3.132.0.31"] = OIDName{ShortName: "secp192k1", LongName: "secp192k1"} + oidDotNotationToNames["1.3.132.0.32"] = OIDName{ShortName: "secp224k1", LongName: "secp224k1"} + oidDotNotationToNames["1.3.132.0.33"] = OIDName{ShortName: "secp224r1", LongName: "secp224r1"} + oidDotNotationToNames["1.3.132.0.10"] = OIDName{ShortName: "secp256k1", LongName: "secp256k1"} + oidDotNotationToNames["1.3.132.0.34"] = 
OIDName{ShortName: "secp384r1", LongName: "secp384r1"} + oidDotNotationToNames["1.3.132.0.35"] = OIDName{ShortName: "secp521r1", LongName: "secp521r1"} + oidDotNotationToNames["1.3.132.0.4"] = OIDName{ShortName: "sect113r1", LongName: "sect113r1"} + oidDotNotationToNames["1.3.132.0.5"] = OIDName{ShortName: "sect113r2", LongName: "sect113r2"} + oidDotNotationToNames["1.3.132.0.22"] = OIDName{ShortName: "sect131r1", LongName: "sect131r1"} + oidDotNotationToNames["1.3.132.0.23"] = OIDName{ShortName: "sect131r2", LongName: "sect131r2"} + oidDotNotationToNames["1.3.132.0.1"] = OIDName{ShortName: "sect163k1", LongName: "sect163k1"} + oidDotNotationToNames["1.3.132.0.2"] = OIDName{ShortName: "sect163r1", LongName: "sect163r1"} + oidDotNotationToNames["1.3.132.0.15"] = OIDName{ShortName: "sect163r2", LongName: "sect163r2"} + oidDotNotationToNames["1.3.132.0.24"] = OIDName{ShortName: "sect193r1", LongName: "sect193r1"} + oidDotNotationToNames["1.3.132.0.25"] = OIDName{ShortName: "sect193r2", LongName: "sect193r2"} + oidDotNotationToNames["1.3.132.0.26"] = OIDName{ShortName: "sect233k1", LongName: "sect233k1"} + oidDotNotationToNames["1.3.132.0.27"] = OIDName{ShortName: "sect233r1", LongName: "sect233r1"} + oidDotNotationToNames["1.3.132.0.3"] = OIDName{ShortName: "sect239k1", LongName: "sect239k1"} + oidDotNotationToNames["1.3.132.0.16"] = OIDName{ShortName: "sect283k1", LongName: "sect283k1"} + oidDotNotationToNames["1.3.132.0.17"] = OIDName{ShortName: "sect283r1", LongName: "sect283r1"} + oidDotNotationToNames["1.3.132.0.36"] = OIDName{ShortName: "sect409k1", LongName: "sect409k1"} + oidDotNotationToNames["1.3.132.0.37"] = OIDName{ShortName: "sect409r1", LongName: "sect409r1"} + oidDotNotationToNames["1.3.132.0.38"] = OIDName{ShortName: "sect571k1", LongName: "sect571k1"} + oidDotNotationToNames["1.3.132.0.39"] = OIDName{ShortName: "sect571r1", LongName: "sect571r1"} + oidDotNotationToNames["2.23.43.1.4.1"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls1", LongName: "wap-wsg-idm-ecid-wtls1"} + oidDotNotationToNames["2.23.43.1.4.3"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls3", LongName: "wap-wsg-idm-ecid-wtls3"} + oidDotNotationToNames["2.23.43.1.4.4"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls4", LongName: "wap-wsg-idm-ecid-wtls4"} + oidDotNotationToNames["2.23.43.1.4.5"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls5", LongName: "wap-wsg-idm-ecid-wtls5"} + oidDotNotationToNames["2.23.43.1.4.6"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls6", LongName: "wap-wsg-idm-ecid-wtls6"} + oidDotNotationToNames["2.23.43.1.4.7"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls7", LongName: "wap-wsg-idm-ecid-wtls7"} + oidDotNotationToNames["2.23.43.1.4.8"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls8", LongName: "wap-wsg-idm-ecid-wtls8"} + oidDotNotationToNames["2.23.43.1.4.9"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls9", LongName: "wap-wsg-idm-ecid-wtls9"} + oidDotNotationToNames["2.23.43.1.4.10"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls10", LongName: "wap-wsg-idm-ecid-wtls10"} + oidDotNotationToNames["2.23.43.1.4.11"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls11", LongName: "wap-wsg-idm-ecid-wtls11"} + oidDotNotationToNames["2.23.43.1.4.12"] = OIDName{ShortName: "wap-wsg-idm-ecid-wtls12", LongName: "wap-wsg-idm-ecid-wtls12"} + oidDotNotationToNames["2.5.29.32.0"] = OIDName{ShortName: "anyPolicy", LongName: "X509v3 Any Policy"} + oidDotNotationToNames["2.5.29.33"] = OIDName{ShortName: "policyMappings", LongName: "X509v3 Policy Mappings"} + oidDotNotationToNames["2.5.29.54"] = OIDName{ShortName: 
"inhibitAnyPolicy", LongName: "X509v3 Inhibit Any Policy"} + oidDotNotationToNames["1.2.392.200011.61.1.1.1.2"] = OIDName{ShortName: "CAMELLIA-128-CBC", LongName: "camellia-128-cbc"} + oidDotNotationToNames["1.2.392.200011.61.1.1.1.3"] = OIDName{ShortName: "CAMELLIA-192-CBC", LongName: "camellia-192-cbc"} + oidDotNotationToNames["1.2.392.200011.61.1.1.1.4"] = OIDName{ShortName: "CAMELLIA-256-CBC", LongName: "camellia-256-cbc"} + oidDotNotationToNames["0.3.4401.5.3.1.9.1"] = OIDName{ShortName: "CAMELLIA-128-ECB", LongName: "camellia-128-ecb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.21"] = OIDName{ShortName: "CAMELLIA-192-ECB", LongName: "camellia-192-ecb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.41"] = OIDName{ShortName: "CAMELLIA-256-ECB", LongName: "camellia-256-ecb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.4"] = OIDName{ShortName: "CAMELLIA-128-CFB", LongName: "camellia-128-cfb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.24"] = OIDName{ShortName: "CAMELLIA-192-CFB", LongName: "camellia-192-cfb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.44"] = OIDName{ShortName: "CAMELLIA-256-CFB", LongName: "camellia-256-cfb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.3"] = OIDName{ShortName: "CAMELLIA-128-OFB", LongName: "camellia-128-ofb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.23"] = OIDName{ShortName: "CAMELLIA-192-OFB", LongName: "camellia-192-ofb"} + oidDotNotationToNames["0.3.4401.5.3.1.9.43"] = OIDName{ShortName: "CAMELLIA-256-OFB", LongName: "camellia-256-ofb"} + oidDotNotationToNames["2.5.29.9"] = OIDName{ShortName: "subjectDirectoryAttributes", LongName: "X509v3 Subject Directory Attributes"} + oidDotNotationToNames["2.5.29.28"] = OIDName{ShortName: "issuingDistributionPoint", LongName: "X509v3 Issuing Distrubution Point"} + oidDotNotationToNames["2.5.29.29"] = OIDName{ShortName: "certificateIssuer", LongName: "X509v3 Certificate Issuer"} + oidDotNotationToNames["1.2.410.200004"] = OIDName{ShortName: "KISA", LongName: "kisa"} + oidDotNotationToNames["1.2.410.200004.1.3"] = OIDName{ShortName: "SEED-ECB", LongName: "seed-ecb"} + oidDotNotationToNames["1.2.410.200004.1.4"] = OIDName{ShortName: "SEED-CBC", LongName: "seed-cbc"} + oidDotNotationToNames["1.2.410.200004.1.6"] = OIDName{ShortName: "SEED-OFB", LongName: "seed-ofb"} + oidDotNotationToNames["1.2.410.200004.1.5"] = OIDName{ShortName: "SEED-CFB", LongName: "seed-cfb"} + oidDotNotationToNames["1.3.6.1.5.5.8.1.1"] = OIDName{ShortName: "HMAC-MD5", LongName: "hmac-md5"} + oidDotNotationToNames["1.3.6.1.5.5.8.1.2"] = OIDName{ShortName: "HMAC-SHA1", LongName: "hmac-sha1"} + oidDotNotationToNames["1.2.840.113533.7.66.13"] = OIDName{ShortName: "id-PasswordBasedMAC", LongName: "password based MAC"} + oidDotNotationToNames["1.2.840.113533.7.66.30"] = OIDName{ShortName: "id-DHBasedMac", LongName: "Diffie-Hellman based MAC"} + oidDotNotationToNames["1.3.6.1.5.5.7.4.16"] = OIDName{ShortName: "id-it-suppLangTags", LongName: "id-it-suppLangTags"} + oidDotNotationToNames["1.3.6.1.5.5.7.48.5"] = OIDName{ShortName: "caRepository", LongName: "CA Repository"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.9"] = OIDName{ShortName: "id-smime-ct-compressedData", LongName: "id-smime-ct-compressedData"} + oidDotNotationToNames["1.2.840.113549.1.9.16.1.27"] = OIDName{ShortName: "id-ct-asciiTextWithCRLF", LongName: "id-ct-asciiTextWithCRLF"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.5"] = OIDName{ShortName: "id-aes128-wrap", LongName: "id-aes128-wrap"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.25"] = OIDName{ShortName: "id-aes192-wrap", LongName: 
"id-aes192-wrap"} + oidDotNotationToNames["2.16.840.1.101.3.4.1.45"] = OIDName{ShortName: "id-aes256-wrap", LongName: "id-aes256-wrap"} + oidDotNotationToNames["1.2.840.10045.4.2"] = OIDName{ShortName: "ecdsa-with-Recommended", LongName: "ecdsa-with-Recommended"} + oidDotNotationToNames["1.2.840.10045.4.3"] = OIDName{ShortName: "ecdsa-with-Specified", LongName: "ecdsa-with-Specified"} + oidDotNotationToNames["1.2.840.10045.4.3.1"] = OIDName{ShortName: "ecdsa-with-SHA224", LongName: "ecdsa-with-SHA224"} + oidDotNotationToNames["1.2.840.10045.4.3.2"] = OIDName{ShortName: "ecdsa-with-SHA256", LongName: "ecdsa-with-SHA256"} + oidDotNotationToNames["1.2.840.10045.4.3.3"] = OIDName{ShortName: "ecdsa-with-SHA384", LongName: "ecdsa-with-SHA384"} + oidDotNotationToNames["1.2.840.10045.4.3.4"] = OIDName{ShortName: "ecdsa-with-SHA512", LongName: "ecdsa-with-SHA512"} + oidDotNotationToNames["1.2.840.113549.2.6"] = OIDName{ShortName: "hmacWithMD5", LongName: "hmacWithMD5"} + oidDotNotationToNames["1.2.840.113549.2.8"] = OIDName{ShortName: "hmacWithSHA224", LongName: "hmacWithSHA224"} + oidDotNotationToNames["1.2.840.113549.2.9"] = OIDName{ShortName: "hmacWithSHA256", LongName: "hmacWithSHA256"} + oidDotNotationToNames["1.2.840.113549.2.10"] = OIDName{ShortName: "hmacWithSHA384", LongName: "hmacWithSHA384"} + oidDotNotationToNames["1.2.840.113549.2.11"] = OIDName{ShortName: "hmacWithSHA512", LongName: "hmacWithSHA512"} + oidDotNotationToNames["2.16.840.1.101.3.4.3.1"] = OIDName{ShortName: "dsa_with_SHA224", LongName: "dsa_with_SHA224"} + oidDotNotationToNames["2.16.840.1.101.3.4.3.2"] = OIDName{ShortName: "dsa_with_SHA256", LongName: "dsa_with_SHA256"} + oidDotNotationToNames["1.0.10118.3.0.55"] = OIDName{ShortName: "whirlpool", LongName: "whirlpool"} + oidDotNotationToNames["1.2.643.2.2"] = OIDName{ShortName: "cryptopro", LongName: "cryptopro"} + oidDotNotationToNames["1.2.643.2.9"] = OIDName{ShortName: "cryptocom", LongName: "cryptocom"} + oidDotNotationToNames["1.2.643.2.2.3"] = OIDName{ShortName: "id-GostR3411-94-with-GostR3410-2001", LongName: "GOST R 34.11-94 with GOST R 34.10-2001"} + oidDotNotationToNames["1.2.643.2.2.4"] = OIDName{ShortName: "id-GostR3411-94-with-GostR3410-94", LongName: "GOST R 34.11-94 with GOST R 34.10-94"} + oidDotNotationToNames["1.2.643.2.2.9"] = OIDName{ShortName: "md_gost94", LongName: "GOST R 34.11-94"} + oidDotNotationToNames["1.2.643.2.2.10"] = OIDName{ShortName: "id-HMACGostR3411-94", LongName: "HMAC GOST 34.11-94"} + oidDotNotationToNames["1.2.643.2.2.19"] = OIDName{ShortName: "gost2001", LongName: "GOST R 34.10-2001"} + oidDotNotationToNames["1.2.643.2.2.20"] = OIDName{ShortName: "gost94", LongName: "GOST R 34.10-94"} + oidDotNotationToNames["1.2.643.2.2.21"] = OIDName{ShortName: "gost89", LongName: "GOST 28147-89"} + oidDotNotationToNames["1.2.643.2.2.22"] = OIDName{ShortName: "gost-mac", LongName: "GOST 28147-89 MAC"} + oidDotNotationToNames["1.2.643.2.2.23"] = OIDName{ShortName: "prf-gostr3411-94", LongName: "GOST R 34.11-94 PRF"} + oidDotNotationToNames["1.2.643.2.2.98"] = OIDName{ShortName: "id-GostR3410-2001DH", LongName: "GOST R 34.10-2001 DH"} + oidDotNotationToNames["1.2.643.2.2.99"] = OIDName{ShortName: "id-GostR3410-94DH", LongName: "GOST R 34.10-94 DH"} + oidDotNotationToNames["1.2.643.2.2.14.1"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-KeyMeshing", LongName: "id-Gost28147-89-CryptoPro-KeyMeshing"} + oidDotNotationToNames["1.2.643.2.2.14.0"] = OIDName{ShortName: "id-Gost28147-89-None-KeyMeshing", LongName: "id-Gost28147-89-None-KeyMeshing"} + 
oidDotNotationToNames["1.2.643.2.2.30.0"] = OIDName{ShortName: "id-GostR3411-94-TestParamSet", LongName: "id-GostR3411-94-TestParamSet"} + oidDotNotationToNames["1.2.643.2.2.30.1"] = OIDName{ShortName: "id-GostR3411-94-CryptoProParamSet", LongName: "id-GostR3411-94-CryptoProParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.0"] = OIDName{ShortName: "id-Gost28147-89-TestParamSet", LongName: "id-Gost28147-89-TestParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.1"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-A-ParamSet", LongName: "id-Gost28147-89-CryptoPro-A-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.2"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-B-ParamSet", LongName: "id-Gost28147-89-CryptoPro-B-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.3"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-C-ParamSet", LongName: "id-Gost28147-89-CryptoPro-C-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.4"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-D-ParamSet", LongName: "id-Gost28147-89-CryptoPro-D-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.5"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet", LongName: "id-Gost28147-89-CryptoPro-Oscar-1-1-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.6"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet", LongName: "id-Gost28147-89-CryptoPro-Oscar-1-0-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.31.7"] = OIDName{ShortName: "id-Gost28147-89-CryptoPro-RIC-1-ParamSet", LongName: "id-Gost28147-89-CryptoPro-RIC-1-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.32.0"] = OIDName{ShortName: "id-GostR3410-94-TestParamSet", LongName: "id-GostR3410-94-TestParamSet"} + oidDotNotationToNames["1.2.643.2.2.32.2"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-A-ParamSet", LongName: "id-GostR3410-94-CryptoPro-A-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.32.3"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-B-ParamSet", LongName: "id-GostR3410-94-CryptoPro-B-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.32.4"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-C-ParamSet", LongName: "id-GostR3410-94-CryptoPro-C-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.32.5"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-D-ParamSet", LongName: "id-GostR3410-94-CryptoPro-D-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.33.1"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-XchA-ParamSet", LongName: "id-GostR3410-94-CryptoPro-XchA-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.33.2"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-XchB-ParamSet", LongName: "id-GostR3410-94-CryptoPro-XchB-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.33.3"] = OIDName{ShortName: "id-GostR3410-94-CryptoPro-XchC-ParamSet", LongName: "id-GostR3410-94-CryptoPro-XchC-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.35.0"] = OIDName{ShortName: "id-GostR3410-2001-TestParamSet", LongName: "id-GostR3410-2001-TestParamSet"} + oidDotNotationToNames["1.2.643.2.2.35.1"] = OIDName{ShortName: "id-GostR3410-2001-CryptoPro-A-ParamSet", LongName: "id-GostR3410-2001-CryptoPro-A-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.35.2"] = OIDName{ShortName: "id-GostR3410-2001-CryptoPro-B-ParamSet", LongName: "id-GostR3410-2001-CryptoPro-B-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.35.3"] = OIDName{ShortName: "id-GostR3410-2001-CryptoPro-C-ParamSet", LongName: "id-GostR3410-2001-CryptoPro-C-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.36.0"] = OIDName{ShortName: "id-GostR3410-2001-CryptoPro-XchA-ParamSet", LongName: 
"id-GostR3410-2001-CryptoPro-XchA-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.36.1"] = OIDName{ShortName: "id-GostR3410-2001-CryptoPro-XchB-ParamSet", LongName: "id-GostR3410-2001-CryptoPro-XchB-ParamSet"} + oidDotNotationToNames["1.2.643.2.2.20.1"] = OIDName{ShortName: "id-GostR3410-94-a", LongName: "id-GostR3410-94-a"} + oidDotNotationToNames["1.2.643.2.2.20.2"] = OIDName{ShortName: "id-GostR3410-94-aBis", LongName: "id-GostR3410-94-aBis"} + oidDotNotationToNames["1.2.643.2.2.20.3"] = OIDName{ShortName: "id-GostR3410-94-b", LongName: "id-GostR3410-94-b"} + oidDotNotationToNames["1.2.643.2.2.20.4"] = OIDName{ShortName: "id-GostR3410-94-bBis", LongName: "id-GostR3410-94-bBis"} + oidDotNotationToNames["1.2.643.2.9.1.6.1"] = OIDName{ShortName: "id-Gost28147-89-cc", LongName: "GOST 28147-89 Cryptocom ParamSet"} + oidDotNotationToNames["1.2.643.2.9.1.5.3"] = OIDName{ShortName: "gost94cc", LongName: "GOST 34.10-94 Cryptocom"} + oidDotNotationToNames["1.2.643.2.9.1.5.4"] = OIDName{ShortName: "gost2001cc", LongName: "GOST 34.10-2001 Cryptocom"} + oidDotNotationToNames["1.2.643.2.9.1.3.3"] = OIDName{ShortName: "id-GostR3411-94-with-GostR3410-94-cc", LongName: "GOST R 34.11-94 with GOST R 34.10-94 Cryptocom"} + oidDotNotationToNames["1.2.643.2.9.1.3.4"] = OIDName{ShortName: "id-GostR3411-94-with-GostR3410-2001-cc", LongName: "GOST R 34.11-94 with GOST R 34.10-2001 Cryptocom"} + oidDotNotationToNames["1.2.643.2.9.1.8.1"] = OIDName{ShortName: "id-GostR3410-2001-ParamSet-cc", LongName: "GOST R 3410-2001 Parameter Set Cryptocom"} + oidDotNotationToNames["1.3.6.1.4.1.311.17.2"] = OIDName{ShortName: "LocalKeySet", LongName: "Microsoft Local Key set"} + oidDotNotationToNames["2.5.29.46"] = OIDName{ShortName: "freshestCRL", LongName: "X509v3 Freshest CRL"} + oidDotNotationToNames["1.3.6.1.5.5.7.8.3"] = OIDName{ShortName: "id-on-permanentIdentifier", LongName: "Permanent Identifier"} + oidDotNotationToNames["2.5.4.14"] = OIDName{ShortName: "searchGuide", LongName: "searchGuide"} + oidDotNotationToNames["2.5.4.15"] = OIDName{ShortName: "businessCategory", LongName: "businessCategory"} + oidDotNotationToNames["2.5.4.16"] = OIDName{ShortName: "postalAddress", LongName: "postalAddress"} + oidDotNotationToNames["2.5.4.18"] = OIDName{ShortName: "postOfficeBox", LongName: "postOfficeBox"} + oidDotNotationToNames["2.5.4.19"] = OIDName{ShortName: "physicalDeliveryOfficeName", LongName: "physicalDeliveryOfficeName"} + oidDotNotationToNames["2.5.4.20"] = OIDName{ShortName: "telephoneNumber", LongName: "telephoneNumber"} + oidDotNotationToNames["2.5.4.21"] = OIDName{ShortName: "telexNumber", LongName: "telexNumber"} + oidDotNotationToNames["2.5.4.22"] = OIDName{ShortName: "teletexTerminalIdentifier", LongName: "teletexTerminalIdentifier"} + oidDotNotationToNames["2.5.4.23"] = OIDName{ShortName: "facsimileTelephoneNumber", LongName: "facsimileTelephoneNumber"} + oidDotNotationToNames["2.5.4.24"] = OIDName{ShortName: "x121Address", LongName: "x121Address"} + oidDotNotationToNames["2.5.4.25"] = OIDName{ShortName: "internationaliSDNNumber", LongName: "internationaliSDNNumber"} + oidDotNotationToNames["2.5.4.26"] = OIDName{ShortName: "registeredAddress", LongName: "registeredAddress"} + oidDotNotationToNames["2.5.4.27"] = OIDName{ShortName: "destinationIndicator", LongName: "destinationIndicator"} + oidDotNotationToNames["2.5.4.28"] = OIDName{ShortName: "preferredDeliveryMethod", LongName: "preferredDeliveryMethod"} + oidDotNotationToNames["2.5.4.29"] = OIDName{ShortName: "presentationAddress", LongName: 
"presentationAddress"} + oidDotNotationToNames["2.5.4.30"] = OIDName{ShortName: "supportedApplicationContext", LongName: "supportedApplicationContext"} + oidDotNotationToNames["2.5.4.31"] = OIDName{ShortName: "member", LongName: "member"} + oidDotNotationToNames["2.5.4.32"] = OIDName{ShortName: "owner", LongName: "owner"} + oidDotNotationToNames["2.5.4.33"] = OIDName{ShortName: "roleOccupant", LongName: "roleOccupant"} + oidDotNotationToNames["2.5.4.34"] = OIDName{ShortName: "seeAlso", LongName: "seeAlso"} + oidDotNotationToNames["2.5.4.35"] = OIDName{ShortName: "userPassword", LongName: "userPassword"} + oidDotNotationToNames["2.5.4.36"] = OIDName{ShortName: "userCertificate", LongName: "userCertificate"} + oidDotNotationToNames["2.5.4.37"] = OIDName{ShortName: "cACertificate", LongName: "cACertificate"} + oidDotNotationToNames["2.5.4.38"] = OIDName{ShortName: "authorityRevocationList", LongName: "authorityRevocationList"} + oidDotNotationToNames["2.5.4.39"] = OIDName{ShortName: "certificateRevocationList", LongName: "certificateRevocationList"} + oidDotNotationToNames["2.5.4.40"] = OIDName{ShortName: "crossCertificatePair", LongName: "crossCertificatePair"} + oidDotNotationToNames["2.5.4.47"] = OIDName{ShortName: "enhancedSearchGuide", LongName: "enhancedSearchGuide"} + oidDotNotationToNames["2.5.4.48"] = OIDName{ShortName: "protocolInformation", LongName: "protocolInformation"} + oidDotNotationToNames["2.5.4.49"] = OIDName{ShortName: "distinguishedName", LongName: "distinguishedName"} + oidDotNotationToNames["2.5.4.50"] = OIDName{ShortName: "uniqueMember", LongName: "uniqueMember"} + oidDotNotationToNames["2.5.4.51"] = OIDName{ShortName: "houseIdentifier", LongName: "houseIdentifier"} + oidDotNotationToNames["2.5.4.52"] = OIDName{ShortName: "supportedAlgorithms", LongName: "supportedAlgorithms"} + oidDotNotationToNames["2.5.4.53"] = OIDName{ShortName: "deltaRevocationList", LongName: "deltaRevocationList"} + oidDotNotationToNames["2.5.4.54"] = OIDName{ShortName: "dmdName", LongName: "dmdName"} + oidDotNotationToNames["1.3.6.1.4.1.311.17.1"] = OIDName{ShortName: "MS_LOCAL_MACHINE_KEYSET", LongName: "MS_LOCAL_MACHINE_KEYSET"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.4.1"] = OIDName{ShortName: "MS_YESNO_TRUST_ATTR", LongName: "MS_YESNO_TRUST_ATTR"} + oidDotNotationToNames["1.3.6.1.4.1.311.13.2.1"] = OIDName{ShortName: "MS_ENROLLMENT_NAME_VALUE_PAIR", LongName: "MS_ENROLLMENT_NAME_VALUE_PAIR"} + oidDotNotationToNames["1.3.6.1.4.1.311.13.2.3"] = OIDName{ShortName: "MS_OS_VERSION", LongName: "MS_OS_VERSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.13.2.2"] = OIDName{ShortName: "MS_ENROLLMENT_CSP_PROVIDER", LongName: "MS_ENROLLMENT_CSP_PROVIDER"} + oidDotNotationToNames["1.3.6.1.4.1.311.12.1.2"] = OIDName{ShortName: "MS_CATALOG_LIST_MEMBER", LongName: "MS_CATALOG_LIST_MEMBER"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.11"] = OIDName{ShortName: "MS_CERT_PROP_ID_PREFIX", LongName: "MS_CERT_PROP_ID_PREFIX"} + oidDotNotationToNames["1.3.6.1.4.1.311.13.1"] = OIDName{ShortName: "MS_RENEWAL_CERTIFICATE", LongName: "MS_RENEWAL_CERTIFICATE"} + oidDotNotationToNames["1.3.6.1.4.1.311"] = OIDName{ShortName: "MS_OID", LongName: "MS_OID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.30"] = OIDName{ShortName: "MS_SPC_SIPINFO_OBJID", LongName: "MS_SPC_SIPINFO_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.88.3"] = OIDName{ShortName: "MS_CAPICOM_ENCRYPTED_DATA", LongName: "MS_CAPICOM_ENCRYPTED_DATA"} + oidDotNotationToNames["1.3.6.1.4.1.311.88.2"] = OIDName{ShortName: "MS_CAPICOM_ATTRIBUTE", LongName: 
"MS_CAPICOM_ATTRIBUTE"} + oidDotNotationToNames["1.3.6.1.4.1.311.88.1"] = OIDName{ShortName: "MS_CAPICOM_VERSION", LongName: "MS_CAPICOM_VERSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.6.2"] = OIDName{ShortName: "MS_LICENSE_SERVER", LongName: "MS_LICENSE_SERVER"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.10.1"] = OIDName{ShortName: "MS_CMC_ADD_ATTRIBUTES", LongName: "MS_CMC_ADD_ATTRIBUTES"} + oidDotNotationToNames["1.3.6.1.4.1.311.3.2.1"] = OIDName{ShortName: "MS_SPC_TIME_STAMP_REQUEST_OBJID", LongName: "MS_SPC_TIME_STAMP_REQUEST_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.12.1"] = OIDName{ShortName: "MS_ANY_APPLICATION_POLICY", LongName: "MS_ANY_APPLICATION_POLICY"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.0.4"] = OIDName{ShortName: "MS_PEERNET_CERT_VERSION", LongName: "MS_PEERNET_CERT_VERSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.19"] = OIDName{ShortName: "MS_DS_EMAIL_REPLICATION", LongName: "MS_DS_EMAIL_REPLICATION"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.16"] = OIDName{ShortName: "MS_ARCHIVED_KEY_CERT_HASH", LongName: "MS_ARCHIVED_KEY_CERT_HASH"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.17"] = OIDName{ShortName: "MS_ISSUED_CERT_HASH", LongName: "MS_ISSUED_CERT_HASH"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.14"] = OIDName{ShortName: "MS_CRL_SELF_CDP", LongName: "MS_CRL_SELF_CDP"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.15"] = OIDName{ShortName: "MS_REQUIRE_CERT_CHAIN_POLICY", LongName: "MS_REQUIRE_CERT_CHAIN_POLICY"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.12"] = OIDName{ShortName: "MS_APPLICATION_POLICY_CONSTRAINTS", LongName: "MS_APPLICATION_POLICY_CONSTRAINTS"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.13"] = OIDName{ShortName: "MS_ARCHIVED_KEY_ATTR", LongName: "MS_ARCHIVED_KEY_ATTR"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.10"] = OIDName{ShortName: "MS_APPLICATION_CERT_POLICIES", LongName: "MS_APPLICATION_CERT_POLICIES"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.11"] = OIDName{ShortName: "MS_APPLICATION_POLICY_MAPPINGS", LongName: "MS_APPLICATION_POLICY_MAPPINGS"} + oidDotNotationToNames["1.3.6.1.4.1.311.44"] = OIDName{ShortName: "MS_Peer_Networking", LongName: "MS_Peer_Networking"} + oidDotNotationToNames["1.3.6.1.4.1.311.12.2.1"] = OIDName{ShortName: "MS_CAT_NAMEVALUE_OBJID", LongName: "MS_CAT_NAMEVALUE_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.5.1"] = OIDName{ShortName: "MS_DRM", LongName: "MS_DRM"} + oidDotNotationToNames["1.3.6.1.4.1.311.43"] = OIDName{ShortName: "MS_WWOps_BizExt", LongName: "MS_WWOps_BizExt"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.5.2"] = OIDName{ShortName: "MS_DRM_INDIVIDUALIZATION", LongName: "MS_DRM_INDIVIDUALIZATION"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.13"] = OIDName{ShortName: "MS_KP_LIFETIME_SIGNING", LongName: "MS_KP_LIFETIME_SIGNING"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.12"] = OIDName{ShortName: "MS_KP_DOCUMENT_SIGNING", LongName: "MS_KP_DOCUMENT_SIGNING"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.11"] = OIDName{ShortName: "MS_KP_KEY_RECOVERY", LongName: "MS_KP_KEY_RECOVERY"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.10"] = OIDName{ShortName: "MS_KP_QUALIFIED_SUBORDINATION", LongName: "MS_KP_QUALIFIED_SUBORDINATION"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.1"] = OIDName{ShortName: "MS_PKIX_LICENSE_INFO", LongName: "MS_PKIX_LICENSE_INFO"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.2"] = OIDName{ShortName: "MS_PKIX_MANUFACTURER", LongName: "MS_PKIX_MANUFACTURER"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.3"] = OIDName{ShortName: 
"MS_PKIX_MANUFACTURER_MS_SPECIFIC", LongName: "MS_PKIX_MANUFACTURER_MS_SPECIFIC"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.4"] = OIDName{ShortName: "MS_PKIX_HYDRA_CERT_VERSION", LongName: "MS_PKIX_HYDRA_CERT_VERSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.5"] = OIDName{ShortName: "MS_PKIX_LICENSED_PRODUCT_INFO", LongName: "MS_PKIX_LICENSED_PRODUCT_INFO"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.6"] = OIDName{ShortName: "MS_PKIX_MS_LICENSE_SERVER_INFO", LongName: "MS_PKIX_MS_LICENSE_SERVER_INFO"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.7"] = OIDName{ShortName: "MS_PKIS_PRODUCT_SPECIFIC_OID", LongName: "MS_PKIS_PRODUCT_SPECIFIC_OID"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.22"] = OIDName{ShortName: "MS_CERTSRV_CROSSCA_VERSION", LongName: "MS_CERTSRV_CROSSCA_VERSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.21"] = OIDName{ShortName: "MS_ENCRYPTED_KEY_HASH", LongName: "MS_ENCRYPTED_KEY_HASH"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.20"] = OIDName{ShortName: "MS_REQUEST_CLIENT_INFO", LongName: "MS_REQUEST_CLIENT_INFO"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.3"] = OIDName{ShortName: "MS_CERT_MANIFOLD", LongName: "MS_CERT_MANIFOLD"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.1.1"] = OIDName{ShortName: "MS_SORTED_CTL", LongName: "MS_SORTED_CTL"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.1.3"] = OIDName{ShortName: "MS_PEERNET_PNRP_PAYLOAD", LongName: "MS_PEERNET_PNRP_PAYLOAD"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.7.1"] = OIDName{ShortName: "MS_KEYID_RDN", LongName: "MS_KEYID_RDN"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.1.1"] = OIDName{ShortName: "MS_PEERNET_PNRP_ADDRESS", LongName: "MS_PEERNET_PNRP_ADDRESS"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.8"] = OIDName{ShortName: "MS_ENTERPRISE_OID_ROOT", LongName: "MS_ENTERPRISE_OID_ROOT"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.1.4"] = OIDName{ShortName: "MS_PEERNET_PNRP_ID", LongName: "MS_PEERNET_PNRP_ID"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.3.1"] = OIDName{ShortName: "MS_PEERNET_GROUPING_PEERNAME", LongName: "MS_PEERNET_GROUPING_PEERNAME"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.12"] = OIDName{ShortName: "MS_CryptUI", LongName: "MS_CryptUI"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.10"] = OIDName{ShortName: "MS_CMC_OIDs", LongName: "MS_CMC_OIDs"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.11"] = OIDName{ShortName: "MS_certificate_property_OIDs", LongName: "MS_certificate_property_OIDs"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.4"] = OIDName{ShortName: "MS_SPC_INDIRECT_DATA_OBJID", LongName: "MS_SPC_INDIRECT_DATA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.2"] = OIDName{ShortName: "MS_CTL_for_Software_Publishers_Trusted_CAs", LongName: "MS_CTL_for_Software_Publishers_Trusted_CAs"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.3.5"] = OIDName{ShortName: "MS_PEERNET_GROUPING_CLASSIFIERS", LongName: "MS_PEERNET_GROUPING_CLASSIFIERS"} + oidDotNotationToNames["1.3.6.1.4.1.311.2"] = OIDName{ShortName: "MS_Authenticode", LongName: "MS_Authenticode"} + oidDotNotationToNames["1.3.6.1.4.1.311.3"] = OIDName{ShortName: "MS_Time_Stamping", LongName: "MS_Time_Stamping"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.7"] = OIDName{ShortName: "MS_CERTIFICATE_TEMPLATE", LongName: "MS_CERTIFICATE_TEMPLATE"} + oidDotNotationToNames["1.3.6.1.4.1.311.4"] = OIDName{ShortName: "MS_Permissions", LongName: "MS_Permissions"} + oidDotNotationToNames["1.3.6.1.4.1.311.30"] = OIDName{ShortName: "MS_IIS", LongName: "MS_IIS"} + oidDotNotationToNames["1.3.6.1.4.1.311.19"] = OIDName{ShortName: "MS_ISPU_Test", 
LongName: "MS_ISPU_Test"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.7"] = OIDName{ShortName: "MS_OEM_WHQL_CRYPTO", LongName: "MS_OEM_WHQL_CRYPTO"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.6"] = OIDName{ShortName: "MS_NT5_CRYPTO", LongName: "MS_NT5_CRYPTO"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.5"] = OIDName{ShortName: "MS_WHQL_CRYPTO", LongName: "MS_WHQL_CRYPTO"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.4"] = OIDName{ShortName: "MS_EFS_CRYPTO", LongName: "MS_EFS_CRYPTO"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.2.3"] = OIDName{ShortName: "MS_NT_PRINCIPAL_NAME", LongName: "MS_NT_PRINCIPAL_NAME"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.2.2"] = OIDName{ShortName: "MS_KP_SMARTCARD_LOGON", LongName: "MS_KP_SMARTCARD_LOGON"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.2.1"] = OIDName{ShortName: "MS_ENROLLMENT_AGENT", LongName: "MS_ENROLLMENT_AGENT"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.9"] = OIDName{ShortName: "MS_ROOT_LIST_SIGNER", LongName: "MS_ROOT_LIST_SIGNER"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.8"] = OIDName{ShortName: "MS_EMBEDDED_NT_CRYPTO", LongName: "MS_EMBEDDED_NT_CRYPTO"} + oidDotNotationToNames["1.3.6.1.4.1.311.18.8"] = OIDName{ShortName: "MS_PKIS_TLSERVER_SPK_OID", LongName: "MS_PKIS_TLSERVER_SPK_OID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.2.2"] = OIDName{ShortName: "MS_TRUSTED_CLIENT_AUTH_CA_LIST", LongName: "MS_TRUSTED_CLIENT_AUTH_CA_LIST"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.2.3"] = OIDName{ShortName: "MS_TRUSTED_SERVER_AUTH_CA_LIST", LongName: "MS_TRUSTED_SERVER_AUTH_CA_LIST"} + oidDotNotationToNames["1.3.6.1.4.1.311.12.1.1"] = OIDName{ShortName: "MS_CATALOG_LIST", LongName: "MS_CATALOG_LIST"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.2.1"] = OIDName{ShortName: "MS_TRUSTED_CODESIGNING_CA_LIST", LongName: "MS_TRUSTED_CODESIGNING_CA_LIST"} + oidDotNotationToNames["1.3.6.1.4.1.311.45"] = OIDName{ShortName: "MS_Mobile_Devices_Code_Signing", LongName: "MS_Mobile_Devices_Code_Signing"} + oidDotNotationToNames["1.3.6.1.4.1.311.30.1"] = OIDName{ShortName: "MS_IIS_VIRTUAL_SERVER", LongName: "MS_IIS_VIRTUAL_SERVER"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.14"] = OIDName{ShortName: "MS_KP_MOBILE_DEVICE_SOFTWARE", LongName: "MS_KP_MOBILE_DEVICE_SOFTWARE"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.8.1"] = OIDName{ShortName: "MS_REMOVE_CERTIFICATE", LongName: "MS_REMOVE_CERTIFICATE"} + oidDotNotationToNames["1.3.6.1.4.1.311.42"] = OIDName{ShortName: "MS_Corporate_PKI_(ITG)", LongName: "MS_Corporate_PKI_(ITG)"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.26"] = OIDName{ShortName: "MS_SPC_MINIMAL_CRITERIA_OBJID", LongName: "MS_SPC_MINIMAL_CRITERIA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.3.2"] = OIDName{ShortName: "MS_PEERNET_GROUPING_FLAGS", LongName: "MS_PEERNET_GROUPING_FLAGS"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.3.3"] = OIDName{ShortName: "MS_PEERNET_GROUPING_ROLES", LongName: "MS_PEERNET_GROUPING_ROLES"} + oidDotNotationToNames["1.3.6.1.4.1.311.41"] = OIDName{ShortName: "MS_Licensing_and_Registration", LongName: "MS_Licensing_and_Registration"} + oidDotNotationToNames["1.3.6.1.4.1.311.20"] = OIDName{ShortName: "MS_Enrollment_Infrastructure", LongName: "MS_Enrollment_Infrastructure"} + oidDotNotationToNames["1.3.6.1.4.1.311.40"] = OIDName{ShortName: "MS_Fonts", LongName: "MS_Fonts"} + oidDotNotationToNames["1.3.6.1.4.1.311.21"] = OIDName{ShortName: "MS_CertSrv_Infrastructure", LongName: "MS_CertSrv_Infrastructure"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.3.1"] = OIDName{ShortName: 
"MS_SERIALIZED", LongName: "MS_SERIALIZED"} + oidDotNotationToNames["1.3.6.1.4.1.311.12.2.2"] = OIDName{ShortName: "MS_CAT_MEMBERINFO_OBJID", LongName: "MS_CAT_MEMBERINFO_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.25"] = OIDName{ShortName: "MS_Directory_Service", LongName: "MS_Directory_Service"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.0.3"] = OIDName{ShortName: "MS_PEERNET_CLASSIFIER", LongName: "MS_PEERNET_CLASSIFIER"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.0.1"] = OIDName{ShortName: "MS_PEERNET_CERT_TYPE", LongName: "MS_PEERNET_CERT_TYPE"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.1"] = OIDName{ShortName: "MS_PEERNET_PNRP", LongName: "MS_PEERNET_PNRP"} + oidDotNotationToNames["1.3.6.1.4.1.311.88.3.1"] = OIDName{ShortName: "MS_CAPICOM_ENCRYPTED_CONTENT", LongName: "MS_CAPICOM_ENCRYPTED_CONTENT"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.0.2"] = OIDName{ShortName: "MS_PEERNET_PEERNAME", LongName: "MS_PEERNET_PEERNAME"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.3"] = OIDName{ShortName: "MS_PEERNET_GROUPING", LongName: "MS_PEERNET_GROUPING"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.1.2"] = OIDName{ShortName: "MS_PEERNET_PNRP_FLAGS", LongName: "MS_PEERNET_PNRP_FLAGS"} + oidDotNotationToNames["1.3.6.1.4.1.311.15"] = OIDName{ShortName: "MS_Java", LongName: "MS_Java"} + oidDotNotationToNames["1.3.6.1.4.1.311.16"] = OIDName{ShortName: "MS_Outlook/Exchange", LongName: "MS_Outlook/Exchange"} + oidDotNotationToNames["1.3.6.1.4.1.311.17"] = OIDName{ShortName: "MS_PKCS12_attributes", LongName: "MS_PKCS12_attributes"} + oidDotNotationToNames["1.3.6.1.4.1.311.10"] = OIDName{ShortName: "MS_Crypto_2.0", LongName: "MS_Crypto_2.0"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.9"] = OIDName{ShortName: "MS_RDN_DUMMY_SIGNER", LongName: "MS_RDN_DUMMY_SIGNER"} + oidDotNotationToNames["1.3.6.1.4.1.311.12"] = OIDName{ShortName: "MS_Catalog", LongName: "MS_Catalog"} + oidDotNotationToNames["1.3.6.1.4.1.311.13"] = OIDName{ShortName: "MS_PKCS10_OIDs", LongName: "MS_PKCS10_OIDs"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.4"] = OIDName{ShortName: "MS_CRL_NEXT_PUBLISH", LongName: "MS_CRL_NEXT_PUBLISH"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.5"] = OIDName{ShortName: "MS_KP_CA_EXCHANGE", LongName: "MS_KP_CA_EXCHANGE"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.6"] = OIDName{ShortName: "MS_KP_KEY_RECOVERY_AGENT", LongName: "MS_KP_KEY_RECOVERY_AGENT"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.6.1"] = OIDName{ShortName: "MS_LICENSES", LongName: "MS_LICENSES"} + oidDotNotationToNames["1.3.6.1.4.1.311.18"] = OIDName{ShortName: "MS_Hydra", LongName: "MS_Hydra"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.1"] = OIDName{ShortName: "MS_CERTSRV_CA_VERSION", LongName: "MS_CERTSRV_CA_VERSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.2"] = OIDName{ShortName: "MS_CERTSRV_PREVIOUS_CERT_HASH", LongName: "MS_CERTSRV_PREVIOUS_CERT_HASH"} + oidDotNotationToNames["1.3.6.1.4.1.311.21.3"] = OIDName{ShortName: "MS_CRL_VIRTUAL_BASE", LongName: "MS_CRL_VIRTUAL_BASE"} + oidDotNotationToNames["1.3.6.1.4.1.311.31.1"] = OIDName{ShortName: "MS_PRODUCT_UPDATE", LongName: "MS_PRODUCT_UPDATE"} + oidDotNotationToNames["1.3.6.1.4.1.311.16.4"] = OIDName{ShortName: "MS_MICROSOFT_Encryption_Key_Preference", LongName: "MS_MICROSOFT_Encryption_Key_Preference"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.2"] = OIDName{ShortName: "MS_PEERNET_IDENTITY", LongName: "MS_PEERNET_IDENTITY"} + oidDotNotationToNames["1.3.6.1.4.1.311.88"] = OIDName{ShortName: "MS_CAPICOM", LongName: "MS_CAPICOM"} + 
oidDotNotationToNames["1.3.6.1.4.1.311.10.9.1"] = OIDName{ShortName: "MS_CROSS_CERT_DIST_POINTS", LongName: "MS_CROSS_CERT_DIST_POINTS"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.19"] = OIDName{ShortName: "MS_SPC_STRUCTURED_STORAGE_DATA_OBJID", LongName: "MS_SPC_STRUCTURED_STORAGE_DATA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.18"] = OIDName{ShortName: "MS_SPC_RAW_FILE_DATA_OBJID", LongName: "MS_SPC_RAW_FILE_DATA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.25"] = OIDName{ShortName: "MS_SPC_GLUE_RDN_OBJID", LongName: "MS_SPC_GLUE_RDN_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.11"] = OIDName{ShortName: "MS_SPC_STATEMENT_TYPE_OBJID", LongName: "MS_SPC_STATEMENT_TYPE_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.10"] = OIDName{ShortName: "MS_SPC_SP_AGENCY_INFO_OBJID", LongName: "MS_SPC_SP_AGENCY_INFO_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.4.1"] = OIDName{ShortName: "MS_EFS_RECOVERY", LongName: "MS_EFS_RECOVERY"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.12"] = OIDName{ShortName: "MS_SPC_SP_OPUS_INFO_OBJID", LongName: "MS_SPC_SP_OPUS_INFO_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.15"] = OIDName{ShortName: "MS_SPC_PE_IMAGE_DATA_OBJID", LongName: "MS_SPC_PE_IMAGE_DATA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.14"] = OIDName{ShortName: "MS_SPC_CERT_EXTENSIONS_OBJID", LongName: "MS_SPC_CERT_EXTENSIONS_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.25.1"] = OIDName{ShortName: "MS_NTDS_REPLICATION", LongName: "MS_NTDS_REPLICATION"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.27"] = OIDName{ShortName: "MS_SPC_FINANCIAL_CRITERIA_OBJID", LongName: "MS_SPC_FINANCIAL_CRITERIA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.3"] = OIDName{ShortName: "MS_SERVER_GATED_CRYPTO", LongName: "MS_SERVER_GATED_CRYPTO"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.11.1"] = OIDName{ShortName: "MS_CERT_PROP_ID_PREFIX", LongName: "MS_CERT_PROP_ID_PREFIX"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.2"] = OIDName{ShortName: "MS_KP_TIME_STAMP_SIGNING", LongName: "MS_KP_TIME_STAMP_SIGNING"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.3.1"] = OIDName{ShortName: "MS_KP_CTL_USAGE_SIGNING", LongName: "MS_KP_CTL_USAGE_SIGNING"} + oidDotNotationToNames["1.3.6.1.4.1.311.31"] = OIDName{ShortName: "MS_Windows_updates_and_service_packs", LongName: "MS_Windows_updates_and_service_packs"} + oidDotNotationToNames["1.3.6.1.4.1.311.88.2.1"] = OIDName{ShortName: "MS_CAPICOM_DOCUMENT_NAME", LongName: "MS_CAPICOM_DOCUMENT_NAME"} + oidDotNotationToNames["1.3.6.1.4.1.311.88.2.2"] = OIDName{ShortName: "MS_CAPICOM_DOCUMENT_DESCRIPTION", LongName: "MS_CAPICOM_DOCUMENT_DESCRIPTION"} + oidDotNotationToNames["1.3.6.1.4.1.311.44.2.2"] = OIDName{ShortName: "MS_PEERNET_IDENTITY_FLAGS", LongName: "MS_PEERNET_IDENTITY_FLAGS"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.1"] = OIDName{ShortName: "MS_AUTO_ENROLL_CTL_USAGE", LongName: "MS_AUTO_ENROLL_CTL_USAGE"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.1"] = OIDName{ShortName: "MS_CTL", LongName: "MS_CTL"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.2"] = OIDName{ShortName: "MS_NEXT_UPDATE_LOCATION", LongName: "MS_NEXT_UPDATE_LOCATION"} + oidDotNotationToNames["1.3.6.1.4.1.311.20.2"] = OIDName{ShortName: "MS_ENROLL_CERTTYPE_EXTENSION", LongName: "MS_ENROLL_CERTTYPE_EXTENSION"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.20"] = OIDName{ShortName: "MS_SPC_JAVA_CLASS_DATA_OBJID", LongName: "MS_SPC_JAVA_CLASS_DATA_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.21"] = OIDName{ShortName: 
"MS_SPC_INDIVIDUAL_SP_KEY_PURPOSE_OBJID", LongName: "MS_SPC_INDIVIDUAL_SP_KEY_PURPOSE_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.22"] = OIDName{ShortName: "MS_SPC_COMMERCIAL_SP_KEY_PURPOSE_OBJID", LongName: "MS_SPC_COMMERCIAL_SP_KEY_PURPOSE_OBJID"} + oidDotNotationToNames["1.3.6.1.4.1.311.10.7"] = OIDName{ShortName: "MS_MICROSOFT_RDN_PREFIX", LongName: "MS_MICROSOFT_RDN_PREFIX"} + oidDotNotationToNames["1.3.6.1.4.1.311.2.1.28"] = OIDName{ShortName: "MS_SPC_LINK_OBJID", LongName: "MS_SPC_LINK_OBJID"} + // EV Certificates + oidDotNotationToNames["1.3.6.1.4.1.311.60.2.1.1"] = OIDName{ShortName: "jurisdictionLocality", LongName: "jurisdictionLocalityName"} + oidDotNotationToNames["1.3.6.1.4.1.311.60.2.1.2"] = OIDName{ShortName: "jurisdictionStateOrProvince", LongName: "jurisdictionStateOrProvinceName"} + oidDotNotationToNames["1.3.6.1.4.1.311.60.2.1.3"] = OIDName{ShortName: "jurisdictionCountry", LongName: "jurisdictionCountryName"} + +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/pkix.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/pkix.go new file mode 100644 index 00000000..ef8289c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/pkix/pkix.go @@ -0,0 +1,289 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkix contains shared, low level structures used for ASN.1 parsing +// and serialization of X.509 certificates, CRL and OCSP. +package pkix + +import ( + "encoding/asn1" + "math/big" + "strings" + "time" +) + +// AlgorithmIdentifier represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.1.1.2. +type AlgorithmIdentifier struct { + Algorithm asn1.ObjectIdentifier + Parameters asn1.RawValue `asn1:"optional"` +} + +type RDNSequence []RelativeDistinguishedNameSET + +type RelativeDistinguishedNameSET []AttributeTypeAndValue + +// AttributeTypeAndValue mirrors the ASN.1 structure of the same name in +// http://tools.ietf.org/html/rfc5280#section-4.1.2.4 +type AttributeTypeAndValue struct { + Type asn1.ObjectIdentifier `json:"type"` + Value interface{} `json:"value"` +} + +// AttributeTypeAndValueSET represents a set of ASN.1 sequences of +// AttributeTypeAndValue sequences from RFC 2986 (PKCS #10). +type AttributeTypeAndValueSET struct { + Type asn1.ObjectIdentifier + Value [][]AttributeTypeAndValue `asn1:"set"` +} + +// Extension represents the ASN.1 structure of the same name. See RFC +// 5280, section 4.2. +type Extension struct { + Id asn1.ObjectIdentifier + Critical bool `asn1:"optional"` + Value []byte +} + +// Name represents an X.509 distinguished name. This only includes the common +// elements of a DN. Additional elements in the name are ignored. +type Name struct { + Country, Organization, OrganizationalUnit []string + Locality, Province []string + StreetAddress, PostalCode, DomainComponent []string + EmailAddress []string + SerialNumber, CommonName string + GivenName, Surname []string + // EV Components + JurisdictionLocality, JurisdictionProvince, JurisdictionCountry []string + + Names []AttributeTypeAndValue + ExtraNames []AttributeTypeAndValue + + // OriginalRDNS is saved if the name is populated using FillFromRDNSequence. + // Additionally, if OriginalRDNS is non-nil, the String and ToRDNSequence + // methods will simply use this. 
+	OriginalRDNS RDNSequence
+}
+
+// FillFromRDNSequence populates n based on the AttributeTypeAndValueSETs in the
+// RDNSequence. It saves the sequence as OriginalRDNS.
+func (n *Name) FillFromRDNSequence(rdns *RDNSequence) {
+	n.OriginalRDNS = *rdns
+	for _, rdn := range *rdns {
+		if len(rdn) == 0 {
+			continue
+		}
+		atv := rdn[0]
+		n.Names = append(n.Names, atv)
+		value, ok := atv.Value.(string)
+		if !ok {
+			continue
+		}
+
+		t := atv.Type
+		if len(t) == 4 && t[0] == 2 && t[1] == 5 && t[2] == 4 {
+			switch t[3] {
+			case 3:
+				n.CommonName = value
+			case 4:
+				n.Surname = append(n.Surname, value)
+			case 5:
+				n.SerialNumber = value
+			case 6:
+				n.Country = append(n.Country, value)
+			case 7:
+				n.Locality = append(n.Locality, value)
+			case 8:
+				n.Province = append(n.Province, value)
+			case 9:
+				n.StreetAddress = append(n.StreetAddress, value)
+			case 10:
+				n.Organization = append(n.Organization, value)
+			case 11:
+				n.OrganizationalUnit = append(n.OrganizationalUnit, value)
+			case 17:
+				n.PostalCode = append(n.PostalCode, value)
+			case 42:
+				n.GivenName = append(n.GivenName, value)
+			}
+		} else if t.Equal(oidDomainComponent) {
+			n.DomainComponent = append(n.DomainComponent, value)
+		} else if t.Equal(oidDNEmailAddress) {
+			// Deprecated, see RFC 5280 Section 4.1.2.6
+			n.EmailAddress = append(n.EmailAddress, value)
+		} else if t.Equal(oidJurisdictionLocality) {
+			n.JurisdictionLocality = append(n.JurisdictionLocality, value)
+		} else if t.Equal(oidJurisdictionProvince) {
+			n.JurisdictionProvince = append(n.JurisdictionProvince, value)
+		} else if t.Equal(oidJurisdictionCountry) {
+			n.JurisdictionCountry = append(n.JurisdictionCountry, value)
+		}
+	}
+}
+
+var (
+	oidCountry            = []int{2, 5, 4, 6}
+	oidOrganization       = []int{2, 5, 4, 10}
+	oidOrganizationalUnit = []int{2, 5, 4, 11}
+	oidCommonName         = []int{2, 5, 4, 3}
+	oidSurname            = []int{2, 5, 4, 4}
+	oidSerialNumber       = []int{2, 5, 4, 5}
+	oidLocality           = []int{2, 5, 4, 7}
+	oidProvince           = []int{2, 5, 4, 8}
+	oidStreetAddress      = []int{2, 5, 4, 9}
+	oidPostalCode         = []int{2, 5, 4, 17}
+	oidGivenName          = []int{2, 5, 4, 42}
+	oidDomainComponent    = []int{0, 9, 2342, 19200300, 100, 1, 25}
+	oidDNEmailAddress     = []int{1, 2, 840, 113549, 1, 9, 1}
+	// EV
+	oidJurisdictionLocality = []int{1, 3, 6, 1, 4, 1, 311, 60, 2, 1, 1}
+	oidJurisdictionProvince = []int{1, 3, 6, 1, 4, 1, 311, 60, 2, 1, 2}
+	oidJurisdictionCountry  = []int{1, 3, 6, 1, 4, 1, 311, 60, 2, 1, 3}
+)
+
+// appendRDNs appends a relativeDistinguishedNameSET to the given RDNSequence
+// and returns the new value. The relativeDistinguishedNameSET contains an
+// attributeTypeAndValue for each of the given values. See RFC 5280, A.1, and
+// search for AttributeTypeAndValue.
+func (n Name) appendRDNs(in RDNSequence, values []string, oid asn1.ObjectIdentifier) RDNSequence {
+	// NOTE: stdlib prevents adding if the oid is already present in n.ExtraNames
+	//if len(values) == 0 || oidInAttributeTypeAndValue(oid, n.ExtraNames) {
+	if len(values) == 0 {
+		return in
+	}
+
+	s := make([]AttributeTypeAndValue, len(values))
+	for i, value := range values {
+		s[i].Type = oid
+		s[i].Value = value
+	}
+
+	return append(in, s)
+}
+
+// String returns an RDNSequence as a comma-separated list of
+// AttributeTypeAndValues in canonical form.
+func (seq RDNSequence) String() string {
+	out := make([]string, 0, len(seq))
+	// An RDNSequence is effectively an [][]AttributeTypeAndValue
+	for _, atvSet := range seq {
+		for _, atv := range atvSet {
+			// Convert each individual AttributeTypeAndValue to X=Y
+			attrParts := make([]string, 0, 2)
+			oidString := atv.Type.String()
+			oidName, ok := oidDotNotationToNames[oidString]
+			if ok {
+				attrParts = append(attrParts, oidName.ShortName)
+			} else {
+				attrParts = append(attrParts, oidString)
+			}
+			switch value := atv.Value.(type) {
+			case string:
+				attrParts = append(attrParts, value)
+			case []byte:
+				attrParts = append(attrParts, string(value))
+			default:
+				continue
+			}
+			attrString := strings.Join(attrParts, "=")
+			out = append(out, attrString)
+		}
+	}
+	return strings.Join(out, ", ")
+}
+
+// ToRDNSequence returns OriginalRDNS if it is populated. Otherwise, it builds
+// an RDNSequence in canonical order.
+func (n Name) ToRDNSequence() (ret RDNSequence) {
+	if n.OriginalRDNS != nil {
+		return n.OriginalRDNS
+	}
+	if len(n.CommonName) > 0 {
+		ret = n.appendRDNs(ret, []string{n.CommonName}, oidCommonName)
+	}
+	ret = n.appendRDNs(ret, n.OrganizationalUnit, oidOrganizationalUnit)
+	ret = n.appendRDNs(ret, n.Organization, oidOrganization)
+	ret = n.appendRDNs(ret, n.StreetAddress, oidStreetAddress)
+	ret = n.appendRDNs(ret, n.Locality, oidLocality)
+	ret = n.appendRDNs(ret, n.Province, oidProvince)
+	ret = n.appendRDNs(ret, n.PostalCode, oidPostalCode)
+	ret = n.appendRDNs(ret, n.Country, oidCountry)
+	ret = n.appendRDNs(ret, n.DomainComponent, oidDomainComponent)
+	// EV Components
+	ret = n.appendRDNs(ret, n.JurisdictionLocality, oidJurisdictionLocality)
+	ret = n.appendRDNs(ret, n.JurisdictionProvince, oidJurisdictionProvince)
+	ret = n.appendRDNs(ret, n.JurisdictionCountry, oidJurisdictionCountry)
+	if len(n.SerialNumber) > 0 {
+		ret = n.appendRDNs(ret, []string{n.SerialNumber}, oidSerialNumber)
+	}
+	ret = append(ret, n.ExtraNames)
+	return ret
+}
+
+// oidInAttributeTypeAndValue returns whether a type with the given OID exists
+// in atv.
+func oidInAttributeTypeAndValue(oid asn1.ObjectIdentifier, atv []AttributeTypeAndValue) bool {
+	for _, a := range atv {
+		if a.Type.Equal(oid) {
+			return true
+		}
+	}
+	return false
+}
+
+// CertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1. Use Certificate.CheckCRLSignature to verify the
+// signature.
+type CertificateList struct {
+	TBSCertList        TBSCertificateList
+	SignatureAlgorithm AlgorithmIdentifier
+	SignatureValue     asn1.BitString
+}
+
+// HasExpired reports whether now is past the expiry time of certList.
+func (certList *CertificateList) HasExpired(now time.Time) bool {
+	return now.After(certList.TBSCertList.NextUpdate)
+}
+
+// String returns a canonical representation of a DistinguishedName.
+func (n *Name) String() string {
+	seq := n.ToRDNSequence()
+	return seq.String()
+}
+
+// OtherName represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.2.1.6.
+type OtherName struct {
+	TypeID asn1.ObjectIdentifier
+	Value  asn1.RawValue `asn1:"explicit"`
+}
+
+// EDIPartyName represents the ASN.1 structure of the same name. See RFC
+// 5280, section 4.2.1.6.
+type EDIPartyName struct {
+	NameAssigner string `asn1:"tag:0,optional,explicit" json:"name_assigner,omitempty"`
+	PartyName    string `asn1:"tag:1,explicit" json:"party_name"`
+}
+
+// TBSCertificateList represents the ASN.1 structure of the same name. See RFC
+// 5280, section 5.1.
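+//
+// For reference, the ASN.1 it mirrors (RFC 5280, section 5.1):
+//
+//	TBSCertList ::= SEQUENCE {
+//	    version              Version OPTIONAL,
+//	    signature            AlgorithmIdentifier,
+//	    issuer               Name,
+//	    thisUpdate           Time,
+//	    nextUpdate           Time OPTIONAL,
+//	    revokedCertificates  SEQUENCE OF SEQUENCE { ... } OPTIONAL,
+//	    crlExtensions        [0] EXPLICIT Extensions OPTIONAL }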
+type TBSCertificateList struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0"` + Signature AlgorithmIdentifier + Issuer RDNSequence + ThisUpdate time.Time + NextUpdate time.Time `asn1:"optional"` + RevokedCertificates []RevokedCertificate `asn1:"optional"` + Extensions []Extension `asn1:"tag:0,optional,explicit"` +} + +// RevokedCertificate represents the ASN.1 structure of the same name. See RFC +// 5280, section 5.1. +type RevokedCertificate struct { + SerialNumber *big.Int + RevocationTime time.Time + Extensions []Extension `asn1:"optional"` +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/sec1.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/sec1.go new file mode 100644 index 00000000..33f376c0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/sec1.go @@ -0,0 +1,105 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "encoding/asn1" + "errors" + "fmt" + "math/big" +) + +const ecPrivKeyVersion = 1 + +// ecPrivateKey reflects an ASN.1 Elliptic Curve Private Key Structure. +// References: +// RFC 5915 +// SEC1 - http://www.secg.org/sec1-v2.pdf +// Per RFC 5915 the NamedCurveOID is marked as ASN.1 OPTIONAL, however in +// most cases it is not. +type ecPrivateKey struct { + Version int + PrivateKey []byte + NamedCurveOID asn1.ObjectIdentifier `asn1:"optional,explicit,tag:0"` + PublicKey asn1.BitString `asn1:"optional,explicit,tag:1"` +} + +// ParseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. +func ParseECPrivateKey(der []byte) (*ecdsa.PrivateKey, error) { + return parseECPrivateKey(nil, der) +} + +// MarshalECPrivateKey marshals an EC private key into ASN.1, DER format. +func MarshalECPrivateKey(key *ecdsa.PrivateKey) ([]byte, error) { + oid, ok := oidFromNamedCurve(key.Curve) + if !ok { + return nil, errors.New("x509: unknown elliptic curve") + } + + privateKeyBytes := key.D.Bytes() + paddedPrivateKey := make([]byte, (key.Curve.Params().N.BitLen()+7)/8) + copy(paddedPrivateKey[len(paddedPrivateKey)-len(privateKeyBytes):], privateKeyBytes) + + return asn1.Marshal(ecPrivateKey{ + Version: 1, + PrivateKey: paddedPrivateKey, + NamedCurveOID: oid, + PublicKey: asn1.BitString{Bytes: elliptic.Marshal(key.Curve, key.X, key.Y)}, + }) +} + +// parseECPrivateKey parses an ASN.1 Elliptic Curve Private Key Structure. +// The OID for the named curve may be provided from another source (such as +// the PKCS8 container) - if it is provided then use this instead of the OID +// that may exist in the EC private key structure. 
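+//
+// A sketch of the two intended call shapes (curveOID and der are assumed to
+// come from the caller, e.g. a PKCS8 parser):
+//
+//	key, err := parseECPrivateKey(&curveOID, der) // curve OID supplied by the container
+//	key, err = parseECPrivateKey(nil, der)        // curve OID taken from the structure itself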
+func parseECPrivateKey(namedCurveOID *asn1.ObjectIdentifier, der []byte) (key *ecdsa.PrivateKey, err error) {
+	var privKey ecPrivateKey
+	if _, err := asn1.Unmarshal(der, &privKey); err != nil {
+		return nil, errors.New("x509: failed to parse EC private key: " + err.Error())
+	}
+	if privKey.Version != ecPrivKeyVersion {
+		return nil, fmt.Errorf("x509: unknown EC private key version %d", privKey.Version)
+	}
+
+	var curve elliptic.Curve
+	if namedCurveOID != nil {
+		curve = namedCurveFromOID(*namedCurveOID)
+	} else {
+		curve = namedCurveFromOID(privKey.NamedCurveOID)
+	}
+	if curve == nil {
+		return nil, errors.New("x509: unknown elliptic curve")
+	}
+
+	k := new(big.Int).SetBytes(privKey.PrivateKey)
+	curveOrder := curve.Params().N
+	if k.Cmp(curveOrder) >= 0 {
+		return nil, errors.New("x509: invalid elliptic curve private key value")
+	}
+	priv := new(ecdsa.PrivateKey)
+	priv.Curve = curve
+	priv.D = k
+
+	privateKey := make([]byte, (curveOrder.BitLen()+7)/8)
+
+	// Some private keys have leading zero padding. This is invalid
+	// according to [SEC1], but this code will ignore it.
+	for len(privKey.PrivateKey) > len(privateKey) {
+		if privKey.PrivateKey[0] != 0 {
+			return nil, errors.New("x509: invalid private key length")
+		}
+		privKey.PrivateKey = privKey.PrivateKey[1:]
+	}
+
+	// Some private keys remove all leading zeros, this is also invalid
+	// according to [SEC1] but since OpenSSL used to do this, we ignore
+	// this too.
+	copy(privateKey[len(privateKey)-len(privKey.PrivateKey):], privKey.PrivateKey)
+	priv.X, priv.Y = curve.ScalarBaseMult(privateKey)
+
+	return priv, nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/tor_service_descriptor.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/tor_service_descriptor.go
new file mode 100644
index 00000000..366bd959
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/tor_service_descriptor.go
@@ -0,0 +1,158 @@
+package x509
+
+import (
+	"encoding/asn1"
+	"github.com/zmap/zcrypto/x509/pkix"
+)
+
+var (
+	// oidBRTorServiceDescriptor is the assigned OID for the CAB Forum Tor Service
+	// Descriptor Hash extension (see EV Guidelines Appendix F)
+	oidBRTorServiceDescriptor = asn1.ObjectIdentifier{2, 23, 140, 1, 31}
+)
+
+// TorServiceDescriptorHash is a structure corresponding to the
+// TorServiceDescriptorHash SEQUENCE described in Appendix F ("Issuance of
+// Certificates for .onion Domain Names").
+//
+// Each TorServiceDescriptorHash holds an onion URI (a utf8 string with the
+// .onion address that was validated), a hash algorithm name (computed based on
+// the pkix.AlgorithmIdentifier in the TorServiceDescriptorHash), the hash bytes
+// (computed over the DER encoding of the ASN.1 SubjectPublicKey of the .onion
+// service), and the number of bits in the hash bytes.
+type TorServiceDescriptorHash struct {
+	Onion         string                   `json:"onion"`
+	Algorithm     pkix.AlgorithmIdentifier `json:"-"`
+	AlgorithmName string                   `json:"algorithm_name"`
+	Hash          CertificateFingerprint   `json:"hash"`
+	HashBits      int                      `json:"hash_bits"`
+}
+
+// parseTorServiceDescriptorSyntax parses the given pkix.Extension (assumed to
+// have OID == oidBRTorServiceDescriptor) and returns a slice of parsed
+// TorServiceDescriptorHash objects, or an error. An error will be returned if
+// there are any structural errors related to the ASN.1 content (wrong tags,
+// trailing data, missing fields, etc).
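+//
+// A hedged usage sketch, assuming ext was taken from Certificate.Extensions:
+//
+//	if ext.Id.Equal(oidBRTorServiceDescriptor) {
+//		descriptors, err := parseTorServiceDescriptorSyntax(ext)
+//		// descriptors is nil whenever err != nil
+//	}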
+func parseTorServiceDescriptorSyntax(ext pkix.Extension) ([]*TorServiceDescriptorHash, error) { + // TorServiceDescriptorSyntax ::= + // SEQUENCE ( 1..MAX ) of TorServiceDescriptorHash + var seq asn1.RawValue + rest, err := asn1.Unmarshal(ext.Value, &seq) + if err != nil { + return nil, asn1.SyntaxError{ + Msg: "unable to unmarshal outer TorServiceDescriptor SEQUENCE", + } + } + if len(rest) != 0 { + return nil, asn1.SyntaxError{ + Msg: "trailing data after outer TorServiceDescriptor SEQUENCE", + } + } + if seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal || !seq.IsCompound { + return nil, asn1.SyntaxError{ + Msg: "invalid outer TorServiceDescriptor SEQUENCE", + } + } + + var descriptors []*TorServiceDescriptorHash + rest = seq.Bytes + for len(rest) > 0 { + var descriptor *TorServiceDescriptorHash + descriptor, rest, err = parseTorServiceDescriptorHash(rest) + if err != nil { + return nil, err + } + descriptors = append(descriptors, descriptor) + } + return descriptors, nil +} + +// parseTorServiceDescriptorHash unmarshals a SEQUENCE from the provided data +// and parses a TorServiceDescriptorHash using the data contained in the +// sequence. The TorServiceDescriptorHash object and the remaining data are +// returned if no error occurs. +func parseTorServiceDescriptorHash(data []byte) (*TorServiceDescriptorHash, []byte, error) { + // TorServiceDescriptorHash:: = SEQUENCE { + // onionURI UTF8String + // algorithm AlgorithmIdentifier + // subjectPublicKeyHash BIT STRING + // } + var outerSeq asn1.RawValue + var err error + data, err = asn1.Unmarshal(data, &outerSeq) + if err != nil { + return nil, data, asn1.SyntaxError{ + Msg: "error unmarshaling TorServiceDescriptorHash SEQUENCE", + } + } + if outerSeq.Tag != asn1.TagSequence || + outerSeq.Class != asn1.ClassUniversal || + !outerSeq.IsCompound { + return nil, data, asn1.SyntaxError{ + Msg: "TorServiceDescriptorHash missing compound SEQUENCE tag", + } + } + fieldData := outerSeq.Bytes + + // Unmarshal and verify the structure of the onionURI UTF8String field. + var rawOnionURI asn1.RawValue + fieldData, err = asn1.Unmarshal(fieldData, &rawOnionURI) + if err != nil { + return nil, data, asn1.SyntaxError{ + Msg: "error unmarshaling TorServiceDescriptorHash onionURI", + } + } + if rawOnionURI.Tag != asn1.TagUTF8String || + rawOnionURI.Class != asn1.ClassUniversal || + rawOnionURI.IsCompound { + return nil, data, asn1.SyntaxError{ + Msg: "TorServiceDescriptorHash missing non-compound UTF8String tag", + } + } + + // Unmarshal and verify the structure of the algorithm UTF8String field. + var algorithm pkix.AlgorithmIdentifier + fieldData, err = asn1.Unmarshal(fieldData, &algorithm) + if err != nil { + return nil, nil, asn1.SyntaxError{ + Msg: "error unmarshaling TorServiceDescriptorHash algorithm", + } + } + + var algorithmName string + if algorithm.Algorithm.Equal(oidSHA256) { + algorithmName = "SHA256" + } else if algorithm.Algorithm.Equal(oidSHA384) { + algorithmName = "SHA384" + } else if algorithm.Algorithm.Equal(oidSHA512) { + algorithmName = "SHA512" + } else { + algorithmName = "Unknown" + } + + // Unmarshal and verify the structure of the Subject Public Key Hash BitString + // field. + var spkh asn1.BitString + fieldData, err = asn1.Unmarshal(fieldData, &spkh) + if err != nil { + return nil, data, asn1.SyntaxError{ + Msg: "error unmarshaling TorServiceDescriptorHash Hash", + } + } + + // There should be no trailing data after the TorServiceDescriptorHash + // SEQUENCE. 
+ if len(fieldData) > 0 { + return nil, data, asn1.SyntaxError{ + Msg: "trailing data after TorServiceDescriptorHash", + } + } + + return &TorServiceDescriptorHash{ + Onion: string(rawOnionURI.Bytes), + Algorithm: algorithm, + AlgorithmName: algorithmName, + HashBits: spkh.BitLength, + Hash: CertificateFingerprint(spkh.Bytes), + }, data, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/validation.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/validation.go new file mode 100644 index 00000000..e582e54f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/validation.go @@ -0,0 +1,60 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import "time" + +// Validation stores different validation levels for a given certificate +type Validation struct { + BrowserTrusted bool `json:"browser_trusted"` + BrowserError string `json:"browser_error,omitempty"` + MatchesDomain bool `json:"matches_domain,omitempty"` + Domain string `json:"-"` +} + +// ValidateWithStupidDetail fills out a Validation struct given a leaf +// certificate and intermediates / roots. If opts.DNSName is set, then it will +// also check if the domain matches. +// +// Deprecated: Use verifier.Verify() instead. +func (c *Certificate) ValidateWithStupidDetail(opts VerifyOptions) (chains []CertificateChain, validation *Validation, err error) { + + // Manually set the time, so that all verifies we do get the same time + if opts.CurrentTime.IsZero() { + opts.CurrentTime = time.Now() + } + + // XXX: Don't pass a KeyUsage to the Verify API + opts.KeyUsages = nil + domain := opts.DNSName + opts.DNSName = "" + + out := new(Validation) + out.Domain = domain + + if chains, _, _, err = c.Verify(opts); err != nil { + out.BrowserError = err.Error() + } else { + out.BrowserTrusted = true + } + + if domain != "" { + nameErr := c.VerifyHostname(domain) + if nameErr != nil { + out.MatchesDomain = false + } else { + out.MatchesDomain = true + } + + // Make sure we return an error if either chain building or hostname + // verification fails. + if err == nil && nameErr != nil { + err = nameErr + } + } + validation = out + + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/verify.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/verify.go new file mode 100644 index 00000000..450f985c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/verify.go @@ -0,0 +1,635 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package x509 + +import ( + "errors" + "fmt" + "net" + "strings" + "time" + "unicode/utf8" +) + +type InvalidReason int + +const ( + // NotAuthorizedToSign results when a certificate is signed by another + // which isn't marked as a CA certificate. + NotAuthorizedToSign InvalidReason = iota + + // Expired results when a certificate has expired, based on the time + // given in the VerifyOptions. + Expired + + // CANotAuthorizedForThisName results when an intermediate or root + // certificate has a name constraint which doesn't include the name + // being checked. 
+ CANotAuthorizedForThisName + + // CANotAuthorizedForThisEmail results when an intermediate or root + // certificate has a name constraint which doesn't include the email + // being checked. + CANotAuthorizedForThisEmail + + // CANotAuthorizedForThisIP results when an intermediate or root + // certificate has a name constraint which doesn't include the IP + // being checked. + CANotAuthorizedForThisIP + + // CANotAuthorizedForThisDirectory results when an intermediate or root + // certificate has a name constraint which doesn't include the directory + // being checked. + CANotAuthorizedForThisDirectory + + // TooManyIntermediates results when a path length constraint is + // violated. + TooManyIntermediates + + // IncompatibleUsage results when the certificate's key usage indicates + // that it may only be used for a different purpose. + IncompatibleUsage + + // NameMismatch results when the subject name of a parent certificate + // does not match the issuer name in the child. + NameMismatch + + // NeverValid results when the certificate could never have been valid due to + // some date-related issue, e.g. NotBefore > NotAfter. + NeverValid + + // IsSelfSigned results when the certificate is self-signed and not a trusted + // root. + IsSelfSigned +) + +// CertificateInvalidError results when an odd error occurs. Users of this +// library probably want to handle all these errors uniformly. +type CertificateInvalidError struct { + Cert *Certificate + Reason InvalidReason +} + +func (e CertificateInvalidError) Error() string { + switch e.Reason { + case NotAuthorizedToSign: + return "x509: certificate is not authorized to sign other certificates" + case Expired: + return "x509: certificate has expired or is not yet valid" + case CANotAuthorizedForThisName: + return "x509: a root or intermediate certificate is not authorized to sign in this domain" + case CANotAuthorizedForThisEmail: + return "x509: a root or intermediate certificate is not authorized to sign this email address" + case CANotAuthorizedForThisIP: + return "x509: a root or intermediate certificate is not authorized to sign this IP address" + case CANotAuthorizedForThisDirectory: + return "x509: a root or intermediate certificate is not authorized to sign in this directory" + case TooManyIntermediates: + return "x509: too many intermediates for path length constraint" + case IncompatibleUsage: + return "x509: certificate specifies an incompatible key usage" + case NameMismatch: + return "x509: issuer name does not match subject from issuing certificate" + case NeverValid: + return "x509: certificate will never be valid" + } + return "x509: unknown error" +} + +// HostnameError results when the set of authorized names doesn't match the +// requested name. 
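+//
+// Its Error method names the identities the certificate does cover, e.g.
+// (illustrative):
+//
+//	x509: certificate is valid for a.example.com, b.example.com, not c.example.com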
+type HostnameError struct { + Certificate *Certificate + Host string +} + +func (h HostnameError) Error() string { + c := h.Certificate + + var valid string + if ip := net.ParseIP(h.Host); ip != nil { + // Trying to validate an IP + if len(c.IPAddresses) == 0 { + return "x509: cannot validate certificate for " + h.Host + " because it doesn't contain any IP SANs" + } + for _, san := range c.IPAddresses { + if len(valid) > 0 { + valid += ", " + } + valid += san.String() + } + } else { + if c.hasSANExtension() { + valid = strings.Join(c.DNSNames, ", ") + } else { + valid = c.Subject.CommonName + } + } + + if len(valid) == 0 { + return "x509: certificate is not valid for any names, but wanted to match " + h.Host + } + return "x509: certificate is valid for " + valid + ", not " + h.Host +} + +// UnknownAuthorityError results when the certificate issuer is unknown +type UnknownAuthorityError struct { + Cert *Certificate + // hintErr contains an error that may be helpful in determining why an + // authority wasn't found. + hintErr error + // hintCert contains a possible authority certificate that was rejected + // because of the error in hintErr. + hintCert *Certificate +} + +func (e UnknownAuthorityError) Error() string { + s := "x509: certificate signed by unknown authority" + if e.hintErr != nil { + certName := e.hintCert.Subject.CommonName + if len(certName) == 0 { + if len(e.hintCert.Subject.Organization) > 0 { + certName = e.hintCert.Subject.Organization[0] + } else { + certName = "serial:" + e.hintCert.SerialNumber.String() + } + } + s += fmt.Sprintf(" (possibly because of %q while trying to verify candidate authority certificate %q)", e.hintErr, certName) + } + return s +} + +// SystemRootsError results when we fail to load the system root certificates. +type SystemRootsError struct { + Err error +} + +func (se SystemRootsError) Error() string { + msg := "x509: failed to load system roots and no roots provided" + if se.Err != nil { + return msg + "; " + se.Err.Error() + } + return msg +} + +// errNotParsed is returned when a certificate without ASN.1 contents is +// verified. Platform-specific verification needs the ASN.1 contents. +var errNotParsed = errors.New("x509: missing ASN.1 contents; use ParseCertificate") + +const maxIntermediateCount = 10 + +// VerifyOptions contains parameters for Certificate.Verify. It's a structure +// because other PKIX verification APIs have ended up needing many options. +type VerifyOptions struct { + DNSName string + EmailAddress string + IPAddress net.IP + + Intermediates *CertPool + Roots *CertPool // if nil, the system roots are used + CurrentTime time.Time // if zero, the current time is used + // KeyUsage specifies which Extended Key Usage values are acceptable. + // An empty list means ExtKeyUsageServerAuth. Key usage is considered a + // constraint down the chain which mirrors Windows CryptoAPI behaviour, + // but not the spec. To accept any key usage, include ExtKeyUsageAny. + KeyUsages []ExtKeyUsage +} + +const ( + leafCertificate = iota + intermediateCertificate + rootCertificate +) + +func matchNameConstraint(domain, constraint string) bool { + // The meaning of zero length constraints is not specified, but this + // code follows NSS and accepts them as matching everything. 
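+	// For example, the constraint ".example.com" matches "foo.example.com"
+	// but not "example.com" itself, while the constraint "example.com"
+	// matches both "example.com" and "foo.example.com".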
+ if len(constraint) == 0 { + return true + } + + if len(domain) < len(constraint) { + return false + } + + prefixLen := len(domain) - len(constraint) + if !strings.EqualFold(domain[prefixLen:], constraint) { + return false + } + + if prefixLen == 0 { + return true + } + + isSubdomain := domain[prefixLen-1] == '.' + constraintHasLeadingDot := constraint[0] == '.' + return isSubdomain != constraintHasLeadingDot +} + +// NOTE: the stdlib function does many more checks and is preferable. For backwards compatibility using this version + +// isValid performs validity checks on the c. It will never return a +// date-related error. +func (c *Certificate) isValid(certType CertificateType, currentChain CertificateChain) error { + + // KeyUsage status flags are ignored. From Engineering Security, Peter + // Gutmann: A European government CA marked its signing certificates as + // being valid for encryption only, but no-one noticed. Another + // European CA marked its signature keys as not being valid for + // signatures. A different CA marked its own trusted root certificate + // as being invalid for certificate signing. Another national CA + // distributed a certificate to be used to encrypt data for the + // country’s tax authority that was marked as only being usable for + // digital signatures but not for encryption. Yet another CA reversed + // the order of the bit flags in the keyUsage due to confusion over + // encoding endianness, essentially setting a random keyUsage in + // certificates that it issued. Another CA created a self-invalidating + // certificate by adding a certificate policy statement stipulating + // that the certificate had to be used strictly as specified in the + // keyUsage, and a keyUsage containing a flag indicating that the RSA + // encryption key could only be used for Diffie-Hellman key agreement. + + if certType == CertificateTypeIntermediate && (!c.BasicConstraintsValid || !c.IsCA) { + return CertificateInvalidError{c, NotAuthorizedToSign} + } + + if c.BasicConstraintsValid && c.MaxPathLen >= 0 { + numIntermediates := len(currentChain) - 1 + if numIntermediates > c.MaxPathLen { + return CertificateInvalidError{c, TooManyIntermediates} + } + } + + if len(currentChain) > maxIntermediateCount { + return CertificateInvalidError{c, TooManyIntermediates} + } + + return nil +} + +// Verify attempts to verify c by building one or more chains from c to a +// certificate in opts.Roots, using certificates in opts.Intermediates if +// needed. If successful, it returns one or more chains where the first +// element of the chain is c and the last element is from opts.Roots. +// +// If opts.Roots is nil and system roots are unavailable the returned error +// will be of type SystemRootsError. +// +// WARNING: this doesn't do any revocation checking. +func (c *Certificate) Verify(opts VerifyOptions) (current, expired, never []CertificateChain, err error) { + + if opts.Roots == nil { + err = SystemRootsError{} + return + } + + err = c.isValid(CertificateTypeLeaf, nil) + if err != nil { + return + } + + candidateChains, err := c.buildChains(make(map[int][]CertificateChain), []*Certificate{c}, &opts) + if err != nil { + return + } + + keyUsages := opts.KeyUsages + if len(keyUsages) == 0 { + keyUsages = []ExtKeyUsage{ExtKeyUsageServerAuth} + } + + // If any key usage is acceptable then we're done. 
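+	// That is, a caller that includes ExtKeyUsageAny in opts.KeyUsages
+	// accepts every candidate chain regardless of the extended key usages
+	// asserted along it.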
+ hasKeyUsageAny := false + for _, usage := range keyUsages { + if usage == ExtKeyUsageAny { + hasKeyUsageAny = true + break + } + } + + var chains []CertificateChain + if hasKeyUsageAny { + chains = candidateChains + } else { + for _, candidate := range candidateChains { + if checkChainForKeyUsage(candidate, keyUsages) { + chains = append(chains, candidate) + } + } + } + + if len(chains) == 0 { + err = CertificateInvalidError{c, IncompatibleUsage} + return + } + + current, expired, never = FilterByDate(chains, opts.CurrentTime) + if len(current) == 0 { + if len(expired) > 0 { + err = CertificateInvalidError{c, Expired} + } else if len(never) > 0 { + err = CertificateInvalidError{c, NeverValid} + } + return + } + + if len(opts.DNSName) > 0 { + err = c.VerifyHostname(opts.DNSName) + if err != nil { + return + } + } + return +} + +func appendToFreshChain(chain []*Certificate, cert *Certificate) []*Certificate { + n := make([]*Certificate, len(chain)+1) + copy(n, chain) + n[len(chain)] = cert + return n +} + +// buildChains returns all chains of length < maxIntermediateCount. Chains begin +// the certificate being validated (chain[0] = c), and end at a root. It +// enforces that all intermediates can sign certificates, and checks signatures. +// It does not enforce expiration. +func (c *Certificate) buildChains(cache map[int][]CertificateChain, currentChain CertificateChain, opts *VerifyOptions) (chains []CertificateChain, err error) { + + // If the certificate being validated is a root, add the chain of length one + // containing just the root. Only do this on the first call to buildChains, + // when the len(currentChain) = 1. + if len(currentChain) == 1 && opts.Roots.Contains(c) { + chains = append(chains, CertificateChain{c}) + } + + if len(chains) == 0 && c.SelfSigned { + err = CertificateInvalidError{c, IsSelfSigned} + } + + // Find roots that signed c and have matching SKID/AKID and Subject/Issuer. + possibleRoots, failedRoot, rootErr := opts.Roots.findVerifiedParents(c) + + // If any roots are parents of c, create new chain for each one of them. + for _, rootNum := range possibleRoots { + root := opts.Roots.certs[rootNum] + err = root.isValid(CertificateTypeRoot, currentChain) + if err != nil { + continue + } + if !currentChain.CertificateInChain(root) { + chains = append(chains, currentChain.AppendToFreshChain(root)) + } + } + + // The root chains of length N+1 are now "done". Now we'll look for any + // intermediates that issue this certificate, meaning that any chain to a root + // through these intermediates is at least length N+2. + possibleIntermediates, failedIntermediate, intermediateErr := opts.Intermediates.findVerifiedParents(c) + + for _, intermediateNum := range possibleIntermediates { + intermediate := opts.Intermediates.certs[intermediateNum] + if opts.Roots.Contains(intermediate) { + continue + } + if currentChain.CertificateSubjectAndKeyInChain(intermediate) { + continue + } + err = intermediate.isValid(CertificateTypeIntermediate, currentChain) + if err != nil { + continue + } + + // We don't want to add any certificate to chains that doesn't somehow get + // to a root. We don't know if all chains through the intermediates will end + // at a root, so we slice off the back half of the chain and try to build + // that part separately. 
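+		// The sub-chains are memoized in cache, keyed by the
+		// intermediate's index in the pool, so a suffix shared by
+		// several candidate chains is only built once.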
+ childChains, ok := cache[intermediateNum] + if !ok { + childChains, err = intermediate.buildChains(cache, currentChain.AppendToFreshChain(intermediate), opts) + cache[intermediateNum] = childChains + } + chains = append(chains, childChains...) + } + + if len(chains) > 0 { + err = nil + } + + if len(chains) == 0 && err == nil { + hintErr := rootErr + hintCert := failedRoot + if hintErr == nil { + hintErr = intermediateErr + hintCert = failedIntermediate + } + err = UnknownAuthorityError{c, hintErr, hintCert} + } + + return +} + +func matchHostnames(pattern, host string) bool { + host = strings.TrimSuffix(host, ".") + pattern = strings.TrimSuffix(pattern, ".") + + if len(pattern) == 0 || len(host) == 0 { + return false + } + + patternParts := strings.Split(pattern, ".") + hostParts := strings.Split(host, ".") + + if len(patternParts) != len(hostParts) { + return false + } + + for i, patternPart := range patternParts { + if /*i == 0 &&*/ patternPart == "*" { + continue + } + if patternPart != hostParts[i] { + return false + } + } + + return true +} + +// toLowerCaseASCII returns a lower-case version of in. See RFC 6125 6.4.1. We use +// an explicitly ASCII function to avoid any sharp corners resulting from +// performing Unicode operations on DNS labels. +func toLowerCaseASCII(in string) string { + // If the string is already lower-case then there's nothing to do. + isAlreadyLowerCase := true + for _, c := range in { + if c == utf8.RuneError { + // If we get a UTF-8 error then there might be + // upper-case ASCII bytes in the invalid sequence. + isAlreadyLowerCase = false + break + } + if 'A' <= c && c <= 'Z' { + isAlreadyLowerCase = false + break + } + } + + if isAlreadyLowerCase { + return in + } + + out := []byte(in) + for i, c := range out { + if 'A' <= c && c <= 'Z' { + out[i] += 'a' - 'A' + } + } + return string(out) +} + +// VerifyHostname returns nil if c is a valid certificate for the named host. +// Otherwise it returns an error describing the mismatch. +func (c *Certificate) VerifyHostname(h string) error { + // IP addresses may be written in [ ]. + candidateIP := h + if len(h) >= 3 && h[0] == '[' && h[len(h)-1] == ']' { + candidateIP = h[1 : len(h)-1] + } + if ip := net.ParseIP(candidateIP); ip != nil { + // We only match IP addresses against IP SANs. + // https://tools.ietf.org/html/rfc6125#appendix-B.2 + for _, candidate := range c.IPAddresses { + if ip.Equal(candidate) { + return nil + } + } + return HostnameError{c, candidateIP} + } + + lowered := toLowerCaseASCII(h) + + if c.hasSANExtension() { + for _, match := range c.DNSNames { + if matchHostnames(toLowerCaseASCII(match), lowered) { + return nil + } + } + // If Subject Alt Name is given, we ignore the common name. + } else if matchHostnames(toLowerCaseASCII(c.Subject.CommonName), lowered) { + return nil + } + + return HostnameError{c, h} +} + +func checkChainForKeyUsage(chain []*Certificate, keyUsages []ExtKeyUsage) bool { + usages := make([]ExtKeyUsage, len(keyUsages)) + copy(usages, keyUsages) + + if len(chain) == 0 { + return false + } + + usagesRemaining := len(usages) + + // We walk down the list and cross out any usages that aren't supported + // by each certificate. If we cross out all the usages, then the chain + // is unacceptable. + +NextCert: + for i := len(chain) - 1; i >= 0; i-- { + cert := chain[i] + if len(cert.ExtKeyUsage) == 0 && len(cert.UnknownExtKeyUsage) == 0 { + // The certificate doesn't have any extended key usage specified. 
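+			// An absent EKU extension places no constraint on the
+			// chain, so this certificate can be skipped.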
+ continue + } + + for _, usage := range cert.ExtKeyUsage { + if usage == ExtKeyUsageAny { + // The certificate is explicitly good for any usage. + continue NextCert + } + } + + const invalidUsage ExtKeyUsage = -1 + + NextRequestedUsage: + for i, requestedUsage := range usages { + if requestedUsage == invalidUsage { + continue + } + + for _, usage := range cert.ExtKeyUsage { + if requestedUsage == usage { + continue NextRequestedUsage + } else if requestedUsage == ExtKeyUsageServerAuth && + (usage == ExtKeyUsageNetscapeServerGatedCrypto || + usage == ExtKeyUsageMicrosoftServerGatedCrypto) { + // In order to support COMODO + // certificate chains, we have to + // accept Netscape or Microsoft SGC + // usages as equal to ServerAuth. + continue NextRequestedUsage + } + } + + usages[i] = invalidUsage + usagesRemaining-- + if usagesRemaining == 0 { + return false + } + } + } + + return true +} + +// earlier returns the earlier of a and b +func earlier(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +// later returns the later of a and b +func later(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} + +// check expirations divides chains into a set of disjoint chains, containing +// current chains valid now, expired chains that were valid at some point, and +// the set of chains that were never valid. +func FilterByDate(chains []CertificateChain, now time.Time) (current, expired, never []CertificateChain) { + for _, chain := range chains { + if len(chain) == 0 { + continue + } + leaf := chain[0] + lowerBound := leaf.NotBefore + upperBound := leaf.NotAfter + for _, c := range chain[1:] { + lowerBound = later(lowerBound, c.NotBefore) + upperBound = earlier(upperBound, c.NotAfter) + } + valid := lowerBound.Before(now) && upperBound.After(now) + wasValid := lowerBound.Before(upperBound) + if valid && !wasValid { + // Math/logic tells us this is impossible. + panic("valid && !wasValid should not be possible") + } + if valid { + current = append(current, chain) + } else if wasValid { + expired = append(expired, chain) + } else { + never = append(never, chain) + } + } + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/x509.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/x509.go new file mode 100644 index 00000000..a3b75c74 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zcrypto/x509/x509.go @@ -0,0 +1,2930 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package x509 parses X.509-encoded keys and certificates. +// +// Originally based on the go/crypto/x509 standard library, +// this package has now diverged enough that it is no longer +// updated with direct correspondence to new go releases. + +package x509 + +import ( + // all of the hash libraries need to be imported for side-effects, + // so that crypto.RegisterHash is called + _ "crypto/md5" + "crypto/sha256" + _ "crypto/sha512" + "io" + "strings" + + "bytes" + "crypto" + "crypto/dsa" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "math/big" + "net" + "strconv" + "time" + + "github.com/weppos/publicsuffix-go/publicsuffix" + "github.com/zmap/zcrypto/x509/ct" + "github.com/zmap/zcrypto/x509/pkix" +) + +// pkixPublicKey reflects a PKIX public key structure. See SubjectPublicKeyInfo +// in RFC 3280. 
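+//
+// For reference, the ASN.1 it mirrors:
+//
+//	SubjectPublicKeyInfo ::= SEQUENCE {
+//	    algorithm        AlgorithmIdentifier,
+//	    subjectPublicKey BIT STRING }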
+type pkixPublicKey struct { + Algo pkix.AlgorithmIdentifier + BitString asn1.BitString +} + +// ParsePKIXPublicKey parses a DER encoded public key. These values are +// typically found in PEM blocks with "BEGIN PUBLIC KEY". +// +// Supported key types include RSA, DSA, and ECDSA. Unknown key +// types result in an error. +// +// On success, pub will be of type *rsa.PublicKey, *dsa.PublicKey, +// or *ecdsa.PublicKey. +func ParsePKIXPublicKey(derBytes []byte) (pub interface{}, err error) { + var pki publicKeyInfo + if rest, err := asn1.Unmarshal(derBytes, &pki); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after ASN.1 of public-key") + } + algo := getPublicKeyAlgorithmFromOID(pki.Algorithm.Algorithm) + if algo == UnknownPublicKeyAlgorithm { + return nil, errors.New("x509: unknown public key algorithm") + } + return parsePublicKey(algo, &pki) +} + +func marshalPublicKey(pub interface{}) (publicKeyBytes []byte, publicKeyAlgorithm pkix.AlgorithmIdentifier, err error) { + switch pub := pub.(type) { + case *rsa.PublicKey: + publicKeyBytes, err = asn1.Marshal(pkcs1PublicKey{ + N: pub.N, + E: pub.E, + }) + if err != nil { + return nil, pkix.AlgorithmIdentifier{}, err + } + publicKeyAlgorithm.Algorithm = oidPublicKeyRSA + // This is a NULL parameters value which is required by + // https://tools.ietf.org/html/rfc3279#section-2.3.1. + publicKeyAlgorithm.Parameters = asn1.NullRawValue + case *ecdsa.PublicKey: + publicKeyBytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) + oid, ok := oidFromNamedCurve(pub.Curve) + if !ok { + return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: unsupported elliptic curve") + } + publicKeyAlgorithm.Algorithm = oidPublicKeyECDSA + var paramBytes []byte + paramBytes, err = asn1.Marshal(oid) + if err != nil { + return + } + publicKeyAlgorithm.Parameters.FullBytes = paramBytes + case *AugmentedECDSA: + return marshalPublicKey(pub.Pub) + default: + return nil, pkix.AlgorithmIdentifier{}, errors.New("x509: only RSA and ECDSA public keys supported") + } + + return publicKeyBytes, publicKeyAlgorithm, nil +} + +// MarshalPKIXPublicKey serialises a public key to DER-encoded PKIX format. 
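+//
+// A minimal PEM round-trip sketch (priv is assumed to be an *ecdsa.PrivateKey
+// or *rsa.PrivateKey obtained elsewhere; pem is encoding/pem):
+//
+//	der, err := MarshalPKIXPublicKey(&priv.PublicKey)
+//	if err == nil {
+//		block := pem.EncodeToMemory(&pem.Block{Type: "PUBLIC KEY", Bytes: der})
+//		_ = block
+//	}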
+func MarshalPKIXPublicKey(pub interface{}) ([]byte, error) { + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + var err error + + if publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(pub); err != nil { + return nil, err + } + + pkix := pkixPublicKey{ + Algo: publicKeyAlgorithm, + BitString: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: 8 * len(publicKeyBytes), + }, + } + + ret, _ := asn1.Marshal(pkix) + return ret, nil +} + +// These structures reflect the ASN.1 structure of X.509 certificates.: + +type certificate struct { + Raw asn1.RawContent + TBSCertificate tbsCertificate + SignatureAlgorithm pkix.AlgorithmIdentifier + SignatureValue asn1.BitString +} + +type tbsCertificate struct { + Raw asn1.RawContent + Version int `asn1:"optional,explicit,default:0,tag:0"` + SerialNumber *big.Int + SignatureAlgorithm pkix.AlgorithmIdentifier + Issuer asn1.RawValue + Validity validity + Subject asn1.RawValue + PublicKey publicKeyInfo + UniqueId asn1.BitString `asn1:"optional,tag:1"` + SubjectUniqueId asn1.BitString `asn1:"optional,tag:2"` + Extensions []pkix.Extension `asn1:"optional,explicit,tag:3"` +} + +type dsaAlgorithmParameters struct { + P, Q, G *big.Int +} + +type dsaSignature struct { + R, S *big.Int +} + +type ecdsaSignature dsaSignature + +type AugmentedECDSA struct { + Pub *ecdsa.PublicKey + Raw asn1.BitString +} + +type validity struct { + NotBefore, NotAfter time.Time +} + +type publicKeyInfo struct { + Raw asn1.RawContent + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString +} + +// RFC 5280, 4.2.1.1 +type authKeyId struct { + Id []byte `asn1:"optional,tag:0"` +} + +type SignatureAlgorithmOID asn1.ObjectIdentifier + +type SignatureAlgorithm int + +const ( + UnknownSignatureAlgorithm SignatureAlgorithm = iota + MD2WithRSA + MD5WithRSA + SHA1WithRSA + SHA256WithRSA + SHA384WithRSA + SHA512WithRSA + DSAWithSHA1 + DSAWithSHA256 + ECDSAWithSHA1 + ECDSAWithSHA256 + ECDSAWithSHA384 + ECDSAWithSHA512 + SHA256WithRSAPSS + SHA384WithRSAPSS + SHA512WithRSAPSS +) + +func (algo SignatureAlgorithm) isRSAPSS() bool { + switch algo { + case SHA256WithRSAPSS, SHA384WithRSAPSS, SHA512WithRSAPSS: + return true + default: + return false + } +} + +var algoName = [...]string{ + MD2WithRSA: "MD2-RSA", + MD5WithRSA: "MD5-RSA", + SHA1WithRSA: "SHA1-RSA", + SHA256WithRSA: "SHA256-RSA", + SHA384WithRSA: "SHA384-RSA", + SHA512WithRSA: "SHA512-RSA", + SHA256WithRSAPSS: "SHA256-RSAPSS", + SHA384WithRSAPSS: "SHA384-RSAPSS", + SHA512WithRSAPSS: "SHA512-RSAPSS", + DSAWithSHA1: "DSA-SHA1", + DSAWithSHA256: "DSA-SHA256", + ECDSAWithSHA1: "ECDSA-SHA1", + ECDSAWithSHA256: "ECDSA-SHA256", + ECDSAWithSHA384: "ECDSA-SHA384", + ECDSAWithSHA512: "ECDSA-SHA512", +} + +func (algo SignatureAlgorithm) String() string { + if 0 < algo && int(algo) < len(algoName) { + return algoName[algo] + } + return strconv.Itoa(int(algo)) +} + +var keyAlgorithmNames = []string{ + "unknown_algorithm", + "RSA", + "DSA", + "ECDSA", +} + +type PublicKeyAlgorithm int + +const ( + UnknownPublicKeyAlgorithm PublicKeyAlgorithm = iota + RSA + DSA + ECDSA + total_key_algorithms +) + +// OIDs for signature algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 1 } +// +// +// RFC 3279 2.2.1 RSA Signature Algorithms +// +// md2WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 2 } +// +// md5WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 4 } +// +// sha-1WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 5 } +// +// dsaWithSha1 OBJECT IDENTIFIER ::= 
{ +// iso(1) member-body(2) us(840) x9-57(10040) x9cm(4) 3 } +// +// RFC 3279 2.2.3 ECDSA Signature Algorithm +// +// ecdsa-with-SHA1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-x962(10045) +// signatures(4) ecdsa-with-SHA1(1)} +// +// +// RFC 4055 5 PKCS #1 Version 1.5 +// +// sha256WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 11 } +// +// sha384WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 12 } +// +// sha512WithRSAEncryption OBJECT IDENTIFIER ::= { pkcs-1 13 } +// +// +// RFC 5758 3.1 DSA Signature Algorithms +// +// dsaWithSha256 OBJECT IDENTIFIER ::= { +// joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) +// csor(3) algorithms(4) id-dsa-with-sha2(3) 2} +// +// RFC 5758 3.2 ECDSA Signature Algorithm +// +// ecdsa-with-SHA256 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 2 } +// +// ecdsa-with-SHA384 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 3 } +// +// ecdsa-with-SHA512 OBJECT IDENTIFIER ::= { iso(1) member-body(2) +// us(840) ansi-X9-62(10045) signatures(4) ecdsa-with-SHA2(3) 4 } + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureRSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} + + oidSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + oidSHA384 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + oidSHA512 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + + oidMGF1 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 8} + + // oidISOSignatureSHA1WithRSA means the same as oidSignatureSHA1WithRSA + // but it's specified by ISO. Microsoft's makecert.exe has been known + // to produce certificates with this OID. 
+ oidISOSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 3, 14, 3, 2, 29} +) + +var signatureAlgorithmDetails = []struct { + algo SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo PublicKeyAlgorithm + hash crypto.Hash +}{ + {MD2WithRSA, oidSignatureMD2WithRSA, RSA, crypto.Hash(0) /* no value for MD2 */}, + {MD5WithRSA, oidSignatureMD5WithRSA, RSA, crypto.MD5}, + {SHA1WithRSA, oidSignatureSHA1WithRSA, RSA, crypto.SHA1}, + {SHA1WithRSA, oidISOSignatureSHA1WithRSA, RSA, crypto.SHA1}, + {SHA256WithRSA, oidSignatureSHA256WithRSA, RSA, crypto.SHA256}, + {SHA384WithRSA, oidSignatureSHA384WithRSA, RSA, crypto.SHA384}, + {SHA512WithRSA, oidSignatureSHA512WithRSA, RSA, crypto.SHA512}, + {SHA256WithRSAPSS, oidSignatureRSAPSS, RSA, crypto.SHA256}, + {SHA384WithRSAPSS, oidSignatureRSAPSS, RSA, crypto.SHA384}, + {SHA512WithRSAPSS, oidSignatureRSAPSS, RSA, crypto.SHA512}, + {DSAWithSHA1, oidSignatureDSAWithSHA1, DSA, crypto.SHA1}, + {DSAWithSHA256, oidSignatureDSAWithSHA256, DSA, crypto.SHA256}, + {ECDSAWithSHA1, oidSignatureECDSAWithSHA1, ECDSA, crypto.SHA1}, + {ECDSAWithSHA256, oidSignatureECDSAWithSHA256, ECDSA, crypto.SHA256}, + {ECDSAWithSHA384, oidSignatureECDSAWithSHA384, ECDSA, crypto.SHA384}, + {ECDSAWithSHA512, oidSignatureECDSAWithSHA512, ECDSA, crypto.SHA512}, +} + +// pssParameters reflects the parameters in an AlgorithmIdentifier that +// specifies RSA PSS. See https://tools.ietf.org/html/rfc3447#appendix-A.2.3 +type pssParameters struct { + // The following three fields are not marked as + // optional because the default values specify SHA-1, + // which is no longer suitable for use in signatures. + Hash pkix.AlgorithmIdentifier `asn1:"explicit,tag:0"` + MGF pkix.AlgorithmIdentifier `asn1:"explicit,tag:1"` + SaltLength int `asn1:"explicit,tag:2"` + TrailerField int `asn1:"optional,explicit,tag:3,default:1"` +} + +// rsaPSSParameters returns an asn1.RawValue suitable for use as the Parameters +// in an AlgorithmIdentifier that specifies RSA PSS. +func rsaPSSParameters(hashFunc crypto.Hash) asn1.RawValue { + var hashOID asn1.ObjectIdentifier + + switch hashFunc { + case crypto.SHA256: + hashOID = oidSHA256 + case crypto.SHA384: + hashOID = oidSHA384 + case crypto.SHA512: + hashOID = oidSHA512 + } + + params := pssParameters{ + Hash: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.NullRawValue, + }, + MGF: pkix.AlgorithmIdentifier{ + Algorithm: oidMGF1, + }, + SaltLength: hashFunc.Size(), + TrailerField: 1, + } + + mgf1Params := pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.NullRawValue, + } + + var err error + params.MGF.Parameters.FullBytes, err = asn1.Marshal(mgf1Params) + if err != nil { + panic(err) + } + + serialized, err := asn1.Marshal(params) + if err != nil { + panic(err) + } + + return asn1.RawValue{FullBytes: serialized} +} + +// GetSignatureAlgorithmFromAI converts asn1 AlgorithmIdentifier to SignatureAlgorithm int +func GetSignatureAlgorithmFromAI(ai pkix.AlgorithmIdentifier) SignatureAlgorithm { + if !ai.Algorithm.Equal(oidSignatureRSAPSS) { + for _, details := range signatureAlgorithmDetails { + if ai.Algorithm.Equal(details.oid) { + return details.algo + } + } + return UnknownSignatureAlgorithm + } + + // RSA PSS is special because it encodes important parameters + // in the Parameters. 
+ + var params pssParameters + if _, err := asn1.Unmarshal(ai.Parameters.FullBytes, ¶ms); err != nil { + return UnknownSignatureAlgorithm + } + + var mgf1HashFunc pkix.AlgorithmIdentifier + if _, err := asn1.Unmarshal(params.MGF.Parameters.FullBytes, &mgf1HashFunc); err != nil { + return UnknownSignatureAlgorithm + } + + // PSS is greatly overburdened with options. This code forces + // them into three buckets by requiring that the MGF1 hash + // function always match the message hash function (as + // recommended in + // https://tools.ietf.org/html/rfc3447#section-8.1), that the + // salt length matches the hash length, and that the trailer + // field has the default value. + if !bytes.Equal(params.Hash.Parameters.FullBytes, asn1.NullBytes) || + !params.MGF.Algorithm.Equal(oidMGF1) || + !mgf1HashFunc.Algorithm.Equal(params.Hash.Algorithm) || + !bytes.Equal(mgf1HashFunc.Parameters.FullBytes, asn1.NullBytes) || + params.TrailerField != 1 { + return UnknownSignatureAlgorithm + } + + switch { + case params.Hash.Algorithm.Equal(oidSHA256) && params.SaltLength == 32: + return SHA256WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA384) && params.SaltLength == 48: + return SHA384WithRSAPSS + case params.Hash.Algorithm.Equal(oidSHA512) && params.SaltLength == 64: + return SHA512WithRSAPSS + } + + return UnknownSignatureAlgorithm +} + +// RFC 3279, 2.3 Public Key Algorithms +// +// pkcs-1 OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// rsadsi(113549) pkcs(1) 1 } +// +// rsaEncryption OBJECT IDENTIFIER ::== { pkcs1-1 1 } +// +// id-dsa OBJECT IDENTIFIER ::== { iso(1) member-body(2) us(840) +// x9-57(10040) x9cm(4) 1 } +// +// RFC 5480, 2.1.1 Unrestricted Algorithm Identifier and Parameters +// +// id-ecPublicKey OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) keyType(2) 1 } +var ( + oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + oidPublicKeyDSA = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 1} + oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1} +) + +func getPublicKeyAlgorithmFromOID(oid asn1.ObjectIdentifier) PublicKeyAlgorithm { + switch { + case oid.Equal(oidPublicKeyRSA): + return RSA + case oid.Equal(oidPublicKeyDSA): + return DSA + case oid.Equal(oidPublicKeyECDSA): + return ECDSA + } + return UnknownPublicKeyAlgorithm +} + +// RFC 5480, 2.1.1.1. 
Named Curve +// +// secp224r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 33 } +// +// secp256r1 OBJECT IDENTIFIER ::= { +// iso(1) member-body(2) us(840) ansi-X9-62(10045) curves(3) +// prime(1) 7 } +// +// secp384r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 34 } +// +// secp521r1 OBJECT IDENTIFIER ::= { +// iso(1) identified-organization(3) certicom(132) curve(0) 35 } +// +// NB: secp256r1 is equivalent to prime256v1 +var ( + oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33} + oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7} + oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34} + oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35} +) + +func namedCurveFromOID(oid asn1.ObjectIdentifier) elliptic.Curve { + switch { + case oid.Equal(oidNamedCurveP224): + return elliptic.P224() + case oid.Equal(oidNamedCurveP256): + return elliptic.P256() + case oid.Equal(oidNamedCurveP384): + return elliptic.P384() + case oid.Equal(oidNamedCurveP521): + return elliptic.P521() + } + return nil +} + +func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) { + switch curve { + case elliptic.P224(): + return oidNamedCurveP224, true + case elliptic.P256(): + return oidNamedCurveP256, true + case elliptic.P384(): + return oidNamedCurveP384, true + case elliptic.P521(): + return oidNamedCurveP521, true + } + + return nil, false +} + +// KeyUsage represents the set of actions that are valid for a given key. It's +// a bitmap of the KeyUsage* constants. +type KeyUsage int + +const ( + KeyUsageDigitalSignature KeyUsage = 1 << iota + KeyUsageContentCommitment + KeyUsageKeyEncipherment + KeyUsageDataEncipherment + KeyUsageKeyAgreement + KeyUsageCertSign + KeyUsageCRLSign + KeyUsageEncipherOnly + KeyUsageDecipherOnly +) + +// RFC 5280, 4.2.1.12 Extended Key Usage +// +// anyExtendedKeyUsage OBJECT IDENTIFIER ::= { id-ce-extKeyUsage 0 } +// +// id-kp OBJECT IDENTIFIER ::= { id-pkix 3 } +// +// id-kp-serverAuth OBJECT IDENTIFIER ::= { id-kp 1 } +// id-kp-clientAuth OBJECT IDENTIFIER ::= { id-kp 2 } +// id-kp-codeSigning OBJECT IDENTIFIER ::= { id-kp 3 } +// id-kp-emailProtection OBJECT IDENTIFIER ::= { id-kp 4 } +// id-kp-timeStamping OBJECT IDENTIFIER ::= { id-kp 8 } +// id-kp-OCSPSigning OBJECT IDENTIFIER ::= { id-kp 9 } +//var ( +// oidExtKeyUsageAny = asn1.ObjectIdentifier{2, 5, 29, 37, 0} +// oidExtKeyUsageServerAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 1} +// oidExtKeyUsageClientAuth = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 2} +// oidExtKeyUsageCodeSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 3} +// oidExtKeyUsageEmailProtection = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 4} +// oidExtKeyUsageIPSECEndSystem = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 5} +// oidExtKeyUsageIPSECTunnel = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 6} +// oidExtKeyUsageIPSECUser = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 7} +// oidExtKeyUsageTimeStamping = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 8} +// oidExtKeyUsageOCSPSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 3, 9} +// oidExtKeyUsageMicrosoftServerGatedCrypto = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 10, 3, 3} +// oidExtKeyUsageNetscapeServerGatedCrypto = asn1.ObjectIdentifier{2, 16, 840, 1, 113730, 4, 1} +//) + +// ExtKeyUsage represents an extended set of actions that are valid for a given key. +// Each of the ExtKeyUsage* constants define a unique action. 
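+//
+// For example, a TLS server certificate is expected to assert
+// ExtKeyUsageServerAuth (id-kp-serverAuth, OID 1.3.6.1.5.5.7.3.1).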
+type ExtKeyUsage int + +// TODO: slight differences in case in some names. Should be easy to align with stdlib. +// leaving for now to not break compatibility + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. +var extKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + //{ExtKeyUsageIPSECEndSystem, oidExtKeyUsageIPSECEndSystem}, + {ExtKeyUsageIpsecUser, oidExtKeyUsageIpsecEndSystem}, + //{ExtKeyUsageIPSECTunnel, oidExtKeyUsageIPSECTunnel}, + {ExtKeyUsageIpsecTunnel, oidExtKeyUsageIpsecTunnel}, + //{ExtKeyUsageIPSECUser, oidExtKeyUsageIPSECUser}, + {ExtKeyUsageIpsecUser, oidExtKeyUsageIpsecUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + //{ExtKeyUsageOCSPSigning, oidExtKeyUsageOCSPSigning}, + {ExtKeyUsageOcspSigning, oidExtKeyUsageOcspSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, +} + +// TODO: slight differences in case in some names. Should be easy to align with stdlib. +// leaving for now to not break compatibility + +// extKeyUsageOIDs contains the mapping between an ExtKeyUsage and its OID. +var nativeExtKeyUsageOIDs = []struct { + extKeyUsage ExtKeyUsage + oid asn1.ObjectIdentifier +}{ + {ExtKeyUsageAny, oidExtKeyUsageAny}, + {ExtKeyUsageServerAuth, oidExtKeyUsageServerAuth}, + {ExtKeyUsageClientAuth, oidExtKeyUsageClientAuth}, + {ExtKeyUsageCodeSigning, oidExtKeyUsageCodeSigning}, + {ExtKeyUsageEmailProtection, oidExtKeyUsageEmailProtection}, + {ExtKeyUsageIpsecEndSystem, oidExtKeyUsageIpsecEndSystem}, + {ExtKeyUsageIpsecTunnel, oidExtKeyUsageIpsecTunnel}, + {ExtKeyUsageIpsecUser, oidExtKeyUsageIpsecUser}, + {ExtKeyUsageTimeStamping, oidExtKeyUsageTimeStamping}, + {ExtKeyUsageOcspSigning, oidExtKeyUsageOcspSigning}, + {ExtKeyUsageMicrosoftServerGatedCrypto, oidExtKeyUsageMicrosoftServerGatedCrypto}, + {ExtKeyUsageNetscapeServerGatedCrypto, oidExtKeyUsageNetscapeServerGatedCrypto}, +} + +func extKeyUsageFromOID(oid asn1.ObjectIdentifier) (eku ExtKeyUsage, ok bool) { + s := oid.String() + eku, ok = ekuConstants[s] + return +} + +func oidFromExtKeyUsage(eku ExtKeyUsage) (oid asn1.ObjectIdentifier, ok bool) { + for _, pair := range nativeExtKeyUsageOIDs { + if eku == pair.extKeyUsage { + return pair.oid, true + } + } + return +} + +// A Certificate represents an X.509 certificate. +type Certificate struct { + Raw []byte // Complete ASN.1 DER content (certificate, signature algorithm and signature). + RawTBSCertificate []byte // Certificate part of raw ASN.1 DER content. + RawSubjectPublicKeyInfo []byte // DER encoded SubjectPublicKeyInfo. + RawSubject []byte // DER encoded Subject + RawIssuer []byte // DER encoded Issuer + + Signature []byte + SignatureAlgorithm SignatureAlgorithm + + SelfSigned bool + + SignatureAlgorithmOID asn1.ObjectIdentifier + + PublicKeyAlgorithm PublicKeyAlgorithm + PublicKey interface{} + + PublicKeyAlgorithmOID asn1.ObjectIdentifier + + Version int + SerialNumber *big.Int + Issuer pkix.Name + Subject pkix.Name + NotBefore, NotAfter time.Time // Validity bounds. 
+	ValidityPeriod int
+	KeyUsage       KeyUsage
+
+	IssuerUniqueId  asn1.BitString
+	SubjectUniqueId asn1.BitString
+
+	// Extensions contains raw X.509 extensions. When parsing certificates,
+	// this can be used to extract non-critical extensions that are not
+	// parsed by this package. When marshaling certificates, the Extensions
+	// field is ignored, see ExtraExtensions.
+	Extensions []pkix.Extension
+
+	// ExtensionsMap contains raw X.509 extensions keyed by OID (in string
+	// representation). It allows fast membership testing of specific OIDs. Like
+	// the Extensions field, this field is ignored when marshaling certificates. If
+	// multiple extensions with the same OID are present, only the last
+	// pkix.Extension will be in this map. Consult the `Extensions` slice when it
+	// is required to process all extensions including duplicates.
+	ExtensionsMap map[string]pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any
+	// marshaled certificates. Values override any extensions that would
+	// otherwise be produced based on the other fields. The ExtraExtensions
+	// field is not populated when parsing certificates, see Extensions.
+	ExtraExtensions []pkix.Extension
+
+	// UnhandledCriticalExtensions contains a list of extension IDs that
+	// were not (fully) processed when parsing. Verify will fail if this
+	// slice is non-empty, unless verification is delegated to an OS
+	// library which understands all the critical extensions.
+	//
+	// Users can access these extensions using Extensions and can remove
+	// elements from this slice if they believe that they have been
+	// handled.
+	UnhandledCriticalExtensions []asn1.ObjectIdentifier
+
+	ExtKeyUsage        []ExtKeyUsage           // Sequence of extended key usages.
+	UnknownExtKeyUsage []asn1.ObjectIdentifier // Encountered extended key usages unknown to this package.
+
+	BasicConstraintsValid bool // if true then the next two fields are valid.
+	IsCA                  bool
+
+	// MaxPathLen and MaxPathLenZero indicate the presence and
+	// value of the BasicConstraints' "pathLenConstraint".
+	//
+	// When parsing a certificate, a positive MaxPathLen
+	// means that the field was specified, -1 means it was unset,
+	// and MaxPathLenZero being true means that the field was
+	// explicitly set to zero. The case of MaxPathLen==0 with MaxPathLenZero==false
+	// should be treated as equivalent to -1 (unset).
+	//
+	// When generating a certificate, an unset pathLenConstraint
+	// can be requested with either MaxPathLen == -1 or using the
+	// zero value for both MaxPathLen and MaxPathLenZero.
+	MaxPathLen int
+	// MaxPathLenZero indicates that BasicConstraintsValid==true and
+	// MaxPathLen==0 should be interpreted as an actual maximum path length
+	// of zero. Otherwise, that combination is interpreted as MaxPathLen
+	// not being set.
+	MaxPathLenZero bool
+
+	SubjectKeyId   []byte
+	AuthorityKeyId []byte
+
+	// RFC 5280, 4.2.2.1 (Authority Information Access)
+	OCSPServer            []string
+	IssuingCertificateURL []string
+
+	// Subject Alternate Name values
+	OtherNames     []pkix.OtherName
+	DNSNames       []string
+	EmailAddresses []string
+	DirectoryNames []pkix.Name
+	EDIPartyNames  []pkix.EDIPartyName
+	URIs           []string
+	IPAddresses    []net.IP
+	RegisteredIDs  []asn1.ObjectIdentifier
+
+	// Issuer Alternative Name values
+	IANOtherNames     []pkix.OtherName
+	IANDNSNames       []string
+	IANEmailAddresses []string
+	IANDirectoryNames []pkix.Name
+	IANEDIPartyNames  []pkix.EDIPartyName
+	IANURIs           []string
+	IANIPAddresses    []net.IP
+	IANRegisteredIDs  []asn1.ObjectIdentifier
+
+	// Certificate Policies values
+	QualifierId          [][]asn1.ObjectIdentifier
+	CPSuri               [][]string
+	ExplicitTexts        [][]asn1.RawValue
+	NoticeRefOrgnization [][]asn1.RawValue
+	NoticeRefNumbers     [][]NoticeNumber
+
+	ParsedExplicitTexts         [][]string
+	ParsedNoticeRefOrganization [][]string
+
+	// Name constraints
+	NameConstraintsCritical bool // if true then the name constraints are marked critical.
+	PermittedDNSNames       []GeneralSubtreeString
+	ExcludedDNSNames        []GeneralSubtreeString
+	PermittedEmailAddresses []GeneralSubtreeString
+	ExcludedEmailAddresses  []GeneralSubtreeString
+	PermittedIPAddresses    []GeneralSubtreeIP
+	ExcludedIPAddresses     []GeneralSubtreeIP
+	PermittedDirectoryNames []GeneralSubtreeName
+	ExcludedDirectoryNames  []GeneralSubtreeName
+	PermittedEdiPartyNames  []GeneralSubtreeEdi
+	ExcludedEdiPartyNames   []GeneralSubtreeEdi
+	PermittedRegisteredIDs  []GeneralSubtreeOid
+	ExcludedRegisteredIDs   []GeneralSubtreeOid
+	PermittedX400Addresses  []GeneralSubtreeRaw
+	ExcludedX400Addresses   []GeneralSubtreeRaw
+
+	// CRL Distribution Points
+	CRLDistributionPoints []string
+
+	PolicyIdentifiers []asn1.ObjectIdentifier
+	ValidationLevel   CertValidationLevel
+
+	// Fingerprints
+	FingerprintMD5    CertificateFingerprint
+	FingerprintSHA1   CertificateFingerprint
+	FingerprintSHA256 CertificateFingerprint
+	FingerprintNoCT   CertificateFingerprint
+
+	// SPKI
+	SPKIFingerprint           CertificateFingerprint
+	SPKISubjectFingerprint    CertificateFingerprint
+	TBSCertificateFingerprint CertificateFingerprint
+
+	IsPrecert bool
+
+	// Internal
+	validSignature bool
+
+	// CT
+	SignedCertificateTimestampList []*ct.SignedCertificateTimestamp
+
+	// Used to speed up the zlint checks. Populated by the GetParsedDNSNames method.
+	parsedDNSNames []ParsedDomainName
+	// Used to speed up the zlint checks. Populated by the GetParsedSubjectCommonName method.
+	parsedCommonName *ParsedDomainName
+
+	// CAB Forum Tor Service Descriptor Hash Extensions (see EV Guidelines
+	// Appendix F)
+	TorServiceDescriptors []*TorServiceDescriptorHash
+}
+
+// ParsedDomainName is a structure holding a parsed domain name (CommonName or
+// DNS SAN) and a parsing error.
+type ParsedDomainName struct {
+	DomainString string
+	ParsedDomain *publicsuffix.DomainName
+	ParseError   error
+}
+
+// GetParsedDNSNames returns a list of parsed SAN DNS names. It is used to cache
+// the parsing result and speed up zlint linters. If invalidateCache is true, the
+// cache is repopulated with the current list of strings from Certificate.DNSNames.
+// This parameter should always be false, unless Certificate.DNSNames has been
+// modified after the previous call to GetParsedDNSNames.
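+//
+// A minimal usage sketch (cert is assumed to be a *Certificate returned by
+// ParseCertificate):
+//
+//	for _, pd := range cert.GetParsedDNSNames(false) {
+//		if pd.ParseError != nil {
+//			continue // SAN entry is not a well-formed domain name
+//		}
+//		_ = pd.ParsedDomain // *publicsuffix.DomainName with the public-suffix split
+//	}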
+func (c *Certificate) GetParsedDNSNames(invalidateCache bool) []ParsedDomainName {
+	if c.parsedDNSNames != nil && !invalidateCache {
+		return c.parsedDNSNames
+	}
+	c.parsedDNSNames = make([]ParsedDomainName, len(c.DNSNames))
+
+	for i := range c.DNSNames {
+		var parsedDomain, parseError = publicsuffix.ParseFromListWithOptions(publicsuffix.DefaultList,
+			c.DNSNames[i],
+			&publicsuffix.FindOptions{IgnorePrivate: true, DefaultRule: publicsuffix.DefaultRule})
+
+		c.parsedDNSNames[i].DomainString = c.DNSNames[i]
+		c.parsedDNSNames[i].ParsedDomain = parsedDomain
+		c.parsedDNSNames[i].ParseError = parseError
+	}
+
+	return c.parsedDNSNames
+}
+
+// GetParsedSubjectCommonName returns the parsed subject CommonName. It is used to
+// cache the parsing result and speed up zlint linters. If invalidateCache is true,
+// the cache is repopulated with the current subject CommonName. This parameter
+// should always be false, unless Certificate.Subject.CommonName has been modified
+// after the previous call to GetParsedSubjectCommonName.
+func (c *Certificate) GetParsedSubjectCommonName(invalidateCache bool) ParsedDomainName {
+	if c.parsedCommonName != nil && !invalidateCache {
+		return *c.parsedCommonName
+	}
+
+	var parsedDomain, parseError = publicsuffix.ParseFromListWithOptions(publicsuffix.DefaultList,
+		c.Subject.CommonName,
+		&publicsuffix.FindOptions{IgnorePrivate: true, DefaultRule: publicsuffix.DefaultRule})
+
+	c.parsedCommonName = &ParsedDomainName{
+		DomainString: c.Subject.CommonName,
+		ParsedDomain: parsedDomain,
+		ParseError:   parseError,
+	}
+
+	return *c.parsedCommonName
+}
+
+// ErrUnsupportedAlgorithm results from attempting to perform an operation that
+// involves algorithms that are not currently implemented.
+var ErrUnsupportedAlgorithm = errors.New("x509: cannot verify signature: algorithm unimplemented")
+
+// An InsecureAlgorithmError indicates that the SignatureAlgorithm used to
+// generate the signature is not secure, and the signature has been rejected.
+type InsecureAlgorithmError SignatureAlgorithm
+
+func (e InsecureAlgorithmError) Error() string {
+	return fmt.Sprintf("x509: cannot verify signature: insecure algorithm %v", SignatureAlgorithm(e))
+}
+
+// ConstraintViolationError results when a requested usage is not permitted by
+// a certificate. For example: checking a signature when the public key isn't a
+// certificate signing key.
+type ConstraintViolationError struct{}
+
+func (ConstraintViolationError) Error() string {
+	return "x509: invalid signature: parent certificate cannot sign this kind of certificate"
+}
+
+func (c *Certificate) Equal(other *Certificate) bool {
+	return bytes.Equal(c.Raw, other.Raw)
+}
+
+func (c *Certificate) hasSANExtension() bool {
+	return oidInExtensions(oidExtensionSubjectAltName, c.Extensions)
+}
+
+// Entrust have a broken root certificate (CN=Entrust.net Certification
+// Authority (2048)) which isn't marked as a CA certificate and is thus invalid
+// according to PKIX.
+// We recognise this certificate by its SubjectPublicKeyInfo and exempt it
+// from the Basic Constraints requirement.
+// See http://www.entrust.net/knowledge-base/technote.cfm?tn=7869
+//
+// TODO(agl): remove this hack once their reissued root is sufficiently
+// widespread.
+var entrustBrokenSPKI = []byte{
+	0x30, 0x82, 0x01, 0x22, 0x30, 0x0d, 0x06, 0x09,
+	0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01,
+	0x01, 0x05, 0x00, 0x03, 0x82, 0x01, 0x0f, 0x00,
+	0x30, 0x82, 0x01, 0x0a, 0x02, 0x82, 0x01, 0x01,
+	0x00, 0x97, 0xa3, 0x2d, 0x3c, 0x9e, 0xde, 0x05,
+	0xda, 0x13, 0xc2, 0x11, 0x8d, 0x9d, 0x8e, 0xe3,
+	0x7f, 0xc7, 0x4b, 0x7e, 0x5a, 0x9f, 0xb3, 0xff,
+	0x62, 0xab, 0x73, 0xc8, 0x28, 0x6b, 0xba, 0x10,
+	0x64, 0x82, 0x87, 0x13, 0xcd, 0x57, 0x18, 0xff,
+	0x28, 0xce, 0xc0, 0xe6, 0x0e, 0x06, 0x91, 0x50,
+	0x29, 0x83, 0xd1, 0xf2, 0xc3, 0x2a, 0xdb, 0xd8,
+	0xdb, 0x4e, 0x04, 0xcc, 0x00, 0xeb, 0x8b, 0xb6,
+	0x96, 0xdc, 0xbc, 0xaa, 0xfa, 0x52, 0x77, 0x04,
+	0xc1, 0xdb, 0x19, 0xe4, 0xae, 0x9c, 0xfd, 0x3c,
+	0x8b, 0x03, 0xef, 0x4d, 0xbc, 0x1a, 0x03, 0x65,
+	0xf9, 0xc1, 0xb1, 0x3f, 0x72, 0x86, 0xf2, 0x38,
+	0xaa, 0x19, 0xae, 0x10, 0x88, 0x78, 0x28, 0xda,
+	0x75, 0xc3, 0x3d, 0x02, 0x82, 0x02, 0x9c, 0xb9,
+	0xc1, 0x65, 0x77, 0x76, 0x24, 0x4c, 0x98, 0xf7,
+	0x6d, 0x31, 0x38, 0xfb, 0xdb, 0xfe, 0xdb, 0x37,
+	0x02, 0x76, 0xa1, 0x18, 0x97, 0xa6, 0xcc, 0xde,
+	0x20, 0x09, 0x49, 0x36, 0x24, 0x69, 0x42, 0xf6,
+	0xe4, 0x37, 0x62, 0xf1, 0x59, 0x6d, 0xa9, 0x3c,
+	0xed, 0x34, 0x9c, 0xa3, 0x8e, 0xdb, 0xdc, 0x3a,
+	0xd7, 0xf7, 0x0a, 0x6f, 0xef, 0x2e, 0xd8, 0xd5,
+	0x93, 0x5a, 0x7a, 0xed, 0x08, 0x49, 0x68, 0xe2,
+	0x41, 0xe3, 0x5a, 0x90, 0xc1, 0x86, 0x55, 0xfc,
+	0x51, 0x43, 0x9d, 0xe0, 0xb2, 0xc4, 0x67, 0xb4,
+	0xcb, 0x32, 0x31, 0x25, 0xf0, 0x54, 0x9f, 0x4b,
+	0xd1, 0x6f, 0xdb, 0xd4, 0xdd, 0xfc, 0xaf, 0x5e,
+	0x6c, 0x78, 0x90, 0x95, 0xde, 0xca, 0x3a, 0x48,
+	0xb9, 0x79, 0x3c, 0x9b, 0x19, 0xd6, 0x75, 0x05,
+	0xa0, 0xf9, 0x88, 0xd7, 0xc1, 0xe8, 0xa5, 0x09,
+	0xe4, 0x1a, 0x15, 0xdc, 0x87, 0x23, 0xaa, 0xb2,
+	0x75, 0x8c, 0x63, 0x25, 0x87, 0xd8, 0xf8, 0x3d,
+	0xa6, 0xc2, 0xcc, 0x66, 0xff, 0xa5, 0x66, 0x68,
+	0x55, 0x02, 0x03, 0x01, 0x00, 0x01,
+}
+
+// CheckSignatureFrom verifies that the signature on c is a valid signature
+// from parent.
+func (c *Certificate) CheckSignatureFrom(parent *Certificate) (err error) {
+	// RFC 5280, 4.2.1.9:
+	// "If the basic constraints extension is not present in a version 3
+	// certificate, or the extension is present but the cA boolean is not
+	// asserted, then the certified public key MUST NOT be used to verify
+	// certificate signatures."
+	// (except for Entrust, see comment above entrustBrokenSPKI)
+	if (parent.Version == 3 && !parent.BasicConstraintsValid ||
+		parent.BasicConstraintsValid && !parent.IsCA) &&
+		!bytes.Equal(c.RawSubjectPublicKeyInfo, entrustBrokenSPKI) {
+		return ConstraintViolationError{}
+	}
+
+	if parent.KeyUsage != 0 && parent.KeyUsage&KeyUsageCertSign == 0 {
+		return ConstraintViolationError{}
+	}
+
+	if parent.PublicKeyAlgorithm == UnknownPublicKeyAlgorithm {
+		return ErrUnsupportedAlgorithm
+	}
+
+	// TODO(agl): don't ignore the path length constraint.
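+	//
+	// Before checking the signature itself, confirm that the chain actually
+	// links: the child's raw issuer must be byte-for-byte equal to the
+	// parent's raw subject. Comparing the DER encodings sidesteps any
+	// string-normalization questions.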
+
+	if !bytes.Equal(parent.RawSubject, c.RawIssuer) {
+		return errors.New("x509: issuer name does not match subject of parent")
+	}
+
+	return parent.CheckSignature(c.SignatureAlgorithm, c.RawTBSCertificate, c.Signature)
+}
+
+// CheckSignatureFromKey verifies that signature is a valid signature over
+// signed, made by the given public key using algorithm algo.
+func CheckSignatureFromKey(publicKey interface{}, algo SignatureAlgorithm, signed, signature []byte) (err error) {
+	var hashType crypto.Hash
+
+	switch algo {
+	// NOTE: exception to stdlib, allow MD5 algorithm
+	case MD5WithRSA:
+		hashType = crypto.MD5
+	case SHA1WithRSA, DSAWithSHA1, ECDSAWithSHA1:
+		hashType = crypto.SHA1
+	case SHA256WithRSA, SHA256WithRSAPSS, DSAWithSHA256, ECDSAWithSHA256:
+		hashType = crypto.SHA256
+	case SHA384WithRSA, SHA384WithRSAPSS, ECDSAWithSHA384:
+		hashType = crypto.SHA384
+	case SHA512WithRSA, SHA512WithRSAPSS, ECDSAWithSHA512:
+		hashType = crypto.SHA512
+	//case MD2WithRSA, MD5WithRSA:
+	case MD2WithRSA:
+		return InsecureAlgorithmError(algo)
+	default:
+		return ErrUnsupportedAlgorithm
+	}
+
+	if !hashType.Available() {
+		return ErrUnsupportedAlgorithm
+	}
+	h := hashType.New()
+
+	h.Write(signed)
+	digest := h.Sum(nil)
+
+	switch pub := publicKey.(type) {
+	case *rsa.PublicKey:
+		if algo.isRSAPSS() {
+			return rsa.VerifyPSS(pub, hashType, digest, signature, &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash})
+		} else {
+			return rsa.VerifyPKCS1v15(pub, hashType, digest, signature)
+		}
+	case *dsa.PublicKey:
+		dsaSig := new(dsaSignature)
+		if rest, err := asn1.Unmarshal(signature, dsaSig); err != nil {
+			return err
+		} else if len(rest) != 0 {
+			return errors.New("x509: trailing data after DSA signature")
+		}
+		if dsaSig.R.Sign() <= 0 || dsaSig.S.Sign() <= 0 {
+			return errors.New("x509: DSA signature contained zero or negative values")
+		}
+		if !dsa.Verify(pub, digest, dsaSig.R, dsaSig.S) {
+			return errors.New("x509: DSA verification failure")
+		}
+		return
+	case *ecdsa.PublicKey:
+		ecdsaSig := new(ecdsaSignature)
+		if rest, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+			return err
+		} else if len(rest) != 0 {
+			return errors.New("x509: trailing data after ECDSA signature")
+		}
+		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+			return errors.New("x509: ECDSA signature contained zero or negative values")
+		}
+		if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) {
+			return errors.New("x509: ECDSA verification failure")
+		}
+		return
+	case *AugmentedECDSA:
+		ecdsaSig := new(ecdsaSignature)
+		if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil {
+			return err
+		}
+		if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 {
+			return errors.New("x509: ECDSA signature contained zero or negative values")
+		}
+		if !ecdsa.Verify(pub.Pub, digest, ecdsaSig.R, ecdsaSig.S) {
+			return errors.New("x509: ECDSA verification failure")
+		}
+		return
+	}
+	return ErrUnsupportedAlgorithm
+}
+
+// CheckSignature verifies that signature is a valid signature over signed from
+// c's public key.
+func (c *Certificate) CheckSignature(algo SignatureAlgorithm, signed, signature []byte) (err error) {
+	return CheckSignatureFromKey(c.PublicKey, algo, signed, signature)
+}
+
+// CheckCRLSignature checks that the signature in crl is from c.
+func (c *Certificate) CheckCRLSignature(crl *pkix.CertificateList) error {
+	algo := GetSignatureAlgorithmFromAI(crl.SignatureAlgorithm)
+	return c.CheckSignature(algo, crl.TBSCertList.Raw, crl.SignatureValue.RightAlign())
+}
+
+// UnhandledCriticalExtension results when the certificate contains an
+// unimplemented X.509 extension marked as critical.
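+//
+// Callers that deliberately tolerate a particular extension can match on the
+// error type, e.g. (illustrative):
+//
+//	if _, ok := err.(UnhandledCriticalExtension); ok {
+//		// decide whether to proceed anyway
+//	}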
+type UnhandledCriticalExtension struct {
+	oid     asn1.ObjectIdentifier
+	message string
+}
+
+func (h UnhandledCriticalExtension) Error() string {
+	return fmt.Sprintf("x509: unhandled critical extension: %s | %s", h.oid, h.message)
+}
+
+// TimeInValidityPeriod returns true if NotBefore < t < NotAfter
+func (c *Certificate) TimeInValidityPeriod(t time.Time) bool {
+	return c.NotBefore.Before(t) && c.NotAfter.After(t)
+}
+
+// RFC 5280 4.2.1.4
+type policyInformation struct {
+	Policy     asn1.ObjectIdentifier
+	Qualifiers []policyQualifierInfo `asn1:"optional"`
+}
+
+type policyQualifierInfo struct {
+	PolicyQualifierId asn1.ObjectIdentifier
+	Qualifier         asn1.RawValue
+}
+
+type userNotice struct {
+	NoticeRef    noticeReference `asn1:"optional"`
+	ExplicitText asn1.RawValue   `asn1:"optional"`
+}
+
+type noticeReference struct {
+	Organization  asn1.RawValue
+	NoticeNumbers []int
+}
+
+type NoticeNumber []int
+
+type generalSubtree struct {
+	Value asn1.RawValue `asn1:"optional"`
+	Min   int           `asn1:"tag:0,default:0,optional"`
+	Max   int           `asn1:"tag:1,optional"`
+}
+
+type GeneralSubtreeString struct {
+	Data string
+	Max  int
+	Min  int
+}
+
+type GeneralSubtreeIP struct {
+	Data net.IPNet
+	Max  int
+	Min  int
+}
+
+type GeneralSubtreeName struct {
+	Data pkix.Name
+	Max  int
+	Min  int
+}
+
+type GeneralSubtreeEdi struct {
+	Data pkix.EDIPartyName
+	Max  int
+	Min  int
+}
+
+type GeneralSubtreeOid struct {
+	Data asn1.ObjectIdentifier
+	Max  int
+	Min  int
+}
+
+type GeneralSubtreeRaw struct {
+	Data asn1.RawValue
+	Max  int
+	Min  int
+}
+
+type basicConstraints struct {
+	IsCA       bool `asn1:"optional"`
+	MaxPathLen int  `asn1:"optional,default:-1"`
+}
+
+// RFC 5280, 4.2.1.10
+type nameConstraints struct {
+	Permitted []generalSubtree `asn1:"optional,tag:0"`
+	Excluded  []generalSubtree `asn1:"optional,tag:1"`
+}
+
+// RFC 5280, 4.2.2.1
+type authorityInfoAccess struct {
+	Method   asn1.ObjectIdentifier
+	Location asn1.RawValue
+}
+
+// RFC 5280, 4.2.1.13
+type distributionPoint struct {
+	DistributionPoint distributionPointName `asn1:"optional,tag:0"`
+	Reason            asn1.BitString        `asn1:"optional,tag:1"`
+	CRLIssuer         asn1.RawValue         `asn1:"optional,tag:2"`
+}
+
+type distributionPointName struct {
+	FullName     asn1.RawValue    `asn1:"optional,tag:0"`
+	RelativeName pkix.RDNSequence `asn1:"optional,tag:1"`
+}
+
+func maxValidationLevel(a, b CertValidationLevel) CertValidationLevel {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func getMaxCertValidationLevel(oids []asn1.ObjectIdentifier) CertValidationLevel {
+	maxOID := UnknownValidationLevel
+	for _, oid := range oids {
+		if _, ok := ExtendedValidationOIDs[oid.String()]; ok {
+			return EV
+		} else if _, ok := OrganizationValidationOIDs[oid.String()]; ok {
+			maxOID = maxValidationLevel(maxOID, OV)
+		} else if _, ok := DomainValidationOIDs[oid.String()]; ok {
+			maxOID = maxValidationLevel(maxOID, DV)
+		}
+	}
+	return maxOID
+}
+
+func parsePublicKey(algo PublicKeyAlgorithm, keyData *publicKeyInfo) (interface{}, error) {
+	asn1Data := keyData.PublicKey.RightAlign()
+	switch algo {
+	case RSA:
+
+		// TODO: disabled since current behaviour does not expect it; should
+		// eventually be enabled.
+		// RSA public keys must have a NULL in the parameters
+		// (https://tools.ietf.org/html/rfc3279#section-2.3.1).
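+		// (Editorial note: RFC 3279 mandates an explicit ASN.1 NULL in the
+		// AlgorithmIdentifier parameters for RSA keys, but certificates that
+		// omit it exist in the wild, which is presumably why the strict
+		// check below stays commented out.)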
+		//if !bytes.Equal(keyData.Algorithm.Parameters.FullBytes, asn1.NullBytes) {
+		//	return nil, errors.New("x509: RSA key missing NULL parameters")
+		//}
+
+		p := new(pkcs1PublicKey)
+		rest, err := asn1.Unmarshal(asn1Data, p)
+		if err != nil {
+			return nil, err
+		}
+		if len(rest) != 0 {
+			return nil, errors.New("x509: trailing data after RSA public key")
+		}
+
+		if p.N.Sign() <= 0 {
+			return nil, errors.New("x509: RSA modulus is not a positive number")
+		}
+		if p.E <= 0 {
+			return nil, errors.New("x509: RSA public exponent is not a positive number")
+		}
+
+		pub := &rsa.PublicKey{
+			E: p.E,
+			N: p.N,
+		}
+		return pub, nil
+	case DSA:
+		var p *big.Int
+		rest, err := asn1.Unmarshal(asn1Data, &p)
+		if err != nil {
+			return nil, err
+		}
+		if len(rest) != 0 {
+			return nil, errors.New("x509: trailing data after DSA public key")
+		}
+		paramsData := keyData.Algorithm.Parameters.FullBytes
+		params := new(dsaAlgorithmParameters)
+		rest, err = asn1.Unmarshal(paramsData, params)
+		if err != nil {
+			return nil, err
+		}
+		if len(rest) != 0 {
+			return nil, errors.New("x509: trailing data after DSA parameters")
+		}
+		if p.Sign() <= 0 || params.P.Sign() <= 0 || params.Q.Sign() <= 0 || params.G.Sign() <= 0 {
+			return nil, errors.New("x509: zero or negative DSA parameter")
+		}
+		pub := &dsa.PublicKey{
+			Parameters: dsa.Parameters{
+				P: params.P,
+				Q: params.Q,
+				G: params.G,
+			},
+			Y: p,
+		}
+		return pub, nil
+	case ECDSA:
+		paramsData := keyData.Algorithm.Parameters.FullBytes
+		namedCurveOID := new(asn1.ObjectIdentifier)
+		rest, err := asn1.Unmarshal(paramsData, namedCurveOID)
+		if err != nil {
+			return nil, err
+		}
+		if len(rest) != 0 {
+			return nil, errors.New("x509: trailing data after ECDSA parameters")
+		}
+		namedCurve := namedCurveFromOID(*namedCurveOID)
+		if namedCurve == nil {
+			return nil, errors.New("x509: unsupported elliptic curve")
+		}
+		x, y := elliptic.Unmarshal(namedCurve, asn1Data)
+		if x == nil {
+			return nil, errors.New("x509: failed to unmarshal elliptic curve point")
+		}
+		key := &ecdsa.PublicKey{
+			Curve: namedCurve,
+			X:     x,
+			Y:     y,
+		}
+
+		pub := &AugmentedECDSA{
+			Pub: key,
+			Raw: keyData.PublicKey,
+		}
+		return pub, nil
+	default:
+		return nil, nil
+	}
+}
+
+func parseSANExtension(value []byte) (dnsNames, emailAddresses []string, ipAddresses []net.IP, err error) {
+	// RFC 5280, 4.2.1.6
+
+	// SubjectAltName ::= GeneralNames
+	//
+	// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
+	//
+	// GeneralName ::= CHOICE {
+	//      otherName                 [0] OtherName,
+	//      rfc822Name                [1] IA5String,
+	//      dNSName                   [2] IA5String,
+	//      x400Address               [3] ORAddress,
+	//      directoryName             [4] Name,
+	//      ediPartyName              [5] EDIPartyName,
+	//      uniformResourceIdentifier [6] IA5String,
+	//      iPAddress                 [7] OCTET STRING,
+	//      registeredID              [8] OBJECT IDENTIFIER }
+	var seq asn1.RawValue
+	var rest []byte
+	if rest, err = asn1.Unmarshal(value, &seq); err != nil {
+		return
+	} else if len(rest) != 0 {
+		err = errors.New("x509: trailing data after X.509 extension")
+		return
+	}
+	if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
+		err = asn1.StructuralError{Msg: "bad SAN sequence"}
+		return
+	}
+
+	rest = seq.Bytes
+	for len(rest) > 0 {
+		var v asn1.RawValue
+		rest, err = asn1.Unmarshal(rest, &v)
+		if err != nil {
+			return
+		}
+		switch v.Tag {
+		case 1:
+			emailAddresses = append(emailAddresses, string(v.Bytes))
+		case 2:
+			dnsNames = append(dnsNames, string(v.Bytes))
+		case 7:
+			switch len(v.Bytes) {
+			case net.IPv4len, net.IPv6len:
+				ipAddresses = append(ipAddresses, v.Bytes)
+			default:
+				err = errors.New("x509: certificate contained IP address of length " + strconv.Itoa(len(v.Bytes)))
+				return
+			}
+		}
+	}
+
+	return
+}
+
+func parseGeneralNames(value []byte) (otherNames []pkix.OtherName, dnsNames, emailAddresses, URIs []string, directoryNames []pkix.Name, ediPartyNames []pkix.EDIPartyName, ipAddresses []net.IP, registeredIDs []asn1.ObjectIdentifier, err error) {
+	// RFC 5280, 4.2.1.6
+
+	// SubjectAltName ::= GeneralNames
+	//
+	// GeneralNames ::= SEQUENCE SIZE (1..MAX) OF GeneralName
+	//
+	// GeneralName ::= CHOICE {
+	//      otherName                 [0] OtherName,
+	//      rfc822Name                [1] IA5String,
+	//      dNSName                   [2] IA5String,
+	//      x400Address               [3] ORAddress,
+	//      directoryName             [4] Name,
+	//      ediPartyName              [5] EDIPartyName,
+	//      uniformResourceIdentifier [6] IA5String,
+	//      iPAddress                 [7] OCTET STRING,
+	//      registeredID              [8] OBJECT IDENTIFIER }
+	var seq asn1.RawValue
+	if _, err = asn1.Unmarshal(value, &seq); err != nil {
+		return
+	}
+	if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
+		err = asn1.StructuralError{Msg: "bad SAN sequence"}
+		return
+	}
+
+	rest := seq.Bytes
+	for len(rest) > 0 {
+		var v asn1.RawValue
+		rest, err = asn1.Unmarshal(rest, &v)
+		if err != nil {
+			return
+		}
+		switch v.Tag {
+		case 0:
+			var oName pkix.OtherName
+			_, err = asn1.UnmarshalWithParams(v.FullBytes, &oName, "tag:0")
+			if err != nil {
+				return
+			}
+			otherNames = append(otherNames, oName)
+		case 1:
+			emailAddresses = append(emailAddresses, string(v.Bytes))
+		case 2:
+			dnsNames = append(dnsNames, string(v.Bytes))
+		case 4:
+			var rdn pkix.RDNSequence
+			_, err = asn1.Unmarshal(v.Bytes, &rdn)
+			if err != nil {
+				return
+			}
+			var dir pkix.Name
+			dir.FillFromRDNSequence(&rdn)
+			directoryNames = append(directoryNames, dir)
+		case 5:
+			var ediName pkix.EDIPartyName
+			_, err = asn1.UnmarshalWithParams(v.FullBytes, &ediName, "tag:5")
+			if err != nil {
+				return
+			}
+			ediPartyNames = append(ediPartyNames, ediName)
+		case 6:
+			URIs = append(URIs, string(v.Bytes))
+		case 7:
+			switch len(v.Bytes) {
+			case net.IPv4len, net.IPv6len:
+				ipAddresses = append(ipAddresses, v.Bytes)
+			default:
+				err = errors.New("x509: certificate contained IP address of length " + strconv.Itoa(len(v.Bytes)))
+				return
+			}
+		case 8:
+			var id asn1.ObjectIdentifier
+			_, err = asn1.UnmarshalWithParams(v.FullBytes, &id, "tag:8")
+			if err != nil {
+				return
+			}
+			registeredIDs = append(registeredIDs, id)
+		}
+	}
+
+	return
+}
+
+//TODO
+func parseCertificate(in *certificate) (*Certificate, error) {
+	out := new(Certificate)
+	out.Raw = in.Raw
+	out.RawTBSCertificate = in.TBSCertificate.Raw
+	out.RawSubjectPublicKeyInfo = in.TBSCertificate.PublicKey.Raw
+	out.RawSubject = in.TBSCertificate.Subject.FullBytes
+	out.RawIssuer = in.TBSCertificate.Issuer.FullBytes
+
+	// Fingerprints
+	out.FingerprintMD5 = MD5Fingerprint(in.Raw)
+	out.FingerprintSHA1 = SHA1Fingerprint(in.Raw)
+	out.FingerprintSHA256 = SHA256Fingerprint(in.Raw)
+	out.SPKIFingerprint = SHA256Fingerprint(in.TBSCertificate.PublicKey.Raw)
+	out.TBSCertificateFingerprint = SHA256Fingerprint(in.TBSCertificate.Raw)
+
+	tbs := in.TBSCertificate
+	originalExtensions := in.TBSCertificate.Extensions
+
+	// Blow away the raw data since it also includes CT data
+	tbs.Raw = nil
+
+	// remove the CT extensions
+	extensions := make([]pkix.Extension, 0, len(originalExtensions))
+	for _, extension := range originalExtensions {
+		if extension.Id.Equal(oidExtensionCTPrecertificatePoison) {
+			continue
+		}
+		if extension.Id.Equal(oidExtensionSignedCertificateTimestampList) {
+			continue
+		}
+		extensions = append(extensions, extension)
+	}
+
+	tbs.Extensions = extensions
+
+	tbsbytes, err := asn1.Marshal(tbs)
+	if err != nil {
+		return nil, err
+	}
+	if tbsbytes == nil {
+		return nil, asn1.SyntaxError{Msg: "Trailing data"}
+	}
+	out.FingerprintNoCT = SHA256Fingerprint(tbsbytes[:])
+
+	// Hash both SPKI and Subject to create a fingerprint that we can use to describe a CA
+	hasher := sha256.New()
+	hasher.Write(in.TBSCertificate.PublicKey.Raw)
+	hasher.Write(in.TBSCertificate.Subject.FullBytes)
+	out.SPKISubjectFingerprint = hasher.Sum(nil)
+
+	out.Signature = in.SignatureValue.RightAlign()
+	out.SignatureAlgorithm =
+		GetSignatureAlgorithmFromAI(in.TBSCertificate.SignatureAlgorithm)
+
+	out.SignatureAlgorithmOID = in.TBSCertificate.SignatureAlgorithm.Algorithm
+
+	out.PublicKeyAlgorithm =
+		getPublicKeyAlgorithmFromOID(in.TBSCertificate.PublicKey.Algorithm.Algorithm)
+	out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCertificate.PublicKey)
+	if err != nil {
+		return nil, err
+	}
+
+	out.PublicKeyAlgorithmOID = in.TBSCertificate.PublicKey.Algorithm.Algorithm
+	out.Version = in.TBSCertificate.Version + 1
+	out.SerialNumber = in.TBSCertificate.SerialNumber
+
+	var issuer, subject pkix.RDNSequence
+	if _, err := asn1.Unmarshal(in.TBSCertificate.Subject.FullBytes, &subject); err != nil {
+		return nil, err
+	}
+	if _, err := asn1.Unmarshal(in.TBSCertificate.Issuer.FullBytes, &issuer); err != nil {
+		return nil, err
+	}
+
+	out.Issuer.FillFromRDNSequence(&issuer)
+	out.Subject.FillFromRDNSequence(&subject)
+
+	// Check if self-signed
+	if bytes.Equal(out.RawSubject, out.RawIssuer) {
+		// Possibly self-signed, check the signature against itself.
+		if err := out.CheckSignature(out.SignatureAlgorithm, out.RawTBSCertificate, out.Signature); err == nil {
+			out.SelfSigned = true
+		}
+	}
+
+	out.NotBefore = in.TBSCertificate.Validity.NotBefore
+	out.NotAfter = in.TBSCertificate.Validity.NotAfter
+
+	out.ValidityPeriod = int(out.NotAfter.Sub(out.NotBefore).Seconds())
+
+	out.IssuerUniqueId = in.TBSCertificate.UniqueId
+	out.SubjectUniqueId = in.TBSCertificate.SubjectUniqueId
+
+	out.ExtensionsMap = make(map[string]pkix.Extension, len(in.TBSCertificate.Extensions))
+	for _, e := range in.TBSCertificate.Extensions {
+		out.Extensions = append(out.Extensions, e)
+		out.ExtensionsMap[e.Id.String()] = e
+
+		if len(e.Id) == 4 && e.Id[0] == 2 && e.Id[1] == 5 && e.Id[2] == 29 {
+			switch e.Id[3] {
+			case 15:
+				// RFC 5280, 4.2.1.3
+				var usageBits asn1.BitString
+				_, err := asn1.Unmarshal(e.Value, &usageBits)
+
+				if err == nil {
+					var usage int
+					for i := 0; i < 9; i++ {
+						if usageBits.At(i) != 0 {
+							usage |= 1 << uint(i)
+						}
+					}
+					out.KeyUsage = KeyUsage(usage)
+					continue
+				}
+			case 19:
+				// RFC 5280, 4.2.1.9
+				var constraints basicConstraints
+				_, err := asn1.Unmarshal(e.Value, &constraints)
+
+				if err == nil {
+					out.BasicConstraintsValid = true
+					out.IsCA = constraints.IsCA
+					out.MaxPathLen = constraints.MaxPathLen
+					out.MaxPathLenZero = out.MaxPathLen == 0
+					continue
+				}
+			case 17:
+				out.OtherNames, out.DNSNames, out.EmailAddresses, out.URIs, out.DirectoryNames, out.EDIPartyNames, out.IPAddresses, out.RegisteredIDs, err = parseGeneralNames(e.Value)
+				if err != nil {
+					return nil, err
+				}
+
+				if len(out.DNSNames) > 0 || len(out.EmailAddresses) > 0 || len(out.IPAddresses) > 0 {
+					continue
+				}
+				// If we didn't parse any of the names then we
+				// fall through to the critical check below.
+			case 18:
+				out.IANOtherNames, out.IANDNSNames, out.IANEmailAddresses, out.IANURIs, out.IANDirectoryNames, out.IANEDIPartyNames, out.IANIPAddresses, out.IANRegisteredIDs, err = parseGeneralNames(e.Value)
+				if err != nil {
+					return nil, err
+				}
+
+				if len(out.IANDNSNames) > 0 || len(out.IANEmailAddresses) > 0 || len(out.IANIPAddresses) > 0 {
+					continue
+				}
+			case 30:
+				// RFC 5280, 4.2.1.10
+
+				// NameConstraints ::= SEQUENCE {
+				//      permittedSubtrees [0] GeneralSubtrees OPTIONAL,
+				//      excludedSubtrees  [1] GeneralSubtrees OPTIONAL }
+				//
+				// GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
+				//
+				// GeneralSubtree ::= SEQUENCE {
+				//      base GeneralName,
+				//      Min  [0] BaseDistance DEFAULT 0,
+				//      Max  [1] BaseDistance OPTIONAL }
+				//
+				// BaseDistance ::= INTEGER (0..MAX)
+
+				var constraints nameConstraints
+				_, err := asn1.Unmarshal(e.Value, &constraints)
+				if err != nil {
+					return nil, err
+				}
+
+				if e.Critical {
+					out.NameConstraintsCritical = true
+				}
+
+				for _, subtree := range constraints.Permitted {
+					switch subtree.Value.Tag {
+					case 1:
+						out.PermittedEmailAddresses = append(out.PermittedEmailAddresses, GeneralSubtreeString{Data: string(subtree.Value.Bytes), Max: subtree.Max, Min: subtree.Min})
+					case 2:
+						out.PermittedDNSNames = append(out.PermittedDNSNames, GeneralSubtreeString{Data: string(subtree.Value.Bytes), Max: subtree.Max, Min: subtree.Min})
+					case 3:
+						out.PermittedX400Addresses = append(out.PermittedX400Addresses, GeneralSubtreeRaw{Data: subtree.Value, Max: subtree.Max, Min: subtree.Min})
+					case 4:
+						var rawdn pkix.RDNSequence
+						if _, err := asn1.Unmarshal(subtree.Value.Bytes, &rawdn); err != nil {
+							return out, err
+						}
+						var dn pkix.Name
+						dn.FillFromRDNSequence(&rawdn)
+						out.PermittedDirectoryNames = append(out.PermittedDirectoryNames, GeneralSubtreeName{Data: dn, Max: subtree.Max, Min: subtree.Min})
+					case 5:
+						var ediName pkix.EDIPartyName
+						_, err = asn1.UnmarshalWithParams(subtree.Value.FullBytes, &ediName, "tag:5")
+						if err != nil {
+							return out, err
+						}
+						out.PermittedEdiPartyNames = append(out.PermittedEdiPartyNames, GeneralSubtreeEdi{Data: ediName, Max: subtree.Max, Min: subtree.Min})
+					case 7:
+						switch len(subtree.Value.Bytes) {
+						case net.IPv4len * 2:
+							ip := net.IPNet{IP: subtree.Value.Bytes[:net.IPv4len], Mask: subtree.Value.Bytes[net.IPv4len:]}
+							out.PermittedIPAddresses = append(out.PermittedIPAddresses, GeneralSubtreeIP{Data: ip, Max: subtree.Max, Min: subtree.Min})
+						case net.IPv6len * 2:
+							ip := net.IPNet{IP: subtree.Value.Bytes[:net.IPv6len], Mask: subtree.Value.Bytes[net.IPv6len:]}
+							out.PermittedIPAddresses = append(out.PermittedIPAddresses, GeneralSubtreeIP{Data: ip, Max: subtree.Max, Min: subtree.Min})
+						default:
+							return out, errors.New("x509: certificate name constraint contained IP address range of length " + strconv.Itoa(len(subtree.Value.Bytes)))
+						}
+					case 8:
+						var id asn1.ObjectIdentifier
+						_, err = asn1.UnmarshalWithParams(subtree.Value.FullBytes, &id, "tag:8")
+						if err != nil {
+							return out, err
+						}
+						out.PermittedRegisteredIDs = append(out.PermittedRegisteredIDs, GeneralSubtreeOid{Data: id, Max: subtree.Max, Min: subtree.Min})
+					}
+				}
+				for _, subtree := range constraints.Excluded {
+					switch subtree.Value.Tag {
+					case 1:
+						out.ExcludedEmailAddresses = append(out.ExcludedEmailAddresses, GeneralSubtreeString{Data: string(subtree.Value.Bytes), Max: subtree.Max, Min: subtree.Min})
+					case 2:
+						out.ExcludedDNSNames = append(out.ExcludedDNSNames, GeneralSubtreeString{Data: string(subtree.Value.Bytes), Max: subtree.Max, Min: subtree.Min})
+					case 3:
+						out.ExcludedX400Addresses = append(out.ExcludedX400Addresses, GeneralSubtreeRaw{Data: subtree.Value, Max: subtree.Max, Min: subtree.Min})
+					case 4:
+						var rawdn pkix.RDNSequence
+						if _, err := asn1.Unmarshal(subtree.Value.Bytes, &rawdn); err != nil {
+							return out, err
+						}
+						var dn pkix.Name
+						dn.FillFromRDNSequence(&rawdn)
+						out.ExcludedDirectoryNames = append(out.ExcludedDirectoryNames, GeneralSubtreeName{Data: dn, Max: subtree.Max, Min: subtree.Min})
+					case 5:
+						var ediName pkix.EDIPartyName
+						_, err = asn1.Unmarshal(subtree.Value.Bytes, &ediName)
+						if err != nil {
+							return out, err
+						}
+						out.ExcludedEdiPartyNames = append(out.ExcludedEdiPartyNames, GeneralSubtreeEdi{Data: ediName, Max: subtree.Max, Min: subtree.Min})
+					case 7:
+						switch len(subtree.Value.Bytes) {
+						case net.IPv4len * 2:
+							ip := net.IPNet{IP: subtree.Value.Bytes[:net.IPv4len], Mask: subtree.Value.Bytes[net.IPv4len:]}
+							out.ExcludedIPAddresses = append(out.ExcludedIPAddresses, GeneralSubtreeIP{Data: ip, Max: subtree.Max, Min: subtree.Min})
+						case net.IPv6len * 2:
+							ip := net.IPNet{IP: subtree.Value.Bytes[:net.IPv6len], Mask: subtree.Value.Bytes[net.IPv6len:]}
+							out.ExcludedIPAddresses = append(out.ExcludedIPAddresses, GeneralSubtreeIP{Data: ip, Max: subtree.Max, Min: subtree.Min})
+						default:
+							return out, errors.New("x509: certificate name constraint contained IP address range of length " + strconv.Itoa(len(subtree.Value.Bytes)))
+						}
+					case 8:
+						var id asn1.ObjectIdentifier
+						_, err = asn1.Unmarshal(subtree.Value.Bytes, &id)
+						if err != nil {
+							return out, err
+						}
+						out.ExcludedRegisteredIDs = append(out.ExcludedRegisteredIDs, GeneralSubtreeOid{Data: id, Max: subtree.Max, Min: subtree.Min})
+					}
+				}
+				continue
+
+			case 31:
+				// RFC 5280, 4.2.1.13
+
+				// CRLDistributionPoints ::= SEQUENCE SIZE (1..MAX) OF DistributionPoint
+				//
+				// DistributionPoint ::= SEQUENCE {
+				//     distributionPoint       [0] DistributionPointName OPTIONAL,
+				//     reasons                 [1] ReasonFlags OPTIONAL,
+				//     cRLIssuer               [2] GeneralNames OPTIONAL }
+				//
+				// DistributionPointName ::= CHOICE {
+				//     fullName                [0] GeneralNames,
+				//     nameRelativeToCRLIssuer [1] RelativeDistinguishedName }
+
+				var cdp []distributionPoint
+				_, err := asn1.Unmarshal(e.Value, &cdp)
+				if err != nil {
+					return nil, err
+				}
+
+				for _, dp := range cdp {
+					// Per RFC 5280, 4.2.1.13, one of distributionPoint or cRLIssuer may be empty.
+					if len(dp.DistributionPoint.FullName.Bytes) == 0 {
+						continue
+					}
+
+					var n asn1.RawValue
+					dpName := dp.DistributionPoint.FullName.Bytes
+					// FullName is a GeneralNames, which is a SEQUENCE OF
+					// GeneralName, which in turn is a CHOICE.
+					// Per https://www.ietf.org/rfc/rfc5280.txt, multiple names
+					// for a single DistributionPoint give different pointers to
+					// the same CRL.
+					for len(dpName) > 0 {
+						dpName, err = asn1.Unmarshal(dpName, &n)
+						if err != nil {
+							return nil, err
+						}
+						if n.Tag == 6 {
+							out.CRLDistributionPoints = append(out.CRLDistributionPoints, string(n.Bytes))
+						}
+					}
+				}
+				continue
+
+			case 35:
+				// RFC 5280, 4.2.1.1
+				var a authKeyId
+				_, err = asn1.Unmarshal(e.Value, &a)
+				if err != nil {
+					return nil, err
+				}
+				out.AuthorityKeyId = a.Id
+				continue
+
+			case 37:
+				// RFC 5280, 4.2.1.12.  Extended Key Usage
+
+				// id-ce-extKeyUsage OBJECT IDENTIFIER ::= { id-ce 37 }
+				//
+				// ExtKeyUsageSyntax ::= SEQUENCE SIZE (1..MAX) OF KeyPurposeId
+				//
+				// KeyPurposeId ::= OBJECT IDENTIFIER
+
+				var keyUsage []asn1.ObjectIdentifier
+				_, err = asn1.Unmarshal(e.Value, &keyUsage)
+				if err != nil {
+					return nil, err
+				}
+
+				for _, u := range keyUsage {
+					if extKeyUsage, ok := extKeyUsageFromOID(u); ok {
+						out.ExtKeyUsage = append(out.ExtKeyUsage, extKeyUsage)
+					} else {
+						out.UnknownExtKeyUsage = append(out.UnknownExtKeyUsage, u)
+					}
+				}
+
+				continue
+
+			case 14:
+				// RFC 5280, 4.2.1.2
+				var keyid []byte
+				_, err = asn1.Unmarshal(e.Value, &keyid)
+				if err != nil {
+					return nil, err
+				}
+				out.SubjectKeyId = keyid
+				continue
+
+			case 32:
+				// RFC 5280 4.2.1.4: Certificate Policies
+				var policies []policyInformation
+				if _, err = asn1.Unmarshal(e.Value, &policies); err != nil {
+					return nil, err
+				}
+				out.PolicyIdentifiers = make([]asn1.ObjectIdentifier, len(policies))
+				out.QualifierId = make([][]asn1.ObjectIdentifier, len(policies))
+				out.ExplicitTexts = make([][]asn1.RawValue, len(policies))
+				out.NoticeRefOrgnization = make([][]asn1.RawValue, len(policies))
+				out.NoticeRefNumbers = make([][]NoticeNumber, len(policies))
+				out.ParsedExplicitTexts = make([][]string, len(policies))
+				out.ParsedNoticeRefOrganization = make([][]string, len(policies))
+				out.CPSuri = make([][]string, len(policies))
+
+				for i, policy := range policies {
+					out.PolicyIdentifiers[i] = policy.Policy
+					// parse optional Qualifier for zlint
+					for _, qualifier := range policy.Qualifiers {
+						out.QualifierId[i] = append(out.QualifierId[i], qualifier.PolicyQualifierId)
+						userNoticeOID := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2}
+						cpsURIOID := asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1}
+						if qualifier.PolicyQualifierId.Equal(userNoticeOID) {
+							var un userNotice
+							if _, err = asn1.Unmarshal(qualifier.Qualifier.FullBytes, &un); err != nil {
+								return nil, err
+							}
+							if len(un.ExplicitText.Bytes) != 0 {
+								out.ExplicitTexts[i] = append(out.ExplicitTexts[i], un.ExplicitText)
+								out.ParsedExplicitTexts[i] = append(out.ParsedExplicitTexts[i], string(un.ExplicitText.Bytes))
+							}
+							if un.NoticeRef.Organization.Bytes != nil || un.NoticeRef.NoticeNumbers != nil {
+								out.NoticeRefOrgnization[i] = append(out.NoticeRefOrgnization[i], un.NoticeRef.Organization)
+								out.NoticeRefNumbers[i] = append(out.NoticeRefNumbers[i], un.NoticeRef.NoticeNumbers)
+								out.ParsedNoticeRefOrganization[i] = append(out.ParsedNoticeRefOrganization[i], string(un.NoticeRef.Organization.Bytes))
+							}
+						}
+						if qualifier.PolicyQualifierId.Equal(cpsURIOID) {
+							var cpsURIRaw asn1.RawValue
+							if _, err = asn1.Unmarshal(qualifier.Qualifier.FullBytes, &cpsURIRaw); err != nil {
+								return nil, err
+							}
+							out.CPSuri[i] = append(out.CPSuri[i], string(cpsURIRaw.Bytes))
+						}
+					}
+				}
+				if out.SelfSigned {
+					out.ValidationLevel = UnknownValidationLevel
+				} else {
+					// See http://unmitigatedrisk.com/?p=203
+					validationLevel := getMaxCertValidationLevel(out.PolicyIdentifiers)
+					if validationLevel == UnknownValidationLevel {
+						if (len(out.Subject.Organization) > 0 && out.Subject.Organization[0] == out.Subject.CommonName) || (len(out.Subject.OrganizationalUnit) > 0 && strings.Contains(out.Subject.OrganizationalUnit[0], "Domain Control Validated")) {
+							if len(out.Subject.Locality) == 0 && len(out.Subject.Province) == 0 && len(out.Subject.PostalCode) == 0 {
+								validationLevel = DV
+							}
+						} else if len(out.Subject.Organization) > 0 && out.Subject.Organization[0] == "Persona Not Validated" && strings.Contains(out.Issuer.CommonName, "StartCom") {
Validated" && strings.Contains(out.Issuer.CommonName, "StartCom") { + validationLevel = DV + } + } + out.ValidationLevel = validationLevel + } + } + } else if e.Id.Equal(oidExtensionAuthorityInfoAccess) { + // RFC 5280 4.2.2.1: Authority Information Access + var aia []authorityInfoAccess + if _, err = asn1.Unmarshal(e.Value, &aia); err != nil { + return nil, err + } + + for _, v := range aia { + // GeneralName: uniformResourceIdentifier [6] IA5String + if v.Location.Tag != 6 { + continue + } + if v.Method.Equal(oidAuthorityInfoAccessOcsp) { + out.OCSPServer = append(out.OCSPServer, string(v.Location.Bytes)) + } else if v.Method.Equal(oidAuthorityInfoAccessIssuers) { + out.IssuingCertificateURL = append(out.IssuingCertificateURL, string(v.Location.Bytes)) + } + } + } else if e.Id.Equal(oidExtensionSignedCertificateTimestampList) { + // SignedCertificateTimestamp + //var scts asn1.RawValue + var scts []byte + if _, err = asn1.Unmarshal(e.Value, &scts); err != nil { + return nil, err + } + // ignore length of + if len(scts) < 2 { + return nil, errors.New("malformed SCT extension: length field") + } + scts = scts[2:] + for len(scts) > 0 { + length := int(scts[1]) + (int(scts[0]) << 8) + if (length + 2) > len(scts) { + return nil, errors.New("malformed SCT extension: incomplete SCT") + } + sct, err := ct.DeserializeSCT(bytes.NewReader(scts[2 : length+2])) + if err != nil { + return nil, err + } + scts = scts[2+length:] + out.SignedCertificateTimestampList = append(out.SignedCertificateTimestampList, sct) + } + } else if e.Id.Equal(oidExtensionCTPrecertificatePoison) { + if e.Value[0] == 5 && e.Value[1] == 0 { + out.IsPrecert = true + continue + } else { + return nil, UnhandledCriticalExtension{e.Id, "Malformed precert poison"} + } + } else if e.Id.Equal(oidBRTorServiceDescriptor) { + descs, err := parseTorServiceDescriptorSyntax(e) + if err != nil { + return nil, err + } + out.TorServiceDescriptors = descs + } + + //if e.Critical { + // return out, UnhandledCriticalExtension{e.Id} + //} + } + + return out, nil +} + +// ParseCertificate parses a single certificate from the given ASN.1 DER data. +func ParseCertificate(asn1Data []byte) (*Certificate, error) { + var cert certificate + rest, err := asn1.Unmarshal(asn1Data, &cert) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + return parseCertificate(&cert) +} + +// ParseCertificates parses one or more certificates from the given ASN.1 DER +// data. The certificates must be concatenated with no intermediate padding. +func ParseCertificates(asn1Data []byte) ([]*Certificate, error) { + var v []*certificate + + for len(asn1Data) > 0 { + cert := new(certificate) + var err error + asn1Data, err = asn1.Unmarshal(asn1Data, cert) + if err != nil { + return nil, err + } + v = append(v, cert) + } + + ret := make([]*Certificate, len(v)) + for i, ci := range v { + cert, err := parseCertificate(ci) + if err != nil { + return nil, err + } + ret[i] = cert + } + + return ret, nil +} + +func ParseTBSCertificate(asn1Data []byte) (*Certificate, error) { + var tbsCert tbsCertificate + rest, err := asn1.Unmarshal(asn1Data, &tbsCert) + if err != nil { + //log.Print("Err unmarshalling asn1Data", asn1Data, rest) + return nil, err + } + if len(rest) > 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + return parseCertificate(&certificate{ + Raw: tbsCert.Raw, + TBSCertificate: tbsCert}) +} + +// SubjectAndKey represents a (subjecty, subject public key info) tuple. 
+type SubjectAndKey struct {
+	RawSubject              []byte
+	RawSubjectPublicKeyInfo []byte
+	Fingerprint             CertificateFingerprint
+	PublicKey               interface{}
+	PublicKeyAlgorithm      PublicKeyAlgorithm
+}
+
+// SubjectAndKey returns a SubjectAndKey for this certificate.
+func (c *Certificate) SubjectAndKey() *SubjectAndKey {
+	return &SubjectAndKey{
+		RawSubject:              c.RawSubject,
+		RawSubjectPublicKeyInfo: c.RawSubjectPublicKeyInfo,
+		Fingerprint:             c.SPKISubjectFingerprint,
+		PublicKey:               c.PublicKey,
+		PublicKeyAlgorithm:      c.PublicKeyAlgorithm,
+	}
+}
+
+func reverseBitsInAByte(in byte) byte {
+	b1 := in>>4 | in<<4
+	b2 := b1>>2&0x33 | b1<<2&0xcc
+	b3 := b2>>1&0x55 | b2<<1&0xaa
+	return b3
+}
+
+// asn1BitLength returns the bit-length of bitString by considering the
+// most-significant bit in a byte to be the "first" bit. This convention
+// matches ASN.1, but differs from almost everything else.
+func asn1BitLength(bitString []byte) int {
+	bitLen := len(bitString) * 8
+
+	for i := range bitString {
+		b := bitString[len(bitString)-i-1]
+
+		for bit := uint(0); bit < 8; bit++ {
+			if (b>>bit)&1 == 1 {
+				return bitLen
+			}
+			bitLen--
+		}
+	}
+
+	return 0
+}
+
+var (
+	oidExtensionSubjectKeyId                   = []int{2, 5, 29, 14}
+	oidExtensionKeyUsage                       = []int{2, 5, 29, 15}
+	oidExtensionExtendedKeyUsage               = []int{2, 5, 29, 37}
+	oidExtensionAuthorityKeyId                 = []int{2, 5, 29, 35}
+	oidExtensionBasicConstraints               = []int{2, 5, 29, 19}
+	oidExtensionSubjectAltName                 = []int{2, 5, 29, 17}
+	oidExtensionIssuerAltName                  = []int{2, 5, 29, 18}
+	oidExtensionCertificatePolicies            = []int{2, 5, 29, 32}
+	oidExtensionNameConstraints                = []int{2, 5, 29, 30}
+	oidExtensionCRLDistributionPoints          = []int{2, 5, 29, 31}
+	oidExtensionAuthorityInfoAccess            = []int{1, 3, 6, 1, 5, 5, 7, 1, 1}
+	oidExtensionSignedCertificateTimestampList = []int{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2}
+)
+
+var (
+	oidAuthorityInfoAccessOcsp    = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1}
+	oidAuthorityInfoAccessIssuers = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 2}
+)
+
+// oidInExtensions reports whether an extension with the given OID exists in
+// extensions.
+func oidInExtensions(oid asn1.ObjectIdentifier, extensions []pkix.Extension) bool {
+	for _, e := range extensions {
+		if e.Id.Equal(oid) {
+			return true
+		}
+	}
+	return false
+}
+
+// marshalSANs marshals a list of addresses into the contents of an X.509
+// SubjectAlternativeName extension.
+func marshalSANs(dnsNames, emailAddresses []string, ipAddresses []net.IP) (derBytes []byte, err error) {
+	var rawValues []asn1.RawValue
+	for _, name := range dnsNames {
+		rawValues = append(rawValues, asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(name)})
+	}
+	for _, email := range emailAddresses {
+		rawValues = append(rawValues, asn1.RawValue{Tag: 1, Class: 2, Bytes: []byte(email)})
+	}
+	for _, rawIP := range ipAddresses {
+		// If possible, we always want to encode IPv4 addresses in 4 bytes.
+		ip := rawIP.To4()
+		if ip == nil {
+			ip = rawIP
+		}
+		rawValues = append(rawValues, asn1.RawValue{Tag: 7, Class: 2, Bytes: ip})
+	}
+	return asn1.Marshal(rawValues)
+}
+
+// NOTE: ignoring the authorityKeyID argument
+func buildExtensions(template *Certificate, _ []byte) (ret []pkix.Extension, err error) {
+	ret = make([]pkix.Extension, 10 /* maximum number of elements. */)
+	n := 0
+
+	if template.KeyUsage != 0 &&
+		!oidInExtensions(oidExtensionKeyUsage, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionKeyUsage
+		ret[n].Critical = true
+
+		var a [2]byte
+		a[0] = reverseBitsInAByte(byte(template.KeyUsage))
+		a[1] = reverseBitsInAByte(byte(template.KeyUsage >> 8))
+
+		l := 1
+		if a[1] != 0 {
+			l = 2
+		}
+
+		ret[n].Value, err = asn1.Marshal(asn1.BitString{Bytes: a[0:l], BitLength: l * 8})
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if (len(template.ExtKeyUsage) > 0 || len(template.UnknownExtKeyUsage) > 0) &&
+		!oidInExtensions(oidExtensionExtendedKeyUsage, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionExtendedKeyUsage
+
+		var oids []asn1.ObjectIdentifier
+		for _, u := range template.ExtKeyUsage {
+			if oid, ok := oidFromExtKeyUsage(u); ok {
+				oids = append(oids, oid)
+			} else {
+				panic("internal error")
+			}
+		}
+
+		oids = append(oids, template.UnknownExtKeyUsage...)
+
+		ret[n].Value, err = asn1.Marshal(oids)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if template.BasicConstraintsValid && !oidInExtensions(oidExtensionBasicConstraints, template.ExtraExtensions) {
+		// Leaving MaxPathLen as zero indicates that no maximum path
+		// length is desired, unless MaxPathLenZero is set. A value of
+		// -1 causes encoding/asn1 to omit the value as desired.
+		maxPathLen := template.MaxPathLen
+		if maxPathLen == 0 && !template.MaxPathLenZero {
+			maxPathLen = -1
+		}
+		ret[n].Id = oidExtensionBasicConstraints
+		ret[n].Value, err = asn1.Marshal(basicConstraints{template.IsCA, maxPathLen})
+		ret[n].Critical = true
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if len(template.SubjectKeyId) > 0 && !oidInExtensions(oidExtensionSubjectKeyId, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionSubjectKeyId
+		ret[n].Value, err = asn1.Marshal(template.SubjectKeyId)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if len(template.AuthorityKeyId) > 0 && !oidInExtensions(oidExtensionAuthorityKeyId, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionAuthorityKeyId
+		ret[n].Value, err = asn1.Marshal(authKeyId{template.AuthorityKeyId})
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if (len(template.OCSPServer) > 0 || len(template.IssuingCertificateURL) > 0) &&
+		!oidInExtensions(oidExtensionAuthorityInfoAccess, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionAuthorityInfoAccess
+		var aiaValues []authorityInfoAccess
+		for _, name := range template.OCSPServer {
+			aiaValues = append(aiaValues, authorityInfoAccess{
+				Method:   oidAuthorityInfoAccessOcsp,
+				Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
+			})
+		}
+		for _, name := range template.IssuingCertificateURL {
+			aiaValues = append(aiaValues, authorityInfoAccess{
+				Method:   oidAuthorityInfoAccessIssuers,
+				Location: asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)},
+			})
+		}
+		ret[n].Value, err = asn1.Marshal(aiaValues)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0) &&
+		!oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionSubjectAltName
+		ret[n].Value, err = marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if len(template.PolicyIdentifiers) > 0 &&
+		!oidInExtensions(oidExtensionCertificatePolicies, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionCertificatePolicies
+		policies := make([]policyInformation, len(template.PolicyIdentifiers))
+		for i, policy := range template.PolicyIdentifiers {
+			policies[i].Policy = policy
+		}
+		ret[n].Value, err = asn1.Marshal(policies)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	// TODO: this can be cleaned up in go1.10
+	if (len(template.PermittedEmailAddresses) > 0 || len(template.PermittedDNSNames) > 0 || len(template.PermittedDirectoryNames) > 0 ||
+		len(template.PermittedIPAddresses) > 0 || len(template.ExcludedEmailAddresses) > 0 || len(template.ExcludedDNSNames) > 0 ||
+		len(template.ExcludedDirectoryNames) > 0 || len(template.ExcludedIPAddresses) > 0) &&
+		!oidInExtensions(oidExtensionNameConstraints, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionNameConstraints
+		if template.NameConstraintsCritical {
+			ret[n].Critical = true
+		}
+
+		var out nameConstraints
+
+		for _, permitted := range template.PermittedEmailAddresses {
+			out.Permitted = append(out.Permitted, generalSubtree{Value: asn1.RawValue{Tag: 1, Class: 2, Bytes: []byte(permitted.Data)}})
+		}
+		for _, excluded := range template.ExcludedEmailAddresses {
+			out.Excluded = append(out.Excluded, generalSubtree{Value: asn1.RawValue{Tag: 1, Class: 2, Bytes: []byte(excluded.Data)}})
+		}
+		for _, permitted := range template.PermittedDNSNames {
+			out.Permitted = append(out.Permitted, generalSubtree{Value: asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(permitted.Data)}})
+		}
+		for _, excluded := range template.ExcludedDNSNames {
+			out.Excluded = append(out.Excluded, generalSubtree{Value: asn1.RawValue{Tag: 2, Class: 2, Bytes: []byte(excluded.Data)}})
+		}
+		for _, permitted := range template.PermittedDirectoryNames {
+			var dn []byte
+			dn, err = asn1.Marshal(permitted.Data.ToRDNSequence())
+			if err != nil {
+				return
+			}
+			out.Permitted = append(out.Permitted, generalSubtree{Value: asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: dn}})
+		}
+		for _, excluded := range template.ExcludedDirectoryNames {
+			var dn []byte
+			dn, err = asn1.Marshal(excluded.Data.ToRDNSequence())
+			if err != nil {
+				return
+			}
+			out.Excluded = append(out.Excluded, generalSubtree{Value: asn1.RawValue{Tag: 4, Class: 2, IsCompound: true, Bytes: dn}})
+		}
+		for _, permitted := range template.PermittedIPAddresses {
+			ip := append(permitted.Data.IP, permitted.Data.Mask...)
+			out.Permitted = append(out.Permitted, generalSubtree{Value: asn1.RawValue{Tag: 7, Class: 2, Bytes: ip}})
+		}
+		for _, excluded := range template.ExcludedIPAddresses {
+			ip := append(excluded.Data.IP, excluded.Data.Mask...)
+			out.Excluded = append(out.Excluded, generalSubtree{Value: asn1.RawValue{Tag: 7, Class: 2, Bytes: ip}})
+		}
+		ret[n].Value, err = asn1.Marshal(out)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	if len(template.CRLDistributionPoints) > 0 &&
+		!oidInExtensions(oidExtensionCRLDistributionPoints, template.ExtraExtensions) {
+		ret[n].Id = oidExtensionCRLDistributionPoints
+
+		var crlDp []distributionPoint
+		for _, name := range template.CRLDistributionPoints {
+			rawFullName, _ := asn1.Marshal(asn1.RawValue{Tag: 6, Class: 2, Bytes: []byte(name)})
+
+			dp := distributionPoint{
+				DistributionPoint: distributionPointName{
+					FullName: asn1.RawValue{Tag: 0, Class: 2, IsCompound: true, Bytes: rawFullName},
+				},
+			}
+			crlDp = append(crlDp, dp)
+		}
+
+		ret[n].Value, err = asn1.Marshal(crlDp)
+		if err != nil {
+			return
+		}
+		n++
+	}
+
+	// Adding another extension here? Remember to update the maximum number
+	// of elements in the make() at the top of the function.
+
+	return append(ret[:n], template.ExtraExtensions...), nil
+}
+
+func subjectBytes(cert *Certificate) ([]byte, error) {
+	if len(cert.RawSubject) > 0 {
+		return cert.RawSubject, nil
+	}
+
+	return asn1.Marshal(cert.Subject.ToRDNSequence())
+}
+
+// signingParamsForPublicKey returns the parameters to use for signing with
+// priv. If requestedSigAlgo is not zero then it overrides the default
+// signature algorithm.
+func signingParamsForPublicKey(pub interface{}, requestedSigAlgo SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) {
+	var pubType PublicKeyAlgorithm
+
+	switch pub := pub.(type) {
+	case *rsa.PublicKey:
+		pubType = RSA
+		hashFunc = crypto.SHA256
+		sigAlgo.Algorithm = oidSignatureSHA256WithRSA
+		sigAlgo.Parameters = asn1.NullRawValue
+
+	case *ecdsa.PublicKey:
+		pubType = ECDSA
+
+		switch pub.Curve {
+		case elliptic.P224(), elliptic.P256():
+			hashFunc = crypto.SHA256
+			sigAlgo.Algorithm = oidSignatureECDSAWithSHA256
+		case elliptic.P384():
+			hashFunc = crypto.SHA384
+			sigAlgo.Algorithm = oidSignatureECDSAWithSHA384
+		case elliptic.P521():
+			hashFunc = crypto.SHA512
+			sigAlgo.Algorithm = oidSignatureECDSAWithSHA512
+		default:
+			err = errors.New("x509: unknown elliptic curve")
+		}
+
+	default:
+		err = errors.New("x509: only RSA and ECDSA keys supported")
+	}
+
+	if err != nil {
+		return
+	}
+
+	if requestedSigAlgo == 0 {
+		return
+	}
+
+	found := false
+	for _, details := range signatureAlgorithmDetails {
+		if details.algo == requestedSigAlgo {
+			if details.pubKeyAlgo != pubType {
+				err = errors.New("x509: requested SignatureAlgorithm does not match private key type")
+				return
+			}
+			sigAlgo.Algorithm, hashFunc = details.oid, details.hash
+			if hashFunc == 0 {
+				err = errors.New("x509: cannot sign with hash function requested")
+				return
+			}
+			if requestedSigAlgo.isRSAPSS() {
+				sigAlgo.Parameters = rsaPSSParameters(hashFunc)
+			}
+			found = true
+			break
+		}
+	}
+
+	if !found {
+		err = errors.New("x509: unknown SignatureAlgorithm")
+	}
+
+	return
+}
+
+// CreateCertificate creates a new certificate based on a template. The
+// following members of template are used: AuthorityKeyId,
+// BasicConstraintsValid, CRLDistributionPoints, DNSNames, EmailAddresses,
+// ExtKeyUsage, IPAddresses, IsCA, IssuingCertificateURL, KeyUsage,
+// MaxPathLen, MaxPathLenZero, NameConstraintsCritical, NotAfter, NotBefore,
+// OCSPServer, the Permitted*/Excluded* name-constraint fields,
+// PolicyIdentifiers, SerialNumber, SignatureAlgorithm, Subject,
+// SubjectKeyId, and UnknownExtKeyUsage.
+//
+// The certificate is signed by parent. If parent is equal to template then the
+// certificate is self-signed. The parameter pub is the public key of the
+// signee and priv is the private key of the signer.
+//
+// The returned slice is the certificate in DER encoding.
+//
+// All key types that are implemented via crypto.Signer are supported (this
+// includes *rsa.PublicKey and *ecdsa.PublicKey).
+//
+// The AuthorityKeyId will be taken from the SubjectKeyId of parent, if any,
+// unless the resulting certificate is self-signed. Otherwise the value from
+// template will be used.
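+//
+// A minimal self-signed sketch (illustrative only; error handling elided,
+// imports assumed: crypto/ecdsa, crypto/elliptic, crypto/rand, math/big, time):
+//
+//	priv, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
+//	tmpl := &Certificate{
+//		SerialNumber: big.NewInt(1),
+//		Subject:      pkix.Name{CommonName: "example.com"},
+//		NotBefore:    time.Now(),
+//		NotAfter:     time.Now().Add(24 * time.Hour),
+//	}
+//	der, err := CreateCertificate(rand.Reader, tmpl, tmpl, &priv.PublicKey, priv)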
+func CreateCertificate(rand io.Reader, template, parent *Certificate, pub, priv interface{}) (cert []byte, err error) {
+	key, ok := priv.(crypto.Signer)
+	if !ok {
+		return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+	}
+
+	if template.SerialNumber == nil {
+		return nil, errors.New("x509: no SerialNumber given")
+	}
+
+	hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm)
+	if err != nil {
+		return nil, err
+	}
+
+	publicKeyBytes, publicKeyAlgorithm, err := marshalPublicKey(pub)
+	if err != nil {
+		return nil, err
+	}
+
+	asn1Issuer, err := subjectBytes(parent)
+	if err != nil {
+		return
+	}
+
+	asn1Subject, err := subjectBytes(template)
+	if err != nil {
+		return
+	}
+
+	authorityKeyId := template.AuthorityKeyId
+	if !bytes.Equal(asn1Issuer, asn1Subject) && len(parent.SubjectKeyId) > 0 {
+		authorityKeyId = parent.SubjectKeyId
+	}
+
+	extensions, err := buildExtensions(template, authorityKeyId)
+	if err != nil {
+		return
+	}
+
+	encodedPublicKey := asn1.BitString{BitLength: len(publicKeyBytes) * 8, Bytes: publicKeyBytes}
+	c := tbsCertificate{
+		Version:            2,
+		SerialNumber:       template.SerialNumber,
+		SignatureAlgorithm: signatureAlgorithm,
+		Issuer:             asn1.RawValue{FullBytes: asn1Issuer},
+		Validity:           validity{template.NotBefore.UTC(), template.NotAfter.UTC()},
+		Subject:            asn1.RawValue{FullBytes: asn1Subject},
+		PublicKey:          publicKeyInfo{nil, publicKeyAlgorithm, encodedPublicKey},
+		Extensions:         extensions,
+	}
+
+	tbsCertContents, err := asn1.Marshal(c)
+	if err != nil {
+		return
+	}
+
+	c.Raw = tbsCertContents
+
+	h := hashFunc.New()
+	h.Write(tbsCertContents)
+	digest := h.Sum(nil)
+
+	var signerOpts crypto.SignerOpts
+	signerOpts = hashFunc
+	if template.SignatureAlgorithm != 0 && template.SignatureAlgorithm.isRSAPSS() {
+		signerOpts = &rsa.PSSOptions{
+			SaltLength: rsa.PSSSaltLengthEqualsHash,
+			Hash:       hashFunc,
+		}
+	}
+
+	var signature []byte
+	signature, err = key.Sign(rand, digest, signerOpts)
+	if err != nil {
+		return
+	}
+
+	return asn1.Marshal(certificate{
+		nil,
+		c,
+		signatureAlgorithm,
+		asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+	})
+}
+
+// pemCRLPrefix is the magic string that indicates that we have a PEM encoded
+// CRL.
+var pemCRLPrefix = []byte("-----BEGIN X509 CRL")
+
+// pemType is the type of a PEM encoded CRL.
+var pemType = "X509 CRL"
+
+// ParseCRL parses a CRL from the given bytes. It's often the case that PEM
+// encoded CRLs will appear where they should be DER encoded, so this function
+// will transparently handle PEM encoding as long as there isn't any leading
+// garbage.
+func ParseCRL(crlBytes []byte) (*pkix.CertificateList, error) {
+	if bytes.HasPrefix(crlBytes, pemCRLPrefix) {
+		block, _ := pem.Decode(crlBytes)
+		if block != nil && block.Type == pemType {
+			crlBytes = block.Bytes
+		}
+	}
+	return ParseDERCRL(crlBytes)
+}
+
+// ParseDERCRL parses a DER encoded CRL from the given bytes.
+func ParseDERCRL(derBytes []byte) (*pkix.CertificateList, error) {
+	certList := new(pkix.CertificateList)
+	if rest, err := asn1.Unmarshal(derBytes, certList); err != nil {
+		return nil, err
+	} else if len(rest) != 0 {
+		return nil, errors.New("x509: trailing data after CRL")
+	}
+	return certList, nil
+}
+
+// CreateCRL returns a DER encoded CRL, signed by this Certificate, that
+// contains the given list of revoked certificates.
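+//
+// For example (sketch; ca and caKey are assumed to be the issuing
+// *Certificate and its crypto.Signer private key):
+//
+//	revoked := []pkix.RevokedCertificate{{
+//		SerialNumber:   big.NewInt(42),
+//		RevocationTime: time.Now(),
+//	}}
+//	crlDER, err := ca.CreateCRL(rand.Reader, caKey, revoked, time.Now(), time.Now().Add(24*time.Hour))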
+func (c *Certificate) CreateCRL(rand io.Reader, priv interface{}, revokedCerts []pkix.RevokedCertificate, now, expiry time.Time) (crlBytes []byte, err error) {
+	key, ok := priv.(crypto.Signer)
+	if !ok {
+		return nil, errors.New("x509: certificate private key does not implement crypto.Signer")
+	}
+
+	hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(key.Public(), 0)
+	if err != nil {
+		return nil, err
+	}
+
+	// Force revocation times to UTC per RFC 5280.
+	revokedCertsUTC := make([]pkix.RevokedCertificate, len(revokedCerts))
+	for i, rc := range revokedCerts {
+		rc.RevocationTime = rc.RevocationTime.UTC()
+		revokedCertsUTC[i] = rc
+	}
+
+	tbsCertList := pkix.TBSCertificateList{
+		Version:             1,
+		Signature:           signatureAlgorithm,
+		Issuer:              c.Subject.ToRDNSequence(),
+		ThisUpdate:          now.UTC(),
+		NextUpdate:          expiry.UTC(),
+		RevokedCertificates: revokedCertsUTC,
+	}
+
+	// Authority Key Id
+	if len(c.SubjectKeyId) > 0 {
+		var aki pkix.Extension
+		aki.Id = oidExtensionAuthorityKeyId
+		aki.Value, err = asn1.Marshal(authKeyId{Id: c.SubjectKeyId})
+		if err != nil {
+			return
+		}
+		tbsCertList.Extensions = append(tbsCertList.Extensions, aki)
+	}
+
+	tbsCertListContents, err := asn1.Marshal(tbsCertList)
+	if err != nil {
+		return
+	}
+
+	h := hashFunc.New()
+	h.Write(tbsCertListContents)
+	digest := h.Sum(nil)
+
+	var signature []byte
+	signature, err = key.Sign(rand, digest, hashFunc)
+	if err != nil {
+		return
+	}
+
+	return asn1.Marshal(pkix.CertificateList{
+		TBSCertList:        tbsCertList,
+		SignatureAlgorithm: signatureAlgorithm,
+		SignatureValue:     asn1.BitString{Bytes: signature, BitLength: len(signature) * 8},
+	})
+}
+
+// CertificateRequest represents a PKCS #10 certificate signature request.
+type CertificateRequest struct {
+	Raw                      []byte // Complete ASN.1 DER content (CSR, signature algorithm and signature).
+	RawTBSCertificateRequest []byte // Certificate request info part of raw ASN.1 DER content.
+	RawSubjectPublicKeyInfo  []byte // DER encoded SubjectPublicKeyInfo.
+	RawSubject               []byte // DER encoded Subject.
+
+	Version            int
+	Signature          []byte
+	SignatureAlgorithm SignatureAlgorithm
+
+	PublicKeyAlgorithm PublicKeyAlgorithm
+	PublicKey          interface{}
+
+	Subject pkix.Name
+
+	// Attributes is the dried husk of a bug and shouldn't be used.
+	Attributes []pkix.AttributeTypeAndValueSET
+
+	// Extensions contains raw X.509 extensions. When parsing CSRs, this
+	// can be used to extract extensions that are not parsed by this
+	// package.
+	Extensions []pkix.Extension
+
+	// ExtraExtensions contains extensions to be copied, raw, into any
+	// marshaled CSR. Values override any extensions that would otherwise
+	// be produced based on the other fields but are overridden by any
+	// extensions specified in Attributes.
+	//
+	// The ExtraExtensions field is not populated when parsing CSRs, see
+	// Extensions.
+	ExtraExtensions []pkix.Extension
+
+	// Subject Alternate Name values.
+	DNSNames       []string
+	EmailAddresses []string
+	IPAddresses    []net.IP
+}
+
+// These structures reflect the ASN.1 structure of X.509 certificate
+// signature requests (see RFC 2986):
+
+type tbsCertificateRequest struct {
+	Raw           asn1.RawContent
+	Version       int
+	Subject       asn1.RawValue
+	PublicKey     publicKeyInfo
+	RawAttributes []asn1.RawValue `asn1:"tag:0"`
+}
+
+type certificateRequest struct {
+	Raw                asn1.RawContent
+	TBSCSR             tbsCertificateRequest
+	SignatureAlgorithm pkix.AlgorithmIdentifier
+	SignatureValue     asn1.BitString
+}
+
+// oidExtensionRequest is a PKCS#9 OBJECT IDENTIFIER that indicates requested
+// extensions in a CSR.
+var oidExtensionRequest = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 14}
+
+// newRawAttributes converts AttributeTypeAndValueSETs from a template
+// CertificateRequest's Attributes into tbsCertificateRequest RawAttributes.
+func newRawAttributes(attributes []pkix.AttributeTypeAndValueSET) ([]asn1.RawValue, error) {
+	var rawAttributes []asn1.RawValue
+	b, err := asn1.Marshal(attributes)
+	if err != nil {
+		return nil, err
+	}
+	rest, err := asn1.Unmarshal(b, &rawAttributes)
+	if err != nil {
+		return nil, err
+	}
+	if len(rest) != 0 {
+		return nil, errors.New("x509: failed to unmarshal raw CSR Attributes")
+	}
+	return rawAttributes, nil
+}
+
+// parseRawAttributes unmarshals RawAttributes into AttributeTypeAndValueSETs.
+func parseRawAttributes(rawAttributes []asn1.RawValue) []pkix.AttributeTypeAndValueSET {
+	var attributes []pkix.AttributeTypeAndValueSET
+	for _, rawAttr := range rawAttributes {
+		var attr pkix.AttributeTypeAndValueSET
+		rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr)
+		// Ignore attributes that don't parse into pkix.AttributeTypeAndValueSET
+		// (i.e.: challengePassword or unstructuredName).
+		if err == nil && len(rest) == 0 {
+			attributes = append(attributes, attr)
+		}
+	}
+	return attributes
+}
+
+// parseCSRExtensions parses the attributes from a CSR and extracts any
+// requested extensions.
+func parseCSRExtensions(rawAttributes []asn1.RawValue) ([]pkix.Extension, error) {
+	// pkcs10Attribute reflects the Attribute structure from section 4.1 of
+	// https://tools.ietf.org/html/rfc2986.
+	type pkcs10Attribute struct {
+		Id     asn1.ObjectIdentifier
+		Values []asn1.RawValue `asn1:"set"`
+	}
+
+	var ret []pkix.Extension
+	for _, rawAttr := range rawAttributes {
+		var attr pkcs10Attribute
+		if rest, err := asn1.Unmarshal(rawAttr.FullBytes, &attr); err != nil || len(rest) != 0 || len(attr.Values) == 0 {
+			// Ignore attributes that don't parse.
+			continue
+		}
+
+		if !attr.Id.Equal(oidExtensionRequest) {
+			continue
+		}
+
+		var extensions []pkix.Extension
+		if _, err := asn1.Unmarshal(attr.Values[0].FullBytes, &extensions); err != nil {
+			return nil, err
+		}
+		ret = append(ret, extensions...)
+	}
+
+	return ret, nil
+}
+
+// CreateCertificateRequest creates a new certificate request based on a
+// template. The following members of template are used: Attributes, DNSNames,
+// EmailAddresses, ExtraExtensions, IPAddresses, SignatureAlgorithm, and
+// Subject. The private key is the private key of the signer.
+//
+// The returned slice is the certificate request in DER encoding.
+//
+// All key types that are implemented via crypto.Signer are supported (this
+// includes *rsa.PublicKey and *ecdsa.PublicKey.)
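A usage sketch for the function documented above: building a CSR whose SAN values get folded into an extensionRequest attribute, as the implementation below shows. Import paths and names are the same assumptions as in the earlier sketches:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/pem"
	"log"
	"os"

	"github.com/zmap/zcrypto/x509"      // assumed import path for this package
	"github.com/zmap/zcrypto/x509/pkix" // assumed companion pkix package
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	tmpl := &x509.CertificateRequest{
		Subject:  pkix.Name{CommonName: "example.test"},
		DNSNames: []string{"example.test", "www.example.test"}, // emitted as a SAN extension request
	}

	der, err := x509.CreateCertificateRequest(rand.Reader, tmpl, priv)
	if err != nil {
		log.Fatal(err)
	}
	pem.Encode(os.Stdout, &pem.Block{Type: "CERTIFICATE REQUEST", Bytes: der})
}
```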
+func CreateCertificateRequest(rand io.Reader, template *CertificateRequest, priv interface{}) (csr []byte, err error) { + key, ok := priv.(crypto.Signer) + if !ok { + return nil, errors.New("x509: certificate private key does not implement crypto.Signer") + } + + var hashFunc crypto.Hash + var sigAlgo pkix.AlgorithmIdentifier + hashFunc, sigAlgo, err = signingParamsForPublicKey(key.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + var publicKeyBytes []byte + var publicKeyAlgorithm pkix.AlgorithmIdentifier + publicKeyBytes, publicKeyAlgorithm, err = marshalPublicKey(key.Public()) + if err != nil { + return nil, err + } + + var extensions []pkix.Extension + + if (len(template.DNSNames) > 0 || len(template.EmailAddresses) > 0 || len(template.IPAddresses) > 0) && + !oidInExtensions(oidExtensionSubjectAltName, template.ExtraExtensions) { + sanBytes, err := marshalSANs(template.DNSNames, template.EmailAddresses, template.IPAddresses) + if err != nil { + return nil, err + } + + extensions = append(extensions, pkix.Extension{ + Id: oidExtensionSubjectAltName, + Value: sanBytes, + }) + } + + extensions = append(extensions, template.ExtraExtensions...) + + var attributes []pkix.AttributeTypeAndValueSET + attributes = append(attributes, template.Attributes...) + + if len(extensions) > 0 { + // specifiedExtensions contains all the extensions that we + // found specified via template.Attributes. + specifiedExtensions := make(map[string]bool) + + for _, atvSet := range template.Attributes { + if !atvSet.Type.Equal(oidExtensionRequest) { + continue + } + + for _, atvs := range atvSet.Value { + for _, atv := range atvs { + specifiedExtensions[atv.Type.String()] = true + } + } + } + + atvs := make([]pkix.AttributeTypeAndValue, 0, len(extensions)) + for _, e := range extensions { + if specifiedExtensions[e.Id.String()] { + // Attributes already contained a value for + // this extension and it takes priority. + continue + } + + atvs = append(atvs, pkix.AttributeTypeAndValue{ + // There is no place for the critical flag in a CSR. + Type: e.Id, + Value: e.Value, + }) + } + + // Append the extensions to an existing attribute if possible. + appended := false + for _, atvSet := range attributes { + if !atvSet.Type.Equal(oidExtensionRequest) || len(atvSet.Value) == 0 { + continue + } + + atvSet.Value[0] = append(atvSet.Value[0], atvs...) + appended = true + break + } + + // Otherwise, add a new attribute for the extensions. 
+ if !appended { + attributes = append(attributes, pkix.AttributeTypeAndValueSET{ + Type: oidExtensionRequest, + Value: [][]pkix.AttributeTypeAndValue{ + atvs, + }, + }) + } + } + + asn1Subject := template.RawSubject + if len(asn1Subject) == 0 { + asn1Subject, err = asn1.Marshal(template.Subject.ToRDNSequence()) + if err != nil { + return + } + } + + rawAttributes, err := newRawAttributes(attributes) + if err != nil { + return + } + + tbsCSR := tbsCertificateRequest{ + Version: 0, // PKCS #10, RFC 2986 + Subject: asn1.RawValue{FullBytes: asn1Subject}, + PublicKey: publicKeyInfo{ + Algorithm: publicKeyAlgorithm, + PublicKey: asn1.BitString{ + Bytes: publicKeyBytes, + BitLength: len(publicKeyBytes) * 8, + }, + }, + RawAttributes: rawAttributes, + } + + tbsCSRContents, err := asn1.Marshal(tbsCSR) + if err != nil { + return + } + tbsCSR.Raw = tbsCSRContents + + h := hashFunc.New() + h.Write(tbsCSRContents) + digest := h.Sum(nil) + + var signature []byte + signature, err = key.Sign(rand, digest, hashFunc) + if err != nil { + return + } + + return asn1.Marshal(certificateRequest{ + TBSCSR: tbsCSR, + SignatureAlgorithm: sigAlgo, + SignatureValue: asn1.BitString{ + Bytes: signature, + BitLength: len(signature) * 8, + }, + }) +} + +// ParseCertificateRequest parses a single certificate request from the +// given ASN.1 DER data. +func ParseCertificateRequest(asn1Data []byte) (*CertificateRequest, error) { + var csr certificateRequest + + rest, err := asn1.Unmarshal(asn1Data, &csr) + if err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, asn1.SyntaxError{Msg: "trailing data"} + } + + return parseCertificateRequest(&csr) +} + +func parseCertificateRequest(in *certificateRequest) (*CertificateRequest, error) { + out := &CertificateRequest{ + Raw: in.Raw, + RawTBSCertificateRequest: in.TBSCSR.Raw, + RawSubjectPublicKeyInfo: in.TBSCSR.PublicKey.Raw, + RawSubject: in.TBSCSR.Subject.FullBytes, + + Signature: in.SignatureValue.RightAlign(), + SignatureAlgorithm: GetSignatureAlgorithmFromAI(in.SignatureAlgorithm), + + PublicKeyAlgorithm: getPublicKeyAlgorithmFromOID(in.TBSCSR.PublicKey.Algorithm.Algorithm), + + Version: in.TBSCSR.Version, + Attributes: parseRawAttributes(in.TBSCSR.RawAttributes), + } + + var err error + out.PublicKey, err = parsePublicKey(out.PublicKeyAlgorithm, &in.TBSCSR.PublicKey) + if err != nil { + return nil, err + } + + var subject pkix.RDNSequence + if rest, err := asn1.Unmarshal(in.TBSCSR.Subject.FullBytes, &subject); err != nil { + return nil, err + } else if len(rest) != 0 { + return nil, errors.New("x509: trailing data after X.509 Subject") + } + + out.Subject.FillFromRDNSequence(&subject) + + if out.Extensions, err = parseCSRExtensions(in.TBSCSR.RawAttributes); err != nil { + return nil, err + } + + for _, extension := range out.Extensions { + if extension.Id.Equal(oidExtensionSubjectAltName) { + out.DNSNames, out.EmailAddresses, out.IPAddresses, err = parseSANExtension(extension.Value) + if err != nil { + return nil, err + } + } + } + + return out, nil +} + +// CheckSignature reports whether the signature on c is valid. 
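Rounding out the CSR path, here is a sketch of parsing a PEM-encoded request and verifying its self-signature with the CheckSignature method defined next; the file name and import path are assumptions:

```go
package main

import (
	"encoding/pem"
	"io/ioutil"
	"log"

	"github.com/zmap/zcrypto/x509" // assumed import path for this package
)

func main() {
	raw, err := ioutil.ReadFile("request.csr") // hypothetical PEM-encoded CSR
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found")
	}

	csr, err := x509.ParseCertificateRequest(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}

	// CheckSignature verifies the signature over RawTBSCertificateRequest.
	if err := csr.CheckSignature(); err != nil {
		log.Fatal(err)
	}
	log.Printf("valid CSR for %q, SANs %v", csr.Subject.CommonName, csr.DNSNames)
}
```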
+func (c *CertificateRequest) CheckSignature() error { + return CheckSignatureFromKey(c.PublicKey, c.SignatureAlgorithm, c.RawTBSCertificateRequest, c.Signature) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/LICENSE b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/LICENSE new file mode 100644 index 00000000..7d5abcdd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2017 Regents of the University of Michigan
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/README.md b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/README.md
new file mode 100644
index 00000000..4c3c0190
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/README.md
@@ -0,0 +1,189 @@
+ZLint
+=====
+
+[![Build Status](https://travis-ci.org/zmap/zlint.svg?branch=master)](https://travis-ci.org/zmap/zlint)
+[![Go Report Card](https://goreportcard.com/badge/github.com/zmap/zlint)](https://goreportcard.com/report/github.com/zmap/zlint)
+
+ZLint is an X.509 certificate linter written in Go that checks for consistency
+with [RFC 5280](https://www.ietf.org/rfc/rfc5280.txt) and the CA/Browser Forum
+Baseline Requirements
+([v.1.4.8](https://cabforum.org/wp-content/uploads/CA-Browser-Forum-BR-1.4.8.pdf)).
+
+A detailed list of BR coverage can be found here:
+https://docs.google.com/spreadsheets/d/1ywp0op9mkTaggigpdF2YMTubepowJ50KQBhc_b00e-Y.
+
+Requirements
+------------
+
+ZLint requires [Go 1.12.x or newer](https://golang.org/doc/install) be
+installed. The command line setup instructions assume the `go` command is in
+your `$PATH`.
+
+Versioning
+----------
+
+ZLint aims to follow [semantic versioning](https://semver.org/). The addition of
+new lints will generally result in a MINOR version revision. Since downstream
+projects depend on lint results and names for policy decisions, changes of this
+nature will result in a MAJOR version revision.
+
+Command Line Usage
+------------------
+
+ZLint can be used on the command line through a simple bundled executable
+_ZLint_ as well as through
+[ZCertificate](https://github.com/zmap/zcertificate), a more full-fledged
+command-line certificate parser that links against ZLint.
+
+Example ZLint CLI usage:
+
+	go get github.com/zmap/zlint/cmd/zlint
+	zlint mycert.pem
+
+
+Library Usage
+-------------
+
+ZLint can also be used as a library:
+
+```go
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint"
+)
+
+parsed, err := x509.ParseCertificate(raw)
+if err != nil {
+	// The certificate could not be parsed. Either error or halt.
+	log.Fatalf("could not parse certificate: %s", err)
+}
+zlintResultSet := zlint.LintCertificate(parsed)
+```
+
+
+See https://github.com/zmap/zlint/blob/master/cmd/zlint/main.go for an example.
+
+
+Adding New Lints
+----------------
+
+**Generating Lint Scaffolding.** The scaffolding for a new lint can be created
+by running `./newLint.sh <lint_name> <structName>`. Lint names are generally of
+the form `e_subject_common_name_not_from_san` where the first letter is one of:
+`e`, `w`, or `n` (error, warning, or notice respectively). Struct names
+follow Go conventions, e.g., `subjectCommonNameNotFromSAN`.
+Example: `./newLint.sh e_subject_common_name_not_from_san subjectCommonNameNotFromSAN`.
+This will generate a new lint in the `lints` directory with the necessary
+fields filled out.
+
+**Choosing a Lint Result Level.** When choosing what `lints.LintStatus` your new
+lint should return (e.g. `Notice`, `Warn`, `Error`, or `Fatal`) the following
+general guidance may help. `Error` should be used for clear violations of RFC/BR
+`MUST` or `MUST NOT` requirements and include strong citations. `Warn` should be
+used for violations of RFC/BR `SHOULD` or `SHOULD NOT` requirements and again
+should include strong citations. `Notice` should be used for more general "FYI"
+statements that violate non-codified community standards or for cases where
+citations are unclear. Lastly, `Fatal` should be used when there is an
+unresolvable error in `zlint`, `zcrypto`, or some other part of the certificate
+processing.
+
+**Scoping a Lint.** Lints are executed in three steps. First, the ZLint
+framework determines whether a certificate falls within the scope of a given
+lint by calling `CheckApplies`. This is often used to scope lints to only check
+subscriber certificates, intermediate CAs, or root CAs. This function commonly
+calls one of a select number of helper functions: `IsCA`, `IsSubscriber`,
+`IsExtInCert`, or `DNSNamesExist`. Example:
+
+```go
+func (l *caCRLSignNotSet) CheckApplies(c *x509.Certificate) bool {
+	return c.IsCA && util.IsExtInCert(c, util.KeyUsageOID)
+}
+```
+
+Next, the framework determines whether the lint is effective for the
+certificate by checking that the certificate was not issued prior to the
+lint's `EffectiveDate`. You'll also need to fill out the source and
+description of what the lint is checking. We encourage you to copy text
+directly from the BR or RFC here. Example:
+
+```go
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ca_country_name_missing",
+		Description:   "Root and Subordinate CA certificates MUST have a countryName present in subject information",
+		Citation:      "BRs: 7.1.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &caCountryNameMissing{},
+	})
+}
+```
+
+The meat of the lint is contained within the `Execute` function, which is
+passed an `x509.Certificate`. **Note:** This is an X.509 object from
+[ZCrypto](https://github.com/zmap/zcrypto), not the Go standard library. Lints
+should perform their described test and then return a `LintResult` that
+contains a `Status` and optionally a `Details` string, e.g.,
+`LintResult{Status: Pass}`. If you encounter a situation in which you
+typically would return a Go `error` object, instead return
+`LintResult{Status: Fatal}`.
+
+Example:
+
+```go
+func (l *caCRLSignNotSet) Execute(c *x509.Certificate) *LintResult {
+	if c.KeyUsage&x509.KeyUsageCRLSign != 0 {
+		return &LintResult{Status: Pass}
+	}
+	return &LintResult{Status: Error}
+}
+```
+
+**Creating Tests.** Every lint should also have two corresponding tests, one
+for a success and one for a failure condition. We have typically generated test
+certificates using Go (see https://golang.org/pkg/crypto/x509/#CreateCertificate
+for details), but OpenSSL could also be used. Test certificates should be placed
+in `testlint/testCerts` and referenced from the test file created by
+`newLint.sh`. Prepend the PEM with the output of `openssl x509 -text`.
+
+Example:
+
+```go
+func TestBasicConstNotCritical(t *testing.T) {
+	// Only need to change these two values and the lint name
+	inputPath := "../testlint/testCerts/caBasicConstNotCrit.pem"
+	expected := Error
+	out := Lints["e_basic_constraints_not_critical"].Execute(ReadCertificate(inputPath))
+	if out.Status != expected {
+		t.Errorf("%s: expected %s, got %s", inputPath, expected, out.Status)
+	}
+}
+```
+
+Updating the TLD Map
+--------------------
+
+ZLint maintains [a map of
+top-level-domains](https://github.com/zmap/zlint/blob/master/util/gtld_map.go)
+and their validity periods that is referenced by linters. As ICANN adds and
+removes TLDs, this map needs to be updated. To do so, ensure the
+`zlint-gtld-update` command is installed and in your `$PATH` and run `go
+generate`:
+
+	go get github.com/zmap/zlint/cmd/zlint-gtld-update
+	go generate github.com/zmap/zlint/...
+
+
+License and Copyright
+---------------------
+
+ZMap Copyright 2019 Regents of the University of Michigan
+
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License. You may obtain a copy of the
+License at http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software distributed
+under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
+CONDITIONS OF ANY KIND, either express or implied. See LICENSE for the specific
+language governing permissions and limitations under the License.
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/go.mod b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/go.mod
new file mode 100644
index 00000000..e28446ae
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/go.mod
@@ -0,0 +1,10 @@
+module github.com/zmap/zlint
+
+require (
+	github.com/sirupsen/logrus v1.3.0
+	github.com/weppos/publicsuffix-go v0.4.0
+	github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e
+	golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4
+	golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3
+	golang.org/x/text v0.3.0
+)
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/go.sum b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/go.sum
new file mode 100644
index 00000000..032e2a7e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/go.sum
@@ -0,0 +1,43 @@
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8=
+github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod
h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/weppos/publicsuffix-go v0.4.0 h1:YSnfg3V65LcCFKtIGKGoBhkyKolEd0hlipcXaOjdnQw= +github.com/weppos/publicsuffix-go v0.4.0/go.mod h1:z3LCPQ38eedDQSwmsSRW4Y7t2L8Ln16JPQ02lHAdn5k= +github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= +github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= +github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e h1:mvOa4+/DXStR4ZXOks/UsjeFdn5O5JpLUtzqk9U8xXw= +github.com/zmap/zcrypto v0.0.0-20190729165852-9051775e6a2e/go.mod h1:w7kd3qXHh8FNaczNjslXqvFQiv5mMWRXlL9klTUAHc8= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/base.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/base.go new file mode 100644 index 00000000..a7d453c9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/base.go @@ -0,0 +1,127 @@ +package lints + +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"time"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+var (
+	// Lints is a map of all known lints by name. Add a Lint to the map by calling
+	// RegisterLint.
+	Lints = make(map[string]*Lint)
+)
+
+// LintInterface is implemented by each Lint.
+type LintInterface interface {
+	// Initialize runs once per-lint. It is called during RegisterLint().
+	Initialize() error
+
+	// CheckApplies runs once per certificate. It returns true if the Lint should
+	// run on the given certificate. If CheckApplies returns false, the Lint
+	// result is automatically set to NA without calling CheckEffective() or
+	// Execute().
+	CheckApplies(c *x509.Certificate) bool
+
+	// Execute() is the body of the lint. It is called for every certificate for
+	// which CheckApplies() returns true.
+	Execute(c *x509.Certificate) *LintResult
+}
+
+// LintSource is an enum that programmatically represents the source of a lint.
+type LintSource int
+
+const (
+	UnknownLintSource LintSource = iota
+	CABFBaselineRequirements
+	RFC5280
+	RFC5480
+	RFC5891
+	ZLint
+	AWSLabs
+	EtsiEsi // ETSI - Electronic Signatures and Infrastructures (ESI)
+	CABFEVGuidelines
+	AppleCTPolicy // https://support.apple.com/en-us/HT205280
+)
+
+// A Lint struct represents a single lint, e.g.
+// "e_basic_constraints_not_critical". It contains an implementation of LintInterface.
+type Lint struct {
+
+	// Name is a lowercase underscore-separated string describing what a given
+	// Lint checks. If Name begins with "w", the lint MUST NOT return Error, only
+	// Warn. If Name begins with "e", the Lint MUST NOT return Warn, only Error.
+	Name string `json:"name,omitempty"`
+
+	// A human-readable description of what the Lint checks. Usually copied
+	// directly from the CA/B Baseline Requirements or RFC 5280.
+	Description string `json:"description,omitempty"`
+
+	// The source of the check, e.g. "BRs: 6.1.6" or "RFC 5280: 4.1.2.6".
+	Citation string `json:"citation,omitempty"`
+
+	// Programmatic source of the check, BRs, RFC5280, or ZLint
+	Source LintSource `json:"-"`
+
+	// Lints automatically return NE for all certificates where CheckApplies() is
+	// true but with NotBefore < EffectiveDate. This check is bypassed if
+	// EffectiveDate is zero.
+	EffectiveDate time.Time `json:"-"`
+
+	// The implementation of the lint logic.
+	Lint LintInterface `json:"-"`
+}
+
+// CheckEffective returns true if c was issued on or after the EffectiveDate. If
+// EffectiveDate is zero, CheckEffective always returns true.
+func (l *Lint) CheckEffective(c *x509.Certificate) bool {
+	if l.EffectiveDate.IsZero() || !l.EffectiveDate.After(c.NotBefore) {
+		return true
+	}
+	return false
+}
+
+// Execute runs the lint against a certificate. For lints that are
+// sourced from the CA/B Forum Baseline Requirements, we first determine
+// if they are within the purview of the BRs. See LintInterface for details
+// about the other methods called. The ordering is as follows:
+//
+//	CheckApplies()
+//	CheckEffective()
+//	Execute()
+func (l *Lint) Execute(cert *x509.Certificate) *LintResult {
+	if l.Source == CABFBaselineRequirements && !util.IsServerAuthCert(cert) {
+		return &LintResult{Status: NA}
+	}
+	if !l.Lint.CheckApplies(cert) {
+		return &LintResult{Status: NA}
+	} else if !l.CheckEffective(cert) {
+		return &LintResult{Status: NE}
+	}
+	res := l.Lint.Execute(cert)
+	return res
+}
+
+// RegisterLint must be called once for each lint to be executed. Duplicate lint
+// names are squashed. Normally, RegisterLint is called during init().
+func RegisterLint(l *Lint) {
+	if err := l.Lint.Initialize(); err != nil {
+		panic("could not initialize lint: " + l.Name + ": " + err.Error())
+	}
+	Lints[l.Name] = l
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_basic_constraints_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_basic_constraints_not_critical.go
new file mode 100644
index 00000000..c0114ccd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_basic_constraints_not_critical.go
@@ -0,0 +1,66 @@
+package lints
+
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+/************************************************
+RFC 5280: 4.2.1.9
+Conforming CAs MUST include this extension in all CA certificates that contain
+public keys used to validate digital signatures on certificates and MUST mark
+the extension as critical in such certificates. This extension MAY appear as a
+critical or non-critical extension in CA certificates that contain public keys
+used exclusively for purposes other than validating digital signatures on
+certificates. Such CA certificates include ones that contain public keys used
+exclusively for validating digital signatures on CRLs and ones that contain key
+management public keys used with certificate enrollment protocols.
+************************************************/
+
+type basicConstCrit struct{}
+
+func (l *basicConstCrit) Initialize() error {
+	return nil
+}
+
+func (l *basicConstCrit) CheckApplies(c *x509.Certificate) bool {
+	return c.IsCA && util.IsExtInCert(c, util.BasicConstOID)
+}
+
+func (l *basicConstCrit) Execute(c *x509.Certificate) *LintResult {
+	// The extension is present (per CheckApplies); report Error unless it is
+	// marked critical.
+	if e := util.GetExtFromCert(c, util.BasicConstOID); e != nil {
+		if e.Critical {
+			return &LintResult{Status: Pass}
+		} else {
+			return &LintResult{Status: Error}
+		}
+	} else {
+		return &LintResult{Status: NA}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_basic_constraints_not_critical",
+		Description:   "basicConstraints MUST appear as a critical extension",
+		Citation:      "RFC 5280: 4.2.1.9",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &basicConstCrit{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_common_name_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_common_name_missing.go
new file mode 100644
index 00000000..ed98ffa9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_common_name_missing.go
@@ -0,0 +1,49 @@
+package lints
+
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type caCommonNameMissing struct{}
+
+func (l *caCommonNameMissing) Initialize() error {
+	return nil
+}
+
+func (l *caCommonNameMissing) CheckApplies(c *x509.Certificate) bool {
+	return util.IsCACert(c)
+}
+
+func (l *caCommonNameMissing) Execute(c *x509.Certificate) *LintResult {
+	if c.Subject.CommonName == "" {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ca_common_name_missing",
+		Description:   "CA Certificates common name MUST be included.",
+		Citation:      "BRs: 7.1.4.3.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABV148Date,
+		Lint:          &caCommonNameMissing{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_country_name_invalid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_country_name_invalid.go
new file mode 100644
index 00000000..470e6d27
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_country_name_invalid.go
@@ -0,0 +1,62 @@
+package lints
+
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +/************************************************ +BRs: 7.1.2.1e +The Certificate Subject MUST contain the following: +‐ countryName (OID 2.5.4.6). +This field MUST contain the two‐letter ISO 3166‐1 country code for the country +in which the CA’s place of business is located. +************************************************/ + +type caCountryNameInvalid struct{} + +func (l *caCountryNameInvalid) Initialize() error { + return nil +} + +func (l *caCountryNameInvalid) CheckApplies(c *x509.Certificate) bool { + return c.IsCA +} + +func (l *caCountryNameInvalid) Execute(c *x509.Certificate) *LintResult { + if c.Subject.Country != nil { + for _, j := range c.Subject.Country { + if !util.IsISOCountryCode(j) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: NA} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_country_name_invalid", + Description: "Root and Subordinate CA certificates MUST have a two-letter country code specified in ISO 3166-1", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caCountryNameInvalid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_country_name_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_country_name_missing.go new file mode 100644 index 00000000..51ec0544 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_country_name_missing.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +/************************************************ +BRs: 7.1.2.1e +The Certificate Subject MUST contain the following: +‐ countryName (OID 2.5.4.6). +This field MUST contain the two‐letter ISO 3166‐1 country code for the country +in which the CA’s place of business is located. 
+************************************************/ + +type caCountryNameMissing struct{} + +func (l *caCountryNameMissing) Initialize() error { + return nil +} + +func (l *caCountryNameMissing) CheckApplies(c *x509.Certificate) bool { + return c.IsCA +} + +func (l *caCountryNameMissing) Execute(c *x509.Certificate) *LintResult { + if c.Subject.Country != nil && c.Subject.Country[0] != "" { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_country_name_missing", + Description: "Root and Subordinate CA certificates MUST have a countryName present in subject information", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caCountryNameMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_crl_sign_not_set.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_crl_sign_not_set.go new file mode 100644 index 00000000..ab5c075a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_crl_sign_not_set.go @@ -0,0 +1,56 @@ +package lints + +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +/************************************************ +BRs: 7.1.2.1b +This extension MUST be present and MUST be marked critical. Bit positions for +keyCertSign and cRLSign MUST be set. If the Root CA Private Key is used for +signing OCSP responses, then the digitalSignature bit MUST be set. 
+************************************************/ + +type caCRLSignNotSet struct{} + +func (l *caCRLSignNotSet) Initialize() error { + return nil +} + +func (l *caCRLSignNotSet) CheckApplies(c *x509.Certificate) bool { + return c.IsCA && util.IsExtInCert(c, util.KeyUsageOID) +} + +func (l *caCRLSignNotSet) Execute(c *x509.Certificate) *LintResult { + if c.KeyUsage&x509.KeyUsageCRLSign != 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_crl_sign_not_set", + Description: "Root and Subordinate CA certificate keyUsage extension's crlSign bit MUST be set", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caCRLSignNotSet{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_digital_signature_not_set.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_digital_signature_not_set.go new file mode 100644 index 00000000..b715d01b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_digital_signature_not_set.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.1b +This extension MUST be present and MUST be marked critical. Bit positions for keyCertSign and cRLSign MUST be set. +If the Root CA Private Key is used for signing OCSP responses, then the digitalSignature bit MUST be set. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type caDigSignNotSet struct{} + +func (l *caDigSignNotSet) Initialize() error { + return nil +} + +func (l *caDigSignNotSet) CheckApplies(c *x509.Certificate) bool { + return c.IsCA && util.IsExtInCert(c, util.KeyUsageOID) +} + +func (l *caDigSignNotSet) Execute(c *x509.Certificate) *LintResult { + if c.KeyUsage&x509.KeyUsageDigitalSignature != 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Notice} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "n_ca_digital_signature_not_set", + Description: "Root and Subordinate CA Certificates that wish to use their private key for signing OCSP responses will not be able to without their digital signature set", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caDigSignNotSet{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_is_ca.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_is_ca.go new file mode 100644 index 00000000..776ded76 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_is_ca.go @@ -0,0 +1,62 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type caIsCA struct{} + +type basicConstraints struct { + IsCA bool `asn1:"optional"` + MaxPathLen int `asn1:"optional,default:-1"` +} + +func (l *caIsCA) Initialize() error { + return nil +} + +func (l *caIsCA) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.KeyUsageOID) && c.KeyUsage&x509.KeyUsageCertSign != 0 && util.IsExtInCert(c, util.BasicConstOID) +} + +func (l *caIsCA) Execute(c *x509.Certificate) *LintResult { + e := util.GetExtFromCert(c, util.BasicConstOID) + var constraints basicConstraints + _, err := asn1.Unmarshal(e.Value, &constraints) + if err != nil { + return &LintResult{Status: Fatal} + } + if constraints.IsCA == true { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_is_ca", + Description: "Root and Sub CA Certificate: The CA field MUST be set to true.", + Citation: "BRs: 7.1.2.1, BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caIsCA{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_cert_sign_not_set.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_cert_sign_not_set.go new file mode 100644 index 00000000..c327d9bc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_cert_sign_not_set.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.1b +This extension MUST be present and MUST be marked critical. Bit positions for keyCertSign and cRLSign MUST be set. +If the Root CA Private Key is used for signing OCSP responses, then the digitalSignature bit MUST be set. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type caKeyCertSignNotSet struct{} + +func (l *caKeyCertSignNotSet) Initialize() error { + return nil +} + +func (l *caKeyCertSignNotSet) CheckApplies(c *x509.Certificate) bool { + return c.IsCA && util.IsExtInCert(c, util.KeyUsageOID) +} + +func (l *caKeyCertSignNotSet) Execute(c *x509.Certificate) *LintResult { + if c.KeyUsage&x509.KeyUsageCertSign != 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_key_cert_sign_not_set", + Description: "Root CA Certificate: Bit positions for keyCertSign and cRLSign MUST be set.", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caKeyCertSignNotSet{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_usage_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_usage_missing.go new file mode 100644 index 00000000..14cb6562 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_usage_missing.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: 4.2.1.3 +Conforming CAs MUST include this extension in certificates that + contain public keys that are used to validate digital signatures on + other public key certificates or CRLs. When present, conforming CAs + SHOULD mark this extension as critical. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type caKeyUsageMissing struct{} + +func (l *caKeyUsageMissing) Initialize() error { + return nil +} + +func (l *caKeyUsageMissing) CheckApplies(c *x509.Certificate) bool { + return c.IsCA +} + +func (l *caKeyUsageMissing) Execute(c *x509.Certificate) *LintResult { + if c.KeyUsage != x509.KeyUsage(0) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_key_usage_missing", + Description: "Root and Subordinate CA certificate keyUsage extension MUST be present", + Citation: "BRs: 7.1.2.1, RFC 5280: 4.2.1.3", + Source: CABFBaselineRequirements, + EffectiveDate: util.RFC3280Date, + Lint: &caKeyUsageMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_usage_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_usage_not_critical.go new file mode 100644 index 00000000..e19a2977 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_key_usage_not_critical.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.1b +This extension MUST be present and MUST be marked critical. Bit positions for keyCertSign and cRLSign MUST be set. +If the Root CA Private Key is used for signing OCSP responses, then the digitalSignature bit MUST be set. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type caKeyUsageNotCrit struct{} + +func (l *caKeyUsageNotCrit) Initialize() error { + return nil +} + +func (l *caKeyUsageNotCrit) CheckApplies(c *x509.Certificate) bool { + return c.IsCA && util.IsExtInCert(c, util.KeyUsageOID) +} + +func (l *caKeyUsageNotCrit) Execute(c *x509.Certificate) *LintResult { + if e := util.GetExtFromCert(c, util.KeyUsageOID); e.Critical { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ca_key_usage_not_critical", + Description: "Root and Subordinate CA certificate keyUsage extension MUST be marked as critical", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caKeyUsageNotCrit{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_organization_name_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_organization_name_missing.go new file mode 100644 index 00000000..57bcb278 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_organization_name_missing.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.1e +The Certificate Subject MUST contain the following: organizationName (OID 2.5.4.10): This field MUST be present and the contents MUST contain either the Subject CA’s name or DBA as verified under Section 3.2.2.2. 
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type caOrganizationNameMissing struct{}
+
+func (l *caOrganizationNameMissing) Initialize() error {
+	return nil
+}
+
+func (l *caOrganizationNameMissing) CheckApplies(c *x509.Certificate) bool {
+	return c.IsCA
+}
+
+func (l *caOrganizationNameMissing) Execute(c *x509.Certificate) *LintResult {
+	if len(c.Subject.Organization) > 0 && c.Subject.Organization[0] != "" {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ca_organization_name_missing",
+		Description:   "Root and Subordinate CA certificates MUST have an organizationName present in subject information",
+		Citation:      "BRs: 7.1.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &caOrganizationNameMissing{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_subject_field_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_subject_field_empty.go
new file mode 100644
index 00000000..fb1aab3a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ca_subject_field_empty.go
@@ -0,0 +1,61 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+RFC 5280: 4.1.2.6
+The subject field identifies the entity associated with the public
+ key stored in the subject public key field.  The subject name MAY be
+ carried in the subject field and/or the subjectAltName extension.  If
+ the subject is a CA (e.g., the basic constraints extension, as
+ discussed in Section 4.2.1.9, is present and the value of cA is
+ TRUE), then the subject field MUST be populated with a non-empty
+ distinguished name matching the contents of the issuer field (Section
+ 4.1.2.4) in all certificates issued by the subject CA.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type caSubjectEmpty struct{}
+
+func (l *caSubjectEmpty) Initialize() error {
+	return nil
+}
+
+func (l *caSubjectEmpty) CheckApplies(c *x509.Certificate) bool {
+	return c.IsCA
+}
+
+func (l *caSubjectEmpty) Execute(c *x509.Certificate) *LintResult {
+	if util.NotAllNameFieldsAreEmpty(&c.Subject) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ca_subject_field_empty",
+		Description:   "CA Certificates subject field MUST not be empty and MUST have a non-empty distinguished name",
+		Citation:      "RFC 5280: 4.1.2.6",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &caSubjectEmpty{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_locality.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_locality.go
new file mode 100644
index 00000000..a16b480e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_locality.go
@@ -0,0 +1,51 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+// If the Certificate asserts the policy identifier of 2.23.140.1.2.1, then it MUST NOT include
+// organizationName, streetAddress, localityName, stateOrProvinceName, or postalCode in the Subject field.
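
All five cab_dv_conflicts_with_* lints in this diff share one shape: gate on the DV policy OID in CheckApplies, then report Error if a forbidden subject attribute type is present. A minimal, self-contained sketch of that attribute scan, assuming util.TypeInName simply walks the subject's RDN attribute OIDs (the typeInName helper and the sample subject below are illustrative, not zlint API):

package main

import (
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

// localityName is OID 2.5.4.7 (X.520); the sibling lints check 2.5.4.10
// (organizationName), 2.5.4.8 (stateOrProvinceName), 2.5.4.9 (streetAddress),
// and 2.5.4.17 (postalCode) the same way.
var localityNameOID = asn1.ObjectIdentifier{2, 5, 4, 7}

// typeInName reports whether any attribute in the subject has the given
// type, mirroring what util.TypeInName is assumed to do.
func typeInName(name *pkix.Name, oid asn1.ObjectIdentifier) bool {
	for _, atv := range name.Names {
		if atv.Type.Equal(oid) {
			return true
		}
	}
	return false
}

func main() {
	subject := pkix.Name{Names: []pkix.AttributeTypeAndValue{
		{Type: localityNameOID, Value: "Ann Arbor"},
	}}
	// A DV certificate carrying this subject would trip e_cab_dv_conflicts_with_locality.
	fmt.Println(typeInName(&subject, localityNameOID)) // true
}
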
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type certPolicyConflictsWithLocality struct{} + +func (l *certPolicyConflictsWithLocality) Initialize() error { + return nil +} + +func (l *certPolicyConflictsWithLocality) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRDomainValidatedOID) && !util.IsCACert(cert) +} + +func (l *certPolicyConflictsWithLocality) Execute(cert *x509.Certificate) *LintResult { + if util.TypeInName(&cert.Subject, util.LocalityNameOID) { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_dv_conflicts_with_locality", + Description: "If certificate policy 2.23.140.1.2.1 (CA/B BR domain validated) is included, locality name MUST NOT be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &certPolicyConflictsWithLocality{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_org.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_org.go new file mode 100644 index 00000000..80809f1e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_org.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// If the Certificate asserts the policy identifier of 2.23.140.1.2.1, then it MUST NOT include +// organizationName, streetAddress, localityName, stateOrProvinceName, or postalCode in the Subject field. 
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type certPolicyConflictsWithOrg struct{} + +func (l *certPolicyConflictsWithOrg) Initialize() error { + return nil +} + +func (l *certPolicyConflictsWithOrg) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRDomainValidatedOID) && !util.IsCACert(cert) +} + +func (l *certPolicyConflictsWithOrg) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.OrganizationNameOID) { + out.Status = Error + } else { + out.Status = Pass + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_dv_conflicts_with_org", + Description: "If certificate policy 2.23.140.1.2.1 (CA/B BR domain validated) is included, organization name MUST NOT be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &certPolicyConflictsWithOrg{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_postal.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_postal.go new file mode 100644 index 00000000..c83c66c7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_postal.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// If the Certificate asserts the policy identifier of 2.23.140.1.2.1, then it MUST NOT include +// organizationName, streetAddress, localityName, stateOrProvinceName, or postalCode in the Subject field. 
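
Because every file here registers its lint in init() via RegisterLint, importing the lints package is enough to populate the global registry, and the top-level zlint package can then run the full set. A hedged usage sketch (the cert.pem path is illustrative, and the ResultSet field layout is assumed from this vendored revision):

package main

import (
	"encoding/pem"
	"fmt"
	"io/ioutil"

	"github.com/zmap/zcrypto/x509"
	"github.com/zmap/zlint"
)

func main() {
	raw, err := ioutil.ReadFile("cert.pem") // illustrative input path
	if err != nil {
		panic(err)
	}
	block, _ := pem.Decode(raw)
	// zlint lints operate on zcrypto's x509 types, not the standard library's.
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Runs every registered lint, including the CA/B DV subject checks above.
	rs := zlint.LintCertificate(cert)
	for name, res := range rs.Results {
		fmt.Println(name, res.Status)
	}
}
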
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type certPolicyConflictsWithPostal struct{} + +func (l *certPolicyConflictsWithPostal) Initialize() error { + return nil +} + +func (l *certPolicyConflictsWithPostal) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRDomainValidatedOID) && !util.IsCACert(cert) +} + +func (l *certPolicyConflictsWithPostal) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.PostalCodeOID) { + out.Status = Error + } else { + out.Status = Pass + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_dv_conflicts_with_postal", + Description: "If certificate policy 2.23.140.1.2.1 (CA/B BR domain validated) is included, postalCode MUST NOT be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &certPolicyConflictsWithPostal{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_province.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_province.go new file mode 100644 index 00000000..b046a0ee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_province.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// If the Certificate asserts the policy identifier of 2.23.140.1.2.1, then it MUST NOT include +// organizationName, streetAddress, localityName, stateOrProvinceName, or postalCode in the Subject field. 
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type certPolicyConflictsWithProvince struct{} + +func (l *certPolicyConflictsWithProvince) Initialize() error { + return nil +} + +func (l *certPolicyConflictsWithProvince) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRDomainValidatedOID) && !util.IsCACert(cert) +} + +func (l *certPolicyConflictsWithProvince) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.StateOrProvinceNameOID) { + out.Status = Error + } else { + out.Status = Pass + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_dv_conflicts_with_province", + Description: "If certificate policy 2.23.140.1.2.1 (CA/B BR domain validated) is included, stateOrProvinceName MUST NOT be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &certPolicyConflictsWithProvince{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_street.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_street.go new file mode 100644 index 00000000..588baf12 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_dv_conflicts_with_street.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// If the Certificate asserts the policy identifier of 2.23.140.1.2.1, then it MUST NOT include +// organizationName, streetAddress, localityName, stateOrProvinceName, or postalCode in the Subject field. 
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type certPolicyConflictsWithStreet struct{} + +func (l *certPolicyConflictsWithStreet) Initialize() error { + return nil +} + +func (l *certPolicyConflictsWithStreet) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRDomainValidatedOID) && !util.IsCACert(cert) +} + +func (l *certPolicyConflictsWithStreet) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.StreetAddressOID) { + out.Status = Error + } else { + out.Status = Pass + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_dv_conflicts_with_street", + Description: "If certificate policy 2.23.140.1.2.1 (CA/B BR domain validated) is included, streetAddress MUST NOT be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &certPolicyConflictsWithStreet{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_iv_requires_personal_name.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_iv_requires_personal_name.go new file mode 100644 index 00000000..41436374 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_iv_requires_personal_name.go @@ -0,0 +1,53 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/*If the Certificate asserts the policy identifier of 2.23.140.1.2.3, then it MUST also include (i) either organizationName or givenName and surname, (ii) localityName (to the extent such field is required under Section 7.1.4.2.2), (iii) stateOrProvinceName (to the extent required under Section 7.1.4.2.2), and (iv) countryName in the Subject field.*/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type CertPolicyRequiresPersonalName struct{} + +func (l *CertPolicyRequiresPersonalName) Initialize() error { + return nil +} + +func (l *CertPolicyRequiresPersonalName) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRIndividualValidatedOID) && !util.IsCACert(cert) +} + +func (l *CertPolicyRequiresPersonalName) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.OrganizationNameOID) || (util.TypeInName(&cert.Subject, util.GivenNameOID) && util.TypeInName(&cert.Subject, util.SurnameOID)) { + out.Status = Pass + } else { + out.Status = Error + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_iv_requires_personal_name", + Description: "If certificate policy 2.23.140.1.2.3 is included, either organizationName or givenName and surname MUST be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV131Date, + Lint: &CertPolicyRequiresPersonalName{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_ov_requires_org.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_ov_requires_org.go new file mode 100644 index 00000000..a8529f59 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cab_ov_requires_org.go @@ -0,0 +1,53 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/*If the Certificate asserts the policy identifier of 2.23.140.1.2.2, then it MUST also include organizationName, localityName (to the extent such field is required under Section 7.1.4.2.2), stateOrProvinceName (to the extent such field is required under Section 7.1.4.2.2), and countryName in the Subject field.*/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type CertPolicyRequiresOrg struct{} + +func (l *CertPolicyRequiresOrg) Initialize() error { + return nil +} + +func (l *CertPolicyRequiresOrg) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BROrganizationValidatedOID) && !util.IsCACert(cert) +} + +func (l *CertPolicyRequiresOrg) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.OrganizationNameOID) { + out.Status = Pass + } else { + out.Status = Error + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cab_ov_requires_org", + Description: "If certificate policy 2.23.140.1.2.2 is included, organizationName MUST be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &CertPolicyRequiresOrg{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_contains_unique_identifier.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_contains_unique_identifier.go new file mode 100644 index 00000000..9d8beb1e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_contains_unique_identifier.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ + These fields MUST only appear if the version is 2 or 3 (Section 4.1.2.1). + These fields MUST NOT appear if the version is 1. The subject and issuer + unique identifiers are present in the certificate to handle the possibility + of reuse of subject and/or issuer names over time. This profile RECOMMENDS + that names not be reused for different entities and that Internet certificates + not make use of unique identifiers. CAs conforming to this profile MUST NOT + generate certificates with unique identifiers. Applications conforming to + this profile SHOULD be capable of parsing certificates that include unique + identifiers, but there are no processing requirements associated with the + unique identifiers. 
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type CertContainsUniqueIdentifier struct{}
+
+func (l *CertContainsUniqueIdentifier) Initialize() error {
+	return nil
+}
+
+func (l *CertContainsUniqueIdentifier) CheckApplies(cert *x509.Certificate) bool {
+	return true
+}
+
+func (l *CertContainsUniqueIdentifier) Execute(cert *x509.Certificate) *LintResult {
+	if cert.IssuerUniqueId.Bytes == nil && cert.SubjectUniqueId.Bytes == nil {
+		return &LintResult{Status: Pass}
+	}
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_cert_contains_unique_identifier",
+		Description:   "CAs MUST NOT generate certificates with unique identifiers",
+		Source:        RFC5280,
+		Citation:      "RFC 5280: 4.1.2.8",
+		EffectiveDate: util.RFC5280Date,
+		Lint:          &CertContainsUniqueIdentifier{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_extensions_version_not_3.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_extensions_version_not_3.go
new file mode 100644
index 00000000..faefda9b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_extensions_version_not_3.go
@@ -0,0 +1,67 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+4.1.2.1. Version
+   This field describes the version of the encoded certificate. When
+   extensions are used, as expected in this profile, version MUST be 3
+   (value is 2). If no extensions are present, but a UniqueIdentifier
+   is present, the version SHOULD be 2 (value is 1); however, the version
+   MAY be 3. If only basic fields are present, the version SHOULD be 1
+   (the value is omitted from the certificate as the default value);
+   however, the version MAY be 2 or 3.
+
+   Implementations SHOULD be prepared to accept any version certificate.
+   At a minimum, conforming implementations MUST recognize version 3 certificates.
+4.1.2.9. Extensions
+   This field MUST only appear if the version is 3 (Section 4.1.2.1).
+   If present, this field is a SEQUENCE of one or more certificate
+   extensions. The format and content of certificate extensions in the
+   Internet PKI are defined in Section 4.2.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type CertExtensionsVersionNot3 struct{}
+
+func (l *CertExtensionsVersionNot3) Initialize() error {
+	return nil
+}
+
+func (l *CertExtensionsVersionNot3) CheckApplies(cert *x509.Certificate) bool {
+	return true
+}
+
+func (l *CertExtensionsVersionNot3) Execute(cert *x509.Certificate) *LintResult {
+	if cert.Version != 3 && len(cert.Extensions) != 0 {
+		return &LintResult{Status: Error}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_cert_extensions_version_not_3",
+		Description:   "The extensions field MUST only appear in version 3 certificates",
+		Citation:      "RFC 5280: 4.1.2.9",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &CertExtensionsVersionNot3{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_iv_requires_country.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_iv_requires_country.go
new file mode 100644
index 00000000..4a48162d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_iv_requires_country.go
@@ -0,0 +1,53 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +/*If the Certificate asserts the policy identifier of 2.23.140.1.2.3, then it MUST also include (i) either organizationName or givenName and surname, (ii) localityName (to the extent such field is required under Section 7.1.4.2.2), (iii) stateOrProvinceName (to the extent required under Section 7.1.4.2.2), and (iv) countryName in the Subject field.*/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type CertPolicyIVRequiresCountry struct{} + +func (l *CertPolicyIVRequiresCountry) Initialize() error { + return nil +} + +func (l *CertPolicyIVRequiresCountry) CheckApplies(cert *x509.Certificate) bool { + return util.SliceContainsOID(cert.PolicyIdentifiers, util.BRIndividualValidatedOID) +} + +func (l *CertPolicyIVRequiresCountry) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.CountryNameOID) { + out.Status = Pass + } else { + out.Status = Error + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cert_policy_iv_requires_country", + Description: "If certificate policy 2.23.140.1.2.3 is included, countryName MUST be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV131Date, + Lint: &CertPolicyIVRequiresCountry{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_iv_requires_province_or_locality.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_iv_requires_province_or_locality.go new file mode 100644 index 00000000..5618c4b9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_iv_requires_province_or_locality.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// 7.1.6.1: If the Certificate asserts the policy identifier of 2.23.140.1.2.3, then it MUST also include (i) either organizationName or givenName and surname, (ii) localityName (to the extent such field is required under Section 7.1.4.2.2), (iii) stateOrProvinceName (to the extent required under Section 7.1.4.2.2), and (iv) countryName in the Subject field. +// 7.1.4.2.2 applies only to subscriber certificates. 
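
The IV rule restated above is a disjunction, which e_cab_iv_requires_personal_name earlier in this diff encodes directly. A minimal sketch of the acceptance condition under the same assumption as before, that util.TypeInName scans the subject's attribute OIDs (hasType is an illustrative stand-in; OIDs per X.520):

package main

import (
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

var (
	organizationNameOID = asn1.ObjectIdentifier{2, 5, 4, 10} // organizationName
	surnameOID          = asn1.ObjectIdentifier{2, 5, 4, 4}  // surname
	givenNameOID        = asn1.ObjectIdentifier{2, 5, 4, 42} // givenName
)

// hasType scans the subject's attribute types for the given OID, the assumed
// behavior of util.TypeInName.
func hasType(name *pkix.Name, oid asn1.ObjectIdentifier) bool {
	for _, atv := range name.Names {
		if atv.Type.Equal(oid) {
			return true
		}
	}
	return false
}

// ivPersonalNameOK mirrors the BR 7.1.6.1 disjunction: organizationName,
// or givenName together with surname.
func ivPersonalNameOK(subject *pkix.Name) bool {
	return hasType(subject, organizationNameOID) ||
		(hasType(subject, givenNameOID) && hasType(subject, surnameOID))
}

func main() {
	surnameOnly := pkix.Name{Names: []pkix.AttributeTypeAndValue{
		{Type: surnameOID, Value: "Doe"},
	}}
	fmt.Println(ivPersonalNameOK(&surnameOnly)) // false: surname alone is not enough
}
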
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type CertPolicyIVRequiresProvinceOrLocal struct{} + +func (l *CertPolicyIVRequiresProvinceOrLocal) Initialize() error { + return nil +} + +func (l *CertPolicyIVRequiresProvinceOrLocal) CheckApplies(cert *x509.Certificate) bool { + return util.IsSubscriberCert(cert) && util.SliceContainsOID(cert.PolicyIdentifiers, util.BRIndividualValidatedOID) +} + +func (l *CertPolicyIVRequiresProvinceOrLocal) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.LocalityNameOID) || util.TypeInName(&cert.Subject, util.StateOrProvinceNameOID) { + out.Status = Pass + } else { + out.Status = Error + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cert_policy_iv_requires_province_or_locality", + Description: "If certificate policy 2.23.140.1.2.3 is included, localityName or stateOrProvinceName MUST be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV131Date, + Lint: &CertPolicyIVRequiresProvinceOrLocal{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_ov_requires_country.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_ov_requires_country.go new file mode 100644 index 00000000..d74f7aa0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_ov_requires_country.go @@ -0,0 +1,53 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+/*If the Certificate asserts the policy identifier of 2.23.140.1.2.2, then it MUST also include organizationName, localityName (to the extent such field is required under Section 7.1.4.2.2), stateOrProvinceName (to the extent such field is required under Section 7.1.4.2.2), and countryName in the Subject field.*/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type CertPolicyOVRequiresCountry struct{}
+
+func (l *CertPolicyOVRequiresCountry) Initialize() error {
+	return nil
+}
+
+func (l *CertPolicyOVRequiresCountry) CheckApplies(cert *x509.Certificate) bool {
+	return util.SliceContainsOID(cert.PolicyIdentifiers, util.BROrganizationValidatedOID)
+}
+
+func (l *CertPolicyOVRequiresCountry) Execute(cert *x509.Certificate) *LintResult {
+	var out LintResult
+	if util.TypeInName(&cert.Subject, util.CountryNameOID) {
+		out.Status = Pass
+	} else {
+		out.Status = Error
+	}
+	return &out
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_cert_policy_ov_requires_country",
+		Description:   "If certificate policy 2.23.140.1.2.2 is included, countryName MUST be included in subject",
+		Citation:      "BRs: 7.1.6.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &CertPolicyOVRequiresCountry{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_ov_requires_province_or_locality.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_ov_requires_province_or_locality.go
new file mode 100644
index 00000000..0031a52d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_policy_ov_requires_province_or_locality.go
@@ -0,0 +1,54 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+// 7.1.6.1: If the Certificate asserts the policy identifier of 2.23.140.1.2.2, then it MUST also include organizationName, localityName (to the extent such field is required under Section 7.1.4.2.2), stateOrProvinceName (to the extent such field is required under Section 7.1.4.2.2), and countryName in the Subject field.
+// 7.1.4.2.2 applies only to subscriber certificates.
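
The CheckApplies gate used throughout this family is just an OID membership test over the certificate's policy identifiers. A small sketch of that test for the OV policy OID, assuming util.SliceContainsOID is a linear scan (sliceContainsOID below is illustrative, not zlint API):

package main

import (
	"encoding/asn1"
	"fmt"
)

// brOrganizationValidatedOID is the CA/B Forum OV policy: 2.23.140.1.2.2.
var brOrganizationValidatedOID = asn1.ObjectIdentifier{2, 23, 140, 1, 2, 2}

// sliceContainsOID reports whether oid occurs in list, the assumed behavior
// of util.SliceContainsOID.
func sliceContainsOID(list []asn1.ObjectIdentifier, oid asn1.ObjectIdentifier) bool {
	for _, candidate := range list {
		if candidate.Equal(oid) {
			return true
		}
	}
	return false
}

func main() {
	// Policy identifiers as they would be parsed from a certificate.
	policies := []asn1.ObjectIdentifier{{2, 23, 140, 1, 2, 2}}
	fmt.Println(sliceContainsOID(policies, brOrganizationValidatedOID)) // true: the OV lints apply
}
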
+ +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type CertPolicyOVRequiresProvinceOrLocal struct{} + +func (l *CertPolicyOVRequiresProvinceOrLocal) Initialize() error { + return nil +} + +func (l *CertPolicyOVRequiresProvinceOrLocal) CheckApplies(cert *x509.Certificate) bool { + return util.IsSubscriberCert(cert) && util.SliceContainsOID(cert.PolicyIdentifiers, util.BROrganizationValidatedOID) +} + +func (l *CertPolicyOVRequiresProvinceOrLocal) Execute(cert *x509.Certificate) *LintResult { + var out LintResult + if util.TypeInName(&cert.Subject, util.LocalityNameOID) || util.TypeInName(&cert.Subject, util.StateOrProvinceNameOID) { + out.Status = Pass + } else { + out.Status = Error + } + return &out +} + +func init() { + RegisterLint(&Lint{ + Name: "e_cert_policy_ov_requires_province_or_locality", + Description: "If certificate policy 2.23.140.1.2.2 is included, localityName or stateOrProvinceName MUST be included in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &CertPolicyOVRequiresProvinceOrLocal{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_unique_identifier_version_not_2_or_3.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_unique_identifier_version_not_2_or_3.go new file mode 100644 index 00000000..98a8218d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_cert_unique_identifier_version_not_2_or_3.go @@ -0,0 +1,63 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************** +RFC 5280: 4.1.2.8 + These fields MUST only appear if the version is 2 or 3 (Section 4.1.2.1). + These fields MUST NOT appear if the version is 1. The subject and issuer + unique identifiers are present in the certificate to handle the possibility + of reuse of subject and/or issuer names over time. This profile RECOMMENDS + that names not be reused for different entities and that Internet certificates + not make use of unique identifiers. CAs conforming to this profile MUST NOT + generate certificates with unique identifiers. Applications conforming to + this profile SHOULD be capable of parsing certificates that include unique + identifiers, but there are no processing requirements associated with the + unique identifiers. 
+****************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type certUniqueIdVersion struct{}
+
+func (l *certUniqueIdVersion) Initialize() error {
+	return nil
+}
+
+func (l *certUniqueIdVersion) CheckApplies(c *x509.Certificate) bool {
+	return c.IssuerUniqueId.Bytes != nil || c.SubjectUniqueId.Bytes != nil
+}
+
+func (l *certUniqueIdVersion) Execute(c *x509.Certificate) *LintResult {
+	if c.Version != 2 && c.Version != 3 {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_cert_unique_identifier_version_not_2_or_3",
+		Description:   "Unique identifiers MUST only appear if the X.509 version is 2 or 3",
+		Citation:      "RFC 5280: 4.1.2.8",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC5280Date,
+		Lint:          &certUniqueIdVersion{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ct_sct_policy_count_unsatisfied.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ct_sct_policy_count_unsatisfied.go
new file mode 100644
index 00000000..0cc6c87b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ct_sct_policy_count_unsatisfied.go
@@ -0,0 +1,156 @@
+/*
+ * ZLint Copyright 2019 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package lints
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zcrypto/x509/ct"
+	"github.com/zmap/zlint/util"
+)
+
+type sctPolicyCount struct{}
+
+// Initialize for a sctPolicyCount instance does nothing.
+func (l *sctPolicyCount) Initialize() error {
+	return nil
+}
+
+// CheckApplies returns true for any subscriber certificates that are not
+// precertificates (e.g. that do not have the CT poison extension defined in
+// RFC 6962).
+func (l *sctPolicyCount) CheckApplies(c *x509.Certificate) bool {
+	return util.IsSubscriberCert(c) && !util.IsExtInCert(c, util.CtPoisonOID)
+}

+// Execute checks if the provided certificate has embedded SCTs from
+// a sufficient number of unique CT logs to meet Apple's CT log policy[0],
+// effective Oct 15th, 2018.
+//
+// The number of required SCTs from different logs is calculated based on the
+// Certificate's lifetime. If the required number of SCTs is not embedded in
+// the certificate, a Notice level LintResult is returned.
+//
+// | Certificate lifetime | # of SCTs from separate logs |
+// -------------------------------------------------------
+// | Less than 15 months  | 2                            |
+// | 15 to 27 months      | 3                            |
+// | 27 to 39 months      | 4                            |
+// | More than 39 months  | 5                            |
+// -------------------------------------------------------
+//
+// Important note 1: We can't know whether additional SCTs were presented
+// alongside the certificate via OCSP stapling. 
This linter assumes only
+// embedded SCTs are used and ignores the portion of the Apple policy related to
+// SCTs delivered via OCSP. This is one limitation that restricts the linter's
+// findings to Notice level. See more background discussion in Issue 226[1].
+//
+// Important note 2: The linter doesn't maintain a list of Apple's trusted
+// logs. The SCTs embedded in the certificate may not be from logs Apple
+// actually trusts. Similarly, the embedded SCT signatures are not validated
+// in any way.
+//
+// [0]: https://support.apple.com/en-us/HT205280
+// [1]: https://github.com/zmap/zlint/issues/226
+func (l *sctPolicyCount) Execute(c *x509.Certificate) *LintResult {
+	// Determine the required number of SCTs from separate logs
+	expected := appleCTPolicyExpectedSCTs(c)
+
+	// If there are no SCTs then the job is easy. We can return a Notice
+	// LintResult immediately.
+	if len(c.SignedCertificateTimestampList) == 0 && expected > 0 {
+		return &LintResult{
+			Status: Notice,
+			Details: fmt.Sprintf(
+				"Certificate had 0 embedded SCTs. Browser policy may require %d for this certificate.",
+				expected),
+		}
+	}
+
+	// Build a map from LogID to SCT so that we can count embedded SCTs by unique
+	// log.
+	sctsByLogID := make(map[ct.SHA256Hash]*ct.SignedCertificateTimestamp)
+	for _, sct := range c.SignedCertificateTimestampList {
+		sctsByLogID[sct.LogID] = sct
+	}
+
+	// If the number of embedded SCTs from separate logs meets expected return
+	// a Pass result.
+	if len(sctsByLogID) >= expected {
+		return &LintResult{Status: Pass}
+	}
+
+	// Otherwise return a Notice result - there weren't enough SCTs embedded in
+	// the certificate. More must be provided by OCSP stapling if the certificate
+	// is to meet Apple's CT policy.
+	return &LintResult{
+		Status: Notice,
+		Details: fmt.Sprintf(
+			"Certificate had %d embedded SCTs from distinct log IDs. "+
+				"Browser policy may require %d for this certificate.",
+			len(sctsByLogID), expected),
+	}
+}
+
+// appleCTPolicyExpectedSCTs returns a count of the number of SCTs expected to
+// be embedded in the given certificate based on its lifetime.
+//
+// For this function the relevant portion of Apple's policy is the table
+// "Number of embedded SCTs based on certificate lifetime" (Also reproduced in
+// the `Execute` godoc comment).
+func appleCTPolicyExpectedSCTs(cert *x509.Certificate) int {
+	// Lifetime is relative to the certificate's NotBefore date.
+	start := cert.NotBefore
+
+	// Thresholds is an ordered array of lifetime periods and their expected # of
+	// SCTs. A lifetime period is defined by the cutoff date relative to the
+	// start of the certificate's lifetime.
+	thresholds := []struct {
+		CutoffDate time.Time
+		Expected   int
+	}{
+		// Start date ... 15 months
+		{CutoffDate: start.AddDate(0, 15, 0), Expected: 2},
+		// Start date ... 27 months
+		{CutoffDate: start.AddDate(0, 27, 0), Expected: 3},
+		// Start date ... 39 months
+		{CutoffDate: start.AddDate(0, 39, 0), Expected: 4},
+	}
+
+	// If the certificate's lifetime falls into any of the cutoff date ranges then
+	// we expect that range's expected # of SCTs for this certificate. This loop
+	// assumes the `thresholds` list is sorted in ascending order.
+	for _, threshold := range thresholds {
+		if cert.NotAfter.Before(threshold.CutoffDate) {
+			return threshold.Expected
+		}
+	}
+
+	// The certificate had a validity > 39 months.
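+	// (By contrast, a hypothetical certificate valid from 2018-01-01 through
+	// 2020-01-01 spans 24 months, which falls before the 27-month cutoff, so
+	// the loop above would have returned an expected count of 3.)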
+ return 5 +} + +func init() { + RegisterLint(&Lint{ + Name: "w_ct_sct_policy_count_unsatisfied", + Description: "Check if certificate has enough embedded SCTs to meet Apple CT Policy", + Citation: "https://support.apple.com/en-us/HT205280", + Source: AppleCTPolicy, + EffectiveDate: util.AppleCTPolicyDate, + Lint: &sctPolicyCount{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dh_params_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dh_params_missing.go new file mode 100644 index 00000000..ab8e3c27 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dh_params_missing.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "crypto/dsa" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type dsaParamsMissing struct{} + +func (l *dsaParamsMissing) Initialize() error { + return nil +} + +func (l *dsaParamsMissing) CheckApplies(c *x509.Certificate) bool { + return c.PublicKeyAlgorithm == x509.DSA +} + +func (l *dsaParamsMissing) Execute(c *x509.Certificate) *LintResult { + dsaKey, ok := c.PublicKey.(*dsa.PublicKey) + if !ok { + return &LintResult{Status: Fatal} + } + params := dsaKey.Parameters + if params.P.BitLen() == 0 || params.Q.BitLen() == 0 || params.G.BitLen() == 0 { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dsa_params_missing", + Description: "DSA: Certificates MUST include all domain parameters", + Citation: "BRs: 6.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &dsaParamsMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_distribution_point_incomplete.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_distribution_point_incomplete.go new file mode 100644 index 00000000..4cd94e20 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_distribution_point_incomplete.go @@ -0,0 +1,84 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************** +The cRLDistributionPoints extension is a SEQUENCE of +DistributionPoint. 
A DistributionPoint consists of three fields, +each of which is optional: distributionPoint, reasons, and cRLIssuer. +While each of these fields is optional, a DistributionPoint MUST NOT +consist of only the reasons field; either distributionPoint or +cRLIssuer MUST be present. If the certificate issuer is not the CRL +issuer, then the cRLIssuer field MUST be present and contain the Name +of the CRL issuer. If the certificate issuer is also the CRL issuer, +then conforming CAs MUST omit the cRLIssuer field and MUST include +the distributionPoint field. +********************************************************************/ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zcrypto/x509/pkix" + "github.com/zmap/zlint/util" +) + +type distributionPoint struct { + DistributionPoint distributionPointName `asn1:"optional,tag:0"` + Reason asn1.BitString `asn1:"optional,tag:1"` + CRLIssuer asn1.RawValue `asn1:"optional,tag:2"` +} + +type distributionPointName struct { + FullName asn1.RawValue `asn1:"optional,tag:0"` + RelativeName pkix.RDNSequence `asn1:"optional,tag:1"` +} + +type dpIncomplete struct{} + +func (l *dpIncomplete) Initialize() error { + return nil +} + +func (l *dpIncomplete) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.CrlDistOID) +} + +func (l *dpIncomplete) Execute(c *x509.Certificate) *LintResult { + dp := util.GetExtFromCert(c, util.CrlDistOID) + var cdp []distributionPoint + _, err := asn1.Unmarshal(dp.Value, &cdp) + if err != nil { + return &LintResult{Status: Fatal} + } + for _, dp := range cdp { + if dp.Reason.BitLength != 0 && len(dp.DistributionPoint.FullName.Bytes) == 0 && + dp.DistributionPoint.RelativeName == nil && len(dp.CRLIssuer.Bytes) == 0 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_distribution_point_incomplete", + Description: "A DistributionPoint from the CRLDistributionPoints extension MUST NOT consist of only the reasons field; either distributionPoint or CRLIssuer must be present", + Citation: "RFC 5280: 4.2.1.13", + Source: RFC5280, + EffectiveDate: util.RFC3280Date, + Lint: &dpIncomplete{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_distribution_point_missing_ldap_or_uri.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_distribution_point_missing_ldap_or_uri.go new file mode 100644 index 00000000..a848187f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_distribution_point_missing_ldap_or_uri.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: 4.2.1.13 +When present, DistributionPointName SHOULD include at least one LDAP or HTTP URI. 
+************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type distribNoLDAPorURI struct{} + +func (l *distribNoLDAPorURI) Initialize() error { + return nil +} + +func (l *distribNoLDAPorURI) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.CrlDistOID) +} + +func (l *distribNoLDAPorURI) Execute(c *x509.Certificate) *LintResult { + for _, point := range c.CRLDistributionPoints { + if point = strings.ToLower(point); strings.HasPrefix(point, "http://") || strings.HasPrefix(point, "ldap://") { + return &LintResult{Status: Pass} + } + } + return &LintResult{Status: Warn} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_distribution_point_missing_ldap_or_uri", + Description: "When present in the CRLDistributionPoints extension, DistributionPointName SHOULD include at least one LDAP or HTTP URI", + Citation: "RFC 5280: 4.2.1.13", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &distribNoLDAPorURI{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_bad_character_in_label.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_bad_character_in_label.go new file mode 100644 index 00000000..22c7f09d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_bad_character_in_label.go @@ -0,0 +1,63 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "regexp" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameProperCharacters struct { + CompiledExpression *regexp.Regexp +} + +func (l *DNSNameProperCharacters) Initialize() error { + const dnsNameRegexp = `^(\*\.)?(\?\.)*([A-Za-z0-9*_-]+\.)*[A-Za-z0-9*_-]*$` + var err error + l.CompiledExpression, err = regexp.Compile(dnsNameRegexp) + + return err +} + +func (l *DNSNameProperCharacters) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *DNSNameProperCharacters) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + if !l.CompiledExpression.MatchString(c.Subject.CommonName) { + return &LintResult{Status: Error} + } + } + for _, dns := range c.DNSNames { + if !l.CompiledExpression.MatchString(dns) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_bad_character_in_label", + Description: "Characters in labels of DNSNames MUST be alphanumeric, - , _ or *", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameProperCharacters{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_check_left_label_wildcard.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_check_left_label_wildcard.go new file mode 100644 index 00000000..846e5220 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_check_left_label_wildcard.go @@ -0,0 +1,66 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameLeftLabelWildcardCheck struct{} + +func (l *DNSNameLeftLabelWildcardCheck) Initialize() error { + return nil +} + +func (l *DNSNameLeftLabelWildcardCheck) CheckApplies(c *x509.Certificate) bool { + return true +} + +func wildcardInLeftLabelIncorrect(domain string) bool { + labels := strings.Split(domain, ".") + if len(labels) >= 1 { + leftLabel := labels[0] + if strings.Contains(leftLabel, "*") && leftLabel != "*" { + return true + } + } + return false +} + +func (l *DNSNameLeftLabelWildcardCheck) Execute(c *x509.Certificate) *LintResult { + if wildcardInLeftLabelIncorrect(c.Subject.CommonName) { + return &LintResult{Status: Error} + } + for _, dns := range c.DNSNames { + if wildcardInLeftLabelIncorrect(dns) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_left_label_wildcard_correct", + Description: "Wildcards in the left label of DNSName should only be *", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameLeftLabelWildcardCheck{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_contains_bare_iana_suffix.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_contains_bare_iana_suffix.go new file mode 100644 index 00000000..4ea11aa4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_contains_bare_iana_suffix.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type dnsNameContainsBareIANASuffix struct{} + +func (l *dnsNameContainsBareIANASuffix) Initialize() error { + return nil +} + +func (l *dnsNameContainsBareIANASuffix) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *dnsNameContainsBareIANASuffix) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + if util.IsInTLDMap(c.Subject.CommonName) { + return &LintResult{Status: Error} + } + } + for _, dns := range c.DNSNames { + if util.IsInTLDMap(dns) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_contains_bare_iana_suffix", + Description: "DNSNames should not contain a bare IANA suffix.", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &dnsNameContainsBareIANASuffix{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_contains_empty_label.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_contains_empty_label.go new file mode 100644 index 00000000..8e11d844 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_contains_empty_label.go @@ -0,0 +1,67 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameEmptyLabel struct{} + +func (l *DNSNameEmptyLabel) Initialize() error { + return nil +} + +func (l *DNSNameEmptyLabel) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func domainHasEmptyLabel(domain string) bool { + labels := strings.Split(domain, ".") + for _, elem := range labels { + if elem == "" { + return true + } + } + return false +} + +func (l *DNSNameEmptyLabel) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + if domainHasEmptyLabel(c.Subject.CommonName) { + return &LintResult{Status: Error} + } + } + for _, dns := range c.DNSNames { + if domainHasEmptyLabel(dns) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_empty_label", + Description: "DNSNames should not have an empty label.", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameEmptyLabel{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_hyphen_in_sld.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_hyphen_in_sld.go new file mode 100644 index 00000000..8070bf22 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_hyphen_in_sld.go @@ -0,0 +1,66 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameHyphenInSLD struct{} + +func (l *DNSNameHyphenInSLD) Initialize() error { + return nil +} + +func (l *DNSNameHyphenInSLD) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *DNSNameHyphenInSLD) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + domainInfo := c.GetParsedSubjectCommonName(false) + if domainInfo.ParseError != nil { + return &LintResult{Status: NA} + } + if strings.HasPrefix(domainInfo.ParsedDomain.SLD, "-") || strings.HasSuffix(domainInfo.ParsedDomain.SLD, "-") { + return &LintResult{Status: Error} + } + } + parsedSANDNSNames := c.GetParsedDNSNames(false) + for i := range c.GetParsedDNSNames(false) { + if parsedSANDNSNames[i].ParseError != nil { + return &LintResult{Status: NA} + } + if strings.HasPrefix(parsedSANDNSNames[i].ParsedDomain.SLD, "-") || + strings.HasSuffix(parsedSANDNSNames[i].ParsedDomain.SLD, "-") { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_hyphen_in_sld", + Description: "DNSName should not have a hyphen beginning or ending the SLD", + Citation: "BRs 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.RFC5280Date, + Lint: &DNSNameHyphenInSLD{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_label_too_long.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_label_too_long.go new file mode 100644 index 00000000..d9d1eec4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_label_too_long.go @@ -0,0 +1,69 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameLabelLengthTooLong struct{} + +func (l *DNSNameLabelLengthTooLong) Initialize() error { + return nil +} + +func (l *DNSNameLabelLengthTooLong) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func labelLengthTooLong(domain string) bool { + labels := strings.Split(domain, ".") + for _, label := range labels { + if len(label) > 63 { + return true + } + } + return false +} + +func (l *DNSNameLabelLengthTooLong) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + labelTooLong := labelLengthTooLong(c.Subject.CommonName) + if labelTooLong { + return &LintResult{Status: Error} + } + } + for _, dns := range c.DNSNames { + labelTooLong := labelLengthTooLong(dns) + if labelTooLong { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_label_too_long", + Description: "DNSName labels MUST be less than or equal to 63 characters", + Citation: "RFC 1035", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameLabelLengthTooLong{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_right_label_valid_tld.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_right_label_valid_tld.go new file mode 100644 index 00000000..ccd9804e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_right_label_valid_tld.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameValidTLD struct{} + +func (l *DNSNameValidTLD) Initialize() error { + return nil +} + +func (l *DNSNameValidTLD) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *DNSNameValidTLD) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + if !util.HasValidTLD(c.Subject.CommonName, c.NotBefore) { + return &LintResult{Status: Error} + } + } + for _, dns := range c.DNSNames { + if !util.HasValidTLD(dns, c.NotBefore) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_not_valid_tld", + Description: "DNSNames must have a valid TLD.", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameValidTLD{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_underscore_in_sld.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_underscore_in_sld.go new file mode 100644 index 00000000..a54453eb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_underscore_in_sld.go @@ -0,0 +1,66 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameUnderscoreInSLD struct{} + +func (l *DNSNameUnderscoreInSLD) Initialize() error { + return nil +} + +func (l *DNSNameUnderscoreInSLD) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *DNSNameUnderscoreInSLD) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + domainInfo := c.GetParsedSubjectCommonName(false) + if domainInfo.ParseError != nil { + return &LintResult{Status: NA} + } + if strings.Contains(domainInfo.ParsedDomain.SLD, "_") { + return &LintResult{Status: Error} + } + } + + parsedSANDNSNames := c.GetParsedDNSNames(false) + for i := range c.GetParsedDNSNames(false) { + if parsedSANDNSNames[i].ParseError != nil { + return &LintResult{Status: NA} + } + if strings.Contains(parsedSANDNSNames[i].ParsedDomain.SLD, "_") { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_underscore_in_sld", + Description: "DNSName should not have underscore in SLD", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.RFC5280Date, + Lint: &DNSNameUnderscoreInSLD{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_underscore_in_trd.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_underscore_in_trd.go new file mode 100644 index 00000000..4d70e1e6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_underscore_in_trd.go @@ -0,0 +1,67 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameUnderscoreInTRD struct{} + +func (l *DNSNameUnderscoreInTRD) Initialize() error { + return nil +} + +func (l *DNSNameUnderscoreInTRD) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *DNSNameUnderscoreInTRD) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + domainInfo := c.GetParsedSubjectCommonName(false) + if domainInfo.ParseError != nil { + return &LintResult{Status: NA} + } + if strings.Contains(domainInfo.ParsedDomain.TRD, "_") { + return &LintResult{Status: Warn} + } + } + + parsedSANDNSNames := c.GetParsedDNSNames(false) + for i := range c.GetParsedDNSNames(false) { + if parsedSANDNSNames[i].ParseError != nil { + return &LintResult{Status: NA} + } + if strings.Contains(parsedSANDNSNames[i].ParsedDomain.TRD, "_") { + return &LintResult{Status: Warn} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_dnsname_underscore_in_trd", + Description: "DNSName should not have an underscore in labels left of the ETLD+1", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.RFC5280Date, + Lint: &DNSNameUnderscoreInTRD{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_wildcard_left_of_public_suffix.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_wildcard_left_of_public_suffix.go new file mode 100644 index 00000000..a01f4138 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_wildcard_left_of_public_suffix.go @@ -0,0 +1,66 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameWildcardLeftofPublicSuffix struct{} + +func (l *DNSNameWildcardLeftofPublicSuffix) Initialize() error { + return nil +} + +func (l *DNSNameWildcardLeftofPublicSuffix) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.DNSNamesExist(c) +} + +func (l *DNSNameWildcardLeftofPublicSuffix) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" && !util.CommonNameIsIP(c) { + domainInfo := c.GetParsedSubjectCommonName(false) + if domainInfo.ParseError != nil { + return &LintResult{Status: NA} + } + + if domainInfo.ParsedDomain.SLD == "*" { + return &LintResult{Status: Warn} + } + } + + parsedSANDNSNames := c.GetParsedDNSNames(false) + for i := range c.GetParsedDNSNames(false) { + if parsedSANDNSNames[i].ParseError != nil { + return &LintResult{Status: NA} + } + + if parsedSANDNSNames[i].ParsedDomain.SLD == "*" { + return &LintResult{Status: Warn} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_dnsname_wildcard_left_of_public_suffix", + Description: "the CA MUST establish and follow a documented procedure[^pubsuffix] that determines if the wildcard character occurs in the first label position to the left of a “registry‐controlled” label or “public suffix”", + Citation: "BRs: 3.2.2.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameWildcardLeftofPublicSuffix{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_wildcard_only_in_left_label.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_wildcard_only_in_left_label.go new file mode 100644 index 00000000..9aaadcd1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dnsname_wildcard_only_in_left_label.go @@ -0,0 +1,68 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameWildcardOnlyInLeftlabel struct{} + +func (l *DNSNameWildcardOnlyInLeftlabel) Initialize() error { + return nil +} + +func (l *DNSNameWildcardOnlyInLeftlabel) CheckApplies(c *x509.Certificate) bool { + return true +} + +func wildcardNotInLeftLabel(domain string) bool { + labels := strings.Split(domain, ".") + if len(labels) > 1 { + labels = labels[1:] + for _, label := range labels { + if strings.Contains(label, "*") { + return true + } + } + } + return false +} + +func (l *DNSNameWildcardOnlyInLeftlabel) Execute(c *x509.Certificate) *LintResult { + if wildcardNotInLeftLabel(c.Subject.CommonName) { + return &LintResult{Status: Error} + } + for _, dns := range c.DNSNames { + if wildcardNotInLeftLabel(dns) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dnsname_wildcard_only_in_left_label", + Description: "DNSName should not have wildcards except in the left-most label", + Citation: "BRs: 7.1.4.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &DNSNameWildcardOnlyInLeftlabel{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_correct_order_in_subgroup.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_correct_order_in_subgroup.go new file mode 100644 index 00000000..8fd5eaab --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_correct_order_in_subgroup.go @@ -0,0 +1,65 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "crypto/dsa" + "math/big" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type dsaSubgroup struct{} + +func (l *dsaSubgroup) Initialize() error { + return nil +} + +func (l *dsaSubgroup) CheckApplies(c *x509.Certificate) bool { + if c.PublicKeyAlgorithm != x509.DSA { + return false + } + if _, ok := c.PublicKey.(*dsa.PublicKey); !ok { + return false + } + return true +} + +func (l *dsaSubgroup) Execute(c *x509.Certificate) *LintResult { + dsaKey, ok := c.PublicKey.(*dsa.PublicKey) + if !ok { + return &LintResult{Status: NA} + } + output := big.Int{} + + // Enforce that Y^Q == 1 mod P, e.g. that Order(Y) == Q mod P. 
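+	// Since Q is prime, Y^Q == 1 mod P holds exactly when the order of Y
+	// divides Q, i.e. when Y lies in the unique subgroup of order Q.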
+ output.Exp(dsaKey.Y, dsaKey.Q, dsaKey.P) + if output.Cmp(big.NewInt(1)) == 0 { + return &LintResult{Status: Pass} + } + return &LintResult{Status: Error} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dsa_correct_order_in_subgroup", + Description: "DSA: Public key value has the unique correct representation in the field, and that the key has the correct order in the subgroup", + Citation: "BRs: 6.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &dsaSubgroup{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_improper_modulus_or_divisor_size.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_improper_modulus_or_divisor_size.go new file mode 100644 index 00000000..096b6b1d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_improper_modulus_or_divisor_size.go @@ -0,0 +1,56 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "crypto/dsa" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type dsaImproperSize struct{} + +func (l *dsaImproperSize) Initialize() error { + return nil +} + +func (l *dsaImproperSize) CheckApplies(c *x509.Certificate) bool { + return c.PublicKeyAlgorithm == x509.DSA +} + +func (l *dsaImproperSize) Execute(c *x509.Certificate) *LintResult { + dsaKey, ok := c.PublicKey.(*dsa.PublicKey) + if !ok { + return &LintResult{Status: NA} + } + L := dsaKey.Parameters.P.BitLen() + N := dsaKey.Parameters.Q.BitLen() + if (L == 2048 && N == 224) || (L == 2048 && N == 256) || (L == 3072 && N == 256) { + return &LintResult{Status: Pass} + } + return &LintResult{Status: Error} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dsa_improper_modulus_or_divisor_size", + Description: "Certificates MUST meet the following requirements for DSA algorithm type and key size: L=2048 and N=224,256 or L=3072 and N=256", + Citation: "BRs: 6.1.5", + Source: CABFBaselineRequirements, + EffectiveDate: util.ZeroDate, + Lint: &dsaImproperSize{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_shorter_than_2048_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_shorter_than_2048_bits.go new file mode 100644 index 00000000..b9a0f290 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_shorter_than_2048_bits.go @@ -0,0 +1,58 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"crypto/dsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type dsaTooShort struct{}
+
+func (l *dsaTooShort) Initialize() error {
+	return nil
+}
+
+func (l *dsaTooShort) CheckApplies(c *x509.Certificate) bool {
+	return c.PublicKeyAlgorithm == x509.DSA
+}
+
+func (l *dsaTooShort) Execute(c *x509.Certificate) *LintResult {
+	dsaKey, ok := c.PublicKey.(*dsa.PublicKey)
+	if !ok {
+		return &LintResult{Status: NA}
+	}
+	dsaParams := dsaKey.Parameters
+	L := dsaParams.P.BitLen()
+	N := dsaParams.Q.BitLen()
+	// BRs 6.1.5 permits N=224 or N=256, so the divisor bound is 224 (the
+	// companion lint e_dsa_improper_modulus_or_divisor_size checks the exact
+	// (L, N) pairs).
+	if L >= 2048 && N >= 224 {
+		return &LintResult{Status: Pass}
+	}
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:        "e_dsa_shorter_than_2048_bits",
+		Description: "DSA modulus size must be at least 2048 bits",
+		Citation:    "BRs: 6.1.5",
+		// Refer to BRs: 6.1.5, taking the statement "Before 31 Dec 2010" literally
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &dsaTooShort{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_unique_correct_representation.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_unique_correct_representation.go
new file mode 100644
index 00000000..be356cfa
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_dsa_unique_correct_representation.go
@@ -0,0 +1,59 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"crypto/dsa"
+	"math/big"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type dsaUniqueCorrectRepresentation struct{}
+
+func (l *dsaUniqueCorrectRepresentation) Initialize() error {
+	return nil
+}
+
+func (l *dsaUniqueCorrectRepresentation) CheckApplies(c *x509.Certificate) bool {
+	return c.PublicKeyAlgorithm == x509.DSA
+}
+
+func (l *dsaUniqueCorrectRepresentation) Execute(c *x509.Certificate) *LintResult {
+	dsaKey, ok := c.PublicKey.(*dsa.PublicKey)
+	if !ok {
+		return &LintResult{Status: NA}
+	}
+	// Verify that 2 ≤ y ≤ p-2. 
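+	// (This excludes the degenerate values: 0 is not in the multiplicative
+	// group at all, 1 is the identity, and p-1 ≡ -1 mod p has order 2, so
+	// none of them can be a well-formed DSA public key.)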
+ two := big.NewInt(2) + pMinusTwo := big.NewInt(0) + pMinusTwo.Sub(dsaKey.P, two) + if two.Cmp(dsaKey.Y) > 0 || dsaKey.Y.Cmp(pMinusTwo) > 0 { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_dsa_unique_correct_representation", + Description: "DSA: Public key value has the unique correct representation in the field, and that the key has the correct order in the subgroup", + Citation: "BRs: 6.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &dsaUniqueCorrectRepresentation{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ec_improper_curves.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ec_improper_curves.go new file mode 100644 index 00000000..81a2827e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ec_improper_curves.go @@ -0,0 +1,71 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 6.1.5 +Certificates MUST meet the following requirements for algorithm type and key size. 
ECC Curve: NIST P-256, P-384, or P-521
+************************************************/
+
+import (
+	"crypto/ecdsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type ecImproperCurves struct{}
+
+func (l *ecImproperCurves) Initialize() error {
+	return nil
+}
+
+func (l *ecImproperCurves) CheckApplies(c *x509.Certificate) bool {
+	return c.PublicKeyAlgorithm == x509.ECDSA
+}
+
+func (l *ecImproperCurves) Execute(c *x509.Certificate) *LintResult {
+	/* Declare theKey to be an ECDSA Public Key */
+	var theKey *ecdsa.PublicKey
+	/* Need to do different things based on what c.PublicKey is */
+	switch c.PublicKey.(type) {
+	case *x509.AugmentedECDSA:
+		temp := c.PublicKey.(*x509.AugmentedECDSA)
+		theKey = temp.Pub
+	case *ecdsa.PublicKey:
+		theKey = c.PublicKey.(*ecdsa.PublicKey)
+	}
+	/* Now can actually check the params */
+	theParams := theKey.Curve.Params()
+	switch theParams.Name {
+	case "P-256", "P-384", "P-521":
+		return &LintResult{Status: Pass}
+	default:
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:        "e_ec_improper_curves",
+		Description: "Only one of NIST P‐256, P‐384, or P‐521 can be used",
+		Citation:    "BRs: 6.1.5",
+		Source:      CABFBaselineRequirements,
+		// Refer to BRs: 6.1.5, taking the statement "Before 31 Dec 2010" literally
+		EffectiveDate: util.ZeroDate,
+		Lint:          &ecImproperCurves{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ecdsa_ee_invalid_ku.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ecdsa_ee_invalid_ku.go
new file mode 100644
index 00000000..03c46448
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ecdsa_ee_invalid_ku.go
@@ -0,0 +1,98 @@
+/*
+ * ZLint Copyright 2019 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package lints
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type ecdsaInvalidKU struct{}
+
+// Initialize is a no-op for this lint.
+func (l *ecdsaInvalidKU) Initialize() error {
+	return nil
+}
+
+// CheckApplies returns true when the certificate is a subscriber cert using an
+// ECDSA public key algorithm.
+func (l *ecdsaInvalidKU) CheckApplies(c *x509.Certificate) bool {
+	return util.IsSubscriberCert(c) && c.PublicKeyAlgorithm == x509.ECDSA
+}
+
+// Execute returns a Notice level LintResult if the ECDSA end entity certificate
+// being linted has Key Usage bits set other than digitalSignature,
+// nonRepudiation/contentCommitment, and keyAgreement.
+func (l *ecdsaInvalidKU) Execute(c *x509.Certificate) *LintResult {
+	// RFC 5480, Section 3 "Key Usage Bits" says:
+	//
+	//   If the keyUsage extension is present in an End Entity (EE)
+	//   certificate that indicates id-ecPublicKey in SubjectPublicKeyInfo,
+	//   then any combination of the following values MAY be present:
+	//
+	//     digitalSignature;
+	//     nonRepudiation; and
+	//     keyAgreement. 
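+	//
+	// Notably absent are keyEncipherment and dataEncipherment, which
+	// describe key-transport and data-encryption operations that an
+	// id-ecPublicKey key is not used for.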
+ // + // So we set up `allowedKUs` to match. Note that per RFC 5280: recent editions + // of X.509 renamed "nonRepudiation" to "contentCommitment", which is the name + // of the Go x509 constant we use here alongside the digitalSignature and + // keyAgreement constants. + allowedKUs := map[x509.KeyUsage]bool{ + x509.KeyUsageDigitalSignature: true, + x509.KeyUsageContentCommitment: true, + x509.KeyUsageKeyAgreement: true, + } + + var invalidKUs []string + for ku, kuName := range util.KeyUsageToString { + if c.KeyUsage&ku != 0 { + if !allowedKUs[ku] { + invalidKUs = append(invalidKUs, kuName) + } + } + } + + if len(invalidKUs) > 0 { + // Sort the invalid KUs to allow consistent ordering of Details messages for + // unit testing + sort.Strings(invalidKUs) + return &LintResult{ + Status: Notice, + Details: fmt.Sprintf( + "Certificate had unexpected key usage(s): %s", + strings.Join(invalidKUs, ", ")), + } + } + + return &LintResult{ + Status: Pass, + } +} + +func init() { + RegisterLint(&Lint{ + Name: "n_ecdsa_ee_invalid_ku", + Description: "ECDSA end-entity certificates MAY have key usages: digitalSignature, nonRepudiation and keyAgreement", + Citation: "RFC 5480 Section 3", + Source: RFC5480, + EffectiveDate: util.CABEffectiveDate, + Lint: &ecdsaInvalidKU{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_eku_critical_improperly.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_eku_critical_improperly.go new file mode 100644 index 00000000..652d7efb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_eku_critical_improperly.go @@ -0,0 +1,66 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: 4.2.1.12 +If a CA includes extended key usages to satisfy such applications, + but does not wish to restrict usages of the key, the CA can include + the special KeyPurposeId anyExtendedKeyUsage in addition to the + particular key purposes required by the applications. Conforming CAs + SHOULD NOT mark this extension as critical if the anyExtendedKeyUsage + KeyPurposeId is present. Applications that require the presence of a + particular purpose MAY reject certificates that include the + anyExtendedKeyUsage OID but not the particular OID expected for the + application. 
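+
+As a hypothetical illustration, an extended key usage of
+{ anyExtendedKeyUsage, id-kp-serverAuth } marked critical would draw this
+warning, while the same extension marked non-critical would not.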
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type ekuBadCritical struct{}
+
+func (l *ekuBadCritical) Initialize() error {
+	return nil
+}
+
+func (l *ekuBadCritical) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.EkuSynOid)
+}
+
+func (l *ekuBadCritical) Execute(c *x509.Certificate) *LintResult {
+	if e := util.GetExtFromCert(c, util.EkuSynOid); e.Critical {
+		for _, single_use := range c.ExtKeyUsage {
+			if single_use == x509.ExtKeyUsageAny {
+				return &LintResult{Status: Warn}
+			}
+		}
+	}
+
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_eku_critical_improperly",
+		Description:   "Conforming CAs SHOULD NOT mark the extended key usage extension as critical if the anyExtendedKeyUsage KeyPurposeId is present",
+		Citation:      "RFC 5280: 4.2.1.12",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC3280Date,
+		Lint:          &ekuBadCritical{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_business_category_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_business_category_missing.go
new file mode 100644
index 00000000..d0888fdc
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_business_category_missing.go
@@ -0,0 +1,49 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type evNoBiz struct{}
+
+func (l *evNoBiz) Initialize() error {
+	return nil
+}
+
+func (l *evNoBiz) CheckApplies(c *x509.Certificate) bool {
+	return util.IsEV(c.PolicyIdentifiers) && util.IsSubscriberCert(c)
+}
+
+func (l *evNoBiz) Execute(c *x509.Certificate) *LintResult {
+	if util.TypeInName(&c.Subject, util.BusinessOID) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ev_business_category_missing",
+		Description:   "EV certificates must include businessCategory in subject",
+		Citation:      "BRs: 7.1.6.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &evNoBiz{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_country_name_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_country_name_missing.go
new file mode 100644
index 00000000..c06451f7
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_country_name_missing.go
@@ -0,0 +1,49 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type evCountryMissing struct{} + +func (l *evCountryMissing) Initialize() error { + return nil +} + +func (l *evCountryMissing) CheckApplies(c *x509.Certificate) bool { + return util.IsEV(c.PolicyIdentifiers) && util.IsSubscriberCert(c) +} + +func (l *evCountryMissing) Execute(c *x509.Certificate) *LintResult { + if util.TypeInName(&c.Subject, util.CountryNameOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ev_country_name_missing", + Description: "EV certificates must include countryName in subject", + Citation: "BRs: 7.1.6.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.ZeroDate, + Lint: &evCountryMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_organization_name_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_organization_name_missing.go new file mode 100644 index 00000000..c08cf3f6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_organization_name_missing.go @@ -0,0 +1,49 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type evOrgMissing struct{}
+
+func (l *evOrgMissing) Initialize() error {
+	return nil
+}
+
+func (l *evOrgMissing) CheckApplies(c *x509.Certificate) bool {
+	return util.IsEV(c.PolicyIdentifiers) && util.IsSubscriberCert(c)
+}
+
+func (l *evOrgMissing) Execute(c *x509.Certificate) *LintResult {
+	if util.TypeInName(&c.Subject, util.OrganizationNameOID) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ev_organization_name_missing",
+		Description:   "EV certificates must include organizationName in subject",
+		Citation:      "BRs: 7.1.6.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &evOrgMissing{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_serial_number_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_serial_number_missing.go
new file mode 100644
index 00000000..7b34ffc8
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_serial_number_missing.go
@@ -0,0 +1,48 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type evSNMissing struct{}
+
+func (l *evSNMissing) Initialize() error {
+	return nil
+}
+
+func (l *evSNMissing) CheckApplies(c *x509.Certificate) bool {
+	return util.IsEV(c.PolicyIdentifiers) && util.IsSubscriberCert(c)
+}
+
+func (l *evSNMissing) Execute(c *x509.Certificate) *LintResult {
+	if len(c.Subject.SerialNumber) == 0 {
+		return &LintResult{Status: Error}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ev_serial_number_missing",
+		Description:   "EV certificates must include serialNumber in subject",
+		Citation:      "EV guidelines: 9.2.6",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &evSNMissing{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_valid_time_too_long.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_valid_time_too_long.go
new file mode 100644
index 00000000..31fb5e62
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ev_valid_time_too_long.go
@@ -0,0 +1,48 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type evValidTooLong struct{} + +func (l *evValidTooLong) Initialize() error { + return nil +} + +func (l *evValidTooLong) CheckApplies(c *x509.Certificate) bool { + return util.IsEV(c.PolicyIdentifiers) && util.IsSubscriberCert(c) +} + +func (l *evValidTooLong) Execute(c *x509.Certificate) *LintResult { + if c.NotBefore.AddDate(0, 0, 825).Before(c.NotAfter) { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ev_valid_time_too_long", + Description: "EV certificates must be 825 days in validity or less", + Citation: "BRs: 6.3.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.ZeroDate, + Lint: &evValidTooLong{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_aia_access_location_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_aia_access_location_missing.go new file mode 100644 index 00000000..46fe81f8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_aia_access_location_missing.go @@ -0,0 +1,63 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: 4.2.2.1 +An authorityInfoAccess extension may include multiple instances of + the id-ad-caIssuers accessMethod. The different instances may + specify different methods for accessing the same information or may + point to different information. When the id-ad-caIssuers + accessMethod is used, at least one instance SHOULD specify an + accessLocation that is an HTTP [RFC2616] or LDAP [RFC4516] URI. 
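+
+A hypothetical conforming instance (illustrative values only):
+
+   accessMethod:   id-ad-caIssuers
+   accessLocation: http://aia.example.com/issuer.crt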
+ +************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type aiaNoHTTPorLDAP struct{} + +func (l *aiaNoHTTPorLDAP) Initialize() error { + return nil +} + +func (l *aiaNoHTTPorLDAP) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.AiaOID) && c.IssuingCertificateURL != nil +} + +func (l *aiaNoHTTPorLDAP) Execute(c *x509.Certificate) *LintResult { + for _, caIssuer := range c.IssuingCertificateURL { + if caIssuer = strings.ToLower(caIssuer); strings.HasPrefix(caIssuer, "http://") || strings.HasPrefix(caIssuer, "ldap://") { + return &LintResult{Status: Pass} + } + } + return &LintResult{Status: Warn} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_ext_aia_access_location_missing", + Description: "When the id-ad-caIssuers accessMethod is used, at least one instance SHOULD specify an accessLocation that is an HTTP or LDAP URI", + Citation: "RFC 5280: 4.2.2.1", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &aiaNoHTTPorLDAP{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_aia_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_aia_marked_critical.go new file mode 100644 index 00000000..9c95f73b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_aia_marked_critical.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +Authority Information Access + The authority information access extension indicates how to access information and services for the issuer of the certificate in which the extension appears. Information and services may include on-line validation services and CA policy data. (The location of CRLs is not specified in this extension; that information is provided by the cRLDistributionPoints extension.) This extension may be included in end entity or CA certificates. Conforming CAs MUST mark this extension as non-critical. 
+************************************************/
+//See also: BRs: 7.1.2.3 & CAB: 7.1.2.2
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type ExtAiaMarkedCritical struct{}
+
+func (l *ExtAiaMarkedCritical) Initialize() error {
+    return nil
+}
+
+func (l *ExtAiaMarkedCritical) CheckApplies(cert *x509.Certificate) bool {
+    return util.IsExtInCert(cert, util.AiaOID)
+}
+
+func (l *ExtAiaMarkedCritical) Execute(cert *x509.Certificate) *LintResult {
+    if util.GetExtFromCert(cert, util.AiaOID).Critical {
+        return &LintResult{Status: Error}
+    } else {
+        return &LintResult{Status: Pass}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_aia_marked_critical",
+        Description:   "Conforming CAs must mark the Authority Information Access extension as non-critical",
+        Citation:      "RFC 5280: 4.2.2.1",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &ExtAiaMarkedCritical{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_critical.go
new file mode 100644
index 00000000..c85b2c15
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_critical.go
@@ -0,0 +1,55 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*********************************************************
+RFC 5280: 4.2.1.1
+Conforming CAs MUST mark this extension as non-critical.
+**********************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type authorityKeyIdCritical struct{}
+
+func (l *authorityKeyIdCritical) Initialize() error {
+    return nil
+}
+
+func (l *authorityKeyIdCritical) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.AuthkeyOID)
+}
+
+func (l *authorityKeyIdCritical) Execute(c *x509.Certificate) *LintResult {
+    aki := util.GetExtFromCert(c, util.AuthkeyOID) // pointer to the extension
+    if aki.Critical {
+        return &LintResult{Status: Error}
+    } else { // implies !aki.Critical
+        return &LintResult{Status: Pass}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_authority_key_identifier_critical",
+        Description:   "The authority key identifier extension must be non-critical",
+        Citation:      "RFC 5280: 4.2.1.1",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &authorityKeyIdCritical{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_missing.go
new file mode 100644
index 00000000..4a0050cd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_missing.go
@@ -0,0 +1,64 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/***********************************************************************
+RFC 5280: 4.2.1.1
+The keyIdentifier field of the authorityKeyIdentifier extension MUST
+   be included in all certificates generated by conforming CAs to
+   facilitate certification path construction. There is one exception;
+   where a CA distributes its public key in the form of a "self-signed"
+   certificate, the authority key identifier MAY be omitted. The
+   signature on a self-signed certificate is generated with the private
+   key associated with the certificate's subject public key. (This
+   proves that the issuer possesses both the public and private keys.)
+   In this case, the subject and authority key identifiers would be
+   identical, but only the subject key identifier is needed for
+   certification path building.
+***********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type authorityKeyIdMissing struct{}
+
+func (l *authorityKeyIdMissing) Initialize() error {
+    return nil
+}
+
+func (l *authorityKeyIdMissing) CheckApplies(c *x509.Certificate) bool {
+    return !util.IsRootCA(c)
+}
+
+func (l *authorityKeyIdMissing) Execute(c *x509.Certificate) *LintResult {
+    if !util.IsExtInCert(c, util.AuthkeyOID) && !util.IsSelfSigned(c) {
+        return &LintResult{Status: Error}
+    } else {
+        return &LintResult{Status: Pass}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_authority_key_identifier_missing",
+        Description:   "CAs must support key identifiers and include them in all certificates",
+        Citation:      "RFC 5280: 4.2 & 4.2.1.1",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &authorityKeyIdMissing{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_no_key_identifier.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_no_key_identifier.go
new file mode 100644
index 00000000..2fd632fb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_authority_key_identifier_no_key_identifier.go
@@ -0,0 +1,64 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/***********************************************************************
+RFC 5280: 4.2.1.1
+The keyIdentifier field of the authorityKeyIdentifier extension MUST
+   be included in all certificates generated by conforming CAs to
+   facilitate certification path construction. There is one exception;
+   where a CA distributes its public key in the form of a "self-signed"
+   certificate, the authority key identifier MAY be omitted. The
+   signature on a self-signed certificate is generated with the private
+   key associated with the certificate's subject public key. (This
+   proves that the issuer possesses both the public and private keys.)
+   In this case, the subject and authority key identifiers would be
+   identical, but only the subject key identifier is needed for
+   certification path building.
+***********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type authorityKeyIdNoKeyIdField struct{}
+
+func (l *authorityKeyIdNoKeyIdField) Initialize() error {
+    return nil
+}
+
+func (l *authorityKeyIdNoKeyIdField) CheckApplies(c *x509.Certificate) bool {
+    return true
+}
+
+func (l *authorityKeyIdNoKeyIdField) Execute(c *x509.Certificate) *LintResult {
+    if c.AuthorityKeyId == nil && !util.IsSelfSigned(c) { // will be nil by default if not found in x509.parseCert
+        return &LintResult{Status: Error}
+    } else {
+        return &LintResult{Status: Pass}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_authority_key_identifier_no_key_identifier",
+        Description:   "CAs must include the keyIdentifier field of the AKI in all non-self-issued certificates",
+        Citation:      "RFC 5280: 4.2.1.1",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &authorityKeyIdNoKeyIdField{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_contains_noticeref.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_contains_noticeref.go
new file mode 100644
index 00000000..33e11ce3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_contains_noticeref.go
@@ -0,0 +1,66 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/********************************************************************
+The user notice has two optional fields: the noticeRef field and the
+explicitText field. Conforming CAs SHOULD NOT use the noticeRef
+option.
+********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type noticeRefPres struct{}
+
+func (l *noticeRefPres) Initialize() error {
+    return nil
+}
+
+func (l *noticeRefPres) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.CertPolicyOID)
+}
+
+func (l *noticeRefPres) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.NoticeRefNumbers {
+        for _, number := range firstLvl {
+            if number != nil {
+                return &LintResult{Status: Warn}
+            }
+        }
+    }
+    for _, firstLvl := range c.NoticeRefOrgnization {
+        for _, org := range firstLvl {
+            if len(org.Bytes) != 0 {
+                return &LintResult{Status: Warn}
+            }
+        }
+    }
+
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "w_ext_cert_policy_contains_noticeref",
+        Description:   "Compliant certificates SHOULD NOT use the noticeRef option",
+        Citation:      "RFC 5280: 4.2.1.4",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC5280Date,
+        Lint:          &noticeRefPres{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_disallowed_any_policy_qualifier.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_disallowed_any_policy_qualifier.go
new file mode 100644
index 00000000..e98c2ff3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_disallowed_any_policy_qualifier.go
@@ -0,0 +1,63 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************
+RFC 5280: 4.2.1.4
+To promote interoperability, this profile RECOMMENDS that policy
+information terms consist of only an OID. Where an OID alone is
+insufficient, this profile strongly recommends that the use of
+qualifiers be limited to those identified in this section. When
+qualifiers are used with the special policy anyPolicy, they MUST be
+limited to the qualifiers identified in this section. Only those
+qualifiers returned as a result of path validation are considered.
+********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type unrecommendedQualifier struct{}
+
+func (l *unrecommendedQualifier) Initialize() error {
+    return nil
+}
+
+func (l *unrecommendedQualifier) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.CertPolicyOID)
+}
+
+func (l *unrecommendedQualifier) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.QualifierId {
+        for _, qualifierId := range firstLvl {
+            if !qualifierId.Equal(util.CpsOID) && !qualifierId.Equal(util.UserNoticeOID) {
+                return &LintResult{Status: Error}
+            }
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_cert_policy_disallowed_any_policy_qualifier",
+        Description:   "When qualifiers are used with the special policy anyPolicy, they must be limited to qualifiers identified in this section: (4.2.1.4)",
+        Citation:      "RFC 5280: 4.2.1.4",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC3280Date,
+        Lint:          &unrecommendedQualifier{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_duplicate.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_duplicate.go
new file mode 100644
index 00000000..0cafbbc9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_duplicate.go
@@ -0,0 +1,62 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+   The certificate policies extension contains a sequence of one or more
+   policy information terms, each of which consists of an object identifier
+   (OID) and optional qualifiers. Optional qualifiers, which MAY be present,
+   are not expected to change the definition of the policy. A certificate
+   policy OID MUST NOT appear more than once in a certificate policies extension.
+************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type ExtCertPolicyDuplicate struct{}
+
+func (l *ExtCertPolicyDuplicate) Initialize() error {
+    return nil
+}
+
+func (l *ExtCertPolicyDuplicate) CheckApplies(cert *x509.Certificate) bool {
+    return util.IsExtInCert(cert, util.CertPolicyOID)
+}
+
+func (l *ExtCertPolicyDuplicate) Execute(cert *x509.Certificate) *LintResult {
+    // O(n^2) is not terrible here because n is small
+    for i := 0; i < len(cert.PolicyIdentifiers); i++ {
+        for j := i + 1; j < len(cert.PolicyIdentifiers); j++ {
+            if i != j && cert.PolicyIdentifiers[i].Equal(cert.PolicyIdentifiers[j]) {
+                // Any one duplicate fails the test, so return here
+                return &LintResult{Status: Error}
+            }
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_cert_policy_duplicate",
+        Description:   "A certificate policy OID must not appear more than once in the extension",
+        Citation:      "RFC 5280: 4.2.1.4",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC5280Date,
+        Lint:          &ExtCertPolicyDuplicate{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_ia5_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_ia5_string.go
new file mode 100644
index 00000000..b07470b2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_ia5_string.go
@@ -0,0 +1,71 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/********************************************************************
+
+An explicitText field includes the textual statement directly in
+the certificate. The explicitText field is a string with a
+maximum size of 200 characters. Conforming CAs SHOULD use the
+UTF8String encoding for explicitText. VisibleString or BMPString
+are acceptable but less preferred alternatives. Conforming CAs
+MUST NOT encode explicitText as IA5String. The explicitText string
+SHOULD NOT include any control characters (e.g., U+0000 to U+001F
+and U+007F to U+009F). When the UTF8String or BMPString encoding
+is used, all character sequences SHOULD be normalized according
+to Unicode normalization form C (NFC) [NFC].
+********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type explicitTextIA5String struct{}
+
+func (l *explicitTextIA5String) Initialize() error {
+    return nil
+}
+
+func (l *explicitTextIA5String) CheckApplies(c *x509.Certificate) bool {
+    for _, text := range c.ExplicitTexts {
+        if text != nil {
+            return true
+        }
+    }
+    return false
+}
+
+func (l *explicitTextIA5String) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.ExplicitTexts {
+        for _, text := range firstLvl {
+            if text.Tag == 22 { // ASN.1 universal tag 22 = IA5String
+                return &LintResult{Status: Error}
+            }
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_cert_policy_explicit_text_ia5_string",
+        Description:   "Compliant certificates must not encode explicitText as an IA5String",
+        Citation:      "RFC 6818: 3",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC6818Date,
+        Lint:          &explicitTextIA5String{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_includes_control.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_includes_control.go
new file mode 100644
index 00000000..648d5ed4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_includes_control.go
@@ -0,0 +1,89 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*********************************************************************
+An explicitText field includes the textual statement directly in
+the certificate. The explicitText field is a string with a
+maximum size of 200 characters. Conforming CAs SHOULD use the
+UTF8String encoding for explicitText, but MAY use IA5String.
+Conforming CAs MUST NOT encode explicitText as VisibleString or
+BMPString. The explicitText string SHOULD NOT include any control
+characters (e.g., U+0000 to U+001F and U+007F to U+009F). When
+the UTF8String encoding is used, all character sequences SHOULD be
+normalized according to Unicode normalization form C (NFC) [NFC].
+*********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type controlChar struct{}
+
+func (l *controlChar) Initialize() error {
+    return nil
+}
+
+func (l *controlChar) CheckApplies(c *x509.Certificate) bool {
+    for _, text := range c.ExplicitTexts {
+        if text != nil {
+            return true
+        }
+    }
+    return false
+}
+
+func (l *controlChar) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.ExplicitTexts {
+        for _, text := range firstLvl {
+            if text.Tag == 12 { // ASN.1 universal tag 12 = UTF8String
+                for i := 0; i < len(text.Bytes); i++ {
+                    if text.Bytes[i]&0x80 == 0 { // single-byte (ASCII) sequence
+                        if text.Bytes[i] < 0x20 || text.Bytes[i] == 0x7f { // C0 controls or DEL
+                            return &LintResult{Status: Warn}
+                        }
+                    } else if text.Bytes[i]&0x20 == 0 { // two-byte sequence
+                        if text.Bytes[i] == 0xc2 && text.Bytes[i+1] >= 0x80 && text.Bytes[i+1] <= 0x9f { // C1 controls U+0080-U+009F
+                            return &LintResult{Status: Warn}
+                        }
+                        i += 1
+                    } else if text.Bytes[i]&0x10 == 0 { // three-byte sequence
+                        i += 2
+                    } else if text.Bytes[i]&0x08 == 0 { // four-byte sequence
+                        i += 3
+                    } else if text.Bytes[i]&0x04 == 0 {
+                        i += 4
+                    } else if text.Bytes[i]&0x02 == 0 {
+                        i += 5
+                    }
+                }
+            }
+        }
+    }
+
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "w_ext_cert_policy_explicit_text_includes_control",
+        Description:   "Explicit text should not include any control characters",
+        Citation:      "RFC 6818: 3",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC6818Date,
+        Lint:          &controlChar{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_not_nfc.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_not_nfc.go
new file mode 100644
index 00000000..78128034
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_not_nfc.go
@@ -0,0 +1,65 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+   When the UTF8String encoding is used, all character sequences SHOULD be
+   normalized according to Unicode normalization form C (NFC) [NFC].
+************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+    "golang.org/x/text/unicode/norm"
+)
+
+type ExtCertPolicyExplicitTextNotNFC struct{}
+
+func (l *ExtCertPolicyExplicitTextNotNFC) Initialize() error {
+    return nil
+}
+
+func (l *ExtCertPolicyExplicitTextNotNFC) CheckApplies(c *x509.Certificate) bool {
+    for _, text := range c.ExplicitTexts {
+        if text != nil {
+            return true
+        }
+    }
+    return false
+}
+
+func (l *ExtCertPolicyExplicitTextNotNFC) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.ExplicitTexts {
+        for _, text := range firstLvl {
+            if text.Tag == 12 || text.Tag == 30 { // UTF8String or BMPString
+                if !norm.NFC.IsNormal(text.Bytes) {
+                    return &LintResult{Status: Warn}
+                }
+            }
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "w_ext_cert_policy_explicit_text_not_nfc",
+        Description:   "When the UTF8String or BMPString encoding is used for the explicitText field in a certificate policy, it SHOULD be normalized to Unicode normalization form C (NFC)",
+        Citation:      "RFC 6818: 3",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC6818Date,
+        Lint:          &ExtCertPolicyExplicitTextNotNFC{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_not_utf8.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_not_utf8.go
new file mode 100644
index 00000000..754d5eb3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_not_utf8.go
@@ -0,0 +1,70 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************
+An explicitText field includes the textual statement directly in
+the certificate. The explicitText field is a string with a
+maximum size of 200 characters. Conforming CAs SHOULD use the
+UTF8String encoding for explicitText. VisibleString or BMPString
+are acceptable but less preferred alternatives. Conforming CAs
+MUST NOT encode explicitText as IA5String. The explicitText string
+SHOULD NOT include any control characters (e.g., U+0000 to U+001F
+and U+007F to U+009F). When the UTF8String or BMPString encoding
+is used, all character sequences SHOULD be normalized according
+to Unicode normalization form C (NFC) [NFC].
+*******************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type explicitTextUtf8 struct{}
+
+func (l *explicitTextUtf8) Initialize() error {
+    return nil
+}
+
+func (l *explicitTextUtf8) CheckApplies(c *x509.Certificate) bool {
+    for _, text := range c.ExplicitTexts {
+        if text != nil {
+            return true
+        }
+    }
+    return false
+}
+
+func (l *explicitTextUtf8) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.ExplicitTexts {
+        for _, text := range firstLvl {
+            if text.Tag != 12 { // not a UTF8String (ASN.1 universal tag 12)
+                return &LintResult{Status: Warn}
+            }
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "w_ext_cert_policy_explicit_text_not_utf8",
+        Description:   "Compliant certificates should use the utf8string encoding for explicitText",
+        Citation:      "RFC 6818: 3",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC6818Date,
+        Lint:          &explicitTextUtf8{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_too_long.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_too_long.go
new file mode 100644
index 00000000..6d7879e7
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_cert_policy_explicit_text_too_long.go
@@ -0,0 +1,81 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************
+An explicitText field includes the textual statement directly in
+the certificate. The explicitText field is a string with a
+maximum size of 200 characters. Conforming CAs SHOULD use the
+UTF8String encoding for explicitText. VisibleString or BMPString
+are acceptable but less preferred alternatives. Conforming CAs
+MUST NOT encode explicitText as IA5String. The explicitText string
+SHOULD NOT include any control characters (e.g., U+0000 to U+001F
+and U+007F to U+009F). When the UTF8String or BMPString encoding
+is used, all character sequences SHOULD be normalized according
+to Unicode normalization form C (NFC) [NFC].
+*******************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type explicitTextTooLong struct{}
+
+const tagBMPString int = 30
+
+func (l *explicitTextTooLong) Initialize() error {
+    return nil
+}
+
+func (l *explicitTextTooLong) CheckApplies(c *x509.Certificate) bool {
+    for _, text := range c.ExplicitTexts {
+        if text != nil {
+            return true
+        }
+    }
+    return false
+}
+
+func (l *explicitTextTooLong) Execute(c *x509.Certificate) *LintResult {
+    for _, firstLvl := range c.ExplicitTexts {
+        for _, text := range firstLvl {
+            var runes string
+            // If the field is a BMPString, we need to parse the bytes out into
+            // UTF-16-BE runes in order to check their length accurately
+            // The `Bytes` attribute here is the raw representation of the userNotice
+            if text.Tag == tagBMPString {
+                runes, _ = util.ParseBMPString(text.Bytes)
+            } else {
+                runes = string(text.Bytes)
+            }
+            if len(runes) > 200 {
+                return &LintResult{Status: Error}
+            }
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_cert_policy_explicit_text_too_long",
+        Description:   "Explicit text has a maximum size of 200 characters",
+        Citation:      "RFC 6818: 3",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC6818Date,
+        Lint:          &explicitTextTooLong{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_crl_distribution_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_crl_distribution_marked_critical.go
new file mode 100644
index 00000000..be2aa970
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_crl_distribution_marked_critical.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+The CRL distribution points extension identifies how CRL information is obtained. The extension SHOULD be non-critical, but this profile RECOMMENDS support for this extension by CAs and applications.
+************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type ExtCrlDistributionMarkedCritical struct{}
+
+func (l *ExtCrlDistributionMarkedCritical) Initialize() error {
+    return nil
+}
+
+func (l *ExtCrlDistributionMarkedCritical) CheckApplies(cert *x509.Certificate) bool {
+    return util.IsExtInCert(cert, util.CrlDistOID)
+}
+
+func (l *ExtCrlDistributionMarkedCritical) Execute(cert *x509.Certificate) *LintResult {
+    if e := util.GetExtFromCert(cert, util.CrlDistOID); e != nil {
+        if !e.Critical {
+            return &LintResult{Status: Pass}
+        } else {
+            return &LintResult{Status: Warn}
+        }
+    }
+    return &LintResult{Status: NA}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "w_ext_crl_distribution_marked_critical",
+        Description:   "If included, the CRL Distribution Points extension SHOULD NOT be marked critical",
+        Citation:      "RFC 5280: 4.2.1.13",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &ExtCrlDistributionMarkedCritical{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_duplicate_extension.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_duplicate_extension.go
new file mode 100644
index 00000000..01acbbad
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_duplicate_extension.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+"A certificate MUST NOT include more than one instance of a particular extension."
+************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type ExtDuplicateExtension struct{}
+
+func (l *ExtDuplicateExtension) Initialize() error {
+    return nil
+}
+
+func (l *ExtDuplicateExtension) CheckApplies(cert *x509.Certificate) bool {
+    return cert.Version == 3
+}
+
+func (l *ExtDuplicateExtension) Execute(cert *x509.Certificate) *LintResult {
+    // O(n^2) is not terrible here because n is capped around 10
+    for i := 0; i < len(cert.Extensions); i++ {
+        for j := i + 1; j < len(cert.Extensions); j++ {
+            if i != j && cert.Extensions[i].Id.Equal(cert.Extensions[j].Id) {
+                return &LintResult{Status: Error}
+            }
+        }
+    }
+    // Nested loop will return if it finds a duplicate, so safe to assume pass
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_duplicate_extension",
+        Description:   "A certificate MUST NOT include more than one instance of a particular extension",
+        Citation:      "RFC 5280: 4.2",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &ExtDuplicateExtension{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_freshest_crl_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_freshest_crl_marked_critical.go
new file mode 100644
index 00000000..b2faac92
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_freshest_crl_marked_critical.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+The freshest CRL extension identifies how delta CRL information is obtained. The extension MUST be marked as non-critical by conforming CAs. Further discussion of CRL management is contained in Section 5.
+************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zcrypto/x509/pkix"
+    "github.com/zmap/zlint/util"
+)
+
+type ExtFreshestCrlMarkedCritical struct{}
+
+func (l *ExtFreshestCrlMarkedCritical) Initialize() error {
+    return nil
+}
+
+func (l *ExtFreshestCrlMarkedCritical) CheckApplies(cert *x509.Certificate) bool {
+    return util.IsExtInCert(cert, util.FreshCRLOID)
+}
+
+func (l *ExtFreshestCrlMarkedCritical) Execute(cert *x509.Certificate) *LintResult {
+    var fCRL *pkix.Extension = util.GetExtFromCert(cert, util.FreshCRLOID)
+    if fCRL != nil && fCRL.Critical {
+        return &LintResult{Status: Error}
+    } else if fCRL != nil && !fCRL.Critical {
+        return &LintResult{Status: Pass}
+    }
+    return &LintResult{Status: NA} // shouldn't happen
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_freshest_crl_marked_critical",
+        Description:   "Freshest CRL MUST be marked as non-critical by conforming CAs",
+        Citation:      "RFC 5280: 4.2.1.15",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC3280Date,
+        Lint:          &ExtFreshestCrlMarkedCritical{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_critical.go
new file mode 100644
index 00000000..00a2182c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_critical.go
@@ -0,0 +1,55 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+Issuer Alternative Name
+   As with Section 4.2.1.6, this extension is used to associate Internet style identities with the certificate issuer. Issuer alternative name MUST be encoded as in 4.2.1.6. Issuer alternative names are not processed as part of the certification path validation algorithm in Section 6. (That is, issuer alternative names are not used in name chaining and name constraints are not enforced.)
+   Where present, conforming CAs SHOULD mark this extension as non-critical.
+************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type ExtIANCritical struct{}
+
+func (l *ExtIANCritical) Initialize() error {
+    return nil
+}
+
+func (l *ExtIANCritical) CheckApplies(cert *x509.Certificate) bool {
+    return util.IsExtInCert(cert, util.IssuerAlternateNameOID)
+}
+
+func (l *ExtIANCritical) Execute(cert *x509.Certificate) *LintResult {
+    if util.GetExtFromCert(cert, util.IssuerAlternateNameOID).Critical {
+        return &LintResult{Status: Warn}
+    } else {
+        return &LintResult{Status: Pass}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "w_ext_ian_critical",
+        Description:   "Issuer alternate name should be marked as non-critical",
+        Citation:      "RFC 5280: 4.2.1.7",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &ExtIANCritical{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_dns_not_ia5_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_dns_not_ia5_string.go
new file mode 100644
index 00000000..8b63acdc
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_dns_not_ia5_string.go
@@ -0,0 +1,73 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/********************************************************************
+RFC 5280: 4.2.1.7
+When the subjectAltName extension contains a domain name system
+label, the domain name MUST be stored in the DNSName (an IA5String).
+The name MUST be in the "preferred name syntax", as specified by
+Section 3.5 of [RFC1034] and as modified by Section 2.1 of
+[RFC1123]. Note that while uppercase and lowercase letters are
+allowed in domain names, no significance is attached to the case. In
+addition, while the string " " is a legal domain name, subjectAltName
+extensions with a DNSName of " " MUST NOT be used. Finally, the use
+of the DNS representation for Internet mail addresses
+(subscriber.example.com instead of subscriber@example.com) MUST NOT
+be used; such identities are to be encoded as rfc822Name. Rules for
+encoding internationalized domain names are specified in Section 7.2.
+********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type IANDNSNotIA5String struct{}
+
+func (l *IANDNSNotIA5String) Initialize() error {
+    return nil
+}
+
+func (l *IANDNSNotIA5String) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANDNSNotIA5String) Execute(c *x509.Certificate) *LintResult {
+    ext := util.GetExtFromCert(c, util.IssuerAlternateNameOID)
+    if ext == nil {
+        return &LintResult{Status: Fatal}
+    }
+    ok, err := util.AllAlternateNameWithTagAreIA5(ext, util.DNSNameTag)
+    if err != nil {
+        return &LintResult{Status: Fatal}
+    }
+    if ok {
+        return &LintResult{Status: Pass}
+    } else {
+        return &LintResult{Status: Error}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_ian_dns_not_ia5_string",
+        Description:   "DNSNames MUST be IA5 strings",
+        Citation:      "RFC 5280: 4.2.1.7",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &IANDNSNotIA5String{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_empty_name.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_empty_name.go
new file mode 100644
index 00000000..d0a5428e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_empty_name.go
@@ -0,0 +1,80 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/******************************************************************
+RFC 5280: 4.2.1.7
+If the subjectAltName extension is present, the sequence MUST contain
+at least one entry. Unlike the subject field, conforming CAs MUST
+NOT issue certificates with subjectAltNames containing empty
+GeneralName fields. For example, an rfc822Name is represented as an
+IA5String. While an empty string is a valid IA5String, such an
+rfc822Name is not permitted by this profile. The behavior of clients
+that encounter such a certificate when processing a certification
+path is not defined by this profile.
+******************************************************************/
+
+import (
+    "encoding/asn1"
+
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type IANEmptyName struct{}
+
+func (l *IANEmptyName) Initialize() error {
+    return nil
+}
+
+func (l *IANEmptyName) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANEmptyName) Execute(c *x509.Certificate) *LintResult {
+    value := util.GetExtFromCert(c, util.IssuerAlternateNameOID).Value
+    var seq asn1.RawValue
+    if _, err := asn1.Unmarshal(value, &seq); err != nil {
+        return &LintResult{Status: Fatal}
+    }
+    // issuerAltName must be a SEQUENCE OF GeneralName (ASN.1 universal tag 16)
+    if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 {
+        return &LintResult{Status: Fatal}
+    }
+
+    rest := seq.Bytes
+    for len(rest) > 0 {
+        var v asn1.RawValue
+        var err error
+        rest, err = asn1.Unmarshal(rest, &v)
+        if err != nil {
+            return &LintResult{Status: NA}
+        }
+        if len(v.Bytes) == 0 {
+            return &LintResult{Status: Error}
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_ian_empty_name",
+        Description:   "General name fields must not be empty in IAN",
+        Citation:      "RFC 5280: 4.2.1.7",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &IANEmptyName{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_no_entries.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_no_entries.go
new file mode 100644
index 00000000..b3660739
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_no_entries.go
@@ -0,0 +1,62 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/**********************************************************************
+RFC 5280: 4.2.1.7
+If the issuerAltName extension is present, the sequence MUST contain
+   at least one entry. Unlike the subject field, conforming CAs MUST
+   NOT issue certificates with subjectAltNames containing empty
+   GeneralName fields. For example, an rfc822Name is represented as an
+   IA5String. While an empty string is a valid IA5String, such an
+   rfc822Name is not permitted by this profile. The behavior of clients
+   that encounter such a certificate when processing a certification
+   path is not defined by this profile.
+***********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type IANNoEntry struct{}
+
+func (l *IANNoEntry) Initialize() error {
+    return nil
+}
+
+func (l *IANNoEntry) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANNoEntry) Execute(c *x509.Certificate) *LintResult {
+    ian := util.GetExtFromCert(c, util.IssuerAlternateNameOID)
+    if util.IsEmptyASN1Sequence(ian.Value) {
+        return &LintResult{Status: Error}
+    } else {
+        return &LintResult{Status: Pass}
+    }
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_ian_no_entries",
+        Description:   "If present, the IAN extension must contain at least one entry",
+        Citation:      "RFC 5280: 4.2.1.7",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &IANNoEntry{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_rfc822_format_invalid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_rfc822_format_invalid.go
new file mode 100644
index 00000000..db89f686
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_rfc822_format_invalid.go
@@ -0,0 +1,69 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************************************
+RFC 5280: 4.2.1.6
+   When the issuerAltName extension contains an Internet mail address,
+   the address MUST be stored in the rfc822Name. The format of an
+   rfc822Name is a "Mailbox" as defined in Section 4.1.2 of [RFC2821].
+   A Mailbox has the form "Local-part@Domain". Note that a Mailbox has
+   no phrase (such as a common name) before it, has no comment (text
+   surrounded in parentheses) after it, and is not surrounded by "<" and
+   ">". Rules for encoding Internet mail addresses that include
+   internationalized domain names are specified in Section 7.5.
+************************************************************************/
+
+import (
+    "strings"
+
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type IANEmail struct{}
+
+func (l *IANEmail) Initialize() error {
+    return nil
+}
+
+func (l *IANEmail) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANEmail) Execute(c *x509.Certificate) *LintResult {
+    for _, str := range c.IANEmailAddresses {
+        if str == "" {
+            continue
+        }
+        if strings.Contains(str, " ") {
+            return &LintResult{Status: Error}
+        } else if str[0] == '<' || str[len(str)-1] == ')' {
+            return &LintResult{Status: Error}
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_ian_rfc822_format_invalid",
+        Description:   "Email must not be surrounded with `<>`, and there MUST NOT be trailing comments in `()`",
+        Citation:      "RFC 5280: 4.2.1.7",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &IANEmail{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_space_dns_name.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_space_dns_name.go
new file mode 100644
index 00000000..746c6662
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_space_dns_name.go
@@ -0,0 +1,66 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/**********************************************************************
+RFC 5280: 4.2.1.7
+When the issuerAltName extension contains a domain name system
+label, the domain name MUST be stored in the dNSName (an IA5String).
+The name MUST be in the "preferred name syntax", as specified by
+Section 3.5 of [RFC1034] and as modified by Section 2.1 of
+[RFC1123]. Note that while uppercase and lowercase letters are
+allowed in domain names, no significance is attached to the case. In
+addition, while the string " " is a legal domain name, subjectAltName
+extensions with a dNSName of " " MUST NOT be used. Finally, the use
+of the DNS representation for Internet mail addresses
+(subscriber.example.com instead of subscriber@example.com) MUST NOT
+be used; such identities are to be encoded as rfc822Name. Rules for
+encoding internationalized domain names are specified in Section 7.2.
+**********************************************************************/
+
+import (
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type IANSpace struct{}
+
+func (l *IANSpace) Initialize() error {
+    return nil
+}
+
+func (l *IANSpace) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANSpace) Execute(c *x509.Certificate) *LintResult {
+    for _, dns := range c.IANDNSNames {
+        if dns == " " {
+            return &LintResult{Status: Error}
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_ian_space_dns_name",
+        Description:   "dNSName ' ' MUST NOT be used",
+        Citation:      "RFC 5280: 4.2.1.6",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC2459Date,
+        Lint:          &IANSpace{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_format_invalid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_format_invalid.go
new file mode 100644
index 00000000..325a4203
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_format_invalid.go
@@ -0,0 +1,69 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+The name MUST include both a
+scheme (e.g., "http" or "ftp") and a scheme-specific-part.
+************************************************/
+
+import (
+    "net/url"
+
+    "github.com/zmap/zcrypto/x509"
+    "github.com/zmap/zlint/util"
+)
+
+type IANURIFormat struct{}
+
+func (l *IANURIFormat) Initialize() error {
+    return nil
+}
+
+func (l *IANURIFormat) CheckApplies(c *x509.Certificate) bool {
+    return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANURIFormat) Execute(c *x509.Certificate) *LintResult {
+    for _, uri := range c.IANURIs {
+        parsedURI, err := url.Parse(uri)
+        if err != nil {
+            return &LintResult{Status: Error}
+        }
+        // scheme
+        if parsedURI.Scheme == "" {
+            return &LintResult{Status: Error}
+        }
+        // scheme-specific part
+        if parsedURI.Host == "" && parsedURI.User == nil && parsedURI.Opaque == "" && parsedURI.Path == "" {
+            return &LintResult{Status: Error}
+        }
+    }
+    return &LintResult{Status: Pass}
+}
+
+func init() {
+    RegisterLint(&Lint{
+        Name:          "e_ext_ian_uri_format_invalid",
+        Description:   "URIs in the issuerAltName extension MUST have a scheme and scheme specific part",
+        Citation:      "RFC 5280: 4.2.1.6",
+        Source:        RFC5280,
+        EffectiveDate: util.RFC5280Date,
+        Lint:          &IANURIFormat{},
+    })
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_host_not_fqdn_or_ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_host_not_fqdn_or_ip.go
new file mode 100644
index 00000000..3590f1ab
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_host_not_fqdn_or_ip.go
@@ -0,0 +1,71 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*********************************************************************
+When the issuerAltName extension contains a URI, the name MUST be
+stored in the uniformResourceIdentifier (an IA5String). The name
+MUST NOT be a relative URI, and it MUST follow the URI syntax and
+encoding rules specified in [RFC3986]. The name MUST include both a
+scheme (e.g., "http" or "ftp") and a scheme-specific-part. URIs that
+include an authority ([RFC3986], Section 3.2) MUST include a fully
+qualified domain name or IP address as the host. Rules for encoding
+Internationalized Resource Identifiers (IRIs) are specified in
+Section 7.4.
+*********************************************************************/ + +import ( + "net/url" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IANURIFQDNOrIP struct{} + +func (l *IANURIFQDNOrIP) Initialize() error { + return nil +} + +func (l *IANURIFQDNOrIP) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *IANURIFQDNOrIP) Execute(c *x509.Certificate) *LintResult { + for _, uri := range c.IANURIs { + if uri != "" { + parsedUrl, err := url.Parse(uri) + if err != nil { + return &LintResult{Status: Error} + } + host := parsedUrl.Host + if !util.AuthIsFQDNOrIP(host) { + return &LintResult{Status: Error} + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_ian_uri_host_not_fqdn_or_ip", + Description: "URIs that include an authority ([RFC3986], Section 3.2) MUST include a fully qualified domain name or IP address as the host", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &IANURIFQDNOrIP{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_not_ia5.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_not_ia5.go new file mode 100644 index 00000000..04487194 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_not_ia5.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +When the issuerAltName extension contains a URI, the name MUST be +stored in the uniformResourceIdentifier (an IA5String). 
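+
+As an illustration (not RFC text): IA5String is the ASCII range
+0x00-0x7F, so the check is a per-rune comparison; the inputs below are
+hypothetical:
+
+	isIA5 := func(s string) bool {
+		for _, r := range s {
+			if r > unicode.MaxASCII {
+				return false
+			}
+		}
+		return true
+	}
+	// isIA5("https://xn--exmple-cua.com") == true  (all ASCII)
+	// isIA5("https://exämple.com")        == false (U+00E4 is outside IA5)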
+************************************************/
+
+import (
+	"unicode"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type IANURIIA5String struct{}
+
+func (l *IANURIIA5String) Initialize() error {
+	return nil
+}
+
+func (l *IANURIIA5String) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.IssuerAlternateNameOID)
+}
+
+func (l *IANURIIA5String) Execute(c *x509.Certificate) *LintResult {
+	for _, uri := range c.IANURIs {
+		for _, c := range uri {
+			if c > unicode.MaxASCII {
+				return &LintResult{Status: Error}
+			}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ext_ian_uri_not_ia5",
+		Description:   "When issuerAltName contains a URI, the name MUST be an IA5 string",
+		Citation:      "RFC5280: 4.2.1.7",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC5280Date,
+		Lint:          &IANURIIA5String{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_relative.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_relative.go
new file mode 100644
index 00000000..fc9db80a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_ian_uri_relative.go
@@ -0,0 +1,70 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*************************************************************************
+When the issuerAltName extension contains a URI, the name MUST be
+stored in the uniformResourceIdentifier (an IA5String). The name
+MUST NOT be a relative URI, and it MUST follow the URI syntax and
+encoding rules specified in [RFC3986]. The name MUST include both a
+scheme (e.g., "http" or "ftp") and a scheme-specific-part. URIs that
+include an authority ([RFC3986], Section 3.2) MUST include a fully
+qualified domain name or IP address as the host. Rules for encoding
+Internationalized Resource Identifiers (IRIs) are specified in
+Section 7.4.
+*************************************************************************/ + +import ( + "net/url" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type uriRelative struct{} + +func (l *uriRelative) Initialize() error { + return nil +} + +func (l *uriRelative) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *uriRelative) Execute(c *x509.Certificate) *LintResult { + for _, uri := range c.IANURIs { + parsed_uri, err := url.Parse(uri) + + if err != nil { + return &LintResult{Status: Error} + } + + if !parsed_uri.IsAbs() { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_ian_uri_relative", + Description: "When issuerAltName extension is present and the URI is used, the name MUST NOT be a relative URI", + Citation: "RFC 5280: 4.2.1.7", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &uriRelative{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_cert_sign_without_ca.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_cert_sign_without_ca.go new file mode 100644 index 00000000..eba91c54 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_cert_sign_without_ca.go @@ -0,0 +1,64 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************ +RFC 5280: 4.2.1.9 +The cA boolean indicates whether the certified public key may be used + to verify certificate signatures. If the cA boolean is not asserted, + then the keyCertSign bit in the key usage extension MUST NOT be + asserted. If the basic constraints extension is not present in a + version 3 certificate, or the extension is present but the cA boolean + is not asserted, then the certified public key MUST NOT be used to + verify certificate signatures. 
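+
+As an illustration (not RFC text), the rule reduces to one boolean over
+the parsed fields, assuming util.IsCACert amounts to the certificate's
+cA boolean (c.IsCA):
+
+	violation := c.KeyUsage&x509.KeyUsageCertSign != 0 &&
+		!(c.BasicConstraintsValid && c.IsCA)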
+************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type keyUsageCertSignNoCa struct{}
+
+func (l *keyUsageCertSignNoCa) Initialize() error {
+	return nil
+}
+
+func (l *keyUsageCertSignNoCa) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.KeyUsageOID)
+}
+
+func (l *keyUsageCertSignNoCa) Execute(c *x509.Certificate) *LintResult {
+	if (c.KeyUsage & x509.KeyUsageCertSign) != 0 {
+		if c.BasicConstraintsValid && util.IsCACert(c) { // CA certs may assert certificate signing usage
+			return &LintResult{Status: Pass}
+		} else {
+			return &LintResult{Status: Error}
+		}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ext_key_usage_cert_sign_without_ca",
+		Description:   "If the keyCertSign bit is asserted, then the cA bit in the basic constraints extension MUST also be asserted",
+		Citation:      "RFC 5280: 4.2.1.3 & 4.2.1.9",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC3280Date,
+		Lint:          &keyUsageCertSignNoCa{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_not_critical.go
new file mode 100644
index 00000000..47271d68
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_not_critical.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+// "When present, conforming CAs SHOULD mark this extension as critical."
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type checkKeyUsageCritical struct{}
+
+func (l *checkKeyUsageCritical) Initialize() error {
+	return nil
+}
+
+func (l *checkKeyUsageCritical) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.KeyUsageOID)
+}
+
+func (l *checkKeyUsageCritical) Execute(c *x509.Certificate) *LintResult {
+	// Warn when the keyUsage extension is present but not marked critical
+	keyUsage := util.GetExtFromCert(c, util.KeyUsageOID)
+	if keyUsage == nil {
+		return &LintResult{Status: NA}
+	}
+	if keyUsage.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Warn}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_ext_key_usage_not_critical",
+		Description:   "The keyUsage extension SHOULD be critical",
+		Citation:      "RFC 5280: 4.2.1.3",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &checkKeyUsageCritical{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_without_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_without_bits.go
new file mode 100644
index 00000000..574719fd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_key_usage_without_bits.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/***********************************************************************
+ This profile does not restrict the combinations of bits that may be
+ set in an instantiation of the keyUsage extension. However,
+ appropriate values for keyUsage extensions for particular algorithms
+ are specified in [RFC3279], [RFC4055], and [RFC4491]. When the
+ keyUsage extension appears in a certificate, at least one of the bits
+ MUST be set to 1.
+***********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type keyUsageBitsSet struct{} + +func (l *keyUsageBitsSet) Initialize() error { + return nil +} + +func (l *keyUsageBitsSet) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.KeyUsageOID) +} + +func (l *keyUsageBitsSet) Execute(c *x509.Certificate) *LintResult { + if c.KeyUsage == 0 { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_key_usage_without_bits", + Description: "When the keyUsage extension is included, at least one bit MUST be set to 1", + Citation: "RFC 5280: 4.2.1.3", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &keyUsageBitsSet{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_name_constraints_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_name_constraints_not_critical.go new file mode 100644 index 00000000..62b28baa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_name_constraints_not_critical.go @@ -0,0 +1,62 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************ +Restrictions are defined in terms of permitted or excluded name + subtrees. Any name matching a restriction in the excludedSubtrees + field is invalid regardless of information appearing in the + permittedSubtrees. Conforming CAs MUST mark this extension as + critical and SHOULD NOT impose name constraints on the x400Address, + ediPartyName, or registeredID name forms. Conforming CAs MUST NOT + issue certificates where name constraints is an empty sequence. That + is, either the permittedSubtrees field or the excludedSubtrees MUST + be present. 
+************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type nameConstraintCrit struct{}
+
+func (l *nameConstraintCrit) Initialize() error {
+	return nil
+}
+
+func (l *nameConstraintCrit) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.NameConstOID)
+}
+
+func (l *nameConstraintCrit) Execute(c *x509.Certificate) *LintResult {
+	e := util.GetExtFromCert(c, util.NameConstOID)
+	if e.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ext_name_constraints_not_critical",
+		Description:   "If it is included, conforming CAs MUST mark the name constraints extension as critical",
+		Citation:      "RFC 5280: 4.2.1.10",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &nameConstraintCrit{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_name_constraints_not_in_ca.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_name_constraints_not_in_ca.go
new file mode 100644
index 00000000..dd70b319
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_name_constraints_not_in_ca.go
@@ -0,0 +1,60 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/***********************************************************************
+RFC 5280: 4.2.1.10
+The name constraints extension, which MUST be used only in a CA
+ certificate, indicates a name space within which all subject names in
+ subsequent certificates in a certification path MUST be located.
+ Restrictions apply to the subject distinguished name and apply to
+ subject alternative names. Restrictions apply only when the
+ specified name form is present. If no name of the type is in the
+ certificate, the certificate is acceptable.
+***********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type nameConstraintNotCa struct{} + +func (l *nameConstraintNotCa) Initialize() error { + return nil +} + +func (l *nameConstraintNotCa) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.NameConstOID) +} + +func (l *nameConstraintNotCa) Execute(c *x509.Certificate) *LintResult { + if !util.IsCACert(c) { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_name_constraints_not_in_ca", + Description: "The name constraints extension MUST only be used in CA certificates", + Citation: "RFC 5280: 4.2.1.10", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &nameConstraintNotCa{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_constraints_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_constraints_empty.go new file mode 100644 index 00000000..748e71c5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_constraints_empty.go @@ -0,0 +1,75 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************* +RFC 5280: 4.2.1.11 +Conforming CAs MUST NOT issue certificates where policy constraints + is an empty sequence. That is, either the inhibitPolicyMapping field + or the requireExplicitPolicy field MUST be present. The behavior of + clients that encounter an empty policy constraints field is not + addressed in this profile. 
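+
+As an illustration (not RFC text), an empty policyConstraints value is
+just a zero-length DER SEQUENCE, which encoding/asn1 exposes directly:
+
+	der := []byte{0x30, 0x00} // SEQUENCE, length 0
+	var seq asn1.RawValue
+	_, err := asn1.Unmarshal(der, &seq)
+	// err == nil, seq.Tag == 16, len(seq.Bytes) == 0 -> reported as Error below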
+*************************************************************************/
+
+import (
+	"encoding/asn1"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type policyConstraintsContents struct{}
+
+func (l *policyConstraintsContents) Initialize() error {
+	return nil
+}
+
+func (l *policyConstraintsContents) CheckApplies(c *x509.Certificate) bool {
+	if !(util.IsExtInCert(c, util.PolicyConstOID)) {
+		return false
+	}
+	pc := util.GetExtFromCert(c, util.PolicyConstOID)
+	var seq asn1.RawValue
+	rest, err := asn1.Unmarshal(pc.Value, &seq) // only one sequence, so rest should be empty
+	if err != nil || len(rest) != 0 || seq.Tag != 16 || seq.Class != 0 || !seq.IsCompound {
+		return false
+	}
+	return true
+}
+
+func (l *policyConstraintsContents) Execute(c *x509.Certificate) *LintResult {
+	pc := util.GetExtFromCert(c, util.PolicyConstOID)
+	var seq asn1.RawValue
+	_, err := asn1.Unmarshal(pc.Value, &seq) // the outer SEQUENCE was already validated in CheckApplies
+	if err != nil {
+		return &LintResult{Status: Fatal}
+	}
+	if len(seq.Bytes) == 0 {
+		return &LintResult{Status: Error}
+	}
+
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ext_policy_constraints_empty",
+		Description:   "Conforming CAs MUST NOT issue certificates where policy constraints is an empty sequence. That is, either the inhibitPolicyMapping field or the requireExplicitPolicy field MUST be present",
+		Citation:      "RFC 5280: 4.2.1.11",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &policyConstraintsContents{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_constraints_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_constraints_not_critical.go
new file mode 100644
index 00000000..6a09576e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_constraints_not_critical.go
@@ -0,0 +1,55 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+RFC 5280: 4.2.1.11
+Conforming CAs MUST mark this extension as critical.
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type policyConstraintsCritical struct{} + +func (l *policyConstraintsCritical) Initialize() error { + return nil +} + +func (l *policyConstraintsCritical) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.PolicyConstOID) +} + +func (l *policyConstraintsCritical) Execute(c *x509.Certificate) *LintResult { + pc := util.GetExtFromCert(c, util.PolicyConstOID) + if !pc.Critical { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_policy_constraints_not_critical", + Description: "Conforming CAs MUST mark the policy constraints extension as critical", + Citation: "RFC 5280: 4.2.1.11", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &policyConstraintsCritical{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_any_policy.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_any_policy.go new file mode 100644 index 00000000..feabaa40 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_any_policy.go @@ -0,0 +1,64 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************** +RFC 5280: 4.2.1.5 +Each issuerDomainPolicy named in the policy mappings extension SHOULD + also be asserted in a certificate policies extension in the same + certificate. Policies MUST NOT be mapped either to or from the + special value anyPolicy (Section 4.2.1.4). 
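+
+As an illustration (not RFC text): anyPolicy is the OID 2.5.29.32.0, so
+for a hypothetical mapped pair (issuerDomainPolicy, subjectDomainPolicy)
+the check is plain OID equality:
+
+	anyPolicy := asn1.ObjectIdentifier{2, 5, 29, 32, 0}
+	bad := anyPolicy.Equal(pair[0]) || anyPolicy.Equal(pair[1])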
+********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type policyMapAnyPolicy struct{} + +func (l *policyMapAnyPolicy) Initialize() error { + return nil +} + +func (l *policyMapAnyPolicy) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.PolicyMapOID) +} + +func (l *policyMapAnyPolicy) Execute(c *x509.Certificate) *LintResult { + extPolMap := util.GetExtFromCert(c, util.PolicyMapOID) + polMap, err := util.GetMappedPolicies(extPolMap) + if err != nil { + return &LintResult{Status: Fatal} + } + + for _, pair := range polMap { + if util.AnyPolicyOID.Equal(pair[0]) || util.AnyPolicyOID.Equal(pair[1]) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_policy_map_any_policy", + Description: "Policies must not be mapped to or from the anyPolicy value", + Citation: "RFC 5280: 4.2.1.5", + Source: RFC5280, + EffectiveDate: util.RFC3280Date, + Lint: &policyMapAnyPolicy{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_not_critical.go new file mode 100644 index 00000000..97f68150 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_not_critical.go @@ -0,0 +1,56 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/********************************************************** +RFC 5280: 4.2.1.5. Policy Mappings +This extension MAY be supported by CAs and/or applications. + Conforming CAs SHOULD mark this extension as critical. 
+**********************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type policyMapCritical struct{}
+
+func (l *policyMapCritical) Initialize() error {
+	return nil
+}
+
+func (l *policyMapCritical) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.PolicyMapOID)
+}
+
+func (l *policyMapCritical) Execute(c *x509.Certificate) *LintResult {
+	polMap := util.GetExtFromCert(c, util.PolicyMapOID)
+	if polMap.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Warn}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_ext_policy_map_not_critical",
+		Description:   "Policy mappings should be marked as critical",
+		Citation:      "RFC 5280: 4.2.1.5",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &policyMapCritical{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_not_in_cert_policy.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_not_in_cert_policy.go
new file mode 100644
index 00000000..8da9fbbb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_policy_map_not_in_cert_policy.go
@@ -0,0 +1,63 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*********************************************************************
+RFC 5280: 4.2.1.5
+Each issuerDomainPolicy named in the policy mappings extension SHOULD
+ also be asserted in a certificate policies extension in the same
+ certificate. Policies MUST NOT be mapped either to or from the
+ special value anyPolicy (Section 4.2.1.4).
+*********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type policyMapMatchesCertPolicy struct{} + +func (l *policyMapMatchesCertPolicy) Initialize() error { + return nil +} + +func (l *policyMapMatchesCertPolicy) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.PolicyMapOID) +} + +func (l *policyMapMatchesCertPolicy) Execute(c *x509.Certificate) *LintResult { + extPolMap := util.GetExtFromCert(c, util.PolicyMapOID) + polMap, err := util.GetMappedPolicies(extPolMap) + if err != nil { + return &LintResult{Status: Fatal} + } + for _, pair := range polMap { + if !util.SliceContainsOID(c.PolicyIdentifiers, pair[0]) { + return &LintResult{Status: Warn} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_ext_policy_map_not_in_cert_policy", + Description: "Each issuerDomainPolicy named in the policy mappings extension should also be asserted in a certificate policies extension", + Citation: "RFC 5280: 4.2.1.5", + Source: RFC5280, + EffectiveDate: util.RFC3280Date, + Lint: &policyMapMatchesCertPolicy{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_contains_reserved_ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_contains_reserved_ip.go new file mode 100644 index 00000000..50203798 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_contains_reserved_ip.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.4.2.1 +Also as of the Effective Date, the CA SHALL NOT +issue a certificate with an Expiry Date later than +1 November 2015 with a subjectAlternativeName extension +or Subject commonName field containing a Reserved IP +Address or Internal Name. 
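+
+As an illustration (not BR text): "Reserved IP Address" refers to the
+IANA special-purpose address registries. A deliberately narrow sketch
+using only one RFC 1918 block (util.IsIANAReserved covers far more):
+
+	_, rfc1918, _ := net.ParseCIDR("10.0.0.0/8")
+	flagged := rfc1918.Contains(net.ParseIP("10.1.2.3")) // true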
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type SANReservedIP struct{}
+
+func (l *SANReservedIP) Initialize() error {
+	return nil
+}
+
+func (l *SANReservedIP) CheckApplies(c *x509.Certificate) bool {
+	return c.NotAfter.After(util.NoReservedIP)
+}
+
+func (l *SANReservedIP) Execute(c *x509.Certificate) *LintResult {
+	for _, ip := range c.IPAddresses {
+		if util.IsIANAReserved(ip) {
+			return &LintResult{Status: Error}
+		}
+	}
+
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ext_san_contains_reserved_ip",
+		Description:   "Effective October 1, 2016, CAs must revoke all unexpired certificates that contain a reserved IP or internal name.",
+		Citation:      "BRs: 7.1.4.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &SANReservedIP{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_critical_with_subject_dn.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_critical_with_subject_dn.go
new file mode 100644
index 00000000..babf7247
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_critical_with_subject_dn.go
@@ -0,0 +1,60 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+Further, if the only subject identity included in the certificate is an
+ alternative name form (e.g., an electronic mail address), then the subject
+ distinguished name MUST be empty (an empty sequence), and the subjectAltName
+ extension MUST be present. If the subject field contains an empty sequence,
+ then the issuing CA MUST include a subjectAltName extension that is marked as
+ critical. When including the subjectAltName extension in a certificate that
+ has a non-empty subject distinguished name, conforming CAs SHOULD mark the
+ subjectAltName extension as non-critical.
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type ExtSANCriticalWithSubjectDN struct{} + +func (l *ExtSANCriticalWithSubjectDN) Initialize() error { + return nil +} + +func (l *ExtSANCriticalWithSubjectDN) CheckApplies(cert *x509.Certificate) bool { + return util.IsExtInCert(cert, util.SubjectAlternateNameOID) +} + +func (l *ExtSANCriticalWithSubjectDN) Execute(cert *x509.Certificate) *LintResult { + san := util.GetExtFromCert(cert, util.SubjectAlternateNameOID) + if san.Critical && util.NotAllNameFieldsAreEmpty(&cert.Subject) { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_ext_san_critical_with_subject_dn", + Description: "If the subject contains a distinguished name, subjectAlternateName SHOULD be non-critical", + Citation: "RFC 5280: 4.2.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.RFC5280Date, + Lint: &ExtSANCriticalWithSubjectDN{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_directory_name_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_directory_name_present.go new file mode 100644 index 00000000..64b9bfc7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_directory_name_present.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************************ +7.1.4.2.1. Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +Contents: This extension MUST contain at least one entry. Each entry MUST be either a dNSName containing +the Fully‐Qualified Domain Name or an iPAddress containing the IP address of a server. The CA MUST +confirm that the Applicant controls the Fully‐Qualified Domain Name or IP address or has been granted the +right to use it by the Domain Name Registrant or IP address assignee, as appropriate. +Wildcard FQDNs are permitted. 
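+
+As an illustration (not BR text) of how this requirement is exercised, a
+certificate can be run through all registered lints; derBytes is
+hypothetical and the exact result-set shape is assumed from this
+vendored zlint v1 API:
+
+	cert, err := x509.ParseCertificate(derBytes) // zcrypto/x509
+	if err == nil {
+		set := zlint.LintCertificate(cert)
+		r := set.Results["e_ext_san_directory_name_present"]
+		_ = r.Status // Pass, or Error when directoryName entries are present
+	}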
+*************************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANDirName struct{} + +func (l *SANDirName) Initialize() error { + return nil +} + +func (l *SANDirName) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANDirName) Execute(c *x509.Certificate) *LintResult { + if c.DirectoryNames != nil { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_directory_name_present", + Description: "The Subject Alternate Name extension MUST contain only 'dnsName' and 'ipaddress' name types", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANDirName{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_dns_name_too_long.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_dns_name_too_long.go new file mode 100644 index 00000000..47017c5a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_dns_name_too_long.go @@ -0,0 +1,50 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANDNSTooLong struct{} + +func (l *SANDNSTooLong) Initialize() error { + return nil +} + +func (l *SANDNSTooLong) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) && len(c.DNSNames) > 0 +} + +func (l *SANDNSTooLong) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.DNSNames { + if len(dns) > 253 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_dns_name_too_long", + Description: "DNSName must be less than or equal to 253 bytes", + Citation: "RFC 5280", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &SANDNSTooLong{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_dns_not_ia5_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_dns_not_ia5_string.go new file mode 100644 index 00000000..5bc854e7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_dns_not_ia5_string.go @@ -0,0 +1,72 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************** +RFC 5280: 4.2.1.6 +When the subjectAltName extension contains a domain name system +label, the domain name MUST be stored in the dNSName (an IA5String). +The name MUST be in the "preferred name syntax", as specified by +Section 3.5 of [RFC1034] and as modified by Section 2.1 of +[RFC1123]. Note that while uppercase and lowercase letters are +allowed in domain names, no significance is attached to the case. In +addition, while the string " " is a legal domain name, subjectAltName +extensions with a dNSName of " " MUST NOT be used. Finally, the use +of the DNS representation for Internet mail addresses +(subscriber.example.com instead of subscriber@example.com) MUST NOT +be used; such identities are to be encoded as rfc822Name. Rules for +encoding internationalized domain names are specified in Section 7.2. +********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANDNSNotIA5String struct{} + +func (l *SANDNSNotIA5String) Initialize() error { + return nil +} + +func (l *SANDNSNotIA5String) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANDNSNotIA5String) Execute(c *x509.Certificate) *LintResult { + ext := util.GetExtFromCert(c, util.SubjectAlternateNameOID) + if ext == nil { + return &LintResult{Status: Fatal} + } + ok, err := util.AllAlternateNameWithTagAreIA5(ext, util.DNSNameTag) + if err != nil { + return &LintResult{Status: Fatal} + } + if ok { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_dns_not_ia5_string", + Description: "dNSNames MUST be IA5 strings", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &SANDNSNotIA5String{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_edi_party_name_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_edi_party_name_present.go new file mode 100644 index 00000000..603195ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_edi_party_name_present.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/************************************************************************************************************ +7.1.4.2.1. Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +Contents: This extension MUST contain at least one entry. Each entry MUST be either a dNSName containing +the Fully‐Qualified Domain Name or an iPAddress containing the IP address of a server. The CA MUST +confirm that the Applicant controls the Fully‐Qualified Domain Name or IP address or has been granted the +right to use it by the Domain Name Registrant or IP address assignee, as appropriate. +Wildcard FQDNs are permitted. +*************************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANEDI struct{} + +func (l *SANEDI) Initialize() error { + return nil +} + +func (l *SANEDI) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANEDI) Execute(c *x509.Certificate) *LintResult { + if c.EDIPartyNames != nil { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_edi_party_name_present", + Description: "The Subject Alternate Name extension MUST contain only 'dnsName' and 'ipaddress' name types", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANEDI{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_empty_name.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_empty_name.go new file mode 100644 index 00000000..e2e9babf --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_empty_name.go @@ -0,0 +1,80 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/****************************************************************** +RFC 5280: 4.2.1.6 +If the subjectAltName extension is present, the sequence MUST contain +at least one entry. Unlike the subject field, conforming CAs MUST +NOT issue certificates with subjectAltNames containing empty +GeneralName fields. For example, an rfc822Name is represented as an +IA5String. While an empty string is a valid IA5String, such an +rfc822Name is not permitted by this profile. The behavior of clients +that encounter such a certificate when processing a certification +path is not defined by this profile. 
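+
+As an illustration (not RFC text), a concrete failing value: a
+GeneralNames SEQUENCE holding a single empty rfc822Name, i.e. a
+context tag [1] IA5String of length zero:
+
+	der := []byte{0x30, 0x02, 0x81, 0x00}
+
+Walking it as below yields one element with len(v.Bytes) == 0, which is
+reported as Error.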
+******************************************************************/ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANEmptyName struct{} + +func (l *SANEmptyName) Initialize() error { + return nil +} + +func (l *SANEmptyName) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANEmptyName) Execute(c *x509.Certificate) *LintResult { + value := util.GetExtFromCert(c, util.SubjectAlternateNameOID).Value + var seq asn1.RawValue + if _, err := asn1.Unmarshal(value, &seq); err != nil { + return &LintResult{Status: Fatal} + } + if !seq.IsCompound || seq.Tag != 16 || seq.Class != 0 { + return &LintResult{Status: Fatal} + } + + rest := seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + var err error + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return &LintResult{Status: NA} + } + if len(v.Bytes) == 0 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_empty_name", + Description: "General name fields MUST NOT be empty in subjectAlternateNames", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &SANEmptyName{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_missing.go new file mode 100644 index 00000000..ccdc3990 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_missing.go @@ -0,0 +1,56 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/************************************************ +BRs: 7.1.4.2.1 +Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANMissing struct{} + +func (l *SANMissing) Initialize() error { + return nil +} + +func (l *SANMissing) CheckApplies(c *x509.Certificate) bool { + return !util.IsCACert(c) +} + +func (l *SANMissing) Execute(c *x509.Certificate) *LintResult { + if util.IsExtInCert(c, util.SubjectAlternateNameOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_missing", + Description: "Subscriber certificates MUST contain the Subject Alternate Name extension", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_no_entries.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_no_entries.go new file mode 100644 index 00000000..820f0a4d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_no_entries.go @@ -0,0 +1,62 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/********************************************************************** +RFC 5280: 4.2.1.6 +If the subjectAltName extension is present, the sequence MUST contain + at least one entry. Unlike the subject field, conforming CAs MUST + NOT issue certificates with subjectAltNames containing empty + GeneralName fields. For example, an rfc822Name is represented as an + IA5String. While an empty string is a valid IA5String, such an + rfc822Name is not permitted by this profile. The behavior of clients + that encounter such a certificate when processing a certification + path is not defined by this profile. 
+***********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANNoEntry struct{} + +func (l *SANNoEntry) Initialize() error { + return nil +} + +func (l *SANNoEntry) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANNoEntry) Execute(c *x509.Certificate) *LintResult { + san := util.GetExtFromCert(c, util.SubjectAlternateNameOID) + if util.IsEmptyASN1Sequence(san.Value) { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_no_entries", + Description: "If present, the SAN extension MUST contain at least one entry", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &SANNoEntry{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_not_critical_without_subject.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_not_critical_without_subject.go new file mode 100644 index 00000000..dc8cab2d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_not_critical_without_subject.go @@ -0,0 +1,62 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: 4.2.1.6 +Further, if the only subject identity included in the certificate is + an alternative name form (e.g., an electronic mail address), then the + subject distinguished name MUST be empty (an empty sequence), and the + subjectAltName extension MUST be present. If the subject field + contains an empty sequence, then the issuing CA MUST include a + subjectAltName extension that is marked as critical. When including + the subjectAltName extension in a certificate that has a non-empty + subject distinguished name, conforming CAs SHOULD mark the + subjectAltName extension as non-critical. 
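+
+As an illustration (not RFC text), this lint and
+w_ext_san_critical_with_subject_dn split the rule into a small decision
+table, assuming util.NotAllNameFieldsAreEmpty reports a non-empty
+subject DN:
+
+	emptySubject := !util.NotAllNameFieldsAreEmpty(&c.Subject)
+	// emptySubject && !san.Critical  -> Error (this lint)
+	// !emptySubject && san.Critical  -> Warn  (w_ext_san_critical_with_subject_dn)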
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type extSANNotCritNoSubject struct{} + +func (l *extSANNotCritNoSubject) Initialize() error { + return nil +} + +func (l *extSANNotCritNoSubject) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *extSANNotCritNoSubject) Execute(c *x509.Certificate) *LintResult { + if e := util.GetExtFromCert(c, util.SubjectAlternateNameOID); !util.NotAllNameFieldsAreEmpty(&c.Subject) && !e.Critical { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_not_critical_without_subject", + Description: "If there is an empty subject field, then the SAN extension MUST be critical", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &extSANNotCritNoSubject{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_other_name_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_other_name_present.go new file mode 100644 index 00000000..dca82008 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_other_name_present.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************************ +7.1.4.2.1. Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +Contents: This extension MUST contain at least one entry. Each entry MUST be either a dNSName containing +the Fully‐Qualified Domain Name or an iPAddress containing the IP address of a server. The CA MUST +confirm that the Applicant controls the Fully‐Qualified Domain Name or IP address or has been granted the +right to use it by the Domain Name Registrant or IP address assignee, as appropriate. +Wildcard FQDNs are permitted. 
+*************************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANOtherName struct{} + +func (l *SANOtherName) Initialize() error { + return nil +} + +func (l *SANOtherName) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANOtherName) Execute(c *x509.Certificate) *LintResult { + if c.OtherNames != nil { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_other_name_present", + Description: "The Subject Alternate Name extension MUST contain only 'dnsName' and 'ipaddress' name types.", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANOtherName{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_registered_id_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_registered_id_present.go new file mode 100644 index 00000000..e3d4cfdc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_registered_id_present.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************************ +7.1.4.2.1. Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +Contents: This extension MUST contain at least one entry. Each entry MUST be either a dNSName containing +the Fully‐Qualified Domain Name or an iPAddress containing the IP address of a server. The CA MUST +confirm that the Applicant controls the Fully‐Qualified Domain Name or IP address or has been granted the +right to use it by the Domain Name Registrant or IP address assignee, as appropriate. +Wildcard FQDNs are permitted. 
+*************************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANRegId struct{} + +func (l *SANRegId) Initialize() error { + return nil +} + +func (l *SANRegId) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANRegId) Execute(c *x509.Certificate) *LintResult { + if c.RegisteredIDs != nil { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_registered_id_present", + Description: "The Subject Alternate Name extension MUST contain only 'dnsName' and 'ipaddress' name types.", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANRegId{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_rfc822_format_invalid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_rfc822_format_invalid.go new file mode 100644 index 00000000..c41c5a1d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_rfc822_format_invalid.go @@ -0,0 +1,69 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************ +RFC 5280: 4.2.1.6 + When the subjectAltName extension contains an Internet mail address, + the address MUST be stored in the rfc822Name. The format of an + rfc822Name is a "Mailbox" as defined in Section 4.1.2 of [RFC2821]. + A Mailbox has the form "Local-part@Domain". Note that a Mailbox has + no phrase (such as a common name) before it, has no comment (text + surrounded in parentheses) after it, and is not surrounded by "<" and + ">". Rules for encoding Internet mail addresses that include + internationalized domain names are specified in Section 7.5. 
+************************************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type invalidEmail struct{} + +func (l *invalidEmail) Initialize() error { + return nil +} + +func (l *invalidEmail) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *invalidEmail) Execute(c *x509.Certificate) *LintResult { + for _, str := range c.EmailAddresses { + if str == "" { + continue + } + if strings.Contains(str, " ") { + return &LintResult{Status: Error} + } else if str[0] == '<' || str[len(str)-1] == ')' { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_rfc822_format_invalid", + Description: "Email MUST NOT be surrounded with `<>`, and there must be no trailing comments in `()`", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &invalidEmail{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_rfc822_name_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_rfc822_name_present.go new file mode 100644 index 00000000..32b83194 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_rfc822_name_present.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************************ +7.1.4.2.1. Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +Contents: This extension MUST contain at least one entry. Each entry MUST be either a dNSName containing +the Fully‐Qualified Domain Name or an iPAddress containing the IP address of a server. The CA MUST +confirm that the Applicant controls the Fully‐Qualified Domain Name or IP address or has been granted the +right to use it by the Domain Name Registrant or IP address assignee, as appropriate. +Wildcard FQDNs are permitted. 
+*************************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANRfc822 struct{} + +func (l *SANRfc822) Initialize() error { + return nil +} + +func (l *SANRfc822) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANRfc822) Execute(c *x509.Certificate) *LintResult { + if c.EmailAddresses != nil { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_rfc822_name_present", + Description: "The Subject Alternate Name extension MUST contain only 'dnsName' and 'ipaddress' name types.", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANRfc822{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_space_dns_name.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_space_dns_name.go new file mode 100644 index 00000000..688297f8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_space_dns_name.go @@ -0,0 +1,66 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************ +RFC 5280: 4.2.1.6 +When the subjectAltName extension contains a domain name system + label, the domain name MUST be stored in the dNSName (an IA5String). + The name MUST be in the "preferred name syntax", as specified by + Section 3.5 of [RFC1034] and as modified by Section 2.1 of + [RFC1123]. Note that while uppercase and lowercase letters are + allowed in domain names, no significance is attached to the case. In + addition, while the string " " is a legal domain name, subjectAltName + extensions with a dNSName of " " MUST NOT be used. Finally, the use + of the DNS representation for Internet mail addresses + (subscriber.example.com instead of subscriber@example.com) MUST NOT + be used; such identities are to be encoded as rfc822Name. Rules for + encoding internationalized domain names are specified in Section 7.2. 
+************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANIsSpaceDNS struct{} + +func (l *SANIsSpaceDNS) Initialize() error { + return nil +} + +func (l *SANIsSpaceDNS) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANIsSpaceDNS) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.DNSNames { + if dns == " " { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_space_dns_name", + Description: "The dNSName ` ` MUST NOT be used", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &SANIsSpaceDNS{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uniform_resource_identifier_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uniform_resource_identifier_present.go new file mode 100644 index 00000000..27586401 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uniform_resource_identifier_present.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************************ +7.1.4.2.1. Subject Alternative Name Extension +Certificate Field: extensions:subjectAltName +Required/Optional: Required +Contents: This extension MUST contain at least one entry. Each entry MUST be either a dNSName containing +the Fully‐Qualified Domain Name or an iPAddress containing the IP address of a server. The CA MUST +confirm that the Applicant controls the Fully‐Qualified Domain Name or IP address or has been granted the +right to use it by the Domain Name Registrant or IP address assignee, as appropriate. +Wildcard FQDNs are permitted. 
+*************************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANURI struct{} + +func (l *SANURI) Initialize() error { + return nil +} + +func (l *SANURI) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANURI) Execute(c *x509.Certificate) *LintResult { + if c.URIs != nil { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_uniform_resource_identifier_present", + Description: "The Subject Alternate Name extension MUST contain only 'dnsName' and 'ipaddress' name types", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &SANURI{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_format_invalid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_format_invalid.go new file mode 100644 index 00000000..23f22b9d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_format_invalid.go @@ -0,0 +1,69 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +The name MUST include both a +scheme (e.g., "http" or "ftp") and a scheme-specific-part. 
+************************************************/ + +import ( + "net/url" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type extSANURIFormatInvalid struct{} + +func (l *extSANURIFormatInvalid) Initialize() error { + return nil +} + +func (l *extSANURIFormatInvalid) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *extSANURIFormatInvalid) Execute(c *x509.Certificate) *LintResult { + for _, uri := range c.URIs { + parsed_uri, err := url.Parse(uri) + + if err != nil { + return &LintResult{Status: Error} + } + + //scheme + if parsed_uri.Scheme == "" { + return &LintResult{Status: Error} + } + + //scheme-specific part + if parsed_uri.Host == "" && parsed_uri.User == nil && parsed_uri.Opaque == "" && parsed_uri.Path == "" { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_uri_format_invalid", + Description: "URIs in SAN extension must have a scheme and scheme specific part", + Citation: "RFC5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &extSANURIFormatInvalid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_host_not_fqdn_or_ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_host_not_fqdn_or_ip.go new file mode 100644 index 00000000..6404e43b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_host_not_fqdn_or_ip.go @@ -0,0 +1,76 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/********************************************************************* +When the subjectAltName extension contains a URI, the name MUST be +stored in the uniformResourceIdentifier (an IA5String). The name +MUST NOT be a relative URI, and it MUST follow the URI syntax and +encoding rules specified in [RFC3986]. The name MUST include both a +scheme (e.g., "http" or "ftp") and a scheme-specific-part. URIs that +include an authority ([RFC3986], Section 3.2) MUST include a fully +qualified domain name or IP address as the host. Rules for encoding +Internationalized Resource Identifiers (IRIs) are specified in +Section 7.4. 
+*********************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+	"net/url"
+)
+
+type SANURIHost struct{}
+
+func (l *SANURIHost) Initialize() error {
+	return nil
+}
+
+func (l *SANURIHost) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.SubjectAlternateNameOID)
+}
+
+func (l *SANURIHost) Execute(c *x509.Certificate) *LintResult {
+	for _, uri := range c.URIs {
+		if uri != "" {
+			parsed, err := url.Parse(uri)
+			if err != nil {
+				return &LintResult{Status: Error}
+			}
+			if parsed.Opaque == "" {
+				// A URI with a non-empty Opaque has no authority and is vacuously OK; only URIs with an empty Opaque need a FQDN or IP host.
+				if parsed.Host == "" {
+					return &LintResult{Status: Error}
+				}
+				if !util.IsFQDNOrIP(parsed.Host) {
+					return &LintResult{Status: Error}
+				}
+			}
+		}
+	}
+
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_ext_san_uri_host_not_fqdn_or_ip",
+		Description:   "URIs that include an authority ([RFC3986], Section 3.2) MUST include a fully qualified domain name or IP address as the host",
+		Citation:      "RFC 5280: 4.2.1.6",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC5280Date,
+		Lint:          &SANURIHost{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_not_ia5.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_not_ia5.go
new file mode 100644
index 00000000..39d92b63
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_not_ia5.go
@@ -0,0 +1,59 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+When the subjectAltName extension contains a URI, the name MUST be
+stored in the uniformResourceIdentifier (an IA5String). 
+************************************************/ + +import ( + "unicode" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type extSANURINotIA5 struct{} + +func (l *extSANURINotIA5) Initialize() error { + return nil +} + +func (l *extSANURINotIA5) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *extSANURINotIA5) Execute(c *x509.Certificate) *LintResult { + for _, uri := range c.URIs { + for _, c := range uri { + if c > unicode.MaxASCII { + return &LintResult{Status: Error} + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_uri_not_ia5", + Description: "When subjectAlternateName contains a URI, the name MUST be an IA5 string", + Citation: "RFC5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &extSANURINotIA5{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_relative.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_relative.go new file mode 100644 index 00000000..b4314e3c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_san_uri_relative.go @@ -0,0 +1,70 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************* +When the subjectAltName extension contains a URI, the name MUST be +stored in the uniformResourceIdentifier (an IA5String). The name +MUST NOT be a relative URI, and it MUST follow the URI syntax and +encoding rules specified in [RFC3986]. The name MUST include both a +scheme (e.g., "http" or "ftp") and a scheme-specific-part. URIs that +include an authority ([RFC3986], Section 3.2) MUST include a fully +qualified domain name or IP address as the host. Rules for encoding +Internationalized Resource Identifiers (IRIs) are specified in +Section 7.4. 
+*************************************************************************/ + +import ( + "net/url" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type extSANURIRelative struct{} + +func (l *extSANURIRelative) Initialize() error { + return nil +} + +func (l *extSANURIRelative) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *extSANURIRelative) Execute(c *x509.Certificate) *LintResult { + for _, uri := range c.URIs { + parsed_uri, err := url.Parse(uri) + + if err != nil { + return &LintResult{Status: Error} + } + + if !parsed_uri.IsAbs() { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_san_uri_relative", + Description: "When the subjectAlternateName extension is present and a URI is used, the name MUST NOT be a relative URI", + Citation: "RFC 5280: 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &extSANURIRelative{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_directory_attr_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_directory_attr_critical.go new file mode 100644 index 00000000..687f50e7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_directory_attr_critical.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: 4.2.1.8 +The subject directory attributes extension is used to convey + identification attributes (e.g., nationality) of the subject. The + extension is defined as a sequence of one or more attributes. + Conforming CAs MUST mark this extension as non-critical. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subDirAttrCrit struct{} + +func (l *subDirAttrCrit) Initialize() error { + return nil +} + +func (l *subDirAttrCrit) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectDirAttrOID) +} + +func (l *subDirAttrCrit) Execute(c *x509.Certificate) *LintResult { + if e := util.GetExtFromCert(c, util.SubjectDirAttrOID); e.Critical { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_subject_directory_attr_critical", + Description: "Conforming CAs MUST mark the Subject Directory Attributes extension as not critical", + Citation: "RFC 5280: 4.2.1.8", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subDirAttrCrit{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_critical.go new file mode 100644 index 00000000..256be132 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_critical.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/********************************************************** +RFC 5280: 4.2.1.2 + Conforming CAs MUST mark this extension as non-critical. 
+**********************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectKeyIdCritical struct{} + +func (l *subjectKeyIdCritical) Initialize() error { + return nil +} + +func (l *subjectKeyIdCritical) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectKeyIdentityOID) +} + +func (l *subjectKeyIdCritical) Execute(c *x509.Certificate) *LintResult { + ski := util.GetExtFromCert(c, util.SubjectKeyIdentityOID) //pointer to the extension + if ski.Critical { + return &LintResult{Status: Error} + } else { //implies !ski.Critical + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_subject_key_identifier_critical", + Description: "The subject key identifier extension MUST be non-critical", + Citation: "RFC 5280: 4.2.1.2", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectKeyIdCritical{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_missing_ca.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_missing_ca.go new file mode 100644 index 00000000..472402a1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_missing_ca.go @@ -0,0 +1,69 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ + To facilitate certification path construction, this extension MUST + appear in all conforming CA certificates, that is, all certificates + including the basic constraints extension (Section 4.2.1.9) where the + value of cA is TRUE. In conforming CA certificates, the value of the + subject key identifier MUST be the value placed in the key identifier + field of the authority key identifier extension (Section 4.2.1.1) of + certificates issued by the subject of this certificate. Applications + are not required to verify that key identifiers match when performing + certification path validation. + ... + For end entity certificates, the subject key identifier extension provides + a means for identifying certificates containing the particular public key + used in an application. Where an end entity has obtained multiple certificates, + especially from multiple CAs, the subject key identifier provides a means to + quickly identify the set of certificates containing a particular public key. + To assist applications in identifying the appropriate end entity certificate, + this extension SHOULD be included in all end entity certificates. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectKeyIdMissingCA struct{} + +func (l *subjectKeyIdMissingCA) Initialize() error { + return nil +} + +func (l *subjectKeyIdMissingCA) CheckApplies(cert *x509.Certificate) bool { + return util.IsCACert(cert) +} + +func (l *subjectKeyIdMissingCA) Execute(cert *x509.Certificate) *LintResult { + if util.IsExtInCert(cert, util.SubjectKeyIdentityOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_subject_key_identifier_missing_ca", + Description: "CAs MUST include a Subject Key Identifier in all CA certificates", + Citation: "RFC 5280: 4.2 & 4.2.1.2", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectKeyIdMissingCA{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_missing_sub_cert.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_missing_sub_cert.go new file mode 100644 index 00000000..b5faf9f3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_subject_key_identifier_missing_sub_cert.go @@ -0,0 +1,69 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/********************************************************************** + To facilitate certification path construction, this extension MUST + appear in all conforming CA certificates, that is, all certificates + including the basic constraints extension (Section 4.2.1.9) where the + value of cA is TRUE. In conforming CA certificates, the value of the + subject key identifier MUST be the value placed in the key identifier + field of the authority key identifier extension (Section 4.2.1.1) of + certificates issued by the subject of this certificate. Applications + are not required to verify that key identifiers match when performing + certification path validation. + ... + For end entity certificates, the subject key identifier extension provides + a means for identifying certificates containing the particular public key + used in an application. Where an end entity has obtained multiple certificates, + especially from multiple CAs, the subject key identifier provides a means to + quickly identify the set of certificates containing a particular public key. + To assist applications in identifying the appropriate end entity certificate, + this extension SHOULD be included in all end entity certificates. 
+**********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectKeyIdMissingSubscriber struct{} + +func (l *subjectKeyIdMissingSubscriber) Initialize() error { + return nil +} + +func (l *subjectKeyIdMissingSubscriber) CheckApplies(cert *x509.Certificate) bool { + return !util.IsCACert(cert) +} + +func (l *subjectKeyIdMissingSubscriber) Execute(cert *x509.Certificate) *LintResult { + if util.IsExtInCert(cert, util.SubjectKeyIdentityOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Warn} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "w_ext_subject_key_identifier_missing_sub_cert", + Description: "Sub certificates SHOULD include Subject Key Identifier in end entity certs", + Citation: "RFC 5280: 4.2 & 4.2.1.2", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectKeyIdMissingSubscriber{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_tor_service_descriptor_hash_invalid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_tor_service_descriptor_hash_invalid.go new file mode 100644 index 00000000..dbb6807d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ext_tor_service_descriptor_hash_invalid.go @@ -0,0 +1,210 @@ +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "fmt" + "net/url" + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type torServiceDescHashInvalid struct{} + +func (l *torServiceDescHashInvalid) Initialize() error { + // There is nothing to initialize for a torServiceDescHashInvalid linter. + return nil +} + +// CheckApplies returns true if the certificate is a subscriber certificate that +// contains a subject name ending in `.onion`. +func (l *torServiceDescHashInvalid) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.CertificateSubjInTLD(c, onionTLD) +} + +// failResult is a small utility function for creating a failed lint result. +func failResult(format string, args ...interface{}) *LintResult { + return &LintResult{ + Status: Error, + Details: fmt.Sprintf(format, args...), + } +} + +// torServiceDescExtName is a common string prefix used in many lint result +// detail messages to identify the extension at fault. +var torServiceDescExtName = fmt.Sprintf( + "TorServiceDescriptor extension (oid %s)", + util.BRTorServiceDescriptor.String()) + +// lintOnionURL verifies that an Onion URI value from a TorServiceDescriptorHash +// is: +// +// 1) a valid parseable url. +// 2) a URL with a non-empty hostname +// 3) a URL with an https:// protocol scheme +// +// If all of the above hold then nil is returned. If any of the above conditions +// are not met an error lint result pointer is returned. 
+func lintOnionURL(onion string) *LintResult { + if onionURL, err := url.Parse(onion); err != nil { + return failResult( + "%s contained "+ + "TorServiceDescriptorHash object with invalid Onion URI", + torServiceDescExtName) + } else if onionURL.Host == "" { + return failResult( + "%s contained "+ + "TorServiceDescriptorHash object with Onion URI missing a hostname", + torServiceDescExtName) + } else if onionURL.Scheme != "https" { + return failResult( + "%s contained "+ + "TorServiceDescriptorHash object with Onion URI using a non-HTTPS "+ + "protocol scheme", + torServiceDescExtName) + } + return nil +} + +// Execute will lint the provided certificate. An Error LintResult will be +// returned if: +// +// 1) There is no TorServiceDescriptor extension present. +// 2) There were no TorServiceDescriptors parsed by zcrypto +// 3) There are TorServiceDescriptorHash entries with an invalid Onion URL. +// 4) There are TorServiceDescriptorHash entries with an unknown hash +// algorithm or incorrect hash bit length. +// 5) There is a TorServiceDescriptorHash entry that doesn't correspond to +// an onion subject in the cert. +// 6) There is an onion subject in the cert that doesn't correspond to +// a TorServiceDescriptorHash. +func (l *torServiceDescHashInvalid) Execute(c *x509.Certificate) *LintResult { + // If the BRTorServiceDescriptor extension is missing return a lint error. We + // know the cert contains one or more `.onion` subjects because of + // `CheckApplies` and all such certs are expected to have this extension after + // util.CABV201Date. + if ext := util.GetExtFromCert(c, util.BRTorServiceDescriptor); ext == nil { + return failResult( + "certificate contained a %s domain but is missing a TorServiceDescriptor "+ + "extension (oid %s)", + onionTLD, util.BRTorServiceDescriptor.String()) + } + + // The certificate should have at least one TorServiceDescriptorHash in the + // TorServiceDescriptor extension. + descriptors := c.TorServiceDescriptors + if len(descriptors) == 0 { + return failResult( + "certificate contained a %s domain but TorServiceDescriptor "+ + "extension (oid %s) had no TorServiceDescriptorHash objects", + onionTLD, util.BRTorServiceDescriptor.String()) + } + + // Build a map of all the eTLD+1 onion subjects in the cert to compare against + // the service descriptors. + onionETLDPlusOneMap := make(map[string]string) + for _, subj := range append(c.DNSNames, c.Subject.CommonName) { + if !strings.HasSuffix(subj, onionTLD) { + continue + } + labels := strings.Split(subj, ".") + if len(labels) < 2 { + return failResult("certificate contained a %s domain with too few "+ + "labels: %q", + onionTLD, subj) + } + eTLDPlusOne := strings.Join(labels[len(labels)-2:], ".") + onionETLDPlusOneMap[eTLDPlusOne] = subj + } + + expectedHashBits := map[string]int{ + "SHA256": 256, + "SHA384": 384, + "SHA512": 512, + } + + // Build a map of onion hostname -> TorServiceDescriptorHash using the parsed + // TorServiceDescriptors from zcrypto. + descriptorMap := make(map[string]*x509.TorServiceDescriptorHash) + for _, descriptor := range descriptors { + // each descriptor's Onion URL must be valid + if errResult := lintOnionURL(descriptor.Onion); errResult != nil { + return errResult + } + // each descriptor should have a known hash algorithm and the correct + // corresponding size of hash. 
+ if expectedBits, found := expectedHashBits[descriptor.AlgorithmName]; !found { + return failResult( + "%s contained a TorServiceDescriptorHash for Onion URI %q with an "+ + "unknown hash algorithm", + torServiceDescExtName, descriptor.Onion) + } else if expectedBits != descriptor.HashBits { + return failResult( + "%s contained a TorServiceDescriptorHash with hash algorithm %q but "+ + "only %d bits of hash not %d", + torServiceDescExtName, descriptor.AlgorithmName, + descriptor.HashBits, expectedBits) + } + // NOTE(@cpu): Throwing out the err result here because lintOnionURL already + // ensured the URL is valid. + url, _ := url.Parse(descriptor.Onion) + hostname := url.Hostname() + // there should only be one TorServiceDescriptorHash for each Onion hostname. + if _, exists := descriptorMap[hostname]; exists { + return failResult( + "%s contained more than one TorServiceDescriptorHash for base "+ + "Onion URI %q", + torServiceDescExtName, descriptor.Onion) + } + // there shouldn't be a TorServiceDescriptorHash for a Onion hostname that + // isn't an eTLD+1 in the certificate's subjects. + if _, found := onionETLDPlusOneMap[hostname]; !found { + return failResult( + "%s contained a TorServiceDescriptorHash with a hostname (%q) not "+ + "present as a subject in the certificate", + torServiceDescExtName, hostname) + } + descriptorMap[hostname] = descriptor + } + + // Check if any of the onion subjects in the certificate don't have + // a TorServiceDescriptorHash for the eTLD+1 in the descriptorMap. + for eTLDPlusOne, subjDomain := range onionETLDPlusOneMap { + if _, found := descriptorMap[eTLDPlusOne]; !found { + return failResult( + "%s subject domain name %q does not have a corresponding "+ + "TorServiceDescriptorHash for its eTLD+1", + onionTLD, subjDomain) + } + } + + // Everything checks out! + return &LintResult{ + Status: Pass, + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ext_tor_service_descriptor_hash_invalid", + Description: "certificates with .onion names need valid TorServiceDescriptors in extension", + Citation: "BRS: Ballot 201", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV201Date, + Lint: &torServiceDescHashInvalid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_does_not_include_seconds.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_does_not_include_seconds.go new file mode 100644 index 00000000..6ff3801e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_does_not_include_seconds.go @@ -0,0 +1,96 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************** +4.1.2.5.2. GeneralizedTime +The generalized time type, GeneralizedTime, is a standard ASN.1 type +for variable precision representation of time. 
Optionally, the
+GeneralizedTime field can include a representation of the time
+differential between local and Greenwich Mean Time.
+
+For the purposes of this profile, GeneralizedTime values MUST be
+expressed in Greenwich Mean Time (Zulu) and MUST include seconds
+(i.e., times are YYYYMMDDHHMMSSZ), even where the number of seconds
+is zero. GeneralizedTime values MUST NOT include fractional seconds.
+********************************************************************/
+
+import (
+	"encoding/asn1"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type generalizedNoSeconds struct {
+}
+
+func (l *generalizedNoSeconds) Initialize() error {
+	return nil
+}
+
+func (l *generalizedNoSeconds) CheckApplies(c *x509.Certificate) bool {
+	firstDate, secondDate := util.GetTimes(c)
+	beforeTag, afterTag := util.FindTimeType(firstDate, secondDate)
+	date1Gen := beforeTag == 24
+	date2Gen := afterTag == 24
+	return date1Gen || date2Gen
+}
+
+func (l *generalizedNoSeconds) Execute(c *x509.Certificate) *LintResult {
+	r := Pass
+	date1, date2 := util.GetTimes(c)
+	beforeTag, afterTag := util.FindTimeType(date1, date2)
+	date1Gen := beforeTag == 24
+	date2Gen := afterTag == 24
+	if date1Gen {
+		// GeneralizedTime tests on notBefore
+		checkSeconds(&r, date1)
+		if r == Error {
+			return &LintResult{Status: r}
+		}
+	}
+	if date2Gen {
+		checkSeconds(&r, date2)
+	}
+	return &LintResult{Status: r}
+}
+
+func checkSeconds(r *LintStatus, t asn1.RawValue) {
+	if t.Bytes[len(t.Bytes)-1] == 'Z' {
+		if len(t.Bytes) < 15 {
+			*r = Error
+		}
+	} else if t.Bytes[len(t.Bytes)-5] == '-' || t.Bytes[len(t.Bytes)-5] == '+' {
+		if len(t.Bytes) < 19 {
+			*r = Error
+		}
+	} else {
+		if len(t.Bytes) < 14 {
+			*r = Error
+		}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_generalized_time_does_not_include_seconds",
+		Description:   "Generalized time values MUST include seconds",
+		Citation:      "RFC 5280: 4.1.2.5.2",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &generalizedNoSeconds{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_includes_fraction_seconds.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_includes_fraction_seconds.go
new file mode 100644
index 00000000..787fec59
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_includes_fraction_seconds.go
@@ -0,0 +1,96 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/********************************************************************
+4.1.2.5.2. GeneralizedTime
+The generalized time type, GeneralizedTime, is a standard ASN.1 type
+for variable precision representation of time. Optionally, the
+GeneralizedTime field can include a representation of the time
+differential between local and Greenwich Mean Time. 
+
+For the purposes of this profile, GeneralizedTime values MUST be
+expressed in Greenwich Mean Time (Zulu) and MUST include seconds
+(i.e., times are YYYYMMDDHHMMSSZ), even where the number of seconds
+is zero. GeneralizedTime values MUST NOT include fractional seconds.
+********************************************************************/
+
+import (
+	"encoding/asn1"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type generalizedTimeFraction struct {
+}
+
+func (l *generalizedTimeFraction) Initialize() error {
+	return nil
+}
+
+func (l *generalizedTimeFraction) CheckApplies(c *x509.Certificate) bool {
+	firstDate, secondDate := util.GetTimes(c)
+	beforeTag, afterTag := util.FindTimeType(firstDate, secondDate)
+	date1Gen := beforeTag == 24
+	date2Gen := afterTag == 24
+	return date1Gen || date2Gen
+}
+
+func (l *generalizedTimeFraction) Execute(c *x509.Certificate) *LintResult {
+	r := Pass
+	date1, date2 := util.GetTimes(c)
+	beforeTag, afterTag := util.FindTimeType(date1, date2)
+	date1Gen := beforeTag == 24
+	date2Gen := afterTag == 24
+	if date1Gen {
+		// GeneralizedTime tests on notBefore
+		checkFraction(&r, date1)
+		if r == Error {
+			return &LintResult{Status: r}
+		}
+	}
+	if date2Gen {
+		checkFraction(&r, date2)
+	}
+	return &LintResult{Status: r}
+}
+
+func checkFraction(r *LintStatus, t asn1.RawValue) {
+	if t.Bytes[len(t.Bytes)-1] == 'Z' {
+		if len(t.Bytes) > 15 {
+			*r = Error
+		}
+	} else if t.Bytes[len(t.Bytes)-5] == '-' || t.Bytes[len(t.Bytes)-5] == '+' {
+		if len(t.Bytes) > 19 {
+			*r = Error
+		}
+	} else {
+		if len(t.Bytes) > 14 {
+			*r = Error
+		}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_generalized_time_includes_fraction_seconds",
+		Description:   "Generalized time values MUST NOT include fractional seconds",
+		Citation:      "RFC 5280: 4.1.2.5.2",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &generalizedTimeFraction{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_not_in_zulu.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_not_in_zulu.go
new file mode 100644
index 00000000..93efa3f3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_generalized_time_not_in_zulu.go
@@ -0,0 +1,77 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/********************************************************************
+4.1.2.5.2. GeneralizedTime
+The generalized time type, GeneralizedTime, is a standard ASN.1 type
+for variable precision representation of time. Optionally, the
+GeneralizedTime field can include a representation of the time
+differential between local and Greenwich Mean Time.
+
+For the purposes of this profile, GeneralizedTime values MUST be
+expressed in Greenwich Mean Time (Zulu) and MUST include seconds
+(i.e., times are YYYYMMDDHHMMSSZ), even where the number of seconds
+is zero. 
GeneralizedTime values MUST NOT include fractional seconds. +********************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type generalizedNotZulu struct { +} + +func (l *generalizedNotZulu) Initialize() error { + return nil +} + +func (l *generalizedNotZulu) CheckApplies(c *x509.Certificate) bool { + firstDate, secondDate := util.GetTimes(c) + beforeTag, afterTag := util.FindTimeType(firstDate, secondDate) + date1Gen := beforeTag == 24 + date2Gen := afterTag == 24 + return date1Gen || date2Gen +} + +func (l *generalizedNotZulu) Execute(c *x509.Certificate) *LintResult { + date1, date2 := util.GetTimes(c) + beforeTag, afterTag := util.FindTimeType(date1, date2) + date1Gen := beforeTag == 24 + date2Gen := afterTag == 24 + if date1Gen { + if date1.Bytes[len(date1.Bytes)-1] != 'Z' { + return &LintResult{Status: Error} + } + } + if date2Gen { + if date2.Bytes[len(date2.Bytes)-1] != 'Z' { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_generalized_time_not_in_zulu", + Description: "Generalized time values MUST be expressed in Greenwich Mean Time (Zulu)", + Citation: "RFC 5280: 4.1.2.5.2", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &generalizedNotZulu{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_bare_wildcard.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_bare_wildcard.go new file mode 100644 index 00000000..80241400 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_bare_wildcard.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type brIANBareWildcard struct{} + +func (l *brIANBareWildcard) Initialize() error { + return nil +} + +func (l *brIANBareWildcard) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *brIANBareWildcard) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.IANDNSNames { + if strings.HasSuffix(dns, "*") { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ian_bare_wildcard", + Description: "A wildcard MUST be accompanied by other data to its right (Only checks DNSName)", + Citation: "RFC5280", + Source: RFC5280, + EffectiveDate: util.ZeroDate, + Lint: &brIANBareWildcard{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_dns_name_includes_null_char.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_dns_name_includes_null_char.go new file mode 100644 index 00000000..1bb7b628 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_dns_name_includes_null_char.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IANDNSNull struct{} + +func (l *IANDNSNull) Initialize() error { + return nil +} + +func (l *IANDNSNull) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *IANDNSNull) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.IANDNSNames { + for i := 0; i < len(dns); i++ { + if dns[i] == 0 { + return &LintResult{Status: Error} + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ian_dns_name_includes_null_char", + Description: "DNSName MUST NOT include a null character", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &IANDNSNull{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_dns_name_starts_with_period.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_dns_name_starts_with_period.go new file mode 100644 index 00000000..caa4413d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_dns_name_starts_with_period.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IANDNSPeriod struct{} + +func (l *IANDNSPeriod) Initialize() error { + return nil +} + +func (l *IANDNSPeriod) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *IANDNSPeriod) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.IANDNSNames { + if strings.HasPrefix(dns, ".") { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ian_dns_name_starts_with_period", + Description: "DNSName MUST NOT start with a period", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &IANDNSPeriod{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_iana_pub_suffix_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_iana_pub_suffix_empty.go new file mode 100644 index 00000000..ad372188 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_iana_pub_suffix_empty.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IANPubSuffix struct{} + +func (l *IANPubSuffix) Initialize() error { + return nil +} + +func (l *IANPubSuffix) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *IANPubSuffix) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.IANDNSNames { + if len(strings.Split(dns, ".")) < 3 { + return &LintResult{Status: Warn} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_ian_iana_pub_suffix_empty", + Description: "Domain SHOULD NOT have a bare public suffix", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &IANPubSuffix{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_wildcard_not_first.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_wildcard_not_first.go new file mode 100644 index 00000000..e2ce7b5f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_ian_wildcard_not_first.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type brIANWildcardFirst struct{} + +func (l *brIANWildcardFirst) Initialize() error { + return nil +} + +func (l *brIANWildcardFirst) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.IssuerAlternateNameOID) +} + +func (l *brIANWildcardFirst) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.IANDNSNames { + for i := 1; i < len(dns); i++ { + if dns[i] == '*' { + return &LintResult{Status: Error} + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_ian_wildcard_not_first", + Description: "A wildcard MUST be in the first label of FQDN (ie not: www.*.com) (Only checks DNSName)", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &brIANWildcardFirst{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_idn_dnsname_malformed_unicode.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_idn_dnsname_malformed_unicode.go new file mode 100644 index 00000000..54fa6d35 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_idn_dnsname_malformed_unicode.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" + "golang.org/x/net/idna" +) + +type IDNMalformedUnicode struct{} + +func (l *IDNMalformedUnicode) Initialize() error { + return nil +} + +func (l *IDNMalformedUnicode) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *IDNMalformedUnicode) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.DNSNames { + labels := strings.Split(dns, ".") + for _, label := range labels { + if strings.HasPrefix(label, "xn--") { + _, err := idna.ToUnicode(label) + if err != nil { + return &LintResult{Status: Error} + } + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_international_dns_name_not_unicode", + Description: "Internationalized DNSNames punycode not valid unicode", + Citation: "RFC 3490", + EffectiveDate: util.RFC3490Date, + Source: RFC5280, + Lint: &IDNMalformedUnicode{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_idn_dnsname_must_be_nfc.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_idn_dnsname_must_be_nfc.go new file mode 100644 index 00000000..081d57a5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_idn_dnsname_must_be_nfc.go @@ -0,0 +1,63 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" + "golang.org/x/net/idna" + "golang.org/x/text/unicode/norm" +) + +type IDNNotNFC struct{} + +func (l *IDNNotNFC) Initialize() error { + return nil +} + +func (l *IDNNotNFC) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *IDNNotNFC) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.DNSNames { + labels := strings.Split(dns, ".") + for _, label := range labels { + if strings.HasPrefix(label, "xn--") { + unicodeLabel, err := idna.ToUnicode(label) + if err != nil { + return &LintResult{Status: NA} + } + if !norm.NFC.IsNormalString(unicodeLabel) { + return &LintResult{Status: Error} + } + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_international_dns_name_not_nfc", + Description: "Internationalized DNSNames must be normalized by unicode normalization form C", + Citation: "RFC 8399", + Source: RFC5891, + EffectiveDate: util.RFC8399Date, + Lint: &IDNNotNFC{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_inhibit_any_policy_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_inhibit_any_policy_not_critical.go new file mode 100644 index 00000000..79ed2e11 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_inhibit_any_policy_not_critical.go @@ -0,0 +1,63 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +4.2.1.14. Inhibit anyPolicy + The inhibit anyPolicy extension can be used in certificates issued to CAs. + The inhibit anyPolicy extension indicates that the special anyPolicy OID, + with the value { 2 5 29 32 0 }, is not considered an explicit match for other + certificate policies except when it appears in an intermediate self-issued + CA certificate. The value indicates the number of additional non-self-issued + certificates that may appear in the path before anyPolicy is no longer permitted. + For example, a value of one indicates that anyPolicy may be processed in + certificates issued by the subject of this certificate, but not in additional + certificates in the path. + + Conforming CAs MUST mark this extension as critical. 
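+
+ For reference, RFC 5280 (Appendix A) defines this extension as:
+
+   id-ce-inhibitAnyPolicy OBJECT IDENTIFIER ::=  { id-ce 54 }
+
+   InhibitAnyPolicy ::= SkipCerts
+
+   SkipCerts ::= INTEGER (0..MAX)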
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type InhibitAnyPolicyNotCritical struct{}
+
+func (l *InhibitAnyPolicyNotCritical) Initialize() error {
+	return nil
+}
+
+func (l *InhibitAnyPolicyNotCritical) CheckApplies(cert *x509.Certificate) bool {
+	return util.IsExtInCert(cert, util.InhibitAnyPolicyOID)
+}
+
+func (l *InhibitAnyPolicyNotCritical) Execute(cert *x509.Certificate) *LintResult {
+	if anyPol := util.GetExtFromCert(cert, util.InhibitAnyPolicyOID); !anyPol.Critical {
+		return &LintResult{Status: Error}
+	} //else
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name: "e_inhibit_any_policy_not_critical",
+		Description: "CAs MUST mark the inhibitAnyPolicy extension as critical",
+		Citation: "RFC 5280: 4.2.1.14",
+		Source: RFC5280,
+		EffectiveDate: util.RFC3280Date,
+		Lint: &InhibitAnyPolicyNotCritical{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_invalid_certificate_version.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_invalid_certificate_version.go
new file mode 100644
index 00000000..589fb85b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_invalid_certificate_version.go
@@ -0,0 +1,53 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+Certificates MUST be of type X.509 v3.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type InvalidCertificateVersion struct{}
+
+func (l *InvalidCertificateVersion) Initialize() error {
+	return nil
+}
+
+func (l *InvalidCertificateVersion) CheckApplies(cert *x509.Certificate) bool {
+	return true
+}
+
+func (l *InvalidCertificateVersion) Execute(cert *x509.Certificate) *LintResult {
+	if cert.Version != 3 {
+		return &LintResult{Status: Error}
+	}
+	//else
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name: "e_invalid_certificate_version",
+		Description: "Certificates MUST be of type X.509 v3",
+		Citation: "BRs: 7.1.1",
+		Source: CABFBaselineRequirements,
+		EffectiveDate: util.CABV130Date,
+		Lint: &InvalidCertificateVersion{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_is_redacted_cert.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_is_redacted_cert.go
new file mode 100644
index 00000000..60675a1d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_is_redacted_cert.go
@@ -0,0 +1,62 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type DNSNameRedacted struct{} + +func (l *DNSNameRedacted) Initialize() error { + return nil +} + +func (l *DNSNameRedacted) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func isRedactedCertificate(domain string) bool { + domain = util.RemovePrependedWildcard(domain) + return strings.HasPrefix(domain, "?.") +} + +func (l *DNSNameRedacted) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName != "" { + if isRedactedCertificate(c.Subject.CommonName) { + return &LintResult{Status: Notice} + } + } + for _, domain := range c.DNSNames { + if isRedactedCertificate(domain) { + return &LintResult{Status: Notice} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "n_contains_redacted_dnsname", + Description: "Some precerts are redacted and of the form ?.?.a.com or *.?.a.com", + Source: ZLint, + Citation: "IETF Draft: https://tools.ietf.org/id/draft-strad-trans-redaction-00.html", + EffectiveDate: util.ZeroDate, + Lint: &DNSNameRedacted{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_country_not_printable_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_country_not_printable_string.go new file mode 100644 index 00000000..ce79fd4f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_country_not_printable_string.go @@ -0,0 +1,64 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IssuerDNCountryNotPrintableString struct{} + +func (l *IssuerDNCountryNotPrintableString) Initialize() error { + return nil +} + +func (l *IssuerDNCountryNotPrintableString) CheckApplies(c *x509.Certificate) bool { + return len(c.Issuer.Country) > 0 +} + +func (l *IssuerDNCountryNotPrintableString) Execute(c *x509.Certificate) *LintResult { + rdnSequence := util.RawRDNSequence{} + rest, err := asn1.Unmarshal(c.RawIssuer, &rdnSequence) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(rest) > 0 { + return &LintResult{Status: Fatal} + } + + for _, attrTypeAndValueSet := range rdnSequence { + for _, attrTypeAndValue := range attrTypeAndValueSet { + if attrTypeAndValue.Type.Equal(util.CountryNameOID) && attrTypeAndValue.Value.Tag != asn1.TagPrintableString { + return &LintResult{Status: Error} + } + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_issuer_dn_country_not_printable_string", + Description: "X520 Distinguished Name Country MUST BE encoded as PrintableString", + Citation: "RFC 5280: Appendix A", + Source: RFC5280, + EffectiveDate: util.ZeroDate, + Lint: &IssuerDNCountryNotPrintableString{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_leading_whitespace.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_leading_whitespace.go new file mode 100644 index 00000000..a32b7a1b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_leading_whitespace.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IssuerDNLeadingSpace struct{} + +func (l *IssuerDNLeadingSpace) Initialize() error { + return nil +} + +func (l *IssuerDNLeadingSpace) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *IssuerDNLeadingSpace) Execute(c *x509.Certificate) *LintResult { + leading, _, err := util.CheckRDNSequenceWhiteSpace(c.RawIssuer) + if err != nil { + return &LintResult{Status: Fatal} + } + if leading { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_issuer_dn_leading_whitespace", + Description: "AttributeValue in issuer RelativeDistinguishedName sequence SHOULD NOT have leading whitespace", + Citation: "AWSLabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &IssuerDNLeadingSpace{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_trailing_whitespace.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_trailing_whitespace.go new file mode 100644 index 00000000..94b85b20 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_dn_trailing_whitespace.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type IssuerDNTrailingSpace struct{} + +func (l *IssuerDNTrailingSpace) Initialize() error { + return nil +} + +func (l *IssuerDNTrailingSpace) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *IssuerDNTrailingSpace) Execute(c *x509.Certificate) *LintResult { + _, trailing, err := util.CheckRDNSequenceWhiteSpace(c.RawIssuer) + if err != nil { + return &LintResult{Status: Fatal} + } + if trailing { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_issuer_dn_trailing_whitespace", + Description: "AttributeValue in issuer RelativeDistinguishedName sequence SHOULD NOT have trailing whitespace", + Citation: "AWSLabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &IssuerDNTrailingSpace{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_field_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_field_empty.go new file mode 100644 index 00000000..77fdbc10 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_field_empty.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+RFC 5280: 4.1.2.4
+The issuer field identifies the entity that has signed and issued the
+ certificate. The issuer field MUST contain a non-empty distinguished
+ name (DN). The issuer field is defined as the X.501 type Name
+ [X.501].
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type issuerFieldEmpty struct{}
+
+func (l *issuerFieldEmpty) Initialize() error {
+	return nil
+}
+
+func (l *issuerFieldEmpty) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *issuerFieldEmpty) Execute(c *x509.Certificate) *LintResult {
+	if util.NotAllNameFieldsAreEmpty(&c.Issuer) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name: "e_issuer_field_empty",
+		Description: "Certificate issuer field MUST NOT be empty and must have a non-empty distinguished name",
+		Citation: "RFC 5280: 4.1.2.4",
+		Source: RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint: &issuerFieldEmpty{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_multiple_rdn.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_multiple_rdn.go
new file mode 100644
index 00000000..07d385ed
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_issuer_multiple_rdn.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zcrypto/x509/pkix" + "github.com/zmap/zlint/util" +) + +type IssuerRDNHasMultipleAttribute struct{} + +func (l *IssuerRDNHasMultipleAttribute) Initialize() error { + return nil +} + +func (l *IssuerRDNHasMultipleAttribute) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *IssuerRDNHasMultipleAttribute) Execute(c *x509.Certificate) *LintResult { + var issuer pkix.RDNSequence + _, err := asn1.Unmarshal(c.RawIssuer, &issuer) + if err != nil { + return &LintResult{Status: Fatal} + } + for _, rdn := range issuer { + if len(rdn) > 1 { + return &LintResult{Status: Warn} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_multiple_issuer_rdn", + Description: "Certificates should not have multiple attributes in a single RDN (issuer)", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &IssuerRDNHasMultipleAttribute{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_empty.go new file mode 100644 index 00000000..61a46efe --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_empty.go @@ -0,0 +1,78 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/*********************************************************************** + Restrictions are defined in terms of permitted or excluded name + subtrees. Any name matching a restriction in the excludedSubtrees + field is invalid regardless of information appearing in the + permittedSubtrees. Conforming CAs MUST mark this extension as + critical and SHOULD NOT impose name constraints on the x400Address, + ediPartyName, or registeredID name forms. Conforming CAs MUST NOT + issue certificates where name constraints is an empty sequence. That + is, either the permittedSubtrees field or the excludedSubtrees MUST + be present. 
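+
+The relevant ASN.1 from RFC 5280 (Appendix A) is:
+
+   id-ce-nameConstraints OBJECT IDENTIFIER ::=  { id-ce 30 }
+
+   NameConstraints ::= SEQUENCE {
+        permittedSubtrees       [0]     GeneralSubtrees OPTIONAL,
+        excludedSubtrees        [1]     GeneralSubtrees OPTIONAL }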
+************************************************************************/ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type nameConstraintEmpty struct{} + +func (l *nameConstraintEmpty) Initialize() error { + return nil +} + +func (l *nameConstraintEmpty) CheckApplies(c *x509.Certificate) bool { + if !(util.IsExtInCert(c, util.NameConstOID)) { + return false + } + nc := util.GetExtFromCert(c, util.NameConstOID) + var seq asn1.RawValue + rest, err := asn1.Unmarshal(nc.Value, &seq) //only one sequence, so rest should be empty + if err != nil || len(rest) != 0 || seq.Tag != 16 || seq.Class != 0 || !seq.IsCompound { + return false + } + return true +} + +func (l *nameConstraintEmpty) Execute(c *x509.Certificate) *LintResult { + nc := util.GetExtFromCert(c, util.NameConstOID) + var seq asn1.RawValue + _, err := asn1.Unmarshal(nc.Value, &seq) //only one sequence, so rest should be empty + if err != nil { + return &LintResult{Status: Fatal} + } + if len(seq.Bytes) == 0 { + return &LintResult{Status: Error} + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_name_constraint_empty", + Description: "Conforming CAs MUST NOT issue certificates where name constraints is an empty sequence. That is, either the permittedSubtree or excludedSubtree fields must be present", + Citation: "RFC 5280: 4.2.1.10", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &nameConstraintEmpty{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_maximum_not_absent.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_maximum_not_absent.go new file mode 100644 index 00000000..d80c29dc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_maximum_not_absent.go @@ -0,0 +1,126 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************ +RFC 5280: 4.2.1.10 +Within this profile, the minimum and maximum fields are not used with +any name forms, thus, the minimum MUST be zero, and maximum MUST be +absent. However, if an application encounters a critical name +constraints extension that specifies other values for minimum or +maximum for a name form that appears in a subsequent certificate, the +application MUST either process these fields or reject the +certificate. 
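+
+The minimum and maximum fields referenced above are part of the
+GeneralSubtree structure (RFC 5280, Appendix A):
+
+   GeneralSubtrees ::= SEQUENCE SIZE (1..MAX) OF GeneralSubtree
+
+   GeneralSubtree ::= SEQUENCE {
+        base                    GeneralName,
+        minimum         [0]     BaseDistance DEFAULT 0,
+        maximum         [1]     BaseDistance OPTIONAL }
+
+   BaseDistance ::= INTEGER (0..MAX)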
+************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type nameConstraintMax struct{}
+
+func (l *nameConstraintMax) Initialize() error {
+	return nil
+}
+
+func (l *nameConstraintMax) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.NameConstOID)
+}
+
+func (l *nameConstraintMax) Execute(c *x509.Certificate) *LintResult {
+	for _, i := range c.PermittedDNSNames {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedDNSNames {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.PermittedEmailAddresses {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedEmailAddresses {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.PermittedIPAddresses {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedIPAddresses {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.PermittedDirectoryNames {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedDirectoryNames {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.PermittedEdiPartyNames {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedEdiPartyNames {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.PermittedRegisteredIDs {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedRegisteredIDs {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.PermittedX400Addresses {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	for _, i := range c.ExcludedX400Addresses {
+		if i.Max != 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name: "e_name_constraint_maximum_not_absent",
+		Description: "Within the name constraints name form, the maximum field is not used and therefore MUST be absent",
+		Citation: "RFC 5280: 4.2.1.10",
+		Source: RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint: &nameConstraintMax{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_minimum_non_zero.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_minimum_non_zero.go
new file mode 100644
index 00000000..5191dbea
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_minimum_non_zero.go
@@ -0,0 +1,126 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +/************************************************************************ +RFC 5280: 4.2.1.10 +Within this profile, the minimum and maximum fields are not used with +any name forms, thus, the minimum MUST be zero, and maximum MUST be +absent. However, if an application encounters a critical name +constraints extension that specifies other values for minimum or +maximum for a name form that appears in a subsequent certificate, the +application MUST either process these fields or reject the +certificate. +************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type nameConstMin struct{} + +func (l *nameConstMin) Initialize() error { + return nil +} + +func (l *nameConstMin) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.NameConstOID) +} + +func (l *nameConstMin) Execute(c *x509.Certificate) *LintResult { + for _, i := range c.PermittedDNSNames { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedDNSNames { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.PermittedEmailAddresses { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedEmailAddresses { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.PermittedIPAddresses { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedIPAddresses { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.PermittedDirectoryNames { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedDirectoryNames { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.PermittedEdiPartyNames { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedEdiPartyNames { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.PermittedRegisteredIDs { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedRegisteredIDs { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.PermittedX400Addresses { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + for _, i := range c.ExcludedX400Addresses { + if i.Min != 0 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_name_constraint_minimum_non_zero", + Description: "Within the name constraints name forms, the minimum field is not used and therefore MUST be zero", + Citation: "RFC 5280: 4.2.1.10", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &nameConstMin{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_edi_party_name.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_edi_party_name.go new file mode 100644 index 00000000..a56d9d2c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_edi_party_name.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************* +RFC 5280: 4.2.1.10 +Restrictions are defined in terms of permitted or excluded name +subtrees. Any name matching a restriction in the excludedSubtrees +field is invalid regardless of information appearing in the +permittedSubtrees. Conforming CAs MUST mark this extension as +critical and SHOULD NOT impose name constraints on the x400Address, +ediPartyName, or registeredID name forms. Conforming CAs MUST NOT +issue certificates where name constraints is an empty sequence. That +is, either the permittedSubtrees field or the excludedSubtrees MUST +be present. +*******************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type nameConstraintOnEDI struct{} + +func (l *nameConstraintOnEDI) Initialize() error { + return nil +} + +func (l *nameConstraintOnEDI) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.NameConstOID) +} + +func (l *nameConstraintOnEDI) Execute(c *x509.Certificate) *LintResult { + if c.PermittedEdiPartyNames != nil || c.ExcludedEdiPartyNames != nil { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_name_constraint_on_edi_party_name", + Description: "The name constraints extension SHOULD NOT impose constraints on the ediPartyName name form", + Citation: "RFC 5280: 4.2.1.10", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &nameConstraintOnEDI{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_registered_id.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_registered_id.go new file mode 100644 index 00000000..c788a047 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_registered_id.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************* +RFC 5280: 4.2.1.10 +Restrictions are defined in terms of permitted or excluded name +subtrees. Any name matching a restriction in the excludedSubtrees +field is invalid regardless of information appearing in the +permittedSubtrees. Conforming CAs MUST mark this extension as +critical and SHOULD NOT impose name constraints on the x400Address, +ediPartyName, or registeredID name forms. 
Conforming CAs MUST NOT +issue certificates where name constraints is an empty sequence. That +is, either the permittedSubtrees field or the excludedSubtrees MUST +be present. +*******************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type nameConstraintOnRegisteredId struct{} + +func (l *nameConstraintOnRegisteredId) Initialize() error { + return nil +} + +func (l *nameConstraintOnRegisteredId) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.NameConstOID) +} + +func (l *nameConstraintOnRegisteredId) Execute(c *x509.Certificate) *LintResult { + if c.PermittedRegisteredIDs != nil || c.ExcludedRegisteredIDs != nil { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_name_constraint_on_registered_id", + Description: "The name constraints extension SHOULD NOT impose constraints on the registeredID name form", + Citation: "RFC 5280: 4.2.1.10", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &nameConstraintOnRegisteredId{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_x400.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_x400.go new file mode 100644 index 00000000..0184a5f4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_name_constraint_on_x400.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************* +RFC 5280: 4.2.1.10 +Restrictions are defined in terms of permitted or excluded name +subtrees. Any name matching a restriction in the excludedSubtrees +field is invalid regardless of information appearing in the +permittedSubtrees. Conforming CAs MUST mark this extension as +critical and SHOULD NOT impose name constraints on the x400Address, +ediPartyName, or registeredID name forms. Conforming CAs MUST NOT +issue certificates where name constraints is an empty sequence. That +is, either the permittedSubtrees field or the excludedSubtrees MUST +be present. 
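+
+The x400Address form is one of the GeneralName choices (RFC 5280, 4.2.1.6):
+
+   GeneralName ::= CHOICE {
+        otherName                       [0]     OtherName,
+        rfc822Name                      [1]     IA5String,
+        dNSName                         [2]     IA5String,
+        x400Address                     [3]     ORAddress,
+        directoryName                   [4]     Name,
+        ediPartyName                    [5]     EDIPartyName,
+        uniformResourceIdentifier       [6]     IA5String,
+        iPAddress                       [7]     OCTET STRING,
+        registeredID                    [8]     OBJECT IDENTIFIER }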
+*******************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type nameConstraintOnX400 struct{} + +func (l *nameConstraintOnX400) Initialize() error { + return nil +} + +func (l *nameConstraintOnX400) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.NameConstOID) +} + +func (l *nameConstraintOnX400) Execute(c *x509.Certificate) *LintResult { + if c.PermittedX400Addresses != nil || c.ExcludedX400Addresses != nil { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_name_constraint_on_x400", + Description: "The name constraints extension SHOULD NOT impose constraints on the x400Address name form", + Citation: "RFC 5280: 4.2.1.10", + Source: RFC5280, + EffectiveDate: util.RFC5280Date, + Lint: &nameConstraintOnX400{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_root_ca_rsa_mod_less_than_2048_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_root_ca_rsa_mod_less_than_2048_bits.go new file mode 100644 index 00000000..7aeda296 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_root_ca_rsa_mod_less_than_2048_bits.go @@ -0,0 +1,58 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+/************************************************
+BRs 6.1.5: Root CA certificates with a validity period beginning on or before 31 Dec 2010 and using the RSA public key algorithm MUST use a modulus of at least 2048 bits.
+************************************************/
+
+import (
+	"crypto/rsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rootCaModSize struct{}
+
+func (l *rootCaModSize) Initialize() error {
+	return nil
+}
+
+func (l *rootCaModSize) CheckApplies(c *x509.Certificate) bool {
+	issueDate := c.NotBefore
+	_, ok := c.PublicKey.(*rsa.PublicKey)
+	return ok && c.PublicKeyAlgorithm == x509.RSA && util.IsRootCA(c) && issueDate.Before(util.NoRSA1024RootDate)
+}
+
+func (l *rootCaModSize) Execute(c *x509.Certificate) *LintResult {
+	key := c.PublicKey.(*rsa.PublicKey)
+	if key.N.BitLen() < 2048 {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name: "e_old_root_ca_rsa_mod_less_than_2048_bits",
+		Description: "In a validity period beginning on or before 31 Dec 2010, root CA certificates using RSA public key algorithm MUST use a 2048 bit modulus",
+		Citation: "BRs: 6.1.5",
+		Source: CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint: &rootCaModSize{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_sub_ca_rsa_mod_less_than_1024_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_sub_ca_rsa_mod_less_than_1024_bits.go
new file mode 100644
index 00000000..6f89b65a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_sub_ca_rsa_mod_less_than_1024_bits.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+// BRs 6.1.5: In a validity period beginning on or before 31 Dec 2010 and ending on or before 31 Dec 2013, subordinate CA certificates using the RSA public key algorithm MUST use a modulus of at least 1024 bits.
+
+import (
+	"crypto/rsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCaModSize struct{}
+
+func (l *subCaModSize) Initialize() error {
+	return nil
+}
+
+func (l *subCaModSize) CheckApplies(c *x509.Certificate) bool {
+	issueDate := c.NotBefore
+	endDate := c.NotAfter
+	_, ok := c.PublicKey.(*rsa.PublicKey)
+	return ok && util.IsSubCA(c) && issueDate.Before(util.NoRSA1024RootDate) && endDate.Before(util.NoRSA1024Date)
+}
+
+func (l *subCaModSize) Execute(c *x509.Certificate) *LintResult {
+	key := c.PublicKey.(*rsa.PublicKey)
+	if key.N.BitLen() < 1024 {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name: "e_old_sub_ca_rsa_mod_less_than_1024_bits",
+		Description: "In a validity period beginning on or before 31 Dec 2010 and ending on or before 31 Dec 2013, subordinate CA certificates using RSA public key algorithm MUST use a 1024 bit modulus",
+		Citation: "BRs: 6.1.5",
+		Source: CABFBaselineRequirements,
+		// since effective date should be checked against end date in this specific case, putting time check into checkApplies instead, ZeroDate here to automatically pass NE test
+		EffectiveDate: util.ZeroDate,
+		Lint: &subCaModSize{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_sub_cert_rsa_mod_less_than_1024_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_sub_cert_rsa_mod_less_than_1024_bits.go
new file mode 100644
index 00000000..b7e0bbcb
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_old_sub_cert_rsa_mod_less_than_1024_bits.go
@@ -0,0 +1,55 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +import ( + "crypto/rsa" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subModSize struct{} + +func (l *subModSize) Initialize() error { + return nil +} + +func (l *subModSize) CheckApplies(c *x509.Certificate) bool { + endDate := c.NotAfter + _, ok := c.PublicKey.(*rsa.PublicKey) + return ok && c.PublicKeyAlgorithm == x509.RSA && !util.IsCACert(c) && endDate.Before(util.NoRSA1024Date) +} + +func (l *subModSize) Execute(c *x509.Certificate) *LintResult { + key := c.PublicKey.(*rsa.PublicKey) + if key.N.BitLen() < 1024 { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_old_sub_cert_rsa_mod_less_than_1024_bits", + Description: "In a validity period ending on or before 31 Dec 2013, subscriber certificates using RSA public key algorithm MUST use a 1024 bit modulus", + Citation: "BRs: 6.1.5", + Source: CABFBaselineRequirements, + // since effective date should be checked against end date in this specific case, putting time check into checkApplies instead, ZeroDate here to automatically pass NE test + EffectiveDate: util.ZeroDate, + Lint: &subModSize{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_onion_subject_validity_time_too_large.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_onion_subject_validity_time_too_large.go new file mode 100644 index 00000000..ef52e8ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_onion_subject_validity_time_too_large.go @@ -0,0 +1,68 @@ +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "fmt" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +const ( + // Ballot 144 specified: + // CAs MUST NOT issue a Certificate that includes a Domain Name where .onion + // is in the right-most label of the Domain Name with a validity period longer + // than 15 months + maxOnionValidityMonths = 15 +) + +type torValidityTooLarge struct{} + +// Initialize for a torValidityTooLarge linter is a NOP. +func (l *torValidityTooLarge) Initialize() error { + return nil +} + +// CheckApplies returns true if the certificate is a subscriber certificate that +// contains a subject name ending in `.onion`. +func (l *torValidityTooLarge) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.CertificateSubjInTLD(c, onionTLD) +} + +// Execute will return an Error LintResult if the provided certificate has +// a validity period longer than the maximum allowed validity for a certificate +// with a .onion subject. 
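+// The limit is applied by adding maxOnionValidityMonths to NotBefore and
+// checking whether the result still falls before NotAfter.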
+func (l *torValidityTooLarge) Execute(c *x509.Certificate) *LintResult { + if c.NotBefore.AddDate(0, maxOnionValidityMonths, 0).Before(c.NotAfter) { + return &LintResult{ + Status: Error, + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_onion_subject_validity_time_too_large", + Description: fmt.Sprintf( + "certificates with .onion names can not be valid for more than %d months", + maxOnionValidityMonths), + Citation: "CABF EV Guidelines: Appendix F", + Source: CABFEVGuidelines, + EffectiveDate: util.OnionOnlyEVDate, + Lint: &torValidityTooLarge{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_path_len_constraint_improperly_included.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_path_len_constraint_improperly_included.go new file mode 100644 index 00000000..6cea47be --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_path_len_constraint_improperly_included.go @@ -0,0 +1,72 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/****************************************************************** +RFC 5280: 4.2.1.9 +CAs MUST NOT include the pathLenConstraint field unless the cA +boolean is asserted and the key usage extension asserts the +keyCertSign bit. 
+******************************************************************/ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type pathLenIncluded struct{} + +func (l *pathLenIncluded) Initialize() error { + return nil +} + +func (l *pathLenIncluded) CheckApplies(cert *x509.Certificate) bool { + return util.IsExtInCert(cert, util.BasicConstOID) +} + +func (l *pathLenIncluded) Execute(cert *x509.Certificate) *LintResult { + bc := util.GetExtFromCert(cert, util.BasicConstOID) + var seq asn1.RawValue + var isCa bool + _, err := asn1.Unmarshal(bc.Value, &seq) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(seq.Bytes) == 0 { + return &LintResult{Status: Pass} + } + rest, err := asn1.UnmarshalWithParams(seq.Bytes, &isCa, "optional") + if err != nil { + return &LintResult{Status: Fatal} + } + keyUsageValue := util.IsExtInCert(cert, util.KeyUsageOID) + if len(rest) > 0 && (!cert.IsCA || !keyUsageValue || (keyUsageValue && cert.KeyUsage&x509.KeyUsageCertSign == 0)) { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_path_len_constraint_improperly_included", + Description: "CAs MUST NOT include the pathLenConstraint field unless the CA boolean is asserted and the keyCertSign bit is set", + Citation: "RFC 5280: 4.2.1.9", + Source: RFC5280, + EffectiveDate: util.RFC3280Date, + Lint: &pathLenIncluded{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_path_len_constraint_zero_or_less.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_path_len_constraint_zero_or_less.go new file mode 100644 index 00000000..22c77a78 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_path_len_constraint_zero_or_less.go @@ -0,0 +1,78 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************** +The pathLenConstraint field is meaningful only if the cA boolean is +asserted and the key usage extension, if present, asserts the +keyCertSign bit (Section 4.2.1.3). In this case, it gives the +maximum number of non-self-issued intermediate certificates that may +follow this certificate in a valid certification path. (Note: The +last certificate in the certification path is not an intermediate +certificate, and is not included in this limit. Usually, the last +certificate is an end entity certificate, but it can be a CA +certificate.) A pathLenConstraint of zero indicates that no non- +self-issued intermediate CA certificates may follow in a valid +certification path. Where it appears, the pathLenConstraint field +MUST be greater than or equal to zero. Where pathLenConstraint does +not appear, no limit is imposed. 
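+
+For example, a pathLenConstraint of 1 allows at most one non-self-issued
+intermediate CA certificate to follow this certificate in a valid
+certification path; that intermediate may issue end entity certificates
+but no further CA certificates.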
+********************************************************************/ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type basicConst struct { + CA bool `asn1:"optional"` + PathLenConstraint int `asn1:"optional"` +} + +type pathLenNonPositive struct { +} + +func (l *pathLenNonPositive) Initialize() error { + return nil +} + +func (l *pathLenNonPositive) CheckApplies(cert *x509.Certificate) bool { + return cert.BasicConstraintsValid +} + +func (l *pathLenNonPositive) Execute(cert *x509.Certificate) *LintResult { + var bc basicConst + + ext := util.GetExtFromCert(cert, util.BasicConstOID) + if _, err := asn1.Unmarshal(ext.Value, &bc); err != nil { + return &LintResult{Status: Fatal} + } + if bc.PathLenConstraint < 0 { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_path_len_constraint_zero_or_less", + Description: "Where it appears, the pathLenConstraint field MUST be greater than or equal to zero", + Citation: "RFC 5280: 4.2.1.9", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &pathLenNonPositive{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_public_key_type_not_allowed.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_public_key_type_not_allowed.go new file mode 100644 index 00000000..3e4b0699 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_public_key_type_not_allowed.go @@ -0,0 +1,50 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type publicKeyAllowed struct{} + +func (l *publicKeyAllowed) Initialize() error { + return nil +} + +func (l *publicKeyAllowed) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *publicKeyAllowed) Execute(c *x509.Certificate) *LintResult { + alg := c.PublicKeyAlgorithm + if alg != x509.UnknownPublicKeyAlgorithm { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_public_key_type_not_allowed", + Description: "Certificates MUST have RSA, DSA, or ECDSA public key type", + Citation: "BRs: 6.1.5", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &publicKeyAllowed{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_etsi_present_qcs_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_etsi_present_qcs_critical.go new file mode 100644 index 00000000..4f2b9e9d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_etsi_present_qcs_critical.go @@ -0,0 +1,66 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemQcEtsiPresentQcsCritical struct{} + +func (this *qcStatemQcEtsiPresentQcsCritical) getStatementOid() *asn1.ObjectIdentifier { + return &util.IdEtsiQcsQcCompliance +} + +func (l *qcStatemQcEtsiPresentQcsCritical) Initialize() error { + return nil +} + +func (l *qcStatemQcEtsiPresentQcsCritical) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.IsAnyEtsiQcStatementPresent(util.GetExtFromCert(c, util.QcStateOid).Value) { + return true + } + return false +} + +func (l *qcStatemQcEtsiPresentQcsCritical) Execute(c *x509.Certificate) *LintResult { + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + if ext.Critical { + errString = "ETSI QC Statement is present and QC Statements extension is marked critical" + } + + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_etsi_present_qcs_critical", + Description: "Checks that a QC Statement which contains any of the id-etsi-qcs-... 
QC Statements is not marked critical", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.1", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQcEtsiPresentQcsCritical{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_etsi_type_as_statem.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_etsi_type_as_statem.go new file mode 100644 index 00000000..638de136 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_etsi_type_as_statem.go @@ -0,0 +1,67 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "encoding/asn1" + "fmt" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemEtsiTypeAsStatem struct{} + +func (l *qcStatemEtsiTypeAsStatem) Initialize() error { + return nil +} + +func (l *qcStatemEtsiTypeAsStatem) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.QcStateOid) +} + +func (l *qcStatemEtsiTypeAsStatem) Execute(c *x509.Certificate) *LintResult { + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + + oidList := make([]*asn1.ObjectIdentifier, 3) + oidList[0] = &util.IdEtsiQcsQctEsign + oidList[1] = &util.IdEtsiQcsQctEseal + oidList[2] = &util.IdEtsiQcsQctWeb + + for _, oid := range oidList { + r := util.ParseQcStatem(ext.Value, *oid) + util.AppendToStringSemicolonDelim(&errString, r.GetErrorInfo()) + if r.IsPresent() { + util.AppendToStringSemicolonDelim(&errString, fmt.Sprintf("ETSI QC Type OID %v used as QC statement", oid)) + } + } + + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_etsi_type_as_statem", + Description: "Checks for erroneous QC Statement OID that actually are represented by ETSI ESI QC type OID.", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.2.3", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemEtsiTypeAsStatem{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_mandatory_etsi_statems.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_mandatory_etsi_statems.go new file mode 100644 index 00000000..9188659b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_mandatory_etsi_statems.go @@ -0,0 +1,70 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemQcmandatoryEtsiStatems struct{} + +func (l *qcStatemQcmandatoryEtsiStatems) Initialize() error { + return nil +} + +func (l *qcStatemQcmandatoryEtsiStatems) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.IsAnyEtsiQcStatementPresent(util.GetExtFromCert(c, util.QcStateOid).Value) { + return true + } + return false +} + +func (l *qcStatemQcmandatoryEtsiStatems) Execute(c *x509.Certificate) *LintResult { + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + + oidList := make([]*asn1.ObjectIdentifier, 1) + oidList[0] = &util.IdEtsiQcsQcCompliance + + for _, oid := range oidList { + r := util.ParseQcStatem(ext.Value, *oid) + util.AppendToStringSemicolonDelim(&errString, r.GetErrorInfo()) + if !r.IsPresent() { + util.AppendToStringSemicolonDelim(&errString, "missing mandatory ETSI QC statement") + } + } + + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_mandatory_etsi_statems", + Description: "Checks that a QC Statement that contains at least one of the ETSI ESI statements, also features the set of mandatory ETSI ESI QC statements.", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 5", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQcmandatoryEtsiStatems{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qccompliance_valid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qccompliance_valid.go new file mode 100644 index 00000000..8559d4e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qccompliance_valid.go @@ -0,0 +1,65 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemQcComplianceValid struct{} + +func (this *qcStatemQcComplianceValid) getStatementOid() *asn1.ObjectIdentifier { + return &util.IdEtsiQcsQcCompliance +} + +func (l *qcStatemQcComplianceValid) Initialize() error { + return nil +} + +func (l *qcStatemQcComplianceValid) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() { + return true + } + return false +} + +func (l *qcStatemQcComplianceValid) Execute(c *x509.Certificate) *LintResult { + + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + s := util.ParseQcStatem(ext.Value, *l.getStatementOid()) + errString += s.GetErrorInfo() + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_qccompliance_valid", + Description: "Checks that a QC Statement of the type id-etsi-qcs-QcCompliance has the correct form", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.2.1", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQcComplianceValid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qclimitvalue_valid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qclimitvalue_valid.go new file mode 100644 index 00000000..4dac4229 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qclimitvalue_valid.go @@ -0,0 +1,99 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package lints
+
+import (
+	"encoding/asn1"
+	"unicode"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type qcStatemQcLimitValueValid struct{}
+
+func (this *qcStatemQcLimitValueValid) getStatementOid() *asn1.ObjectIdentifier {
+	return &util.IdEtsiQcsQcLimitValue
+}
+
+func (l *qcStatemQcLimitValueValid) Initialize() error {
+	return nil
+}
+
+func (l *qcStatemQcLimitValueValid) CheckApplies(c *x509.Certificate) bool {
+	if !util.IsExtInCert(c, util.QcStateOid) {
+		return false
+	}
+	if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() {
+		return true
+	}
+	return false
+}
+
+func isOnlyLetters(s string) bool {
+	for _, r := range s {
+		if !unicode.IsLetter(r) {
+			return false
+		}
+	}
+	return true
+}
+
+func (l *qcStatemQcLimitValueValid) Execute(c *x509.Certificate) *LintResult {
+
+	errString := ""
+	ext := util.GetExtFromCert(c, util.QcStateOid)
+	s := util.ParseQcStatem(ext.Value, *l.getStatementOid())
+	errString += s.GetErrorInfo()
+	if len(errString) == 0 {
+		qcLv, ok := s.(util.EtsiQcLimitValue)
+		if !ok {
+			return &LintResult{Status: Error, Details: "parsed QcStatem is not a EtsiQcLimitValue"}
+		}
+		if qcLv.Amount < 0 {
+			util.AppendToStringSemicolonDelim(&errString, "amount is negative")
+		}
+		if qcLv.IsNum {
+			if qcLv.CurrencyNum < 1 || qcLv.CurrencyNum > 999 {
+				util.AppendToStringSemicolonDelim(&errString, "numeric currency code is out of range")
+			}
+		} else {
+			if len(qcLv.CurrencyAlph) != 3 {
+				util.AppendToStringSemicolonDelim(&errString, "invalid string length of currency code")
+			}
+			if !isOnlyLetters(qcLv.CurrencyAlph) {
+				util.AppendToStringSemicolonDelim(&errString, "currency code contains non-letter characters")
+			}
+		}
+	}
+	if len(errString) == 0 {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error, Details: errString}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_qcstatem_qclimitvalue_valid",
+		Description:   "Checks that a QC Statement of the type id-etsi-qcs-QcLimitValue has the correct form",
+		Citation:      "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.3.2",
+		Source:        EtsiEsi,
+		EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date,
+		Lint:          &qcStatemQcLimitValueValid{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcpds_lang_case.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcpds_lang_case.go
new file mode 100644
index 00000000..6c49cd08
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcpds_lang_case.go
@@ -0,0 +1,89 @@
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "encoding/asn1" + "fmt" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" + "unicode" +) + +type qcStatemQcPdsLangCase struct{} + +func (this *qcStatemQcPdsLangCase) getStatementOid() *asn1.ObjectIdentifier { + return &util.IdEtsiQcsQcEuPDS +} + +func (l *qcStatemQcPdsLangCase) Initialize() error { + return nil +} + +func (l *qcStatemQcPdsLangCase) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() { + return true + } + return false +} + +func isOnlyLowerCaseLetters(s string) bool { + for _, c := range s { + if !unicode.IsLower(c) { + return false + } + } + return true +} + +func (l *qcStatemQcPdsLangCase) Execute(c *x509.Certificate) *LintResult { + errString := "" + wrnString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + s := util.ParseQcStatem(ext.Value, *l.getStatementOid()) + errString += s.GetErrorInfo() + if len(errString) == 0 { + pds := s.(util.EtsiQcPds) + for i, loc := range pds.PdsLocations { + if !isOnlyLowerCaseLetters(loc.Language) { + util.AppendToStringSemicolonDelim(&wrnString, fmt.Sprintf("PDS location %d has a language code containing invalid letters", i)) + } + + } + } + if len(errString) == 0 { + if len(wrnString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Warn, Details: wrnString} + } + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "w_qcstatem_qcpds_lang_case", + Description: "Checks that a QC Statement of the type id-etsi-qcs-QcPDS features a language code comprised of only lower case letters", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.3.4", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQcPdsLangCase{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcpds_valid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcpds_valid.go new file mode 100644 index 00000000..b6f7c475 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcpds_valid.go @@ -0,0 +1,99 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package lints
+
+import (
+	"encoding/asn1"
+	"fmt"
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+	"strings"
+)
+
+type qcStatemQcPdsValid struct{}
+
+func (this *qcStatemQcPdsValid) getStatementOid() *asn1.ObjectIdentifier {
+	return &util.IdEtsiQcsQcEuPDS
+}
+
+func (l *qcStatemQcPdsValid) Initialize() error {
+	return nil
+}
+
+func (l *qcStatemQcPdsValid) CheckApplies(c *x509.Certificate) bool {
+	if !util.IsExtInCert(c, util.QcStateOid) {
+		return false
+	}
+	if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() {
+		return true
+	}
+	return false
+}
+
+func isInList(s string, list []string) bool {
+	for _, i := range list {
+		if strings.Compare(i, s) == 0 {
+			return true
+		}
+	}
+	return false
+}
+
+func (l *qcStatemQcPdsValid) Execute(c *x509.Certificate) *LintResult {
+	errString := ""
+	ext := util.GetExtFromCert(c, util.QcStateOid)
+	s := util.ParseQcStatem(ext.Value, *l.getStatementOid())
+	errString += s.GetErrorInfo()
+	if len(errString) == 0 {
+		codeList := make([]string, 0)
+		foundEn := false
+		pds := s.(util.EtsiQcPds)
+		if len(pds.PdsLocations) == 0 {
+			util.AppendToStringSemicolonDelim(&errString, "PDS list is empty")
+		}
+		for i, loc := range pds.PdsLocations {
+			if len(loc.Language) != 2 {
+				util.AppendToStringSemicolonDelim(&errString, fmt.Sprintf("PDS location %d has a language code with an invalid length", i))
+			}
+			if strings.Compare(strings.ToLower(loc.Language), "en") == 0 {
+				foundEn = true
+			}
+			if isInList(strings.ToLower(loc.Language), codeList) {
+				util.AppendToStringSemicolonDelim(&errString, "language code '"+loc.Language+"' appears multiple times")
+			}
+			codeList = append(codeList, strings.ToLower(loc.Language))
+		}
+		if !foundEn {
+			util.AppendToStringSemicolonDelim(&errString, "no English PDS present")
+		}
+	}
+	if len(errString) == 0 {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error, Details: errString}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_qcstatem_qcpds_valid",
+		Description:   "Checks that a QC Statement of the type id-etsi-qcs-QcPDS has the correct form",
+		Citation:      "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.3.4",
+		Source:        EtsiEsi,
+		EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date,
+		Lint:          &qcStatemQcPdsValid{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcretentionperiod_valid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcretentionperiod_valid.go
new file mode 100644
index 00000000..615c640c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcretentionperiod_valid.go
@@ -0,0 +1,72 @@
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemQcRetentionPeriodValid struct{} + +func (this *qcStatemQcRetentionPeriodValid) getStatementOid() *asn1.ObjectIdentifier { + return &util.IdEtsiQcsQcRetentionPeriod +} + +func (l *qcStatemQcRetentionPeriodValid) Initialize() error { + return nil +} + +func (l *qcStatemQcRetentionPeriodValid) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() { + return true + } + return false +} + +func (l *qcStatemQcRetentionPeriodValid) Execute(c *x509.Certificate) *LintResult { + + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + s := util.ParseQcStatem(ext.Value, *l.getStatementOid()) + errString += s.GetErrorInfo() + if len(errString) == 0 { + + rp := s.(util.EtsiQcRetentionPeriod) + if rp.Period < 0 { + util.AppendToStringSemicolonDelim(&errString, "retention period is negative") + } + } + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_qcretentionperiod_valid", + Description: "Checks that a QC Statement of the type id-etsi-qcs-QcRetentionPeriod has the correct form", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11)/ Section 4.3.3", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQcRetentionPeriodValid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcsscd_valid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcsscd_valid.go new file mode 100644 index 00000000..bf6f1d57 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qcsscd_valid.go @@ -0,0 +1,66 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "encoding/asn1" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemQcSscdValid struct{} + +func (this *qcStatemQcSscdValid) getStatementOid() *asn1.ObjectIdentifier { + return &util.IdEtsiQcsQcSSCD +} + +func (l *qcStatemQcSscdValid) Initialize() error { + return nil +} + +func (l *qcStatemQcSscdValid) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() { + return true + } + return false +} + +func (l *qcStatemQcSscdValid) Execute(c *x509.Certificate) *LintResult { + + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + s := util.ParseQcStatem(ext.Value, *l.getStatementOid()) + errString += s.GetErrorInfo() + + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_qcsscd_valid", + Description: "Checks that a QC Statement of the type id-etsi-qcs-QcSSCD has the correct form", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.2.2", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQcSscdValid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qctype_valid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qctype_valid.go new file mode 100644 index 00000000..b1b3e47d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qctype_valid.go @@ -0,0 +1,82 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "encoding/asn1" + "fmt" + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type qcStatemQctypeValid struct{} + +func (this *qcStatemQctypeValid) getStatementOid() *asn1.ObjectIdentifier { + return &util.IdEtsiQcsQcType +} + +func (l *qcStatemQctypeValid) Initialize() error { + return nil +} + +func (l *qcStatemQctypeValid) CheckApplies(c *x509.Certificate) bool { + if !util.IsExtInCert(c, util.QcStateOid) { + return false + } + if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() { + return true + } + return false +} + +func (l *qcStatemQctypeValid) Execute(c *x509.Certificate) *LintResult { + + errString := "" + ext := util.GetExtFromCert(c, util.QcStateOid) + s := util.ParseQcStatem(ext.Value, *l.getStatementOid()) + errString += s.GetErrorInfo() + if len(errString) == 0 { + qcType := s.(util.Etsi423QcType) + if len(qcType.TypeOids) == 0 { + errString += "no QcType present, sequence of OIDs is empty" + } + for _, t := range qcType.TypeOids { + + if !t.Equal(util.IdEtsiQcsQctEsign) && !t.Equal(util.IdEtsiQcsQctEseal) && !t.Equal(util.IdEtsiQcsQctWeb) { + if len(errString) > 0 { + errString += "; " + } + errString += fmt.Sprintf("encountered invalid ETSI QcType OID: %v", t) + } + } + } + + if len(errString) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error, Details: errString} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_qcstatem_qctype_valid", + Description: "Checks that a QC Statement of the type Id-etsi-qcs-QcType features a non-empty list of only the allowed QcType OIDs", + Citation: "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.2.3", + Source: EtsiEsi, + EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date, + Lint: &qcStatemQctypeValid{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qctype_web.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qctype_web.go new file mode 100644 index 00000000..31bb2856 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_qcstatem_qctype_web.go @@ -0,0 +1,89 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package lints
+
+import (
+	"encoding/asn1"
+	"fmt"
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type qcStatemQctypeWeb struct{}
+
+func (this *qcStatemQctypeWeb) getStatementOid() *asn1.ObjectIdentifier {
+	return &util.IdEtsiQcsQcType
+}
+
+func (l *qcStatemQctypeWeb) Initialize() error {
+	return nil
+}
+
+func (l *qcStatemQctypeWeb) CheckApplies(c *x509.Certificate) bool {
+	if !util.IsExtInCert(c, util.QcStateOid) {
+		return false
+	}
+	if util.ParseQcStatem(util.GetExtFromCert(c, util.QcStateOid).Value, *l.getStatementOid()).IsPresent() {
+		return true
+	}
+	return false
+}
+
+func (l *qcStatemQctypeWeb) Execute(c *x509.Certificate) *LintResult {
+
+	errString := ""
+	wrnString := ""
+	ext := util.GetExtFromCert(c, util.QcStateOid)
+	s := util.ParseQcStatem(ext.Value, *l.getStatementOid())
+	errString += s.GetErrorInfo()
+	if len(errString) == 0 {
+		qcType := s.(util.Etsi423QcType)
+		if len(qcType.TypeOids) == 0 {
+			errString += "no QcType present, sequence of OIDs is empty"
+		}
+		found := false
+		for _, t := range qcType.TypeOids {
+			if t.Equal(util.IdEtsiQcsQctWeb) {
+				found = true
+			}
+		}
+		if !found {
+			wrnString += fmt.Sprintf("ETSI QcType does not indicate the certificate as a 'web' certificate")
+		}
+	}
+
+	if len(errString) == 0 {
+		if len(wrnString) == 0 {
+			return &LintResult{Status: Pass}
+		} else {
+			return &LintResult{Status: Warn, Details: wrnString}
+		}
+	} else {
+		return &LintResult{Status: Error, Details: errString}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_qcstatem_qctype_web",
+		Description:   "Checks that a QC Statement of the type Id-etsi-qcs-QcType features at least the type IdEtsiQcsQctWeb",
+		Citation:      "ETSI EN 319 412 - 5 V2.2.1 (2017 - 11) / Section 4.2.3",
+		Source:        EtsiEsi,
+		EffectiveDate: util.EtsiEn319_412_5_V2_2_1_Date,
+		Lint:          &qcStatemQctypeWeb{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_basic_constraints_path_len_constraint_field_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_basic_constraints_path_len_constraint_field_present.go
new file mode 100644
index 00000000..0e942da3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_basic_constraints_path_len_constraint_field_present.go
@@ -0,0 +1,70 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************************************************************************
+7.1.2.1. Root CA Certificate
+a. basicConstraints
+This extension MUST appear as a critical extension. The cA field MUST be set true. The pathLenConstraint field SHOULD NOT be present. 
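+
+A hedged sketch of the same SHOULD NOT over parsed fields, assuming
+crypto/x509-style semantics where MaxPathLen < 0 (or 0 without
+MaxPathLenZero) means the field is absent:
+
+	pathLenAbsent := c.MaxPathLen < 0 ||
+		(c.MaxPathLen == 0 && !c.MaxPathLenZero)
+
+The lint below inspects the raw extension bytes instead, which avoids
+depending on parser defaults.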
+***********************************************************************************************************/ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rootCaPathLenPresent struct{} + +func (l *rootCaPathLenPresent) Initialize() error { + return nil +} + +func (l *rootCaPathLenPresent) CheckApplies(c *x509.Certificate) bool { + return util.IsRootCA(c) && util.IsExtInCert(c, util.BasicConstOID) +} + +func (l *rootCaPathLenPresent) Execute(c *x509.Certificate) *LintResult { + bc := util.GetExtFromCert(c, util.BasicConstOID) + var seq asn1.RawValue + var isCa bool + _, err := asn1.Unmarshal(bc.Value, &seq) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(seq.Bytes) == 0 { + return &LintResult{Status: Pass} + } + rest, err := asn1.Unmarshal(seq.Bytes, &isCa) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(rest) > 0 { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_root_ca_basic_constraints_path_len_constraint_field_present", + Description: "Root CA certificate basicConstraint extension pathLenConstraint field SHOULD NOT be present", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &rootCaPathLenPresent{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_contains_cert_policy.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_contains_cert_policy.go new file mode 100644 index 00000000..1fedbfcb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_contains_cert_policy.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.1c certificatePolicies +This extension SHOULD NOT be present. 
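+
+The check reduces to extension presence; an equivalent sketch over a parsed
+certificate (2.5.29.32 is the certificatePolicies OID):
+
+	certPolicyOID := asn1.ObjectIdentifier{2, 5, 29, 32}
+	for _, ext := range c.Extensions {
+		if ext.Id.Equal(certPolicyOID) {
+			// present on a root CA -> Warn per BRs 7.1.2.1c
+		}
+	}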
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rootCAContainsCertPolicy struct{} + +func (l *rootCAContainsCertPolicy) Initialize() error { + return nil +} + +func (l *rootCAContainsCertPolicy) CheckApplies(c *x509.Certificate) bool { + return util.IsRootCA(c) +} + +func (l *rootCAContainsCertPolicy) Execute(c *x509.Certificate) *LintResult { + if util.IsExtInCert(c, util.CertPolicyOID) { + return &LintResult{Status: Warn} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "w_root_ca_contains_cert_policy", + Description: "Root CA Certificate: certificatePolicies SHOULD NOT be present.", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &rootCAContainsCertPolicy{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_extended_key_usage_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_extended_key_usage_present.go new file mode 100644 index 00000000..48a639d5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_extended_key_usage_present.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.1d extendedKeyUsage +This extension MUST NOT be present. 
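+
+Like the certificatePolicies lint above, this is a presence check, but at
+Error severity because of the MUST NOT; a sketch (2.5.29.37 is the
+extKeyUsage OID):
+
+	ekuOID := asn1.ObjectIdentifier{2, 5, 29, 37}
+	for _, ext := range c.Extensions {
+		if ext.Id.Equal(ekuOID) {
+			// present on a root CA -> Error per BRs 7.1.2.1d
+		}
+	}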
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rootCAContainsEKU struct{}
+
+func (l *rootCAContainsEKU) Initialize() error {
+	return nil
+}
+
+func (l *rootCAContainsEKU) CheckApplies(c *x509.Certificate) bool {
+	return util.IsRootCA(c)
+}
+
+func (l *rootCAContainsEKU) Execute(c *x509.Certificate) *LintResult {
+	// The lint is a simple presence check on the extendedKeyUsage extension.
+	if util.IsExtInCert(c, util.EkuSynOid) {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_root_ca_extended_key_usage_present",
+		Description:   "Root CA Certificate: extendedKeyUsage MUST NOT be present.",
+		Citation:      "BRs: 7.1.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &rootCAContainsEKU{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_key_usage_must_be_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_key_usage_must_be_critical.go
new file mode 100644
index 00000000..0e49f46d
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_key_usage_must_be_critical.go
@@ -0,0 +1,50 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rootCAKeyUsageMustBeCritical struct{}
+
+func (l *rootCAKeyUsageMustBeCritical) Initialize() error {
+	return nil
+}
+
+func (l *rootCAKeyUsageMustBeCritical) CheckApplies(c *x509.Certificate) bool {
+	return util.IsRootCA(c) && util.IsExtInCert(c, util.KeyUsageOID)
+}
+
+func (l *rootCAKeyUsageMustBeCritical) Execute(c *x509.Certificate) *LintResult {
+	keyUsageExtension := util.GetExtFromCert(c, util.KeyUsageOID)
+	if keyUsageExtension.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_root_ca_key_usage_must_be_critical",
+		Description:   "Root CA certificates MUST have Key Usage Extension marked critical",
+		Citation:      "BRs: 7.1.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &rootCAKeyUsageMustBeCritical{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_key_usage_present.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_key_usage_present.go
new file mode 100644
index 00000000..3babc03c
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_root_ca_key_usage_present.go
@@ -0,0 +1,50 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rootCAKeyUsagePresent struct{} + +func (l *rootCAKeyUsagePresent) Initialize() error { + return nil +} + +func (l *rootCAKeyUsagePresent) CheckApplies(c *x509.Certificate) bool { + return util.IsRootCA(c) +} + +func (l *rootCAKeyUsagePresent) Execute(c *x509.Certificate) *LintResult { + // Add actual lint here + if util.IsExtInCert(c, util.KeyUsageOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_root_ca_key_usage_present", + Description: "Root CA certificates MUST have Key Usage Extension Present", + Citation: "BRs: 7.1.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.RFC2459Date, + Lint: &rootCAKeyUsagePresent{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_exp_negative.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_exp_negative.go new file mode 100644 index 00000000..3fed5c21 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_exp_negative.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+import (
+	"crypto/rsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rsaExpNegative struct{}
+
+func (l *rsaExpNegative) Initialize() error {
+	return nil
+}
+
+func (l *rsaExpNegative) CheckApplies(c *x509.Certificate) bool {
+	_, ok := c.PublicKey.(*rsa.PublicKey)
+	return ok && c.PublicKeyAlgorithm == x509.RSA
+}
+
+func (l *rsaExpNegative) Execute(c *x509.Certificate) *LintResult {
+	key := c.PublicKey.(*rsa.PublicKey)
+	if key.E < 0 {
+		return &LintResult{Status: Error}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_rsa_exp_negative",
+		Description:   "RSA public key exponent MUST be positive",
+		Citation:      "awslabs certlint",
+		Source:        AWSLabs,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &rsaExpNegative{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_factors_smaller_than_752_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_factors_smaller_than_752_bits.go
new file mode 100644
index 00000000..a2742c68
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_factors_smaller_than_752_bits.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/**************************************************************************************************
+6.1.6. Public Key Parameters Generation and Quality Checking
+RSA: The CA SHALL confirm that the value of the public exponent is an odd number equal to 3 or more. Additionally, the public exponent SHOULD be in the range between 2^16+1 and 2^256-1. The modulus SHOULD also have the following characteristics: an odd number, not the power of a prime, and have no factors smaller than 752. [Citation: Section 5.3.3, NIST SP 800-89]. 
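+
+A sketch of the small-factor probe that the lint below delegates to
+util.PrimeNoSmallerThan752; the trial-division loop here is an assumption
+about that helper's behavior, not its actual implementation:
+
+	func noFactorBelow752(n *big.Int) bool {
+		m := new(big.Int)
+		for p := int64(2); p < 752; p++ {
+			if m.Mod(n, big.NewInt(p)).Sign() == 0 {
+				return false // p divides n, so n has a factor below 752
+			}
+		}
+		return true
+	}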
+**************************************************************************************************/ + +import ( + "crypto/rsa" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rsaModSmallFactor struct{} + +func (l *rsaModSmallFactor) Initialize() error { + return nil +} + +func (l *rsaModSmallFactor) CheckApplies(c *x509.Certificate) bool { + _, ok := c.PublicKey.(*rsa.PublicKey) + return ok && c.PublicKeyAlgorithm == x509.RSA +} + +func (l *rsaModSmallFactor) Execute(c *x509.Certificate) *LintResult { + key := c.PublicKey.(*rsa.PublicKey) + if util.PrimeNoSmallerThan752(key.N) { + return &LintResult{Status: Pass} + } + return &LintResult{Status: Warn} + +} + +func init() { + RegisterLint(&Lint{ + Name: "w_rsa_mod_factors_smaller_than_752", + Description: "RSA: Modulus SHOULD also have the following characteristics: no factors smaller than 752", + Citation: "BRs: 6.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV113Date, + Lint: &rsaModSmallFactor{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_less_than_2048_bits.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_less_than_2048_bits.go new file mode 100644 index 00000000..b8be14eb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_less_than_2048_bits.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+/************************************************
+BRs: 6.1.5 - For certificates valid after 31 Dec 2013, the minimum RSA modulus size is 2048 bits.
+************************************************/
+
+import (
+	"crypto/rsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rsaParsedTestsKeySize struct{}
+
+func (l *rsaParsedTestsKeySize) Initialize() error {
+	return nil
+}
+
+func (l *rsaParsedTestsKeySize) CheckApplies(c *x509.Certificate) bool {
+	_, ok := c.PublicKey.(*rsa.PublicKey)
+	return ok && c.PublicKeyAlgorithm == x509.RSA && c.NotAfter.After(util.NoRSA1024Date.Add(-1))
+}
+
+func (l *rsaParsedTestsKeySize) Execute(c *x509.Certificate) *LintResult {
+	key := c.PublicKey.(*rsa.PublicKey)
+	if key.N.BitLen() < 2048 {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_rsa_mod_less_than_2048_bits",
+		Description:   "For certificates valid after 31 Dec 2013, all certificates using RSA public key algorithm MUST have 2048 bits of modulus",
+		Citation:      "BRs: 6.1.5",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &rsaParsedTestsKeySize{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_not_odd.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_not_odd.go
new file mode 100644
index 00000000..b35077c4
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_mod_not_odd.go
@@ -0,0 +1,60 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+"BRs: 6.1.6"
+RSA: The CA SHALL confirm that the value of the public exponent is an odd number equal to 3 or more. Additionally, the public exponent SHOULD be in the range between 2^16+1 and 2^256-1. The modulus SHOULD also have the following characteristics: an odd number, not the power of a prime, and have no factors smaller than 752. [Citation: Section 5.3.3, NIST SP 800-89]. 
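+
+The oddness check in the lint below reduces the modulus modulo 2 with
+big.Int; an equivalent, cheaper formulation for illustration reads the
+lowest bit directly:
+
+	isOdd := key.N.Bit(0) == 1 // odd iff the least significant bit is set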
+*******************************************************************************************************/ + +import ( + "crypto/rsa" + "math/big" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rsaParsedTestsKeyModOdd struct{} + +func (l *rsaParsedTestsKeyModOdd) Initialize() error { + return nil +} + +func (l *rsaParsedTestsKeyModOdd) CheckApplies(c *x509.Certificate) bool { + _, ok := c.PublicKey.(*rsa.PublicKey) + return ok && c.PublicKeyAlgorithm == x509.RSA +} + +func (l *rsaParsedTestsKeyModOdd) Execute(c *x509.Certificate) *LintResult { + key := c.PublicKey.(*rsa.PublicKey) + z := big.NewInt(0) + if (z.Mod(key.N, big.NewInt(2)).Cmp(big.NewInt(1))) == 0 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Warn} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "w_rsa_mod_not_odd", + Description: "RSA: Modulus SHOULD also have the following characteristics: an odd number", + Citation: "BRs: 6.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV113Date, + Lint: &rsaParsedTestsKeyModOdd{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_no_public_key.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_no_public_key.go new file mode 100644 index 00000000..d6edae46 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_no_public_key.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "crypto/rsa" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rsaParsedPubKeyExist struct{} + +func (l *rsaParsedPubKeyExist) Initialize() error { + return nil +} + +func (l *rsaParsedPubKeyExist) CheckApplies(c *x509.Certificate) bool { + return c.PublicKeyAlgorithm == x509.RSA +} + +func (l *rsaParsedPubKeyExist) Execute(c *x509.Certificate) *LintResult { + _, ok := c.PublicKey.(*rsa.PublicKey) + if !ok { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_rsa_no_public_key", + Description: "The RSA public key should be present", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &rsaParsedPubKeyExist{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_not_in_range.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_not_in_range.go new file mode 100644 index 00000000..a28505e8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_not_in_range.go @@ -0,0 +1,64 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+"BRs: 6.1.6"
+RSA: The CA SHALL confirm that the value of the public exponent is an odd number equal to 3 or more. Additionally, the public exponent SHOULD be in the range between 2^16+1 and 2^256-1. The modulus SHOULD also have the following characteristics: an odd number, not the power of a prime, and have no factors smaller than 752. [Citation: Section 5.3.3, NIST SP 800-89].
+*******************************************************************************************************/
+
+import (
+	"crypto/rsa"
+	"math/big"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rsaParsedTestsExpInRange struct {
+	upperBound *big.Int
+}
+
+func (l *rsaParsedTestsExpInRange) Initialize() error {
+	l.upperBound = &big.Int{}
+	l.upperBound.Exp(big.NewInt(2), big.NewInt(256), nil)
+	return nil
+}
+
+func (l *rsaParsedTestsExpInRange) CheckApplies(c *x509.Certificate) bool {
+	_, ok := c.PublicKey.(*rsa.PublicKey)
+	return ok && c.PublicKeyAlgorithm == x509.RSA
+}
+
+func (l *rsaParsedTestsExpInRange) Execute(c *x509.Certificate) *LintResult {
+	key := c.PublicKey.(*rsa.PublicKey)
+	exponent := key.E
+	const lowerBound = 65536 // 2^16; the check requires exponent > 2^16, i.e. >= 2^16+1
+	if exponent > lowerBound && l.upperBound.Cmp(big.NewInt(int64(exponent))) == 1 {
+		return &LintResult{Status: Pass}
+	}
+	return &LintResult{Status: Warn}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_rsa_public_exponent_not_in_range",
+		Description:   "RSA: Public exponent SHOULD be in the range between 2^16 + 1 and 2^256 - 1",
+		Citation:      "BRs: 6.1.6",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABV113Date,
+		Lint:          &rsaParsedTestsExpInRange{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_not_odd.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_not_odd.go
new file mode 100644
index 00000000..8edf839e
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_not_odd.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+"BRs: 6.1.6"
+RSA: The CA SHALL confirm that the value of the public exponent is an odd number equal to 3 or more. Additionally, the public exponent SHOULD be in the range between 2^16+1 and 2^256-1. 
The modulus SHOULD also have the following characteristics: an odd number, not the power of a prime, and have no factors smaller than 752. [Citation: Section 5.3.3, NIST SP 800-89]. +*******************************************************************************************************/ + +import ( + "crypto/rsa" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type rsaParsedTestsKeyExpOdd struct{} + +func (l *rsaParsedTestsKeyExpOdd) Initialize() error { + return nil +} + +func (l *rsaParsedTestsKeyExpOdd) CheckApplies(c *x509.Certificate) bool { + _, ok := c.PublicKey.(*rsa.PublicKey) + return ok && c.PublicKeyAlgorithm == x509.RSA +} + +func (l *rsaParsedTestsKeyExpOdd) Execute(c *x509.Certificate) *LintResult { + key := c.PublicKey.(*rsa.PublicKey) + if key.E%2 == 1 { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_rsa_public_exponent_not_odd", + Description: "RSA: Value of public exponent is an odd number equal to 3 or more.", + Citation: "BRs: 6.1.6", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV113Date, + Lint: &rsaParsedTestsKeyExpOdd{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_too_small.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_too_small.go new file mode 100644 index 00000000..2cf52c15 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_rsa_public_exponent_too_small.go @@ -0,0 +1,58 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************************************************* +"BRs: 6.1.6" +RSA: The CA SHALL confirm that the value of the public exponent is an odd number equal to 3 or more. Additionally, the public exponent SHOULD be in the range between 2^16+1 and 2^256-1. The modulus SHOULD also have the following characteristics: an odd number, not the power of a prime, and have no factors smaller than 752. [Citation: Section 5.3.3, NIST SP 800-89]. 
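+
+Worked example (editor's note, not part of the quoted BR text): e = 1 or e = 2 fails the
+"3 or more" requirement above and this lint returns Error; e = 3 passes here even though it
+falls below the SHOULD range, which is policed separately by w_rsa_public_exponent_not_in_range.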
+*******************************************************************************************************/
+
+import (
+	"crypto/rsa"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type rsaParsedTestsExpBounds struct{}
+
+func (l *rsaParsedTestsExpBounds) Initialize() error {
+	return nil
+}
+
+func (l *rsaParsedTestsExpBounds) CheckApplies(c *x509.Certificate) bool {
+	_, ok := c.PublicKey.(*rsa.PublicKey)
+	return ok && c.PublicKeyAlgorithm == x509.RSA
+}
+
+func (l *rsaParsedTestsExpBounds) Execute(c *x509.Certificate) *LintResult {
+	key := c.PublicKey.(*rsa.PublicKey)
+	if key.E >= 3 { // BRs 6.1.6: the public exponent MUST be 3 or more; oddness is checked by e_rsa_public_exponent_not_odd
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_rsa_public_exponent_too_small",
+		Description:   "RSA: Value of public exponent is an odd number equal to 3 or more.",
+		Citation:      "BRs: 6.1.6",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABV113Date,
+		Lint:          &rsaParsedTestsExpBounds{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_bare_wildcard.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_bare_wildcard.go
new file mode 100644
index 00000000..2a99ec30
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_bare_wildcard.go
@@ -0,0 +1,52 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type brSANBareWildcard struct{}
+
+func (l *brSANBareWildcard) Initialize() error {
+	return nil
+}
+
+func (l *brSANBareWildcard) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.SubjectAlternateNameOID)
+}
+
+func (l *brSANBareWildcard) Execute(c *x509.Certificate) *LintResult {
+	for _, dns := range c.DNSNames {
+		if strings.HasSuffix(dns, "*") {
+			return &LintResult{Status: Error}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_san_bare_wildcard",
+		Description:   "A wildcard MUST be accompanied by other data to its right (Only checks DNSName)",
+		Citation:      "awslabs certlint",
+		Source:        AWSLabs,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &brSANBareWildcard{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_duplicate.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_duplicate.go
new file mode 100644
index 00000000..40577c9a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_duplicate.go
@@ -0,0 +1,57 @@
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANDNSDuplicate struct{} + +func (l *SANDNSDuplicate) Initialize() error { + return nil +} + +func (l *SANDNSDuplicate) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANDNSDuplicate) Execute(c *x509.Certificate) *LintResult { + checkedDNSNames := map[string]struct{}{} + for _, dns := range c.DNSNames { + normalizedDNSName := strings.ToLower(dns) + if _, isPresent := checkedDNSNames[normalizedDNSName]; isPresent { + return &LintResult{Status: Notice} + } + + checkedDNSNames[normalizedDNSName] = struct{}{} + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "n_san_dns_name_duplicate", + Description: "SAN DNSName contains duplicate values", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &SANDNSDuplicate{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_includes_null_char.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_includes_null_char.go new file mode 100644 index 00000000..6267d1fb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_includes_null_char.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANDNSNull struct{} + +func (l *SANDNSNull) Initialize() error { + return nil +} + +func (l *SANDNSNull) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANDNSNull) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.DNSNames { + for i := 0; i < len(dns); i++ { + if dns[i] == 0 { + return &LintResult{Status: Error} + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_san_dns_name_includes_null_char", + Description: "DNSName MUST NOT include a null character", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &SANDNSNull{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_onion_not_ev_cert.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_onion_not_ev_cert.go new file mode 100644 index 00000000..01f41452 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_onion_not_ev_cert.go @@ -0,0 +1,72 @@ +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "fmt" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +const ( + // onionTLD is a const for the TLD for Tor Hidden Services. + onionTLD = ".onion" +) + +type onionNotEV struct{} + +// Initialize for an onionNotEV linter is a NOP. +func (l *onionNotEV) Initialize() error { + return nil +} + +// CheckApplies returns true if the certificate is a subscriber certificate that +// contains a subject name ending in `.onion`. +func (l *onionNotEV) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.CertificateSubjInTLD(c, onionTLD) +} + +// Execute returns an Error LintResult if the certificate is not an EV +// certificate. CheckApplies has already verified the certificate contains one +// or more `.onion` subjects and so it must be an EV certificate. +func (l *onionNotEV) Execute(c *x509.Certificate) *LintResult { + /* + * Effective May 1, 2015, each CA SHALL revoke all unexpired Certificates with an + * Internal Name using onion as the right-most label in an entry in the + * subjectAltName Extension or commonName field unless such Certificate was + * issued in accordance with Appendix F of the EV Guidelines. 
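+	 *
+	 * Editor's illustrative example: a subscriber certificate for foo.onion whose
+	 * policy identifiers include no recognized EV policy OID (for instance, the
+	 * CABF EV OID 2.23.140.1.1) fails the util.IsEV check below and yields Error.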
+ */ + if !util.IsEV(c.PolicyIdentifiers) { + return &LintResult{ + Status: Error, + Details: fmt.Sprintf( + "certificate contains one or more %s subject domains but is not an EV certificate", + onionTLD), + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_san_dns_name_onion_not_ev_cert", + Description: "certificates with a .onion subject name must be issued in accordance with EV Guidelines", + Citation: "CABF Ballot 144", + Source: CABFBaselineRequirements, + EffectiveDate: util.OnionOnlyEVDate, + Lint: &onionNotEV{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_starts_with_period.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_starts_with_period.go new file mode 100644 index 00000000..c6b9fdb8 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_dns_name_starts_with_period.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SANDNSPeriod struct{} + +func (l *SANDNSPeriod) Initialize() error { + return nil +} + +func (l *SANDNSPeriod) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectAlternateNameOID) +} + +func (l *SANDNSPeriod) Execute(c *x509.Certificate) *LintResult { + for _, dns := range c.DNSNames { + if strings.HasPrefix(dns, ".") { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_san_dns_name_starts_with_period", + Description: "DNSName MUST NOT start with a period", + Citation: "awslabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &SANDNSPeriod{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_iana_pub_suffix_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_iana_pub_suffix_empty.go new file mode 100644 index 00000000..0687a986 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_iana_pub_suffix_empty.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+import (
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type pubSuffix struct{}
+
+func (l *pubSuffix) Initialize() error {
+	return nil
+}
+
+func (l *pubSuffix) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.SubjectAlternateNameOID)
+}
+
+func (l *pubSuffix) Execute(c *x509.Certificate) *LintResult {
+	parsedSANDNSNames := c.GetParsedDNSNames(false)
+	for i := range parsedSANDNSNames {
+		if parsedSANDNSNames[i].ParseError != nil {
+			if strings.HasSuffix(parsedSANDNSNames[i].ParseError.Error(), "is a suffix") {
+				return &LintResult{Status: Warn}
+			} else {
+				return &LintResult{Status: NA}
+			}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_san_iana_pub_suffix_empty",
+		Description:   "The domain SHOULD NOT have a bare public suffix",
+		Citation:      "awslabs certlint",
+		Source:        AWSLabs,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &pubSuffix{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_wildcard_not_first.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_wildcard_not_first.go
new file mode 100644
index 00000000..8265ea4b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_san_wildcard_not_first.go
@@ -0,0 +1,52 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type SANWildCardFirst struct{}
+
+func (l *SANWildCardFirst) Initialize() error {
+	return nil
+}
+
+func (l *SANWildCardFirst) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.SubjectAlternateNameOID)
+}
+
+func (l *SANWildCardFirst) Execute(c *x509.Certificate) *LintResult {
+	for _, dns := range c.DNSNames {
+		for i := 1; i < len(dns); i++ {
+			if dns[i] == '*' {
+				return &LintResult{Status: Error}
+			}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_san_wildcard_not_first",
+		Description:   "A wildcard MUST be in the first label of FQDN (ie not: www.*.com) (Only checks DNSName)",
+		Citation:      "awslabs certlint",
+		Source:        AWSLabs,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &SANWildCardFirst{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_serial_number_longer_than_20_octets.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_serial_number_longer_than_20_octets.go
new file mode 100644
index 00000000..d0997c33
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_serial_number_longer_than_20_octets.go
@@ -0,0 +1,66 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+RFC 5280: 4.1.2.2. Serial Number
+   The serial number MUST be a positive integer assigned by the CA to each
+   certificate. It MUST be unique for each certificate issued by a given CA
+   (i.e., the issuer name and serial number identify a unique certificate).
+   CAs MUST force the serialNumber to be a non-negative integer.
+
+   Given the uniqueness requirements above, serial numbers can be expected to
+   contain long integers. Certificate users MUST be able to handle serialNumber
+   values up to 20 octets. Conforming CAs MUST NOT use serialNumber values longer
+   than 20 octets.
+
+   Note: Non-conforming CAs may issue certificates with serial numbers that are
+   negative or zero. Certificate users SHOULD be prepared to gracefully handle
+   such certificates.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type serialNumberTooLong struct{}
+
+func (l *serialNumberTooLong) Initialize() error {
+	return nil
+}
+
+func (l *serialNumberTooLong) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *serialNumberTooLong) Execute(c *x509.Certificate) *LintResult {
+	// 20 octets = 160 bits; note that a positive serial of exactly 160 bits
+	// still needs 21 octets in DER (a leading 0x00), which this check misses.
+	if c.SerialNumber.BitLen() > 160 {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_serial_number_longer_than_20_octets",
+		Description:   "Certificates must not have a serial number longer than 20 octets",
+		Citation:      "RFC 5280: 4.1.2.2",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC3280Date,
+		Lint:          &serialNumberTooLong{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_serial_number_not_positive.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_serial_number_not_positive.go
new file mode 100644
index 00000000..6a56ad47
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_serial_number_not_positive.go
@@ -0,0 +1,66 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+4.1.2.2. Serial Number
+   The serial number MUST be a positive integer assigned by the CA to each
+   certificate. It MUST be unique for each certificate issued by a given CA
+   (i.e., the issuer name and serial number identify a unique certificate).
+   CAs MUST force the serialNumber to be a non-negative integer.
+
+   Given the uniqueness requirements above, serial numbers can be expected to
+   contain long integers. Certificate users MUST be able to handle serialNumber
+   values up to 20 octets. Conforming CAs MUST NOT use serialNumber values longer
+   than 20 octets.
+
+   Note: Non-conforming CAs may issue certificates with serial numbers that are
+   negative or zero. Certificate users SHOULD be prepared to gracefully handle
+   such certificates.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type SerialNumberNotPositive struct{}
+
+func (l *SerialNumberNotPositive) Initialize() error {
+	return nil
+}
+
+func (l *SerialNumberNotPositive) CheckApplies(cert *x509.Certificate) bool {
+	return true
+}
+
+func (l *SerialNumberNotPositive) Execute(cert *x509.Certificate) *LintResult {
+	if cert.SerialNumber.Sign() == -1 { // big.Int.Sign() returns -1 when the serial number is negative
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_serial_number_not_positive",
+		Description:   "Certificates must have a positive serial number",
+		Citation:      "RFC 5280: 4.1.2.2",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC3280Date,
+		Lint:          &SerialNumberNotPositive{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_signature_algorithm_not_supported.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_signature_algorithm_not_supported.go
new file mode 100644
index 00000000..8563ca70
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_signature_algorithm_not_supported.go
@@ -0,0 +1,50 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type signatureAlgorithmNotSupported struct{}
+
+func (l *signatureAlgorithmNotSupported) Initialize() error {
+	return nil
+}
+
+func (l *signatureAlgorithmNotSupported) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *signatureAlgorithmNotSupported) Execute(c *x509.Certificate) *LintResult {
+	switch c.SignatureAlgorithm {
+	case x509.SHA1WithRSA, x509.SHA256WithRSA, x509.SHA384WithRSA, x509.SHA512WithRSA,
+		x509.DSAWithSHA1, x509.DSAWithSHA256,
+		x509.ECDSAWithSHA1, x509.ECDSAWithSHA256, x509.ECDSAWithSHA384, x509.ECDSAWithSHA512:
+		return &LintResult{Status: Pass}
+	default:
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_signature_algorithm_not_supported",
+		Description:   "Certificates MUST meet the following requirements for algorithm Source: SHA-1*, SHA-256, SHA-384, SHA-512",
+		Citation:      "BRs: 6.1.5",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &signatureAlgorithmNotSupported{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_spki_rsa_encryption_parameter_not_null.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_spki_rsa_encryption_parameter_not_null.go
new file mode 100644
index 00000000..15637122
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_spki_rsa_encryption_parameter_not_null.go
@@ -0,0 +1,73 @@
+package lints
+
+/*
+ * ZLint Copyright 2019 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+"RFC5280: RFC 4055, Section 1.2"
+RSA: Encoded algorithm identifier MUST have NULL parameters.
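+
+Illustrative example (editor's note, not part of the quoted text): a conforming RSA SPKI
+encodes its AlgorithmIdentifier as SEQUENCE { OID 1.2.840.113549.1.1.1, NULL }; an encoding
+that omits the parameters field entirely is what this lint reports as an Error.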
+*******************************************************************************************************/ + +import ( + "fmt" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +type rsaSPKIEncryptionParamNotNULL struct{} + +func (l *rsaSPKIEncryptionParamNotNULL) Initialize() error { + return nil +} + +func (l *rsaSPKIEncryptionParamNotNULL) CheckApplies(c *x509.Certificate) bool { + // explicitly check for util.OidRSAEncryption, as RSA-PSS or RSA-OAEP certificates might be classified with c.PublicKeyAlgorithm = RSA + return c.PublicKeyAlgorithmOID.Equal(util.OidRSAEncryption) +} + +func (l *rsaSPKIEncryptionParamNotNULL) Execute(c *x509.Certificate) *LintResult { + input := cryptobyte.String(c.RawSubjectPublicKeyInfo) + + var publicKeyInfo cryptobyte.String + if !input.ReadASN1(&publicKeyInfo, cryptobyte_asn1.SEQUENCE) { + return &LintResult{Status: Fatal, Details: "error reading pkixPublicKey"} + } + + var algorithm cryptobyte.String + var tag cryptobyte_asn1.Tag + // use ReadAnyElement to preserve tag and length octets + if !publicKeyInfo.ReadAnyASN1Element(&algorithm, &tag) { + return &LintResult{Status: Fatal, Details: "error reading pkixPublicKey"} + } + + if err := util.CheckAlgorithmIDParamNotNULL(algorithm, util.OidRSAEncryption); err != nil { + return &LintResult{Status: Error, Details: fmt.Sprintf("certificate pkixPublicKey %s", err.Error())} + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_spki_rsa_encryption_parameter_not_null", + Description: "RSA: Encoded public key algorithm identifier MUST have NULL parameters", + Citation: "RFC 4055, Section 1.2", + Source: RFC5280, // RFC4055 is referenced in RFC5280, Section 1 + EffectiveDate: util.RFC5280Date, + Lint: &rsaSPKIEncryptionParamNotNULL{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_does_not_contain_issuing_ca_url.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_does_not_contain_issuing_ca_url.go new file mode 100644 index 00000000..0b12ce85 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_does_not_contain_issuing_ca_url.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/*********************************************** +CAB 7.1.2.2c +With the exception of stapling, which is noted below, this extension MUST be present. It MUST NOT be +marked critical, and it MUST contain the HTTP URL of the Issuing CA’s OCSP responder (accessMethod += 1.3.6.1.5.5.7.48.1). It SHOULD also contain the HTTP URL of the Issuing CA’s certificate +(accessMethod = 1.3.6.1.5.5.7.48.2). 
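+
+Illustrative example (editor's note, not part of the quoted text): a Subordinate CA
+certificate whose AIA carries only an OCSP accessDescription and no http:// caIssuers URL
+draws a Warn from this lint.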
+************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCaIssuerUrl struct{} + +func (l *subCaIssuerUrl) Initialize() error { + return nil +} + +func (l *subCaIssuerUrl) CheckApplies(c *x509.Certificate) bool { + return util.IsCACert(c) && !util.IsRootCA(c) +} + +func (l *subCaIssuerUrl) Execute(c *x509.Certificate) *LintResult { + for _, url := range c.IssuingCertificateURL { + if strings.HasPrefix(url, "http://") { + return &LintResult{Status: Pass} + } + } + return &LintResult{Status: Warn} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_sub_ca_aia_does_not_contain_issuing_ca_url", + Description: "Subordinate CA Certificate: authorityInformationAccess SHOULD also contain the HTTP URL of the Issuing CA's certificate.", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCaIssuerUrl{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_does_not_contain_ocsp_url.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_does_not_contain_ocsp_url.go new file mode 100644 index 00000000..398e7145 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_does_not_contain_ocsp_url.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/*********************************************** +CAB 7.1.2.2c +With the exception of stapling, which is noted below, this extension MUST be present. It MUST NOT be +marked critical, and it MUST contain the HTTP URL of the Issuing CA’s OCSP responder (accessMethod += 1.3.6.1.5.5.7.48.1). It SHOULD also contain the HTTP URL of the Issuing CA’s certificate +(accessMethod = 1.3.6.1.5.5.7.48.2). 
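+
+Illustrative example (editor's note, not part of the quoted text): a Subordinate CA
+certificate whose AIA lacks an http:// OCSP URI (for instance, one listing only a caIssuers
+entry) is flagged as Error by this lint.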
+************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCaOcspUrl struct{} + +func (l *subCaOcspUrl) Initialize() error { + return nil +} + +func (l *subCaOcspUrl) CheckApplies(c *x509.Certificate) bool { + return util.IsCACert(c) && !util.IsRootCA(c) +} + +func (l *subCaOcspUrl) Execute(c *x509.Certificate) *LintResult { + for _, url := range c.OCSPServer { + if strings.HasPrefix(url, "http://") { + return &LintResult{Status: Pass} + } + } + return &LintResult{Status: Error} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_aia_does_not_contain_ocsp_url", + Description: "Subordinate CA certificates authorityInformationAccess extension must contain the HTTP URL of the issuing CA’s OCSP responder", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCaOcspUrl{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_marked_critical.go new file mode 100644 index 00000000..0552f41e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_marked_critical.go @@ -0,0 +1,50 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCaAIAMarkedCritical struct{} + +func (l *subCaAIAMarkedCritical) Initialize() error { + return nil +} + +func (l *subCaAIAMarkedCritical) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) && util.IsExtInCert(c, util.AiaOID) +} + +func (l *subCaAIAMarkedCritical) Execute(c *x509.Certificate) *LintResult { + e := util.GetExtFromCert(c, util.AiaOID) + if e.Critical { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_aia_marked_critical", + Description: "Subordinate CA Certificate: authorityInformationAccess MUST NOT be marked critical", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.ZeroDate, + Lint: &subCaAIAMarkedCritical{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_missing.go new file mode 100644 index 00000000..38753f71 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_aia_missing.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/*********************************************** +CAB 7.1.2.2c +With the exception of stapling, which is noted below, this extension MUST be present. It MUST NOT be +marked critical, and it MUST contain the HTTP URL of the Issuing CA’s OCSP responder (accessMethod += 1.3.6.1.5.5.7.48.1). It SHOULD also contain the HTTP URL of the Issuing CA’s certificate +(accessMethod = 1.3.6.1.5.5.7.48.2). +************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type caAiaMissing struct{} + +func (l *caAiaMissing) Initialize() error { + return nil +} + +func (l *caAiaMissing) CheckApplies(c *x509.Certificate) bool { + return util.IsCACert(c) && !util.IsRootCA(c) +} + +func (l *caAiaMissing) Execute(c *x509.Certificate) *LintResult { + if util.IsExtInCert(c, util.AiaOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_aia_missing", + Description: "Subordinate CA Certificate: authorityInformationAccess MUST be present, with the exception of stapling.", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &caAiaMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_certificate_policies_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_certificate_policies_marked_critical.go new file mode 100644 index 00000000..cfc27dc2 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_certificate_policies_marked_critical.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.2a certificatePolicies +This extension MUST be present and SHOULD NOT be marked critical. 
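+
+Illustrative example (editor's note, not part of the quoted text): a Subordinate CA
+certificate whose certificatePolicies extension is marked critical draws a Warn here; the
+presence requirement is enforced separately by e_sub_ca_certificate_policies_missing.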
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCACertPolicyCrit struct{} + +func (l *subCACertPolicyCrit) Initialize() error { + return nil +} + +func (l *subCACertPolicyCrit) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) && util.IsExtInCert(c, util.CertPolicyOID) +} + +func (l *subCACertPolicyCrit) Execute(c *x509.Certificate) *LintResult { + if e := util.GetExtFromCert(c, util.CertPolicyOID); e.Critical { + return &LintResult{Status: Warn} + } else { + return &LintResult{Status: Pass} + } + +} + +func init() { + RegisterLint(&Lint{ + Name: "w_sub_ca_certificate_policies_marked_critical", + Description: "Subordinate CA certificates certificatePolicies extension should not be marked as critical", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCACertPolicyCrit{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_certificate_policies_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_certificate_policies_missing.go new file mode 100644 index 00000000..63e0febd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_certificate_policies_missing.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.2a certificatePolicies +This extension MUST be present and SHOULD NOT be marked critical. 
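+
+Illustrative example (editor's note, not part of the quoted text): a Subordinate CA
+certificate that omits the certificatePolicies extension entirely is flagged as Error by
+this lint.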
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCACertPolicyMissing struct{} + +func (l *subCACertPolicyMissing) Initialize() error { + return nil +} + +func (l *subCACertPolicyMissing) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) +} + +func (l *subCACertPolicyMissing) Execute(c *x509.Certificate) *LintResult { + if util.IsExtInCert(c, util.CertPolicyOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_certificate_policies_missing", + Description: "Subordinate CA certificates must have a certificatePolicies extension", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCACertPolicyMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_does_not_contain_url.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_does_not_contain_url.go new file mode 100644 index 00000000..846741d4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_does_not_contain_url.go @@ -0,0 +1,58 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.2b cRLDistributionPoints +This extension MUST be present and MUST NOT be marked critical. +It MUST contain the HTTP URL of the CA’s CRL service. 
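+
+Illustrative example (editor's note, not part of the quoted text): a cRLDistributionPoints
+extension whose only distributionPoint URI uses the ldap:// scheme fails this lint with
+Error, while one containing a URL such as http://crl.example.com/ca.crl passes.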
+************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCACRLDistNoUrl struct{} + +func (l *subCACRLDistNoUrl) Initialize() error { + return nil +} + +func (l *subCACRLDistNoUrl) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) && util.IsExtInCert(c, util.CrlDistOID) +} + +func (l *subCACRLDistNoUrl) Execute(c *x509.Certificate) *LintResult { + for _, s := range c.CRLDistributionPoints { + if strings.HasPrefix(s, "http://") { + return &LintResult{Status: Pass} + } + } + return &LintResult{Status: Error} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_crl_distribution_points_does_not_contain_url", + Description: "Subordinate CA Certificate: cRLDistributionPoints MUST contain the HTTP URL of the CA's CRL service.", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCACRLDistNoUrl{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_marked_critical.go new file mode 100644 index 00000000..a8b934fc --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_marked_critical.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.2b cRLDistributionPoints +This extension MUST be present and MUST NOT be marked critical. +It MUST contain the HTTP URL of the CA’s CRL service. 
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCACRLDistCrit struct{} + +func (l *subCACRLDistCrit) Initialize() error { + return nil +} + +func (l *subCACRLDistCrit) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) && util.IsExtInCert(c, util.CrlDistOID) +} + +func (l *subCACRLDistCrit) Execute(c *x509.Certificate) *LintResult { + if e := util.GetExtFromCert(c, util.CrlDistOID); e.Critical { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_crl_distribution_points_marked_critical", + Description: "Subordinate CA Certificate: cRLDistributionPoints MUST be present and MUST NOT be marked critical.", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCACRLDistCrit{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_missing.go new file mode 100644 index 00000000..5a494240 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_crl_distribution_points_missing.go @@ -0,0 +1,55 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.2b cRLDistributionPoints +This extension MUST be present and MUST NOT be marked critical. +It MUST contain the HTTP URL of the CA’s CRL service. 
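+
+Illustrative example (editor's note, not part of the quoted text): a Subordinate CA
+certificate with no cRLDistributionPoints extension at all is flagged as Error here;
+criticality and URL contents are covered by the two companion lints above.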
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCACRLDistMissing struct{} + +func (l *subCACRLDistMissing) Initialize() error { + return nil +} + +func (l *subCACRLDistMissing) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) +} + +func (l *subCACRLDistMissing) Execute(c *x509.Certificate) *LintResult { + if util.IsExtInCert(c, util.CrlDistOID) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Error} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_ca_crl_distribution_points_missing", + Description: "Subordinate CA Certificate: cRLDistributionPoints MUST be present and MUST NOT be marked critical.", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCACRLDistMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_critical.go new file mode 100644 index 00000000..852b4915 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_critical.go @@ -0,0 +1,57 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.2.2g extkeyUsage (optional) +For Subordinate CA Certificates to be Technically constrained in line with section 7.1.5, then either the value +id‐kp‐serverAuth [RFC5280] or id‐kp‐clientAuth [RFC5280] or both values MUST be present**. +Other values MAY be present. +If present, this extension SHOULD be marked non‐critical. 
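+
+Illustrative example (editor's note, not part of the quoted text): a technically constrained
+Subordinate CA whose extKeyUsage extension (say, { id-kp-serverAuth }) is marked critical
+draws a Warn from this lint.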
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCAEKUCrit struct{} + +func (l *subCAEKUCrit) Initialize() error { + return nil +} + +func (l *subCAEKUCrit) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) && util.IsExtInCert(c, util.EkuSynOid) +} + +func (l *subCAEKUCrit) Execute(c *x509.Certificate) *LintResult { + if e := util.GetExtFromCert(c, util.EkuSynOid); e.Critical { + return &LintResult{Status: Warn} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "w_sub_ca_eku_critical", + Description: "Subordinate CA certificate extkeyUsage extension should be marked non-critical if present", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV116Date, + Lint: &subCAEKUCrit{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_missing.go new file mode 100644 index 00000000..a150ab99 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_missing.go @@ -0,0 +1,49 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCAEKUMissing struct{} + +func (l *subCAEKUMissing) Initialize() error { + return nil +} + +func (l *subCAEKUMissing) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) +} + +func (l *subCAEKUMissing) Execute(c *x509.Certificate) *LintResult { + if util.IsExtInCert(c, util.EkuSynOid) { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Notice} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "n_sub_ca_eku_missing", + Description: "To be considered Technically Constrained, the Subordinate CA certificate MUST have extkeyUsage extension", + Citation: "BRs: 7.1.5", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCAEKUMissing{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_valid_fields.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_valid_fields.go new file mode 100644 index 00000000..47182a59 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_eku_valid_fields.go @@ -0,0 +1,56 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCAEKUValidFields struct{} + +func (l *subCAEKUValidFields) Initialize() error { + return nil +} + +func (l *subCAEKUValidFields) CheckApplies(c *x509.Certificate) bool { + return util.IsSubCA(c) && util.IsExtInCert(c, util.EkuSynOid) +} + +func (l *subCAEKUValidFields) Execute(c *x509.Certificate) *LintResult { + validFieldsPresent := false + for _, ekuValue := range c.ExtKeyUsage { + if ekuValue == x509.ExtKeyUsageServerAuth || + ekuValue == x509.ExtKeyUsageClientAuth { + validFieldsPresent = true + } + } + if validFieldsPresent { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Notice} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "n_sub_ca_eku_not_technically_constrained", + Description: "Subordinate CA extkeyUsage, either id-kp-serverAuth or id-kp-clientAuth or both values MUST be present to be technically constrained.", + Citation: "BRs: 7.1.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABV116Date, + Lint: &subCAEKUValidFields{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_name_constraints_not_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_name_constraints_not_critical.go new file mode 100644 index 00000000..f8c5b55e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_ca_name_constraints_not_critical.go @@ -0,0 +1,53 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+/************************************************
+BRs: 7.1.2.2f nameConstraints (optional)
+If present, this extension SHOULD be marked critical.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type SubCANameConstraintsNotCritical struct{}
+
+func (l *SubCANameConstraintsNotCritical) Initialize() error {
+	return nil
+}
+
+func (l *SubCANameConstraintsNotCritical) CheckApplies(cert *x509.Certificate) bool {
+	return util.IsSubCA(cert) && util.IsExtInCert(cert, util.NameConstOID)
+}
+
+func (l *SubCANameConstraintsNotCritical) Execute(cert *x509.Certificate) *LintResult {
+	if nc := util.GetExtFromCert(cert, util.NameConstOID); nc.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Warn}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_sub_ca_name_constraints_not_critical",
+		Description:   "Subordinate CA Certificate: NameConstraints if present, SHOULD be marked critical.",
+		Citation:      "BRs: 7.1.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABV102Date,
+		Lint:          &SubCANameConstraintsNotCritical{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_does_not_contain_issuing_ca_url.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_does_not_contain_issuing_ca_url.go
new file mode 100644
index 00000000..a35025e6
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_does_not_contain_issuing_ca_url.go
@@ -0,0 +1,59 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************************************
+BRs: 7.1.2.3
+authorityInformationAccess
+With the exception of stapling, which is noted below, this extension MUST be present. It MUST NOT be
+marked critical, and it MUST contain the HTTP URL of the Issuing CA’s OCSP responder (accessMethod
+= 1.3.6.1.5.5.7.48.1). It SHOULD also contain the HTTP URL of the Issuing CA’s certificate
+(accessMethod = 1.3.6.1.5.5.7.48.2). See Section 13.2.1 for details.
+*************************************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertIssuerUrl struct{} + +func (l *subCertIssuerUrl) Initialize() error { + return nil +} + +func (l *subCertIssuerUrl) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertIssuerUrl) Execute(c *x509.Certificate) *LintResult { + for _, url := range c.IssuingCertificateURL { + if strings.HasPrefix(url, "http://") { + return &LintResult{Status: Pass} + } + } + return &LintResult{Status: Warn} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_sub_cert_aia_does_not_contain_issuing_ca_url", + Description: "Subscriber certificates authorityInformationAccess extension should contain the HTTP URL of the issuing CA’s certificate", + Citation: "BRs: 7.1.2.3", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCertIssuerUrl{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_does_not_contain_ocsp_url.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_does_not_contain_ocsp_url.go new file mode 100644 index 00000000..c154e6ee --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_does_not_contain_ocsp_url.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************** +BRs: 7.1.2.3 +authorityInformationAccess +With the exception of stapling, which is noted below, this extension MUST be present. It MUST NOT be +marked critical, and it MUST contain the HTTP URL of the Issuing CA’s OCSP responder (accessMethod += 1.3.6.1.5.5.7.48.1). It SHOULD also contain the HTTP URL of the Issuing CA’s certificate +(accessMethod = 1.3.6.1.5.5.7.48.2). See Section 13.2.1 for details. 
+***************************************************************************************************/
+
+import (
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertOcspUrl struct{}
+
+func (l *subCertOcspUrl) Initialize() error {
+	return nil
+}
+
+func (l *subCertOcspUrl) CheckApplies(c *x509.Certificate) bool {
+	return !util.IsCACert(c)
+}
+
+func (l *subCertOcspUrl) Execute(c *x509.Certificate) *LintResult {
+	for _, url := range c.OCSPServer {
+		if strings.HasPrefix(url, "http://") {
+			return &LintResult{Status: Pass}
+		}
+	}
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_aia_does_not_contain_ocsp_url",
+		Description:   "Subscriber Certificate: authorityInformationAccess MUST contain the HTTP URL of the Issuing CA's OCSP responder.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertOcspUrl{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_marked_critical.go
new file mode 100644
index 00000000..97d3e624
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_marked_critical.go
@@ -0,0 +1,50 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertAiaMarkedCritical struct{} + +func (l *subCertAiaMarkedCritical) Initialize() error { + return nil +} + +func (l *subCertAiaMarkedCritical) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) && util.IsExtInCert(c, util.AiaOID) +} + +func (l *subCertAiaMarkedCritical) Execute(c *x509.Certificate) *LintResult { + e := util.GetExtFromCert(c, util.AiaOID) + if e.Critical { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_aia_marked_critical", + Description: "Subscriber Certificate: authorityInformationAccess MUST NOT be marked critical", + Citation: "BRs: 7.1.2.3", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &subCertAiaMarkedCritical{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_missing.go new file mode 100644 index 00000000..738de356 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_aia_missing.go @@ -0,0 +1,58 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************** +BRs: 7.1.2.3 +authorityInformationAccess +With the exception of stapling, which is noted below, this extension MUST be present. It MUST NOT be +marked critical, and it MUST contain the HTTP URL of the Issuing CA’s OCSP responder (accessMethod += 1.3.6.1.5.5.7.48.1). It SHOULD also contain the HTTP URL of the Issuing CA’s certificate +(accessMethod = 1.3.6.1.5.5.7.48.2). See Section 13.2.1 for details. 
+***************************************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertAiaMissing struct{}
+
+func (l *subCertAiaMissing) Initialize() error {
+	return nil
+}
+
+func (l *subCertAiaMissing) CheckApplies(c *x509.Certificate) bool {
+	return !util.IsCACert(c)
+}
+
+func (l *subCertAiaMissing) Execute(c *x509.Certificate) *LintResult {
+	if util.IsExtInCert(c, util.AiaOID) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_aia_missing",
+		Description:   "Subscriber Certificate: authorityInformationAccess MUST be present, with the exception of stapling.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertAiaMissing{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_cert_policy_empty.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_cert_policy_empty.go
new file mode 100644
index 00000000..93d40195
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_cert_policy_empty.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/********************************************************************************************************************
+BRs: 7.1.6.4
+Subscriber Certificates
+A Certificate issued to a Subscriber MUST contain one or more policy identifier(s), defined by the Issuing CA, in
+the Certificate’s certificatePolicies extension that indicates adherence to and compliance with these Requirements.
+CAs complying with these Requirements MAY also assert one of the reserved policy OIDs in such Certificates.
+*********************************************************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertPolicyEmpty struct{}
+
+func (l *subCertPolicyEmpty) Initialize() error {
+	return nil
+}
+
+func (l *subCertPolicyEmpty) CheckApplies(c *x509.Certificate) bool {
+	return !util.IsCACert(c)
+}
+
+func (l *subCertPolicyEmpty) Execute(c *x509.Certificate) *LintResult {
+	if util.IsExtInCert(c, util.CertPolicyOID) && c.PolicyIdentifiers != nil {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_cert_policy_empty",
+		Description:   "Subscriber certificates must contain at least one policy identifier that indicates adherence to CAB standards",
+		Citation:      "BRs: 7.1.6.4",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertPolicyEmpty{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_certificate_policies_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_certificate_policies_marked_critical.go
new file mode 100644
index 00000000..3790dcd1
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_certificate_policies_marked_critical.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/******************************************************************************
+BRs: 7.1.2.3
+certificatePolicies
+This extension MUST be present and SHOULD NOT be marked critical.
+******************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertPolicyCrit struct{}
+
+func (l *subCertPolicyCrit) Initialize() error {
+	return nil
+}
+
+func (l *subCertPolicyCrit) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.CertPolicyOID)
+}
+
+func (l *subCertPolicyCrit) Execute(c *x509.Certificate) *LintResult {
+	e := util.GetExtFromCert(c, util.CertPolicyOID)
+	if !e.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Warn}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_sub_cert_certificate_policies_marked_critical",
+		Description:   "Subscriber Certificate: certificatePolicies MUST be present and SHOULD NOT be marked critical.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertPolicyCrit{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_certificate_policies_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_certificate_policies_missing.go
new file mode 100644
index 00000000..9feb1030
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_certificate_policies_missing.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/******************************************************************************
+BRs: 7.1.2.3
+certificatePolicies
+This extension MUST be present and SHOULD NOT be marked critical.
+******************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertPolicy struct{}
+
+func (l *subCertPolicy) Initialize() error {
+	return nil
+}
+
+func (l *subCertPolicy) CheckApplies(c *x509.Certificate) bool {
+	return !util.IsCACert(c)
+}
+
+func (l *subCertPolicy) Execute(c *x509.Certificate) *LintResult {
+	if util.IsExtInCert(c, util.CertPolicyOID) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_certificate_policies_missing",
+		Description:   "Subscriber Certificate: certificatePolicies MUST be present and SHOULD NOT be marked critical.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertPolicy{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_country_name_must_appear.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_country_name_must_appear.go
new file mode 100644
index 00000000..65eb40b3
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_country_name_must_appear.go
@@ -0,0 +1,50 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
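As a concrete illustration of the subject-field checks in this family, here is a small sketch with hypothetical test data of input that the country-name lint defined just below would reject (it assumes a zero-value zcrypto certificate counts as a subscriber certificate for util.IsSubscriberCert):

c := &x509.Certificate{}
c.Subject.Organization = []string{"Example Org"} // organizationName present
// subject:countryName left empty, so BRs 7.1.4.2.2 is violated
l := &subCertCountryNameMustAppear{}
if l.CheckApplies(c) {
	r := l.Execute(c) // expected: r.Status == Error
	_ = r
}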
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertCountryNameMustAppear struct{}
+
+func (l *subCertCountryNameMustAppear) Initialize() error {
+	return nil
+}
+
+func (l *subCertCountryNameMustAppear) CheckApplies(c *x509.Certificate) bool {
+	return util.IsSubscriberCert(c)
+}
+
+func (l *subCertCountryNameMustAppear) Execute(c *x509.Certificate) *LintResult {
+	if len(c.Subject.Organization) > 0 || len(c.Subject.GivenName) > 0 || len(c.Subject.Surname) > 0 {
+		if len(c.Subject.Country) == 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_country_name_must_appear",
+		Description:   "Subscriber Certificate: subject:countryName MUST appear if the subject:organizationName field, subject:givenName field, or subject:surname fields are present.",
+		Citation:      "BRs: 7.1.4.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABGivenNameDate,
+		Lint:          &subCertCountryNameMustAppear{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_crl_distribution_points_does_not_contain_url.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_crl_distribution_points_does_not_contain_url.go
new file mode 100644
index 00000000..ecab5acc
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_crl_distribution_points_does_not_contain_url.go
@@ -0,0 +1,60 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+BRs: 7.1.2.3
+cRLDistributionPoints
+This extension MAY be present. If present, it MUST NOT be marked critical, and it MUST contain the HTTP
+URL of the CA’s CRL service.
+*******************************************************************************************************/
+
+import (
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCRLDistNoURL struct{}
+
+func (l *subCRLDistNoURL) Initialize() error {
+	return nil
+}
+
+func (l *subCRLDistNoURL) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.CrlDistOID)
+}
+
+func (l *subCRLDistNoURL) Execute(c *x509.Certificate) *LintResult {
+	for _, s := range c.CRLDistributionPoints {
+		if strings.HasPrefix(s, "http://") {
+			return &LintResult{Status: Pass}
+		}
+	}
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_crl_distribution_points_does_not_contain_url",
+		Description:   "Subscriber certificate cRLDistributionPoints extension must contain the HTTP URL of the CA’s CRL service",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCRLDistNoURL{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_crl_distribution_points_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_crl_distribution_points_marked_critical.go
new file mode 100644
index 00000000..f9fc620a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_crl_distribution_points_marked_critical.go
@@ -0,0 +1,58 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+BRs: 7.1.2.3
+cRLDistributionPoints
+This extension MAY be present. If present, it MUST NOT be marked critical, and it MUST contain the HTTP
+URL of the CA’s CRL service.
+*******************************************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCrlDistCrit struct{}
+
+func (l *subCrlDistCrit) Initialize() error {
+	return nil
+}
+
+func (l *subCrlDistCrit) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.CrlDistOID)
+}
+
+func (l *subCrlDistCrit) Execute(c *x509.Certificate) *LintResult {
+	e := util.GetExtFromCert(c, util.CrlDistOID)
+	if !e.Critical {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_crl_distribution_points_marked_critical",
+		Description:   "Subscriber Certificate: cRLDistributionPoints MUST NOT be marked critical, and MUST contain the HTTP URL of the CA's CRL service.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCrlDistCrit{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_extra_values.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_extra_values.go
new file mode 100644
index 00000000..d2b263dd
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_extra_values.go
@@ -0,0 +1,64 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+BRs: 7.1.2.3
+extKeyUsage (required)
+Either the value id-kp-serverAuth [RFC5280] or id-kp-clientAuth [RFC5280] or both values MUST be present. id-kp-emailProtection [RFC5280] MAY be present. Other values SHOULD NOT be present.
+*******************************************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subExtKeyUsageLegalUsage struct{}
+
+func (l *subExtKeyUsageLegalUsage) Initialize() error {
+	return nil
+}
+
+func (l *subExtKeyUsageLegalUsage) CheckApplies(c *x509.Certificate) bool {
+	return util.IsSubscriberCert(c) && c.ExtKeyUsage != nil
+}
+
+func (l *subExtKeyUsageLegalUsage) Execute(c *x509.Certificate) *LintResult {
+	for _, kp := range c.ExtKeyUsage {
+		if kp == x509.ExtKeyUsageServerAuth ||
+			kp == x509.ExtKeyUsageClientAuth ||
+			kp == x509.ExtKeyUsageEmailProtection {
+			// Any of these three usages is permitted; keep scanning
+			continue
+		} else {
+			// A disallowed usage was found; report it and stop
			return &LintResult{Status: Warn}
+		}
+	}
+	// No disallowed usage was found; pass
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "w_sub_cert_eku_extra_values",
+		Description:   "Subscriber Certificate: extKeyUsage values other than id-kp-serverAuth, id-kp-clientAuth, and id-kp-emailProtection SHOULD NOT be present.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subExtKeyUsageLegalUsage{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_missing.go
new file mode 100644
index 00000000..ea8a42e0
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_missing.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+BRs: 7.1.2.3
+extKeyUsage (required)
+Either the value id-kp-serverAuth [RFC5280] or id-kp-clientAuth [RFC5280] or both values MUST be present. id-kp-emailProtection [RFC5280] MAY be present. Other values SHOULD NOT be present.
+*******************************************************************************************************/
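The extra-values lint above tolerates exactly three usages. A short sketch with hypothetical certificate data of an EKU list it would flag (this assumes zcrypto's x509 exposes ExtKeyUsageCodeSigning like the standard library):

c := &x509.Certificate{
	ExtKeyUsage: []x509.ExtKeyUsage{
		x509.ExtKeyUsageServerAuth,
		x509.ExtKeyUsageCodeSigning, // not one of the three tolerated values
	},
}
l := &subExtKeyUsageLegalUsage{}
if l.CheckApplies(c) { // subscriber cert with a non-nil EKU list
	r := l.Execute(c) // expected: r.Status == Warn
	_ = r
}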
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subExtKeyUsage struct{}
+
+func (l *subExtKeyUsage) Initialize() error {
+	return nil
+}
+
+func (l *subExtKeyUsage) CheckApplies(c *x509.Certificate) bool {
+	return !util.IsCACert(c)
+}
+
+func (l *subExtKeyUsage) Execute(c *x509.Certificate) *LintResult {
+	if util.IsExtInCert(c, util.EkuSynOid) {
+		return &LintResult{Status: Pass}
+	} else {
+		return &LintResult{Status: Error}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_eku_missing",
+		Description:   "Subscriber certificates MUST have the extended key usage extension present",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subExtKeyUsage{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_server_auth_client_auth_missing.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_server_auth_client_auth_missing.go
new file mode 100644
index 00000000..958d056f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_eku_server_auth_client_auth_missing.go
@@ -0,0 +1,59 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*******************************************************************************************************
+BRs: 7.1.2.3
+extKeyUsage (required)
+Either the value id-kp-serverAuth [RFC5280] or id-kp-clientAuth [RFC5280] or both values MUST be present. id-kp-emailProtection [RFC5280] MAY be present. Other values SHOULD NOT be present.
+*******************************************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subExtKeyUsageClientOrServer struct{}
+
+func (l *subExtKeyUsageClientOrServer) Initialize() error {
+	return nil
+}
+
+func (l *subExtKeyUsageClientOrServer) CheckApplies(c *x509.Certificate) bool {
+	return c.ExtKeyUsage != nil
+}
+
+func (l *subExtKeyUsageClientOrServer) Execute(c *x509.Certificate) *LintResult {
+	for _, kp := range c.ExtKeyUsage {
+		if kp == x509.ExtKeyUsageServerAuth || kp == x509.ExtKeyUsageClientAuth {
+			// If we find either of ServerAuth or ClientAuth, Pass
+			return &LintResult{Status: Pass}
+		}
+	}
+	// If neither was found, Error
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_eku_server_auth_client_auth_missing",
+		Description:   "Subscriber certificates MUST have either id-kp-serverAuth or id-kp-clientAuth or both present in extKeyUsage",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subExtKeyUsageClientOrServer{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_gn_sn_contains_policy.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_gn_sn_contains_policy.go
new file mode 100644
index 00000000..8b206243
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_gn_sn_contains_policy.go
@@ -0,0 +1,51 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
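Zooming out from individual checks: once their init functions have called RegisterLint, all of these lints can be exercised together through zlint's top-level entry point. A rough end-to-end sketch, assuming this vendored zlint exposes zlint.LintCertificate returning a ResultSet with a Results map (certPEM is a placeholder):

package main

import (
	"encoding/pem"
	"fmt"

	"github.com/zmap/zcrypto/x509"
	"github.com/zmap/zlint"
)

func main() {
	certPEM := "..." // placeholder: a PEM-encoded certificate
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		panic("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		panic(err)
	}
	// Runs every registered lint whose CheckApplies accepts the certificate.
	results := zlint.LintCertificate(cert)
	for name, r := range results.Results {
		fmt.Println(name, r.Status)
	}
}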
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertSubjectGnOrSnContainsPolicy struct{}
+
+func (l *subCertSubjectGnOrSnContainsPolicy) Initialize() error {
+	return nil
+}
+
+func (l *subCertSubjectGnOrSnContainsPolicy) CheckApplies(c *x509.Certificate) bool {
+	// Check whether the GivenName or Surname fields are filled out
+	return util.IsSubscriberCert(c) && (len(c.Subject.GivenName) != 0 || len(c.Subject.Surname) != 0)
+}
+
+func (l *subCertSubjectGnOrSnContainsPolicy) Execute(c *x509.Certificate) *LintResult {
+	for _, policyID := range c.PolicyIdentifiers {
+		if policyID.Equal(util.BRIndividualValidatedOID) {
+			return &LintResult{Status: Pass}
+		}
+	}
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_given_name_surname_contains_correct_policy",
+		Description:   "Subscriber Certificate: A certificate containing a subject:givenName field or subject:surname field MUST contain the (2.23.140.1.2.3) certPolicy OID.",
+		Citation:      "BRs: 7.1.4.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABGivenNameDate,
+		Lint:          &subCertSubjectGnOrSnContainsPolicy{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_is_ca.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_is_ca.go
new file mode 100644
index 00000000..a37b9d02
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_is_ca.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"encoding/asn1"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertNotCA struct{}
+
+func (l *subCertNotCA) Initialize() error {
+	return nil
+}
+
+func (l *subCertNotCA) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.KeyUsageOID) && c.KeyUsage&x509.KeyUsageCertSign == 0 && util.IsExtInCert(c, util.BasicConstOID)
+}
+
+func (l *subCertNotCA) Execute(c *x509.Certificate) *LintResult {
+	e := util.GetExtFromCert(c, util.BasicConstOID)
+	var constraints basicConstraints
+	if _, err := asn1.Unmarshal(e.Value, &constraints); err != nil {
+		return &LintResult{Status: Fatal}
+	}
+	if constraints.IsCA {
+		return &LintResult{Status: Error}
+	} else {
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_not_is_ca",
+		Description:   "Subscriber Certificate: basicConstraints cA field MUST NOT be true.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertNotCA{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_key_usage_cert_sign_bit_set.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_key_usage_cert_sign_bit_set.go
new file mode 100644
index 00000000..ba25a3f2
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_key_usage_cert_sign_bit_set.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/**************************************************************************
+BRs: 7.1.2.3
+keyUsage (optional)
+If present, bit positions for keyCertSign and cRLSign MUST NOT be set.
+***************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertKeyUsageBitSet struct{}
+
+func (l *subCertKeyUsageBitSet) Initialize() error {
+	return nil
+}
+
+func (l *subCertKeyUsageBitSet) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.KeyUsageOID) && !util.IsCACert(c)
+}
+
+func (l *subCertKeyUsageBitSet) Execute(c *x509.Certificate) *LintResult {
+	if (c.KeyUsage & x509.KeyUsageCertSign) == x509.KeyUsageCertSign {
+		return &LintResult{Status: Error}
+	} else { // the keyCertSign bit is not set
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_key_usage_cert_sign_bit_set",
+		Description:   "Subscriber Certificate: keyUsage if present, bit positions for keyCertSign and cRLSign MUST NOT be set.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCertKeyUsageBitSet{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_key_usage_crl_sign_bit_set.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_key_usage_crl_sign_bit_set.go
new file mode 100644
index 00000000..45e45eb9
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_key_usage_crl_sign_bit_set.go
@@ -0,0 +1,56 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/**************************************************************************
+BRs: 7.1.2.3
+keyUsage (optional)
+If present, bit positions for keyCertSign and cRLSign MUST NOT be set.
+***************************************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCrlSignAllowed struct{}
+
+func (l *subCrlSignAllowed) Initialize() error {
+	return nil
+}
+
+func (l *subCrlSignAllowed) CheckApplies(c *x509.Certificate) bool {
+	return util.IsExtInCert(c, util.KeyUsageOID) && !util.IsCACert(c)
+}
+
+func (l *subCrlSignAllowed) Execute(c *x509.Certificate) *LintResult {
+	if (c.KeyUsage & x509.KeyUsageCRLSign) == x509.KeyUsageCRLSign {
+		return &LintResult{Status: Error}
+	} else { // the cRLSign bit is not set
+		return &LintResult{Status: Pass}
+	}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_key_usage_crl_sign_bit_set",
+		Description:   "Subscriber Certificate: keyUsage if present, bit positions for keyCertSign and cRLSign MUST NOT be set.",
+		Citation:      "BRs: 7.1.2.3",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subCrlSignAllowed{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_locality_name_must_appear.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_locality_name_must_appear.go
new file mode 100644
index 00000000..9acbd977
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_locality_name_must_appear.go
@@ -0,0 +1,52 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertLocalityNameMustAppear struct{} + +func (l *subCertLocalityNameMustAppear) Initialize() error { + return nil +} + +func (l *subCertLocalityNameMustAppear) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertLocalityNameMustAppear) Execute(c *x509.Certificate) *LintResult { + if len(c.Subject.Organization) > 0 || len(c.Subject.GivenName) > 0 || len(c.Subject.Surname) > 0 { + if len(c.Subject.Province) == 0 { + if len(c.Subject.Locality) == 0 { + return &LintResult{Status: Error} + } + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_locality_name_must_appear", + Description: "Subscriber Certificate: subject:localityName MUST appear if subject:organizationName, subject:givenName, or subject:surname fields are present but the subject:stateOrProvinceName field is absent.", + Citation: "BRs: 7.1.4.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABGivenNameDate, + Lint: &subCertLocalityNameMustAppear{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_locality_name_must_not_appear.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_locality_name_must_not_appear.go new file mode 100644 index 00000000..be962505 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_locality_name_must_not_appear.go @@ -0,0 +1,50 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertLocalityNameMustNotAppear struct{} + +func (l *subCertLocalityNameMustNotAppear) Initialize() error { + return nil +} + +func (l *subCertLocalityNameMustNotAppear) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertLocalityNameMustNotAppear) Execute(c *x509.Certificate) *LintResult { + if len(c.Subject.Organization) == 0 && len(c.Subject.GivenName) == 0 && len(c.Subject.Surname) == 0 { + if len(c.Subject.Locality) > 0 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_locality_name_must_not_appear", + Description: "Subscriber Certificate: subject:localityName MUST NOT appear if subject:organizationName, subject:givenName, and subject:surname fields are absent.", + Citation: "BRs: 7.1.4.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABGivenNameDate, + Lint: &subCertLocalityNameMustNotAppear{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_or_sub_ca_using_sha1.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_or_sub_ca_using_sha1.go new file mode 100644 index 00000000..f9d69d0b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_or_sub_ca_using_sha1.go @@ -0,0 +1,53 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************** +BRs: 7.1.3 +SHA‐1 MAY be used with RSA keys in accordance with the criteria defined in Section 7.1.3. 
+**************************************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type sigAlgTestsSHA1 struct{} + +func (l *sigAlgTestsSHA1) Initialize() error { + return nil +} + +func (l *sigAlgTestsSHA1) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *sigAlgTestsSHA1) Execute(c *x509.Certificate) *LintResult { + if c.SignatureAlgorithm == x509.SHA1WithRSA || c.SignatureAlgorithm == x509.DSAWithSHA1 || c.SignatureAlgorithm == x509.ECDSAWithSHA1 { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_or_sub_ca_using_sha1", + Description: "CAs MUST NOT issue any new Subscriber certificates or Subordinate CA certificates using SHA-1 after 1 January 2016", + Citation: "BRs: 7.1.3", + Source: CABFBaselineRequirements, + EffectiveDate: util.NO_SHA1, + Lint: &sigAlgTestsSHA1{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_postal_code_prohibited.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_postal_code_prohibited.go new file mode 100644 index 00000000..8721ad69 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_postal_code_prohibited.go @@ -0,0 +1,51 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertPostalCodeMustNotAppear struct{} + +func (l *subCertPostalCodeMustNotAppear) Initialize() error { + return nil +} + +func (l *subCertPostalCodeMustNotAppear) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertPostalCodeMustNotAppear) Execute(c *x509.Certificate) *LintResult { + // BR 7.1.4.2.2 uses "or" and "and" interchangeably when they mean "and". 
+	if len(c.Subject.Organization) == 0 && len(c.Subject.GivenName) == 0 && len(c.Subject.Surname) == 0 {
+		if len(c.Subject.PostalCode) > 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_postal_code_must_not_appear",
+		Description:   "Subscriber Certificate: subject:postalCode MUST NOT appear if the subject:organizationName, subject:givenName, and subject:surname fields are absent.",
+		Citation:      "BRs: 7.1.4.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABGivenNameDate,
+		Lint:          &subCertPostalCodeMustNotAppear{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_province_must_appear.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_province_must_appear.go
new file mode 100644
index 00000000..e6843a04
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_province_must_appear.go
@@ -0,0 +1,52 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertProvinceMustAppear struct{}
+
+func (l *subCertProvinceMustAppear) Initialize() error {
+	return nil
+}
+
+func (l *subCertProvinceMustAppear) CheckApplies(c *x509.Certificate) bool {
+	return util.IsSubscriberCert(c)
+}
+
+func (l *subCertProvinceMustAppear) Execute(c *x509.Certificate) *LintResult {
+	if len(c.Subject.Organization) > 0 || len(c.Subject.GivenName) > 0 || len(c.Subject.Surname) > 0 {
+		if len(c.Subject.Locality) == 0 {
+			if len(c.Subject.Province) == 0 {
+				return &LintResult{Status: Error}
+			}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_province_must_appear",
+		Description:   "Subscriber Certificate: subject:stateOrProvinceName MUST appear if the subject:organizationName, subject:givenName, or subject:surname fields are present and subject:localityName is absent.",
+		Citation:      "BRs: 7.1.4.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABGivenNameDate,
+		Lint:          &subCertProvinceMustAppear{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_province_must_not_appear.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_province_must_not_appear.go
new file mode 100644
index 00000000..e7ac3780
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_province_must_not_appear.go
@@ -0,0 +1,50 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertProvinceMustNotAppear struct{} + +func (l *subCertProvinceMustNotAppear) Initialize() error { + return nil +} + +func (l *subCertProvinceMustNotAppear) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertProvinceMustNotAppear) Execute(c *x509.Certificate) *LintResult { + if len(c.Subject.Organization) == 0 && len(c.Subject.GivenName) == 0 && len(c.Subject.Surname) == 0 { + if len(c.Subject.Province) > 0 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_province_must_not_appear", + Description: "Subscriber Certificate: subject:stateOrProvinceName MUST NOT appear if the subject:organizationName, subject:givenName, and subject:surname fields are absent.", + Citation: "BRs: 7.1.4.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABGivenNameDate, + Lint: &subCertProvinceMustNotAppear{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_sha1_expiration_too_long.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_sha1_expiration_too_long.go new file mode 100644 index 00000000..61736ae0 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_sha1_expiration_too_long.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/*************************************************************************************************************** +Effective 16 January 2015, CAs SHOULD NOT issue Subscriber Certificates utilizing the SHA‐1 algorithm with +an Expiry Date greater than 1 January 2017 because Application Software Providers are in the process of +deprecating and/or removing the SHA‐1 algorithm from their software, and they have communicated that +CAs and Subscribers using such certificates do so at their own risk. 
+****************************************************************************************************************/ + +import ( + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type sha1ExpireLong struct{} + +func (l *sha1ExpireLong) Initialize() error { + return nil +} + +func (l *sha1ExpireLong) CheckApplies(c *x509.Certificate) bool { + return !util.IsCACert(c) && (c.SignatureAlgorithm == x509.SHA1WithRSA || + c.SignatureAlgorithm == x509.DSAWithSHA1 || + c.SignatureAlgorithm == x509.ECDSAWithSHA1) +} + +func (l *sha1ExpireLong) Execute(c *x509.Certificate) *LintResult { + if c.NotAfter.After(time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC)) { + return &LintResult{Status: Warn} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "w_sub_cert_sha1_expiration_too_long", + Description: "Subscriber certificates using the SHA-1 algorithm SHOULD NOT have an expiration date later than 1 Jan 2017", + Citation: "BRs: 7.1.3", + Source: CABFBaselineRequirements, + EffectiveDate: time.Date(2015, time.January, 16, 0, 0, 0, 0, time.UTC), + Lint: &sha1ExpireLong{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_street_address_should_not_exist.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_street_address_should_not_exist.go new file mode 100644 index 00000000..66d5d958 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_street_address_should_not_exist.go @@ -0,0 +1,51 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subCertStreetAddressShouldNotExist struct{}
+
+func (l *subCertStreetAddressShouldNotExist) Initialize() error {
+	return nil
+}
+
+func (l *subCertStreetAddressShouldNotExist) CheckApplies(c *x509.Certificate) bool {
+	return util.IsSubscriberCert(c)
+}
+
+func (l *subCertStreetAddressShouldNotExist) Execute(c *x509.Certificate) *LintResult {
+	// If the organizationName, givenName, and surname fields are all absent
+	if len(c.Subject.Organization) == 0 && len(c.Subject.GivenName) == 0 && len(c.Subject.Surname) == 0 {
+		if len(c.Subject.StreetAddress) > 0 {
+			return &LintResult{Status: Error}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_sub_cert_street_address_should_not_exist",
+		Description:   "Subscriber Certificate: subject:streetAddress MUST NOT appear if subject:organizationName, subject:givenName, and subject:surname fields are absent.",
+		Citation:      "BRs: 7.1.4.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABGivenNameDate,
+		Lint:          &subCertStreetAddressShouldNotExist{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_valid_time_longer_than_39_months.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_valid_time_longer_than_39_months.go
new file mode 100644
index 00000000..8add5fb6
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_valid_time_longer_than_39_months.go
@@ -0,0 +1,48 @@
+package lints
+
+/*
+ * ZLint Copyright 2017 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
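The two validity-period lints that follow reduce to plain time arithmetic on NotBefore and NotAfter. A quick standalone sketch of the 39-month comparison, using hypothetical dates:

package main

import (
	"fmt"
	"time"
)

func main() {
	notBefore := time.Date(2017, time.January, 1, 0, 0, 0, 0, time.UTC)
	// Latest acceptable NotAfter under the 39-month rule: 2020-04-01.
	limit := notBefore.AddDate(0, 39, 0)
	notAfter := time.Date(2020, time.June, 1, 0, 0, 0, 0, time.UTC)
	fmt.Println(limit.Before(notAfter)) // true → the lint below would return Error
}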
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertValidTimeLongerThan39Months struct{} + +func (l *subCertValidTimeLongerThan39Months) Initialize() error { + return nil +} + +func (l *subCertValidTimeLongerThan39Months) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertValidTimeLongerThan39Months) Execute(c *x509.Certificate) *LintResult { + if c.NotBefore.AddDate(0, 39, 0).Before(c.NotAfter) { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_valid_time_longer_than_39_months", + Description: "Subscriber Certificates issued after 1 July 2016 but prior to 1 March 2018 MUST have a Validity Period no greater than 39 months.", + Citation: "BRs: 6.3.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.SubCert39Month, + Lint: &subCertValidTimeLongerThan39Months{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_valid_time_longer_than_825_days.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_valid_time_longer_than_825_days.go new file mode 100644 index 00000000..6ec5c62f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_sub_cert_valid_time_longer_than_825_days.go @@ -0,0 +1,48 @@ +package lints + +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subCertValidTimeLongerThan825Days struct{} + +func (l *subCertValidTimeLongerThan825Days) Initialize() error { + return nil +} + +func (l *subCertValidTimeLongerThan825Days) CheckApplies(c *x509.Certificate) bool { + return util.IsSubscriberCert(c) +} + +func (l *subCertValidTimeLongerThan825Days) Execute(c *x509.Certificate) *LintResult { + if c.NotBefore.AddDate(0, 0, 825).Before(c.NotAfter) { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_sub_cert_valid_time_longer_than_825_days", + Description: "Subscriber Certificates issued after 1 March 2018 MUST have a Validity Period no greater than 825 days.", + Citation: "BRs: 6.3.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.SubCert825Days, + Lint: &subCertValidTimeLongerThan825Days{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_included.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_included.go new file mode 100644 index 00000000..4f9b2631 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_included.go @@ -0,0 +1,54 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
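The two validity-period lints above share one boundary convention worth noting: `NotBefore.AddDate(...)` is compared with `Before(NotAfter)`, so a certificate whose lifetime is *exactly* 39 months or 825 days still passes; only a strictly longer validity errors. A small standalone sketch of the 825-day case:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	notBefore := time.Date(2018, time.March, 2, 0, 0, 0, 0, time.UTC)
	limit := notBefore.AddDate(0, 0, 825) // NotBefore + 825 days

	exactly := limit                     // validity of exactly 825 days
	oneOver := limit.Add(24 * time.Hour) // 826 days

	fmt.Println(limit.Before(exactly)) // false -> lint passes
	fmt.Println(limit.Before(oneOver)) // true  -> lint errors
}
```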
+ */ + +/*************************************************************** +BRs: 7.1.4.2.2 +Required/Optional: Deprecated (Discouraged, but not prohibited) +***************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type commonNames struct{} + +func (l *commonNames) Initialize() error { + return nil +} + +func (l *commonNames) CheckApplies(c *x509.Certificate) bool { + return !util.IsCACert(c) +} + +func (l *commonNames) Execute(c *x509.Certificate) *LintResult { + if c.Subject.CommonName == "" { + return &LintResult{Status: Pass} + } else { + return &LintResult{Status: Notice} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "n_subject_common_name_included", + Description: "Subscriber Certificate: commonName is deprecated.", + Citation: "BRs: 7.1.4.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &commonNames{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_max_length.go new file mode 100644 index 00000000..a7d34298 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_max_length.go @@ -0,0 +1,58 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* + ub-common-name INTEGER ::= 64 +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectCommonNameMaxLength struct{} + +func (l *subjectCommonNameMaxLength) Initialize() error { + return nil +} + +func (l *subjectCommonNameMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectCommonNameMaxLength) Execute(c *x509.Certificate) *LintResult { + if utf8.RuneCountInString(c.Subject.CommonName) > 64 { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_common_name_max_length", + Description: "The commonName field of the subject MUST be less than 64 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectCommonNameMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_not_from_san.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_not_from_san.go new file mode 100644 index 00000000..106b5172 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_common_name_not_from_san.go @@ -0,0 +1,68 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +BRs: 7.1.4.2.2 +If present, this field MUST contain a single IP address +or Fully‐Qualified Domain Name that is one of the values +contained in the Certificate’s subjectAltName extension (see Section 7.1.4.2.1). 
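The rule quoted here is implemented below by comparing the CN against each SAN entry: DNS names case-insensitively via `strings.EqualFold`, IP addresses by their canonical `net.IP.String()` form. A self-contained sketch of that comparison (the helper name `cnCoveredBySAN` is illustrative):

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// cnCoveredBySAN mirrors the lint's logic: the subject CN must match a SAN
// dNSName (case-insensitively) or a SAN iPAddress in canonical form.
func cnCoveredBySAN(cn string, dnsNames []string, ips []net.IP) bool {
	for _, dn := range dnsNames {
		if strings.EqualFold(cn, dn) {
			return true
		}
	}
	for _, ip := range ips {
		if cn == ip.String() {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(cnCoveredBySAN("EXAMPLE.com", []string{"example.com"}, nil))          // true
	fmt.Println(cnCoveredBySAN("192.0.2.1", nil, []net.IP{net.ParseIP("192.0.2.1")})) // true
	fmt.Println(cnCoveredBySAN("other.example", []string{"example.com"}, nil))        // false -> Error
}
```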
+************************************************/
+
+import (
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subjectCommonNameNotFromSAN struct{}
+
+func (l *subjectCommonNameNotFromSAN) Initialize() error {
+	return nil
+}
+
+func (l *subjectCommonNameNotFromSAN) CheckApplies(c *x509.Certificate) bool {
+	return c.Subject.CommonName != "" && !util.IsCACert(c)
+}
+
+func (l *subjectCommonNameNotFromSAN) Execute(c *x509.Certificate) *LintResult {
+	cn := c.Subject.CommonName
+
+	for _, dn := range c.DNSNames {
+		if strings.EqualFold(cn, dn) {
+			return &LintResult{Status: Pass}
+		}
+	}
+
+	for _, ip := range c.IPAddresses {
+		if cn == ip.String() {
+			return &LintResult{Status: Pass}
+		}
+	}
+
+	return &LintResult{Status: Error}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_subject_common_name_not_from_san",
+		Description:   "The common name field in subscriber certificates must include only names from the SAN extension",
+		Citation:      "BRs: 7.1.4.2.2",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subjectCommonNameNotFromSAN{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_malformed_arpa_ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_malformed_arpa_ip.go
new file mode 100644
index 00000000..50100c1f
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_malformed_arpa_ip.go
@@ -0,0 +1,139 @@
+/*
+ * ZLint Copyright 2019 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+package lints
+
+import (
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+// arpaMalformedIP is a linter that warns for malformed names under the
+// .in-addr.arpa or .ip6.arpa zones.
+// See also: lint_subject_contains_reserved_arpa_ip.go for a lint that ensures
+// well formed rDNS names in these zones do not specify an address in an IANA
+// reserved network.
+type arpaMalformedIP struct{}
+
+// Initialize for an arpaMalformedIP linter is a NOP to satisfy linting
+// interfaces.
+func (l *arpaMalformedIP) Initialize() error {
+	return nil
+}
+
+// CheckApplies returns true if the certificate contains any names that end in
+// one of the two designated zones for reverse DNS: in-addr.arpa or ip6.arpa.
+func (l *arpaMalformedIP) CheckApplies(c *x509.Certificate) bool {
+	names := append([]string{c.Subject.CommonName}, c.DNSNames...)
+	for _, name := range names {
+		name = strings.ToLower(name)
+		if strings.HasSuffix(name, rdnsIPv4Suffix) ||
+			strings.HasSuffix(name, rdnsIPv6Suffix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Execute will check the given certificate to ensure that all of the DNS
+// subject alternate names that specify a reverse DNS name under the respective
+// IPv4 or IPv6 arpa zones are well formed.
A Warn LintResult is returned if +// the name is in a reverse DNS zone but has the wrong number of labels. +func (l *arpaMalformedIP) Execute(c *x509.Certificate) *LintResult { + for _, name := range c.DNSNames { + name = strings.ToLower(name) + var err error + if strings.HasSuffix(name, rdnsIPv4Suffix) { + // If the name has the in-addr.arpa suffix then it should be an IPv4 reverse + // DNS name. + err = lintReversedIPAddressLabels(name, false) + } else if strings.HasSuffix(name, rdnsIPv6Suffix) { + // If the name has the ip6.arpa suffix then it should be an IPv6 reverse + // DNS name. + err = lintReversedIPAddressLabels(name, true) + } + // Return the first error as a negative lint result + if err != nil { + return &LintResult{ + Status: Warn, + Details: err.Error(), + } + } + } + + return &LintResult{ + Status: Pass, + } +} + +// lintReversedIPAddressLabels lints the given name as either a reversed IPv4 or +// IPv6 address under the respective ARPA zone based on the address class. An +// error is returned if there aren't enough labels in the name after removing +// the relevant arpa suffix. +func lintReversedIPAddressLabels(name string, ipv6 bool) error { + numRequiredLabels := rdnsIPv4Labels + zoneSuffix := rdnsIPv4Suffix + + if ipv6 { + numRequiredLabels = rdnsIPv6Labels + zoneSuffix = rdnsIPv6Suffix + } + + // Strip off the zone suffix to get only the reversed IP address + ipName := strings.TrimSuffix(name, zoneSuffix) + + // A well encoded IPv4 or IPv6 reverse DNS name will have the correct number + // of labels to express the address + ipLabels := strings.Split(ipName, ".") + if len(ipLabels) != numRequiredLabels { + return fmt.Errorf( + "name %q has too few leading labels (%d vs %d) to be a reverse DNS entry "+ + "in the %q zone.", + name, len(ipLabels), numRequiredLabels, zoneSuffix) + } + + // Reverse the IP labels and try to parse an IP address + var ip net.IP + if ipv6 { + ip = reversedLabelsToIPv6(ipLabels) + } else { + ip = reversedLabelsToIPv4(ipLabels) + } + + // If the result isn't an IP then a warning should be generated + if ip == nil { + return fmt.Errorf( + "the first %d labels of name %q did not parse as a reversed IP address", + numRequiredLabels, name) + } + + // Otherwise return no error - checking the actual value of the IP is left to + // `lint_subject_contains_reserved_arpa_ip.go`. + return nil +} + +func init() { + RegisterLint(&Lint{ + Name: "w_subject_contains_malformed_arpa_ip", + Description: "Checks no subject domain name contains a rDNS entry in an .arpa zone with the wrong number of labels", + Citation: "BRs: 7.1.4.2.1", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &arpaMalformedIP{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_noninformational_value.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_noninformational_value.go new file mode 100644 index 00000000..b2d4d91b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_noninformational_value.go @@ -0,0 +1,79 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
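As a concrete illustration of the label-count rule implemented above (a standalone sketch, not part of the lint): stripping the `.in-addr.arpa` suffix must leave exactly 4 labels, and stripping `.ip6.arpa` must leave exactly 32 nibble labels.

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	wellFormed := "4.3.2.1.in-addr.arpa" // reverses to 1.2.3.4
	malformed := "3.2.1.in-addr.arpa"    // only three labels

	count := func(name string) int {
		return len(strings.Split(strings.TrimSuffix(name, ".in-addr.arpa"), "."))
	}
	fmt.Println(count(wellFormed)) // 4 -> passes the label check
	fmt.Println(count(malformed))  // 3 -> the lint returns Warn
}
```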
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/********************************************************************************************************************** +BRs: 7.1.4.2.2 +Other Subject Attributes +With the exception of the subject:organizationalUnitName (OU) attribute, optional attributes, when present within +the subject field, MUST contain information that has been verified by the CA. Metadata such as ‘.’, ‘-‘, and ‘ ‘ (i.e. +space) characters, and/or any other indication that the value is absent, incomplete, or not applicable, SHALL NOT +be used. +**********************************************************************************************************************/ + +import ( + "fmt" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type illegalChar struct{} + +func (l *illegalChar) Initialize() error { + return nil +} + +func (l *illegalChar) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *illegalChar) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.Names { + value, ok := j.Value.(string) + if !ok { + continue + } + + if !checkAlphaNumericOrUTF8Present(value) { + return &LintResult{Status: Error, Details: fmt.Sprintf("found only metadata %s in subjectDN attribute %s", value, j.Type.String())} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_contains_noninformational_value", + Description: "Subject name fields must not contain '.','-',' ' or any other indication that the field has been omitted", + Citation: "BRs: 7.1.4.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &illegalChar{}, + }) +} + +// checkAlphaNumericOrUTF8Present checks if input string contains at least one occurrence of [a-Z0-9] or +// a UTF8 rune outside of ascii table +func checkAlphaNumericOrUTF8Present(input string) bool { + for _, r := range input { + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r > 127 { + return true + } + } + + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_reserved_arpa_ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_reserved_arpa_ip.go new file mode 100644 index 00000000..fd224711 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_reserved_arpa_ip.go @@ -0,0 +1,232 @@ +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
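To make the behavior of `checkAlphaNumericOrUTF8Present` concrete, here is a standalone sketch reusing the same helper logic on a few sample subject values. Note the nuance that a value such as `N/A` passes this particular check because it contains letters, even though it signals absence to a human reader:

```go
package main

import "fmt"

// checkAlphaNumericOrUTF8Present reproduces the lint helper: a value is
// informational if it has at least one ASCII letter, digit, or non-ASCII rune.
func checkAlphaNumericOrUTF8Present(input string) bool {
	for _, r := range input {
		if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r > 127 {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(checkAlphaNumericOrUTF8Present("-"))      // false: metadata only -> Error
	fmt.Println(checkAlphaNumericOrUTF8Present(". - ."))  // false: metadata only -> Error
	fmt.Println(checkAlphaNumericOrUTF8Present("N/A"))    // true: contains letters, so this lint passes
	fmt.Println(checkAlphaNumericOrUTF8Present("Ümlaut")) // true: non-ASCII rune
}
```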
+ */
+
+package lints
+
+import (
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+const (
+	// arpaTLD holds a string constant for the .arpa TLD
+	arpaTLD = ".arpa"
+
+	// rdnsIPv4Suffix is the expected suffix for IPv4 reverse DNS names as
+	// specified in https://tools.ietf.org/html/rfc1035#section-3.5
+	rdnsIPv4Suffix = ".in-addr" + arpaTLD
+	// rdnsIPv4Labels is the expected number of labels for an IPv4 reverse DNS
+	// name (not counting the rdnsIPv4Suffix labels). IPv4 addresses are four
+	// bytes. RFC 1035 uses one byte per label meaning there are 4 expected labels
+	// under the rdnsIPv4Suffix.
+	rdnsIPv4Labels = 4
+
+	// rdnsIPv6Suffix is the expected suffix for IPv6 reverse DNS names as
+	// specified in https://tools.ietf.org/html/rfc3596#section-2.5
+	rdnsIPv6Suffix = ".ip6" + arpaTLD
+	// rdnsIPv6Labels is the expected number of labels for an IPv6 reverse DNS
+	// name (not counting the rdnsIPv6Suffix labels). IPv6 addresses are 16 bytes.
+	// RFC 3596 Sec 2.5 uses one *nibble* per label meaning there are 16*2
+	// expected labels under the rdnsIPv6Suffix.
+	rdnsIPv6Labels = 32
+)
+
+// arpaReservedIP is a linter that errors for any well formed rDNS names in the
+// .in-addr.arpa or .ip6.arpa zones that specify an address in an IANA reserved
+// network.
+// See also: lint_subject_contains_malformed_arpa_ip.go for a lint that warns
+// about malformed rDNS names in these zones.
+type arpaReservedIP struct{}
+
+// Initialize for an arpaReservedIP linter is a NOP to satisfy linting
+// interfaces.
+func (l *arpaReservedIP) Initialize() error {
+	return nil
+}
+
+// CheckApplies returns true if the certificate contains any names that end in
+// one of the two designated zones for reverse DNS: in-addr.arpa or ip6.arpa.
+func (l *arpaReservedIP) CheckApplies(c *x509.Certificate) bool {
+	names := append([]string{c.Subject.CommonName}, c.DNSNames...)
+	for _, name := range names {
+		name = strings.ToLower(name)
+		if strings.HasSuffix(name, rdnsIPv4Suffix) ||
+			strings.HasSuffix(name, rdnsIPv6Suffix) {
+			return true
+		}
+	}
+	return false
+}
+
+// Execute will check the given certificate to ensure that all of the DNS
+// subject alternate names that specify a well formed reverse DNS name under the
+// respective IPv4 or IPv6 arpa zones do not specify an IP in an IANA
+// reserved IP space. An Error LintResult is returned if the name specifies an
+// IP address of the wrong class, or specifies an IP address in an IANA reserved
+// network.
+func (l *arpaReservedIP) Execute(c *x509.Certificate) *LintResult {
+	for _, name := range c.DNSNames {
+		name = strings.ToLower(name)
+		var err error
+		if strings.HasSuffix(name, rdnsIPv4Suffix) {
+			// If the name has the in-addr.arpa suffix then it should be an IPv4 reverse
+			// DNS name.
+			err = lintReversedIPAddress(name, false)
+		} else if strings.HasSuffix(name, rdnsIPv6Suffix) {
+			// If the name has the ip6.arpa suffix then it should be an IPv6 reverse
+			// DNS name.
+			err = lintReversedIPAddress(name, true)
+		}
+		// Return the first error as a negative lint result
+		if err != nil {
+			return &LintResult{
+				Status:  Error,
+				Details: err.Error(),
+			}
+		}
+	}
+
+	return &LintResult{
+		Status: Pass,
+	}
+}
+
+// reversedLabelsToIPv4 reverses the provided labels (assumed to be 4 labels,
+// one per byte of the IPv4 address) and constructs an IPv4 address, returning
+// the result of calling net.ParseIP for the constructed address.
+func reversedLabelsToIPv4(labels []string) net.IP {
+	var buf strings.Builder
+
+	// If there aren't the right number of labels, it isn't an IPv4 address.
+	if len(labels) != rdnsIPv4Labels {
+		return nil
+	}
+
+	// An IPv4 address is represented as four groups of bytes separated by '.'
+	for i := len(labels) - 1; i >= 0; i-- {
+		buf.WriteString(labels[i])
+		if i != 0 {
+			buf.WriteString(".")
+		}
+	}
+	return net.ParseIP(buf.String())
+}
+
+// reversedLabelsToIPv6 reverses the provided labels (assumed to be 32 labels,
+// one per nibble of an IPv6 address) and constructs an IPv6 address, returning
+// the result of calling net.ParseIP for the constructed address.
+func reversedLabelsToIPv6(labels []string) net.IP {
+	var buf strings.Builder
+
+	// If there aren't the right number of labels, it isn't an IPv6 address.
+	if len(labels) != rdnsIPv6Labels {
+		return nil
+	}
+
+	// An IPv6 address is represented as eight groups of two bytes separated
+	// by `:` in hex form. Since each label in the rDNS form is one nibble we need
+	// four label components per IPv6 address component group.
+	for i := len(labels) - 1; i >= 0; i -= 4 {
+		buf.WriteString(labels[i])
+		buf.WriteString(labels[i-1])
+		buf.WriteString(labels[i-2])
+		buf.WriteString(labels[i-3])
+		if i > 4 {
+			buf.WriteString(":")
+		}
+	}
+	return net.ParseIP(buf.String())
+}
+
+// lintReversedIPAddress lints the given name as either a reversed IPv4 or IPv6
+// address under the respective ARPA zone based on the address class. An error
+// is returned if:
+//
+//   1. The IP address labels parse as an IP of the wrong address class for the
+//      arpa suffix the name is using.
+//   2. The IP address is within an IANA reserved range.
+func lintReversedIPAddress(name string, ipv6 bool) error {
+	numRequiredLabels := rdnsIPv4Labels
+	zoneSuffix := rdnsIPv4Suffix
+
+	if ipv6 {
+		numRequiredLabels = rdnsIPv6Labels
+		zoneSuffix = rdnsIPv6Suffix
+	}
+
+	// Strip off the zone suffix to get only the reversed IP address
+	ipName := strings.TrimSuffix(name, zoneSuffix)
+
+	// A well encoded IPv4 or IPv6 reverse DNS name will have the correct number
+	// of labels to express the address. If there isn't the right number of labels
+	// a separate `lint_subject_contains_malformed_arpa_ip.go` linter will flag it
+	// as a warning. This linter is specifically concerned with well formed rDNS
+	// that specifies a reserved IP.
+	ipLabels := strings.Split(ipName, ".")
+	if len(ipLabels) != numRequiredLabels {
+		return nil
+	}
+
+	// Reverse the IP labels and try to parse an IP address
+	var ip net.IP
+	if ipv6 {
+		ip = reversedLabelsToIPv6(ipLabels)
+	} else {
+		ip = reversedLabelsToIPv4(ipLabels)
+	}
+	// If the result isn't an IP at all assume there is no problem - leave
+	// `lint_subject_contains_malformed_arpa_ip` to flag it as a warning.
+	if ip == nil {
+		return nil
+	}
+
+	if !ipv6 && ip.To4() == nil {
+		// If we weren't expecting IPv6 and got it, that's a problem
+		return fmt.Errorf(
+			"the first %d labels of name %q parsed as a reversed IPv6 address but is "+
+				"in the %q IPv4 reverse DNS zone.",
+			numRequiredLabels, name, rdnsIPv4Suffix)
+	} else if ipv6 && ip.To4() != nil {
+		// If we were expecting IPv6 and got an IPv4 address, that's a problem
+		return fmt.Errorf(
+			"the first %d labels of name %q parsed as a reversed IPv4 address but is "+
+				"in the %q IPv6 reverse DNS zone.",
+			numRequiredLabels, name, rdnsIPv6Suffix)
+	}
+
+	// If the IP address is in an IANA reserved space, that's a problem.
+	if util.IsIANAReserved(ip) {
+		return fmt.Errorf(
+			"the first %d labels of name %q parsed as a reversed IP address in "+
+				"an IANA reserved IP space.",
+			numRequiredLabels, name)
+	}
+
+	return nil
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_subject_contains_reserved_arpa_ip",
+		Description:   "Checks no subject domain name contains a rDNS entry in an .arpa zone specifying a reserved IP address",
+		Citation:      "BRs: 7.1.4.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &arpaReservedIP{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_reserved_ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_reserved_ip.go
new file mode 100644
index 00000000..e91983d7
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_contains_reserved_ip.go
@@ -0,0 +1,59 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/************************************************
+BRs: 7.1.4.2.1
+Also as of the Effective Date, the CA SHALL NOT
+issue a certificate with an Expiry Date later than
+1 November 2015 with a subjectAlternativeName extension
+or Subject commonName field containing a Reserved IP
+Address or Internal Name.
+************************************************/
+
+import (
+	"net"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subjectReservedIP struct{}
+
+func (l *subjectReservedIP) Initialize() error {
+	return nil
+}
+
+func (l *subjectReservedIP) CheckApplies(c *x509.Certificate) bool {
+	return c.NotAfter.After(util.NoReservedIP)
+}
+
+func (l *subjectReservedIP) Execute(c *x509.Certificate) *LintResult {
+	if ip := net.ParseIP(c.Subject.CommonName); ip != nil && util.IsIANAReserved(ip) {
+		return &LintResult{Status: Error}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_subject_contains_reserved_ip",
+		Description:   "Certificates expiring later than 1 November 2015 MUST NOT contain a reserved IP address in the common name field",
+		Citation:      "BRs: 7.1.4.2.1",
+		Source:        CABFBaselineRequirements,
+		EffectiveDate: util.CABEffectiveDate,
+		Lint:          &subjectReservedIP{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_country_not_iso.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_country_not_iso.go
new file mode 100644
index 00000000..001ce42a
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_country_not_iso.go
@@ -0,0 +1,60 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
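A small sketch of the condition used by `e_subject_contains_reserved_ip` just above: the lint only errors when the CN parses as an IP address *and* that address is IANA-reserved. The `isRFC1918` helper below is a simplified stand-in for `util.IsIANAReserved` (which covers the full IANA special-purpose registries), used here only for illustration:

```go
package main

import (
	"fmt"
	"net"
)

// isRFC1918 is a simplified stand-in for util.IsIANAReserved, covering only
// the three RFC 1918 private blocks.
func isRFC1918(ip net.IP) bool {
	for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
		_, block, _ := net.ParseCIDR(cidr)
		if block.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	for _, cn := range []string{"10.0.0.1", "8.8.8.8", "www.example.com"} {
		ip := net.ParseIP(cn)
		// Hostnames don't parse as IPs, so they can never trip this lint.
		fmt.Println(cn, ip != nil && isRFC1918(ip))
	}
}
```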
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************************************************** +BRs: 7.1.4.2.2 +Certificate Field: issuer:countryName (OID 2.5.4.6) +Required/Optional: Required +Contents: This field MUST contain the two-letter ISO 3166-1 country code for the country in which the issuer’s +place of business is located. +**************************************************************************************************************/ + +import ( + "strings" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type countryNotIso struct{} + +func (l *countryNotIso) Initialize() error { + return nil +} + +func (l *countryNotIso) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *countryNotIso) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.Country { + if !util.IsISOCountryCode(strings.ToUpper(j)) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_country_not_iso", + Description: "The country name field MUST contain the two-letter ISO code for the country or XX", + Citation: "BRs: 7.1.4.2.2", + Source: CABFBaselineRequirements, + EffectiveDate: util.CABEffectiveDate, + Lint: &countryNotIso{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_country_not_printable_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_country_not_printable_string.go new file mode 100644 index 00000000..5201c590 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_country_not_printable_string.go @@ -0,0 +1,64 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SubjectDNCountryNotPrintableString struct{} + +func (l *SubjectDNCountryNotPrintableString) Initialize() error { + return nil +} + +func (l *SubjectDNCountryNotPrintableString) CheckApplies(c *x509.Certificate) bool { + return len(c.Subject.Country) > 0 +} + +func (l *SubjectDNCountryNotPrintableString) Execute(c *x509.Certificate) *LintResult { + rdnSequence := util.RawRDNSequence{} + rest, err := asn1.Unmarshal(c.RawSubject, &rdnSequence) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(rest) > 0 { + return &LintResult{Status: Fatal} + } + + for _, attrTypeAndValueSet := range rdnSequence { + for _, attrTypeAndValue := range attrTypeAndValueSet { + if attrTypeAndValue.Type.Equal(util.CountryNameOID) && attrTypeAndValue.Value.Tag != asn1.TagPrintableString { + return &LintResult{Status: Error} + } + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_dn_country_not_printable_string", + Description: "X520 Distinguished Name Country MUST be encoded as PrintableString", + Citation: "RFC 5280: Appendix A", + Source: RFC5280, + EffectiveDate: util.ZeroDate, + Lint: &SubjectDNCountryNotPrintableString{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_leading_whitespace.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_leading_whitespace.go new file mode 100644 index 00000000..dea9f97b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_leading_whitespace.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
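The tag comparison in `SubjectDNCountryNotPrintableString` above can be reproduced with the standard library's `encoding/asn1`: PrintableString is universal tag 19 (`asn1.TagPrintableString`) while UTF8String is tag 12, so a countryName encoded as UTF8String fails the check. A minimal sketch:

```go
package main

import (
	"encoding/asn1"
	"fmt"
)

func main() {
	// Go marshals plain ASCII strings as PrintableString by default.
	printable, _ := asn1.Marshal("US")
	fmt.Println(printable[0]&0x1f == asn1.TagPrintableString) // true: tag 19

	// Forcing UTF8String encoding yields tag 12, which the lint rejects
	// for countryName values.
	utf8val, _ := asn1.MarshalWithParams("US", "utf8")
	fmt.Println(utf8val[0]&0x1f == asn1.TagPrintableString) // false: tag 12
}
```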
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SubjectDNLeadingSpace struct{} + +func (l *SubjectDNLeadingSpace) Initialize() error { + return nil +} + +func (l *SubjectDNLeadingSpace) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *SubjectDNLeadingSpace) Execute(c *x509.Certificate) *LintResult { + leading, _, err := util.CheckRDNSequenceWhiteSpace(c.RawSubject) + if err != nil { + return &LintResult{Status: Fatal} + } + if leading { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_subject_dn_leading_whitespace", + Description: "AttributeValue in subject RelativeDistinguishedName sequence SHOULD NOT have leading whitespace", + Citation: "AWSLabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &SubjectDNLeadingSpace{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_not_printable_characters.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_not_printable_characters.go new file mode 100644 index 00000000..7d7b75af --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_not_printable_characters.go @@ -0,0 +1,73 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "encoding/asn1" + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectDNNotPrintableCharacters struct{} + +func (l *subjectDNNotPrintableCharacters) Initialize() error { + return nil +} + +func (l *subjectDNNotPrintableCharacters) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectDNNotPrintableCharacters) Execute(c *x509.Certificate) *LintResult { + rdnSequence := util.RawRDNSequence{} + rest, err := asn1.Unmarshal(c.RawSubject, &rdnSequence) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(rest) > 0 { + return &LintResult{Status: Fatal} + } + + for _, attrTypeAndValueSet := range rdnSequence { + for _, attrTypeAndValue := range attrTypeAndValueSet { + bytes := attrTypeAndValue.Value.Bytes + for len(bytes) > 0 { + r, size := utf8.DecodeRune(bytes) + if r < 0x20 { + return &LintResult{Status: Error} + } + if r >= 0x7F && r <= 0x9F { + return &LintResult{Status: Error} + } + bytes = bytes[size:] + } + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_dn_not_printable_characters", + Description: "X520 Subject fields MUST only contain printable control characters", + Citation: "RFC 5280: Appendix A", + Source: RFC5280, + EffectiveDate: util.ZeroDate, + Lint: &subjectDNNotPrintableCharacters{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_serial_number_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_serial_number_max_length.go new file mode 100644 index 00000000..d783d53f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_serial_number_max_length.go @@ -0,0 +1,50 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SubjectDNSerialNumberMaxLength struct{} + +func (l *SubjectDNSerialNumberMaxLength) Initialize() error { + return nil +} + +func (l *SubjectDNSerialNumberMaxLength) CheckApplies(c *x509.Certificate) bool { + return len(c.Subject.SerialNumber) > 0 +} + +func (l *SubjectDNSerialNumberMaxLength) Execute(c *x509.Certificate) *LintResult { + if utf8.RuneCountInString(c.Subject.SerialNumber) > 64 { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_dn_serial_number_max_length", + Description: "The 'Serial Number' field of the subject MUST be less than 64 characters", + Citation: "RFC 5280: Appendix A", + Source: RFC5280, + EffectiveDate: util.ZeroDate, + Lint: &SubjectDNSerialNumberMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_serial_number_not_printable_string.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_serial_number_not_printable_string.go new file mode 100644 index 00000000..8db9ed08 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_serial_number_not_printable_string.go @@ -0,0 +1,64 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
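All of the max-length lints in this batch measure length with `utf8.RuneCountInString` rather than `len`, so multi-byte UTF-8 values are counted by character rather than by byte. A quick standalone illustration:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	s := "Müller-Straße-42"
	fmt.Println(len(s))                    // 18: byte length, inflated by ü and ß
	fmt.Println(utf8.RuneCountInString(s)) // 16: character count the lints compare against
}
```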
+ */ + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SubjectDNSerialNumberNotPrintableString struct{} + +func (l *SubjectDNSerialNumberNotPrintableString) Initialize() error { + return nil +} + +func (l *SubjectDNSerialNumberNotPrintableString) CheckApplies(c *x509.Certificate) bool { + return len(c.Subject.SerialNumber) > 0 +} + +func (l *SubjectDNSerialNumberNotPrintableString) Execute(c *x509.Certificate) *LintResult { + rdnSequence := util.RawRDNSequence{} + rest, err := asn1.Unmarshal(c.RawSubject, &rdnSequence) + if err != nil { + return &LintResult{Status: Fatal} + } + if len(rest) > 0 { + return &LintResult{Status: Fatal} + } + + for _, attrTypeAndValueSet := range rdnSequence { + for _, attrTypeAndValue := range attrTypeAndValueSet { + if attrTypeAndValue.Type.Equal(util.SerialOID) && attrTypeAndValue.Value.Tag != asn1.TagPrintableString { + return &LintResult{Status: Error} + } + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_dn_serial_number_not_printable_string", + Description: "X520 Distinguished Name SerialNumber MUST be encoded as PrintableString", + Citation: "RFC 5280: Appendix A", + Source: RFC5280, + EffectiveDate: util.ZeroDate, + Lint: &SubjectDNSerialNumberNotPrintableString{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_trailing_whitespace.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_trailing_whitespace.go new file mode 100644 index 00000000..fbb76384 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_dn_trailing_whitespace.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type SubjectDNTrailingSpace struct{} + +func (l *SubjectDNTrailingSpace) Initialize() error { + return nil +} + +func (l *SubjectDNTrailingSpace) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *SubjectDNTrailingSpace) Execute(c *x509.Certificate) *LintResult { + _, trailing, err := util.CheckRDNSequenceWhiteSpace(c.RawSubject) + if err != nil { + return &LintResult{Status: Fatal} + } + if trailing { + return &LintResult{Status: Warn} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "w_subject_dn_trailing_whitespace", + Description: "AttributeValue in subject RelativeDistinguishedName sequence SHOULD NOT have trailing whitespace", + Citation: "AWSLabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &SubjectDNTrailingSpace{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_email_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_email_max_length.go new file mode 100644 index 00000000..0c138e05 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_email_max_length.go @@ -0,0 +1,67 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. * + ub-emailaddress-length INTEGER ::= 128 + +The ASN.1 modules in Appendix A are unchanged from RFC 3280, except +that ub-emailaddress-length was changed from 128 to 255 in order to +align with PKCS #9 [RFC2985]. 
+ +ub-emailaddress-length INTEGER ::= 255 + +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectEmailMaxLength struct{} + +func (l *subjectEmailMaxLength) Initialize() error { + return nil +} + +func (l *subjectEmailMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectEmailMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.EmailAddress { + if utf8.RuneCountInString(j) > 255 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_email_max_length", + Description: "The 'Email' field of the subject MUST be less than 255 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectEmailMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_empty_without_san.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_empty_without_san.go new file mode 100644 index 00000000..4bc30ad4 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_empty_without_san.go @@ -0,0 +1,69 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************* +RFC 5280: 4.2 & 4.2.1.6 +Further, if the only subject identity included in the certificate is +an alternative name form (e.g., an electronic mail address), then the +subject distinguished name MUST be empty (an empty sequence), and the +subjectAltName extension MUST be present. If the subject field +contains an empty sequence, then the issuing CA MUST include a +subjectAltName extension that is marked as critical. When including +the subjectAltName extension in a certificate that has a non-empty +subject distinguished name, conforming CAs SHOULD mark the +subjectAltName extension as non-critical. 
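A compact sketch of the rule just quoted, using the standard library for brevity (the lint below operates on zcrypto types and checks for the raw SAN extension via `util.IsExtInCert`; the `hasSAN` helper here is an illustrative stand-in for that lookup):

```go
package main

import (
	"crypto/x509"
	"fmt"
)

// hasSAN is an illustrative stand-in for checking the SAN extension's
// presence: it reports whether the parser populated any SAN-typed field.
func hasSAN(c *x509.Certificate) bool {
	return len(c.DNSNames) > 0 || len(c.EmailAddresses) > 0 ||
		len(c.IPAddresses) > 0 || len(c.URIs) > 0
}

func main() {
	var cert x509.Certificate // zero value: empty subject, no SAN
	subjectEmpty := len(cert.Subject.Names) == 0
	if subjectEmpty && !hasSAN(&cert) {
		fmt.Println("empty subject without subjectAltName -> Error")
	}
}
```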
+*************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type emptyWithoutSAN struct{} + +func (l *emptyWithoutSAN) Initialize() error { + return nil +} + +func (l *emptyWithoutSAN) CheckApplies(cert *x509.Certificate) bool { + return true +} + +func (l *emptyWithoutSAN) Execute(cert *x509.Certificate) *LintResult { + if subjectIsEmpty(cert) && !util.IsExtInCert(cert, util.SubjectAlternateNameOID) { + return &LintResult{Status: Error} + } else { + return &LintResult{Status: Pass} + } +} + +func subjectIsEmpty(cert *x509.Certificate) bool { + if cert.Subject.Names == nil { + return true + } + return false +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_empty_without_san", + Description: "CAs MUST support subject alternative name if the subject field is an empty sequence", + Citation: "RFC 5280: 4.2 & 4.2.1.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &emptyWithoutSAN{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_given_name_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_given_name_max_length.go new file mode 100644 index 00000000..fcfcfcc9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_given_name_max_length.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* + ub-given-name-length INTEGER ::= 16 + +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectGivenNameMaxLength struct{} + +func (l *subjectGivenNameMaxLength) Initialize() error { + return nil +} + +func (l *subjectGivenNameMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectGivenNameMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.GivenName { + if utf8.RuneCountInString(j) > 16 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_given_name_max_length", + Description: "The 'GivenName' field of the subject MUST be less than 17 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectGivenNameMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_info_access_marked_critical.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_info_access_marked_critical.go new file mode 100644 index 00000000..784aec3b --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_info_access_marked_critical.go @@ -0,0 +1,53 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +The subject information access extension indicates how to access information and services for the subject of the certificate in which the extension appears. When the subject is a CA, information and services may include certificate validation services and CA policy data. When the subject is an end entity, the information describes the type of services offered and how to access them. In this case, the contents of this extension are defined in the protocol specifications for the supported services. This extension may be included in end entity or CA certificates. Conforming CAs MUST mark this extension as non-critical. 
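The check below boils down to locating the Subject Information Access extension by OID and reading its `Critical` flag. A standard-library sketch of the same pattern — the OID 1.3.6.1.5.5.7.1.11 is id-pe-subjectInfoAccess; the helper name is illustrative:

```go
package main

import (
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/asn1"
	"fmt"
)

// subjectInfoAccessOID is id-pe-subjectInfoAccess (1.3.6.1.5.5.7.1.11).
var subjectInfoAccessOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11}

// siaIsCritical scans the certificate's extensions for the SIA OID and
// reports whether it is (wrongly) marked critical.
func siaIsCritical(exts []pkix.Extension) bool {
	for _, ext := range exts {
		if ext.Id.Equal(subjectInfoAccessOID) && ext.Critical {
			return true
		}
	}
	return false
}

func main() {
	var cert x509.Certificate
	fmt.Println(siaIsCritical(cert.Extensions)) // false: no extensions present
}
```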
+************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type siaCrit struct{} + +func (l *siaCrit) Initialize() error { + return nil +} + +func (l *siaCrit) CheckApplies(c *x509.Certificate) bool { + return util.IsExtInCert(c, util.SubjectInfoAccessOID) +} + +func (l *siaCrit) Execute(c *x509.Certificate) *LintResult { + sia := util.GetExtFromCert(c, util.SubjectInfoAccessOID) + if sia.Critical { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_info_access_marked_critical", + Description: "Conforming CAs MUST mark the Subject Info Access extension as non-critical", + Citation: "RFC 5280: 4.2.2.2", + Source: RFC5280, + EffectiveDate: util.RFC3280Date, + Lint: &siaCrit{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_locality_name_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_locality_name_max_length.go new file mode 100644 index 00000000..e9fd9c4a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_locality_name_max_length.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* ub-locality-name INTEGER ::= 128
+************************************************/
+
+import (
+	"unicode/utf8"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type subjectLocalityNameMaxLength struct{}
+
+func (l *subjectLocalityNameMaxLength) Initialize() error {
+	return nil
+}
+
+func (l *subjectLocalityNameMaxLength) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *subjectLocalityNameMaxLength) Execute(c *x509.Certificate) *LintResult {
+	for _, j := range c.Subject.Locality {
+		if utf8.RuneCountInString(j) > 128 {
+			return &LintResult{Status: Error}
+		}
+	}
+
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_subject_locality_name_max_length",
+		Description:   "The 'Locality Name' field of the subject MUST be less than 128 characters",
+		Citation:      "RFC 5280: A.1",
+		Source:        RFC5280,
+		EffectiveDate: util.RFC2459Date,
+		Lint:          &subjectLocalityNameMaxLength{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_multiple_rdn.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_multiple_rdn.go
new file mode 100644
index 00000000..bf02aba8
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_multiple_rdn.go
@@ -0,0 +1,57 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+import (
+	"encoding/asn1"
+
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zcrypto/x509/pkix"
+	"github.com/zmap/zlint/util"
+)
+
+type SubjectRDNHasMultipleAttribute struct{}
+
+func (l *SubjectRDNHasMultipleAttribute) Initialize() error {
+	return nil
+}
+
+func (l *SubjectRDNHasMultipleAttribute) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *SubjectRDNHasMultipleAttribute) Execute(c *x509.Certificate) *LintResult {
+	var subject pkix.RDNSequence
+	if _, err := asn1.Unmarshal(c.RawSubject, &subject); err != nil {
+		return &LintResult{Status: Fatal}
+	}
+	for _, rdn := range subject {
+		if len(rdn) > 1 {
+			return &LintResult{Status: Notice}
+		}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:        "n_multiple_subject_rdn",
+		Description: "Certificates typically do not have multiple attributes in a single RDN (subject).
This may be an error.", + Citation: "AWSLabs certlint", + Source: AWSLabs, + EffectiveDate: util.ZeroDate, + Lint: &SubjectRDNHasMultipleAttribute{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_not_dn.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_not_dn.go new file mode 100644 index 00000000..e775a4fa --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_not_dn.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************* + RFC 5280: 4.1.2.6 + Where it is non-empty, the subject field MUST contain an X.500 + distinguished name (DN). The DN MUST be unique for each subject + entity certified by the one CA as defined by the issuer name field. A + CA may issue more than one certificate with the same DN to the same + subject entity. +*************************************************************************/ + +import ( + "reflect" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zcrypto/x509/pkix" + "github.com/zmap/zlint/util" +) + +type subjectDN struct{} + +func (l *subjectDN) Initialize() error { + return nil +} + +func (l *subjectDN) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectDN) Execute(c *x509.Certificate) *LintResult { + if reflect.TypeOf(c.Subject) != reflect.TypeOf(*(new(pkix.Name))) { + return &LintResult{Status: Error} + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_not_dn", + Description: "When not empty, the subject field MUST be a distinguished name", + Citation: "RFC 5280: 4.1.2.6", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectDN{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_organization_name_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_organization_name_max_length.go new file mode 100644 index 00000000..b6cb2e2a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_organization_name_max_length.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. * + ub-organization-name INTEGER ::= 64 +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectOrganizationNameMaxLength struct{} + +func (l *subjectOrganizationNameMaxLength) Initialize() error { + return nil +} + +func (l *subjectOrganizationNameMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectOrganizationNameMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.Organization { + if utf8.RuneCountInString(j) > 64 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_organization_name_max_length", + Description: "The 'Organization Name' field of the subject MUST be less than 64 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectOrganizationNameMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_organizational_unit_name_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_organizational_unit_name_max_length.go new file mode 100644 index 00000000..fd4bc0f1 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_organizational_unit_name_max_length.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* + ub-organizational-unit-name INTEGER ::= 64 +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectOrganizationalUnitNameMaxLength struct{} + +func (l *subjectOrganizationalUnitNameMaxLength) Initialize() error { + return nil +} + +func (l *subjectOrganizationalUnitNameMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectOrganizationalUnitNameMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.OrganizationalUnit { + if utf8.RuneCountInString(j) > 64 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_organizational_unit_name_max_length", + Description: "The 'Organizational Unit Name' field of the subject MUST be less than 64 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectOrganizationalUnitNameMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_postal_code_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_postal_code_max_length.go new file mode 100644 index 00000000..749ccc4d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_postal_code_max_length.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* + ub-postal-code-length INTEGER ::= 16 + +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectPostalCodeMaxLength struct{} + +func (l *subjectPostalCodeMaxLength) Initialize() error { + return nil +} + +func (l *subjectPostalCodeMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectPostalCodeMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.PostalCode { + if utf8.RuneCountInString(j) > 16 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_postal_code_max_length", + Description: "The 'PostalCode' field of the subject MUST be less than 17 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectPostalCodeMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_printable_string_badalpha.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_printable_string_badalpha.go new file mode 100644 index 00000000..764e6950 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_printable_string_badalpha.go @@ -0,0 +1,108 @@ +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package lints + +import ( + "encoding/asn1" + "errors" + "fmt" + "regexp" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +var ( + // Per RFC 5280, Appendix B. ASN.1 Notes: + // The character string type PrintableString supports a very basic Latin + // character set: the lowercase letters 'a' through 'z', uppercase + // letters 'A' through 'Z', the digits '0' through '9', eleven special + // characters ' = ( ) + , - . / : ? and space. + printableStringRegex = regexp.MustCompile(`^[a-zA-Z0-9\=\(\)\+,\-.\/:\? ']+$`) +) + +// validatePrintableString returns an error if the provided encoded printable +// string doesn't adhere to the character set defined in RFC 5280. +func validatePrintableString(rawPS []byte) error { + if !printableStringRegex.Match(rawPS) { + return errors.New("encoded PrintableString contained illegal characters") + } + return nil +} + +type subjectPrintableStringBadAlpha struct { +} + +func (l *subjectPrintableStringBadAlpha) Initialize() error { + return nil +} + +// CheckApplies returns true for any certificate with a non-empty RawSubject. +func (l *subjectPrintableStringBadAlpha) CheckApplies(c *x509.Certificate) bool { + return len(c.RawSubject) > 0 +} + +// Execute checks the certificate's RawSubject to ensure that any +// PrintableString attribute/value pairs in the Subject match the character set +// defined for this type in RFC 5280. 
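+// (Editorial example, not in the upstream comment: a subject CN encoded as a
+// PrintableString containing an underscore, e.g. "my_host.example.com",
+// falls outside this alphabet and would be reported by this lint.)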
An Error level LintResult is returned if any +// of the PrintableString attributes do not match a regular expression for the +// allowed character set. +func (l *subjectPrintableStringBadAlpha) Execute(c *x509.Certificate) *LintResult { + rdnSequence := util.RawRDNSequence{} + rest, err := asn1.Unmarshal(c.RawSubject, &rdnSequence) + if err != nil { + return &LintResult{ + Status: Fatal, + Details: "Failed to Unmarshal RawSubject into RawRDNSequence", + } + } + if len(rest) > 0 { + return &LintResult{ + Status: Fatal, + Details: "Trailing data after RawSubject RawRDNSequence", + } + } + + for _, attrTypeAndValueSet := range rdnSequence { + for _, attrTypeAndValue := range attrTypeAndValueSet { + // If the attribute type is a PrintableString the bytes of the attribute + // value must match the printable string alphabet. + if attrTypeAndValue.Value.Tag == asn1.TagPrintableString { + if err := validatePrintableString(attrTypeAndValue.Value.Bytes); err != nil { + return &LintResult{ + Status: Error, + Details: fmt.Sprintf("RawSubject attr oid %s %s", + attrTypeAndValue.Type, err.Error()), + } + } + } + } + } + + return &LintResult{ + Status: Pass, + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_printable_string_badalpha", + Description: "PrintableString type's alphabet only includes a-z, A-Z, 0-9, and 11 special characters", + Citation: "RFC 5280: Appendix B. ASN.1 Notes", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectPrintableStringBadAlpha{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_state_name_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_state_name_max_length.go new file mode 100644 index 00000000..fa841e81 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_state_name_max_length.go @@ -0,0 +1,60 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* + ub-state-name INTEGER ::= 128 +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectStateNameMaxLength struct{} + +func (l *subjectStateNameMaxLength) Initialize() error { + return nil +} + +func (l *subjectStateNameMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectStateNameMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.Province { + if utf8.RuneCountInString(j) > 128 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_state_name_max_length", + Description: "The 'State Name' field of the subject MUST be less than 128 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectStateNameMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_street_address_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_street_address_max_length.go new file mode 100644 index 00000000..463fb23c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_street_address_max_length.go @@ -0,0 +1,59 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +/************************************************ +ITU-T X.520 (02/2001) UpperBounds +ub-street-address INTEGER ::= 128 + +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectStreetAddressMaxLength struct{} + +func (l *subjectStreetAddressMaxLength) Initialize() error { + return nil +} + +func (l *subjectStreetAddressMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectStreetAddressMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.StreetAddress { + if utf8.RuneCountInString(j) > 128 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_street_address_max_length", + Description: "The 'StreetAddress' field of the subject MUST be less than 129 characters", + Citation: "ITU-T X.520 (02/2001) UpperBounds", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectStreetAddressMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_surname_max_length.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_surname_max_length.go new file mode 100644 index 00000000..d0593f5d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_subject_surname_max_length.go @@ -0,0 +1,61 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************ +RFC 5280: A.1 + * In this Appendix, there is a list of upperbounds + for fields in a x509 Certificate. 
* + ub-surname-length INTEGER ::= 40 + +************************************************/ + +import ( + "unicode/utf8" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type subjectSurnameMaxLength struct{} + +func (l *subjectSurnameMaxLength) Initialize() error { + return nil +} + +func (l *subjectSurnameMaxLength) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *subjectSurnameMaxLength) Execute(c *x509.Certificate) *LintResult { + for _, j := range c.Subject.Surname { + if utf8.RuneCountInString(j) > 40 { + return &LintResult{Status: Error} + } + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_subject_surname_max_length", + Description: "The 'Surname' field of the subject MUST be less than 41 characters", + Citation: "RFC 5280: A.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &subjectSurnameMaxLength{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_tbs_signature_rsa_encryption_parameter_not_null.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_tbs_signature_rsa_encryption_parameter_not_null.go new file mode 100644 index 00000000..f29dae38 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_tbs_signature_rsa_encryption_parameter_not_null.go @@ -0,0 +1,81 @@ +package lints + +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/******************************************************************************************************* +"RFC5280: RFC 4055, Section 5" +RSA: Encoded algorithm identifier MUST have NULL parameters. 
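+
+Illustration (editorial, derived from the RSAAlgorithmIDToDER table in
+util/algorithm_identifier.go further below in this change): a correctly
+encoded sha256WithRSAEncryption AlgorithmIdentifier is the 15-byte DER
+sequence
+  30 0d 06 09 2a 86 48 86 f7 0d 01 01 0b 05 00
+whose trailing 05 00 is the required NULL parameter; an encoding that omits
+those two bytes is what this lint reports as an error.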
+*******************************************************************************************************/ + +import ( + "fmt" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +type rsaTBSSignatureEncryptionParamNotNULL struct{} + +func (l *rsaTBSSignatureEncryptionParamNotNULL) Initialize() error { + return nil +} + +func (l *rsaTBSSignatureEncryptionParamNotNULL) CheckApplies(c *x509.Certificate) bool { + _, ok := util.RSAAlgorithmIDToDER[c.SignatureAlgorithmOID.String()] + return ok +} + +func (l *rsaTBSSignatureEncryptionParamNotNULL) Execute(c *x509.Certificate) *LintResult { + input := cryptobyte.String(c.RawTBSCertificate) + + var tbsCert cryptobyte.String + if !input.ReadASN1(&tbsCert, cryptobyte_asn1.SEQUENCE) { + return &LintResult{Status: Fatal, Details: "error reading tbsCertificate"} + } + + if !tbsCert.SkipOptionalASN1(cryptobyte_asn1.Tag(0).Constructed().ContextSpecific()) { + return &LintResult{Status: Fatal, Details: "error reading tbsCertificate.version"} + } + + if !tbsCert.SkipASN1(cryptobyte_asn1.INTEGER) { + return &LintResult{Status: Fatal, Details: "error reading tbsCertificate.serialNumber"} + } + + var signatureAlgoID cryptobyte.String + var tag cryptobyte_asn1.Tag + // use ReadAnyElement to preserve tag and length octets + if !tbsCert.ReadAnyASN1Element(&signatureAlgoID, &tag) { + return &LintResult{Status: Fatal, Details: "error reading tbsCertificate.signature"} + } + + if err := util.CheckAlgorithmIDParamNotNULL(signatureAlgoID, c.SignatureAlgorithmOID); err != nil { + return &LintResult{Status: Error, Details: fmt.Sprintf("certificate tbsCertificate.signature %s", err.Error())} + } + + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_tbs_signature_rsa_encryption_parameter_not_null", + Description: "RSA: Encoded signature algorithm identifier MUST have NULL parameters", + Citation: "RFC 4055, Section 5", + Source: RFC5280, // RFC4055 is referenced in RFC5280, Section 1 + EffectiveDate: util.RFC5280Date, + Lint: &rsaTBSSignatureEncryptionParamNotNULL{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_utc_time_does_not_include_seconds.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_utc_time_does_not_include_seconds.go new file mode 100644 index 00000000..49ed6371 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_utc_time_does_not_include_seconds.go @@ -0,0 +1,82 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/************************************************************************ +4.1.2.5.1. UTCTime +The universal time type, UTCTime, is a standard ASN.1 type intended +for representation of dates and time. 
UTCTime specifies the year +through the two low-order digits and time is specified to the +precision of one minute or one second. UTCTime includes either Z +(for Zulu, or Greenwich Mean Time) or a time differential. +For the purposes of this profile, UTCTime values MUST be expressed in +Greenwich Mean Time (Zulu) and MUST include seconds (i.e., times are +YYMMDDHHMMSSZ), even where the number of seconds is zero. Conforming +systems MUST interpret the year field (YY) as follows: + + Where YY is greater than or equal to 50, the year SHALL be + interpreted as 19YY; and + + Where YY is less than 50, the year SHALL be interpreted as 20YY. +************************************************************************/ + +import ( + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type utcNoSecond struct { +} + +func (l *utcNoSecond) Initialize() error { + return nil +} + +func (l *utcNoSecond) CheckApplies(c *x509.Certificate) bool { + firstDate, secondDate := util.GetTimes(c) + beforeTag, afterTag := util.FindTimeType(firstDate, secondDate) + date1Utc := beforeTag == 23 + date2Utc := afterTag == 23 + return date1Utc || date2Utc +} + +func (l *utcNoSecond) Execute(c *x509.Certificate) *LintResult { + date1, date2 := util.GetTimes(c) + beforeTag, afterTag := util.FindTimeType(date1, date2) + date1Utc := beforeTag == 23 + date2Utc := afterTag == 23 + if date1Utc { + if len(date1.Bytes) != 13 && len(date1.Bytes) != 17 { + return &LintResult{Status: Error} + } + } + if date2Utc { + if len(date2.Bytes) != 13 && len(date2.Bytes) != 17 { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_utc_time_does_not_include_seconds", + Description: "UTCTime values MUST include seconds", + Citation: "RFC 5280: 4.1.2.5.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &utcNoSecond{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_utc_time_not_in_zulu.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_utc_time_not_in_zulu.go new file mode 100644 index 00000000..43d6b993 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_utc_time_not_in_zulu.go @@ -0,0 +1,97 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +/*********************************************************************** +4.1.2.5.1. UTCTime + The universal time type, UTCTime, is a standard ASN.1 type intended + for representation of dates and time. UTCTime specifies the year + through the two low-order digits and time is specified to the + precision of one minute or one second. UTCTime includes either Z + (for Zulu, or Greenwich Mean Time) or a time differential. 
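+
+(Editorial aside, not part of the RFC text quoted here: a UTCTime carrying a
+time differential, e.g. "9912312359-0700", is syntactically valid ASN.1 but
+violates this profile; the lint below reports parsed times whose location is
+not UTC.)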
+ + For the purposes of this profile, UTCTime values MUST be expressed in + Greenwich Mean Time (Zulu) and MUST include seconds (i.e., times are + YYMMDDHHMMSSZ), even where the number of seconds is zero. Conforming + systems MUST interpret the year field (YY) as follows: + + Where YY is greater than or equal to 50, the year SHALL be + interpreted as 19YY; and + + Where YY is less than 50, the year SHALL be interpreted as 20YY. +***********************************************************************/ + +import ( + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type utcTimeGMT struct { +} + +func (l *utcTimeGMT) Initialize() error { + return nil +} + +func (l *utcTimeGMT) CheckApplies(c *x509.Certificate) bool { + firstDate, secondDate := util.GetTimes(c) + beforeTag, afterTag := util.FindTimeType(firstDate, secondDate) + date1Utc := beforeTag == 23 + date2Utc := afterTag == 23 + return date1Utc || date2Utc +} + +func (l *utcTimeGMT) Execute(c *x509.Certificate) *LintResult { + var r LintStatus + firstDate, secondDate := util.GetTimes(c) + beforeTag, afterTag := util.FindTimeType(firstDate, secondDate) + date1Utc := beforeTag == 23 + date2Utc := afterTag == 23 + if date1Utc { + // UTC Tests on notBefore + utcNotGmt(c.NotBefore, &r) + } + if date2Utc { + // UTC Tests on NotAfter + utcNotGmt(c.NotAfter, &r) + } + return &LintResult{Status: r} +} + +func utcNotGmt(t time.Time, r *LintStatus) { + // If we already ran this test and it resulted in error, don't want to discard that + // And now we use the afterBool to make sure we test the right time + if *r == Error { + return + } + if t.Location() != time.UTC { + *r = Error + } else { + *r = Pass + } +} + +func init() { + RegisterLint(&Lint{ + Name: "e_utc_time_not_in_zulu", + Description: "UTCTime values MUST be expressed in Greenwich Mean Time (Zulu)", + Citation: "RFC 5280: 4.1.2.5.1", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &utcTimeGMT{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_validity_time_not_positive.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_validity_time_not_positive.go new file mode 100644 index 00000000..b41de4ef --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_validity_time_not_positive.go @@ -0,0 +1,52 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+/************************************************
+AWSLabs certlint: a certificate's validity period
+MUST be positive, i.e. notBefore MUST predate notAfter.
+************************************************/
+
+import (
+	"github.com/zmap/zcrypto/x509"
+	"github.com/zmap/zlint/util"
+)
+
+type validityNegative struct{}
+
+func (l *validityNegative) Initialize() error {
+	return nil
+}
+
+func (l *validityNegative) CheckApplies(c *x509.Certificate) bool {
+	return true
+}
+
+func (l *validityNegative) Execute(c *x509.Certificate) *LintResult {
+	if c.NotBefore.After(c.NotAfter) {
+		return &LintResult{Status: Error}
+	}
+	return &LintResult{Status: Pass}
+}
+
+func init() {
+	RegisterLint(&Lint{
+		Name:          "e_validity_time_not_positive",
+		Description:   "Certificates MUST have a positive time for which they are valid",
+		Citation:      "AWSLabs certlint",
+		Source:        AWSLabs,
+		EffectiveDate: util.ZeroDate,
+		Lint:          &validityNegative{},
+	})
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_wrong_time_format_pre2050.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_wrong_time_format_pre2050.go
new file mode 100644
index 00000000..b56d33ea
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/lint_wrong_time_format_pre2050.go
@@ -0,0 +1,85 @@
+package lints
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/*********************************************************************
+CAs conforming to this profile MUST always encode certificate
+validity dates through the year 2049 as UTCTime; certificate validity
+dates in 2050 or later MUST be encoded as GeneralizedTime.
+Conforming applications MUST be able to process validity dates that
+are encoded in either UTCTime or GeneralizedTime.
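+
+Editorial example (not part of the RFC text quoted above): a notAfter of
+2049-12-31 23:59:59 must be the UTCTime "491231235959Z", while a notAfter of
+2050-01-01 00:00:00 must be the GeneralizedTime "20500101000000Z". The lint
+below reports GeneralizedTime encodings for dates that fall before
+util.GeneralizedDate (presumably the 2050 cutoff).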
+*********************************************************************/ + +import ( + "encoding/asn1" + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/util" +) + +type generalizedPre2050 struct{} + +func (l *generalizedPre2050) Initialize() error { + return nil +} + +func (l *generalizedPre2050) CheckApplies(c *x509.Certificate) bool { + return true +} + +func (l *generalizedPre2050) Execute(c *x509.Certificate) *LintResult { + date1, date2 := util.GetTimes(c) + var t time.Time + type1, type2 := util.FindTimeType(date1, date2) + if type1 == 24 { + temp, err := asn1.Marshal(date1) + if err != nil { + return &LintResult{Status: Fatal} + } + _, err = asn1.Unmarshal(temp, &t) + if err != nil { + return &LintResult{Status: Fatal} + } + if t.Before(util.GeneralizedDate) { + return &LintResult{Status: Error} + } + } + if type2 == 24 { + temp, err := asn1.Marshal(date2) + if err != nil { + return &LintResult{Status: Fatal} + } + _, err = asn1.Unmarshal(temp, &t) + if err != nil { + return &LintResult{Status: Fatal} + } + if t.Before(util.GeneralizedDate) { + return &LintResult{Status: Error} + } + } + return &LintResult{Status: Pass} +} + +func init() { + RegisterLint(&Lint{ + Name: "e_wrong_time_format_pre2050", + Description: "Certificates valid through the year 2049 MUST be encoded in UTC time", + Citation: "RFC 5280: 4.1.2.5", + Source: RFC5280, + EffectiveDate: util.RFC2459Date, + Lint: &generalizedPre2050{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/result.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/result.go new file mode 100644 index 00000000..6ea447c3 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/result.go @@ -0,0 +1,106 @@ +package lints + +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +import ( + "encoding/json" + "fmt" + "strings" +) + +// LintStatus is an enum returned by lints inside of a LintResult. +type LintStatus int + +// Known LintStatus values +const ( + // Unused / unset LintStatus + Reserved LintStatus = 0 + + // Not Applicable + NA LintStatus = 1 + + // Not Effective + NE LintStatus = 2 + + Pass LintStatus = 3 + Notice LintStatus = 4 + Warn LintStatus = 5 + Error LintStatus = 6 + Fatal LintStatus = 7 +) + +var ( + // statusLabelToLintStatus is used to work backwards from + // a LintStatus.String() to the LintStatus. This is used by + // LintStatus.Unmarshal. + statusLabelToLintStatus = map[string]LintStatus{ + Reserved.String(): Reserved, + NA.String(): NA, + NE.String(): NE, + Pass.String(): Pass, + Notice.String(): Notice, + Warn.String(): Warn, + Error.String(): Error, + Fatal.String(): Fatal, + } +) + +// LintResult contains a LintStatus, and an optional human-readable description. +// The output of a lint is a LintResult. 
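+//
+// Illustrative example (editorial, not from the upstream source): with the
+// MarshalJSON implementation below, a LintResult with Status Error and a
+// populated Details field serializes as
+//
+//	{"result":"error","details":"encoded PrintableString contained illegal characters"}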
+type LintResult struct { + Status LintStatus `json:"result"` + Details string `json:"details,omitempty"` +} + +// MarshalJSON implements the json.Marshaler interface. +func (e LintStatus) MarshalJSON() ([]byte, error) { + s := e.String() + return json.Marshal(s) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (e *LintStatus) UnmarshalJSON(data []byte) error { + key := strings.ReplaceAll(string(data), `"`, "") + if status, ok := statusLabelToLintStatus[key]; ok { + *e = status + } else { + return fmt.Errorf("bad LintStatus JSON value: %s", string(data)) + } + return nil +} + +// String returns the canonical representation of a LintStatus as a string. +func (e LintStatus) String() string { + switch e { + case Reserved: + return "reserved" + case NA: + return "NA" + case NE: + return "NE" + case Pass: + return "pass" + case Notice: + return "info" + case Warn: + return "warn" + case Error: + return "error" + case Fatal: + return "fatal" + default: + return "" + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/testingUtil.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/testingUtil.go new file mode 100644 index 00000000..27da0bcb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/lints/testingUtil.go @@ -0,0 +1,51 @@ +package lints + +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// Contains resources necessary to the Unit Test Cases + +import ( + "encoding/pem" + "fmt" + "io/ioutil" + "strings" + + "github.com/zmap/zcrypto/x509" +) + +func ReadCertificate(inPath string) *x509.Certificate { + // All of this can be encapsulated in a function + data, err := ioutil.ReadFile(inPath) + if err != nil { + //read failure, die horribly here + fmt.Println(err) + panic("File read failed!") + } + var textData string = string(data) + if strings.Contains(textData, "-BEGIN CERTIFICATE-") { + block, _ := pem.Decode(data) + if block == nil { + panic("PEM decode failed!") + } + data = block.Bytes + } + theCert, err := x509.ParseCertificate(data) + if err != nil { + //die horribly here + fmt.Println(err) + return nil + } + return theCert +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/makefile b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/makefile new file mode 100644 index 00000000..0267ff76 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/makefile @@ -0,0 +1,26 @@ +SHELL := /bin/bash + +CMDS = zlint zlint-gtld-update +CMD_PREFIX = ./cmd/ +GO_ENV = GO111MODULE="on" GOFLAGS="-mod=vendor" +BUILD = $(GO_ENV) go build +TEST = $(GO_ENV) GORACE=halt_on_error=1 go test -race + +all: $(CMDS) + +zlint: + $(BUILD) $(CMD_PREFIX)$(@) + +zlint-gtld-update: + $(BUILD) $(CMD_PREFIX)$(@) + +clean: + rm -f $(CMDS) + +test: + $(TEST) ./... + +format-check: + diff <(find . 
-name '*.go' -not -path './vendor/*' -print | xargs -n1 gofmt -l) <(printf "") + +.PHONY: clean zlint zlint-gtld-update test format-check diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/newLint.sh b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/newLint.sh new file mode 100755 index 00000000..5580d4f6 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/newLint.sh @@ -0,0 +1,34 @@ +# Script to create new lint from template + +USAGE="Usage: $0 + +ARG1: File_name/TestName (no 'lint_' prefix) +ARG2: Struct_name" + +if [ $# -eq 0 ]; then + echo "No arguments provided..." + echo "$USAGE" + exit 1 +fi + +if [ $# -eq 1 ]; then + echo "Not enough arguments provided..." + echo "$USAGE" + exit 1 +fi + +if [ -e lint_$1.go ] +then + echo "File already exists. Can't make new file." + exit 1 +fi + +FILENAME=$1 +TESTNAME=$2 + +cp template lints/lint_$FILENAME.go + +cat "lints/lint_$FILENAME.go" | sed "s/SUBST/$2/g" | sed "s/SUBTEST/$1/g" > temp.go +mv -f temp.go "lints/lint_$FILENAME.go" + +echo "Created file lint_$FILENAME.go with test name $TESTNAME" diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/template b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/template new file mode 100644 index 00000000..007f2f43 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/template @@ -0,0 +1,44 @@ +/* + * ZLint Copyright 2019 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package lints + +import ( + "github.com/zmap/zcrypto/x509" +) + +type SUBST struct{} + +func (l *SUBST) Initialize() error { + return nil +} + +func (l *SUBST) CheckApplies(c *x509.Certificate) bool { + // Add conditions for application here +} + +func (l *SUBST) Execute(c *x509.Certificate) *LintResult { + // Add actual lint here +} + +func init() { + RegisterLint(&Lint{ + Name: "SUBTEST", + Description: "Fill this in...", + Citation: "Fill this in...", + Source: UnknownLintSource, + EffectiveDate: "Change this...", + Lint: &SUBST{}, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/algorithm_identifier.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/algorithm_identifier.go new file mode 100644 index 00000000..921b2867 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/algorithm_identifier.go @@ -0,0 +1,86 @@ +package util + +import ( + "bytes" + "encoding/asn1" + "errors" + "fmt" + + "golang.org/x/crypto/cryptobyte" + cryptobyte_asn1 "golang.org/x/crypto/cryptobyte/asn1" +) + +// RSAAlgorithmIDToDER contains DER representations of pkix.AlgorithmIdentifier for different RSA OIDs with Parameters as asn1.NULL +var RSAAlgorithmIDToDER = map[string][]byte{ + // rsaEncryption + "1.2.840.113549.1.1.1": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0x1, 0x5, 0x0}, + // md2WithRSAEncryption + "1.2.840.113549.1.1.2": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0x2, 0x5, 0x0}, + // md5WithRSAEncryption + "1.2.840.113549.1.1.4": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0x4, 0x5, 0x0}, + // sha-1WithRSAEncryption + "1.2.840.113549.1.1.5": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0x5, 0x5, 0x0}, + // sha224WithRSAEncryption + "1.2.840.113549.1.1.14": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0xe, 0x5, 0x0}, + // sha256WithRSAEncryption + "1.2.840.113549.1.1.11": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0xb, 0x5, 0x0}, + // sha384WithRSAEncryption + "1.2.840.113549.1.1.12": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0xc, 0x5, 0x0}, + // sha512WithRSAEncryption + "1.2.840.113549.1.1.13": {0x30, 0x0d, 0x6, 0x9, 0x2a, 0x86, 0x48, 0x86, 0xf7, 0xd, 0x1, 0x1, 0xd, 0x5, 0x0}, +} + +// CheckAlgorithmIDParamNotNULL parses an AlgorithmIdentifier with algorithm OID rsaEncryption to check the Param field is asn1.NULL +// Expects DER-encoded AlgorithmIdentifier including tag and length +func CheckAlgorithmIDParamNotNULL(algorithmIdentifier []byte, requiredAlgoID asn1.ObjectIdentifier) error { + expectedAlgoIDBytes, ok := RSAAlgorithmIDToDER[requiredAlgoID.String()] + if !ok { + return errors.New("error algorithmID to check is not RSA") + } + + algorithmSequence := cryptobyte.String(algorithmIdentifier) + + // byte comparison of algorithm sequence and checking no trailing data is present + var algorithmBytes []byte + if algorithmSequence.ReadBytes(&algorithmBytes, len(expectedAlgoIDBytes)) { + if bytes.Compare(algorithmBytes, expectedAlgoIDBytes) == 0 && algorithmSequence.Empty() { + return nil + } + } + + // re-parse to get an error message detailing what did not match in the byte comparison + algorithmSequence = cryptobyte.String(algorithmIdentifier) + var algorithm cryptobyte.String + if !algorithmSequence.ReadASN1(&algorithm, cryptobyte_asn1.SEQUENCE) { + return errors.New("error reading algorithm") + } + + encryptionOID := 
asn1.ObjectIdentifier{} + if !algorithm.ReadASN1ObjectIdentifier(&encryptionOID) { + return errors.New("error reading algorithm OID") + } + + if !encryptionOID.Equal(requiredAlgoID) { + return fmt.Errorf("algorithm OID is not equal to %s", requiredAlgoID.String()) + } + + if algorithm.Empty() { + return errors.New("RSA algorithm identifier missing required NULL parameter") + } + + var nullValue cryptobyte.String + if !algorithm.ReadASN1(&nullValue, cryptobyte_asn1.NULL) { + return errors.New("RSA algorithm identifier with non-NULL parameter") + } + + if len(nullValue) != 0 { + return errors.New("RSA algorithm identifier with NULL parameter containing data") + } + + // ensure algorithm is empty and no trailing data is present + if !algorithm.Empty() { + return errors.New("RSA algorithm identifier with trailing data") + } + + return errors.New("RSA algorithm appears correct, but didn't match byte-wise comparison") +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ca.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ca.go new file mode 100644 index 00000000..5a29640e --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ca.go @@ -0,0 +1,57 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import ( + "github.com/zmap/zcrypto/x509" +) + +// IsCACert returns true if c has IsCA set. +func IsCACert(c *x509.Certificate) bool { + return c.IsCA +} + +// IsRootCA returns true if c has IsCA set and is also self-signed. +func IsRootCA(c *x509.Certificate) bool { + return IsCACert(c) && IsSelfSigned(c) +} + +// IsSubCA returns true if c has IsCA set, but is not self-signed. +func IsSubCA(c *x509.Certificate) bool { + return IsCACert(c) && !IsSelfSigned(c) +} + +// IsSelfSigned returns true if SelfSigned is set. +func IsSelfSigned(c *x509.Certificate) bool { + return c.SelfSigned +} + +// IsSubscriberCert returns true for if a certificate is not a CA and not +// self-signed. +func IsSubscriberCert(c *x509.Certificate) bool { + return !IsCACert(c) && !IsSelfSigned(c) +} + +func IsServerAuthCert(cert *x509.Certificate) bool { + if len(cert.ExtKeyUsage) == 0 { + return true + } + for _, eku := range cert.ExtKeyUsage { + if eku == x509.ExtKeyUsageAny || eku == x509.ExtKeyUsageServerAuth { + return true + } + } + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/countries.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/countries.go new file mode 100644 index 00000000..fcc826ce --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/countries.go @@ -0,0 +1,51 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import "strings" + +var countries = map[string]bool{ + "AD": true, "AE": true, "AF": true, "AG": true, "AI": true, "AL": true, "AM": true, "AN": true, "AO": true, "AQ": true, "AR": true, + "AS": true, "AT": true, "AU": true, "AW": true, "AX": true, "AZ": true, "BA": true, "BB": true, "BD": true, "BE": true, "BF": true, "BG": true, + "BH": true, "BI": true, "BJ": true, "BL": true, "BM": true, "BN": true, "BO": true, "BQ": true, "BR": true, "BS": true, "BT": true, "BV": true, + "BW": true, "BY": true, "BZ": true, "CA": true, "CC": true, "CD": true, "CF": true, "CG": true, "CH": true, "CI": true, "CK": true, "CL": true, + "CM": true, "CN": true, "CO": true, "CR": true, "CU": true, "CV": true, "CW": true, "CX": true, "CY": true, "CZ": true, "DE": true, "DJ": true, + "DK": true, "DM": true, "DO": true, "DZ": true, "EC": true, "EE": true, "EG": true, "EH": true, "ER": true, "ES": true, "ET": true, "FI": true, + "FJ": true, "FK": true, "FM": true, "FO": true, "FR": true, "GA": true, "GB": true, "GD": true, "GE": true, "GF": true, "GG": true, "GH": true, + "GI": true, "GL": true, "GM": true, "GN": true, "GP": true, "GQ": true, "GR": true, "GS": true, "GT": true, "GU": true, "GW": true, "GY": true, + "HK": true, "HM": true, "HN": true, "HR": true, "HT": true, "HU": true, "ID": true, "IE": true, "IL": true, "IM": true, "IN": true, "IO": true, + "IQ": true, "IR": true, "IS": true, "IT": true, "JE": true, "JM": true, "JO": true, "JP": true, "KE": true, "KG": true, "KH": true, "KI": true, + "KM": true, "KN": true, "KP": true, "KR": true, "KW": true, "KY": true, "KZ": true, "LA": true, "LB": true, "LC": true, "LI": true, "LK": true, + "LR": true, "LS": true, "LT": true, "LU": true, "LV": true, "LY": true, "MA": true, "MC": true, "MD": true, "ME": true, "MF": true, "MG": true, + "MH": true, "MK": true, "ML": true, "MM": true, "MN": true, "MO": true, "MP": true, "MQ": true, "MR": true, "MS": true, "MT": true, "MU": true, + "MV": true, "MW": true, "MX": true, "MY": true, "MZ": true, "NA": true, "NC": true, "NE": true, "NF": true, "NG": true, "NI": true, "NL": true, + "NO": true, "NP": true, "NR": true, "NU": true, "NZ": true, "OM": true, "PA": true, "PE": true, "PF": true, "PG": true, "PH": true, "PK": true, + "PL": true, "PM": true, "PN": true, "PR": true, "PS": true, "PT": true, "PW": true, "PY": true, "QA": true, "RE": true, "RO": true, "RS": true, + "RU": true, "RW": true, "SA": true, "SB": true, "SC": true, "SD": true, "SE": true, "SG": true, "SH": true, "SI": true, "SJ": true, "SK": true, + "SL": true, "SM": true, "SN": true, "SO": true, "SR": true, "SS": true, "ST": true, "SV": true, "SX": true, "SY": true, "SZ": true, "TC": true, + "TD": true, "TF": true, "TG": true, "TH": true, "TJ": true, "TK": true, "TL": true, "TM": true, "TN": true, "TO": true, "TR": true, "TT": true, + "TV": true, "TW": true, "TZ": true, "UA": true, "UG": true, "UM": true, "US": true, "UY": true, "UZ": true, "VA": true, "VC": true, "VE": true, + "VG": true, "VI": true, "VN": true, "VU": true, "WF": true, "WS": true, "YE": true, "YT": true, "ZA": true, "ZM": true, "ZW": true, "XX": true, +} + 
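+// Example (editorial sketch, not part of the upstream file): IsISOCountryCode
+// below upper-cases its input before the lookup, so IsISOCountryCode("us")
+// returns true, while IsISOCountryCode("ZZ") returns false ("XX" appears in
+// the table above as a placeholder; "ZZ" does not).
+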
+// IsISOCountryCode returns true if the input is a known two-letter country +// code. +// +// TODO: Document where the list of known countries came from. +func IsISOCountryCode(in string) bool { + in = strings.ToUpper(in) + _, ok := countries[in] + return ok +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/encodings.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/encodings.go new file mode 100644 index 00000000..92a30d26 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/encodings.go @@ -0,0 +1,136 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import ( + "bytes" + "encoding/asn1" + "errors" + "regexp" + "strings" + "unicode" + "unicode/utf16" + + "github.com/zmap/zcrypto/x509/pkix" +) + +// CheckRDNSequenceWhiteSpace returns true if there is leading or trailing +// whitespace in any name attribute in the sequence, respectively. +func CheckRDNSequenceWhiteSpace(raw []byte) (leading, trailing bool, err error) { + var seq pkix.RDNSequence + if _, err = asn1.Unmarshal(raw, &seq); err != nil { + return + } + for _, rdn := range seq { + for _, atv := range rdn { + if !IsNameAttribute(atv.Type) { + continue + } + value, ok := atv.Value.(string) + if !ok { + continue + } + if leftStrip := strings.TrimLeftFunc(value, unicode.IsSpace); leftStrip != value { + leading = true + } + if rightStrip := strings.TrimRightFunc(value, unicode.IsSpace); rightStrip != value { + trailing = true + } + } + } + return +} + +// IsIA5String returns true if raw is an IA5String, and returns false otherwise. +func IsIA5String(raw []byte) bool { + for _, b := range raw { + i := int(b) + if i > 127 || i < 0 { + return false + } + } + return true +} + +func IsInPrefSyn(name string) bool { + // If the DNS name is just a space, it is valid + if name == " " { + return true + } + // This is the expression that matches the ABNF syntax from RFC 1034: Sec 3.5, specifically for subdomain since the " " case for domain is covered above + prefsyn := regexp.MustCompile(`^([[:alpha:]]{1}(([[:alnum:]]|[-])*[[:alnum:]]{1})*){1}([.][[:alpha:]]{1}(([[:alnum:]]|[-])*[[:alnum:]]{1})*)*$`) + return prefsyn.MatchString(name) +} + +// AllAlternateNameWithTagAreIA5 returns true if all sequence members with the +// given tag are encoded as IA5 strings, and false otherwise. If it encounters +// errors parsing asn1, err will be non-nil. 
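+//
+// (Editorial note, not in the upstream comment: the tag values are the
+// context-specific tags of the GeneralName CHOICE in RFC 5280, section
+// 4.2.1.6, so a caller would pass 1 for rfc822Name, 2 for dNSName, or 6 for
+// uniformResourceIdentifier entries of a SAN or IAN extension.)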
+func AllAlternateNameWithTagAreIA5(ext *pkix.Extension, tag int) (bool, error) { + var seq asn1.RawValue + var err error + // Unmarshal the extension as a sequence + if _, err = asn1.Unmarshal(ext.Value, &seq); err != nil { + return false, err + } + // Ensure the sequence matches what we expect for SAN/IAN + if !seq.IsCompound || seq.Tag != asn1.TagSequence || seq.Class != asn1.ClassUniversal { + err = asn1.StructuralError{Msg: "bad alternate name sequence"} + return false, err + } + + // Iterate over the sequence and look for items tagged with tag + rest := seq.Bytes + for len(rest) > 0 { + var v asn1.RawValue + rest, err = asn1.Unmarshal(rest, &v) + if err != nil { + return false, err + } + if v.Tag == tag { + if !IsIA5String(v.Bytes) { + return false, nil + } + } + } + + return true, nil +} + +// IsEmptyASN1Sequence returns true if +// *input is an empty sequence (0x30, 0x00) or +// *len(inout) < 2 +// This check covers more cases than just empty sequence checks but it makes sense from the usage perspective +var emptyASN1Sequence = []byte{0x30, 0x00} + +func IsEmptyASN1Sequence(input []byte) bool { + return len(input) < 2 || bytes.Equal(input, emptyASN1Sequence) +} + +// ParseBMPString returns a uint16 encoded string following the specification for a BMPString type +func ParseBMPString(bmpString []byte) (string, error) { + if len(bmpString)%2 != 0 { + return "", errors.New("odd-length BMP string") + } + // strip terminator if present + if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 { + bmpString = bmpString[:l-2] + } + s := make([]uint16, 0, len(bmpString)/2) + for len(bmpString) > 0 { + s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1])) + bmpString = bmpString[2:] + } + return string(utf16.Decode(s)), nil +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ev.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ev.go new file mode 100644 index 00000000..f9b44024 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ev.go @@ -0,0 +1,71 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package util + +import ( + "encoding/asn1" +) + +var evoids = map[string]bool{ + "1.3.159.1.17.1": true, + "1.3.6.1.4.1.34697.2.1": true, + "1.3.6.1.4.1.34697.2.2": true, + "1.3.6.1.4.1.34697.2.3": true, + "1.3.6.1.4.1.34697.2.4": true, + "1.2.40.0.17.1.22": true, + "2.16.578.1.26.1.3.3": true, + "1.3.6.1.4.1.17326.10.14.2.1.2": true, + "1.3.6.1.4.1.17326.10.8.2.1.2": true, + "1.3.6.1.4.1.6449.1.2.1.5.1": true, + "2.16.840.1.114412.2.1": true, + "2.16.840.1.114412.1.3.0.2": true, + "2.16.528.1.1001.1.1.1.12.6.1.1.1": true, + "2.16.792.3.0.4.1.1.4": true, + "2.16.840.1.114028.10.1.2": true, + "0.4.0.2042.1.4": true, + "0.4.0.2042.1.5": true, + "1.3.6.1.4.1.13177.10.1.3.10": true, + "1.3.6.1.4.1.14370.1.6": true, + "1.3.6.1.4.1.4146.1.1": true, + "2.16.840.1.114413.1.7.23.3": true, + "1.3.6.1.4.1.14777.6.1.1": true, + "2.16.792.1.2.1.1.5.7.1.9": true, + "1.3.6.1.4.1.782.1.2.1.8.1": true, + "1.3.6.1.4.1.22234.2.5.2.3.1": true, + "1.3.6.1.4.1.8024.0.2.100.1.2": true, + "1.2.392.200091.100.721.1": true, + "2.16.840.1.114414.1.7.23.3": true, + "1.3.6.1.4.1.23223.2": true, + "1.3.6.1.4.1.23223.1.1.1": true, + "2.16.756.1.83.21.0": true, + "2.16.756.1.89.1.2.1.1": true, + "1.3.6.1.4.1.7879.13.24.1": true, + "2.16.840.1.113733.1.7.48.1": true, + "2.16.840.1.114404.1.1.2.4.1": true, + "2.16.840.1.113733.1.7.23.6": true, + "1.3.6.1.4.1.6334.1.100.1": true, + "2.16.840.1.114171.500.9": true, + "1.3.6.1.4.1.36305.2": true, +} + +// IsEV returns true if the input is a known Extended Validation OID. +func IsEV(in []asn1.ObjectIdentifier) bool { + for _, oid := range in { + if _, ok := evoids[oid.String()]; ok { + return true + } + } + return false +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/fqdn.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/fqdn.go new file mode 100644 index 00000000..d9cdf97a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/fqdn.go @@ -0,0 +1,127 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */
+
+package util
+
+import (
+	"net"
+	"net/url"
+	"strings"
+
+	"github.com/weppos/publicsuffix-go/publicsuffix"
+	zcutil "github.com/zmap/zcrypto/util"
+	"github.com/zmap/zcrypto/x509"
+)
+
+func RemovePrependedQuestionMarks(domain string) string {
+	for strings.HasPrefix(domain, "?.") {
+		domain = domain[2:]
+	}
+	return domain
+}
+
+func RemovePrependedWildcard(domain string) string {
+	if strings.HasPrefix(domain, "*.") {
+		domain = domain[2:]
+	}
+	return domain
+}
+
+func IsFQDN(domain string) bool {
+	domain = RemovePrependedWildcard(domain)
+	domain = RemovePrependedQuestionMarks(domain)
+	return zcutil.IsURL(domain)
+}
+
+func GetAuthority(uri string) string {
+	parsed, err := url.Parse(uri)
+	if err != nil {
+		return ""
+	}
+	if parsed.Opaque != "" {
+		// non-empty Opaque means that there is no authority
+		return ""
+	}
+	if len(uri) < 4 {
+		return ""
+	}
+	// https://tools.ietf.org/html/rfc3986#section-3
+	// The only time an authority is present is if there is a // after the scheme.
+	firstColon := strings.Index(uri, ":")
+	postScheme := uri[firstColon+1:]
+	// After the scheme, there is the hier-part, optionally followed by a query or fragment.
+	if !strings.HasPrefix(postScheme, "//") {
+		// authority is always prefixed by //
+		return ""
+	}
+	for i := 2; i < len(postScheme); i++ {
+		// In the hier-part, the authority is followed by either an absolute path, or
+		// the empty string. So the authority is terminated by the start of an
+		// absolute path (/), the start of a fragment (#), or the start of a query (?).
+		if postScheme[i] == '/' || postScheme[i] == '#' || postScheme[i] == '?' {
+			return postScheme[2:i]
+		}
+	}
+	// Found no absolute path, fragment or query -- so the authority is the only data after the scheme://
+	return postScheme[2:]
+}
+
+func GetHost(auth string) string {
+	begin := strings.Index(auth, "@")
+	if begin == len(auth)-1 {
+		begin = -1
+	}
+	end := strings.Index(auth, ":")
+	if end == -1 {
+		end = len(auth)
+	}
+	if end < begin {
+		return ""
+	}
+	return auth[begin+1 : end]
+}
+
+func AuthIsFQDNOrIP(auth string) bool {
+	return IsFQDNOrIP(GetHost(auth))
+}
+
+func IsFQDNOrIP(host string) bool {
+	if IsFQDN(host) {
+		return true
+	}
+	if net.ParseIP(host) != nil {
+		return true
+	}
+	return false
+}
+
+func DNSNamesExist(cert *x509.Certificate) bool {
+	return cert.Subject.CommonName != "" || len(cert.DNSNames) != 0
+}
+
+func ICANNPublicSuffixParse(domain string) (*publicsuffix.DomainName, error) {
+	return publicsuffix.ParseFromListWithOptions(publicsuffix.DefaultList, domain, &publicsuffix.FindOptions{IgnorePrivate: true, DefaultRule: publicsuffix.DefaultRule})
+}
+
+func CommonNameIsIP(cert *x509.Certificate) bool {
+	return net.ParseIP(cert.Subject.CommonName) != nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/gtld.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/gtld.go
new file mode 100644
index 00000000..a5482c54
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/gtld.go
@@ -0,0 +1,122 @@
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License.
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import ( + "fmt" + "strings" + "time" + + "github.com/zmap/zcrypto/x509" +) + +// This package uses the `zlint-gtld-update` command to generate a `tldMap` map. +//go:generate zlint-gtld-update ./gtld_map.go + +const ( + GTLDPeriodDateFormat = "2006-01-02" +) + +// GTLDPeriod is a struct representing a gTLD's validity period. The field names +// are chosen to match the data returned by the ICANN gTLD v2 JSON registry[0]. +// See the `zlint-gtld-update` command for more information. +// [0] - https://www.icann.org/resources/registries/gtlds/v2/gtlds.json +type GTLDPeriod struct { + // GTLD is the GTLD the period corresponds to. It is used only for friendly + // error messages from `Valid` + GTLD string + // DelegationDate is the date at which ICANN delegated the gTLD into existence + // from the root DNS, or is empty if the gTLD was never delegated. + DelegationDate string + // RemovalDate is the date at which ICANN removed the gTLD delegation from the + // root DNS, or is empty if the gTLD is still delegated and has not been + // removed. + RemovalDate string +} + +// Valid determines if the provided `when` time is within the GTLDPeriod for the +// gTLD. E.g. whether a certificate issued at `when` with a subject identifier +// using the specified gTLD can be considered a valid use of the gTLD. +func (p GTLDPeriod) Valid(when time.Time) error { + // NOTE: We can throw away the errors from time.Parse in this function because + // the zlint-gtld-update command only writes entries to the generated gTLD map + // after the dates have been verified as parseable + notBefore, _ := time.Parse(GTLDPeriodDateFormat, p.DelegationDate) + if when.Before(notBefore) { + return fmt.Errorf(`gTLD ".%s" is not valid until %s`, + p.GTLD, p.DelegationDate) + } + // The removal date may be empty. We only need to check `when` against the + // removal when it isn't empty + if p.RemovalDate != "" { + notAfter, _ := time.Parse(GTLDPeriodDateFormat, p.RemovalDate) + if when.After(notAfter) { + return fmt.Errorf(`gTLD ".%s" is not valid after %s`, + p.GTLD, p.RemovalDate) + } + } + return nil +} + +// HasValidTLD checks that a domain ends in a valid TLD that was delegated in +// the root DNS at the time specified. +func HasValidTLD(domain string, when time.Time) bool { + labels := strings.Split(strings.ToLower(domain), ".") + rightLabel := labels[len(labels)-1] + // if the rightmost label is not present in the tldMap, it isn't valid and + // never was. + if tldPeriod, present := tldMap[rightLabel]; !present { + return false + } else if tldPeriod.Valid(when) != nil { + // If the TLD exists but the date is outside of the gTLD's validity period + // then it is not a valid TLD. + return false + } + // Otherwise the TLD exists, and was a valid TLD delegated in the root DNS + // at the time of the given date. + return true +} + +// IsInTLDMap checks that a label is present in the TLD map. It does not +// consider the TLD's validity period and whether the TLD may have been removed, +// only whether it was ever a TLD that was delegated. 
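+//
+// For example, with the generated map below (values from this vendored
+// snapshot): IsInTLDMap("com") and IsInTLDMap("ACTIVE") both return true,
+// since lookups are lowercased and ".active" remains in the map despite its
+// 2019-02-17 removal, while a label that was never delegated returns false.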
+func IsInTLDMap(label string) bool {
+	label = strings.ToLower(label)
+	_, ok := tldMap[label]
+	return ok
+}
+
+// CertificateSubjInTLD checks whether the provided Certificate has
+// a Subject Common Name or DNS Subject Alternate Name that ends in the provided
+// TLD label. If IsInTLDMap(label) returns false then CertificateSubjInTLD will
+// return false.
+func CertificateSubjInTLD(c *x509.Certificate, label string) bool {
+	label = strings.ToLower(label)
+	if strings.HasPrefix(label, ".") {
+		label = label[1:]
+	}
+	if !IsInTLDMap(label) {
+		return false
+	}
+	for _, name := range append(c.DNSNames, c.Subject.CommonName) {
+		if strings.HasSuffix(name, "."+label) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/gtld_map.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/gtld_map.go
new file mode 100644
index 00000000..abe33e38
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/gtld_map.go
@@ -0,0 +1,7835 @@
+// Code generated by go generate; DO NOT EDIT.
+// This file was generated by zlint-gtld-update.
+
+/*
+ * ZLint Copyright 2018 Regents of the University of Michigan
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy
+ * of the License at http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */ + +package util + +var tldMap = map[string]GTLDPeriod{ + "aaa": { + GTLD: "aaa", + DelegationDate: "2015-08-28", + RemovalDate: "", + }, + "aarp": { + GTLD: "aarp", + DelegationDate: "2015-11-03", + RemovalDate: "", + }, + "abarth": { + GTLD: "abarth", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "abb": { + GTLD: "abb", + DelegationDate: "2015-04-25", + RemovalDate: "", + }, + "abbott": { + GTLD: "abbott", + DelegationDate: "2015-03-07", + RemovalDate: "", + }, + "abbvie": { + GTLD: "abbvie", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "abc": { + GTLD: "abc", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "able": { + GTLD: "able", + DelegationDate: "2016-06-21", + RemovalDate: "", + }, + "abogado": { + GTLD: "abogado", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "abudhabi": { + GTLD: "abudhabi", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "ac": { + GTLD: "ac", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "academy": { + GTLD: "academy", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "accenture": { + GTLD: "accenture", + DelegationDate: "2015-05-09", + RemovalDate: "", + }, + "accountant": { + GTLD: "accountant", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "accountants": { + GTLD: "accountants", + DelegationDate: "2014-05-07", + RemovalDate: "", + }, + "aco": { + GTLD: "aco", + DelegationDate: "2015-08-27", + RemovalDate: "", + }, + "active": { + GTLD: "active", + DelegationDate: "2014-06-26", + RemovalDate: "2019-02-17", + }, + "actor": { + GTLD: "actor", + DelegationDate: "2014-02-26", + RemovalDate: "", + }, + "ad": { + GTLD: "ad", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "adac": { + GTLD: "adac", + DelegationDate: "2016-01-26", + RemovalDate: "", + }, + "ads": { + GTLD: "ads", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "adult": { + GTLD: "adult", + DelegationDate: "2014-12-06", + RemovalDate: "", + }, + "ae": { + GTLD: "ae", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "aeg": { + GTLD: "aeg", + DelegationDate: "2015-06-20", + RemovalDate: "", + }, + "aero": { + GTLD: "aero", + DelegationDate: "2002-03-02", + RemovalDate: "", + }, + "aetna": { + GTLD: "aetna", + DelegationDate: "2016-05-20", + RemovalDate: "", + }, + "af": { + GTLD: "af", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "afamilycompany": { + GTLD: "afamilycompany", + DelegationDate: "2016-07-31", + RemovalDate: "", + }, + "afl": { + GTLD: "afl", + DelegationDate: "2015-03-28", + RemovalDate: "", + }, + "africa": { + GTLD: "africa", + DelegationDate: "2017-02-15", + RemovalDate: "", + }, + "ag": { + GTLD: "ag", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "agakhan": { + GTLD: "agakhan", + DelegationDate: "2016-04-16", + RemovalDate: "", + }, + "agency": { + GTLD: "agency", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "ai": { + GTLD: "ai", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "aig": { + GTLD: "aig", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "aigo": { + GTLD: "aigo", + DelegationDate: "2016-08-16", + RemovalDate: "", + }, + "airbus": { + GTLD: "airbus", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "airforce": { + GTLD: "airforce", + DelegationDate: "2014-04-30", + RemovalDate: "", + }, + "airtel": { + GTLD: "airtel", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "akdn": { + GTLD: "akdn", + DelegationDate: "2016-04-16", + RemovalDate: "", + }, + "al": { + GTLD: "al", + 
DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "alfaromeo": { + GTLD: "alfaromeo", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "alibaba": { + GTLD: "alibaba", + DelegationDate: "2016-01-16", + RemovalDate: "", + }, + "alipay": { + GTLD: "alipay", + DelegationDate: "2016-01-16", + RemovalDate: "", + }, + "allfinanz": { + GTLD: "allfinanz", + DelegationDate: "2014-10-01", + RemovalDate: "", + }, + "allstate": { + GTLD: "allstate", + DelegationDate: "2016-07-14", + RemovalDate: "", + }, + "ally": { + GTLD: "ally", + DelegationDate: "2016-03-24", + RemovalDate: "", + }, + "alsace": { + GTLD: "alsace", + DelegationDate: "2014-10-04", + RemovalDate: "", + }, + "alstom": { + GTLD: "alstom", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "am": { + GTLD: "am", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "americanexpress": { + GTLD: "americanexpress", + DelegationDate: "2016-08-08", + RemovalDate: "", + }, + "americanfamily": { + GTLD: "americanfamily", + DelegationDate: "2016-07-26", + RemovalDate: "", + }, + "amex": { + GTLD: "amex", + DelegationDate: "2016-08-08", + RemovalDate: "", + }, + "amfam": { + GTLD: "amfam", + DelegationDate: "2016-07-23", + RemovalDate: "", + }, + "amica": { + GTLD: "amica", + DelegationDate: "2015-08-29", + RemovalDate: "", + }, + "amsterdam": { + GTLD: "amsterdam", + DelegationDate: "2014-12-25", + RemovalDate: "", + }, + "analytics": { + GTLD: "analytics", + DelegationDate: "2015-12-21", + RemovalDate: "", + }, + "android": { + GTLD: "android", + DelegationDate: "2014-11-12", + RemovalDate: "", + }, + "anquan": { + GTLD: "anquan", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "anz": { + GTLD: "anz", + DelegationDate: "2016-06-21", + RemovalDate: "", + }, + "ao": { + GTLD: "ao", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "aol": { + GTLD: "aol", + DelegationDate: "2016-11-04", + RemovalDate: "", + }, + "apartments": { + GTLD: "apartments", + DelegationDate: "2015-02-10", + RemovalDate: "", + }, + "app": { + GTLD: "app", + DelegationDate: "2015-07-02", + RemovalDate: "", + }, + "apple": { + GTLD: "apple", + DelegationDate: "2015-11-03", + RemovalDate: "", + }, + "aq": { + GTLD: "aq", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "aquarelle": { + GTLD: "aquarelle", + DelegationDate: "2014-12-02", + RemovalDate: "", + }, + "ar": { + GTLD: "ar", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "arab": { + GTLD: "arab", + DelegationDate: "2017-05-23", + RemovalDate: "", + }, + "aramco": { + GTLD: "aramco", + DelegationDate: "2015-10-15", + RemovalDate: "", + }, + "archi": { + GTLD: "archi", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "army": { + GTLD: "army", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "arpa": { + GTLD: "arpa", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "art": { + GTLD: "art", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "arte": { + GTLD: "arte", + DelegationDate: "2015-10-20", + RemovalDate: "", + }, + "as": { + GTLD: "as", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "asda": { + GTLD: "asda", + DelegationDate: "2016-08-14", + RemovalDate: "", + }, + "asia": { + GTLD: "asia", + DelegationDate: "2007-05-02", + RemovalDate: "", + }, + "associates": { + GTLD: "associates", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "at": { + GTLD: "at", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "athleta": { + GTLD: "athleta", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + 
"attorney": { + GTLD: "attorney", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "au": { + GTLD: "au", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "auction": { + GTLD: "auction", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "audi": { + GTLD: "audi", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "audible": { + GTLD: "audible", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "audio": { + GTLD: "audio", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "auspost": { + GTLD: "auspost", + DelegationDate: "2016-08-17", + RemovalDate: "", + }, + "author": { + GTLD: "author", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "auto": { + GTLD: "auto", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "autos": { + GTLD: "autos", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "avianca": { + GTLD: "avianca", + DelegationDate: "2016-03-09", + RemovalDate: "", + }, + "aw": { + GTLD: "aw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "aws": { + GTLD: "aws", + DelegationDate: "2016-03-25", + RemovalDate: "", + }, + "ax": { + GTLD: "ax", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "axa": { + GTLD: "axa", + DelegationDate: "2014-03-19", + RemovalDate: "", + }, + "az": { + GTLD: "az", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "azure": { + GTLD: "azure", + DelegationDate: "2015-06-06", + RemovalDate: "", + }, + "ba": { + GTLD: "ba", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "baby": { + GTLD: "baby", + DelegationDate: "2016-04-08", + RemovalDate: "", + }, + "baidu": { + GTLD: "baidu", + DelegationDate: "2016-01-05", + RemovalDate: "", + }, + "banamex": { + GTLD: "banamex", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "bananarepublic": { + GTLD: "bananarepublic", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "band": { + GTLD: "band", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "bank": { + GTLD: "bank", + DelegationDate: "2015-01-09", + RemovalDate: "", + }, + "bar": { + GTLD: "bar", + DelegationDate: "2014-02-27", + RemovalDate: "", + }, + "barcelona": { + GTLD: "barcelona", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "barclaycard": { + GTLD: "barclaycard", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "barclays": { + GTLD: "barclays", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "barefoot": { + GTLD: "barefoot", + DelegationDate: "2016-03-24", + RemovalDate: "", + }, + "bargains": { + GTLD: "bargains", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "baseball": { + GTLD: "baseball", + DelegationDate: "2016-10-30", + RemovalDate: "", + }, + "basketball": { + GTLD: "basketball", + DelegationDate: "2016-10-19", + RemovalDate: "", + }, + "bauhaus": { + GTLD: "bauhaus", + DelegationDate: "2015-04-05", + RemovalDate: "", + }, + "bayern": { + GTLD: "bayern", + DelegationDate: "2014-05-03", + RemovalDate: "", + }, + "bb": { + GTLD: "bb", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bbc": { + GTLD: "bbc", + DelegationDate: "2015-03-21", + RemovalDate: "", + }, + "bbt": { + GTLD: "bbt", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "bbva": { + GTLD: "bbva", + DelegationDate: "2015-05-27", + RemovalDate: "", + }, + "bcg": { + GTLD: "bcg", + DelegationDate: "2016-03-09", + RemovalDate: "", + }, + "bcn": { + GTLD: "bcn", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "bd": { + GTLD: "bd", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + 
"be": { + GTLD: "be", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "beats": { + GTLD: "beats", + DelegationDate: "2015-11-03", + RemovalDate: "", + }, + "beauty": { + GTLD: "beauty", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "beer": { + GTLD: "beer", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "bentley": { + GTLD: "bentley", + DelegationDate: "2015-07-09", + RemovalDate: "", + }, + "berlin": { + GTLD: "berlin", + DelegationDate: "2014-01-08", + RemovalDate: "", + }, + "best": { + GTLD: "best", + DelegationDate: "2014-02-27", + RemovalDate: "", + }, + "bestbuy": { + GTLD: "bestbuy", + DelegationDate: "2016-07-19", + RemovalDate: "", + }, + "bet": { + GTLD: "bet", + DelegationDate: "2015-07-24", + RemovalDate: "", + }, + "bf": { + GTLD: "bf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bg": { + GTLD: "bg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bh": { + GTLD: "bh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bharti": { + GTLD: "bharti", + DelegationDate: "2015-06-14", + RemovalDate: "", + }, + "bi": { + GTLD: "bi", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bible": { + GTLD: "bible", + DelegationDate: "2015-06-02", + RemovalDate: "", + }, + "bid": { + GTLD: "bid", + DelegationDate: "2014-03-02", + RemovalDate: "", + }, + "bike": { + GTLD: "bike", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "bing": { + GTLD: "bing", + DelegationDate: "2015-06-10", + RemovalDate: "", + }, + "bingo": { + GTLD: "bingo", + DelegationDate: "2015-02-04", + RemovalDate: "", + }, + "bio": { + GTLD: "bio", + DelegationDate: "2014-06-02", + RemovalDate: "", + }, + "biz": { + GTLD: "biz", + DelegationDate: "2001-09-25", + RemovalDate: "", + }, + "bj": { + GTLD: "bj", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "black": { + GTLD: "black", + DelegationDate: "2014-03-27", + RemovalDate: "", + }, + "blackfriday": { + GTLD: "blackfriday", + DelegationDate: "2014-04-22", + RemovalDate: "", + }, + "blanco": { + GTLD: "blanco", + DelegationDate: "2016-06-21", + RemovalDate: "2019-02-13", + }, + "blockbuster": { + GTLD: "blockbuster", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "blog": { + GTLD: "blog", + DelegationDate: "2016-05-18", + RemovalDate: "", + }, + "bloomberg": { + GTLD: "bloomberg", + DelegationDate: "2014-11-05", + RemovalDate: "", + }, + "blue": { + GTLD: "blue", + DelegationDate: "2014-02-05", + RemovalDate: "", + }, + "bm": { + GTLD: "bm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bms": { + GTLD: "bms", + DelegationDate: "2015-09-22", + RemovalDate: "", + }, + "bmw": { + GTLD: "bmw", + DelegationDate: "2014-06-21", + RemovalDate: "", + }, + "bn": { + GTLD: "bn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bnl": { + GTLD: "bnl", + DelegationDate: "2015-06-26", + RemovalDate: "2019-07-30", + }, + "bnpparibas": { + GTLD: "bnpparibas", + DelegationDate: "2014-08-14", + RemovalDate: "", + }, + "bo": { + GTLD: "bo", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "boats": { + GTLD: "boats", + DelegationDate: "2015-02-25", + RemovalDate: "", + }, + "boehringer": { + GTLD: "boehringer", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "bofa": { + GTLD: "bofa", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "bom": { + GTLD: "bom", + DelegationDate: "2015-09-26", + RemovalDate: "", + }, + "bond": { + GTLD: "bond", + DelegationDate: "2015-03-27", + RemovalDate: "", + }, + "boo": { + GTLD: "boo", + DelegationDate: 
"2014-08-30", + RemovalDate: "", + }, + "book": { + GTLD: "book", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "booking": { + GTLD: "booking", + DelegationDate: "2016-07-23", + RemovalDate: "", + }, + "boots": { + GTLD: "boots", + DelegationDate: "2015-08-05", + RemovalDate: "2018-04-06", + }, + "bosch": { + GTLD: "bosch", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "bostik": { + GTLD: "bostik", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "boston": { + GTLD: "boston", + DelegationDate: "2016-11-29", + RemovalDate: "", + }, + "bot": { + GTLD: "bot", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "boutique": { + GTLD: "boutique", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "box": { + GTLD: "box", + DelegationDate: "2016-11-11", + RemovalDate: "", + }, + "br": { + GTLD: "br", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bradesco": { + GTLD: "bradesco", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "bridgestone": { + GTLD: "bridgestone", + DelegationDate: "2015-05-01", + RemovalDate: "", + }, + "broadway": { + GTLD: "broadway", + DelegationDate: "2015-11-18", + RemovalDate: "", + }, + "broker": { + GTLD: "broker", + DelegationDate: "2015-04-29", + RemovalDate: "", + }, + "brother": { + GTLD: "brother", + DelegationDate: "2015-05-12", + RemovalDate: "", + }, + "brussels": { + GTLD: "brussels", + DelegationDate: "2014-06-18", + RemovalDate: "", + }, + "bs": { + GTLD: "bs", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bt": { + GTLD: "bt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "budapest": { + GTLD: "budapest", + DelegationDate: "2014-09-23", + RemovalDate: "", + }, + "bugatti": { + GTLD: "bugatti", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "build": { + GTLD: "build", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "builders": { + GTLD: "builders", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "business": { + GTLD: "business", + DelegationDate: "2014-08-22", + RemovalDate: "", + }, + "buy": { + GTLD: "buy", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "buzz": { + GTLD: "buzz", + DelegationDate: "2013-12-18", + RemovalDate: "", + }, + "bv": { + GTLD: "bv", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bw": { + GTLD: "bw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "by": { + GTLD: "by", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bz": { + GTLD: "bz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "bzh": { + GTLD: "bzh", + DelegationDate: "2014-06-17", + RemovalDate: "", + }, + "ca": { + GTLD: "ca", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cab": { + GTLD: "cab", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "cafe": { + GTLD: "cafe", + DelegationDate: "2015-04-05", + RemovalDate: "", + }, + "cal": { + GTLD: "cal", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "call": { + GTLD: "call", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "calvinklein": { + GTLD: "calvinklein", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "cam": { + GTLD: "cam", + DelegationDate: "2016-06-16", + RemovalDate: "", + }, + "camera": { + GTLD: "camera", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "camp": { + GTLD: "camp", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "cancerresearch": { + GTLD: "cancerresearch", + DelegationDate: "2014-07-03", + RemovalDate: "", + }, + "canon": { + GTLD: "canon", + DelegationDate: 
"2015-02-04", + RemovalDate: "", + }, + "capetown": { + GTLD: "capetown", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "capital": { + GTLD: "capital", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "capitalone": { + GTLD: "capitalone", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "car": { + GTLD: "car", + DelegationDate: "2015-09-09", + RemovalDate: "", + }, + "caravan": { + GTLD: "caravan", + DelegationDate: "2014-08-15", + RemovalDate: "", + }, + "cards": { + GTLD: "cards", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "care": { + GTLD: "care", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "career": { + GTLD: "career", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "careers": { + GTLD: "careers", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "cars": { + GTLD: "cars", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "cartier": { + GTLD: "cartier", + DelegationDate: "2014-12-11", + RemovalDate: "", + }, + "casa": { + GTLD: "casa", + DelegationDate: "2014-09-23", + RemovalDate: "", + }, + "case": { + GTLD: "case", + DelegationDate: "2016-10-30", + RemovalDate: "", + }, + "caseih": { + GTLD: "caseih", + DelegationDate: "2016-10-30", + RemovalDate: "", + }, + "cash": { + GTLD: "cash", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "casino": { + GTLD: "casino", + DelegationDate: "2015-02-19", + RemovalDate: "", + }, + "cat": { + GTLD: "cat", + DelegationDate: "2005-12-20", + RemovalDate: "", + }, + "catering": { + GTLD: "catering", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "catholic": { + GTLD: "catholic", + DelegationDate: "2016-12-01", + RemovalDate: "", + }, + "cba": { + GTLD: "cba", + DelegationDate: "2015-06-22", + RemovalDate: "", + }, + "cbn": { + GTLD: "cbn", + DelegationDate: "2015-02-13", + RemovalDate: "", + }, + "cbre": { + GTLD: "cbre", + DelegationDate: "2016-07-02", + RemovalDate: "", + }, + "cbs": { + GTLD: "cbs", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "cc": { + GTLD: "cc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cd": { + GTLD: "cd", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ceb": { + GTLD: "ceb", + DelegationDate: "2015-08-08", + RemovalDate: "", + }, + "center": { + GTLD: "center", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "ceo": { + GTLD: "ceo", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "cern": { + GTLD: "cern", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "cf": { + GTLD: "cf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cfa": { + GTLD: "cfa", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "cfd": { + GTLD: "cfd", + DelegationDate: "2015-03-13", + RemovalDate: "", + }, + "cg": { + GTLD: "cg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ch": { + GTLD: "ch", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "chanel": { + GTLD: "chanel", + DelegationDate: "2015-08-05", + RemovalDate: "", + }, + "channel": { + GTLD: "channel", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "charity": { + GTLD: "charity", + DelegationDate: "2018-06-07", + RemovalDate: "", + }, + "chase": { + GTLD: "chase", + DelegationDate: "2016-02-27", + RemovalDate: "", + }, + "chat": { + GTLD: "chat", + DelegationDate: "2015-02-04", + RemovalDate: "", + }, + "cheap": { + GTLD: "cheap", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "chintai": { + GTLD: "chintai", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + 
"chloe": { + GTLD: "chloe", + DelegationDate: "2015-03-09", + RemovalDate: "2017-10-06", + }, + "christmas": { + GTLD: "christmas", + DelegationDate: "2014-02-26", + RemovalDate: "", + }, + "chrome": { + GTLD: "chrome", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "chrysler": { + GTLD: "chrysler", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "church": { + GTLD: "church", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "ci": { + GTLD: "ci", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cipriani": { + GTLD: "cipriani", + DelegationDate: "2015-10-09", + RemovalDate: "", + }, + "circle": { + GTLD: "circle", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "cisco": { + GTLD: "cisco", + DelegationDate: "2015-05-15", + RemovalDate: "", + }, + "citadel": { + GTLD: "citadel", + DelegationDate: "2016-07-23", + RemovalDate: "", + }, + "citi": { + GTLD: "citi", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "citic": { + GTLD: "citic", + DelegationDate: "2014-04-29", + RemovalDate: "", + }, + "city": { + GTLD: "city", + DelegationDate: "2014-07-10", + RemovalDate: "", + }, + "cityeats": { + GTLD: "cityeats", + DelegationDate: "2015-11-10", + RemovalDate: "", + }, + "ck": { + GTLD: "ck", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cl": { + GTLD: "cl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "claims": { + GTLD: "claims", + DelegationDate: "2014-05-07", + RemovalDate: "", + }, + "cleaning": { + GTLD: "cleaning", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "click": { + GTLD: "click", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "clinic": { + GTLD: "clinic", + DelegationDate: "2014-04-22", + RemovalDate: "", + }, + "clinique": { + GTLD: "clinique", + DelegationDate: "2015-12-28", + RemovalDate: "", + }, + "clothing": { + GTLD: "clothing", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "cloud": { + GTLD: "cloud", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "club": { + GTLD: "club", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "clubmed": { + GTLD: "clubmed", + DelegationDate: "2015-10-02", + RemovalDate: "", + }, + "cm": { + GTLD: "cm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cn": { + GTLD: "cn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "co": { + GTLD: "co", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "coach": { + GTLD: "coach", + DelegationDate: "2014-11-26", + RemovalDate: "", + }, + "codes": { + GTLD: "codes", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "coffee": { + GTLD: "coffee", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "college": { + GTLD: "college", + DelegationDate: "2014-04-10", + RemovalDate: "", + }, + "cologne": { + GTLD: "cologne", + DelegationDate: "2014-03-19", + RemovalDate: "", + }, + "com": { + GTLD: "com", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "comcast": { + GTLD: "comcast", + DelegationDate: "2016-07-07", + RemovalDate: "", + }, + "commbank": { + GTLD: "commbank", + DelegationDate: "2015-06-22", + RemovalDate: "", + }, + "community": { + GTLD: "community", + DelegationDate: "2014-01-25", + RemovalDate: "", + }, + "company": { + GTLD: "company", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "compare": { + GTLD: "compare", + DelegationDate: "2016-01-15", + RemovalDate: "", + }, + "computer": { + GTLD: "computer", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "comsec": { + GTLD: "comsec", + 
DelegationDate: "2015-11-16", + RemovalDate: "", + }, + "condos": { + GTLD: "condos", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "construction": { + GTLD: "construction", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "consulting": { + GTLD: "consulting", + DelegationDate: "2014-04-01", + RemovalDate: "", + }, + "contact": { + GTLD: "contact", + DelegationDate: "2015-12-22", + RemovalDate: "", + }, + "contractors": { + GTLD: "contractors", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "cooking": { + GTLD: "cooking", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "cookingchannel": { + GTLD: "cookingchannel", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "cool": { + GTLD: "cool", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "coop": { + GTLD: "coop", + DelegationDate: "2001-12-20", + RemovalDate: "", + }, + "corsica": { + GTLD: "corsica", + DelegationDate: "2015-05-16", + RemovalDate: "", + }, + "country": { + GTLD: "country", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "coupon": { + GTLD: "coupon", + DelegationDate: "2016-02-19", + RemovalDate: "", + }, + "coupons": { + GTLD: "coupons", + DelegationDate: "2015-05-13", + RemovalDate: "", + }, + "courses": { + GTLD: "courses", + DelegationDate: "2015-02-25", + RemovalDate: "", + }, + "cr": { + GTLD: "cr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "credit": { + GTLD: "credit", + DelegationDate: "2014-05-07", + RemovalDate: "", + }, + "creditcard": { + GTLD: "creditcard", + DelegationDate: "2014-04-29", + RemovalDate: "", + }, + "creditunion": { + GTLD: "creditunion", + DelegationDate: "2015-11-10", + RemovalDate: "", + }, + "cricket": { + GTLD: "cricket", + DelegationDate: "2014-11-17", + RemovalDate: "", + }, + "crown": { + GTLD: "crown", + DelegationDate: "2015-06-19", + RemovalDate: "", + }, + "crs": { + GTLD: "crs", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "cruise": { + GTLD: "cruise", + DelegationDate: "2016-11-12", + RemovalDate: "", + }, + "cruises": { + GTLD: "cruises", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "csc": { + GTLD: "csc", + DelegationDate: "2015-09-01", + RemovalDate: "", + }, + "cu": { + GTLD: "cu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cuisinella": { + GTLD: "cuisinella", + DelegationDate: "2014-07-03", + RemovalDate: "", + }, + "cv": { + GTLD: "cv", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cw": { + GTLD: "cw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cx": { + GTLD: "cx", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cy": { + GTLD: "cy", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "cymru": { + GTLD: "cymru", + DelegationDate: "2014-08-08", + RemovalDate: "", + }, + "cyou": { + GTLD: "cyou", + DelegationDate: "2015-04-03", + RemovalDate: "", + }, + "cz": { + GTLD: "cz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "dabur": { + GTLD: "dabur", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "dad": { + GTLD: "dad", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "dance": { + GTLD: "dance", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "data": { + GTLD: "data", + DelegationDate: "2016-12-20", + RemovalDate: "", + }, + "date": { + GTLD: "date", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "dating": { + GTLD: "dating", + DelegationDate: "2014-01-25", + RemovalDate: "", + }, + "datsun": { + GTLD: "datsun", + DelegationDate: "2015-03-04", + RemovalDate: "", 
+ }, + "day": { + GTLD: "day", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "dclk": { + GTLD: "dclk", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "dds": { + GTLD: "dds", + DelegationDate: "2016-05-11", + RemovalDate: "", + }, + "de": { + GTLD: "de", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "deal": { + GTLD: "deal", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "dealer": { + GTLD: "dealer", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "deals": { + GTLD: "deals", + DelegationDate: "2014-07-10", + RemovalDate: "", + }, + "degree": { + GTLD: "degree", + DelegationDate: "2014-05-30", + RemovalDate: "", + }, + "delivery": { + GTLD: "delivery", + DelegationDate: "2014-11-01", + RemovalDate: "", + }, + "dell": { + GTLD: "dell", + DelegationDate: "2015-10-14", + RemovalDate: "", + }, + "deloitte": { + GTLD: "deloitte", + DelegationDate: "2016-01-29", + RemovalDate: "", + }, + "delta": { + GTLD: "delta", + DelegationDate: "2015-07-11", + RemovalDate: "", + }, + "democrat": { + GTLD: "democrat", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "dental": { + GTLD: "dental", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "dentist": { + GTLD: "dentist", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "desi": { + GTLD: "desi", + DelegationDate: "2014-04-10", + RemovalDate: "", + }, + "design": { + GTLD: "design", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "dev": { + GTLD: "dev", + DelegationDate: "2014-12-18", + RemovalDate: "", + }, + "dhl": { + GTLD: "dhl", + DelegationDate: "2016-06-02", + RemovalDate: "", + }, + "diamonds": { + GTLD: "diamonds", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "diet": { + GTLD: "diet", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "digital": { + GTLD: "digital", + DelegationDate: "2014-05-07", + RemovalDate: "", + }, + "direct": { + GTLD: "direct", + DelegationDate: "2014-07-02", + RemovalDate: "", + }, + "directory": { + GTLD: "directory", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "discount": { + GTLD: "discount", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "discover": { + GTLD: "discover", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "dish": { + GTLD: "dish", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "diy": { + GTLD: "diy", + DelegationDate: "2016-08-25", + RemovalDate: "", + }, + "dj": { + GTLD: "dj", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "dk": { + GTLD: "dk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "dm": { + GTLD: "dm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "dnp": { + GTLD: "dnp", + DelegationDate: "2014-03-11", + RemovalDate: "", + }, + "do": { + GTLD: "do", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "docs": { + GTLD: "docs", + DelegationDate: "2014-12-18", + RemovalDate: "", + }, + "doctor": { + GTLD: "doctor", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "dodge": { + GTLD: "dodge", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "dog": { + GTLD: "dog", + DelegationDate: "2015-04-29", + RemovalDate: "", + }, + "doha": { + GTLD: "doha", + DelegationDate: "2015-03-25", + RemovalDate: "2019-04-09", + }, + "domains": { + GTLD: "domains", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "doosan": { + GTLD: "doosan", + DelegationDate: "2014-12-13", + RemovalDate: "2016-02-24", + }, + "dot": { + GTLD: "dot", + DelegationDate: "2016-05-18", + RemovalDate: "", + }, + 
"download": { + GTLD: "download", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "drive": { + GTLD: "drive", + DelegationDate: "2015-06-20", + RemovalDate: "", + }, + "dtv": { + GTLD: "dtv", + DelegationDate: "2016-05-27", + RemovalDate: "", + }, + "dubai": { + GTLD: "dubai", + DelegationDate: "2016-01-07", + RemovalDate: "", + }, + "duck": { + GTLD: "duck", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "dunlop": { + GTLD: "dunlop", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "duns": { + GTLD: "duns", + DelegationDate: "2016-07-23", + RemovalDate: "", + }, + "dupont": { + GTLD: "dupont", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "durban": { + GTLD: "durban", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "dvag": { + GTLD: "dvag", + DelegationDate: "2014-09-27", + RemovalDate: "", + }, + "dvr": { + GTLD: "dvr", + DelegationDate: "2016-09-30", + RemovalDate: "", + }, + "dz": { + GTLD: "dz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "earth": { + GTLD: "earth", + DelegationDate: "2015-05-14", + RemovalDate: "", + }, + "eat": { + GTLD: "eat", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "ec": { + GTLD: "ec", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "eco": { + GTLD: "eco", + DelegationDate: "2016-08-28", + RemovalDate: "", + }, + "edeka": { + GTLD: "edeka", + DelegationDate: "2016-01-21", + RemovalDate: "", + }, + "edu": { + GTLD: "edu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "education": { + GTLD: "education", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "ee": { + GTLD: "ee", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "eg": { + GTLD: "eg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "email": { + GTLD: "email", + DelegationDate: "2014-01-02", + RemovalDate: "", + }, + "emerck": { + GTLD: "emerck", + DelegationDate: "2014-10-22", + RemovalDate: "", + }, + "energy": { + GTLD: "energy", + DelegationDate: "2014-11-01", + RemovalDate: "", + }, + "engineer": { + GTLD: "engineer", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "engineering": { + GTLD: "engineering", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "enterprises": { + GTLD: "enterprises", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "epost": { + GTLD: "epost", + DelegationDate: "2016-06-07", + RemovalDate: "2019-02-15", + }, + "epson": { + GTLD: "epson", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "equipment": { + GTLD: "equipment", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "er": { + GTLD: "er", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ericsson": { + GTLD: "ericsson", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "erni": { + GTLD: "erni", + DelegationDate: "2015-03-12", + RemovalDate: "", + }, + "es": { + GTLD: "es", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "esq": { + GTLD: "esq", + DelegationDate: "2014-08-29", + RemovalDate: "", + }, + "estate": { + GTLD: "estate", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "esurance": { + GTLD: "esurance", + DelegationDate: "2016-07-23", + RemovalDate: "", + }, + "et": { + GTLD: "et", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "etisalat": { + GTLD: "etisalat", + DelegationDate: "2017-06-01", + RemovalDate: "", + }, + "eu": { + GTLD: "eu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "eurovision": { + GTLD: "eurovision", + DelegationDate: "2014-12-06", + RemovalDate: "", + }, + "eus": 
{ + GTLD: "eus", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "events": { + GTLD: "events", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "everbank": { + GTLD: "everbank", + DelegationDate: "2014-11-26", + RemovalDate: "", + }, + "exchange": { + GTLD: "exchange", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "expert": { + GTLD: "expert", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "exposed": { + GTLD: "exposed", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "express": { + GTLD: "express", + DelegationDate: "2015-04-05", + RemovalDate: "", + }, + "extraspace": { + GTLD: "extraspace", + DelegationDate: "2016-03-25", + RemovalDate: "", + }, + "fage": { + GTLD: "fage", + DelegationDate: "2015-08-08", + RemovalDate: "", + }, + "fail": { + GTLD: "fail", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "fairwinds": { + GTLD: "fairwinds", + DelegationDate: "2015-11-13", + RemovalDate: "", + }, + "faith": { + GTLD: "faith", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "family": { + GTLD: "family", + DelegationDate: "2015-08-11", + RemovalDate: "", + }, + "fan": { + GTLD: "fan", + DelegationDate: "2015-03-16", + RemovalDate: "", + }, + "fans": { + GTLD: "fans", + DelegationDate: "2015-02-19", + RemovalDate: "", + }, + "farm": { + GTLD: "farm", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "farmers": { + GTLD: "farmers", + DelegationDate: "2016-06-25", + RemovalDate: "", + }, + "fashion": { + GTLD: "fashion", + DelegationDate: "2014-12-06", + RemovalDate: "", + }, + "fast": { + GTLD: "fast", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "fedex": { + GTLD: "fedex", + DelegationDate: "2016-06-25", + RemovalDate: "", + }, + "feedback": { + GTLD: "feedback", + DelegationDate: "2014-04-10", + RemovalDate: "", + }, + "ferrari": { + GTLD: "ferrari", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "ferrero": { + GTLD: "ferrero", + DelegationDate: "2015-11-07", + RemovalDate: "", + }, + "fi": { + GTLD: "fi", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "fiat": { + GTLD: "fiat", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "fidelity": { + GTLD: "fidelity", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "fido": { + GTLD: "fido", + DelegationDate: "2016-09-20", + RemovalDate: "", + }, + "film": { + GTLD: "film", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "final": { + GTLD: "final", + DelegationDate: "2015-09-26", + RemovalDate: "", + }, + "finance": { + GTLD: "finance", + DelegationDate: "2014-04-29", + RemovalDate: "", + }, + "financial": { + GTLD: "financial", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "fire": { + GTLD: "fire", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "firestone": { + GTLD: "firestone", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "firmdale": { + GTLD: "firmdale", + DelegationDate: "2014-11-20", + RemovalDate: "", + }, + "fish": { + GTLD: "fish", + DelegationDate: "2014-02-21", + RemovalDate: "", + }, + "fishing": { + GTLD: "fishing", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "fit": { + GTLD: "fit", + DelegationDate: "2015-01-09", + RemovalDate: "", + }, + "fitness": { + GTLD: "fitness", + DelegationDate: "2014-04-22", + RemovalDate: "", + }, + "fj": { + GTLD: "fj", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "fk": { + GTLD: "fk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "flickr": { + GTLD: "flickr", + DelegationDate: "2016-02-13", + 
RemovalDate: "", + }, + "flights": { + GTLD: "flights", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "flir": { + GTLD: "flir", + DelegationDate: "2016-05-10", + RemovalDate: "", + }, + "florist": { + GTLD: "florist", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "flowers": { + GTLD: "flowers", + DelegationDate: "2014-12-25", + RemovalDate: "", + }, + "flsmidth": { + GTLD: "flsmidth", + DelegationDate: "2014-10-15", + RemovalDate: "2016-07-29", + }, + "fly": { + GTLD: "fly", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "fm": { + GTLD: "fm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "fo": { + GTLD: "fo", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "foo": { + GTLD: "foo", + DelegationDate: "2014-04-19", + RemovalDate: "", + }, + "food": { + GTLD: "food", + DelegationDate: "2016-11-10", + RemovalDate: "", + }, + "foodnetwork": { + GTLD: "foodnetwork", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "football": { + GTLD: "football", + DelegationDate: "2015-02-19", + RemovalDate: "", + }, + "ford": { + GTLD: "ford", + DelegationDate: "2015-12-18", + RemovalDate: "", + }, + "forex": { + GTLD: "forex", + DelegationDate: "2015-03-12", + RemovalDate: "", + }, + "forsale": { + GTLD: "forsale", + DelegationDate: "2014-10-01", + RemovalDate: "", + }, + "forum": { + GTLD: "forum", + DelegationDate: "2015-07-01", + RemovalDate: "", + }, + "foundation": { + GTLD: "foundation", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "fox": { + GTLD: "fox", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "fr": { + GTLD: "fr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "free": { + GTLD: "free", + DelegationDate: "2016-11-08", + RemovalDate: "", + }, + "fresenius": { + GTLD: "fresenius", + DelegationDate: "2016-01-09", + RemovalDate: "", + }, + "frl": { + GTLD: "frl", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "frogans": { + GTLD: "frogans", + DelegationDate: "2014-04-19", + RemovalDate: "", + }, + "frontdoor": { + GTLD: "frontdoor", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "frontier": { + GTLD: "frontier", + DelegationDate: "2016-02-06", + RemovalDate: "", + }, + "ftr": { + GTLD: "ftr", + DelegationDate: "2016-04-17", + RemovalDate: "", + }, + "fujitsu": { + GTLD: "fujitsu", + DelegationDate: "2016-07-07", + RemovalDate: "", + }, + "fujixerox": { + GTLD: "fujixerox", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "fun": { + GTLD: "fun", + DelegationDate: "2016-12-21", + RemovalDate: "", + }, + "fund": { + GTLD: "fund", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "furniture": { + GTLD: "furniture", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "futbol": { + GTLD: "futbol", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "fyi": { + GTLD: "fyi", + DelegationDate: "2015-05-22", + RemovalDate: "", + }, + "ga": { + GTLD: "ga", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gal": { + GTLD: "gal", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "gallery": { + GTLD: "gallery", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "gallo": { + GTLD: "gallo", + DelegationDate: "2016-03-22", + RemovalDate: "", + }, + "gallup": { + GTLD: "gallup", + DelegationDate: "2016-02-11", + RemovalDate: "", + }, + "game": { + GTLD: "game", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "games": { + GTLD: "games", + DelegationDate: "2016-06-02", + RemovalDate: "", + }, + "gap": { + GTLD: "gap", + DelegationDate: 
"2016-08-04", + RemovalDate: "", + }, + "garden": { + GTLD: "garden", + DelegationDate: "2014-12-13", + RemovalDate: "", + }, + "gay": { + GTLD: "gay", + DelegationDate: "2019-08-09", + RemovalDate: "", + }, + "gb": { + GTLD: "gb", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gbiz": { + GTLD: "gbiz", + DelegationDate: "2014-08-27", + RemovalDate: "", + }, + "gd": { + GTLD: "gd", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gdn": { + GTLD: "gdn", + DelegationDate: "2015-02-13", + RemovalDate: "", + }, + "ge": { + GTLD: "ge", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gea": { + GTLD: "gea", + DelegationDate: "2015-08-28", + RemovalDate: "", + }, + "gent": { + GTLD: "gent", + DelegationDate: "2014-07-12", + RemovalDate: "", + }, + "genting": { + GTLD: "genting", + DelegationDate: "2015-06-20", + RemovalDate: "", + }, + "george": { + GTLD: "george", + DelegationDate: "2016-08-18", + RemovalDate: "", + }, + "gf": { + GTLD: "gf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gg": { + GTLD: "gg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ggee": { + GTLD: "ggee", + DelegationDate: "2014-12-25", + RemovalDate: "", + }, + "gh": { + GTLD: "gh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gi": { + GTLD: "gi", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gift": { + GTLD: "gift", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "gifts": { + GTLD: "gifts", + DelegationDate: "2014-08-08", + RemovalDate: "", + }, + "gives": { + GTLD: "gives", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "giving": { + GTLD: "giving", + DelegationDate: "2015-08-06", + RemovalDate: "", + }, + "gl": { + GTLD: "gl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "glade": { + GTLD: "glade", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "glass": { + GTLD: "glass", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "gle": { + GTLD: "gle", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "global": { + GTLD: "global", + DelegationDate: "2014-06-11", + RemovalDate: "", + }, + "globo": { + GTLD: "globo", + DelegationDate: "2014-05-03", + RemovalDate: "", + }, + "gm": { + GTLD: "gm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gmail": { + GTLD: "gmail", + DelegationDate: "2014-08-27", + RemovalDate: "", + }, + "gmbh": { + GTLD: "gmbh", + DelegationDate: "2016-03-09", + RemovalDate: "", + }, + "gmo": { + GTLD: "gmo", + DelegationDate: "2014-05-03", + RemovalDate: "", + }, + "gmx": { + GTLD: "gmx", + DelegationDate: "2014-09-05", + RemovalDate: "", + }, + "gn": { + GTLD: "gn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "godaddy": { + GTLD: "godaddy", + DelegationDate: "2016-07-07", + RemovalDate: "", + }, + "gold": { + GTLD: "gold", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "goldpoint": { + GTLD: "goldpoint", + DelegationDate: "2015-02-19", + RemovalDate: "", + }, + "golf": { + GTLD: "golf", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "goo": { + GTLD: "goo", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "goodhands": { + GTLD: "goodhands", + DelegationDate: "2016-07-14", + RemovalDate: "2018-09-20", + }, + "goodyear": { + GTLD: "goodyear", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "goog": { + GTLD: "goog", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "google": { + GTLD: "google", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "gop": { + GTLD: "gop", + DelegationDate: 
"2014-04-04", + RemovalDate: "", + }, + "got": { + GTLD: "got", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "gov": { + GTLD: "gov", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gp": { + GTLD: "gp", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gq": { + GTLD: "gq", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gr": { + GTLD: "gr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "grainger": { + GTLD: "grainger", + DelegationDate: "2015-11-13", + RemovalDate: "", + }, + "graphics": { + GTLD: "graphics", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "gratis": { + GTLD: "gratis", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "green": { + GTLD: "green", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "gripe": { + GTLD: "gripe", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "grocery": { + GTLD: "grocery", + DelegationDate: "2017-06-28", + RemovalDate: "", + }, + "group": { + GTLD: "group", + DelegationDate: "2015-08-08", + RemovalDate: "", + }, + "gs": { + GTLD: "gs", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gt": { + GTLD: "gt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gu": { + GTLD: "gu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "guardian": { + GTLD: "guardian", + DelegationDate: "2016-05-13", + RemovalDate: "", + }, + "gucci": { + GTLD: "gucci", + DelegationDate: "2015-10-27", + RemovalDate: "", + }, + "guge": { + GTLD: "guge", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "guide": { + GTLD: "guide", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "guitars": { + GTLD: "guitars", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "guru": { + GTLD: "guru", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "gw": { + GTLD: "gw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "gy": { + GTLD: "gy", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "hair": { + GTLD: "hair", + DelegationDate: "2016-12-02", + RemovalDate: "", + }, + "hamburg": { + GTLD: "hamburg", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "hangout": { + GTLD: "hangout", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "haus": { + GTLD: "haus", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "hbo": { + GTLD: "hbo", + DelegationDate: "2016-08-14", + RemovalDate: "", + }, + "hdfc": { + GTLD: "hdfc", + DelegationDate: "2016-08-16", + RemovalDate: "", + }, + "hdfcbank": { + GTLD: "hdfcbank", + DelegationDate: "2016-02-11", + RemovalDate: "", + }, + "health": { + GTLD: "health", + DelegationDate: "2016-01-26", + RemovalDate: "", + }, + "healthcare": { + GTLD: "healthcare", + DelegationDate: "2014-07-30", + RemovalDate: "", + }, + "help": { + GTLD: "help", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "helsinki": { + GTLD: "helsinki", + DelegationDate: "2016-01-26", + RemovalDate: "", + }, + "here": { + GTLD: "here", + DelegationDate: "2014-08-29", + RemovalDate: "", + }, + "hermes": { + GTLD: "hermes", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "hgtv": { + GTLD: "hgtv", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "hiphop": { + GTLD: "hiphop", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "hisamitsu": { + GTLD: "hisamitsu", + DelegationDate: "2016-06-02", + RemovalDate: "", + }, + "hitachi": { + GTLD: "hitachi", + DelegationDate: "2015-05-01", + RemovalDate: "", + }, + "hiv": { + GTLD: "hiv", + DelegationDate: "2014-05-31", + RemovalDate: "", 
+ }, + "hk": { + GTLD: "hk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "hkt": { + GTLD: "hkt", + DelegationDate: "2016-05-12", + RemovalDate: "", + }, + "hm": { + GTLD: "hm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "hn": { + GTLD: "hn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "hockey": { + GTLD: "hockey", + DelegationDate: "2015-05-07", + RemovalDate: "", + }, + "holdings": { + GTLD: "holdings", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "holiday": { + GTLD: "holiday", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "homedepot": { + GTLD: "homedepot", + DelegationDate: "2015-06-04", + RemovalDate: "", + }, + "homegoods": { + GTLD: "homegoods", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "homes": { + GTLD: "homes", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "homesense": { + GTLD: "homesense", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "honda": { + GTLD: "honda", + DelegationDate: "2015-04-30", + RemovalDate: "", + }, + "honeywell": { + GTLD: "honeywell", + DelegationDate: "2016-07-26", + RemovalDate: "2019-06-06", + }, + "horse": { + GTLD: "horse", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "hospital": { + GTLD: "hospital", + DelegationDate: "2016-12-09", + RemovalDate: "", + }, + "host": { + GTLD: "host", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "hosting": { + GTLD: "hosting", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "hot": { + GTLD: "hot", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "hoteles": { + GTLD: "hoteles", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "hotels": { + GTLD: "hotels", + DelegationDate: "2017-04-07", + RemovalDate: "", + }, + "hotmail": { + GTLD: "hotmail", + DelegationDate: "2015-06-10", + RemovalDate: "", + }, + "house": { + GTLD: "house", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "how": { + GTLD: "how", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "hr": { + GTLD: "hr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "hsbc": { + GTLD: "hsbc", + DelegationDate: "2015-07-10", + RemovalDate: "", + }, + "ht": { + GTLD: "ht", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "htc": { + GTLD: "htc", + DelegationDate: "2016-04-02", + RemovalDate: "2017-10-24", + }, + "hu": { + GTLD: "hu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "hughes": { + GTLD: "hughes", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "hyatt": { + GTLD: "hyatt", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "hyundai": { + GTLD: "hyundai", + DelegationDate: "2015-09-26", + RemovalDate: "", + }, + "ibm": { + GTLD: "ibm", + DelegationDate: "2014-10-01", + RemovalDate: "", + }, + "icbc": { + GTLD: "icbc", + DelegationDate: "2015-05-13", + RemovalDate: "", + }, + "ice": { + GTLD: "ice", + DelegationDate: "2015-07-22", + RemovalDate: "", + }, + "icu": { + GTLD: "icu", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "id": { + GTLD: "id", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ie": { + GTLD: "ie", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ieee": { + GTLD: "ieee", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "ifm": { + GTLD: "ifm", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "iinet": { + GTLD: "iinet", + DelegationDate: "2015-07-09", + RemovalDate: "2016-12-21", + }, + "ikano": { + GTLD: "ikano", + DelegationDate: "2016-07-01", + RemovalDate: "", + }, + "il": { + 
GTLD: "il", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "im": { + GTLD: "im", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "imamat": { + GTLD: "imamat", + DelegationDate: "2016-04-16", + RemovalDate: "", + }, + "imdb": { + GTLD: "imdb", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "immo": { + GTLD: "immo", + DelegationDate: "2014-08-27", + RemovalDate: "", + }, + "immobilien": { + GTLD: "immobilien", + DelegationDate: "2014-01-02", + RemovalDate: "", + }, + "in": { + GTLD: "in", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "inc": { + GTLD: "inc", + DelegationDate: "2018-07-17", + RemovalDate: "", + }, + "industries": { + GTLD: "industries", + DelegationDate: "2014-02-21", + RemovalDate: "", + }, + "infiniti": { + GTLD: "infiniti", + DelegationDate: "2015-03-04", + RemovalDate: "", + }, + "info": { + GTLD: "info", + DelegationDate: "2001-09-19", + RemovalDate: "", + }, + "ing": { + GTLD: "ing", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "ink": { + GTLD: "ink", + DelegationDate: "2014-03-11", + RemovalDate: "", + }, + "institute": { + GTLD: "institute", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "insurance": { + GTLD: "insurance", + DelegationDate: "2015-12-03", + RemovalDate: "", + }, + "insure": { + GTLD: "insure", + DelegationDate: "2014-04-29", + RemovalDate: "", + }, + "int": { + GTLD: "int", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "intel": { + GTLD: "intel", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "international": { + GTLD: "international", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "intuit": { + GTLD: "intuit", + DelegationDate: "2016-07-12", + RemovalDate: "", + }, + "investments": { + GTLD: "investments", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "io": { + GTLD: "io", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ipiranga": { + GTLD: "ipiranga", + DelegationDate: "2015-07-26", + RemovalDate: "", + }, + "iq": { + GTLD: "iq", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ir": { + GTLD: "ir", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "irish": { + GTLD: "irish", + DelegationDate: "2014-12-02", + RemovalDate: "", + }, + "is": { + GTLD: "is", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "iselect": { + GTLD: "iselect", + DelegationDate: "2016-01-15", + RemovalDate: "2019-08-05", + }, + "ismaili": { + GTLD: "ismaili", + DelegationDate: "2016-04-16", + RemovalDate: "", + }, + "ist": { + GTLD: "ist", + DelegationDate: "2015-07-11", + RemovalDate: "", + }, + "istanbul": { + GTLD: "istanbul", + DelegationDate: "2015-07-11", + RemovalDate: "", + }, + "it": { + GTLD: "it", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "itau": { + GTLD: "itau", + DelegationDate: "2015-07-22", + RemovalDate: "", + }, + "itv": { + GTLD: "itv", + DelegationDate: "2016-06-21", + RemovalDate: "", + }, + "iveco": { + GTLD: "iveco", + DelegationDate: "2016-10-30", + RemovalDate: "", + }, + "iwc": { + GTLD: "iwc", + DelegationDate: "2014-12-13", + RemovalDate: "2018-06-28", + }, + "jaguar": { + GTLD: "jaguar", + DelegationDate: "2015-10-27", + RemovalDate: "", + }, + "java": { + GTLD: "java", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "jcb": { + GTLD: "jcb", + DelegationDate: "2015-01-23", + RemovalDate: "", + }, + "jcp": { + GTLD: "jcp", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "je": { + GTLD: "je", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "jeep": { + GTLD: 
"jeep", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "jetzt": { + GTLD: "jetzt", + DelegationDate: "2014-03-15", + RemovalDate: "", + }, + "jewelry": { + GTLD: "jewelry", + DelegationDate: "2015-04-16", + RemovalDate: "", + }, + "jio": { + GTLD: "jio", + DelegationDate: "2016-11-15", + RemovalDate: "", + }, + "jlc": { + GTLD: "jlc", + DelegationDate: "2015-06-10", + RemovalDate: "2018-09-18", + }, + "jll": { + GTLD: "jll", + DelegationDate: "2015-05-22", + RemovalDate: "", + }, + "jm": { + GTLD: "jm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "jmp": { + GTLD: "jmp", + DelegationDate: "2015-12-18", + RemovalDate: "", + }, + "jnj": { + GTLD: "jnj", + DelegationDate: "2016-04-08", + RemovalDate: "", + }, + "jo": { + GTLD: "jo", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "jobs": { + GTLD: "jobs", + DelegationDate: "2005-09-09", + RemovalDate: "", + }, + "joburg": { + GTLD: "joburg", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "jot": { + GTLD: "jot", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "joy": { + GTLD: "joy", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "jp": { + GTLD: "jp", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "jpmorgan": { + GTLD: "jpmorgan", + DelegationDate: "2016-02-27", + RemovalDate: "", + }, + "jprs": { + GTLD: "jprs", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "juegos": { + GTLD: "juegos", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "juniper": { + GTLD: "juniper", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "kaufen": { + GTLD: "kaufen", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "kddi": { + GTLD: "kddi", + DelegationDate: "2015-01-09", + RemovalDate: "", + }, + "ke": { + GTLD: "ke", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "kerryhotels": { + GTLD: "kerryhotels", + DelegationDate: "2016-03-05", + RemovalDate: "", + }, + "kerrylogistics": { + GTLD: "kerrylogistics", + DelegationDate: "2016-03-05", + RemovalDate: "", + }, + "kerryproperties": { + GTLD: "kerryproperties", + DelegationDate: "2016-03-05", + RemovalDate: "", + }, + "kfh": { + GTLD: "kfh", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "kg": { + GTLD: "kg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "kh": { + GTLD: "kh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ki": { + GTLD: "ki", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "kia": { + GTLD: "kia", + DelegationDate: "2015-09-26", + RemovalDate: "", + }, + "kim": { + GTLD: "kim", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "kinder": { + GTLD: "kinder", + DelegationDate: "2015-10-09", + RemovalDate: "", + }, + "kindle": { + GTLD: "kindle", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "kitchen": { + GTLD: "kitchen", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "kiwi": { + GTLD: "kiwi", + DelegationDate: "2014-01-03", + RemovalDate: "", + }, + "km": { + GTLD: "km", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "kn": { + GTLD: "kn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "koeln": { + GTLD: "koeln", + DelegationDate: "2014-03-05", + RemovalDate: "", + }, + "komatsu": { + GTLD: "komatsu", + DelegationDate: "2015-03-26", + RemovalDate: "", + }, + "kosher": { + GTLD: "kosher", + DelegationDate: "2016-06-10", + RemovalDate: "", + }, + "kp": { + GTLD: "kp", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "kpmg": { + GTLD: "kpmg", + DelegationDate: "2016-04-05", + 
RemovalDate: "", + }, + "kpn": { + GTLD: "kpn", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "kr": { + GTLD: "kr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "krd": { + GTLD: "krd", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "kred": { + GTLD: "kred", + DelegationDate: "2014-02-27", + RemovalDate: "", + }, + "kuokgroup": { + GTLD: "kuokgroup", + DelegationDate: "2016-03-05", + RemovalDate: "", + }, + "kw": { + GTLD: "kw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ky": { + GTLD: "ky", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "kyoto": { + GTLD: "kyoto", + DelegationDate: "2015-01-28", + RemovalDate: "", + }, + "kz": { + GTLD: "kz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "la": { + GTLD: "la", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "lacaixa": { + GTLD: "lacaixa", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "ladbrokes": { + GTLD: "ladbrokes", + DelegationDate: "2016-07-29", + RemovalDate: "", + }, + "lamborghini": { + GTLD: "lamborghini", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "lamer": { + GTLD: "lamer", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "lancaster": { + GTLD: "lancaster", + DelegationDate: "2015-07-15", + RemovalDate: "", + }, + "lancia": { + GTLD: "lancia", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "lancome": { + GTLD: "lancome", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "land": { + GTLD: "land", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "landrover": { + GTLD: "landrover", + DelegationDate: "2015-10-27", + RemovalDate: "", + }, + "lanxess": { + GTLD: "lanxess", + DelegationDate: "2016-01-26", + RemovalDate: "", + }, + "lasalle": { + GTLD: "lasalle", + DelegationDate: "2015-06-11", + RemovalDate: "", + }, + "lat": { + GTLD: "lat", + DelegationDate: "2015-01-09", + RemovalDate: "", + }, + "latino": { + GTLD: "latino", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "latrobe": { + GTLD: "latrobe", + DelegationDate: "2014-12-02", + RemovalDate: "", + }, + "law": { + GTLD: "law", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "lawyer": { + GTLD: "lawyer", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "lb": { + GTLD: "lb", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "lc": { + GTLD: "lc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "lds": { + GTLD: "lds", + DelegationDate: "2014-11-19", + RemovalDate: "", + }, + "lease": { + GTLD: "lease", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "leclerc": { + GTLD: "leclerc", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "lefrak": { + GTLD: "lefrak", + DelegationDate: "2016-07-14", + RemovalDate: "", + }, + "legal": { + GTLD: "legal", + DelegationDate: "2014-11-26", + RemovalDate: "", + }, + "lego": { + GTLD: "lego", + DelegationDate: "2016-06-16", + RemovalDate: "", + }, + "lexus": { + GTLD: "lexus", + DelegationDate: "2015-07-26", + RemovalDate: "", + }, + "lgbt": { + GTLD: "lgbt", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "li": { + GTLD: "li", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "liaison": { + GTLD: "liaison", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "lidl": { + GTLD: "lidl", + DelegationDate: "2014-12-13", + RemovalDate: "", + }, + "life": { + GTLD: "life", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "lifeinsurance": { + GTLD: "lifeinsurance", + DelegationDate: "2016-01-19", + RemovalDate: 
"", + }, + "lifestyle": { + GTLD: "lifestyle", + DelegationDate: "2015-11-10", + RemovalDate: "", + }, + "lighting": { + GTLD: "lighting", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "like": { + GTLD: "like", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "lilly": { + GTLD: "lilly", + DelegationDate: "2016-07-31", + RemovalDate: "", + }, + "limited": { + GTLD: "limited", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "limo": { + GTLD: "limo", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "lincoln": { + GTLD: "lincoln", + DelegationDate: "2015-12-18", + RemovalDate: "", + }, + "linde": { + GTLD: "linde", + DelegationDate: "2015-09-16", + RemovalDate: "", + }, + "link": { + GTLD: "link", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "lipsy": { + GTLD: "lipsy", + DelegationDate: "2016-05-03", + RemovalDate: "", + }, + "live": { + GTLD: "live", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "living": { + GTLD: "living", + DelegationDate: "2015-12-28", + RemovalDate: "", + }, + "lixil": { + GTLD: "lixil", + DelegationDate: "2015-07-30", + RemovalDate: "", + }, + "lk": { + GTLD: "lk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "llc": { + GTLD: "llc", + DelegationDate: "2018-02-22", + RemovalDate: "", + }, + "loan": { + GTLD: "loan", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "loans": { + GTLD: "loans", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "locker": { + GTLD: "locker", + DelegationDate: "2016-05-27", + RemovalDate: "", + }, + "locus": { + GTLD: "locus", + DelegationDate: "2016-03-09", + RemovalDate: "", + }, + "loft": { + GTLD: "loft", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "lol": { + GTLD: "lol", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "london": { + GTLD: "london", + DelegationDate: "2014-03-22", + RemovalDate: "", + }, + "lotte": { + GTLD: "lotte", + DelegationDate: "2015-01-14", + RemovalDate: "", + }, + "lotto": { + GTLD: "lotto", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "love": { + GTLD: "love", + DelegationDate: "2015-04-02", + RemovalDate: "", + }, + "lpl": { + GTLD: "lpl", + DelegationDate: "2016-07-19", + RemovalDate: "", + }, + "lplfinancial": { + GTLD: "lplfinancial", + DelegationDate: "2016-07-19", + RemovalDate: "", + }, + "lr": { + GTLD: "lr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ls": { + GTLD: "ls", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "lt": { + GTLD: "lt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ltd": { + GTLD: "ltd", + DelegationDate: "2015-09-23", + RemovalDate: "", + }, + "ltda": { + GTLD: "ltda", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "lu": { + GTLD: "lu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "lundbeck": { + GTLD: "lundbeck", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "lupin": { + GTLD: "lupin", + DelegationDate: "2015-05-16", + RemovalDate: "", + }, + "luxe": { + GTLD: "luxe", + DelegationDate: "2014-05-15", + RemovalDate: "", + }, + "luxury": { + GTLD: "luxury", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "lv": { + GTLD: "lv", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ly": { + GTLD: "ly", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ma": { + GTLD: "ma", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "macys": { + GTLD: "macys", + DelegationDate: "2016-07-12", + RemovalDate: "", + }, + "madrid": { + GTLD: "madrid", + DelegationDate: 
"2014-11-20", + RemovalDate: "", + }, + "maif": { + GTLD: "maif", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "maison": { + GTLD: "maison", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "makeup": { + GTLD: "makeup", + DelegationDate: "2016-01-15", + RemovalDate: "", + }, + "man": { + GTLD: "man", + DelegationDate: "2015-07-26", + RemovalDate: "", + }, + "management": { + GTLD: "management", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "mango": { + GTLD: "mango", + DelegationDate: "2014-02-16", + RemovalDate: "", + }, + "map": { + GTLD: "map", + DelegationDate: "2017-06-29", + RemovalDate: "", + }, + "market": { + GTLD: "market", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "marketing": { + GTLD: "marketing", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "markets": { + GTLD: "markets", + DelegationDate: "2015-03-12", + RemovalDate: "", + }, + "marriott": { + GTLD: "marriott", + DelegationDate: "2015-01-14", + RemovalDate: "", + }, + "marshalls": { + GTLD: "marshalls", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "maserati": { + GTLD: "maserati", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "mattel": { + GTLD: "mattel", + DelegationDate: "2016-05-28", + RemovalDate: "", + }, + "mba": { + GTLD: "mba", + DelegationDate: "2015-05-22", + RemovalDate: "", + }, + "mc": { + GTLD: "mc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mcd": { + GTLD: "mcd", + DelegationDate: "2016-08-08", + RemovalDate: "2017-08-31", + }, + "mcdonalds": { + GTLD: "mcdonalds", + DelegationDate: "2016-08-08", + RemovalDate: "2017-08-31", + }, + "mckinsey": { + GTLD: "mckinsey", + DelegationDate: "2016-07-31", + RemovalDate: "", + }, + "md": { + GTLD: "md", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "me": { + GTLD: "me", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "med": { + GTLD: "med", + DelegationDate: "2015-12-03", + RemovalDate: "", + }, + "media": { + GTLD: "media", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "meet": { + GTLD: "meet", + DelegationDate: "2014-03-27", + RemovalDate: "", + }, + "melbourne": { + GTLD: "melbourne", + DelegationDate: "2014-07-10", + RemovalDate: "", + }, + "meme": { + GTLD: "meme", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "memorial": { + GTLD: "memorial", + DelegationDate: "2014-11-26", + RemovalDate: "", + }, + "men": { + GTLD: "men", + DelegationDate: "2015-05-20", + RemovalDate: "", + }, + "menu": { + GTLD: "menu", + DelegationDate: "2013-11-30", + RemovalDate: "", + }, + "meo": { + GTLD: "meo", + DelegationDate: "2015-10-29", + RemovalDate: "2018-05-26", + }, + "merckmsd": { + GTLD: "merckmsd", + DelegationDate: "2017-07-10", + RemovalDate: "", + }, + "metlife": { + GTLD: "metlife", + DelegationDate: "2016-05-11", + RemovalDate: "", + }, + "mg": { + GTLD: "mg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mh": { + GTLD: "mh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "miami": { + GTLD: "miami", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "microsoft": { + GTLD: "microsoft", + DelegationDate: "2015-06-10", + RemovalDate: "", + }, + "mil": { + GTLD: "mil", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mini": { + GTLD: "mini", + DelegationDate: "2014-06-24", + RemovalDate: "", + }, + "mint": { + GTLD: "mint", + DelegationDate: "2016-07-12", + RemovalDate: "", + }, + "mit": { + GTLD: "mit", + DelegationDate: "2016-07-06", + RemovalDate: "", + }, + "mitsubishi": { + GTLD: 
"mitsubishi", + DelegationDate: "2016-07-07", + RemovalDate: "", + }, + "mk": { + GTLD: "mk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ml": { + GTLD: "ml", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mlb": { + GTLD: "mlb", + DelegationDate: "2016-05-25", + RemovalDate: "", + }, + "mls": { + GTLD: "mls", + DelegationDate: "2016-04-20", + RemovalDate: "", + }, + "mm": { + GTLD: "mm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mma": { + GTLD: "mma", + DelegationDate: "2015-03-31", + RemovalDate: "", + }, + "mn": { + GTLD: "mn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mo": { + GTLD: "mo", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mobi": { + GTLD: "mobi", + DelegationDate: "2005-10-20", + RemovalDate: "", + }, + "mobile": { + GTLD: "mobile", + DelegationDate: "2016-12-20", + RemovalDate: "", + }, + "mobily": { + GTLD: "mobily", + DelegationDate: "2015-12-23", + RemovalDate: "", + }, + "moda": { + GTLD: "moda", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "moe": { + GTLD: "moe", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "moi": { + GTLD: "moi", + DelegationDate: "2015-10-07", + RemovalDate: "", + }, + "mom": { + GTLD: "mom", + DelegationDate: "2015-08-19", + RemovalDate: "", + }, + "monash": { + GTLD: "monash", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "money": { + GTLD: "money", + DelegationDate: "2014-11-26", + RemovalDate: "", + }, + "monster": { + GTLD: "monster", + DelegationDate: "2016-09-14", + RemovalDate: "", + }, + "montblanc": { + GTLD: "montblanc", + DelegationDate: "2015-06-05", + RemovalDate: "2017-09-01", + }, + "mopar": { + GTLD: "mopar", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "mormon": { + GTLD: "mormon", + DelegationDate: "2014-11-19", + RemovalDate: "", + }, + "mortgage": { + GTLD: "mortgage", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "moscow": { + GTLD: "moscow", + DelegationDate: "2014-04-24", + RemovalDate: "", + }, + "moto": { + GTLD: "moto", + DelegationDate: "2016-11-12", + RemovalDate: "", + }, + "motorcycles": { + GTLD: "motorcycles", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "mov": { + GTLD: "mov", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "movie": { + GTLD: "movie", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "movistar": { + GTLD: "movistar", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "mp": { + GTLD: "mp", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mq": { + GTLD: "mq", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mr": { + GTLD: "mr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ms": { + GTLD: "ms", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "msd": { + GTLD: "msd", + DelegationDate: "2016-07-23", + RemovalDate: "", + }, + "mt": { + GTLD: "mt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mtn": { + GTLD: "mtn", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "mtpc": { + GTLD: "mtpc", + DelegationDate: "2015-03-04", + RemovalDate: "2017-05-15", + }, + "mtr": { + GTLD: "mtr", + DelegationDate: "2015-10-07", + RemovalDate: "", + }, + "mu": { + GTLD: "mu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "museum": { + GTLD: "museum", + DelegationDate: "2001-11-01", + RemovalDate: "", + }, + "mutual": { + GTLD: "mutual", + DelegationDate: "2016-04-05", + RemovalDate: "", + }, + "mutuelle": { + GTLD: "mutuelle", + DelegationDate: "2015-10-23", + RemovalDate: 
"2016-12-21", + }, + "mv": { + GTLD: "mv", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mw": { + GTLD: "mw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mx": { + GTLD: "mx", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "my": { + GTLD: "my", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "mz": { + GTLD: "mz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "na": { + GTLD: "na", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nab": { + GTLD: "nab", + DelegationDate: "2016-08-18", + RemovalDate: "", + }, + "nadex": { + GTLD: "nadex", + DelegationDate: "2015-05-02", + RemovalDate: "", + }, + "nagoya": { + GTLD: "nagoya", + DelegationDate: "2014-01-29", + RemovalDate: "", + }, + "name": { + GTLD: "name", + DelegationDate: "2002-01-04", + RemovalDate: "", + }, + "nationwide": { + GTLD: "nationwide", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "natura": { + GTLD: "natura", + DelegationDate: "2016-02-11", + RemovalDate: "", + }, + "navy": { + GTLD: "navy", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "nba": { + GTLD: "nba", + DelegationDate: "2016-08-02", + RemovalDate: "", + }, + "nc": { + GTLD: "nc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ne": { + GTLD: "ne", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nec": { + GTLD: "nec", + DelegationDate: "2015-05-09", + RemovalDate: "", + }, + "net": { + GTLD: "net", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "netbank": { + GTLD: "netbank", + DelegationDate: "2015-06-22", + RemovalDate: "", + }, + "netflix": { + GTLD: "netflix", + DelegationDate: "2016-05-28", + RemovalDate: "", + }, + "network": { + GTLD: "network", + DelegationDate: "2014-08-22", + RemovalDate: "", + }, + "neustar": { + GTLD: "neustar", + DelegationDate: "2014-02-19", + RemovalDate: "", + }, + "new": { + GTLD: "new", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "newholland": { + GTLD: "newholland", + DelegationDate: "2016-10-30", + RemovalDate: "", + }, + "news": { + GTLD: "news", + DelegationDate: "2015-03-21", + RemovalDate: "", + }, + "next": { + GTLD: "next", + DelegationDate: "2016-05-03", + RemovalDate: "", + }, + "nextdirect": { + GTLD: "nextdirect", + DelegationDate: "2016-05-03", + RemovalDate: "", + }, + "nexus": { + GTLD: "nexus", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "nf": { + GTLD: "nf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nfl": { + GTLD: "nfl", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "ng": { + GTLD: "ng", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ngo": { + GTLD: "ngo", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "nhk": { + GTLD: "nhk", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "ni": { + GTLD: "ni", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nico": { + GTLD: "nico", + DelegationDate: "2015-02-10", + RemovalDate: "", + }, + "nike": { + GTLD: "nike", + DelegationDate: "2016-07-09", + RemovalDate: "", + }, + "nikon": { + GTLD: "nikon", + DelegationDate: "2016-01-28", + RemovalDate: "", + }, + "ninja": { + GTLD: "ninja", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "nissan": { + GTLD: "nissan", + DelegationDate: "2015-03-04", + RemovalDate: "", + }, + "nissay": { + GTLD: "nissay", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "nl": { + GTLD: "nl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "no": { + GTLD: "no", + DelegationDate: "1985-01-01", + RemovalDate: 
"", + }, + "nokia": { + GTLD: "nokia", + DelegationDate: "2015-07-15", + RemovalDate: "", + }, + "northwesternmutual": { + GTLD: "northwesternmutual", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "norton": { + GTLD: "norton", + DelegationDate: "2015-12-03", + RemovalDate: "", + }, + "now": { + GTLD: "now", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "nowruz": { + GTLD: "nowruz", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "nowtv": { + GTLD: "nowtv", + DelegationDate: "2016-05-11", + RemovalDate: "", + }, + "np": { + GTLD: "np", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nr": { + GTLD: "nr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nra": { + GTLD: "nra", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "nrw": { + GTLD: "nrw", + DelegationDate: "2014-07-11", + RemovalDate: "", + }, + "ntt": { + GTLD: "ntt", + DelegationDate: "2015-02-03", + RemovalDate: "", + }, + "nu": { + GTLD: "nu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "nyc": { + GTLD: "nyc", + DelegationDate: "2014-03-20", + RemovalDate: "", + }, + "nz": { + GTLD: "nz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "obi": { + GTLD: "obi", + DelegationDate: "2015-09-23", + RemovalDate: "", + }, + "observer": { + GTLD: "observer", + DelegationDate: "2016-09-27", + RemovalDate: "", + }, + "off": { + GTLD: "off", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "office": { + GTLD: "office", + DelegationDate: "2015-06-23", + RemovalDate: "", + }, + "okinawa": { + GTLD: "okinawa", + DelegationDate: "2014-03-02", + RemovalDate: "", + }, + "olayan": { + GTLD: "olayan", + DelegationDate: "2016-05-03", + RemovalDate: "", + }, + "olayangroup": { + GTLD: "olayangroup", + DelegationDate: "2016-05-06", + RemovalDate: "", + }, + "oldnavy": { + GTLD: "oldnavy", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "ollo": { + GTLD: "ollo", + DelegationDate: "2016-05-27", + RemovalDate: "", + }, + "om": { + GTLD: "om", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "omega": { + GTLD: "omega", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "one": { + GTLD: "one", + DelegationDate: "2015-01-22", + RemovalDate: "", + }, + "ong": { + GTLD: "ong", + DelegationDate: "2014-07-27", + RemovalDate: "", + }, + "onl": { + GTLD: "onl", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "online": { + GTLD: "online", + DelegationDate: "2015-03-16", + RemovalDate: "", + }, + "onyourside": { + GTLD: "onyourside", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "ooo": { + GTLD: "ooo", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "open": { + GTLD: "open", + DelegationDate: "2016-08-08", + RemovalDate: "", + }, + "oracle": { + GTLD: "oracle", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "orange": { + GTLD: "orange", + DelegationDate: "2015-07-09", + RemovalDate: "", + }, + "org": { + GTLD: "org", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "organic": { + GTLD: "organic", + DelegationDate: "2014-06-13", + RemovalDate: "", + }, + "orientexpress": { + GTLD: "orientexpress", + DelegationDate: "2016-06-22", + RemovalDate: "2017-04-14", + }, + "origins": { + GTLD: "origins", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "osaka": { + GTLD: "osaka", + DelegationDate: "2014-12-13", + RemovalDate: "", + }, + "otsuka": { + GTLD: "otsuka", + DelegationDate: "2014-08-27", + RemovalDate: "", + }, + "ott": { + GTLD: "ott", + DelegationDate: "2016-05-27", + RemovalDate: 
"", + }, + "ovh": { + GTLD: "ovh", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "pa": { + GTLD: "pa", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "page": { + GTLD: "page", + DelegationDate: "2015-03-16", + RemovalDate: "", + }, + "pamperedchef": { + GTLD: "pamperedchef", + DelegationDate: "2016-01-21", + RemovalDate: "2017-09-20", + }, + "panasonic": { + GTLD: "panasonic", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "panerai": { + GTLD: "panerai", + DelegationDate: "2015-03-25", + RemovalDate: "2018-09-18", + }, + "paris": { + GTLD: "paris", + DelegationDate: "2014-04-19", + RemovalDate: "", + }, + "pars": { + GTLD: "pars", + DelegationDate: "2015-12-07", + RemovalDate: "", + }, + "partners": { + GTLD: "partners", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "parts": { + GTLD: "parts", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "party": { + GTLD: "party", + DelegationDate: "2014-11-17", + RemovalDate: "", + }, + "passagens": { + GTLD: "passagens", + DelegationDate: "2016-03-02", + RemovalDate: "", + }, + "pay": { + GTLD: "pay", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "pccw": { + GTLD: "pccw", + DelegationDate: "2016-05-11", + RemovalDate: "", + }, + "pe": { + GTLD: "pe", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pet": { + GTLD: "pet", + DelegationDate: "2015-07-26", + RemovalDate: "", + }, + "pf": { + GTLD: "pf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pfizer": { + GTLD: "pfizer", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "pg": { + GTLD: "pg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ph": { + GTLD: "ph", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pharmacy": { + GTLD: "pharmacy", + DelegationDate: "2014-09-05", + RemovalDate: "", + }, + "phd": { + GTLD: "phd", + DelegationDate: "2017-06-29", + RemovalDate: "", + }, + "philips": { + GTLD: "philips", + DelegationDate: "2015-05-09", + RemovalDate: "", + }, + "phone": { + GTLD: "phone", + DelegationDate: "2016-12-20", + RemovalDate: "", + }, + "photo": { + GTLD: "photo", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "photography": { + GTLD: "photography", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "photos": { + GTLD: "photos", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "physio": { + GTLD: "physio", + DelegationDate: "2014-06-19", + RemovalDate: "", + }, + "piaget": { + GTLD: "piaget", + DelegationDate: "2015-03-16", + RemovalDate: "", + }, + "pics": { + GTLD: "pics", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "pictet": { + GTLD: "pictet", + DelegationDate: "2015-03-07", + RemovalDate: "", + }, + "pictures": { + GTLD: "pictures", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "pid": { + GTLD: "pid", + DelegationDate: "2015-12-22", + RemovalDate: "", + }, + "pin": { + GTLD: "pin", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "ping": { + GTLD: "ping", + DelegationDate: "2015-10-29", + RemovalDate: "", + }, + "pink": { + GTLD: "pink", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "pioneer": { + GTLD: "pioneer", + DelegationDate: "2016-06-02", + RemovalDate: "", + }, + "pizza": { + GTLD: "pizza", + DelegationDate: "2014-08-27", + RemovalDate: "", + }, + "pk": { + GTLD: "pk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pl": { + GTLD: "pl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "place": { + GTLD: "place", + DelegationDate: "2014-07-02", + RemovalDate: "", 
+ }, + "play": { + GTLD: "play", + DelegationDate: "2015-06-20", + RemovalDate: "", + }, + "playstation": { + GTLD: "playstation", + DelegationDate: "2015-11-07", + RemovalDate: "", + }, + "plumbing": { + GTLD: "plumbing", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "plus": { + GTLD: "plus", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "pm": { + GTLD: "pm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pn": { + GTLD: "pn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pnc": { + GTLD: "pnc", + DelegationDate: "2016-07-01", + RemovalDate: "", + }, + "pohl": { + GTLD: "pohl", + DelegationDate: "2014-09-27", + RemovalDate: "", + }, + "poker": { + GTLD: "poker", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "politie": { + GTLD: "politie", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "porn": { + GTLD: "porn", + DelegationDate: "2014-12-06", + RemovalDate: "", + }, + "post": { + GTLD: "post", + DelegationDate: "2012-08-07", + RemovalDate: "", + }, + "pr": { + GTLD: "pr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pramerica": { + GTLD: "pramerica", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "praxi": { + GTLD: "praxi", + DelegationDate: "2014-07-22", + RemovalDate: "", + }, + "press": { + GTLD: "press", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "prime": { + GTLD: "prime", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "pro": { + GTLD: "pro", + DelegationDate: "2004-05-27", + RemovalDate: "", + }, + "prod": { + GTLD: "prod", + DelegationDate: "2014-08-29", + RemovalDate: "", + }, + "productions": { + GTLD: "productions", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "prof": { + GTLD: "prof", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "progressive": { + GTLD: "progressive", + DelegationDate: "2016-04-20", + RemovalDate: "", + }, + "promo": { + GTLD: "promo", + DelegationDate: "2015-12-31", + RemovalDate: "", + }, + "properties": { + GTLD: "properties", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "property": { + GTLD: "property", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "protection": { + GTLD: "protection", + DelegationDate: "2015-09-13", + RemovalDate: "", + }, + "pru": { + GTLD: "pru", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "prudential": { + GTLD: "prudential", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "ps": { + GTLD: "ps", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pt": { + GTLD: "pt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pub": { + GTLD: "pub", + DelegationDate: "2014-02-26", + RemovalDate: "", + }, + "pw": { + GTLD: "pw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "pwc": { + GTLD: "pwc", + DelegationDate: "2016-02-11", + RemovalDate: "", + }, + "py": { + GTLD: "py", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "qa": { + GTLD: "qa", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "qpon": { + GTLD: "qpon", + DelegationDate: "2014-02-12", + RemovalDate: "", + }, + "quebec": { + GTLD: "quebec", + DelegationDate: "2014-04-16", + RemovalDate: "", + }, + "quest": { + GTLD: "quest", + DelegationDate: "2016-02-06", + RemovalDate: "", + }, + "qvc": { + GTLD: "qvc", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "racing": { + GTLD: "racing", + DelegationDate: "2015-04-03", + RemovalDate: "", + }, + "radio": { + GTLD: "radio", + DelegationDate: "2016-10-12", + RemovalDate: "", + }, + "raid": { + 
GTLD: "raid", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "re": { + GTLD: "re", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "read": { + GTLD: "read", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "realestate": { + GTLD: "realestate", + DelegationDate: "2016-05-23", + RemovalDate: "", + }, + "realtor": { + GTLD: "realtor", + DelegationDate: "2014-07-30", + RemovalDate: "", + }, + "realty": { + GTLD: "realty", + DelegationDate: "2015-07-01", + RemovalDate: "", + }, + "recipes": { + GTLD: "recipes", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "red": { + GTLD: "red", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "redstone": { + GTLD: "redstone", + DelegationDate: "2015-03-28", + RemovalDate: "", + }, + "redumbrella": { + GTLD: "redumbrella", + DelegationDate: "2015-12-11", + RemovalDate: "", + }, + "rehab": { + GTLD: "rehab", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "reise": { + GTLD: "reise", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "reisen": { + GTLD: "reisen", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "reit": { + GTLD: "reit", + DelegationDate: "2014-11-12", + RemovalDate: "", + }, + "reliance": { + GTLD: "reliance", + DelegationDate: "2016-11-15", + RemovalDate: "", + }, + "ren": { + GTLD: "ren", + DelegationDate: "2014-03-27", + RemovalDate: "", + }, + "rent": { + GTLD: "rent", + DelegationDate: "2015-04-30", + RemovalDate: "", + }, + "rentals": { + GTLD: "rentals", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "repair": { + GTLD: "repair", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "report": { + GTLD: "report", + DelegationDate: "2014-02-04", + RemovalDate: "", + }, + "republican": { + GTLD: "republican", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "rest": { + GTLD: "rest", + DelegationDate: "2014-04-02", + RemovalDate: "", + }, + "restaurant": { + GTLD: "restaurant", + DelegationDate: "2014-08-08", + RemovalDate: "", + }, + "review": { + GTLD: "review", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "reviews": { + GTLD: "reviews", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "rexroth": { + GTLD: "rexroth", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "rich": { + GTLD: "rich", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "richardli": { + GTLD: "richardli", + DelegationDate: "2016-05-11", + RemovalDate: "", + }, + "ricoh": { + GTLD: "ricoh", + DelegationDate: "2015-06-22", + RemovalDate: "", + }, + "rightathome": { + GTLD: "rightathome", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "ril": { + GTLD: "ril", + DelegationDate: "2016-11-15", + RemovalDate: "", + }, + "rio": { + GTLD: "rio", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "rip": { + GTLD: "rip", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "rmit": { + GTLD: "rmit", + DelegationDate: "2016-11-24", + RemovalDate: "", + }, + "ro": { + GTLD: "ro", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "rocher": { + GTLD: "rocher", + DelegationDate: "2015-11-07", + RemovalDate: "", + }, + "rocks": { + GTLD: "rocks", + DelegationDate: "2014-04-10", + RemovalDate: "", + }, + "rodeo": { + GTLD: "rodeo", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "rogers": { + GTLD: "rogers", + DelegationDate: "2016-09-20", + RemovalDate: "", + }, + "room": { + GTLD: "room", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "rs": { + GTLD: "rs", + DelegationDate: "1985-01-01", + 
RemovalDate: "", + }, + "rsvp": { + GTLD: "rsvp", + DelegationDate: "2014-08-30", + RemovalDate: "", + }, + "ru": { + GTLD: "ru", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "rugby": { + GTLD: "rugby", + DelegationDate: "2017-04-07", + RemovalDate: "", + }, + "ruhr": { + GTLD: "ruhr", + DelegationDate: "2013-12-10", + RemovalDate: "", + }, + "run": { + GTLD: "run", + DelegationDate: "2015-05-07", + RemovalDate: "", + }, + "rw": { + GTLD: "rw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "rwe": { + GTLD: "rwe", + DelegationDate: "2015-10-27", + RemovalDate: "", + }, + "ryukyu": { + GTLD: "ryukyu", + DelegationDate: "2014-04-03", + RemovalDate: "", + }, + "sa": { + GTLD: "sa", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "saarland": { + GTLD: "saarland", + DelegationDate: "2014-04-02", + RemovalDate: "", + }, + "safe": { + GTLD: "safe", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "safety": { + GTLD: "safety", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "sakura": { + GTLD: "sakura", + DelegationDate: "2015-07-02", + RemovalDate: "", + }, + "sale": { + GTLD: "sale", + DelegationDate: "2014-12-25", + RemovalDate: "", + }, + "salon": { + GTLD: "salon", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "samsclub": { + GTLD: "samsclub", + DelegationDate: "2016-08-18", + RemovalDate: "", + }, + "samsung": { + GTLD: "samsung", + DelegationDate: "2014-12-10", + RemovalDate: "", + }, + "sandvik": { + GTLD: "sandvik", + DelegationDate: "2015-05-27", + RemovalDate: "", + }, + "sandvikcoromant": { + GTLD: "sandvikcoromant", + DelegationDate: "2015-05-27", + RemovalDate: "", + }, + "sanofi": { + GTLD: "sanofi", + DelegationDate: "2015-07-24", + RemovalDate: "", + }, + "sap": { + GTLD: "sap", + DelegationDate: "2015-03-26", + RemovalDate: "", + }, + "sapo": { + GTLD: "sapo", + DelegationDate: "2015-10-29", + RemovalDate: "2018-05-26", + }, + "sarl": { + GTLD: "sarl", + DelegationDate: "2014-08-08", + RemovalDate: "", + }, + "sas": { + GTLD: "sas", + DelegationDate: "2015-12-18", + RemovalDate: "", + }, + "save": { + GTLD: "save", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "saxo": { + GTLD: "saxo", + DelegationDate: "2015-02-10", + RemovalDate: "", + }, + "sb": { + GTLD: "sb", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sbi": { + GTLD: "sbi", + DelegationDate: "2016-04-16", + RemovalDate: "", + }, + "sbs": { + GTLD: "sbs", + DelegationDate: "2015-10-29", + RemovalDate: "", + }, + "sc": { + GTLD: "sc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sca": { + GTLD: "sca", + DelegationDate: "2014-08-14", + RemovalDate: "", + }, + "scb": { + GTLD: "scb", + DelegationDate: "2014-07-11", + RemovalDate: "", + }, + "schaeffler": { + GTLD: "schaeffler", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "schmidt": { + GTLD: "schmidt", + DelegationDate: "2014-07-03", + RemovalDate: "", + }, + "scholarships": { + GTLD: "scholarships", + DelegationDate: "2015-04-02", + RemovalDate: "", + }, + "school": { + GTLD: "school", + DelegationDate: "2015-02-19", + RemovalDate: "", + }, + "schule": { + GTLD: "schule", + DelegationDate: "2014-04-22", + RemovalDate: "", + }, + "schwarz": { + GTLD: "schwarz", + DelegationDate: "2014-12-13", + RemovalDate: "", + }, + "science": { + GTLD: "science", + DelegationDate: "2014-11-15", + RemovalDate: "", + }, + "scjohnson": { + GTLD: "scjohnson", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "scor": { + GTLD: "scor", + DelegationDate: "2015-06-23", + 
RemovalDate: "", + }, + "scot": { + GTLD: "scot", + DelegationDate: "2014-06-13", + RemovalDate: "", + }, + "sd": { + GTLD: "sd", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "se": { + GTLD: "se", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "search": { + GTLD: "search", + DelegationDate: "2017-06-29", + RemovalDate: "", + }, + "seat": { + GTLD: "seat", + DelegationDate: "2015-04-18", + RemovalDate: "", + }, + "secure": { + GTLD: "secure", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "security": { + GTLD: "security", + DelegationDate: "2015-09-17", + RemovalDate: "", + }, + "seek": { + GTLD: "seek", + DelegationDate: "2015-08-11", + RemovalDate: "", + }, + "select": { + GTLD: "select", + DelegationDate: "2016-01-15", + RemovalDate: "", + }, + "sener": { + GTLD: "sener", + DelegationDate: "2015-05-01", + RemovalDate: "", + }, + "services": { + GTLD: "services", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "ses": { + GTLD: "ses", + DelegationDate: "2016-07-09", + RemovalDate: "", + }, + "seven": { + GTLD: "seven", + DelegationDate: "2015-09-26", + RemovalDate: "", + }, + "sew": { + GTLD: "sew", + DelegationDate: "2014-12-13", + RemovalDate: "", + }, + "sex": { + GTLD: "sex", + DelegationDate: "2015-04-18", + RemovalDate: "", + }, + "sexy": { + GTLD: "sexy", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "sfr": { + GTLD: "sfr", + DelegationDate: "2015-12-01", + RemovalDate: "", + }, + "sg": { + GTLD: "sg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sh": { + GTLD: "sh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "shangrila": { + GTLD: "shangrila", + DelegationDate: "2016-07-02", + RemovalDate: "", + }, + "sharp": { + GTLD: "sharp", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "shaw": { + GTLD: "shaw", + DelegationDate: "2016-03-22", + RemovalDate: "", + }, + "shell": { + GTLD: "shell", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "shia": { + GTLD: "shia", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "shiksha": { + GTLD: "shiksha", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "shoes": { + GTLD: "shoes", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "shop": { + GTLD: "shop", + DelegationDate: "2016-05-23", + RemovalDate: "", + }, + "shopping": { + GTLD: "shopping", + DelegationDate: "2016-06-21", + RemovalDate: "", + }, + "shouji": { + GTLD: "shouji", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "show": { + GTLD: "show", + DelegationDate: "2015-04-16", + RemovalDate: "", + }, + "showtime": { + GTLD: "showtime", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "shriram": { + GTLD: "shriram", + DelegationDate: "2014-12-30", + RemovalDate: "", + }, + "si": { + GTLD: "si", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "silk": { + GTLD: "silk", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "sina": { + GTLD: "sina", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "singles": { + GTLD: "singles", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "site": { + GTLD: "site", + DelegationDate: "2015-03-16", + RemovalDate: "", + }, + "sj": { + GTLD: "sj", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sk": { + GTLD: "sk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ski": { + GTLD: "ski", + DelegationDate: "2015-05-30", + RemovalDate: "", + }, + "skin": { + GTLD: "skin", + DelegationDate: "2016-01-15", + RemovalDate: "", + }, + "sky": { + GTLD: "sky", + 
DelegationDate: "2014-12-12", + RemovalDate: "", + }, + "skype": { + GTLD: "skype", + DelegationDate: "2015-06-23", + RemovalDate: "", + }, + "sl": { + GTLD: "sl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sling": { + GTLD: "sling", + DelegationDate: "2016-08-10", + RemovalDate: "", + }, + "sm": { + GTLD: "sm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "smart": { + GTLD: "smart", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "smile": { + GTLD: "smile", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "sn": { + GTLD: "sn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sncf": { + GTLD: "sncf", + DelegationDate: "2015-06-03", + RemovalDate: "", + }, + "so": { + GTLD: "so", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "soccer": { + GTLD: "soccer", + DelegationDate: "2015-05-13", + RemovalDate: "", + }, + "social": { + GTLD: "social", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "softbank": { + GTLD: "softbank", + DelegationDate: "2016-01-16", + RemovalDate: "", + }, + "software": { + GTLD: "software", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "sohu": { + GTLD: "sohu", + DelegationDate: "2014-03-25", + RemovalDate: "", + }, + "solar": { + GTLD: "solar", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "solutions": { + GTLD: "solutions", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "song": { + GTLD: "song", + DelegationDate: "2016-02-24", + RemovalDate: "", + }, + "sony": { + GTLD: "sony", + DelegationDate: "2015-04-16", + RemovalDate: "", + }, + "soy": { + GTLD: "soy", + DelegationDate: "2014-04-19", + RemovalDate: "", + }, + "space": { + GTLD: "space", + DelegationDate: "2014-05-30", + RemovalDate: "", + }, + "spiegel": { + GTLD: "spiegel", + DelegationDate: "2014-07-18", + RemovalDate: "2018-12-15", + }, + "sport": { + GTLD: "sport", + DelegationDate: "2018-01-10", + RemovalDate: "", + }, + "spot": { + GTLD: "spot", + DelegationDate: "2016-02-19", + RemovalDate: "", + }, + "spreadbetting": { + GTLD: "spreadbetting", + DelegationDate: "2015-03-13", + RemovalDate: "", + }, + "sr": { + GTLD: "sr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "srl": { + GTLD: "srl", + DelegationDate: "2015-07-24", + RemovalDate: "", + }, + "srt": { + GTLD: "srt", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "ss": { + GTLD: "ss", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "st": { + GTLD: "st", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "stada": { + GTLD: "stada", + DelegationDate: "2015-09-13", + RemovalDate: "", + }, + "staples": { + GTLD: "staples", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "star": { + GTLD: "star", + DelegationDate: "2015-12-22", + RemovalDate: "", + }, + "starhub": { + GTLD: "starhub", + DelegationDate: "2015-06-22", + RemovalDate: "2019-08-02", + }, + "statebank": { + GTLD: "statebank", + DelegationDate: "2016-04-16", + RemovalDate: "", + }, + "statefarm": { + GTLD: "statefarm", + DelegationDate: "2015-12-24", + RemovalDate: "", + }, + "statoil": { + GTLD: "statoil", + DelegationDate: "2015-06-19", + RemovalDate: "2018-10-03", + }, + "stc": { + GTLD: "stc", + DelegationDate: "2015-08-29", + RemovalDate: "", + }, + "stcgroup": { + GTLD: "stcgroup", + DelegationDate: "2015-08-28", + RemovalDate: "", + }, + "stockholm": { + GTLD: "stockholm", + DelegationDate: "2015-09-26", + RemovalDate: "", + }, + "storage": { + GTLD: "storage", + DelegationDate: "2015-12-18", + RemovalDate: "", + }, + "store": 
{ + GTLD: "store", + DelegationDate: "2016-02-22", + RemovalDate: "", + }, + "stream": { + GTLD: "stream", + DelegationDate: "2016-03-18", + RemovalDate: "", + }, + "studio": { + GTLD: "studio", + DelegationDate: "2015-07-08", + RemovalDate: "", + }, + "study": { + GTLD: "study", + DelegationDate: "2015-02-25", + RemovalDate: "", + }, + "style": { + GTLD: "style", + DelegationDate: "2015-02-04", + RemovalDate: "", + }, + "su": { + GTLD: "su", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sucks": { + GTLD: "sucks", + DelegationDate: "2015-02-25", + RemovalDate: "", + }, + "supplies": { + GTLD: "supplies", + DelegationDate: "2014-02-25", + RemovalDate: "", + }, + "supply": { + GTLD: "supply", + DelegationDate: "2014-02-21", + RemovalDate: "", + }, + "support": { + GTLD: "support", + DelegationDate: "2013-12-18", + RemovalDate: "", + }, + "surf": { + GTLD: "surf", + DelegationDate: "2014-06-18", + RemovalDate: "", + }, + "surgery": { + GTLD: "surgery", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "suzuki": { + GTLD: "suzuki", + DelegationDate: "2014-07-02", + RemovalDate: "", + }, + "sv": { + GTLD: "sv", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "swatch": { + GTLD: "swatch", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "swiftcover": { + GTLD: "swiftcover", + DelegationDate: "2016-07-21", + RemovalDate: "", + }, + "swiss": { + GTLD: "swiss", + DelegationDate: "2015-04-29", + RemovalDate: "", + }, + "sx": { + GTLD: "sx", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sy": { + GTLD: "sy", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "sydney": { + GTLD: "sydney", + DelegationDate: "2014-11-05", + RemovalDate: "", + }, + "symantec": { + GTLD: "symantec", + DelegationDate: "2015-12-03", + RemovalDate: "", + }, + "systems": { + GTLD: "systems", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "sz": { + GTLD: "sz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tab": { + GTLD: "tab", + DelegationDate: "2015-11-13", + RemovalDate: "", + }, + "taipei": { + GTLD: "taipei", + DelegationDate: "2014-10-23", + RemovalDate: "", + }, + "talk": { + GTLD: "talk", + DelegationDate: "2016-03-25", + RemovalDate: "", + }, + "taobao": { + GTLD: "taobao", + DelegationDate: "2016-01-21", + RemovalDate: "", + }, + "target": { + GTLD: "target", + DelegationDate: "2016-08-04", + RemovalDate: "", + }, + "tatamotors": { + GTLD: "tatamotors", + DelegationDate: "2015-07-24", + RemovalDate: "", + }, + "tatar": { + GTLD: "tatar", + DelegationDate: "2014-08-07", + RemovalDate: "", + }, + "tattoo": { + GTLD: "tattoo", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "tax": { + GTLD: "tax", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "taxi": { + GTLD: "taxi", + DelegationDate: "2015-05-07", + RemovalDate: "", + }, + "tc": { + GTLD: "tc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tci": { + GTLD: "tci", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "td": { + GTLD: "td", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tdk": { + GTLD: "tdk", + DelegationDate: "2016-06-07", + RemovalDate: "", + }, + "team": { + GTLD: "team", + DelegationDate: "2015-04-16", + RemovalDate: "", + }, + "tech": { + GTLD: "tech", + DelegationDate: "2015-03-21", + RemovalDate: "", + }, + "technology": { + GTLD: "technology", + DelegationDate: "2013-11-14", + RemovalDate: "", + }, + "tel": { + GTLD: "tel", + DelegationDate: "2007-03-02", + RemovalDate: "", + }, + "telecity": { + GTLD: "telecity", + 
DelegationDate: "2016-02-25", + RemovalDate: "2018-08-19", + }, + "telefonica": { + GTLD: "telefonica", + DelegationDate: "2015-06-26", + RemovalDate: "", + }, + "temasek": { + GTLD: "temasek", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "tennis": { + GTLD: "tennis", + DelegationDate: "2015-02-04", + RemovalDate: "", + }, + "teva": { + GTLD: "teva", + DelegationDate: "2016-04-13", + RemovalDate: "", + }, + "tf": { + GTLD: "tf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tg": { + GTLD: "tg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "th": { + GTLD: "th", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "thd": { + GTLD: "thd", + DelegationDate: "2015-05-22", + RemovalDate: "", + }, + "theater": { + GTLD: "theater", + DelegationDate: "2015-05-06", + RemovalDate: "", + }, + "theatre": { + GTLD: "theatre", + DelegationDate: "2015-09-13", + RemovalDate: "", + }, + "tiaa": { + GTLD: "tiaa", + DelegationDate: "2016-07-20", + RemovalDate: "", + }, + "tickets": { + GTLD: "tickets", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "tienda": { + GTLD: "tienda", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "tiffany": { + GTLD: "tiffany", + DelegationDate: "2016-01-21", + RemovalDate: "", + }, + "tips": { + GTLD: "tips", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "tires": { + GTLD: "tires", + DelegationDate: "2014-12-18", + RemovalDate: "", + }, + "tirol": { + GTLD: "tirol", + DelegationDate: "2014-06-04", + RemovalDate: "", + }, + "tj": { + GTLD: "tj", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tjmaxx": { + GTLD: "tjmaxx", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "tjx": { + GTLD: "tjx", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "tk": { + GTLD: "tk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tkmaxx": { + GTLD: "tkmaxx", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "tl": { + GTLD: "tl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tm": { + GTLD: "tm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tmall": { + GTLD: "tmall", + DelegationDate: "2016-01-21", + RemovalDate: "", + }, + "tn": { + GTLD: "tn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "to": { + GTLD: "to", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "today": { + GTLD: "today", + DelegationDate: "2013-11-19", + RemovalDate: "", + }, + "tokyo": { + GTLD: "tokyo", + DelegationDate: "2014-01-29", + RemovalDate: "", + }, + "tools": { + GTLD: "tools", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "top": { + GTLD: "top", + DelegationDate: "2014-08-03", + RemovalDate: "", + }, + "toray": { + GTLD: "toray", + DelegationDate: "2015-05-01", + RemovalDate: "", + }, + "toshiba": { + GTLD: "toshiba", + DelegationDate: "2015-02-04", + RemovalDate: "", + }, + "total": { + GTLD: "total", + DelegationDate: "2016-03-09", + RemovalDate: "", + }, + "tours": { + GTLD: "tours", + DelegationDate: "2015-03-24", + RemovalDate: "", + }, + "town": { + GTLD: "town", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "toyota": { + GTLD: "toyota", + DelegationDate: "2015-07-26", + RemovalDate: "", + }, + "toys": { + GTLD: "toys", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "tr": { + GTLD: "tr", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "trade": { + GTLD: "trade", + DelegationDate: "2014-03-19", + RemovalDate: "", + }, + "trading": { + GTLD: "trading", + DelegationDate: "2015-03-13", + RemovalDate: "", + 
}, + "training": { + GTLD: "training", + DelegationDate: "2013-12-28", + RemovalDate: "", + }, + "travel": { + GTLD: "travel", + DelegationDate: "2005-07-21", + RemovalDate: "", + }, + "travelchannel": { + GTLD: "travelchannel", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "travelers": { + GTLD: "travelers", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "travelersinsurance": { + GTLD: "travelersinsurance", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "trust": { + GTLD: "trust", + DelegationDate: "2014-12-06", + RemovalDate: "", + }, + "trv": { + GTLD: "trv", + DelegationDate: "2015-12-11", + RemovalDate: "", + }, + "tt": { + GTLD: "tt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tube": { + GTLD: "tube", + DelegationDate: "2016-01-11", + RemovalDate: "", + }, + "tui": { + GTLD: "tui", + DelegationDate: "2014-09-27", + RemovalDate: "", + }, + "tunes": { + GTLD: "tunes", + DelegationDate: "2016-02-25", + RemovalDate: "", + }, + "tushu": { + GTLD: "tushu", + DelegationDate: "2015-12-14", + RemovalDate: "", + }, + "tv": { + GTLD: "tv", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tvs": { + GTLD: "tvs", + DelegationDate: "2016-02-13", + RemovalDate: "", + }, + "tw": { + GTLD: "tw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "tz": { + GTLD: "tz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ua": { + GTLD: "ua", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ubank": { + GTLD: "ubank", + DelegationDate: "2016-08-18", + RemovalDate: "", + }, + "ubs": { + GTLD: "ubs", + DelegationDate: "2015-07-11", + RemovalDate: "", + }, + "uconnect": { + GTLD: "uconnect", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "ug": { + GTLD: "ug", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "uk": { + GTLD: "uk", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "unicom": { + GTLD: "unicom", + DelegationDate: "2016-02-04", + RemovalDate: "", + }, + "university": { + GTLD: "university", + DelegationDate: "2014-04-11", + RemovalDate: "", + }, + "uno": { + GTLD: "uno", + DelegationDate: "2013-11-30", + RemovalDate: "", + }, + "uol": { + GTLD: "uol", + DelegationDate: "2014-08-16", + RemovalDate: "", + }, + "ups": { + GTLD: "ups", + DelegationDate: "2016-05-31", + RemovalDate: "", + }, + "us": { + GTLD: "us", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "uy": { + GTLD: "uy", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "uz": { + GTLD: "uz", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "va": { + GTLD: "va", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "vacations": { + GTLD: "vacations", + DelegationDate: "2014-02-21", + RemovalDate: "", + }, + "vana": { + GTLD: "vana", + DelegationDate: "2015-11-10", + RemovalDate: "", + }, + "vanguard": { + GTLD: "vanguard", + DelegationDate: "2016-08-28", + RemovalDate: "", + }, + "vc": { + GTLD: "vc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "ve": { + GTLD: "ve", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "vegas": { + GTLD: "vegas", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "ventures": { + GTLD: "ventures", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "verisign": { + GTLD: "verisign", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "versicherung": { + GTLD: "versicherung", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "vet": { + GTLD: "vet", + DelegationDate: "2014-05-31", + RemovalDate: "", + }, + "vg": { + GTLD: "vg", 
+ DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "vi": { + GTLD: "vi", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "viajes": { + GTLD: "viajes", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "video": { + GTLD: "video", + DelegationDate: "2014-12-25", + RemovalDate: "", + }, + "vig": { + GTLD: "vig", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "viking": { + GTLD: "viking", + DelegationDate: "2016-02-22", + RemovalDate: "", + }, + "villas": { + GTLD: "villas", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "vin": { + GTLD: "vin", + DelegationDate: "2015-08-05", + RemovalDate: "", + }, + "vip": { + GTLD: "vip", + DelegationDate: "2015-11-25", + RemovalDate: "", + }, + "virgin": { + GTLD: "virgin", + DelegationDate: "2015-10-07", + RemovalDate: "", + }, + "visa": { + GTLD: "visa", + DelegationDate: "2016-07-28", + RemovalDate: "", + }, + "vision": { + GTLD: "vision", + DelegationDate: "2014-02-11", + RemovalDate: "", + }, + "vista": { + GTLD: "vista", + DelegationDate: "2015-06-22", + RemovalDate: "2018-09-13", + }, + "vistaprint": { + GTLD: "vistaprint", + DelegationDate: "2015-06-22", + RemovalDate: "", + }, + "viva": { + GTLD: "viva", + DelegationDate: "2015-08-28", + RemovalDate: "", + }, + "vivo": { + GTLD: "vivo", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "vlaanderen": { + GTLD: "vlaanderen", + DelegationDate: "2014-06-18", + RemovalDate: "", + }, + "vn": { + GTLD: "vn", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "vodka": { + GTLD: "vodka", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "volkswagen": { + GTLD: "volkswagen", + DelegationDate: "2016-01-09", + RemovalDate: "", + }, + "volvo": { + GTLD: "volvo", + DelegationDate: "2016-10-24", + RemovalDate: "", + }, + "vote": { + GTLD: "vote", + DelegationDate: "2014-03-02", + RemovalDate: "", + }, + "voting": { + GTLD: "voting", + DelegationDate: "2014-01-29", + RemovalDate: "", + }, + "voto": { + GTLD: "voto", + DelegationDate: "2014-03-02", + RemovalDate: "", + }, + "voyage": { + GTLD: "voyage", + DelegationDate: "2013-11-06", + RemovalDate: "", + }, + "vu": { + GTLD: "vu", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "vuelos": { + GTLD: "vuelos", + DelegationDate: "2016-03-02", + RemovalDate: "", + }, + "wales": { + GTLD: "wales", + DelegationDate: "2014-08-07", + RemovalDate: "", + }, + "walmart": { + GTLD: "walmart", + DelegationDate: "2016-08-18", + RemovalDate: "", + }, + "walter": { + GTLD: "walter", + DelegationDate: "2015-05-27", + RemovalDate: "", + }, + "wang": { + GTLD: "wang", + DelegationDate: "2014-01-03", + RemovalDate: "", + }, + "wanggou": { + GTLD: "wanggou", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "warman": { + GTLD: "warman", + DelegationDate: "2016-05-03", + RemovalDate: "", + }, + "watch": { + GTLD: "watch", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "watches": { + GTLD: "watches", + DelegationDate: "2015-12-14", + RemovalDate: "", + }, + "weather": { + GTLD: "weather", + DelegationDate: "2016-01-12", + RemovalDate: "", + }, + "weatherchannel": { + GTLD: "weatherchannel", + DelegationDate: "2016-01-28", + RemovalDate: "", + }, + "webcam": { + GTLD: "webcam", + DelegationDate: "2014-03-19", + RemovalDate: "", + }, + "weber": { + GTLD: "weber", + DelegationDate: "2015-12-22", + RemovalDate: "", + }, + "website": { + GTLD: "website", + DelegationDate: "2014-05-30", + RemovalDate: "", + }, + "wed": { + GTLD: "wed", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + 
"wedding": { + GTLD: "wedding", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "weibo": { + GTLD: "weibo", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "weir": { + GTLD: "weir", + DelegationDate: "2015-04-17", + RemovalDate: "", + }, + "wf": { + GTLD: "wf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "whoswho": { + GTLD: "whoswho", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "wien": { + GTLD: "wien", + DelegationDate: "2014-01-03", + RemovalDate: "", + }, + "wiki": { + GTLD: "wiki", + DelegationDate: "2014-02-19", + RemovalDate: "", + }, + "williamhill": { + GTLD: "williamhill", + DelegationDate: "2014-07-27", + RemovalDate: "", + }, + "win": { + GTLD: "win", + DelegationDate: "2015-03-25", + RemovalDate: "", + }, + "windows": { + GTLD: "windows", + DelegationDate: "2015-06-10", + RemovalDate: "", + }, + "wine": { + GTLD: "wine", + DelegationDate: "2015-08-05", + RemovalDate: "", + }, + "winners": { + GTLD: "winners", + DelegationDate: "2016-07-15", + RemovalDate: "", + }, + "wme": { + GTLD: "wme", + DelegationDate: "2014-09-10", + RemovalDate: "", + }, + "wolterskluwer": { + GTLD: "wolterskluwer", + DelegationDate: "2016-02-11", + RemovalDate: "", + }, + "woodside": { + GTLD: "woodside", + DelegationDate: "2016-06-23", + RemovalDate: "", + }, + "work": { + GTLD: "work", + DelegationDate: "2014-09-23", + RemovalDate: "", + }, + "works": { + GTLD: "works", + DelegationDate: "2014-01-23", + RemovalDate: "", + }, + "world": { + GTLD: "world", + DelegationDate: "2014-09-19", + RemovalDate: "", + }, + "wow": { + GTLD: "wow", + DelegationDate: "2016-09-26", + RemovalDate: "", + }, + "ws": { + GTLD: "ws", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "wtc": { + GTLD: "wtc", + DelegationDate: "2014-04-29", + RemovalDate: "", + }, + "wtf": { + GTLD: "wtf", + DelegationDate: "2014-04-23", + RemovalDate: "", + }, + "xbox": { + GTLD: "xbox", + DelegationDate: "2015-06-04", + RemovalDate: "", + }, + "xerox": { + GTLD: "xerox", + DelegationDate: "2015-04-16", + RemovalDate: "", + }, + "xfinity": { + GTLD: "xfinity", + DelegationDate: "2016-07-07", + RemovalDate: "", + }, + "xihuan": { + GTLD: "xihuan", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "xin": { + GTLD: "xin", + DelegationDate: "2015-03-07", + RemovalDate: "", + }, + "xn--11b4c3d": { + GTLD: "xn--11b4c3d", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--1ck2e1b": { + GTLD: "xn--1ck2e1b", + DelegationDate: "2016-02-19", + RemovalDate: "", + }, + "xn--1qqw23a": { + GTLD: "xn--1qqw23a", + DelegationDate: "2014-08-14", + RemovalDate: "", + }, + "xn--2scrj9c": { + GTLD: "xn--2scrj9c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--30rr7y": { + GTLD: "xn--30rr7y", + DelegationDate: "2015-03-31", + RemovalDate: "", + }, + "xn--3bst00m": { + GTLD: "xn--3bst00m", + DelegationDate: "2014-01-03", + RemovalDate: "", + }, + "xn--3ds443g": { + GTLD: "xn--3ds443g", + DelegationDate: "2014-01-02", + RemovalDate: "", + }, + "xn--3e0b707e": { + GTLD: "xn--3e0b707e", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--3hcrj9c": { + GTLD: "xn--3hcrj9c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--3oq18vl8pn36a": { + GTLD: "xn--3oq18vl8pn36a", + DelegationDate: "2016-08-16", + RemovalDate: "", + }, + "xn--3pxu8k": { + GTLD: "xn--3pxu8k", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--42c2d9a": { + GTLD: "xn--42c2d9a", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--45br5cyl": { + GTLD: 
"xn--45br5cyl", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--45brj9c": { + GTLD: "xn--45brj9c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--45q11c": { + GTLD: "xn--45q11c", + DelegationDate: "2014-11-17", + RemovalDate: "", + }, + "xn--4gbrim": { + GTLD: "xn--4gbrim", + DelegationDate: "2014-05-28", + RemovalDate: "", + }, + "xn--54b7fta0cc": { + GTLD: "xn--54b7fta0cc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--55qw42g": { + GTLD: "xn--55qw42g", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "xn--55qx5d": { + GTLD: "xn--55qx5d", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "xn--5su34j936bgsg": { + GTLD: "xn--5su34j936bgsg", + DelegationDate: "2016-07-02", + RemovalDate: "", + }, + "xn--5tzm5g": { + GTLD: "xn--5tzm5g", + DelegationDate: "2016-04-17", + RemovalDate: "", + }, + "xn--6frz82g": { + GTLD: "xn--6frz82g", + DelegationDate: "2014-02-05", + RemovalDate: "", + }, + "xn--6qq986b3xl": { + GTLD: "xn--6qq986b3xl", + DelegationDate: "2014-01-03", + RemovalDate: "", + }, + "xn--80adxhks": { + GTLD: "xn--80adxhks", + DelegationDate: "2014-04-24", + RemovalDate: "", + }, + "xn--80ao21a": { + GTLD: "xn--80ao21a", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--80aqecdr1a": { + GTLD: "xn--80aqecdr1a", + DelegationDate: "2016-12-01", + RemovalDate: "", + }, + "xn--80asehdb": { + GTLD: "xn--80asehdb", + DelegationDate: "2013-10-23", + RemovalDate: "", + }, + "xn--80aswg": { + GTLD: "xn--80aswg", + DelegationDate: "2013-10-23", + RemovalDate: "", + }, + "xn--8y0a063a": { + GTLD: "xn--8y0a063a", + DelegationDate: "2016-02-06", + RemovalDate: "", + }, + "xn--90a3ac": { + GTLD: "xn--90a3ac", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--90ae": { + GTLD: "xn--90ae", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--90ais": { + GTLD: "xn--90ais", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--9dbq2a": { + GTLD: "xn--9dbq2a", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--9et52u": { + GTLD: "xn--9et52u", + DelegationDate: "2015-03-27", + RemovalDate: "", + }, + "xn--9krt00a": { + GTLD: "xn--9krt00a", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "xn--b4w605ferd": { + GTLD: "xn--b4w605ferd", + DelegationDate: "2015-01-24", + RemovalDate: "", + }, + "xn--bck1b9a5dre4c": { + GTLD: "xn--bck1b9a5dre4c", + DelegationDate: "2016-02-21", + RemovalDate: "", + }, + "xn--c1avg": { + GTLD: "xn--c1avg", + DelegationDate: "2014-03-05", + RemovalDate: "", + }, + "xn--c2br7g": { + GTLD: "xn--c2br7g", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--cck2b3b": { + GTLD: "xn--cck2b3b", + DelegationDate: "2016-02-19", + RemovalDate: "", + }, + "xn--cg4bki": { + GTLD: "xn--cg4bki", + DelegationDate: "2014-02-21", + RemovalDate: "", + }, + "xn--clchc0ea0b2g2a9gcd": { + GTLD: "xn--clchc0ea0b2g2a9gcd", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--czr694b": { + GTLD: "xn--czr694b", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "xn--czrs0t": { + GTLD: "xn--czrs0t", + DelegationDate: "2014-12-06", + RemovalDate: "", + }, + "xn--czru2d": { + GTLD: "xn--czru2d", + DelegationDate: "2014-03-31", + RemovalDate: "", + }, + "xn--d1acj3b": { + GTLD: "xn--d1acj3b", + DelegationDate: "2014-02-26", + RemovalDate: "", + }, + "xn--d1alf": { + GTLD: "xn--d1alf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--e1a4c": { + GTLD: "xn--e1a4c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--eckvdtc9d": { + 
GTLD: "xn--eckvdtc9d", + DelegationDate: "2015-12-14", + RemovalDate: "", + }, + "xn--efvy88h": { + GTLD: "xn--efvy88h", + DelegationDate: "2015-08-24", + RemovalDate: "", + }, + "xn--estv75g": { + GTLD: "xn--estv75g", + DelegationDate: "2015-05-07", + RemovalDate: "", + }, + "xn--fct429k": { + GTLD: "xn--fct429k", + DelegationDate: "2016-03-25", + RemovalDate: "", + }, + "xn--fhbei": { + GTLD: "xn--fhbei", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--fiq228c5hs": { + GTLD: "xn--fiq228c5hs", + DelegationDate: "2014-01-03", + RemovalDate: "", + }, + "xn--fiq64b": { + GTLD: "xn--fiq64b", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "xn--fiqs8s": { + GTLD: "xn--fiqs8s", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--fiqz9s": { + GTLD: "xn--fiqz9s", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--fjq720a": { + GTLD: "xn--fjq720a", + DelegationDate: "2015-05-09", + RemovalDate: "", + }, + "xn--flw351e": { + GTLD: "xn--flw351e", + DelegationDate: "2014-11-20", + RemovalDate: "", + }, + "xn--fpcrj9c3d": { + GTLD: "xn--fpcrj9c3d", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--fzc2c9e2c": { + GTLD: "xn--fzc2c9e2c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--fzys8d69uvgm": { + GTLD: "xn--fzys8d69uvgm", + DelegationDate: "2016-05-11", + RemovalDate: "", + }, + "xn--g2xx48c": { + GTLD: "xn--g2xx48c", + DelegationDate: "2016-01-16", + RemovalDate: "", + }, + "xn--gckr3f0f": { + GTLD: "xn--gckr3f0f", + DelegationDate: "2016-02-19", + RemovalDate: "", + }, + "xn--gecrj9c": { + GTLD: "xn--gecrj9c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--gk3at1e": { + GTLD: "xn--gk3at1e", + DelegationDate: "2016-09-30", + RemovalDate: "", + }, + "xn--h2breg3eve": { + GTLD: "xn--h2breg3eve", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--h2brj9c": { + GTLD: "xn--h2brj9c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--h2brj9c8c": { + GTLD: "xn--h2brj9c8c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--hxt814e": { + GTLD: "xn--hxt814e", + DelegationDate: "2014-12-02", + RemovalDate: "", + }, + "xn--i1b6b1a6a2e": { + GTLD: "xn--i1b6b1a6a2e", + DelegationDate: "2014-03-09", + RemovalDate: "", + }, + "xn--imr513n": { + GTLD: "xn--imr513n", + DelegationDate: "2015-05-30", + RemovalDate: "", + }, + "xn--io0a7i": { + GTLD: "xn--io0a7i", + DelegationDate: "2014-01-18", + RemovalDate: "", + }, + "xn--j1aef": { + GTLD: "xn--j1aef", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--j1amh": { + GTLD: "xn--j1amh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--j6w193g": { + GTLD: "xn--j6w193g", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--jlq61u9w7b": { + GTLD: "xn--jlq61u9w7b", + DelegationDate: "2015-12-18", + RemovalDate: "", + }, + "xn--jvr189m": { + GTLD: "xn--jvr189m", + DelegationDate: "2016-02-22", + RemovalDate: "", + }, + "xn--kcrx77d1x4a": { + GTLD: "xn--kcrx77d1x4a", + DelegationDate: "2015-04-07", + RemovalDate: "", + }, + "xn--kprw13d": { + GTLD: "xn--kprw13d", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--kpry57d": { + GTLD: "xn--kpry57d", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--kpu716f": { + GTLD: "xn--kpu716f", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "xn--kput3i": { + GTLD: "xn--kput3i", + DelegationDate: "2014-06-17", + RemovalDate: "", + }, + "xn--l1acc": { + GTLD: "xn--l1acc", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + 
"xn--lgbbat1ad8j": { + GTLD: "xn--lgbbat1ad8j", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgb9awbf": { + GTLD: "xn--mgb9awbf", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgba3a3ejt": { + GTLD: "xn--mgba3a3ejt", + DelegationDate: "2015-10-15", + RemovalDate: "", + }, + "xn--mgba3a4f16a": { + GTLD: "xn--mgba3a4f16a", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgba7c0bbn0a": { + GTLD: "xn--mgba7c0bbn0a", + DelegationDate: "2016-05-03", + RemovalDate: "", + }, + "xn--mgbaakc7dvf": { + GTLD: "xn--mgbaakc7dvf", + DelegationDate: "2017-06-10", + RemovalDate: "", + }, + "xn--mgbaam7a8h": { + GTLD: "xn--mgbaam7a8h", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbab2bd": { + GTLD: "xn--mgbab2bd", + DelegationDate: "2014-02-18", + RemovalDate: "", + }, + "xn--mgbah1a3hjkrd": { + GTLD: "xn--mgbah1a3hjkrd", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbai9azgqp6j": { + GTLD: "xn--mgbai9azgqp6j", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbayh7gpa": { + GTLD: "xn--mgbayh7gpa", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbb9fbpob": { + GTLD: "xn--mgbb9fbpob", + DelegationDate: "2015-12-23", + RemovalDate: "", + }, + "xn--mgbbh1a": { + GTLD: "xn--mgbbh1a", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbbh1a71e": { + GTLD: "xn--mgbbh1a71e", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbc0a9azcg": { + GTLD: "xn--mgbc0a9azcg", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbca7dzdo": { + GTLD: "xn--mgbca7dzdo", + DelegationDate: "2016-04-06", + RemovalDate: "", + }, + "xn--mgberp4a5d4ar": { + GTLD: "xn--mgberp4a5d4ar", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbgu82a": { + GTLD: "xn--mgbgu82a", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbi4ecexp": { + GTLD: "xn--mgbi4ecexp", + DelegationDate: "2016-12-01", + RemovalDate: "", + }, + "xn--mgbpl2fh": { + GTLD: "xn--mgbpl2fh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbt3dhd": { + GTLD: "xn--mgbt3dhd", + DelegationDate: "2015-12-07", + RemovalDate: "", + }, + "xn--mgbtx2b": { + GTLD: "xn--mgbtx2b", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mgbx4cd0ab": { + GTLD: "xn--mgbx4cd0ab", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mix891f": { + GTLD: "xn--mix891f", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--mk1bu44c": { + GTLD: "xn--mk1bu44c", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--mxtq1m": { + GTLD: "xn--mxtq1m", + DelegationDate: "2015-03-03", + RemovalDate: "", + }, + "xn--ngbc5azd": { + GTLD: "xn--ngbc5azd", + DelegationDate: "2013-10-23", + RemovalDate: "", + }, + "xn--ngbe9e0a": { + GTLD: "xn--ngbe9e0a", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "xn--ngbrx": { + GTLD: "xn--ngbrx", + DelegationDate: "2017-05-23", + RemovalDate: "", + }, + "xn--node": { + GTLD: "xn--node", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--nqv7f": { + GTLD: "xn--nqv7f", + DelegationDate: "2014-03-09", + RemovalDate: "", + }, + "xn--nqv7fs00ema": { + GTLD: "xn--nqv7fs00ema", + DelegationDate: "2014-03-09", + RemovalDate: "", + }, + "xn--nyqy26a": { + GTLD: "xn--nyqy26a", + DelegationDate: "2015-04-02", + RemovalDate: "", + }, + "xn--o3cw4h": { + GTLD: "xn--o3cw4h", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--ogbpf8fl": { + GTLD: "xn--ogbpf8fl", + DelegationDate: "1985-01-01", + RemovalDate: 
"", + }, + "xn--otu796d": { + GTLD: "xn--otu796d", + DelegationDate: "2018-01-24", + RemovalDate: "", + }, + "xn--p1acf": { + GTLD: "xn--p1acf", + DelegationDate: "2014-09-27", + RemovalDate: "", + }, + "xn--p1ai": { + GTLD: "xn--p1ai", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--pbt977c": { + GTLD: "xn--pbt977c", + DelegationDate: "2015-12-15", + RemovalDate: "", + }, + "xn--pgbs0dh": { + GTLD: "xn--pgbs0dh", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--pssy2u": { + GTLD: "xn--pssy2u", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--q9jyb4c": { + GTLD: "xn--q9jyb4c", + DelegationDate: "2013-11-23", + RemovalDate: "", + }, + "xn--qcka1pmc": { + GTLD: "xn--qcka1pmc", + DelegationDate: "2014-11-20", + RemovalDate: "", + }, + "xn--qxam": { + GTLD: "xn--qxam", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--rhqv96g": { + GTLD: "xn--rhqv96g", + DelegationDate: "2014-03-12", + RemovalDate: "", + }, + "xn--rovu88b": { + GTLD: "xn--rovu88b", + DelegationDate: "2016-02-19", + RemovalDate: "", + }, + "xn--rvc1e0am3e": { + GTLD: "xn--rvc1e0am3e", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--s9brj9c": { + GTLD: "xn--s9brj9c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--ses554g": { + GTLD: "xn--ses554g", + DelegationDate: "2014-04-10", + RemovalDate: "", + }, + "xn--t60b56a": { + GTLD: "xn--t60b56a", + DelegationDate: "2015-07-28", + RemovalDate: "", + }, + "xn--tckwe": { + GTLD: "xn--tckwe", + DelegationDate: "2015-07-29", + RemovalDate: "", + }, + "xn--tiq49xqyj": { + GTLD: "xn--tiq49xqyj", + DelegationDate: "2016-12-01", + RemovalDate: "", + }, + "xn--unup4y": { + GTLD: "xn--unup4y", + DelegationDate: "2013-10-23", + RemovalDate: "", + }, + "xn--vermgensberater-ctb": { + GTLD: "xn--vermgensberater-ctb", + DelegationDate: "2014-09-27", + RemovalDate: "", + }, + "xn--vermgensberatung-pwb": { + GTLD: "xn--vermgensberatung-pwb", + DelegationDate: "2014-09-27", + RemovalDate: "", + }, + "xn--vhquv": { + GTLD: "xn--vhquv", + DelegationDate: "2014-08-22", + RemovalDate: "", + }, + "xn--vuq861b": { + GTLD: "xn--vuq861b", + DelegationDate: "2015-03-18", + RemovalDate: "", + }, + "xn--w4r85el8fhu5dnra": { + GTLD: "xn--w4r85el8fhu5dnra", + DelegationDate: "2016-03-05", + RemovalDate: "", + }, + "xn--w4rs40l": { + GTLD: "xn--w4rs40l", + DelegationDate: "2016-05-16", + RemovalDate: "", + }, + "xn--wgbh1c": { + GTLD: "xn--wgbh1c", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--wgbl6a": { + GTLD: "xn--wgbl6a", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--xhq521b": { + GTLD: "xn--xhq521b", + DelegationDate: "2014-08-14", + RemovalDate: "", + }, + "xn--xkc2al3hye2a": { + GTLD: "xn--xkc2al3hye2a", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--xkc2dl3a5ee0h": { + GTLD: "xn--xkc2dl3a5ee0h", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--y9a3aq": { + GTLD: "xn--y9a3aq", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--yfro4i67o": { + GTLD: "xn--yfro4i67o", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--ygbi2ammx": { + GTLD: "xn--ygbi2ammx", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "xn--zfr164b": { + GTLD: "xn--zfr164b", + DelegationDate: "2013-12-17", + RemovalDate: "", + }, + "xperia": { + GTLD: "xperia", + DelegationDate: "2015-08-05", + RemovalDate: "2018-07-20", + }, + "xxx": { + GTLD: "xxx", + DelegationDate: "2011-04-15", + RemovalDate: "", + }, + "xyz": { + GTLD: "xyz", + DelegationDate: 
"2014-02-19", + RemovalDate: "", + }, + "yachts": { + GTLD: "yachts", + DelegationDate: "2014-05-22", + RemovalDate: "", + }, + "yahoo": { + GTLD: "yahoo", + DelegationDate: "2016-02-13", + RemovalDate: "", + }, + "yamaxun": { + GTLD: "yamaxun", + DelegationDate: "2015-10-07", + RemovalDate: "", + }, + "yandex": { + GTLD: "yandex", + DelegationDate: "2014-07-18", + RemovalDate: "", + }, + "ye": { + GTLD: "ye", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "yodobashi": { + GTLD: "yodobashi", + DelegationDate: "2015-02-19", + RemovalDate: "", + }, + "yoga": { + GTLD: "yoga", + DelegationDate: "2014-10-15", + RemovalDate: "", + }, + "yokohama": { + GTLD: "yokohama", + DelegationDate: "2014-04-03", + RemovalDate: "", + }, + "you": { + GTLD: "you", + DelegationDate: "2016-03-25", + RemovalDate: "", + }, + "youtube": { + GTLD: "youtube", + DelegationDate: "2014-08-29", + RemovalDate: "", + }, + "yt": { + GTLD: "yt", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "yun": { + GTLD: "yun", + DelegationDate: "2016-03-30", + RemovalDate: "", + }, + "za": { + GTLD: "za", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "zappos": { + GTLD: "zappos", + DelegationDate: "2016-06-02", + RemovalDate: "", + }, + "zara": { + GTLD: "zara", + DelegationDate: "2015-10-27", + RemovalDate: "", + }, + "zero": { + GTLD: "zero", + DelegationDate: "2015-12-05", + RemovalDate: "", + }, + "zip": { + GTLD: "zip", + DelegationDate: "2014-09-15", + RemovalDate: "", + }, + "zippo": { + GTLD: "zippo", + DelegationDate: "2016-07-02", + RemovalDate: "2019-02-15", + }, + "zm": { + GTLD: "zm", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + "zone": { + GTLD: "zone", + DelegationDate: "2014-01-14", + RemovalDate: "", + }, + "zuerich": { + GTLD: "zuerich", + DelegationDate: "2014-12-25", + RemovalDate: "", + }, + "zw": { + GTLD: "zw", + DelegationDate: "1985-01-01", + RemovalDate: "", + }, + // .onion is a special case and not a general gTLD. However, it is allowed in + // some circumstances in the web PKI so the Zlint gtldMap includes it with + // a delegationDate based on the CABF ballot to allow EV issuance for .onion + // domains: https://cabforum.org/2015/02/18/ballot-144-validation-rules-dot-onion-names/ + "onion": { + GTLD: "onion", + DelegationDate: "2015-02-18", + RemovalDate: "", + }, +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ip.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ip.go new file mode 100644 index 00000000..153dc0fd --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ip.go @@ -0,0 +1,115 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +// contains helper functions for ip address lints + +package util + +import ( + "fmt" + "net" +) + +type subnetCategory int + +const ( + privateUse subnetCategory = iota + sharedAddressSpace + benchmarking + documentation + reserved + protocolAssignment + as112 + amt + orchidV2 + lisp + thisHostOnThisNetwork + translatableAddress6to4 + translatableAddress4to6 + dummyAddress + portControlProtocolAnycast + traversalUsingRelaysAroundNATAnycast + nat64DNS64Discovery + limitedBroadcast + discardOnly + teredo + uniqueLocal + linkLocalUnicast + ianaReservedForFutureUse + ianaReservedMulticast +) + +var reservedNetworks []*net.IPNet + +// IsIANAReserved checks IP validity as per IANA reserved IPs +// IPv4 +// https://www.iana.org/assignments/iana-ipv4-special-registry/iana-ipv4-special-registry.xhtml +// https://www.iana.org/assignments/ipv4-address-space/ipv4-address-space.xml +// IPv6 +// https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml +// https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml +func IsIANAReserved(ip net.IP) bool { + if !ip.IsGlobalUnicast() { + return true + } + + for _, network := range reservedNetworks { + if network.Contains(ip) { + return true + } + } + + return false +} + +func init() { + var networks = map[subnetCategory][]string{ + privateUse: {"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"}, + sharedAddressSpace: {"100.64.0.0/10"}, + benchmarking: {"198.18.0.0/15", "2001:2::/48"}, + documentation: {"192.0.2.0/24", "198.51.100.0/24", "203.0.113.0/24", "2001:db8::/32"}, + reserved: {"240.0.0.0/4", "0400::/6", "0800::/5", "1000::/4", "4000::/3", "6000::/3", "8000::/3", "a000::/3", "c000::/3", "e000::/4", "f000::/5", "f800::/6", "fe00::/9"}, // https://www.iana.org/assignments/ipv6-address-space/ipv6-address-space.xhtml + protocolAssignment: {"192.0.0.0/24", "2001::/23"}, // 192.0.0.0/24 contains 192.0.0.0/29 - IPv4 Service Continuity Prefix + as112: {"192.31.196.0/24", "192.175.48.0/24", "2001:4:112::/48", "2620:4f:8000::/48"}, + amt: {"192.52.193.0/24", "2001:3::/32"}, + orchidV2: {"2001:20::/28"}, + lisp: {"2001:5::/32"}, // TODO: this could expire at 2019-09. 
Please check https://www.iana.org/assignments/iana-ipv6-special-registry/iana-ipv6-special-registry.xhtml for updates + thisHostOnThisNetwork: {"0.0.0.0/8"}, + translatableAddress4to6: {"2002::/16"}, + translatableAddress6to4: {"64:ff9b::/96", "64:ff9b:1::/48"}, + dummyAddress: {"192.0.0.8/32"}, + portControlProtocolAnycast: {"192.0.0.9/32", "2001:1::1/128"}, + traversalUsingRelaysAroundNATAnycast: {"192.0.0.10/32", "2001:1::2/128"}, + nat64DNS64Discovery: {"192.0.0.170/32", "192.0.0.171/32"}, + limitedBroadcast: {"255.255.255.255/32"}, + discardOnly: {"100::/64"}, + teredo: {"2001::/32"}, + uniqueLocal: {"fc00::/7"}, + linkLocalUnicast: {"fe80::/10", "169.254.0.0/16"}, // this range is covered by ip.IsLinkLocalUnicast(), which is in turn called by net.IP.IsGlobalUnicast(ip) + ianaReservedForFutureUse: {"255.0.0.0/8", "254.0.0.0/8", "253.0.0.0/8", "252.0.0.0/8", "251.0.0.0/8", "250.0.0.0/8", "249.0.0.0/8", "248.0.0.0/8", "247.0.0.0/8", "246.0.0.0/8", "245.0.0.0/8", "244.0.0.0/8", "243.0.0.0/8", "242.0.0.0/8", "241.0.0.0/8", "240.0.0.0/8"}, + ianaReservedMulticast: {"239.0.0.0/8", "238.0.0.0/8", "237.0.0.0/8", "236.0.0.0/8", "235.0.0.0/8", "234.0.0.0/8", "233.0.0.0/8", "232.0.0.0/8", "231.0.0.0/8", "230.0.0.0/8", "229.0.0.0/8", "228.0.0.0/8", "227.0.0.0/8", "226.0.0.0/8", "225.0.0.0/8", "224.0.0.0/8", "ff00::/8"}, // this range is covered by ip.IsMulticast() call, which is in turn called by net.IP.IsGlobalUnicast(ip) + } + + for _, netList := range networks { + for _, network := range netList { + var ipNet *net.IPNet + var err error + + if _, ipNet, err = net.ParseCIDR(network); err != nil { + panic(fmt.Sprintf("unexpected internal network value provided: %s", err.Error())) + } + reservedNetworks = append(reservedNetworks, ipNet) + } + } +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ku.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ku.go new file mode 100644 index 00000000..31a828fb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/ku.go @@ -0,0 +1,18 @@ +package util + +import "github.com/zmap/zcrypto/x509" + +var ( + // KeyUsageToString maps an x509.KeyUsage bitmask to its name. + KeyUsageToString = map[x509.KeyUsage]string{ + x509.KeyUsageDigitalSignature: "KeyUsageDigitalSignature", + x509.KeyUsageContentCommitment: "KeyUsageContentCommitment", + x509.KeyUsageKeyEncipherment: "KeyUsageKeyEncipherment", + x509.KeyUsageDataEncipherment: "KeyUsageDataEncipherment", + x509.KeyUsageKeyAgreement: "KeyUsageKeyAgreement", + x509.KeyUsageCertSign: "KeyUsageCertSign", + x509.KeyUsageCRLSign: "KeyUsageCRLSign", + x509.KeyUsageEncipherOnly: "KeyUsageEncipherOnly", + x509.KeyUsageDecipherOnly: "KeyUsageDecipherOnly", + } +) diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/names.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/names.go new file mode 100644 index 00000000..a66f0214 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/names.go @@ -0,0 +1,64 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. 
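IsIANAReserved above combines net.IP.IsGlobalUnicast with a linear scan of the CIDR blocks parsed once in init(). A self-contained sketch of the same pattern, using a deliberately shortened block list rather than the full special-purpose registries:

package main

import (
	"fmt"
	"net"
)

// A condensed version of the init() pattern above: parse a few IANA
// special-purpose blocks once, then test membership per address.
var reserved []*net.IPNet

func init() {
	for _, cidr := range []string{"10.0.0.0/8", "192.0.2.0/24", "2001:db8::/32"} {
		_, ipNet, err := net.ParseCIDR(cidr)
		if err != nil {
			panic(err) // the list is static, so a parse failure is a programming error
		}
		reserved = append(reserved, ipNet)
	}
}

// isReserved mirrors IsIANAReserved: anything that is not a global unicast
// address is reserved outright; otherwise consult the CIDR list.
func isReserved(ip net.IP) bool {
	if !ip.IsGlobalUnicast() {
		return true
	}
	for _, n := range reserved {
		if n.Contains(ip) {
			return true
		}
	}
	return false
}

func main() {
	for _, s := range []string{"10.1.2.3", "8.8.8.8", "2001:db8::1", "224.0.0.1"} {
		fmt.Printf("%-12s reserved: %v\n", s, isReserved(net.ParseIP(s)))
	}
	// 224.0.0.1 is multicast, so IsGlobalUnicast alone already rejects it.
}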
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import ( + "encoding/asn1" + + "github.com/zmap/zcrypto/x509/pkix" +) + +type empty struct{} + +var nameAttributePrefix = asn1.ObjectIdentifier{2, 5, 4} +var nameAttributeLeaves = map[int]empty{ + // Name attributes defined in RFC 5280 appendix A + 3: {}, // id-at-commonName AttributeType ::= { id-at 3 } + 4: {}, // id-at-surname AttributeType ::= { id-at 4 } + 5: {}, // id-at-serialNumber AttributeType ::= { id-at 5 } + 6: {}, // id-at-countryName AttributeType ::= { id-at 6 } + 7: {}, // id-at-localityName AttributeType ::= { id-at 7 } + 8: {}, // id-at-stateOrProvinceName AttributeType ::= { id-at 8 } + 10: {}, // id-at-organizationName AttributeType ::= { id-at 10 } + 11: {}, // id-at-organizationalUnitName AttributeType ::= { id-at 11 } + 12: {}, // id-at-title AttributeType ::= { id-at 12 } + 41: {}, // id-at-name AttributeType ::= { id-at 41 } + 42: {}, // id-at-givenName AttributeType ::= { id-at 42 } + 43: {}, // id-at-initials AttributeType ::= { id-at 43 } + 44: {}, // id-at-generationQualifier AttributeType ::= { id-at 44 } + 46: {}, // id-at-dnQualifier AttributeType ::= { id-at 46 } + + // Name attributes not present in RFC 5280, but appeared in Go's crypto/x509/pkix.go + 9: {}, // id-at-streetName AttributeType ::= { id-at 9 } + 17: {}, // id-at-postalCodeName AttributeType ::= { id-at 17 } +} + +// IsNameAttribute returns true if the given ObjectIdentifier corresponds with +// the type of any name attribute for PKIX. +func IsNameAttribute(oid asn1.ObjectIdentifier) bool { + if len(oid) != 4 { + return false + } + if !nameAttributePrefix.Equal(oid[0:3]) { + return false + } + _, ok := nameAttributeLeaves[oid[3]] + return ok +} + +func NotAllNameFieldsAreEmpty(name *pkix.Name) bool { + //Return true if at least one field is non-empty + return len(name.Names) >= 1 +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/oid.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/oid.go new file mode 100644 index 00000000..e011f485 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/oid.go @@ -0,0 +1,184 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
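IsNameAttribute above reduces the OID comparison to a prefix check on the first three arcs plus a map lookup on the final arc, avoiding one Equal call per known attribute. A condensed, standalone version of the same check, carrying only three of the leaf arcs for illustration:

package main

import (
	"encoding/asn1"
	"fmt"
)

var prefix = asn1.ObjectIdentifier{2, 5, 4} // id-at

// Subset of the leaf table: commonName, countryName, organizationName.
var leaves = map[int]bool{3: true, 6: true, 10: true}

// isNameAttribute mirrors the vendored check: a PKIX name attribute is a
// 4-arc OID under id-at (2.5.4) whose final arc is a known attribute type.
func isNameAttribute(oid asn1.ObjectIdentifier) bool {
	if len(oid) != 4 || !prefix.Equal(oid[:3]) {
		return false
	}
	return leaves[oid[3]]
}

func main() {
	fmt.Println(isNameAttribute(asn1.ObjectIdentifier{2, 5, 4, 3}))   // commonName -> true
	fmt.Println(isNameAttribute(asn1.ObjectIdentifier{2, 5, 29, 17})) // subjectAltName ext -> false
}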
+ */ + +package util + +import ( + "encoding/asn1" + "errors" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zcrypto/x509/pkix" +) + +var ( + //extension OIDs + AiaOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 1} // Authority Information Access + AuthkeyOID = asn1.ObjectIdentifier{2, 5, 29, 35} // Authority Key Identifier + BasicConstOID = asn1.ObjectIdentifier{2, 5, 29, 19} // Basic Constraints + CertPolicyOID = asn1.ObjectIdentifier{2, 5, 29, 32} // Certificate Policies + CrlDistOID = asn1.ObjectIdentifier{2, 5, 29, 31} // CRL Distribution Points + CtPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} // CT Poison + EkuSynOid = asn1.ObjectIdentifier{2, 5, 29, 37} // Extended Key Usage Syntax + FreshCRLOID = asn1.ObjectIdentifier{2, 5, 29, 46} // Freshest CRL + InhibitAnyPolicyOID = asn1.ObjectIdentifier{2, 5, 29, 54} // Inhibit Any Policy + IssuerAlternateNameOID = asn1.ObjectIdentifier{2, 5, 29, 18} // Issuer Alt Name + KeyUsageOID = asn1.ObjectIdentifier{2, 5, 29, 15} // Key Usage + LogoTypeOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 12} // Logo Type Ext + NameConstOID = asn1.ObjectIdentifier{2, 5, 29, 30} // Name Constraints + OscpNoCheckOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5} // OSCP No Check + PolicyConstOID = asn1.ObjectIdentifier{2, 5, 29, 36} // Policy Constraints + PolicyMapOID = asn1.ObjectIdentifier{2, 5, 29, 33} // Policy Mappings + PrivKeyUsageOID = asn1.ObjectIdentifier{2, 5, 29, 16} // Private Key Usage Period + QcStateOid = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 3} // QC Statements + TimestampOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} // Signed Certificate Timestamp List + SmimeOID = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 9, 15} // Smime Capabilities + SubjectAlternateNameOID = asn1.ObjectIdentifier{2, 5, 29, 17} // Subject Alt Name + SubjectDirAttrOID = asn1.ObjectIdentifier{2, 5, 29, 9} // Subject Directory Attributes + SubjectInfoAccessOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 1, 11} // Subject Info Access Syntax + SubjectKeyIdentityOID = asn1.ObjectIdentifier{2, 5, 29, 14} // Subject Key Identifier + // CA/B reserved policies + BRDomainValidatedOID = asn1.ObjectIdentifier{2, 23, 140, 1, 2, 1} // CA/B BR Domain-Validated + BROrganizationValidatedOID = asn1.ObjectIdentifier{2, 23, 140, 1, 2, 2} // CA/B BR Organization-Validated + BRIndividualValidatedOID = asn1.ObjectIdentifier{2, 23, 140, 1, 2, 3} // CA/B BR Individual-Validated + BRTorServiceDescriptor = asn1.ObjectIdentifier{2, 23, 140, 1, 31} // CA/B BR Tor Service Descriptor + //X.500 attribute types + CommonNameOID = asn1.ObjectIdentifier{2, 5, 4, 3} + SurnameOID = asn1.ObjectIdentifier{2, 5, 4, 4} + SerialOID = asn1.ObjectIdentifier{2, 5, 4, 5} + CountryNameOID = asn1.ObjectIdentifier{2, 5, 4, 6} + LocalityNameOID = asn1.ObjectIdentifier{2, 5, 4, 7} + StateOrProvinceNameOID = asn1.ObjectIdentifier{2, 5, 4, 8} + StreetAddressOID = asn1.ObjectIdentifier{2, 5, 4, 9} + OrganizationNameOID = asn1.ObjectIdentifier{2, 5, 4, 10} + OrganizationalUnitNameOID = asn1.ObjectIdentifier{2, 5, 4, 11} + BusinessOID = asn1.ObjectIdentifier{2, 5, 4, 15} + PostalCodeOID = asn1.ObjectIdentifier{2, 5, 4, 17} + GivenNameOID = asn1.ObjectIdentifier{2, 5, 4, 42} + // Hash algorithms - see https://golang.org/src/crypto/x509/x509.go + SHA256OID = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 1} + SHA384OID = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 2} + SHA512OID = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 2, 3} + // other 
OIDs + OidRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1} + OidRSASSAPSS = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 10} + OidMD2WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + OidMD5WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + OidSHA1WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + OidSHA224WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 14} + OidSHA256WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + OidSHA384WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + OidSHA512WithRSAEncryption = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + AnyPolicyOID = asn1.ObjectIdentifier{2, 5, 29, 32, 0} + UserNoticeOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2} + CpsOID = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} + IdEtsiQcsQcCompliance = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 1} + IdEtsiQcsQcLimitValue = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 2} + IdEtsiQcsQcRetentionPeriod = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 3} + IdEtsiQcsQcSSCD = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 4} + IdEtsiQcsQcEuPDS = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 5} + IdEtsiQcsQcType = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 6} + IdEtsiQcsQctEsign = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 6, 1} + IdEtsiQcsQctEseal = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 6, 2} + IdEtsiQcsQctWeb = asn1.ObjectIdentifier{0, 4, 0, 1862, 1, 6, 3} +) + +const ( + // Tags + DNSNameTag = 2 +) + +// IsExtInCert is equivalent to GetExtFromCert() != nil. +func IsExtInCert(cert *x509.Certificate, oid asn1.ObjectIdentifier) bool { + if cert != nil && GetExtFromCert(cert, oid) != nil { + return true + } + return false +} + +// GetExtFromCert returns the extension with the matching OID, if present. If +// the extension if not present, it returns nil. +func GetExtFromCert(cert *x509.Certificate, oid asn1.ObjectIdentifier) *pkix.Extension { + // Since this function is called by many Lint CheckApplies functions we use + // the x509.Certificate.ExtensionsMap field added by zcrypto to check for + // the extension in O(1) instead of looping through the + // `x509.Certificate.Extensions` in O(n). 
+ if ext, found := cert.ExtensionsMap[oid.String()]; found { + return &ext + } + return nil +} + +// Helper function that checks if an []asn1.ObjectIdentifier slice contains an asn1.ObjectIdentifier +func SliceContainsOID(list []asn1.ObjectIdentifier, oid asn1.ObjectIdentifier) bool { + for _, v := range list { + if oid.Equal(v) { + return true + } + } + return false +} + +// Helper function that checks for a name type in a pkix.Name +func TypeInName(name *pkix.Name, oid asn1.ObjectIdentifier) bool { + for _, v := range name.Names { + if oid.Equal(v.Type) { + return true + } + } + return false +} + +//helper function to parse policyMapping extensions, returns slices of CertPolicyIds separated by domain +func GetMappedPolicies(polMap *pkix.Extension) (out [][2]asn1.ObjectIdentifier, err error) { + if polMap == nil { + return nil, errors.New("policyMap: null pointer") + } + var outSeq, inSeq asn1.RawValue + + empty, err := asn1.Unmarshal(polMap.Value, &outSeq) //strip outer sequence tag/length should be nothing extra + if err != nil || len(empty) != 0 || outSeq.Class != 0 || outSeq.Tag != 16 || outSeq.IsCompound == false { + return nil, errors.New("policyMap: Could not unmarshal outer sequence.") + } + + for done := false; !done; { //loop through SEQUENCE OF + outSeq.Bytes, err = asn1.Unmarshal(outSeq.Bytes, &inSeq) //extract next inner SEQUENCE (OID pair) + if err != nil || inSeq.Class != 0 || inSeq.Tag != 16 || inSeq.IsCompound == false { + err = errors.New("policyMap: Could not unmarshal inner sequence.") + return + } + if len(outSeq.Bytes) == 0 { //nothing remaining to parse, stop looping after + done = true + } + + var oidIssue, oidSubject asn1.ObjectIdentifier + var restIn asn1.RawContent + restIn, err = asn1.Unmarshal(inSeq.Bytes, &oidIssue) //extract first inner CertPolicyId (issuer domain) + if err != nil || len(restIn) == 0 { + err = errors.New("policyMap: Could not unmarshal inner sequence.") + return + } + + empty, err = asn1.Unmarshal(restIn, &oidSubject) //extract second inner CertPolicyId (subject domain) + if err != nil || len(empty) != 0 { + err = errors.New("policyMap: Could not unmarshal inner sequence.") + return + } + + //append found OIDs + out = append(out, [2]asn1.ObjectIdentifier{oidIssue, oidSubject}) + } + + return +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/primes.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/primes.go new file mode 100644 index 00000000..2483097d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/primes.go @@ -0,0 +1,57 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
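Most of the oid.go helpers above are thin wrappers over linear scans, because asn1.ObjectIdentifier is a slice and cannot be compared with == or used as a map key directly (hence GetExtFromCert keying its map on oid.String()). A standalone sketch of the SliceContainsOID pattern, applied here to the CA/B reserved policy OIDs from the var block:

package main

import (
	"encoding/asn1"
	"fmt"
)

// CA/B Forum reserved certificate policy OIDs, as defined above.
var (
	brDomainValidated       = asn1.ObjectIdentifier{2, 23, 140, 1, 2, 1}
	brOrganizationValidated = asn1.ObjectIdentifier{2, 23, 140, 1, 2, 2}
)

// sliceContainsOID mirrors util.SliceContainsOID: a linear scan using
// Equal, since OID slices are not directly comparable.
func sliceContainsOID(list []asn1.ObjectIdentifier, oid asn1.ObjectIdentifier) bool {
	for _, v := range list {
		if oid.Equal(v) {
			return true
		}
	}
	return false
}

func main() {
	certPolicies := []asn1.ObjectIdentifier{brDomainValidated}
	fmt.Println(sliceContainsOID(certPolicies, brDomainValidated))       // true
	fmt.Println(sliceContainsOID(certPolicies, brOrganizationValidated)) // false
}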
+ */ + +package util + +import "math/big" + +var bigIntPrimes = []*big.Int{ + big.NewInt(2), big.NewInt(3), big.NewInt(5), big.NewInt(7), big.NewInt(11), big.NewInt(13), + big.NewInt(17), big.NewInt(19), big.NewInt(23), big.NewInt(29), big.NewInt(31), big.NewInt(37), + big.NewInt(41), big.NewInt(43), big.NewInt(47), big.NewInt(53), big.NewInt(59), big.NewInt(61), + big.NewInt(67), big.NewInt(71), big.NewInt(73), big.NewInt(79), big.NewInt(83), big.NewInt(89), + big.NewInt(97), big.NewInt(101), big.NewInt(103), big.NewInt(107), big.NewInt(109), big.NewInt(113), + big.NewInt(127), big.NewInt(131), big.NewInt(137), big.NewInt(139), big.NewInt(149), big.NewInt(151), + big.NewInt(157), big.NewInt(163), big.NewInt(167), big.NewInt(173), big.NewInt(179), big.NewInt(181), + big.NewInt(191), big.NewInt(193), big.NewInt(197), big.NewInt(199), big.NewInt(211), big.NewInt(223), + big.NewInt(227), big.NewInt(229), big.NewInt(233), big.NewInt(239), big.NewInt(241), big.NewInt(251), + big.NewInt(257), big.NewInt(263), big.NewInt(269), big.NewInt(271), big.NewInt(277), big.NewInt(281), + big.NewInt(283), big.NewInt(293), big.NewInt(307), big.NewInt(311), big.NewInt(353), big.NewInt(359), + big.NewInt(367), big.NewInt(373), big.NewInt(379), big.NewInt(383), big.NewInt(313), big.NewInt(317), + big.NewInt(331), big.NewInt(337), big.NewInt(347), big.NewInt(349), big.NewInt(389), big.NewInt(397), + big.NewInt(401), big.NewInt(409), big.NewInt(419), big.NewInt(421), big.NewInt(431), big.NewInt(433), + big.NewInt(439), big.NewInt(443), big.NewInt(449), big.NewInt(457), big.NewInt(461), big.NewInt(463), + big.NewInt(467), big.NewInt(479), big.NewInt(487), big.NewInt(491), big.NewInt(499), big.NewInt(503), + big.NewInt(509), big.NewInt(521), big.NewInt(523), big.NewInt(541), big.NewInt(547), big.NewInt(557), + big.NewInt(563), big.NewInt(569), big.NewInt(571), big.NewInt(577), big.NewInt(587), big.NewInt(593), + big.NewInt(599), big.NewInt(601), big.NewInt(607), big.NewInt(613), big.NewInt(617), big.NewInt(619), + big.NewInt(631), big.NewInt(641), big.NewInt(643), big.NewInt(647), big.NewInt(653), big.NewInt(659), + big.NewInt(661), big.NewInt(673), big.NewInt(677), big.NewInt(683), big.NewInt(691), big.NewInt(701), + big.NewInt(709), big.NewInt(719), big.NewInt(727), big.NewInt(733), big.NewInt(739), big.NewInt(743), + big.NewInt(751), +} + +var zero = big.NewInt(0) + +func PrimeNoSmallerThan752(dividend *big.Int) bool { + quotient := big.NewInt(0) + mod := big.NewInt(0) + for _, divisor := range bigIntPrimes { + quotient.DivMod(dividend, divisor, mod) + if mod.Cmp(zero) == 0 { + return false + } + } + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/qc_stmt.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/qc_stmt.go new file mode 100644 index 00000000..98ce1fd7 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/qc_stmt.go @@ -0,0 +1,285 @@ +/* + * ZLint Copyright 2017 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. 
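PrimeNoSmallerThan752 above is plain trial division: it returns false as soon as any of the listed primes (all at most 751) divides the input, so a true result means only "no small prime factor", not that the number is prime. A reduced sketch with a four-prime list; note that, as in the vendored function, a listed prime divides itself evenly and so also yields false — the intended inputs are large values such as RSA moduli, not small integers:

package main

import (
	"fmt"
	"math/big"
)

// noSmallFactor performs trial division against a fixed prime list,
// reporting false on the first exact division.
func noSmallFactor(candidate *big.Int, primes []*big.Int) bool {
	mod := new(big.Int)
	for _, p := range primes {
		if mod.Mod(candidate, p).Sign() == 0 {
			return false
		}
	}
	return true
}

func main() {
	primes := []*big.Int{big.NewInt(2), big.NewInt(3), big.NewInt(5), big.NewInt(7)}
	fmt.Println(noSmallFactor(big.NewInt(221), primes)) // 221 = 13*17 -> true (no factor <= 7)
	fmt.Println(noSmallFactor(big.NewInt(222), primes)) // 222 = 2*3*37 -> false
}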
See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import ( + "bytes" + "encoding/asn1" + "fmt" + "reflect" +) + +func etsiOidToDescString(oid asn1.ObjectIdentifier) string { + switch { + case oid.Equal(IdEtsiQcsQcCompliance): + { + return "IdEtsiQcsQcCompliance" + } + case oid.Equal(IdEtsiQcsQcLimitValue): + { + return "IdEtsiQcsQcLimitValue" + } + case oid.Equal(IdEtsiQcsQcRetentionPeriod): + { + return "IdEtsiQcsQcRetentionPeriod" + } + case oid.Equal(IdEtsiQcsQcSSCD): + { + return "IdEtsiQcsQcSSCSD" + } + case oid.Equal(IdEtsiQcsQcEuPDS): + { + return "IdEtsiQcsQcEuPDS" + } + case oid.Equal(IdEtsiQcsQcType): + { + return "IdEtsiQcsQcType" + } + default: + { + panic("unresolved ETSI QC Statement OID") + } + } +} + +type anyContent struct { + Raw asn1.RawContent +} + +type qcStatementWithInfoField struct { + Oid asn1.ObjectIdentifier + Any asn1.RawValue +} +type qcStatementWithoutInfoField struct { + Oid asn1.ObjectIdentifier +} + +type etsiBase struct { + errorInfo string + isPresent bool +} + +func (this etsiBase) GetErrorInfo() string { + return this.errorInfo +} + +func (this etsiBase) IsPresent() bool { + return this.isPresent +} + +type EtsiQcStmtIf interface { + GetErrorInfo() string + IsPresent() bool +} + +type Etsi421QualEuCert struct { + etsiBase +} + +type Etsi423QcType struct { + etsiBase + TypeOids []asn1.ObjectIdentifier +} + +type EtsiQcSscd struct { + etsiBase +} + +type EtsiMonetaryValueAlph struct { + iso4217CurrencyCodeAlph string `asn1:"printable"` + amount int + exponent int +} +type EtsiMonetaryValueNum struct { + iso4217CurrencyCodeNum int + amount int + exponent int +} + +type EtsiQcLimitValue struct { + etsiBase + Amount int + Exponent int + IsNum bool + CurrencyAlph string + CurrencyNum int +} + +type EtsiQcRetentionPeriod struct { + etsiBase + Period int +} +type PdsLocation struct { + Url string `asn1:"ia5"` + Language string `asn1:"printable"` +} +type EtsiQcPds struct { + etsiBase + PdsLocations []PdsLocation +} + +func AppendToStringSemicolonDelim(this *string, s string) { + if len(*this) > 0 && len(s) > 0 { + (*this) += "; " + } + (*this) += s +} + +func checkAsn1Reencoding(i interface{}, originalEncoding []byte, appendIfComparisonFails string) string { + result := "" + reencoded, marshErr := asn1.Marshal(i) + if marshErr != nil { + AppendToStringSemicolonDelim(&result, fmt.Sprintf("error reencoding ASN1 value of statementInfo field: %s", + marshErr)) + } + if !bytes.Equal(reencoded, originalEncoding) { + AppendToStringSemicolonDelim(&result, appendIfComparisonFails) + } + return result +} + +func IsAnyEtsiQcStatementPresent(extVal []byte) bool { + oidList := make([]*asn1.ObjectIdentifier, 6) + oidList[0] = &IdEtsiQcsQcCompliance + oidList[1] = &IdEtsiQcsQcLimitValue + oidList[2] = &IdEtsiQcsQcRetentionPeriod + oidList[3] = &IdEtsiQcsQcSSCD + oidList[4] = &IdEtsiQcsQcEuPDS + oidList[5] = &IdEtsiQcsQcType + for _, oid := range oidList { + r := ParseQcStatem(extVal, *oid) + if r.IsPresent() { + return true + } + } + return false +} + +func ParseQcStatem(extVal []byte, sought asn1.ObjectIdentifier) EtsiQcStmtIf { + sl := make([]anyContent, 0) + rest, err := asn1.Unmarshal(extVal, &sl) + if err != nil { + return etsiBase{errorInfo: "error parsing outer SEQ", isPresent: true} + } + if len(rest) != 0 { + return etsiBase{errorInfo: "rest len of outer seq != 0", isPresent: true} + } + + for _, raw := range sl { + parseErrorString := "format error in at least one QC statement within the QC 
statements extension." + + " this message may appear multiple times for the same error cause." + var statem qcStatementWithInfoField + rest, err = asn1.Unmarshal(raw.Raw, &statem) + if err != nil { + var statemWithoutInfo qcStatementWithoutInfoField + + rest, err = asn1.Unmarshal(raw.Raw, &statemWithoutInfo) + if err != nil || len(rest) != 0 { + return etsiBase{errorInfo: parseErrorString, isPresent: false} + } + copy(statem.Oid, statemWithoutInfo.Oid) + if len(statem.Any.FullBytes) != 0 { + return etsiBase{errorInfo: "internal error, default optional content len is not zero"} + } + } else if 0 != len(rest) { + return etsiBase{errorInfo: parseErrorString, isPresent: false} + } + + if !statem.Oid.Equal(sought) { + continue + } + if statem.Oid.Equal(IdEtsiQcsQcCompliance) { + etsiObj := Etsi421QualEuCert{etsiBase: etsiBase{isPresent: true}} + statemWithoutInfo := qcStatementWithoutInfoField{Oid: statem.Oid} + AppendToStringSemicolonDelim(&etsiObj.errorInfo, checkAsn1Reencoding(reflect.ValueOf(statemWithoutInfo).Interface(), raw.Raw, + "invalid format of ETSI Complicance statement")) + return etsiObj + } else if statem.Oid.Equal(IdEtsiQcsQcLimitValue) { + etsiObj := EtsiQcLimitValue{etsiBase: etsiBase{isPresent: true}} + numErr := false + alphErr := false + var numeric EtsiMonetaryValueNum + var alphabetic EtsiMonetaryValueAlph + restNum, errNum := asn1.Unmarshal(statem.Any.FullBytes, &numeric) + if len(restNum) != 0 || errNum != nil { + numErr = true + } else { + etsiObj.IsNum = true + etsiObj.Amount = numeric.amount + etsiObj.Exponent = numeric.exponent + etsiObj.CurrencyNum = numeric.iso4217CurrencyCodeNum + + } + if numErr { + restAlph, errAlph := asn1.Unmarshal(statem.Any.FullBytes, &alphabetic) + if len(restAlph) != 0 || errAlph != nil { + alphErr = true + } else { + etsiObj.IsNum = false + etsiObj.Amount = alphabetic.amount + etsiObj.Exponent = alphabetic.exponent + etsiObj.CurrencyAlph = alphabetic.iso4217CurrencyCodeAlph + AppendToStringSemicolonDelim(&etsiObj.errorInfo, + checkAsn1Reencoding(reflect.ValueOf(alphabetic).Interface(), + statem.Any.FullBytes, "error with ASN.1 encoding, possibly a wrong ASN.1 string type was used")) + } + } + if numErr && alphErr { + etsiObj.errorInfo = "error parsing the ETSI Qc Statement statementInfo field" + } + return etsiObj + + } else if statem.Oid.Equal(IdEtsiQcsQcRetentionPeriod) { + etsiObj := EtsiQcRetentionPeriod{etsiBase: etsiBase{isPresent: true}} + rest, err := asn1.Unmarshal(statem.Any.FullBytes, &etsiObj.Period) + + if len(rest) != 0 || err != nil { + etsiObj.errorInfo = "error parsing the statementInfo field" + } + return etsiObj + } else if statem.Oid.Equal(IdEtsiQcsQcSSCD) { + etsiObj := EtsiQcSscd{etsiBase: etsiBase{isPresent: true}} + statemWithoutInfo := qcStatementWithoutInfoField{Oid: statem.Oid} + AppendToStringSemicolonDelim(&etsiObj.errorInfo, checkAsn1Reencoding(reflect.ValueOf(statemWithoutInfo).Interface(), raw.Raw, + "invalid format of ETSI SCSD statement")) + return etsiObj + } else if statem.Oid.Equal(IdEtsiQcsQcEuPDS) { + etsiObj := EtsiQcPds{etsiBase: etsiBase{isPresent: true}} + rest, err := asn1.Unmarshal(statem.Any.FullBytes, &etsiObj.PdsLocations) + if len(rest) != 0 || err != nil { + etsiObj.errorInfo = "error parsing the statementInfo field" + } else { + AppendToStringSemicolonDelim(&etsiObj.errorInfo, + checkAsn1Reencoding(reflect.ValueOf(etsiObj.PdsLocations).Interface(), statem.Any.FullBytes, + "error with ASN.1 encoding, possibly a wrong ASN.1 string type was used")) + } + return etsiObj + } else if 
statem.Oid.Equal(IdEtsiQcsQcType) { + var qcType Etsi423QcType + qcType.isPresent = true + rest, err := asn1.Unmarshal(statem.Any.FullBytes, &qcType.TypeOids) + if len(rest) != 0 || err != nil { + return etsiBase{errorInfo: "error parsing IdEtsiQcsQcType extension statementInfo field", isPresent: true} + } + return qcType + } else { + return etsiBase{errorInfo: "", isPresent: true} + } + + } + + return etsiBase{errorInfo: "", isPresent: false} + +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/rdn.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/rdn.go new file mode 100644 index 00000000..9bff383c --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/rdn.go @@ -0,0 +1,26 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +package util + +import "encoding/asn1" + +type AttributeTypeAndRawValue struct { + Type asn1.ObjectIdentifier + Value asn1.RawValue +} + +type AttributeTypeAndRawValueSET []AttributeTypeAndRawValue + +type RawRDNSequence []AttributeTypeAndRawValueSET diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/time.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/time.go new file mode 100644 index 00000000..f5e52596 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/util/time.go @@ -0,0 +1,83 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. 
+ */ + +package util + +import ( + "encoding/asn1" + "time" + + "github.com/zmap/zcrypto/x509" +) + +var ( + ZeroDate = time.Date(0000, time.January, 1, 0, 0, 0, 0, time.UTC) + RFC1035Date = time.Date(1987, time.January, 1, 0, 0, 0, 0, time.UTC) + RFC2459Date = time.Date(1999, time.January, 1, 0, 0, 0, 0, time.UTC) + RFC3280Date = time.Date(2002, time.April, 1, 0, 0, 0, 0, time.UTC) + RFC3490Date = time.Date(2003, time.March, 1, 0, 0, 0, 0, time.UTC) + RFC8399Date = time.Date(2018, time.May, 1, 0, 0, 0, 0, time.UTC) + RFC4325Date = time.Date(2005, time.December, 1, 0, 0, 0, 0, time.UTC) + RFC4630Date = time.Date(2006, time.August, 1, 0, 0, 0, 0, time.UTC) + RFC5280Date = time.Date(2008, time.May, 1, 0, 0, 0, 0, time.UTC) + RFC6818Date = time.Date(2013, time.January, 1, 0, 0, 0, 0, time.UTC) + CABEffectiveDate = time.Date(2012, time.July, 1, 0, 0, 0, 0, time.UTC) + CABReservedIPDate = time.Date(2016, time.October, 1, 0, 0, 0, 0, time.UTC) + CABGivenNameDate = time.Date(2016, time.September, 7, 0, 0, 0, 0, time.UTC) + CABSerialNumberEntropyDate = time.Date(2016, time.September, 30, 0, 0, 0, 0, time.UTC) + CABV102Date = time.Date(2012, time.June, 8, 0, 0, 0, 0, time.UTC) + CABV113Date = time.Date(2013, time.February, 21, 0, 0, 0, 0, time.UTC) + CABV114Date = time.Date(2013, time.May, 3, 0, 0, 0, 0, time.UTC) + CABV116Date = time.Date(2013, time.July, 29, 0, 0, 0, 0, time.UTC) + CABV130Date = time.Date(2015, time.April, 16, 0, 0, 0, 0, time.UTC) + CABV131Date = time.Date(2015, time.September, 28, 0, 0, 0, 0, time.UTC) + NO_SHA1 = time.Date(2016, time.January, 1, 0, 0, 0, 0, time.UTC) + NoRSA1024RootDate = time.Date(2011, time.January, 1, 0, 0, 0, 0, time.UTC) + NoRSA1024Date = time.Date(2014, time.January, 1, 0, 0, 0, 0, time.UTC) + GeneralizedDate = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC) + NoReservedIP = time.Date(2015, time.November, 1, 0, 0, 0, 0, time.UTC) + SubCert39Month = time.Date(2016, time.July, 2, 0, 0, 0, 0, time.UTC) + SubCert825Days = time.Date(2018, time.March, 2, 0, 0, 0, 0, time.UTC) + CABV148Date = time.Date(2017, time.June, 8, 0, 0, 0, 0, time.UTC) + EtsiEn319_412_5_V2_2_1_Date = time.Date(2017, time.November, 1, 0, 0, 0, 0, time.UTC) + OnionOnlyEVDate = time.Date(2015, time.May, 1, 0, 0, 0, 0, time.UTC) + CABV201Date = time.Date(2017, time.July, 28, 0, 0, 0, 0, time.UTC) + AppleCTPolicyDate = time.Date(2018, time.October, 15, 0, 0, 0, 0, time.UTC) +) + +func FindTimeType(firstDate, secondDate asn1.RawValue) (int, int) { + return firstDate.Tag, secondDate.Tag +} + +func GetTimes(cert *x509.Certificate) (asn1.RawValue, asn1.RawValue) { + var outSeq, firstDate, secondDate asn1.RawValue + // Unmarshal into the sequence + rest, err := asn1.Unmarshal(cert.RawTBSCertificate, &outSeq) + // Start unmarshalling the bytes + rest, err = asn1.Unmarshal(outSeq.Bytes, &outSeq) + // This is here to account for if version is not included + if outSeq.Tag == 0 { + rest, err = asn1.Unmarshal(rest, &outSeq) + } + rest, err = asn1.Unmarshal(rest, &outSeq) + rest, err = asn1.Unmarshal(rest, &outSeq) + rest, err = asn1.Unmarshal(rest, &outSeq) + // Finally at the validity date, load them into a different RawValue + rest, err = asn1.Unmarshal(outSeq.Bytes, &firstDate) + _, err = asn1.Unmarshal(rest, &secondDate) + if err != nil { + return asn1.RawValue{}, asn1.RawValue{} + } + return firstDate, secondDate +} diff --git a/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/zlint.go b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/zlint.go new file mode 100644 
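For orientation, a minimal sketch of how the qc_stmt.go API above is consumed. It assumes the vendored import path github.com/zmap/zlint/util and that the IdEtsiQcs* OIDs are defined elsewhere in that package (they are referenced by the code above); certPEM is a placeholder for a real PEM-encoded certificate, and the extension-scanning loop is illustrative, not part of the vendored code.

    package main

    import (
        "crypto/x509"
        "encoding/pem"
        "fmt"
        "log"

        "github.com/zmap/zlint/util"
    )

    const certPEM = `-----BEGIN CERTIFICATE-----
    ...placeholder...
    -----END CERTIFICATE-----`

    func main() {
        block, _ := pem.Decode([]byte(certPEM))
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        // id-pe-qcStatements is 1.3.6.1.5.5.7.1.3; its raw DER value is what
        // IsAnyEtsiQcStatementPresent and ParseQcStatem expect.
        var extVal []byte
        for _, ext := range cert.Extensions {
            if ext.Id.String() == "1.3.6.1.5.5.7.1.3" {
                extVal = ext.Value
            }
        }
        if extVal == nil || !util.IsAnyEtsiQcStatementPresent(extVal) {
            fmt.Println("no ETSI QC statements")
            return
        }
        // Ask for one specific statement; the concrete return type carries the
        // parsed fields, while GetErrorInfo reports format problems.
        stmt := util.ParseQcStatem(extVal, util.IdEtsiQcsQcType)
        if info := stmt.GetErrorInfo(); info != "" {
            fmt.Println("parse problem:", info)
        }
        if qcType, ok := stmt.(util.Etsi423QcType); ok {
            fmt.Println("QcType OIDs:", qcType.TypeOids)
        }
    }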
index 00000000..d0162ddb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/github.com/zmap/zlint/zlint.go @@ -0,0 +1,87 @@ +/* + * ZLint Copyright 2018 Regents of the University of Michigan + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + * implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +// Used to check parsed info from certificate for compliance + +package zlint + +import ( + "encoding/json" + "io" + "time" + + "github.com/zmap/zcrypto/x509" + "github.com/zmap/zlint/lints" +) + +const Version int64 = 3 + +// ResultSet contains the output of running all lints against a single certificate. +type ResultSet struct { + Version int64 `json:"version"` + Timestamp int64 `json:"timestamp"` + Results map[string]*lints.LintResult `json:"lints"` + NoticesPresent bool `json:"notices_present"` + WarningsPresent bool `json:"warnings_present"` + ErrorsPresent bool `json:"errors_present"` + FatalsPresent bool `json:"fatals_present"` +} + +func (z *ResultSet) execute(cert *x509.Certificate) { + z.Results = make(map[string]*lints.LintResult, len(lints.Lints)) + for name, l := range lints.Lints { + res := l.Execute(cert) + z.Results[name] = res + z.updateErrorStatePresent(res) + } +} + +func (z *ResultSet) updateErrorStatePresent(result *lints.LintResult) { + switch result.Status { + case lints.Notice: + z.NoticesPresent = true + case lints.Warn: + z.WarningsPresent = true + case lints.Error: + z.ErrorsPresent = true + case lints.Fatal: + z.FatalsPresent = true + } +} + +// EncodeLintDescriptionsToJSON outputs a description of each lint as JSON +// object, one object per line. +func EncodeLintDescriptionsToJSON(w io.Writer) { + enc := json.NewEncoder(w) + enc.SetEscapeHTML(false) + for _, lint := range lints.Lints { + enc.Encode(lint) + } +} + +// LintCertificate runs all registered lints on c, producing a ZLint. +func LintCertificate(c *x509.Certificate) *ResultSet { + // Instead of panicing on nil certificate, just returns nil and let the client + // panic when accessing ZLint, if they're into panicing. + if c == nil { + return nil + } + + // Run all tests + res := new(ResultSet) + res.execute(c) + res.Version = Version + res.Timestamp = time.Now().Unix() + return res +} diff --git a/vendor/github.com/elastic/beats/vendor/go.opencensus.io/appveyor.yml b/vendor/github.com/elastic/beats/vendor/go.opencensus.io/appveyor.yml deleted file mode 100644 index 12bd7c4c..00000000 --- a/vendor/github.com/elastic/beats/vendor/go.opencensus.io/appveyor.yml +++ /dev/null @@ -1,25 +0,0 @@ -version: "{build}" - -platform: x64 - -clone_folder: c:\gopath\src\go.opencensus.io - -environment: - GOPATH: 'c:\gopath' - GOVERSION: '1.11' - GO111MODULE: 'on' - CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 - -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 - - go version - - go env - -build: false -deploy: false - -test_script: - - cd %APPVEYOR_BUILD_FOLDER% - - go build -v .\... - - go test -v .\... 
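A sketch of the entry point zlint.go exposes. Note that LintCertificate takes zcrypto's x509.Certificate rather than the standard library's; the vendored import paths are assumed, and certPEM is again a placeholder.

    package main

    import (
        "encoding/json"
        "encoding/pem"
        "log"
        "os"

        "github.com/zmap/zcrypto/x509"
        "github.com/zmap/zlint"
    )

    const certPEM = `-----BEGIN CERTIFICATE-----
    ...placeholder...
    -----END CERTIFICATE-----`

    func main() {
        block, _ := pem.Decode([]byte(certPEM))
        // zcrypto's parser is deliberately tolerant, so certificates with
        // lintable defects still parse.
        cert, err := x509.ParseCertificate(block.Bytes)
        if err != nil {
            log.Fatal(err)
        }
        rs := zlint.LintCertificate(cert)
        if rs.ErrorsPresent || rs.FatalsPresent {
            log.Println("at least one lint returned error or fatal")
        }
        // ResultSet marshals directly into the JSON shape declared above.
        json.NewEncoder(os.Stdout).Encode(rs)
    }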
# No -race because cgo is disabled diff --git a/vendor/github.com/elastic/beats/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/github.com/elastic/beats/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go new file mode 100644 index 00000000..65ab1e99 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go @@ -0,0 +1,187 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracecontext contains HTTP propagator for TraceContext standard. +// See https://github.com/w3c/distributed-tracing for more information. +package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + +import ( + "encoding/hex" + "fmt" + "net/http" + "net/textproto" + "regexp" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "go.opencensus.io/trace/tracestate" +) + +const ( + supportedVersion = 0 + maxVersion = 254 + maxTracestateLen = 512 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" + trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` +) + +var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements the TraceContext trace propagation format. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h, ok := getRequestHeader(req, traceparentHeader, false) + if !ok { + return trace.SpanContext{}, false + } + sections := strings.Split(h, "-") + if len(sections) < 4 { + return trace.SpanContext{}, false + } + + if len(sections[0]) != 2 { + return trace.SpanContext{}, false + } + ver, err := hex.DecodeString(sections[0]) + if err != nil { + return trace.SpanContext{}, false + } + version := int(ver[0]) + if version > maxVersion { + return trace.SpanContext{}, false + } + + if version == 0 && len(sections) != 4 { + return trace.SpanContext{}, false + } + + if len(sections[1]) != 32 { + return trace.SpanContext{}, false + } + tid, err := hex.DecodeString(sections[1]) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], tid) + + if len(sections[2]) != 16 { + return trace.SpanContext{}, false + } + sid, err := hex.DecodeString(sections[2]) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.SpanID[:], sid) + + opts, err := hex.DecodeString(sections[3]) + if err != nil || len(opts) < 1 { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(opts[0]) + + // Don't allow all zero trace or span ID. + if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { + return trace.SpanContext{}, false + } + + sc.Tracestate = tracestateFromRequest(req) + return sc, true +} + +// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. 
+// If commaSeparated is true, multiple header fields with the same field name using be +// combined using ",". +// If no header was found using the given name, "ok" would be false. +// If more than one headers was found using the given name, while commaSeparated is false, +// "ok" would be false. +func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { + v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] + switch len(v) { + case 0: + return "", false + case 1: + return v[0], true + default: + return strings.Join(v, ","), commaSeparated + } +} + +// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. +// Revisit to return additional boolean value to indicate parsing error when following issues +// are resolved. +// https://github.com/w3c/distributed-tracing/issues/172 +// https://github.com/w3c/distributed-tracing/issues/175 +func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { + h, _ := getRequestHeader(req, tracestateHeader, true) + if h == "" { + return nil + } + + var entries []tracestate.Entry + pairs := strings.Split(h, ",") + hdrLenWithoutOWS := len(pairs) - 1 // Number of commas + for _, pair := range pairs { + matches := trimOWSRegExp.FindStringSubmatch(pair) + if matches == nil { + return nil + } + pair = matches[1] + hdrLenWithoutOWS += len(pair) + if hdrLenWithoutOWS > maxTracestateLen { + return nil + } + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return nil + } + entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) + } + ts, err := tracestate.New(nil, entries...) + if err != nil { + return nil + } + + return ts +} + +func tracestateToRequest(sc trace.SpanContext, req *http.Request) { + var pairs = make([]string, 0, len(sc.Tracestate.Entries())) + if sc.Tracestate != nil { + for _, entry := range sc.Tracestate.Entries() { + pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) + } + h := strings.Join(pairs, ",") + + if h != "" && len(h) <= maxTracestateLen { + req.Header.Set(tracestateHeader, h) + } + } +} + +// SpanContextToRequest modifies the given request to include traceparent and tracestate headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + h := fmt.Sprintf("%x-%x-%x-%x", + []byte{supportedVersion}, + sc.TraceID[:], + sc.SpanID[:], + []byte{byte(sc.TraceOptions)}) + req.Header.Set(traceparentHeader, h) + tracestateToRequest(sc, req) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/asn1.go new file mode 100644 index 00000000..528b9bff --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -0,0 +1,751 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + encoding_asn1 "encoding/asn1" + "fmt" + "math/big" + "reflect" + "time" + + "golang.org/x/crypto/cryptobyte/asn1" +) + +// This file contains ASN.1-related methods for String and Builder. + +// Builder + +// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Int64(v int64) { + b.addASN1Signed(asn1.INTEGER, v) +} + +// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the +// given tag. 
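The propagator defined in this file is symmetric, so the round trip is easy to check. A small sketch using the trace and span IDs from the W3C trace-context examples:

    package main

    import (
        "fmt"
        "net/http"

        "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
        "go.opencensus.io/trace"
    )

    func main() {
        var f tracecontext.HTTPFormat
        // Outgoing side: serialize a span context into the traceparent header.
        sc := trace.SpanContext{
            TraceID:      trace.TraceID{0x4b, 0xf9, 0x2f, 0x35, 0x77, 0xb3, 0x4d, 0xa6, 0xa3, 0xce, 0x92, 0x9d, 0x0e, 0x0e, 0x47, 0x36},
            SpanID:       trace.SpanID{0x00, 0xf0, 0x67, 0xaa, 0x0b, 0xa9, 0x02, 0xb7},
            TraceOptions: trace.TraceOptions(1), // sampled
        }
        req, _ := http.NewRequest("GET", "https://example.com", nil)
        f.SpanContextToRequest(sc, req)
        fmt.Println(req.Header.Get("traceparent"))
        // 00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01

        // Incoming side: recover the same span context from the headers.
        if got, ok := f.SpanContextFromRequest(req); ok {
            fmt.Println(got.TraceID, got.SpanID)
        }
    }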
+func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) { + b.addASN1Signed(tag, v) +} + +// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION. +func (b *Builder) AddASN1Enum(v int64) { + b.addASN1Signed(asn1.ENUM, v) +} + +func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) { + b.AddASN1(tag, func(c *Builder) { + length := 1 + for i := v; i >= 0x80 || i < -0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1Uint64(v uint64) { + b.AddASN1(asn1.INTEGER, func(c *Builder) { + length := 1 + for i := v; i >= 0x80; i >>= 8 { + length++ + } + + for ; length > 0; length-- { + i := v >> uint((length-1)*8) & 0xff + c.AddUint8(uint8(i)) + } + }) +} + +// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER. +func (b *Builder) AddASN1BigInt(n *big.Int) { + if b.err != nil { + return + } + + b.AddASN1(asn1.INTEGER, func(c *Builder) { + if n.Sign() < 0 { + // A negative number has to be converted to two's-complement form. So we + // invert and subtract 1. If the most-significant-bit isn't set then + // we'll need to pad the beginning with 0xff in order to keep the number + // negative. + nMinus1 := new(big.Int).Neg(n) + nMinus1.Sub(nMinus1, bigOne) + bytes := nMinus1.Bytes() + for i := range bytes { + bytes[i] ^= 0xff + } + if bytes[0]&0x80 == 0 { + c.add(0xff) + } + c.add(bytes...) + } else if n.Sign() == 0 { + c.add(0) + } else { + bytes := n.Bytes() + if bytes[0]&0x80 != 0 { + c.add(0) + } + c.add(bytes...) + } + }) +} + +// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING. +func (b *Builder) AddASN1OctetString(bytes []byte) { + b.AddASN1(asn1.OCTET_STRING, func(c *Builder) { + c.AddBytes(bytes) + }) +} + +const generalizedTimeFormatStr = "20060102150405Z0700" + +// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME. +func (b *Builder) AddASN1GeneralizedTime(t time.Time) { + if t.Year() < 0 || t.Year() > 9999 { + b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t) + return + } + b.AddASN1(asn1.GeneralizedTime, func(c *Builder) { + c.AddBytes([]byte(t.Format(generalizedTimeFormatStr))) + }) +} + +// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not +// support BIT STRINGs that are not a whole number of bytes. 
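The negative branch of AddASN1BigInt is easiest to see with a value just below -128, where the inversion also triggers the 0xff padding byte. A minimal, self-contained check:

    package main

    import (
        "fmt"
        "math/big"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder // the zero value allocates as needed
        // -129: invert 128-1's bytes (0x80 -> 0x7f), then prepend 0xff because
        // the top bit is clear, yielding the two's-complement contents ff 7f.
        b.AddASN1BigInt(big.NewInt(-129))
        out, err := b.Bytes()
        if err != nil {
            panic(err)
        }
        fmt.Printf("% X\n", out) // 02 02 FF 7F: INTEGER, length 2, contents
    }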
+func (b *Builder) AddASN1BitString(data []byte) { + b.AddASN1(asn1.BIT_STRING, func(b *Builder) { + b.AddUint8(0) + b.AddBytes(data) + }) +} + +func (b *Builder) addBase128Int(n int64) { + var length int + if n == 0 { + length = 1 + } else { + for i := n; i > 0; i >>= 7 { + length++ + } + } + + for i := length - 1; i >= 0; i-- { + o := byte(n >> uint(i*7)) + o &= 0x7f + if i != 0 { + o |= 0x80 + } + + b.add(o) + } +} + +func isValidOID(oid encoding_asn1.ObjectIdentifier) bool { + if len(oid) < 2 { + return false + } + + if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) { + return false + } + + for _, v := range oid { + if v < 0 { + return false + } + } + + return true +} + +func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) { + b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) { + if !isValidOID(oid) { + b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid) + return + } + + b.addBase128Int(int64(oid[0])*40 + int64(oid[1])) + for _, v := range oid[2:] { + b.addBase128Int(int64(v)) + } + }) +} + +func (b *Builder) AddASN1Boolean(v bool) { + b.AddASN1(asn1.BOOLEAN, func(b *Builder) { + if v { + b.AddUint8(0xff) + } else { + b.AddUint8(0) + } + }) +} + +func (b *Builder) AddASN1NULL() { + b.add(uint8(asn1.NULL), 0) +} + +// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if +// successful or records an error if one occurred. +func (b *Builder) MarshalASN1(v interface{}) { + // NOTE(martinkr): This is somewhat of a hack to allow propagation of + // encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a + // value embedded into a struct, its tag information is lost. + if b.err != nil { + return + } + bytes, err := encoding_asn1.Marshal(v) + if err != nil { + b.err = err + return + } + b.AddBytes(bytes) +} + +// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag. +// Tags greater than 30 are not supported and result in an error (i.e. +// low-tag-number form only). The child builder passed to the +// BuilderContinuation can be used to build the content of the ASN.1 object. +func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) { + if b.err != nil { + return + } + // Identifiers with the low five bits set indicate high-tag-number format + // (two or more octets), which we don't support. + if tag&0x1f == 0x1f { + b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag) + return + } + b.AddUint8(uint8(tag)) + b.addLengthPrefixed(1, true, f) +} + +// String + +// ReadASN1Boolean decodes an ASN.1 INTEGER and converts it to a boolean +// representation into out and advances. It reports whether the read +// was successful. +func (s *String) ReadASN1Boolean(out *bool) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || len(bytes) != 1 { + return false + } + + switch bytes[0] { + case 0: + *out = false + case 0xff: + *out = true + default: + return false + } + + return true +} + +var bigIntType = reflect.TypeOf((*big.Int)(nil)).Elem() + +// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does +// not point to an integer or to a big.Int, it panics. It reports whether the +// read was successful. 
+func (s *String) ReadASN1Integer(out interface{}) bool { + if reflect.TypeOf(out).Kind() != reflect.Ptr { + panic("out is not a pointer") + } + switch reflect.ValueOf(out).Elem().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + var i int64 + if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) { + return false + } + reflect.ValueOf(out).Elem().SetInt(i) + return true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + var u uint64 + if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) { + return false + } + reflect.ValueOf(out).Elem().SetUint(u) + return true + case reflect.Struct: + if reflect.TypeOf(out).Elem() == bigIntType { + return s.readASN1BigInt(out.(*big.Int)) + } + } + panic("out does not point to an integer type") +} + +func checkASN1Integer(bytes []byte) bool { + if len(bytes) == 0 { + // An INTEGER is encoded with at least one octet. + return false + } + if len(bytes) == 1 { + return true + } + if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 { + // Value is not minimally encoded. + return false + } + return true +} + +var bigOne = big.NewInt(1) + +func (s *String) readASN1BigInt(out *big.Int) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) { + return false + } + if bytes[0]&0x80 == 0x80 { + // Negative number. + neg := make([]byte, len(bytes)) + for i, b := range bytes { + neg[i] = ^b + } + out.SetBytes(neg) + out.Add(out, bigOne) + out.Neg(out) + } else { + out.SetBytes(bytes) + } + return true +} + +func (s *String) readASN1Int64(out *int64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) { + return false + } + return true +} + +func asn1Signed(out *int64, n []byte) bool { + length := len(n) + if length > 8 { + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= int64(n[i]) + } + // Shift up and down in order to sign extend the result. + *out <<= 64 - uint8(length)*8 + *out >>= 64 - uint8(length)*8 + return true +} + +func (s *String) readASN1Uint64(out *uint64) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) { + return false + } + return true +} + +func asn1Unsigned(out *uint64, n []byte) bool { + length := len(n) + if length > 9 || length == 9 && n[0] != 0 { + // Too large for uint64. + return false + } + if n[0]&0x80 != 0 { + // Negative number. + return false + } + for i := 0; i < length; i++ { + *out <<= 8 + *out |= uint64(n[i]) + } + return true +} + +// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out +// and advances. It reports whether the read was successful and resulted in a +// value that can be represented in an int64. +func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool { + var bytes String + return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes) +} + +// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports +// whether the read was successful. 
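Reading the same encoding back exercises checkASN1Integer's minimal-encoding test and the shift-based sign extension in asn1Signed:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        // DER INTEGER -129: tag 0x02, length 2, contents ff 7f.
        input := cryptobyte.String{0x02, 0x02, 0xFF, 0x7F}
        var v int64
        if !input.ReadASN1Integer(&v) {
            panic("bad INTEGER")
        }
        fmt.Println(v) // -129, after shifting up and back down to sign extend
    }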
+func (s *String) ReadASN1Enum(out *int) bool { + var bytes String + var i int64 + if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) { + return false + } + if int64(int(i)) != i { + return false + } + *out = int(i) + return true +} + +func (s *String) readBase128Int(out *int) bool { + ret := 0 + for i := 0; len(*s) > 0; i++ { + if i == 4 { + return false + } + ret <<= 7 + b := s.read(1)[0] + ret |= int(b & 0x7f) + if b&0x80 == 0 { + *out = ret + return true + } + } + return false // truncated +} + +// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 { + return false + } + + // In the worst case, we get two elements from the first byte (which is + // encoded differently) and then every varint is a single byte long. + components := make([]int, len(bytes)+1) + + // The first varint is 40*value1 + value2: + // According to this packing, value1 can take the values 0, 1 and 2 only. + // When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2, + // then there are no restrictions on value2. + var v int + if !bytes.readBase128Int(&v) { + return false + } + if v < 80 { + components[0] = v / 40 + components[1] = v % 40 + } else { + components[0] = 2 + components[1] = v - 80 + } + + i := 2 + for ; len(bytes) > 0; i++ { + if !bytes.readBase128Int(&v) { + return false + } + components[i] = v + } + *out = components[:i] + return true +} + +// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and +// advances. It reports whether the read was successful. +func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.GeneralizedTime) { + return false + } + t := string(bytes) + res, err := time.Parse(generalizedTimeFormatStr, t) + if err != nil { + return false + } + if serialized := res.Format(generalizedTimeFormatStr); serialized != t { + return false + } + *out = res + return true +} + +// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances. +// It reports whether the read was successful. +func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool { + var bytes String + if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 { + return false + } + + paddingBits := uint8(bytes[0]) + bytes = bytes[1:] + if paddingBits > 7 || + len(bytes) == 0 && paddingBits != 0 || + len(bytes) > 0 && bytes[len(bytes)-1]&(1< 4 || len(*s) < int(2+lenLen) { + return false + } + + lenBytes := String((*s)[2 : 2+lenLen]) + if !lenBytes.readUnsigned(&len32, int(lenLen)) { + return false + } + + // ITU-T X.690 section 10.1 (DER length forms) requires encoding the length + // with the minimum number of octets. + if len32 < 128 { + // Length should have used short-form encoding. + return false + } + if len32>>((lenLen-1)*8) == 0 { + // Leading octet is 0. Length should have been at least one byte shorter. + return false + } + + headerLen = 2 + uint32(lenLen) + if headerLen+len32 < len32 { + // Overflow. 
+ return false + } + length = headerLen + len32 + } + + if uint32(int(length)) != length || !s.ReadBytes((*[]byte)(out), int(length)) { + return false + } + if skipHeader && !out.Skip(int(headerLen)) { + panic("cryptobyte: internal error") + } + + return true +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go new file mode 100644 index 00000000..cda8e3ed --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go @@ -0,0 +1,46 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asn1 contains supporting types for parsing and building ASN.1 +// messages with the cryptobyte package. +package asn1 // import "golang.org/x/crypto/cryptobyte/asn1" + +// Tag represents an ASN.1 identifier octet, consisting of a tag number +// (indicating a type) and class (such as context-specific or constructed). +// +// Methods in the cryptobyte package only support the low-tag-number form, i.e. +// a single identifier octet with bits 7-8 encoding the class and bits 1-6 +// encoding the tag number. +type Tag uint8 + +const ( + classConstructed = 0x20 + classContextSpecific = 0x80 +) + +// Constructed returns t with the constructed class bit set. +func (t Tag) Constructed() Tag { return t | classConstructed } + +// ContextSpecific returns t with the context-specific class bit set. +func (t Tag) ContextSpecific() Tag { return t | classContextSpecific } + +// The following is a list of standard tag and class combinations. +const ( + BOOLEAN = Tag(1) + INTEGER = Tag(2) + BIT_STRING = Tag(3) + OCTET_STRING = Tag(4) + NULL = Tag(5) + OBJECT_IDENTIFIER = Tag(6) + ENUM = Tag(10) + UTF8String = Tag(12) + SEQUENCE = Tag(16 | classConstructed) + SET = Tag(17 | classConstructed) + PrintableString = Tag(19) + T61String = Tag(20) + IA5String = Tag(22) + UTCTime = Tag(23) + GeneralizedTime = Tag(24) + GeneralString = Tag(27) +) diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/builder.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/builder.go new file mode 100644 index 00000000..ca7b1db5 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/builder.go @@ -0,0 +1,337 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cryptobyte + +import ( + "errors" + "fmt" +) + +// A Builder builds byte strings from fixed-length and length-prefixed values. +// Builders either allocate space as needed, or are ‘fixed’, which means that +// they write into a given buffer and produce an error if it's exhausted. +// +// The zero value is a usable Builder that allocates space as needed. +// +// Simple values are marshaled and appended to a Builder using methods on the +// Builder. Length-prefixed values are marshaled by providing a +// BuilderContinuation, which is a function that writes the inner contents of +// the value to a given Builder. See the documentation for BuilderContinuation +// for details. 
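Putting the Builder pieces together: AddASN1 reserves a single length byte (the back-fill happens when the child is flushed, shown in builder.go below), and addBase128Int produces the familiar arc encoding. A small sketch emitting a SEQUENCE of the RSADSI OID and a NULL:

    package main

    import (
        encoding_asn1 "encoding/asn1"
        "fmt"

        "golang.org/x/crypto/cryptobyte"
        "golang.org/x/crypto/cryptobyte/asn1"
    )

    func main() {
        var b cryptobyte.Builder
        b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
            // First base-128 int is 40*1 + 2 = 42 (0x2a), then 840 and 113549.
            seq.AddASN1ObjectIdentifier(encoding_asn1.ObjectIdentifier{1, 2, 840, 113549})
            seq.AddASN1NULL()
        })
        der, err := b.Bytes()
        if err != nil {
            panic(err)
        }
        fmt.Printf("% X\n", der) // 30 0A 06 06 2A 86 48 86 F7 0D 05 00
    }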
+type Builder struct { + err error + result []byte + fixedSize bool + child *Builder + offset int + pendingLenLen int + pendingIsASN1 bool + inContinuation *bool +} + +// NewBuilder creates a Builder that appends its output to the given buffer. +// Like append(), the slice will be reallocated if its capacity is exceeded. +// Use Bytes to get the final buffer. +func NewBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + } +} + +// NewFixedBuilder creates a Builder that appends its output into the given +// buffer. This builder does not reallocate the output buffer. Writes that +// would exceed the buffer's capacity are treated as an error. +func NewFixedBuilder(buffer []byte) *Builder { + return &Builder{ + result: buffer, + fixedSize: true, + } +} + +// SetError sets the value to be returned as the error from Bytes. Writes +// performed after calling SetError are ignored. +func (b *Builder) SetError(err error) { + b.err = err +} + +// Bytes returns the bytes written by the builder or an error if one has +// occurred during building. +func (b *Builder) Bytes() ([]byte, error) { + if b.err != nil { + return nil, b.err + } + return b.result[b.offset:], nil +} + +// BytesOrPanic returns the bytes written by the builder or panics if an error +// has occurred during building. +func (b *Builder) BytesOrPanic() []byte { + if b.err != nil { + panic(b.err) + } + return b.result[b.offset:] +} + +// AddUint8 appends an 8-bit value to the byte string. +func (b *Builder) AddUint8(v uint8) { + b.add(byte(v)) +} + +// AddUint16 appends a big-endian, 16-bit value to the byte string. +func (b *Builder) AddUint16(v uint16) { + b.add(byte(v>>8), byte(v)) +} + +// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest +// byte of the 32-bit input value is silently truncated. +func (b *Builder) AddUint24(v uint32) { + b.add(byte(v>>16), byte(v>>8), byte(v)) +} + +// AddUint32 appends a big-endian, 32-bit value to the byte string. +func (b *Builder) AddUint32(v uint32) { + b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) +} + +// AddBytes appends a sequence of bytes to the byte string. +func (b *Builder) AddBytes(v []byte) { + b.add(v...) +} + +// BuilderContinuation is a continuation-passing interface for building +// length-prefixed byte sequences. Builder methods for length-prefixed +// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation +// supplied to them. The child builder passed to the continuation can be used +// to build the content of the length-prefixed sequence. For example: +// +// parent := cryptobyte.NewBuilder() +// parent.AddUint8LengthPrefixed(func (child *Builder) { +// child.AddUint8(42) +// child.AddUint8LengthPrefixed(func (grandchild *Builder) { +// grandchild.AddUint8(5) +// }) +// }) +// +// It is an error to write more bytes to the child than allowed by the reserved +// length prefix. After the continuation returns, the child must be considered +// invalid, i.e. users must not store any copies or references of the child +// that outlive the continuation. +// +// If the continuation panics with a value of type BuildError then the inner +// error will be returned as the error from Bytes. If the child panics +// otherwise then Bytes will repanic with the same value. +type BuilderContinuation func(child *Builder) + +// BuildError wraps an error. If a BuilderContinuation panics with this value, +// the panic will be recovered and the inner error will be returned from +// Builder.Bytes. 
+type BuildError struct { + Err error +} + +// AddUint8LengthPrefixed adds a 8-bit length-prefixed byte sequence. +func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(1, false, f) +} + +// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence. +func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(2, false, f) +} + +// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence. +func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(3, false, f) +} + +// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence. +func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) { + b.addLengthPrefixed(4, false, f) +} + +func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) { + if !*b.inContinuation { + *b.inContinuation = true + + defer func() { + *b.inContinuation = false + + r := recover() + if r == nil { + return + } + + if buildError, ok := r.(BuildError); ok { + b.err = buildError.Err + } else { + panic(r) + } + }() + } + + f(arg) +} + +func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) { + // Subsequent writes can be ignored if the builder has encountered an error. + if b.err != nil { + return + } + + offset := len(b.result) + b.add(make([]byte, lenLen)...) + + if b.inContinuation == nil { + b.inContinuation = new(bool) + } + + b.child = &Builder{ + result: b.result, + fixedSize: b.fixedSize, + offset: offset, + pendingLenLen: lenLen, + pendingIsASN1: isASN1, + inContinuation: b.inContinuation, + } + + b.callContinuation(f, b.child) + b.flushChild() + if b.child != nil { + panic("cryptobyte: internal error") + } +} + +func (b *Builder) flushChild() { + if b.child == nil { + return + } + b.child.flushChild() + child := b.child + b.child = nil + + if child.err != nil { + b.err = child.err + return + } + + length := len(child.result) - child.pendingLenLen - child.offset + + if length < 0 { + panic("cryptobyte: internal error") // result unexpectedly shrunk + } + + if child.pendingIsASN1 { + // For ASN.1, we reserved a single byte for the length. If that turned out + // to be incorrect, we have to move the contents along in order to make + // space. + if child.pendingLenLen != 1 { + panic("cryptobyte: internal error") + } + var lenLen, lenByte uint8 + if int64(length) > 0xfffffffe { + b.err = errors.New("pending ASN.1 child too long") + return + } else if length > 0xffffff { + lenLen = 5 + lenByte = 0x80 | 4 + } else if length > 0xffff { + lenLen = 4 + lenByte = 0x80 | 3 + } else if length > 0xff { + lenLen = 3 + lenByte = 0x80 | 2 + } else if length > 0x7f { + lenLen = 2 + lenByte = 0x80 | 1 + } else { + lenLen = 1 + lenByte = uint8(length) + length = 0 + } + + // Insert the initial length byte, make space for successive length bytes, + // and adjust the offset. + child.result[child.offset] = lenByte + extraBytes := int(lenLen - 1) + if extraBytes != 0 { + child.add(make([]byte, extraBytes)...) 
+ childStart := child.offset + child.pendingLenLen + copy(child.result[childStart+extraBytes:], child.result[childStart:]) + } + child.offset++ + child.pendingLenLen = extraBytes + } + + l := length + for i := child.pendingLenLen - 1; i >= 0; i-- { + child.result[child.offset+i] = uint8(l) + l >>= 8 + } + if l != 0 { + b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen) + return + } + + if b.fixedSize && &b.result[0] != &child.result[0] { + panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer") + } + + b.result = child.result +} + +func (b *Builder) add(bytes ...byte) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted write while child is pending") + } + if len(b.result)+len(bytes) < len(bytes) { + b.err = errors.New("cryptobyte: length overflow") + } + if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) { + b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer") + return + } + b.result = append(b.result, bytes...) +} + +// Unwrite rolls back n bytes written directly to the Builder. An attempt by a +// child builder passed to a continuation to unwrite bytes from its parent will +// panic. +func (b *Builder) Unwrite(n int) { + if b.err != nil { + return + } + if b.child != nil { + panic("cryptobyte: attempted unwrite while child is pending") + } + length := len(b.result) - b.pendingLenLen - b.offset + if length < 0 { + panic("cryptobyte: internal error") + } + if n > length { + panic("cryptobyte: attempted to unwrite more than was written") + } + b.result = b.result[:len(b.result)-n] +} + +// A MarshalingValue marshals itself into a Builder. +type MarshalingValue interface { + // Marshal is called by Builder.AddValue. It receives a pointer to a builder + // to marshal itself into. It may return an error that occurred during + // marshaling, such as unset or invalid values. + Marshal(b *Builder) error +} + +// AddValue calls Marshal on v, passing a pointer to the builder to append to. +// If Marshal returns an error, it is set on the Builder so that subsequent +// appends don't have an effect. +func (b *Builder) AddValue(v MarshalingValue) { + err := v.Marshal(b) + if err != nil { + b.err = err + } +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/string.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/string.go new file mode 100644 index 00000000..39bf98ae --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/cryptobyte/string.go @@ -0,0 +1,166 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cryptobyte contains types that help with parsing and constructing +// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage +// contains useful ASN.1 constants.) +// +// The String type is for parsing. It wraps a []byte slice and provides helper +// functions for consuming structures, value by value. +// +// The Builder type is for constructing messages. It providers helper functions +// for appending values and also for appending length-prefixed submessages – +// without having to worry about calculating the length prefix ahead of time. +// +// See the documentation and examples for the Builder and String types to get +// started. 
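The continuation flow above (addLengthPrefixed reserving the prefix, flushChild back-filling it) is what lets nested length-prefixed messages be written in reading order, without computing any length up front:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        var b cryptobyte.Builder
        // Two reserved bytes are back-filled with 00 04 once the outer
        // continuation returns.
        b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
            body.AddUint8(42)
            body.AddUint8LengthPrefixed(func(inner *cryptobyte.Builder) {
                inner.AddBytes([]byte("hi"))
            })
        })
        out, _ := b.Bytes()
        fmt.Printf("% X\n", out) // 00 04 2A 02 68 69
    }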
+package cryptobyte // import "golang.org/x/crypto/cryptobyte" + +// String represents a string of bytes. It provides methods for parsing +// fixed-length and length-prefixed values from it. +type String []byte + +// read advances a String by n bytes and returns them. If less than n bytes +// remain, it returns nil. +func (s *String) read(n int) []byte { + if len(*s) < n { + return nil + } + v := (*s)[:n] + *s = (*s)[n:] + return v +} + +// Skip advances the String by n byte and reports whether it was successful. +func (s *String) Skip(n int) bool { + return s.read(n) != nil +} + +// ReadUint8 decodes an 8-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint8(out *uint8) bool { + v := s.read(1) + if v == nil { + return false + } + *out = uint8(v[0]) + return true +} + +// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint16(out *uint16) bool { + v := s.read(2) + if v == nil { + return false + } + *out = uint16(v[0])<<8 | uint16(v[1]) + return true +} + +// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint24(out *uint32) bool { + v := s.read(3) + if v == nil { + return false + } + *out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2]) + return true +} + +// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it. +// It reports whether the read was successful. +func (s *String) ReadUint32(out *uint32) bool { + v := s.read(4) + if v == nil { + return false + } + *out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3]) + return true +} + +func (s *String) readUnsigned(out *uint32, length int) bool { + v := s.read(length) + if v == nil { + return false + } + var result uint32 + for i := 0; i < length; i++ { + result <<= 8 + result |= uint32(v[i]) + } + *out = result + return true +} + +func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool { + lenBytes := s.read(lenLen) + if lenBytes == nil { + return false + } + var length uint32 + for _, b := range lenBytes { + length = length << 8 + length = length | uint32(b) + } + if int(length) < 0 { + // This currently cannot overflow because we read uint24 at most, but check + // anyway in case that changes in the future. + return false + } + v := s.read(int(length)) + if v == nil { + return false + } + *outChild = v + return true +} + +// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value +// into out and advances over it. It reports whether the read was successful. +func (s *String) ReadUint8LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(1, out) +} + +// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit +// length-prefixed value into out and advances over it. It reports whether the +// read was successful. +func (s *String) ReadUint16LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(2, out) +} + +// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit +// length-prefixed value into out and advances over it. It reports whether +// the read was successful. +func (s *String) ReadUint24LengthPrefixed(out *String) bool { + return s.readLengthPrefixed(3, out) +} + +// ReadBytes reads n bytes into out and advances over them. It reports +// whether the read was successful. 
+func (s *String) ReadBytes(out *[]byte, n int) bool { + v := s.read(n) + if v == nil { + return false + } + *out = v + return true +} + +// CopyBytes copies len(out) bytes into out and advances over them. It reports +// whether the copy operation was successful +func (s *String) CopyBytes(out []byte) bool { + n := len(out) + v := s.read(n) + if v == nil { + return false + } + return copy(out, v) == n +} + +// Empty reports whether the string does not contain any bytes. +func (s String) Empty() bool { + return len(s) == 0 +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ocsp/ocsp.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ocsp/ocsp.go new file mode 100644 index 00000000..d297ac92 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/ocsp/ocsp.go @@ -0,0 +1,784 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ocsp parses OCSP responses as specified in RFC 2560. OCSP responses +// are signed messages attesting to the validity of a certificate for a small +// period of time. This is used to manage revocation for X.509 certificates. +package ocsp // import "golang.org/x/crypto/ocsp" + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/rsa" + _ "crypto/sha1" + _ "crypto/sha256" + _ "crypto/sha512" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "errors" + "fmt" + "math/big" + "strconv" + "time" +) + +var idPKIXOCSPBasic = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 5, 5, 7, 48, 1, 1}) + +// ResponseStatus contains the result of an OCSP request. See +// https://tools.ietf.org/html/rfc6960#section-2.3 +type ResponseStatus int + +const ( + Success ResponseStatus = 0 + Malformed ResponseStatus = 1 + InternalError ResponseStatus = 2 + TryLater ResponseStatus = 3 + // Status code four is unused in OCSP. See + // https://tools.ietf.org/html/rfc6960#section-4.2.1 + SignatureRequired ResponseStatus = 5 + Unauthorized ResponseStatus = 6 +) + +func (r ResponseStatus) String() string { + switch r { + case Success: + return "success" + case Malformed: + return "malformed" + case InternalError: + return "internal error" + case TryLater: + return "try later" + case SignatureRequired: + return "signature required" + case Unauthorized: + return "unauthorized" + default: + return "unknown OCSP status: " + strconv.Itoa(int(r)) + } +} + +// ResponseError is an error that may be returned by ParseResponse to indicate +// that the response itself is an error, not just that it's indicating that a +// certificate is revoked, unknown, etc. +type ResponseError struct { + Status ResponseStatus +} + +func (r ResponseError) Error() string { + return "ocsp: error from server: " + r.Status.String() +} + +// These are internal structures that reflect the ASN.1 structure of an OCSP +// response. See RFC 2560, section 4.2. 
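String is the mirror image of Builder: each Read* consumes from the front of the slice and reports success. Parsing the message built in the previous sketch:

    package main

    import (
        "fmt"

        "golang.org/x/crypto/cryptobyte"
    )

    func main() {
        msg := cryptobyte.String{0x00, 0x04, 0x2A, 0x02, 0x68, 0x69}
        var body cryptobyte.String
        if !msg.ReadUint16LengthPrefixed(&body) || !msg.Empty() {
            panic("malformed message")
        }
        var kind uint8
        var payload cryptobyte.String
        if !body.ReadUint8(&kind) || !body.ReadUint8LengthPrefixed(&payload) {
            panic("malformed body")
        }
        fmt.Println(kind, string(payload)) // 42 hi
    }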
+ +type certID struct { + HashAlgorithm pkix.AlgorithmIdentifier + NameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// https://tools.ietf.org/html/rfc2560#section-4.1.1 +type ocspRequest struct { + TBSRequest tbsRequest +} + +type tbsRequest struct { + Version int `asn1:"explicit,tag:0,default:0,optional"` + RequestorName pkix.RDNSequence `asn1:"explicit,tag:1,optional"` + RequestList []request +} + +type request struct { + Cert certID +} + +type responseASN1 struct { + Status asn1.Enumerated + Response responseBytes `asn1:"explicit,tag:0,optional"` +} + +type responseBytes struct { + ResponseType asn1.ObjectIdentifier + Response []byte +} + +type basicResponse struct { + TBSResponseData responseData + SignatureAlgorithm pkix.AlgorithmIdentifier + Signature asn1.BitString + Certificates []asn1.RawValue `asn1:"explicit,tag:0,optional"` +} + +type responseData struct { + Raw asn1.RawContent + Version int `asn1:"optional,default:0,explicit,tag:0"` + RawResponderID asn1.RawValue + ProducedAt time.Time `asn1:"generalized"` + Responses []singleResponse +} + +type singleResponse struct { + CertID certID + Good asn1.Flag `asn1:"tag:0,optional"` + Revoked revokedInfo `asn1:"tag:1,optional"` + Unknown asn1.Flag `asn1:"tag:2,optional"` + ThisUpdate time.Time `asn1:"generalized"` + NextUpdate time.Time `asn1:"generalized,explicit,tag:0,optional"` + SingleExtensions []pkix.Extension `asn1:"explicit,tag:1,optional"` +} + +type revokedInfo struct { + RevocationTime time.Time `asn1:"generalized"` + Reason asn1.Enumerated `asn1:"explicit,tag:0,optional"` +} + +var ( + oidSignatureMD2WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 2} + oidSignatureMD5WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 4} + oidSignatureSHA1WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 5} + oidSignatureSHA256WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 11} + oidSignatureSHA384WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 12} + oidSignatureSHA512WithRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 13} + oidSignatureDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10040, 4, 3} + oidSignatureDSAWithSHA256 = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 3, 2} + oidSignatureECDSAWithSHA1 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 1} + oidSignatureECDSAWithSHA256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 2} + oidSignatureECDSAWithSHA384 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 3} + oidSignatureECDSAWithSHA512 = asn1.ObjectIdentifier{1, 2, 840, 10045, 4, 3, 4} +) + +var hashOIDs = map[crypto.Hash]asn1.ObjectIdentifier{ + crypto.SHA1: asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}), + crypto.SHA256: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 1}), + crypto.SHA384: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 2}), + crypto.SHA512: asn1.ObjectIdentifier([]int{2, 16, 840, 1, 101, 3, 4, 2, 3}), +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +var signatureAlgorithmDetails = []struct { + algo x509.SignatureAlgorithm + oid asn1.ObjectIdentifier + pubKeyAlgo x509.PublicKeyAlgorithm + hash crypto.Hash +}{ + {x509.MD2WithRSA, oidSignatureMD2WithRSA, x509.RSA, crypto.Hash(0) /* no value for MD2 */}, + {x509.MD5WithRSA, oidSignatureMD5WithRSA, x509.RSA, crypto.MD5}, + {x509.SHA1WithRSA, oidSignatureSHA1WithRSA, x509.RSA, crypto.SHA1}, + {x509.SHA256WithRSA, oidSignatureSHA256WithRSA, x509.RSA, crypto.SHA256}, + {x509.SHA384WithRSA, oidSignatureSHA384WithRSA, x509.RSA, crypto.SHA384}, + 
{x509.SHA512WithRSA, oidSignatureSHA512WithRSA, x509.RSA, crypto.SHA512}, + {x509.DSAWithSHA1, oidSignatureDSAWithSHA1, x509.DSA, crypto.SHA1}, + {x509.DSAWithSHA256, oidSignatureDSAWithSHA256, x509.DSA, crypto.SHA256}, + {x509.ECDSAWithSHA1, oidSignatureECDSAWithSHA1, x509.ECDSA, crypto.SHA1}, + {x509.ECDSAWithSHA256, oidSignatureECDSAWithSHA256, x509.ECDSA, crypto.SHA256}, + {x509.ECDSAWithSHA384, oidSignatureECDSAWithSHA384, x509.ECDSA, crypto.SHA384}, + {x509.ECDSAWithSHA512, oidSignatureECDSAWithSHA512, x509.ECDSA, crypto.SHA512}, +} + +// TODO(rlb): This is also from crypto/x509, so same comment as AGL's below +func signingParamsForPublicKey(pub interface{}, requestedSigAlgo x509.SignatureAlgorithm) (hashFunc crypto.Hash, sigAlgo pkix.AlgorithmIdentifier, err error) { + var pubType x509.PublicKeyAlgorithm + + switch pub := pub.(type) { + case *rsa.PublicKey: + pubType = x509.RSA + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureSHA256WithRSA + sigAlgo.Parameters = asn1.RawValue{ + Tag: 5, + } + + case *ecdsa.PublicKey: + pubType = x509.ECDSA + + switch pub.Curve { + case elliptic.P224(), elliptic.P256(): + hashFunc = crypto.SHA256 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA256 + case elliptic.P384(): + hashFunc = crypto.SHA384 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA384 + case elliptic.P521(): + hashFunc = crypto.SHA512 + sigAlgo.Algorithm = oidSignatureECDSAWithSHA512 + default: + err = errors.New("x509: unknown elliptic curve") + } + + default: + err = errors.New("x509: only RSA and ECDSA keys supported") + } + + if err != nil { + return + } + + if requestedSigAlgo == 0 { + return + } + + found := false + for _, details := range signatureAlgorithmDetails { + if details.algo == requestedSigAlgo { + if details.pubKeyAlgo != pubType { + err = errors.New("x509: requested SignatureAlgorithm does not match private key type") + return + } + sigAlgo.Algorithm, hashFunc = details.oid, details.hash + if hashFunc == 0 { + err = errors.New("x509: cannot sign with hash function requested") + return + } + found = true + break + } + } + + if !found { + err = errors.New("x509: unknown SignatureAlgorithm") + } + + return +} + +// TODO(agl): this is taken from crypto/x509 and so should probably be exported +// from crypto/x509 or crypto/x509/pkix. +func getSignatureAlgorithmFromOID(oid asn1.ObjectIdentifier) x509.SignatureAlgorithm { + for _, details := range signatureAlgorithmDetails { + if oid.Equal(details.oid) { + return details.algo + } + } + return x509.UnknownSignatureAlgorithm +} + +// TODO(rlb): This is not taken from crypto/x509, but it's of the same general form. +func getHashAlgorithmFromOID(target asn1.ObjectIdentifier) crypto.Hash { + for hash, oid := range hashOIDs { + if oid.Equal(target) { + return hash + } + } + return crypto.Hash(0) +} + +func getOIDFromHashAlgorithm(target crypto.Hash) asn1.ObjectIdentifier { + for hash, oid := range hashOIDs { + if hash == target { + return oid + } + } + return nil +} + +// This is the exposed reflection of the internal OCSP structures. + +// The status values that can be expressed in OCSP. See RFC 6960. +const ( + // Good means that the certificate is valid. + Good = iota + // Revoked means that the certificate has been deliberately revoked. + Revoked + // Unknown means that the OCSP responder doesn't know about the certificate. + Unknown + // ServerFailed is unused and was never used (see + // https://go-review.googlesource.com/#/c/18944). ParseResponse will + // return a ResponseError when an error response is parsed. 
+ ServerFailed +) + +// The enumerated reasons for revoking a certificate. See RFC 5280. +const ( + Unspecified = 0 + KeyCompromise = 1 + CACompromise = 2 + AffiliationChanged = 3 + Superseded = 4 + CessationOfOperation = 5 + CertificateHold = 6 + + RemoveFromCRL = 8 + PrivilegeWithdrawn = 9 + AACompromise = 10 +) + +// Request represents an OCSP request. See RFC 6960. +type Request struct { + HashAlgorithm crypto.Hash + IssuerNameHash []byte + IssuerKeyHash []byte + SerialNumber *big.Int +} + +// Marshal marshals the OCSP request to ASN.1 DER encoded form. +func (req *Request) Marshal() ([]byte, error) { + hashAlg := getOIDFromHashAlgorithm(req.HashAlgorithm) + if hashAlg == nil { + return nil, errors.New("Unknown hash algorithm") + } + return asn1.Marshal(ocspRequest{ + tbsRequest{ + Version: 0, + RequestList: []request{ + { + Cert: certID{ + pkix.AlgorithmIdentifier{ + Algorithm: hashAlg, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + req.IssuerNameHash, + req.IssuerKeyHash, + req.SerialNumber, + }, + }, + }, + }, + }) +} + +// Response represents an OCSP response containing a single SingleResponse. See +// RFC 6960. +type Response struct { + // Status is one of {Good, Revoked, Unknown} + Status int + SerialNumber *big.Int + ProducedAt, ThisUpdate, NextUpdate, RevokedAt time.Time + RevocationReason int + Certificate *x509.Certificate + // TBSResponseData contains the raw bytes of the signed response. If + // Certificate is nil then this can be used to verify Signature. + TBSResponseData []byte + Signature []byte + SignatureAlgorithm x509.SignatureAlgorithm + + // IssuerHash is the hash used to compute the IssuerNameHash and IssuerKeyHash. + // Valid values are crypto.SHA1, crypto.SHA256, crypto.SHA384, and crypto.SHA512. + // If zero, the default is crypto.SHA1. + IssuerHash crypto.Hash + + // RawResponderName optionally contains the DER-encoded subject of the + // responder certificate. Exactly one of RawResponderName and + // ResponderKeyHash is set. + RawResponderName []byte + // ResponderKeyHash optionally contains the SHA-1 hash of the + // responder's public key. Exactly one of RawResponderName and + // ResponderKeyHash is set. + ResponderKeyHash []byte + + // Extensions contains raw X.509 extensions from the singleExtensions field + // of the OCSP response. When parsing certificates, this can be used to + // extract non-critical extensions that are not parsed by this package. When + // marshaling OCSP responses, the Extensions field is ignored, see + // ExtraExtensions. + Extensions []pkix.Extension + + // ExtraExtensions contains extensions to be copied, raw, into any marshaled + // OCSP response (in the singleExtensions field). Values override any + // extensions that would otherwise be produced based on the other fields. The + // ExtraExtensions field is not populated when parsing certificates, see + // Extensions. + ExtraExtensions []pkix.Extension +} + +// These are pre-serialized error responses for the various non-success codes +// defined by OCSP. The Unauthorized code in particular can be used by an OCSP +// responder that supports only pre-signed responses as a response to requests +// for certificates with unknown status. See RFC 5019. 
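A sketch of filling in Request by hand and serializing it with Marshal. The hashing rules follow RFC 6960: SHA-1 over the issuer's DER-encoded subject, and over the public-key BIT STRING unwrapped from the issuer's SubjectPublicKeyInfo. In practice ocsp.CreateRequest, defined later in this file, performs the same steps; BuildRequest is a hypothetical helper name.

    package ocspexample

    import (
        "crypto"
        "crypto/sha1"
        "crypto/x509"
        "crypto/x509/pkix"
        "encoding/asn1"
        "math/big"

        "golang.org/x/crypto/ocsp"
    )

    // BuildRequest DER-encodes an OCSP request for cert issued by issuer.
    func BuildRequest(cert, issuer *x509.Certificate) ([]byte, error) {
        nameHash := sha1.Sum(issuer.RawSubject)

        // IssuerKeyHash covers only the BIT STRING payload of the issuer's
        // SubjectPublicKeyInfo, so unwrap the SPKI first.
        var spki struct {
            Algorithm pkix.AlgorithmIdentifier
            PublicKey asn1.BitString
        }
        if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &spki); err != nil {
            return nil, err
        }
        keyHash := sha1.Sum(spki.PublicKey.RightAlign())

        req := &ocsp.Request{
            HashAlgorithm:  crypto.SHA1,
            IssuerNameHash: nameHash[:],
            IssuerKeyHash:  keyHash[:],
            SerialNumber:   new(big.Int).Set(cert.SerialNumber),
        }
        return req.Marshal()
    }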
+var ( + MalformedRequestErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x01} + InternalErrorErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x02} + TryLaterErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x03} + SigRequredErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x05} + UnauthorizedErrorResponse = []byte{0x30, 0x03, 0x0A, 0x01, 0x06} +) + +// CheckSignatureFrom checks that the signature in resp is a valid signature +// from issuer. This should only be used if resp.Certificate is nil. Otherwise, +// the OCSP response contained an intermediate certificate that created the +// signature. That signature is checked by ParseResponse and only +// resp.Certificate remains to be validated. +func (resp *Response) CheckSignatureFrom(issuer *x509.Certificate) error { + return issuer.CheckSignature(resp.SignatureAlgorithm, resp.TBSResponseData, resp.Signature) +} + +// ParseError results from an invalid OCSP response. +type ParseError string + +func (p ParseError) Error() string { + return string(p) +} + +// ParseRequest parses an OCSP request in DER form. It only supports +// requests for a single certificate. Signed requests are not supported. +// If a request includes a signature, it will result in a ParseError. +func ParseRequest(bytes []byte) (*Request, error) { + var req ocspRequest + rest, err := asn1.Unmarshal(bytes, &req) + if err != nil { + return nil, err + } + if len(rest) > 0 { + return nil, ParseError("trailing data in OCSP request") + } + + if len(req.TBSRequest.RequestList) == 0 { + return nil, ParseError("OCSP request contains no request body") + } + innerRequest := req.TBSRequest.RequestList[0] + + hashFunc := getHashAlgorithmFromOID(innerRequest.Cert.HashAlgorithm.Algorithm) + if hashFunc == crypto.Hash(0) { + return nil, ParseError("OCSP request uses unknown hash function") + } + + return &Request{ + HashAlgorithm: hashFunc, + IssuerNameHash: innerRequest.Cert.NameHash, + IssuerKeyHash: innerRequest.Cert.IssuerKeyHash, + SerialNumber: innerRequest.Cert.SerialNumber, + }, nil +} + +// ParseResponse parses an OCSP response in DER form. It only supports +// responses for a single certificate. If the response contains a certificate +// then the signature over the response is checked. If issuer is not nil then +// it will be used to validate the signature or embedded certificate. +// +// Invalid responses and parse failures will result in a ParseError. +// Error responses will result in a ResponseError. +func ParseResponse(bytes []byte, issuer *x509.Certificate) (*Response, error) { + return ParseResponseForCert(bytes, nil, issuer) +} + +// ParseResponseForCert parses an OCSP response in DER form and searches for a +// Response relating to cert. If such a Response is found and the OCSP response +// contains a certificate then the signature over the response is checked. If +// issuer is not nil then it will be used to validate the signature or embedded +// certificate. +// +// Invalid responses and parse failures will result in a ParseError. +// Error responses will result in a ResponseError. 
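+//
+// A hedged usage sketch (editorial; derBytes, leaf, and issuer are
+// hypothetical inputs obtained elsewhere, e.g. from a TLS handshake):
+//
+//	resp, err := ParseResponseForCert(derBytes, leaf, issuer)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	if resp.Status == Revoked {
+//		log.Printf("serial %v revoked at %v", resp.SerialNumber, resp.RevokedAt)
+//	}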
+func ParseResponseForCert(bytes []byte, cert, issuer *x509.Certificate) (*Response, error) {
+	var resp responseASN1
+	rest, err := asn1.Unmarshal(bytes, &resp)
+	if err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in OCSP response")
+	}
+
+	if status := ResponseStatus(resp.Status); status != Success {
+		return nil, ResponseError{status}
+	}
+
+	if !resp.Response.ResponseType.Equal(idPKIXOCSPBasic) {
+		return nil, ParseError("bad OCSP response type")
+	}
+
+	var basicResp basicResponse
+	rest, err = asn1.Unmarshal(resp.Response.Response, &basicResp)
+	if err != nil {
+		return nil, err
+	}
+	if len(rest) > 0 {
+		return nil, ParseError("trailing data in OCSP response")
+	}
+
+	if n := len(basicResp.TBSResponseData.Responses); n == 0 || cert == nil && n > 1 {
+		return nil, ParseError("OCSP response contains bad number of responses")
+	}
+
+	var singleResp singleResponse
+	if cert == nil {
+		singleResp = basicResp.TBSResponseData.Responses[0]
+	} else {
+		match := false
+		for _, resp := range basicResp.TBSResponseData.Responses {
+			if cert.SerialNumber.Cmp(resp.CertID.SerialNumber) == 0 {
+				singleResp = resp
+				match = true
+				break
+			}
+		}
+		if !match {
+			return nil, ParseError("no response matching the supplied certificate")
+		}
+	}
+
+	ret := &Response{
+		TBSResponseData:    basicResp.TBSResponseData.Raw,
+		Signature:          basicResp.Signature.RightAlign(),
+		SignatureAlgorithm: getSignatureAlgorithmFromOID(basicResp.SignatureAlgorithm.Algorithm),
+		Extensions:         singleResp.SingleExtensions,
+		SerialNumber:       singleResp.CertID.SerialNumber,
+		ProducedAt:         basicResp.TBSResponseData.ProducedAt,
+		ThisUpdate:         singleResp.ThisUpdate,
+		NextUpdate:         singleResp.NextUpdate,
+	}
+
+	// Handle the ResponderID CHOICE tag. ResponderID can be flattened into
+	// TBSResponseData once https://go-review.googlesource.com/34503 has been
+	// released.
+	rawResponderID := basicResp.TBSResponseData.RawResponderID
+	switch rawResponderID.Tag {
+	case 1: // Name
+		var rdn pkix.RDNSequence
+		if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &rdn); err != nil || len(rest) != 0 {
+			return nil, ParseError("invalid responder name")
+		}
+		ret.RawResponderName = rawResponderID.Bytes
+	case 2: // KeyHash
+		if rest, err := asn1.Unmarshal(rawResponderID.Bytes, &ret.ResponderKeyHash); err != nil || len(rest) != 0 {
+			return nil, ParseError("invalid responder key hash")
+		}
+	default:
+		return nil, ParseError("invalid responder id tag")
+	}
+
+	if len(basicResp.Certificates) > 0 {
+		// Responders should only send a single certificate (if they
+		// send any) that connects the responder's certificate to the
+		// original issuer. We accept responses with multiple
+		// certificates due to a number of responders sending them[1], but
+		// ignore all but the first.
+		//
+		// [1] https://github.com/golang/go/issues/21527
+		ret.Certificate, err = x509.ParseCertificate(basicResp.Certificates[0].FullBytes)
+		if err != nil {
+			return nil, err
+		}
+
+		if err := ret.CheckSignatureFrom(ret.Certificate); err != nil {
+			return nil, ParseError("bad signature on embedded certificate: " + err.Error())
+		}
+
+		if issuer != nil {
+			if err := issuer.CheckSignature(ret.Certificate.SignatureAlgorithm, ret.Certificate.RawTBSCertificate, ret.Certificate.Signature); err != nil {
+				return nil, ParseError("bad OCSP signature: " + err.Error())
+			}
+		}
+	} else if issuer != nil {
+		if err := ret.CheckSignatureFrom(issuer); err != nil {
+			return nil, ParseError("bad OCSP signature: " + err.Error())
+		}
+	}
+
+	for _, ext := range singleResp.SingleExtensions {
+		if ext.Critical {
+			return nil, ParseError("unsupported critical extension")
+		}
+	}
+
+	for h, oid := range hashOIDs {
+		if singleResp.CertID.HashAlgorithm.Algorithm.Equal(oid) {
+			ret.IssuerHash = h
+			break
+		}
+	}
+	if ret.IssuerHash == 0 {
+		return nil, ParseError("unsupported issuer hash algorithm")
+	}
+
+	switch {
+	case bool(singleResp.Good):
+		ret.Status = Good
+	case bool(singleResp.Unknown):
+		ret.Status = Unknown
+	default:
+		ret.Status = Revoked
+		ret.RevokedAt = singleResp.Revoked.RevocationTime
+		ret.RevocationReason = int(singleResp.Revoked.Reason)
+	}
+
+	return ret, nil
+}
+
+// RequestOptions contains options for constructing OCSP requests.
+type RequestOptions struct {
+	// Hash contains the hash function that should be used when
+	// constructing the OCSP request. If zero, SHA-1 will be used.
+	Hash crypto.Hash
+}
+
+func (opts *RequestOptions) hash() crypto.Hash {
+	if opts == nil || opts.Hash == 0 {
+		// SHA-1 is nearly universally used in OCSP.
+		return crypto.SHA1
+	}
+	return opts.Hash
+}
+
+// CreateRequest returns a DER-encoded OCSP request for the status of cert. If
+// opts is nil then sensible defaults are used.
+func CreateRequest(cert, issuer *x509.Certificate, opts *RequestOptions) ([]byte, error) {
+	hashFunc := opts.hash()
+
+	// OCSP seems to be the only place where these raw hash identifiers are
+	// used. I took the following from
+	// http://msdn.microsoft.com/en-us/library/ff635603.aspx
+	_, ok := hashOIDs[hashFunc]
+	if !ok {
+		return nil, x509.ErrUnsupportedAlgorithm
+	}
+
+	if !hashFunc.Available() {
+		return nil, x509.ErrUnsupportedAlgorithm
+	}
+	h := opts.hash().New()
+
+	var publicKeyInfo struct {
+		Algorithm pkix.AlgorithmIdentifier
+		PublicKey asn1.BitString
+	}
+	if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil {
+		return nil, err
+	}
+
+	h.Write(publicKeyInfo.PublicKey.RightAlign())
+	issuerKeyHash := h.Sum(nil)
+
+	h.Reset()
+	h.Write(issuer.RawSubject)
+	issuerNameHash := h.Sum(nil)
+
+	req := &Request{
+		HashAlgorithm:  hashFunc,
+		IssuerNameHash: issuerNameHash,
+		IssuerKeyHash:  issuerKeyHash,
+		SerialNumber:   cert.SerialNumber,
+	}
+	return req.Marshal()
+}
+
+// CreateResponse returns a DER-encoded OCSP response with the specified contents.
+// The fields in the response are populated as follows:
+//
+// The responder cert is used to populate the responder's name field, and the
+// certificate itself is provided alongside the OCSP response signature.
+//
+// The issuer cert is used to populate the IssuerNameHash and IssuerKeyHash fields.
+//
+// The template is used to populate the SerialNumber, Status, RevokedAt,
+// RevocationReason, ThisUpdate, and NextUpdate fields.
+// +// If template.IssuerHash is not set, SHA1 will be used. +// +// The ProducedAt date is automatically set to the current date, to the nearest minute. +func CreateResponse(issuer, responderCert *x509.Certificate, template Response, priv crypto.Signer) ([]byte, error) { + var publicKeyInfo struct { + Algorithm pkix.AlgorithmIdentifier + PublicKey asn1.BitString + } + if _, err := asn1.Unmarshal(issuer.RawSubjectPublicKeyInfo, &publicKeyInfo); err != nil { + return nil, err + } + + if template.IssuerHash == 0 { + template.IssuerHash = crypto.SHA1 + } + hashOID := getOIDFromHashAlgorithm(template.IssuerHash) + if hashOID == nil { + return nil, errors.New("unsupported issuer hash algorithm") + } + + if !template.IssuerHash.Available() { + return nil, fmt.Errorf("issuer hash algorithm %v not linked into binary", template.IssuerHash) + } + h := template.IssuerHash.New() + h.Write(publicKeyInfo.PublicKey.RightAlign()) + issuerKeyHash := h.Sum(nil) + + h.Reset() + h.Write(issuer.RawSubject) + issuerNameHash := h.Sum(nil) + + innerResponse := singleResponse{ + CertID: certID{ + HashAlgorithm: pkix.AlgorithmIdentifier{ + Algorithm: hashOID, + Parameters: asn1.RawValue{Tag: 5 /* ASN.1 NULL */}, + }, + NameHash: issuerNameHash, + IssuerKeyHash: issuerKeyHash, + SerialNumber: template.SerialNumber, + }, + ThisUpdate: template.ThisUpdate.UTC(), + NextUpdate: template.NextUpdate.UTC(), + SingleExtensions: template.ExtraExtensions, + } + + switch template.Status { + case Good: + innerResponse.Good = true + case Unknown: + innerResponse.Unknown = true + case Revoked: + innerResponse.Revoked = revokedInfo{ + RevocationTime: template.RevokedAt.UTC(), + Reason: asn1.Enumerated(template.RevocationReason), + } + } + + rawResponderID := asn1.RawValue{ + Class: 2, // context-specific + Tag: 1, // Name (explicit tag) + IsCompound: true, + Bytes: responderCert.RawSubject, + } + tbsResponseData := responseData{ + Version: 0, + RawResponderID: rawResponderID, + ProducedAt: time.Now().Truncate(time.Minute).UTC(), + Responses: []singleResponse{innerResponse}, + } + + tbsResponseDataDER, err := asn1.Marshal(tbsResponseData) + if err != nil { + return nil, err + } + + hashFunc, signatureAlgorithm, err := signingParamsForPublicKey(priv.Public(), template.SignatureAlgorithm) + if err != nil { + return nil, err + } + + responseHash := hashFunc.New() + responseHash.Write(tbsResponseDataDER) + signature, err := priv.Sign(rand.Reader, responseHash.Sum(nil), hashFunc) + if err != nil { + return nil, err + } + + response := basicResponse{ + TBSResponseData: tbsResponseData, + SignatureAlgorithm: signatureAlgorithm, + Signature: asn1.BitString{ + Bytes: signature, + BitLength: 8 * len(signature), + }, + } + if template.Certificate != nil { + response.Certificates = []asn1.RawValue{ + {FullBytes: template.Certificate.Raw}, + } + } + responseDER, err := asn1.Marshal(response) + if err != nil { + return nil, err + } + + return asn1.Marshal(responseASN1{ + Status: asn1.Enumerated(Success), + Response: responseBytes{ + ResponseType: idPKIXOCSPBasic, + Response: responseDER, + }, + }) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/bmp-string.go new file mode 100644 index 00000000..233b8b62 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/bmp-string.go @@ -0,0 +1,50 @@ +// Copyright 2015 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"errors"
+	"unicode/utf16"
+)
+
+// bmpString returns s encoded in UCS-2 with a zero terminator.
+func bmpString(s string) ([]byte, error) {
+	// References:
+	// https://tools.ietf.org/html/rfc7292#appendix-B.1
+	// https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+	//  - non-BMP characters are encoded in UTF-16 by using a surrogate pair of 16-bit codes
+	//    EncodeRune returns 0xfffd if the rune does not need special encoding
+	//  - the above RFC provides the info that BMPStrings are NULL terminated.
+
+	ret := make([]byte, 0, 2*len(s)+2)
+
+	for _, r := range s {
+		if t, _ := utf16.EncodeRune(r); t != 0xfffd {
+			return nil, errors.New("pkcs12: string contains characters that cannot be encoded in UCS-2")
+		}
+		ret = append(ret, byte(r/256), byte(r%256))
+	}
+
+	return append(ret, 0, 0), nil
+}
+
+func decodeBMPString(bmpString []byte) (string, error) {
+	if len(bmpString)%2 != 0 {
+		return "", errors.New("pkcs12: odd-length BMP string")
+	}
+
+	// strip terminator if present
+	if l := len(bmpString); l >= 2 && bmpString[l-1] == 0 && bmpString[l-2] == 0 {
+		bmpString = bmpString[:l-2]
+	}
+
+	s := make([]uint16, 0, len(bmpString)/2)
+	for len(bmpString) > 0 {
+		s = append(s, uint16(bmpString[0])<<8+uint16(bmpString[1]))
+		bmpString = bmpString[2:]
+	}
+
+	return string(utf16.Decode(s)), nil
+}
diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/crypto.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/crypto.go
new file mode 100644
index 00000000..484ca51b
--- /dev/null
+++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/crypto.go
@@ -0,0 +1,131 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkcs12
+
+import (
+	"bytes"
+	"crypto/cipher"
+	"crypto/des"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"errors"
+
+	"golang.org/x/crypto/pkcs12/internal/rc2"
+)
+
+var (
+	oidPBEWithSHAAnd3KeyTripleDESCBC = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 3})
+	oidPBEWithSHAAnd40BitRC2CBC      = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 1, 6})
+)
+
+// pbeCipher is an abstraction of a PKCS#12 cipher.
+type pbeCipher interface {
+	// create returns a cipher.Block given a key.
+	create(key []byte) (cipher.Block, error)
+	// deriveKey returns a key derived from the given password and salt.
+	deriveKey(salt, password []byte, iterations int) []byte
+	// deriveIV returns an IV derived from the given password and salt.
+ deriveIV(salt, password []byte, iterations int) []byte +} + +type shaWithTripleDESCBC struct{} + +func (shaWithTripleDESCBC) create(key []byte) (cipher.Block, error) { + return des.NewTripleDESCipher(key) +} + +func (shaWithTripleDESCBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 24) +} + +func (shaWithTripleDESCBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type shaWith40BitRC2CBC struct{} + +func (shaWith40BitRC2CBC) create(key []byte) (cipher.Block, error) { + return rc2.New(key, len(key)*8) +} + +func (shaWith40BitRC2CBC) deriveKey(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 1, 5) +} + +func (shaWith40BitRC2CBC) deriveIV(salt, password []byte, iterations int) []byte { + return pbkdf(sha1Sum, 20, 64, salt, password, iterations, 2, 8) +} + +type pbeParams struct { + Salt []byte + Iterations int +} + +func pbDecrypterFor(algorithm pkix.AlgorithmIdentifier, password []byte) (cipher.BlockMode, int, error) { + var cipherType pbeCipher + + switch { + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd3KeyTripleDESCBC): + cipherType = shaWithTripleDESCBC{} + case algorithm.Algorithm.Equal(oidPBEWithSHAAnd40BitRC2CBC): + cipherType = shaWith40BitRC2CBC{} + default: + return nil, 0, NotImplementedError("algorithm " + algorithm.Algorithm.String() + " is not supported") + } + + var params pbeParams + if err := unmarshal(algorithm.Parameters.FullBytes, ¶ms); err != nil { + return nil, 0, err + } + + key := cipherType.deriveKey(params.Salt, password, params.Iterations) + iv := cipherType.deriveIV(params.Salt, password, params.Iterations) + + block, err := cipherType.create(key) + if err != nil { + return nil, 0, err + } + + return cipher.NewCBCDecrypter(block, iv), block.BlockSize(), nil +} + +func pbDecrypt(info decryptable, password []byte) (decrypted []byte, err error) { + cbc, blockSize, err := pbDecrypterFor(info.Algorithm(), password) + if err != nil { + return nil, err + } + + encrypted := info.Data() + if len(encrypted) == 0 { + return nil, errors.New("pkcs12: empty encrypted data") + } + if len(encrypted)%blockSize != 0 { + return nil, errors.New("pkcs12: input is not a multiple of the block size") + } + decrypted = make([]byte, len(encrypted)) + cbc.CryptBlocks(decrypted, encrypted) + + psLen := int(decrypted[len(decrypted)-1]) + if psLen == 0 || psLen > blockSize { + return nil, ErrDecryption + } + + if len(decrypted) < psLen { + return nil, ErrDecryption + } + ps := decrypted[len(decrypted)-psLen:] + decrypted = decrypted[:len(decrypted)-psLen] + if bytes.Compare(ps, bytes.Repeat([]byte{byte(psLen)}, psLen)) != 0 { + return nil, ErrDecryption + } + + return +} + +// decryptable abstracts an object that contains ciphertext. +type decryptable interface { + Algorithm() pkix.AlgorithmIdentifier + Data() []byte +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/errors.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/errors.go new file mode 100644 index 00000000..7377ce6f --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/errors.go @@ -0,0 +1,23 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
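+
+// Editorial note, not upstream documentation: ErrDecryption and
+// ErrIncorrectPassword below are sentinel values compared with ==. A minimal
+// sketch of the usual check around Decode (defined in pkcs12.go), with
+// hypothetical pfxData and password inputs:
+//
+//	if _, _, err := Decode(pfxData, password); err == ErrIncorrectPassword {
+//		// prompt for the password again and retry
+//	}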
+ +package pkcs12 + +import "errors" + +var ( + // ErrDecryption represents a failure to decrypt the input. + ErrDecryption = errors.New("pkcs12: decryption error, incorrect padding") + + // ErrIncorrectPassword is returned when an incorrect password is detected. + // Usually, P12/PFX data is signed to be able to verify the password. + ErrIncorrectPassword = errors.New("pkcs12: decryption password incorrect") +) + +// NotImplementedError indicates that the input is not currently supported. +type NotImplementedError string + +func (e NotImplementedError) Error() string { + return "pkcs12: " + string(e) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go new file mode 100644 index 00000000..7499e3fb --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go @@ -0,0 +1,271 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rc2 implements the RC2 cipher +/* +https://www.ietf.org/rfc/rfc2268.txt +http://people.csail.mit.edu/rivest/pubs/KRRR98.pdf + +This code is licensed under the MIT license. +*/ +package rc2 + +import ( + "crypto/cipher" + "encoding/binary" +) + +// The rc2 block size in bytes +const BlockSize = 8 + +type rc2Cipher struct { + k [64]uint16 +} + +// New returns a new rc2 cipher with the given key and effective key length t1 +func New(key []byte, t1 int) (cipher.Block, error) { + // TODO(dgryski): error checking for key length + return &rc2Cipher{ + k: expandKey(key, t1), + }, nil +} + +func (*rc2Cipher) BlockSize() int { return BlockSize } + +var piTable = [256]byte{ + 0xd9, 0x78, 0xf9, 0xc4, 0x19, 0xdd, 0xb5, 0xed, 0x28, 0xe9, 0xfd, 0x79, 0x4a, 0xa0, 0xd8, 0x9d, + 0xc6, 0x7e, 0x37, 0x83, 0x2b, 0x76, 0x53, 0x8e, 0x62, 0x4c, 0x64, 0x88, 0x44, 0x8b, 0xfb, 0xa2, + 0x17, 0x9a, 0x59, 0xf5, 0x87, 0xb3, 0x4f, 0x13, 0x61, 0x45, 0x6d, 0x8d, 0x09, 0x81, 0x7d, 0x32, + 0xbd, 0x8f, 0x40, 0xeb, 0x86, 0xb7, 0x7b, 0x0b, 0xf0, 0x95, 0x21, 0x22, 0x5c, 0x6b, 0x4e, 0x82, + 0x54, 0xd6, 0x65, 0x93, 0xce, 0x60, 0xb2, 0x1c, 0x73, 0x56, 0xc0, 0x14, 0xa7, 0x8c, 0xf1, 0xdc, + 0x12, 0x75, 0xca, 0x1f, 0x3b, 0xbe, 0xe4, 0xd1, 0x42, 0x3d, 0xd4, 0x30, 0xa3, 0x3c, 0xb6, 0x26, + 0x6f, 0xbf, 0x0e, 0xda, 0x46, 0x69, 0x07, 0x57, 0x27, 0xf2, 0x1d, 0x9b, 0xbc, 0x94, 0x43, 0x03, + 0xf8, 0x11, 0xc7, 0xf6, 0x90, 0xef, 0x3e, 0xe7, 0x06, 0xc3, 0xd5, 0x2f, 0xc8, 0x66, 0x1e, 0xd7, + 0x08, 0xe8, 0xea, 0xde, 0x80, 0x52, 0xee, 0xf7, 0x84, 0xaa, 0x72, 0xac, 0x35, 0x4d, 0x6a, 0x2a, + 0x96, 0x1a, 0xd2, 0x71, 0x5a, 0x15, 0x49, 0x74, 0x4b, 0x9f, 0xd0, 0x5e, 0x04, 0x18, 0xa4, 0xec, + 0xc2, 0xe0, 0x41, 0x6e, 0x0f, 0x51, 0xcb, 0xcc, 0x24, 0x91, 0xaf, 0x50, 0xa1, 0xf4, 0x70, 0x39, + 0x99, 0x7c, 0x3a, 0x85, 0x23, 0xb8, 0xb4, 0x7a, 0xfc, 0x02, 0x36, 0x5b, 0x25, 0x55, 0x97, 0x31, + 0x2d, 0x5d, 0xfa, 0x98, 0xe3, 0x8a, 0x92, 0xae, 0x05, 0xdf, 0x29, 0x10, 0x67, 0x6c, 0xba, 0xc9, + 0xd3, 0x00, 0xe6, 0xcf, 0xe1, 0x9e, 0xa8, 0x2c, 0x63, 0x16, 0x01, 0x3f, 0x58, 0xe2, 0x89, 0xa9, + 0x0d, 0x38, 0x34, 0x1b, 0xab, 0x33, 0xff, 0xb0, 0xbb, 0x48, 0x0c, 0x5f, 0xb9, 0xb1, 0xcd, 0x2e, + 0xc5, 0xf3, 0xdb, 0x47, 0xe5, 0xa5, 0x9c, 0x77, 0x0a, 0xa6, 0x20, 0x68, 0xfe, 0x7f, 0xc1, 0xad, +} + +func expandKey(key []byte, t1 int) [64]uint16 { + + l := make([]byte, 128) + copy(l, key) + + var t = len(key) + var t8 = (t1 + 7) / 8 + var tm = byte(255 % 
uint(1<<(8+uint(t1)-8*uint(t8)))) + + for i := len(key); i < 128; i++ { + l[i] = piTable[l[i-1]+l[uint8(i-t)]] + } + + l[128-t8] = piTable[l[128-t8]&tm] + + for i := 127 - t8; i >= 0; i-- { + l[i] = piTable[l[i+1]^l[i+t8]] + } + + var k [64]uint16 + + for i := range k { + k[i] = uint16(l[2*i]) + uint16(l[2*i+1])*256 + } + + return k +} + +func rotl16(x uint16, b uint) uint16 { + return (x >> (16 - b)) | (x << b) +} + +func (c *rc2Cipher) Encrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + var j int + + for j <= 16 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 40 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + + } + + r0 = r0 + c.k[r3&63] + r1 = r1 + c.k[r0&63] + r2 = r2 + c.k[r1&63] + r3 = r3 + c.k[r2&63] + + for j <= 60 { + // mix r0 + r0 = r0 + c.k[j] + (r3 & r2) + ((^r3) & r1) + r0 = rotl16(r0, 1) + j++ + + // mix r1 + r1 = r1 + c.k[j] + (r0 & r3) + ((^r0) & r2) + r1 = rotl16(r1, 2) + j++ + + // mix r2 + r2 = r2 + c.k[j] + (r1 & r0) + ((^r1) & r3) + r2 = rotl16(r2, 3) + j++ + + // mix r3 + r3 = r3 + c.k[j] + (r2 & r1) + ((^r2) & r0) + r3 = rotl16(r3, 5) + j++ + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} + +func (c *rc2Cipher) Decrypt(dst, src []byte) { + + r0 := binary.LittleEndian.Uint16(src[0:]) + r1 := binary.LittleEndian.Uint16(src[2:]) + r2 := binary.LittleEndian.Uint16(src[4:]) + r3 := binary.LittleEndian.Uint16(src[6:]) + + j := 63 + + for j >= 44 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 20 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + r3 = r3 - c.k[r2&63] + r2 = r2 - c.k[r1&63] + r1 = r1 - c.k[r0&63] + r0 = r0 - c.k[r3&63] + + for j >= 0 { + // unmix r3 + r3 = rotl16(r3, 16-5) + r3 = r3 - c.k[j] - (r2 & r1) - ((^r2) & r0) + j-- + + // unmix r2 + r2 = rotl16(r2, 16-3) + r2 = r2 - c.k[j] - (r1 & r0) - ((^r1) & r3) + j-- + + // 
unmix r1 + r1 = rotl16(r1, 16-2) + r1 = r1 - c.k[j] - (r0 & r3) - ((^r0) & r2) + j-- + + // unmix r0 + r0 = rotl16(r0, 16-1) + r0 = r0 - c.k[j] - (r3 & r2) - ((^r3) & r1) + j-- + + } + + binary.LittleEndian.PutUint16(dst[0:], r0) + binary.LittleEndian.PutUint16(dst[2:], r1) + binary.LittleEndian.PutUint16(dst[4:], r2) + binary.LittleEndian.PutUint16(dst[6:], r3) +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/mac.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/mac.go new file mode 100644 index 00000000..5f38aa7d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/mac.go @@ -0,0 +1,45 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/hmac" + "crypto/sha1" + "crypto/x509/pkix" + "encoding/asn1" +) + +type macData struct { + Mac digestInfo + MacSalt []byte + Iterations int `asn1:"optional,default:1"` +} + +// from PKCS#7: +type digestInfo struct { + Algorithm pkix.AlgorithmIdentifier + Digest []byte +} + +var ( + oidSHA1 = asn1.ObjectIdentifier([]int{1, 3, 14, 3, 2, 26}) +) + +func verifyMac(macData *macData, message, password []byte) error { + if !macData.Mac.Algorithm.Algorithm.Equal(oidSHA1) { + return NotImplementedError("unknown digest algorithm: " + macData.Mac.Algorithm.Algorithm.String()) + } + + key := pbkdf(sha1Sum, 20, 64, macData.MacSalt, password, macData.Iterations, 3, 20) + + mac := hmac.New(sha1.New, key) + mac.Write(message) + expectedMAC := mac.Sum(nil) + + if !hmac.Equal(macData.Mac.Digest, expectedMAC) { + return ErrIncorrectPassword + } + return nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/pbkdf.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/pbkdf.go new file mode 100644 index 00000000..5c419d41 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/pbkdf.go @@ -0,0 +1,170 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "bytes" + "crypto/sha1" + "math/big" +) + +var ( + one = big.NewInt(1) +) + +// sha1Sum returns the SHA-1 hash of in. +func sha1Sum(in []byte) []byte { + sum := sha1.Sum(in) + return sum[:] +} + +// fillWithRepeats returns v*ceiling(len(pattern) / v) bytes consisting of +// repeats of pattern. +func fillWithRepeats(pattern []byte, v int) []byte { + if len(pattern) == 0 { + return nil + } + outputLen := v * ((len(pattern) + v - 1) / v) + return bytes.Repeat(pattern, (outputLen+len(pattern)-1)/len(pattern))[:outputLen] +} + +func pbkdf(hash func([]byte) []byte, u, v int, salt, password []byte, r int, ID byte, size int) (key []byte) { + // implementation of https://tools.ietf.org/html/rfc7292#appendix-B.2 , RFC text verbatim in comments + + // Let H be a hash function built around a compression function f: + + // Z_2^u x Z_2^v -> Z_2^u + + // (that is, H has a chaining variable and output of length u bits, and + // the message input to the compression function of H is v bits). 
The
+	// values for u and v are as follows:
+
+	// HASH FUNCTION     VALUE u    VALUE v
+	// MD2, MD5            128        512
+	// SHA-1               160        512
+	// SHA-224             224        512
+	// SHA-256             256        512
+	// SHA-384             384        1024
+	// SHA-512             512        1024
+	// SHA-512/224         224        1024
+	// SHA-512/256         256        1024
+
+	// Furthermore, let r be the iteration count.
+
+	// We assume here that u and v are both multiples of 8, as are the
+	// lengths of the password and salt strings (which we denote by p and s,
+	// respectively) and the number n of pseudorandom bits required. In
+	// addition, u and v are of course non-zero.
+
+	// For information on security considerations for MD5 [19], see [25] and
+	// [1], and on those for MD2, see [18].
+
+	// The following procedure can be used to produce pseudorandom bits for
+	// a particular "purpose" that is identified by a byte called "ID".
+	// This standard specifies 3 different values for the ID byte:
+
+	// 1. If ID=1, then the pseudorandom bits being produced are to be used
+	//    as key material for performing encryption or decryption.
+
+	// 2. If ID=2, then the pseudorandom bits being produced are to be used
+	//    as an IV (Initial Value) for encryption or decryption.
+
+	// 3. If ID=3, then the pseudorandom bits being produced are to be used
+	//    as an integrity key for MACing.
+
+	// 1. Construct a string, D (the "diversifier"), by concatenating v/8
+	//    copies of ID.
+	var D []byte
+	for i := 0; i < v; i++ {
+		D = append(D, ID)
+	}
+
+	// 2. Concatenate copies of the salt together to create a string S of
+	//    length v(ceiling(s/v)) bits (the final copy of the salt may be
+	//    truncated to create S). Note that if the salt is the empty
+	//    string, then so is S.
+
+	S := fillWithRepeats(salt, v)
+
+	// 3. Concatenate copies of the password together to create a string P
+	//    of length v(ceiling(p/v)) bits (the final copy of the password
+	//    may be truncated to create P). Note that if the password is the
+	//    empty string, then so is P.
+
+	P := fillWithRepeats(password, v)
+
+	// 4. Set I=S||P to be the concatenation of S and P.
+	I := append(S, P...)
+
+	// 5. Set c=ceiling(n/u).
+	c := (size + u - 1) / u
+
+	// 6. For i=1, 2, ..., c, do the following:
+	A := make([]byte, c*20)
+	var IjBuf []byte
+	for i := 0; i < c; i++ {
+		// A. Set A_i=H^r(D||I), i.e. the r-th hash of D||I,
+		//    H(H(H(... H(D||I)))).
+		Ai := hash(append(D, I...))
+		for j := 1; j < r; j++ {
+			Ai = hash(Ai)
+		}
+		copy(A[i*20:], Ai[:])
+
+		if i < c-1 { // skip on last iteration
+			// B. Concatenate copies of Ai to create a string B of length v
+			//    bits (the final copy of Ai may be truncated to create B).
+			var B []byte
+			for len(B) < v {
+				B = append(B, Ai[:]...)
+			}
+			B = B[:v]
+
+			// C. Treating I as a concatenation I_0, I_1, ..., I_(k-1) of v-bit
+			//    blocks, where k=ceiling(s/v)+ceiling(p/v), modify I by
+			//    setting I_j=(I_j+B+1) mod 2^v for each j.
+			{
+				Bbi := new(big.Int).SetBytes(B)
+				Ij := new(big.Int)
+
+				for j := 0; j < len(I)/v; j++ {
+					Ij.SetBytes(I[j*v : (j+1)*v])
+					Ij.Add(Ij, Bbi)
+					Ij.Add(Ij, one)
+					Ijb := Ij.Bytes()
+					// We expect Ijb to be exactly v bytes,
+					// if it is longer or shorter we must
+					// adjust it accordingly.
+					if len(Ijb) > v {
+						Ijb = Ijb[len(Ijb)-v:]
+					}
+					if len(Ijb) < v {
+						if IjBuf == nil {
+							IjBuf = make([]byte, v)
+						}
+						bytesShort := v - len(Ijb)
+						for i := 0; i < bytesShort; i++ {
+							IjBuf[i] = 0
+						}
+						copy(IjBuf[bytesShort:], Ijb)
+						Ijb = IjBuf
+					}
+					copy(I[j*v:(j+1)*v], Ijb)
+				}
+			}
+		}
+	}
+	// 7. Concatenate A_1, A_2, ..., A_c together to form a pseudorandom
+	//    bit string, A.
+ + // 8. Use the first n bits of A as the output of this entire process. + return A[:size] + + // If the above process is being used to generate a DES key, the process + // should be used to create 64 random bits, and the key's parity bits + // should be set after the 64 bits have been produced. Similar concerns + // hold for 2-key and 3-key triple-DES keys, for CDMF keys, and for any + // similar keys with parity bits "built into them". +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/pkcs12.go new file mode 100644 index 00000000..55f7691d --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/pkcs12.go @@ -0,0 +1,349 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package pkcs12 implements some of PKCS#12. +// +// This implementation is distilled from https://tools.ietf.org/html/rfc7292 +// and referenced documents. It is intended for decoding P12/PFX-stored +// certificates and keys for use with the crypto/tls package. +// +// This package is frozen. If it's missing functionality you need, consider +// an alternative like software.sslmate.com/src/go-pkcs12. +package pkcs12 + +import ( + "crypto/ecdsa" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/hex" + "encoding/pem" + "errors" +) + +var ( + oidDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 1}) + oidEncryptedDataContentType = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 7, 6}) + + oidFriendlyName = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 20}) + oidLocalKeyID = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 21}) + oidMicrosoftCSPName = asn1.ObjectIdentifier([]int{1, 3, 6, 1, 4, 1, 311, 17, 1}) +) + +type pfxPdu struct { + Version int + AuthSafe contentInfo + MacData macData `asn1:"optional"` +} + +type contentInfo struct { + ContentType asn1.ObjectIdentifier + Content asn1.RawValue `asn1:"tag:0,explicit,optional"` +} + +type encryptedData struct { + Version int + EncryptedContentInfo encryptedContentInfo +} + +type encryptedContentInfo struct { + ContentType asn1.ObjectIdentifier + ContentEncryptionAlgorithm pkix.AlgorithmIdentifier + EncryptedContent []byte `asn1:"tag:0,optional"` +} + +func (i encryptedContentInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.ContentEncryptionAlgorithm +} + +func (i encryptedContentInfo) Data() []byte { return i.EncryptedContent } + +type safeBag struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"tag:0,explicit"` + Attributes []pkcs12Attribute `asn1:"set,optional"` +} + +type pkcs12Attribute struct { + Id asn1.ObjectIdentifier + Value asn1.RawValue `asn1:"set"` +} + +type encryptedPrivateKeyInfo struct { + AlgorithmIdentifier pkix.AlgorithmIdentifier + EncryptedData []byte +} + +func (i encryptedPrivateKeyInfo) Algorithm() pkix.AlgorithmIdentifier { + return i.AlgorithmIdentifier +} + +func (i encryptedPrivateKeyInfo) Data() []byte { + return i.EncryptedData +} + +// PEM block types +const ( + certificateType = "CERTIFICATE" + privateKeyType = "PRIVATE KEY" +) + +// unmarshal calls asn1.Unmarshal, but also returns an error if there is any +// trailing data after unmarshaling. 
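+//
+// Editorial example with a hypothetical derBytes input: appending a stray
+// byte makes the call fail with "pkcs12: trailing data found" even though
+// asn1.Unmarshal alone would succeed:
+//
+//	var raw asn1.RawValue
+//	err := unmarshal(append(derBytes, 0x00), &raw)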
+func unmarshal(in []byte, out interface{}) error { + trailing, err := asn1.Unmarshal(in, out) + if err != nil { + return err + } + if len(trailing) != 0 { + return errors.New("pkcs12: trailing data found") + } + return nil +} + +// ToPEM converts all "safe bags" contained in pfxData to PEM blocks. +func ToPEM(pfxData []byte, password string) ([]*pem.Block, error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, ErrIncorrectPassword + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + + if err != nil { + return nil, err + } + + blocks := make([]*pem.Block, 0, len(bags)) + for _, bag := range bags { + block, err := convertBag(&bag, encodedPassword) + if err != nil { + return nil, err + } + blocks = append(blocks, block) + } + + return blocks, nil +} + +func convertBag(bag *safeBag, password []byte) (*pem.Block, error) { + block := &pem.Block{ + Headers: make(map[string]string), + } + + for _, attribute := range bag.Attributes { + k, v, err := convertAttribute(&attribute) + if err != nil { + return nil, err + } + block.Headers[k] = v + } + + switch { + case bag.Id.Equal(oidCertBag): + block.Type = certificateType + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, err + } + block.Bytes = certsData + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + block.Type = privateKeyType + + key, err := decodePkcs8ShroudedKeyBag(bag.Value.Bytes, password) + if err != nil { + return nil, err + } + + switch key := key.(type) { + case *rsa.PrivateKey: + block.Bytes = x509.MarshalPKCS1PrivateKey(key) + case *ecdsa.PrivateKey: + block.Bytes, err = x509.MarshalECPrivateKey(key) + if err != nil { + return nil, err + } + default: + return nil, errors.New("found unknown private key type in PKCS#8 wrapping") + } + default: + return nil, errors.New("don't know how to convert a safe bag of type " + bag.Id.String()) + } + return block, nil +} + +func convertAttribute(attribute *pkcs12Attribute) (key, value string, err error) { + isString := false + + switch { + case attribute.Id.Equal(oidFriendlyName): + key = "friendlyName" + isString = true + case attribute.Id.Equal(oidLocalKeyID): + key = "localKeyId" + case attribute.Id.Equal(oidMicrosoftCSPName): + // This key is chosen to match OpenSSL. + key = "Microsoft CSP Name" + isString = true + default: + return "", "", errors.New("pkcs12: unknown attribute with OID " + attribute.Id.String()) + } + + if isString { + if err := unmarshal(attribute.Value.Bytes, &attribute.Value); err != nil { + return "", "", err + } + if value, err = decodeBMPString(attribute.Value.Bytes); err != nil { + return "", "", err + } + } else { + var id []byte + if err := unmarshal(attribute.Value.Bytes, &id); err != nil { + return "", "", err + } + value = hex.EncodeToString(id) + } + + return key, value, nil +} + +// Decode extracts a certificate and private key from pfxData. This function +// assumes that there is only one certificate and only one private key in the +// pfxData; if there are more use ToPEM instead. 
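+//
+// A minimal sketch (editorial; pfxData and password are hypothetical inputs
+// read elsewhere) of feeding the result into crypto/tls:
+//
+//	key, cert, err := Decode(pfxData, password)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	tlsCert := tls.Certificate{
+//		Certificate: [][]byte{cert.Raw},
+//		PrivateKey:  key,
+//	}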
+func Decode(pfxData []byte, password string) (privateKey interface{}, certificate *x509.Certificate, err error) { + encodedPassword, err := bmpString(password) + if err != nil { + return nil, nil, err + } + + bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword) + if err != nil { + return nil, nil, err + } + + if len(bags) != 2 { + err = errors.New("pkcs12: expected exactly two safe bags in the PFX PDU") + return + } + + for _, bag := range bags { + switch { + case bag.Id.Equal(oidCertBag): + if certificate != nil { + err = errors.New("pkcs12: expected exactly one certificate bag") + } + + certsData, err := decodeCertBag(bag.Value.Bytes) + if err != nil { + return nil, nil, err + } + certs, err := x509.ParseCertificates(certsData) + if err != nil { + return nil, nil, err + } + if len(certs) != 1 { + err = errors.New("pkcs12: expected exactly one certificate in the certBag") + return nil, nil, err + } + certificate = certs[0] + + case bag.Id.Equal(oidPKCS8ShroundedKeyBag): + if privateKey != nil { + err = errors.New("pkcs12: expected exactly one key bag") + } + + if privateKey, err = decodePkcs8ShroudedKeyBag(bag.Value.Bytes, encodedPassword); err != nil { + return nil, nil, err + } + } + } + + if certificate == nil { + return nil, nil, errors.New("pkcs12: certificate missing") + } + if privateKey == nil { + return nil, nil, errors.New("pkcs12: private key missing") + } + + return +} + +func getSafeContents(p12Data, password []byte) (bags []safeBag, updatedPassword []byte, err error) { + pfx := new(pfxPdu) + if err := unmarshal(p12Data, pfx); err != nil { + return nil, nil, errors.New("pkcs12: error reading P12 data: " + err.Error()) + } + + if pfx.Version != 3 { + return nil, nil, NotImplementedError("can only decode v3 PFX PDU's") + } + + if !pfx.AuthSafe.ContentType.Equal(oidDataContentType) { + return nil, nil, NotImplementedError("only password-protected PFX is implemented") + } + + // unmarshal the explicit bytes in the content for type 'data' + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &pfx.AuthSafe.Content); err != nil { + return nil, nil, err + } + + if len(pfx.MacData.Mac.Algorithm.Algorithm) == 0 { + return nil, nil, errors.New("pkcs12: no MAC in data") + } + + if err := verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password); err != nil { + if err == ErrIncorrectPassword && len(password) == 2 && password[0] == 0 && password[1] == 0 { + // some implementations use an empty byte array + // for the empty string password try one more + // time with empty-empty password + password = nil + err = verifyMac(&pfx.MacData, pfx.AuthSafe.Content.Bytes, password) + } + if err != nil { + return nil, nil, err + } + } + + var authenticatedSafe []contentInfo + if err := unmarshal(pfx.AuthSafe.Content.Bytes, &authenticatedSafe); err != nil { + return nil, nil, err + } + + if len(authenticatedSafe) != 2 { + return nil, nil, NotImplementedError("expected exactly two items in the authenticated safe") + } + + for _, ci := range authenticatedSafe { + var data []byte + + switch { + case ci.ContentType.Equal(oidDataContentType): + if err := unmarshal(ci.Content.Bytes, &data); err != nil { + return nil, nil, err + } + case ci.ContentType.Equal(oidEncryptedDataContentType): + var encryptedData encryptedData + if err := unmarshal(ci.Content.Bytes, &encryptedData); err != nil { + return nil, nil, err + } + if encryptedData.Version != 0 { + return nil, nil, NotImplementedError("only version 0 of EncryptedData is supported") + } + if data, err = 
pbDecrypt(encryptedData.EncryptedContentInfo, password); err != nil { + return nil, nil, err + } + default: + return nil, nil, NotImplementedError("only data and encryptedData content types are supported in authenticated safe") + } + + var safeContents []safeBag + if err := unmarshal(data, &safeContents); err != nil { + return nil, nil, err + } + bags = append(bags, safeContents...) + } + + return bags, password, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/safebags.go b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/safebags.go new file mode 100644 index 00000000..def1f7b9 --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/golang.org/x/crypto/pkcs12/safebags.go @@ -0,0 +1,57 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkcs12 + +import ( + "crypto/x509" + "encoding/asn1" + "errors" +) + +var ( + // see https://tools.ietf.org/html/rfc7292#appendix-D + oidCertTypeX509Certificate = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 9, 22, 1}) + oidPKCS8ShroundedKeyBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 2}) + oidCertBag = asn1.ObjectIdentifier([]int{1, 2, 840, 113549, 1, 12, 10, 1, 3}) +) + +type certBag struct { + Id asn1.ObjectIdentifier + Data []byte `asn1:"tag:0,explicit"` +} + +func decodePkcs8ShroudedKeyBag(asn1Data, password []byte) (privateKey interface{}, err error) { + pkinfo := new(encryptedPrivateKeyInfo) + if err = unmarshal(asn1Data, pkinfo); err != nil { + return nil, errors.New("pkcs12: error decoding PKCS#8 shrouded key bag: " + err.Error()) + } + + pkData, err := pbDecrypt(pkinfo, password) + if err != nil { + return nil, errors.New("pkcs12: error decrypting PKCS#8 shrouded key bag: " + err.Error()) + } + + ret := new(asn1.RawValue) + if err = unmarshal(pkData, ret); err != nil { + return nil, errors.New("pkcs12: error unmarshaling decrypted private key: " + err.Error()) + } + + if privateKey, err = x509.ParsePKCS8PrivateKey(pkData); err != nil { + return nil, errors.New("pkcs12: error parsing PKCS#8 private key: " + err.Error()) + } + + return privateKey, nil +} + +func decodeCertBag(asn1Data []byte) (x509Certificates []byte, err error) { + bag := new(certBag) + if err := unmarshal(asn1Data, bag); err != nil { + return nil, errors.New("pkcs12: error decoding cert bag: " + err.Error()) + } + if !bag.Id.Equal(oidCertTypeX509Certificate) { + return nil, NotImplementedError("only X509 certificates are supported") + } + return bag.Data, nil +} diff --git a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/codereview.cfg b/vendor/github.com/elastic/beats/vendor/golang.org/x/text/codereview.cfg deleted file mode 100644 index 3f8b14b6..00000000 --- a/vendor/github.com/elastic/beats/vendor/golang.org/x/text/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/github.com/elastic/beats/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/github.com/elastic/beats/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go new file mode 100644 index 00000000..5b6c587a --- /dev/null +++ b/vendor/github.com/elastic/beats/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -0,0 +1,146 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: google/api/httpbody.proto + +package httpbody + +import ( + fmt "fmt" + math "math" + + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Message that represents an arbitrary HTTP body. It should only be used for +// payload formats that can't be represented as JSON, such as raw binary or +// an HTML page. +// +// +// This message can be used both in streaming and non-streaming API methods in +// the request as well as the response. +// +// It can be used as a top-level request field, which is convenient if one +// wants to extract parameters from either the URL or HTTP template into the +// request fields and also want access to the raw HTTP body. +// +// Example: +// +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; +// +// // The raw HTTP body is bound to this field. +// google.api.HttpBody http_body = 2; +// } +// +// service ResourceService { +// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) returns +// (google.protobuf.Empty); +// } +// +// Example with streaming methods: +// +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// } +// +// Use of this type only changes how the request and response bodies are +// handled, all other features will continue to work unchanged. +type HttpBody struct { + // The HTTP Content-Type header value specifying the content type of the body. + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The HTTP request/response body as raw binary. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Application specific response metadata. Must be set in the first response + // for streaming APIs. 
+ Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HttpBody) Reset() { *m = HttpBody{} } +func (m *HttpBody) String() string { return proto.CompactTextString(m) } +func (*HttpBody) ProtoMessage() {} +func (*HttpBody) Descriptor() ([]byte, []int) { + return fileDescriptor_09ea2ecaa32a0070, []int{0} +} + +func (m *HttpBody) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HttpBody.Unmarshal(m, b) +} +func (m *HttpBody) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HttpBody.Marshal(b, m, deterministic) +} +func (m *HttpBody) XXX_Merge(src proto.Message) { + xxx_messageInfo_HttpBody.Merge(m, src) +} +func (m *HttpBody) XXX_Size() int { + return xxx_messageInfo_HttpBody.Size(m) +} +func (m *HttpBody) XXX_DiscardUnknown() { + xxx_messageInfo_HttpBody.DiscardUnknown(m) +} + +var xxx_messageInfo_HttpBody proto.InternalMessageInfo + +func (m *HttpBody) GetContentType() string { + if m != nil { + return m.ContentType + } + return "" +} + +func (m *HttpBody) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +func (m *HttpBody) GetExtensions() []*any.Any { + if m != nil { + return m.Extensions + } + return nil +} + +func init() { + proto.RegisterType((*HttpBody)(nil), "google.api.HttpBody") +} + +func init() { proto.RegisterFile("google/api/httpbody.proto", fileDescriptor_09ea2ecaa32a0070) } + +var fileDescriptor_09ea2ecaa32a0070 = []byte{ + // 229 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8f, 0x31, 0x4f, 0xc3, 0x30, + 0x10, 0x85, 0xe5, 0xb6, 0x42, 0x70, 0x2d, 0x0c, 0x16, 0x43, 0x60, 0x0a, 0x4c, 0x99, 0x6c, 0x09, + 0xd8, 0x3a, 0x35, 0x0b, 0xb0, 0x45, 0x11, 0x13, 0x0b, 0x72, 0x1a, 0xe3, 0x46, 0x2a, 0x77, 0xa7, + 0xe6, 0x10, 0xf8, 0xef, 0xf0, 0x2b, 0x19, 0x11, 0x69, 0x2c, 0xe8, 0xf6, 0xe4, 0xef, 0x3d, 0xbf, + 0x77, 0x70, 0x11, 0x88, 0xc2, 0xd6, 0x5b, 0xc7, 0x9d, 0xdd, 0x88, 0x70, 0x43, 0x6d, 0x34, 0xbc, + 0x23, 0x21, 0x0d, 0x7b, 0x64, 0x1c, 0x77, 0x97, 0xc9, 0x36, 0x90, 0xe6, 0xfd, 0xd5, 0x3a, 0x1c, + 0x6d, 0xd7, 0x1f, 0x70, 0xfc, 0x20, 0xc2, 0x25, 0xb5, 0x51, 0x5f, 0xc1, 0x62, 0x4d, 0x28, 0x1e, + 0xe5, 0x45, 0x22, 0xfb, 0x4c, 0xe5, 0xaa, 0x38, 0xa9, 0xe7, 0xe3, 0xdb, 0x53, 0x64, 0xaf, 0x35, + 0xcc, 0x5a, 0x27, 0x2e, 0x9b, 0xe4, 0xaa, 0x58, 0xd4, 0x83, 0xd6, 0x77, 0x00, 0xfe, 0x53, 0x3c, + 0xf6, 0x1d, 0x61, 0x9f, 0x4d, 0xf3, 0x69, 0x31, 0xbf, 0x39, 0x37, 0x63, 0x7d, 0xaa, 0x34, 0x2b, + 0x8c, 0xf5, 0x3f, 0x5f, 0xb9, 0x81, 0xb3, 0x35, 0xbd, 0x99, 0xbf, 0x95, 0xe5, 0x69, 0x1a, 0x52, + 0xfd, 0x66, 0x2a, 0xf5, 0xbc, 0x1c, 0x61, 0xa0, 0xad, 0xc3, 0x60, 0x68, 0x17, 0x6c, 0xf0, 0x38, + 0xfc, 0x68, 0xf7, 0xc8, 0x71, 0xd7, 0x1f, 0x1c, 0xbf, 0x4c, 0xe2, 0x5b, 0xa9, 0xaf, 0xc9, 0xec, + 0x7e, 0x55, 0x3d, 0x36, 0x47, 0x43, 0xe2, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0x78, 0xb9, 0x16, + 0x2b, 0x2d, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.gitignore b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.gitignore deleted file mode 100644 index eb9253a8..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -.dsn -*.test -*/build/* -*~* -*.swp -env.sh diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.golangci.yml 
b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.golangci.yml deleted file mode 100644 index 6cab09a4..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.golangci.yml +++ /dev/null @@ -1,19 +0,0 @@ -linters-settings: - maligned: - suggest-new: true - -linters: - disable: - - structcheck - - errcheck - #- typecheck - -issues: - exclude: - - "`?encodeFixed(32|64)Pb`? is unused" - - "`?_Ctype[^ `]*`? is unused" - - "`?_cgo[^ `]*`? is unused" - - "SA4000" - - "Example[^ ]* refers to unknown" - -# vim: set et shiftwidth=2: diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.travis.yml b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.travis.yml deleted file mode 100644 index 316189ad..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/.travis.yml +++ /dev/null @@ -1,38 +0,0 @@ ---- -language: go -sudo: required - -services: - - docker - -go: - - "1.11.1" - -go_import_path: gopkg.in/goracle.v2 - -env: - global: - - GORACLE_DRV_TEST_USERNAME=test - - GORACLE_DRV_TEST_PASSWORD=test - - GORACLE_DRV_TEST_DB=oracle.gthomas.eu:49161/xe - -before_install: - - docker build -t oracle-instant-client:12.2 ./contrib/oracle-instant-client - -install: - - go get -t . - -script: - - go test -c -covermode=atomic . - - docker run -ti --rm - -e GORACLE_DRV_TEST_USERNAME=$GORACLE_DRV_TEST_USERNAME - -e GORACLE_DRV_TEST_PASSWORD=$GORACLE_DRV_TEST_PASSWORD - -e GORACLE_DRV_TEST_DB=$GORACLE_DRV_TEST_DB - -v $(pwd)/goracle.v2.test:/goracle.v2.test:ro - oracle-instant-client:12.2 - /goracle.v2.test -test.coverprofile=coverage.txt -test.v - -after_success: - - bash <(curl -s https://codecov.io/bash) - -# vim: set et: diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/conn_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/conn_test.go deleted file mode 100644 index 3d6f8302..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/conn_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package goracle - -import ( - "database/sql/driver" - "strings" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" -) - -func TestParseConnString(t *testing.T) { - wantAt := ConnectionParams{ - Username: "cc", Password: "c@c*1", SID: "192.168.1.1/cc", - } - wantDefault := ConnectionParams{ - Username: "user", Password: "pass", SID: "sid", - ConnClass: DefaultConnectionClass, - MinSessions: DefaultPoolMinSessions, MaxSessions: DefaultPoolMaxSessions, - PoolIncrement: DefaultPoolIncrement} - - wantXO := wantDefault - wantXO.SID = "localhost/sid" - - wantHeterogeneous := wantXO - wantHeterogeneous.HeterogeneousPool = true - - setP := func(s, p string) string { - if i := strings.Index(s, ":SECRET-"); i >= 0 { - if j := strings.Index(s[i:], "@"); j >= 0 { - return s[:i+1] + p + s[i+j:] - } - } - return s - } - - for tName, tCase := range map[string]struct { - In string - Want ConnectionParams - }{ - "simple": {In: "user/pass@sid", Want: wantDefault}, - "full": {In: "oracle://user:pass@sid/?poolMinSessions=3&poolMaxSessions=9&poolIncrement=3&connectionClass=POOLED&sysoper=1&sysdba=0", - Want: ConnectionParams{Username: "user", Password: "pass", SID: "sid", - ConnClass: "POOLED", IsSysOper: true, - MinSessions: 3, MaxSessions: 9, PoolIncrement: 3}}, - - "@": { - In: setP(wantAt.String(), wantAt.Password), - Want: wantAt}, - - "xo": {In: "oracle://user:pass@localhost/sid", Want: wantXO}, - "heterogeneous": {In: "oracle://user:pass@localhost/sid?heterogeneousPool=1", Want: wantHeterogeneous}, - } { - t.Log(tCase.In) - P, err := ParseConnString(tCase.In) - if err != nil { - t.Errorf("%s: %v", tName, err) - continue - } - if P != tCase.Want { - t.Errorf("%s: parse of %q got %#v, wanted %#v\n%s", tName, tCase.In, P, tCase.Want, cmp.Diff(tCase.Want, P)) - continue - } - s := setP(P.String(), P.Password) - Q, err := ParseConnString(s) - if err != nil { - t.Errorf("%s: parseConnString %v", tName, err) - continue - } - if P != Q { - t.Errorf("%s: params got %+v, wanted %+v\n%s", tName, P, Q, cmp.Diff(P, Q)) - continue - } - if got := setP(Q.String(), Q.Password); s != got { - t.Errorf("%s: paramString got %q, wanted %q", tName, got, s) - } - } -} - -func TestMaybeBadConn(t *testing.T) { - want := driver.ErrBadConn - if got := maybeBadConn(errors.Wrap(want, "bad")); got != want { - t.Errorf("got %v, wanted %v", got, want) - } -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/drv_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/drv_test.go deleted file mode 100644 index 98f56567..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/drv_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package goracle - -import ( - "encoding/json" - "testing" - - "github.com/pkg/errors" -) - -func TestFromErrorInfo(t *testing.T) { - errInfo := newErrorInfo(0, "ORA-24315: érvénytelen attribútumtípus\n") - t.Log("errInfo", errInfo) - oe := fromErrorInfo(errInfo) - t.Log("OraErr", oe) - if oe.Code() != 24315 { - t.Errorf("got %d, wanted 24315", oe.Code()) - } -} - -func TestMarshalJSON(t *testing.T) { - n := Number("12345.6789") - b, err := (&n).MarshalJSON() - if err != nil { - t.Fatal(err) - } - n = Number("") - if err = n.UnmarshalJSON(b); err != nil { - t.Fatal(err) - } - t.Log(n.String()) - - n = Number("") - b, err = json.Marshal(struct { - N Number - A int - }{N: n, A: 12}) - if err != nil { - t.Fatal(err) - } - t.Log(string(b)) - - type myStruct struct { - N interface{} - A int - } - n = Number("") - ttt := myStruct{N: &n, A: 12} - b, err = json.Marshal(ttt) - if err != nil { - t.Fatal(err) - } - t.Log(string(b)) -} - -func TestParseTZ(t *testing.T) { - for k, v := range map[string]int{ - "00:00": 0, "+00:00": 0, "-00:00": 0, - "01:00": 3600, "+01:00": 3600, "-01:01": -3601, - "+02:03": 7203, - } { - i, err := parseTZ(k) - if err != nil { - t.Fatal(errors.Wrap(err, k)) - } - if i != v { - t.Errorf("%s. got %d, wanted %d.", k, i, v) - } - } -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/example_shutdown_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/example_shutdown_test.go deleted file mode 100644 index aa6be9fc..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/example_shutdown_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2018 Kurt K, Tamás Gulácsi. -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goracle_test - -import ( - "database/sql" - "log" - - "github.com/pkg/errors" - - goracle "gopkg.in/goracle.v2" -) - -// ExampleStartup calls exampleStartup to start a database. -func ExampleStartup() { - if err := exampleStartup(goracle.StartupDefault); err != nil { - log.Fatal(err) - } -} -func exampleStartup(startupMode goracle.StartupMode) error { - dsn := "oracle://?sysdba=1&prelim=1" - db, err := sql.Open("goracle", dsn) - if err != nil { - log.Fatal(errors.Wrap(err, dsn)) - } - defer db.Close() - - oraDB, err := goracle.DriverConn(db) - if err != nil { - return err - } - log.Println("Starting database") - if err = oraDB.Startup(startupMode); err != nil { - return err - } - // You cannot alter database on the prelim_auth connection. - // So open a new connection and complete startup, as Startup starts pmon. - db2, err := sql.Open("goracle", "oracle://?sysdba=1") - if err != nil { - return err - } - defer db2.Close() - - log.Println("Mounting database") - if _, err = db2.Exec("alter database mount"); err != nil { - return err - } - log.Println("Opening database") - if _, err = db2.Exec("alter database open"); err != nil { - return err - } - return nil -} - -// ExampleShutdown is an example of how to shut down a database. 
-func ExampleShutdown() { - dsn := "oracle://?sysdba=1" // equivalent to "/ as sysdba" - db, err := sql.Open("goracle", dsn) - if err != nil { - log.Fatal(errors.Wrap(err, dsn)) - } - defer db.Close() - - if err = exampleShutdown(db, goracle.ShutdownTransactionalLocal); err != nil { - log.Fatal(err) - } -} - -func exampleShutdown(db *sql.DB, shutdownMode goracle.ShutdownMode) error { - oraDB, err := goracle.DriverConn(db) - if err != nil { - return err - } - log.Printf("Beginning shutdown %v", shutdownMode) - if err = oraDB.Shutdown(shutdownMode); err != nil { - return err - } - // If we abort the shutdown process is over immediately. - if shutdownMode == goracle.ShutdownAbort { - return nil - } - - log.Println("Closing database") - if _, err = db.Exec("alter database close normal"); err != nil { - return err - } - log.Println("Unmounting database") - if _, err = db.Exec("alter database dismount"); err != nil { - return err - } - log.Println("Finishing shutdown") - if err = oraDB.Shutdown(goracle.ShutdownFinal); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/obj_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/obj_test.go deleted file mode 100644 index dc926ddb..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/obj_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
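TestMarshalJSON in the deleted drv_test.go above exercises the JSON round trip of goracle.Number, which keeps Oracle NUMBER precision that float64 would lose. A minimal sketch of the same idea with an invented payload struct:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	goracle "gopkg.in/goracle.v2"
)

// row is an invented type for illustration; Number carries the value
// as a decimal string rather than a lossy float.
type row struct {
	Amount goracle.Number
	Count  int
}

func main() {
	b, err := json.Marshal(row{Amount: goracle.Number("12345.6789"), Count: 2})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b)) // exact form depends on Number.MarshalJSON

	var r row
	if err := json.Unmarshal(b, &r); err != nil {
		log.Fatal(err)
	}
	fmt.Println(r.Amount.String())
}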
- -package goracle - -import ( - "context" - "database/sql/driver" - "fmt" - "os" - "strings" - "sync" - "testing" - "time" - - "github.com/pkg/errors" -) - -var ( - testCon *conn - testDrv *drv - testOpenErr error - clientVersion, serverVersion VersionInfo - initOnce sync.Once -) - -func initConn() (*drv, *conn, error) { - initOnce.Do(func() { - testDrv = newDrv() - dc, err := testDrv.Open( - fmt.Sprintf("oracle://%s:%s@%s/?poolMinSessions=1&poolMaxSessions=4&poolIncrement=1&connectionClass=POOLED", - os.Getenv("GORACLE_DRV_TEST_USERNAME"), - os.Getenv("GORACLE_DRV_TEST_PASSWORD"), - os.Getenv("GORACLE_DRV_TEST_DB"), - ), - ) - if err != nil { - testOpenErr = err - return - } - testCon = dc.(*conn) - }) - return testDrv, testCon, testOpenErr -} - -func TestObjectDirect(t *testing.T) { - _, testCon, err := initConn() - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - const crea = `CREATE OR REPLACE PACKAGE test_pkg_obj IS - TYPE int_tab_typ IS TABLE OF PLS_INTEGER INDEX BY PLS_INTEGER; - TYPE rec_typ IS RECORD (int PLS_INTEGER, num NUMBER, vc VARCHAR2(1000), c CHAR(1000), dt DATE); - TYPE tab_typ IS TABLE OF rec_typ INDEX BY PLS_INTEGER; -END;` - if err = prepExecMany(ctx, testCon, crea); err != nil { - t.Fatal(err) - } - defer prepExec(ctx, testCon, "DROP PACKAGE test_pkg_obj") - - //defer tl.enableLogging(t)() - ot, err := testCon.GetObjectType("test_pkg_obj.tab_typ") - if err != nil { - if clientVersion.Version >= 12 && serverVersion.Version >= 12 { - t.Fatal(fmt.Sprintf("%+v", err)) - } - t.Log(err) - t.Skip("client or server < 12") - } - t.Log(ot) -} - -func prepExecMany(ctx context.Context, testCon *conn, queries string) error { - for _, qry := range strings.Split(queries, ";\n") { - if strings.HasSuffix(qry, " END") { - qry += ";" - } - if err := prepExec(ctx, testCon, qry); err != nil { - return err - } - } - return nil -} - -func prepExec(ctx context.Context, testCon *conn, qry string, args ...driver.NamedValue) error { - stmt, err := testCon.PrepareContext(ctx, qry) - if err != nil { - return errors.Wrap(err, qry) - } - st := stmt.(*statement) - _, err = st.ExecContext(ctx, args) - stmt.Close() - return errors.Wrap(err, qry) -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/orahlp_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/orahlp_test.go deleted file mode 100644 index b6cfc89e..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/orahlp_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
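TestObjectDirect above only trusts GetObjectType when both client and server are at least Oracle 12. The same guard as a small helper, assuming db was opened with the goracle driver (ServerVersion and ClientVersion are the package-level helpers these tests use; the package name is hypothetical):

package example

import (
	"database/sql"

	goracle "gopkg.in/goracle.v2"
)

// supportsObjectTypes reports whether both ends of the connection are
// Oracle 12 or newer, the precondition the object-type tests rely on.
func supportsObjectTypes(db *sql.DB) (bool, error) {
	sv, err := goracle.ServerVersion(db)
	if err != nil {
		return false, err
	}
	cv, err := goracle.ClientVersion(db)
	if err != nil {
		return false, err
	}
	return sv.Version >= 12 && cv.Version >= 12, nil
}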
- -package goracle - -import ( - "reflect" - "testing" - - "github.com/google/go-cmp/cmp" -) - -func TestMapToSlice(t *testing.T) { - for i, tc := range []struct { - in, await string - params []interface{} - }{ - { - `SELECT NVL(MAX(F_dazon), :dazon) FROM T_spl_level - WHERE (F_spl_azon = :lev_azon OR --:lev_azon OR - F_ssz = 0 AND F_lev_azon = /*:lev_azon*/:lev_azon)`, - `SELECT NVL(MAX(F_dazon), :1) FROM T_spl_level - WHERE (F_spl_azon = :2 OR --:lev_azon OR - F_ssz = 0 AND F_lev_azon = /*:lev_azon*/:3)`, - []interface{}{"dazon", "lev_azon", "lev_azon"}, - }, - - { - `INSERT INTO PERSON(NAME) VALUES('hello') RETURNING ID INTO :ID`, - `INSERT INTO PERSON(NAME) VALUES('hello') RETURNING ID INTO :1`, - []interface{}{"ID"}, - }, - - { - `DECLARE - i1 PLS_INTEGER; - i2 PLS_INTEGER; - v001 BRUNO.DB_WEB_ELEKTR.KOTVENY_REC_TYP; - -BEGIN - v001.dijkod := :p002#dijkod; - - DB_web.sendpreoffer_31101(p_kotveny=>v001); - - :p002#dijkod := v001.dijkod; - -END; -`, - `DECLARE - i1 PLS_INTEGER; - i2 PLS_INTEGER; - v001 BRUNO.DB_WEB_ELEKTR.KOTVENY_REC_TYP; - -BEGIN - v001.dijkod := :1; - - DB_web.sendpreoffer_31101(p_kotveny=>v001); - - :2 := v001.dijkod; - -END; -`, - []interface{}{"p002#dijkod", "p002#dijkod"}, - }, - } { - - got, params := MapToSlice(tc.in, func(s string) interface{} { return s }) - d := cmp.Diff(tc.await, got) - if d != "" { - t.Errorf("%d. diff:\n%s", i, d) - } - if !reflect.DeepEqual(params, tc.params) { - t.Errorf("%d. params: got\n\t%#v,\nwanted\n\t%#v.", i, params, tc.params) - } - } -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_bench_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_bench_test.go deleted file mode 100644 index 90a9c132..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_bench_test.go +++ /dev/null @@ -1,489 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
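TestMapToSlice above pins down how MapToSlice rewrites :name binds into positional :1, :2, ... while leaving comments that merely look like binds untouched. A sketch of feeding its output straight to ExecContext; the table, columns, and helper function are invented for illustration:

package example

import (
	"context"
	"database/sql"

	goracle "gopkg.in/goracle.v2"
)

// updateName shows MapToSlice turning named binds into positional ones.
// Table and column names are made up for this sketch.
func updateName(ctx context.Context, db *sql.DB, id int64, name string) error {
	args := map[string]interface{}{"id": id, "name": name}
	qry, params := goracle.MapToSlice(
		"UPDATE person SET name = :name WHERE id = :id",
		func(key string) interface{} { return args[key] },
	)
	// qry is now "UPDATE person SET name = :1 WHERE id = :2" and params
	// holds the looked-up values in bind order.
	_, err := db.ExecContext(ctx, qry, params...)
	return err
}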
- -package goracle_test - -import ( - "context" - "database/sql" - "fmt" - "strconv" - "strings" - "testing" - "time" - - "github.com/pkg/errors" - goracle "gopkg.in/goracle.v2" -) - -// go install && go test -c && ./goracle.v2.test -test.run=^$ -test.bench=Insert25 -test.cpuprofile=/tmp/insert25.prof && go tool pprof ./goracle.v2.test /tmp/insert25.prof - -func BenchmarkPlSQLArrayInsert25(b *testing.B) { - defer func() { - //testDb.Exec("DROP TABLE tst_bench_25_tbl") - testDb.Exec("DROP PACKAGE tst_bench_25") - }() - - for _, qry := range []string{ - //`DROP TABLE tst_bench_25_tbl`, - /*`CREATE TABLE tst_bench_25_tbl (dt DATE, st VARCHAR2(255), - ip NUMBER(12), zone NUMBER(3), plan NUMBER(3), banner NUMBER(3), - referrer VARCHAR2(255), country VARCHAR2(80), region VARCHAR2(10))`,*/ - - `CREATE OR REPLACE PACKAGE tst_bench_25 IS -TYPE cx_array_date IS TABLE OF DATE INDEX BY BINARY_INTEGER; - -TYPE cx_array_string IS TABLE OF VARCHAR2 (1000) INDEX BY BINARY_INTEGER; - -TYPE cx_array_num IS TABLE OF NUMBER INDEX BY BINARY_INTEGER; - -PROCEDURE P_BULK_INSERT_IMP (VIMP_DATES cx_array_date, - VIMP_KEYS cx_array_string, - VIMP_IP cx_array_num, - VIMP_ZONE cx_array_num, - VIMP_PLAN cx_array_num, - VIMP_BANNER cx_array_num, - VIMP_REFERRER cx_array_string, - VIMP_COUNTRY cx_array_string, - VIMP_REGION cx_array_string); -END;`, - `CREATE OR REPLACE PACKAGE BODY tst_bench_25 IS -PROCEDURE P_BULK_INSERT_IMP (VIMP_DATES cx_array_date, - VIMP_KEYS cx_array_string, - VIMP_IP cx_array_num, - VIMP_ZONE cx_array_num, - VIMP_PLAN cx_array_num, - VIMP_BANNER cx_array_num, - VIMP_REFERRER cx_array_string, - VIMP_COUNTRY cx_array_string, - VIMP_REGION cx_array_string) IS - i PLS_INTEGER; -BEGIN - i := vimp_dates.FIRST; - WHILE i IS NOT NULL LOOP - /* - INSERT INTO tst_bench_25_tbl - (dt, st, ip, zone, plan, banner, referrer, country, region) - VALUES (vimp_dates(i), vimp_keys(i), vimp_ip(i), vimp_zone(i), vimp_plan(i), - vimp_banner(i), vimp_referrer(i), vimp_country(i), vimp_region(i)); - */ - i := vimp_dates.NEXT(i); - END LOOP; - -END; - -END tst_bench_25;`, - } { - - if _, err := testDb.Exec(qry); err != nil { - if strings.HasPrefix(qry, "DROP TABLE ") { - continue - } - b.Fatal(errors.Wrap(err, qry)) - } - } - - qry := `BEGIN tst_bench_25.P_BULK_INSERT_IMP (:1, :2, :3, :4, :5, :6, :7, :8, :9); END;` - - pt1 := time.Now() - n := 512 - dates := make([]time.Time, n) - keys := make([]string, n) - ips := make([]int, n) - zones := make([]int, n) - plans := make([]int, n) - banners := make([]int, n) - referrers := make([]string, n) - countries := make([]string, n) - regions := make([]string, n) - for i := range dates { - dates[i] = pt1.Add(time.Duration(i) * time.Second) - keys[i] = "key" - ips[i] = 123456 - zones[i] = i % 256 - plans[i] = (i / 2) % 1000 - banners[i] = (i * 3) % 1000 - referrers[i] = "referrer" - countries[i] = "country" - regions[i] = "region" - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - ctx = goracle.ContextWithLog(ctx, nil) - tx, err := testDb.BeginTx(ctx, nil) - if err != nil { - b.Fatal(err) - } - defer tx.Rollback() - - b.ResetTimer() - for i := 0; i < b.N; i += n { - if _, err := tx.ExecContext(ctx, qry, - goracle.PlSQLArrays, - dates, keys, ips, zones, plans, banners, referrers, countries, regions, - ); err != nil { - if strings.Contains(err.Error(), "PLS-00905") || strings.Contains(err.Error(), "ORA-06508") { - b.Log(goracle.GetCompileErrors(testDb, false)) - } - //b.Log(dates, keys, ips, zones, plans, banners, referrers, countries, regions) - 
b.Fatal(err) - } - } - b.StopTimer() -} - -// go install && go test -c && ./goracle.v2.test -test.run=^. -test.bench=InOut -test.cpuprofile=/tmp/inout.prof && go tool pprof -cum ./goracle.v2.test /tmp/inout.prof - -func BenchmarkPlSQLArrayInOut(b *testing.B) { - defer func() { - testDb.Exec("DROP PACKAGE tst_bench_inout") - }() - - for _, qry := range []string{ - `CREATE OR REPLACE PACKAGE tst_bench_inout IS -TYPE cx_array_date IS TABLE OF DATE INDEX BY BINARY_INTEGER; - -TYPE cx_array_string IS TABLE OF VARCHAR2 (1000) INDEX BY BINARY_INTEGER; - -TYPE cx_array_num IS TABLE OF NUMBER INDEX BY BINARY_INTEGER; - -PROCEDURE P_BULK_INSERT_IMP (VIMP_DATES IN OUT NOCOPY cx_array_date, - VIMP_KEYS IN OUT NOCOPY cx_array_string, - VIMP_IP IN OUT NOCOPY cx_array_num, - VIMP_ZONE IN OUT NOCOPY cx_array_num, - VIMP_PLAN IN OUT NOCOPY cx_array_num, - VIMP_BANNER IN OUT NOCOPY cx_array_num, - VIMP_REFERRER IN OUT NOCOPY cx_array_string, - VIMP_COUNTRY IN OUT NOCOPY cx_array_string, - VIMP_REGION IN OUT NOCOPY cx_array_string); -END;`, - `CREATE OR REPLACE PACKAGE BODY tst_bench_inout IS -PROCEDURE P_BULK_INSERT_IMP (VIMP_DATES IN OUT NOCOPY cx_array_date, - VIMP_KEYS IN OUT NOCOPY cx_array_string, - VIMP_IP IN OUT NOCOPY cx_array_num, - VIMP_ZONE IN OUT NOCOPY cx_array_num, - VIMP_PLAN IN OUT NOCOPY cx_array_num, - VIMP_BANNER IN OUT NOCOPY cx_array_num, - VIMP_REFERRER IN OUT NOCOPY cx_array_string, - VIMP_COUNTRY IN OUT NOCOPY cx_array_string, - VIMP_REGION IN OUT NOCOPY cx_array_string) IS - i PLS_INTEGER; -BEGIN - i := vimp_dates.FIRST; - WHILE i IS NOT NULL LOOP - vimp_dates(i) := vimp_dates(i) + 1; - vimp_keys(i) := vimp_keys(i)||' '||i; - vimp_ip(i) := -vimp_ip(i); - vimp_zone(i) := -vimp_zone(i); - vimp_plan(i) := -vimp_plan(i); - vimp_banner(i) := -vimp_banner(i); - vimp_referrer(i) := vimp_referrer(i)||' '||i; - vimp_country(i) := vimp_country(i)||' '||i; - vimp_region(i) := vimp_region(i)||' '||i; - i := vimp_dates.NEXT(i); - END LOOP; - -END; - -END tst_bench_inout;`, - } { - - if _, err := testDb.Exec(qry); err != nil { - if strings.HasPrefix(qry, "DROP TABLE ") { - continue - } - b.Fatal(errors.Wrap(err, qry)) - } - } - - qry := `BEGIN tst_bench_inout.P_BULK_INSERT_IMP (:1, :2, :3, :4, :5, :6, :7, :8, :9); END;` - - pt1 := time.Now() - n := 512 - dates := make([]time.Time, n) - keys := make([]string, n) - ips := make([]int, n) - zones := make([]int, n) - plans := make([]int, n) - banners := make([]int, n) - referrers := make([]string, n) - countries := make([]string, n) - regions := make([]string, n) - for i := range dates { - dates[i] = pt1.Add(time.Duration(i) * time.Second) - keys[i] = "key" - ips[i] = 123456 - zones[i] = i % 256 - plans[i] = (i / 2) % 1000 - banners[i] = (i * 3) % 1000 - referrers[i] = "referrer" - countries[i] = "country" - regions[i] = "region" - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - ctx = goracle.ContextWithLog(ctx, nil) - tx, err := testDb.BeginTx(ctx, nil) - if err != nil { - b.Fatal(err) - } - defer tx.Rollback() - - params := []interface{}{ - goracle.PlSQLArrays, - sql.Out{Dest: &dates, In: true}, - sql.Out{Dest: &keys, In: true}, - sql.Out{Dest: &ips, In: true}, - sql.Out{Dest: &zones, In: true}, - sql.Out{Dest: &plans, In: true}, - sql.Out{Dest: &banners, In: true}, - sql.Out{Dest: &referrers, In: true}, - sql.Out{Dest: &countries, In: true}, - sql.Out{Dest: &regions, In: true}, - } - - b.ResetTimer() - for i := 0; i < b.N; i += n { - if _, err := tx.ExecContext(ctx, qry, params...); err != nil { - if
strings.Contains(err.Error(), "PLS-00905") || strings.Contains(err.Error(), "ORA-06508") { - b.Log(goracle.GetCompileErrors(testDb, false)) - } - //b.Log(dates, keys, ips, zones, plans, banners, referrers, countries, regions) - b.Fatal(err) - } - } - b.StopTimer() -} - -func shortenFloat(s string) string { - i := strings.IndexByte(s, '.') - if i < 0 { - return s - } - for j := i + 1; j < len(s); j++ { - if s[j] != '0' { - return s - } - } - return s[:i] -} - -const bFloat = 12345.6789 - -func BenchmarkSprintfFloat(b *testing.B) { - var length int64 - for i := 0; i < b.N; i++ { - s := fmt.Sprintf("%f", bFloat) - s = shortenFloat(s) - length += int64(len(s)) - } - b.Logf("total length: %d", length) -} - -/* -func BenchmarkAppendFloat(b *testing.B) { - var length int64 - for i := 0; i < b.N; i++ { - s := printFloat(bFloat) - length += int64(len(s)) - } -} -*/ - -func createGeoTable(tableName string, rowCount int) error { - var cnt int64 - if err := testDb.QueryRow( - "SELECT COUNT(0) FROM " + tableName, //nolint:gas - ).Scan(&cnt); err == nil && cnt == int64(rowCount) { - return nil - } - testDb.Exec("ALTER SESSION SET NLS_NUMERIC_CHARACTERS = '.,'") - testDb.Exec("DROP TABLE " + tableName) - if _, err := testDb.Exec(`CREATE TABLE ` + tableName + ` (` + //nolint:gas - ` id NUMBER(9) NOT NULL, - "RECORD_ID" NUMBER(*,0) NOT NULL ENABLE, - "PERSON_ID" NUMBER(*,0), - "PERSON_ACCOUNT_ID" NUMBER(*,0), - "ORGANIZATION_ID" NUMBER(*,0), - "ORGANIZATION_MEMBERSHIP_ID" NVARCHAR2(45), - "LOCATION" NVARCHAR2(2000) NOT NULL ENABLE, - "DEVICE_ID" NVARCHAR2(45), - "DEVICE_REGISTRATION_ID" NVARCHAR2(500), - "DEVICE_NAME" NVARCHAR2(45), - "DEVICE_TYPE" NVARCHAR2(45), - "DEVICE_OS_NAME" NVARCHAR2(45), - "DEVICE_TOKEN" NVARCHAR2(45), - "DEVICE_OTHER_DETAILS" NVARCHAR2(100) - )`, - ); err != nil { - return err - } - testData := [][]string{ - {"1", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.5518407 104.0685472)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"2", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.5520498 104.0686355)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"3", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.5517747 104.0684895)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"4", "8.64522675633357E16", "8.64522734353613E16", "", "1220457", "POINT(30.55187 104.06856)", "3A9D1838-3B2D-4119-9E07-77C6CDAC53C5", "noUwBnWojdY:APA91bE8aGLEECS9_Q1EKrp8i2B36H1X8GwIj3v58KUcuXglhf0rXJb8Ez5meQ6D5MgTAQghYEe3s9vOntU3pYPQoc6ASNw3QzhzQevAqlMQC2ukUMNyLD8Rve-IA1-6lttsCXYsYIKh", "User3’s iPhone", "iPhone", "iPhone OS", "", "DeviceID:3A9D1838-3B2D-4119-9E07-77C6CDAC53C5, SystemVersion:8.4, LocalizedModel:iPhone"}, - {"5", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.5517458 104.0685809)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"6", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.551802 104.0685301)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"7", "8.64522675633357E16", "8.64522734353613E16", "", "1220457", "POINT(30.55187 104.06856)", "3A9D1838-3B2D-4119-9E07-77C6CDAC53C5", "noUwBnWojdY:APA91bE8aGLEECS9_Q1EKrp8i2B36H1X8GwIj3v58KUcuXglhf0rXJb8Ez5meQ6D5MgTAQghYEe3s9vOnt,3pYPQoc6ASNw3QzhzQevAqlMQC2ukUMNyLD8Rve-IA1-6lttsCXYsYIKh", "User3’s iPhone", "iPhone", "iPhone OS", "", 
"DeviceID:3A9D1838-3B2D-4119-9E07-77C6CDAC53C5, SystemVersion:8.4, LocalizedModel:iPhone"}, - {"8", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.551952 104.0685893)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"9", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.5518439 104.0685473)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - {"10", "8.37064876162908E16", "8.37064898728264E16", "12", "6506", "POINT(30.5518439 104.0685473)", "a71223186cef459b", "", "Samsung SCH-I545", "Mobile", "Android 4.4.2", "", ""}, - } - cols := make([]interface{}, len(testData[0])+1) - for i := range cols { - cols[i] = make([]string, rowCount) - } - for i := 0; i < rowCount; i++ { - row := testData[i%len(testData)] - for j, col := range cols { - if j == 0 { - (col.([]string))[i] = strconv.Itoa(i) - } else { - (col.([]string))[i] = row[j-1] - } - } - } - - stmt, err := testDb.Prepare("INSERT INTO " + tableName + //nolint:gas - ` (ID,RECORD_ID,PERSON_ID,PERSON_ACCOUNT_ID,ORGANIZATION_ID,ORGANIZATION_MEMBERSHIP_ID, - LOCATION,DEVICE_ID,DEVICE_REGISTRATION_ID,DEVICE_NAME,DEVICE_TYPE, - DEVICE_OS_NAME,DEVICE_TOKEN,DEVICE_OTHER_DETAILS) - VALUES (:1,:2,:3,:4,:5, - :6,:7,:8,:9,:10, - :11,:12, :13, :14)`) - if err != nil { - return err - } - defer stmt.Close() - if _, err := stmt.Exec(cols...); err != nil { - return fmt.Errorf("%v\n%q", err, cols) - } - return nil -} - -func TestSelectOrder(t *testing.T) { - t.Parallel() - const limit = 1013 - var cnt int64 - tbl := "user_objects" - start := time.Now() - if err := testDb.QueryRow( - "SELECT count(0) FROM " + tbl, //nolint:gas - ).Scan(&cnt); err != nil { - t.Fatal(err) - } - t.Logf("%s rowcount=%d (%s)", tbl, cnt, time.Since(start)) - if cnt == 0 { - cnt = 10 - tbl = "(SELECT 1 FROM DUAL " + strings.Repeat("\nUNION ALL SELECT 1 FROM DUAL ", int(cnt)-1) + ")" //nolint:gas - } - qry := "SELECT ROWNUM FROM " + tbl //nolint:gas - for i := cnt; i < limit; i *= cnt { - qry += ", " + tbl - } - t.Logf("qry=%s", qry) - rows, err := testDb.Query(qry) - if err != nil { - t.Fatal(err) - } - defer rows.Close() - i := 0 - for rows.Next() { - var rn int - if err = rows.Scan(&rn); err != nil { - t.Fatal(err) - } - i++ - if rn != i { - t.Errorf("got %d, wanted %d.", rn, i) - } - if i > limit { - break - } - } - for rows.Next() { - } -} - -// go test -c && ./goracle.v2.test -test.run=^$ -test.bench=Date -test.cpuprofile=/tmp/cpu.prof && go tool pprof goracle.v2.test /tmp/cpu.prof -func BenchmarkSelectDate(b *testing.B) { - b.ResetTimer() - for i := 0; i < b.N; { - b.StopTimer() - rows, err := testDb.Query(`SELECT CAST(TO_DATE('2006-01-02 15:04:05', 'YYYY-MM-DD HH24:MI:SS') AS DATE) dt - FROM - (select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual), - (select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual union all select 1 from dual) - `) - if err != nil { - b.Fatal(err) - } - b.StartTimer() - for rows.Next() && i < b.N { - var dt time.Time - if err = rows.Scan(&dt); err != nil { - rows.Close() - b.Fatal(err) - } - i++ - } - b.StopTimer() - rows.Close() 
- } -} - -func BenchmarkSelect(b *testing.B) { - geoTableName := "test_geo" + tblSuffix - const geoTableRowCount = 100000 - if err := createGeoTable(geoTableName, geoTableRowCount); err != nil { - b.Fatal(err) - } - defer testDb.Exec("DROP TABLE " + geoTableName) - - for _, i := range []int{1, 10, 100, 1000} { - b.Run(fmt.Sprintf("Prefetch%d", i), func(b *testing.B) { benchSelect(b, geoTableName, i) }) - } -} - -func benchSelect(b *testing.B, geoTableName string, prefetchLen int) { - b.ResetTimer() - for i := 0; i < b.N; { - b.StopTimer() - rows, err := testDb.Query( - "SELECT location FROM "+geoTableName, //nolint:gas - goracle.FetchRowCount(prefetchLen)) - if err != nil { - b.Fatal(err) - } - var readBytes, recNo int64 - b.StartTimer() - for rows.Next() && i < b.N { - var loc string - if err = rows.Scan(&loc); err != nil { - rows.Close() - b.Fatal(err) - } - i++ - readBytes += int64(len(loc)) - recNo++ - } - b.StopTimer() - b.SetBytes(readBytes / recNo) - rows.Close() - } -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_heterogeneous_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_heterogeneous_test.go deleted file mode 100644 index 2f2277e0..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_heterogeneous_test.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2018 @wwanderley -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
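benchSelect above sweeps goracle.FetchRowCount from 1 to 1000 to measure how the prefetch window affects read throughput. The same option in ordinary query code, with an arbitrary table name and prefetch size (the package name is hypothetical):

package example

import (
	"context"
	"database/sql"

	goracle "gopkg.in/goracle.v2"
)

// streamLocations reads a large result set with an enlarged prefetch
// window. Table and column names and the row-count hint are examples.
func streamLocations(ctx context.Context, db *sql.DB) ([]string, error) {
	rows, err := db.QueryContext(ctx,
		"SELECT location FROM test_geo",
		goracle.FetchRowCount(1000), // fetch up to 1000 rows per round trip
	)
	if err != nil {
		return nil, err
	}
	defer rows.Close()
	var locs []string
	for rows.Next() {
		var loc string
		if err := rows.Scan(&loc); err != nil {
			return locs, err
		}
		locs = append(locs, loc)
	}
	return locs, rows.Err()
}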
- -package goracle_test - -import ( - "context" - "database/sql" - "fmt" - "strings" - "testing" - "time" - - "github.com/pkg/errors" - - goracle "gopkg.in/goracle.v2" -) - -func TestHeterogeneousPoolIntegration(t *testing.T) { - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - const proxyPassword = "myPassword" - const proxyUser = "test_proxyUser" - - cs, err := goracle.ParseConnString(testConStr) - if err != nil { - t.Fatal(err) - } - cs.HeterogeneousPool = true - username := cs.Username - testHeterogeneousConStr := cs.StringWithPassword() - t.Log(testHeterogeneousConStr) - - var testHeterogeneousDB *sql.DB - if testHeterogeneousDB, err = sql.Open("goracle", testHeterogeneousConStr); err != nil { - t.Fatal(errors.Wrap(err, testHeterogeneousConStr)) - } - defer testHeterogeneousDB.Close() - - conn, err := testHeterogeneousDB.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - - conn.ExecContext(ctx, `ALTER SESSION SET "_ORACLE_SCRIPT"=true`) - conn.ExecContext(ctx, fmt.Sprintf("DROP USER %s", proxyUser)) - - for _, qry := range []string{ - fmt.Sprintf("CREATE USER %s IDENTIFIED BY "+proxyPassword, proxyUser), - fmt.Sprintf("GRANT CONNECT TO %s", proxyUser), - fmt.Sprintf("GRANT CREATE SESSION TO %s", proxyUser), - fmt.Sprintf("ALTER USER %s GRANT CONNECT THROUGH %s", proxyUser, username), - } { - if _, err := conn.ExecContext(ctx, qry); err != nil { - t.Skip(errors.Wrap(err, qry)) - } - } - - for tName, tCase := range map[string]struct { - In context.Context - Want string - }{ - "noContext": {In: ctx, Want: username}, - "proxyUser": {In: goracle.ContextWithUserPassw(ctx, proxyUser, proxyPassword), Want: proxyUser}, - } { - t.Run(tName, func(t *testing.T) { - var result string - if err = testHeterogeneousDB.QueryRowContext(tCase.In, "SELECT user FROM dual").Scan(&result); err != nil { - t.Fatal(err) - } - if !strings.EqualFold(tCase.Want, result) { - t.Errorf("%s: currentUser got %s, wanted %s", tName, result, tCase.Want) - } - }) - - } - -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_lob_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_lob_test.go deleted file mode 100644 index 95f038f5..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_lob_test.go +++ /dev/null @@ -1,166 +0,0 @@ -// Copyright 2019 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goracle_test - -import ( - "bytes" - "context" - "database/sql" - "testing" - "time" - - "github.com/pkg/errors" - goracle "gopkg.in/goracle.v2" -) - -func TestLOBAppend(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - // To have a valid LOB locator, we have to keep the Stmt around. 
- qry := `DECLARE tmp BLOB; -BEGIN - DBMS_LOB.createtemporary(tmp, TRUE, DBMS_LOB.SESSION); - :1 := tmp; -END;` - stmt, err := testDb.PrepareContext(ctx, qry) - if err != nil { - t.Fatal(errors.WithMessage(err, qry)) - } - defer stmt.Close() - var tmp goracle.Lob - if _, err := stmt.ExecContext(ctx, goracle.LobAsReader(), sql.Out{Dest: &tmp}); err != nil { - t.Fatalf("Failed to create temporary lob: %+v", err) - } - t.Logf("tmp: %#v", tmp) - - want := [...]byte{1, 2, 3, 4, 5} - if _, err := testDb.ExecContext(ctx, - "BEGIN dbms_lob.append(:1, :2); END;", - tmp, goracle.Lob{Reader: bytes.NewReader(want[:])}, - ); err != nil { - t.Errorf("Failed to write buffer(%v) to lob(%v): %+v", want, tmp, err) - } - - if true { - // Either use DBMS_LOB.freetemporary - if _, err := testDb.ExecContext(ctx, "BEGIN dbms_lob.freetemporary(:1); END;", tmp); err != nil { - t.Errorf("Failed to close temporary lob(%v): %+v", tmp, err) - } - } else { - // Or Hijack and Close it. - dl, err := tmp.Hijack() - if err != nil { - t.Fatal(err) - } - defer dl.Close() - length, err := dl.Size() - if err != nil { - t.Fatal(err) - } - t.Logf("length: %d", length) - if length != int64(len(want)) { - t.Errorf("length mismatch: got %d, wanted %d", length, len(want)) - } - } -} - -func TestStatWithLobs(t *testing.T) { - t.Parallel() - //defer tl.enableLogging(t)() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - ms, err := newMetricSet(ctx, testDb) - if err != nil { - t.Fatal(err) - } - defer ms.Close() - if _, err = ms.Fetch(ctx); err != nil { - if c, ok := errors.Cause(err).(interface{ Code() int }); ok && c.Code() == 942 { - t.Skip(err) - return - } - t.Fatal(err) - } - - for i := 0; i < 100; i++ { - if err := ctx.Err(); err != nil { - break - } - events, err := ms.Fetch(ctx) - t.Log("events:", len(events)) - if err != nil { - t.Fatal(err) - } - } -} - -func newMetricSet(ctx context.Context, db *sql.DB) (*metricSet, error) { - qry := "select /* metricset: sqlstats */ inst_id, sql_fulltext, last_active_time from gv$sqlstats WHERE ROWNUM < 11" - stmt, err := db.PrepareContext(ctx, qry) - if err != nil { - return nil, err - } - - return &metricSet{ - stmt: stmt, - }, nil -} - -type metricSet struct { - stmt *sql.Stmt -} - -func (m *metricSet) Close() error { - st := m.stmt - m.stmt = nil - if st == nil { - return nil - } - return st.Close() -} - -// Fetch methods implements the data gathering and data conversion to the right format -// It returns the event which is then forward to the output. In case of an error, a -// descriptive error must be returned. 
-func (m *metricSet) Fetch(ctx context.Context) ([]event, error) { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - rows, err := m.stmt.QueryContext(ctx) - if err != nil { - return nil, err - } - defer rows.Close() - var events []event - for rows.Next() { - var e event - if err := rows.Scan(&e.ID, &e.Text, &e.LastActive); err != nil { - return events, err - } - events = append(events, e) - } - - return events, nil -} - -type event struct { - ID int64 - Text string - LastActive time.Time -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_plsql_types_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_plsql_types_test.go deleted file mode 100644 index 27acaaa4..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_plsql_types_test.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright 2019 Walter Wanderley -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goracle_test - -import ( - "context" - "database/sql" - "fmt" - "testing" - "time" - - goracle "gopkg.in/goracle.v2" -) - -var _ goracle.ObjectScanner = new(MyRecord) -var _ goracle.ObjectWriter = new(MyRecord) - -var _ goracle.ObjectScanner = new(MyTable) - -// MYRecord represents TEST_PKG_TYPES.MY_RECORD -type MyRecord struct { - *goracle.Object - ID int64 - Txt string -} - -func (r *MyRecord) Scan(src interface{}) error { - - switch obj := src.(type) { - case *goracle.Object: - id, err := obj.Get("ID") - if err != nil { - return err - } - r.ID = id.(int64) - - txt, err := obj.Get("TXT") - if err != nil { - return err - } - r.Txt = string(txt.([]byte)) - - default: - return fmt.Errorf("Cannot scan from type %T", src) - } - - return nil -} - -// WriteObject update goracle.Object with struct attributes values. -// Implement this method if you need the record as an input parameter. 
-func (r MyRecord) WriteObject() error { - // all attributes must be initialized or you get an "ORA-21525: attribute number or (collection element at index) %s violated its constraints" - err := r.ResetAttributes() - if err != nil { - return err - } - - var data goracle.Data - err = r.GetAttribute(&data, "ID") - if err != nil { - return err - } - data.SetInt64(r.ID) - r.SetAttribute("ID", &data) - - if r.Txt != "" { - err = r.GetAttribute(&data, "TXT") - if err != nil { - return err - } - - data.SetBytes([]byte(r.Txt)) - r.SetAttribute("TXT", &data) - } - - return nil -} - -// MYTable represents TEST_PKG_TYPES.MY_TABLE -type MyTable struct { - *goracle.ObjectCollection - Items []*MyRecord -} - -func (t *MyTable) Scan(src interface{}) error { - - switch obj := src.(type) { - case *goracle.Object: - collection := goracle.ObjectCollection{Object: obj} - t.Items = make([]*MyRecord, 0) - for i, err := collection.First(); err == nil; i, err = collection.Next(i) { - var data goracle.Data - err = collection.Get(&data, i) - if err != nil { - return err - } - - o := data.GetObject() - defer o.Close() - - var item MyRecord - err = item.Scan(o) - if err != nil { - return err - } - t.Items = append(t.Items, &item) - } - - default: - return fmt.Errorf("Cannot scan from type %T", src) - } - - return nil -} - -func createPackages(ctx context.Context) error { - qry := []string{`CREATE OR REPLACE PACKAGE test_pkg_types AS - TYPE my_record IS RECORD ( - id NUMBER(5), - txt VARCHAR2(200) - ); - TYPE my_table IS - TABLE OF my_record; - END test_pkg_types;`, - - `CREATE OR REPLACE PACKAGE test_pkg_sample AS - PROCEDURE test_record ( - id IN NUMBER, - txt IN VARCHAR, - rec OUT test_pkg_types.my_record - ); - - PROCEDURE test_record_in ( - rec IN OUT test_pkg_types.my_record - ); - - FUNCTION test_table ( - x NUMBER - ) RETURN test_pkg_types.my_table; - - END test_pkg_sample;`, - - `CREATE OR REPLACE PACKAGE BODY test_pkg_sample AS - - PROCEDURE test_record ( - id IN NUMBER, - txt IN VARCHAR, - rec OUT test_pkg_types.my_record - ) IS - BEGIN - rec.id := id; - rec.txt := txt; - END test_record; - - PROCEDURE test_record_in ( - rec IN OUT test_pkg_types.my_record - ) IS - BEGIN - rec.id := rec.id + 1; - rec.txt := rec.txt || ' changed'; - END test_record_in; - - FUNCTION test_table ( - x NUMBER - ) RETURN test_pkg_types.my_table IS - tb test_pkg_types.my_table; - item test_pkg_types.my_record; - BEGIN - tb := test_pkg_types.my_table(); - FOR c IN ( - SELECT - level "LEV" - FROM - "SYS"."DUAL" "A1" - CONNECT BY - level <= x - ) LOOP - item.id := c.lev; - item.txt := 'test - ' || ( c.lev * 2 ); - tb.extend(); - tb(tb.count) := item; - END LOOP; - - RETURN tb; - END test_table; - - END test_pkg_sample;`} - - for _, ddl := range qry { - _, err := testDb.ExecContext(ctx, ddl) - if err != nil { - return err - - } - } - - return nil -} - -func dropPackages(ctx context.Context) { - testDb.ExecContext(ctx, `DROP PACKAGE test_pkg_types`) - testDb.ExecContext(ctx, `DROP PACKAGE test_pkg_sample`) -} - -func TestPLSQLTypes(t *testing.T) { - t.Parallel() - - serverVersion, err := goracle.ServerVersion(testDb) - if err != nil { - t.Fatal(err) - } - clientVersion, err := goracle.ClientVersion(testDb) - if err != nil { - t.Fatal(err) - } - - if serverVersion.Version < 12 || clientVersion.Version < 12 { - t.Skip("client or server < 12") - } - - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - - err = createPackages(ctx) - if err != nil { - t.Fatal(err) - } - defer dropPackages(ctx) - - 
conn, err := goracle.DriverConn(testDb) - if err != nil { - t.Fatal(err) - } - - t.Run("Record", func(t *testing.T) { - // you must have execute privilege on package and use uppercase - objType, err := conn.GetObjectType("TEST_PKG_TYPES.MY_RECORD") - if err != nil { - t.Fatal(err) - } - defer objType.Close() - - obj, err := objType.NewObject() - if err != nil { - t.Fatal(err) - } - defer obj.Close() - - for tName, tCase := range map[string]struct { - ID int64 - txt string - want MyRecord - }{ - "default": {ID: 1, txt: "test", want: MyRecord{obj, 1, "test"}}, - "emptyTxt": {ID: 2, txt: "", want: MyRecord{obj, 2, ""}}, - "zeroValues": {want: MyRecord{Object: obj}}, - } { - rec := MyRecord{Object: obj} - params := []interface{}{ - sql.Named("id", tCase.ID), - sql.Named("txt", tCase.txt), - sql.Named("rec", sql.Out{Dest: &rec}), - } - _, err = testDb.ExecContext(ctx, `begin test_pkg_sample.test_record(:id, :txt, :rec); end;`, params...) - if err != nil { - t.Fatal(err) - } - - if rec != tCase.want { - t.Errorf("%s: record got %v, wanted %v", tName, rec, tCase.want) - } - } - }) - - t.Run("Record IN OUT", func(t *testing.T) { - // you must have execute privilege on package and use uppercase - objType, err := conn.GetObjectType("TEST_PKG_TYPES.MY_RECORD") - if err != nil { - t.Fatal(err) - } - defer objType.Close() - - for tName, tCase := range map[string]struct { - in MyRecord - wantID int64 - wantTxt string - }{ - "zeroValues": {in: MyRecord{}, wantID: 1, wantTxt: " changed"}, - "default": {in: MyRecord{ID: 1, Txt: "test"}, wantID: 2, wantTxt: "test changed"}, - "emptyTxt": {in: MyRecord{ID: 2, Txt: ""}, wantID: 3, wantTxt: " changed"}, - } { - - obj, err := objType.NewObject() - if err != nil { - t.Fatal(err) - } - defer obj.Close() - - rec := MyRecord{Object: obj, ID: tCase.in.ID, Txt: tCase.in.Txt} - params := []interface{}{ - sql.Named("rec", sql.Out{Dest: &rec, In: true}), - } - _, err = testDb.ExecContext(ctx, `begin test_pkg_sample.test_record_in(:rec); end;`, params...) - if err != nil { - t.Fatal(err) - } - - if rec.ID != tCase.wantID { - t.Errorf("%s: ID got %d, wanted %d", tName, rec.ID, tCase.wantID) - } - if rec.Txt != tCase.wantTxt { - t.Errorf("%s: Txt got %s, wanted %s", tName, rec.Txt, tCase.wantTxt) - } - } - }) - - t.Run("Table Of", func(t *testing.T) { - // you must have execute privilege on package and use uppercase - objType, err := conn.GetObjectType("TEST_PKG_TYPES.MY_TABLE") - if err != nil { - t.Fatal(err) - } - defer objType.Close() - - items := []*MyRecord{&MyRecord{ID: 1, Txt: "test - 2"}, &MyRecord{ID: 2, Txt: "test - 4"}} - - for tName, tCase := range map[string]struct { - in int64 - want MyTable - }{ - "one": {in: 1, want: MyTable{Items: items[:1]}}, - "two": {in: 2, want: MyTable{Items: items}}, - } { - - obj, err := objType.NewObject() - if err != nil { - t.Fatal(err) - } - defer obj.Close() - - tb := MyTable{ObjectCollection: &goracle.ObjectCollection{obj}} - params := []interface{}{ - sql.Named("x", tCase.in), - sql.Named("tb", sql.Out{Dest: &tb}), - } - _, err = testDb.ExecContext(ctx, `begin :tb := test_pkg_sample.test_table(:x); end;`, params...) 
- if err != nil { - t.Fatal(err) - } - - if len(tb.Items) != len(tCase.want.Items) { - t.Errorf("%s: table got %v items, wanted %v items", tName, len(tb.Items), len(tCase.want.Items)) - } else { - for i := 0; i < len(tb.Items); i++ { - got := tb.Items[i] - want := tCase.want.Items[i] - if got.ID != want.ID { - t.Errorf("%s: record ID got %v, wanted %v", tName, got.ID, want.ID) - } - if got.Txt != want.Txt { - t.Errorf("%s: record TXT got %v, wanted %v", tName, got.Txt, want.Txt) - } - } - } - } - }) - -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_qrcn_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_qrcn_test.go deleted file mode 100644 index b91a9dff..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_qrcn_test.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goracle_test - -import ( - "strings" - "testing" - - "github.com/pkg/errors" - goracle "gopkg.in/goracle.v2" -) - -func TestQRCN(t *testing.T) { - conn, err := goracle.DriverConn(testDb) - if err != nil { - t.Fatal(err) - } - - testDb.Exec("DROP TABLE test_subscr") - if _, err = testDb.Exec("CREATE TABLE test_subscr (i NUMBER)"); err != nil { - t.Fatal(err) - } - defer testDb.Exec("DROP TABLE test_subscr") - - var events []goracle.Event - cb := func(e goracle.Event) { - t.Log(e) - events = append(events, e) - } - s, err := conn.NewSubscription("subscr", cb) - if err != nil { - errS := errors.Cause(err).Error() - if strings.Contains(errS, "ORA-29970:") { - t.Skip(err.Error()) - } else if strings.Contains(errS, "ORA-29972:") { - t.Log("See \"https://docs.oracle.com/database/121/ADFNS/adfns_cqn.htm#ADFNS553\"") - var User string - _ = testDb.QueryRow("SELECT USER FROM DUAL").Scan(&User) - //t.Log("GRANT EXECUTE ON DBMS_CQ_NOTIFICATION TO "+User) - t.Log("GRANT CHANGE NOTIFICATION TO " + User + ";") - t.Skip(err.Error()) - } - t.Fatalf("%+v", err) - } - defer s.Close() - if err := s.Register("SELECT COUNT(0) FROM test_subscr"); err != nil { - t.Fatalf("%+v", err) - } - qry := "SELECT regid, table_name FROM USER_CHANGE_NOTIFICATION_REGS" - rows, err := testDb.Query(qry) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - t.Log("--- Registrations ---") - for rows.Next() { - var regID, table string - if err := rows.Scan(&regID, &table); err != nil { - t.Error(err) - break - } - t.Logf("%s: %s", regID, table) - } - t.Log("---------------------") - rows.Close() - testDb.Exec("INSERT INTO test_subscr (i) VALUES (1)") - testDb.Exec("INSERT INTO test_subscr (i) VALUES (0)") - t.Log("events:", events) -} diff --git a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_test.go b/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_test.go deleted file mode 100644 index f2ac1087..00000000 --- a/vendor/github.com/elastic/beats/vendor/gopkg.in/goracle.v2/z_test.go +++ /dev/null @@ -1,2080 +0,0 @@ -// Copyright 2017 Tamás Gulácsi -// -// -// Licensed under the Apache
License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package goracle_test - -import ( - "bytes" - "context" - "database/sql" - "database/sql/driver" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "math" - "math/rand" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/go-kit/kit/log" - "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" - "golang.org/x/sync/errgroup" - - goracle "gopkg.in/goracle.v2" -) - -var ( - testDb *sql.DB - tl = &testLogger{} - - clientVersion, serverVersion goracle.VersionInfo - testConStr string -) - -var tblSuffix = "_" + strings.Replace(runtime.Version(), ".", "#", -1) - -const maxSessions = 64 - -func init() { - logger := &log.SwapLogger{} - goracle.Log = logger.Log - if os.Getenv("VERBOSE") == "1" { - logger.Swap(tl) - } - - P := goracle.ConnectionParams{ - Username: os.Getenv("GORACLE_DRV_TEST_USERNAME"), - Password: os.Getenv("GORACLE_DRV_TEST_PASSWORD"), - SID: os.Getenv("GORACLE_DRV_TEST_DB"), - MinSessions: 1, MaxSessions: maxSessions, PoolIncrement: 1, - ConnClass: "POOLED", - EnableEvents: true, - } - if strings.HasSuffix(strings.ToUpper(P.Username), " AS SYSDBA") { - P.IsSysDBA, P.Username = true, P.Username[:len(P.Username)-10] - } - testConStr = P.StringWithPassword() - var err error - if testDb, err = sql.Open("goracle", testConStr); err != nil { - fmt.Printf("ERROR: %+v\n", err) - return - //panic(err) - } - - if testDb != nil { - if clientVersion, err = goracle.ClientVersion(testDb); err != nil { - fmt.Printf("ERROR: %+v\n", err) - return - } - if serverVersion, err = goracle.ServerVersion(testDb); err != nil { - fmt.Printf("ERROR: %+v\n", err) - return - } - fmt.Println("Server:", serverVersion) - fmt.Println("Client:", clientVersion) - } -} - -var bufPool = sync.Pool{New: func() interface{} { return bytes.NewBuffer(make([]byte, 0, 1024)) }} - -type testLogger struct { - sync.RWMutex - Ts []*testing.T - beHelped []*testing.T -} - -func (tl *testLogger) Log(args ...interface{}) error { - fmt.Println(args...) - return tl.GetLog()(args) -} -func (tl *testLogger) GetLog() func(keyvals ...interface{}) error { - return func(keyvals ...interface{}) error { - buf := bufPool.Get().(*bytes.Buffer) - defer bufPool.Put(buf) - buf.Reset() - if len(keyvals)%2 != 0 { - keyvals = append(append(make([]interface{}, 0, len(keyvals)+1), "msg"), keyvals...) 
- } - for i := 0; i < len(keyvals); i += 2 { - fmt.Fprintf(buf, "%s=%#v ", keyvals[i], keyvals[i+1]) - } - - tl.Lock() - for _, t := range tl.beHelped { - t.Helper() - } - tl.beHelped = tl.beHelped[:0] - tl.Unlock() - - tl.RLock() - defer tl.RUnlock() - for _, t := range tl.Ts { - t.Helper() - t.Log(buf.String()) - } - - return nil - } -} -func (tl *testLogger) enableLogging(t *testing.T) func() { - tl.Lock() - tl.Ts = append(tl.Ts, t) - tl.beHelped = append(tl.beHelped, t) - tl.Unlock() - - return func() { - tl.Lock() - defer tl.Unlock() - for i, f := range tl.Ts { - if f == t { - tl.Ts[i] = tl.Ts[0] - tl.Ts = tl.Ts[1:] - break - } - } - for i, f := range tl.beHelped { - if f == t { - tl.beHelped[i] = tl.beHelped[0] - tl.beHelped = tl.beHelped[1:] - break - } - } - } -} - -func TestDescribeQuery(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - const qry = "SELECT * FROM user_tab_cols" - cols, err := goracle.DescribeQuery(ctx, testDb, qry) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - t.Log(cols) -} - -func TestParseOnly(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - tbl := "test_not_exist" + tblSuffix - cnt := func() int { - var cnt int64 - if err := testDb.QueryRowContext(ctx, - "SELECT COUNT(0) FROM user_tables WHERE table_name = UPPER('"+tbl+"')").Scan(&cnt); //nolint:gas - err != nil { - t.Fatal(err) - } - return int(cnt) - } - - if cnt() != 0 { - if _, err := testDb.ExecContext(ctx, "DROP TABLE "+tbl); err != nil { - t.Error(err) - } - } - if _, err := testDb.ExecContext(ctx, "CREATE TABLE "+tbl+"(t VARCHAR2(1))", goracle.ParseOnly()); err != nil { - t.Fatal(err) - } - if got := cnt(); got != 1 { - t.Errorf("got %d, wanted 0", got) - } -} - -func TestInputArray(t *testing.T) { - t.Parallel() - defer tl.enableLogging(t)() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - pkg := strings.ToUpper("test_in_pkg" + tblSuffix) - qry := `CREATE OR REPLACE PACKAGE ` + pkg + ` AS -TYPE int_tab_typ IS TABLE OF BINARY_INTEGER INDEX BY PLS_INTEGER; -TYPE num_tab_typ IS TABLE OF NUMBER INDEX BY PLS_INTEGER; -TYPE vc_tab_typ IS TABLE OF VARCHAR2(100) INDEX BY PLS_INTEGER; -TYPE dt_tab_typ IS TABLE OF DATE INDEX BY PLS_INTEGER; ---TYPE lob_tab_typ IS TABLE OF CLOB INDEX BY PLS_INTEGER; - -FUNCTION in_int(p_int IN int_tab_typ) RETURN VARCHAR2; -FUNCTION in_num(p_num IN num_tab_typ) RETURN VARCHAR2; -FUNCTION in_vc(p_vc IN vc_tab_typ) RETURN VARCHAR2; -FUNCTION in_dt(p_dt IN dt_tab_typ) RETURN VARCHAR2; -END; -` - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(err, qry) - } - defer testDb.Exec("DROP PACKAGE " + pkg) - - qry = `CREATE OR REPLACE PACKAGE BODY ` + pkg + ` AS -FUNCTION in_int(p_int IN int_tab_typ) RETURN VARCHAR2 IS - v_idx PLS_INTEGER; - v_res VARCHAR2(32767); -BEGIN - v_idx := p_int.FIRST; - WHILE v_idx IS NOT NULL LOOP - v_res := v_res||v_idx||':'||p_int(v_idx)||CHR(10); - v_idx := p_int.NEXT(v_idx); - END LOOP; - RETURN(v_res); -END; - -FUNCTION in_num(p_num IN num_tab_typ) RETURN VARCHAR2 IS - v_idx PLS_INTEGER; - v_res VARCHAR2(32767); -BEGIN - v_idx := p_num.FIRST; - WHILE v_idx IS NOT NULL LOOP - v_res := v_res||v_idx||':'||p_num(v_idx)||CHR(10); - v_idx := p_num.NEXT(v_idx); - END LOOP; - RETURN(v_res); -END; - -FUNCTION in_vc(p_vc IN vc_tab_typ) RETURN VARCHAR2 IS - v_idx PLS_INTEGER; - v_res VARCHAR2(32767); -BEGIN - v_idx := p_vc.FIRST; - WHILE 
v_idx IS NOT NULL LOOP - v_res := v_res||v_idx||':'||p_vc(v_idx)||CHR(10); - v_idx := p_vc.NEXT(v_idx); - END LOOP; - RETURN(v_res); -END; -FUNCTION in_dt(p_dt IN dt_tab_typ) RETURN VARCHAR2 IS - v_idx PLS_INTEGER; - v_res VARCHAR2(32767); -BEGIN - v_idx := p_dt.FIRST; - WHILE v_idx IS NOT NULL LOOP - v_res := v_res||v_idx||':'||TO_CHAR(p_dt(v_idx), 'YYYY-MM-DD"T"HH24:MI:SS')||CHR(10); - v_idx := p_dt.NEXT(v_idx); - END LOOP; - RETURN(v_res); -END; -END; -` - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(err, qry) - } - compileErrors, err := goracle.GetCompileErrors(testDb, false) - if err != nil { - t.Fatal(err) - } - if len(compileErrors) != 0 { - t.Logf("compile errors: %v", compileErrors) - for _, ce := range compileErrors { - if strings.Contains(ce.Error(), pkg) { - t.Fatal(ce) - } - } - } - - epoch := time.Date(2017, 11, 20, 12, 14, 21, 0, time.Local) - for name, tC := range map[string]struct { - In interface{} - Want string - }{ - //"int_0":{In:[]int32{}, Want:""}, - "num_0": {In: []goracle.Number{}, Want: ""}, - "vc_0": {In: []string{}, Want: ""}, - "dt_0": {In: []time.Time{}, Want: ""}, - - "num_3": { - In: []goracle.Number{"1", "2.72", "-3.14"}, - Want: "1:1\n2:2.72\n3:-3.14\n", - }, - "vc_3": { - In: []string{"a", "", "cCc"}, - Want: "1:a\n2:\n3:cCc\n", - }, - "dt_3": { - In: []time.Time{epoch, epoch.AddDate(0, 0, -1), epoch.AddDate(0, 0, -2)}, - Want: "1:2017-11-20T12:14:21\n2:2017-11-19T12:14:21\n3:2017-11-18T12:14:21\n", - }, - } { - typ := strings.SplitN(name, "_", 2)[0] - qry := "BEGIN :1 := " + pkg + ".in_" + typ + "(:2); END;" - var res string - if _, err := testDb.ExecContext(ctx, qry, goracle.PlSQLArrays, - sql.Out{Dest: &res}, tC.In, - ); err != nil { - t.Error(errors.Wrapf(err, "%q. %s %+v", name, qry, tC.In)) - } - t.Logf("%q. %q", name, res) - if typ == "num" { - res = strings.Replace(res, ",", ".", -1) - } - if res != tC.Want { - t.Errorf("%q. 
got %q, wanted %q.", name, res, tC.Want) - } - } -} - -func TestDbmsOutput(t *testing.T) { - defer tl.enableLogging(t)() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - if err := goracle.EnableDbmsOutput(ctx, conn); err != nil { - t.Fatal(err) - } - - txt := `árvíztűrő tükörfúrógép` - qry := "BEGIN DBMS_OUTPUT.PUT_LINE('" + txt + "'); END;" - if _, err := conn.ExecContext(ctx, qry); err != nil { - t.Fatal(err) - } - - var buf bytes.Buffer - if err := goracle.ReadDbmsOutput(ctx, &buf, conn); err != nil { - t.Error(err) - } - t.Log(buf.String()) - if buf.String() != txt+"\n" { - t.Errorf("got %q, wanted %q", buf.String(), txt+"\n") - } -} - -func TestInOutArray(t *testing.T) { - t.Parallel() - defer tl.enableLogging(t)() - - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second) - defer cancel() - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - - pkg := strings.ToUpper("test_pkg" + tblSuffix) - qry := `CREATE OR REPLACE PACKAGE ` + pkg + ` AS -TYPE int_tab_typ IS TABLE OF BINARY_INTEGER INDEX BY PLS_INTEGER; -TYPE num_tab_typ IS TABLE OF NUMBER INDEX BY PLS_INTEGER; -TYPE vc_tab_typ IS TABLE OF VARCHAR2(100) INDEX BY PLS_INTEGER; -TYPE dt_tab_typ IS TABLE OF DATE INDEX BY PLS_INTEGER; -TYPE lob_tab_typ IS TABLE OF CLOB INDEX BY PLS_INTEGER; - -PROCEDURE inout_int(p_int IN OUT int_tab_typ); -PROCEDURE inout_num(p_num IN OUT num_tab_typ); -PROCEDURE inout_vc(p_vc IN OUT vc_tab_typ); -PROCEDURE inout_dt(p_dt IN OUT dt_tab_typ); -PROCEDURE p2( - --p_int IN OUT int_tab_typ, - p_num IN OUT num_tab_typ, p_vc IN OUT vc_tab_typ, p_dt IN OUT dt_tab_typ); -END; -` - if _, err = conn.ExecContext(ctx, qry); err != nil { - t.Fatal(err, qry) - } - defer testDb.Exec("DROP PACKAGE " + pkg) - - qry = `CREATE OR REPLACE PACKAGE BODY ` + pkg + ` AS -PROCEDURE inout_int(p_int IN OUT int_tab_typ) IS - v_idx PLS_INTEGER; -BEGIN - DBMS_OUTPUT.PUT_LINE('p_int.COUNT='||p_int.COUNT||' FIRST='||p_int.FIRST||' LAST='||p_int.LAST); - v_idx := p_int.FIRST; - WHILE v_idx IS NOT NULL LOOP - p_int(v_idx) := NVL(p_int(v_idx) * 2, 1); - v_idx := p_int.NEXT(v_idx); - END LOOP; - p_int(NVL(p_int.LAST, 0)+1) := p_int.COUNT; -END; - -PROCEDURE inout_num(p_num IN OUT num_tab_typ) IS - v_idx PLS_INTEGER; -BEGIN - DBMS_OUTPUT.PUT_LINE('p_num.COUNT='||p_num.COUNT||' FIRST='||p_num.FIRST||' LAST='||p_num.LAST); - v_idx := p_num.FIRST; - WHILE v_idx IS NOT NULL LOOP - p_num(v_idx) := NVL(p_num(v_idx) / 2, 0.5); - v_idx := p_num.NEXT(v_idx); - END LOOP; - p_num(NVL(p_num.LAST, 0)+1) := p_num.COUNT; -END; - -PROCEDURE inout_vc(p_vc IN OUT vc_tab_typ) IS - v_idx PLS_INTEGER; -BEGIN - DBMS_OUTPUT.PUT_LINE('p_vc.COUNT='||p_vc.COUNT||' FIRST='||p_vc.FIRST||' LAST='||p_vc.LAST); - v_idx := p_vc.FIRST; - WHILE v_idx IS NOT NULL LOOP - p_vc(v_idx) := NVL(p_vc(v_idx) ||' +', '-'); - v_idx := p_vc.NEXT(v_idx); - END LOOP; - p_vc(NVL(p_vc.LAST, 0)+1) := p_vc.COUNT; -END; - -PROCEDURE inout_dt(p_dt IN OUT dt_tab_typ) IS - v_idx PLS_INTEGER; -BEGIN - DBMS_OUTPUT.PUT_LINE('p_dt.COUNT='||p_dt.COUNT||' FIRST='||p_dt.FIRST||' LAST='||p_dt.LAST); - v_idx := p_dt.FIRST; - WHILE v_idx IS NOT NULL LOOP - DBMS_OUTPUT.PUT_LINE(v_idx||'='||TO_CHAR(p_dt(v_idx), 'YYYY-MM-DD HH24:MI:SS')); - p_dt(v_idx) := NVL(p_dt(v_idx) + 1, TRUNC(SYSDATE)-v_idx); - v_idx := p_dt.NEXT(v_idx); - END LOOP; - p_dt(NVL(p_dt.LAST, 0)+1) := TRUNC(SYSDATE); -END; - -PROCEDURE p2( - --p_int 
IN OUT int_tab_typ, - p_num IN OUT num_tab_typ, - p_vc IN OUT vc_tab_typ, - p_dt IN OUT dt_tab_typ ---, p_lob IN OUT lob_tab_typ -) IS -BEGIN - --inout_int(p_int); - inout_num(p_num); - inout_vc(p_vc); - inout_dt(p_dt); - --p_lob := NULL; -END p2; -END; -` - if _, err = testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(err, qry) - } - compileErrors, err := goracle.GetCompileErrors(testDb, false) - if err != nil { - t.Fatal(err) - } - if len(compileErrors) != 0 { - t.Logf("compile errors: %v", compileErrors) - for _, ce := range compileErrors { - if strings.Contains(ce.Error(), pkg) { - t.Fatal(ce) - } - } - } - - intgr := []int32{3, 1, 4, 0, 0}[:3] - intgrWant := []int32{3 * 2, 1 * 2, 4 * 2, 3} - _ = intgrWant - num := []goracle.Number{"3.14", "-2.48", ""}[:2] - numWant := []goracle.Number{"1.57", "-1.24", "2"} - vc := []string{"string", "bring", ""}[:2] - vcWant := []string{"string +", "bring +", "2"} - dt := []time.Time{time.Date(2017, 6, 18, 7, 5, 51, 0, time.Local), {}, {}}[:2] - today := time.Now().Truncate(24 * time.Hour) - today = time.Date(today.Year(), today.Month(), today.Day(), today.Hour(), today.Minute(), today.Second(), 0, time.Local) - dtWant := []time.Time{ - dt[0].Add(24 * time.Hour), - today.Add(-2 * 24 * time.Hour), - today, - } - - goracle.EnableDbmsOutput(ctx, testDb) - - opts := []cmp.Option{ - cmp.Comparer(func(x, y time.Time) bool { - d := x.Sub(y) - if d < 0 { - d *= -1 - } - return d <= 2*time.Hour - }), - } - - for _, tC := range []struct { - Name string - In, Want interface{} - }{ - {Name: "vc", In: vc, Want: vcWant}, - {Name: "num", In: num, Want: numWant}, - {Name: "dt", In: dt, Want: dtWant}, - //{Name: "int", In: intgr, Want: intgrWant}, - {Name: "vc-1", In: vc[:1], Want: []string{"string +", "1"}}, - {Name: "vc-0", In: vc[:0], Want: []string{"0"}}, - } { - tC := tC - t.Run("inout_"+tC.Name, func(t *testing.T) { - t.Logf("%s=%s", tC.Name, tC.In) - nm := strings.SplitN(tC.Name, "-", 2)[0] - qry = "BEGIN " + pkg + ".inout_" + nm + "(:1); END;" - dst := copySlice(tC.In) - if _, err := testDb.ExecContext(ctx, qry, - goracle.PlSQLArrays, - sql.Out{Dest: dst, In: true}, - ); err != nil { - t.Fatalf("%s\n%+v", qry, err) - } - - got := reflect.ValueOf(dst).Elem().Interface() - if cmp.Equal(got, tC.Want, opts...) { - return - } - t.Errorf("%s: %s", tC.Name, cmp.Diff(got, tC.Want)) - var buf bytes.Buffer - if err := goracle.ReadDbmsOutput(ctx, &buf, testDb); err != nil { - t.Error(err) - } - t.Log("OUTPUT:", buf.String()) - }) - } - - //lob := []goracle.Lob{goracle.Lob{IsClob: true, Reader: strings.NewReader("abcdef")}} - t.Run("p2", func(t *testing.T) { - if _, err := testDb.ExecContext(ctx, - "BEGIN "+pkg+".p2(:1, :2, :3); END;", - goracle.PlSQLArrays, - //sql.Out{Dest: &intgr, In: true}, - sql.Out{Dest: &num, In: true}, - sql.Out{Dest: &vc, In: true}, - sql.Out{Dest: &dt, In: true}, - //sql.Out{Dest: &lob, In: true}, - ); err != nil { - t.Fatal(err) - } - t.Logf("int=%#v num=%#v vc=%#v dt=%#v", intgr, num, vc, dt) - //if d := cmp.Diff(intgr, intgrWant); d != "" { - // t.Errorf("int: %s", d) - //} - if d := cmp.Diff(num, numWant); d != "" { - t.Errorf("num: %s", d) - } - if d := cmp.Diff(vc, vcWant); d != "" { - t.Errorf("vc: %s", d) - } - if !cmp.Equal(dt, dtWant, opts...) 
{ - if d := cmp.Diff(dt, dtWant); d != "" { - t.Errorf("dt: %s", d) - } - } - }) -} - -func TestOutParam(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - pkg := strings.ToUpper("test_p1" + tblSuffix) - qry := `CREATE OR REPLACE PROCEDURE -` + pkg + `(p_int IN OUT INTEGER, p_num IN OUT NUMBER, p_vc IN OUT VARCHAR2, p_dt IN OUT DATE, p_lob IN OUT CLOB) -IS -BEGIN - p_int := NVL(p_int * 2, 1); - p_num := NVL(p_num / 2, 0.5); - p_vc := NVL(p_vc ||' +', '-'); - p_dt := NVL(p_dt + 1, SYSDATE); - p_lob := NULL; -END;` - if _, err = testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(err, qry) - } - defer testDb.Exec("DROP PROCEDURE " + pkg) - - qry = "BEGIN " + pkg + "(:1, :2, :3, :4, :5); END;" - stmt, err := testDb.PrepareContext(ctx, qry) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer stmt.Close() - - var intgr int = 3 - num := goracle.Number("3.14") - var vc string = "string" - var dt time.Time = time.Date(2017, 6, 18, 7, 5, 51, 0, time.Local) - var lob goracle.Lob = goracle.Lob{IsClob: true, Reader: strings.NewReader("abcdef")} - if _, err := stmt.ExecContext(ctx, - sql.Out{Dest: &intgr, In: true}, - sql.Out{Dest: &num, In: true}, - sql.Out{Dest: &vc, In: true}, - sql.Out{Dest: &dt, In: true}, - sql.Out{Dest: &lob, In: true}, - ); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - t.Logf("int=%#v num=%#v vc=%#v dt=%#v", intgr, num, vc, dt) - if intgr != 6 { - t.Errorf("int: got %d, wanted %d", intgr, 6) - } - if num != "1.57" { - t.Errorf("num: got %q, wanted %q", num, "1.57") - } - if vc != "string +" { - t.Errorf("vc: got %q, wanted %q", vc, "string +") - } -} - -func TestSelectRefCursor(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - rows, err := testDb.QueryContext(ctx, "SELECT CURSOR(SELECT object_name, object_type, object_id, created FROM all_objects WHERE ROWNUM <= 10) FROM DUAL") - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var intf interface{} - if err := rows.Scan(&intf); err != nil { - t.Error(err) - continue - } - t.Logf("%T", intf) - sub := intf.(driver.RowsColumnTypeScanType) - cols := sub.Columns() - t.Log("Columns", cols) - dests := make([]driver.Value, len(cols)) - for { - if err := sub.Next(dests); err != nil { - if err == io.EOF { - break - } - t.Error(err) - break - } - //fmt.Println(dests) - t.Log(dests) - } - sub.Close() - } -} - -func TestSelectRefCursorWrap(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - rows, err := testDb.QueryContext(ctx, "SELECT CURSOR(SELECT object_name, object_type, object_id, created FROM all_objects WHERE ROWNUM <= 10) FROM DUAL") - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var intf interface{} - if err := rows.Scan(&intf); err != nil { - t.Error(err) - continue - } - t.Logf("%T", intf) - sub, err := goracle.WrapRows(ctx, testDb, intf.(driver.Rows)) - if err != nil { - t.Fatal(err) - } - t.Log("Sub", sub) - for sub.Next() { - var oName, oType, oID string - var created time.Time - if err := sub.Scan(&oName, &oType, &oID, &created); err != nil { - t.Error(err) - break - } - t.Log(oName, oType, oID, created) - } - sub.Close() - } -} - -func TestExecuteMany(t *testing.T) { - t.Parallel() - defer tl.enableLogging(t)() - - ctx, cancel := 
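TestOutParam above is the canonical scalar IN OUT example; here is the essential call shape under the same assumptions (placeholder DSN, and a hypothetical two-parameter variant of the test_p1 procedure created above, used only to keep the sketch short):

package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "gopkg.in/goracle.v2" // assumed driver import
)

func main() {
	db, err := sql.Open("goracle", "user/passw@host:1521/svc") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// sql.Out with In:true sends the current value in and overwrites it on
	// return, which is exactly how the IN OUT parameters above behave.
	intgr, vc := 3, "string"
	if _, err = db.Exec(
		"BEGIN test_p1(:1, :2); END;", // hypothetical two-arg variant for brevity
		sql.Out{Dest: &intgr, In: true},
		sql.Out{Dest: &vc, In: true},
	); err != nil {
		log.Fatal(err)
	}
	fmt.Println(intgr, vc) // with the body above: 6 "string +"
}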
context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - tbl := "test_em" + tblSuffix - conn.ExecContext(ctx, "DROP TABLE "+tbl) - conn.ExecContext(ctx, "CREATE TABLE "+tbl+" (f_id INTEGER, f_int INTEGER, f_num NUMBER, f_num_6 NUMBER(6), F_num_5_2 NUMBER(5,2), f_vc VARCHAR2(30), F_dt DATE)") - defer testDb.Exec("DROP TABLE " + tbl) - - const num = 1000 - ints := make([]int, num) - nums := make([]goracle.Number, num) - int32s := make([]int32, num) - floats := make([]float64, num) - strs := make([]string, num) - dates := make([]time.Time, num) - // This is instead of now: a nice moment in time right before the summer time shift - now := time.Date(2017, 10, 29, 1, 27, 53, 0, time.Local).Truncate(time.Second) - ids := make([]int, num) - for i := range nums { - ids[i] = i - ints[i] = i << 1 - nums[i] = goracle.Number(strconv.Itoa(i)) - int32s[i] = int32(i) - floats[i] = float64(i) / float64(3.14) - strs[i] = fmt.Sprintf("%x", i) - dates[i] = now.Add(-time.Duration(i) * time.Hour) - } - for i, tc := range []struct { - Name string - Value interface{} - }{ - {"f_int", ints}, - {"f_num", nums}, - {"f_num_6", int32s}, - {"f_num_5_2", floats}, - {"f_vc", strs}, - {"f_dt", dates}, - } { - res, execErr := conn.ExecContext(ctx, - "INSERT INTO "+tbl+" ("+tc.Name+") VALUES (:1)", //nolint:gas - tc.Value) - if execErr != nil { - t.Fatalf("%d. INSERT INTO "+tbl+" (%q) VALUES (%+v): %#v", //nolint:gas - i, tc.Name, tc.Value, execErr) - } - ra, raErr := res.RowsAffected() - if raErr != nil { - t.Error(raErr) - } else if ra != num { - t.Errorf("%d. %q: wanted %d rows, got %d", i, tc.Name, num, ra) - } - } - - conn.ExecContext(ctx, "TRUNCATE TABLE "+tbl+"") - - res, err := conn.ExecContext(ctx, - `INSERT INTO `+tbl+ //nolint:gas - ` (f_id, f_int, f_num, f_num_6, F_num_5_2, F_vc, F_dt) - VALUES - (:1, :2, :3, :4, :5, :6, :7)`, - ids, ints, nums, int32s, floats, strs, dates) - if err != nil { - t.Fatalf("%#v", err) - } - ra, err := res.RowsAffected() - if err != nil { - t.Error(err) - } else if ra != num { - t.Errorf("wanted %d rows, got %d", num, ra) - } - - rows, err := conn.QueryContext(ctx, - "SELECT * FROM "+tbl+" ORDER BY F_id", //nolint:gas - ) - if err != nil { - t.Fatal(err) - } - defer rows.Close() - i := 0 - for rows.Next() { - var id, Int int - var num string - var vc string - var num6 int32 - var num52 float64 - var dt time.Time - if err := rows.Scan(&id, &Int, &num, &num6, &num52, &vc, &dt); err != nil { - t.Fatal(err) - } - if id != i { - t.Fatalf("ID got %d, wanted %d.", id, i) - } - if Int != ints[i] { - t.Errorf("%d. INT got %d, wanted %d.", i, Int, ints[i]) - } - if num != string(nums[i]) { - t.Errorf("%d. NUM got %q, wanted %q.", i, num, nums[i]) - } - if num6 != int32s[i] { - t.Errorf("%d. NUM_6 got %v, wanted %v.", i, num6, int32s[i]) - } - rounded := float64(int64(floats[i]/0.005+0.5)) * 0.005 - if math.Abs(num52-rounded) > 0.05 { - t.Errorf("%d. NUM_5_2 got %v, wanted %v.", i, num52, rounded) - } - if vc != strs[i] { - t.Errorf("%d. VC got %q, wanted %q.", i, vc, strs[i]) - } - t.Logf("%d. dt=%v", i, dt) - if dt != dates[i] { - t.Errorf("%d. 
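The batched INSERT at the heart of TestExecuteMany, sketched standalone. Without the goracle.PlSQLArrays option, slice arguments mean array binding: one Exec, one round trip, len(slice) rows. The DSN is a placeholder and the test_em column subset is assumed from the CREATE TABLE above:

package main

import (
	"database/sql"
	"log"
	"time"

	_ "gopkg.in/goracle.v2" // assumed driver import
)

func main() {
	db, err := sql.Open("goracle", "user/passw@host:1521/svc") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Equal-length slices are bound column-wise; the driver executes the
	// statement once for all rows.
	ids := []int{1, 2, 3}
	names := []string{"a", "b", "c"}
	stamps := []time.Time{time.Now(), time.Now(), time.Now()}
	res, err := db.Exec(
		"INSERT INTO test_em (f_id, f_vc, f_dt) VALUES (:1, :2, :3)",
		ids, names, stamps,
	)
	if err != nil {
		log.Fatal(err)
	}
	if n, err := res.RowsAffected(); err == nil && n != int64(len(ids)) {
		log.Fatalf("wanted %d rows, got %d", len(ids), n)
	}
}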
got DT %v, wanted %v.", i, dt, dates[i]) - } - i++ - } -} -func TestReadWriteLob(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - tbl := "test_lob" + tblSuffix - conn.ExecContext(ctx, "DROP TABLE "+tbl) - conn.ExecContext(ctx, - "CREATE TABLE "+tbl+" (f_id NUMBER(6), f_blob BLOB, f_clob CLOB)", //nolint:gas - ) - defer testDb.Exec( - "DROP TABLE " + tbl, //nolint:gas - ) - - stmt, err := conn.PrepareContext(ctx, - "INSERT INTO "+tbl+" (F_id, f_blob, F_clob) VALUES (:1, :2, :3)", //nolint:gas - ) - if err != nil { - t.Fatal(err) - } - defer stmt.Close() - - for tN, tC := range []struct { - Bytes []byte - String string - }{ - {[]byte{0, 1, 2, 3, 4, 5}, "12345"}, - } { - - if _, err = stmt.Exec(tN*2, tC.Bytes, tC.String); err != nil { - t.Errorf("%d/1. (%v, %q): %v", tN, tC.Bytes, tC.String, err) - continue - } - if _, err = stmt.Exec(tN*2+1, - goracle.Lob{Reader: bytes.NewReader(tC.Bytes)}, - goracle.Lob{Reader: strings.NewReader(tC.String), IsClob: true}, - ); err != nil { - t.Errorf("%d/2. (%v, %q): %v", tN, tC.Bytes, tC.String, err) - } - - var rows *sql.Rows - rows, err = conn.QueryContext(ctx, - "SELECT F_id, F_blob, F_clob FROM "+tbl+" WHERE F_id IN (:1, :2)", //nolint:gas - goracle.LobAsReader(), - 2*tN, 2*tN+1) - if err != nil { - t.Errorf("%d/3. %v", tN, err) - continue - } - for rows.Next() { - var id, blob, clob interface{} - if err = rows.Scan(&id, &blob, &clob); err != nil { - rows.Close() - t.Errorf("%d/3. scan: %v", tN, err) - continue - } - t.Logf("%d. blob=%+v clob=%+v", id, blob, clob) - if clob, ok := clob.(*goracle.Lob); !ok { - t.Errorf("%d. %T is not LOB", id, blob) - } else { - var got []byte - got, err = ioutil.ReadAll(clob) - if err != nil { - t.Errorf("%d. %v", id, err) - } else if got := string(got); got != tC.String { - t.Errorf("%d. got %q for CLOB, wanted %q", id, got, tC.String) - } - } - if blob, ok := blob.(*goracle.Lob); !ok { - t.Errorf("%d. %T is not LOB", id, blob) - } else { - var got []byte - got, err = ioutil.ReadAll(blob) - if err != nil { - t.Errorf("%d. %v", id, err) - } else if !bytes.Equal(got, tC.Bytes) { - t.Errorf("%d. 
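A hedged sketch of the LOB streaming that TestReadWriteLob drives above: goracle.Lob wraps any io.Reader for writing, and the goracle.LobAsReader() option makes LOB columns scan as *goracle.Lob for reading. The DSN is a placeholder, and passing the option to QueryRow is assumed to work like the Query calls above:

package main

import (
	"database/sql"
	"fmt"
	"io/ioutil"
	"log"
	"strings"

	goracle "gopkg.in/goracle.v2" // assumed import path
)

func main() {
	db, err := sql.Open("goracle", "user/passw@host:1521/svc") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Streaming a CLOB in: IsClob selects CLOB over BLOB for the wrapped reader.
	if _, err = db.Exec(
		"INSERT INTO test_lob (f_id, f_clob) VALUES (:1, :2)", // table as created above
		1, goracle.Lob{Reader: strings.NewReader("árvíztűrő"), IsClob: true},
	); err != nil {
		log.Fatal(err)
	}

	// Streaming it back out: scan into interface{} and assert to *goracle.Lob.
	var v interface{}
	if err = db.QueryRow(
		"SELECT f_clob FROM test_lob WHERE f_id = :1", goracle.LobAsReader(), 1,
	).Scan(&v); err != nil {
		log.Fatal(err)
	}
	b, err := ioutil.ReadAll(v.(*goracle.Lob))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}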
got %v for BLOB, wanted %v", id, got, tC.Bytes) - } - } - } - rows.Close() - } - - rows, err := conn.QueryContext(ctx, - "SELECT F_clob FROM "+tbl+"", //nolint:gas - goracle.ClobAsString()) - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for rows.Next() { - var s string - if err = rows.Scan(&s); err != nil { - t.Error(err) - } - t.Logf("clobAsString: %q", s) - } - - qry := "SELECT CURSOR(SELECT f_id, f_clob FROM " + tbl + " WHERE ROWNUM <= 10) FROM DUAL" - rows, err = testDb.QueryContext(ctx, qry, goracle.ClobAsString()) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer rows.Close() - for rows.Next() { - var intf interface{} - if err := rows.Scan(&intf); err != nil { - t.Error(err) - continue - } - t.Logf("%T", intf) - sub := intf.(driver.RowsColumnTypeScanType) - cols := sub.Columns() - t.Log("Columns", cols) - dests := make([]driver.Value, len(cols)) - for { - if err := sub.Next(dests); err != nil { - if err == io.EOF { - break - } - t.Error(err) - break - } - //fmt.Println(dests) - t.Log(dests) - } - sub.Close() - } - -} - -func copySlice(orig interface{}) interface{} { - ro := reflect.ValueOf(orig) - rc := reflect.New(reflect.TypeOf(orig)).Elem() // *[]s - rc.Set(reflect.MakeSlice(ro.Type(), ro.Len(), ro.Cap())) - for i := 0; i < ro.Len(); i++ { - rc.Index(i).Set(ro.Index(i)) - } - return rc.Addr().Interface() -} - -func TestObject(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - pkg := strings.ToUpper("test_pkg_obj" + tblSuffix) - qry := `CREATE OR REPLACE PACKAGE ` + pkg + ` IS - TYPE int_tab_typ IS TABLE OF PLS_INTEGER INDEX BY PLS_INTEGER; - TYPE rec_typ IS RECORD (int PLS_INTEGER, num NUMBER, vc VARCHAR2(1000), c CHAR(1000), dt DATE); - TYPE tab_typ IS TABLE OF rec_typ INDEX BY PLS_INTEGER; -END;` - if _, err = conn.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer testDb.Exec("DROP PACKAGE " + pkg) - - tx, err := conn.BeginTx(ctx, nil) - if err != nil { - t.Fatal(err) - } - defer tx.Rollback() - - defer tl.enableLogging(t)() - ot, err := goracle.GetObjectType(tx, pkg+".int_tab_typ") - if err != nil { - if clientVersion.Version >= 12 && serverVersion.Version >= 12 { - t.Fatal(fmt.Sprintf("%+v", err)) - } - t.Log(err) - t.Skip("client or server version < 12") - } - t.Log(ot) -} - -func TestOpenClose(t *testing.T) { - t.Parallel() - cs, err := goracle.ParseConnString(testConStr) - if err != nil { - t.Fatal(err) - } - cs.MinSessions, cs.MaxSessions = 1, 5 - t.Log(cs.String()) - db, err := sql.Open("goracle", cs.StringWithPassword()) - if err != nil { - t.Fatal(err) - } - defer db.Close() - db.SetMaxIdleConns(1) - db.SetMaxOpenConns(3) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - const module = "goracle.v2.test-OpenClose " - stmt, err := db.PrepareContext(ctx, "SELECT COUNT(0) FROM v$session WHERE module LIKE '"+module+"%'") - if err != nil { - if strings.Contains(err.Error(), "ORA-12516:") { - t.Skip(err) - } - t.Fatal(err) - } - defer stmt.Close() - sessCount := func() (int, error) { - var n int - qErr := stmt.QueryRowContext(ctx).Scan(&n) - return n, qErr - } - n, err := sessCount() - if err != nil { - t.Skip(err) - } - if n > 0 { - t.Logf("sessCount=%d at start!", n) - } - var tt goracle.TraceTag - for i := 0; i < 10; i++ { - tt.Module = fmt.Sprintf("%s%d", module, 2*i) - ctx = goracle.ContextWithTraceTag(ctx, tt) - tx1, err1 := 
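When the whole CLOB is small enough to hold in memory, the goracle.ClobAsString() option used above is simpler than a LOB reader; a fragment, assuming the db handle and imports of the previous sketches:

	// ClobAsString makes CLOB columns scan directly into Go strings.
	rows, err := db.Query("SELECT f_clob FROM test_lob", goracle.ClobAsString())
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()
	for rows.Next() {
		var s string
		if err := rows.Scan(&s); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("clob: %q\n", s)
	}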
db.BeginTx(ctx, nil) - if err1 != nil { - t.Fatal(err1) - } - tt.Module = fmt.Sprintf("%s%d", module, 2*i+1) - ctx = goracle.ContextWithTraceTag(ctx, tt) - tx2, err2 := db.BeginTx(ctx, nil) - if err2 != nil { - if strings.Contains(err2.Error(), "ORA-12516:") { - tx1.Rollback() - break - } - t.Fatal(err2) - } - if n, err = sessCount(); err != nil { - t.Log(err) - } else if n == 0 { - t.Error("sessCount=0, want at least 2") - } else { - t.Log(n) - } - tx1.Rollback() - tx2.Rollback() - } - if n, err = sessCount(); err != nil { - t.Log(err) - } else if n > 4 { - t.Error("sessCount:", n) - } -} - -func TestOpenBadMemory(t *testing.T) { - var mem runtime.MemStats - runtime.GC() - runtime.ReadMemStats(&mem) - t.Log("Allocated 0:", mem.Alloc) - zero := mem.Alloc - for i := 0; i < 100; i++ { - badConStr := strings.Replace(testConStr, "@", fmt.Sprintf("BAD%dBAD@", i), 1) - db, err := sql.Open("goracle", badConStr) - if err != nil { - t.Fatalf("bad connection string %q didn't produce error!", badConStr) - } - db.Close() - runtime.GC() - runtime.ReadMemStats(&mem) - t.Logf("Allocated %d: %d", i+1, mem.Alloc) - } - d := mem.Alloc - zero - t.Logf("atlast: %d", d) - if d > 64<<10 { - t.Errorf("Consumed more than 64KiB of memory: %d", d) - } -} - -func TestSelectFloat(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - tbl := "test_numbers" + tblSuffix - qry := `CREATE TABLE ` + tbl + ` ( - INT_COL NUMBER, - FLOAT_COL NUMBER, - EMPTY_INT_COL NUMBER -)` - testDb.Exec("DROP TABLE " + tbl) - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer testDb.Exec("DROP TABLE " + tbl) - - const INT, FLOAT = 1234567, 4.5 - qry = `INSERT INTO ` + tbl + //nolint:gas - ` (INT_COL, FLOAT_COL, EMPTY_INT_COL) - VALUES (1234567, 45/10, NULL)` - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - - qry = "SELECT int_col, float_col, empty_int_col FROM " + tbl //nolint:gas - type numbers struct { - Int int - Int64 int64 - Float float64 - NInt sql.NullInt64 - String string - NString sql.NullString - Number goracle.Number - } - var n numbers - var i1, i2, i3 interface{} - for tName, tC := range map[string]struct { - Dest [3]interface{} - Want numbers - }{ - "int,float,nstring": { - Dest: [3]interface{}{&n.Int, &n.Float, &n.NString}, - Want: numbers{Int: INT, Float: FLOAT}, - }, - "inf,float,Number": { - Dest: [3]interface{}{&n.Int, &n.Float, &n.Number}, - Want: numbers{Int: INT, Float: FLOAT}, - }, - "int64,float,nullInt": { - Dest: [3]interface{}{&n.Int64, &n.Float, &n.NInt}, - Want: numbers{Int64: INT, Float: FLOAT}, - }, - "intf,intf,intf": { - Dest: [3]interface{}{&i1, &i2, &i3}, - Want: numbers{Int64: INT, Float: FLOAT}, - }, - "int,float,string": { - Dest: [3]interface{}{&n.Int, &n.Float, &n.String}, - Want: numbers{Int: INT, Float: FLOAT}, - }, - } { - i1, i2, i3 = nil, nil, nil - n = numbers{} - F := func() error { - return errors.Wrap( - testDb.QueryRowContext(ctx, qry).Scan(tC.Dest[0], tC.Dest[1], tC.Dest[2]), - qry) - } - if err := F(); err != nil { - if strings.HasSuffix(err.Error(), "unsupported Scan, storing driver.Value type into type *string") { - t.Log("WARNING:", err) - continue - } - noLogging := tl.enableLogging(t) - err = F() - t.Errorf("%q: %v", tName, errors.Wrap(err, qry)) - noLogging() - continue - } - if tName == "intf,intf,intf" { - t.Logf("%q: %#v, %#v, %#v", tName, i1, i2, i3) - continue - } - t.Logf("%q: %+v", tName, n) - if n != tC.Want { 
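TestOpenClose above tunes both pooling layers; a sketch of that setup in isolation (illustrative pool sizes, placeholder credentials):

package main

import (
	"database/sql"
	"log"

	goracle "gopkg.in/goracle.v2" // assumed import path
)

func main() {
	// ParseConnString exposes the OCI session-pool knobs the deleted test
	// tweaks; the numbers here are illustrative only.
	cs, err := goracle.ParseConnString("user/passw@host:1521/svc") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	cs.MinSessions, cs.MaxSessions = 1, 5

	db, err := sql.Open("goracle", cs.StringWithPassword())
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// database/sql pooling sits on top of the OCI pool; keep the two in step.
	db.SetMaxIdleConns(1)
	db.SetMaxOpenConns(3)
}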
- t.Errorf("%q:\ngot\t%+v,\nwanted\t%+v.", tName, n, tC.Want) - } - } -} - -func TestNumInputs(t *testing.T) { - t.Parallel() - var a, b string - if err := testDb.QueryRow("SELECT :1, :2 FROM DUAL", 'a', 'b').Scan(&a, &b); err != nil { - t.Errorf("two inputs: %+v", err) - } - if err := testDb.QueryRow("SELECT :a, :b FROM DUAL", 'a', 'b').Scan(&a, &b); err != nil { - t.Errorf("two named inputs: %+v", err) - } - if err := testDb.QueryRow("SELECT :a, :a FROM DUAL", sql.Named("a", a)).Scan(&a, &b); err != nil { - t.Errorf("named inputs: %+v", err) - } -} - -func TestPtrArg(t *testing.T) { - t.Parallel() - s := "dog" - rows, err := testDb.Query("SELECT * FROM user_objects WHERE object_name=:1", &s) - if err != nil { - t.Fatal(err) - } - rows.Close() -} -func TestORA1000(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - rows, err := testDb.QueryContext(ctx, "SELECT * FROM user_objects") - if err != nil { - t.Fatal(err) - } - defer rows.Close() - for i := 0; i < 1000; i++ { - var n int64 - if err := testDb.QueryRowContext(ctx, - "SELECT /*"+strconv.Itoa(i)+"*/ 1 FROM DUAL", //nolint:gas - ).Scan(&n); err != nil { - t.Fatal(err) - } - } -} - -func TestRanaOraIssue244(t *testing.T) { - tableName := "test_ora_issue_244" + tblSuffix - qry := "CREATE TABLE " + tableName + " (FUND_ACCOUNT VARCHAR2(18) NOT NULL, FUND_CODE VARCHAR2(6) NOT NULL, BUSINESS_FLAG NUMBER(10) NOT NULL, MONEY_TYPE VARCHAR2(3) NOT NULL)" - testDb.Exec("DROP TABLE " + tableName) - if _, err := testDb.Exec(qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - var max int - ctx, cancel := context.WithCancel(context.Background()) - txs := make([]*sql.Tx, 0, maxSessions) - for max = 0; max < maxSessions; max++ { - tx, err := testDb.BeginTx(ctx, nil) - if err != nil { - max-- - break - } - txs = append(txs, tx) - } - cancel() - for _, tx := range txs { - tx.Rollback() - } - t.Logf("maxSessions=%d max=%d", maxSessions, max) - - defer testDb.Exec("DROP TABLE " + tableName) - const bf = "143" - const sc = "270004" - qry = "INSERT INTO " + tableName + " (fund_account, fund_code, business_flag, money_type) VALUES (:1, :2, :3, :4)" //nolint:gas - stmt, err := testDb.Prepare(qry) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - fas := []string{"14900666", "1868091", "1898964", "14900397"} - for _, v := range fas { - if _, err := stmt.Exec(v, sc, bf, "0"); err != nil { - stmt.Close() - t.Fatal(err) - } - } - stmt.Close() - - dur := time.Minute / 2 - if testing.Short() { - dur = 10 * time.Second - } - ctx, cancel = context.WithTimeout(context.Background(), dur) - defer cancel() - - qry = `SELECT fund_account, money_type FROM ` + tableName + ` WHERE business_flag = :1 AND fund_code = :2 AND fund_account = :3` //nolint:gas - grp, ctx := errgroup.WithContext(ctx) - for i := 0; i < max; i++ { - index := rand.Intn(len(fas)) - i := i - grp.Go(func() error { - tx, err := testDb.BeginTx(ctx, &sql.TxOptions{ReadOnly: true}) - if err != nil { - return err - } - defer tx.Rollback() - - stmt, err := tx.Prepare(qry) - if err != nil { - return errors.Wrapf(err, "%d.Prepare %q", i, err) - } - defer stmt.Close() - - for { - index = (index + 1) % len(fas) - rows, err := stmt.Query(bf, sc, fas[index]) - if err != nil { - return errors.Wrapf(err, "%d.tx=%p stmt=%p %q", i, tx, stmt, qry) - } - - for rows.Next() { - if err = ctx.Err(); err != nil { - rows.Close() - return err - } - var acc, mt string - if err = rows.Scan(&acc, &mt); err != nil { - rows.Close() - return err - } - - if acc != 
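The point TestSelectFloat makes above, compressed: one Oracle NUMBER can scan into whichever Go type fits the use, with goracle.Number preserving exact decimals and sql.Null* handling NULLs. A fragment, db handle assumed in scope:

	var (
		i  int64
		f  float64
		n  goracle.Number // exact decimal value, carried as a string
		ni sql.NullInt64  // for columns that may be NULL
	)
	if err := db.QueryRow("SELECT 1234567 FROM DUAL").Scan(&i); err != nil {
		log.Fatal(err)
	}
	if err := db.QueryRow("SELECT 45/10 FROM DUAL").Scan(&f); err != nil {
		log.Fatal(err)
	}
	if err := db.QueryRow("SELECT 45/10 FROM DUAL").Scan(&n); err != nil {
		log.Fatal(err)
	}
	if err := db.QueryRow("SELECT NULL FROM DUAL").Scan(&ni); err != nil {
		log.Fatal(err)
	}
	fmt.Println(i, f, n, ni.Valid) // 1234567 4.5 4.5 false (Number rendering may vary)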
fas[index] { - rows.Close() - return errors.Errorf("got acc %q, wanted %q", acc, fas[index]) - } - if mt != "0" { - rows.Close() - return errors.Errorf("got mt %q, wanted 0", mt) - } - } - if err = rows.Err(); err != nil { - return err - } - } - }) - } - if err := grp.Wait(); err != nil && errors.Cause(err) != context.DeadlineExceeded { - errS := errors.Cause(err).Error() - switch errS { - case "sql: statement is closed", - "sql: transaction has already been committed or rolled back": - return - } - if strings.Contains(errS, "ORA-12516:") { - t.Log(err) - } else { - t.Error(err) - } - } -} - -func TestNumberMarshal(t *testing.T) { - t.Parallel() - var n goracle.Number - if err := testDb.QueryRow("SELECT 6000370006565900000073 FROM DUAL").Scan(&n); err != nil { - t.Fatal(err) - } - t.Log(n.String()) - b, err := n.MarshalJSON() - t.Logf("%s", b) - if err != nil { - t.Fatal(err) - } - if bytes.Contains(b, []byte{'e'}) { - t.Errorf("got %q, wanted without scientific notation", b) - } - if b, err = json.Marshal(struct { - N goracle.Number - }{N: n}, - ); err != nil { - t.Fatal(err) - } - t.Logf("%s", b) -} - -func TestExecHang(t *testing.T) { - defer tl.enableLogging(t)() - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - done := make(chan error, 3) - var wg sync.WaitGroup - for i := 0; i < cap(done); i++ { - wg.Add(1) - i := i - go func() { - defer wg.Done() - if err := ctx.Err(); err != nil { - done <- err - return - } - _, err := testDb.ExecContext(ctx, "DECLARE v_deadline DATE := SYSDATE + 3/24/3600; v_db PLS_INTEGER; BEGIN LOOP SELECT COUNT(0) INTO v_db FROM cat; EXIT WHEN SYSDATE >= v_deadline; END LOOP; END;") - if err == nil { - done <- errors.Errorf("%d. wanted timeout got %v", i, err) - } - t.Logf("%d. 
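The concurrency skeleton of the ORA-issue-244 regression test above, with the domain noise stripped out (worker count and query are illustrative):

package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	"golang.org/x/sync/errgroup"
	_ "gopkg.in/goracle.v2" // assumed driver import
)

func main() {
	db, err := sql.Open("goracle", "user/passw@host:1521/svc") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Same skeleton as the regression test above: each worker owns its own
	// transaction and prepared statement; the errgroup context stops everyone
	// as soon as one worker fails.
	grp, ctx := errgroup.WithContext(ctx)
	for i := 0; i < 4; i++ {
		grp.Go(func() error {
			tx, err := db.BeginTx(ctx, &sql.TxOptions{ReadOnly: true})
			if err != nil {
				return err
			}
			defer tx.Rollback()
			stmt, err := tx.PrepareContext(ctx, "SELECT COUNT(0) FROM DUAL")
			if err != nil {
				return err
			}
			defer stmt.Close()
			var n int64
			return stmt.QueryRowContext(ctx).Scan(&n)
		})
	}
	if err := grp.Wait(); err != nil {
		log.Fatal(err)
	}
}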
%v", i, err) - }() - } - wg.Wait() - close(done) - if err := <-done; err != nil { - t.Fatal(err) - } - -} - -func TestNumberNull(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), time.Minute) - defer cancel() - testDb.Exec("DROP TABLE number_test") - qry := `CREATE TABLE number_test ( - caseNum NUMBER(3), - precisionNum NUMBER(5), - precScaleNum NUMBER(5, 0), - normalNum NUMBER - )` - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer testDb.Exec("DROP TABLE number_test") - - qry = ` - INSERT ALL - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (1, 4, 65, 123) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (2, NULL, NULL, NULL) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (3, NULL, NULL, NULL) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (4, NULL, 42, NULL) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (5, NULL, NULL, 31) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (6, 3, 3, 4) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (7, NULL, NULL, NULL) - INTO number_test (caseNum, precisionNum, precScaleNum, normalNum) VALUES (8, 6, 9, 7) - SELECT 1 FROM DUAL` - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - qry = "SELECT precisionNum, precScaleNum, normalNum FROM number_test ORDER BY caseNum" - rows, err := testDb.Query(qry) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer rows.Close() - - for rows.Next() { - var precisionNum, recScaleNum, normalNum sql.NullInt64 - if err = rows.Scan(&precisionNum, &recScaleNum, &normalNum); err != nil { - t.Fatal(err) - } - - t.Log(precisionNum, recScaleNum, normalNum) - - if precisionNum.Int64 == 0 && precisionNum.Valid { - t.Errorf("precisionNum=%v, wanted {0 false}", precisionNum) - } - if recScaleNum.Int64 == 0 && recScaleNum.Valid { - t.Errorf("recScaleNum=%v, wanted {0 false}", recScaleNum) - } - } - - rows, err = testDb.Query(qry) - if err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer rows.Close() - - for rows.Next() { - var precisionNumStr, recScaleNumStr, normalNumStr sql.NullString - if err = rows.Scan(&precisionNumStr, &recScaleNumStr, &normalNumStr); err != nil { - t.Fatal(err) - } - t.Log(precisionNumStr, recScaleNumStr, normalNumStr) - } -} - -func TestNullFloat(t *testing.T) { - t.Parallel() - testDb.Exec("DROP TABLE test_char") - if _, err := testDb.Exec(`CREATE TABLE test_char ( - CHARS VARCHAR2(10 BYTE), - FLOATS NUMBER(10, 2) - )`); err != nil { - t.Fatal(err) - } - defer testDb.Exec("DROP TABLE test_char") - - tx, err := testDb.Begin() - if err != nil { - t.Fatal(err) - } - defer tx.Rollback() - - _, err = tx.Exec( - "INSERT INTO test_char VALUES(:CHARS, :FLOATS)", - []string{"dog", "", "cat"}, - /*[]sql.NullString{sql.NullString{"dog", true}, - sql.NullString{"", false}, - sql.NullString{"cat", true}},*/ - []sql.NullFloat64{ - {Float64: 3.14, Valid: true}, - {Float64: 12.36, Valid: true}, - {Float64: 0.0, Valid: false}, - }, - ) - if err != nil { - t.Error(err) - } -} - -func TestColumnSize(t *testing.T) { - t.Parallel() - testDb.Exec("DROP TABLE test_column_size") - if _, err := testDb.Exec(`CREATE TABLE test_column_size ( - vc20b VARCHAR2(20 BYTE), - vc1b VARCHAR2(1 BYTE), - nvc20 NVARCHAR2(20), - nvc1 NVARCHAR2(1), - vc20c VARCHAR2(20 CHAR), - vc1c VARCHAR2(1 CHAR) - )`); err != nil 
{ - t.Fatal(err) - } - defer testDb.Exec("DROP TABLE test_column_size") - - r, err := testDb.Query("SELECT * FROM test_column_size") - if err != nil { - t.Fatal(err) - } - rts, err := r.ColumnTypes() - if err != nil { - t.Fatal(err) - } - for _, col := range rts { - l, _ := col.Length() - - t.Logf("Column %q has length %v", col.Name(), l) - } -} - -func TestReturning(t *testing.T) { - t.Parallel() - defer tl.enableLogging(t)() - testDb.Exec("DROP TABLE test_returning") - if _, err := testDb.Exec("CREATE TABLE test_returning (a VARCHAR2(20))"); err != nil { - t.Fatal(err) - } - defer testDb.Exec("DROP TABLE test_returning") - - want := "abraca dabra" - var got string - if _, err := testDb.Exec( - `INSERT INTO test_returning (a) VALUES (UPPER(:1)) RETURNING a INTO :2`, - want, sql.Out{Dest: &got}, - ); err != nil { - t.Fatal(err) - } - want = strings.ToUpper(want) - if want != got { - t.Errorf("got %q, wanted %q", got, want) - } - - if _, err := testDb.Exec( - `UPDATE test_returning SET a = '1' WHERE 1=0 RETURNING a /*LASTINSERTID*/ INTO :1`, - sql.Out{Dest: &got}, - ); err != nil { - t.Fatal(err) - } - t.Logf("RETURNING (zero set): %v", got) -} - -func TestMaxOpenCursors(t *testing.T) { - var openCursors sql.NullInt64 - const qry1 = "SELECT p.value FROM v$parameter p WHERE p.name = 'open_cursors'" - if err := testDb.QueryRow(qry1).Scan(&openCursors); err != nil { - t.Log(errors.Wrap(err, qry1)) - } else { - t.Logf("open_cursors=%v", openCursors) - } - n := int(openCursors.Int64) - if n <= 0 { - n = 1000 - } - n *= 2 - for i := 0; i < n; i++ { - var cnt int64 - const qry2 = "DECLARE cnt PLS_INTEGER; BEGIN SELECT COUNT(0) INTO cnt FROM DUAL; :1 := cnt; END;" - if _, err := testDb.Exec(qry2, sql.Out{Dest: &cnt}); err != nil { - t.Fatal(errors.Wrapf(err, "%d. 
%s", i, qry2)) - } - } -} - -func TestRO(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - tx, err := testDb.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSerializable, ReadOnly: true}) - if err != nil { - t.Fatal(err) - } - defer tx.Rollback() - if _, err = tx.QueryContext(ctx, "SELECT 1 FROM DUAL"); err != nil { - t.Fatal(err) - } - if _, err = tx.ExecContext(ctx, "CREATE TABLE test_table (i INTEGER)"); err == nil { - t.Log("RO allows CREATE TABLE ?") - } - if err = tx.Commit(); err != nil { - t.Fatal(err) - } -} - -func TestNullIntoNum(t *testing.T) { - t.Parallel() - testDb.Exec("DROP TABLE test_null_num") - qry := "CREATE TABLE test_null_num (i NUMBER(3))" - if _, err := testDb.Exec(qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer testDb.Exec("DROP TABLE test_null_num") - - qry = "INSERT INTO test_null_num (i) VALUES (:1)" - var i *int - if _, err := testDb.Exec(qry, i); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } -} - -func TestPing(t *testing.T) { - t.Parallel() - badDB, err := sql.Open("goracle", "bad/passw@1.1.1.1") - if err != nil { - t.Fatal(err) - } - - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - dl, _ := ctx.Deadline() - err = badDB.PingContext(ctx) - ok := dl.After(time.Now()) - if err != nil { - t.Log(err) - } else { - t.Log("ping succeeded") - if !ok { - t.Error("ping succeeded after deadline!") - } - } -} - -func TestNoConnectionPooling(t *testing.T) { - t.Parallel() - db, err := sql.Open("goracle", - strings.Replace( - strings.Replace(testConStr, "POOLED", goracle.NoConnectionPoolingConnectionClass, 1), - "standaloneConnection=0", "standaloneConnection=1", 1, - ), - ) - if err != nil { - t.Fatal(err) - } - db.Close() -} - -func TestExecTimeout(t *testing.T) { - t.Parallel() - defer tl.enableLogging(t)() - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - if _, err := testDb.ExecContext(ctx, "SELECT COUNT(DISTINCT ORA_HASH(A.table_name)) from cat, cat, cat A"); err != nil { - t.Log(err) - } -} - -func TestQueryTimeout(t *testing.T) { - t.Parallel() - defer tl.enableLogging(t)() - ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) - defer cancel() - if _, err := testDb.QueryContext(ctx, "SELECT COUNT(0) FROM all_objects, all_objects"); err != nil { - t.Log(err) - } -} - -func TestSDO(t *testing.T) { - //t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - innerQry := `SELECT MDSYS.SDO_GEOMETRY( - 3001, - NULL, - NULL, - MDSYS.SDO_ELEM_INFO_ARRAY( - 1,1,1,4,1,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL - ), - MDSYS.SDO_ORDINATE_ARRAY( - 
480736.567,10853969.692,0,0.998807402795312,-0.0488238888381834,0,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) - ) SHAPE FROM DUAL` - selectQry := `SELECT shape, DUMP(shape), CASE WHEN shape IS NULL THEN 'I' ELSE 'N' END FROM (` + innerQry + ")" - rows, err := testDb.QueryContext(ctx, selectQry) - if err != nil { - if !strings.Contains(err.Error(), `ORA-00904: "MDSYS"."SDO_GEOMETRY"`) { - t.Fatal(errors.Wrap(err, selectQry)) - } - for _, qry := range []string{ - `CREATE TYPE test_sdo_point_type AS OBJECT ( - X NUMBER, - Y NUMBER, - Z NUMBER)`, - "CREATE TYPE test_sdo_elem_info_array AS VARRAY (1048576) of NUMBER", - "CREATE TYPE test_sdo_ordinate_array AS VARRAY (1048576) of NUMBER", - `CREATE TYPE test_sdo_geometry AS OBJECT ( - SDO_GTYPE NUMBER, - SDO_SRID NUMBER, - SDO_POINT test_SDO_POINT_TYPE, - SDO_ELEM_INFO test_SDO_ELEM_INFO_ARRAY, - SDO_ORDINATES test_SDO_ORDINATE_ARRAY)`, - - `CREATE TABLE test_sdo( - id INTEGER not null, - shape test_SDO_GEOMETRY not null - )`, - } { - var drop string - if strings.HasPrefix(qry, "CREATE TYPE") { - drop = "DROP TYPE " + qry[12:strings.Index(qry, " AS")] + " FORCE" - } else { - drop = "DROP TABLE " + qry[13:strings.Index(qry, "(")] - } - testDb.ExecContext(ctx, drop) - t.Log(drop) - if _, err := testDb.ExecContext(ctx, qry); err != nil { - err = errors.Wrap(err, qry) - t.Log(err) - if !strings.Contains(err.Error(), "ORA-01031:") { - t.Fatal(err) - } - t.Skip(err) - } - defer testDb.ExecContext(ctx, drop) - } - - selectQry = strings.Replace(selectQry, "MDSYS.SDO_", "test_SDO_", -1) - if rows, err = testDb.QueryContext(ctx, selectQry); err != nil { - t.Fatal(errors.Wrap(err, selectQry)) - } - - } - defer rows.Close() - if false { - goracle.Log = func(kv ...interface{}) error { - t.Helper() - t.Log(kv) - return nil - } - } - for rows.Next() { - var dmp, isNull string - var intf interface{} - if err = rows.Scan(&intf, &dmp, &isNull); err != nil { - t.Error(errors.Wrap(err, "scan")) - } - t.Log(dmp, isNull) - obj := intf.(*goracle.Object) - //t.Log("obj:", obj) - printObj(t, "", obj) - } - if err = rows.Err(); err != nil { - t.Fatal(err) - } -} - -func printObj(t *testing.T, name string, obj *goracle.Object) { - if obj == nil { - return - } - for key := range obj.Attributes { - sub, err := obj.Get(key) - t.Logf("%s.%s. %+v (err=%+v)\n", name, key, sub, err) - if err != nil { - t.Errorf("ERROR: %+v", err) - } - if ss, ok := sub.(*goracle.Object); ok { - printObj(t, name+"."+key, ss) - } else if coll, ok := sub.(*goracle.ObjectCollection); ok { - slice, err := coll.AsSlice(nil) - t.Logf("%s.%s. 
%+v", name, key, slice) - if err != nil { - t.Fatal(err) - } - } - } -} - -var _ = driver.Valuer((*Custom)(nil)) - -type Custom struct { - Num int64 -} - -func (t *Custom) Value() (driver.Value, error) { - return t.Num, nil -} - -func TestSelectCustomType(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - conn, err := testDb.Conn(ctx) - if err != nil { - t.Fatal(err) - } - defer conn.Close() - tbl := "test_custom_type" + tblSuffix - conn.ExecContext(ctx, "DROP TABLE "+tbl) - qry := "CREATE TABLE " + tbl + " (nm VARCHAR2(30), typ VARCHAR2(30), id NUMBER(6), created DATE)" - if _, err = conn.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - defer testDb.Exec("DROP TABLE " + tbl) - - n := 1000 - nms, typs, ids, createds := make([]string, n), make([]string, n), make([]int, n), make([]time.Time, n) - now := time.Now() - for i := range nms { - nms[i], typs[i], ids[i], createds[i] = fmt.Sprintf("obj-%d", i), "OBJECT", i, now.Add(-time.Duration(i)*time.Second) - } - qry = "INSERT INTO " + tbl + " (nm, typ, id, created) VALUES (:1, :2, :3, :4)" - if _, err = conn.ExecContext(ctx, qry, nms, typs, ids, createds); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - - const num = 10 - nums := &Custom{Num: num} - type underlying int64 - numbers := underlying(num) - rows, err := conn.QueryContext(ctx, - "SELECT nm, typ, id, created FROM "+tbl+" WHERE ROWNUM < COALESCE(:alpha, :beta, 2) ORDER BY id", - sql.Named("alpha", nums), - goracle.MagicTypeConversion(), sql.Named("beta", numbers), - ) - if err != nil { - t.Fatalf("%+v", err) - } - n = 0 - oldOid := int64(0) - for rows.Next() { - var tbl, typ string - var oid int64 - var created time.Time - if err := rows.Scan(&tbl, &typ, &oid, &created); err != nil { - t.Fatal(err) - } - t.Log(tbl, typ, oid, created) - if tbl == "" { - t.Fatal("empty tbl") - } - n++ - if oldOid > oid { - t.Errorf("got oid=%d, wanted sth < %d.", oid, oldOid) - } - oldOid = oid - } - if n == 0 || n > num { - t.Errorf("got %d rows, wanted %d", n, num) - } -} - -func TestExecInt64(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - qry := `CREATE OR REPLACE PROCEDURE test_i64_out(p_int NUMBER, p_out1 OUT NUMBER, p_out2 OUT NUMBER) IS - BEGIN p_out1 := p_int; p_out2 := p_int; END;` - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(err) - } - defer testDb.ExecContext(ctx, "DROP PROCEDURE test_i64_out") - - qry = "BEGIN test_i64_out(:1, :2, :3); END;" - var num sql.NullInt64 - var str string - defer tl.enableLogging(t)() - if _, err := testDb.ExecContext(ctx, qry, 3.14, sql.Out{Dest: &num}, sql.Out{Dest: &str}); err != nil { - t.Fatal(err) - } - t.Log("num:", num, "str:", str) -} - -func TestImplicitResults(t *testing.T) { - t.Parallel() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - const qry = `declare - c0 sys_refcursor; - c1 sys_refcursor; - c2 sys_refcursor; - begin - :1 := c0; - open c1 for - select 1 from DUAL; - dbms_sql.return_result(c1); - open c2 for - select 'A' from DUAL; - dbms_sql.return_result(c2); - end;` - var rows driver.Rows - if _, err := testDb.ExecContext(ctx, qry, sql.Out{Dest: &rows}); err != nil { - if strings.Contains(err.Error(), "PLS-00302:") { - t.Skip() - } - t.Fatal(errors.Wrap(err, qry)) - } - r := rows.(driver.RowsNextResultSet) - for r.HasNextResultSet() { - if err := r.NextResultSet(); err != nil { - t.Error(err) - } - } 
-} - -func TestStartupShutdown(t *testing.T) { - if os.Getenv("GORACLE_DB_SHUTDOWN") != "1" { - t.Skip("GORACLE_DB_SHUTDOWN != 1, skipping shutdown/startup test") - } - p, err := goracle.ParseConnString(testConStr) - if err != nil { - t.Fatal(errors.Wrap(err, testConStr)) - } - if !(p.IsSysDBA || p.IsSysOper) { - p.IsSysDBA = true - } - if !p.IsPrelim { - p.IsPrelim = true - } - db, err := sql.Open("goracle", p.StringWithPassword()) - if err != nil { - t.Fatal(err, p.StringWithPassword()) - } - defer db.Close() - conn, err := goracle.DriverConn(db) - if err != nil { - t.Fatal(err) - } - if err = conn.Shutdown(goracle.ShutdownTransactionalLocal); err != nil { - t.Error(err) - } - if err = conn.Shutdown(goracle.ShutdownFinal); err != nil { - t.Error(err) - } - if err = conn.Startup(goracle.StartupDefault); err != nil { - t.Error(err) - } -} - -func TestIssue134(t *testing.T) { - const crea = `CREATE OR REPLACE TYPE test_PRJ_TASK_OBJ_TYPE AS OBJECT ( - PROJECT_NUMBER VARCHAR2(100) - ,SOURCE_ID VARCHAR2(100) - ,TASK_NAME VARCHAR2(300) - ,TASK_DESCRIPTION VARCHAR2(2000) - ,TASK_START_DATE DATE - ,TASK_END_DATE DATE - ,TASK_COST NUMBER - ,SOURCE_PARENT_ID NUMBER - ,TASK_TYPE VARCHAR2(100) - ,QUANTITY NUMBER ); -CREATE OR REPLACE TYPE test_PRJ_TASK_TAB_TYPE IS TABLE OF test_PRJ_TASK_OBJ_TYPE; -CREATE OR REPLACE PROCEDURE test_CREATE_TASK_ACTIVITY (p_create_task_i IN PRJ_TASK_TAB_TYPE, - p_create_activity_i IN PRJ_ACTIVITY_TAB_TYPE, - p_project_id_i IN NUMBER) IS BEGIN NULL; END;` - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - for _, qry := range strings.Split(crea, ";\n") { - if strings.HasSuffix(qry, " END") { - qry += ";" - } - if _, err := testDb.ExecContext(ctx, qry); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - } - defer func() { - for _, qry := range []string{ - `DROP TYPE test_prj_task_tab_type`, - `DROP TYPE test_prj_task_obj_type`, - `DROP PROCEDURE test_create_task_activity`, - } { - testDb.Exec(qry) - } - }() - - var o1, o2 goracle.Object - qry := "BEGIN :1 := test_prj_task_tab_type(); END;" - if _, err := testDb.ExecContext(ctx, qry, sql.Out{Dest: &o1}); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - if _, err := testDb.ExecContext(ctx, qry, sql.Out{Dest: &o2}); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - qry = "BEGIN test_create_task_activity(:1, :2, :3); END;" - if _, err := testDb.ExecContext(ctx, qry, o1, o2, 1); err != nil { - t.Error(err) - } -} - -func TestTsTZ(t *testing.T) { - t.Parallel() - qry := "SELECT FROM_TZ(TO_TIMESTAMP('2019-05-01 09:39:12', 'YYYY-MM-DD HH24:MI:SS'), '{{.TZ}}') FROM DUAL" - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - defer tl.enableLogging(t)() - var ts time.Time - { - qry := strings.Replace(qry, "{{.TZ}}", "01:00", 1) - if err := testDb.QueryRowContext(ctx, qry).Scan(&ts); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - } - qry = strings.Replace(qry, "{{.TZ}}", "Europe/Berlin", 1) - err := testDb.QueryRowContext(ctx, qry).Scan(&ts) - if err != nil { - t.Log(errors.Wrap(err, qry)) - } - t.Log(ts) - if !ts.IsZero() { - return - } - - qry = "SELECT filename, version FROM v$timezone_file" - rows, err := testDb.QueryContext(ctx, qry) - if err != nil { - t.Log(qry, err) - return - } - defer rows.Close() - for rows.Next() { - var fn, ver string - if err := rows.Scan(&fn, &ver); err != nil { - t.Log(qry, err) - continue - } - t.Log(fn, ver) - } - t.Skip("wanted non-zero time") -} - -func TestGetDBTimeZone(t *testing.T) { - t.Parallel() - 
defer tl.enableLogging(t)() - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - qry := "SELECT SESSIONTIMEZONE FROM DUAL" - var tz string - if err := testDb.QueryRowContext(ctx, qry).Scan(&tz); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - t.Log("timezone:", tz) - - for _, timS := range []string{"2006-07-08", "2006-01-02"} { - localTime, err := time.ParseInLocation("2006-01-02", timS, time.Local) - if err != nil { - t.Fatal(err) - } - qry = "SELECT TO_DATE('" + timS + " 00:00:00', 'YYYY-MM-DD HH24:MI:SS') FROM DUAL" - var dbTime time.Time - t.Log("local:", localTime.Format(time.RFC3339)) - if err := testDb.QueryRowContext(ctx, qry).Scan(&dbTime); err != nil { - t.Fatal(errors.Wrap(err, qry)) - } - t.Log("db:", dbTime.Format(time.RFC3339)) - if !dbTime.Equal(localTime) { - t.Errorf("db says %s, local is %s", dbTime.Format(time.RFC3339), localTime.Format(time.RFC3339)) - } - } -} diff --git a/vendor/github.com/elastic/beats/vendor/vendor.json b/vendor/github.com/elastic/beats/vendor/vendor.json index f26d1bc1..75ebb6dd 100644 --- a/vendor/github.com/elastic/beats/vendor/vendor.json +++ b/vendor/github.com/elastic/beats/vendor/vendor.json @@ -1,6 +1,6 @@ { "comment": "", - "ignore": "test", + "ignore": "test tools", "package": [ { "checksumSHA1": "vBY5JVo2RftJoZExAKoCCTOr5fU=", @@ -72,6 +72,90 @@ "version": "v0.40.0", "versionExact": "v0.40.0" }, + { + "checksumSHA1": "6uv2eI5FyjWtTg5AsZ++csclSqc=", + "path": "contrib.go.opencensus.io/exporter/ocagent", + "revision": "8110e6c0236bb231b19119275a6be6ec666d05c8", + "revisionTime": "2019-07-18T21:44:46Z" + }, + { + "checksumSHA1": "ngNSB6sT1BYbWf0U6wuld3pIRXM=", + "path": "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2019-06-01/insights", + "revision": "7a9d2769e4a581b0b1bc609c71b59af043e05c98", + "revisionTime": "2019-07-30T02:56:09Z" + }, + { + "checksumSHA1": "OCsjojjUiG5pLjpeCxHpekcq7ZA=", + "path": "github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2019-03-01/resources", + "revision": "a629ae7873bf2a86184a8bc1b65e65e1ab532f57", + "revisionTime": "2019-08-05T17:15:50Z" + }, + { + "checksumSHA1": "jgptrp19Ok9B8BS04S8Xb3Y8Yl8=", + "path": "github.com/Azure/azure-sdk-for-go/version", + "revision": "7a9d2769e4a581b0b1bc609c71b59af043e05c98", + "revisionTime": "2019-07-30T02:56:09Z" + }, + { + "checksumSHA1": "dl0zIyJAk/XSTO7F7g8AKh0quCo=", + "path": "github.com/Azure/go-autorest/autorest", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "TER1CDhTFs8pgHpD5DPhOnjZl4k=", + "path": "github.com/Azure/go-autorest/autorest/adal", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "eO3bbhMr/5NQj8Khg/H8Znj2x6M=", + "path": "github.com/Azure/go-autorest/autorest/azure", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "slBKa+9nKpCmvH+KVR4HeLtd1ZQ=", + "path": "github.com/Azure/go-autorest/autorest/azure/auth", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "DJ+ZcIb70zbTbwMt75jiry3tBjg=", + "path": "github.com/Azure/go-autorest/autorest/azure/cli", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "XQQTDRuYVRMx36D2aa7rIt2vw/8=", + "path": "github.com/Azure/go-autorest/autorest/date", + "revision": 
"ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "GqYnBFF0kjNFd5+4MqxQ3lh7h9I=", + "path": "github.com/Azure/go-autorest/autorest/to", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "4hhLEX9p5Tn9HdZjnOuKVm0Fk8c=", + "path": "github.com/Azure/go-autorest/autorest/validation", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "j8fDvzr+qnh0xk9i4M9uEZa2gF0=", + "path": "github.com/Azure/go-autorest/logger", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, + { + "checksumSHA1": "1VPe9WuJ2jDyJhRYPQNkEV7any8=", + "path": "github.com/Azure/go-autorest/tracing", + "revision": "ba1147dc57f993013ef255c128ca1cac8a557409", + "revisionTime": "2019-08-01T15:21:39Z" + }, { "checksumSHA1": "eruIVA8JnsB23rVKjETHvqJ0sj8=", "path": "github.com/DataDog/zstd", @@ -79,10 +163,18 @@ "revisionTime": "2016-07-06T22:07:25Z" }, { - "checksumSHA1": "AzjRkOQtVBTwIw4RJLTygFhJs3s=", + "checksumSHA1": "9vIkKNaaSRELM/2nCkWOAdHD21M=", "path": "github.com/Microsoft/go-winio", - "revision": "f533f7a102197536779ea3a8cb881d639e21ec5a", - "revisionTime": "2017-05-24T00:36:31Z" + "revision": "6c72808b55902eae4c5943626030429ff20f3b63", + "revisionTime": "2019-08-06T19:59:04Z", + "version": "v0.4.14", + "versionExact": "v0.4.14" + }, + { + "checksumSHA1": "7GaOyxvD4mLFr39pWg40CGZ0yiM=", + "path": "github.com/Microsoft/hcsshim/osversion", + "revision": "84b0c364e1e3bb91e43b85bf20d72e7948666817", + "revisionTime": "2019-09-04T14:17:38Z" }, { "checksumSHA1": "rDoYEddGYvQT73l9V8Uqjk7SHAY=", @@ -91,13 +183,14 @@ "revisionTime": "2018-07-23T16:30:02Z" }, { - "checksumSHA1": "w5NkRQ2Cebz7a6UfrbwFdhyRWnM=", + "checksumSHA1": "qrdmO04N/Fb0BvrohZecpjMbcU0=", + "origin": "github.com/elastic/sarama", "path": "github.com/Shopify/sarama", - "revision": "46c83074a05474240f9620fb7c70fb0d80ca401a", - "revisionTime": "2019-07-22T18:01:16Z", + "revision": "71dcfe72351b8daf910276a46540ca0b7bbe0a2b", + "revisionTime": "2019-11-24T15:23:03Z", "tree": true, - "version": "v1.23.1", - "versionExact": "v1.23.1" + "version": "v1.23.1-elastic", + "versionExact": "v1.23.1-elastic" }, { "checksumSHA1": "Te1xRugxHQMAg7EvbIUuPWm8fvU=", @@ -472,6 +565,15 @@ "version": "v0.9.0", "versionExact": "v0.9.0" }, + { + "checksumSHA1": "FfvZuO9dwWGKCPy8Y2a3gvvORlk=", + "path": "github.com/aws/aws-sdk-go-v2/service/iam", + "revision": "098e15df3044cf1b04a222c1c33c3e6135ac89f3", + "revisionTime": "2019-05-28T21:51:27Z", + "tree": true, + "version": "v0.9.0", + "versionExact": "v0.9.0" + }, { "checksumSHA1": "eMHyTG27+3dZ56gst6ofgI4axsQ=", "path": "github.com/aws/aws-sdk-go-v2/service/rds", @@ -574,18 +676,162 @@ "revision": "c5f655c41370a0bec733fa251fd2ecd4e3f0dfe3", "revisionTime": "2018-10-29T15:41:58Z" }, + { + "checksumSHA1": "0rido7hYHQtfq3UJzVT5LClLAWc=", + "path": "github.com/beorn7/perks/quantile", + "revision": "37c8de3658fcb183f997c4e13e8337516ab753e6", + "revisionTime": "2019-07-31T12:00:54Z" + }, { "checksumSHA1": "al43W4TWRdznwfxOWp4egKcL39c=", "path": "github.com/bsm/sarama-cluster", "revision": "7e67d87a6b3f83fe08c096fd084691bd9dca112f", "revisionTime": "2018-06-25T08:32:03Z" }, + { + "checksumSHA1": "tQ+EywYQyMDanePvvgMc6hCd1so=", + "path": "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1", + "revision": "26aa36c099c2041b432cf3cc8a26c5fb858d218b", + "revisionTime": 
"2019-07-23T11:24:16Z" + }, + { + "checksumSHA1": "n57Q+kdtPreeyZJGe+bQydJpDG4=", + "path": "github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1", + "revision": "26aa36c099c2041b432cf3cc8a26c5fb858d218b", + "revisionTime": "2019-07-23T11:24:16Z" + }, + { + "checksumSHA1": "TfdFyXCScozfB3bdJI/uV+7tsk0=", + "path": "github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1", + "revision": "26aa36c099c2041b432cf3cc8a26c5fb858d218b", + "revisionTime": "2019-07-23T11:24:16Z" + }, + { + "checksumSHA1": "6LseK85HR0yOVObsKuyvMm7uQ88=", + "path": "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1", + "revision": "26aa36c099c2041b432cf3cc8a26c5fb858d218b", + "revisionTime": "2019-07-23T11:24:16Z" + }, + { + "checksumSHA1": "TGl2i1EafPJBn2zdyrCT7BXZLUU=", + "path": "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1", + "revision": "26aa36c099c2041b432cf3cc8a26c5fb858d218b", + "revisionTime": "2019-07-23T11:24:16Z" + }, + { + "checksumSHA1": "9FylOJ7UX9MLEYhB0+m774aIyDc=", + "path": "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1", + "revision": "26aa36c099c2041b432cf3cc8a26c5fb858d218b", + "revisionTime": "2019-07-23T11:24:16Z" + }, + { + "checksumSHA1": "Mu77HNTb0SVN0IEBB+hSDeZzO+g=", + "path": "github.com/cloudflare/cfssl/api", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "MOxpP9bGNRoPvAN04L8Fuw71U6w=", + "path": "github.com/cloudflare/cfssl/auth", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "duxfb/cka4fBVgW0wSchulpGVcE=", + "path": "github.com/cloudflare/cfssl/certdb", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "rncI4JgGxlYUXI2IsAXdHNYR2MM=", + "path": "github.com/cloudflare/cfssl/config", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "mdeqE00nlroD2prYypxQEc8NcCk=", + "path": "github.com/cloudflare/cfssl/crypto/pkcs7", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "7Fy2HkW2/IPQ0GDR9uCmzTcoJtE=", + "path": "github.com/cloudflare/cfssl/csr", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "q9cRr+vy+YJrTJpJ05yK7q6uWmM=", + "path": "github.com/cloudflare/cfssl/errors", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "b42fYCufgl+ZLhWKw14kvn7qSWk=", + "path": "github.com/cloudflare/cfssl/helpers", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "CteaFacX5K37VpIGYBQQfBWfvIw=", + "path": "github.com/cloudflare/cfssl/helpers/derhelpers", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "nbzsp45GyLeosQUqpNT9fWc1q9U=", + "path": "github.com/cloudflare/cfssl/info", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "F8F81IpL9PJqqwajher7nqYgFNI=", + "path": "github.com/cloudflare/cfssl/initca", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": 
"BMy7zAiSQTYYWn1ZzxeEC75pd2g=", + "path": "github.com/cloudflare/cfssl/log", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "OKh9Jo5QeZhPOWkoseDc0J8U1LM=", + "path": "github.com/cloudflare/cfssl/ocsp/config", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "Xlb7RtZuTYuX6sSz15+da9Cpjmw=", + "path": "github.com/cloudflare/cfssl/signer", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "akb3bTltrdTrq4NsrwKrCFopanY=", + "path": "github.com/cloudflare/cfssl/signer/local", + "revision": "b1ec8c586c2aa3ec3eaf4a622933f169cfa5648b", + "revisionTime": "2019-08-08T01:16:37Z" + }, + { + "checksumSHA1": "6aGvzibOCGGTbCFKOHRWIASC+Us=", + "path": "github.com/containerd/continuity/pathdriver", + "revision": "75bee3e2ccb6402e3a986ab8bd3b17003fc0fdec", + "revisionTime": "2019-08-27T14:05:05Z" + }, { "checksumSHA1": "qFaKrhSla38BRAyaGz2UaZvH/Dk=", "path": "github.com/coreos/bbolt", "revision": "af9db2027c98c61ecd8e17caa5bd265792b9b9a2", "revisionTime": "2018-03-18T00:15:26Z" }, + { + "checksumSHA1": "mUTSYH2UFn+/ZUcTIcGDQV1uAU0=", + "path": "github.com/coreos/etcd/raft/raftpb", + "revision": "4d210173ae0d59d4d746735fdd26839513aadaf1", + "revisionTime": "2019-08-29T19:00:40Z" + }, { "checksumSHA1": "cEszpxh1szqTb440ze4hm/Vfm40=", "path": "github.com/coreos/go-systemd/sdjournal", @@ -626,6 +872,18 @@ "revision": "4e0d7dc8888fbb59764060e99b7b68e77a6f9698", "revisionTime": "2018-10-14T14:49:52Z" }, + { + "checksumSHA1": "0cytTxS0qPVjtknc7OrfuNmxvBE=", + "path": "github.com/dgrijalva/jwt-go", + "revision": "5e25c22bd5d6de03265bbe5462dcd162f85046f6", + "revisionTime": "2019-06-20T18:01:02Z" + }, + { + "checksumSHA1": "RVynNqzz7juBU5Vu5QPsPMuQnHM=", + "path": "github.com/dimchansky/utfbom", + "revision": "d2133a1ce379ef6fa992b0514a77146c60db9d1c", + "revisionTime": "2018-12-05T23:29:56Z" + }, { "checksumSHA1": "6y/Ht8J58EotTDBEIuE3+s4AnL8=", "path": "github.com/dlclark/regexp2", @@ -651,178 +909,289 @@ "revisionTime": "2017-05-24T20:58:24Z" }, { - "checksumSHA1": "jYj/NYT4pL27gVSrKmPZT+8/kMs=", + "checksumSHA1": "OvbnMCvUWjeV6iKz843oHyZMQHY=", + "path": "github.com/docker/distribution/registry/api/errcode", + "revision": "aeaeb844071362767ad497eb0911fcf60e47fa11", + "revisionTime": "2019-09-03T18:54:37Z" + }, + { + "checksumSHA1": "VpnfjnJvRY774nILhcqh/dAp9m4=", + "origin": "github.com/docker/engine/api", "path": "github.com/docker/docker/api", - "revision": "1009e6a40b295187e038b67e184e9c0384d95538", - "revisionTime": "2017-07-26T21:42:13Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "KMFpbV3mlrbc41d2DYnq05QYpSc=", + "checksumSHA1": "f79ymEBz/cSEpBegRzRAREEWRjU=", + "origin": "github.com/docker/engine/api/types", "path": "github.com/docker/docker/api/types", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "jVJDbe0IcyjoKc2xbohwzQr+FF0=", + "checksumSHA1": "/jF0HVFiLzUUuywSjp4F/piM7BM=", + "origin": "github.com/docker/engine/api/types/blkiodev", "path": "github.com/docker/docker/api/types/blkiodev", - "revision": 
"d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "AeSC0BOu1uapkGqfSXtfVSpwJzs=", + "checksumSHA1": "rBGoI39KB5EQNaYMa3atjIa2LcY=", + "origin": "github.com/docker/engine/api/types/container", "path": "github.com/docker/docker/api/types/container", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "4XuWn5+wgYwUsw604jvYMklq4Hc=", + "checksumSHA1": "fF+hqcXRnyL/ZeDFP7b5HSa734Q=", + "origin": "github.com/docker/engine/api/types/events", "path": "github.com/docker/docker/api/types/events", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "J2OKngfI3vgswudr9PZVUFcRRu0=", + "checksumSHA1": "wJRYkZcWXIsAH1fixZEl+LsV24s=", + "origin": "github.com/docker/engine/api/types/filters", "path": "github.com/docker/docker/api/types/filters", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "yeB781yxPhnN6OXQ9/qSsyih3ek=", + "checksumSHA1": "y1YAaXPtsvS3dxEzNE5LXhmYkno=", + "origin": "github.com/docker/engine/api/types/image", "path": "github.com/docker/docker/api/types/image", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "UK+VdM648oWzyqE4OqttgmPqjoA=", + "checksumSHA1": "9OClWW7OCikgz4QCS/sAVcvqcWk=", + "origin": "github.com/docker/engine/api/types/mount", "path": "github.com/docker/docker/api/types/mount", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "Gskp+nvbVe8Gk1xPLHylZvNmqTg=", + "checksumSHA1": "00k6FhkdRZ+TEiPPsUPAY594bCw=", + "origin": "github.com/docker/engine/api/types/network", "path": "github.com/docker/docker/api/types/network", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "r2vWq7Uc3ExKzMqYgH0b4AKjLKY=", + "checksumSHA1": "m4Jg5WnW75I65nvkEno8PElSXik=", + "origin": "github.com/docker/engine/api/types/registry", "path": "github.com/docker/docker/api/types/registry", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": 
"VTxWyFud/RedrpllGdQonVtGM/A=", + "checksumSHA1": "OQEUS/2J2xVHpfvcsxcXzYqBSeY=", + "origin": "github.com/docker/engine/api/types/strslice", "path": "github.com/docker/docker/api/types/strslice", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "ZaizCpJ3eBcfR9ywpLaJd4AhM9k=", + "checksumSHA1": "lyByEOaPKxCLcBvrXmt3VRw1PAI=", + "origin": "github.com/docker/engine/api/types/swarm", "path": "github.com/docker/docker/api/types/swarm", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "B7ZwKzrv3t3Vlox6/bYMHhMjsM8=", + "checksumSHA1": "txs5EKTbKgVyKmKKSnaH3fr+odA=", + "origin": "github.com/docker/engine/api/types/swarm/runtime", + "path": "github.com/docker/docker/api/types/swarm/runtime", + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" + }, + { + "checksumSHA1": "V8OZpLS8i1HfO4lAzUzEw2BgnfQ=", + "origin": "github.com/docker/engine/api/types/time", "path": "github.com/docker/docker/api/types/time", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "uDPQ3nHsrvGQc9tg/J9OSC4N5dQ=", + "checksumSHA1": "MZsgRjJJ0D/gAsXfKiEys+op6dE=", + "origin": "github.com/docker/engine/api/types/versions", "path": "github.com/docker/docker/api/types/versions", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "IBJy2zPEnYmcFJ3lM1eiRWnCxTA=", + "checksumSHA1": "REnP4RB3EMCkMS1dw4N5WIZU/jg=", + "origin": "github.com/docker/engine/api/types/volume", "path": "github.com/docker/docker/api/types/volume", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "c6OyeEvpQDvVLhrJSxgjEZv1tF8=", - "origin": "github.com/exekias/moby/client", + "checksumSHA1": "akrG7ojJtmMSGyE8M3VrIgxKJUg=", + "origin": "github.com/docker/engine/client", "path": "github.com/docker/docker/client", - "revision": "83d94aa2b98f10c4534d77077645d5e328533ce4", - "revisionTime": "2017-05-29T14:39:59Z", - "version": "master", - "versionExact": "master" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "kK/ZpRsTmbHTYGkV7NixaG2rork=", + "checksumSHA1": "P9B78O3D4ySWOZD/4e57wYO4BVs=", + "origin": "github.com/docker/engine/daemon/cluster/convert", + "path": "github.com/docker/docker/daemon/cluster/convert", + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": 
"v19.03.2", + "versionExact": "v19.03.2" + }, + { + "checksumSHA1": "hhlEU76j5HfAS+oreefqdDPzLPA=", + "origin": "github.com/docker/engine/errdefs", + "path": "github.com/docker/docker/errdefs", + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" + }, + { + "checksumSHA1": "dFf9rWD7Ous9YKO0udunqNZEaXw=", + "origin": "github.com/docker/engine/opts", "path": "github.com/docker/docker/opts", - "revision": "89658bed64c2a8fe05a978e5b87dbec409d57a0f", - "revisionTime": "2017-05-04T20:25:03Z", - "version": "v17.05.0-ce", - "versionExact": "v17.05.0-ce" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "/rhDgzb51QH29Dn6v9lr/NQrLbs=", + "checksumSHA1": "eMoRb/diYeuYLojU7ChN5DaETHc=", + "origin": "github.com/docker/engine/pkg/fileutils", "path": "github.com/docker/docker/pkg/fileutils", - "revision": "1009e6a40b295187e038b67e184e9c0384d95538", - "revisionTime": "2017-07-26T21:42:13Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "W7Rnb5YoC9u0vT9eLKoezV3N4E8=", + "checksumSHA1": "CvnZ3L6NW0w2xjBZ1eadE9WElyg=", + "origin": "github.com/docker/engine/pkg/homedir", "path": "github.com/docker/docker/pkg/homedir", - "revision": "89658bed64c2a8fe05a978e5b87dbec409d57a0f", - "revisionTime": "2017-05-04T20:25:03Z", - "version": "v17.05.0-ce", - "versionExact": "v17.05.0-ce" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "bfvwzIbEFwclBKJSNa6oaA27DgE=", + "checksumSHA1": "hh2fjllcaPQdZPg/umg7zVo4BiM=", + "origin": "github.com/docker/engine/pkg/idtools", "path": "github.com/docker/docker/pkg/idtools", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "jmo/t2zXAxirEPoFucNPXA/1SEc=", + "checksumSHA1": "Ybq78CnAoQWVBk+lkh3zykmcSjs=", + "origin": "github.com/docker/engine/pkg/ioutils", "path": "github.com/docker/docker/pkg/ioutils", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { "checksumSHA1": "kp20bhjkvJ06uW6DRfVIZbCj8SY=", + "origin": "github.com/docker/engine/pkg/jsonlog", "path": "github.com/docker/docker/pkg/jsonlog", - "revision": "89658bed64c2a8fe05a978e5b87dbec409d57a0f", - "revisionTime": "2017-05-04T20:25:03Z", - "version": "v17.05.0-ce", - "versionExact": "v17.05.0-ce" + "revision": "8af4db6f002ac907b6ef8610b237879dfcaa5b7a", + "revisionTime": "2017-08-02T01:53:33Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "ndnAFCfsGC3upNQ6jAEwzxcurww=", + "checksumSHA1": "EXiIm2xIL7Ds+YsQUx8Z3eUYPtI=", + "origin": "github.com/docker/engine/pkg/longpath", "path": "github.com/docker/docker/pkg/longpath", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": 
"2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "3u2xJnbqYSxOP3kOORetQD7P1Co=", + "checksumSHA1": "2503DiCg4/0gQuslT1sbHy7ISIg=", + "origin": "github.com/docker/engine/pkg/mount", "path": "github.com/docker/docker/pkg/mount", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "H1rrbVmeE1z2TnkF7tSrfh+qUOY=", + "checksumSHA1": "QdNVFRXNhEnuX+kHtgAEB5vOuQE=", + "origin": "github.com/docker/engine/pkg/namesgenerator", + "path": "github.com/docker/docker/pkg/namesgenerator", + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" + }, + { + "checksumSHA1": "w0waeTRJ1sFygI0dZXH6l9E1c60=", + "origin": "github.com/docker/engine/pkg/stdcopy", "path": "github.com/docker/docker/pkg/stdcopy", - "revision": "89658bed64c2a8fe05a978e5b87dbec409d57a0f", - "revisionTime": "2017-05-04T20:25:03Z", - "version": "v17.05.0-ce", - "versionExact": "v17.05.0-ce" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "Aa2l7tUYFCEvHd+C//15fQnAfqQ=", - "origin": "github.com/exekias/moby/pkg/system", + "checksumSHA1": "TYqofbgbw+QR0g/rAIhZ2cQGlAM=", + "origin": "github.com/docker/engine/pkg/system", "path": "github.com/docker/docker/pkg/system", - "revision": "1009e6a40b295187e038b67e184e9c0384d95538", - "revisionTime": "2017-07-26T21:42:13Z" + "revision": "ed20165a37b40ff1cfbe55e218344c5e89f30ee2", + "revisionTime": "2019-08-22T20:57:25Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { - "checksumSHA1": "8I0Ez+aUYGpsDEVZ8wN/Ztf6Zqs=", + "checksumSHA1": "ZLK19eBuLoOIEE92Dwxx6qHHxDI=", + "origin": "github.com/docker/engine/pkg/tlsconfig", "path": "github.com/docker/docker/pkg/tlsconfig", - "revision": "d192db0d9350222d2b8bb6eba8525b04c3be7d61", - "revisionTime": "2017-05-29T13:58:50Z" + "revision": "8af4db6f002ac907b6ef8610b237879dfcaa5b7a", + "revisionTime": "2017-08-02T01:53:33Z", + "version": "v19.03.2", + "versionExact": "v19.03.2" }, { "checksumSHA1": "JbiWTzH699Sqz25XmDlsARpMN9w=", @@ -848,18 +1217,180 @@ "version": "v0.3.0", "versionExact": "v0.3.0" }, + { + "checksumSHA1": "LumPGloOm57IpvBcbTsaTuzKHiA=", + "path": "github.com/docker/go-events", + "revision": "e31b211e4f1cd09aa76fe4ac244571fab96ae47f", + "revisionTime": "2019-08-06T00:42:12Z" + }, + { + "checksumSHA1": "10R3syDBqKT0ga6kgqCGq1CQNsU=", + "path": "github.com/docker/go-metrics", + "revision": "b619b3592b65de4f087d9f16863a7e6ff905973c", + "revisionTime": "2019-08-28T14:40:21Z" + }, { "checksumSHA1": "UmXGieuTJQOzJPspPJTVKKKMiUA=", "path": "github.com/docker/go-units", "revision": "0dadbb0345b35ec7ef35e228dabb8de89a65bf52", "revisionTime": "2017-01-27T09:51:30Z" }, + { + "checksumSHA1": "nZxHZRxlUe66KtMbAYDamutoo6M=", + "path": "github.com/docker/libkv", + "revision": "458977154600b9f23984d9f4b82e79570b5ae12b", + "revisionTime": "2018-09-12T20:54:06Z" + }, + { + "checksumSHA1": "f0riXTSoNiWTnXkQCUtXC9TwouM=", + "path": "github.com/docker/libkv/store", + "revision": "458977154600b9f23984d9f4b82e79570b5ae12b", + "revisionTime": "2018-09-12T20:54:06Z" + }, + { + "checksumSHA1": "Jgk2oYA8ylZAVKVFhMfYxD5U8+g=", + "path": 
"github.com/docker/libnetwork/datastore", + "revision": "92d1fbe1eb0883cf11d283cea8e658275146411d", + "revisionTime": "2019-08-26T16:48:27Z" + }, + { + "checksumSHA1": "D3+HP6HHNGk97yygDnTMhq4sTQs=", + "path": "github.com/docker/libnetwork/discoverapi", + "revision": "92d1fbe1eb0883cf11d283cea8e658275146411d", + "revisionTime": "2019-08-26T16:48:27Z" + }, + { + "checksumSHA1": "X07lwsZTwq6wVkKDAPxyTmimwq8=", + "path": "github.com/docker/libnetwork/ipamutils", + "revision": "92d1fbe1eb0883cf11d283cea8e658275146411d", + "revisionTime": "2019-08-26T16:48:27Z" + }, + { + "checksumSHA1": "emCh8kUCHRn7Me4v1oNdekVh+Xo=", + "path": "github.com/docker/libnetwork/types", + "revision": "92d1fbe1eb0883cf11d283cea8e658275146411d", + "revisionTime": "2019-08-26T16:48:27Z" + }, { "checksumSHA1": "sNAU9ojYVUhO6dVXey6T3JhRQpw=", "path": "github.com/docker/libtrust", "revision": "aabc10ec26b754e797f9028f4589c5b7bd90dc20", "revisionTime": "2016-07-08T17:25:13Z" }, + { + "checksumSHA1": "AKX6HtXMQf/PhmTn51ulJsxXuGs=", + "path": "github.com/docker/swarmkit/api", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "ULeVOTJ8TD6mJhFZ/AqzHNBMfY8=", + "path": "github.com/docker/swarmkit/api/deepcopy", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "PkV9F1AXmBgGQvp7vn5pnQ4edGc=", + "path": "github.com/docker/swarmkit/api/equality", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "/Z0yJgWO1vOsu+A2oFOXYydqECI=", + "path": "github.com/docker/swarmkit/api/genericresource", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "hjpUVsHpfEp/qV608vUlIPIoaNU=", + "path": "github.com/docker/swarmkit/api/naming", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "ZiNYa/va5hLlcgVtjc+mMcS59hg=", + "path": "github.com/docker/swarmkit/ca", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "+D9HmaikvyJi+3FSPp4C7avqoyA=", + "path": "github.com/docker/swarmkit/ca/keyutils", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "NtZpqR+oJYhWwNi+KFJRhnvhD9A=", + "path": "github.com/docker/swarmkit/ca/pkcs8", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "0t/1Ir1L2Yi3qLCPi0SHj/EnaB0=", + "path": "github.com/docker/swarmkit/connectionbroker", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "WQB+G0JBx9n58syx+LWBpV0Ic98=", + "path": "github.com/docker/swarmkit/identity", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "RbJTcN918+FS13nBkxznsDY1OPA=", + "path": "github.com/docker/swarmkit/ioutils", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "Q4stSiXCGOB79btqQn7iRQWscLc=", + "path": "github.com/docker/swarmkit/log", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "+24F7Pw6ONxluTpsdblas2DDW08=", + "path": 
"github.com/docker/swarmkit/manager/raftselector", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "32r02ijqbGieyyiQ56DX40BOPHQ=", + "path": "github.com/docker/swarmkit/manager/state", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "F/C5L+lSjEYDVH6rWD+N+sNuB0s=", + "path": "github.com/docker/swarmkit/manager/state/store", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "oAd37g1TkyRNMBB3zYhaamp546E=", + "path": "github.com/docker/swarmkit/protobuf/plugin", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "wW9pn+HoSlh4aZddpOOVA6aKkHM=", + "path": "github.com/docker/swarmkit/remotes", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "bSUE2WcN/OgeOrQoG8mqoJwgSuc=", + "path": "github.com/docker/swarmkit/watch", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, + { + "checksumSHA1": "th2JfjfQXqILUgtar4tXuranzIA=", + "path": "github.com/docker/swarmkit/watch/queue", + "revision": "958d149179db019aef3a065f23b35455b2dd54ca", + "revisionTime": "2019-08-20T14:22:45Z" + }, { "checksumSHA1": "tSXR3pIWOvVv0vd1HgciU6/xQZ4=", "origin": "github.com/andrewkroh/goja", @@ -980,13 +1511,34 @@ "versionExact": "v0.4.0" }, { - "checksumSHA1": "c54eehjtxPjBUbM4aQuDPgc7G9Q=", + "checksumSHA1": "qM65VcezrEdAS7HdzRAh3RGnwWw=", + "path": "github.com/elastic/go-licenser", + "revision": "2b2abd4ee9b58025ebd0630d7621cfd7619f58ac", + "revisionTime": "2018-08-30T07:58:35Z", + "version": "0.2.0", + "versionExact": "0.2.0" + }, + { + "checksumSHA1": "R/WGo2p8J0KsCMu8nrbemg+iqIw=", + "path": "github.com/elastic/go-licenser/licensing", + "revision": "2b2abd4ee9b58025ebd0630d7621cfd7619f58ac", + "revisionTime": "2018-08-30T07:58:35Z", + "version": "0.2.0", + "versionExact": "0.2.0" + }, + { + "checksumSHA1": "+nIsRlnG94bUQI6IQd4Mcj4diTY=", "path": "github.com/elastic/go-lookslike", - "revision": "807124eb9fc6684949aa99744577175fd6bac4fd", - "revisionTime": "2019-06-17T15:05:19Z", + "revision": "747dc7db1c961662d8e225a42af6c3859a1a0f1d", + "revisionTime": "2019-09-04T15:56:46Z", "tree": true, - "version": "v0.2.0", - "versionExact": "v0.2.0" + "version": "=v0.3.0", + "versionExact": "v0.3.0" + }, + { + "path": "github.com/elastic/go-lookslike^", + "revision": "=v0.3.0", + "version": "=v0.3.0" }, { "checksumSHA1": "3jizmlZPCyo6FAZY8Trk9jA8NH4=", @@ -1138,60 +1690,60 @@ "versionExact": "v0.0.6" }, { - "checksumSHA1": "u5pjOSlI10k6Q9LaRcF7OgBa2tU=", + "checksumSHA1": "rfr1yBSyYTHNU3p1NKftIyzr/eQ=", "path": "github.com/elastic/go-sysinfo", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "GiZCjX17K265TtamGZZw4R2Jwbk=", "path": "github.com/elastic/go-sysinfo/internal/registry", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + 
"version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "dVSTUnZHCLNd0tYIENqdj05RyI8=", "path": "github.com/elastic/go-sysinfo/providers/darwin", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { - "checksumSHA1": "LWMXshdY44+JM7g09dA4tXMZ1rY=", + "checksumSHA1": "7Spkw81dzevqmPbUGZO1UM3K3oc=", "path": "github.com/elastic/go-sysinfo/providers/linux", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "RWLvcP1w9ynKbuCqiW6prwd+EDU=", "path": "github.com/elastic/go-sysinfo/providers/shared", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "E+yrwS/aZemnWUvwTvEhiczYuD8=", "path": "github.com/elastic/go-sysinfo/providers/windows", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { - "checksumSHA1": "OHierbaoOHx79d73DuLrao43rIg=", + "checksumSHA1": "u2RbbYcB7B4uhi/eJ01qiiBa41E=", "path": "github.com/elastic/go-sysinfo/types", - "revision": "06c1f463545498d8f4b378d4dcf3171794c28537", - "revisionTime": "2019-07-09T16:49:53Z", - "version": "v1.0.2", - "versionExact": "v1.0.2" + "revision": "51d9d1362d77a4792dfb39a7a19f056cdf1b9840", + "revisionTime": "2019-08-22T16:44:40Z", + "version": "v1.1.0", + "versionExact": "v1.1.0" }, { "checksumSHA1": "bNf3GDGhZh86bfCIMM5c5AYfo3g=", @@ -1451,16 +2003,16 @@ "revisionTime": "2017-03-27T23:54:44Z" }, { - "checksumSHA1": "wDZdTaY9JiqqqnF4c3pHP71nWmk=", + "checksumSHA1": "6Npyd/H9celLWIV9farP7DBxHNE=", "path": "github.com/go-ole/go-ole", - "revision": "de8695c8edbf8236f30d6e1376e20b198a028d42", - "revisionTime": "2017-02-09T15:13:32Z" + "revision": "14974a1cf6477f616180232977d8ab4791ea8820", + "revisionTime": "2019-09-20T10:46:07Z" }, { - "checksumSHA1": "Q0ZOcJW0fqOefDzEdn+PJHOeSgI=", + "checksumSHA1": "PArleDBtadu2qO4hJwHR8a3IOTA=", "path": "github.com/go-ole/go-ole/oleutil", - "revision": "de8695c8edbf8236f30d6e1376e20b198a028d42", - "revisionTime": "2017-02-09T15:13:32Z" + "revision": "14974a1cf6477f616180232977d8ab4791ea8820", + "revisionTime": "2019-09-20T10:46:07Z" }, { "checksumSHA1": "OFaReqy4hyrLlTTYFmcqkvidHsQ=", @@ -1487,18 +2039,48 @@ "revisionTime": "2018-09-14T05:15:27Z", "version": "3.1.1" }, + { + "checksumSHA1": "rasgUuf7DVFShc3hKeh9wjWBWO0=", + "path": "github.com/gogo/protobuf/gogoproto", + "revision": "4c00d2f19fb91be5fecd8681fa83450a2a979e69", + "revisionTime": "2019-08-18T16:08:50Z" + }, { "checksumSHA1": "6VyMn387vNTCIxqwLuScKb+CDgQ=", "path": "github.com/gogo/protobuf/proto", "revision": "65acae22fc9d1fe290b33faa2bd64cdc20a463a0", "revisionTime": "2019-07-23T19:02:41Z" }, + { + "checksumSHA1": 
"uA3fNRcrMhh6Y8ugPuoyEK72nvk=", + "path": "github.com/gogo/protobuf/protoc-gen-gogo/descriptor", + "revision": "4c00d2f19fb91be5fecd8681fa83450a2a979e69", + "revisionTime": "2019-08-18T16:08:50Z" + }, { "checksumSHA1": "HPVQZu059/Rfw2bAWM538bVTcUc=", "path": "github.com/gogo/protobuf/sortkeys", "revision": "65acae22fc9d1fe290b33faa2bd64cdc20a463a0", "revisionTime": "2019-07-23T19:02:41Z" }, + { + "checksumSHA1": "Humj7F03I9Yl+gUp29qHZK+3vgo=", + "path": "github.com/gogo/protobuf/types", + "revision": "4c00d2f19fb91be5fecd8681fa83450a2a979e69", + "revisionTime": "2019-08-18T16:08:50Z" + }, + { + "checksumSHA1": "HmbftipkadrLlCfzzVQ+iFHbl6g=", + "path": "github.com/golang/glog", + "revision": "23def4e6c14b4da8ac2ed8007337bc5eb5007998", + "revisionTime": "2016-01-25T20:49:56Z" + }, + { + "checksumSHA1": "rm+73svMGdyHzoOolT6CGQ9+rkg=", + "path": "github.com/golang/protobuf/jsonpb", + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z" + }, { "checksumSHA1": "Y2MOwzNZfl4NRNDbLCZa6sgx7O0=", "path": "github.com/golang/protobuf/proto", @@ -1515,6 +2097,24 @@ "version": "v1.3.1", "versionExact": "v1.3.1" }, + { + "checksumSHA1": "8w4NSj8xeo1khyjp+4viyoiPjo4=", + "path": "github.com/golang/protobuf/protoc-gen-go/generator", + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z" + }, + { + "checksumSHA1": "DEO1UfCBqTuNTDSIQzKQUuYnIcM=", + "path": "github.com/golang/protobuf/protoc-gen-go/generator/internal/remap", + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z" + }, + { + "checksumSHA1": "WS4ItT1lMYb3ZEcHbcFe7PxYI3I=", + "path": "github.com/golang/protobuf/protoc-gen-go/plugin", + "revision": "6c65a5562fc06764971b7c5d05c76c75e84bdbf7", + "revisionTime": "2019-07-01T18:22:01Z" + }, { "checksumSHA1": "aEiR2m3NGaMGTbUW5P+w5gKFyc8=", "path": "github.com/golang/protobuf/ptypes", @@ -1547,6 +2147,14 @@ "version": "v1.3.1", "versionExact": "v1.3.1" }, + { + "checksumSHA1": "rcRK3jWVPMOJ3ykn1ILh+UkY67E=", + "path": "github.com/golang/protobuf/ptypes/struct", + "revision": "b5d812f8a3706043e23a9cd5babf2e5423744d30", + "revisionTime": "2019-02-28T15:19:29Z", + "version": "v1.3.1", + "versionExact": "v1.3.1" + }, { "checksumSHA1": "seEwY2xETpK9yHJ9+bHqkLZ0VMU=", "path": "github.com/golang/protobuf/ptypes/timestamp", @@ -1555,12 +2163,68 @@ "version": "v1.3.1", "versionExact": "v1.3.1" }, + { + "checksumSHA1": "TSN+851ianBtZ6tuTIg/XI/1M1k=", + "path": "github.com/golang/protobuf/ptypes/wrappers", + "revision": "b5d812f8a3706043e23a9cd5babf2e5423744d30", + "revisionTime": "2019-02-28T15:19:29Z", + "version": "v1.3.1", + "versionExact": "v1.3.1" + }, { "checksumSHA1": "p/8vSviYF91gFflhrt5vkyksroo=", "path": "github.com/golang/snappy", "revision": "553a641470496b2327abcac10b36396bd98e45c9", "revisionTime": "2017-02-15T23:32:05Z" }, + { + "checksumSHA1": "NAa7W/ZDTYaRtev4wxhcKhFoVO0=", + "path": "github.com/google/certificate-transparency-go", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, + { + "checksumSHA1": "juvCQrNpUkV6HGFcJy7sEDjrcFo=", + "path": "github.com/google/certificate-transparency-go/asn1", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, + { + "checksumSHA1": "KgkPPvQ4d3+H/r6ZdmvlChtsCdA=", + "path": "github.com/google/certificate-transparency-go/client", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" 
+ }, + { + "checksumSHA1": "cmsDnnTRi6ttyOrhf5dXNa5Wak4=", + "path": "github.com/google/certificate-transparency-go/client/configpb", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, + { + "checksumSHA1": "L0YMC9JBO4+Uj3h6Jgg75oXz+Pw=", + "path": "github.com/google/certificate-transparency-go/jsonclient", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, + { + "checksumSHA1": "dPm6djjI7pxI1FyWSIPHJ1Xy6Eg=", + "path": "github.com/google/certificate-transparency-go/tls", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, + { + "checksumSHA1": "f7Qc+WEjToROm8hjDVkG2yOB1uI=", + "path": "github.com/google/certificate-transparency-go/x509", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, + { + "checksumSHA1": "DpvKvx5HVFX9GrWsT2Rw3yHiun8=", + "path": "github.com/google/certificate-transparency-go/x509/pkix", + "revision": "2c006aff63ed2c60653701dfb7b53424339382b1", + "revisionTime": "2019-08-29T12:01:30Z" + }, { "checksumSHA1": "upYudcnxqDBx2/71UDwwxHn/KyY=", "path": "github.com/google/flatbuffers/go", @@ -1603,6 +2267,25 @@ "revision": "f140a6486e521aad38f5917de355cbf147cc0496", "revisionTime": "2019-04-08T17:44:45Z" }, + { + "checksumSHA1": "I7I4rckKCb3SLIY2AqfEmJ/maec=", + "path": "github.com/google/gopacket", + "revision": "0ad7f2610e344e58c1c95e2adda5c3258da8e97b", + "revisionTime": "2019-10-09T16:37:24Z" + }, + { + "checksumSHA1": "msNKeCHS4V89szexDN6/ixw1gYs=", + "origin": "github.com/adriansr/gopacket/afpacket", + "path": "github.com/google/gopacket/afpacket", + "revision": "a13a1c6078605c1b528df079debf4ffe33be7f8d", + "revisionTime": "2019-10-18T08:38:32Z" + }, + { + "checksumSHA1": "CElVSky7n3cuKstd4LrIxGUREuA=", + "path": "github.com/google/gopacket/layers", + "revision": "0ad7f2610e344e58c1c95e2adda5c3258da8e97b", + "revisionTime": "2019-10-09T16:37:24Z" + }, { "checksumSHA1": "WZoHSeTnVjnPIX2+U1Otst5MUKw=", "path": "github.com/googleapis/gax-go/v2", @@ -1635,6 +2318,42 @@ "revision": "d520615e531a6bf3fb69406b9eba718261285ec8", "revisionTime": "2016-12-05T14:13:22Z" }, + { + "checksumSHA1": "dF75743hHL364Dx3HKdZbBBFrpE=", + "path": "github.com/grpc-ecosystem/go-grpc-prometheus", + "revision": "ae0d8660c5f2108ca70a3776dbe0fb53cf79f1da", + "revisionTime": "2019-04-02T11:54:22Z" + }, + { + "checksumSHA1": "F4nSSbZ6FkoRA2j/AT7M7AKLn1Q=", + "path": "github.com/grpc-ecosystem/grpc-gateway/internal", + "revision": "d63917fcb0d53f39184485b9b6a0893af18a5668", + "revisionTime": "2019-07-30T05:26:13Z" + }, + { + "checksumSHA1": "nf2uWqHHbxfh9DwSetJKK1Ferw8=", + "path": "github.com/grpc-ecosystem/grpc-gateway/runtime", + "revision": "d63917fcb0d53f39184485b9b6a0893af18a5668", + "revisionTime": "2019-07-30T05:26:13Z" + }, + { + "checksumSHA1": "Ql0StAKZ/33BL73hn9JwN5pRj1c=", + "path": "github.com/grpc-ecosystem/grpc-gateway/utilities", + "revision": "d63917fcb0d53f39184485b9b6a0893af18a5668", + "revisionTime": "2019-07-30T05:26:13Z" + }, + { + "checksumSHA1": "P1Enisj7Nzf3wMl6lgPPar0ljoY=", + "path": "github.com/hashicorp/go-immutable-radix", + "revision": "0146a9aba1948ded4ed290cfd3fded2c15313f63", + "revisionTime": "2019-05-22T20:29:52Z" + }, + { + "checksumSHA1": "ZNqKZK7/uEXbevIQv5RfsOCojcs=", + "path": "github.com/hashicorp/go-memdb", + "revision": "5500ca0de0dab231b02aedabac095d43a59f31d2", + "revisionTime": "2019-05-22T20:56:50Z" + }, { "checksumSHA1": 
"5AxXPtBqAKyFGcttFzxT5hp/3Tk=", "path": "github.com/hashicorp/go-uuid", @@ -1677,6 +2396,12 @@ "revision": "633285ba52b2a67b98a3026eb87ee1a76ab60f3c", "revisionTime": "2018-07-16T14:52:14Z" }, + { + "checksumSHA1": "ofj+h8OdY+xNX8sz2lwfGmUiUiA=", + "path": "github.com/ishidawataru/sctp", + "revision": "7c296d48a2b553e41cc06904a1e6317a20694dc0", + "revisionTime": "2019-07-23T01:39:45Z" + }, { "checksumSHA1": "fPE6hs5I61ZEXc54kkSoFaafqOk=", "path": "github.com/jcmturner/gofork/encoding/asn1", @@ -1830,6 +2555,12 @@ "revision": "fc9e8d8ef48496124e79ae0df75490096eccf6fe", "revisionTime": "2017-03-22T23:44:13Z" }, + { + "checksumSHA1": "bKMZjd2wPw13VwoE7mBeSv5djFA=", + "path": "github.com/matttproud/golang_protobuf_extensions/pbutil", + "revision": "c182affec369e30f25d3eb8cd8a478dee585ae7d", + "revisionTime": "2018-12-31T17:19:20Z" + }, { "checksumSHA1": "HIr4QObwKUZBt54QIh1+aTzc4hU=", "path": "github.com/miekg/dns", @@ -1838,6 +2569,12 @@ "version": "v1.1.15", "versionExact": "v1.1.15" }, + { + "checksumSHA1": "QCDNcDE+mokVH3qx9kjMP9OZfpc=", + "path": "github.com/mitchellh/go-homedir", + "revision": "af06845cf3004701891bf4fdb884bfe4920b3727", + "revisionTime": "2019-01-27T04:21:35Z" + }, { "checksumSHA1": "sWdAYPKyaT4SW8hNQNpRS0sU4lU=", "path": "github.com/mitchellh/hashstructure", @@ -1919,6 +2656,42 @@ "version": "v1.0.0", "versionExact": "v1.0.0" }, + { + "checksumSHA1": "vsx+XkLQ3h1JYC65YjukpeubcQQ=", + "path": "github.com/prometheus/client_golang/prometheus", + "revision": "35ef65db672a76effef5f0808decd0484a636f3f", + "revisionTime": "2019-08-20T08:35:54Z" + }, + { + "checksumSHA1": "UBqhkyjCz47+S19MVTigxJ2VjVQ=", + "path": "github.com/prometheus/client_golang/prometheus/internal", + "revision": "35ef65db672a76effef5f0808decd0484a636f3f", + "revisionTime": "2019-08-20T08:35:54Z" + }, + { + "checksumSHA1": "UcahVbxaRZ35Wh58lM9AWEbUEts=", + "path": "github.com/prometheus/client_golang/prometheus/promhttp", + "revision": "35ef65db672a76effef5f0808decd0484a636f3f", + "revisionTime": "2019-08-20T08:35:54Z" + }, + { + "checksumSHA1": "ljxJzXiQ7dNsmuRIUhqqP+qjRWc=", + "path": "github.com/prometheus/common/expfmt", + "revision": "637d7c34db122e2d1a25d061423098663758d2d3", + "revisionTime": "2019-07-30T17:58:46Z" + }, + { + "checksumSHA1": "1Mhfofk+wGZ94M0+Bd98K8imPD4=", + "path": "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg", + "revision": "637d7c34db122e2d1a25d061423098663758d2d3", + "revisionTime": "2019-07-30T17:58:46Z" + }, + { + "checksumSHA1": "ccmMs+h9Jo8kE7izqsUkWShD4d0=", + "path": "github.com/prometheus/common/model", + "revision": "637d7c34db122e2d1a25d061423098663758d2d3", + "revisionTime": "2019-07-30T17:58:46Z" + }, { "checksumSHA1": "Etvt6mgzvD7ARf4Ux03LHfgSlzU=", "path": "github.com/prometheus/procfs", @@ -2117,6 +2890,12 @@ "revision": "0b059e7db4f40a062ca3d975b7500c6a0a968d87", "revisionTime": "2018-01-31T10:30:24Z" }, + { + "checksumSHA1": "QDqYZ7xfS6UPzQgYnRIKwjFYiEE=", + "path": "github.com/weppos/publicsuffix-go/publicsuffix", + "revision": "120738c23213637160ef8bdcfae4b10bf42bfffc", + "revisionTime": "2019-08-24T08:17:30Z" + }, { "checksumSHA1": "uE78U34xjlJ815TX/bhLROkjmeI=", "path": "github.com/xdg/scram", @@ -2153,6 +2932,54 @@ "revision": "b402f3114ec730d8bddb074a6c137309f561aa78", "revisionTime": "2017-04-03T16:00:31Z" }, + { + "checksumSHA1": "viPwhW5uANNuXPrOHALgjOOROO8=", + "path": "github.com/zmap/zcrypto/json", + "revision": "9051775e6a2e3a89ec27977077b09f4496febecf", + "revisionTime": "2019-07-29T16:58:52Z" + }, + { + "checksumSHA1": 
"sGkYL7ASCRfs5vliFHrxveDrtt0=", + "path": "github.com/zmap/zcrypto/util", + "revision": "9051775e6a2e3a89ec27977077b09f4496febecf", + "revisionTime": "2019-07-29T16:58:52Z" + }, + { + "checksumSHA1": "mc4HEQL6oLKd0qmRtS+CKP5DXfc=", + "path": "github.com/zmap/zcrypto/x509", + "revision": "9051775e6a2e3a89ec27977077b09f4496febecf", + "revisionTime": "2019-07-29T16:58:52Z" + }, + { + "checksumSHA1": "jAkT78QZ/SZve96q8DtAkh56r0c=", + "path": "github.com/zmap/zcrypto/x509/ct", + "revision": "9051775e6a2e3a89ec27977077b09f4496febecf", + "revisionTime": "2019-07-29T16:58:52Z" + }, + { + "checksumSHA1": "pnjkbNdKfOolkE9XbgZ1MMoh5Cg=", + "path": "github.com/zmap/zcrypto/x509/pkix", + "revision": "9051775e6a2e3a89ec27977077b09f4496febecf", + "revisionTime": "2019-07-29T16:58:52Z" + }, + { + "checksumSHA1": "abWfwUQtSGlE5OBR6+6RDODTwpk=", + "path": "github.com/zmap/zlint", + "revision": "5dcecad773158b82b5e52064ee2782d1b8a79314", + "revisionTime": "2019-08-24T13:30:31Z" + }, + { + "checksumSHA1": "6cHQaky1DkBxMV976wLbXkRPG+Y=", + "path": "github.com/zmap/zlint/lints", + "revision": "5dcecad773158b82b5e52064ee2782d1b8a79314", + "revisionTime": "2019-08-24T13:30:31Z" + }, + { + "checksumSHA1": "+vv3NjZVx7jSapZY6SI0+uJRkwQ=", + "path": "github.com/zmap/zlint/util", + "revision": "5dcecad773158b82b5e52064ee2782d1b8a79314", + "revisionTime": "2019-08-24T13:30:31Z" + }, { "checksumSHA1": "5vTmgwC38PsG6R4nZfI2lT0FG3M=", "path": "go.opencensus.io", @@ -2217,6 +3044,14 @@ "version": "v0.22.0", "versionExact": "v0.22.0" }, + { + "checksumSHA1": "Hjc1HaOLJDV3O43JnsVbGlU3wOM=", + "path": "go.opencensus.io/plugin/ochttp/propagation/tracecontext", + "revision": "9c377598961b706d1542bd2d84d538b5094d596e", + "revisionTime": "2019-05-29T19:10:40Z", + "version": "v0.22.0", + "versionExact": "v0.22.0" + }, { "checksumSHA1": "q+y8X+5nDONIlJlxfkv+OtA18ds=", "path": "go.opencensus.io/resource", @@ -2363,6 +3198,18 @@ "revision": "85e1b3f9139abd58575d728a509643924e3b2ebf", "revisionTime": "2018-09-26T22:02:31Z" }, + { + "checksumSHA1": "1ezNasqd516o9HG59beqc5s+2Ro=", + "path": "golang.org/x/crypto/cryptobyte", + "revision": "9756ffdc24725223350eb3266ffb92590d28f278", + "revisionTime": "2019-08-28T23:00:48Z" + }, + { + "checksumSHA1": "YEoV2AiZZPDuF7pMVzDt7buS9gc=", + "path": "golang.org/x/crypto/cryptobyte/asn1", + "revision": "9756ffdc24725223350eb3266ffb92590d28f278", + "revisionTime": "2019-08-28T23:00:48Z" + }, { "checksumSHA1": "2LpxYGSf068307b7bhAuVjvzLLc=", "path": "golang.org/x/crypto/ed25519", @@ -2381,12 +3228,30 @@ "revision": "4d3f4d9ffa16a13f451c3b2999e9c49e9750bf06", "revisionTime": "2018-10-23T16:52:47Z" }, + { + "checksumSHA1": "PGEmqKttvy7NzqOkwNKI5E5xtOs=", + "path": "golang.org/x/crypto/ocsp", + "revision": "9756ffdc24725223350eb3266ffb92590d28f278", + "revisionTime": "2019-08-28T23:00:48Z" + }, { "checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=", "path": "golang.org/x/crypto/pbkdf2", "revision": "5119cf507ed5294cc409c092980c7497ee5d6fd2", "revisionTime": "2018-01-22T10:39:14Z" }, + { + "checksumSHA1": "US9Z/XwhkXQXbHk1sPAQE3qMt5Y=", + "path": "golang.org/x/crypto/pkcs12", + "revision": "9756ffdc24725223350eb3266ffb92590d28f278", + "revisionTime": "2019-08-28T23:00:48Z" + }, + { + "checksumSHA1": "p0GC51McIdA7JygoP223twJ1s0E=", + "path": "golang.org/x/crypto/pkcs12/internal/rc2", + "revision": "9756ffdc24725223350eb3266ffb92590d28f278", + "revisionTime": "2019-08-28T23:00:48Z" + }, { "checksumSHA1": "iNE2KX9BQzCptlQC2DdQEVmn4R4=", "path": "golang.org/x/crypto/sha3", @@ -2979,6 +3844,12 @@ "revision": 
"a7e196e89fd3a3c4d103ca540bd5dac3a736e375", "revisionTime": "2019-06-11T19:02:12Z" }, + { + "checksumSHA1": "iNcYOdBZ/gSfNfrGMgtz/OElBGc=", + "path": "google.golang.org/genproto/googleapis/api/httpbody", + "revision": "fa694d86fc64c7654a660f8908de4e879866748d", + "revisionTime": "2019-08-01T16:59:51Z" + }, { "checksumSHA1": "8OP/WgNAs4rYFLP+HjtKzN40dj8=", "path": "google.golang.org/genproto/googleapis/iam/v1", diff --git a/vendor/github.com/elastic/beats/winlogbeat/_meta/beat.yml.tmpl b/vendor/github.com/elastic/beats/winlogbeat/_meta/beat.yml.tmpl index 2fe244d5..24b27d36 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/_meta/beat.yml.tmpl +++ b/vendor/github.com/elastic/beats/winlogbeat/_meta/beat.yml.tmpl @@ -2,9 +2,18 @@ winlogbeat.event_logs: - name: Application ignore_older: 72h - +{{if .Reference}} + # Set to true to publish fields with null values in events. + #keep_null: false +{{end}} - name: System - +{{if .Reference}} + # Set to true to publish fields with null values in events. + #keep_null: false +{{end}} - name: Security - +{{if .Reference}} + # Set to true to publish fields with null values in events. + #keep_null: false +{{end}} {{if not .Reference}}{{ template "elasticsearch_settings" . }}{{end}} diff --git a/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go b/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go index d1925c2a..47742d51 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go +++ b/vendor/github.com/elastic/beats/winlogbeat/beater/eventlogger.go @@ -34,11 +34,15 @@ type eventLogger struct { source eventlog.EventLog eventMeta common.EventMetadata processors beat.ProcessorList + keepNull bool } type eventLoggerConfig struct { common.EventMetadata `config:",inline"` // Fields and tags to add to events. Processors processors.PluginConfig `config:"processors"` + + // KeepNull determines whether published events will keep null values or omit them. + KeepNull bool `config:"keep_null"` } func newEventLogger( @@ -70,6 +74,7 @@ func (e *eventLogger) connect(pipeline beat.Pipeline) (beat.Client, error) { EventMetadata: e.eventMeta, Meta: nil, // TODO: configure modules/ES ingest pipeline? 
Processor: e.processors, + KeepNull: e.keepNull, }, ACKCount: func(n int) { addPublished(api, n) diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/configuring-howto.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/configuring-howto.asciidoc index 811254f0..445d5150 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/configuring-howto.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/configuring-howto.asciidoc @@ -42,36 +42,36 @@ include::./winlogbeat-options.asciidoc[] include::./winlogbeat-general-options.asciidoc[] -include::{libbeat-dir}/docs/queueconfig.asciidoc[] +include::{libbeat-dir}/queueconfig.asciidoc[] -include::{libbeat-dir}/docs/outputconfig.asciidoc[] +include::{libbeat-dir}/outputconfig.asciidoc[] -include::{libbeat-dir}/docs/shared-ilm.asciidoc[] +include::{libbeat-dir}/shared-ilm.asciidoc[] -include::{libbeat-dir}/docs/shared-ssl-config.asciidoc[] +include::{libbeat-dir}/shared-ssl-config.asciidoc[] include::./winlogbeat-filtering.asciidoc[] -include::{libbeat-dir}/docs/shared-config-ingest.asciidoc[] +include::{libbeat-dir}/shared-config-ingest.asciidoc[] -include::{libbeat-dir}/docs/shared-geoip.asciidoc[] +include::{libbeat-dir}/shared-geoip.asciidoc[] -include::{libbeat-dir}/docs/shared-path-config.asciidoc[] +include::{libbeat-dir}/shared-path-config.asciidoc[] -include::{libbeat-dir}/docs/shared-kibana-config.asciidoc[] +include::{libbeat-dir}/shared-kibana-config.asciidoc[] -include::{libbeat-dir}/docs/setup-config.asciidoc[] +include::{libbeat-dir}/setup-config.asciidoc[] -include::{libbeat-dir}/docs/loggingconfig.asciidoc[] +include::{libbeat-dir}/loggingconfig.asciidoc[] :standalone: -include::{libbeat-dir}/docs/shared-env-vars.asciidoc[] +include::{libbeat-dir}/shared-env-vars.asciidoc[] :standalone!: :standalone: -include::{libbeat-dir}/docs/yaml.asciidoc[] +include::{libbeat-dir}/yaml.asciidoc[] :standalone!: -include::{libbeat-dir}/docs/http-endpoint.asciidoc[] +include::{libbeat-dir}/http-endpoint.asciidoc[] -include::{libbeat-dir}/docs/reference-yml.asciidoc[] +include::{libbeat-dir}/reference-yml.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/faq.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/faq.asciidoc index 73798781..9f8d891d 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/faq.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/faq.asciidoc @@ -16,7 +16,7 @@ The index template might not be loaded correctly. See <>. Prior to the hostname configuration stage, during OS installation any event log records generated may have a randomly assigned hostname. -include::{libbeat-dir}/docs/shared-faq.asciidoc[] +include::{libbeat-dir}/shared-faq.asciidoc[] [[reading-from-evtx]] === Not sure how to read from .evtx files diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc index 0c139ade..22e403bd 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/getting-started.asciidoc @@ -1,7 +1,7 @@ [[winlogbeat-getting-started]] == Getting Started With Winlogbeat -include::{libbeat-dir}/docs/shared-getting-started-intro.asciidoc[] +include::{libbeat-dir}/shared-getting-started-intro.asciidoc[] * <> * <> @@ -95,11 +95,11 @@ To obtain a list of available event logs, run `Get-EventLog *` in PowerShell. For more information about this command, see the configuration details for <>. 
-include::{libbeat-dir}/docs/step-configure-output.asciidoc[] +include::{libbeat-dir}/step-configure-output.asciidoc[] -include::{libbeat-dir}/docs/step-configure-kibana-endpoint.asciidoc[] +include::{libbeat-dir}/step-configure-kibana-endpoint.asciidoc[] -include::{libbeat-dir}/docs/step-configure-credentials.asciidoc[] +include::{libbeat-dir}/step-configure-credentials.asciidoc[] . After you save your configuration file, test it with the following command. + @@ -111,17 +111,17 @@ PS C:\Program Files\Winlogbeat> .\winlogbeat.exe test config -c .\winlogbeat.yml [[config-winlogbeat-logstash]] === Step 3: Configure Winlogbeat to use Logstash -include::{libbeat-dir}/docs/outputs/output-logstash.asciidoc[tag=shared-logstash-config] +include::{libbeat-outputs-dir}/logstash/docs/logstash.asciidoc[tag=shared-logstash-config] [[winlogbeat-template]] === Step 4: Load the index template in Elasticsearch -include::{libbeat-dir}/docs/shared-template-load.asciidoc[] +include::{libbeat-dir}/shared-template-load.asciidoc[] [[load-kibana-dashboards]] === Step 5: Set up the Kibana dashboards -include::{libbeat-dir}/docs/dashboards.asciidoc[] +include::{libbeat-dir}/dashboards.asciidoc[] [[winlogbeat-starting]] === Step 6: Start Winlogbeat @@ -164,7 +164,7 @@ To make it easier for you to start monitoring your servers in Kibana, we have created example {beatname_uc} dashboards. You loaded the dashboards earlier when you ran the `setup` command. -include::{libbeat-dir}/docs/opendashboards.asciidoc[] +include::{libbeat-dir}/opendashboards.asciidoc[] The dashboards are provided as examples. We recommend that you {kibana-ref}/dashboard.html[customize] them to meet your needs. diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc index f20872ff..4046b891 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/index.asciidoc @@ -1,12 +1,13 @@ = Winlogbeat Reference -:libbeat-dir: {docdir}/../../libbeat +:libbeat-dir: {docdir}/../../libbeat/docs -include::{libbeat-dir}/docs/version.asciidoc[] +include::{libbeat-dir}/version.asciidoc[] + +include::{asciidoc-dir}/../../shared/versions/stack/{source_branch}.asciidoc[] include::{asciidoc-dir}/../../shared/attributes.asciidoc[] -:version: {stack-version} :beatname_lc: winlogbeat :beatname_uc: Winlogbeat :beatname_pkg: {beatname_lc} @@ -15,12 +16,12 @@ include::{asciidoc-dir}/../../shared/attributes.asciidoc[] :beat_default_index_prefix: {beatname_lc} :has_registry: :ignores_max_retries: -:has_script_processor: -:has_timestamp_processor: :win_os: :win_only: +:no_decode_cef_processor: +:no_decode_csv_fields_processor: -include::{libbeat-dir}/docs/shared-beats-attributes.asciidoc[] +include::{libbeat-dir}/shared-beats-attributes.asciidoc[] include::./overview.asciidoc[] @@ -36,13 +37,13 @@ include::./modules.asciidoc[] include::./fields.asciidoc[] -include::{libbeat-dir}/docs/monitoring/monitoring-beats.asciidoc[] +include::{libbeat-dir}/monitoring/monitoring-beats.asciidoc[] -include::{libbeat-dir}/docs/shared-securing-beat.asciidoc[] +include::{libbeat-dir}/shared-securing-beat.asciidoc[] include::./troubleshooting.asciidoc[] include::./faq.asciidoc[] -include::{libbeat-dir}/docs/contributing-to-beats.asciidoc[] +include::{libbeat-dir}/contributing-to-beats.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/modules/security.asciidoc 
b/vendor/github.com/elastic/beats/winlogbeat/docs/modules/security.asciidoc index 533bed55..295a0aed 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/modules/security.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/modules/security.asciidoc @@ -14,6 +14,16 @@ The module has transformations for the following event IDs: * 4647 - User initiated logoff (interactive logon types). * 4648 - A logon was attempted using explicit credentials. * 4672 - Special privileges assigned to new logon. +* 4720 - A user account was created. +* 4722 - A user account was enabled. +* 4723 - An attempt was made to change an account's password. +* 4724 - An attempt was made to reset an account's password. +* 4725 - A user account was disabled. +* 4726 - A user account was deleted. +* 4738 - A user account was changed. +* 4740 - A user account was locked out. +* 4767 - An account was unlocked. +* 4781 - The name of an account was changed. More event IDs will be added. diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc index d3b96ab6..04f9cd5f 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/overview.asciidoc @@ -22,4 +22,4 @@ For example, you can capture events such as: * security events * system events -include::{libbeat-dir}/docs/shared-libbeat-description.asciidoc[] +include::{libbeat-dir}/shared-libbeat-description.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc index d0baf63f..4ca55ed9 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/setting-up-running.asciidoc @@ -20,10 +20,10 @@ This section includes additional information on how to set up and run //MAINTAINERS: If you add a new file to this section, make sure you update the bulleted list ^^ too. 
-include::{libbeat-dir}/docs/shared-directory-layout.asciidoc[] +include::{libbeat-dir}/shared-directory-layout.asciidoc[] -include::{libbeat-dir}/docs/keystore.asciidoc[] +include::{libbeat-dir}/keystore.asciidoc[] -include::{libbeat-dir}/docs/command-reference.asciidoc[] +include::{libbeat-dir}/command-reference.asciidoc[] -include::{libbeat-dir}/docs/shared-shutdown.asciidoc[] +include::{libbeat-dir}/shared-shutdown.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc index 3e21e59a..cfab0a58 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/troubleshooting.asciidoc @@ -16,7 +16,7 @@ If you have issues installing or running Winlogbeat, read the following tips: [[getting-help]] == Get Help -include::{libbeat-dir}/docs/getting-help.asciidoc[] +include::{libbeat-dir}/getting-help.asciidoc[] //sets block macro for debugging.asciidoc included in next section @@ -24,4 +24,4 @@ include::{libbeat-dir}/docs/getting-help.asciidoc[] [[enable-winlogbeat-debugging]] == Debug -include::{libbeat-dir}/docs/debugging.asciidoc[] +include::{libbeat-dir}/debugging.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-filtering.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-filtering.asciidoc index c6fd80e2..f9302db3 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-filtering.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-filtering.asciidoc @@ -1,7 +1,7 @@ [[filtering-and-enhancing-data]] == Filter and Enhance the exported data -include::{libbeat-dir}/docs/processors.asciidoc[] +include::{libbeat-dir}/processors.asciidoc[] For example, the following filter configuration drops a few fields that are rarely used (`provider_guid`, `process_id`, `thread_id`, and `version`) and one nested field, `event_data.ErrorSourceTable`: @@ -12,5 +12,5 @@ processors: fields: [winlog.provider_guid, winlog.process.pid, winlog.process.thread.id, winlog.version, winlog.event_data.ErrorSourceTable] ----------------------------------------------------- -include::{libbeat-dir}/docs/processors-using.asciidoc[] +include::{libbeat-dir}/processors-using.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-general-options.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-general-options.asciidoc index df2e6655..6fb7ba16 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-general-options.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-general-options.asciidoc @@ -4,4 +4,4 @@ You can specify settings in the +{beatname_lc}.yml+ config file to control the general behavior of {beatname_uc}. -include::{libbeat-dir}/docs/generalconfig.asciidoc[] +include::{libbeat-dir}/generalconfig.asciidoc[] diff --git a/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-options.asciidoc b/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-options.asciidoc index ad60f350..e22ca3c5 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-options.asciidoc +++ b/vendor/github.com/elastic/beats/winlogbeat/docs/winlogbeat-options.asciidoc @@ -376,6 +376,12 @@ A list of processors to apply to the data generated by the event log. See <> for information about specifying processors in your config. 
+[float] +==== `event_logs.keep_null` + +If this option is set to `true`, fields with `null` values will be published in +the output document. By default, `keep_null` is set to `false`. + [float] ==== `event_logs.no_more_events` diff --git a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go index 62635a3f..3023ad70 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go +++ b/vendor/github.com/elastic/beats/winlogbeat/eventlog/eventlog.go @@ -132,6 +132,7 @@ func (e Record) ToEvent() beat.Event { // ECS data m.Put("event.kind", "event") m.Put("event.code", e.EventIdentifier.ID) + m.Put("event.provider", e.Provider.Name) addOptional(m, "event.action", e.Task) m.Put("event.created", time.Now()) diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml index 0ea03874..5d0dfa20 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.reference.yml @@ -26,10 +26,19 @@ winlogbeat.event_logs: - name: Application ignore_older: 72h + # Set to true to publish fields with null values in events. + #keep_null: false + - name: System + # Set to true to publish fields with null values in events. + #keep_null: false + - name: Security + # Set to true to publish fields with null values in events. + #keep_null: false + #================================ General ====================================== @@ -991,7 +1000,7 @@ setup.template.settings: #setup.ilm.enabled: auto # Set the prefix used in the index lifecycle write alias name. The default alias -# name is 'winlogbeat-%{[agent.version]}'. +# name is 'winlogbeat-%{[agent.version]}'. #setup.ilm.rollover_alias: "winlogbeat" # Set the rollover index pattern. The default is "%{now/d}-000001". @@ -1254,12 +1263,21 @@ logging.files: # Defines if the HTTP endpoint is enabled. #http.enabled: false -# The HTTP endpoint will bind to this hostname or IP address. It is recommended to use only localhost. +# The HTTP endpoint will bind to this hostname, IP address, Unix socket, or named pipe. +# When using IP addresses, it is recommended to only use localhost. #http.host: localhost # Port on which the HTTP endpoint will bind. Default is 5066. #http.port: 5066 +# Define which user should own the named pipe. +#http.named_pipe.user: + +# Define the permissions that should be applied to the named pipe, using the Security +# Descriptor Definition Language (SDDL). This option cannot be used with +# `http.user`. +#http.named_pipe.security_descriptor: + #============================= Process Security ================================ # Enable or disable seccomp system call filtering on Linux. Default is enabled. diff --git a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.yml b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.yml index fd7fe317..253c5d41 100644 --- a/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.yml +++ b/vendor/github.com/elastic/beats/winlogbeat/winlogbeat.yml @@ -127,6 +127,7 @@ output.elasticsearch: processors: - add_host_metadata: ~ - add_cloud_metadata: ~ + - add_docker_metadata: ~ #================================ Logging =====================================
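The `keep_null` additions above touch three places: the option docs, the per-log config struct in `eventlogger.go`, and the commented-out entries in the reference config. Pulling those together, a minimal `winlogbeat.yml` sketch that enables the option for a single event log (the log name and `ignore_older` value are simply the examples already used above) might look like:

-------------------------------------------------------------------------------
winlogbeat.event_logs:
  - name: Application
    ignore_older: 72h
    # New in this change: publish fields whose value is null instead of
    # omitting them from the event. Defaults to false.
    keep_null: true
-------------------------------------------------------------------------------

Because the option lives on each `event_logs` entry, it can be enabled for one log without affecting the others.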
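The `ToEvent` change above starts populating `event.provider` from the record's provider name. As a rough illustration only (the field values are hypothetical, not taken from this diff), the ECS metadata on a published event would now look along these lines:

-------------------------------------------------------------------------------
# Hypothetical excerpt of a published Winlogbeat event, shown as YAML.
event:
  kind: event                                    # constant, set in ToEvent
  code: 4625                                     # e.EventIdentifier.ID (illustrative)
  provider: Microsoft-Windows-Security-Auditing  # e.Provider.Name (new in this change)
  action: Logon                                  # addOptional(m, "event.action", e.Task)
  created: "2019-10-20T12:00:00.000Z"            # time.Now() at read time
-------------------------------------------------------------------------------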
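The new `http.named_pipe.*` settings in the reference config only take effect when the monitoring HTTP endpoint is bound to a Windows named pipe. A hedged sketch, assuming an `npipe:///` scheme for `http.host` and the pipe name `winlogbeat` (neither is confirmed by this diff, both are illustrative), with a standard SDDL string:

-------------------------------------------------------------------------------
# Bind the monitoring HTTP endpoint to a named pipe instead of a TCP port.
http.enabled: true
# Assumption: named pipes are addressed via an npipe:/// host value.
http.host: npipe:///winlogbeat
# SDDL breakdown: D: starts the DACL, P marks it protected, and
# (A;;GA;;;BA) allows GenericAll access to Builtin Administrators.
http.named_pipe.security_descriptor: "D:P(A;;GA;;;BA)"
-------------------------------------------------------------------------------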